From 73b34d4a9d32b2cf5c40c10ccd4e1a749deb6b50 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=8B=8F=E9=98=B3?=
Date: Mon, 15 Jan 2024 12:43:39 +0800
Subject: [PATCH] Fix bug of low_cpu_mem_usage in finetune.py.

---
 finetune.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/finetune.py b/finetune.py
index dd7df12..4a4e334 100644
--- a/finetune.py
+++ b/finetune.py
@@ -291,9 +291,9 @@ def train():
     ):
         raise RuntimeError("ZeRO3 is incompatible with LoRA when finetuning on base model.")
 
-    model_load_kwargs = {}
-    if deepspeed.is_deepspeed_zero3_enabled():
-        model_load_kwargs['low_cpu_mem_usage'] = False
+    model_load_kwargs = {
+        'low_cpu_mem_usage': not deepspeed.is_deepspeed_zero3_enabled(),
+    }
 
     # Set RoPE scaling factor
     config = transformers.AutoConfig.from_pretrained(