From f3d7c69be225c3a6b26bb949c0c7126dfd54e26a Mon Sep 17 00:00:00 2001
From: 梦典 <30826840+dlutsniper@users.noreply.github.com>
Date: Wed, 18 Oct 2023 15:24:45 +0800
Subject: [PATCH] Update finetune.py

Only LoRA fine-tuning (without Q-LoRA) needs low_cpu_mem_usage enabled.
---
 finetune.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/finetune.py b/finetune.py
index ccd5f70..a74a258 100644
--- a/finetune.py
+++ b/finetune.py
@@ -302,7 +302,7 @@ def train():
         config=config,
         cache_dir=training_args.cache_dir,
         device_map=device_map,
-        low_cpu_mem_usage=True,
+        low_cpu_mem_usage=training_args.use_lora and not lora_args.q_lora,
         trust_remote_code=True,
         quantization_config=GPTQConfig(
             bits=4, disable_exllama=True
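
For reference, a minimal sketch of the loading path this hunk targets. The flag names `training_args.use_lora`, `lora_args.q_lora`, and the other keyword arguments are taken from the hunk itself; the wrapper function and `model_args.model_name_or_path` are assumptions based on typical Hugging Face transformers usage, not the repository's exact code:

import transformers

def load_base_model(model_args, training_args, lora_args, config, device_map):
    # low_cpu_mem_usage=True makes from_pretrained materialize weights
    # lazily, cutting peak host RAM during loading. Under Q-LoRA the model
    # goes through the GPTQ quantization path instead, so the flag is only
    # turned on for plain LoRA fine-tuning, as in the patch above.
    lazy_load = bool(training_args.use_lora and not lora_args.q_lora)
    return transformers.AutoModelForCausalLM.from_pretrained(
        model_args.model_name_or_path,  # assumed argument name
        config=config,
        cache_dir=training_args.cache_dir,
        device_map=device_map,
        low_cpu_mem_usage=lazy_load,
        trust_remote_code=True,
        # quantization_config (GPTQConfig) omitted here; the patch leaves
        # that argument unchanged.
    )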