From 67b3f949b6ff9e9929db47067c78fab6326f5070 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=A2=A6=E5=85=B8?= <30826840+dlutsniper@users.noreply.github.com>
Date: Mon, 16 Oct 2023 16:13:10 +0800
Subject: [PATCH 1/2] Update finetune.py

For lower CPU memory usage:
1. Change the default device_map to "auto":
   device_map = "auto"
2. Add a parameter when loading the model:
   low_cpu_mem_usage=True,
---
 finetune.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/finetune.py b/finetune.py
index a4fee87..ccd5f70 100644
--- a/finetune.py
+++ b/finetune.py
@@ -278,7 +278,7 @@ def train():
     local_rank = training_args.local_rank
 
-    device_map = None
+    device_map = "auto"
     world_size = int(os.environ.get("WORLD_SIZE", 1))
     ddp = world_size != 1
     if lora_args.q_lora:
@@ -302,6 +302,7 @@ def train():
         config=config,
         cache_dir=training_args.cache_dir,
         device_map=device_map,
+        low_cpu_mem_usage=True,
         trust_remote_code=True,
         quantization_config=GPTQConfig(
             bits=4, disable_exllama=True

From f3d7c69be225c3a6b26bb949c0c7126dfd54e26a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=A2=A6=E5=85=B8?= <30826840+dlutsniper@users.noreply.github.com>
Date: Wed, 18 Oct 2023 15:24:45 +0800
Subject: [PATCH 2/2] Update finetune.py

Only LoRA finetuning needs low CPU memory usage
---
 finetune.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/finetune.py b/finetune.py
index ccd5f70..a74a258 100644
--- a/finetune.py
+++ b/finetune.py
@@ -302,7 +302,7 @@ def train():
         config=config,
         cache_dir=training_args.cache_dir,
         device_map=device_map,
-        low_cpu_mem_usage=True,
+        low_cpu_mem_usage=training_args.use_lora and not lora_args.q_lora,
         trust_remote_code=True,
         quantization_config=GPTQConfig(
             bits=4, disable_exllama=True