Merge pull request #964 from QwenLM/fix-finetune

Fix a bug with low_cpu_mem_usage in finetune.py.
main
yang fan committed 1 year ago (via GitHub)
commit 204c2c59f4

@@ -291,9 +291,9 @@ def train():
     ):
         raise RuntimeError("ZeRO3 is incompatible with LoRA when finetuning on base model.")
-    model_load_kwargs = {}
-    if deepspeed.is_deepspeed_zero3_enabled():
-        model_load_kwargs['low_cpu_mem_usage'] = False
+    model_load_kwargs = {
+        'low_cpu_mem_usage': not deepspeed.is_deepspeed_zero3_enabled(),
+    }
     # Set RoPE scaling factor
     config = transformers.AutoConfig.from_pretrained(
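
For context: the old code passed low_cpu_mem_usage=False only when ZeRO-3 was active and omitted the key otherwise, while the fixed code always sets it, so non-ZeRO-3 runs load the checkpoint with low_cpu_mem_usage=True and avoid materializing a second full copy of the weights in CPU RAM. Below is a minimal sketch of how these kwargs could reach model loading; the checkpoint name and trust_remote_code flag are illustrative, not part of the PR, and the deepspeed helper is assumed to be the integration shim that transformers exposes.

# Minimal sketch, not the exact finetune.py code.
import transformers
from transformers import deepspeed  # shim exposing is_deepspeed_zero3_enabled()

model_load_kwargs = {
    # low_cpu_mem_usage=True streams weights in without first allocating the
    # full model on the CPU, but it is incompatible with ZeRO-3, which
    # partitions parameters across ranks at init time, so it must be
    # disabled whenever ZeRO-3 is active.
    'low_cpu_mem_usage': not deepspeed.is_deepspeed_zero3_enabled(),
}

model = transformers.AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen-7B",        # illustrative checkpoint
    trust_remote_code=True,  # illustrative; Qwen checkpoints ship custom code
    **model_load_kwargs,
)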
