
PPU vs Nvidia A100

Created on January 6 | Last edited on January 6

Single node, single GPU


Run metadata (A100 run vs PPU run)

args (A100 run, a100_Qwen2.5-7B-Instruct-v8-bl):
["--stage","sft","--model_name_or_path","/mnt/public/base_llms/hub/Qwen/Qwen2.5-7B-Instruct","--do_train","True","--dataset_dir","../data","--dataset","zhengwen_dataset_v8","--template","qwen","--finetuning_type","lora","--use_dora","False","--use_rslora","False","--use_unsloth","False","--use_unsloth_gc","False","--enable_liger_kernel","False","--lora_alpha","64","--lora_rank","32","--lora_target","all","--lora_dropout","0","--output_dir","/mnt/public/sunjinfeng/gongwen_llms/a100_Qwen2.5-7B-Instruct-v8-bl","--overwrite_output_dir","--overwrite_cache","--per_device_train_batch_size","4","--per_device_eval_batch_size","4","--gradient_accumulation_steps","4","--cutoff_len","5000","--lr_scheduler_type","cosine","--logging_steps","1","--save_steps","4900","--learning_rate","1e-5","--num_train_epochs","1","--warmup_ratio","0.01","--weight_decay","0","--plot_loss","True","--flash_attn","auto","--report_to","wandb","--run_name","a100_Qwen2.5-7B-Instruct-v8-bl","--bf16","True","--preprocessing_num_workers","64","--eval_steps","490","--val_size","0.05","--compute_accuracy","True","--evaluation_strategy","steps","--load_best_model_at_end","True","--additional_target","embed_tokens,lm_head","--resize_vocab","True"]

args (PPU run, ppu_Qwen2.5-7B-Instruct-v8-bl):
["--stage","sft","--model_name_or_path","/mnt/base_llms/hub/Qwen/Qwen2.5-7B-Instruct","--do_train","True","--dataset_dir","../data","--dataset","zhengwen_dataset_v8","--template","qwen","--finetuning_type","lora","--use_dora","False","--use_rslora","False","--use_unsloth","False","--use_unsloth_gc","False","--enable_liger_kernel","False","--lora_alpha","64","--lora_rank","32","--lora_target","all","--lora_dropout","0","--output_dir","/mnt/sunjinfeng/gongwen_llms/ppu_Qwen2.5-7B-Instruct-v8-bl","--overwrite_output_dir","--overwrite_cache","--per_device_train_batch_size","4","--per_device_eval_batch_size","4","--gradient_accumulation_steps","4","--cutoff_len","5000","--lr_scheduler_type","cosine","--logging_steps","1","--save_steps","4900","--learning_rate","1e-5","--num_train_epochs","1","--warmup_ratio","0.01","--weight_decay","0","--plot_loss","True","--flash_attn","auto","--report_to","wandb","--run_name","ppu_Qwen2.5-7B-Instruct-v8-bl","--bf16","True","--preprocessing_num_workers","64","--eval_steps","490","--val_size","0.05","--compute_accuracy","True","--evaluation_strategy","steps","--load_best_model_at_end","True","--additional_target","embed_tokens,lm_head","--resize_vocab","True"]

The two commands are identical except for the model path prefix (/mnt/public/... vs /mnt/...), the output_dir, and the run_name; every training hyperparameter (LoRA rank 32 / alpha 64, learning rate 1e-5, per-device batch size 4 with gradient accumulation 4, cutoff_len 5000, bf16, 1 epoch) matches, as the sketch below makes easy to re-check.

CPU count (physical / logical): 64 / 128 vs 176 / 176
LLaMA-Factory (https://github.com/hiyouga/LLaMA-Factory.git) commit: 51ef90ce0ace4a45f9c01ba7e674adf5e3c92baa vs b55890291b0049dd90ef4d1d0bf0ba1efb1e4f0a
GPU: NVIDIA A100-SXM4-80GB vs PPU-ZW810E
GPU count: 8 vs 16
OS: Linux-5.15.0-105-generic-x86_64-with-glibc2.35 vs Linux-5.10.134-008.12.kangaroo.al8.x86_64-x86_64-with-glibc2.35
Program: /root/miniconda3/envs/llm/bin/llamafactory-cli vs /usr/local/bin/llamafactory-cli
Python: CPython 3.10.15 vs CPython 3.10.13+gc
Duration: 11h 35m 47s vs 1d 22h 20m 55s
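To re-check that claim mechanically, here is a minimal Python sketch (my own helper, not part of LLaMA-Factory or the W&B export) that parses a flat "--flag value" list into a dict and diffs two runs; the arg lists in the example are truncated subsets of the full commands above.

```python
# Minimal sketch (not part of LLaMA-Factory): parse the flat "--flag value"
# lists logged by W&B into dicts and diff two runs. The arg lists below are
# truncated subsets of the full commands shown above.
def args_to_dict(argv):
    out, i = {}, 0
    while i < len(argv):
        key = argv[i].lstrip("-")
        if i + 1 < len(argv) and not argv[i + 1].startswith("--"):
            out[key] = argv[i + 1]   # "--flag value" pair
            i += 2
        else:
            out[key] = True          # bare flag such as "--overwrite_cache"
            i += 1
    return out

a100 = args_to_dict(["--finetuning_type", "lora", "--lora_rank", "32",
                     "--overwrite_cache", "--run_name", "a100_Qwen2.5-7B-Instruct-v8-bl"])
ppu  = args_to_dict(["--finetuning_type", "lora", "--lora_rank", "32",
                     "--overwrite_cache", "--run_name", "ppu_Qwen2.5-7B-Instruct-v8-bl"])
print({k: (a100[k], ppu[k]) for k in a100 if a100[k] != ppu[k]})
# -> {'run_name': ('a100_Qwen2.5-7B-Instruct-v8-bl', 'ppu_Qwen2.5-7B-Instruct-v8-bl')}
```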
Summary (A100 run vs PPU run)

_wandb.runtime: 41731 s vs 166851 s
eval accuracy: 0.65322 vs 0.65483
eval loss: 1.47101 vs 1.46346
eval runtime: 1093.164 s vs 1974.9064 s
eval samples/second: 3.839 vs 2.125

train: [metric charts for the 2-run set]
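For a quick read on the gap, the summary values above can be turned into ratios directly; the sketch below is plain Python with the numbers copied from the table (nothing is pulled from the W&B API).

```python
# Ratios derived from the W&B summary values above (first value = A100 run,
# second = PPU run, matching the run metadata table).
a100_runtime_s, ppu_runtime_s = 41731, 166851        # _wandb.runtime (s)
a100_eval_sps, ppu_eval_sps   = 3.839, 2.125         # eval samples/second
a100_eval_rt,  ppu_eval_rt    = 1093.164, 1974.9064  # eval runtime (s)

print(f"total wall-clock ratio (PPU / A100): {ppu_runtime_s / a100_runtime_s:.2f}x")  # ~4.00x
print(f"eval throughput ratio (A100 / PPU): {a100_eval_sps / ppu_eval_sps:.2f}x")     # ~1.81x
# Sanity check: samples/s x runtime recovers roughly the same eval-set size for both runs.
print(round(a100_eval_sps * a100_eval_rt), round(ppu_eval_sps * ppu_eval_rt))         # 4197 4197
```

By these numbers the PPU run takes about 4.0x the total wall-clock time of the A100 run, while the measured eval-time throughput gap is about 1.8x.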


Single node, eight GPUs


[Metric charts for the 2-run set]