train.sh
#!/bin/bash
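# SLURM resource request: 1 node, 4 tasks (1 CPU each, 20 GB per CPU),
# 4x A100 GPUs on the gpu partition, 96-hour wall-clock limit.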
#SBATCH --job-name=sft-internlm
#SBATCH --output=Results/output.txt
#SBATCH --error=Results/error.err
#SBATCH --mail-type=END
#SBATCH [email protected]
#SBATCH --account=vemuri
#SBATCH --qos=vemuri
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=1
#SBATCH --mem-per-cpu=20gb
#SBATCH --partition=gpu
#SBATCH --gres=gpu:a100:4
#SBATCH --time=96:00:00
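# Write a short run header (date, host, working directory) to the log file.
# Note: this is the same path as the SLURM --output file above.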
FILE="Results/output.txt"
echo "Date = $(date)" > $FILE
echo "host = $(hostname -s)" >> $FILE
echo "Directory = $(pwd)" >> $FILE
echo >> $FILE
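# Record the start time, then load conda and activate the training environment.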
T1=$(date +%s)
ml conda
conda activate /blue/amolstad/y.jin/anaconda3/envs/MyNlp
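# Launch LoRA supervised fine-tuning of InternLM-7B with DeepSpeed (ZeRO-2,
# no offload) across the 4 GPUs in bf16, appending training output to the log.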
srun deepspeed sft_run.py \
--deepspeed ds_zero2_no_offload.json \
--model_name_or_path /blue/amolstad/y.jin/sft-internlm/internlm-7b \
--use_lora true \
--cache_dir /blue/amolstad/y.jin/sft-internlm/internlm-7b \
--use_deepspeed true \
--data_path /blue/amolstad/y.jin/sft-internlm/sft_data \
--bf16 true \
--fp16 false \
--output_dir /blue/amolstad/y.jin/sft-internlm/Results \
--num_train_epochs 2 \
--per_device_train_batch_size 3 \
--per_device_eval_batch_size 1 \
--gradient_accumulation_steps 8 \
--evaluation_strategy "no" \
--save_strategy "epoch" \
--save_total_limit 3 \
--learning_rate 4e-4 \
--logging_steps 10 \
    --tf32 false \
--model_max_length 2048 >> $FILE
echo >> $FILE
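# Record the end time and log the total elapsed wall-clock time.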
T2=$(date +%s)
ELAPSED=$((T2 - T1))
echo "Elapsed Time = $ELAPSED" >> $FILE