run.sh
#!/bin/bash
#SBATCH --job-name=train-llava
#SBATCH --output=Results/output.txt
#SBATCH --error=Results/error.err
#SBATCH --mail-type=END
#SBATCH [email protected]
#SBATCH --account=vemuri
#SBATCH --qos=vemuri
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=1
#SBATCH --mem-per-cpu=20gb
#SBATCH --partition=gpu
#SBATCH --gres=gpu:a100:4
#SBATCH --time=96:00:00
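# Resource summary (from the directives above): 1 node, 4 tasks (one per GPU),
# 4x A100 GPUs, 1 CPU per task at 20 GB each (~80 GB host RAM), 96 h wall time.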
export MASTER_PORT=29999  # rendezvous port for torch.distributed / DeepSpeed
FILE="Results/output.txt"  # same path as the #SBATCH --output target above; SLURM's captured stdout and these log lines share this file
echo "Date = $(date)" > "$FILE"  # truncate and start a fresh log
echo "host = $(hostname -s)" >> "$FILE"
echo "Directory = $(pwd)" >> "$FILE"
echo "MASTER_PORT = $MASTER_PORT" >> "$FILE"
echo >> "$FILE"
nvidia-smi >> "$FILE"  # record GPU inventory and driver info
free -h >> "$FILE"     # record host memory
echo >> "$FILE"
T1=$(date +%s)  # wall-clock start time (epoch seconds)
ml conda        # load the cluster's conda module
conda activate /blue/amolstad/y.jin/anaconda3/envs/MyNlp
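# Launch LoRA fine-tuning with DeepSpeed ZeRO-2 on all 4 GPUs, resuming from
# checkpoint-1000. Effective global batch size = 6 per device x 4 GPUs x 8
# gradient-accumulation steps = 192 samples per optimizer step.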
srun --mpi=pmi2 --exclusive deepspeed llava_run.py \
    --deepspeed ds_zero2_no_offload.json \
    --model_name_or_path /blue/amolstad/y.jin/train-llava/my-model/model-01 \
    --train_type use_lora \
    --data_path /blue/amolstad/y.jin/train-llava/data \
    --resume_from_checkpoint /blue/amolstad/y.jin/train-llava/Results/checkpoint-1000 \
    --bf16 true \
    --fp16 false \
    --output_dir /blue/amolstad/y.jin/train-llava/Results \
    --num_train_epochs 5 \
    --per_device_train_batch_size 6 \
    --per_device_eval_batch_size 12 \
    --gradient_accumulation_steps 8 \
    --evaluation_strategy "no" \
    --save_strategy "steps" \
    --save_total_limit 3 \
    --report_to "tensorboard" \
    --learning_rate 4e-4 \
    --logging_steps 50 >> "$FILE"
echo >> "$FILE"
T2=$(date +%s)  # wall-clock end time (epoch seconds)
ELAPSED=$((T2 - T1))
echo "Elapsed Time = ${ELAPSED} seconds" >> "$FILE"