#!/usr/bin/env bash
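# Benchmark driver for PaddleOCR text-detection training. Positional arguments
# (defaults taken from _set_params below):
#   $1 run_mode    sp (single process) or mp (paddle.distributed.launch), default sp
#   $2 batch_size  per-card batch size, default 64
#   $3 fp_item     precision tag used only in the model/log names, default fp32
#   $4 max_epoch   value passed as Global.epoch_num, default 10
#   $5 model_item  detection config name under configs/det/ (without .yml)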
function _set_params(){
    run_mode=${1:-"sp"}            # sp: single process, mp: multi-process launch
    batch_size=${2:-"64"}
    fp_item=${3:-"fp32"}
    max_epoch=${4:-"10"}
    model_item=${5:-"model_item"}
    run_log_path=${TRAIN_LOG_DIR:-$(pwd)}
    base_batch_size=${batch_size}
    # Benchmark metadata and log-parsing settings; these are not referenced
    # elsewhere in this script and are presumably consumed by the sourced run_model.sh.
    mission_name="OCR"
    direction_id="0"
    ips_unit="images/sec"
    skip_steps=2                   # initial steps skipped when averaging throughput
    keyword="ips:"                 # keyword used to locate throughput lines in the training log
    index="1"
    model_name=${model_item}_bs${batch_size}_${fp_item}
    # Count GPUs from CUDA_VISIBLE_DEVICES (comma-separated list of device ids).
    device=${CUDA_VISIBLE_DEVICES//,/ }
    arr=(${device})
    num_gpu_devices=${#arr[*]}
    log_file=${run_log_path}/${model_item}_${run_mode}_bs${batch_size}_${fp_item}_${num_gpu_devices}
}
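
# Assemble the training command for the chosen run_mode, run it with a
# 15-minute timeout, and report SUCCESS/FAIL for ${model_name}.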
function _train(){
    echo "Train on ${num_gpu_devices} GPUs"
    echo "current CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES, gpus=$num_gpu_devices, batch_size=$batch_size"
    # Arguments appended to tools/train.py regardless of run_mode.
    train_cmd="-c configs/det/${model_item}.yml -o Train.loader.batch_size_per_card=${batch_size} Global.epoch_num=${max_epoch} Global.eval_batch_step=[0,20000] Global.print_batch_step=2"
    case ${run_mode} in
    sp)
        train_cmd="python tools/train.py ${train_cmd}"
        ;;
    mp)
        rm -rf ./mylog
        train_cmd="python -m paddle.distributed.launch --log_dir=./mylog --gpus=$CUDA_VISIBLE_DEVICES tools/train.py ${train_cmd}"
        ;;
    *) echo "run_mode must be sp or mp"; exit 1;;
    esac
    echo ${train_cmd}
    timeout 15m ${train_cmd} > ${log_file} 2>&1
    if [ $? -ne 0 ]; then
        echo -e "${model_name}, FAIL"
        export job_fail_flag=1
    else
        echo -e "${model_name}, SUCCESS"
        export job_fail_flag=0
    fi
    # For multi-process runs, keep worker 0's log as the benchmark log file.
    if [ "${run_mode}" = "mp" -a -d mylog ]; then
        rm ${log_file}
        cp mylog/workerlog.0 ${log_file}
    fi
}
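
# run_model.sh (resolved via BENCHMARK_ROOT) is assumed to define _run, which
# calls _train above and parses ${log_file} using the keyword/skip_steps/index
# settings set in _set_params.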
source ${BENCHMARK_ROOT}/scripts/run_model.sh
_set_params "$@"
_run