run_benchmark_det.sh

#!/usr/bin/env bash
# Usage example: CUDA_VISIBLE_DEVICES=0 bash run_benchmark.sh ${run_mode} ${bs_item} ${fp_item} 500 ${model_mode}
# Parameter descriptions
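# A concrete invocation, shown only as a sketch -- the config name, device list, and
# paths below are assumed example values, not ones required by this script:
#   BENCHMARK_ROOT=/workspace/benchmark TRAIN_LOG_DIR=/tmp/train_log \
#   CUDA_VISIBLE_DEVICES=0,1 bash run_benchmark_det.sh mp 16 fp32 1 det_mv3_db_v2.0
# The last argument must have a matching config file at configs/det/<model_item>.yml.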
function _set_params(){
    run_mode=${1:-"sp"}                     # sp = single card | mp = multi card
    batch_size=${2:-"64"}
    fp_item=${3:-"fp32"}                    # fp32|fp16
    max_epoch=${4:-"10"}                    # optional; used if you need to modify the code to stop training early
    model_item=${5:-"model_item"}
    run_log_path=${TRAIN_LOG_DIR:-$(pwd)}   # TRAIN_LOG_DIR will be set by QA later

    # Parameters needed for log parsing
    base_batch_size=${batch_size}
    mission_name="OCR"
    direction_id="0"
    ips_unit="images/sec"
    skip_steps=2                            # steps to skip when parsing the log; the first few steps of some models are slow (required)
    keyword="ips:"                          # keyword that selects the log lines containing the data (required)
    index="1"
    model_name=${model_item}_bs${batch_size}_${fp_item}   # model_item must match the yml file name; model_name is shown in the frontend after data ingestion

    # Do not modify below
    device=${CUDA_VISIBLE_DEVICES//,/ }
    arr=(${device})
    num_gpu_devices=${#arr[*]}
    log_file=${run_log_path}/${model_item}_${run_mode}_bs${batch_size}_${fp_item}_${num_gpu_devices}
}
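# For the example invocation above, the derived values would be roughly
# (assuming run_log_path=/tmp/train_log):
#   device="0 1"  ->  num_gpu_devices=2
#   model_name=det_mv3_db_v2.0_bs16_fp32
#   log_file=/tmp/train_log/det_mv3_db_v2.0_mp_bs16_fp32_2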
function _train(){
    echo "Train on ${num_gpu_devices} GPUs"
    echo "current CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES, gpus=$num_gpu_devices, batch_size=$batch_size"

    train_cmd="-c configs/det/${model_item}.yml -o Train.loader.batch_size_per_card=${batch_size} Global.epoch_num=${max_epoch} Global.eval_batch_step=[0,20000] Global.print_batch_step=2"
    case ${run_mode} in
    sp)
        train_cmd="python tools/train.py ${train_cmd}"
        ;;
    mp)
        rm -rf ./mylog
        train_cmd="python -m paddle.distributed.launch --log_dir=./mylog --gpus=$CUDA_VISIBLE_DEVICES tools/train.py ${train_cmd}"
        ;;
    *) echo "choose run_mode(sp or mp)"; exit 1;;
    esac
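    # For reference, the mp branch with the example values above expands to roughly
    # (assumed values, wrapped here for readability):
    #   python -m paddle.distributed.launch --log_dir=./mylog --gpus=0,1 tools/train.py \
    #       -c configs/det/det_mv3_db_v2.0.yml -o Train.loader.batch_size_per_card=16 \
    #       Global.epoch_num=1 Global.eval_batch_step=[0,20000] Global.print_batch_step=2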
    # Do not modify below
    echo ${train_cmd}
    timeout 15m ${train_cmd} > ${log_file} 2>&1
    if [ $? -ne 0 ]; then
        echo -e "${model_name}, FAIL"
        export job_fail_flag=1
    else
        echo -e "${model_name}, SUCCESS"
        export job_fail_flag=0
    fi

    # In mp mode the per-worker logs go to ./mylog; keep worker 0's log as the result
    if [ ${run_mode} = "mp" -a -d mylog ]; then
        rm ${log_file}
        cp mylog/workerlog.0 ${log_file}
    fi
}
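# Optional sanity check on the produced log before handing it to run_model.sh. This is a
# sketch that assumes the trainer prints lines containing "ips: <value> images/sec", which
# is what the keyword/ips_unit settings above imply; `tail -n +3` mirrors skip_steps=2:
#   grep "ips:" "${log_file}" | tail -n +3 | awk -F'ips: ' '{print $2}'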
# run_model.sh parses benchmark-compliant logs with the analysis.py script. For integration testing it can be
# downloaded from the benchmark repo: https://github.com/PaddlePaddle/benchmark/blob/master/scripts/run_model.sh
# If you only want to produce the training log without parsing, comment out this line; it must be enabled for submission.
source ${BENCHMARK_ROOT}/scripts/run_model.sh
_set_params $@
#_train        # uncomment to only produce the training log, without parsing
_run           # defined in run_model.sh; it calls _train. Comment it out if you only want the training log without integration; it must be enabled for submission.