test_ptq_inference_python.sh

#!/bin/bash
source test_tipc/common_func.sh
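
# Usage sketch (the config path is a hypothetical example):
#   bash test_tipc/test_ptq_inference_python.sh test_tipc/configs/<model>/ptq_infer_python.txt whole_infer [gpu_id]
#
# This script assumes common_func.sh provides the helpers used below:
#   func_parser_key / func_parser_value  -- split a "key:value" config line
#   func_set_params                      -- render a key/value pair as a "key=value" CLI argument
#   status_check                         -- append a pass/fail record to the status log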

FILENAME=$1
# MODE must be one of ['whole_infer']
MODE=$2

IFS=$'\n'
# parse klquant_infer params
# read the first 20 config lines: indices lines[0..19] are used below
dataline=$(awk 'NR==1, NR==20{print}' $FILENAME)
lines=(${dataline})
model_name=$(func_parser_value "${lines[1]}")
python=$(func_parser_value "${lines[2]}")
export_weight=$(func_parser_key "${lines[3]}")
save_infer_key=$(func_parser_key "${lines[4]}")
# parse inference model params
infer_model_dir_list=$(func_parser_value "${lines[5]}")
infer_export_list=$(func_parser_value "${lines[6]}")
infer_is_quant=$(func_parser_value "${lines[7]}")
# parse inference params
inference_py=$(func_parser_value "${lines[8]}")
use_gpu_key=$(func_parser_key "${lines[9]}")
use_gpu_list=$(func_parser_value "${lines[9]}")
use_mkldnn_key=$(func_parser_key "${lines[10]}")
use_mkldnn_list=$(func_parser_value "${lines[10]}")
cpu_threads_key=$(func_parser_key "${lines[11]}")
cpu_threads_list=$(func_parser_value "${lines[11]}")
batch_size_key=$(func_parser_key "${lines[12]}")
batch_size_list=$(func_parser_value "${lines[12]}")
use_trt_key=$(func_parser_key "${lines[13]}")
use_trt_list=$(func_parser_value "${lines[13]}")
precision_key=$(func_parser_key "${lines[14]}")
precision_list=$(func_parser_value "${lines[14]}")
infer_model_key=$(func_parser_key "${lines[15]}")
image_dir_key=$(func_parser_key "${lines[16]}")
infer_img_dir=$(func_parser_value "${lines[16]}")
save_log_key=$(func_parser_key "${lines[17]}")
save_log_value=$(func_parser_value "${lines[17]}")
benchmark_key=$(func_parser_key "${lines[18]}")
benchmark_value=$(func_parser_value "${lines[18]}")
infer_key1=$(func_parser_key "${lines[19]}")
infer_value1=$(func_parser_value "${lines[19]}")
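
# The parsed lines are expected to follow the TIPC "key:value" layout, where
# "|"-separated values define the sweep lists used below. A hypothetical
# excerpt (names and values are placeholders, not taken from a real config):
#   --use_gpu:True|False
#   --enable_mkldnn:False|True
#   --cpu_threads:1|6
#   --batch_size:1
#   --use_tensorrt:False|True
#   --precision:int8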

LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_python.log"

function func_inference(){
    IFS='|'
    _python=$1      # python interpreter
    _script=$2      # inference script
    _model_dir=$3   # inference model directory
    _log_path=$4    # directory for per-run logs
    _img_dir=$5     # image directory for inference
    _flag_quant=$6  # "True" when the model is quantized
    # inference
    for use_gpu in ${use_gpu_list[*]}; do
        if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
            for use_mkldnn in ${use_mkldnn_list[*]}; do
                for threads in ${cpu_threads_list[*]}; do
                    for batch_size in ${batch_size_list[*]}; do
                        for precision in ${precision_list[*]}; do
                            if [ ${use_mkldnn} = "False" ] && [ ${precision} = "fp16" ]; then
                                continue
                            fi # skip when fp16 is enabled but mkldnn is disabled
                            if [ ${_flag_quant} = "True" ] && [ ${precision} != "int8" ]; then
                                continue
                            fi # skip quant-model inference when precision is not int8
                            set_precision=$(func_set_params "${precision_key}" "${precision}")
                            _save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
                            set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
                            set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
                            set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
                            set_mkldnn=$(func_set_params "${use_mkldnn_key}" "${use_mkldnn}")
                            set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}")
                            set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
                            set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}")
                            set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
                            command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_params0} ${set_infer_data} ${set_benchmark} ${set_precision} ${set_infer_params1} > ${_save_log_path} 2>&1 "
                            eval $command
                            last_status=${PIPESTATUS[0]}
                            eval "cat ${_save_log_path}"
                            status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
                        done
                    done
                done
            done
        elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then
            for use_trt in ${use_trt_list[*]}; do
                for precision in ${precision_list[*]}; do
                    if [ ${_flag_quant} = "True" ] && [ ${precision} != "int8" ]; then
                        continue
                    fi # skip quant-model inference when precision is not int8
                    for batch_size in ${batch_size_list[*]}; do
                        _save_log_path="${_log_path}/python_infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
                        set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
                        set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
                        set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
                        set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}")
                        set_precision=$(func_set_params "${precision_key}" "${precision}")
                        set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}")
                        set_infer_params0=$(func_set_params "${save_log_key}" "${save_log_value}")
                        set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}")
                        command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} ${set_infer_params0} > ${_save_log_path} 2>&1 "
                        eval $command
                        last_status=${PIPESTATUS[0]}
                        eval "cat ${_save_log_path}"
                        status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
                    done
                done
            done
        else
            echo "Currently, hardware other than CPU and GPU is not supported!"
        fi
    done
}
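
# For a CPU sweep entry, the composed command then looks roughly like this
# (flag names come from the config; these values are illustrative only):
#   python3.7 tools/infer.py --use_gpu=False --enable_mkldnn=True --cpu_threads=6 \
#       --model_dir=./inference/example_model_klquant --batch_size=1 \
#       --image_dir=./images --benchmark=True --precision=int8 > ${LOG_PATH}/python_infer_cpu_....log 2>&1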

if [ ${MODE} = "whole_infer" ]; then
    GPUID=$3
    if [ ${#GPUID} -le 0 ]; then
        env=" "
    else
        env="export CUDA_VISIBLE_DEVICES=${GPUID}"
    fi
    # set CUDA_VISIBLE_DEVICES
    eval $env
    export Count=0
    IFS="|"
    infer_run_exports=(${infer_export_list})
    infer_quant_flag=(${infer_is_quant})
    for infer_model in ${infer_model_dir_list[*]}; do
        # run export
        if [ ${infer_run_exports[Count]} != "null" ]; then
            save_infer_dir="${infer_model}_klquant"
            set_export_weight=$(func_set_params "${export_weight}" "${infer_model}")
            set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}")
            export_log_path="${LOG_PATH}/${MODE}_export_${Count}.log"
            export_cmd="${python} ${infer_run_exports[Count]} ${set_export_weight} ${set_save_infer_key} > ${export_log_path} 2>&1 "
            echo ${infer_run_exports[Count]}
            echo $export_cmd
            eval $export_cmd
            status_export=$?
            status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
        else
            save_infer_dir=${infer_model}
        fi
        # run inference; PTQ (KL-quant) models are always treated as quantized
        is_quant="True"
        func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
        Count=$(($Count + 1))
    done
fi