// ocr_det.h
  1. // Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. #pragma once
#include <memory>
#include <string>
#include <vector>

#include "paddle_api.h"
#include "paddle_inference_api.h"

#include <include/postprocess_op.h>
#include <include/preprocess_op.h>
  19. namespace PaddleOCR {
  20. class DBDetector {
  21. public:
  22. explicit DBDetector(const std::string &model_dir, const bool &use_gpu,
  23. const int &gpu_id, const int &gpu_mem,
  24. const int &cpu_math_library_num_threads,
  25. const bool &use_mkldnn, const std::string &limit_type,
  26. const int &limit_side_len, const double &det_db_thresh,
  27. const double &det_db_box_thresh,
  28. const double &det_db_unclip_ratio,
  29. const std::string &det_db_score_mode,
  30. const bool &use_dilation, const bool &use_tensorrt,
  31. const std::string &precision) {
  32. this->use_gpu_ = use_gpu;
  33. this->gpu_id_ = gpu_id;
  34. this->gpu_mem_ = gpu_mem;
  35. this->cpu_math_library_num_threads_ = cpu_math_library_num_threads;
  36. this->use_mkldnn_ = use_mkldnn;
  37. this->limit_type_ = limit_type;
  38. this->limit_side_len_ = limit_side_len;
  39. this->det_db_thresh_ = det_db_thresh;
  40. this->det_db_box_thresh_ = det_db_box_thresh;
  41. this->det_db_unclip_ratio_ = det_db_unclip_ratio;
  42. this->det_db_score_mode_ = det_db_score_mode;
  43. this->use_dilation_ = use_dilation;
  44. this->use_tensorrt_ = use_tensorrt;
  45. this->precision_ = precision;
  46. LoadModel(model_dir);
  47. }
  48. // Load Paddle inference model
  49. void LoadModel(const std::string &model_dir);
  50. // Run predictor
  51. void Run(cv::Mat &img, std::vector<std::vector<std::vector<int>>> &boxes,
  52. std::vector<double> &times);
  53. private:
  54. std::shared_ptr<paddle_infer::Predictor> predictor_;
  55. bool use_gpu_ = false;
  56. int gpu_id_ = 0;
  57. int gpu_mem_ = 4000;
  58. int cpu_math_library_num_threads_ = 4;
  59. bool use_mkldnn_ = false;
  60. std::string limit_type_ = "max";
  61. int limit_side_len_ = 960;
  62. double det_db_thresh_ = 0.3;
  63. double det_db_box_thresh_ = 0.5;
  64. double det_db_unclip_ratio_ = 2.0;
  65. std::string det_db_score_mode_ = "slow";
  66. bool use_dilation_ = false;
  67. bool visualize_ = true;
  68. bool use_tensorrt_ = false;
  69. std::string precision_ = "fp32";
  70. std::vector<float> mean_ = {0.485f, 0.456f, 0.406f};
  71. std::vector<float> scale_ = {1 / 0.229f, 1 / 0.224f, 1 / 0.225f};
  72. bool is_scale_ = true;
  73. // pre-process
  74. ResizeImgType0 resize_op_;
  75. Normalize normalize_op_;
  76. Permute permute_op_;
  77. // post-process
  78. DBPostProcessor post_processor_;
  79. };
  80. } // namespace PaddleOCR