structure_layout.h

// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle_api.h"
#include "paddle_inference_api.h"

#include <include/postprocess_op.h>
#include <include/preprocess_op.h>
namespace PaddleOCR {

class StructureLayoutRecognizer {
public:
  explicit StructureLayoutRecognizer(
      const std::string &model_dir, const bool &use_gpu, const int &gpu_id,
      const int &gpu_mem, const int &cpu_math_library_num_threads,
      const bool &use_mkldnn, const std::string &label_path,
      const bool &use_tensorrt, const std::string &precision,
      const double &layout_score_threshold,
      const double &layout_nms_threshold) {
    this->use_gpu_ = use_gpu;
    this->gpu_id_ = gpu_id;
    this->gpu_mem_ = gpu_mem;
    this->cpu_math_library_num_threads_ = cpu_math_library_num_threads;
    this->use_mkldnn_ = use_mkldnn;
    this->use_tensorrt_ = use_tensorrt;
    this->precision_ = precision;

    this->post_processor_.init(label_path, layout_score_threshold,
                               layout_nms_threshold);
    LoadModel(model_dir);
  }

  // Load Paddle inference model
  void LoadModel(const std::string &model_dir);

  // Run layout detection on `img`; fills `result` with the detected layout
  // regions and `times` with timing statistics.
  void Run(cv::Mat img, std::vector<StructurePredictResult> &result,
           std::vector<double> &times);

private:
  std::shared_ptr<paddle_infer::Predictor> predictor_;

  bool use_gpu_ = false;
  int gpu_id_ = 0;
  int gpu_mem_ = 4000;
  int cpu_math_library_num_threads_ = 4;
  bool use_mkldnn_ = false;
  std::vector<float> mean_ = {0.485f, 0.456f, 0.406f};
  std::vector<float> scale_ = {1 / 0.229f, 1 / 0.224f, 1 / 0.225f};
  bool is_scale_ = true;
  bool use_tensorrt_ = false;
  std::string precision_ = "fp32";

  // pre-process
  Resize resize_op_;
  Normalize normalize_op_;
  Permute permute_op_;

  // post-process
  PicodetPostProcessor post_processor_;
};

} // namespace PaddleOCR
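
A minimal usage sketch (not part of this header), showing how the class above might be constructed and called from a hypothetical driver file; the model directory, label file, image path, and threshold values are illustrative placeholders, not values taken from the original source:

// usage_sketch.cpp -- illustrative only; every path and numeric value below
// is an assumed placeholder.
#include <opencv2/opencv.hpp>
#include <vector>

#include <include/structure_layout.h>

int main() {
  // Construct the recognizer; arguments follow the constructor signature in
  // structure_layout.h.
  PaddleOCR::StructureLayoutRecognizer layout_model(
      "./inference/layout_model",  // model_dir (assumed path)
      false,                       // use_gpu
      0,                           // gpu_id
      4000,                        // gpu_mem (MB)
      4,                           // cpu_math_library_num_threads
      false,                       // use_mkldnn
      "./layout_dict.txt",         // label_path (assumed path)
      false,                       // use_tensorrt
      "fp32",                      // precision
      0.5,                         // layout_score_threshold (assumed)
      0.5);                        // layout_nms_threshold (assumed)

  // Load an input image and run layout detection.
  cv::Mat img = cv::imread("./test_image.jpg", cv::IMREAD_COLOR);
  std::vector<PaddleOCR::StructurePredictResult> results;
  std::vector<double> times;
  layout_model.Run(img, results, times);
  return 0;
}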