// structure_table.h
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include <memory>
#include <string>
#include <vector>

#include "paddle_api.h"
#include "paddle_inference_api.h"

#include <include/postprocess_op.h>
#include <include/preprocess_op.h>
  19. namespace PaddleOCR {
  20. class StructureTableRecognizer {
  21. public:
  22. explicit StructureTableRecognizer(
  23. const std::string &model_dir, const bool &use_gpu, const int &gpu_id,
  24. const int &gpu_mem, const int &cpu_math_library_num_threads,
  25. const bool &use_mkldnn, const std::string &label_path,
  26. const bool &use_tensorrt, const std::string &precision,
  27. const int &table_batch_num, const int &table_max_len,
  28. const bool &merge_no_span_structure) {
  29. this->use_gpu_ = use_gpu;
  30. this->gpu_id_ = gpu_id;
  31. this->gpu_mem_ = gpu_mem;
  32. this->cpu_math_library_num_threads_ = cpu_math_library_num_threads;
  33. this->use_mkldnn_ = use_mkldnn;
  34. this->use_tensorrt_ = use_tensorrt;
  35. this->precision_ = precision;
  36. this->table_batch_num_ = table_batch_num;
  37. this->table_max_len_ = table_max_len;
  38. this->post_processor_.init(label_path, merge_no_span_structure);
  39. LoadModel(model_dir);
  40. }
  41. // Load Paddle inference model
  42. void LoadModel(const std::string &model_dir);
  43. void Run(std::vector<cv::Mat> img_list,
  44. std::vector<std::vector<std::string>> &rec_html_tags,
  45. std::vector<float> &rec_scores,
  46. std::vector<std::vector<std::vector<int>>> &rec_boxes,
  47. std::vector<double> &times);
  48. private:
  49. std::shared_ptr<paddle_infer::Predictor> predictor_;
  50. bool use_gpu_ = false;
  51. int gpu_id_ = 0;
  52. int gpu_mem_ = 4000;
  53. int cpu_math_library_num_threads_ = 4;
  54. bool use_mkldnn_ = false;
  55. int table_max_len_ = 488;
  56. std::vector<float> mean_ = {0.485f, 0.456f, 0.406f};
  57. std::vector<float> scale_ = {1 / 0.229f, 1 / 0.224f, 1 / 0.225f};
  58. bool is_scale_ = true;
  59. bool use_tensorrt_ = false;
  60. std::string precision_ = "fp32";
  61. int table_batch_num_ = 1;
  62. // pre-process
  63. TableResizeImg resize_op_;
  64. Normalize normalize_op_;
  65. PermuteBatch permute_op_;
  66. TablePadImg pad_op_;
  67. // post-process
  68. TablePostProcessor post_processor_;
  69. }; // class StructureTableRecognizer
  70. } // namespace PaddleOCR