Skip to content

Commit

Permalink
Add an inference-num-threads setting to the openvino and paddle versions.
Browse files Browse the repository at this point in the history
  • Loading branch information
SWHL committed Mar 5, 2024
1 parent bd2eba4 commit c594ed1
Show file tree
Hide file tree
Showing 4 changed files with 49 additions and 15 deletions.
16 changes: 13 additions & 3 deletions python/rapidocr_openvino/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,14 @@ Global:
min_height: 30
width_height_ratio: 8

use_cuda: &use_cuda false

inference_num_threads: &infer_num_threads -1

Det:
use_cuda: false
inference_num_threads: *infer_num_threads

use_cuda: *use_cuda

model_path: models/ch_PP-OCRv4_det_infer.onnx

Expand All @@ -23,7 +29,9 @@ Det:
score_mode: fast

Cls:
use_cuda: false
inference_num_threads: *infer_num_threads

use_cuda: *use_cuda

model_path: models/ch_ppocr_mobile_v2.0_cls_infer.onnx

Expand All @@ -33,7 +41,9 @@ Cls:
label_list: ['0', '180']

Rec:
use_cuda: false
inference_num_threads: *infer_num_threads

use_cuda: *use_cuda

model_path: models/ch_PP-OCRv4_rec_infer.onnx

Expand Down
13 changes: 10 additions & 3 deletions python/rapidocr_openvino/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
# @Contact: [email protected]
import argparse
import math
import os
import random
from io import BytesIO
from pathlib import Path
Expand All @@ -20,11 +21,17 @@

class OpenVINOInferSession:
def __init__(self, config):
    """Build an OpenVINO CPU inference session for the model named in *config*.

    Parameters
    ----------
    config : dict
        Must contain "model_path" (path to the model file). May contain
        "inference_num_threads" (int): number of CPU inference threads;
        -1 (the default) leaves OpenVINO's automatic choice in place.
    """
    core = Core()

    self._verify_model(config["model_path"])
    model_onnx = core.read_model(config["model_path"])

    # Only override the thread count when the user asked for a sane value:
    # within [1, cpu_count]. -1 keeps OpenVINO's default behavior.
    # os.cpu_count() may return None on exotic platforms; fall back to 1
    # so the comparison below never raises TypeError.
    cpu_nums = os.cpu_count() or 1
    infer_num_threads = config.get("inference_num_threads", -1)
    if infer_num_threads != -1 and 1 <= infer_num_threads <= cpu_nums:
        core.set_property("CPU", {"INFERENCE_NUM_THREADS": str(infer_num_threads)})

    compile_model = core.compile_model(model=model_onnx, device_name="CPU")
    self.session = compile_model.create_infer_request()

def __call__(self, input_content: np.ndarray) -> np.ndarray:
Expand Down
30 changes: 21 additions & 9 deletions python/rapidocr_paddle/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,10 +7,18 @@ Global:
min_height: 30
width_height_ratio: 8

use_cuda: &use_cuda false
gpu_id: &gpu_id 0
gpu_mem: &gpu_mem 500

cpu_math_library_num_threads: &infer_num_threads -1

Det:
use_cuda: false
gpu_id: 0
gpu_mem: 500
use_cuda: *use_cuda
gpu_id: *gpu_id
gpu_mem: *gpu_mem

cpu_math_library_num_threads: *infer_num_threads

model_path: models/ch_PP-OCRv4_det_infer

Expand All @@ -25,9 +33,11 @@ Det:
score_mode: fast

Cls:
use_cuda: false
gpu_id: 0
gpu_mem: 500
use_cuda: *use_cuda
gpu_id: *gpu_id
gpu_mem: *gpu_mem

cpu_math_library_num_threads: *infer_num_threads

model_path: models/ch_ppocr_mobile_v2_cls_infer

Expand All @@ -37,9 +47,11 @@ Cls:
label_list: ['0', '180']

Rec:
use_cuda: false
gpu_id: 0
gpu_mem: 500
use_cuda: *use_cuda
gpu_id: *gpu_id
gpu_mem: *gpu_mem

cpu_math_library_num_threads: *infer_num_threads

model_path: models/ch_PP-OCRv4_rec_infer

Expand Down
5 changes: 5 additions & 0 deletions python/rapidocr_paddle/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,11 @@ def __init__(self, config, mode: Optional[str] = None) -> None:
else:
infer_opts.disable_gpu()

cpu_nums = os.cpu_count()
infer_num_threads = config.get("cpu_math_library_num_threads", -1)
if infer_num_threads != -1 and 1 <= infer_num_threads <= cpu_nums:
infer_opts.set_cpu_math_library_num_threads(infer_num_threads)

# enable memory optim
infer_opts.enable_memory_optim()
infer_opts.disable_glog_info()
Expand Down

0 comments on commit c594ed1

Please sign in to comment.