Commit 11cbb96

[Feat][Deploy] Add cpp inference on windows (#2911)

1 parent: f8e8ba7

8 files changed: +839 −329 lines
deploy/cpp/CMakeLists.txt (+112 −79)
@@ -7,6 +7,16 @@ option(WITH_STATIC_LIB "Compile demo with static/shared library, default use sta
 option(USE_TENSORRT "Compile demo with TensorRT." OFF)
 option(WITH_ROCM "Compile demo with rocm." OFF)
 
+if (WIN32)
+  SET(PADDLE_LIB "" CACHE PATH "Location of libraries")
+  SET(PADDLE_LIB_NAME "" CACHE STRING "libpaddle_inference")
+
+  include(cmake/yaml-cpp.cmake)
+  include_directories("${CMAKE_SOURCE_DIR}/")
+  include_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/src/ext-yaml-cpp/include")
+  link_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/lib")
+endif()
+
 if(NOT WITH_STATIC_LIB)
   add_definitions("-DPADDLE_WITH_SHARED_LIB")
 else()
@@ -77,37 +87,35 @@ endif()
 if(WITH_GPU)
   if(NOT WIN32)
     set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library")
-  else()
-    if(CUDA_LIB STREQUAL "")
-      set(CUDA_LIB "C:\\Program\ Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v8.0\\lib\\x64")
-    endif()
   endif(NOT WIN32)
 endif()
 
-if (USE_TENSORRT AND WITH_GPU)
-  set(TENSORRT_ROOT "" CACHE STRING "The root directory of TensorRT library")
-  if("${TENSORRT_ROOT}" STREQUAL "")
-    message(FATAL_ERROR "The TENSORRT_ROOT is empty, you must assign it a value with CMake command. Such as: -DTENSORRT_ROOT=TENSORRT_ROOT_PATH ")
-  endif()
-  set(TENSORRT_INCLUDE_DIR ${TENSORRT_ROOT}/include)
-  set(TENSORRT_LIB_DIR ${TENSORRT_ROOT}/lib)
-  file(READ ${TENSORRT_INCLUDE_DIR}/NvInfer.h TENSORRT_VERSION_FILE_CONTENTS)
-  string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
-    "${TENSORRT_VERSION_FILE_CONTENTS}")
-  if("${TENSORRT_MAJOR_VERSION}" STREQUAL "")
-    file(READ ${TENSORRT_INCLUDE_DIR}/NvInferVersion.h TENSORRT_VERSION_FILE_CONTENTS)
-    string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
-      "${TENSORRT_VERSION_FILE_CONTENTS}")
-  endif()
-  if("${TENSORRT_MAJOR_VERSION}" STREQUAL "")
-    message(SEND_ERROR "Failed to detect TensorRT version.")
-  endif()
-  string(REGEX REPLACE "define NV_TENSORRT_MAJOR +([0-9]+)" "\\1"
-    TENSORRT_MAJOR_VERSION "${TENSORRT_MAJOR_VERSION}")
-  message(STATUS "Current TensorRT header is ${TENSORRT_INCLUDE_DIR}/NvInfer.h. "
-    "Current TensorRT version is v${TENSORRT_MAJOR_VERSION}. ")
-  include_directories("${TENSORRT_INCLUDE_DIR}")
-  link_directories("${TENSORRT_LIB_DIR}")
+if(NOT WIN32)
+  if (USE_TENSORRT AND WITH_GPU)
+    set(TENSORRT_ROOT "" CACHE STRING "The root directory of TensorRT library")
+    if("${TENSORRT_ROOT}" STREQUAL "")
+      message(FATAL_ERROR "The TENSORRT_ROOT is empty, you must assign it a value with CMake command. Such as: -DTENSORRT_ROOT=TENSORRT_ROOT_PATH ")
+    endif()
+    set(TENSORRT_INCLUDE_DIR ${TENSORRT_ROOT}/include)
+    set(TENSORRT_LIB_DIR ${TENSORRT_ROOT}/lib)
+    file(READ ${TENSORRT_INCLUDE_DIR}/NvInfer.h TENSORRT_VERSION_FILE_CONTENTS)
+    string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
+      "${TENSORRT_VERSION_FILE_CONTENTS}")
+    if("${TENSORRT_MAJOR_VERSION}" STREQUAL "")
+      file(READ ${TENSORRT_INCLUDE_DIR}/NvInferVersion.h TENSORRT_VERSION_FILE_CONTENTS)
+      string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
+        "${TENSORRT_VERSION_FILE_CONTENTS}")
+    endif()
+    if("${TENSORRT_MAJOR_VERSION}" STREQUAL "")
+      message(SEND_ERROR "Failed to detect TensorRT version.")
+    endif()
+    string(REGEX REPLACE "define NV_TENSORRT_MAJOR +([0-9]+)" "\\1"
+      TENSORRT_MAJOR_VERSION "${TENSORRT_MAJOR_VERSION}")
+    message(STATUS "Current TensorRT header is ${TENSORRT_INCLUDE_DIR}/NvInfer.h. "
+      "Current TensorRT version is v${TENSORRT_MAJOR_VERSION}. ")
+    include_directories("${TENSORRT_INCLUDE_DIR}")
+    link_directories("${TENSORRT_LIB_DIR}")
+  endif()
 endif()
 
 if(WITH_MKL)
@@ -125,9 +133,9 @@ if(WITH_MKL)
     include_directories("${MKLDNN_PATH}/include")
     if(WIN32)
       set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib)
-    else(WIN32)
+    else()
      set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
-    endif(WIN32)
+    endif()
   endif()
 else()
   set(OPENBLAS_LIB_PATH "${PADDLE_LIB_THIRD_PARTY_PATH}openblas")
@@ -140,10 +148,14 @@ else()
 endif()
 
 if(WITH_STATIC_LIB)
-  set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
+  if(WIN32)
+    set(DEPS ${PADDLE_LIB}/paddle/lib/${PADDLE_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX})
+  else()
+    set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
+  endif()
 else()
   if(WIN32)
-    set(DEPS ${PADDLE_LIB}/paddle/lib/paddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
+    set(DEPS ${PADDLE_LIB}/paddle/lib/${PADDLE_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX})
   else()
     set(DEPS ${PADDLE_LIB}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX})
   endif()
@@ -158,8 +170,8 @@ if (NOT WIN32)
 else()
   set(DEPS ${DEPS}
       ${MATH_LIB} ${MKLDNN_LIB}
-      glog gflags_static libprotobuf xxhash cryptopp-static ${EXTERNAL_LIB})
-  set(DEPS ${DEPS} shlwapi.lib)
+      glog gflags_static libprotobuf xxhash cryptopp-static libyaml-cppmt ${EXTERNAL_LIB})
+  set(DEPS ${DEPS} libcmt shlwapi)
 endif(NOT WIN32)
 
 if(WITH_GPU)
@@ -169,13 +181,12 @@ if(WITH_GPU)
       set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX})
     endif()
     set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX})
+    set(DEPS ${DEPS} ${CUDA_LIB}/libcudnn${CMAKE_SHARED_LIBRARY_SUFFIX})
   else()
-    if(USE_TENSORRT)
-      set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_STATIC_LIBRARY_SUFFIX})
-      set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX})
-      if(${TENSORRT_MAJOR_VERSION} GREATER_EQUAL 7)
-        set(DEPS ${DEPS} ${TENSORRT_LIB_DIR}/myelin64_1${CMAKE_STATIC_LIBRARY_SUFFIX})
-      endif()
+    SET(CUDA_LIB "" CACHE PATH "Location of libraries")
+    if (USE_TENSORRT)
+      set(DEPS ${DEPS} ${CUDA_LIB}/nvinfer${CMAKE_STATIC_LIBRARY_SUFFIX})
+      set(DEPS ${DEPS} ${CUDA_LIB}/nvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX})
     endif()
     set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} )
     set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} )
@@ -189,49 +200,71 @@ if(WITH_ROCM)
   endif()
 endif()
 
-include_directories(/usr/local/include)
-link_directories(/usr/local/lib)
+if(NOT WIN32)
+  include_directories(/usr/local/include)
+  link_directories(/usr/local/lib)
 
-find_package(yaml-cpp REQUIRED)
-include_directories(${YAML_CPP_INCLUDE_DIRS})
-link_directories(${YAML_CPP_LIBRARIES})
-set(DEPS ${DEPS} "-lyaml-cpp")
+  find_package(yaml-cpp REQUIRED)
+  include_directories(${YAML_CPP_INCLUDE_DIRS})
+  link_directories(${YAML_CPP_LIBRARIES})
+  set(DEPS ${DEPS} "-lyaml-cpp")
 
-find_package(OpenCV REQUIRED)
-include_directories(${OpenCV_INCLUDE_DIRS})
-set(DEPS ${DEPS} ${OpenCV_LIBS})
+  find_package(OpenCV REQUIRED)
+  include_directories(${OpenCV_INCLUDE_DIRS})
+  set(DEPS ${DEPS} ${OpenCV_LIBS})
+
+  add_executable(${DEMO_NAME} src/${DEMO_NAME}.cc)
+  target_link_libraries(${DEMO_NAME} ${DEPS})
+else()
+  include_directories("${PADDLE_LIB}/paddle/fluid/inference")
+  include_directories("${PADDLE_LIB}/paddle/include")
+  link_directories("${PADDLE_LIB}/paddle/fluid/inference")
+
+  SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
+  find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/build/ NO_DEFAULT_PATH)
+  include_directories(${OpenCV_INCLUDE_DIRS})
+  set(DEPS ${DEPS} ${OpenCV_LIBS})
+
+  add_executable(${DEMO_NAME} src/${DEMO_NAME}.cc)
+  ADD_DEPENDENCIES(${DEMO_NAME} ext-yaml-cpp)
+  message("DEPS:" ${DEPS})
+  target_link_libraries(${DEMO_NAME} ${DEPS})
+endif()
 
-add_executable(${DEMO_NAME} src/${DEMO_NAME}.cc)
-target_link_libraries(${DEMO_NAME} ${DEPS})
+if(WIN32 AND USE_TENSORRT)
+  SET(TENSORRT_DLL "" CACHE PATH "Location of TensorRT .dll")
+  add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${TENSORRT_DLL}/nvinfer.dll ./nvinfer.dll
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${TENSORRT_DLL}/nvinfer.dll ./release/nvinfer.dll
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${TENSORRT_DLL}/nvinfer_plugin.dll ./nvinfer_plugin.dll
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${TENSORRT_DLL}/nvinfer_plugin.dll ./release/nvinfer_plugin.dll
+  )
+endif()
 
-if(WIN32)
-  if(USE_TENSORRT)
-    add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
-      COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer${CMAKE_SHARED_LIBRARY_SUFFIX}
-        ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
-      COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/nvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX}
-        ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
-    )
-    if(${TENSORRT_MAJOR_VERSION} GREATER_EQUAL 7)
-      add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
-        COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_LIB_DIR}/myelin64_1${CMAKE_SHARED_LIBRARY_SUFFIX}
-          ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE})
-    endif()
-  endif()
-  if(WITH_MKL)
-    add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
-      COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/mklml.dll ${CMAKE_BINARY_DIR}/Release
-      COMMAND ${CMAKE_COMMAND} -E copy ${MATH_LIB_PATH}/lib/libiomp5md.dll ${CMAKE_BINARY_DIR}/Release
-      COMMAND ${CMAKE_COMMAND} -E copy ${MKLDNN_PATH}/lib/mkldnn.dll ${CMAKE_BINARY_DIR}/Release
-    )
-  else()
+if(WIN32 AND WITH_MKL)
+  add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/mklml.dll ./mklml.dll
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5md.dll ./libiomp5md.dll
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mkldnn/lib/mkldnn.dll ./mkldnn.dll
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/mkldnn/lib/mkldnn.dll ./release/mkldnn.dll
+  )
+endif()
+
+if(WIN32 AND NOT WITH_MKL)
+  add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/openblas/lib/openblas.dll ./openblas.dll
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/openblas/lib/openblas.dll ./release/openblas.dll
+  )
+endif()
+
+if (WIN32)
   add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
-      COMMAND ${CMAKE_COMMAND} -E copy ${OPENBLAS_LIB_PATH}/lib/openblas.dll ${CMAKE_BINARY_DIR}/Release
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/onnxruntime/lib/onnxruntime.dll ./onnxruntime.dll
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/paddle2onnx/lib/paddle2onnx.dll ./paddle2onnx.dll
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/onnxruntime/lib/onnxruntime.dll ./release/onnxruntime.dll
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/third_party/install/paddle2onnx/lib/paddle2onnx.dll ./release/paddle2onnx.dll
+    COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_LIB}/paddle/lib/${PADDLE_LIB_NAME}.dll ./release/${PADDLE_LIB_NAME}.dll
   )
-  endif()
-  if(NOT WITH_STATIC_LIB)
-    add_custom_command(TARGET ${DEMO_NAME} POST_BUILD
-      COMMAND ${CMAKE_COMMAND} -E copy "${PADDLE_LIB}/paddle/lib/paddle_inference.dll" ${CMAKE_BINARY_DIR}/${CMAKE_BUILD_TYPE}
-    )
-  endif()
 endif()
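
For orientation, a minimal sketch of a Windows configure-and-build invocation that exercises the cache variables added above (PADDLE_LIB, PADDLE_LIB_NAME, OPENCV_DIR, CUDA_LIB, TENSORRT_DLL). The generator, the example paths, and the `-DDEMO_NAME=test_seg` option are assumptions that depend on the local setup, not part of this commit:

```
REM Hypothetical out-of-source build from a Visual Studio x64 command prompt.
cd deploy\cpp
mkdir build
cd build

cmake .. -G "Visual Studio 16 2019" -A x64 ^
  -DWITH_MKL=ON -DWITH_GPU=ON -DUSE_TENSORRT=OFF ^
  -DPADDLE_LIB=D:\paddle_inference ^
  -DPADDLE_LIB_NAME=paddle_inference ^
  -DOPENCV_DIR=D:\opencv ^
  -DCUDA_LIB="C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.2\lib\x64" ^
  -DDEMO_NAME=test_seg

cmake --build . --config Release
```

The post-build copy commands above then place the required DLLs (mklml/mkldnn or openblas, onnxruntime, paddle2onnx, and the Paddle Inference DLL) next to the produced executable under the Release output directory.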

deploy/cpp/cmake/yaml-cpp.cmake (+30)

@@ -0,0 +1,30 @@
+
+find_package(Git REQUIRED)
+
+include(ExternalProject)
+
+message("${CMAKE_BUILD_TYPE}")
+
+ExternalProject_Add(
+  ext-yaml-cpp
+  URL https://bj.bcebos.com/paddlex/deploy/deps/yaml-cpp.zip
+  URL_MD5 9542d6de397d1fbd649ed468cb5850e6
+  CMAKE_ARGS
+    -DYAML_CPP_BUILD_TESTS=OFF
+    -DYAML_CPP_BUILD_TOOLS=OFF
+    -DYAML_CPP_INSTALL=OFF
+    -DYAML_CPP_BUILD_CONTRIB=OFF
+    -DMSVC_SHARED_RT=OFF
+    -DBUILD_SHARED_LIBS=OFF
+    -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
+    -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
+    -DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG}
+    -DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE}
+    -DCMAKE_LIBRARY_OUTPUT_DIRECTORY=${CMAKE_BINARY_DIR}/ext/yaml-cpp/lib
+    -DCMAKE_ARCHIVE_OUTPUT_DIRECTORY=${CMAKE_BINARY_DIR}/ext/yaml-cpp/lib
+  PREFIX "${CMAKE_BINARY_DIR}/ext/yaml-cpp"
+  # Disable install step
+  INSTALL_COMMAND ""
+  LOG_DOWNLOAD ON
+  LOG_BUILD 1
+)
+28 −80

@@ -1,80 +1,28 @@
-English | [简体中文](cpp_inference_cn.md)
-# Paddle Inference Deployment (C++)
-
-## 1. Description
-
-This document introduces an example of deploying a segmentation model on a Linux server (NV GPU or X86 CPU) using Paddle Inference's C++ interface. The main steps include:
-* Prepare the environment
-* Prepare models and pictures
-* Compile and execute
-
-PaddlePaddle provides multiple prediction engine deployment models (as shown in the figure below) for different scenarios. For details, please refer to [document](https://paddleinference.paddlepaddle.org.cn/product_introduction/summary.html).
-
-![inference_ecosystem](https://user-images.githubusercontent.com/52520497/130720374-26947102-93ec-41e2-8207-38081dcc27aa.png)
-
-## 2. Prepare the environment
-
-### Prepare Paddle Inference C++ prediction library
-
-You can download the Paddle Inference C++ prediction library from [link](https://www.paddlepaddle.org.cn/inference/v2.3/user_guides/download_lib.html).
-
-Pay attention to select the exact version according to the machine's CUDA version, cuDNN version, whether MKLDNN or OpenBLAS is used, whether TensorRT is used, and other information. It is recommended to choose a prediction library with version >= 2.0.1.
-
-Download the `paddle_inference.tgz` compressed file and decompress it, and save the decompressed paddle_inference directory to `PaddleSeg/deploy/cpp/`.
-
-If you need to compile the Paddle Inference C++ prediction library, you can refer to the [document](https://www.paddlepaddle.org.cn/inference/v2.3/user_guides/source_compile.html), which will not be repeated here.
-
-### Prepare OpenCV
-
-This example uses OpenCV to read images, so OpenCV needs to be prepared.
-
-Run the following commands to download, compile, and install OpenCV.
-````
-sh install_opencv.sh
-````
-
-### Install Yaml, Gflags and Glog
-
-This example uses Yaml, Gflags and Glog.
-
-Run the following commands to download, compile, and install these libs.
-
-````
-sh install_yaml.sh
-sh install_gflags.sh
-sh install_glog.sh
-````
-
-## 3. Prepare models and pictures
-
-Execute the following command in the `PaddleSeg/deploy/cpp/` directory to download the [test model](https://paddleseg.bj.bcebos.com/dygraph/demo/pp_liteseg_infer_model.tar.gz) for testing. If you need to test other models, please refer to [documentation](../../model_export.md) to export the prediction model.
-
-````
-wget https://paddleseg.bj.bcebos.com/dygraph/demo/pp_liteseg_infer_model.tar.gz
-tar xf pp_liteseg_infer_model.tar.gz
-````
-
-Download one [image](https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png) from the validation set of cityscapes.
-
-````
-wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png
-````
-
-## 4. Compile and execute
-
-Please check that `PaddleSeg/deploy/cpp/` stores prediction libraries, models, and pictures, as follows.
-
-````
-PaddleSeg/deploy/cpp
-|-- paddle_inference # prediction library
-|-- pp_liteseg_infer_model # model
-|-- cityscapes_demo.png # image
-````
-
-Execute `sh run_seg_cpu.sh`, it will compile and then perform prediction on X86 CPU.
-
-Execute `sh run_seg_gpu.sh`, it will compile and then perform prediction on Nvidia GPU.
-
-The segmentation result will be saved in the "out_img.jpg" image in the current directory, as shown below. Note that this image is using histogram equalization for easy visualization.
-
-![out_img](https://user-images.githubusercontent.com/52520497/131456277-260352b5-4047-46d5-a38f-c50bbcfb6fd0.jpg)
+English | [简体中文](cpp_inference_cn.md)
+
+# C++ prediction deployment overview
+
+### 1. Compilation and deployment tutorials for different environments
+
+* [Compilation and deployment on Linux](cpp_inference_linux.md)
+* [Compilation and deployment on Windows](cpp_inference_windows.md)
+
+### 2. Description
+`PaddleSeg/deploy/cpp` provides users with a cross-platform C++ deployment scheme. After exporting the PaddleSeg training model, users can quickly run inference based on this project, or integrate the code into their own application.
+The main design objectives include the following two points:
+
+* Cross-platform: supports compilation, secondary development integration, and deployment on Windows and Linux
+* Extensibility: users can develop their own data preprocessing and other logic for new models
+
+The main directory and documents are described as follows:
+```
+deploy/cpp
+|
+├── cmake # Dependent external project cmake (currently only yaml-cpp)
+
+├── src ── test_seg.cc # Sample code file
+
+├── CMakeLists.txt # CMake compilation entry file
+
+└── *.sh # Install related packages or run sample scripts under Linux
+```
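
The `*.sh` scripts mentioned in the new overview correspond to the Linux quick start that the commit removes above. As a rough recap taken from that old text (the Linux guide `cpp_inference_linux.md` is the authoritative reference after this change):

```
# Install third-party dependencies used by the demo (run from PaddleSeg/deploy/cpp).
sh install_opencv.sh
sh install_yaml.sh
sh install_gflags.sh
sh install_glog.sh

# Fetch an exported test model and a sample image.
wget https://paddleseg.bj.bcebos.com/dygraph/demo/pp_liteseg_infer_model.tar.gz
tar xf pp_liteseg_infer_model.tar.gz
wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png

# Compile and run the sample; the result is written to out_img.jpg.
sh run_seg_cpu.sh    # X86 CPU
sh run_seg_gpu.sh    # NVIDIA GPU
```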
