
Commit 96946b5

[TVM] Relax API (#576)
Added Relax API support for Apache TVM.
1 parent 9c2326a commit 96946b5

30 files changed: +249 -168 lines changed

demo/benchmark_configs/TVM.xml

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@
 <ChannelSwap></ChannelSwap>
 <Target>llvm</Target>
 <Layout>NCHW</Layout>
-<VirtualMachine>False</VirtualMachine>
+<HighLevelAPI>Relay</HighLevelAPI>
 <OptimizationLevel>3</OptimizationLevel>
 </FrameworkDependent>
 </Test>

docker/TVM/Dockerfile

Lines changed: 32 additions & 17 deletions
@@ -5,37 +5,60 @@ WORKDIR /root/
 # Installing miniconda
 RUN wget -q --no-check-certificate -c https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh && \
     bash Miniconda3-latest-Linux-x86_64.sh -b && \
-    ./miniconda3/bin/conda create -n tvm-env -y python=3.7.16 && \
+    ./miniconda3/bin/conda create -n tvm-env -y python=3.8.20 && \
     rm -rf /root/miniconda3/pkgs/* && \
     rm ~/Miniconda3-latest-Linux-x86_64.sh -f
-
+
 ENV PATH /root/miniconda3/envs/tvm-env/bin:/root/miniconda3/bin:$PATH
 RUN echo "source activate tvm-env" > ~/.bashrc
 RUN export LD_LIBRARY_PATH=/root/miniconda3/envs/tvm-env/lib:${LD_LIBRARY_PATH}
 RUN conda config --add channels intel
 
 # Installing dependencies
-RUN python3 -m pip install pycocotools docker PyYAML
+RUN python3 -m pip install pycocotools docker PyYAML gluoncv[full] opencv-python Cython psutil
 RUN apt-get update && apt-get install -y ffmpeg libsm6 libxext6
-RUN python3 -m pip install gluoncv[full]
-RUN python3 -m pip install opencv-python
-
+RUN apt-get update && apt-get install -y -qq --no-install-recommends cmake software-properties-common
+RUN apt-add-repository "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-19 main" && \
+    wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
+RUN apt-get update && \
+    apt-get install -y -qq --no-install-recommends llvm-19 llvm-19-dev && \
+    rm -rf /var/lib/apt/lists/*
+
 # Installing Apache-TVM
-RUN python3 -m pip install apache-tvm==0.14.dev264
+ARG TVM_VERSION=v0.20.0
+
+RUN git clone --recursive https://github.com/apache/tvm tvm --branch ${TVM_VERSION} --single-branch
+ENV TVM_BUILD_DIR=/tmp/build-tvm
+RUN mkdir $TVM_BUILD_DIR && cd $TVM_BUILD_DIR
+RUN cp /root/tvm/cmake/config.cmake .
+RUN echo "set(CMAKE_BUILD_TYPE RelWithDebInfo)" >> config.cmake
+RUN echo "set(USE_LLVM /usr/bin/llvm-config-19)" >> config.cmake
+RUN echo "set(HIDE_PRIVATE_SYMBOLS ON)" >> config.cmake
+RUN echo "set(USE_CUDA OFF)" >> config.cmake
+RUN echo "set(USE_METAL OFF)" >> config.cmake
+RUN echo "set(USE_VULKAN OFF)" >> config.cmake
+RUN echo "set(USE_OPENCL OFF)" >> config.cmake
+RUN echo "set(USE_CUBLAS OFF)" >> config.cmake
+RUN echo "set(USE_CUDNN OFF)" >> config.cmake
+RUN echo "set(USE_CUTLASS OFF)" >> config.cmake
+
+RUN /bin/bash -c 'cmake /root/tvm/ && cmake --build . -- -j$(nproc --all)'
+
+WORKDIR /root/
+RUN export TVM_LIBRARY_PATH=$TVM_BUILD_DIR
+RUN python3 -m pip install -e /root/tvm/python
 
 # ARG
 ARG TORCH_VERSION=2.0.1
 ARG TORCHVISION_VERSION=0.15.2
 ARG TFLite_VERSION=2.14.0
 ARG MXNET_VERSION=1.9.1
 ARG ONNX_VERSION=1.15.0
-ARG CAFFE_VERSION=1.1.0
 
 ARG PyTorch
 ARG TFLite
 ARG MXNet
 ARG ONNX
-ARG CAFFE
 
 RUN if [ "${PyTorch}" = "true" ]; then \
     python3 -m pip install torch==${TORCH_VERSION} torchvision==${TORCHVISION_VERSION}; \
@@ -65,14 +88,6 @@ RUN if [ "$ONNX" = "true" ]; then \
     python3 -m pip install onnxruntime==${ONNX_VERSION} && python3 -m pip install onnx==${ONNX_VERSION}; \
 fi
 
-RUN if [ "$CAFFE" = "true" ]; then \
-    /bin/bash -c "conda install -n tvm-env -y -c intel openvino-ie4py-ubuntu20=2022.1.0"; \
-    /bin/bash -c "conda install -n tvm-env -y -c intel scikit-image=0.17.2"; \
-    /bin/bash -c "conda install -n tvm-env -y -c intel caffe"; \
-    /bin/bash -c "conda install -n tvm-env -y -c anaconda libgcc-ng=11.2.0"; \
-    /bin/bash -c "conda install -n tvm-env -y -c defaults protobuf=3.17.2 libprotobuf=3.17.2 requests=2.27.1"; \
-fi
-
 WORKDIR /tmp/open_model_zoo/
 RUN git remote add omz_custom_tvm https://github.com/itlab-vision/open_model_zoo_tvm.git && \
     git fetch omz_custom_tvm && \
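
For context, the image now builds Apache TVM from the v0.20.0 tag against LLVM 19 instead of installing the old `apache-tvm` wheel. A quick sanity check of the resulting environment could look like the sketch below (an illustration only; it assumes the image built successfully and the `tvm-env` environment is active):

import tvm
from tvm import relax

# The source build should report the checked-out tag and expose the Relax API.
print(tvm.__version__)
print(hasattr(relax, 'VirtualMachine'))

# An LLVM CPU target is expected to work because config.cmake enables USE_LLVM.
print(tvm.target.Target('llvm'))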

src/benchmark/frameworks/tvm/tvm_parameters_parser.py

Lines changed: 9 additions & 8 deletions
@@ -15,7 +15,7 @@ def parse_parameters(self, curr_test):
         CONFIG_FRAMEWORK_DEPENDENT_STD_TAG = 'Std'
         CONFIG_FRAMEWORK_DEPENDENT_CHANNEL_SWAP_TAG = 'ChannelSwap'
         CONFIG_FRAMEWORK_DEPENDENT_LAYOUT_TAG = 'Layout'
-        CONFIG_FRAMEWORK_DEPENDENT_VIRTUAL_MACHINE = 'VirtualMachine'
+        CONFIG_FRAMEWORK_DEPENDENT_HIGH_LEVEL_API = 'HighLevelAPI'
 
         dep_parameters_tag = curr_test.getElementsByTagName(CONFIG_FRAMEWORK_DEPENDENT_TAG)[0]
 
@@ -39,8 +39,8 @@ def parse_parameters(self, curr_test):
             CONFIG_FRAMEWORK_DEPENDENT_LAYOUT_TAG)[0].firstChild
         _target = dep_parameters_tag.getElementsByTagName(
             CONFIG_FRAMEWORK_DEPENDENT_TARGET)[0].firstChild
-        _vm = dep_parameters_tag.getElementsByTagName(
-            CONFIG_FRAMEWORK_DEPENDENT_VIRTUAL_MACHINE)[0].firstChild
+        _high_level_api = dep_parameters_tag.getElementsByTagName(
+            CONFIG_FRAMEWORK_DEPENDENT_HIGH_LEVEL_API)[0].firstChild
 
         return TVMParameters(
             framework=_framework.data if _framework else None,
@@ -53,14 +53,15 @@ def parse_parameters(self, curr_test):
             optimization_level=_optimization_level.data if _optimization_level else None,
             layout=_layout.data if _layout else None,
             target=_target.data if _target else None,
-            vm=_vm.data if _vm else None,
+            high_level_api=_high_level_api.data if _high_level_api else None,
         )
 
 
 class TVMParameters(FrameworkParameters):
     def __init__(self, framework, input_name, input_shape,
                  normalize, mean, std, channel_swap,
-                 optimization_level, layout, target, vm):
+                 optimization_level, layout, target,
+                 high_level_api):
         self.framework = None
         self.input_name = None
         self.input_shape = None
@@ -71,7 +72,7 @@ def __init__(self, framework, input_name, input_shape,
         self.optimization_level = None
         self.layout = None
         self.target = 'llvm'
-        self.vm = None
+        self.high_level_api = None
 
         if self._framework_is_correct(framework):
             self.framework = framework
@@ -93,8 +94,8 @@ def __init__(self, framework, input_name, input_shape,
             self.layout = layout
         if self._parameter_is_not_none(target):
             self.target = target
-        if self._parameter_is_not_none(vm):
-            self.vm = vm
+        if self._parameter_is_not_none(high_level_api):
+            self.high_level_api = high_level_api
 
     @staticmethod
     def _framework_is_correct(framework):
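
For reference, the new `HighLevelAPI` tag is read with the same `xml.dom.minidom` calls as the other `FrameworkDependent` parameters. A standalone sketch of that lookup (the XML fragment is hypothetical, not taken from the repository configs):

from xml.dom import minidom

# Hypothetical <Test> fragment; only the tag lookup is illustrated.
xml_text = """
<Test>
  <FrameworkDependent>
    <HighLevelAPI>RelaxVM</HighLevelAPI>
  </FrameworkDependent>
</Test>
"""

test = minidom.parseString(xml_text).documentElement
dep_parameters_tag = test.getElementsByTagName('FrameworkDependent')[0]
node = dep_parameters_tag.getElementsByTagName('HighLevelAPI')[0].firstChild
high_level_api = node.data if node else None  # an empty tag falls back to the default API
print(high_level_api)  # RelaxVM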

src/benchmark/frameworks/tvm/tvm_process.py

Lines changed: 4 additions & 5 deletions
@@ -57,11 +57,6 @@ def _fill_command_line(self):
         common_params = TVMProcess._add_flag_to_cmd_line(
             common_params, '--norm')
 
-        vm = self._test.dep_parameters.vm
-        if vm == 'True':
-            common_params = TVMProcess._add_flag_to_cmd_line(
-                common_params, '-vm')
-
         mean = self._test.dep_parameters.mean
         common_params = TVMProcess._add_optional_argument_to_cmd_line(
             common_params, '--mean', mean)
@@ -86,6 +81,10 @@ def _fill_command_line(self):
         common_params = TVMProcess._add_optional_argument_to_cmd_line(
             common_params, '--target', target)
 
+        high_level_api = self._test.dep_parameters.high_level_api
+        common_params = TVMProcess._add_optional_argument_to_cmd_line(
+            common_params, '--high_level_api', high_level_api)
+
         return f'{common_params}'
 

src/configs/README.md

Lines changed: 2 additions & 1 deletion
@@ -354,6 +354,7 @@
 the image.
 - `ChannelSwap` is an optional tag. It describes the reordering of channels in
 the input image. By default the order (2, 0, 1) is set, which corresponds to BGR.
+- `HighLevelAPI` is an optional tag. It defines the high-level API to use: `Relay`, `RelayVM` or `RelaxVM`. The default value is `Relay`.
 - `OptimizationLevel` is an optional tag. It defines the level of optimizations
 of the computation graph that speed up inference. By default no optimizations are applied.
 - `Framework` is a required tag. It defines the framework whose models will be
@@ -766,7 +767,7 @@
 <ChannelSwap></ChannelSwap>
 <Layout>NCHW</Layout>
 <Target>llvm</Target>
-<VirtualMachine>True</VirtualMachine>
+<HighLevelAPI>RelaxVM</HighLevelAPI>
 <OptimizationLevel>3</OptimizationLevel>
 </FrameworkDependent>
 </Test>

src/configs/benchmark_configuration_file_template.xml

Lines changed: 1 addition & 35 deletions
@@ -446,41 +446,7 @@
 <Std></Std>
 <Layout></Layout>
 <ChannelSwap></ChannelSwap>
-<VirtualMachine></VirtualMachine>
-<Target></Target>
-<OptimizationLevel></OptimizationLevel>
-</FrameworkDependent>
-</Test>
-<Test>
-<Model>
-<Task></Task>
-<Name></Name>
-<Precision></Precision>
-<SourceFramework>TVM</SourceFramework>
-<ModelPath></ModelPath>
-<WeightsPath></WeightsPath>
-</Model>
-<Dataset>
-<Name></Name>
-<Path></Path>
-</Dataset>
-<FrameworkIndependent>
-<InferenceFramework>TVM</InferenceFramework>
-<BatchSize></BatchSize>
-<Device></Device>
-<IterationCount></IterationCount>
-<TestTimeLimit></TestTimeLimit>
-</FrameworkIndependent>
-<FrameworkDependent>
-<InputName></InputName>
-<InputShape></InputShape>
-<Framework></Framework> <!--Accepts values: caffe, onnx, pytorch, mxnet, tvm, tflite-->
-<Normalize></Normalize>
-<Mean></Mean>
-<Std></Std>
-<Layout></Layout>
-<ChannelSwap></ChannelSwap>
-<VirtualMachine></VirtualMachine>
+<HighLevelAPI></HighLevelAPI>
 <Target></Target>
 <OptimizationLevel></OptimizationLevel>
 </FrameworkDependent>

src/inference/README.md

Lines changed: 1 addition & 0 deletions
@@ -956,6 +956,7 @@ inference_tvm.py
 ImageNet.
 - `-ni / --number_iter` is the number of forward passes through the network.
 By default a single pass through the network is performed.
+- `--high_level_api` is the high-level API: `Relay`, `RelayVM` or `RelaxVM`. The default is `Relay`.
 - `-ol / --opt_level` is a parameter that defines the optimization level of
 the neural network computation graph to speed up inference. By default
 no optimizations are applied.

src/inference/inference_tvm.py

Lines changed: 7 additions & 6 deletions
@@ -166,10 +166,12 @@ def cli_argument_parser():
                         default=0.5,
                         type=float,
                         dest='threshold')
-    parser.add_argument('-vm', '--virtual_machine',
-                        help='Flag to use VirtualMachine API',
-                        action='store_true',
-                        dest='vm')
+    parser.add_argument('--high_level_api',
+                        help='Type of high level API',
+                        choices=['Relay', 'RelayVM', 'RelaxVM'],
+                        default='Relay',
+                        type=str,
+                        dest='high_level_api')
     parser.add_argument('--raw_output',
                         help='Raw output without logs.',
                         default=False,
@@ -209,8 +211,7 @@ def main():
         args.input_name,
         io.get_slice_input,
         args.time,
-        args.vm)
-
+        args.high_level_api)
     if not args.raw_output:
         if args.number_iter == 1:
             try:

src/inference/tvm_auxiliary.py

Lines changed: 27 additions & 10 deletions
@@ -22,11 +22,13 @@ def _infer_slice(self, input_name, module, slice_input):
         pass
 
     @staticmethod
-    def get_helper(vm):
-        if vm:
-            return InferenceVMApi()
-        else:
+    def get_helper(high_level_api):
+        if high_level_api == 'Relay':
             return InferenceRelayAPI()
+        elif high_level_api == 'RelayVM':
+            return InferenceRelayVMApi()
+        elif high_level_api == 'RelaxVM':
+            return InferenceRelaxVMApi()
 
     @abc.abstractmethod
     def _inference_tvm(self, module, input_name, slice_input):
@@ -77,22 +79,37 @@ def _inference_tvm(self, module, input_name, slice_input):
         return [module.get_output(i) for i in range(num_of_outputs)]
 
 
-class InferenceVMApi(InferenceHelper):
+class InferenceRelayVMApi(InferenceHelper):
     def __init__(self):
         super().__init__()
 
     def _infer_slice(self, input_name, module, slice_input):
         module.set_input('main', slice_input[input_name])
         module.run()
-        res = module.get_outputs()
-        return res
+        return module.get_outputs()
 
     def _inference_tvm(self, module, input_name, slice_input):
         module.set_input('main', slice_input[input_name])
         module.run()
         return module.get_outputs()
 
 
+class InferenceRelaxVMApi(InferenceHelper):
+    def __init__(self):
+        super().__init__()
+
+    def _infer_slice(self, input_name, module, slice_input):
+        module.set_input('main', slice_input[input_name])
+        module.invoke_stateful('main')
+        return module.get_outputs('main')
+
+    def _inference_tvm(self, module, input_name, slice_input):
+        import tvm
+        module.set_input('main', slice_input[input_name])
+        module.invoke_stateful('main')
+        return [tvm.nd.array(module.get_outputs('main'))]
+
+
 class OutputPreparer:
     def __init__(self, source_framework):
         self.source_framework = source_framework
@@ -163,7 +180,7 @@ def create_dict_for_converter(args):
         'opt_level': args.opt_level,
         'target': args.target,
         'module': args.module,
-        'vm': args.vm,
+        'high_level_api': args.high_level_api,
         'source_framework': args.source_framework,
     }
     return dictionary
@@ -199,8 +216,8 @@ def create_dict_for_output_preparer(args):
     return dictionary
 
 
-def inference_tvm(module, num_of_iterations, input_name, get_slice, test_duration, vm):
-    inference_helper = InferenceHelper.get_helper(vm)
+def inference_tvm(module, num_of_iterations, input_name, get_slice, test_duration, high_level_api):
+    inference_helper = InferenceHelper.get_helper(high_level_api)
     return inference_helper.inference_tvm(module, num_of_iterations, input_name, get_slice, test_duration)
 
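
The new `InferenceRelaxVMApi` helper drives a stateful Relax virtual machine through `set_input` / `invoke_stateful` / `get_outputs`. A rough sketch of the TVM object it presumably receives as `module` (assuming `mod` is a Relax `IRModule` with a `main` function, e.g. produced by a `tvm.relax.frontend` importer, and a hypothetical 1x3x224x224 input):

import numpy as np
import tvm
from tvm import relax

# 'mod' is assumed to be a Relax IRModule with a 'main' entry point.
target = tvm.target.Target('llvm')
exe = relax.build(mod, target)             # compile the module to a VM executable
vm = relax.VirtualMachine(exe, tvm.cpu())  # stateful Relax VM, roughly the helper's 'module'

data = tvm.nd.array(np.random.rand(1, 3, 224, 224).astype('float32'))
vm.set_input('main', data)                 # same call sequence as InferenceRelaxVMApi
vm.invoke_stateful('main')
outputs = vm.get_outputs('main')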

src/model_converters/tvm_converter/README.md

Lines changed: 2 additions & 2 deletions
@@ -68,7 +68,7 @@ tvm_compiler.py --mod <model> \
 --params <parameters> \
 --target <target> \
 --opt_level <opt_level> \
---virtual_machine <virtual_machine> \
+--high_level_api <high_level_api> \
 --lib_name <lib_name> \
 --output_dir <output_dir>
 ```
@@ -82,7 +82,7 @@ for the Relay API or to the `.so`+`.ro` format for the VirtualMachine API.
 - `-p / --params` is a path to an `.params` file with a model parameters.
 - `-t / --target` is target device information, for example `llvm` for CPU.
 - `--opt_level` is the optimization level of the task extractions.
-- `-vm / --virtual_machine` is a flag to use the VirtualMachine API.
+- `--high_level_api` is a high level API: `Relay`, `RelayVM`, `RelaxVM`.
 - `--lib_name` is a file name to save compiled model.
 - `-op / --output_dir` is a path to save the model.
 

src/model_converters/tvm_converter/tvm_auxiliary/caffe_format.py

Lines changed: 8 additions & 6 deletions
@@ -23,9 +23,11 @@ def _convert_model_from_framework(self):
 
         with open(self.model_params, 'rb') as f:
             init_net.ParseFromString(f.read())
-
-        model, params = self.tvm.relay.frontend.from_caffe(init_net,
-                                                            predict_net,
-                                                            shape_dict,
-                                                            dtype_dict)
-        return model, params
+        if self.high_level_api in ['Relay', 'RelayVM']:
+            model, params = self.tvm.relay.frontend.from_caffe(init_net,
+                                                               predict_net,
+                                                               shape_dict,
+                                                               dtype_dict)
+            return model, params
+        else:
+            raise ValueError(f'API {self.high_level_api} is not supported')