1
1
import subprocess
2
2
3
- import pytest
4
3
import torch
5
-
6
- try :
7
- import onnx
8
- import torch .onnx
9
-
10
- print (
11
- onnx .__version__
12
- ) # Adding this so onnx import doesn't get removed by pre-commit
13
- ONNX_ENABLED = True
14
- except :
15
- ONNX_ENABLED = False
4
+ import torch .onnx
16
5
17
6
18
7
class ToyModel (torch .nn .Module ):
@@ -28,7 +17,6 @@ def forward(self, x):
28
17
29
18
30
19
# For a custom model you still need to manually author your converter, as far as I can tell there isn't a nice out of the box that exists
31
- @pytest .mark .skipif (ONNX_ENABLED == False , reason = "ONNX is not installed" )
32
20
def test_convert_to_onnx ():
33
21
model = ToyModel ()
34
22
dummy_input = torch .randn (1 , 1 )
@@ -55,7 +43,6 @@ def test_convert_to_onnx():
55
43
)
56
44
57
45
58
- @pytest .mark .skipif (ONNX_ENABLED == False , reason = "ONNX is not installed" )
59
46
def test_model_packaging_and_start ():
60
47
subprocess .run ("mkdir model_store" , shell = True )
61
48
subprocess .run (
@@ -65,7 +52,6 @@ def test_model_packaging_and_start():
65
52
)
66
53
67
54
68
- @pytest .mark .skipif (ONNX_ENABLED == False , reason = "ONNX is not installed" )
69
55
def test_model_start ():
70
56
subprocess .run (
71
57
"torchserve --start --ncs --model-store model_store --models onnx.mar" ,
@@ -74,14 +60,12 @@ def test_model_start():
74
60
)
75
61
76
62
77
- @pytest .mark .skipif (ONNX_ENABLED == False , reason = "ONNX is not installed" )
78
63
def test_inference():
    """Send a smoke-test inference request to the running TorchServe endpoint.

    Best-effort: the response is not captured or asserted here; the call only
    exercises the `onnx` model prediction endpoint started by the preceding
    tests in this module.
    """
    # List form with shell=False avoids shell string parsing entirely.
    # The original shell command quoted the payload as '1'; the shell would
    # strip those quotes, so the literal argument "1" is equivalent.
    subprocess.run(
        [
            "curl",
            "-X",
            "POST",
            "http://127.0.0.1:8080/predictions/onnx",
            "--data-binary",
            "1",
        ]
    )
83
68
84
69
85
- @pytest .mark .skipif (ONNX_ENABLED == False , reason = "ONNX is not installed" )
86
70
def test_stop():
    """Stop the TorchServe instance started by the earlier tests.

    Raises:
        subprocess.CalledProcessError: if ``torchserve --stop`` exits
            non-zero (behavior preserved from the original ``check=True``).
    """
    # List form with shell=False: no shell is needed just to split two tokens,
    # and it removes any shell-injection surface.
    subprocess.run(["torchserve", "--stop"], check=True)
0 commit comments