@@ -68,27 +68,39 @@ def chdir_example(monkeypatch):
68
68
@pytest .mark .skipif (PT_2_AVAILABLE == False , reason = "torch version is < 2.0.0" )
69
69
class TestTorchCompile :
70
70
def teardown_class(self):
    """Stop the torchserve instance started by the tests in this class."""
    # List-form argv avoids the shell; note the flag must be exactly
    # "--stop" — a leading space (" --stop") would not be recognized
    # by torchserve's argument parser.
    subprocess.run(["torchserve", "--stop"], check=True)
    # Give the server time to shut down fully before anything else runs.
    time.sleep(10)
73
73
74
74
def test_archive_model_artifacts(self):
    """Package the example model into two .mar archives: one using the
    str-form torch.compile config, one using the dict (kwargs) form.
    """
    # Pre-conditions: the model file and both YAML configs must exist.
    assert len(glob.glob(MODEL_FILE)) == 1
    assert len(glob.glob(YAML_CONFIG_STR)) == 1
    assert len(glob.glob(YAML_CONFIG_DICT)) == 1

    # Generate the serialized weights. cwd= replaces the former
    # "cd {dir} && python model.py" shell pipeline. The argv entries must
    # not carry leading spaces — [" python", " model.py"] would raise
    # FileNotFoundError instead of running the script.
    subprocess.run(["python", "model.py"], cwd=TEST_DATA_DIR, check=True)
    os.makedirs(MODEL_STORE_DIR, exist_ok=True)

    # Register 2 models, one with the backend as str config, the other
    # with the kwargs as dict config. Both invocations differ only in
    # the name suffix and config file, so run them in one loop.
    for suffix, config_file in (("str", YAML_CONFIG_STR), ("dict", YAML_CONFIG_DICT)):
        subprocess.run(
            [
                "torch-model-archiver",
                "--model-name", f"{MODEL_NAME}_{suffix}",
                "--version", "1.0",
                "--model-file", MODEL_FILE,
                "--serialized-file", SERIALIZED_FILE,
                "--config-file", config_file,
                "--export-path", MODEL_STORE_DIR,
                "--handler", HANDLER_FILE,
                "-f",
            ],
            check=True,
        )

    # Post-conditions: weights were produced and both archives exist.
    assert len(glob.glob(SERIALIZED_FILE)) == 1
    assert (
        len(glob.glob(os.path.join(MODEL_STORE_DIR, f"{MODEL_NAME}_str.mar"))) == 1
    )
    assert (
        len(glob.glob(os.path.join(MODEL_STORE_DIR, f"{MODEL_NAME}_dict.mar"))) == 1
    )
99
111
100
112
def test_start_torchserve(self):
    """Start torchserve with both archived models registered and check
    that its log files are created.
    """
    registered_models = f"{MODEL_NAME}_str.mar,{MODEL_NAME}_dict.mar"
    subprocess.run(
        [
            "torchserve",
            "--start",
            "--ncs",
            "--models", registered_models,
            "--model-store", MODEL_STORE_DIR,
            "--enable-model-api",
            "--disable-token-auth",
        ],
        check=True,
    )
    # Allow the frontend and backend workers time to come up before
    # probing for the logs.
    time.sleep(10)
    for log_path in ("logs/access_log.log", "logs/model_log.log"):
        assert len(glob.glob(log_path)) == 1
@@ -114,12 +130,7 @@ def test_start_torchserve(self):
114
130
reason = "Test to be run outside docker" ,
115
131
)
116
132
def test_server_status(self):
    """Ping the inference endpoint and check the server reports Healthy."""
    ping = subprocess.run(
        ["curl", "http://localhost:8080/ping"],
        capture_output=True,
        check=True,
    )
    expected_server_status = json.loads('{"status": "Healthy"}')
    # Compare parsed JSON rather than raw bytes so formatting differences
    # in the response don't matter.
    assert json.loads(ping.stdout) == expected_server_status
@@ -129,12 +140,7 @@ def test_server_status(self):
129
140
reason = "Test to be run outside docker" ,
130
141
)
131
142
def test_registered_model (self ):
132
- result = subprocess .run (
133
- "curl http://localhost:8081/models" ,
134
- shell = True ,
135
- capture_output = True ,
136
- check = True ,
137
- )
143
+ result = subprocess .run (["curl" , "http://localhost:8081/models" ], capture_output = True , check = True )
138
144
139
145
def _response_to_tuples (response_str ):
140
146
models = json .loads (response_str )["models" ]
@@ -155,13 +161,15 @@ def test_serve_inference(self):
155
161
request_json = json .dumps (request_data )
156
162
157
163
for model_name in [f"{ MODEL_NAME } _str" , f"{ MODEL_NAME } _dict" ]:
158
- result = subprocess .run (
159
- f"curl -s -X POST -H \" Content-Type: application/json;\" http://localhost:8080/predictions/{ model_name } -d '{ request_json } '" ,
160
- shell = True ,
161
- capture_output = True ,
162
- check = True ,
163
- )
164
-
164
+ command = [
165
+ "curl" ,
166
+ "-s" ,
167
+ "-X" , "POST" ,
168
+ "-H" , "Content-Type: application/json" ,
169
+ f"http://localhost:8080/predictions/{ model_name } " ,
170
+ "-d" , request_json
171
+ ]
172
+ result = subprocess .run (command , capture_output = True , check = True )
165
173
string_result = result .stdout .decode ("utf-8" )
166
174
float_result = float (string_result )
167
175
expected_result = 3.5
0 commit comments