Skip to content

Commit 187ed53

Browse files
author
ChengyuZhu6
committed
security-fix: resolve security issue with starting a process with a shell
- Issue: Fixed [B605:start_process_with_a_shell] identified by Bandit, which flagged starting a process with a shell as a high-severity security risk (CWE-78: OS Command Injection).
- Fix: Replaced os.system calls with safer subprocess-based alternatives to prevent shell-injection vulnerabilities.

Signed-off-by: ChengyuZhu6 <[email protected]>
1 parent 155fc18 commit 187ed53

20 files changed (+588 −258 lines)

binaries/build.py

+8-4
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,8 @@
11
import argparse
22
import glob
33
import os
4+
import shlex
5+
import subprocess
46
import sys
57

68
# To help discover local modules
@@ -49,10 +51,12 @@ def build_dist_whl(args):
4951
print(f"## In directory: {os.getcwd()} | Executing command: {cur_wheel_cmd}")
5052

5153
if not args.dry_run:
52-
build_exit_code = os.system(cur_wheel_cmd)
53-
# If any one of the steps fail, exit with error
54-
if build_exit_code != 0:
55-
sys.exit(f"## {binary} build Failed !")
54+
try:
55+
cur_wheel_cmd_list = shlex.split(cur_wheel_cmd)
56+
subprocess.run(cur_wheel_cmd_list, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
57+
except subprocess.CalledProcessError as e:
58+
print(f"## {binary} build Failed! Error: {e.stderr.decode()}")
59+
sys.exit(1)
5660

5761

5862
def build(args):

binaries/conda/build_packages.py

+18-22
Original file line numberDiff line numberDiff line change
@@ -1,19 +1,16 @@
11
import argparse
22
import os
3+
import subprocess
34
from datetime import date
45

5-
from ts_scripts.utils import try_and_handle
6+
from ts_scripts.utils import try_and_handle, find_conda_binary
67

78
conda_build_dir = os.path.dirname(os.path.abspath(__file__))
89
REPO_ROOT = os.path.join(conda_build_dir, "..", "..")
910
MINICONDA_DOWNLOAD_URL = (
1011
"https://repo.anaconda.com/miniconda/Miniconda3-py39_4.9.2-Linux-x86_64.sh"
1112
)
12-
CONDA_BINARY = (
13-
os.popen("which conda").read().strip()
14-
if os.system(f"conda --version") == 0
15-
else f"$HOME/miniconda/condabin/conda"
16-
)
13+
CONDA_BINARY = find_conda_binary()
1714

1815
CONDA_PACKAGES_PATH = os.path.join(REPO_ROOT, "binaries", "conda", "output")
1916
CONDA_LINUX_PACKAGES_PATH = os.path.join(
@@ -32,8 +29,7 @@
3229

3330
if os.name == "nt":
3431
# Assumes miniconda is installed in windows
35-
CONDA_BINARY = "conda"
36-
32+
CONDA_BINARY = "conda"
3733

3834
def add_nightly_suffix_conda(binary_name: str) -> str:
3935
"""
@@ -52,29 +48,29 @@ def install_conda_build(dry_run):
5248
"""
5349

5450
# Check if conda binary already exists
55-
exit_code = os.system(f"conda --version")
56-
if exit_code == 0:
57-
print(
58-
f"'conda' already present on the system. Proceeding without a fresh conda installation."
59-
)
51+
try:
52+
subprocess.run(["conda", "--version"], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
53+
print("'conda' already present on the system. Proceeding without a fresh conda installation.")
6054
return
61-
try_and_handle(
62-
f"{CONDA_BINARY} install python=3.8 conda-build anaconda-client -y", dry_run
63-
)
55+
except subprocess.CalledProcessError:
56+
# Conda is not available, proceed with installation
57+
try_and_handle(
58+
f"{CONDA_BINARY} install python=3.8 conda-build anaconda-client -y", dry_run
59+
)
6460

6561

6662
def install_miniconda(dry_run):
6763
"""
6864
Installs miniconda, a slimmer anaconda installation to build conda packages
6965
"""
7066

71-
# Check if conda binary already exists
72-
exit_code = os.system(f"conda --version")
73-
if exit_code == 0:
74-
print(
75-
f"'conda' already present on the system. Proceeding without a fresh minconda installation."
76-
)
67+
try:
68+
subprocess.run(["conda", "--version"], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
69+
print("'conda' already present on the system. Proceeding without a fresh conda installation.")
7770
return
71+
except subprocess.CalledProcessError as e:
72+
raise (e)
73+
7874
if os.name == "nt":
7975
print(
8076
"Identified as Windows system. Please install miniconda using this URL: https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe"

binaries/install.py

+15-8
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import os
2+
import subprocess
23
import sys
34
import glob
45

@@ -13,19 +14,25 @@ def install():
1314
if is_conda_env():
1415
print("## Using conda to install torchserve and torch-model-archiver")
1516
channel_dir = os.path.abspath(os.path.join(REPO_ROOT, "binaries", "conda", "output"))
16-
conda_cmd = f"conda install --channel {channel_dir} -y torchserve torch-model-archiver"
17-
print(f"## In directory: {os.getcwd()} | Executing command: {conda_cmd}")
18-
install_exit_code = os.system(conda_cmd)
17+
conda_cmd = ["conda", "install", "--channel", channel_dir, "-y", "torchserve", "torch-model-archiver"]
18+
print(f"## In directory: {os.getcwd()} | Executing command: {' '.join(conda_cmd)}")
19+
20+
try:
21+
subprocess.run(conda_cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
22+
except subprocess.CalledProcessError as e:
23+
sys.exit("## Torchserve/Model archiver Installation Failed!")
24+
1925
else:
2026
print("## Using pip to install torchserve and torch-model-archiver")
2127
ts_wheel = glob.glob(os.path.join(REPO_ROOT, "dist", "*.whl"))[0]
2228
ma_wheel = glob.glob(os.path.join(REPO_ROOT, "model-archiver", "dist", "*.whl"))[0]
23-
pip_cmd = f"pip install {ts_wheel} {ma_wheel}"
24-
print(f"## In directory: {os.getcwd()} | Executing command: {pip_cmd}")
25-
install_exit_code = os.system(pip_cmd)
29+
pip_cmd = ["pip", "install", ts_wheel, ma_wheel]
30+
print(f"## In directory: {os.getcwd()} | Executing command: {' '.join(pip_cmd)}")
2631

27-
if install_exit_code != 0:
28-
sys.exit("## Torchserve \ Model archiver Installation Failed !")
32+
try:
33+
subprocess.run(pip_cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
34+
except subprocess.CalledProcessError as e:
35+
sys.exit("## Torchserve/Model archiver Installation Failed!")
2936

3037

3138
if __name__ == "__main__":

binaries/upload.py

+12-5
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
import glob
44
import os
55
import sys
6+
import subprocess
67

78
# To help discover local modules
89
REPO_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
@@ -44,11 +45,17 @@ def upload_conda_packages(args, PACKAGES, CONDA_PACKAGES_PATH):
4445
"tar.bz2"
4546
):
4647
print(f"Uploading to anaconda package: {name}")
47-
anaconda_upload_command = f"anaconda upload {file_path} --force"
48-
exit_code = os.system(anaconda_upload_command)
49-
if exit_code != 0:
50-
print(f"Anaconda package upload failed for package {name}")
51-
return exit_code
48+
anaconda_upload_command = ["anaconda", "upload", file_path, "--force"]
49+
50+
try:
51+
subprocess.run(
52+
anaconda_upload_command,
53+
check=True,
54+
stdout=subprocess.PIPE,
55+
stderr=subprocess.PIPE
56+
)
57+
except subprocess.CalledProcessError as e:
58+
return e.returncode
5259
print(f"All packages uploaded to anaconda successfully")
5360

5461

examples/image_classifier/near_real_time_video/create_mar_file.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
import argparse
66
import os
77
import shutil
8+
import subprocess
89

910
MODEL_PTH_FILE = "resnet18-f37072fd.pth"
1011
MODEL_STORE = "model_store"
@@ -15,7 +16,7 @@ def download_pth_file(output_file: str) -> None:
1516
if not os.path.exists(output_file):
1617
cmd = ["wget", " https://download.pytorch.org/models/resnet18-f37072fd.pth"]
1718
print("Downloading resnet-18 pth file")
18-
os.system(" ".join(cmd))
19+
subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
1920

2021

2122
def create_mar():
@@ -42,9 +43,8 @@ def create_mar():
4243
"--handler image_classifier",
4344
"--force",
4445
]
45-
4646
print(f"Archiving resnet-18 model into {MAR_FILE}")
47-
os.system(" ".join(cmd))
47+
subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
4848

4949

5050
def move_mar_file():

examples/torchrec_dlrm/create_dlrm_mar.py

+2-3
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,7 @@
22
This script creates a DLRM model and packs it into a TorchServe mar file
33
"""
44

5-
import os
6-
5+
import subprocess
76
import torch
87
from dlrm_factory import DLRMFactory
98

@@ -32,7 +31,7 @@ def main():
3231
]
3332

3433
print("Archiving model into dlrm.mar")
35-
os.system(" ".join(cmd))
34+
subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
3635
print("Done")
3736

3837

test/pytest/test_distributed_inference_handler.py

+8-4
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import os
22
import sys
3-
3+
import subprocess
44
import pytest
55
import test_utils
66

@@ -37,10 +37,14 @@ def test_large_model_inference():
3737
)
3838

3939
try:
40-
command = f"newman run -e {POSTMAN_ENV_FILE} {POSTMAN_COLLECTION_INFERENCE} -d {POSTMAN_LARGE_MODEL_INFERENCE_DATA_FILE} -r cli,htmlextra --reporter-htmlextra-export {ARTIFACTS_INFERENCE_DIR}/{REPORT_FILE} --verbose"
41-
result = os.system(command)
40+
command = [
41+
"newman", "run", "-e", POSTMAN_ENV_FILE, POSTMAN_COLLECTION_INFERENCE,
42+
"-d", POSTMAN_LARGE_MODEL_INFERENCE_DATA_FILE, "-r", "cli,htmlextra",
43+
"--reporter-htmlextra-export", f"{ARTIFACTS_INFERENCE_DIR}/{REPORT_FILE}", "--verbose"
44+
]
45+
result = subprocess.run(command, check=True)
4246
assert (
43-
result == 0
47+
result.returncode == 0
4448
), "Error: Distributed inference failed, the exit code is not zero"
4549
finally:
4650
test_utils.stop_torchserve()

test/pytest/test_handler.py

+15-9
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
import json
33
import logging
44
import os
5-
5+
import subprocess
66
import numpy as np
77
import pytest
88
import requests
@@ -314,10 +314,14 @@ def test_huggingface_bert_batch_inference():
314314

315315
# Make 2 curl requests in parallel with &
316316
# curl --header \"X-Forwarded-For: 1.2.3.4\" won't work since you can't access local host anymore
317-
response = os.popen(
318-
f"curl http://127.0.0.1:8080/predictions/BERTSeqClassification -T {input_text} & curl http://127.0.0.1:8080/predictions/BERTSeqClassification -T {input_text}"
319-
)
320-
response = response.read()
317+
curl_command = [
318+
"curl", "http://127.0.0.1:8080/predictions/BERTSeqClassification", "-T", input_text
319+
]
320+
process1 = subprocess.Popen(curl_command, stdout=subprocess.PIPE)
321+
process2 = subprocess.Popen(curl_command, stdout=subprocess.PIPE)
322+
response1, _ = process1.communicate()
323+
response2, _ = process2.communicate()
324+
response = response1.decode() + response2.decode()
321325

322326
## Assert that 2 responses are returned from the same batch
323327
assert response == "Not AcceptedNot Accepted"
@@ -360,7 +364,7 @@ def test_MMF_activity_recognition_model_register_and_inference_on_valid_model():
360364

361365
def test_huggingface_bert_model_parallel_inference():
362366
number_of_gpus = torch.cuda.device_count()
363-
check = os.popen(f"curl http://localhost:8081/models")
367+
check = subprocess.run(["curl", "http://localhost:8081/models"], capture_output=True, text=True)
364368
print(check)
365369
if number_of_gpus > 1:
366370
batch_size = 1
@@ -385,10 +389,12 @@ def test_huggingface_bert_model_parallel_inference():
385389
"sample_text_captum_input.txt",
386390
)
387391

388-
response = os.popen(
389-
f"curl http://127.0.0.1:8080/predictions/Textgeneration -T {input_text}"
392+
response = subprocess.run(
393+
["curl", "http://127.0.0.1:8080/predictions/Textgeneration", "-T", input_text],
394+
capture_output=True,
395+
text=True
390396
)
391-
response = response.read()
397+
response = response.stdout
392398

393399
assert (
394400
"Bloomberg has decided to publish a new report on the global economy"

test/pytest/test_sm_mme_requirements.py

+16-17
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import os
22
import pathlib
3-
3+
import subprocess
44
import pytest
55
import requests
66
import test_utils
@@ -103,25 +103,24 @@ def test_oom_on_invoke():
103103

104104
# Make 8 curl requests in parallel with &
105105
# Send multiple requests to make sure to hit OOM
106+
processes = []
106107
for i in range(10):
107-
response = os.popen(
108-
f"curl http://127.0.0.1:8080/models/BERTSeqClassification/invoke -T {input_text} && "
109-
f"curl http://127.0.0.1:8080/models/BERTSeqClassification/invoke -T {input_text} && "
110-
f"curl http://127.0.0.1:8080/models/BERTSeqClassification/invoke -T {input_text} && "
111-
f"curl http://127.0.0.1:8080/models/BERTSeqClassification/invoke -T {input_text} && "
112-
f"curl http://127.0.0.1:8080/models/BERTSeqClassification/invoke -T {input_text} && "
113-
f"curl http://127.0.0.1:8080/models/BERTSeqClassification/invoke -T {input_text} && "
114-
f"curl http://127.0.0.1:8080/models/BERTSeqClassification/invoke -T {input_text} && "
115-
f"curl http://127.0.0.1:8080/models/BERTSeqClassification/invoke -T {input_text} "
116-
)
117-
response = response.read()
108+
for _ in range(8):
109+
process = subprocess.Popen(
110+
["curl", "http://127.0.0.1:8080/models/BERTSeqClassification/invoke", "-T", input_text],
111+
stdout=subprocess.PIPE,
112+
stderr=subprocess.PIPE
113+
)
114+
processes.append(process)
115+
responses = []
116+
for process in processes:
117+
stdout, stderr = process.communicate()
118+
responses.append(stdout.decode())
118119

119120
# If OOM is hit, we expect code 507 to be present in the response string
120-
lines = response.split("\n")
121121
output = ""
122-
for line in lines:
123-
if "code" in line:
124-
line = line.strip()
125-
output = line
122+
for response in responses:
123+
if '"code": 507,' in response:
124+
output = '"code": 507,'
126125
break
127126
assert output == '"code": 507,', "OOM Error expected"

0 commit comments

Comments (0)