
Commit b782e5c

server : add more test cases (#10569)
* server : add split model test
* add test speculative
* add invalid cases
1 parent 3a8e9af commit b782e5c

File tree

6 files changed: +186 -1 lines changed

examples/server/tests/unit/test_basic.py

+14
@@ -32,3 +32,17 @@ def test_server_models():
     assert res.status_code == 200
     assert len(res.body["data"]) == 1
     assert res.body["data"][0]["id"] == server.model_alias
+
+def test_load_split_model():
+    global server
+    server.model_hf_repo = "ggml-org/models"
+    server.model_hf_file = "tinyllamas/split/stories15M-q8_0-00001-of-00003.gguf"
+    server.model_alias = "tinyllama-split"
+    server.start()
+    res = server.make_request("POST", "/completion", data={
+        "n_predict": 16,
+        "prompt": "Hello",
+        "temperature": 0.0,
+    })
+    assert res.status_code == 200
+    assert match_regex("(little|girl)+", res.body["content"])

examples/server/tests/unit/test_chat_completion.py

+19
@@ -127,3 +127,22 @@ def test_completion_with_response_format(response_format: dict, n_predicted: int
     assert res.status_code != 200
     assert "error" in res.body
 
+
+@pytest.mark.parametrize("messages", [
+    None,
+    "string",
+    [123],
+    [{}],
+    [{"role": 123}],
+    [{"role": "system", "content": 123}],
+    # [{"content": "hello"}], # TODO: should not be a valid case
+    [{"role": "system", "content": "test"}, {}],
+])
+def test_invalid_chat_completion_req(messages):
+    global server
+    server.start()
+    res = server.make_request("POST", "/chat/completions", data={
+        "messages": messages,
+    })
+    assert res.status_code == 400 or res.status_code == 500
+    assert "error" in res.body

examples/server/tests/unit/test_infill.py

+22
@@ -8,6 +8,7 @@ def create_server():
     global server
     server = ServerPreset.tinyllama_infill()
 
+
 def test_infill_without_input_extra():
     global server
     server.start()
@@ -19,6 +20,7 @@ def test_infill_without_input_extra():
     assert res.status_code == 200
     assert match_regex("(One|day|she|saw|big|scary|bird)+", res.body["content"])
 
+
 def test_infill_with_input_extra():
     global server
     server.start()
@@ -33,3 +35,23 @@ def test_infill_with_input_extra():
     })
     assert res.status_code == 200
     assert match_regex("(cuts|Jimmy|mom|came|into|the|room)+", res.body["content"])
+
+
+@pytest.mark.parametrize("input_extra", [
+    {},
+    {"filename": "ok"},
+    {"filename": 123},
+    {"filename": 123, "text": "abc"},
+    {"filename": 123, "text": 456},
+])
+def test_invalid_input_extra_req(input_extra):
+    global server
+    server.start()
+    res = server.make_request("POST", "/infill", data={
+        "prompt": "Complete this",
+        "input_extra": [input_extra],
+        "input_prefix": "#include <cstdio>\n#include \"llama.h\"\n\nint main() {\n    int n_threads = llama_",
+        "input_suffix": "}\n",
+    })
+    assert res.status_code == 400
+    assert "error" in res.body

examples/server/tests/unit/test_rerank.py

+17
@@ -36,3 +36,20 @@ def test_rerank():
     assert most_relevant["relevance_score"] > least_relevant["relevance_score"]
     assert most_relevant["index"] == 2
     assert least_relevant["index"] == 3
+
+
+@pytest.mark.parametrize("documents", [
+    [],
+    None,
+    123,
+    [1, 2, 3],
+])
+def test_invalid_rerank_req(documents):
+    global server
+    server.start()
+    res = server.make_request("POST", "/rerank", data={
+        "query": "Machine learning is",
+        "documents": documents,
+    })
+    assert res.status_code == 400
+    assert "error" in res.body

examples/server/tests/unit/test_speculative.py (new file)

+103

@@ -0,0 +1,103 @@
+import pytest
+from utils import *
+
+# We use a F16 MOE gguf as main model, and q4_0 as draft model
+
+server = ServerPreset.stories15m_moe()
+
+MODEL_DRAFT_FILE_URL = "https://huggingface.co/ggml-org/models/resolve/main/tinyllamas/stories15M-q4_0.gguf"
+
+def create_server():
+    global server
+    server = ServerPreset.stories15m_moe()
+    # download draft model file if needed
+    file_name = MODEL_DRAFT_FILE_URL.split('/').pop()
+    model_draft_file = f'../../../{file_name}'
+    if not os.path.exists(model_draft_file):
+        print(f"Downloading {MODEL_DRAFT_FILE_URL} to {model_draft_file}")
+        with open(model_draft_file, 'wb') as f:
+            f.write(requests.get(MODEL_DRAFT_FILE_URL).content)
+        print(f"Done downloading draft model file")
+    # set default values
+    server.model_draft = model_draft_file
+    server.draft_min = 4
+    server.draft_max = 8
+
+
+@pytest.fixture(scope="module", autouse=True)
+def fixture_create_server():
+    return create_server()
+
+
+def test_with_and_without_draft():
+    global server
+    server.model_draft = None  # disable draft model
+    server.start()
+    res = server.make_request("POST", "/completion", data={
+        "prompt": "I believe the meaning of life is",
+        "temperature": 0.0,
+        "top_k": 1,
+    })
+    assert res.status_code == 200
+    content_no_draft = res.body["content"]
+    server.stop()
+
+    # create new server with draft model
+    create_server()
+    server.start()
+    res = server.make_request("POST", "/completion", data={
+        "prompt": "I believe the meaning of life is",
+        "temperature": 0.0,
+        "top_k": 1,
+    })
+    assert res.status_code == 200
+    content_draft = res.body["content"]
+
+    assert content_no_draft == content_draft
+
+
+def test_different_draft_min_draft_max():
+    global server
+    test_values = [
+        (1, 2),
+        (1, 4),
+        (4, 8),
+        (4, 12),
+        (8, 16),
+    ]
+    last_content = None
+    for draft_min, draft_max in test_values:
+        server.stop()
+        server.draft_min = draft_min
+        server.draft_max = draft_max
+        server.start()
+        res = server.make_request("POST", "/completion", data={
+            "prompt": "I believe the meaning of life is",
+            "temperature": 0.0,
+            "top_k": 1,
+        })
+        assert res.status_code == 200
+        if last_content is not None:
+            assert last_content == res.body["content"]
+        last_content = res.body["content"]
+
+
+@pytest.mark.parametrize("n_slots,n_requests", [
+    (1, 2),
+    (2, 2),
+])
+def test_multi_requests_parallel(n_slots: int, n_requests: int):
+    global server
+    server.n_slots = n_slots
+    server.start()
+    tasks = []
+    for _ in range(n_requests):
+        tasks.append((server.make_request, ("POST", "/completion", {
+            "prompt": "I believe the meaning of life is",
+            "temperature": 0.0,
+            "top_k": 1,
+        })))
+    results = parallel_function_calls(tasks)
+    for res in results:
+        assert res.status_code == 200
+        assert match_regex("(wise|kind|owl|answer)+", res.body["content"])

examples/server/tests/utils.py

+11 -1
@@ -46,6 +46,7 @@ class ServerProcess:
     model_alias: str | None = None
     model_url: str | None = None
     model_file: str | None = None
+    model_draft: str | None = None
     n_threads: int | None = None
     n_gpu_layer: int | None = None
     n_batch: int | None = None
@@ -68,6 +69,8 @@ class ServerProcess:
     response_format: str | None = None
     lora_files: List[str] | None = None
     disable_ctx_shift: int | None = False
+    draft_min: int | None = None
+    draft_max: int | None = None
 
     # session variables
     process: subprocess.Popen | None = None
@@ -102,6 +105,8 @@ def start(self, timeout_seconds: int = 10) -> None:
             server_args.extend(["--model", self.model_file])
         if self.model_url:
             server_args.extend(["--model-url", self.model_url])
+        if self.model_draft:
+            server_args.extend(["--model-draft", self.model_draft])
         if self.model_hf_repo:
             server_args.extend(["--hf-repo", self.model_hf_repo])
         if self.model_hf_file:
@@ -147,6 +152,10 @@ def start(self, timeout_seconds: int = 10) -> None:
             server_args.extend(["--no-context-shift"])
         if self.api_key:
             server_args.extend(["--api-key", self.api_key])
+        if self.draft_max:
+            server_args.extend(["--draft-max", self.draft_max])
+        if self.draft_min:
+            server_args.extend(["--draft-min", self.draft_min])
 
         args = [str(arg) for arg in [server_path, *server_args]]
         print(f"bench: starting server with: {' '.join(args)}")
@@ -185,7 +194,8 @@ def start(self, timeout_seconds: int = 10) -> None:
             raise TimeoutError(f"Server did not start within {timeout_seconds} seconds")
 
     def stop(self) -> None:
-        server_instances.remove(self)
+        if self in server_instances:
+            server_instances.remove(self)
         if self.process:
             print(f"Stopping server with pid={self.process.pid}")
             self.process.kill()
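
Two details worth noting in this diff: the stop() change makes the call idempotent, which the speculative test above relies on when it stops and restarts the server mid-test, and the new draft flags are appended only when truthy, so a value of 0 would be silently skipped. A small self-contained sketch mirroring the flag-building branches added above; the helper name is illustrative, not part of the commit:

def build_draft_args(model_draft, draft_min, draft_max):
    # mirrors the new branches in ServerProcess.start(); the real code
    # stringifies every argument in one pass afterwards
    args = []
    if model_draft:
        args.extend(["--model-draft", model_draft])
    if draft_max:
        args.extend(["--draft-max", str(draft_max)])
    if draft_min:
        args.extend(["--draft-min", str(draft_min)])
    return args

assert build_draft_args("stories15M-q4_0.gguf", 4, 8) == [
    "--model-draft", "stories15M-q4_0.gguf",
    "--draft-max", "8", "--draft-min", "4",
]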
