Skip to content

Commit 4dd2fc8

Browse files
authored
Group llm integration test output using github ::group:: (#546)
Use the ::group:: [GitHub Workflow Command](https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/workflow-commands-for-github-actions) to make it easier to navigate integration logs. Also adds a step summary so the generation results can be reviewed at a glance.
1 parent 7c5bd86 commit 4dd2fc8

File tree

3 files changed

+39
-7
lines changed

3 files changed

+39
-7
lines changed

app_tests/integration_tests/llm/conftest.py

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,8 @@
1919
compile_model,
2020
find_available_port,
2121
start_llm_server,
22+
start_log_group,
23+
end_log_group,
2224
)
2325

2426
logger = logging.getLogger(__name__)
@@ -40,7 +42,9 @@ def model_test_dir(request, tmp_path_factory):
4042
Yields:
4143
Tuple[Path, Path]: The paths to the Hugging Face home and the temp dir.
4244
"""
43-
logger.info("Preparing model artifacts...")
45+
logger.info(
46+
"Preparing model artifacts..." + start_log_group("Preparing model artifacts")
47+
)
4448

4549
repo_id = request.param["repo_id"]
4650
model_file = request.param["model_file"]
@@ -85,7 +89,7 @@ def model_test_dir(request, tmp_path_factory):
8589
logger.info(f"Config: {json.dumps(config, indent=2)}")
8690
with open(edited_config_path, "w") as f:
8791
json.dump(config, f)
88-
logger.info("Model artifacts setup successfully")
92+
logger.info("Model artifacts setup successfully" + end_log_group())
8993
yield hf_home, tmp_dir
9094
finally:
9195
shutil.rmtree(tmp_dir)
@@ -110,7 +114,7 @@ def llm_server(request, model_test_dir, available_port):
110114
Yields:
111115
subprocess.Popen: The server process that was started.
112116
"""
113-
logger.info("Starting LLM server...")
117+
logger.info("Starting LLM server..." + start_log_group("Starting LLM server"))
114118
hf_home, tmp_dir = model_test_dir
115119
model_file = request.param["model_file"]
116120
settings = request.param["settings"]
@@ -129,6 +133,7 @@ def llm_server(request, model_test_dir, available_port):
129133
parameters_path,
130134
settings,
131135
)
136+
logger.info("LLM server started!" + end_log_group())
132137
yield server_process
133138
# Teardown: kill the server
134139
server_process.terminate()

app_tests/integration_tests/llm/cpu_llm_server_test.py

Lines changed: 17 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010
import requests
1111
import uuid
1212

13-
from .utils import AccuracyValidationException
13+
from .utils import AccuracyValidationException, start_log_group, end_log_group
1414

1515
logger = logging.getLogger(__name__)
1616

@@ -37,7 +37,7 @@ def do_generate(prompt, port):
3737
# Create a GenerateReqInput-like structure
3838
data = {
3939
"text": prompt,
40-
"sampling_params": {"max_completion_tokens": 50, "temperature": 0.7},
40+
"sampling_params": {"max_completion_tokens": 15, "temperature": 0.7},
4141
"rid": uuid.uuid4().hex,
4242
"return_logprob": False,
4343
"logprob_start_len": -1,
@@ -82,10 +82,23 @@ def test_llm_server(llm_server, available_port):
8282
# Here you would typically make requests to your server
8383
# and assert on the responses
8484
assert llm_server.poll() is None
85-
output = do_generate("1 2 3 4 5 ", available_port)
86-
logger.info(output)
85+
PROMPT = "1 2 3 4 5 "
8786
expected_output_prefix = "6 7 8"
87+
logger.info(
88+
"Sending HTTP Generation Request"
89+
+ start_log_group("Sending HTTP Generation Request")
90+
)
91+
output = do_generate(PROMPT, available_port)
92+
# log to GITHUB_STEP_SUMMARY if we are in a GitHub Action
93+
if "GITHUB_ACTION" in os.environ:
94+
with open(os.environ["GITHUB_STEP_SUMMARY"], "a") as f:
95+
# log prompt
96+
f.write("LLM results:\n")
97+
f.write(f"- llm_prompt:`{PROMPT}`\n")
98+
f.write(f"- llm_output:`{output}`\n")
99+
logger.info(output)
88100
if not output.startswith(expected_output_prefix):
89101
raise AccuracyValidationException(
90102
f"Expected '{output}' to start with '{expected_output_prefix}'"
91103
)
104+
logger.info("HTTP Generation Request Successful" + end_log_group())

app_tests/integration_tests/llm/utils.py

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -178,3 +178,17 @@ def start_llm_server(
178178
# Wait for server to start
179179
wait_for_server(f"http://localhost:{port}", timeout)
180180
return server_process
181+
182+
183+
def start_log_group(headline):
184+
# check if we are in github ci
185+
if os.environ.get("GITHUB_ACTIONS") == "true":
186+
return f"\n::group::{headline}"
187+
return ""
188+
189+
190+
def end_log_group():
191+
# check if we are in github ci
192+
if os.environ.get("GITHUB_ACTIONS") == "true":
193+
return "\n::endgroup::"
194+
return ""

0 commit comments

Comments (0)