Skip to content

Commit

Permalink
Group llm integration test output using github ::group:: (#546)
Browse files Browse the repository at this point in the history
Use the ::group:: [GitHub Workflow
Command](https://docs.github.com/en/actions/writing-workflows/choosing-what-your-workflow-does/workflow-commands-for-github-actions)
to make it easier to navigate integration logs.

Also adds a job summary so the generation results are easy to review at a glance.
  • Loading branch information
renxida authored Nov 19, 2024
1 parent 7c5bd86 commit 4dd2fc8
Show file tree
Hide file tree
Showing 3 changed files with 39 additions and 7 deletions.
11 changes: 8 additions & 3 deletions app_tests/integration_tests/llm/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@
compile_model,
find_available_port,
start_llm_server,
start_log_group,
end_log_group,
)

logger = logging.getLogger(__name__)
Expand All @@ -40,7 +42,9 @@ def model_test_dir(request, tmp_path_factory):
Yields:
Tuple[Path, Path]: The paths to the Hugging Face home and the temp dir.
"""
logger.info("Preparing model artifacts...")
logger.info(
"Preparing model artifacts..." + start_log_group("Preparing model artifacts")
)

repo_id = request.param["repo_id"]
model_file = request.param["model_file"]
Expand Down Expand Up @@ -85,7 +89,7 @@ def model_test_dir(request, tmp_path_factory):
logger.info(f"Config: {json.dumps(config, indent=2)}")
with open(edited_config_path, "w") as f:
json.dump(config, f)
logger.info("Model artifacts setup successfully")
logger.info("Model artifacts setup successfully" + end_log_group())
yield hf_home, tmp_dir
finally:
shutil.rmtree(tmp_dir)
Expand All @@ -110,7 +114,7 @@ def llm_server(request, model_test_dir, available_port):
Yields:
subprocess.Popen: The server process that was started.
"""
logger.info("Starting LLM server...")
logger.info("Starting LLM server..." + start_log_group("Starting LLM server"))
hf_home, tmp_dir = model_test_dir
model_file = request.param["model_file"]
settings = request.param["settings"]
Expand All @@ -129,6 +133,7 @@ def llm_server(request, model_test_dir, available_port):
parameters_path,
settings,
)
logger.info("LLM server started!" + end_log_group())
yield server_process
# Teardown: kill the server
server_process.terminate()
Expand Down
21 changes: 17 additions & 4 deletions app_tests/integration_tests/llm/cpu_llm_server_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
import requests
import uuid

from .utils import AccuracyValidationException
from .utils import AccuracyValidationException, start_log_group, end_log_group

logger = logging.getLogger(__name__)

Expand All @@ -37,7 +37,7 @@ def do_generate(prompt, port):
# Create a GenerateReqInput-like structure
data = {
"text": prompt,
"sampling_params": {"max_completion_tokens": 50, "temperature": 0.7},
"sampling_params": {"max_completion_tokens": 15, "temperature": 0.7},
"rid": uuid.uuid4().hex,
"return_logprob": False,
"logprob_start_len": -1,
Expand Down Expand Up @@ -82,10 +82,23 @@ def test_llm_server(llm_server, available_port):
# Here you would typically make requests to your server
# and assert on the responses
assert llm_server.poll() is None
output = do_generate("1 2 3 4 5 ", available_port)
logger.info(output)
PROMPT = "1 2 3 4 5 "
expected_output_prefix = "6 7 8"
logger.info(
"Sending HTTP Generation Request"
+ start_log_group("Sending HTTP Generation Request")
)
output = do_generate(PROMPT, available_port)
# log to GITHUB_STEP_SUMMARY if we are in a GitHub Action
if "GITHUB_ACTION" in os.environ:
with open(os.environ["GITHUB_STEP_SUMMARY"], "a") as f:
# log prompt
f.write("LLM results:\n")
f.write(f"- llm_prompt:`{PROMPT}`\n")
f.write(f"- llm_output:`{output}`\n")
logger.info(output)
if not output.startswith(expected_output_prefix):
raise AccuracyValidationException(
f"Expected '{output}' to start with '{expected_output_prefix}'"
)
logger.info("HTTP Generation Request Successful" + end_log_group())
14 changes: 14 additions & 0 deletions app_tests/integration_tests/llm/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -178,3 +178,17 @@ def start_llm_server(
# Wait for server to start
wait_for_server(f"http://localhost:{port}", timeout)
return server_process


def start_log_group(headline):
    """Return a GitHub Actions ``::group::`` marker for *headline*.

    Emits the workflow command only when running inside GitHub Actions
    (detected via the GITHUB_ACTIONS environment variable); otherwise
    returns an empty string so local log output is unaffected.
    """
    if os.environ.get("GITHUB_ACTIONS") != "true":
        return ""
    return f"\n::group::{headline}"


def end_log_group():
    """Return the GitHub Actions ``::endgroup::`` marker, or '' outside CI.

    Counterpart to ``start_log_group``; detection uses the GITHUB_ACTIONS
    environment variable so local runs produce no workflow commands.
    """
    in_github_ci = os.environ.get("GITHUB_ACTIONS") == "true"
    return "\n::endgroup::" if in_github_ci else ""

0 comments on commit 4dd2fc8

Please sign in to comment.