
Commit c23fef1

Authored by Anisha Udayakumar, Adrian Boguszewski, Copilot, Antonio Martinez, and dependabot[bot]
CI fixes and Model Export logger silencing (#283)
* Fix CI for multimodal generator
* Allowed server to accept different models
* Replaced prints with logger
* Moved model selection to the UI
* Moved image size selection back to the UI
* Update demos/paint_your_dreams_demo/main.py (co-authored by Copilot)
* Remove model selection from setup scripts
* Replaced command-line conversion with Python code (#276)
* docs: include Medium article in AI agent RAG kit (#277)
* Enabled more models in Paint Your Dreams (#279): enabled more tokenizers, fixed SDXL inference, used a better pipeline for conversion, added NNCF, removed a few models
* Updated CI to skip HF login if token not provided (#280)
* Bump tar-fs from 3.0.8 to 3.0.9 (indirect dependency) in /demos/hide_your_mess_behind_demo (#282); commits: mafintosh/tar-fs@v3.0.8...v3.0.9
* CI fixes and Model Export logger silencing
* Refactor test.py with a log-based FastAPI readiness check and add the missing file, as Copilot suggested
* Move "Demo is ready!" into test.py after the API tests; revert action.yml with updated Windows handling
* Fix CRLF to LF in action.yml for a clean CI diff
* Update ai_ref_kits/multimodal_ai_visual_generator/ci/test.py

Signed-off-by: Antonio Martinez <[email protected]>
Signed-off-by: dependabot[bot] <[email protected]>
Co-authored-by: Adrian Boguszewski <[email protected]>
Co-authored-by: Copilot <[email protected]>
Co-authored-by: Antonio Martinez <[email protected]>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
1 parent 296df7d commit c23fef1

File tree: 5 files changed, +97 −71 lines


.github/reusable-steps/gradio-action/action.yml — 47 additions, 18 deletions

```diff
@@ -24,7 +24,7 @@ runs:
       else
         python ${{ inputs.script }} 2>&1 | tee gradio_log.txt &
       fi
-      
+
       # Assign process ID
       app_pid=$(ps aux | grep -i '[p]ython ${{ inputs.script }}' | awk '{print $2}')
@@ -44,33 +44,62 @@ runs:
 
       # Exit with the readiness check status
       exit $status
+
   - name: Run Gradio App (Windows)
     if: ${{ runner.os == 'Windows' }}
     shell: powershell
     run: |
       cd ${{ inputs.project }}
+      Write-Output "==> Running script: ${{ inputs.script }}"
 
-      # Start the Gradio app in the background and redirect both output and error to the same file
-      Start-Process -NoNewWindow -FilePath "python" -ArgumentList "${{ inputs.script }}" -RedirectStandardOutput gradio_stdout.txt -RedirectStandardError gradio_stderr.txt
+      $timeout = ${{ inputs.timeout }}
+      $start_time = Get-Date
+      $success = $false
 
-      # Wait a moment to ensure the process starts
-      Start-Sleep -Seconds 5
+      if ("${{ inputs.script }}" -like "*test.py") {
+        Write-Output "==> test.py detected. Running in foreground..."
 
-      # Assign process ID
-      $app_pid = (Get-Process | Where-Object { $_.Name -eq "python.*" }).Id
+        $output = python "${{ inputs.script }}"
+        $output | Out-File -FilePath gradio_log.txt -Encoding utf8
+        Get-Content -Path gradio_log.txt
 
-      $timeout = ${{ inputs.timeout }}
-      # Wait for the specific log message
-      $start_time = Get-Date
-      Get-Content -Wait gradio_stderr.txt, gradio_stdout.txt | ForEach-Object {
-        if ($_ -match "Demo is ready!") { break }
-        if (((Get-Date) - $start_time).TotalSeconds -ge $timeout) {
-          exit 1
+        if ($LASTEXITCODE -eq 0) {
+          $success = $true
+        } else {
+          Write-Error "Script exited with code $LASTEXITCODE"
+        }
+
+      } else {
+        Write-Output "==> Long-running app detected. Launching in background..."
+        $proc = Start-Process -NoNewWindow -FilePath "python" -ArgumentList "${{ inputs.script }}" `
+          -RedirectStandardOutput gradio_stdout.txt -RedirectStandardError gradio_stderr.txt -PassThru
+        $app_pid = $proc.Id
+        Write-Output "==> App PID: $app_pid"
+
+        while ($true) {
+          if (Test-Path gradio_stdout.txt) {
+            $content = Get-Content gradio_stdout.txt -Raw
+            if ($content -match "Demo is ready!") {
+              $success = $true
+              break
+            }
+          }
+          if (((Get-Date) - $start_time).TotalSeconds -ge $timeout) {
+            Write-Output "==> Timeout waiting for readiness."
+            break
+          }
+          Start-Sleep -Seconds 2
        }
+
+        Write-Output "==> Stopping background process..."
+        Stop-Process -Id $app_pid -Force -ErrorAction SilentlyContinue
      }
 
-      # Stop the Gradio app process
-      Stop-Process -Id $app_pid -Force -ErrorAction SilentlyContinue
+      Write-Output "==> Gradio Log Output:"
+      if (Test-Path gradio_log.txt) { Get-Content gradio_log.txt }
+      if (Test-Path gradio_stdout.txt) { Get-Content gradio_stdout.txt }
+      if (Test-Path gradio_stderr.txt) { Get-Content gradio_stderr.txt }
 
-      # Exit with the readiness check status
-      exit 0
+      if (-not $success) {
+        exit 1
+      }
```
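The Windows branch above polls `gradio_stdout.txt` for the `Demo is ready!` marker before tearing the app down. For reference, the same readiness-wait pattern as a minimal Python sketch; the file name and marker string come from the diff, but `wait_for_ready` itself is an illustrative helper, not part of the repository.

```python
import time
from pathlib import Path

def wait_for_ready(log_path: Path, marker: str = "Demo is ready!",
                   timeout: float = 300.0, poll: float = 2.0) -> bool:
    """Poll a log file until a readiness marker appears or the timeout elapses."""
    start = time.monotonic()
    while time.monotonic() - start < timeout:
        # The file may not exist until the app has produced its first output.
        if log_path.exists() and marker in log_path.read_text(errors="replace"):
            return True
        time.sleep(poll)
    return False  # timed out; the caller should fail the CI step

if __name__ == "__main__":
    ok = wait_for_ready(Path("gradio_stdout.txt"))
    raise SystemExit(0 if ok else 1)
```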

ai_ref_kits/multimodal_ai_visual_generator/ci/test.py — 26 additions, 18 deletions

```diff
@@ -19,12 +19,14 @@
 from convert_and_optimize_text2image import convert_image_model
 
 # ----- Configuration -----
+
 MODEL_DIR = Path("models")
 LLM_MODEL_TYPE = "tiny-llama-1b-chat"
 IMAGE_MODEL_TYPE = "lcm"
 PRECISION = "int4"
+LOG_FILE = Path("gradio_log.txt")
 
-# ----- Step 1: Export Models if Needed -----
+# ----- Step 1: Export Models if Needed (will handle download internally) -----
 logger.info("Checking and exporting LLM + Text2Image models if necessary...")
 convert_chat_model(LLM_MODEL_TYPE, PRECISION, MODEL_DIR)
 convert_image_model(IMAGE_MODEL_TYPE, PRECISION, MODEL_DIR)
@@ -38,22 +40,29 @@
     "MODEL_PRECISION": PRECISION
 })
 
-process = subprocess.Popen(
-    [sys.executable, "-m", "uvicorn", "main:app", "--host", "127.0.0.1", "--port", "8000"],
-    env=env
-)
+with LOG_FILE.open("w") as lf:
+    process = subprocess.Popen(
+        [sys.executable, "-m", "uvicorn", "main:app", "--host", "127.0.0.1", "--port", "8000"],
+        env=env,
+        stdout=lf,
+        stderr=subprocess.STDOUT
+    )
 
 try:
-    # Wait up to ~130 seconds (130 retries x 1s sleep) for FastAPI server to come up
-    for _ in range(130):
-        try:
-            r = requests.get("http://localhost:8000/docs", timeout=2)
-            if r.status_code == 200:
+    # ----- Wait for Readiness from Logs -----
+    logger.info("Waiting for FastAPI log to report readiness...")
+    start_time = time.time()
+    timeout = 130  # seconds
+
+    while time.time() - start_time < timeout:
+        if LOG_FILE.exists():
+            content = LOG_FILE.read_text()
+            if "Uvicorn running on" in content or "Application startup complete." in content:
+                logger.info("FastAPI server is up.")
                break
-        except requests.ConnectionError:
-            time.sleep(1)
+        time.sleep(1)
     else:
-        raise RuntimeError("FastAPI server did not start within 130 seconds.")
+        raise RuntimeError("FastAPI server did not start within timeout period.")
 
     # ----- Step 3: Test Story Prompt Generation -----
     logger.info("Testing /generate_story_prompts endpoint...")
@@ -63,8 +72,7 @@
     )
     assert response1.status_code == 200, f"Story generation failed: {response1.text}"
     scenes = response1.json()["scenes"]
-    logger.info("Generated scenes: %s", scenes)
-    logger.info("Scene prompt generation test passed.")
+    logger.info("Scene prompt generation test passed. Example: %s", scenes)
 
     # ----- Step 4: Test Image Generation -----
     logger.info("Testing /generate_images endpoint...")
@@ -74,10 +82,10 @@
     )
     assert response2.status_code == 200, f"Image generation failed: {response2.text}"
     image = response2.json()["image"]
-    logger.info("Image string (truncated): %s", image[:100])
-    logger.info("Image generation test passed.")
+    logger.info("Image generation test passed. Base64 (truncated): %s", image[:100])
+    logger.info("Demo is ready!")
 
 finally:
     logger.info("Shutting down FastAPI server...")
     process.terminate()
-    process.wait()
+    process.wait()
```
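Pieced together from the hunks above, the new launch-and-wait flow in test.py amounts to the following self-contained sketch. It assumes `uvicorn` is installed and a `main:app` FastAPI module is importable; the endpoint assertions are elided.

```python
import subprocess
import sys
import time
from pathlib import Path

LOG_FILE = Path("gradio_log.txt")

# Launch uvicorn with stdout and stderr merged into one log file,
# so readiness can be detected from the log instead of HTTP polling.
with LOG_FILE.open("w") as lf:
    process = subprocess.Popen(
        [sys.executable, "-m", "uvicorn", "main:app", "--host", "127.0.0.1", "--port", "8000"],
        stdout=lf,
        stderr=subprocess.STDOUT,
    )

try:
    deadline = time.time() + 130  # same timeout the test uses
    while time.time() < deadline:
        if LOG_FILE.exists() and "Application startup complete." in LOG_FILE.read_text():
            break
        time.sleep(1)
    else:
        raise RuntimeError("FastAPI server did not start within timeout period.")
    # ... exercise /generate_story_prompts and /generate_images here ...
finally:
    process.terminate()
    process.wait()
```

Reading the log avoids a subtle failure mode of HTTP polling: a server that binds the port but never finishes startup would still answer `/docs` probes with connection errors until the timeout, whereas the startup log line is unambiguous.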

ai_ref_kits/multimodal_ai_visual_generator/convert_and_optimize_llm.py — 2 additions, 10 deletions

```diff
@@ -75,15 +75,7 @@ def validate_export(output_dir: Path) -> list[str]:
     Validates the presence of all required files in the exported model directory.
     Returns a list of any missing critical files.
     """
-    logger.info("Verifying exported files:")
-    missing = []
-    for file in CRITICAL_FILES:
-        if not (output_dir / file).exists():
-            logger.error(f"Missing: {file}")
-            missing.append(file)
-        else:
-            logger.info(f"Found: {file}")
-    return missing
+    return [file for file in CRITICAL_FILES if not (output_dir / file).exists()]
 
 def quantize_model(model, precision: str, output_dir: Path):
     """
@@ -137,7 +129,7 @@ def convert_chat_model(model_type: str, precision: str, model_dir: Path) -> Path
     }
     (output_dir / "model_index.json").write_text(json.dumps(model_index, indent=2))
 
-    logger.info("Verifying critical files:")
+    logger.info("Checking for missing critical files silently...")
     missing_files = validate_export(output_dir)
     if missing_files:
         logger.warning("Export completed with missing files.")
```

ai_ref_kits/multimodal_ai_visual_generator/convert_and_optimize_text2image.py — 9 additions, 17 deletions

```diff
@@ -55,22 +55,11 @@ def run_optimum_export(model_id: str, output_dir: Path, precision: str):
     subprocess.run(cmd, shell=(platform.system() == "Windows"), check=True)
 
 def validate_export(output_dir: Path, critical_files: list[str]) -> list[str]:
-    logger.info("Verifying exported files:")
-    missing = []
-
-    for file in critical_files:
-        if not (output_dir / file).exists():
-            logger.error(f"Missing: {file}")
-            missing.append(file)
-        else:
-            logger.info(f"Found: {file}")
-
-    if missing:
-        logger.warning("Export completed with missing files.")
-    else:
-        logger.info("All critical files verified successfully.")
-
-    return missing
+    """
+    Silently checks for the presence of required exported files.
+    Returns a list of missing files, without logging each one.
+    """
+    return [file for file in critical_files if not (output_dir / file).exists()]
 
 def convert_image_model(model_type: str, precision: str, model_dir: Path) -> Path:
     """
@@ -88,12 +77,15 @@ def convert_image_model(model_type: str, precision: str, model_dir: Path) -> Pat
         logger.info("Skipping re-export.\n")
         return output_dir
     else:
-        logger.warning(f"Export folder exists but missing files: {missing}")
         logger.info("Re-exporting model...\n")
 
     # Run export and validate output
     run_optimum_export(model_id, output_dir, precision)
     missing_files = validate_export(output_dir, CRITICAL_FILES)
+    if missing_files:
+        logger.warning(f"Export completed with missing files: {missing_files}")
+    else:
+        logger.info("All critical files verified successfully.")
 
     logger.info(f"Model exported to: {output_dir}\n")
     return output_dir
```
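The design choice in this file is to keep validation silent and report once at the call site. A hedged sketch of that split, with the export step stubbed out; `report_export` is an illustrative wrapper, not a function in the repository.

```python
import logging
from pathlib import Path

logger = logging.getLogger(__name__)

def validate_export(output_dir: Path, critical_files: list[str]) -> list[str]:
    """Silent check, as in the diff: just return what is missing."""
    return [f for f in critical_files if not (output_dir / f).exists()]

def report_export(output_dir: Path, critical_files: list[str]) -> list[str]:
    """Illustrative caller: one summary line instead of per-file log spam."""
    missing_files = validate_export(output_dir, critical_files)
    if missing_files:
        logger.warning(f"Export completed with missing files: {missing_files}")
    else:
        logger.info("All critical files verified successfully.")
    return missing_files
```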

ai_ref_kits/multimodal_ai_visual_generator/main.py — 13 additions, 8 deletions

```diff
@@ -11,6 +11,7 @@
 import sys
 import yaml
 import openvino_genai as ov_genai
+import openvino as ov
 import logging
 import random
 
@@ -61,29 +62,34 @@
 # ---------- Load Config ----------
 with open(CONFIG_PATH, "r") as f:
     config = yaml.safe_load(f)
+
+# ---------- Determine Device (GPU if available, else fallback) ----------
+core = ov.Core()
+preferred_device = "GPU" if "GPU" in core.available_devices else "CPU"
+print(f"Using OpenVINO device: {preferred_device}")
 
-# ---------- Lazy load models if available ----------
+# ---------- Load models ----------
 image_pipe = None
 llm_pipe = None
 
 if image_model_dir.exists():
     try:
-        image_pipe = ov_genai.Text2ImagePipeline(image_model_dir, device="GPU")
-        logger.info("Image model loaded.")
+        image_pipe = ov_genai.Text2ImagePipeline(image_model_dir, device=preferred_device)
+        logger.info("Image model loaded successfully.")
     except Exception as e:
-        logger.error(f"Failed to load Image model: {e}")
+        logger.error(f"Failed to load image model: {e}")
 else:
     logger.warning(f"Image model not found at {image_model_dir}")
 
 if llm_model_dir.exists():
     try:
-        llm_pipe = ov_genai.LLMPipeline(str(llm_model_dir), device="GPU")
-        logger.info("LLM model loaded.")
+        llm_pipe = ov_genai.LLMPipeline(str(llm_model_dir), device=preferred_device)
+        logger.info("LLM model loaded successfully.")
     except Exception as e:
         logger.error(f"Failed to load LLM model: {e}")
 else:
     logger.warning(f"LLM model not found at {llm_model_dir}")
-
+
 llm_config = ov_genai.GenerationConfig()
 llm_config.max_new_tokens = 256
 llm_config.apply_chat_template = False
@@ -209,7 +215,6 @@ def callback(step, num_steps, latent):
 
 # ---------- Server Start Print ----------
 if image_pipe or llm_pipe:
-    logger.info("Demo is ready!")
     logger.info("FastAPI backend is running.")
     logger.info("In a separate terminal, start the Streamlit app using: streamlit run streamlit_app.py")
 else:
```
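The device fallback added to main.py is easy to demonstrate in isolation. A minimal sketch, assuming `openvino` and `openvino-genai` are installed; the model directory is a placeholder, since the real app resolves it from its config.

```python
import openvino as ov
import openvino_genai as ov_genai

# Ask the OpenVINO runtime which devices it can see; prefer GPU, else fall back to CPU.
core = ov.Core()
preferred_device = "GPU" if "GPU" in core.available_devices else "CPU"
print(f"Using OpenVINO device: {preferred_device}")

# Placeholder model directory for illustration only.
image_pipe = None
try:
    image_pipe = ov_genai.Text2ImagePipeline("models/lcm", device=preferred_device)
except Exception as e:
    # Degrade gracefully: the server can still start with only one pipeline loaded.
    print(f"Failed to load image model: {e}")
```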
