Commit 9e5f1a9

feat(ai-contracts-test): add prompt renderer component (ethereum-optimism#17578)
* feat(ai-contracts-test): add prompt renderer component
  - implement render.py script for prompt generation with placeholder replacement
  - add prompt/ directory with canonical prompt template (prompt.md)
  - extend justfile with new commands: just render and just prompt (combined workflow)
  - update runbook documentation with multi-step workflow instructions
* fix(semgrep): exclude prompt files from sol-safety-trycatch-eip150 rule
1 parent b951077 commit 9e5f1a9


5 files changed: +611 -1 lines


.semgrep/rules/sol-rules.yaml

Lines changed: 1 addition & 0 deletions
```diff
@@ -110,6 +110,7 @@ rules:
       exclude:
         - packages/contracts-bedrock/test
         - packages/contracts-bedrock/scripts
+        - ops/ai-eng/contracts-test-maintenance/prompt/prompt.md

   - id: sol-style-input-arg-fmt
     languages: [solidity]
```

ops/ai-eng/contracts-test-maintenance/docs/runbook.md

Lines changed: 18 additions & 1 deletion
````diff
@@ -10,12 +10,23 @@ The system uses a two-branch scoring algorithm: tests whose contracts have moved
 
 ```bash
 # From the ai-eng directory
+
+# Option 1: Run both steps in one command (recommended)
+just prompt
+
+# Option 2: Run steps individually
+# Step 1: Rank tests by staleness
 just rank
+
+# Step 2: Generate AI prompt for the highest-priority test
+just render
 ```
 
 ## Output
 
-The tool generates `tests_ranker/output/ranking.json`:
+### Test Ranking Output
+
+The `just rank` command generates `tests_ranker/output/ranking.json`:
 
 ```json
 {
@@ -40,3 +51,9 @@ The tool generates `tests_ranker/output/ranking.json`:
 - `contract_commit_ts` - Unix timestamp of contract file's last commit
 - `staleness_days` - Calculated staleness (positive = contract newer)
 - `score` - Priority score (higher = more urgent)
+
+### Prompt Renderer Output
+
+The `just render` command generates a markdown file in `prompt-renderer/output/` with the name format `{ContractName}_prompt.md`. This file contains the AI prompt template with the highest-priority test and contract paths filled in, ready to be used for test maintenance analysis.
+
+For example, if the top-ranked test is `ProtocolVersions.t.sol`, the output file will be `ProtocolVersions_prompt.md`.
````
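
The full schema of `ranking.json` is not visible in this hunk. As a rough sketch rather than the authoritative format, the renderer added in this commit only depends on the shape below; the field names are taken from the runbook bullets and from `render.py`, and every value shown is illustrative:

```python
# Sketch of the ranking.json shape that render.py relies on.
# Field names come from the runbook and render.py; all values are made up for illustration.
example_ranking = {
    "entries": [
        {
            "test_path": "test/L1/ProtocolVersions.t.sol",   # hypothetical path
            "contract_path": "src/L1/ProtocolVersions.sol",  # hypothetical path
            "contract_commit_ts": 1700000000,                # illustrative Unix timestamp
            "staleness_days": 42.0,                          # illustrative staleness value
            "score": 42.0,                                   # illustrative priority score
        }
    ]
}

# The renderer only ever reads the first (highest-priority) entry:
top = example_ranking["entries"][0]
print(top["test_path"], top["contract_path"])
```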
Lines changed: 92 additions & 0 deletions
@@ -0,0 +1,92 @@

```python
#!/usr/bin/env python3
"""
Script to render a prompt instance by replacing placeholders with actual test and contract paths
from the first entry in the ranking JSON file.
"""

import json
from pathlib import Path


def load_ranking_data():
    """Load the ranking JSON file and return the first entry."""
    ranking_file = (
        Path(__file__).parent.parent / "tests_ranker" / "output" / "ranking.json"
    )

    with open(ranking_file, "r") as f:
        data = json.load(f)

    if not data.get("entries"):
        raise ValueError("No entries found in ranking.json")

    return data["entries"][0]


def load_prompt_template():
    """Load the prompt template markdown file."""
    prompt_file = Path(__file__).parent.parent / "prompt" / "prompt.md"

    with open(prompt_file, "r") as f:
        return f.read()


def render_prompt(template, test_path, contract_path):
    """Replace the placeholders in the template with actual paths."""
    return template.replace("{TEST_PATH}", test_path).replace(
        "{CONTRACT_PATH}", contract_path
    )


def save_prompt_instance(rendered_prompt, test_path, contract_path):
    """Save the rendered prompt to the output folder with a descriptive name."""
    output_dir = Path(__file__).parent / "output"
    output_dir.mkdir(exist_ok=True)

    # Extract test name and remove .t suffix if present
    test_name = Path(test_path).stem
    if test_name.endswith(".t"):
        test_name = test_name[:-2]

    # Create descriptive filename
    filename = f"{test_name}_prompt.md"
    output_file = output_dir / filename

    with open(output_file, "w") as f:
        f.write(rendered_prompt)

    return output_file


def main():
    """Main function to render and save the prompt instance."""
    try:
        # Load the first entry from ranking
        first_entry = load_ranking_data()
        test_path = first_entry["test_path"]
        contract_path = first_entry["contract_path"]

        print(f"Using first ranking entry:")
        print(f" Test path: {test_path}")
        print(f" Contract path: {contract_path}")

        # Load prompt template
        template = load_prompt_template()

        # Render the prompt with actual paths
        rendered_prompt = render_prompt(template, test_path, contract_path)

        # Save the rendered prompt
        output_file = save_prompt_instance(rendered_prompt, test_path, contract_path)

        print(f"Prompt instance saved to: {output_file}")

    except Exception as e:
        print(f"Error: {e}")
        return 1

    return 0


if __name__ == "__main__":
    exit(main())
```
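
To make the script's two transformations concrete (placeholder substitution and the `.t` suffix handling in `save_prompt_instance`), here is a small standalone sketch; the template string and both paths are hypothetical stand-ins, since neither `prompt.md` nor a real `ranking.json` entry appears in this diff:

```python
from pathlib import Path

# Hypothetical template snippet; the real template lives in prompt/prompt.md.
template = "Review the test at {TEST_PATH} against the contract at {CONTRACT_PATH}."

# Hypothetical paths standing in for a ranking.json entry.
test_path = "test/L1/ProtocolVersions.t.sol"
contract_path = "src/L1/ProtocolVersions.sol"

# Placeholder substitution, as in render_prompt().
rendered = template.replace("{TEST_PATH}", test_path).replace("{CONTRACT_PATH}", contract_path)
print(rendered)
# Review the test at test/L1/ProtocolVersions.t.sol against the contract at src/L1/ProtocolVersions.sol.

# Output filename derivation, as in save_prompt_instance(): drop the ".t" from the stem.
test_name = Path(test_path).stem      # "ProtocolVersions.t"
if test_name.endswith(".t"):
    test_name = test_name[:-2]        # "ProtocolVersions"
print(f"{test_name}_prompt.md")       # ProtocolVersions_prompt.md
```

The derived filename matches the `ProtocolVersions_prompt.md` example given in the runbook.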
