Commit 319feef

Merge pull request #11 from ravilushqa/review_prompt_improvements
Review prompt improvements
2 parents 219cc7f + bc6a4e8

File tree

3 files changed, +121 -42 lines changed


cmd/review/main.go

+92 -38
@@ -2,10 +2,11 @@ package main
 
 import (
     "context"
+    "encoding/json"
+    "errors"
     "fmt"
     "os"
     "os/signal"
-    "strings"
     "syscall"
 
     "github.com/google/go-github/v51/github"
@@ -55,69 +56,122 @@ func run(ctx context.Context) error {
         return fmt.Errorf("error getting commits: %w", err)
     }
 
-    var OverallReviewCompletion string
-    for _, file := range diff.Files {
-        if file.Patch == nil || file.GetStatus() == "removed" || file.GetStatus() == "renamed" {
+    var comments []*github.PullRequestComment
+
+    for i, file := range diff.Files {
+        patch := file.GetPatch()
+        fmt.Printf("processing file: %s %d/%d\n", file.GetFilename(), i+1, len(diff.Files))
+        if patch == "" || file.GetStatus() == "removed" || file.GetStatus() == "renamed" {
             continue
         }
 
-        prompt := fmt.Sprintf(oAIClient.PromptReview, *file.Patch)
-
-        if len(prompt) > 4096 {
-            prompt = fmt.Sprintf("%s...", prompt[:4093])
+        if len(patch) > 3000 {
+            fmt.Println("Patch is too long, truncating")
+            patch = fmt.Sprintf("%s...", patch[:3000])
         }
-
         completion, err := openAIClient.ChatCompletion(ctx, []openai.ChatCompletionMessage{
             {
                 Role:    openai.ChatMessageRoleUser,
-                Content: prompt,
+                Content: oAIClient.PromptReview,
+            },
+            {
+                Role:    openai.ChatMessageRoleUser,
+                Content: patch,
             },
         })
+
         if err != nil {
-            return fmt.Errorf("error getting review: %w", err)
+            return fmt.Errorf("error getting completion: %w", err)
+        }
+
+        if opts.Test {
+            fmt.Println("Completion:", completion)
         }
-        OverallReviewCompletion += fmt.Sprintf("File: %s \nReview: %s \n\n", file.GetFilename(), completion)
 
-        position := len(strings.Split(*file.Patch, "\n")) - 1
+        review, err := extractJSON(completion)
+        if err != nil {
+            fmt.Println("Error extracting JSON:", err)
+            continue
+        }
 
-        comment := &github.PullRequestComment{
-            CommitID: diff.Commits[len(diff.Commits)-1].SHA,
-            Path:     file.Filename,
-            Body:     &completion,
-            Position: &position,
+        if review.Quality == Good {
+            fmt.Println("Review is good")
+            continue
+        }
+        for _, issue := range review.Issues {
+            body := fmt.Sprintf("[%s] %s", issue.Type, issue.Description)
+            comment := &github.PullRequestComment{
+                CommitID: diff.Commits[len(diff.Commits)-1].SHA,
+                Path:     file.Filename,
+                Body:     &body,
+                Position: &issue.Line,
+            }
+            comments = append(comments, comment)
         }
 
         if opts.Test {
            continue
        }
 
-        if _, err := githubClient.CreatePullRequestComment(ctx, opts.Owner, opts.Repo, opts.PRNumber, comment); err != nil {
-            return fmt.Errorf("error creating comment: %w", err)
+        for i, c := range comments {
+            fmt.Printf("creating comment: %s %d/%d\n", *c.Path, i+1, len(comments))
+            if _, err := githubClient.CreatePullRequestComment(ctx, opts.Owner, opts.Repo, opts.PRNumber, c); err != nil {
+                return fmt.Errorf("error creating comment: %w", err)
+            }
         }
     }
+    return nil
+}
 
-    overallCompletion, err := openAIClient.ChatCompletion(ctx, []openai.ChatCompletionMessage{
-        {
-            Role:    openai.ChatMessageRoleUser,
-            Content: fmt.Sprintf(oAIClient.PromptOverallReview, OverallReviewCompletion),
-        },
-    })
-    if err != nil {
-        return fmt.Errorf("error getting overall review: %w", err)
-    }
+type Review struct {
+    Quality Quality `json:"quality"`
+    Issues  []struct {
+        Type        string `json:"type"`
+        Line        int    `json:"line"`
+        Description string `json:"description"`
+    } `json:"issues"`
+}
 
-    if opts.Test {
-        fmt.Println(OverallReviewCompletion)
-        fmt.Println("=====================================")
-        fmt.Println(overallCompletion)
+type Quality string
 
-        return nil
+const (
+    Good    Quality = "good"
+    Bad     Quality = "bad"
+    Neutral Quality = "neutral"
+)
+
+func extractJSON(input string) (*Review, error) {
+    var jsonObj *Review
+
+    // find the start and end positions of the JSON object
+    start := 0
+    end := len(input)
+    for i, c := range input {
+        if c == '{' {
+            start = i
+            break
+        }
+        if i == len(input)-1 {
+            return nil, errors.New("invalid JSON object")
+        }
     }
+    for i := len(input) - 1; i >= 0; i-- {
+        if input[i] == '}' {
+            end = i + 1
+            break
+        }
 
-    comment := &github.PullRequestReviewRequest{Body: &overallCompletion}
-    if _, err = githubClient.CreateReview(ctx, opts.Owner, opts.Repo, opts.PRNumber, comment); err != nil {
-        return fmt.Errorf("error creating comment: %w", err)
+        if i == 0 {
+            return nil, errors.New("invalid JSON object")
+        }
     }
 
-    return nil
+    // extract the JSON object from the input
+    jsonStr := input[start:end]
+    err := json.Unmarshal([]byte(jsonStr), &jsonObj)
+    if err != nil {
+        return nil, errors.New("invalid JSON object")
+    }
+
+    return jsonObj, nil
 }
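
For context, a minimal standalone sketch (not part of this commit) of what the new flow does with a model reply: the completion may wrap the JSON report in prose, so everything outside the outermost braces is dropped before unmarshalling, and each reported issue becomes a "[type] description" comment body. The completion string below is hypothetical, and the brace trimming is a simplified stand-in for extractJSON.

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Review mirrors the struct added in cmd/review/main.go.
type Review struct {
	Quality string `json:"quality"`
	Issues  []struct {
		Type        string `json:"type"`
		Line        int    `json:"line"`
		Description string `json:"description"`
	} `json:"issues"`
}

func main() {
	// Hypothetical model output: JSON report surrounded by prose.
	completion := "Sure, here is my review:\n" +
		`{"quality":"bad","issues":[{"type":"bug","line":12,"description":"err is shadowed and never checked"}]}` +
		"\nLet me know if you need more detail."

	// Keep only the outermost {...} span, as extractJSON does.
	start := strings.Index(completion, "{")
	end := strings.LastIndex(completion, "}")
	if start == -1 || end < start {
		fmt.Println("invalid JSON object")
		return
	}

	var review Review
	if err := json.Unmarshal([]byte(completion[start:end+1]), &review); err != nil {
		fmt.Println("invalid JSON object:", err)
		return
	}

	// main.go formats each issue into an inline PR comment at issue.Line.
	for _, issue := range review.Issues {
		fmt.Printf("[%s] %s (position %d)\n", issue.Type, issue.Description, issue.Line)
	}
}
```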

openai/assets/review.txt

+22
@@ -0,0 +1,22 @@
+You are CodeReviewGPT, an AI agent that specializes in generating code reviews for software projects using advanced natural language processing and machine learning techniques.
+Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications.
+
+GOALS:
+
+1. Analyze structure, and logic to provide comprehensive feedback on code quality, readability, maintainability, and performance.
+2. Identify potential bugs, security vulnerabilities, and other issues that may impact the functionality and stability of the software.
+3. Possible quality values: good, bad, neutral. If quality is good, issues should be empty.
+4. Generate a json report in specific format to help developers improve their code. If context is not enough quality is good. You should only respond in JSON format as described below
+Response Format:
+```
+{
+  "quality": "good",
+  "issues": [
+    {
+      "type": "bug",
+      "line": 10,
+      "description": "You are missing a semicolon at the end of the line."
+    }
+  ]
+}
+```

openai/openai.go

+7 -4
@@ -2,16 +2,18 @@ package openai
 
 import (
     "context"
+    _ "embed"
     "fmt"
 
     "github.com/sashabaranov/go-openai"
 )
 
+//go:embed assets/review.txt
+var PromptReview string
+
 const (
     PromptDescribeChanges = "Below is the code patch, Generate a GitHub pull request description based on the following comments without basic prefix\n%s\n"
     PromptOverallDescribe = "Below comments are generated by AI, Generate a GitHub pull request description based on the following comments without basic prefix in markdown format with ### Description and ### Changes blocks:\n%s\n"
-    PromptReview          = "Below is the code patch, please help me do a brief code review, Answer me in English, if any bug risk and improvement suggestion are welcome\n%s\n"
-    PromptOverallReview   = "Below comments are generated by AI, please help me do a brief code review, Answer me in English, if any bug risk and improvement suggestion are welcome\n%s\n"
 )
 
 type Client struct {
@@ -28,8 +30,9 @@ func (o *Client) ChatCompletion(ctx context.Context, messages []openai.ChatCompl
     resp, err := o.client.CreateChatCompletion(
         ctx,
         openai.ChatCompletionRequest{
-            Model:    openai.GPT3Dot5Turbo,
-            Messages: messages,
+            Model:       openai.GPT3Dot5Turbo,
+            Messages:    messages,
+            Temperature: 0.1,
         },
     )
 
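
A rough usage sketch of the embedded prompt and the lower temperature (assuming the github.com/sashabaranov/go-openai module and an assets/review.txt file next to the source; the package and function names here are made up for illustration): the prompt travels as its own user message ahead of the patch, and Temperature 0.1 keeps the reply close to deterministic so the JSON report stays parseable.

```go
package review // hypothetical package, mirroring openai/openai.go

import (
	"context"
	_ "embed"
	"fmt"

	"github.com/sashabaranov/go-openai"
)

//go:embed assets/review.txt
var PromptReview string // review prompt bundled into the binary at build time

// ReviewPatch is an illustrative helper: it sends the embedded prompt and the
// patch as two separate user messages, like cmd/review/main.go now does.
func ReviewPatch(ctx context.Context, apiKey, patch string) (string, error) {
	client := openai.NewClient(apiKey)
	resp, err := client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
		Model: openai.GPT3Dot5Turbo,
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: PromptReview},
			{Role: openai.ChatMessageRoleUser, Content: patch},
		},
		// Low temperature keeps the JSON-formatted review stable.
		Temperature: 0.1,
	})
	if err != nil {
		return "", fmt.Errorf("error getting completion: %w", err)
	}
	if len(resp.Choices) == 0 {
		return "", fmt.Errorf("no completion choices returned")
	}
	return resp.Choices[0].Message.Content, nil
}
```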
