Skip to content

Commit 0ee119a

Browse files
chore: drop internal prompt
Also, seed and stream_options.include_usage will now be set in all requests
1 parent 160a733 commit 0ee119a

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

56 files changed

+95
-145
lines changed

docs/docs/04-command-line-reference/gptscript_eval.md

-1
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,6 @@ gptscript eval [flags]
1414
```
1515
--chat Enable chat ($GPTSCRIPT_EVAL_CHAT)
1616
-h, --help help for eval
17-
--internal-prompt ($GPTSCRIPT_EVAL_INTERNAL_PROMPT)
1817
--json Output JSON ($GPTSCRIPT_EVAL_JSON)
1918
--max-tokens int Maximum number of tokens to output ($GPTSCRIPT_EVAL_MAX_TOKENS)
2019
--model string The model to use ($GPTSCRIPT_EVAL_MODEL)

pkg/cli/eval.go

+12-14
Original file line numberDiff line numberDiff line change
@@ -15,13 +15,12 @@ import (
1515
)
1616

1717
type Eval struct {
18-
Tools []string `usage:"Tools available to call"`
19-
Chat bool `usage:"Enable chat"`
20-
MaxTokens int `usage:"Maximum number of tokens to output"`
21-
Model string `usage:"The model to use"`
22-
JSON bool `usage:"Output JSON"`
23-
Temperature string `usage:"Set the temperature, \"creativity\""`
24-
InternalPrompt *bool `Usage:"Set to false to disable the internal prompt"`
18+
Tools []string `usage:"Tools available to call"`
19+
Chat bool `usage:"Enable chat"`
20+
MaxTokens int `usage:"Maximum number of tokens to output"`
21+
Model string `usage:"The model to use"`
22+
JSON bool `usage:"Output JSON"`
23+
Temperature string `usage:"Set the temperature, \"creativity\""`
2524

2625
gptscript *GPTScript
2726
}
@@ -30,13 +29,12 @@ func (e *Eval) Run(cmd *cobra.Command, args []string) error {
3029
tool := types.Tool{
3130
ToolDef: types.ToolDef{
3231
Parameters: types.Parameters{
33-
Description: "inline script",
34-
Tools: e.Tools,
35-
MaxTokens: e.MaxTokens,
36-
ModelName: e.Model,
37-
JSONResponse: e.JSON,
38-
InternalPrompt: e.InternalPrompt,
39-
Chat: e.Chat,
32+
Description: "inline script",
33+
Tools: e.Tools,
34+
MaxTokens: e.MaxTokens,
35+
ModelName: e.Model,
36+
JSONResponse: e.JSON,
37+
Chat: e.Chat,
4038
},
4139
Instructions: strings.Join(args, " "),
4240
},

pkg/engine/cmd_test.go

+59
Large diffs are not rendered by default.

pkg/engine/engine.go

+6-11
Original file line numberDiff line numberDiff line change
@@ -287,17 +287,12 @@ func (e *Engine) Start(ctx Context, input string) (ret *Return, _ error) {
287287
}
288288

289289
completion := types.CompletionRequest{
290-
Model: tool.Parameters.ModelName,
291-
MaxTokens: tool.Parameters.MaxTokens,
292-
JSONResponse: tool.Parameters.JSONResponse,
293-
Cache: tool.Parameters.Cache,
294-
Chat: tool.Parameters.Chat,
295-
Temperature: tool.Parameters.Temperature,
296-
InternalSystemPrompt: tool.Parameters.InternalPrompt,
297-
}
298-
299-
if tool.Chat && completion.InternalSystemPrompt == nil {
300-
completion.InternalSystemPrompt = new(bool)
290+
Model: tool.Parameters.ModelName,
291+
MaxTokens: tool.Parameters.MaxTokens,
292+
JSONResponse: tool.Parameters.JSONResponse,
293+
Cache: tool.Parameters.Cache,
294+
Chat: tool.Parameters.Chat,
295+
Temperature: tool.Parameters.Temperature,
301296
}
302297

303298
var err error

pkg/gptscript/gptscript.go

+1-2
Original file line numberDiff line numberDiff line change
@@ -110,8 +110,7 @@ func New(ctx context.Context, o ...Options) (*GPTScript, error) {
110110

111111
if opts.DefaultModelProvider == "" {
112112
oaiClient, err := openai.NewClient(ctx, credStore, opts.OpenAI, openai.Options{
113-
Cache: cacheClient,
114-
SetSeed: true,
113+
Cache: cacheClient,
115114
})
116115
if err != nil {
117116
return nil, err

pkg/loader/loader_test.go

-6
Original file line numberDiff line numberDiff line change
@@ -79,7 +79,6 @@ func TestHelloWorld(t *testing.T) {
7979
"toolSet": {
8080
"https://raw.githubusercontent.com/ibuildthecloud/test/bafe5a62174e8a0ea162277dcfe3a2ddb7eea928/example/bob.gpt:": {
8181
"modelName": "gpt-4o",
82-
"internalPrompt": null,
8382
"instructions": "Say hello world",
8483
"id": "https://raw.githubusercontent.com/ibuildthecloud/test/bafe5a62174e8a0ea162277dcfe3a2ddb7eea928/example/bob.gpt:",
8584
"localTools": {
@@ -93,7 +92,6 @@ func TestHelloWorld(t *testing.T) {
9392
},
9493
"https://raw.githubusercontent.com/ibuildthecloud/test/bafe5a62174e8a0ea162277dcfe3a2ddb7eea928/example/sub/tool.gpt:": {
9594
"modelName": "gpt-4o",
96-
"internalPrompt": null,
9795
"tools": [
9896
"../bob.gpt"
9997
],
@@ -129,7 +127,6 @@ func TestHelloWorld(t *testing.T) {
129127
"https://get.gptscript.ai/echo.gpt:": {
130128
"description": "Returns back the input of the script",
131129
"modelName": "gpt-4o",
132-
"internalPrompt": null,
133130
"arguments": {
134131
"properties": {
135132
"input": {
@@ -164,7 +161,6 @@ func TestDefault(t *testing.T) {
164161
"testdata/tool/tool.gpt:tool": {
165162
"name": "tool",
166163
"modelName": "gpt-4o",
167-
"internalPrompt": null,
168164
"instructions": "a tool",
169165
"id": "testdata/tool/tool.gpt:tool",
170166
"localTools": {
@@ -188,7 +184,6 @@ func TestDefault(t *testing.T) {
188184
"testdata/agent/agent.gpt:agent": {
189185
"name": "agent",
190186
"modelName": "gpt-4o",
191-
"internalPrompt": null,
192187
"instructions": "an agent",
193188
"id": "testdata/agent/agent.gpt:agent",
194189
"localTools": {
@@ -212,7 +207,6 @@ func TestDefault(t *testing.T) {
212207
"testdata/bothtoolagent/agent.gpt:agent": {
213208
"name": "agent",
214209
"modelName": "gpt-4o",
215-
"internalPrompt": null,
216210
"instructions": "an agent",
217211
"id": "testdata/bothtoolagent/agent.gpt:agent",
218212
"localTools": {

pkg/openai/client.go

+5-15
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,6 @@ type Client struct {
4444
cache *cache.Client
4545
invalidAuth bool
4646
cacheKeyBase string
47-
setSeed bool
4847
credStore credentials.CredentialStore
4948
}
5049

@@ -54,7 +53,6 @@ type Options struct {
5453
OrgID string `usage:"OpenAI organization ID" name:"openai-org-id" env:"OPENAI_ORG_ID"`
5554
DefaultModel string `usage:"Default LLM model to use" default:"gpt-4o"`
5655
ConfigFile string `usage:"Path to GPTScript config file" name:"config"`
57-
SetSeed bool `usage:"-"`
5856
CacheKey string `usage:"-"`
5957
Cache *cache.Client
6058
}
@@ -66,7 +64,6 @@ func Complete(opts ...Options) (result Options) {
6664
result.OrgID = types.FirstSet(opt.OrgID, result.OrgID)
6765
result.Cache = types.FirstSet(opt.Cache, result.Cache)
6866
result.DefaultModel = types.FirstSet(opt.DefaultModel, result.DefaultModel)
69-
result.SetSeed = types.FirstSet(opt.SetSeed, result.SetSeed)
7067
result.CacheKey = types.FirstSet(opt.CacheKey, result.CacheKey)
7168
}
7269

@@ -125,7 +122,6 @@ func NewClient(ctx context.Context, credStore credentials.CredentialStore, opts
125122
defaultModel: opt.DefaultModel,
126123
cacheKeyBase: cacheKeyBase,
127124
invalidAuth: opt.APIKey == "" && opt.BaseURL == "",
128-
setSeed: opt.SetSeed,
129125
credStore: credStore,
130126
}, nil
131127
}
@@ -227,16 +223,12 @@ func toToolCall(call types.CompletionToolCall) openai.ToolCall {
227223
}
228224
}
229225

230-
func toMessages(request types.CompletionRequest, compat bool) (result []openai.ChatCompletionMessage, err error) {
226+
func toMessages(request types.CompletionRequest) (result []openai.ChatCompletionMessage, err error) {
231227
var (
232228
systemPrompts []string
233229
msgs []types.CompletionMessage
234230
)
235231

236-
if !compat && (request.InternalSystemPrompt == nil || *request.InternalSystemPrompt) {
237-
systemPrompts = append(systemPrompts, system.InternalSystemPrompt)
238-
}
239-
240232
for _, message := range request.Messages {
241233
if message.Role == types.CompletionMessageRoleTypeSystem {
242234
systemPrompts = append(systemPrompts, message.Content[0].Text)
@@ -304,7 +296,7 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques
304296
messageRequest.Model = c.defaultModel
305297
}
306298

307-
msgs, err := toMessages(messageRequest, !c.setSeed)
299+
msgs, err := toMessages(messageRequest)
308300
if err != nil {
309301
return nil, err
310302
}
@@ -365,11 +357,9 @@ func (c *Client) Call(ctx context.Context, messageRequest types.CompletionReques
365357
}
366358

367359
var cacheResponse bool
368-
if c.setSeed {
369-
request.Seed = ptr(c.seed(request))
370-
request.StreamOptions = &openai.StreamOptions{
371-
IncludeUsage: true,
372-
}
360+
request.Seed = ptr(c.seed(request))
361+
request.StreamOptions = &openai.StreamOptions{
362+
IncludeUsage: true,
373363
}
374364
response, ok, err := c.fromCache(ctx, messageRequest, request)
375365
if err != nil {

pkg/parser/parser.go

+1-5
Original file line numberDiff line numberDiff line change
@@ -90,11 +90,7 @@ func isParam(line string, tool *types.Tool) (_ bool, err error) {
9090
case "description":
9191
tool.Parameters.Description = value
9292
case "internalprompt":
93-
v, err := toBool(value)
94-
if err != nil {
95-
return false, err
96-
}
97-
tool.Parameters.InternalPrompt = &v
93+
// deprecated and ignored
9894
case "chat":
9995
v, err := toBool(value)
10096
if err != nil {

pkg/system/prompt.go

-18
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@ package system
22

33
import (
44
"encoding/json"
5-
"os"
65
"strings"
76

87
"github.com/getkin/kin-openapi/openapi3"
@@ -11,17 +10,6 @@ import (
1110
// Suffix is default suffix of gptscript files
1211
const Suffix = ".gpt"
1312

14-
// InternalSystemPrompt is added to all threads. Changing this is very dangerous as it has a
15-
// terrible global effect and changes the behavior of all scripts.
16-
var InternalSystemPrompt = `
17-
You are task oriented system.
18-
You receive input from a user, process the input from the given instructions, and then output the result.
19-
Your objective is to provide consistent and correct results.
20-
You do not need to explain the steps taken, only provide the result to the given instructions.
21-
You are referred to as a tool.
22-
You don't move to the next step until you have a result.
23-
`
24-
2513
// DefaultPromptParameter is used as the key in a json map to indication that we really wanted
2614
// to just send pure text but the interface required JSON (as that is the fundamental interface of tools in OpenAI)
2715
var DefaultPromptParameter = "defaultPromptParameter"
@@ -50,12 +38,6 @@ var DefaultChatSchema = openapi3.Schema{
5038
},
5139
}
5240

53-
func init() {
54-
if p := os.Getenv("GPTSCRIPT_INTERNAL_SYSTEM_PROMPT"); p != "" {
55-
InternalSystemPrompt = p
56-
}
57-
}
58-
5941
// IsDefaultPrompt Checks if the content is a json blob that has the defaultPromptParameter in it. If so
6042
// it will extract out the value and return it. If not it will return the original content as is and false.
6143
func IsDefaultPrompt(content string) (string, bool) {

pkg/tests/runner_test.go

-7
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,6 @@ func TestAsterick(t *testing.T) {
3636
"testdata/TestAsterick/other.gpt:a": {
3737
"name": "a",
3838
"modelName": "gpt-4o",
39-
"internalPrompt": null,
4039
"instructions": "a",
4140
"id": "testdata/TestAsterick/other.gpt:a",
4241
"localTools": {
@@ -55,7 +54,6 @@ func TestAsterick(t *testing.T) {
5554
"testdata/TestAsterick/other.gpt:afoo": {
5655
"name": "afoo",
5756
"modelName": "gpt-4o",
58-
"internalPrompt": null,
5957
"instructions": "afoo",
6058
"id": "testdata/TestAsterick/other.gpt:afoo",
6159
"localTools": {
@@ -73,7 +71,6 @@ func TestAsterick(t *testing.T) {
7371
},
7472
"testdata/TestAsterick/test.gpt:": {
7573
"modelName": "gpt-4o",
76-
"internalPrompt": null,
7774
"tools": [
7875
"a* from ./other.gpt"
7976
],
@@ -396,7 +393,6 @@ func TestSubChat(t *testing.T) {
396393
"state": {
397394
"completion": {
398395
"model": "gpt-4o",
399-
"internalSystemPrompt": false,
400396
"messages": [
401397
{
402398
"role": "system",
@@ -521,7 +517,6 @@ func TestSubChat(t *testing.T) {
521517
"state": {
522518
"completion": {
523519
"model": "gpt-4o",
524-
"internalSystemPrompt": false,
525520
"messages": [
526521
{
527522
"role": "system",
@@ -598,7 +593,6 @@ func TestChat(t *testing.T) {
598593
"input": "Hello",
599594
"completion": {
600595
"model": "gpt-4o",
601-
"internalSystemPrompt": false,
602596
"messages": [
603597
{
604598
"role": "system",
@@ -650,7 +644,6 @@ func TestChat(t *testing.T) {
650644
"input": "Hello",
651645
"completion": {
652646
"model": "gpt-4o",
653-
"internalSystemPrompt": false,
654647
"messages": [
655648
{
656649
"role": "system",

pkg/tests/testdata/TestAgentOnly/call1.golden

-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
`{
22
"model": "gpt-4o",
3-
"internalSystemPrompt": false,
43
"tools": [
54
{
65
"function": {

pkg/tests/testdata/TestAgentOnly/call2.golden

-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
`{
22
"model": "gpt-4o",
3-
"internalSystemPrompt": false,
43
"tools": [
54
{
65
"function": {

pkg/tests/testdata/TestAgentOnly/step1.golden

-2
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,6 @@
88
"input": "Input 1",
99
"completion": {
1010
"model": "gpt-4o",
11-
"internalSystemPrompt": false,
1211
"tools": [
1312
{
1413
"function": {
@@ -92,7 +91,6 @@
9291
"input": "Agent 2 input",
9392
"completion": {
9493
"model": "gpt-4o",
95-
"internalSystemPrompt": false,
9694
"tools": [
9795
{
9896
"function": {

pkg/tests/testdata/TestAgents/call1.golden

-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
`{
22
"model": "gpt-4o",
3-
"internalSystemPrompt": false,
43
"tools": [
54
{
65
"function": {

pkg/tests/testdata/TestAgents/call2.golden

-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
`{
22
"model": "gpt-4o",
3-
"internalSystemPrompt": false,
43
"tools": [
54
{
65
"function": {

pkg/tests/testdata/TestAgents/call3.golden

-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
`{
22
"model": "gpt-4o",
3-
"internalSystemPrompt": false,
43
"tools": [
54
{
65
"function": {

pkg/tests/testdata/TestAgents/call4.golden

-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,5 @@
11
`{
22
"model": "gpt-4o",
3-
"internalSystemPrompt": false,
43
"messages": [
54
{
65
"role": "system",

0 commit comments

Comments
 (0)