Skip to content

Commit 0f91732

Browse files
feat: profiles cli api (#9)
* Base profiles
* Base profile commands
* Install openai langchain
* Update markdown command to require profiles, better mocking
* Use newest granite models in examples
* Reject path traversal
1 parent d2d741a commit 0f91732

File tree

15 files changed

+675
-73
lines changed

15 files changed

+675
-73
lines changed

package.json

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
"@ibm-cloud/watsonx-ai": "^1.7.0",
1212
"@langchain/community": "^1.0.0",
1313
"@langchain/core": "^1.0.2",
14+
"@langchain/openai": "^1.0.0",
1415
"@langchain/textsplitters": "^1.0.0",
1516
"@oclif/core": "^4",
1617
"@oclif/plugin-help": "^6",

pnpm-lock.yaml

Lines changed: 3 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

src/commands/markdown.ts

Lines changed: 12 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,8 @@
11
import {Args, Command, Flags} from '@oclif/core'
22

3-
import {createClient} from '../core/providers/watsonx.js'
43
import {MarkdownTranslator} from '../core/translators/markdown.js'
4+
import {createProviderFromProfile} from '../lib/profile/factory.js'
5+
import {loadProfile} from '../lib/profile/storage.js'
56

67
export default class Markdown extends Command {
78
static args = {
@@ -12,16 +13,20 @@ export default class Markdown extends Command {
1213
}
1314
static description = 'Translate markdown'
1415
static examples = [
15-
'<%= config.bin %> <%= command.id %> --from EN --to ES "Hello"',
16-
'<%= config.bin %> <%= command.id %> --from EN --to ES --stream "Hello"',
17-
'cat doc.md | <%= config.bin %> <%= command.id %> --from EN --to ES',
18-
'echo "# Hello" | <%= config.bin %> <%= command.id %> --from EN --to ES',
16+
'<%= config.bin %> <%= command.id %> --profile default-openai --from EN --to ES "Hello"',
17+
'<%= config.bin %> <%= command.id %> --profile default-openai --from EN --to ES --stream "Hello"',
18+
'cat doc.md | <%= config.bin %> <%= command.id %> --profile default-openai --from EN --to ES',
19+
'echo "# Hello" | <%= config.bin %> <%= command.id %> --profile default-openai --from EN --to ES',
1920
]
2021
static flags = {
2122
from: Flags.string({
2223
description: 'Source language',
2324
required: true,
2425
}),
26+
profile: Flags.string({
27+
description: 'Profile to use for translation',
28+
required: true,
29+
}),
2530
stream: Flags.boolean({
2631
default: false,
2732
description: 'Stream the translation output',
@@ -48,7 +53,8 @@ export default class Markdown extends Command {
4853
input = Buffer.concat(chunks).toString('utf8')
4954
}
5055

51-
const llm = createClient()
56+
const profile = loadProfile(flags.profile)
57+
const llm = createProviderFromProfile(profile)
5258
const translator = new MarkdownTranslator(llm)
5359

5460
if (flags.stream) {

src/commands/profiles/delete.ts

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,29 @@
1+
import {Args, Command} from '@oclif/core'
2+
3+
import {deleteProfile} from '../../lib/profile/storage.js'
4+
5+
export default class ProfilesDelete extends Command {
6+
static args = {
7+
name: Args.string({
8+
description: 'Profile name to delete',
9+
required: true,
10+
}),
11+
}
12+
static description = 'Delete a profile'
13+
static examples = ['<%= config.bin %> <%= command.id %> my-profile']
14+
15+
async run(): Promise<void> {
16+
const {args} = await this.parse(ProfilesDelete)
17+
18+
try {
19+
deleteProfile(args.name)
20+
this.log(`Profile "${args.name}" deleted successfully`)
21+
} catch (error) {
22+
if (error instanceof Error) {
23+
this.error(error.message)
24+
}
25+
26+
throw error
27+
}
28+
}
29+
}

src/commands/profiles/list.ts

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
import {Command} from '@oclif/core'
2+
3+
import {listProfiles} from '../../lib/profile/storage.js'
4+
5+
export default class ProfilesList extends Command {
6+
static description = 'List all profiles'
7+
static examples = ['<%= config.bin %> <%= command.id %>']
8+
9+
async run(): Promise<void> {
10+
const profiles = listProfiles()
11+
12+
if (profiles.length === 0) {
13+
this.log('No profiles found')
14+
return
15+
}
16+
17+
for (const profile of profiles) {
18+
this.log(profile)
19+
}
20+
}
21+
}

src/commands/profiles/set.ts

Lines changed: 84 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,84 @@
1+
import {Args, Command, Flags} from '@oclif/core'
2+
3+
import type {OpenAIProfileConfig, WatsonxProfileConfig} from '../../lib/profile/types.js'
4+
5+
import {saveProfile} from '../../lib/profile/storage.js'
6+
7+
export default class ProfilesSet extends Command {
8+
static args = {
9+
name: Args.string({
10+
description: 'Profile name',
11+
required: true,
12+
}),
13+
}
14+
static description = 'Create or update a profile'
15+
static examples = [
16+
'<%= config.bin %> <%= command.id %> my-openai-profile --provider openai --api-key sk-... --model gpt-4o',
17+
'<%= config.bin %> <%= command.id %> my-watsonx-profile --provider watsonx --api-key ... --project-id ... --service-url https://... --model ibm/granite-4-h-small',
18+
]
19+
static flags = {
20+
'api-key': Flags.string({
21+
description: 'API key for the provider',
22+
required: true,
23+
}),
24+
model: Flags.string({
25+
description: 'Model to use',
26+
required: true,
27+
}),
28+
'project-id': Flags.string({
29+
description: 'Watsonx project ID (required for watsonx)',
30+
required: false,
31+
}),
32+
provider: Flags.string({
33+
description: 'LLM provider',
34+
options: ['openai', 'watsonx'],
35+
required: true,
36+
}),
37+
'service-url': Flags.string({
38+
description: 'Watsonx service URL (required for watsonx)',
39+
required: false,
40+
}),
41+
}
42+
43+
async run(): Promise<void> {
44+
const {args, flags} = await this.parse(ProfilesSet)
45+
46+
if (flags.provider === 'openai') {
47+
const config: OpenAIProfileConfig = {
48+
apiKey: flags['api-key'],
49+
model: flags.model,
50+
}
51+
52+
saveProfile({
53+
config,
54+
name: args.name,
55+
provider: 'openai',
56+
})
57+
58+
this.log(`Profile "${args.name}" saved successfully`)
59+
} else if (flags.provider === 'watsonx') {
60+
if (!flags['project-id']) {
61+
this.error('--project-id is required for watsonx provider')
62+
}
63+
64+
if (!flags['service-url']) {
65+
this.error('--service-url is required for watsonx provider')
66+
}
67+
68+
const config: WatsonxProfileConfig = {
69+
apiKey: flags['api-key'],
70+
model: flags.model,
71+
projectId: flags['project-id'],
72+
serviceUrl: flags['service-url'],
73+
}
74+
75+
saveProfile({
76+
config,
77+
name: args.name,
78+
provider: 'watsonx',
79+
})
80+
81+
this.log(`Profile "${args.name}" saved successfully`)
82+
}
83+
}
84+
}

src/core/providers/fake.ts

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
import {FakeListChatModel} from '@langchain/core/utils/testing'
2+
3+
export interface FakeConfig {
4+
responses: string[]
5+
}
6+
7+
export function createClient(config: FakeConfig): FakeListChatModel {
8+
return new FakeListChatModel({
9+
responses: config.responses,
10+
})
11+
}

src/core/providers/openai.ts

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
import {ChatOpenAI} from '@langchain/openai'
2+
3+
export interface OpenAIConfig {
4+
apiKey: string
5+
maxTokens?: number
6+
model: string
7+
temperature?: number
8+
}
9+
10+
export function createClient(config: OpenAIConfig): ChatOpenAI {
11+
return new ChatOpenAI({
12+
apiKey: config.apiKey,
13+
maxRetries: 3,
14+
maxTokens: config.maxTokens || 2000,
15+
model: config.model,
16+
temperature: config.temperature || 0.3,
17+
})
18+
}

src/core/providers/watsonx.ts

Lines changed: 8 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -1,49 +1,24 @@
11
import {ChatWatsonx} from '@langchain/community/chat_models/ibm'
22

3-
/**
4-
* Configuration for watsonx.ai client
5-
*/
63
export interface WatsonxConfig {
74
apiKey: string
85
maxNewTokens?: number
9-
model?: string
6+
model: string
107
projectId: string
118
serviceUrl: string
129
temperature?: number
1310
}
1411

15-
/**
16-
* Creates and returns a configured watsonx.ai chat model with IAM authentication
17-
*/
18-
export function createClient(config?: Partial<WatsonxConfig>): ChatWatsonx {
19-
// Get configuration from environment variables or provided config
20-
const serviceUrl = config?.serviceUrl || process.env.WATSONX_AI_SERVICE_URL
21-
const projectId = config?.projectId || process.env.WATSONX_AI_PROJECT_ID
22-
const apiKey = config?.apiKey || process.env.WATSONX_AI_APIKEY
23-
24-
// Validate required configuration
25-
if (!serviceUrl) {
26-
throw new Error('WATSONX_AI_SERVICE_URL is required')
27-
}
28-
29-
if (!projectId) {
30-
throw new Error('WATSONX_AI_PROJECT_ID is required')
31-
}
32-
33-
if (!apiKey) {
34-
throw new Error('WATSONX_AI_APIKEY is required')
35-
}
36-
37-
// Create and return the chat model with IAM authentication
12+
export function createClient(config: WatsonxConfig): ChatWatsonx {
3813
return new ChatWatsonx({
3914
maxRetries: 3,
40-
maxTokens: config?.maxNewTokens || 2000,
41-
model: config?.model || 'ibm/granite-4-h-small',
42-
projectId,
43-
serviceUrl,
44-
temperature: config?.temperature || 0.3,
15+
maxTokens: config.maxNewTokens || 2000,
16+
model: config.model,
17+
projectId: config.projectId,
18+
serviceUrl: config.serviceUrl,
19+
temperature: config.temperature || 0.3,
4520
version: '2024-05-31',
46-
watsonxAIApikey: apiKey,
21+
watsonxAIApikey: config.apiKey,
4722
watsonxAIAuthType: 'iam',
4823
})
4924
}

src/lib/profile/factory.ts

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
import type {BaseChatModel} from '@langchain/core/language_models/chat_models'
2+
3+
import type {Profile} from './types.js'
4+
5+
import {createClient as createFakeClient} from '../../core/providers/fake.js'
6+
import {createClient as createOpenAIClient} from '../../core/providers/openai.js'
7+
import {createClient as createWatsonxClient} from '../../core/providers/watsonx.js'
8+
9+
export function createProviderFromProfile(profile: Profile): BaseChatModel {
10+
switch (profile.provider) {
11+
case 'fake': {
12+
return createFakeClient(profile.config)
13+
}
14+
15+
case 'openai': {
16+
return createOpenAIClient(profile.config)
17+
}
18+
19+
case 'watsonx': {
20+
return createWatsonxClient(profile.config)
21+
}
22+
}
23+
}

0 commit comments

Comments
 (0)