forked from op7418/CodePilot
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtext-generator.ts
More file actions
121 lines (111 loc) · 4.08 KB
/
text-generator.ts
File metadata and controls
121 lines (111 loc) · 4.08 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
import { streamText } from 'ai';
import { createGoogleGenerativeAI } from '@ai-sdk/google';
import { createAnthropic } from '@ai-sdk/anthropic';
import { createOpenAI } from '@ai-sdk/openai';
import { createAmazonBedrock } from '@ai-sdk/amazon-bedrock';
import { createVertexAnthropic } from '@ai-sdk/google-vertex/anthropic';
import { resolveProvider as resolveProviderUnified, toAiSdkConfig } from './provider-resolver';
/**
 * Inputs for a single text-generation request against the user's
 * configured provider (used by both the streaming and non-streaming paths).
 */
export interface StreamTextParams {
  /** Provider to resolve; passed through to the unified provider resolver. */
  providerId: string;
  /**
   * Model identifier. May be a short alias (e.g. sonnet/opus/haiku) —
   * it is resolved via the provider's model catalog, not expanded here.
   */
  model: string;
  /** System prompt sent with the request. */
  system: string;
  /** User prompt sent with the request. */
  prompt: string;
  /** Maximum output tokens; defaults to 4096 when omitted. */
  maxTokens?: number;
  /** Cancellation signal; a 120-second timeout signal is used when omitted. */
  abortSignal?: AbortSignal;
}
/**
 * Stream text from the user's current provider.
 * Returns an async iterable of text chunks.
 *
 * Provider resolution is fully delegated to the unified resolver.
 * No fallback logic here — the resolver's chain (explicit → session → global default → env)
 * is the single source of truth, matching the Claude Code SDK path.
 *
 * NOTE: Do NOT expand model aliases (sonnet/opus/haiku) here.
 * toAiSdkConfig() resolves model IDs through the provider's availableModels catalog,
 * which uses the short alias as modelId. Expanding aliases would break that lookup
 * for SDK proxy providers (Kimi, GLM, MiniMax, etc.) that expect short aliases.
 *
 * @param params Provider/model selection plus prompt content and streaming controls.
 * @throws Error when no provider can be resolved, or when the resolved
 *   config names an SDK type this function does not handle.
 */
export async function* streamTextFromProvider(params: StreamTextParams): AsyncIterable<string> {
  const resolved = resolveProviderUnified({ providerId: params.providerId });
  if (!resolved.hasCredentials && !resolved.provider) {
    throw new Error('No text generation provider available. Please configure a provider in Settings.');
  }

  const config = toAiSdkConfig(resolved, params.model);

  // Inject process env if needed (bedrock/vertex) — those SDKs read
  // credentials/region from process.env rather than constructor options.
  for (const [k, v] of Object.entries(config.processEnvInjections)) {
    process.env[k] = v;
  }

  // Build headers object for SDK clients (only if non-empty)
  const hasHeaders = config.headers && Object.keys(config.headers).length > 0;

  let model;
  switch (config.sdkType) {
    case 'anthropic': {
      const anthropic = createAnthropic({
        // apiKey and authToken are mutually exclusive in @ai-sdk/anthropic
        ...(config.authToken
          ? { authToken: config.authToken }
          : { apiKey: config.apiKey }),
        baseURL: config.baseUrl,
        ...(hasHeaders ? { headers: config.headers } : {}),
      });
      model = anthropic(config.modelId);
      break;
    }
    case 'openai': {
      const openai = createOpenAI({
        apiKey: config.apiKey,
        baseURL: config.baseUrl,
        ...(hasHeaders ? { headers: config.headers } : {}),
      });
      model = openai(config.modelId);
      break;
    }
    case 'google': {
      const google = createGoogleGenerativeAI({
        apiKey: config.apiKey,
        baseURL: config.baseUrl,
        ...(hasHeaders ? { headers: config.headers } : {}),
      });
      model = google(config.modelId);
      break;
    }
    case 'bedrock': {
      // Auth via process.env (AWS_REGION, AWS_ACCESS_KEY_ID, etc.) — already injected above
      const bedrock = createAmazonBedrock({
        ...(hasHeaders ? { headers: config.headers } : {}),
      });
      model = bedrock(config.modelId);
      break;
    }
    case 'vertex': {
      // Anthropic-on-Vertex: auth via process.env (CLOUD_ML_REGION, GOOGLE_APPLICATION_CREDENTIALS, etc.)
      const vertex = createVertexAnthropic({
        ...(hasHeaders ? { headers: config.headers } : {}),
      });
      model = vertex(config.modelId);
      break;
    }
    default:
      // Fail fast with a clear message. Without this, an unrecognized sdkType
      // left `model` undefined and the old `model!` assertion deferred the
      // failure to an opaque error inside streamText().
      throw new Error(`Unsupported SDK type for text generation: ${String(config.sdkType)}`);
  }

  const result = streamText({
    model, // definitely assigned: every switch path assigns or throws
    system: params.system,
    prompt: params.prompt,
    maxOutputTokens: params.maxTokens || 4096,
    // Default to a 120s timeout when the caller supplies no signal.
    abortSignal: params.abortSignal || AbortSignal.timeout(120_000),
  });

  for await (const chunk of result.textStream) {
    yield chunk;
  }
}
/**
 * Generate complete text (non-streaming) from the user's current provider.
 * Convenience wrapper around streamTextFromProvider() that drains the
 * stream and returns the concatenated result as a single string.
 */
export async function generateTextFromProvider(params: StreamTextParams): Promise<string> {
  let text = '';
  for await (const piece of streamTextFromProvider(params)) {
    text += piece;
  }
  return text;
}