// Streaming code-generation endpoint helpers.
import { assemblePrompt } from './prompts';
|
|
import { streamingOpenAIResponses } from './llm';
|
|
|
|
export interface IGenerateCodeParams {
|
|
generationType: string;
|
|
image: string;
|
|
text: string;
|
|
openAiApiKey: string;
|
|
openAiBaseURL: string;
|
|
isImageGenerationEnabled: true;
|
|
generatedCodeConfig: string;
|
|
resultImage?: string;
|
|
promptCode: string;
|
|
history: any[];
|
|
llm: string;
|
|
slug?: string;
|
|
anthropicApiKey: string;
|
|
anthropicBaseURL: string;
|
|
components: string[];
|
|
useCVModel: boolean;
|
|
}
|
|
|
|
// Shared UTF-8 encoder for the newline-delimited JSON event stream.
const encoder = new TextEncoder();
|
|
export async function streamGenerateCode(
|
|
params: IGenerateCodeParams,
|
|
socket: { enqueue: (v: any) => any },
|
|
origin?: string,
|
|
) {
|
|
console.log('streamGenerateCode begin')
|
|
function noticeHost(data: Record<any, any>) {
|
|
if (socket.enqueue) {
|
|
socket.enqueue(encoder.encode(`${JSON.stringify(data)}\n`));
|
|
}
|
|
}
|
|
const generated_code_config = params['generatedCodeConfig'];
|
|
let prompt_messages;
|
|
const history = params['history'];
|
|
const initTemplateCode = history && params.slug && params.slug !== 'create' ? history.splice(0, 1)[0] : '';
|
|
try {
|
|
if (params['resultImage']) {
|
|
prompt_messages = await assemblePrompt(
|
|
params['image'],
|
|
params['text'],
|
|
generated_code_config,
|
|
params['promptCode'],
|
|
params.slug,
|
|
initTemplateCode,
|
|
params['components'],
|
|
params['resultImage'],
|
|
);
|
|
} else {
|
|
prompt_messages = await assemblePrompt(
|
|
params['image'],
|
|
params['text'],
|
|
generated_code_config,
|
|
params['promptCode'],
|
|
params.slug,
|
|
initTemplateCode,
|
|
params['components'],
|
|
);
|
|
}
|
|
} catch (e) {
|
|
console.log(e);
|
|
noticeHost({
|
|
type: 'error',
|
|
value: 'Prompt error!',
|
|
});
|
|
}
|
|
|
|
if (params['generationType'] === 'update') {
|
|
const history = params['history'];
|
|
if (params.slug && params.slug !== 'create') {
|
|
history.forEach((item, index) => {
|
|
prompt_messages.push({
|
|
role: index % 2 === 0 ? 'user' : 'assistant',
|
|
content: item,
|
|
});
|
|
});
|
|
} else {
|
|
history.forEach((item, index) => {
|
|
prompt_messages.push({
|
|
role: index % 2 === 0 ? 'assistant' : 'user',
|
|
content: item,
|
|
});
|
|
});
|
|
}
|
|
}
|
|
|
|
let completion;
|
|
try {
|
|
completion = await streamingOpenAIResponses(
|
|
prompt_messages,
|
|
(content: string, event?: string) => {
|
|
if (event === 'error') {
|
|
noticeHost({
|
|
type: 'error',
|
|
value: content,
|
|
});
|
|
} else {
|
|
noticeHost({
|
|
type: 'chunk',
|
|
value: content,
|
|
});
|
|
}
|
|
},
|
|
{
|
|
openAiApiKey: params.openAiApiKey,
|
|
openAiBaseURL: params.openAiBaseURL,
|
|
llm: params.llm,
|
|
anthropicApiKey: params.anthropicApiKey,
|
|
anthropicBaseURL: params.anthropicBaseURL
|
|
},
|
|
);
|
|
} catch (e) {
|
|
console.log(e);
|
|
noticeHost({
|
|
type: 'error',
|
|
value: 'openAI request error!',
|
|
});
|
|
}
|
|
const updated_html = completion;
|
|
noticeHost({
|
|
type: 'setCode',
|
|
value: updated_html,
|
|
});
|
|
noticeHost({
|
|
type: 'status',
|
|
value: 'Code generation complete.',
|
|
});
|
|
|
|
return updated_html;
|
|
}
|