import RateLimiter from '@/common/utils/RateLimiter';
- import { BaseMessage } from '@langchain/core/messages';
import { inject, injectable } from 'inversify';
import DpTaskService from '@/backend/services/DpTaskService';
import TYPES from '@/backend/ioc/types';
import ChatService from '@/backend/services/ChatService';
- import { ChatOpenAI } from '@langchain/openai';
- import ClientProviderService from '@/backend/services/ClientProviderService';
import { ZodObject } from 'zod';
- import { storeGet } from '@/backend/store';
-
-
+ import { CoreMessage, streamObject, streamText } from 'ai';
+ import AiProviderService from '@/backend/services/AiProviderService';
@injectable()
export default class ChatServiceImpl implements ChatService {

    @inject(TYPES.DpTaskService)
    private dpTaskService!: DpTaskService;

-     @inject(TYPES.OpenAiClientProvider)
-     private aiProviderService!: ClientProviderService<ChatOpenAI>;
+     @inject(TYPES.AiProviderService)
+     private aiProviderService!: AiProviderService;


-     public async chat(taskId: number, msgs: BaseMessage[]) {
+     public async chat(taskId: number, msgs: CoreMessage[]) {
        await RateLimiter.wait('gpt');
-         const chat = this.aiProviderService.getClient();
-         if (chat) {
+         const model = this.aiProviderService.getModel();
+         if (!model) {
            this.dpTaskService.fail(taskId, {
                progress: 'OpenAI api key or endpoint is empty'
            });
+             // Bail out early so we never call the model without a configured provider.
+             return;
        }
        this.dpTaskService.process(taskId, {
            progress: 'AI is thinking...'
        });
-         // eslint-disable-next-line @typescript-eslint/ban-ts-comment
-         // @ts-ignore
-         const resStream = await chat.stream(msgs);
-         const chunks = [];
+
+         // streamText returns immediately; tokens arrive on result.textStream.
+         const result = streamText({
+             model: model,
+             messages: msgs
+         });
        let res = '';
-         for await (const chunk of resStream) {
-             res += chunk.content;
-             chunks.push(chunk);
+         for await (const chunk of result.textStream) {
+             res += chunk;
            this.dpTaskService.process(taskId, {
                progress: `AI typing, ${res.length} characters`,
                result: res
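
The diff calls this.aiProviderService.getModel() but does not show AiProviderService itself. A minimal sketch of what its contract might look like, assuming the AI SDK's OpenAI provider (createOpenAI from '@ai-sdk/openai') and store keys modeled on the removed 'apiKeys.openAi.stream' setting; the real implementation may differ:

import { createOpenAI } from '@ai-sdk/openai';
import { LanguageModel } from 'ai';
import { injectable } from 'inversify';
import { storeGet } from '@/backend/store';

@injectable()
export default class AiProviderService {
    // Returns null when the key or endpoint is missing, which is exactly
    // the condition chat()/run() check before streaming.
    public getModel(): LanguageModel | null {
        const apiKey = storeGet('apiKeys.openAi.key');       // hypothetical store key
        const baseUrl = storeGet('apiKeys.openAi.endpoint'); // hypothetical store key
        if (!apiKey || !baseUrl) return null;
        const openai = createOpenAI({ apiKey, baseURL: baseUrl });
        return openai('gpt-4o-mini'); // model name is a placeholder
    }
}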
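
For reference, invoking the new chat() with the AI SDK's CoreMessage shape might look like the sketch below, where chatService is an injected ChatService instance and the task id and message text are illustrative only:

const msgs: CoreMessage[] = [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'Summarize the last subtitle line.' }
];
// taskId would normally be created via DpTaskService; 42 is a placeholder.
await chatService.chat(42, msgs);
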
@@ -52,38 +49,29 @@ export default class ChatServiceImpl implements ChatService {

    public async run(taskId: number, resultSchema: ZodObject<any>, promptStr: string) {
        await RateLimiter.wait('gpt');
-         const chat = this.aiProviderService.getClient();
-         if (!chat) {
+         const model = this.aiProviderService.getModel();
+         if (!model) {
            this.dpTaskService.fail(taskId, {
                progress: 'OpenAI api key or endpoint is empty'
            });
            return;
        }
-         const structuredLlm = chat.withStructuredOutput(resultSchema);
-
+         // streamObject validates against the Zod schema and yields
+         // progressively more complete partial objects.
+         const { partialObjectStream } = streamObject({
+             model: model,
+             schema: resultSchema,
+             prompt: promptStr,
+         });
        this.dpTaskService.process(taskId, {
            progress: 'AI is analyzing...'
        });
-
-         const streaming = storeGet('apiKeys.openAi.stream') === 'on';
-
-         let resStr = null;
-         if (streaming) {
-             const resStream = await structuredLlm.stream(promptStr);
-             for await (const chunk of resStream) {
-                 resStr = JSON.stringify(chunk);
-                 this.dpTaskService.process(taskId, {
-                     progress: 'AI is analyzing...',
-                     result: resStr
-                 });
-             }
-         } else {
-             const res = await structuredLlm.invoke(promptStr);
-             resStr = JSON.stringify(res);
+         for await (const partialObject of partialObjectStream) {
+             this.dpTaskService.process(taskId, {
+                 progress: 'AI is analyzing...',
+                 result: JSON.stringify(partialObject)
+             });
        }
        this.dpTaskService.finish(taskId, {
-             progress: 'AI has responded',
-             result: resStr
+             progress: 'AI has responded'
        });
    }
}
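
Driving run() end to end might look like the following sketch; the schema and prompt are invented for illustration. Because streamObject emits progressively more complete partial objects, each dpTaskService.process call above carries a larger JSON snapshot until finish() fires:

import { z } from 'zod';

const schema = z.object({
    translation: z.string(),
    notes: z.array(z.string())
});
// 42 is a placeholder task id; chatService is an injected ChatService instance.
await chatService.run(42, schema, 'Translate "bonjour" and add usage notes.');
// Partial objects arrive roughly like:
//   {} -> { translation: 'hello' } -> { translation: 'hello', notes: [...] }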