using ProjectVG.Infrastructure.Integrations.LLMClient;
using ProjectVG.Infrastructure.Integrations.LLMClient.Models;

namespace ProjectVG.Tests.Application.TestUtilities
{
    /// <summary>
    /// Mock implementation of ILLMClient for testing scenarios.
    /// Provides configurable responses and error simulation.
    /// </summary>
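    /// <example>
    /// Minimal usage sketch; the test body below is illustrative and assumes an async test method:
    /// <code>
    /// var mock = new MockLLMClient()
    ///     .WithQueuedResponse(LLMResponseBuilder.Success("first"))
    ///     .WithDefaultResponse(LLMResponseBuilder.Success("fallback"));
    ///
    /// var response = await mock.CreateTextResponseAsync("system prompt", "user prompt");
    ///
    /// // response.OutputText == "first"; mock.CallCount == 1
    /// // mock.GetLastRequest() exposes the captured LLMRequest for assertions
    /// </code>
    /// </example>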
    public class MockLLMClient : ILLMClient
    {
        private readonly Queue<LLMResponse> _responseQueue;
        private readonly Queue<Exception> _exceptionQueue;
        private LLMResponse? _defaultResponse;
        private Exception? _defaultException;

        public List<LLMRequest> SentRequests { get; } = new();
        public int CallCount { get; private set; }

        public MockLLMClient()
        {
            _responseQueue = new Queue<LLMResponse>();
            _exceptionQueue = new Queue<Exception>();
        }

        public MockLLMClient WithDefaultResponse(LLMResponse response)
        {
            _defaultResponse = response;
            return this;
        }

        public MockLLMClient WithQueuedResponse(LLMResponse response)
        {
            _responseQueue.Enqueue(response);
            return this;
        }

        public MockLLMClient WithQueuedResponses(params LLMResponse[] responses)
        {
            foreach (var response in responses)
            {
                _responseQueue.Enqueue(response);
            }
            return this;
        }

        public MockLLMClient WithDefaultException(Exception exception)
        {
            _defaultException = exception;
            return this;
        }

        public MockLLMClient WithQueuedException(Exception exception)
        {
            _exceptionQueue.Enqueue(exception);
            return this;
        }

        public MockLLMClient WithDelay(TimeSpan delay)
        {
            DelayBeforeResponse = delay;
            return this;
        }

        public TimeSpan DelayBeforeResponse { get; set; } = TimeSpan.Zero;

        public async Task<LLMResponse> SendRequestAsync(LLMRequest request)
        {
            CallCount++;
            SentRequests.Add(request);

            if (DelayBeforeResponse > TimeSpan.Zero)
            {
                await Task.Delay(DelayBeforeResponse);
            }

            // Check for queued exceptions first
            if (_exceptionQueue.Count > 0)
            {
                throw _exceptionQueue.Dequeue();
            }

            // Check for default exception
            if (_defaultException != null && _responseQueue.Count == 0)
            {
                throw _defaultException;
            }

            // Return queued response if available
            if (_responseQueue.Count > 0)
            {
                return _responseQueue.Dequeue();
            }

            // Return default response
            if (_defaultResponse != null)
            {
                return _defaultResponse;
            }

            // Fallback response
            return new LLMResponse
            {
                OutputText = "Mock response",
                InputTokens = 10,
                OutputTokens = 5,
                TotalTokens = 15
            };
        }

        public async Task<LLMResponse> CreateTextResponseAsync(
            string systemMessage,
            string userMessage,
            string? instructions = "",
            List<string>? conversationHistory = null,
            string? model = "gpt-4o-mini",
            int? maxTokens = 1000,
            float? temperature = 0.7f)
        {
            var request = new LLMRequest
            {
                RequestId = Guid.NewGuid().ToString(),
                SystemPrompt = systemMessage,
                UserPrompt = userMessage,
                Instructions = instructions ?? "",
                ConversationHistory = conversationHistory?.Select(msg => new History { Role = "user", Content = msg }).ToList() ?? new List<History>(),
                Model = model ?? "gpt-4o-mini",
                MaxTokens = maxTokens ?? 1000,
                Temperature = temperature ?? 0.7f,
                OpenAiApiKey = "",
                UseUserApiKey = false
            };

            return await SendRequestAsync(request);
        }

        public void Reset()
        {
            _responseQueue.Clear();
            _exceptionQueue.Clear();
            SentRequests.Clear();
            CallCount = 0;
            _defaultResponse = null;
            _defaultException = null;
            DelayBeforeResponse = TimeSpan.Zero;
        }

        public LLMRequest? GetLastRequest()
        {
            return SentRequests.LastOrDefault();
        }

        public LLMRequest? GetRequest(int index)
        {
            // Guard against negative indices as well as overruns, returning null instead of throwing
            return index >= 0 && index < SentRequests.Count ? SentRequests[index] : null;
        }
    }

    /// <summary>
    /// Builder for creating common LLM response scenarios
    /// </summary>
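    /// <example>
    /// Illustrative usage, paired with MockLLMClient (the queued scenario values are examples only):
    /// <code>
    /// var mock = new MockLLMClient()
    ///     .WithQueuedResponses(
    ///         LLMResponseBuilder.Success("short answer"),
    ///         LLMResponseBuilder.EmptyResponse(),
    ///         LLMResponseBuilder.HighCostResponse());
    /// </code>
    /// </example>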
    public static class LLMResponseBuilder
    {
        public static LLMResponse Success(string outputText = "Mock success response", int inputTokens = 50, int outputTokens = 25)
        {
            return new LLMResponse
            {
                OutputText = outputText,
                InputTokens = inputTokens,
                OutputTokens = outputTokens,
                TotalTokens = inputTokens + outputTokens
            };
        }

        public static LLMResponse LargeResponse(int approximateTokens = 2000)
        {
            var text = new string('a', approximateTokens * 4); // Approximately 4 characters per token
            return new LLMResponse
            {
                OutputText = text,
                InputTokens = 100,
                OutputTokens = approximateTokens,
                TotalTokens = 100 + approximateTokens
            };
        }

        public static LLMResponse EmptyResponse()
        {
            return new LLMResponse
            {
                OutputText = "",
                InputTokens = 10,
                OutputTokens = 0,
                TotalTokens = 10
            };
        }

        public static LLMResponse HighCostResponse()
        {
            return new LLMResponse
            {
                OutputText = "This is an expensive response with many tokens used for processing complex requests.",
                InputTokens = 1500,
                OutputTokens = 800,
                TotalTokens = 2300
            };
        }

        public static LLMResponse ChatResponse(string message)
        {
            var estimatedTokens = message.Length / 4; // Rough estimation: ~4 characters per token
            return new LLMResponse
            {
                OutputText = message,
                InputTokens = estimatedTokens,
                OutputTokens = estimatedTokens,
                TotalTokens = estimatedTokens * 2 // Keep total consistent with the two estimates
            };
        }
    }

    /// <summary>
    /// Common LLM service exceptions for testing
    /// </summary>
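    /// <example>
    /// Illustrative failure-path setup (assertion style depends on the test framework in use):
    /// <code>
    /// var mock = new MockLLMClient()
    ///     .WithQueuedException(LLMExceptions.RateLimited())
    ///     .WithDefaultResponse(LLMResponseBuilder.Success());
    ///
    /// // The first call throws HttpRequestException("Rate limit exceeded");
    /// // subsequent calls fall back to the default success response.
    /// </code>
    /// </example>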
    public static class LLMExceptions
    {
        public static HttpRequestException ServiceUnavailable()
        {
            return new HttpRequestException("LLM service is temporarily unavailable");
        }

        public static HttpRequestException RateLimited()
        {
            return new HttpRequestException("Rate limit exceeded");
        }

        public static TimeoutException RequestTimeout()
        {
            return new TimeoutException("LLM request timed out");
        }

        public static InvalidOperationException InvalidRequest()
        {
            return new InvalidOperationException("Invalid request parameters");
        }

        public static ArgumentException InvalidModel(string model)
        {
            return new ArgumentException($"Unsupported model: {model}");
        }
    }
}