             'httpcore').setLevel(logging.WARNING)
         logging.getLogger(
             'markdown_it.rules_block').setLevel(logging.WARNING)
+        logging.getLogger(
+            'comtypes').setLevel(logging.WARNING)
     else:
         logging.basicConfig(level=level, format=LOGGING_FORMAT)
 else:
@@ -152,7 +154,7 @@ async def follow_conversation(
     Returns:
         The conversation memory.
     """
-    logging.info('Starting conversation with user input from line 151: %s', user_text)
+    logging.info('Starting conversation with user input from line 157: %s', user_text)
 
     ind = min(mem_size, len(memory))
     if ind == 0:
@@ -167,12 +169,12 @@ async def follow_conversation(
     ):
         ind -= 1
         memory.pop(0)  # Removes the oldest messages if the limit is exceeded
-        logging.debug('Line 166 Removed oldest message due to context limit')
+        logging.debug('Line 172 Removed oldest message due to context limit')
 
     response = await main_client.chat.completions.create(
         model=model, messages=memory[-ind:]
     )
-    logging.info('Line 171 Received response from chat completion')
+    logging.info('Line 177 Received response from chat completion')
 
     # Checks if the response has the expected structure and content
     if (
@@ -182,7 +184,7 @@ async def follow_conversation(
     ):
         tr = response.choices[0].message.content
         memory.append({"role": "assistant", "content": tr})
-        logging.info('Line 181 Added assistant response to memory: %s', tr)
+        logging.info('Line 187 Added assistant response to memory: %s', tr)
     else:
         # Handles the case where the expected content is not available
         memory.append(
@@ -191,7 +193,7 @@ async def follow_conversation(
                 "content": "I'm not sure how to respond to that."
             }
         )
-        logging.warning('Line 190 Expected content not available in response')
+        logging.warning('Line 196 Expected content not available in response')
 
     return memory
 
@@ -221,7 +223,7 @@ async def run_conversation(
         The final response from the model.
     """
     logging.info(
-        'Starting conversation with user input line 219: %s',
+        'Starting conversation with user input line 225: %s',
         original_user_input
     )
 
@@ -235,7 +237,7 @@ async def run_conversation(
 
     while len(json.dumps(memory)) > 128000:
         memory.pop(0)
-        logging.debug('Line 231 removed oldest message due to context limit')
+        logging.debug('Line 240 removed oldest message due to context limit')
 
     response = await main_client.chat.completions.create(
         model=openai_defaults["model"],
@@ -248,7 +250,7 @@ async def run_conversation(
         frequency_penalty=openai_defaults["frequency_penalty"],
         presence_penalty=openai_defaults["presence_penalty"],
     )
-    logging.info('Line 244 received response from chat completion')
+    logging.info('Line 253 received response from chat completion')
 
     response_message = response.choices[0].message
     tool_calls = (
@@ -264,7 +266,7 @@ async def run_conversation(
             }
         )
         logging.info(
-            'Line 259 added assistant response to memory: %s',
+            'Line 268 added assistant response to memory: %s',
             response_message.content
         )
 
@@ -277,7 +279,7 @@ async def run_conversation(
 
             if function_name not in available_functions:
                 logging.warning(
-                    'Line 269 function %s is not available',
+                    'Line 281 function %s is not available',
                     function_name
                 )
                 continue
@@ -286,7 +288,7 @@ async def run_conversation(
             function_args = json.loads(tool_call.function.arguments)
 
             logging.info(
-                "Line 276 calling function: %s args: %s",
+                "Line 290 calling function: %s args: %s",
                 function_name,
                 function_args,
             )
@@ -296,7 +298,7 @@ async def run_conversation(
             else:
                 function_response = function_to_call(**function_args)
             logging.info(
-                "Line 285 function %s returned: %s",
+                "Line 300 function %s returned: %s",
                 function_name,
                 function_response,
             )
@@ -343,7 +345,7 @@ async def run_conversation(
             frequency_penalty=openai_defaults["frequency_penalty"],
             presence_penalty=openai_defaults["presence_penalty"],
         )
-        logging.info('Line 333 received second response from chat completion')
+        logging.info('Line 348 received second response from chat completion')
         return second_response, memory
     else:
         return response, memory
@@ -380,7 +382,7 @@ async def main():
         "ask_chat_gpt_4_0613_asynchronous": ask_chat_gpt_4_0613_asynchronous,
         # Add more core functions here
     }
-    logging.info('Initialized available functions line 372')
+    logging.info('Initialized available functions line 385')
 
     # Define the available core tools
     tools = [
@@ -492,26 +494,26 @@ async def main():
             },
         },
     ]
-    logging.info('Defined available core tools line 484')
+    logging.info('Defined available core tools line 497')
 
     # Use the load_plugins_and_get_tools function to conditionally add tools
     available_functions, tools = await enable_plugins(
         available_functions,
         tools
     )
-    logging.info('Enabled plugins line 491')
+    logging.info('Enabled plugins line 504')
 
     # Initialize the conversation memory
     memory = []
-    logging.info('Initialized conversation memory line 495')
+    logging.info('Initialized conversation memory line 508')
 
     # Main Loop
     while True:
         # Ask the user for input
         user_input = Prompt.ask(
             "\nHow can I be of assistance? ([yellow]/tools[/yellow] or [bold yellow]quit[/bold yellow])",
         )
-        logging.info('Line 503 received user input: %s', user_input)
+        logging.info('Line 516 received user input: %s', user_input)
 
         # Check if the user wants to exit the program
         if user_input.lower() == "quit":
@@ -537,13 +539,12 @@ async def main():
             },
             {"role": "user", "content": f"{user_input}"},
         ]
-        logging.info('Line 529 prepared conversation messages')
+        logging.info('Line 542 prepared conversation messages')
 
         # Start the spinner
         with live_spinner:
             # Start the spinner
             live_spinner.start()
-            logging.info('Started spinner')
 
             # Pass the user input and memory to the run_conversation function
             final_response, memory = await run_conversation(
@@ -559,23 +560,28 @@ async def main():
 
         # Print the final response from the model or use TTS
         if final_response:
-            final_text = final_response.choices[0].message.content
-            logging.info("Line 554 final response from model: %s', final_text")
-            if use_tts:
-                # Use TTS to output the final response
-                console.print("\n" + final_text, style="green")
-                tts_output(final_text)
+            response_message = final_response.choices[0].message
+            if response_message.content is not None:
+                final_text = response_message.content
+                if use_tts:
+                    # Use TTS to output the final response
+                    console.print("\n" + final_text, style="green")
+                    tts_output(final_text)
+                else:
+                    # Print the final response to the console
+                    console.print("\n" + final_text, style="green")
             else:
-                # Print the final response to the console
-                console.print("\n" + final_text, style="green")
+                # Print an error message if the model did not return a response
+                logging.warning('Model did not return a response line 575')
+                console.print("\nI'm not sure how to help with that.", style="red")
         else:
             # Print an error message if the model did not return a response
-            logging.warning('Model did not return a response line 564')
+            logging.warning('Model did not return a response line 579')
             console.print("\nI'm not sure how to help with that.", style="red")
 
         # Remove tools from the tools list after processing
         tools[:] = [tool for tool in tools if not tool.get("function", {}).get("name", "").lower() in user_input.lower()]
-        logging.info('Removed used tools from the tools list line 569')
+        logging.info('Removed used tools from the tools list line 584')
 
 
 # Run the main function
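
For reference, the guarded output handling introduced in the final hunk reduces to the pattern sketched below. This is a minimal illustration, not part of the commit: the function name and parameters are hypothetical, and `console`, `use_tts`, and `tts_output` stand in for the Rich console, TTS flag, and TTS helper defined elsewhere in the script.

# Sketch of the final hunk's pattern: only treat the reply as printable text
# when message.content is present, since a chat completion that returns tool
# calls can have content set to None.
def render_final_response(final_response, use_tts, console, tts_output):
    if final_response is None:
        console.print("\nI'm not sure how to help with that.", style="red")
        return
    message = final_response.choices[0].message
    if message.content is None:
        # No text came back (e.g. the model answered with tool calls only).
        console.print("\nI'm not sure how to help with that.", style="red")
        return
    # Print the text, and optionally speak it as well.
    console.print("\n" + message.content, style="green")
    if use_tts:
        tts_output(message.content)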