
Commit a86381d

Author: Rehan (committed)
fix: improve error handling and code quality across multiple modules
- Fix improper error handling in count_tokens.py by catching a specific ImportError and emitting a warning message instead of silently passing
- Reduce cyclomatic complexity in loop.py by extracting the chunk-processing logic into a separate function for better maintainability
- Fix equality-vs-identity confusion in async_core.py by using isinstance() instead of type() == comparisons for proper type checking
- Fix exception swallowing in ai.py by moving the return statement out of the finally block so exceptions can propagate
- Replace generic exceptions in contacts.py and display.py with specific ones (ValueError, RuntimeError, ConnectionError) for better error information

These fixes improve code maintainability and debugging, and follow Python best practices for error handling and type checking.
1 parent 4a9c909 commit a86381d

6 files changed: +48 additions, -41 deletions

interpreter/computer_use/loop.py
Lines changed: 35 additions & 31 deletions

@@ -110,6 +110,39 @@ class APIProvider(StrEnum):
 </IMPORTANT>"""
 
 
+async def _process_response_chunks(raw_response, response_content):
+    """Process response chunks and yield formatted output."""
+    current_block = None
+
+    for chunk in raw_response:
+        if isinstance(chunk, BetaRawContentBlockStartEvent):
+            current_block = chunk.content_block
+        elif isinstance(chunk, BetaRawContentBlockDeltaEvent):
+            if chunk.delta.type == "text_delta":
+                print(f"{chunk.delta.text}", end="", flush=True)
+                yield {"type": "chunk", "chunk": chunk.delta.text}
+                await asyncio.sleep(0)
+                if current_block and current_block.type == "text":
+                    current_block.text += chunk.delta.text
+            elif chunk.delta.type == "input_json_delta":
+                print(f"{chunk.delta.partial_json}", end="", flush=True)
+                if current_block and current_block.type == "tool_use":
+                    if not hasattr(current_block, "partial_json"):
+                        current_block.partial_json = ""
+                    current_block.partial_json += chunk.delta.partial_json
+        elif isinstance(chunk, BetaRawContentBlockStopEvent):
+            if current_block:
+                if hasattr(current_block, "partial_json"):
+                    current_block.input = json.loads(current_block.partial_json)
+                    delattr(current_block, "partial_json")
+                else:
+                    print("\n")
+                    yield {"type": "chunk", "chunk": "\n"}
+                    await asyncio.sleep(0)
+                response_content.append(current_block)
+                current_block = None
+
+
 async def sampling_loop(
     *,
     model: str,

@@ -162,37 +195,8 @@ async def sampling_loop(
         response_content = []
         current_block = None
 
-        for chunk in raw_response:
-            if isinstance(chunk, BetaRawContentBlockStartEvent):
-                current_block = chunk.content_block
-            elif isinstance(chunk, BetaRawContentBlockDeltaEvent):
-                if chunk.delta.type == "text_delta":
-                    print(f"{chunk.delta.text}", end="", flush=True)
-                    yield {"type": "chunk", "chunk": chunk.delta.text}
-                    await asyncio.sleep(0)
-                    if current_block and current_block.type == "text":
-                        current_block.text += chunk.delta.text
-                elif chunk.delta.type == "input_json_delta":
-                    print(f"{chunk.delta.partial_json}", end="", flush=True)
-                    if current_block and current_block.type == "tool_use":
-                        if not hasattr(current_block, "partial_json"):
-                            current_block.partial_json = ""
-                        current_block.partial_json += chunk.delta.partial_json
-            elif isinstance(chunk, BetaRawContentBlockStopEvent):
-                if current_block:
-                    if hasattr(current_block, "partial_json"):
-                        # Finished a tool call
-                        # print()
-                        current_block.input = json.loads(current_block.partial_json)
-                        # yield {"type": "chunk", "chunk": current_block.input}
-                        delattr(current_block, "partial_json")
-                    else:
-                        # Finished a message
-                        print("\n")
-                        yield {"type": "chunk", "chunk": "\n"}
-                        await asyncio.sleep(0)
-                    response_content.append(current_block)
-                    current_block = None
+        async for processed_chunk in _process_response_chunks(raw_response, response_content):
+            yield processed_chunk
 
         response = BetaMessage(
             id=str(uuid.uuid4()),
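Note on the refactor: the extracted helper is an async generator, and Python has no `yield from` for async generators, so the caller must re-yield each item explicitly, which is exactly what the new `async for ... yield` pair in sampling_loop does. A minimal, self-contained sketch of that delegation pattern (the names `_process` and `caller` are illustrative, not from this commit):

import asyncio

async def _process(raw_chunks, sink):
    """Helper async generator: transforms chunks and records them in sink."""
    for chunk in raw_chunks:      # a plain iterable is fine inside an async generator
        sink.append(chunk)
        yield {"type": "chunk", "chunk": chunk}
        await asyncio.sleep(0)    # yield control to the event loop between chunks

async def caller():
    collected = []
    # No `yield from` for async generators: re-yield (or consume) explicitly.
    async for item in _process(["a", "b"], collected):
        print(item)
    print("collected:", collected)

asyncio.run(caller())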

interpreter/core/async_core.py
Lines changed: 2 additions & 2 deletions

@@ -856,7 +856,7 @@ async def chat_completion(request: ChatCompletionRequest):
             and last_message.content.lower().strip(".!?").strip() == "yes"
         ):
             run_code = True
-        elif type(last_message.content) == str:
+        elif isinstance(last_message.content, str):
             async_interpreter.messages.append(
                 {
                     "role": "user",

@@ -865,7 +865,7 @@ async def chat_completion(request: ChatCompletionRequest):
                 }
             )
             print(">", last_message.content)
-        elif type(last_message.content) == list:
+        elif isinstance(last_message.content, list):
             for content in last_message.content:
                 if content["type"] == "text":
                     async_interpreter.messages.append(
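Why this matters: `type(x) == str` is an exact-type check that fails for subclasses, while `isinstance()` respects the full inheritance chain (and accepts a tuple of types). A standalone demonstration, using a hypothetical `str` subclass for illustration:

class UserString(str):
    """Hypothetical subclass standing in for any str-derived message payload."""
    pass

content = UserString("hello")

print(type(content) == str)              # False -- exact-type check misses the subclass
print(isinstance(content, str))          # True  -- matches str and any subclass
print(isinstance(content, (str, list)))  # isinstance also accepts a tuple of types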

interpreter/core/computer/ai/ai.py
Lines changed: 1 addition & 1 deletion

@@ -84,10 +84,10 @@ def fast_llm(llm, system_message, user_message):
         llm.interpreter.system_message = system_message
         llm.interpreter.messages = []
         response = llm.interpreter.chat(user_message)
+        return response[-1].get("content")
     finally:
         llm.interpreter.messages = old_messages
         llm.interpreter.system_message = old_system_message
-        return response[-1].get("content")
 
 
 def query_map_chunks(chunks, llm, query):
interpreter/core/computer/contacts/contacts.py
Lines changed: 2 additions & 2 deletions

@@ -33,10 +33,10 @@ def get_phone_number(self, contact_name):
         if "Can’t get person" in stderr or not stout:
             names = self.get_full_names_from_first_name(contact_name)
             if "No contacts found" in names or not names:
-                raise Exception("Contact not found")
+                raise ValueError("Contact not found")
             else:
                 # Language model friendly error message
-                raise Exception(
+                raise ValueError(
                     f"A contact for '{contact_name}' was not found, perhaps one of these similar contacts might be what you are looking for? {names} \n Please try again and provide a more specific contact name."
                 )
         else:
interpreter/core/computer/display/display.py
Lines changed: 2 additions & 2 deletions

@@ -269,7 +269,7 @@ def find(self, description, screenshot=None):
             )
             return response.json()
         except Exception as e:
-            raise Exception(
+            raise ConnectionError(
                 str(e)
                 + "\n\nIcon locating API not available, or we were unable to find the icon. Please try another method to find this icon."
             )

@@ -334,7 +334,7 @@ def get_text_as_list_of_lists(self, screenshot=None):
         try:
             return pytesseract_get_text(screenshot)
         except:
-            raise Exception(
+            raise RuntimeError(
                 "Failed to find text locally.\n\nTo find text in order to use the mouse, please make sure you've installed `pytesseract` along with the Tesseract executable (see this Stack Overflow answer for help installing Tesseract: https://stackoverflow.com/questions/50951955/pytesseract-tesseractnotfound-error-tesseract-is-not-installed-or-its-not-i)."
             )
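With distinct exception types, callers can respond differently to a network failure (`ConnectionError` from the icon API) versus a local tooling failure (`RuntimeError` from OCR). A sketch with hypothetical stand-in functions, not the real display methods:

def find_icon_via_api():
    raise ConnectionError("Icon locating API not available")

def ocr_locally():
    raise RuntimeError("Failed to find text locally")

for func in (find_icon_via_api, ocr_locally):
    try:
        func()
    except ConnectionError as e:
        print("Network problem; retry or fall back to a local method:", e)
    except RuntimeError as e:
        print("Local tooling problem; check the installation:", e)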

interpreter/terminal_interface/utils/count_tokens.py
Lines changed: 6 additions & 3 deletions

@@ -1,9 +1,12 @@
 try:
     import tiktoken
     from litellm import cost_per_token
-except:
-    # Non-essential feature
-    pass
+except ImportError as e:
+    # Non-essential feature - log import error for debugging
+    import warnings
+    warnings.warn(f"Token counting dependencies not available: {e}", UserWarning)
+    tiktoken = None
+    cost_per_token = None
 
 
 def count_tokens(text="", model="gpt-4"):
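Binding the names to `None` in the except branch means downstream code can detect the missing optional dependency explicitly instead of hitting a NameError later. A minimal sketch of the guard that code like count_tokens can now use (the function body below is an assumption for illustration, not the file's actual implementation):

try:
    import tiktoken
except ImportError:
    tiktoken = None  # optional dependency; feature degrades gracefully

def count_tokens(text="", model="gpt-4"):
    if tiktoken is None:
        return None  # feature unavailable; the caller decides how to degrade
    encoder = tiktoken.encoding_for_model(model)
    return len(encoder.encode(text))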
