Skip to content

Commit fb645f5

Browse files
authored
Fix logging callback (#77)
1 parent d702c56 commit fb645f5

File tree

4 files changed

+11
-8
lines changed

4 files changed

+11
-8
lines changed

dailalib/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
__version__ = "3.15.4"
1+
__version__ = "3.15.5"
22

33
import os
44
# stop LiteLLM from querying at all to the remote server

dailalib/api/ai_api.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -118,9 +118,9 @@ def _requires_function(*args, ai_api: "AIAPI" = None, **kwargs):
118118

119119
return _requires_function
120120

121-
def on_query(self, query_name, model, prompt_style, function, decompilation, **kwargs):
121+
def on_query(self, query_name, model, prompt_style, function, decompilation, response, **kwargs):
122122
for func in self.query_callbacks:
123123
t = threading.Thread(
124-
target=func, args=(query_name, model, prompt_style, function, decompilation), kwargs=kwargs
124+
target=func, args=(query_name, model, prompt_style, function, decompilation, response), kwargs=kwargs
125125
)
126126
t.start()

dailalib/api/litellm/prompts/prompt.py

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -109,13 +109,13 @@ def _query_model(ai_api=self.ai_api, function=function, dec_text=dec_text, **_kw
109109
total_time = end_time - start_time
110110

111111
# callback to handlers of post-query
112-
ai_api.on_query(
113-
self.name, self.ai_api.model, self.ai_api.prompt_style, function, dec_text, total_time=total_time, cost=cost
114-
)
112+
callback_args = [self.name, self.ai_api.model, self.ai_api.prompt_style, function, dec_text, response]
113+
callback_kwargs = {"total_time": total_time, "cost": cost, "success": False}
115114

116115
default_response = {} if self._json_response else ""
117116
if not response:
118117
ai_api.warning(f"Response received from AI was empty! AI failed to answer.")
118+
ai_api.on_query(*callback_args, **callback_kwargs)
119119
return default_response
120120

121121
# changes response type to a dict
@@ -126,6 +126,7 @@ def _query_model(ai_api=self.ai_api, function=function, dec_text=dec_text, **_kw
126126

127127
json_matches = JSON_REGEX.findall(response)
128128
if not json_matches:
129+
ai_api.on_query(*callback_args, **callback_kwargs)
129130
return default_response
130131

131132
json_data = json_matches[-1]
@@ -148,6 +149,8 @@ def _query_model(ai_api=self.ai_api, function=function, dec_text=dec_text, **_kw
148149
log_str += f" AI likely failed to answer coherently."
149150
ai_api.info(log_str)
150151

152+
callback_kwargs["success"] = True
153+
ai_api.on_query(*callback_args, **callback_kwargs)
151154
if ai_api.has_decompiler_gui and response:
152155
ai_api.info("Updating the decompiler with the AI response...")
153156
self._gui_result_callback(response, function, ai_api, context=context)

dailalib/llm_chat/llm_chat_ui.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -137,7 +137,7 @@ def send_message(self, add_text=True, role="user"):
137137
if self.ai_api:
138138
send_callback = self.ai_api.chat_event_callbacks.get("send", None)
139139
if send_callback:
140-
send_callback(user_text)
140+
send_callback(user_text, model=self.model)
141141

142142
# Display user message
143143
if add_text:
@@ -164,7 +164,7 @@ def receive_message(self, assistant_message):
164164
if self.ai_api:
165165
recv_callback = self.ai_api.chat_event_callbacks.get("receive", None)
166166
if recv_callback:
167-
recv_callback(assistant_message)
167+
recv_callback(assistant_message, model=self.model)
168168

169169
# Append to chat history
170170
self.chat_history.append({"role": "user", "content": assistant_message})

0 commit comments

Comments (0)