This repository was archived by the owner on Jul 26, 2025. It is now read-only.

Commit b1efcf9

Merge pull request #350 from mraniki/dev
💄 improve history export, added import capability
2 parents d6475a5 + a5b50d0

4 files changed: +43 -8 lines

myllm/default_settings.toml (9 additions, 3 deletions)

@@ -24,6 +24,7 @@ llm_model= "gpt-4" # model to use e.g. gpt-3.5-turbo, gpt-4, gpt-4-32k
 llm_provider = "g4f.Provider.Liaobots" # only for g4f. Refer to https://github.com/xtekky/gpt4free
 llm_provider_key = "" # only for bard and openai to pass either the cookie or the api key
 max_memory = 100 # Conversation history size
+load_history = false # load conversation history via a json file
 timeout = 5 # time lag to wait ai response
 llm_prefix = "🐻" # prefix use to filter the AI response
 llm_template = """
@@ -38,7 +39,8 @@ Be courteuous, simple and direct omitting any form of greeting or salutation.
 # llm_provider = ""
 # llm_provider_key = "DEADBE4F"
 # llm_base_url = "http://localhost:8080"
-# max_memory = 100
+# max_memory = 100
+# load_history = false
 # timeout = 5
 # llm_prefix = ""
 # llm_template = """
@@ -53,7 +55,8 @@ Be courteuous, simple and direct omitting any form of greeting or salutation.
 # llm_provider = ""
 # llm_provider_key = "DEADBE4F"
 # max_memory = 100
-# timeout = 5
+# timeout = 5
+# load_history = false
 # llm_prefix = ""
 # llm_template = """
 # You are a friendly AI, helping me with
@@ -66,7 +69,8 @@ Be courteuous, simple and direct omitting any form of greeting or salutation.
 # llm_model= ""
 # llm_provider = "g4f.Provider.Llama2"
 # llm_provider_key = ""
-# max_memory = 10
+# max_memory = 10
+# load_history = false
 # timeout = 1
 # llm_prefix = ""
 # llm_template = """
@@ -82,6 +86,7 @@ Be courteuous, simple and direct omitting any form of greeting or salutation.
 # llm_provider = "g4f.Provider.Bing"
 # llm_provider_key = ""
 # max_memory = 10
+# load_history = false
 # timeout = 2
 # llm_prefix = ""
 # llm_template = """
@@ -97,6 +102,7 @@ Be courteuous, simple and direct omitting any form of greeting or salutation.
 # llm_provider = ""
 # llm_provider_key = { __Secure-1PAPISID = "", __Secure-1PSID = "", __Secure-1PSIDCC = "", __Secure-1PSIDTS = "" }
 # max_memory = 100 # Conversation history size
+# load_history = false
 # timeout = 5 # time lag to wait ai response
 # llm_prefix = ""
 # llm_template = """
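
The new load_history key defaults to false in every provider block; when set to true, the client replays a previously exported JSON history file at startup. A minimal sketch of that gating, assuming a plain settings dict stands in for the parsed TOML and a made-up filename following the history-<client name>.json pattern from client.py below:

import json
import os

# Hypothetical stand-in for the parsed settings above.
settings = {"load_history": True, "max_memory": 100}

def maybe_load_history(filename="history-g4f.json"):
    # Mirrors the guard added in client.py below: do nothing when the
    # flag is off or no exported history file exists yet.
    if settings["load_history"] and os.path.exists(filename):
        with open(filename) as f:
            return json.load(f)
    return []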

myllm/main.py (1 addition, 0 deletions)

@@ -58,6 +58,7 @@ def __init__(self):
             llm_provider_key=_config.get("llm_provider_key"),
             llm_base_url=_config.get("llm_base_url") or None,
             max_memory=_config.get("max_memory") or 5,
+            load_history=_config.get("load_history") or False,
             timeout=_config.get("timeout") or 10,
             llm_prefix=_config.get("llm_prefix") or "",
             llm_template=_config.get("llm_template")
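
The `or` fallback used for each setting coerces any falsy configured value (missing key, None, empty string, 0) to the hard-coded default, which is stronger than a plain dict.get default. A quick illustration with a hypothetical _config dict:

_config = {"max_memory": 0}

# dict.get keeps an explicitly configured falsy value...
assert _config.get("max_memory", 5) == 0
# ...while the `or` idiom above replaces it with the fallback.
assert (_config.get("max_memory") or 5) == 5
assert (_config.get("load_history") or False) is False  # key absent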

myllm/provider/client.py (32 additions, 4 deletions)

@@ -1,4 +1,5 @@
 import json
+import os
 
 
 class AIClient:
@@ -15,6 +16,7 @@ class AIClient:
     chat(self, prompt)
     clear_chat_history(self)
     export_chat_history(self)
+    import_chat_history(self)
 
     """
 
@@ -28,6 +30,8 @@ def __init__(
         llm_provider_key=None,
         llm_base_url=None,
         max_memory=None,
+        load_history=False,
+        history_filename="",
         timeout=None,
         llm_prefix=None,
         llm_template=None,
@@ -47,6 +51,8 @@ def __init__(
         self.llm_base_url = llm_base_url
         self.llm_prefix = llm_prefix
         self.max_memory = max_memory
+        self.load_history = load_history
+        self.history_filename = history_filename or f"history-{self.name}.json"
         self.timeout = timeout
         self.conversation = Conversation(
             max_memory=max_memory, llm_template=llm_template
@@ -74,7 +80,13 @@ async def export_chat_history(self):
         """
         Clears the chat history
         """
-        self.conversation.export_messages()
+        self.conversation.export_messages(self.history_filename)
+
+    async def import_chat_history(self):
+        """
+        Import chat history
+        """
+        self.conversation.import_messages(self.history_filename)
 
 
 class Conversation:
@@ -123,15 +135,31 @@ def get_messages_as_string(self, separator="\n"):
         )
         return messages_str
 
-    def export_messages(self):
+    def export_messages(self, filename):
         """
         Export messages to a JSON file.
 
         Parameters:
-        self: the instance of the class
+        filename (str): the name of the file
 
         Returns:
         None
         """
-        with open("history.json", "w") as f:
+        with open(filename, "w") as f:
             json.dump(self.messages, f, indent=4)
+
+    def import_messages(self, filename):
+        """
+        Import messages from a JSON file
+
+        Parameters:
+        filename (str): the name of the file
+
+        Returns:
+        None
+        """
+        if not os.path.exists(filename):
+            return
+        if self.load_history:
+            with open(filename, "r") as f:
+                self.messages = json.load(f)
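
Taken together, the new surface gives each client a per-instance JSON history file, history-<client name>.json by default, replacing the previous hard-coded history.json. A hedged usage sketch (the client object is a hypothetical, already-constructed AIClient; the method names come from the hunks above). Note that import_messages reads self.load_history on Conversation while this commit assigns the flag on AIClient, so the flag presumably needs to reach the Conversation instance for the guard to fire:

import asyncio

async def round_trip(client):
    # Export dumps conversation.messages to history-<name>.json ...
    await client.export_chat_history()
    # ... and import replaces conversation.messages wholesale from that
    # file, returning silently if the file does not exist.
    await client.import_chat_history()

# asyncio.run(round_trip(client))  # client: a concrete AIClient instance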

pyproject.toml (1 addition, 1 deletion)

@@ -29,7 +29,7 @@ loguru = ">=0.6.0"
 httpx = ">=0.24.1"
 js2py = "^0.74"
 PyExecJS2="1.6.1"
-g4f = "0.2.1.5"
+g4f = "0.2.1.6"
 curl_cffi = "0.5.10"
 Brotli = "1.1.0"
 openai = "1.12.0"
