
Commit 3ff27c5

Fixed Llama Prompt Template to generate summary
1 parent c5ff3da commit 3ff27c5

File tree

6 files changed: +436 -12 lines changed


chws.py

Lines changed: 108 additions & 0 deletions
@@ -0,0 +1,108 @@
import json
from datetime import datetime

import ollama
import streamlit as st
import yaml


with open('config.yaml', 'r') as f:
    config = yaml.safe_load(f)

session_dir = config["session_path"]

def save_chat_history(chat_history, session_key, session_model):
    file_name = f"{session_dir}/{session_key}"
    # append a model-generated summary before persisting
    chws = get_summary(chat_history, session_model)
    with open(file_name, "w") as f:
        json.dump(chws, f)

def load_chat_history_json(session_name):
    filename = f"{session_dir}/{session_name}"
    with open(filename, "r") as f:
        json_data = json.load(f)
    return json_data

def get_timestamp():
    return datetime.now().strftime("%Y-%m-%d-%H-%M-%S")

def create_new_chat():
    st.session_state["session_key"] = "new_session"
    # pop() rather than del: avoids a KeyError when no history exists yet
    st.session_state.pop("messages", None)
    st.session_state["messages"] = []

def load_chat():
    for message in st.session_state["messages"]:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

def set_session_name(session):
    st.session_state.session_key = session
    st.session_state.pop("messages", None)
    st.session_state["messages"] = load_chat_history_json(session)

def save_session(session_key):
    if "messages" in st.session_state:
        if st.session_state.session_key == "new_session":
            st.session_state.session_key = get_timestamp() + '.json'
        save_chat_history(st.session_state['messages'], st.session_state.session_key, st.session_state['model'])

def get_summary(session_messages, model):
    print("Saving....")
    conversation_string = "\n".join(
        f"{message['role']}: {message['content']}" for message in session_messages
    )
    # word budget: roughly a quarter of the character count, with a floor of 10
    word_limit = max(10, len(conversation_string) // 4)

    fpc = ['Knowly', 'I', 'Me', 'My', 'Mine', 'Myself']
    spc = ['You', 'Your', 'Yours', 'Yourself', 'Yourselves']

    prompt = f"""**In the following conversation,**

    Rewrite the summary but this time replace any references to Assistant or system with first person perspective (Helping first person pronouns: {fpc}). And replace any references to the user in second person perspective.

    {conversation_string}
    """

    prompt_1 = f"""
    Summarize this conversation in a single paragraph, ensuring the summary is within {word_limit} words.

    Start with "Sure here is the conversation summary."
    {prompt}
    """

    # Llama 2 chat template: system instructions inside <<SYS>>...<</SYS>>,
    # wrapped in an [INST]...[/INST] turn
    llama_prompt = f"""
    <s>[INST] <<SYS>>
    Summarize the following conversation in a single paragraph within {word_limit} words, starting with "Here is a summary of the conversation:"
    <</SYS>>

    {conversation_string} [/INST]
    """
    llama_prompt_2 = f"""
    <s>[INST] <<SYS>>
    **In the following conversation,**
    Rewrite the summary but this time replace any references to Assistant or system with first person perspective (Helping first person pronouns: {fpc}). And replace any references to the user in second person perspective.
    <</SYS>>

    {llama_prompt} [/INST]
    """
    if "gemma" in model:
        response = ollama.chat(model=model, messages=[
            {'role': 'system', 'content': prompt_1},
            {'role': 'user', 'content': conversation_string},
        ])
    else:
        response = ollama.chat(model=model, messages=[
            {'role': 'system', 'content': llama_prompt},
            {'role': 'user', 'content': conversation_string},
        ])

    # store only the generated text so the history stays JSON-serializable
    session_messages.append({"role": "assistant", "content": response["message"]["content"]})

    return session_messages
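
Taken together, these helpers persist each chat session as a JSON file with a model-generated summary appended as a final assistant message. A minimal usage sketch outside Streamlit, assuming a running Ollama server, the sessions directory from config.yaml, and an illustrative two-message history:

from chws import save_chat_history, load_chat_history_json, get_timestamp

# illustrative history; in the app this comes from st.session_state["messages"]
messages = [
    {"role": "user", "content": "What is a context window?"},
    {"role": "assistant", "content": "The number of tokens a model can attend to at once."},
]
session_file = get_timestamp() + ".json"
save_chat_history(messages, session_file, "gemma:2b")       # summarizes, then writes JSON
print(load_chat_history_json(session_file)[-1]["content"])  # the appended summary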

config.yaml

Lines changed: 2 additions & 2 deletions
@@ -10,10 +10,10 @@ image_dir: "images"
 session_path: "sessions"
 pdf_path: "docs"
-models: ["Llama2", "Gemma7B", "Llava", "TinyLlama"]
+models: ["Llama2", "Gemma", "Llava", "TinyLlama"]
 model_map: {
   "Llama2": "llama2-uncensored:latest",
-  "Gemma7B": "gemma:latest",
+  "Gemma": "gemma:2b",
   "Llava": "llava:latest",
   "TinyLlama": "tinyllama:latest"
 }
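
The renamed Gemma entry now points at the smaller gemma:2b tag. Presumably the UI shows the names in models and resolves them to Ollama tags through model_map; a sketch of that lookup (variable names here are illustrative, the config loading mirrors chws.py):

import yaml

with open("config.yaml", "r") as f:
    config = yaml.safe_load(f)

display_name = "Gemma"                         # name shown in the model picker
model_tag = config["model_map"][display_name]  # -> "gemma:2b"

Note that get_summary branches on "gemma" in model, which matches the resolved tag rather than the capitalized display name, so the tag is what must be passed through.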

llm_response.py

Lines changed: 1 addition & 1 deletion
@@ -31,4 +31,4 @@ def model_res_generator(rag:bool=False):
     if rag:
         st.session_state["messages"][-1]["content"] = prompt
     for chunk in stream:
-        yield chunk["message"]["content"]
+        yield chunk["message"]["content"]
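
Only the tail of model_res_generator is visible in this hunk, and the -/+ pair is identical text, which usually means a missing trailing newline was added. For context, a minimal sketch of the streaming pattern the yield relies on, using the ollama client's stream=True mode (the function name and arguments here are illustrative):

import ollama

def stream_reply(model, messages):
    # stream=True makes ollama.chat return an iterator of partial responses;
    # each chunk carries the next piece of text under chunk["message"]["content"]
    stream = ollama.chat(model=model, messages=messages, stream=True)
    for chunk in stream:
        yield chunk["message"]["content"]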

test_get_summary.py

Lines changed: 121 additions & 0 deletions
@@ -0,0 +1,121 @@
import json

import ollama

def get_gemma_summary(session_messages, model):
    conversation_string = "\n".join(
        f"{message['role']}: {message['content']}" for message in session_messages
    )
    word_limit = max(10, len(conversation_string) // 4)

    fpc = ['Knowly', 'I', 'Me', 'My', 'Mine', 'Myself']
    spc = ['You', 'Your', 'Yours', 'Yourself', 'Yourselves']

    prompt = f"""**In the following conversation,**

    Rewrite the summary but this time replace any references to Assistant or system with first person perspective (Helping first person pronouns: {fpc}). And replace any references to the user in second person perspective.

    {conversation_string}
    """

    prompt_1 = f"""
    Summarize this conversation in a single paragraph, ensuring the summary is within {word_limit} words.

    Start with "Sure here is the conversation summary."
    {prompt}
    """

    llama_prompt = f"""
    <s>[INST] <<SYS>>
    Summarize the following conversation in a single paragraph within {word_limit} words, starting with "Here is a summary of the conversation:"
    <</SYS>>

    {conversation_string} [/INST]
    """
    print("\n\ngenerating response...")
    if "gemma" in model:
        response = ollama.chat(model=model, messages=[
            {'role': 'system', 'content': prompt_1},
            {'role': 'user', 'content': conversation_string},
        ])
    else:
        response = ollama.chat(model=model, messages=[
            {'role': 'system', 'content': llama_prompt},
            {'role': 'user', 'content': conversation_string},
        ])

    summary = response["message"]["content"]

    return summary

def get_summary(session_messages, model):
    conversation_string = "\n".join(
        f"{message['role']}: {message['content']}" for message in session_messages
    )
    word_limit = max(10, len(conversation_string) // 4)

    fpc = ['Knowly', 'I', 'Me', 'My', 'Mine', 'Myself']
    spc = ['You', 'Your', 'Yours', 'Yourself', 'Yourselves']

    prompt = f"""**In the following conversation,**

    Rewrite the summary but this time replace any references to Assistant or system with first person perspective (Helping first person pronouns: {fpc}). And replace any references to the user in second person perspective.

    {conversation_string}
    """

    prompt_1 = f"""
    Summarize this conversation in a single paragraph, ensuring the summary is within {word_limit} words.

    Start with "Sure here is the conversation summary."
    {prompt}
    """

    llama_prompt = f"""
    <s>[INST] <<SYS>>
    Summarize the following conversation in a single paragraph within {word_limit} words, starting with "Here is a summary of the conversation:"
    <</SYS>>

    {conversation_string} [/INST]
    """
    llama_prompt_2 = f"""
    <s>[INST] <<SYS>>
    **In the following conversation,**
    Rewrite the summary but this time replace any references to Assistant or system with first person perspective (Helping first person pronouns: {fpc}). And replace any references to the user in second person perspective.
    <</SYS>>

    {llama_prompt} [/INST]
    """
    print("\n\ngenerating response...")
    if "gemma" in model:
        response = ollama.chat(model=model, messages=[
            {'role': 'system', 'content': prompt_1},
            {'role': 'user', 'content': conversation_string},
        ])
    else:
        response = ollama.chat(model=model, messages=[
            {'role': 'system', 'content': llama_prompt},
            {'role': 'user', 'content': conversation_string},
        ])
    print("\n\n")
    print(response)
    print("\n\n")

    # extract the generated text from the chat response
    summary = response["message"]["content"]
    return summary

filename = './sessions/2024-03-30-09-50-00.json'
print("\n\n")
with open(filename, "r", encoding='utf-8') as f:
    try:
        # Read the entire file content
        data = f.read()
        # Strip potential hidden characters at the beginning/end
        data = data.strip()
        session_messages = json.loads(data)
    except json.decoder.JSONDecodeError as e:
        print(f"Error decoding JSON: {e}")
        # re-raise: session_messages is undefined without valid JSON
        raise

summary = get_summary(session_messages=session_messages, model='tinyllama')
print(summary)
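
The harness can be run directly as a quick check; it assumes an Ollama server with the tinyllama tag pulled and a saved session at the hard-coded path above:

python test_get_summary.py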

utils.py

Lines changed: 34 additions & 9 deletions
@@ -3,7 +3,6 @@
 import ollama
 import streamlit as st
 import yaml
-import os


 with open('config.yaml', 'r') as f:

@@ -41,15 +40,20 @@ def set_session_name(session):
     st.session_state["messages"] = load_chat_history_json(session)

 def save_session(session_key):
+
     if "messages" in st.session_state:
+
         if st.session_state.session_key == "new_session":
             st.session_state.session_key = get_timestamp() + '.json'
-            save_chat_history(st.session_state['messages'], st.session_state.session_key)
+            save_chat_history(st.session_state['messages'], st.session_state.session_key, st.session_state['model'])
         else:
-            save_chat_history(st.session_state['messages'], st.session_state.session_key)
+            save_chat_history(st.session_state['messages'], st.session_state.session_key, st.session_state['model'])

-def get_summary(session_messages):
 
+
+
+def get_summary(session_messages, model):
+    print("Saving....")
     conversation_string = "\n".join(
         f"{message['role']}: {message['content']}" for message in session_messages
     )

@@ -71,12 +75,33 @@ def get_summary(session_messages):
     Start with "Sure here is the conversation summary."
     {prompt}
     """
+
+    llama_prompt = f"""
+    <s>[INST] <<SYS>>
+    Summarize the following conversation in a single paragraph within {word_limit} words, starting with "Here is a summary of the conversation:"
+    <</SYS>>

-    response = ollama.chat(model='gemma:2b', messages=[
-        {'role': 'system', 'content': prompt_1},
-        {'role': 'user', 'content': conversation_string},
-    ])
+    {conversation_string} [/INST]
+    """
+    llama_prompt_2 = f"""
+    <s>[INST] <<SYS>>
+    **In the following conversation,**
+    Rewrite the summary but this time replace any references to Assistant or system with first person perspective (Helping first person pronouns: {fpc}). And replace any references to the user in second person perspective.
+    <</SYS>>
+
+    {llama_prompt} [/INST]
+    """
+    if "gemma" in model:
+        response = ollama.chat(model=model, messages=[
+            {'role': 'system', 'content': prompt_1},
+            {'role': 'user', 'content': conversation_string},
+        ])
+    else:
+        response = ollama.chat(model=model, messages=[
+            {'role': 'system', 'content': llama_prompt},
+            {'role': 'user', 'content': conversation_string},
+        ])

     summary = response["message"]["content"]
-
+
     return summary
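
This hunk is the fix the commit message refers to: instead of always calling gemma:2b with a plain system prompt, get_summary now wraps the summary request in the Llama 2 chat template for non-Gemma models, with the system instructions inside <<SYS>>...<</SYS>> within an [INST]...[/INST] turn. Ignoring the f-string's leading indentation, the rendered llama_prompt for the illustrative two-message history used earlier looks roughly like this (the 23-word limit follows from max(10, len(conversation_string) // 4) for that history):

<s>[INST] <<SYS>>
Summarize the following conversation in a single paragraph within 23 words, starting with "Here is a summary of the conversation:"
<</SYS>>

user: What is a context window?
assistant: The number of tokens a model can attend to at once. [/INST]

One caveat worth noting: ollama.chat also applies the model's own chat template to role-tagged messages, so passing a pre-templated string as the system message can double-wrap the markers; sending the instructions as plain text and letting Ollama render the template is a common alternative.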
