From 2e89aa3726a7f042e5b6f8cbe73841ad09159548 Mon Sep 17 00:00:00 2001
From: 1308100560 <1308100560@qq.com>
Date: Mon, 29 Apr 2024 17:20:48 +0800
Subject: [PATCH 1/6] =?UTF-8?q?=E6=B7=BB=E5=8A=A0=E4=B8=89=E4=B8=AA?=
=?UTF-8?q?=E7=95=8C=E9=9D=A2?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
6_Speech_Recognition.py | 123 +++++++++++++++++++++++++++++++++++++
7_Text_error_correction.py | 91 +++++++++++++++++++++++++++
8_dialogue.py | 91 +++++++++++++++++++++++++++
3 files changed, 305 insertions(+)
create mode 100644 6_Speech_Recognition.py
create mode 100644 7_Text_error_correction.py
create mode 100644 8_dialogue.py
diff --git a/6_Speech_Recognition.py b/6_Speech_Recognition.py
new file mode 100644
index 000000000..ef2dacd24
--- /dev/null
+++ b/6_Speech_Recognition.py
@@ -0,0 +1,123 @@
+import streamlit as st
+import anthropic
+import ollama as ol
+import streamlit as st
+from streamlit_mic_recorder import speech_to_text
+import datetime
+import json
+
+def log_interaction(action, data):
+ timestamp = datetime.datetime.now().isoformat()
+ log = {"timestamp": timestamp, "action": action, "data": data}
+ with open("user_interactions_log.json", "a") as logfile:
+ logfile.write(json.dumps(log) + "\n")
+
+def language_selector():
+ lang_options = ["ar", "de", "en", "es", "fr", "it", "ja", "nl", "pl", "pt", "ru", "zh"]
+ with st.sidebar:
+ return st.selectbox("Speech Language", ["en"] + lang_options)
+
+def print_txt(text):
+ if any("\u0600" <= c <= "\u06FF" for c in text): # check if text contains Arabic characters
+        text = f"<p style='direction: rtl; text-align: right;'>{text}</p>"
+ st.markdown(text, unsafe_allow_html=True)
+
+def print_chat_message(message):
+ text = message["content"]
+ if message["role"] == "user":
+ with st.chat_message("user", avatar="🎙️"):
+ print_txt(text)
+ elif message["role"] == "assistant":
+ with st.chat_message("assistant", avatar="🦙"):
+ print_txt(text)
+
+def get_chat_history(key):
+ return st.session_state.chat_history[key]
+
+def init_chat_history(key, system_prompt):
+ if "chat_history" not in st.session_state:
+ st.session_state.chat_history = {}
+ if key not in st.session_state.chat_history:
+ st.session_state.chat_history[key] = [{"role": "system", "content": system_prompt}]
+
+def system_prompt_input(default_prompt):
+ return st.sidebar.text_area("System Prompt", value=default_prompt, height=100)
+
+def record_voice(language="en"):
+ # https://github.com/B4PT0R/streamlit-mic-recorder?tab=readme-ov-file#example
+
+ state = st.session_state
+
+ if "text_received" not in state:
+ state.text_received = []
+
+ text = speech_to_text(
+ start_prompt="🎤 Click and speak to ask question",
+ stop_prompt="⚠️Stop recording🚨",
+ language=language,
+ use_container_width=True,
+ just_once=True,
+ )
+
+ if text:
+ state.text_received.append(text)
+
+ result = ""
+ for text in state.text_received:
+ result += text
+
+ state.text_received = []
+
+ return result if result else None
+
+def llm_selector():
+ ollama_models = [m['name'] for m in ol.list()['models']]
+ with st.sidebar:
+ return st.selectbox("LLM", ollama_models)
+
+
+
+st.title("🎙 语音识别")
+
+
+model = llm_selector()
+chat_key = f"语音识别_chat_history_{model}" # Unique key for each mode and model
+default_prompt = "你是一名语音识别助手,请把语音识别出的文字加上标点符号输出,不得改变原文。"
+
+system_prompt = system_prompt_input(default_prompt)
+init_chat_history(chat_key, system_prompt)
+chat_history = get_chat_history(chat_key)
+for message in chat_history:
+ print_chat_message(message)
+
+question = record_voice(language=language_selector())
+
+debug_mode = st.sidebar.checkbox("Debug Mode", value=True)
+log_interaction("User input", {"mode": "语音识别", "question": question})
+
+if question:
+ prompt = f"""{anthropic.HUMAN_PROMPT} Here's an article:\n\n
+ {question}\n\n\n\n{question}{anthropic.AI_PROMPT}"""
+
+ if question:
+ user_message = {"role": "user", "content": question}
+ # if app_mode == "语音识别":
+ print_chat_message(user_message)
+ chat_history.append(user_message)
+ response = ol.chat(model=model, messages=chat_history)
+ answer = response['message']['content']
+ ai_message = {"role": "assistant", "content": answer}
+ print_chat_message(ai_message)
+ chat_history.append(ai_message)
+ debug_info = {"messages": chat_history, "response": response}
+
+ if debug_mode:
+ st.write("Debug Info: Complete Prompt Interaction")
+ st.json(debug_info)
+
+ # truncate chat history to keep 20 messages max
+ if len(chat_history) > 20:
+ chat_history = chat_history[-20:]
+
+ # update chat history
+ st.session_state.chat_history[chat_key] = chat_history
diff --git a/7_Text_error_correction.py b/7_Text_error_correction.py
new file mode 100644
index 000000000..d9127afab
--- /dev/null
+++ b/7_Text_error_correction.py
@@ -0,0 +1,91 @@
+import streamlit as st
+import anthropic
+import ollama as ol
+import streamlit as st
+from streamlit_mic_recorder import speech_to_text
+import datetime
+import json
+
+def log_interaction(action, data):
+ timestamp = datetime.datetime.now().isoformat()
+ log = {"timestamp": timestamp, "action": action, "data": data}
+ with open("user_interactions_log.json", "a") as logfile:
+ logfile.write(json.dumps(log) + "\n")
+
+def print_txt(text):
+ if any("\u0600" <= c <= "\u06FF" for c in text): # check if text contains Arabic characters
+        text = f"<p style='direction: rtl; text-align: right;'>{text}</p>"
+ st.markdown(text, unsafe_allow_html=True)
+
+def print_chat_message(message):
+ text = message["content"]
+ if message["role"] == "user":
+ with st.chat_message("user", avatar="🎙️"):
+ print_txt(text)
+ elif message["role"] == "assistant":
+ with st.chat_message("assistant", avatar="🦙"):
+ print_txt(text)
+
+def get_chat_history(key):
+ return st.session_state.chat_history[key]
+
+def init_chat_history(key, system_prompt):
+ if "chat_history" not in st.session_state:
+ st.session_state.chat_history = {}
+ if key not in st.session_state.chat_history:
+ st.session_state.chat_history[key] = [{"role": "system", "content": system_prompt}]
+
+def system_prompt_input(default_prompt):
+ return st.sidebar.text_area("System Prompt", value=default_prompt, height=100)
+
+def llm_selector():
+ ollama_models = [m['name'] for m in ol.list()['models']]
+ with st.sidebar:
+ return st.selectbox("LLM", ollama_models)
+
+
+
+st.title("📝 文本纠错")
+
+
+model = llm_selector()
+chat_key = f"文本纠错_chat_history_{model}" # Unique key for each mode and model
+default_prompt = "你是一名文本纠错助手,请修改文本中的错别字并输出结果。"
+
+system_prompt = system_prompt_input(default_prompt)
+init_chat_history(chat_key, system_prompt)
+chat_history = get_chat_history(chat_key)
+for message in chat_history:
+ print_chat_message(message)
+
+question = st.text_input("Enter text for correction")
+
+debug_mode = st.sidebar.checkbox("Debug Mode", value=True)
+log_interaction("User input", {"mode": "文本纠错", "question": question})
+
+if question:
+ prompt = f"""{anthropic.HUMAN_PROMPT} Here's an article:\n\n
+ {question}\n\n\n\n{question}{anthropic.AI_PROMPT}"""
+
+ if question:
+ user_message = {"role": "user", "content": question}
+ # if app_mode == "语音识别":
+ print_chat_message(user_message)
+ chat_history.append(user_message)
+ response = ol.chat(model=model, messages=chat_history)
+ answer = response['message']['content']
+ ai_message = {"role": "assistant", "content": answer}
+ print_chat_message(ai_message)
+ chat_history.append(ai_message)
+ debug_info = {"messages": chat_history, "response": response}
+
+ if debug_mode:
+ st.write("Debug Info: Complete Prompt Interaction")
+ st.json(debug_info)
+
+ # truncate chat history to keep 20 messages max
+ if len(chat_history) > 20:
+ chat_history = chat_history[-20:]
+
+ # update chat history
+ st.session_state.chat_history[chat_key] = chat_history
diff --git a/8_dialogue.py b/8_dialogue.py
new file mode 100644
index 000000000..8cb8b0375
--- /dev/null
+++ b/8_dialogue.py
@@ -0,0 +1,91 @@
+import streamlit as st
+import anthropic
+import ollama as ol
+import streamlit as st
+from streamlit_mic_recorder import speech_to_text
+import datetime
+import json
+
+def log_interaction(action, data):
+ timestamp = datetime.datetime.now().isoformat()
+ log = {"timestamp": timestamp, "action": action, "data": data}
+ with open("user_interactions_log.json", "a") as logfile:
+ logfile.write(json.dumps(log) + "\n")
+
+def print_txt(text):
+ if any("\u0600" <= c <= "\u06FF" for c in text): # check if text contains Arabic characters
+        text = f"<p style='direction: rtl; text-align: right;'>{text}</p>"
+ st.markdown(text, unsafe_allow_html=True)
+
+def print_chat_message(message):
+ text = message["content"]
+ if message["role"] == "user":
+ with st.chat_message("user", avatar="🎙️"):
+ print_txt(text)
+ elif message["role"] == "assistant":
+ with st.chat_message("assistant", avatar="🦙"):
+ print_txt(text)
+
+def get_chat_history(key):
+ return st.session_state.chat_history[key]
+
+def init_chat_history(key, system_prompt):
+ if "chat_history" not in st.session_state:
+ st.session_state.chat_history = {}
+ if key not in st.session_state.chat_history:
+ st.session_state.chat_history[key] = [{"role": "system", "content": system_prompt}]
+
+def system_prompt_input(default_prompt):
+ return st.sidebar.text_area("System Prompt", value=default_prompt, height=100)
+
+def llm_selector():
+ ollama_models = [m['name'] for m in ol.list()['models']]
+ with st.sidebar:
+ return st.selectbox("LLM", ollama_models)
+
+
+
+st.title("💬 对话模式")
+
+
+model = llm_selector()
+chat_key = f"对话_chat_history_{model}" # Unique key for each mode and model
+default_prompt = "你是一位有用的助手。"
+
+system_prompt = system_prompt_input(default_prompt)
+init_chat_history(chat_key, system_prompt)
+chat_history = get_chat_history(chat_key)
+for message in chat_history:
+ print_chat_message(message)
+
+question = st.chat_input()
+
+debug_mode = st.sidebar.checkbox("Debug Mode", value=True)
+log_interaction("User input", {"mode": "对话", "question": question})
+
+if question:
+ prompt = f"""{anthropic.HUMAN_PROMPT} Here's an article:\n\n
+ {question}\n\n\n\n{question}{anthropic.AI_PROMPT}"""
+
+ if question:
+ user_message = {"role": "user", "content": question}
+ # if app_mode == "语音识别":
+ print_chat_message(user_message)
+ chat_history.append(user_message)
+ response = ol.chat(model=model, messages=chat_history)
+ answer = response['message']['content']
+ ai_message = {"role": "assistant", "content": answer}
+ print_chat_message(ai_message)
+ chat_history.append(ai_message)
+ debug_info = {"messages": chat_history, "response": response}
+
+ if debug_mode:
+ st.write("Debug Info: Complete Prompt Interaction")
+ st.json(debug_info)
+
+ # truncate chat history to keep 20 messages max
+ if len(chat_history) > 20:
+ chat_history = chat_history[-20:]
+
+ # update chat history
+ st.session_state.chat_history[chat_key] = chat_history
From 0cc45f9d43f8bcfc3767ff3a377b90bf1e190f98 Mon Sep 17 00:00:00 2001
From: XY <1308100560@qq.com>
Date: Wed, 8 May 2024 11:18:25 +0800
Subject: [PATCH 2/6] =?UTF-8?q?=E6=9B=B4=E6=96=B0?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
7_Text_error_correction.py | 2 +-
9_Patent disclosure document.py | 101 ++++++++++++++++++++++++++++++++
2 files changed, 102 insertions(+), 1 deletion(-)
create mode 100644 9_Patent disclosure document.py
diff --git a/7_Text_error_correction.py b/7_Text_error_correction.py
index d9127afab..328c28bb1 100644
--- a/7_Text_error_correction.py
+++ b/7_Text_error_correction.py
@@ -50,7 +50,7 @@ def llm_selector():
model = llm_selector()
chat_key = f"文本纠错_chat_history_{model}" # Unique key for each mode and model
-default_prompt = "你是一名文本纠错助手,请修改文本中的错别字并输出结果。"
+default_prompt = "你是一名文本纠错助手,请将下列语句中的错字、多字、少字修改正确,只输出修改后的句子,如果没有需要修改的地方,直接回复原句。不必做解释,直接输出即可。。"
system_prompt = system_prompt_input(default_prompt)
init_chat_history(chat_key, system_prompt)
diff --git a/9_Patent disclosure document.py b/9_Patent disclosure document.py
new file mode 100644
index 000000000..7b3fde084
--- /dev/null
+++ b/9_Patent disclosure document.py
@@ -0,0 +1,101 @@
+import streamlit as st
+import anthropic
+import ollama as ol
+import streamlit as st
+from streamlit_mic_recorder import speech_to_text
+import datetime
+import json
+
+def log_interaction(action, data):
+ timestamp = datetime.datetime.now().isoformat()
+ log = {"timestamp": timestamp, "action": action, "data": data}
+ with open("user_interactions_log.json", "a") as logfile:
+ logfile.write(json.dumps(log) + "\n")
+
+def print_txt(text):
+ if any("\u0600" <= c <= "\u06FF" for c in text): # check if text contains Arabic characters
+        text = f"<p style='direction: rtl; text-align: right;'>{text}</p>"
+ st.markdown(text, unsafe_allow_html=True)
+
+def print_chat_message(message):
+ text = message["content"]
+ if message["role"] == "user":
+ with st.chat_message("user", avatar="🎙️"):
+ print_txt(text)
+ elif message["role"] == "assistant":
+ with st.chat_message("assistant", avatar="🦙"):
+ print_txt(text)
+
+def get_chat_history(key):
+ return st.session_state.chat_history[key]
+
+def init_chat_history(key, system_prompt):
+ if "chat_history" not in st.session_state:
+ st.session_state.chat_history = {}
+ if key not in st.session_state.chat_history:
+ st.session_state.chat_history[key] = [{"role": "system", "content": system_prompt}]
+
+def system_prompt_input(default_prompt):
+ return st.sidebar.text_area("System Prompt", value=default_prompt, height=100)
+
+def llm_selector():
+ ollama_models = [m['name'] for m in ol.list()['models']]
+ with st.sidebar:
+ return st.selectbox("LLM", ollama_models)
+
+
+
+st.title("📝 Patent disclosure document")
+uploaded_file = st.file_uploader("Upload an article", type=("txt", "md", "docx"))
+
+
+model = llm_selector()
+chat_key = f"对话_chat_history_{model}" # Unique key for each mode and model
+default_prompt = ("你是一位有用的中文助手,回答我的任何问题都要详细说明,并且用中文回答我。"
+ "我要升成一篇专利交底书,请用中文回答我."
+ "内容包括发明名称、技术领域、现有技术一的技术方案、现有技术一的缺点、"
+ "与本发明相关的现有技术二、本发明所要解决的技术问题、本发明提供的完整技术方案、"
+ "本发明技术方案带来的有益效果、针对本发明提供的完整技术方案中的技术方案,"
+ "是否还有别的替代方案同样能完成发明目的、本发明的技术关键点和欲保护点是什么。")
+
+system_prompt = system_prompt_input(default_prompt)
+init_chat_history(chat_key, system_prompt)
+chat_history = get_chat_history(chat_key)
+for message in chat_history:
+ print_chat_message(message)
+
+question = st.chat_input()
+
+debug_mode = st.sidebar.checkbox("Debug Mode", value=True)
+log_interaction("User input", {"mode": "对话", "question": question})
+
+if question:
+ prompt = f"""{anthropic.HUMAN_PROMPT} Here's an article:\n\n
+ {question}\n\n\n\n{question}{anthropic.AI_PROMPT}"""
+
+ if question:
+ user_message = {"role": "user", "content": question}
+
+ # if app_mode == "语音识别":
+ print_chat_message(user_message)
+ chat_history.append(user_message)
+ if uploaded_file:
+ article = uploaded_file.read().decode()
+ chat_history.append({"role": "user", "content": article}) # 添加用户上传的文件内容作为对话历史的一部分
+ response = ol.chat(model=model, messages=chat_history)
+ answer = response['message']['content']
+ ai_message = {"role": "assistant", "content": answer}
+ print_chat_message(ai_message)
+ chat_history.append(ai_message)
+ debug_info = {"messages": chat_history, "response": response}
+
+ if debug_mode:
+ st.write("Debug Info: Complete Prompt Interaction")
+ st.json(debug_info)
+
+ # truncate chat history to keep 20 messages max
+ if len(chat_history) > 20:
+ chat_history = chat_history[-20:]
+
+ # update chat history
+ st.session_state.chat_history[chat_key] = chat_history
From 922bc11677514c97df6c4f93d92887a205a619e4 Mon Sep 17 00:00:00 2001
From: 1308100560
Date: Thu, 9 May 2024 14:57:10 +0800
Subject: [PATCH 3/6] =?UTF-8?q?=E6=9B=B4=E6=96=B0?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
1_File_Q&A.py | 33 ++++++++++++++++++
2_Chat_with_search.py | 48 ++++++++++++++++++++++++++
3_Langchain_Quickstart.py | 22 ++++++++++++
4_Langchain_PromptTemplate.py | 29 ++++++++++++++++
5_Chat_with_user_feedback.py | 65 +++++++++++++++++++++++++++++++++++
5 files changed, 197 insertions(+)
create mode 100644 1_File_Q&A.py
create mode 100644 2_Chat_with_search.py
create mode 100644 3_Langchain_Quickstart.py
create mode 100644 4_Langchain_PromptTemplate.py
create mode 100644 5_Chat_with_user_feedback.py
diff --git a/1_File_Q&A.py b/1_File_Q&A.py
new file mode 100644
index 000000000..417474c4f
--- /dev/null
+++ b/1_File_Q&A.py
@@ -0,0 +1,33 @@
+import streamlit as st
+import anthropic
+
+with st.sidebar:
+ anthropic_api_key = st.text_input("Anthropic API Key", key="file_qa_api_key", type="password")
+ "[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/1_File_Q%26A.py)"
+ "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
+
+st.title("📝 File Q&A with Anthropic")
+uploaded_file = st.file_uploader("Upload an article", type=("txt", "md"))
+question = st.text_input(
+ "Ask something about the article",
+ placeholder="Can you give me a short summary?",
+ disabled=not uploaded_file,
+)
+
+if uploaded_file and question and not anthropic_api_key:
+ st.info("Please add your Anthropic API key to continue.")
+
+if uploaded_file and question and anthropic_api_key:
+ article = uploaded_file.read().decode()
+ prompt = f"""{anthropic.HUMAN_PROMPT} Here's an article:\n\n
+ {article}\n\n\n\n{question}{anthropic.AI_PROMPT}"""
+
+ client = anthropic.Client(api_key=anthropic_api_key)
+ response = client.completions.create(
+ prompt=prompt,
+ stop_sequences=[anthropic.HUMAN_PROMPT],
+ model="claude-v1", # "claude-2" for Claude 2 model
+ max_tokens_to_sample=100,
+ )
+ st.write("### Answer")
+ st.write(response.completion)
diff --git a/2_Chat_with_search.py b/2_Chat_with_search.py
new file mode 100644
index 000000000..399c58219
--- /dev/null
+++ b/2_Chat_with_search.py
@@ -0,0 +1,48 @@
+import streamlit as st
+
+from langchain.agents import initialize_agent, AgentType
+from langchain.callbacks import StreamlitCallbackHandler
+from langchain.chat_models import ChatOpenAI
+from langchain.tools import DuckDuckGoSearchRun
+
+with st.sidebar:
+ openai_api_key = st.text_input(
+ "OpenAI API Key", key="langchain_search_api_key_openai", type="password"
+ )
+ "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
+ "[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/2_Chat_with_search.py)"
+ "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
+
+st.title("🔎 LangChain - Chat with search")
+
+"""
+In this example, we're using `StreamlitCallbackHandler` to display the thoughts and actions of an agent in an interactive Streamlit app.
+Try more LangChain 🤝 Streamlit Agent examples at [github.com/langchain-ai/streamlit-agent](https://github.com/langchain-ai/streamlit-agent).
+"""
+
+if "messages" not in st.session_state:
+ st.session_state["messages"] = [
+ {"role": "assistant", "content": "Hi, I'm a chatbot who can search the web. How can I help you?"}
+ ]
+
+for msg in st.session_state.messages:
+ st.chat_message(msg["role"]).write(msg["content"])
+
+if prompt := st.chat_input(placeholder="Who won the Women's U.S. Open in 2018?"):
+ st.session_state.messages.append({"role": "user", "content": prompt})
+ st.chat_message("user").write(prompt)
+
+ if not openai_api_key:
+ st.info("Please add your OpenAI API key to continue.")
+ st.stop()
+
+ llm = ChatOpenAI(model_name="gpt-3.5-turbo", openai_api_key=openai_api_key, streaming=True)
+ search = DuckDuckGoSearchRun(name="Search")
+ search_agent = initialize_agent(
+ [search], llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True
+ )
+ with st.chat_message("assistant"):
+ st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
+ response = search_agent.run(st.session_state.messages, callbacks=[st_cb])
+ st.session_state.messages.append({"role": "assistant", "content": response})
+ st.write(response)
diff --git a/3_Langchain_Quickstart.py b/3_Langchain_Quickstart.py
new file mode 100644
index 000000000..38c820f24
--- /dev/null
+++ b/3_Langchain_Quickstart.py
@@ -0,0 +1,22 @@
+import streamlit as st
+from langchain.llms import OpenAI
+
+st.title("🦜🔗 Langchain Quickstart App")
+
+with st.sidebar:
+ openai_api_key = st.text_input("OpenAI API Key", type="password")
+ "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
+
+
+def generate_response(input_text):
+ llm = OpenAI(temperature=0.7, openai_api_key=openai_api_key)
+ st.info(llm(input_text))
+
+
+with st.form("my_form"):
+ text = st.text_area("Enter text:", "What are 3 key advice for learning how to code?")
+ submitted = st.form_submit_button("Submit")
+ if not openai_api_key:
+ st.info("Please add your OpenAI API key to continue.")
+ elif submitted:
+ generate_response(text)
diff --git a/4_Langchain_PromptTemplate.py b/4_Langchain_PromptTemplate.py
new file mode 100644
index 000000000..3755419ea
--- /dev/null
+++ b/4_Langchain_PromptTemplate.py
@@ -0,0 +1,29 @@
+import streamlit as st
+from langchain.llms import OpenAI
+from langchain.prompts import PromptTemplate
+
+st.title("🦜🔗 Langchain - Blog Outline Generator App")
+
+openai_api_key = st.sidebar.text_input("OpenAI API Key", type="password")
+
+
+def blog_outline(topic):
+ # Instantiate LLM model
+ llm = OpenAI(model_name="text-davinci-003", openai_api_key=openai_api_key)
+ # Prompt
+ template = "As an experienced data scientist and technical writer, generate an outline for a blog about {topic}."
+ prompt = PromptTemplate(input_variables=["topic"], template=template)
+ prompt_query = prompt.format(topic=topic)
+ # Run LLM model
+ response = llm(prompt_query)
+ # Print results
+ return st.info(response)
+
+
+with st.form("myform"):
+ topic_text = st.text_input("Enter prompt:", "")
+ submitted = st.form_submit_button("Submit")
+ if not openai_api_key:
+ st.info("Please add your OpenAI API key to continue.")
+ elif submitted:
+ blog_outline(topic_text)
diff --git a/5_Chat_with_user_feedback.py b/5_Chat_with_user_feedback.py
new file mode 100644
index 000000000..5f58f139c
--- /dev/null
+++ b/5_Chat_with_user_feedback.py
@@ -0,0 +1,65 @@
+from openai import OpenAI
+import streamlit as st
+from streamlit_feedback import streamlit_feedback
+import trubrics
+
+with st.sidebar:
+ openai_api_key = st.text_input("OpenAI API Key", key="feedback_api_key", type="password")
+ "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
+ "[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/5_Chat_with_user_feedback.py)"
+ "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
+
+st.title("📝 Chat with feedback (Trubrics)")
+
+"""
+In this example, we're using [streamlit-feedback](https://github.com/trubrics/streamlit-feedback) and Trubrics to collect and store feedback
+from the user about the LLM responses.
+"""
+
+if "messages" not in st.session_state:
+ st.session_state.messages = [
+ {"role": "assistant", "content": "How can I help you? Leave feedback to help me improve!"}
+ ]
+if "response" not in st.session_state:
+ st.session_state["response"] = None
+
+messages = st.session_state.messages
+for msg in messages:
+ st.chat_message(msg["role"]).write(msg["content"])
+
+if prompt := st.chat_input(placeholder="Tell me a joke about sharks"):
+ messages.append({"role": "user", "content": prompt})
+ st.chat_message("user").write(prompt)
+
+ if not openai_api_key:
+ st.info("Please add your OpenAI API key to continue.")
+ st.stop()
+ client = OpenAI(api_key=openai_api_key)
+ response = client.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
+ st.session_state["response"] = response.choices[0].message.content
+ with st.chat_message("assistant"):
+ messages.append({"role": "assistant", "content": st.session_state["response"]})
+ st.write(st.session_state["response"])
+
+if st.session_state["response"]:
+ feedback = streamlit_feedback(
+ feedback_type="thumbs",
+ optional_text_label="[Optional] Please provide an explanation",
+ key=f"feedback_{len(messages)}",
+ )
+ # This app is logging feedback to Trubrics backend, but you can send it anywhere.
+ # The return value of streamlit_feedback() is just a dict.
+ # Configure your own account at https://trubrics.streamlit.app/
+ if feedback and "TRUBRICS_EMAIL" in st.secrets:
+ config = trubrics.init(
+ email=st.secrets.TRUBRICS_EMAIL,
+ password=st.secrets.TRUBRICS_PASSWORD,
+ )
+ collection = trubrics.collect(
+ component_name="default",
+ model="gpt",
+ response=feedback,
+ metadata={"chat": messages},
+ )
+ trubrics.save(config, collection)
+ st.toast("Feedback recorded!", icon="📝")
From fa485b3d87beed070468e4390be49ea458138fc9 Mon Sep 17 00:00:00 2001
From: 1308100560
Date: Sat, 11 May 2024 17:07:19 +0800
Subject: [PATCH 4/6] =?UTF-8?q?=E6=96=B0=E5=A2=9E=E5=8E=86=E5=8F=B2?=
=?UTF-8?q?=E6=95=B0=E6=8D=AE=E6=94=B6=E9=9B=86=E3=80=81=E7=BD=91=E6=83=85?=
=?UTF-8?q?=E6=95=B0=E6=8D=AE=E6=94=B6=E9=9B=86=E3=80=81=E4=B8=8A=E7=BA=A7?=
=?UTF-8?q?=E4=BB=BB=E5=8A=A1=E6=95=B0=E6=8D=AE=E6=94=B6=E9=9B=86?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
10_Historical_data_collection.py | 96 ++++++++++++++++++++++++++
11_Internet_data_collection.py | 96 ++++++++++++++++++++++++++
12_Collection_of_superior_task_data.py | 96 ++++++++++++++++++++++++++
9_Patent disclosure document.py | 2 +-
4 files changed, 289 insertions(+), 1 deletion(-)
create mode 100644 10_Historical_data_collection.py
create mode 100644 11_Internet_data_collection.py
create mode 100644 12_Collection_of_superior_task_data.py
diff --git a/10_Historical_data_collection.py b/10_Historical_data_collection.py
new file mode 100644
index 000000000..1402ce9af
--- /dev/null
+++ b/10_Historical_data_collection.py
@@ -0,0 +1,96 @@
+import streamlit as st
+import anthropic
+import ollama as ol
+import streamlit as st
+from streamlit_mic_recorder import speech_to_text
+import datetime
+import json
+
+def log_interaction(action, data):
+ timestamp = datetime.datetime.now().isoformat()
+ log = {"timestamp": timestamp, "action": action, "data": data}
+ with open("user_interactions_log.json", "a") as logfile:
+ logfile.write(json.dumps(log) + "\n")
+
+def print_txt(text):
+ if any("\u0600" <= c <= "\u06FF" for c in text): # check if text contains Arabic characters
+        text = f"<p style='direction: rtl; text-align: right;'>{text}</p>"
+ st.markdown(text, unsafe_allow_html=True)
+
+def print_chat_message(message):
+ text = message["content"]
+ if message["role"] == "user":
+ with st.chat_message("user", avatar="🎙️"):
+ print_txt(text)
+ elif message["role"] == "assistant":
+ with st.chat_message("assistant", avatar="🦙"):
+ print_txt(text)
+
+def get_chat_history(key):
+ return st.session_state.chat_history[key]
+
+def init_chat_history(key, system_prompt):
+ if "chat_history" not in st.session_state:
+ st.session_state.chat_history = {}
+ if key not in st.session_state.chat_history:
+ st.session_state.chat_history[key] = [{"role": "system", "content": system_prompt}]
+
+def system_prompt_input(default_prompt):
+ return st.sidebar.text_area("System Prompt", value=default_prompt, height=100)
+
+def llm_selector():
+ ollama_models = [m['name'] for m in ol.list()['models']]
+ with st.sidebar:
+ return st.selectbox("LLM", ollama_models)
+
+
+
+st.title("🕔 Historical data collection")
+uploaded_file = st.file_uploader("Upload an article", type=("txt"))
+
+
+model = llm_selector()
+chat_key = f"对话_chat_history_{model}" # Unique key for each mode and model
+default_prompt = ("我现在将要给你传送历史数据,你需要整理这个数据然后给我发送清洗后的历史数据。要求格式:json格式。")
+
+system_prompt = system_prompt_input(default_prompt)
+init_chat_history(chat_key, system_prompt)
+chat_history = get_chat_history(chat_key)
+for message in chat_history:
+ print_chat_message(message)
+
+question = st.chat_input()
+
+debug_mode = st.sidebar.checkbox("Debug Mode", value=True)
+log_interaction("User input", {"mode": "对话", "question": question})
+
+if question:
+ prompt = f"""{anthropic.HUMAN_PROMPT} Here's an article:\n\n
+ {question}\n\n\n\n{question}{anthropic.AI_PROMPT}"""
+
+ if question:
+ user_message = {"role": "user", "content": question}
+
+ # if app_mode == "语音识别":
+ print_chat_message(user_message)
+ chat_history.append(user_message)
+ if uploaded_file:
+ article = uploaded_file.read().decode()
+ chat_history.append({"role": "user", "content": article}) # 添加用户上传的文件内容作为对话历史的一部分
+ response = ol.chat(model=model, messages=chat_history)
+ answer = response['message']['content']
+ ai_message = {"role": "assistant", "content": answer}
+ print_chat_message(ai_message)
+ chat_history.append(ai_message)
+ debug_info = {"messages": chat_history, "response": response}
+
+ if debug_mode:
+ st.write("Debug Info: Complete Prompt Interaction")
+ st.json(debug_info)
+
+ # truncate chat history to keep 20 messages max
+ if len(chat_history) > 20:
+ chat_history = chat_history[-20:]
+
+ # update chat history
+ st.session_state.chat_history[chat_key] = chat_history
diff --git a/11_Internet_data_collection.py b/11_Internet_data_collection.py
new file mode 100644
index 000000000..1a05d5504
--- /dev/null
+++ b/11_Internet_data_collection.py
@@ -0,0 +1,96 @@
+import streamlit as st
+import anthropic
+import ollama as ol
+import streamlit as st
+from streamlit_mic_recorder import speech_to_text
+import datetime
+import json
+
+def log_interaction(action, data):
+ timestamp = datetime.datetime.now().isoformat()
+ log = {"timestamp": timestamp, "action": action, "data": data}
+ with open("user_interactions_log.json", "a") as logfile:
+ logfile.write(json.dumps(log) + "\n")
+
+def print_txt(text):
+ if any("\u0600" <= c <= "\u06FF" for c in text): # check if text contains Arabic characters
+        text = f"<p style='direction: rtl; text-align: right;'>{text}</p>"
+ st.markdown(text, unsafe_allow_html=True)
+
+def print_chat_message(message):
+ text = message["content"]
+ if message["role"] == "user":
+ with st.chat_message("user", avatar="🎙️"):
+ print_txt(text)
+ elif message["role"] == "assistant":
+ with st.chat_message("assistant", avatar="🦙"):
+ print_txt(text)
+
+def get_chat_history(key):
+ return st.session_state.chat_history[key]
+
+def init_chat_history(key, system_prompt):
+ if "chat_history" not in st.session_state:
+ st.session_state.chat_history = {}
+ if key not in st.session_state.chat_history:
+ st.session_state.chat_history[key] = [{"role": "system", "content": system_prompt}]
+
+def system_prompt_input(default_prompt):
+ return st.sidebar.text_area("System Prompt", value=default_prompt, height=100)
+
+def llm_selector():
+ ollama_models = [m['name'] for m in ol.list()['models']]
+ with st.sidebar:
+ return st.selectbox("LLM", ollama_models)
+
+
+
+st.title("🖥️ Internet data collection")
+uploaded_file = st.file_uploader("Upload an article", type=("txt"))
+
+
+model = llm_selector()
+chat_key = f"对话_chat_history_{model}" # Unique key for each mode and model
+default_prompt = ("我现在将要给你传送网情数据,你需要提取这个数据特征之后发给我,要求格式:json格式。")
+
+system_prompt = system_prompt_input(default_prompt)
+init_chat_history(chat_key, system_prompt)
+chat_history = get_chat_history(chat_key)
+for message in chat_history:
+ print_chat_message(message)
+
+question = st.chat_input()
+
+debug_mode = st.sidebar.checkbox("Debug Mode", value=True)
+log_interaction("User input", {"mode": "对话", "question": question})
+
+if question:
+ prompt = f"""{anthropic.HUMAN_PROMPT} Here's an article:\n\n
+ {question}\n\n\n\n{question}{anthropic.AI_PROMPT}"""
+
+ if question:
+ user_message = {"role": "user", "content": question}
+
+ # if app_mode == "语音识别":
+ print_chat_message(user_message)
+ chat_history.append(user_message)
+ if uploaded_file:
+ article = uploaded_file.read().decode()
+ chat_history.append({"role": "user", "content": article}) # 添加用户上传的文件内容作为对话历史的一部分
+ response = ol.chat(model=model, messages=chat_history)
+ answer = response['message']['content']
+ ai_message = {"role": "assistant", "content": answer}
+ print_chat_message(ai_message)
+ chat_history.append(ai_message)
+ debug_info = {"messages": chat_history, "response": response}
+
+ if debug_mode:
+ st.write("Debug Info: Complete Prompt Interaction")
+ st.json(debug_info)
+
+ # truncate chat history to keep 20 messages max
+ if len(chat_history) > 20:
+ chat_history = chat_history[-20:]
+
+ # update chat history
+ st.session_state.chat_history[chat_key] = chat_history
diff --git a/12_Collection_of_superior_task_data.py b/12_Collection_of_superior_task_data.py
new file mode 100644
index 000000000..35ab8680c
--- /dev/null
+++ b/12_Collection_of_superior_task_data.py
@@ -0,0 +1,96 @@
+import streamlit as st
+import anthropic
+import ollama as ol
+import streamlit as st
+from streamlit_mic_recorder import speech_to_text
+import datetime
+import json
+
def log_interaction(action, data):
    """Append a timestamped JSON-lines record of a user interaction.

    Args:
        action: short label for what happened (e.g. "User input").
        data: JSON-serializable payload describing the event.
    """
    timestamp = datetime.datetime.now().isoformat()
    log = {"timestamp": timestamp, "action": action, "data": data}
    # utf-8 + ensure_ascii=False keeps the Chinese mode names readable in the
    # log instead of \uXXXX escapes, regardless of the platform's default
    # encoding.
    with open("user_interactions_log.json", "a", encoding="utf-8") as logfile:
        logfile.write(json.dumps(log, ensure_ascii=False) + "\n")
+
def print_txt(text):
    """Render *text* via st.markdown, right-to-left when it contains Arabic."""
    # U+0600..U+06FF is the Arabic Unicode block.
    if any("\u0600" <= c <= "\u06FF" for c in text):
        # Wrap in an RTL, right-aligned paragraph. The original HTML wrapper
        # was mangled in this patch (string split across lines, tags stripped);
        # reconstructed from the unsafe_allow_html intent — confirm rendering.
        text = f"<p style='direction: rtl; text-align: right;'>{text}</p>"
    st.markdown(text, unsafe_allow_html=True)
+
def print_chat_message(message):
    """Display one chat message in a role-specific bubble with an avatar.

    Messages whose role is neither "user" nor "assistant" (e.g. "system")
    are not rendered.
    """
    avatars = {"user": "🎙️", "assistant": "🦙"}
    role = message["role"]
    if role in avatars:
        with st.chat_message(role, avatar=avatars[role]):
            print_txt(message["content"])
+
def get_chat_history(key):
    """Return the stored message list for *key* from Streamlit session state."""
    histories = st.session_state.chat_history
    return histories[key]
+
def init_chat_history(key, system_prompt):
    """Ensure session state holds a history for *key*, seeded with the system prompt."""
    state = st.session_state
    if "chat_history" not in state:
        state.chat_history = {}
    # Seed a fresh conversation only the first time this key is seen.
    state.chat_history.setdefault(
        key, [{"role": "system", "content": system_prompt}]
    )
+
def system_prompt_input(default_prompt):
    """Sidebar text area letting the user edit the system prompt; returns its value."""
    return st.sidebar.text_area(
        "System Prompt",
        value=default_prompt,
        height=100,
    )
+
def llm_selector():
    """Sidebar selectbox over the locally installed Ollama model names."""
    installed = ol.list()["models"]
    names = [entry["name"] for entry in installed]
    with st.sidebar:
        return st.selectbox("LLM", names)
+
+
+
# --- Collection of superior task data page -----------------------------------
# Streamlit chat page: the user picks a local Ollama model, optionally uploads
# a .txt article, and chats; the (editable) system prompt asks the model to
# clean superior-task data and answer in JSON.

st.title("🪪 Collection of superior task data")
uploaded_file = st.file_uploader("Upload an article", type=("txt"))

model = llm_selector()
# One history per model so switching models does not mix conversations.
chat_key = f"对话_chat_history_{model}"  # Unique key for each mode and model
default_prompt = ("我现在将要给你传送上级任务数据,你需要整理这个数据然后给我发送清洗后的任务数据。要求格式:json格式。")

system_prompt = system_prompt_input(default_prompt)
init_chat_history(chat_key, system_prompt)
chat_history = get_chat_history(chat_key)

# Streamlit re-executes the whole script on every interaction, so replay the
# stored conversation each run.
for message in chat_history:
    print_chat_message(message)

question = st.chat_input()

debug_mode = st.sidebar.checkbox("Debug Mode", value=True)

if question:
    # Log only when the user actually typed something; previously this logged
    # a null question on every rerun.
    log_interaction("User input", {"mode": "对话", "question": question})

    user_message = {"role": "user", "content": question}
    print_chat_message(user_message)
    chat_history.append(user_message)

    # Attach the uploaded article as additional user context for the model.
    if uploaded_file:
        article = uploaded_file.read().decode()
        chat_history.append({"role": "user", "content": article})

    response = ol.chat(model=model, messages=chat_history)
    answer = response['message']['content']
    ai_message = {"role": "assistant", "content": answer}
    print_chat_message(ai_message)
    chat_history.append(ai_message)

    if debug_mode:
        st.write("Debug Info: Complete Prompt Interaction")
        st.json({"messages": chat_history, "response": response})

    # Truncate to 20 messages max, but always keep the system prompt at
    # index 0 — the old code dropped it, silently changing model behavior.
    if len(chat_history) > 20:
        chat_history = chat_history[:1] + chat_history[-19:]

    # Persist the (possibly truncated) history back to session state.
    st.session_state.chat_history[chat_key] = chat_history
diff --git a/9_Patent disclosure document.py b/9_Patent disclosure document.py
index 7b3fde084..eb8814c5b 100644
--- a/9_Patent disclosure document.py
+++ b/9_Patent disclosure document.py
@@ -46,7 +46,7 @@ def llm_selector():
st.title("📝 Patent disclosure document")
-uploaded_file = st.file_uploader("Upload an article", type=("txt", "md", "docx"))
+uploaded_file = st.file_uploader("Upload an article", type=("txt"))
model = llm_selector()
From f16469c9774365d380e88ae572b855c09dcb48d5 Mon Sep 17 00:00:00 2001
From: 1308100560
Date: Sat, 11 May 2024 17:42:50 +0800
Subject: [PATCH 5/6] =?UTF-8?q?=E6=96=B0=E5=A2=9E=E5=8E=86=E5=8F=B2?=
=?UTF-8?q?=E6=95=B0=E6=8D=AE=E6=94=B6=E9=9B=86=E3=80=81=E7=BD=91=E6=83=85?=
=?UTF-8?q?=E6=95=B0=E6=8D=AE=E6=94=B6=E9=9B=86=E3=80=81=E4=B8=8A=E7=BA=A7?=
=?UTF-8?q?=E4=BB=BB=E5=8A=A1=E6=95=B0=E6=8D=AE=E6=94=B6=E9=9B=86?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
12_Collection_of_superior_task_data.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/12_Collection_of_superior_task_data.py b/12_Collection_of_superior_task_data.py
index 35ab8680c..ebc2133b1 100644
--- a/12_Collection_of_superior_task_data.py
+++ b/12_Collection_of_superior_task_data.py
@@ -45,7 +45,7 @@ def llm_selector():
-st.title("🪪 Collection of superior task data")
+st.title("🪪 ollection of superior task data")
uploaded_file = st.file_uploader("Upload an article", type=("txt"))
From 06453396126f2b6278d318aa0708b2d763e055b7 Mon Sep 17 00:00:00 2001
From: 1308100560
Date: Sat, 11 May 2024 17:43:54 +0800
Subject: [PATCH 6/6] =?UTF-8?q?=E6=9B=B4=E6=96=B0?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
12_Collection_of_superior_task_data.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/12_Collection_of_superior_task_data.py b/12_Collection_of_superior_task_data.py
index ebc2133b1..35ab8680c 100644
--- a/12_Collection_of_superior_task_data.py
+++ b/12_Collection_of_superior_task_data.py
@@ -45,7 +45,7 @@ def llm_selector():
-st.title("🪪 ollection of superior task data")
+st.title("🪪 Collection of superior task data")
uploaded_file = st.file_uploader("Upload an article", type=("txt"))