diff --git a/10_Historical_data_collection.py b/10_Historical_data_collection.py
new file mode 100644
index 000000000..1402ce9af
--- /dev/null
+++ b/10_Historical_data_collection.py
@@ -0,0 +1,96 @@
+import streamlit as st
+import ollama as ol
+import datetime
+import json
+
+def log_interaction(action, data):
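+ """Append one timestamped JSON line per user action to user_interactions_log.json."""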
+ timestamp = datetime.datetime.now().isoformat()
+ log = {"timestamp": timestamp, "action": action, "data": data}
+ with open("user_interactions_log.json", "a") as logfile:
+ logfile.write(json.dumps(log) + "\n")
+
+def print_txt(text):
+    if any("\u0600" <= c <= "\u06FF" for c in text):  # check if text contains Arabic characters
+        text = f"<p style='direction: rtl; text-align: right;'>{text}</p>"
+    st.markdown(text, unsafe_allow_html=True)
+
+def print_chat_message(message):
+ text = message["content"]
+ if message["role"] == "user":
+ with st.chat_message("user", avatar="🎙️"):
+ print_txt(text)
+ elif message["role"] == "assistant":
+ with st.chat_message("assistant", avatar="🦙"):
+ print_txt(text)
+
+def get_chat_history(key):
+ return st.session_state.chat_history[key]
+
+def init_chat_history(key, system_prompt):
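+ """Create the session-wide history store on first run and seed this key with the system prompt."""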
+ if "chat_history" not in st.session_state:
+ st.session_state.chat_history = {}
+ if key not in st.session_state.chat_history:
+ st.session_state.chat_history[key] = [{"role": "system", "content": system_prompt}]
+
+def system_prompt_input(default_prompt):
+ return st.sidebar.text_area("System Prompt", value=default_prompt, height=100)
+
+def llm_selector():
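+ # ol.list() queries the local Ollama server for the models that have been pulled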
+ ollama_models = [m['name'] for m in ol.list()['models']]
+ with st.sidebar:
+ return st.selectbox("LLM", ollama_models)
+
+
+
+st.title("🕔 Historical data collection")
+uploaded_file = st.file_uploader("Upload an article", type=("txt"))
+
+
+model = llm_selector()
+chat_key = f"对话_chat_history_{model}" # Unique key for each mode and model
+default_prompt = ("我现在将要给你传送历史数据,你需要整理这个数据然后给我发送清洗后的历史数据。要求格式:json格式。")
+
+system_prompt = system_prompt_input(default_prompt)
+init_chat_history(chat_key, system_prompt)
+chat_history = get_chat_history(chat_key)
+for message in chat_history:
+ print_chat_message(message)
+
+question = st.chat_input()
+
+debug_mode = st.sidebar.checkbox("Debug Mode", value=True)
+log_interaction("User input", {"mode": "对话", "question": question})
+
+if question:
+ prompt = f"""{anthropic.HUMAN_PROMPT} Here's an article:\n\n
+ {question}\n\n\n\n{question}{anthropic.AI_PROMPT}"""
+
+ if question:
+ user_message = {"role": "user", "content": question}
+
+ # if app_mode == "语音识别":
+ print_chat_message(user_message)
+ chat_history.append(user_message)
+ if uploaded_file:
+ article = uploaded_file.read().decode()
+ chat_history.append({"role": "user", "content": article}) # 添加用户上传的文件内容作为对话历史的一部分
+ response = ol.chat(model=model, messages=chat_history)
+ answer = response['message']['content']
+ ai_message = {"role": "assistant", "content": answer}
+ print_chat_message(ai_message)
+ chat_history.append(ai_message)
+ debug_info = {"messages": chat_history, "response": response}
+
+ if debug_mode:
+ st.write("Debug Info: Complete Prompt Interaction")
+ st.json(debug_info)
+
+ # truncate chat history to keep 20 messages max
+ if len(chat_history) > 20:
+ chat_history = chat_history[-20:]
+
+ # update chat history
+ st.session_state.chat_history[chat_key] = chat_history
diff --git a/11_Internet_data_collection.py b/11_Internet_data_collection.py
new file mode 100644
index 000000000..1a05d5504
--- /dev/null
+++ b/11_Internet_data_collection.py
@@ -0,0 +1,96 @@
+import streamlit as st
+import ollama as ol
+import datetime
+import json
+
+def log_interaction(action, data):
+ timestamp = datetime.datetime.now().isoformat()
+ log = {"timestamp": timestamp, "action": action, "data": data}
+ with open("user_interactions_log.json", "a") as logfile:
+ logfile.write(json.dumps(log) + "\n")
+
+def print_txt(text):
+    if any("\u0600" <= c <= "\u06FF" for c in text):  # check if text contains Arabic characters
+        text = f"<p style='direction: rtl; text-align: right;'>{text}</p>"
+    st.markdown(text, unsafe_allow_html=True)
+
+def print_chat_message(message):
+ text = message["content"]
+ if message["role"] == "user":
+ with st.chat_message("user", avatar="🎙️"):
+ print_txt(text)
+ elif message["role"] == "assistant":
+ with st.chat_message("assistant", avatar="🦙"):
+ print_txt(text)
+
+def get_chat_history(key):
+ return st.session_state.chat_history[key]
+
+def init_chat_history(key, system_prompt):
+ if "chat_history" not in st.session_state:
+ st.session_state.chat_history = {}
+ if key not in st.session_state.chat_history:
+ st.session_state.chat_history[key] = [{"role": "system", "content": system_prompt}]
+
+def system_prompt_input(default_prompt):
+ return st.sidebar.text_area("System Prompt", value=default_prompt, height=100)
+
+def llm_selector():
+ ollama_models = [m['name'] for m in ol.list()['models']]
+ with st.sidebar:
+ return st.selectbox("LLM", ollama_models)
+
+
+
+st.title("🖥️ Internet data collection")
+uploaded_file = st.file_uploader("Upload an article", type=("txt"))
+
+
+model = llm_selector()
+chat_key = f"对话_chat_history_{model}" # Unique key for each mode and model
+default_prompt = ("我现在将要给你传送网情数据,你需要提取这个数据特征之后发给我,要求格式:json格式。")
+
+system_prompt = system_prompt_input(default_prompt)
+init_chat_history(chat_key, system_prompt)
+chat_history = get_chat_history(chat_key)
+for message in chat_history:
+ print_chat_message(message)
+
+question = st.chat_input()
+
+debug_mode = st.sidebar.checkbox("Debug Mode", value=True)
+log_interaction("User input", {"mode": "对话", "question": question})
+
+if question:
+ prompt = f"""{anthropic.HUMAN_PROMPT} Here's an article:\n\n
+ {question}\n\n\n\n{question}{anthropic.AI_PROMPT}"""
+
+ if question:
+ user_message = {"role": "user", "content": question}
+
+ # if app_mode == "语音识别":
+ print_chat_message(user_message)
+ chat_history.append(user_message)
+ if uploaded_file:
+ article = uploaded_file.read().decode()
+ chat_history.append({"role": "user", "content": article}) # 添加用户上传的文件内容作为对话历史的一部分
+ response = ol.chat(model=model, messages=chat_history)
+ answer = response['message']['content']
+ ai_message = {"role": "assistant", "content": answer}
+ print_chat_message(ai_message)
+ chat_history.append(ai_message)
+ debug_info = {"messages": chat_history, "response": response}
+
+ if debug_mode:
+ st.write("Debug Info: Complete Prompt Interaction")
+ st.json(debug_info)
+
+ # truncate chat history to keep 20 messages max
+ if len(chat_history) > 20:
+ chat_history = chat_history[-20:]
+
+ # update chat history
+ st.session_state.chat_history[chat_key] = chat_history
diff --git a/12_Collection_of_superior_task_data.py b/12_Collection_of_superior_task_data.py
new file mode 100644
index 000000000..35ab8680c
--- /dev/null
+++ b/12_Collection_of_superior_task_data.py
@@ -0,0 +1,96 @@
+import streamlit as st
+import ollama as ol
+import datetime
+import json
+
+def log_interaction(action, data):
+ timestamp = datetime.datetime.now().isoformat()
+ log = {"timestamp": timestamp, "action": action, "data": data}
+ with open("user_interactions_log.json", "a") as logfile:
+ logfile.write(json.dumps(log) + "\n")
+
+def print_txt(text):
+    if any("\u0600" <= c <= "\u06FF" for c in text):  # check if text contains Arabic characters
+        text = f"<p style='direction: rtl; text-align: right;'>{text}</p>"
+    st.markdown(text, unsafe_allow_html=True)
+
+def print_chat_message(message):
+ text = message["content"]
+ if message["role"] == "user":
+ with st.chat_message("user", avatar="🎙️"):
+ print_txt(text)
+ elif message["role"] == "assistant":
+ with st.chat_message("assistant", avatar="🦙"):
+ print_txt(text)
+
+def get_chat_history(key):
+ return st.session_state.chat_history[key]
+
+def init_chat_history(key, system_prompt):
+ if "chat_history" not in st.session_state:
+ st.session_state.chat_history = {}
+ if key not in st.session_state.chat_history:
+ st.session_state.chat_history[key] = [{"role": "system", "content": system_prompt}]
+
+def system_prompt_input(default_prompt):
+ return st.sidebar.text_area("System Prompt", value=default_prompt, height=100)
+
+def llm_selector():
+ ollama_models = [m['name'] for m in ol.list()['models']]
+ with st.sidebar:
+ return st.selectbox("LLM", ollama_models)
+
+
+
+st.title("🪪 Collection of superior task data")
+uploaded_file = st.file_uploader("Upload an article", type=("txt"))
+
+
+model = llm_selector()
+chat_key = f"对话_chat_history_{model}" # Unique key for each mode and model
+default_prompt = ("我现在将要给你传送上级任务数据,你需要整理这个数据然后给我发送清洗后的任务数据。要求格式:json格式。")
+
+system_prompt = system_prompt_input(default_prompt)
+init_chat_history(chat_key, system_prompt)
+chat_history = get_chat_history(chat_key)
+for message in chat_history:
+ print_chat_message(message)
+
+question = st.chat_input()
+
+debug_mode = st.sidebar.checkbox("Debug Mode", value=True)
+log_interaction("User input", {"mode": "对话", "question": question})
+
+if question:
+ prompt = f"""{anthropic.HUMAN_PROMPT} Here's an article:\n\n
+ {question}\n\n\n\n{question}{anthropic.AI_PROMPT}"""
+
+ if question:
+ user_message = {"role": "user", "content": question}
+
+ # if app_mode == "语音识别":
+ print_chat_message(user_message)
+ chat_history.append(user_message)
+ if uploaded_file:
+ article = uploaded_file.read().decode()
+ chat_history.append({"role": "user", "content": article}) # 添加用户上传的文件内容作为对话历史的一部分
+ response = ol.chat(model=model, messages=chat_history)
+ answer = response['message']['content']
+ ai_message = {"role": "assistant", "content": answer}
+ print_chat_message(ai_message)
+ chat_history.append(ai_message)
+ debug_info = {"messages": chat_history, "response": response}
+
+ if debug_mode:
+ st.write("Debug Info: Complete Prompt Interaction")
+ st.json(debug_info)
+
+ # truncate chat history to keep 20 messages max
+ if len(chat_history) > 20:
+ chat_history = chat_history[-20:]
+
+ # update chat history
+ st.session_state.chat_history[chat_key] = chat_history
diff --git a/1_File_Q&A.py b/1_File_Q&A.py
new file mode 100644
index 000000000..417474c4f
--- /dev/null
+++ b/1_File_Q&A.py
@@ -0,0 +1,33 @@
+import streamlit as st
+import anthropic
+
+with st.sidebar:
+ anthropic_api_key = st.text_input("Anthropic API Key", key="file_qa_api_key", type="password")
+ "[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/1_File_Q%26A.py)"
+ "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
+
+st.title("📝 File Q&A with Anthropic")
+uploaded_file = st.file_uploader("Upload an article", type=("txt", "md"))
+question = st.text_input(
+ "Ask something about the article",
+ placeholder="Can you give me a short summary?",
+ disabled=not uploaded_file,
+)
+
+if uploaded_file and question and not anthropic_api_key:
+ st.info("Please add your Anthropic API key to continue.")
+
+if uploaded_file and question and anthropic_api_key:
+ article = uploaded_file.read().decode()
+ prompt = f"""{anthropic.HUMAN_PROMPT} Here's an article:\n\n
+ {article}\n\n\n\n{question}{anthropic.AI_PROMPT}"""
+
+ client = anthropic.Client(api_key=anthropic_api_key)
+ response = client.completions.create(
+ prompt=prompt,
+ stop_sequences=[anthropic.HUMAN_PROMPT],
+ model="claude-v1", # "claude-2" for Claude 2 model
+ max_tokens_to_sample=100,
+ )
+ st.write("### Answer")
+ st.write(response.completion)
diff --git a/2_Chat_with_search.py b/2_Chat_with_search.py
new file mode 100644
index 000000000..399c58219
--- /dev/null
+++ b/2_Chat_with_search.py
@@ -0,0 +1,48 @@
+import streamlit as st
+
+from langchain.agents import initialize_agent, AgentType
+from langchain.callbacks import StreamlitCallbackHandler
+from langchain.chat_models import ChatOpenAI
+from langchain.tools import DuckDuckGoSearchRun
+
+with st.sidebar:
+ openai_api_key = st.text_input(
+ "OpenAI API Key", key="langchain_search_api_key_openai", type="password"
+ )
+ "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
+ "[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/2_Chat_with_search.py)"
+ "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
+
+st.title("🔎 LangChain - Chat with search")
+
+"""
+In this example, we're using `StreamlitCallbackHandler` to display the thoughts and actions of an agent in an interactive Streamlit app.
+Try more LangChain 🤝 Streamlit Agent examples at [github.com/langchain-ai/streamlit-agent](https://github.com/langchain-ai/streamlit-agent).
+"""
+
+if "messages" not in st.session_state:
+ st.session_state["messages"] = [
+ {"role": "assistant", "content": "Hi, I'm a chatbot who can search the web. How can I help you?"}
+ ]
+
+for msg in st.session_state.messages:
+ st.chat_message(msg["role"]).write(msg["content"])
+
+if prompt := st.chat_input(placeholder="Who won the Women's U.S. Open in 2018?"):
+ st.session_state.messages.append({"role": "user", "content": prompt})
+ st.chat_message("user").write(prompt)
+
+ if not openai_api_key:
+ st.info("Please add your OpenAI API key to continue.")
+ st.stop()
+
+ llm = ChatOpenAI(model_name="gpt-3.5-turbo", openai_api_key=openai_api_key, streaming=True)
+ search = DuckDuckGoSearchRun(name="Search")
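+ # a zero-shot ReAct agent that can decide to call the DuckDuckGo search tool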
+ search_agent = initialize_agent(
+ [search], llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True
+ )
+ with st.chat_message("assistant"):
+ st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
+ response = search_agent.run(st.session_state.messages, callbacks=[st_cb])
+ st.session_state.messages.append({"role": "assistant", "content": response})
+ st.write(response)
diff --git a/3_Langchain_Quickstart.py b/3_Langchain_Quickstart.py
new file mode 100644
index 000000000..38c820f24
--- /dev/null
+++ b/3_Langchain_Quickstart.py
@@ -0,0 +1,22 @@
+import streamlit as st
+from langchain.llms import OpenAI
+
+st.title("🦜🔗 Langchain Quickstart App")
+
+with st.sidebar:
+ openai_api_key = st.text_input("OpenAI API Key", type="password")
+ "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
+
+
+def generate_response(input_text):
+ llm = OpenAI(temperature=0.7, openai_api_key=openai_api_key)
+ st.info(llm(input_text))
+
+
+with st.form("my_form"):
+ text = st.text_area("Enter text:", "What are 3 key pieces of advice for learning how to code?")
+ submitted = st.form_submit_button("Submit")
+ if not openai_api_key:
+ st.info("Please add your OpenAI API key to continue.")
+ elif submitted:
+ generate_response(text)
diff --git a/4_Langchain_PromptTemplate.py b/4_Langchain_PromptTemplate.py
new file mode 100644
index 000000000..3755419ea
--- /dev/null
+++ b/4_Langchain_PromptTemplate.py
@@ -0,0 +1,29 @@
+import streamlit as st
+from langchain.llms import OpenAI
+from langchain.prompts import PromptTemplate
+
+st.title("🦜🔗 Langchain - Blog Outline Generator App")
+
+openai_api_key = st.sidebar.text_input("OpenAI API Key", type="password")
+
+
+def blog_outline(topic):
+ # Instantiate LLM model
+ llm = OpenAI(model_name="text-davinci-003", openai_api_key=openai_api_key)
+ # Prompt
+ template = "As an experienced data scientist and technical writer, generate an outline for a blog about {topic}."
+ prompt = PromptTemplate(input_variables=["topic"], template=template)
+ prompt_query = prompt.format(topic=topic)
+ # Run LLM model
+ response = llm(prompt_query)
+ # Print results
+ return st.info(response)
+
+
+with st.form("myform"):
+ topic_text = st.text_input("Enter prompt:", "")
+ submitted = st.form_submit_button("Submit")
+ if not openai_api_key:
+ st.info("Please add your OpenAI API key to continue.")
+ elif submitted:
+ blog_outline(topic_text)
diff --git a/5_Chat_with_user_feedback.py b/5_Chat_with_user_feedback.py
new file mode 100644
index 000000000..5f58f139c
--- /dev/null
+++ b/5_Chat_with_user_feedback.py
@@ -0,0 +1,65 @@
+from openai import OpenAI
+import streamlit as st
+from streamlit_feedback import streamlit_feedback
+import trubrics
+
+with st.sidebar:
+ openai_api_key = st.text_input("OpenAI API Key", key="feedback_api_key", type="password")
+ "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
+ "[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/5_Chat_with_user_feedback.py)"
+ "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
+
+st.title("📝 Chat with feedback (Trubrics)")
+
+"""
+In this example, we're using [streamlit-feedback](https://github.com/trubrics/streamlit-feedback) and Trubrics to collect and store feedback
+from the user about the LLM responses.
+"""
+
+if "messages" not in st.session_state:
+ st.session_state.messages = [
+ {"role": "assistant", "content": "How can I help you? Leave feedback to help me improve!"}
+ ]
+if "response" not in st.session_state:
+ st.session_state["response"] = None
+
+messages = st.session_state.messages
+for msg in messages:
+ st.chat_message(msg["role"]).write(msg["content"])
+
+if prompt := st.chat_input(placeholder="Tell me a joke about sharks"):
+ messages.append({"role": "user", "content": prompt})
+ st.chat_message("user").write(prompt)
+
+ if not openai_api_key:
+ st.info("Please add your OpenAI API key to continue.")
+ st.stop()
+ client = OpenAI(api_key=openai_api_key)
+ response = client.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
+ st.session_state["response"] = response.choices[0].message.content
+ with st.chat_message("assistant"):
+ messages.append({"role": "assistant", "content": st.session_state["response"]})
+ st.write(st.session_state["response"])
+
+if st.session_state["response"]:
+ feedback = streamlit_feedback(
+ feedback_type="thumbs",
+ optional_text_label="[Optional] Please provide an explanation",
+ key=f"feedback_{len(messages)}",
+ )
+ # This app is logging feedback to Trubrics backend, but you can send it anywhere.
+ # The return value of streamlit_feedback() is just a dict.
+ # Configure your own account at https://trubrics.streamlit.app/
+ if feedback and "TRUBRICS_EMAIL" in st.secrets:
+ config = trubrics.init(
+ email=st.secrets.TRUBRICS_EMAIL,
+ password=st.secrets.TRUBRICS_PASSWORD,
+ )
+ collection = trubrics.collect(
+ component_name="default",
+ model="gpt",
+ response=feedback,
+ metadata={"chat": messages},
+ )
+ trubrics.save(config, collection)
+ st.toast("Feedback recorded!", icon="📝")
diff --git a/6_Speech_Recognition.py b/6_Speech_Recognition.py
new file mode 100644
index 000000000..ef2dacd24
--- /dev/null
+++ b/6_Speech_Recognition.py
@@ -0,0 +1,123 @@
+import streamlit as st
+import ollama as ol
+from streamlit_mic_recorder import speech_to_text
+import datetime
+import json
+
+def log_interaction(action, data):
+ timestamp = datetime.datetime.now().isoformat()
+ log = {"timestamp": timestamp, "action": action, "data": data}
+ with open("user_interactions_log.json", "a") as logfile:
+ logfile.write(json.dumps(log) + "\n")
+
+def language_selector():
+    lang_options = ["ar", "de", "en", "es", "fr", "it", "ja", "nl", "pl", "pt", "ru", "zh"]
+    with st.sidebar:
+        return st.selectbox("Speech Language", lang_options, index=lang_options.index("en"))
+
+def print_txt(text):
+    if any("\u0600" <= c <= "\u06FF" for c in text):  # check if text contains Arabic characters
+        text = f"<p style='direction: rtl; text-align: right;'>{text}</p>"
+    st.markdown(text, unsafe_allow_html=True)
+
+def print_chat_message(message):
+ text = message["content"]
+ if message["role"] == "user":
+ with st.chat_message("user", avatar="🎙️"):
+ print_txt(text)
+ elif message["role"] == "assistant":
+ with st.chat_message("assistant", avatar="🦙"):
+ print_txt(text)
+
+def get_chat_history(key):
+ return st.session_state.chat_history[key]
+
+def init_chat_history(key, system_prompt):
+ if "chat_history" not in st.session_state:
+ st.session_state.chat_history = {}
+ if key not in st.session_state.chat_history:
+ st.session_state.chat_history[key] = [{"role": "system", "content": system_prompt}]
+
+def system_prompt_input(default_prompt):
+ return st.sidebar.text_area("System Prompt", value=default_prompt, height=100)
+
+def record_voice(language="en"):
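+ """Record speech from the microphone and return the recognized text, or None if nothing was captured."""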
+ # https://github.com/B4PT0R/streamlit-mic-recorder?tab=readme-ov-file#example
+
+ state = st.session_state
+
+ if "text_received" not in state:
+ state.text_received = []
+
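+ # speech_to_text renders a mic button and returns one transcript per recording (just_once=True)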
+ text = speech_to_text(
+ start_prompt="🎤 Click and speak to ask question",
+ stop_prompt="⚠️Stop recording🚨",
+ language=language,
+ use_container_width=True,
+ just_once=True,
+ )
+
+ if text:
+ state.text_received.append(text)
+
+ result = ""
+ for text in state.text_received:
+ result += text
+
+ state.text_received = []
+
+ return result if result else None
+
+def llm_selector():
+ ollama_models = [m['name'] for m in ol.list()['models']]
+ with st.sidebar:
+ return st.selectbox("LLM", ollama_models)
+
+
+
+st.title("🎙 语音识别")
+
+
+model = llm_selector()
+chat_key = f"语音识别_chat_history_{model}" # Unique key for each mode and model
+default_prompt = "你是一名语音识别助手,请把语音识别出的文字加上标点符号输出,不得改变原文。"
+
+system_prompt = system_prompt_input(default_prompt)
+init_chat_history(chat_key, system_prompt)
+chat_history = get_chat_history(chat_key)
+for message in chat_history:
+ print_chat_message(message)
+
+question = record_voice(language=language_selector())
+
+debug_mode = st.sidebar.checkbox("Debug Mode", value=True)
+log_interaction("User input", {"mode": "语音识别", "question": question})
+
+if question:
+ prompt = f"""{anthropic.HUMAN_PROMPT} Here's an article:\n\n
+ {question}\n\n\n\n{question}{anthropic.AI_PROMPT}"""
+
+ if question:
+ user_message = {"role": "user", "content": question}
+ # if app_mode == "语音识别":
+ print_chat_message(user_message)
+ chat_history.append(user_message)
+ response = ol.chat(model=model, messages=chat_history)
+ answer = response['message']['content']
+ ai_message = {"role": "assistant", "content": answer}
+ print_chat_message(ai_message)
+ chat_history.append(ai_message)
+ debug_info = {"messages": chat_history, "response": response}
+
+ if debug_mode:
+ st.write("Debug Info: Complete Prompt Interaction")
+ st.json(debug_info)
+
+ # truncate chat history to keep 20 messages max
+ if len(chat_history) > 20:
+ chat_history = chat_history[-20:]
+
+ # update chat history
+ st.session_state.chat_history[chat_key] = chat_history
diff --git a/7_Text_error_correction.py b/7_Text_error_correction.py
new file mode 100644
index 000000000..328c28bb1
--- /dev/null
+++ b/7_Text_error_correction.py
@@ -0,0 +1,91 @@
+import streamlit as st
+import ollama as ol
+import datetime
+import json
+
+def log_interaction(action, data):
+ timestamp = datetime.datetime.now().isoformat()
+ log = {"timestamp": timestamp, "action": action, "data": data}
+ with open("user_interactions_log.json", "a") as logfile:
+ logfile.write(json.dumps(log) + "\n")
+
+def print_txt(text):
+    if any("\u0600" <= c <= "\u06FF" for c in text):  # check if text contains Arabic characters
+        text = f"<p style='direction: rtl; text-align: right;'>{text}</p>"
+    st.markdown(text, unsafe_allow_html=True)
+
+def print_chat_message(message):
+ text = message["content"]
+ if message["role"] == "user":
+ with st.chat_message("user", avatar="🎙️"):
+ print_txt(text)
+ elif message["role"] == "assistant":
+ with st.chat_message("assistant", avatar="🦙"):
+ print_txt(text)
+
+def get_chat_history(key):
+ return st.session_state.chat_history[key]
+
+def init_chat_history(key, system_prompt):
+ if "chat_history" not in st.session_state:
+ st.session_state.chat_history = {}
+ if key not in st.session_state.chat_history:
+ st.session_state.chat_history[key] = [{"role": "system", "content": system_prompt}]
+
+def system_prompt_input(default_prompt):
+ return st.sidebar.text_area("System Prompt", value=default_prompt, height=100)
+
+def llm_selector():
+ ollama_models = [m['name'] for m in ol.list()['models']]
+ with st.sidebar:
+ return st.selectbox("LLM", ollama_models)
+
+
+
+st.title("📝 文本纠错")
+
+
+model = llm_selector()
+chat_key = f"文本纠错_chat_history_{model}" # Unique key for each mode and model
+default_prompt = "你是一名文本纠错助手,请将下列语句中的错字、多字、少字修改正确,只输出修改后的句子,如果没有需要修改的地方,直接回复原句。不必做解释,直接输出即可。。"
+
+system_prompt = system_prompt_input(default_prompt)
+init_chat_history(chat_key, system_prompt)
+chat_history = get_chat_history(chat_key)
+for message in chat_history:
+ print_chat_message(message)
+
+question = st.text_input("Enter text for correction")
+
+debug_mode = st.sidebar.checkbox("Debug Mode", value=True)
+log_interaction("User input", {"mode": "文本纠错", "question": question})
+
+if question:
+ prompt = f"""{anthropic.HUMAN_PROMPT} Here's an article:\n\n
+ {question}\n\n\n\n{question}{anthropic.AI_PROMPT}"""
+
+ if question:
+ user_message = {"role": "user", "content": question}
+ # if app_mode == "语音识别":
+ print_chat_message(user_message)
+ chat_history.append(user_message)
+ response = ol.chat(model=model, messages=chat_history)
+ answer = response['message']['content']
+ ai_message = {"role": "assistant", "content": answer}
+ print_chat_message(ai_message)
+ chat_history.append(ai_message)
+ debug_info = {"messages": chat_history, "response": response}
+
+ if debug_mode:
+ st.write("Debug Info: Complete Prompt Interaction")
+ st.json(debug_info)
+
+ # truncate chat history to keep 20 messages max
+ if len(chat_history) > 20:
+ chat_history = chat_history[-20:]
+
+ # update chat history
+ st.session_state.chat_history[chat_key] = chat_history
diff --git a/8_dialogue.py b/8_dialogue.py
new file mode 100644
index 000000000..8cb8b0375
--- /dev/null
+++ b/8_dialogue.py
@@ -0,0 +1,91 @@
+import streamlit as st
+import ollama as ol
+import datetime
+import json
+
+def log_interaction(action, data):
+ timestamp = datetime.datetime.now().isoformat()
+ log = {"timestamp": timestamp, "action": action, "data": data}
+ with open("user_interactions_log.json", "a") as logfile:
+ logfile.write(json.dumps(log) + "\n")
+
+def print_txt(text):
+    if any("\u0600" <= c <= "\u06FF" for c in text):  # check if text contains Arabic characters
+        text = f"<p style='direction: rtl; text-align: right;'>{text}</p>"
+    st.markdown(text, unsafe_allow_html=True)
+
+def print_chat_message(message):
+ text = message["content"]
+ if message["role"] == "user":
+ with st.chat_message("user", avatar="🎙️"):
+ print_txt(text)
+ elif message["role"] == "assistant":
+ with st.chat_message("assistant", avatar="🦙"):
+ print_txt(text)
+
+def get_chat_history(key):
+ return st.session_state.chat_history[key]
+
+def init_chat_history(key, system_prompt):
+ if "chat_history" not in st.session_state:
+ st.session_state.chat_history = {}
+ if key not in st.session_state.chat_history:
+ st.session_state.chat_history[key] = [{"role": "system", "content": system_prompt}]
+
+def system_prompt_input(default_prompt):
+ return st.sidebar.text_area("System Prompt", value=default_prompt, height=100)
+
+def llm_selector():
+ ollama_models = [m['name'] for m in ol.list()['models']]
+ with st.sidebar:
+ return st.selectbox("LLM", ollama_models)
+
+
+
+st.title("💬 对话模式")
+
+
+model = llm_selector()
+chat_key = f"对话_chat_history_{model}" # Unique key for each mode and model
+default_prompt = "你是一位有用的助手。"
+
+system_prompt = system_prompt_input(default_prompt)
+init_chat_history(chat_key, system_prompt)
+chat_history = get_chat_history(chat_key)
+for message in chat_history:
+ print_chat_message(message)
+
+question = st.chat_input()
+
+debug_mode = st.sidebar.checkbox("Debug Mode", value=True)
+log_interaction("User input", {"mode": "对话", "question": question})
+
+if question:
+ prompt = f"""{anthropic.HUMAN_PROMPT} Here's an article:\n\n
+ {question}\n\n\n\n{question}{anthropic.AI_PROMPT}"""
+
+ if question:
+ user_message = {"role": "user", "content": question}
+ # if app_mode == "语音识别":
+ print_chat_message(user_message)
+ chat_history.append(user_message)
+ response = ol.chat(model=model, messages=chat_history)
+ answer = response['message']['content']
+ ai_message = {"role": "assistant", "content": answer}
+ print_chat_message(ai_message)
+ chat_history.append(ai_message)
+ debug_info = {"messages": chat_history, "response": response}
+
+ if debug_mode:
+ st.write("Debug Info: Complete Prompt Interaction")
+ st.json(debug_info)
+
+ # truncate chat history to keep 20 messages max
+ if len(chat_history) > 20:
+ chat_history = chat_history[-20:]
+
+ # update chat history
+ st.session_state.chat_history[chat_key] = chat_history
diff --git a/9_Patent disclosure document.py b/9_Patent disclosure document.py
new file mode 100644
index 000000000..eb8814c5b
--- /dev/null
+++ b/9_Patent disclosure document.py
@@ -0,0 +1,101 @@
+import streamlit as st
+import ollama as ol
+import datetime
+import json
+
+def log_interaction(action, data):
+ timestamp = datetime.datetime.now().isoformat()
+ log = {"timestamp": timestamp, "action": action, "data": data}
+ with open("user_interactions_log.json", "a") as logfile:
+ logfile.write(json.dumps(log) + "\n")
+
+def print_txt(text):
+    if any("\u0600" <= c <= "\u06FF" for c in text):  # check if text contains Arabic characters
+        text = f"<p style='direction: rtl; text-align: right;'>{text}</p>"
+    st.markdown(text, unsafe_allow_html=True)
+
+def print_chat_message(message):
+ text = message["content"]
+ if message["role"] == "user":
+ with st.chat_message("user", avatar="🎙️"):
+ print_txt(text)
+ elif message["role"] == "assistant":
+ with st.chat_message("assistant", avatar="🦙"):
+ print_txt(text)
+
+def get_chat_history(key):
+ return st.session_state.chat_history[key]
+
+def init_chat_history(key, system_prompt):
+ if "chat_history" not in st.session_state:
+ st.session_state.chat_history = {}
+ if key not in st.session_state.chat_history:
+ st.session_state.chat_history[key] = [{"role": "system", "content": system_prompt}]
+
+def system_prompt_input(default_prompt):
+ return st.sidebar.text_area("System Prompt", value=default_prompt, height=100)
+
+def llm_selector():
+ ollama_models = [m['name'] for m in ol.list()['models']]
+ with st.sidebar:
+ return st.selectbox("LLM", ollama_models)
+
+
+
+st.title("📝 Patent disclosure document")
+uploaded_file = st.file_uploader("Upload an article", type=("txt"))
+
+
+model = llm_selector()
+chat_key = f"对话_chat_history_{model}" # Unique key for each mode and model
+default_prompt = ("你是一位有用的中文助手,回答我的任何问题都要详细说明,并且用中文回答我。"
+ "我要升成一篇专利交底书,请用中文回答我."
+ "内容包括发明名称、技术领域、现有技术一的技术方案、现有技术一的缺点、"
+ "与本发明相关的现有技术二、本发明所要解决的技术问题、本发明提供的完整技术方案、"
+ "本发明技术方案带来的有益效果、针对本发明提供的完整技术方案中的技术方案,"
+ "是否还有别的替代方案同样能完成发明目的、本发明的技术关键点和欲保护点是什么。")
+
+system_prompt = system_prompt_input(default_prompt)
+init_chat_history(chat_key, system_prompt)
+chat_history = get_chat_history(chat_key)
+for message in chat_history:
+ print_chat_message(message)
+
+question = st.chat_input()
+
+debug_mode = st.sidebar.checkbox("Debug Mode", value=True)
+log_interaction("User input", {"mode": "对话", "question": question})
+
+if question:
+ prompt = f"""{anthropic.HUMAN_PROMPT} Here's an article:\n\n
+ {question}\n\n\n\n{question}{anthropic.AI_PROMPT}"""
+
+ if question:
+ user_message = {"role": "user", "content": question}
+
+ # if app_mode == "语音识别":
+ print_chat_message(user_message)
+ chat_history.append(user_message)
+ if uploaded_file:
+ article = uploaded_file.read().decode()
+ chat_history.append({"role": "user", "content": article}) # 添加用户上传的文件内容作为对话历史的一部分
+ response = ol.chat(model=model, messages=chat_history)
+ answer = response['message']['content']
+ ai_message = {"role": "assistant", "content": answer}
+ print_chat_message(ai_message)
+ chat_history.append(ai_message)
+ debug_info = {"messages": chat_history, "response": response}
+
+ if debug_mode:
+ st.write("Debug Info: Complete Prompt Interaction")
+ st.json(debug_info)
+
+ # truncate chat history to keep 20 messages max
+ if len(chat_history) > 20:
+ chat_history = chat_history[-20:]
+
+ # update chat history
+ st.session_state.chat_history[chat_key] = chat_history