Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Master #50

Open
wants to merge 8 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
96 changes: 96 additions & 0 deletions 10_Historical_data_collection.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
import streamlit as st
import anthropic
import ollama as ol
import streamlit as st
from streamlit_mic_recorder import speech_to_text
import datetime
import json

def log_interaction(action, data):
    """Append one timestamped interaction record to the JSON-lines log.

    Parameters:
        action: short label for the event (e.g. "User input").
        data: JSON-serializable payload describing the event.
    """
    timestamp = datetime.datetime.now().isoformat()
    log = {"timestamp": timestamp, "action": action, "data": data}
    # Explicit UTF-8 avoids platform-dependent default encodings; the log
    # regularly contains Chinese text, kept human-readable via ensure_ascii=False.
    with open("user_interactions_log.json", "a", encoding="utf-8") as logfile:
        logfile.write(json.dumps(log, ensure_ascii=False) + "\n")

def print_txt(text):
    """Render *text* with st.markdown; Arabic text is wrapped for RTL display."""
    contains_arabic = any("\u0600" <= ch <= "\u06FF" for ch in text)
    if contains_arabic:
        # Right-to-left paragraph wrapper for Arabic content.
        text = f"<p style='direction: rtl; text-align: right;'>{text}</p>"
    st.markdown(text, unsafe_allow_html=True)

def print_chat_message(message):
    """Display a single chat message with a role-specific avatar.

    Messages with roles other than "user"/"assistant" (e.g. "system") are
    intentionally not rendered.
    """
    role = message["role"]
    text = message["content"]
    avatars = {"user": "🎙️", "assistant": "🦙"}
    if role in avatars:
        with st.chat_message(role, avatar=avatars[role]):
            print_txt(text)

def get_chat_history(key):
    """Return the stored message list for *key*; init_chat_history must run first."""
    return st.session_state["chat_history"][key]

def init_chat_history(key, system_prompt):
    """Ensure session state holds a history list for *key*.

    A fresh history starts with one system message carrying *system_prompt*.
    """
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = {}
    st.session_state.chat_history.setdefault(
        key, [{"role": "system", "content": system_prompt}]
    )

def system_prompt_input(default_prompt):
    """Sidebar text area letting the user edit the system prompt."""
    with st.sidebar:
        return st.text_area("System Prompt", value=default_prompt, height=100)

def llm_selector():
    """Sidebar selectbox over the locally available Ollama models."""
    # NOTE(review): assumes ol.list() returns {'models': [{'name': ...}, ...]};
    # newer ollama clients return typed objects — confirm against the pinned version.
    names = [entry['name'] for entry in ol.list()['models']]
    with st.sidebar:
        return st.selectbox("LLM", names)



st.title("🕔 Historical data collection")
uploaded_file = st.file_uploader("Upload an article", type=("txt"))

model = llm_selector()
chat_key = f"对话_chat_history_{model}"  # Unique key for each mode and model
default_prompt = ("我现在将要给你传送历史数据,你需要整理这个数据然后给我发送清洗后的历史数据。要求格式:json格式。")

system_prompt = system_prompt_input(default_prompt)
init_chat_history(chat_key, system_prompt)
chat_history = get_chat_history(chat_key)

# Replay the stored conversation so it survives Streamlit reruns.
for message in chat_history:
    print_chat_message(message)

question = st.chat_input()
debug_mode = st.sidebar.checkbox("Debug Mode", value=True)

if question:
    # Log only real input; previously this logged {"question": None} on every
    # rerun, and an unused anthropic-style prompt string was built here.
    log_interaction("User input", {"mode": "对话", "question": question})

    user_message = {"role": "user", "content": question}
    print_chat_message(user_message)
    chat_history.append(user_message)

    if uploaded_file:
        # Attach the uploaded article as user-provided context.
        # NOTE(review): this re-reads/re-appends while a file stays attached —
        # consider sending the article only once per session.
        article = uploaded_file.read().decode()
        chat_history.append({"role": "user", "content": article})

    response = ol.chat(model=model, messages=chat_history)
    answer = response['message']['content']
    ai_message = {"role": "assistant", "content": answer}
    print_chat_message(ai_message)
    chat_history.append(ai_message)

    if debug_mode:
        st.write("Debug Info: Complete Prompt Interaction")
        st.json({"messages": chat_history, "response": response})

    # Truncate to at most 20 messages but always keep the system prompt;
    # the old plain slice silently dropped it.
    if len(chat_history) > 20:
        chat_history = [chat_history[0]] + chat_history[-19:]

    # Persist the updated history for this model.
    st.session_state.chat_history[chat_key] = chat_history
96 changes: 96 additions & 0 deletions 11_Internet_data_collection.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
import streamlit as st
import anthropic
import ollama as ol
import streamlit as st
from streamlit_mic_recorder import speech_to_text
import datetime
import json

def log_interaction(action, data):
    """Append one timestamped interaction record to the JSON-lines log.

    Parameters:
        action: short label for the event (e.g. "User input").
        data: JSON-serializable payload describing the event.
    """
    timestamp = datetime.datetime.now().isoformat()
    log = {"timestamp": timestamp, "action": action, "data": data}
    # Explicit UTF-8 avoids platform-dependent default encodings; the log
    # regularly contains Chinese text, kept human-readable via ensure_ascii=False.
    with open("user_interactions_log.json", "a", encoding="utf-8") as logfile:
        logfile.write(json.dumps(log, ensure_ascii=False) + "\n")

def print_txt(text):
    """Render *text* with st.markdown; Arabic text is wrapped for RTL display."""
    contains_arabic = any("\u0600" <= ch <= "\u06FF" for ch in text)
    if contains_arabic:
        # Right-to-left paragraph wrapper for Arabic content.
        text = f"<p style='direction: rtl; text-align: right;'>{text}</p>"
    st.markdown(text, unsafe_allow_html=True)

def print_chat_message(message):
    """Display a single chat message with a role-specific avatar.

    Messages with roles other than "user"/"assistant" (e.g. "system") are
    intentionally not rendered.
    """
    role = message["role"]
    text = message["content"]
    avatars = {"user": "🎙️", "assistant": "🦙"}
    if role in avatars:
        with st.chat_message(role, avatar=avatars[role]):
            print_txt(text)

def get_chat_history(key):
    """Return the stored message list for *key*; init_chat_history must run first."""
    return st.session_state["chat_history"][key]

def init_chat_history(key, system_prompt):
    """Ensure session state holds a history list for *key*.

    A fresh history starts with one system message carrying *system_prompt*.
    """
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = {}
    st.session_state.chat_history.setdefault(
        key, [{"role": "system", "content": system_prompt}]
    )

def system_prompt_input(default_prompt):
    """Sidebar text area letting the user edit the system prompt."""
    with st.sidebar:
        return st.text_area("System Prompt", value=default_prompt, height=100)

def llm_selector():
    """Sidebar selectbox over the locally available Ollama models."""
    # NOTE(review): assumes ol.list() returns {'models': [{'name': ...}, ...]};
    # newer ollama clients return typed objects — confirm against the pinned version.
    names = [entry['name'] for entry in ol.list()['models']]
    with st.sidebar:
        return st.selectbox("LLM", names)



st.title("🖥️ Internet data collection")
uploaded_file = st.file_uploader("Upload an article", type=("txt"))

model = llm_selector()
chat_key = f"对话_chat_history_{model}"  # Unique key for each mode and model
default_prompt = ("我现在将要给你传送网情数据,你需要提取这个数据特征之后发给我,要求格式:json格式。")

system_prompt = system_prompt_input(default_prompt)
init_chat_history(chat_key, system_prompt)
chat_history = get_chat_history(chat_key)

# Replay the stored conversation so it survives Streamlit reruns.
for message in chat_history:
    print_chat_message(message)

question = st.chat_input()
debug_mode = st.sidebar.checkbox("Debug Mode", value=True)

if question:
    # Log only real input; previously this logged {"question": None} on every
    # rerun, and an unused anthropic-style prompt string was built here.
    log_interaction("User input", {"mode": "对话", "question": question})

    user_message = {"role": "user", "content": question}
    print_chat_message(user_message)
    chat_history.append(user_message)

    if uploaded_file:
        # Attach the uploaded article as user-provided context.
        # NOTE(review): this re-reads/re-appends while a file stays attached —
        # consider sending the article only once per session.
        article = uploaded_file.read().decode()
        chat_history.append({"role": "user", "content": article})

    response = ol.chat(model=model, messages=chat_history)
    answer = response['message']['content']
    ai_message = {"role": "assistant", "content": answer}
    print_chat_message(ai_message)
    chat_history.append(ai_message)

    if debug_mode:
        st.write("Debug Info: Complete Prompt Interaction")
        st.json({"messages": chat_history, "response": response})

    # Truncate to at most 20 messages but always keep the system prompt;
    # the old plain slice silently dropped it.
    if len(chat_history) > 20:
        chat_history = [chat_history[0]] + chat_history[-19:]

    # Persist the updated history for this model.
    st.session_state.chat_history[chat_key] = chat_history
96 changes: 96 additions & 0 deletions 12_Collection_of_superior_task_data.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
import streamlit as st
import anthropic
import ollama as ol
import streamlit as st
from streamlit_mic_recorder import speech_to_text
import datetime
import json

def log_interaction(action, data):
    """Append one timestamped interaction record to the JSON-lines log.

    Parameters:
        action: short label for the event (e.g. "User input").
        data: JSON-serializable payload describing the event.
    """
    timestamp = datetime.datetime.now().isoformat()
    log = {"timestamp": timestamp, "action": action, "data": data}
    # Explicit UTF-8 avoids platform-dependent default encodings; the log
    # regularly contains Chinese text, kept human-readable via ensure_ascii=False.
    with open("user_interactions_log.json", "a", encoding="utf-8") as logfile:
        logfile.write(json.dumps(log, ensure_ascii=False) + "\n")

def print_txt(text):
    """Render *text* with st.markdown; Arabic text is wrapped for RTL display."""
    contains_arabic = any("\u0600" <= ch <= "\u06FF" for ch in text)
    if contains_arabic:
        # Right-to-left paragraph wrapper for Arabic content.
        text = f"<p style='direction: rtl; text-align: right;'>{text}</p>"
    st.markdown(text, unsafe_allow_html=True)

def print_chat_message(message):
    """Display a single chat message with a role-specific avatar.

    Messages with roles other than "user"/"assistant" (e.g. "system") are
    intentionally not rendered.
    """
    role = message["role"]
    text = message["content"]
    avatars = {"user": "🎙️", "assistant": "🦙"}
    if role in avatars:
        with st.chat_message(role, avatar=avatars[role]):
            print_txt(text)

def get_chat_history(key):
    """Return the stored message list for *key*; init_chat_history must run first."""
    return st.session_state["chat_history"][key]

def init_chat_history(key, system_prompt):
    """Ensure session state holds a history list for *key*.

    A fresh history starts with one system message carrying *system_prompt*.
    """
    if "chat_history" not in st.session_state:
        st.session_state.chat_history = {}
    st.session_state.chat_history.setdefault(
        key, [{"role": "system", "content": system_prompt}]
    )

def system_prompt_input(default_prompt):
    """Sidebar text area letting the user edit the system prompt."""
    with st.sidebar:
        return st.text_area("System Prompt", value=default_prompt, height=100)

def llm_selector():
    """Sidebar selectbox over the locally available Ollama models."""
    # NOTE(review): assumes ol.list() returns {'models': [{'name': ...}, ...]};
    # newer ollama clients return typed objects — confirm against the pinned version.
    names = [entry['name'] for entry in ol.list()['models']]
    with st.sidebar:
        return st.selectbox("LLM", names)



st.title("🪪 Collection of superior task data")
uploaded_file = st.file_uploader("Upload an article", type=("txt"))

model = llm_selector()
chat_key = f"对话_chat_history_{model}"  # Unique key for each mode and model
default_prompt = ("我现在将要给你传送上级任务数据,你需要整理这个数据然后给我发送清洗后的任务数据。要求格式:json格式。")

system_prompt = system_prompt_input(default_prompt)
init_chat_history(chat_key, system_prompt)
chat_history = get_chat_history(chat_key)

# Replay the stored conversation so it survives Streamlit reruns.
for message in chat_history:
    print_chat_message(message)

question = st.chat_input()
debug_mode = st.sidebar.checkbox("Debug Mode", value=True)

if question:
    # Log only real input; previously this logged {"question": None} on every
    # rerun, and an unused anthropic-style prompt string was built here.
    log_interaction("User input", {"mode": "对话", "question": question})

    user_message = {"role": "user", "content": question}
    print_chat_message(user_message)
    chat_history.append(user_message)

    if uploaded_file:
        # Attach the uploaded article as user-provided context.
        # NOTE(review): this re-reads/re-appends while a file stays attached —
        # consider sending the article only once per session.
        article = uploaded_file.read().decode()
        chat_history.append({"role": "user", "content": article})

    response = ol.chat(model=model, messages=chat_history)
    answer = response['message']['content']
    ai_message = {"role": "assistant", "content": answer}
    print_chat_message(ai_message)
    chat_history.append(ai_message)

    if debug_mode:
        st.write("Debug Info: Complete Prompt Interaction")
        st.json({"messages": chat_history, "response": response})

    # Truncate to at most 20 messages but always keep the system prompt;
    # the old plain slice silently dropped it.
    if len(chat_history) > 20:
        chat_history = [chat_history[0]] + chat_history[-19:]

    # Persist the updated history for this model.
    st.session_state.chat_history[chat_key] = chat_history
33 changes: 33 additions & 0 deletions 1_File_Q&A.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
import streamlit as st
import anthropic

# Sidebar: collect the Anthropic API key; bare strings render as markdown links.
with st.sidebar:
    anthropic_api_key = st.text_input("Anthropic API Key", key="file_qa_api_key", type="password")
    "[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/1_File_Q%26A.py)"
    "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"

st.title("📝 File Q&A with Anthropic")
uploaded_file = st.file_uploader("Upload an article", type=("txt", "md"))
# The question box stays disabled until a file has been uploaded.
question = st.text_input(
    "Ask something about the article",
    placeholder="Can you give me a short summary?",
    disabled=not uploaded_file,
)

if uploaded_file and question and not anthropic_api_key:
    st.info("Please add your Anthropic API key to continue.")

if uploaded_file and question and anthropic_api_key:
    article = uploaded_file.read().decode()
    # Legacy Anthropic completions prompt format (HUMAN_PROMPT/AI_PROMPT).
    # NOTE(review): requires an older anthropic SDK — the Messages API has
    # since replaced this; confirm the pinned SDK version.
    prompt = f"""{anthropic.HUMAN_PROMPT} Here's an article:\n\n<article>
{article}\n\n</article>\n\n{question}{anthropic.AI_PROMPT}"""

    client = anthropic.Client(api_key=anthropic_api_key)
    response = client.completions.create(
        prompt=prompt,
        stop_sequences=[anthropic.HUMAN_PROMPT],
        model="claude-v1",  # "claude-2" for Claude 2 model
        max_tokens_to_sample=100,
    )
    st.write("### Answer")
    st.write(response.completion)
48 changes: 48 additions & 0 deletions 2_Chat_with_search.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
import streamlit as st

from langchain.agents import initialize_agent, AgentType
from langchain.callbacks import StreamlitCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.tools import DuckDuckGoSearchRun

# Sidebar: collect the OpenAI API key; bare strings render as markdown links.
with st.sidebar:
    openai_api_key = st.text_input(
        "OpenAI API Key", key="langchain_search_api_key_openai", type="password"
    )
    "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
    "[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/2_Chat_with_search.py)"
    "[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/streamlit/llm-examples?quickstart=1)"

st.title("🔎 LangChain - Chat with search")

"""
In this example, we're using `StreamlitCallbackHandler` to display the thoughts and actions of an agent in an interactive Streamlit app.
Try more LangChain 🤝 Streamlit Agent examples at [github.com/langchain-ai/streamlit-agent](https://github.com/langchain-ai/streamlit-agent).
"""

# Seed the conversation with a greeting on first load.
if "messages" not in st.session_state:
    st.session_state["messages"] = [
        {"role": "assistant", "content": "Hi, I'm a chatbot who can search the web. How can I help you?"}
    ]

# Replay the stored conversation across Streamlit reruns.
for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg["content"])

if prompt := st.chat_input(placeholder="Who won the Women's U.S. Open in 2018?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.chat_message("user").write(prompt)

    if not openai_api_key:
        st.info("Please add your OpenAI API key to continue.")
        st.stop()

    # NOTE(review): initialize_agent / agent.run are legacy LangChain APIs;
    # confirm the pinned langchain version supports them.
    llm = ChatOpenAI(model_name="gpt-3.5-turbo", openai_api_key=openai_api_key, streaming=True)
    search = DuckDuckGoSearchRun(name="Search")
    search_agent = initialize_agent(
        [search], llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True
    )
    with st.chat_message("assistant"):
        # Stream the agent's intermediate thoughts/actions into the container.
        st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
        response = search_agent.run(st.session_state.messages, callbacks=[st_cb])
        st.session_state.messages.append({"role": "assistant", "content": response})
        st.write(response)
22 changes: 22 additions & 0 deletions 3_Langchain_Quickstart.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
import streamlit as st
from langchain.llms import OpenAI

st.title("🦜🔗 Langchain Quickstart App")

# Sidebar: collect the OpenAI API key; the bare string renders as a markdown link.
with st.sidebar:
    openai_api_key = st.text_input("OpenAI API Key", type="password")
    "[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"


def generate_response(input_text):
    """Query OpenAI via LangChain (temperature 0.7) and show the reply as an info box.

    Reads the module-level ``openai_api_key`` entered in the sidebar.
    """
    model = OpenAI(temperature=0.7, openai_api_key=openai_api_key)
    st.info(model(input_text))


# Input form: validate the API key before calling the model on submit.
with st.form("my_form"):
    text = st.text_area("Enter text:", "What are 3 key advice for learning how to code?")
    submitted = st.form_submit_button("Submit")
    if not openai_api_key:
        st.info("Please add your OpenAI API key to continue.")
    elif submitted:
        generate_response(text)
Loading