app1.py
import streamlit as st
from dotenv import load_dotenv
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_google_genai import GoogleGenerativeAIEmbeddings, ChatGoogleGenerativeAI
from langchain_chroma import Chroma
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate
# Load environment variables
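# (assumes a local .env file defining GOOGLE_API_KEY, which the
# langchain_google_genai classes read for authentication)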
load_dotenv()
# Streamlit app title
st.title("RAG Application por Santiago Ramos")
# List of PDF files to load
pdf_files = ["yolov9_paper.pdf", "David Santiago Ramos CV en-output.pdf"]
# Function to load and split documents from PDFs
def load_and_split_pdfs(pdf_files):
    all_docs = []
    for pdf_file in pdf_files:
        loader = PyPDFLoader(pdf_file)
        data = loader.load()
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000)
        docs = text_splitter.split_documents(data)
        all_docs.extend(docs)
    return all_docs
# Load and split documents
all_docs = load_and_split_pdfs(pdf_files)
# Create the vectorstore
vectorstore = Chroma.from_documents(
    documents=all_docs,
    embedding=GoogleGenerativeAIEmbeddings(model="models/embedding-001")
)
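# Note: the index above is rebuilt on every Streamlit rerun; wrapping the
# loading/embedding steps in a function decorated with @st.cache_resource
# would avoid recomputing embeddings on each interaction.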
# Create retriever
retriever = vectorstore.as_retriever(search_type="similarity", search_kwargs={"k": 10})
# Initialize the language model
llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash", temperature=0, max_tokens=None, timeout=None)
# Streamlit chat input
query = st.chat_input("Say something: ")
# Define system prompt
system_prompt = (
    "You are an assistant for question-answering tasks. "
    "Use the following pieces of retrieved context to answer "
    "the question. If you don't know the answer, say that you "
    "don't know. Use three sentences maximum and keep the "
    "answer concise."
    "\n\n"
    "{context}"
)
# Create prompt template
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        ("human", "{input}"),
    ]
)
# Process query if provided
if query:
    question_answer_chain = create_stuff_documents_chain(llm, prompt)
    rag_chain = create_retrieval_chain(retriever, question_answer_chain)
    response = rag_chain.invoke({"input": query})
    st.write(response["answer"])
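
To run the app locally (assuming the two PDF files sit next to app1.py and a .env file defining GOOGLE_API_KEY is present), something like the following should work; the package list is inferred from the imports above:

    pip install streamlit python-dotenv langchain langchain-community langchain-google-genai langchain-chroma pypdf
    streamlit run app1.py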