main.py
import dotenv
import streamlit as st
import fitz # PyMuPDF
from langchain import hub
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_text_splitters import RecursiveCharacterTextSplitter
dotenv.load_dotenv()
# Define a class to encapsulate document sections
class Document:
    def __init__(self, content, metadata=None):
        self.page_content = content
        if metadata is None:
            self.metadata = {}  # Default metadata as an empty dictionary
        else:
            self.metadata = metadata
# Streamlit app interface
st.set_page_config(page_title="Chat with PDF", page_icon="", layout="centered")
st.markdown(
    "<h3 style='background:#0284fe;padding:20px;border-radius:10px;text-align:center;'>Chat with PDF document</h3>",
    unsafe_allow_html=True,
)
st.markdown("")
uploaded_file = st.file_uploader("Choose a PDF file", type="pdf")
if uploaded_file is not None:
    # Open the uploaded PDF from its in-memory bytes
    doc = fitz.open(stream=uploaded_file.read(), filetype="pdf")
    # Extract text from the entire PDF
    text = ""
    for page in doc:
        text += page.get_text()
    # Text splitting: overlapping chunks sized for embedding
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    splits = text_splitter.split_text(text)
    # Wrap splits into Document objects
    documents = [Document(split) for split in splits]
    # Initialize and populate the vectorstore
    vectorstore = Chroma.from_documents(documents=documents, embedding=OpenAIEmbeddings())
    # Retrieval and generation
    retriever = vectorstore.as_retriever()
    # Named rag_prompt so the chat input below does not shadow it
    rag_prompt = hub.pull("rlm/rag-prompt")
    llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
    def format_docs(docs):
        return "\n\n".join(doc.page_content for doc in docs)

    rag_chain = (
        {"context": retriever | format_docs, "question": RunnablePassthrough()}
        | rag_prompt
        | llm
        | StrOutputParser()
    )
if "messages" not in st.session_state:
st.session_state.messages = []
# Display chat messages from history on app rerun
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
    # React to user input
    if user_input := st.chat_input("You can ask me questions about the document."):
        # Display user message in chat message container
        with st.chat_message("user"):
            st.markdown(user_input)
        # Add user message to chat history
        st.session_state.messages.append({"role": "user", "content": user_input})
        # Display assistant response in chat message container
        with st.chat_message("assistant"):
            response = rag_chain.invoke(user_input)
            st.markdown(response)
        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": response})
    # Reset the conversation
    if st.button("Clear chat"):
        st.session_state.clear()
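
Note: as written, Streamlit reruns the whole script on every chat message, so the PDF is re-split and re-embedded each time. A minimal sketch of one way to avoid that, assuming the same Streamlit and LangChain versions used above, is to cache the retriever build with st.cache_resource, keyed on the uploaded PDF bytes. build_retriever is a hypothetical helper, not part of the original file:

@st.cache_resource
def build_retriever(pdf_bytes: bytes):
    # Hypothetical helper: runs once per unique PDF, then serves the cached retriever
    pdf = fitz.open(stream=pdf_bytes, filetype="pdf")
    text = "".join(page.get_text() for page in pdf)
    # Same splitting and wrapping as the main script
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    documents = [Document(chunk) for chunk in splitter.split_text(text)]
    # Embedding happens once per unique upload instead of on every rerun
    store = Chroma.from_documents(documents=documents, embedding=OpenAIEmbeddings())
    return store.as_retriever()

# Usage inside the `if uploaded_file is not None:` block:
#     retriever = build_retriever(uploaded_file.getvalue())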