deboramachadoandrade committed a84cc15 (parent: 67901df)

Chainlit RAG with GPT4o
.chainlit/config.toml ADDED
@@ -0,0 +1,97 @@
+ [project]
+ # Whether to enable telemetry (default: true). No personal data is collected.
+ enable_telemetry = true
+
+
+ # List of environment variables to be provided by each user to use the app.
+ user_env = []
+
+ # Duration (in seconds) during which the session is saved when the connection is lost
+ session_timeout = 3600
+
+ # Enable third-party caching (e.g. LangChain cache)
+ cache = false
+
+ # Authorized origins
+ allow_origins = ["*"]
+
+ # Follow symlink for asset mount (see https://github.com/Chainlit/chainlit/issues/317)
+ # follow_symlink = false
+
+ [features]
+ # Show the prompt playground
+ prompt_playground = true
+
+ # Process and display HTML in messages. This can be a security risk (see https://stackoverflow.com/questions/19603097/why-is-it-dangerous-to-render-user-generated-html-or-javascript)
+ unsafe_allow_html = false
+
+ # Process and display mathematical expressions. This can clash with "$" characters in messages.
+ latex = false
+
+ # Authorize users to upload files with messages
+ multi_modal = true
+
+ # Allows users to use speech-to-text
+ [features.speech_to_text]
+ enabled = false
+ # See all languages here https://github.com/JamesBrill/react-speech-recognition/blob/HEAD/docs/API.md#language-string
+ # language = "en-US"
+
+ [UI]
+ # Name of the app and chatbot.
+ name = "Chatbot"
+
+ # Show the readme while the thread is empty.
+ show_readme_as_default = true
+
+ # Description of the app and chatbot. This is used for HTML tags.
+ # description = ""
+
+ # Large content is collapsed by default for a cleaner UI
+ default_collapse_content = true
+
+ # The default value for the expand messages settings.
+ default_expand_messages = false
+
+ # Hide the chain of thought details from the user in the UI.
+ hide_cot = false
+
+ # Link to your github repo. This will add a github button in the UI's header.
+ # github = ""
+
+ # Specify a CSS file that can be used to customize the user interface.
+ # The CSS file can be served from the public directory or via an external link.
+ # custom_css = "/public/test.css"
+
+ # Specify a JavaScript file that can be used to customize the user interface.
+ # The JavaScript file can be served from the public directory.
+ # custom_js = "/public/test.js"
+
+ # Specify a custom font URL.
+ # custom_font = "https://fonts.googleapis.com/css2?family=Inter:wght@400;500;700&display=swap"
+
+ # Override default MUI light theme. (Check theme.ts)
+ [UI.theme]
+ #font_family = "Inter, sans-serif"
+ [UI.theme.light]
+ #background = "#FAFAFA"
+ #paper = "#FFFFFF"
+
+ [UI.theme.light.primary]
+ #main = "#F80061"
+ #dark = "#980039"
+ #light = "#FFE7EB"
+
+ # Override default MUI dark theme. (Check theme.ts)
+ [UI.theme.dark]
+ #background = "#FAFAFA"
+ #paper = "#FFFFFF"
+
+ [UI.theme.dark.primary]
+ #main = "#F80061"
+ #dark = "#980039"
+ #light = "#FFE7EB"
+
+
+ [meta]
+ generated_by = "1.0.301"
.chainlit/translations/en-US.json ADDED
@@ -0,0 +1,155 @@
+ {
+ "components": {
+ "atoms": {
+ "buttons": {
+ "userButton": {
+ "menu": {
+ "settings": "Settings",
+ "settingsKey": "S",
+ "APIKeys": "API Keys",
+ "logout": "Logout"
+ }
+ }
+ }
+ },
+ "molecules": {
+ "newChatButton": {
+ "newChat": "New Chat"
+ },
+ "tasklist": {
+ "TaskList": {
+ "title": "\ud83d\uddd2\ufe0f Task List",
+ "loading": "Loading...",
+ "error": "An error occurred"
+ }
+ },
+ "attachments": {
+ "cancelUpload": "Cancel upload",
+ "removeAttachment": "Remove attachment"
+ },
+ "newChatDialog": {
+ "createNewChat": "Create new chat?",
+ "clearChat": "This will clear the current messages and start a new chat.",
+ "cancel": "Cancel",
+ "confirm": "Confirm"
+ },
+ "settingsModal": {
+ "expandMessages": "Expand Messages",
+ "hideChainOfThought": "Hide Chain of Thought",
+ "darkMode": "Dark Mode"
+ }
+ },
+ "organisms": {
+ "chat": {
+ "history": {
+ "index": {
+ "lastInputs": "Last Inputs",
+ "noInputs": "Such empty...",
+ "loading": "Loading..."
+ }
+ },
+ "inputBox": {
+ "input": {
+ "placeholder": "Type your message here..."
+ },
+ "speechButton": {
+ "start": "Start recording",
+ "stop": "Stop recording"
+ },
+ "SubmitButton": {
+ "sendMessage": "Send message",
+ "stopTask": "Stop Task"
+ },
+ "UploadButton": {
+ "attachFiles": "Attach files"
+ },
+ "waterMark": {
+ "text": "Built with"
+ }
+ },
+ "Messages": {
+ "index": {
+ "running": "Running",
+ "executedSuccessfully": "executed successfully",
+ "failed": "failed",
+ "feedbackUpdated": "Feedback updated",
+ "updating": "Updating"
+ }
+ },
+ "dropScreen": {
+ "dropYourFilesHere": "Drop your files here"
+ },
+ "index": {
+ "failedToUpload": "Failed to upload",
+ "cancelledUploadOf": "Cancelled upload of",
+ "couldNotReachServer": "Could not reach the server",
+ "continuingChat": "Continuing previous chat"
+ },
+ "settings": {
+ "settingsPanel": "Settings panel",
+ "reset": "Reset",
+ "cancel": "Cancel",
+ "confirm": "Confirm"
+ }
+ },
+ "threadHistory": {
+ "sidebar": {
+ "filters": {
+ "FeedbackSelect": {
+ "feedbackAll": "Feedback: All",
+ "feedbackPositive": "Feedback: Positive",
+ "feedbackNegative": "Feedback: Negative"
+ },
+ "SearchBar": {
+ "search": "Search"
+ }
+ },
+ "DeleteThreadButton": {
+ "confirmMessage": "This will delete the thread as well as its messages and elements.",
+ "cancel": "Cancel",
+ "confirm": "Confirm",
+ "deletingChat": "Deleting chat",
+ "chatDeleted": "Chat deleted"
+ },
+ "index": {
+ "pastChats": "Past Chats"
+ },
+ "ThreadList": {
+ "empty": "Empty..."
+ },
+ "TriggerButton": {
+ "closeSidebar": "Close sidebar",
+ "openSidebar": "Open sidebar"
+ }
+ },
+ "Thread": {
+ "backToChat": "Go back to chat",
+ "chatCreatedOn": "This chat was created on"
+ }
+ },
+ "header": {
+ "chat": "Chat",
+ "readme": "Readme"
+ }
+ }
+ },
+ "hooks": {
+ "useLLMProviders": {
+ "failedToFetchProviders": "Failed to fetch providers:"
+ }
+ },
+ "pages": {
+ "Design": {},
+ "Env": {
+ "savedSuccessfully": "Saved successfully",
+ "requiredApiKeys": "Required API Keys",
+ "requiredApiKeysInfo": "To use this app, the following API keys are required. The keys are stored on your device's local storage."
+ },
+ "Page": {
+ "notPartOfProject": "You are not part of this project."
+ },
+ "ResumeButton": {
+ "resumeChat": "Resume Chat"
+ }
+ }
+ }
.chainlit/translations/pt-BR.json ADDED
@@ -0,0 +1,155 @@
+ {
+ "components": {
+ "atoms": {
+ "buttons": {
+ "userButton": {
+ "menu": {
+ "settings": "Configura\u00e7\u00f5es",
+ "settingsKey": "S",
+ "APIKeys": "Chaves de API",
+ "logout": "Sair"
+ }
+ }
+ }
+ },
+ "molecules": {
+ "newChatButton": {
+ "newChat": "Nova Conversa"
+ },
+ "tasklist": {
+ "TaskList": {
+ "title": "\ud83d\uddd2\ufe0f Lista de Tarefas",
+ "loading": "Carregando...",
+ "error": "Ocorreu um erro"
+ }
+ },
+ "attachments": {
+ "cancelUpload": "Cancelar envio",
+ "removeAttachment": "Remover anexo"
+ },
+ "newChatDialog": {
+ "createNewChat": "Criar novo chat?",
+ "clearChat": "Isso limpar\u00e1 as mensagens atuais e iniciar\u00e1 uma nova conversa.",
+ "cancel": "Cancelar",
+ "confirm": "Confirmar"
+ },
+ "settingsModal": {
+ "expandMessages": "Expandir Mensagens",
+ "hideChainOfThought": "Esconder Sequ\u00eancia de Pensamento",
+ "darkMode": "Modo Escuro"
+ }
+ },
+ "organisms": {
+ "chat": {
+ "history": {
+ "index": {
+ "lastInputs": "\u00daltimas Entradas",
+ "noInputs": "Vazio...",
+ "loading": "Carregando..."
+ }
+ },
+ "inputBox": {
+ "input": {
+ "placeholder": "Digite sua mensagem aqui..."
+ },
+ "speechButton": {
+ "start": "Iniciar grava\u00e7\u00e3o",
+ "stop": "Parar grava\u00e7\u00e3o"
+ },
+ "SubmitButton": {
+ "sendMessage": "Enviar mensagem",
+ "stopTask": "Parar Tarefa"
+ },
+ "UploadButton": {
+ "attachFiles": "Anexar arquivos"
+ },
+ "waterMark": {
+ "text": "Constru\u00eddo com"
+ }
+ },
+ "Messages": {
+ "index": {
+ "running": "Executando",
+ "executedSuccessfully": "executado com sucesso",
+ "failed": "falhou",
+ "feedbackUpdated": "Feedback atualizado",
+ "updating": "Atualizando"
+ }
+ },
+ "dropScreen": {
+ "dropYourFilesHere": "Solte seus arquivos aqui"
+ },
+ "index": {
+ "failedToUpload": "Falha ao enviar",
+ "cancelledUploadOf": "Envio cancelado de",
+ "couldNotReachServer": "N\u00e3o foi poss\u00edvel conectar ao servidor",
+ "continuingChat": "Continuando o chat anterior"
+ },
+ "settings": {
+ "settingsPanel": "Painel de Configura\u00e7\u00f5es",
+ "reset": "Redefinir",
+ "cancel": "Cancelar",
+ "confirm": "Confirmar"
+ }
+ },
+ "threadHistory": {
+ "sidebar": {
+ "filters": {
+ "FeedbackSelect": {
+ "feedbackAll": "Feedback: Todos",
+ "feedbackPositive": "Feedback: Positivo",
+ "feedbackNegative": "Feedback: Negativo"
+ },
+ "SearchBar": {
+ "search": "Buscar"
+ }
+ },
+ "DeleteThreadButton": {
+ "confirmMessage": "Isso deletar\u00e1 a conversa, assim como suas mensagens e elementos.",
+ "cancel": "Cancelar",
+ "confirm": "Confirmar",
+ "deletingChat": "Deletando conversa",
+ "chatDeleted": "Conversa deletada"
+ },
+ "index": {
+ "pastChats": "Conversas Anteriores"
+ },
+ "ThreadList": {
+ "empty": "Vazio..."
+ },
+ "TriggerButton": {
+ "closeSidebar": "Fechar barra lateral",
+ "openSidebar": "Abrir barra lateral"
+ }
+ },
+ "Thread": {
+ "backToChat": "Voltar para a conversa",
+ "chatCreatedOn": "Esta conversa foi criada em"
+ }
+ },
+ "header": {
+ "chat": "Conversa",
+ "readme": "Leia-me"
+ }
+ },
+ "hooks": {
+ "useLLMProviders": {
+ "failedToFetchProviders": "Falha ao buscar provedores:"
+ }
+ },
+ "pages": {
+ "Design": {},
+ "Env": {
+ "savedSuccessfully": "Salvo com sucesso",
+ "requiredApiKeys": "Chaves de API necess\u00e1rias",
+ "requiredApiKeysInfo": "Para usar este aplicativo, as seguintes chaves de API s\u00e3o necess\u00e1rias. As chaves s\u00e3o armazenadas localmente em seu dispositivo."
+ },
+ "Page": {
+ "notPartOfProject": "Voc\u00ea n\u00e3o faz parte deste projeto."
+ },
+ "ResumeButton": {
+ "resumeChat": "Continuar Conversa"
+ }
+ }
+ }
+ }
Dockerfile ADDED
@@ -0,0 +1,16 @@
+ FROM python:3.9
+ WORKDIR /code
+ COPY ./requirements.txt /code/requirements.txt
+ RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+ RUN useradd -m -u 1000 user
+ USER user
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:$PATH
+ WORKDIR $HOME/app
+ COPY --chown=user . $HOME/app
+
+ #CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
+ # COPY ./requirements.txt $HOME/Mid_term_copy/requirements.txt
+ # COPY . .
+ CMD ["chainlit", "run", "app.py", "--port", "7860"]
app.py ADDED
@@ -0,0 +1,173 @@
+ # FRS 102 The Financial Reporting Standard applicable in the UK and Republic of Ireland
+
+ # In the following app we'll build a RAG pipeline that will allow us to interactively retrieve information from the report
+ # "FRS 102 The Financial Reporting Standard applicable in the UK and Republic of Ireland".
+
+
+ import os
+ import openai
+ from openai import AsyncOpenAI  # importing openai for API usage
+ import chainlit as cl  # importing chainlit for our app
+ # from chainlit.prompt import Prompt, PromptMessage  # importing prompt tools
+ from chainlit.prompt import Prompt, PromptMessage
+ from chainlit.playground.providers import ChatOpenAI  # importing ChatOpenAI tools
+ from dotenv import load_dotenv
+
+ load_dotenv()
+
+
+
+
+ # os.environ["OPENAI_API_KEY"] = openai.api_key
+
+
+
+ # Loading data
+ from langchain_community.document_loaders import PyMuPDFLoader
+
+ loader = PyMuPDFLoader(
+     "FRC.pdf",
+ )
+
+ documents = loader.load()
+
+ # Splitting data
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+
+ text_splitter = RecursiveCharacterTextSplitter(
+     chunk_size=1500,
+     chunk_overlap=100
+ )
+
+ documents = text_splitter.split_documents(documents)
+
+ # Loading the OpenAI embeddings model:
+ from langchain_openai import OpenAIEmbeddings
+
+ embeddings = OpenAIEmbeddings(
+     model="text-embedding-3-small"
+ )
+
+ # Creating a FAISS VectorStore:
+ from langchain_community.vectorstores import FAISS
+
+ vector_store = FAISS.from_documents(documents, embeddings)
+
+ # Creating a retriever:
+ retriever = vector_store.as_retriever()
+
+ # Creating a prompt template:
+ from langchain.prompts import ChatPromptTemplate
+
+ template = """Answer the question based only on the following context. If you cannot answer the question with the context, please respond with 'I don't know':
+
+ Context:
+ {context}
+
+ Question:
+ {question}
+ """
+
+ prompt = ChatPromptTemplate.from_template(template)
+
+ # Creating a RAG chain:
+ from operator import itemgetter
+
+ from langchain_openai import ChatOpenAI
+ from langchain_core.output_parsers import StrOutputParser
+ from langchain_core.runnables import RunnablePassthrough
+
+ primary_qa_llm = ChatOpenAI(model_name="gpt-4o", temperature=0)
+
+ retrieval_augmented_qa_chain = (
+     # INVOKE CHAIN WITH: {"question" : "<<SOME USER QUESTION>>"}
+     # "question" : populated by getting the value of the "question" key
+     # "context"  : populated by getting the value of the "question" key and chaining it into the retriever
+     {"context": itemgetter("question") | retriever, "question": itemgetter("question")}
+     # "context" : assigned to a RunnablePassthrough object (will not be called or considered in the next step)
+     #             by getting the value of the "context" key from the previous step
+     | RunnablePassthrough.assign(context=itemgetter("context"))
+     # "response" : the "context" and "question" values are used to format our prompt object and then piped
+     #              into the LLM and stored in a key called "response"
+     # "context"  : populated by getting the value of the "context" key from the previous step
+     | {"response": prompt | primary_qa_llm, "context": itemgetter("context")}
+ )
+
+ # We will be using the advanced MultiQuery retriever provided by LangChain:
+ from langchain.retrievers import MultiQueryRetriever
+ advanced_retriever = MultiQueryRetriever.from_llm(retriever=retriever, llm=primary_qa_llm)
+
+ # We create a chain to stuff our documents into our prompt:
+ from langchain.chains.combine_documents import create_stuff_documents_chain
+ from langchain import hub
+
+ retrieval_qa_prompt = hub.pull("langchain-ai/retrieval-qa-chat")
+ document_chain = create_stuff_documents_chain(primary_qa_llm, retrieval_qa_prompt)
+
+ # Create the new retrieval chain with the advanced retriever:
+ from langchain.chains import create_retrieval_chain
+ retrieval_chain = create_retrieval_chain(advanced_retriever, document_chain)
+
+ # And we create our chatbot functions:
+ user_template = """{input}
+ Think through your response step by step.
+ """
+ @cl.on_chat_start  # marks a function that will be executed at the start of a user session
+ async def start_chat():
+     settings = {
+         "model": "gpt-4o",
+         # "model": "gpt-4",
+         "temperature": 1.0,
+         "max_tokens": 1500,
+         "top_p": 1,
+         "frequency_penalty": 0,
+         "presence_penalty": 0,
+     }
+
+     cl.user_session.set("settings", settings)
+
+
+ @cl.on_message  # marks a function that should be run each time the chatbot receives a message from a user
+ async def main(message: cl.Message):
+     settings = cl.user_session.get("settings")
+
+     client = AsyncOpenAI(
+         api_key=os.environ.get("OPENAI_API_KEY"),
+     )
+     print(message.content)
+
+     prompt = Prompt(
+         # provider=ChatOpenAI.id,
+         provider="ChatOpenAI",
+         messages=[
+             PromptMessage(
+                 role="system",
+                 template=template,
+                 formatted=template,
+             ),
+             PromptMessage(
+                 role="user",
+                 template=user_template,
+                 formatted=user_template.format(input=message.content),
+             ),
+         ],
+         inputs={"input": message.content},
+         settings=settings,
+     )
+
+     print([m.to_openai() for m in prompt.messages])
+
+     msg = cl.Message(content="")
+
+
+     # Update the prompt object with the completion
+     result = retrieval_chain.invoke({"input": message.content})
+     msg.content = result["answer"]
+     msg.prompt = prompt
+
+     # Send and close the message stream
+     await msg.send()
+
+
+ # Example query: "What are the key differences between FRS 102 and IFRS for SMEs?
+ # Please provide page numbers corresponding to references in the provided material for further information"
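
Editor's note on the chain wiring above: create_retrieval_chain(advanced_retriever, document_chain) returns a dict with the keys "input", "context" (the retrieved document chunks) and "answer", and app.py only surfaces "answer" in the Chainlit message. Below is a minimal sketch of exercising the same chain outside the Chainlit UI, reusing the example query left as a comment at the end of app.py. It assumes OPENAI_API_KEY is set, FRC.pdf sits next to app.py, and app.py can be imported as a module; the file name smoke_test.py and everything in it are illustrative, not part of this commit.

# smoke_test.py -- hypothetical helper, not part of this commit.
# Assumes OPENAI_API_KEY is set, FRC.pdf is present, and app.py is importable
# (importing it builds the FAISS index and the retrieval chain at module load time).
from app import retrieval_chain

question = (
    "What are the key differences between FRS 102 and IFRS for SMEs? "
    "Please provide page numbers corresponding to references in the provided material."
)

# create_retrieval_chain returns {"input": ..., "context": [Document, ...], "answer": ...}
result = retrieval_chain.invoke({"input": question})

print(result["answer"])                # the generated answer shown in the Chainlit message
print(len(result["context"]))          # how many chunks the MultiQueryRetriever pulled in
for doc in result["context"]:
    print(doc.metadata.get("page"))    # PyMuPDFLoader records the source page number in metadata
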
chainlit.md ADDED
@@ -0,0 +1,2 @@
+ ## What would you like to ask about the FRS 102?
+
requirements.txt ADDED
@@ -0,0 +1,14 @@
+ chainlit==0.7.700
+ cohere==4.37
+ openai
+ tiktoken
+ python-dotenv==1.0.0
+ langchain
+ langchain-openai
+ langchain_core
+ langchain-community
+ langchainhub
+ ragas
+ faiss_cpu
+ pymupdf
+ pandas