testing llamaparse
Browse files
app.py
CHANGED
@@ -1,15 +1,17 @@
|
|
|
|
1 |
import streamlit as st
|
2 |
|
3 |
from llama_index.llms.gemini import Gemini
|
4 |
from llama_index.llms.huggingface import HuggingFaceLLM
|
5 |
from llama_index.llms.mistralai import MistralAI
|
6 |
from llama_index.llms.openai import OpenAI
|
7 |
-
|
8 |
from llama_index.core import (
|
9 |
VectorStoreIndex,
|
10 |
Settings,
|
11 |
)
|
12 |
|
|
|
|
|
13 |
from streamlit_pdf_viewer import pdf_viewer
|
14 |
|
15 |
# Global configurations
|
@@ -80,9 +82,15 @@ with st.sidebar:
|
|
80 |
Settings.context_window = 4096 # max possible
|
81 |
|
82 |
|
83 |
-
# Enter Token
|
84 |
-
|
85 |
-
"Enter your token",
|
|
|
|
|
|
|
|
|
|
|
|
|
86 |
value=None
|
87 |
)
|
88 |
|
@@ -94,12 +102,18 @@ with st.sidebar:
|
|
94 |
|
95 |
if uploaded_file is not None:
|
96 |
# Parse the file
|
97 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
98 |
|
99 |
col1, col2 = st.columns(2)
|
100 |
|
101 |
with col1:
|
102 |
-
|
103 |
|
104 |
with col2:
|
105 |
if uploaded_file is not None:
|
|
|
import os
import tempfile

import streamlit as st
from llama_index.core import (
    VectorStoreIndex,
    Settings,
)
from llama_index.llms.gemini import Gemini
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.llms.mistralai import MistralAI
from llama_index.llms.openai import OpenAI
from llama_parse import LlamaParse
from streamlit_pdf_viewer import pdf_viewer
|
16 |
|
17 |
# Global configurations
|
|
|
82 |
Settings.context_window = 4096 # max possible
|
83 |
|
84 |
|
85 |
+
# Enter LLM Token
|
86 |
+
llm_token = st.text_input(
|
87 |
+
"Enter your LLM token",
|
88 |
+
value=None
|
89 |
+
)
|
90 |
+
|
91 |
+
# Enter parsing Token
|
92 |
+
parse_token = st.text_input(
|
93 |
+
"Enter your LlamaParse token",
|
94 |
value=None
|
95 |
)
|
96 |
|
|
|
if uploaded_file is not None:
    # Parse the uploaded file with LlamaParse.
    # LlamaParse.load_data() expects a file *path* (or raw bytes plus a
    # "file_name" entry in extra_info) — not a Streamlit UploadedFile —
    # so spill the upload to a temporary file and parse that path.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as tmp:
        tmp.write(uploaded_file.getvalue())
        tmp_path = tmp.name
    try:
        parser = LlamaParse(
            api_key=parse_token,  # can also be set in your env as LLAMA_CLOUD_API_KEY
            result_type="text",   # "markdown" and "text" are available
        )
        parsed_document = parser.load_data(tmp_path)
    finally:
        # Always remove the temporary copy, even if parsing raises.
        os.unlink(tmp_path)
|
col1, col2 = st.columns(2)

with col1:
    # parsed_document is only bound when a file was uploaded and parsed;
    # guard here (mirroring the col2 branch) so an empty session does not
    # raise NameError.
    if uploaded_file is not None:
        st.write(parsed_document)
|
117 |
|
118 |
with col2:
|
119 |
if uploaded_file is not None:
|