ofermend committed
Commit ae62deb · 1 Parent(s): 1f9b570

version bump

Files changed (4)
  1. agent.py +5 -5
  2. app.py +3 -171
  3. requirements.txt +1 -1
  4. st_app.py +163 -0
agent.py CHANGED
@@ -50,22 +50,22 @@ def create_assistant_tools(cfg):
     vec_factory = VectaraToolFactory(vectara_api_key=cfg.api_key,
                                      vectara_customer_id=cfg.customer_id,
                                      vectara_corpus_id=cfg.corpus_id)
-    tools_factory = ToolsFactory()
+    summarizer = 'vectara-summary-ext-24-05-med-omni'
     query_tool = vec_factory.create_rag_tool(
-        tool_name = "justice_harvard_query",
+        tool_name = "ask_about_justice_harvard",
         tool_description = """
         Answer questions about the justice, morality, politics and related topics,
         based on transcripts of recordings from the Justice Harvard class that includes a lot of content on these topics.
-        When using the tool it's best to ask simple short questions. You can break complex questions into sub-queries.
         """,
         tool_args_schema = JusticeHarvardArgs,
-        reranker = "multilingual_reranker_v1", rerank_k = 100,
+        reranker = "multilingual_reranker_v1", rerank_k = 100,
         n_sentences_before = 2, n_sentences_after = 2, lambda_val = 0.005,
         summary_num_results = 10,
-        vectara_summarizer = 'vectara-experimental-summary-ext-2023-12-11-med-omni',
+        vectara_summarizer = summarizer,
         include_citations = True,
     )
 
+    tools_factory = ToolsFactory()
     return (
         [tools_factory.create_tool(tool) for tool in
             [
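The hunk above makes three changes: the RAG tool is renamed to ask_about_justice_harvard, the summarizer is switched to vectara-summary-ext-24-05-med-omni (held in a local variable), and ToolsFactory is now instantiated just before the tool list is built. Below is a minimal, hedged sketch of the same pattern, not the repo's full agent.py. It assumes the factories are importable from vectara_agentic.tools and that JusticeHarvardArgs is a simple pydantic schema with a single query field; the real schema and the extra wrapped tools may differ.

# Sketch only: mirrors the pattern in the new agent.py hunk.
from pydantic import BaseModel, Field
from vectara_agentic.tools import VectaraToolFactory, ToolsFactory  # assumed import path

class JusticeHarvardArgs(BaseModel):
    # Assumed shape of the args schema; the repo's version may carry more fields.
    query: str = Field(..., description="The user question about justice, morality, or politics.")

def create_assistant_tools(cfg):
    vec_factory = VectaraToolFactory(vectara_api_key=cfg.api_key,
                                     vectara_customer_id=cfg.customer_id,
                                     vectara_corpus_id=cfg.corpus_id)
    summarizer = 'vectara-summary-ext-24-05-med-omni'   # summarizer chosen in this commit
    query_tool = vec_factory.create_rag_tool(
        tool_name="ask_about_justice_harvard",
        tool_description="Answer questions about justice, morality, and politics "
                         "based on the Justice Harvard class transcripts.",
        tool_args_schema=JusticeHarvardArgs,
        reranker="multilingual_reranker_v1", rerank_k=100,
        n_sentences_before=2, n_sentences_after=2, lambda_val=0.005,
        summary_num_results=10,
        vectara_summarizer=summarizer,
        include_citations=True,
    )
    tools_factory = ToolsFactory()                      # built only when extra tools get wrapped
    extra_tools = [tools_factory.create_tool(fn) for fn in []]  # the repo wraps its own helpers here
    return [query_tool] + extra_tools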
app.py CHANGED
@@ -1,184 +1,16 @@
-from PIL import Image
-import sys
+import streamlit as st
+from st_app import launch_bot
 import uuid
 
 import nest_asyncio
 import asyncio
 
-import streamlit as st
-from streamlit_pills import pills
-from streamlit_feedback import streamlit_feedback
-
-from vectara_agentic.agent import AgentStatusType
-
-from agent import initialize_agent, get_agent_config, teaching_styles, languages
-from utils import thumbs_feedback, escape_dollars_outside_latex, send_amplitude_data
-
-initial_prompt = "How can I help you today?"
-
 # Setup for HTTP API Calls to Amplitude Analytics
 if 'device_id' not in st.session_state:
     st.session_state.device_id = str(uuid.uuid4())
 
 if "feedback_key" not in st.session_state:
-    st.session_state.feedback_key = 0
-
-def toggle_logs():
-    st.session_state.show_logs = not st.session_state.show_logs
-
-def show_example_questions():
-    if len(st.session_state.example_messages) > 0 and st.session_state.first_turn:
-        selected_example = pills("Queries to Try:", st.session_state.example_messages, index=None)
-        if selected_example:
-            st.session_state.ex_prompt = selected_example
-            st.session_state.first_turn = False
-            return True
-    return False
-
-def update_func(status_type: AgentStatusType, msg: str):
-    if status_type != AgentStatusType.AGENT_UPDATE:
-        output = f"{status_type.value} - {msg}"
-        st.session_state.log_messages.append(output)
-
-async def launch_bot():
-    def reset():
-        st.session_state.messages = [{"role": "assistant", "content": initial_prompt, "avatar": "🦖"}]
-        st.session_state.thinking_message = "Agent at work..."
-        st.session_state.log_messages = []
-        st.session_state.prompt = None
-        st.session_state.ex_prompt = None
-        st.session_state.first_turn = True
-        st.session_state.show_logs = False
-        st.session_state.agent = initialize_agent(cfg, update_func=update_func)
-        st.session_state.agent.report()
-
-    if 'cfg' not in st.session_state:
-        cfg = get_agent_config()
-        st.session_state.style = cfg.style
-        st.session_state.language = cfg.language
-        st.session_state.student_age = cfg.student_age
-        st.session_state.cfg = cfg
-        st.session_state.ex_prompt = None
-        example_messages = [example.strip() for example in cfg.examples.split(";")] if cfg.examples else []
-        st.session_state.example_messages = [em for em in example_messages if len(em)>0]
-        reset()
-
-    cfg = st.session_state.cfg
-
-    # left side content
-    with st.sidebar:
-        image = Image.open('Vectara-logo.png')
-        st.image(image, width=175)
-        st.markdown(f"## {cfg['demo_welcome']}")
-        st.markdown(f"{cfg['demo_description']}")
-
-        st.markdown("\n")
-        cfg.style = st.selectbox('Teacher Style:', teaching_styles)
-        if st.session_state.style != cfg.style:
-            st.session_state.style = cfg.style
-            reset()
-            st.rerun()
-
-        cfg.language = st.selectbox('Language:', languages.keys())
-        if st.session_state.language != cfg.language:
-            st.session_state.language = cfg.language
-            reset()
-            st.rerun()
-
-        cfg.student_age = st.number_input(
-            'Student age:', min_value=13, max_value=99, value=cfg.student_age,
-            step=1, format='%i'
-        )
-        if st.session_state.student_age != cfg.student_age:
-            st.session_state.student_age = cfg.student_age
-            reset()
-            st.rerun()
-
-        st.markdown("\n")
-        bc1, _ = st.columns([1, 1])
-        with bc1:
-            if st.button('Start Over'):
-                reset()
-                st.rerun()
-
-        st.divider()
-        st.markdown(
-            "## How this works?\n"
-            "This app was built with [Vectara](https://vectara.com).\n\n"
-            "It demonstrates the use of Agentic RAG functionality with Vectara"
-        )
-
-    if "messages" not in st.session_state.keys():
-        reset()
-
-    # Display chat messages
-    for message in st.session_state.messages:
-        with st.chat_message(message["role"], avatar=message["avatar"]):
-            st.write(message["content"])
-
-    example_container = st.empty()
-    with example_container:
-        if show_example_questions():
-            example_container.empty()
-            st.session_state.first_turn = False
-            st.rerun()
-
-    # User-provided prompt
-    if st.session_state.ex_prompt:
-        prompt = st.session_state.ex_prompt
-    else:
-        prompt = st.chat_input()
-    if prompt:
-        st.session_state.messages.append({"role": "user", "content": prompt, "avatar": '🧑‍💻'})
-        st.session_state.prompt = prompt  # Save the prompt in session state
-        st.session_state.log_messages = []
-        st.session_state.show_logs = False
-        with st.chat_message("user", avatar='🧑‍💻'):
-            print(f"Starting new question: {prompt}\n")
-            st.write(prompt)
-        st.session_state.ex_prompt = None
-
-    # Generate a new response if last message is not from assistant
-    if st.session_state.prompt:
-        with st.chat_message("assistant", avatar='🤖'):
-            with st.spinner(st.session_state.thinking_message):
-                res = st.session_state.agent.chat(st.session_state.prompt)
-                res = escape_dollars_outside_latex(res)
-                message = {"role": "assistant", "content": res, "avatar": '🤖'}
-                st.session_state.messages.append(message)
-                st.markdown(res)
-
-        send_amplitude_data(
-            user_query=st.session_state.messages[-2]["content"],
-            bot_response=st.session_state.messages[-1]["content"],
-            demo_name=cfg['demo_name']
-        )
-
-        st.session_state.ex_prompt = None
-        st.session_state.prompt = None
-        st.session_state.first_turn = False
-        st.rerun()
-
-    # Record user feedback
-    if (st.session_state.messages[-1]["role"] == "assistant") & (st.session_state.messages[-1]["content"] != "How can I help you today?"):
-        streamlit_feedback(
-            feedback_type="thumbs", on_submit = thumbs_feedback, key = st.session_state.feedback_key,
-            kwargs = {"user_query": st.session_state.messages[-2]["content"],
-                      "bot_response": st.session_state.messages[-1]["content"],
-                      "demo_name": cfg["demo_name"]}
-        )
-
-    log_placeholder = st.empty()
-    with log_placeholder.container():
-        if st.session_state.show_logs:
-            st.button("Hide Logs", on_click=toggle_logs)
-            for msg in st.session_state.log_messages:
-                st.text(msg)
-        else:
-            if len(st.session_state.log_messages) > 0:
-                st.button("Show Logs", on_click=toggle_logs)
-
-    sys.stdout.flush()
+    st.session_state.feedback_key = 0
 
 if __name__ == "__main__":
     st.set_page_config(page_title="Justice Harard Teching Assistant", layout="wide")
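After this commit, app.py is only a thin entry point: the Streamlit UI moves into st_app.launch_bot. The hunk ends at st.set_page_config, so how launch_bot is actually invoked is not visible here; the sketch below fills that gap with a plausible nest_asyncio/asyncio run call, which is an assumption rather than the commit's literal code.

# Sketch of the slimmed-down app.py; the last two lines are assumed, not shown in the hunk.
import uuid
import asyncio
import nest_asyncio
import streamlit as st
from st_app import launch_bot

# Setup for HTTP API Calls to Amplitude Analytics
if 'device_id' not in st.session_state:
    st.session_state.device_id = str(uuid.uuid4())

if "feedback_key" not in st.session_state:
    st.session_state.feedback_key = 0

if __name__ == "__main__":
    st.set_page_config(page_title="Justice Harard Teching Assistant", layout="wide")
    nest_asyncio.apply()          # allow a nested event loop under Streamlit (assumed)
    asyncio.run(launch_bot())     # run the async UI now defined in st_app.py (assumed)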
requirements.txt CHANGED
@@ -6,4 +6,4 @@ streamlit_feedback==0.1.3
 uuid==1.30
 langdetect==1.0.9
 langcodes==3.4.0
-vectara-agentic==0.1.15
+vectara-agentic==0.1.16
st_app.py ADDED
@@ -0,0 +1,163 @@
+from PIL import Image
+import sys
+
+import streamlit as st
+from streamlit_pills import pills
+from streamlit_feedback import streamlit_feedback
+
+from utils import thumbs_feedback, escape_dollars_outside_latex, send_amplitude_data
+
+from vectara_agentic.agent import AgentStatusType
+from agent import initialize_agent, get_agent_config
+
+initial_prompt = "How can I help you today?"
+
+def toggle_logs():
+    st.session_state.show_logs = not st.session_state.show_logs
+
+def show_example_questions():
+    if len(st.session_state.example_messages) > 0 and st.session_state.first_turn:
+        selected_example = pills("Queries to Try:", st.session_state.example_messages, index=None)
+        if selected_example:
+            st.session_state.ex_prompt = selected_example
+            st.session_state.first_turn = False
+            return True
+    return False
+
+def update_func(status_type: AgentStatusType, msg: str):
+    if status_type != AgentStatusType.AGENT_UPDATE:
+        output = f"{status_type.value} - {msg}"
+        st.session_state.log_messages.append(output)
+
+async def launch_bot():
+    def reset():
+        st.session_state.messages = [{"role": "assistant", "content": initial_prompt, "avatar": "🦖"}]
+        st.session_state.thinking_message = "Agent at work..."
+        st.session_state.log_messages = []
+        st.session_state.prompt = None
+        st.session_state.ex_prompt = None
+        st.session_state.first_turn = True
+        st.session_state.logs_enabled = False
+        st.session_state.show_logs = False
+        if 'agent' not in st.session_state:
+            st.session_state.agent = initialize_agent(cfg, update_func=update_func)
+        else:
+            st.session_state.agent.clear_memory()
+
+    if 'cfg' not in st.session_state:
+        cfg = get_agent_config()
+        st.session_state.cfg = cfg
+        st.session_state.ex_prompt = None
+        example_messages = [example.strip() for example in cfg.examples.split(";")] if cfg.examples else []
+        st.session_state.example_messages = [em for em in example_messages if len(em)>0]
+        reset()
+
+    cfg = st.session_state.cfg
+
+    # left side content
+    with st.sidebar:
+        image = Image.open('Vectara-logo.png')
+        st.image(image, width=175)
+        st.markdown(f"## {cfg['demo_welcome']}")
+        st.markdown(f"{cfg['demo_description']}")
+
+        st.markdown("\n\n")
+        bc1, bc2 = st.columns([1, 1])
+        with bc1:
+            if st.button('Start Over'):
+                reset()
+                st.rerun()
+        with bc2:  # Updated button for enabling/disabling logs
+            if st.session_state.logs_enabled:
+                if st.button('Disable Logs', key='disable_logs'):
+                    st.session_state.logs_enabled = False
+                    st.rerun()
+            else:
+                if st.button('Enable Logs', key='enable_logs'):
+                    st.session_state.logs_enabled = True
+                    st.rerun()
+
+        st.divider()
+        st.markdown(
+            "## How this works?\n"
+            "This app was built with [Vectara](https://vectara.com).\n\n"
+            "It demonstrates the use of Agentic RAG functionality with Vectara"
+        )
+
+    if "messages" not in st.session_state.keys():
+        reset()
+
+    # Display chat messages
+    for message in st.session_state.messages:
+        with st.chat_message(message["role"], avatar=message["avatar"]):
+            st.write(message["content"])
+
+    example_container = st.empty()
+    with example_container:
+        if show_example_questions():
+            example_container.empty()
+            st.session_state.first_turn = False
+            st.rerun()
+
+    # User-provided prompt
+    if st.session_state.ex_prompt:
+        prompt = st.session_state.ex_prompt
+    else:
+        prompt = st.chat_input()
+    if prompt:
+        st.session_state.messages.append({"role": "user", "content": prompt, "avatar": '🧑‍💻'})
+        st.session_state.prompt = prompt  # Save the prompt in session state
+        st.session_state.log_messages = []
+        st.session_state.show_logs = False
+        with st.chat_message("user", avatar='🧑‍💻'):
+            print(f"Starting new question: {prompt}\n")
+            st.write(prompt)
+        st.session_state.ex_prompt = None
+
+    # Generate a new response if last message is not from assistant
+    if st.session_state.prompt:
+        with st.chat_message("assistant", avatar='🤖'):
+            with st.spinner(st.session_state.thinking_message):
+                res = st.session_state.agent.chat(st.session_state.prompt)
+                res = escape_dollars_outside_latex(res)
+                message = {"role": "assistant", "content": res, "avatar": '🤖'}
+                st.session_state.messages.append(message)
+                st.markdown(res)
+
+        send_amplitude_data(
+            user_query=st.session_state.messages[-2]["content"],
+            bot_response=st.session_state.messages[-1]["content"],
+            demo_name=cfg['demo_name']
+        )
+
+        st.session_state.ex_prompt = None
+        st.session_state.prompt = None
+        st.session_state.first_turn = False
+        st.rerun()
+
+    # Record user feedback
+    if (st.session_state.messages[-1]["role"] == "assistant") & (st.session_state.messages[-1]["content"] != initial_prompt):
+        if st.session_state.show_logs and st.session_state.logs_enabled:  # Only show logs if enabled
+            streamlit_feedback(
+                feedback_type="thumbs", on_submit=thumbs_feedback, key=st.session_state.feedback_key,
+                kwargs={"user_query": st.session_state.messages[-2]["content"],
+                        "bot_response": st.session_state.messages[-1]["content"],
+                        "demo_name": cfg["demo_name"]}
+            )
+
+    log_placeholder = st.empty()
+    with log_placeholder.container():
+        if st.session_state.logs_enabled:  # Show logs button only if log toggle is enabled
+            if st.session_state.show_logs:
+                st.button("Hide Logs", on_click=toggle_logs)
+                for msg in st.session_state.log_messages:
+                    if len(msg) > 100:  # Use text_area for longer messages
+                        st.text_area(label="Log", value=msg, height=100, disabled=True)
+                    else:
+                        st.text(msg)
+            else:
+                if len(st.session_state.log_messages) > 0:
+                    st.button("Show Logs", on_click=toggle_logs)
+
+
+    sys.stdout.flush()
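st_app.py wires streamlit_feedback to a thumbs_feedback helper from utils.py, which this commit does not touch. For reference only: streamlit-feedback calls on_submit with the widget's response dict first and then the kwargs supplied above, so a compatible callback could look roughly like the hypothetical sketch below; the repo's real helper presumably forwards the score to Amplitude instead.

# Hypothetical sketch of a thumbs_feedback-style callback; utils.py is not part of this diff.
import streamlit as st

def thumbs_feedback(feedback: dict, **kwargs) -> None:
    # feedback is the widget response, e.g. {"type": "thumbs", "score": "👍"}
    # kwargs carries the user_query / bot_response / demo_name passed in st_app.py
    score = feedback.get("score")
    print(f"feedback={score!r} on query={kwargs.get('user_query')!r} ({kwargs.get('demo_name')})")
    st.session_state.feedback_key += 1  # rotate the widget key so the next turn renders a fresh widget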