Upload 4 files
Uploaded AgriXpert Streamlit files
- .env +1 -0
- app.py +253 -0
- chatbot.py +405 -0
- requirements.txt +6 -0
.env
ADDED
@@ -0,0 +1 @@
OPENAI_API_KEY=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
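Note: chatbot.py does not read this file directly; it loads the key through python-dotenv and then looks it up in the process environment. A minimal sketch of that loading pattern (assuming the .env file sits next to the code) is:

import os
from dotenv import load_dotenv, find_dotenv

# Copy the .env entries into the process environment, then read the key
# the same way chatbot.py does before constructing ChatOpenAI.
load_dotenv(find_dotenv())
api_key = os.environ['OPENAI_API_KEY']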
app.py
ADDED
@@ -0,0 +1,253 @@
import streamlit as st
from streamlit_chat import message
from chatbot import DualChatbot
import time
from gtts import gTTS
from io import BytesIO


# Define the language type settings
LANGUAGES = ['English', 'German', 'Spanish', 'French', 'Swahili']
SESSION_LENGTHS = ['Short', 'Long']
PROFICIENCY_LEVELS = ['Beginner', 'Intermediate', 'Advanced']
MAX_EXCHANGE_COUNTS = {
    'Short': {'Conversation': 4, 'Debate': 4},
    'Long': {'Conversation': 8, 'Debate': 8}
}
AUDIO_SPEECH = {
    'English': 'en',
    'German': 'de',
    'Spanish': 'es',
    'French': 'fr',
    'Swahili': 'sw'
}
AVATAR_SEED = [123, 42]

# Define backbone llm
engine = 'OpenAI'

# Set the title of the app
st.title('Agrixpert Bot 🤖')

# Set the description of the app
st.markdown("""
This app generates a dialogue between a farmer and an agricultural expert to help farmers make better farming decisions.

Choose your desired settings and press 'Generate' to start 🚀
""")

# Add a selectbox for learning mode
learning_mode = st.sidebar.selectbox('Interaction Mode 📖', ('Conversation', 'Debate'))

if learning_mode == 'Conversation':
    role1 = st.sidebar.text_input('Role 1 🎭')
    action1 = st.sidebar.text_input('Action 1 🗣️')
    role2 = st.sidebar.text_input('Role 2 🎭')
    action2 = st.sidebar.text_input('Action 2 🗣️')
    scenario = st.sidebar.text_input('Scenario 🎥')
    time_delay = 2

    # Configure role dictionary
    role_dict = {
        'role1': {'name': role1, 'action': action1},
        'role2': {'name': role2, 'action': action2}
    }

else:
    scenario = st.sidebar.text_input('Debate Topic 💬')

    # Configure role dictionary
    role_dict = {
        'role1': {'name': 'Proponent'},
        'role2': {'name': 'Opponent'}
    }
    time_delay = 5

language = st.sidebar.selectbox('Target Language 🔤', LANGUAGES)
session_length = st.sidebar.selectbox('Session Length ⏰', SESSION_LENGTHS)
proficiency_level = st.sidebar.selectbox('Proficiency Level 🏆', PROFICIENCY_LEVELS)


if "bot1_mesg" not in st.session_state:
    st.session_state["bot1_mesg"] = []

if "bot2_mesg" not in st.session_state:
    st.session_state["bot2_mesg"] = []

if 'batch_flag' not in st.session_state:
    st.session_state["batch_flag"] = False

if 'translate_flag' not in st.session_state:
    st.session_state["translate_flag"] = False

if 'audio_flag' not in st.session_state:
    st.session_state["audio_flag"] = False

if 'message_counter' not in st.session_state:
    st.session_state["message_counter"] = 0


def show_messages(mesg_1, mesg_2, message_counter,
                  time_delay, batch=False, audio=False,
                  translation=False):
    """Display conversation exchanges. This helper function supports
    displaying original texts, translated texts, and audio speech.

    Args:
    --------
    mesg_1: messages spoken by the first bot
    mesg_2: messages spoken by the second bot
    message_counter: create unique ID key for chat messages
    time_delay: time interval between conversations
    batch: True/False to indicate if conversations will be shown
           all together or with a certain time delay.
    audio: True/False to indicate if the audio speech needs to
           be appended to the texts
    translation: True/False to indicate if the translated texts need to
                 be displayed

    Output:
    -------
    message_counter: updated counter for ID key
    """

    for i, mesg in enumerate([mesg_1, mesg_2]):
        # Show original exchange
        message(f"{mesg['content']}", is_user=i==1, avatar_style="bottts",
                seed=AVATAR_SEED[i],
                key=message_counter)
        message_counter += 1

        # Mimic time interval between conversations
        # (this time delay only appears when generating
        # the conversation script for the first time)
        if not batch:
            time.sleep(time_delay)

        # Show translated exchange
        if translation:
            message(f"{mesg['translation']}", is_user=i==1, avatar_style="bottts",
                    seed=AVATAR_SEED[i],
                    key=message_counter)
            message_counter += 1

        # Append audio to the exchange
        if audio:
            tts = gTTS(text=mesg['content'], lang=AUDIO_SPEECH[language])
            sound_file = BytesIO()
            tts.write_to_fp(sound_file)
            st.audio(sound_file)

    return message_counter


# Define the button layout at the beginning
translate_col, original_col, audio_col = st.columns(3)

# Create the conversation container
conversation_container = st.container()

if 'dual_chatbots' not in st.session_state:

    if st.sidebar.button('Generate'):

        # Add flag to indicate if this is the first time running the script
        st.session_state["first_time_exec"] = True

        with conversation_container:
            if learning_mode == 'Conversation':
                st.write(f"""#### The following conversation happens between
                         {role1} and {role2} {scenario} 🎭""")

            else:
                st.write(f"""#### Debate 💬: {scenario}""")

            # Instantiate dual-chatbot system
            dual_chatbots = DualChatbot(engine, role_dict, language, scenario,
                                        proficiency_level, learning_mode, session_length)
            st.session_state['dual_chatbots'] = dual_chatbots

            # Start exchanges
            for _ in range(MAX_EXCHANGE_COUNTS[session_length][learning_mode]):
                output1, output2, translate1, translate2 = dual_chatbots.step()

                mesg_1 = {"role": dual_chatbots.chatbots['role1']['name'],
                          "content": output1, "translation": translate1}
                mesg_2 = {"role": dual_chatbots.chatbots['role2']['name'],
                          "content": output2, "translation": translate2}

                new_count = show_messages(mesg_1, mesg_2,
                                          st.session_state["message_counter"],
                                          time_delay=time_delay, batch=False,
                                          audio=False, translation=False)
                st.session_state["message_counter"] = new_count

                # Update session state
                st.session_state.bot1_mesg.append(mesg_1)
                st.session_state.bot2_mesg.append(mesg_2)


if 'dual_chatbots' in st.session_state:

    # Show translation
    if translate_col.button('Translate to English'):
        st.session_state['translate_flag'] = True
        st.session_state['batch_flag'] = True

    # Show original text
    if original_col.button('Show original'):
        st.session_state['translate_flag'] = False
        st.session_state['batch_flag'] = True

    # Append audio
    if audio_col.button('Play audio'):
        st.session_state['audio_flag'] = True
        st.session_state['batch_flag'] = True

    # Retrieve generated conversation & chatbots
    mesg1_list = st.session_state.bot1_mesg
    mesg2_list = st.session_state.bot2_mesg
    dual_chatbots = st.session_state['dual_chatbots']

    # Control whether messages appear
    if st.session_state["first_time_exec"]:
        st.session_state['first_time_exec'] = False

    else:
        # Show complete message
        with conversation_container:

            if learning_mode == 'Conversation':
                st.write(f"""#### {role1} and {role2} {scenario} 🎭""")

            else:
                st.write(f"""#### Debate 💬: {scenario}""")

            for mesg_1, mesg_2 in zip(mesg1_list, mesg2_list):
                new_count = show_messages(mesg_1, mesg_2,
                                          st.session_state["message_counter"],
                                          time_delay=time_delay,
                                          batch=st.session_state['batch_flag'],
                                          audio=st.session_state['audio_flag'],
                                          translation=st.session_state['translate_flag'])
                st.session_state["message_counter"] = new_count


    # # Create summary for key learning points
    # summary_expander = st.expander('Key Learning Points')
    # scripts = []
    # for mesg_1, mesg_2 in zip(mesg1_list, mesg2_list):
    #     for i, mesg in enumerate([mesg_1, mesg_2]):
    #         scripts.append(mesg['role'] + ': ' + mesg['content'])

    # # Compile summary
    # if "summary" not in st.session_state:
    #     summary = dual_chatbots.summary(scripts)
    #     st.session_state["summary"] = summary
    # else:
    #     summary = st.session_state["summary"]

    # with summary_expander:
    #     st.markdown(f"**Here is the learning summary:**")
    #     st.write(summary)
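Note: the button handling above leans on Streamlit's rerun model: every click reruns the whole script, so anything that must survive a rerun (the generated messages, the DualChatbot instance, the display flags) lives in st.session_state. A minimal sketch of that pattern, independent of this app, is:

import streamlit as st

# Initialize persistent state only on the first run of the script.
if 'counter' not in st.session_state:
    st.session_state['counter'] = 0

# A button click triggers a full rerun; the stored value survives it.
if st.button('Increment'):
    st.session_state['counter'] += 1

st.write(st.session_state['counter'])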
chatbot.py
ADDED
@@ -0,0 +1,405 @@
import os
import openai
from langchain.prompts import (
    ChatPromptTemplate,
    MessagesPlaceholder,
    SystemMessagePromptTemplate,
    HumanMessagePromptTemplate
)
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.chains import ConversationChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from dotenv import load_dotenv, find_dotenv

# Load environmental variables
_ = load_dotenv(find_dotenv())


class Chatbot:
    """Class definition for a single chatbot with memory, created with LangChain."""

    def __init__(self, engine):
        """Select backbone large language model, as well as instantiate
        the memory for creating language chain in LangChain.

        Args:
        --------------
        engine: the backbone llm-based chat model.
        """

        # Instantiate llm
        if engine == 'OpenAI':
            openai.api_key = os.environ['OPENAI_API_KEY']
            self.llm = ChatOpenAI(
                model_name="gpt-3.5-turbo",
                temperature=0.7
            )
        else:
            raise KeyError("Currently unsupported chat model type!")

        # Instantiate memory
        self.memory = ConversationBufferMemory(return_messages=True)

    def instruct(self, role, oppo_role, language, scenario,
                 session_length, proficiency_level,
                 learning_mode, starter=False):
        """Determine the context of chatbot interaction.

        Args:
        -----------
        role: the role played by the current bot.
        oppo_role: the role played by the opponent bot.
        language: the language in which the conversation/debate will be conducted. This is
                  the target language the user is trying to learn.
        scenario: for conversation, scenario represents the place where the conversation
                  is happening; for debate, scenario represents the debating topic.
        session_length: the number of exchanges between two chatbots. Two levels are possible:
                        "Short" or "Long".
        proficiency_level: assumed user's proficiency level in the target language. This
                           provides the guideline for the chatbots in terms of the
                           language complexity they will use. Three levels are possible:
                           "Beginner", "Intermediate", and "Advanced".
        learning_mode: two modes are possible for language learning purposes:
                       "Conversation" --> where two bots are chatting in a specified scenario;
                       "Debate" --> where two bots are debating on a specified topic.
        starter: flag to indicate if the current chatbot should lead the talking.
        """

        # Define language settings
        self.role = role
        self.oppo_role = oppo_role
        self.language = language
        self.scenario = scenario
        self.session_length = session_length
        self.proficiency_level = proficiency_level
        self.learning_mode = learning_mode
        self.starter = starter

        # Define prompt template
        prompt = ChatPromptTemplate.from_messages([
            SystemMessagePromptTemplate.from_template(self._specify_system_message()),
            MessagesPlaceholder(variable_name="history"),
            HumanMessagePromptTemplate.from_template("""{input}""")
        ])

        # Create conversation chain
        self.conversation = ConversationChain(memory=self.memory, prompt=prompt,
                                              llm=self.llm, verbose=False)

    def _specify_system_message(self):
        """Specify the behavior of the chatbot, which consists of the following aspects:
        - general context: conducting conversation/debate under given scenario
        - the language spoken
        - purpose of the simulated conversation/debate
        - language complexity requirement
        - exchange length requirement
        - other nuance constraints

        Outputs:
        --------
        prompt: instructions for the chatbot.
        """

        # Determine the number of exchanges between two bots
        exchange_counts_dict = {
            'Short': {'Conversation': 4, 'Debate': 4},
            'Long': {'Conversation': 8, 'Debate': 8}
        }
        exchange_counts = exchange_counts_dict[self.session_length][self.learning_mode]

        # Determine number of arguments in one debate round
        argument_num_dict = {
            'Beginner': 4,
            'Intermediate': 6,
            'Advanced': 8
        }

        # Determine language complexity
        if self.proficiency_level == 'Beginner':
            lang_requirement = """use as basic and simple vocabulary and
            sentence structures as possible. Must avoid idioms, slang,
            and complex grammatical constructs."""

        elif self.proficiency_level == 'Intermediate':
            lang_requirement = """use a wider range of vocabulary and a variety of sentence structures.
            You can include some idioms and colloquial expressions,
            but avoid highly technical language or complex literary expressions."""

        elif self.proficiency_level == 'Advanced':
            lang_requirement = """use sophisticated vocabulary, complex sentence structures, idioms,
            colloquial expressions, and technical language where appropriate."""

        else:
            raise KeyError('Currently unsupported proficiency level!')

        # Compile bot instructions
        if self.learning_mode == 'Conversation':
            prompt = f"""You are an AI that is good at role-playing.
            You are simulating a real-life conversation happening in a {self.scenario} scenario.
            In this scenario, you are playing as a {self.role['name']} {self.role['action']}, speaking to a
            {self.oppo_role['name']} {self.oppo_role['action']}.
            Your conversation should only be conducted in {self.language}. Do not translate.
            This simulated {self.learning_mode} is designed for {self.language} farmers to understand best farming practices in {self.language}.
            You should assume the farmers' proficiency level in
            {self.language} is {self.proficiency_level}. Therefore, you should {lang_requirement}.
            You should finish the conversation within {exchange_counts} exchanges with the {self.oppo_role['name']}.
            Make your conversation with {self.oppo_role['name']} natural and typical in the considered scenario in
            {self.language} culture."""

        elif self.learning_mode == 'Debate':
            prompt = f"""You are an AI that is good at debating.
            You are now engaged in a debate with the following topic: {self.scenario}.
            In this debate, you are taking on the role of a {self.role['name']}.
            Always remember your stances in the debate.
            Your debate should only be conducted in {self.language}. Do not translate.
            This simulated debate is designed for {self.language} farmers to understand best farming practices in {self.language}.
            You should assume the farmers' proficiency level in {self.language}
            is {self.proficiency_level}. Therefore, you should {lang_requirement}.
            You will exchange opinions with another AI (who plays the {self.oppo_role['name']} role)
            {exchange_counts} times.
            Every time you speak, you can speak no more than
            {argument_num_dict[self.proficiency_level]} sentences."""

        else:
            raise KeyError('Currently unsupported learning mode!')

        # Give bot instructions
        if self.starter:
            # In case the current bot is the first one to speak
            prompt += f"You are leading the {self.learning_mode}. \n"

        else:
            # In case the current bot is the second one to speak
            prompt += f"Wait for the {self.oppo_role['name']}'s statement."

        return prompt


class DualChatbot:
    """Class definition for the dual-chatbot interaction system, created with LangChain."""

    def __init__(self, engine, role_dict, language, scenario, proficiency_level,
                 learning_mode, session_length):
        """Args:
        --------------
        engine: the backbone llm-based chat model.
                "OpenAI" stands for OpenAI chat model;
                Other chat models are also possible in LangChain,
                see https://python.langchain.com/en/latest/modules/models/chat/integrations.html
        role_dict: dictionary to hold information regarding roles.
                   For conversation mode, an example role_dict is:
                   role_dict = {
                       'role1': {'name': 'Customer', 'action': 'ordering food'},
                       'role2': {'name': 'Waitstaff', 'action': 'taking the order'}
                   }
                   For debate mode, an example role_dict is:
                   role_dict = {
                       'role1': {'name': 'Proponent'},
                       'role2': {'name': 'Opponent'}
                   }
        language: the language in which the conversation/debate will be conducted. This is
                  the target language the user is trying to learn.
        scenario: for conversation, scenario represents the place where the conversation
                  is happening; for debate, scenario represents the debating topic.
        proficiency_level: assumed user's proficiency level in the target language. This
                           provides the guideline for the chatbots in terms of the
                           language complexity they will use. Three levels are possible:
                           "Beginner", "Intermediate", and "Advanced".
        session_length: the number of exchanges between two chatbots. Two levels are possible:
                        "Short" or "Long".
        learning_mode: two modes are possible for language learning purposes:
                       "Conversation" --> where two bots are chatting in a specified scenario;
                       "Debate" --> where two bots are debating on a specified topic.
        """

        # Instantiate two chatbots
        self.engine = engine
        self.proficiency_level = proficiency_level
        self.language = language
        self.chatbots = role_dict
        for k in role_dict.keys():
            self.chatbots[k].update({'chatbot': Chatbot(engine)})

        # Assigning roles for two chatbots
        self.chatbots['role1']['chatbot'].instruct(role=self.chatbots['role1'],
                                                   oppo_role=self.chatbots['role2'],
                                                   language=language, scenario=scenario,
                                                   session_length=session_length,
                                                   proficiency_level=proficiency_level,
                                                   learning_mode=learning_mode, starter=True)

        self.chatbots['role2']['chatbot'].instruct(role=self.chatbots['role2'],
                                                   oppo_role=self.chatbots['role1'],
                                                   language=language, scenario=scenario,
                                                   session_length=session_length,
                                                   proficiency_level=proficiency_level,
                                                   learning_mode=learning_mode, starter=False)

        # Add session length
        self.session_length = session_length

        # Prepare conversation
        self._reset_conversation_history()

    def step(self):
        """Make one exchange round between two chatbots.

        Outputs:
        --------
        output1: response of the first chatbot
        output2: response of the second chatbot
        translate1: translation of the first response
        translate2: translation of the second response
        """

        # Chatbot1 speaks
        output1 = self.chatbots['role1']['chatbot'].conversation.predict(input=self.input1)
        self.conversation_history.append({"bot": self.chatbots['role1']['name'], "text": output1})

        # Pass output of chatbot1 as input to chatbot2
        self.input2 = output1

        # Chatbot2 speaks
        output2 = self.chatbots['role2']['chatbot'].conversation.predict(input=self.input2)
        self.conversation_history.append({"bot": self.chatbots['role2']['name'], "text": output2})

        # Pass output of chatbot2 as input to chatbot1
        self.input1 = output2

        # Translate responses
        translate1 = self.translate(output1)
        translate2 = self.translate(output2)

        return output1, output2, translate1, translate2

    def translate(self, message):
        """Translate the generated script into English.

        Args:
        --------
        message: input message that needs to be translated.

        Outputs:
        --------
        translation: translated message.
        """

        if self.language == 'English':
            # No translation performed
            translation = 'Translation: ' + message

        else:
            # Instantiate translator
            if self.engine == 'OpenAI':
                # Reminder: need to set up openAI API key
                # (e.g., via environment variable OPENAI_API_KEY)
                self.translator = ChatOpenAI(
                    model_name="gpt-3.5-turbo",
                    temperature=0.7
                )

            else:
                raise KeyError("Currently unsupported translation model type!")

            # Specify instruction
            instruction = """Translate the following sentence from {src_lang}
            (source language) to {trg_lang} (target language).
            Here is the sentence in source language: \n
            {src_input}."""

            prompt = PromptTemplate(
                input_variables=["src_lang", "trg_lang", "src_input"],
                template=instruction,
            )

            # Create a language chain
            translator_chain = LLMChain(llm=self.translator, prompt=prompt)
            translation = translator_chain.predict(src_lang=self.language,
                                                   trg_lang="English",
                                                   src_input=message)

        return translation

    def summary(self, script):
        """Distill key language learning points from the generated scripts.

        Args:
        --------
        script: the generated conversation between two bots.

        Outputs:
        --------
        summary: summary of the key learning points.
        """

        # Instantiate summary bot
        if self.engine == 'OpenAI':
            # Reminder: need to set up openAI API key
            # (e.g., via environment variable OPENAI_API_KEY)
            self.summary_bot = ChatOpenAI(
                model_name="gpt-3.5-turbo",
                temperature=0.7
            )

        else:
            raise KeyError("Currently unsupported summary model type!")

        # Specify instruction
        instruction = """The following text is a simulated conversation in
        {src_lang}. The goal of this text is to aid {src_lang} learners to learn
        real-life usage of {src_lang}. Therefore, your task is to summarize the key
        learning points based on the given text. Specifically, you should summarize
        the key vocabulary, grammar points, and function phrases that could be important
        for students learning {src_lang}. Your summary should be conducted in English, but
        use examples from the text in the original language where appropriate.
        Remember your target students have a proficiency level of
        {proficiency} in {src_lang}. Your summarization must match their
        proficiency level.

        The conversation is: \n
        {script}."""

        prompt = PromptTemplate(
            input_variables=["src_lang", "proficiency", "script"],
            template=instruction,
        )

        # Create a language chain
        summary_chain = LLMChain(llm=self.summary_bot, prompt=prompt)
        summary = summary_chain.predict(src_lang=self.language,
                                        proficiency=self.proficiency_level,
                                        script=script)

        return summary

    def _reset_conversation_history(self):
        """Reset the conversation history."""
        # Placeholder for conversation history
        self.conversation_history = []

        # Inputs for two chatbots
        self.input1 = "Start the conversation."
        self.input2 = ""
requirements.txt
ADDED
@@ -0,0 +1,6 @@
gTTS==2.3.2
langchain==0.0.205
openai==0.27.4
streamlit==1.23.1
streamlit_chat==0.0.2.2
python-dotenv==0.21.0