import logging

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

import json
import time
import traceback

import openai
import requests
import streamlit as st

import utils

# Fixed seed passed to chat completions so repeated prompts sample reproducibly.
SEED = 42


def get_client():
    """Build an OpenAI client from the project credentials held in utils."""
    return openai.OpenAI(
        api_key=utils.OPENAI_API_KEY,
        organization=utils.OPENAI_ORGANIZATION_ID,
    )


def getListOfCompanies(query, filters=None):
    """Search the vector index for companies matching *query* and format the hits.

    Stores the raw hits in ``st.session_state.db_search_results`` as a side
    effect, then returns a newline-joined string of "Description of company
    ..." lines for (at most) the first 20 hits that carry a 'Summary' field.

    :param query: free-text search query.
    :param filters: optional dict; only the 'country' key is consulted, and it
        overrides ``st.session_state.country``. Defaults to no overrides.
        (Was a mutable ``{}`` default — replaced with the None sentinel.)
    """
    filters = {} if filters is None else filters
    country_filters = filters['country'] if 'country' in filters else st.session_state.country
    st.session_state.db_search_results = utils.search_index(
        query,
        st.session_state.top_k,
        st.session_state.region,
        country_filters,
        st.session_state.retriever,
        st.session_state.index_namespace,
    )
    descriptions = "\n".join(
        [
            f"Description of company \"{res['name']}\": {res['data']['Summary']}.\n"
            for res in st.session_state.db_search_results[:20]
            if 'Summary' in res['data']
        ]
    )
    return descriptions


def report_error(txt):
    """Record *txt* on the module logger at DEBUG level (no user-facing output)."""
    logger.debug(f"\nError: \n{txt}")


def wait_for_response(thread, run):
    """Poll *run* on *thread* until it settles or a 60 s timeout elapses.

    Returns the thread's message list either way; on timeout the condition is
    only logged via report_error, not raised.

    NOTE(review): the original source was corrupted here — the comparison
    operators and the polling loop body were stripped (everything between a
    '<' and the next '>' is missing). The body below was reconstructed to
    mirror run_assistant()'s retrieve/sleep loop; confirm against version
    control.
    """
    timeout = 60  # timeout in seconds
    started = time.time()
    while time.time() - started < timeout:
        run = st.session_state.openai_client.beta.threads.runs.retrieve(
            thread_id=thread.id,
            run_id=run.id,
        )
        # Any terminal status (completed, failed, cancelled, ...) stops the poll.
        if run.status not in ("queued", "in_progress"):
            break
        time.sleep(0.5)
    if time.time() - started > timeout:
        report_error(f"Wait for response timeout after {timeout}")
        report_error(f"Flow not completed")
    messages = st.session_state.openai_client.beta.threads.messages.list(
        thread_id=thread.id
    )
    return messages


def call_assistant(query, engine="gpt-3.5-turbo"):  # , temp=0, top_p=1.0, max_tokens=4048):
    """Send *query* to the session's assistant thread and wait for the reply.

    Refuses to re-send the same query twice in a row (returns the cached
    ``st.session_state.messages`` instead). On success returns the thread's
    message list from wait_for_response; on error prints the exception and
    returns None.

    :param engine: accepted for interface parity with call_openai; the
        assistant's own model is used, so it is not forwarded here.
    """
    # Prevent re-sending the last message over and over.
    print(f"Last query {st.session_state.last_user_query}, current query {query}")
    if st.session_state.last_user_query == query:
        report_error(f"That query '{query}' was just sent. We don't send the same query twice in a row. \n")
        return st.session_state.messages
    try:
        thread = st.session_state.assistant_thread
        assistant_id = st.session_state.assistant_id
        message = st.session_state.openai_client.beta.threads.messages.create(
            thread.id,
            role="user",
            content=query,
        )
        run = st.session_state.openai_client.beta.threads.runs.create(
            thread_id=thread.id,
            assistant_id=assistant_id,
        )
        messages = wait_for_response(thread, run)
        print(f"====================\nOpen AI response\n {str(messages)[:1000]}\n====================\n")
        return messages
    except Exception as e:  # except openai.error.OpenAIError as e:
        # Surface the full traceback instead of only the message (traceback
        # was imported but never used before).
        traceback.print_exc()
        print(f"An error occurred: {str(e)}")


def call_openai(prompt, engine="gpt-3.5-turbo", temp=0, top_p=1.0, max_tokens=4048):
    """Run a single chat completion for *prompt* on top of the session history.

    Raises if the session is configured for the assistant flow (use
    call_assistant there). Returns the stripped completion text, or the
    fallback string "Failed to generate a response." on any API error.

    :param engine: model name passed to the API.
    :param temp: sampling temperature.
    :param top_p: nucleus-sampling cutoff. BUG FIX: this was accepted but
        silently ignored; it is now forwarded to the API (default 1.0 matches
        the API default, so existing callers are unaffected).
    :param max_tokens: completion-length cap.
    """
    if st.session_state.report_type == "assistant":
        raise Exception("use call_assistant instead of call_openai")
    try:
        response = st.session_state.openai_client.chat.completions.create(
            model=engine,
            messages=st.session_state.messages + [{"role": "user", "content": prompt}],
            temperature=temp,
            top_p=top_p,
            seed=SEED,
            max_tokens=max_tokens,
        )
        print(f"====================\nOpen AI response\n {response}\n====================\n")
        text = response.choices[0].message.content.strip()
        return text
    except Exception as e:  # except openai.error.OpenAIError as e:
        print(f"An error occurred: {str(e)}")
        return "Failed to generate a response."
def send_message(role, content):
    """Append a message with the given *role* and *content* to the current assistant thread."""
    st.session_state.openai_client.beta.threads.messages.create(
        thread_id=st.session_state.assistant_thread.id,
        role=role,
        content=content,
    )


def start_conversation():
    """Open a brand-new assistant thread and remember it in session state."""
    st.session_state.assistant_thread = st.session_state.openai_client.beta.threads.create()


def run_assistant():
    """Start a run for the session assistant on the current thread and block until it settles.

    Polls the run every 0.5 s while it is still queued or in progress, then
    returns the final run object.
    """
    client = st.session_state.openai_client
    thread_id = st.session_state.assistant_thread.id
    run = client.beta.threads.runs.create(
        thread_id=thread_id,
        assistant_id=st.session_state.assistant.id,
    )
    while run.status in ("queued", "in_progress"):
        run = client.beta.threads.runs.retrieve(
            thread_id=thread_id,
            run_id=run.id,
        )
        time.sleep(0.5)
    return run