zhangjf committed on
Commit
46d51d2
·
1 Parent(s): be256e8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -14
app.py CHANGED
@@ -24,19 +24,17 @@ def ask(question, history, behavior):
24
  {"role":"user" if i%2==0 else "assistant", "content":content}
25
  for i,content in enumerate(history + [question])
26
  ]
27
- length_messages = num_tokens_from_messages(messages)
28
- """
29
- time_penalty = (length_messages-1000)//10
30
- if time_penalty>0:
31
- print(f"sleep for {time_penalty:.2f}s for too long a quest: {length_messages}")
32
- time.sleep(time_penalty)
33
- """
34
- response = openai.ChatCompletion.create(
35
- model="gpt-3.5-turbo",
36
- messages=forget_long_term(messages)
37
- )["choices"][0]["message"]["content"]
38
- while response.startswith("\n"):
39
- response = response[1:]
40
  except Exception as e:
41
  print(e)
42
  response = 'Timeout! Please wait a few minutes and retry'
@@ -64,7 +62,7 @@ def num_tokens_from_messages(messages, model="gpt-3.5-turbo"):
64
  raise NotImplementedError(f"""num_tokens_from_messages() is not presently implemented for model {model}.
65
  See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
66
 
67
- def forget_long_term(messages, max_num_tokens=1500):
68
  while num_tokens_from_messages(messages)>max_num_tokens:
69
  if messages[0]["role"]=="system" and not len(messages[0]["content"]>=max_num_tokens):
70
  messages = messages[:1] + messages[2:]
 
24
  {"role":"user" if i%2==0 else "assistant", "content":content}
25
  for i,content in enumerate(history + [question])
26
  ]
27
+ raw_length = num_tokens_from_messages(messages)
28
+ messages=forget_long_term(messages)
29
+ if len(messages)==0:
30
+ response = 'Your query is too long and expensive: {raw_length}>1000 tokens'
31
+ else:
32
+ response = openai.ChatCompletion.create(
33
+ model="gpt-3.5-turbo",
34
+ messages=messages
35
+ )["choices"][0]["message"]["content"]
36
+ while response.startswith("\n"):
37
+ response = response[1:]
 
 
38
  except Exception as e:
39
  print(e)
40
  response = 'Timeout! Please wait a few minutes and retry'
 
62
  raise NotImplementedError(f"""num_tokens_from_messages() is not presently implemented for model {model}.
63
  See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
64
 
65
+ def forget_long_term(messages, max_num_tokens=1000):
66
  while num_tokens_from_messages(messages)>max_num_tokens:
67
  if messages[0]["role"]=="system" and not len(messages[0]["content"]>=max_num_tokens):
68
  messages = messages[:1] + messages[2:]