Spaces: Running
"max_tokens": 4000,
Browse files
app.py — CHANGED
@@ -42,7 +42,7 @@ def generate_df_summarise(df_string):
 42      request_payload = {
 43          "model": "gpt-4-1106-preview",
 44          "messages": messages,
 45 -        "max_tokens":
 46      }
 47
 48      response = client.chat.completions.create(**request_payload)

@@ -72,7 +72,7 @@ def generate_questions(df_string):
 72      request_payload = {
 73          "model": "gpt-4-1106-preview",
 74          "messages": messages,
 75 -        "max_tokens":
 76          "response_format": response_format
 77      }
 78

@@ -108,7 +108,7 @@ def respond(user_message, df_string_output, chat_history):
 108     request_payload = {
 109         "model": "gpt-4-1106-preview",
 110         "messages": messages,
 111 -       "max_tokens":
 112     }
 113
 114     response = client.chat.completions.create(**request_payload)
 42      request_payload = {
 43          "model": "gpt-4-1106-preview",
 44          "messages": messages,
 45 +        "max_tokens": 4000,
 46      }
 47
 48      response = client.chat.completions.create(**request_payload)

 72      request_payload = {
 73          "model": "gpt-4-1106-preview",
 74          "messages": messages,
 75 +        "max_tokens": 4000,
 76          "response_format": response_format
 77      }
 78

 108     request_payload = {
 109         "model": "gpt-4-1106-preview",
 110         "messages": messages,
 111 +       "max_tokens": 4000  # 設定一個較大的值,可根據需要調整
 112     }
 113
 114     response = client.chat.completions.create(**request_payload)