sanbo committed · Commit 1d746df
Parent(s): 0e6a3d5
update sth. at 2025-01-06 14:25:24

Files changed:
- README.md  +17 -0
- app250105.py → app250106.py  +2 -2
README.md CHANGED
@@ -46,4 +46,21 @@ curl 'https://korea-chat.degpt.ai/api/v0/chat/completion/proxy' \
   --data-raw '{"model":"Qwen2.5-72B","messages":[{"role":"user","content":"hi"}],"project":"DecentralGPT","stream":true}'
 
 
+
+########250106
+curl 'https://usa-chat.degpt.ai/api/v0/chat/completion/proxy' \
+  -H 'accept: */*' \
+  -H 'content-type: application/json' \
+  --data-raw '{"model":"Llama3.3-70B","messages":[{"role":"user","content":"1"}],"project":"DecentralGPT","stream":true}'
+
+curl 'https://singapore-chat.degpt.ai/api/v0/chat/completion/proxy' \
+  -H 'Accept: */*' \
+  -H 'Content-Type: application/json' \
+  --data-raw '{"model":"Llama3.3-70B","messages":[{"role":"user","content":"1"}],"project":"DecentralGPT","stream":true}'
+
+curl 'https://korea-chat.degpt.ai/api/v0/chat/completion/proxy' \
+  -H 'Accept: */*' \
+  -H 'Content-Type: application/json' \
+  --data-raw '{"model":"Llama3.3-70B","messages":[{"role":"user","content":"1"}],"project":"DecentralGPT","stream":true}'
+
 ```
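For reference, the same request the new curl examples make can be issued from Python, the repo's own language. This is a minimal sketch, assuming the endpoint accepts a plain JSON POST exactly as shown above; since the diff does not reveal the streamed response framing (SSE vs. chunked JSON), it simply prints the raw decoded chunks as they arrive.

```python
# Minimal sketch of the README's curl request, done with `requests`.
# The response handling is an assumption: we stream and print raw chunks,
# because the diff does not show the server's chunk format.
import requests

url = "https://usa-chat.degpt.ai/api/v0/chat/completion/proxy"
payload = {
    "model": "Llama3.3-70B",
    "messages": [{"role": "user", "content": "1"}],
    "project": "DecentralGPT",
    "stream": True,
}

with requests.post(url, json=payload, headers={"Accept": "*/*"},
                   stream=True, timeout=60) as resp:
    resp.raise_for_status()
    for chunk in resp.iter_content(chunk_size=None, decode_unicode=True):
        if chunk:
            print(chunk, end="", flush=True)
```

Swapping in the singapore-chat or korea-chat host from the other two curl examples should behave the same way, as the request body is identical across all three.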
app250105.py → app250106.py RENAMED
@@ -34,7 +34,7 @@ def get_model_by_id(model_id=None):
 
     # If model_id is invalid, default to Qwen2.5-72B
     if model_id not in valid_ids:
-        model_id = "Qwen2.5-72B"
+        model_id = "Llama3.3-70B"
 
     # Fetch the model data matching model_id
     model_data = next((model for model in models_data if model["id"] == model_id), None)
@@ -43,7 +43,7 @@ def get_model_by_id(model_id=None):
     return model_data["id"] if model_data else None
 
 def chat_completion(
-        user_prompt, user_id: str = None, system_prompt="You are a helpful assistant.", model="Qwen2.5-72B",
+        user_prompt, user_id: str = None, system_prompt="You are a helpful assistant.", model="Llama3.3-70B",
         project="DecentralGPT", stream=False, temperature=0.3, max_tokens=1024, top_p=0.5,
         frequency_penalty=0, presence_penalty=0):
     """Handle the user request and preserve context"""
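The net effect of the two changed lines: any unrecognized model_id now resolves to Llama3.3-70B rather than Qwen2.5-72B, and chat_completion defaults to the same model. A minimal self-contained sketch of that fallback behavior follows; models_data and valid_ids here are hypothetical stand-ins for the module's real model registry, which the diff does not show.

```python
# Hypothetical stand-ins for the module's actual registry (not shown in the diff).
models_data = [{"id": "Llama3.3-70B"}, {"id": "Qwen2.5-72B"}]
valid_ids = {m["id"] for m in models_data}

def get_model_by_id(model_id=None):
    # Unknown or missing ids fall back to the new default, Llama3.3-70B.
    if model_id not in valid_ids:
        model_id = "Llama3.3-70B"
    # Look up the entry matching model_id; None if nothing matches.
    model_data = next((m for m in models_data if m["id"] == model_id), None)
    return model_data["id"] if model_data else None

assert get_model_by_id("does-not-exist") == "Llama3.3-70B"  # falls back
assert get_model_by_id(None) == "Llama3.3-70B"              # missing id falls back
assert get_model_by_id("Qwen2.5-72B") == "Qwen2.5-72B"      # valid id kept
```

Note that the comment above the fallback still reads "default to Qwen2.5-72B" in the new file; only the assigned value was changed.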