KevinHuSh committed · Commit e06e08c · Parent(s): 99b8bda

add base url for OpenAI (#166)

Files changed:
- README.md +4 -3
- api/apps/llm_app.py +5 -3
- api/db/services/llm_service.py +5 -3
- deepdoc/vision/recognizer.py +2 -0
- rag/llm/chat_model.py +9 -7
- rag/llm/cv_model.py +6 -5
- rag/llm/embedding_model.py +6 -5
- rag/svr/task_executor.py +2 -1
README.md CHANGED

````diff
@@ -20,7 +20,7 @@
     <img height="21" src="https://img.shields.io/badge/License-Apache--2.0-ffffff?style=flat-square&labelColor=d4eaf7&color=7d09f1" alt="license">
   </a>
 </p>
-[RagFlow](
+[RagFlow](https://demo.ragflow.io) is a knowledge management platform built on custom-build document understanding engine and LLM, with reasoned and well-founded answers to your question. Clone this repository, you can deploy your own knowledge management platform to empower your business with AI.
 
 
 <div align="center" style="margin-top:20px;margin-bottom:20px;">
@@ -56,12 +56,12 @@
 
 Then, you need to check the following command:
 ```bash
-
+$ sysctl vm.max_map_count
 vm.max_map_count = 262144
 ```
 If **vm.max_map_count** is not greater than 65535:
 ```bash
-
+$ sudo sysctl -w vm.max_map_count=262144
 ```
 Note that this change is reset after a system reboot. To render your change permanent, add or update the following line in **/etc/sysctl.conf**:
 
@@ -126,6 +126,7 @@ Open your browser, enter the IP address of your server, _**Hallelujah**_ again!
 <div align="center" style="margin-top:20px;margin-bottom:20px;">
   <img src="https://github.com/infiniflow/ragflow/assets/12318111/d6ac5664-c237-4200-a7c2-a4a00691b485" width="1000"/>
 </div>
+
 ## 🔧 Configurations
 
 If you need to change the default setting of the system when you deploy it. There several ways to configure it.
````
api/apps/llm_app.py CHANGED

```diff
@@ -45,7 +45,7 @@ def set_api_key():
     for llm in LLMService.query(fid=factory):
         if llm.model_type == LLMType.EMBEDDING.value:
             mdl = EmbeddingModel[factory](
-                req["api_key"], llm.llm_name)
+                req["api_key"], llm.llm_name, req.get("base_url"))
             try:
                 arr, tc = mdl.encode(["Test if the api key is available"])
                 if len(arr[0]) == 0 or tc == 0:
@@ -54,7 +54,7 @@ def set_api_key():
                 msg += f"\nFail to access embedding model({llm.llm_name}) using this api key." + str(e)
         elif not chat_passed and llm.model_type == LLMType.CHAT.value:
             mdl = ChatModel[factory](
-                req["api_key"], llm.llm_name)
+                req["api_key"], llm.llm_name, req.get("base_url"))
             try:
                 m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}], {
                     "temperature": 0.9})
@@ -83,7 +83,9 @@ def set_api_key():
             llm_factory=factory,
             llm_name=llm.llm_name,
             model_type=llm.model_type,
-            api_key=req["api_key"]
+            api_key=req["api_key"],
+            api_base=req.get("base_url", "")
+        )
 
     return get_json_result(data=True)
 
```
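A note on the fallback behaviour: the endpoint reads the new field with `req.get("base_url")`, so a request that omits it yields `None`, and the `if not base_url` guard added to the model wrappers (see the chat/embedding diffs below) restores the vendor default. A standalone sketch of that chain, with placeholder values, not repo code:

```python
# Minimal illustration of the base_url fallback introduced in this commit.
# The dict stands in for the parsed request body; values are placeholders.
req = {"api_key": "sk-placeholder"}        # client did not supply "base_url"

base_url = req.get("base_url")             # -> None when the field is absent
if not base_url:                           # same guard used in the model wrappers
    base_url = "https://api.openai.com/v1"

print(base_url)                            # https://api.openai.com/v1
```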
api/db/services/llm_service.py CHANGED

```diff
@@ -84,19 +84,21 @@ class TenantLLMService(CommonService):
             if model_config["llm_factory"] not in EmbeddingModel:
                 return
             return EmbeddingModel[model_config["llm_factory"]](
-                model_config["api_key"], model_config["llm_name"])
+                model_config["api_key"], model_config["llm_name"], model_config["api_base"])
 
         if llm_type == LLMType.IMAGE2TEXT.value:
             if model_config["llm_factory"] not in CvModel:
                 return
             return CvModel[model_config["llm_factory"]](
-                model_config["api_key"], model_config["llm_name"], lang
+                model_config["api_key"], model_config["llm_name"], lang,
+                base_url=model_config["api_base"]
+            )
 
         if llm_type == LLMType.CHAT.value:
             if model_config["llm_factory"] not in ChatModel:
                 return
             return ChatModel[model_config["llm_factory"]](
-                model_config["api_key"], model_config["llm_name"])
+                model_config["api_key"], model_config["llm_name"], model_config["api_base"])
 
     @classmethod
     @DB.connection_context()
```
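After this change, the tenant model config consumed here is expected to carry an `api_base` value alongside `api_key` and `llm_name` (an empty string meaning "use the vendor default"). A hedged sketch of that shape, with placeholder values only:

```python
# Illustrative only: keys mirror those dereferenced in the hunk above; values are placeholders.
model_config = {
    "llm_factory": "OpenAI",                 # factory name stored per tenant (placeholder)
    "llm_name": "gpt-3.5-turbo",
    "api_key": "sk-placeholder",
    "api_base": "http://localhost:8000/v1",  # "" falls back to the vendor default endpoint
}

# Chat and embedding factories receive api_base as the third positional argument;
# image-to-text factories receive it as base_url=, exactly as in the diff above.
```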
deepdoc/vision/recognizer.py CHANGED

```diff
@@ -43,6 +43,8 @@ class Recognizer(object):
         if not os.path.exists(model_file_path):
             model_dir = snapshot_download(repo_id="InfiniFlow/deepdoc")
             model_file_path = os.path.join(model_dir, task_name + ".onnx")
+        else:
+            model_file_path = os.path.join(model_dir, task_name + ".onnx")
 
         if not os.path.exists(model_file_path):
             raise ValueError("not find model file path {}".format(
```
rag/llm/chat_model.py CHANGED

```diff
@@ -31,8 +31,9 @@ class Base(ABC):
 
 
 class GptTurbo(Base):
-    def __init__(self, key, model_name="gpt-3.5-turbo"):
-
+    def __init__(self, key, model_name="gpt-3.5-turbo", base_url="https://api.openai.com/v1"):
+        if not base_url: base_url="https://api.openai.com/v1"
+        self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name
 
     def chat(self, system, history, gen_conf):
@@ -53,9 +54,10 @@ class GptTurbo(Base):
 
 
 class MoonshotChat(GptTurbo):
-    def __init__(self, key, model_name="moonshot-v1-8k"):
+    def __init__(self, key, model_name="moonshot-v1-8k", base_url="https://api.moonshot.cn/v1"):
+        if not base_url: base_url="https://api.moonshot.cn/v1"
         self.client = OpenAI(
-            api_key=key, base_url=
+            api_key=key, base_url=base_url)
         self.model_name = model_name
 
     def chat(self, system, history, gen_conf):
@@ -76,7 +78,7 @@ class MoonshotChat(GptTurbo):
 
 
 class QWenChat(Base):
-    def __init__(self, key, model_name=Generation.Models.qwen_turbo):
+    def __init__(self, key, model_name=Generation.Models.qwen_turbo, **kwargs):
         import dashscope
         dashscope.api_key = key
         self.model_name = model_name
@@ -105,7 +107,7 @@ class QWenChat(Base):
 
 
 class ZhipuChat(Base):
-    def __init__(self, key, model_name="glm-3-turbo"):
+    def __init__(self, key, model_name="glm-3-turbo", **kwargs):
         self.client = ZhipuAI(api_key=key)
         self.model_name = model_name
 
@@ -154,7 +156,7 @@ class LocalLLM(Base):
 
         return do_rpc
 
-    def __init__(self,
+    def __init__(self, **kwargs):
         self.client = LocalLLM.RPCProxy("127.0.0.1", 7860)
 
     def chat(self, system, history, gen_conf):
```
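For reference, a minimal usage sketch of the new `GptTurbo` signature. The key and endpoint below are placeholders, an OpenAI-compatible `/v1` server is assumed, and an empty or `None` `base_url` falls back to `https://api.openai.com/v1` via the guard added above:

```python
from rag.llm.chat_model import GptTurbo  # assumes the repo's module layout

# Point the OpenAI wrapper at a self-hosted, OpenAI-compatible endpoint (placeholder URL).
mdl = GptTurbo("sk-placeholder", "gpt-3.5-turbo", base_url="http://localhost:8000/v1")

# chat(system, history, gen_conf) returns the answer text and a token count,
# matching how api/apps/llm_app.py calls it above.
answer, tokens = mdl.chat(None, [{"role": "user", "content": "Hello!"}], {"temperature": 0.9})
print(answer, tokens)
```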
rag/llm/cv_model.py CHANGED

```diff
@@ -67,8 +67,9 @@ class Base(ABC):
 
 
 class GptV4(Base):
-    def __init__(self, key, model_name="gpt-4-vision-preview", lang="Chinese"):
-
+    def __init__(self, key, model_name="gpt-4-vision-preview", lang="Chinese", base_url="https://api.openai.com/v1"):
+        if not base_url: base_url="https://api.openai.com/v1"
+        self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name
         self.lang = lang
 
@@ -84,7 +85,7 @@ class GptV4(Base):
 
 
 class QWenCV(Base):
-    def __init__(self, key, model_name="qwen-vl-chat-v1", lang="Chinese"):
+    def __init__(self, key, model_name="qwen-vl-chat-v1", lang="Chinese", **kwargs):
         import dashscope
         dashscope.api_key = key
         self.model_name = model_name
@@ -123,7 +124,7 @@ class QWenCV(Base):
 
 
 class Zhipu4V(Base):
-    def __init__(self, key, model_name="glm-4v", lang="Chinese"):
+    def __init__(self, key, model_name="glm-4v", lang="Chinese", **kwargs):
         self.client = ZhipuAI(api_key=key)
         self.model_name = model_name
         self.lang = lang
@@ -140,7 +141,7 @@ class Zhipu4V(Base):
 
 
 class LocalCV(Base):
-    def __init__(self, key, model_name="glm-4v", lang="Chinese"):
+    def __init__(self, key, model_name="glm-4v", lang="Chinese", **kwargs):
         pass
 
     def describe(self, image, max_tokens=1024):
```
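The vision wrappers follow the same convention: `GptV4` takes a real `base_url`, while the non-OpenAI classes accept and ignore it through `**kwargs`, so the service layer can use one calling shape for every factory. A small sketch with a placeholder key; the empty `base_url` exercises the fallback guard:

```python
from rag.llm.cv_model import GptV4  # assumes the repo's module layout

# An empty base_url triggers the "if not base_url" fallback to the OpenAI default endpoint.
cv_mdl = GptV4("sk-placeholder", "gpt-4-vision-preview", lang="English", base_url="")
```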
rag/llm/embedding_model.py CHANGED

```diff
@@ -51,7 +51,7 @@ class Base(ABC):
 
 
 class HuEmbedding(Base):
-    def __init__(self,
+    def __init__(self, **kwargs):
         """
         If you have trouble downloading HuggingFace models, -_^ this might help!!
 
@@ -81,8 +81,9 @@ class HuEmbedding(Base):
 
 
 class OpenAIEmbed(Base):
-    def __init__(self, key, model_name="text-embedding-ada-002"):
-
+    def __init__(self, key, model_name="text-embedding-ada-002", base_url="https://api.openai.com/v1"):
+        if not base_url: base_url="https://api.openai.com/v1"
+        self.client = OpenAI(api_key=key, base_url=base_url)
         self.model_name = model_name
 
     def encode(self, texts: list, batch_size=32):
@@ -98,7 +99,7 @@ class OpenAIEmbed(Base):
 
 
 class QWenEmbed(Base):
-    def __init__(self, key, model_name="text_embedding_v2"):
+    def __init__(self, key, model_name="text_embedding_v2", **kwargs):
         dashscope.api_key = key
         self.model_name = model_name
 
@@ -131,7 +132,7 @@ class QWenEmbed(Base):
 
 
 class ZhipuEmbed(Base):
-    def __init__(self, key, model_name="embedding-2"):
+    def __init__(self, key, model_name="embedding-2", **kwargs):
         self.client = ZhipuAI(api_key=key)
         self.model_name = model_name
 
```
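The embedding side mirrors the chat side: `OpenAIEmbed` gains a `base_url` parameter and the other providers swallow it via `**kwargs`. A hedged usage sketch with placeholder values; `encode` returns the vectors and a token count, as used by the API-key check in `api/apps/llm_app.py`:

```python
from rag.llm.embedding_model import OpenAIEmbed  # assumes the repo's module layout

# Placeholder endpoint; any OpenAI-compatible /v1 server is assumed here.
embd = OpenAIEmbed("sk-placeholder", "text-embedding-ada-002",
                   base_url="http://localhost:8000/v1")

vectors, token_count = embd.encode(["Test if the api key is available"])
print(len(vectors[0]), token_count)
```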
rag/svr/task_executor.py CHANGED

```diff
@@ -280,4 +280,5 @@ if __name__ == "__main__":
     from mpi4py import MPI
 
     comm = MPI.COMM_WORLD
-
+    while True:
+        main(int(sys.argv[2]), int(sys.argv[1]))
```