AFischer1985 committed (verified)
Commit 54fdb1a · Parent(s): 5d430ee

Update run.py

Files changed (1)
  1. run.py +26 -29
run.py CHANGED
@@ -119,17 +119,17 @@ else:
   #modelPath="/home/af/gguf/models/nous-hermes-2-mixtral-8x7b-dpo.Q4_0.gguf"
   if(os.path.exists(modelPath)==False):
     #url="https://huggingface.co/TheBloke/WizardLM-13B-V1.2-GGUF/resolve/main/wizardlm-13b-v1.2.Q4_0.gguf"
-    #url="https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF/resolve/main/mixtral-8x7b-instruct-v0.1.Q4_0.gguf?download=true"
+    url="https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF/resolve/main/mixtral-8x7b-instruct-v0.1.Q4_0.gguf?download=true"
     #url="https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q4_0.gguf?download=true"
-    url="https://huggingface.co/TheBloke/DiscoLM_German_7b_v1-GGUF/resolve/main/discolm_german_7b_v1.Q4_0.gguf?download=true"
+    #url="https://huggingface.co/TheBloke/DiscoLM_German_7b_v1-GGUF/resolve/main/discolm_german_7b_v1.Q4_0.gguf?download=true"
     response = requests.get(url)
-    with open("./model.gguf", mode="wb") as file:
+    with open("./Mixtral-8x7b-instruct.gguf", mode="wb") as file:
       file.write(response.content)
     print("Model downloaded")
-    modelPath="./model.gguf"
+    modelPath="./Mixtral-8x7b-instruct.gguf"
   print(modelPath)
   n="20"
-  if("mixtral-8x7b-instruct" in modelPath): n="0" # mixtral seems to cause problems here...
+  if("Mixtral-8x7b-instruct" in modelPath): n="0" # mixtral seems to cause problems here...
   command = ["python3", "-m", "llama_cpp.server", "--model", modelPath, "--host", "0.0.0.0", "--port", "2600", "--n_threads", "8", "--n_gpu_layers", n]
   subprocess.Popen(command)
   print("Server ready!")
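A side note on the download step: requests.get(url) buffers the entire multi-gigabyte GGUF file in memory before response.content is written to disk. A streamed variant would avoid that; the following is a minimal sketch using the same requests dependency, not part of this commit:

import os
import requests

url = "https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF/resolve/main/mixtral-8x7b-instruct-v0.1.Q4_0.gguf?download=true"
modelPath = "./Mixtral-8x7b-instruct.gguf"

if not os.path.exists(modelPath):
    # Stream in 1 MiB chunks instead of holding the whole file in RAM.
    with requests.get(url, stream=True) as response:
        response.raise_for_status()
        with open(modelPath, mode="wb") as file:
            for chunk in response.iter_content(chunk_size=1024 * 1024):
                file.write(chunk)
    print("Model downloaded")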
@@ -143,44 +143,40 @@ else:
 # Gradio-GUI
 #------------
 
-def extend_prompt(message="", history=None, system=None, RAGAddon=None, system2=None, zeichenlimit=None,historylimit=4):
+
+def extend_prompt(message="", history=None, system=None, RAGAddon=None, system2=None, zeichenlimit=None,historylimit=4): #float("Inf")
   startOfString=""
   if zeichenlimit is None: zeichenlimit=1000000000 # :-)
   template0=" [INST]{system}\n [/INST] </s>"
   template1=" [INST] {message} [/INST]"
   template2=" {response}</s>"
-  if("mixtral-8x7b-instruct" in modelPath): # https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
+  if("Gemma-" in modelPath): # https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
+    template0="<start_of_turn>user{system}</end_of_turn>"
+    template1="<start_of_turn>user{message}</end_of_turn><start_of_turn>model"
+    template2="{response}</end_of_turn>"
+  if("Mixtral-8x7b-instruct" in modelPath): # https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
     startOfString="<s>"
-    template0=" [INST]{system}\n [/INST] </s>"
+    template0=" [INST]{system}\n [/INST] </s>"
     template1=" [INST] {message} [/INST]"
     template2=" {response}</s>"
   if("Mistral-7B-Instruct" in modelPath): #https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2
     startOfString="<s>"
-    template0="[INST]{system}\n [/INST]</s>"
+    template0="[INST]{system}\n [/INST]</s>"
     template1="[INST] {message} [/INST]"
     template2=" {response}</s>"
-  if("discolm_german_7b" in modelPath): #https://huggingface.co/DiscoResearch/DiscoLM_German_7b_v1
-    template0="<|im_start|>system\n{system}<|im_end|>\n"
-    template1="<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
-    template2="{response}<|im_end|>\n"
-  if("SauerkrautLM-7b-HerO" in modelPath): #https://huggingface.co/VAGOsolutions/SauerkrautLM-7b-HerO
-    template0="<|im_start|>system\n{system}<|im_end|>\n"
-    template1="<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
-    template2="{response}<|im_end|>\n"
-  if("gemma-" in modelPath): # https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
-    template0="<start_of_turn>user{system}</end_of_turn>"
-    template1="<start_of_turn>user{message}</end_of_turn><start_of_turn>model"
-    template2="{response}</end_of_turn>"
-  if("openchat-3.5" in modelPath): #https://huggingface.co/TheBloke/openchat-3.5-0106-GGUF
-    startOfString="<s>"
+  if("Openchat-3.5" in modelPath): #https://huggingface.co/TheBloke/openchat-3.5-0106-GGUF
     template0="GPT4 Correct User: {system}<|end_of_turn|>GPT4 Correct Assistant: Okay.<|end_of_turn|>"
     template1="GPT4 Correct User: {message}<|end_of_turn|>GPT4 Correct Assistant: "
     template2="{response}<|end_of_turn|>"
+  if(("Discolm_german_7b" in modelPath) or ("SauerkrautLM-7b-HerO" in modelPath)): #https://huggingface.co/VAGOsolutions/SauerkrautLM-7b-HerO
+    template0="<|im_start|>system\n{system}<|im_end|>\n"
+    template1="<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
+    template2="{response}<|im_end|>\n"
   if("WizardLM-13B-V1.2" in modelPath): #https://huggingface.co/WizardLM/WizardLM-13B-V1.2
     template0="{system} " #<s>
     template1="USER: {message} ASSISTANT: "
     template2="{response}</s>"
-  if("phi-2" in modelPath): #https://huggingface.co/TheBloke/phi-2-GGUF
+  if("Phi-2" in modelPath): #https://huggingface.co/TheBloke/phi-2-GGUF
     template0="Instruct: {system}\nOutput: Okay.\n"
     template1="Instruct: {message}\nOutput:"
    template2="{response}\n"
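To make the template changes concrete: DiscoLM German and SauerkrautLM now share a single ChatML-style branch, and the Gemma branch moved to the top of the chain. The sketch below (hypothetical inputs, not part of the commit) shows what the ChatML templates produce for a system message, one history turn, and a new user message:

# ChatML-style templates from the Discolm_german_7b / SauerkrautLM-7b-HerO branch above.
template0 = "<|im_start|>system\n{system}<|im_end|>\n"
template1 = "<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
template2 = "{response}<|im_end|>\n"

# Hypothetical conversation state, purely for illustration.
prompt  = template0.format(system="You are a helpful assistant.")
prompt += template1.format(message="Hello!")                      # earlier user turn
prompt += template2.format(response="Hi! How can I help?")        # earlier bot turn
prompt += template1.format(message="What is GGUF?")               # current message
print(prompt)  # ends with "<|im_start|>assistant\n", ready for completion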
@@ -188,17 +184,18 @@ def extend_prompt(message="", history=None, system=None, RAGAddon=None, system2=
   if RAGAddon is not None:
     system += RAGAddon
   if system is not None:
-    prompt += template0.format(system=system.strip())
+    prompt += template0.format(system=system) #"<s>"
   if history is not None:
     for user_message, bot_response in history[-historylimit:]:
-      if user_message is not None: prompt += template1.format(message=user_message[:zeichenlimit].strip())
-      if bot_response is not None: prompt += template2.format(response=bot_response[:zeichenlimit].strip())
-  if message is not None: prompt += template1.format(message=message[:zeichenlimit].strip())
+      if user_message is not None: prompt += template1.format(message=user_message[:zeichenlimit]) #"[INST] {user_prompt} [/INST] "
+      if bot_response is not None: prompt += template2.format(response=bot_response[:zeichenlimit]) #"{bot_response}</s> "
+  if message is not None: prompt += template1.format(message=message[:zeichenlimit]) #"[INST] {message} [/INST]"
   if system2 is not None:
-    prompt += system2.strip()
+    prompt += system2
   return startOfString+prompt
 
 
+
 import gradio as gr
 import requests
 import json
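Tying the pieces together, the string returned by extend_prompt can be posted to the llama_cpp.server instance launched above. This is a minimal sketch, assuming the server's OpenAI-compatible /v1/completions endpoint on port 2600; the inputs and stop tokens are illustrative, not part of the commit:

import requests

# Hypothetical call against the server started earlier in run.py.
prompt = extend_prompt(message="What is GGUF?", system="You are a helpful assistant.")
response = requests.post(
    "http://localhost:2600/v1/completions",
    json={"prompt": prompt, "max_tokens": 256, "stop": ["</s>", "<|im_end|>"]},
)
print(response.json()["choices"][0]["text"])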
 