from typing import Optional, List

import requests
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens

# Forwarding key for the api2d.net OpenAI proxy; reused as the Bearer token below.
FORWARD_KEY = 'fk198719-Pmvv22OqZiovaxRq6YxCzkTcd6UVVX5O'


class ChatGLM(LLM):
    """LangChain LLM wrapper around a remote ChatGLM HTTP endpoint."""
    max_length: int = 10000
    temperature: float = 0
    top_p: float = 0.9
    tokenizer: object = None
    model: object = None
    history_len: int = 10
    history: List[List[str]] = []
    URL: str = 'http://183.131.3.48:9200'
    HEADERS: dict = {'Content-Type': 'application/json'}

    @property
    def _llm_type(self) -> str:
        return "ChatGLM"

    def _call(self,
              prompt: str,
              history: Optional[List[List[str]]] = None,
              stop: Optional[List[str]] = None) -> str:
        if history:
            history = [i for i in history if i[0] is not None]  # drop the system message
            history = history[-self.history_len:]  # keep only the most recent turns
        params = {'tokenizers': self.tokenizer, 'prompt': prompt, 'history': history, 'top_p': self.top_p,
                  'max_length': self.max_length, 'temperature': self.temperature}
        response = requests.post(self.URL, headers=self.HEADERS, json=params).json()
        answer = response['response']
        if stop is not None:
            answer = enforce_stop_tokens(answer, stop)
        # question = prompt.split('question:\n')[-1]
        # self.history = self.history + [[prompt, response]]
        return answer
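
# Usage sketch (illustrative, not run here: it assumes the ChatGLM endpoint
# above is reachable and replies with a JSON body of the form {"response": "..."}):
#
#   glm = ChatGLM(temperature=0.1)
#   print(glm("Summarize this repository in one sentence."))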


class OpenAI3(LLM):
    """LangChain LLM wrapper for gpt-3.5-turbo via the api2d.net proxy."""
    max_length: int = 10000
    temperature: float = 0.2
    top_p: float = 0.9
    tokenizer: object = None
    model: object = None
    history_len: int = 10
    history: List[List[str]] = []
    HEADERS: dict = {'Content-Type': 'application/json', 'Authorization': f'Bearer {FORWARD_KEY}'}
    URL: str = 'https://openai.api2d.net/v1/chat/completions'
    MODEL_NAME: str = "gpt-3.5-turbo"

    @property
    def _llm_type(self) -> str:
        return "OpenAI3"

    def _call(self,
              prompt: str,
              history: Optional[List[List[str]]] = None,
              stop: Optional[List[str]] = None) -> str:
        # The prompt is expected to carry the system and user parts joined by
        # the literal separator '\n\nstop\n\n'.
        system, user = prompt.split('\n\nstop\n\n')
        message = [{"role": "system", "content": system}, {"role": "user", "content": user}]
        print(message)
        # if history:
        #     history = [i for i in history if i[0] is not None]
        #     history = history[-self.history_len:]
        #     message = [[{"role": "user", "content": i[0]}, {"role": "assistant", "content": i[1]}] for i in history]
        #     message = sum(message, [])
        # else:
        #     message = []
        # message.append({"role": "user", "content": prompt})
        params = {"model": self.MODEL_NAME, "messages": message, 'temperature': self.temperature}
        response = requests.post(self.URL, headers=self.HEADERS, json=params).json()
        answer = response['choices'][0]['message']['content']
        if stop is not None:
            answer = enforce_stop_tokens(answer, stop)
        # question = prompt.split('question:\n')[-1]
        # self.history = self.history + [[question, response]]
        return answer
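
# Usage sketch (illustrative: it assumes the api2d.net proxy speaks the standard
# OpenAI chat-completions schema; note the '\n\nstop\n\n' system/user separator
# that _call expects):
#
#   gpt35 = OpenAI3()
#   print(gpt35("You are a helpful assistant.\n\nstop\n\nWhat is LangChain?"))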


class OpenAI4(LLM):
    """LangChain LLM wrapper for gpt-4 via the api2d.net proxy."""
    max_length: int = 10000
    temperature: float = 0.2
    top_p: float = 0.9
    tokenizer: object = None
    model: object = None
    history_len: int = 10
    history: List[List[str]] = []
    HEADERS: dict = {'Content-Type': 'application/json', 'Authorization': f'Bearer {FORWARD_KEY}'}
    URL: str = 'https://openai.api2d.net/v1/chat/completions'
    MODEL_NAME: str = "gpt-4"

    @property
    def _llm_type(self) -> str:
        return "OpenAI4"

    def _call(self,
              prompt: str,
              history: Optional[List[List[str]]] = None,
              stop: Optional[List[str]] = None) -> str:
        # Same prompt convention as OpenAI3: system and user parts joined by '\n\nstop\n\n'.
        system, user = prompt.split('\n\nstop\n\n')
        message = [{"role": "system", "content": system}, {"role": "user", "content": user}]
        # if history:
        #     history = [i for i in history if i[0] is not None]
        #     history = history[-self.history_len:]
        #     message = [[{"role": "user", "content": i[0]}, {"role": "assistant", "content": i[1]}] for i in history]
        #     message = sum(message, [])
        # else:
        #     message = []
        # message.append({"role": "user", "content": prompt})
        print(message)
        params = {"model": self.MODEL_NAME, "messages": message, 'temperature': self.temperature}
        response = requests.post(self.URL, headers=self.HEADERS, json=params).json()
        answer = response['choices'][0]['message']['content']
        if stop is not None:
            answer = enforce_stop_tokens(answer, stop)
        # question = prompt.split('question:\n')[-1]
        # self.history = self.history + [[question, response]]
        return answer
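

# Minimal smoke test, hedged: it only succeeds if the endpoints and forwarding
# key above are still valid, so it is kept behind __main__ and wrapped in
# try/except to keep imports side-effect free.
if __name__ == "__main__":
    demo_prompt = "You are a concise assistant.\n\nstop\n\nSay hello in five words."
    for llm_cls in (OpenAI3, OpenAI4):
        try:
            print(llm_cls.__name__, "->", llm_cls()(demo_prompt))
        except Exception as exc:  # network/auth failures are expected offline
            print(llm_cls.__name__, "failed:", exc)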