from langchain.llms.base import LLM
from typing import Optional, List
from langchain.llms.utils import enforce_stop_tokens
import requests

# NOTE: defined in the original file but never referenced below.
FORWARD_KEY = 'fk198719-5WV1GhGHMAzpDwrmjkMXAPsDWamjfbgr'

class ChatGLM(LLM):
    """LangChain wrapper around a self-hosted ChatGLM HTTP endpoint."""
    max_length: int = 10000
    temperature: float = 0
    top_p: float = 0.9
    tokenizer: object = None
    model: object = None
    history_len: int = 10
    history: List[List[str]] = []
    URL: str = 'http://183.131.3.48:9200'
    HEADERS: dict = {'Content-Type': 'application/json'}

    @property
    def _llm_type(self) -> str:
        return "ChatGLM"

    def _call(self,
              prompt: str,
              history: Optional[List[List[str]]] = None,
              stop: Optional[List[str]] = None) -> str:

        if history:
            history = [i for i in history if i[0] is not None]  # drop the system-message turn
            history = history[-self.history_len:]  # keep only the most recent turns

        # NOTE: the payload key 'tokenizers' is kept from the original code;
        # since the attribute sent is self.tokenizer, it may be a typo for 'tokenizer'.
        params = {'tokenizers': self.tokenizer, 'prompt': prompt, 'history': history, 'top_p': self.top_p,
                  'max_length': self.max_length, 'temperature': self.temperature}

        response = requests.post(self.URL, headers=self.HEADERS, json=params, timeout=60).json()
        answer = response['response']
        if stop is not None:
            answer = enforce_stop_tokens(answer, stop)

        return answer


class OpenAI3(LLM):
    """LangChain wrapper for gpt-3.5-turbo via the api2d.net relay."""
    max_length: int = 10000
    temperature: float = 0.2
    top_p: float = 0.9
    tokenizer: object = None
    model: object = None
    history_len: int = 10
    history: List[List[str]] = []
    HEADERS: dict = {'Content-Type': 'application/json', 'Authorization': 'Bearer fk198719-Pmvv22OqZiovaxRq6YxCzkTcd6UVVX5O'}
    URL: str = 'https://openai.api2d.net/v1/chat/completions'
    MODEL_NAME: str = "gpt-3.5-turbo"


    @property
    def _llm_type(self) -> str:
        return "OpenAI3"

    def _call(self,
              prompt: str,
              history: Optional[List[List[str]]] = None,
              stop: Optional[List[str]] = None) -> str:

        # Prompt convention: system text and user text separated by '\n\nstop\n\n'.
        system, user = prompt.split('\n\nstop\n\n', 1)
        message = [{"role": "system", "content": system}, {"role": "user", "content": user}]
        print(message)  # debug: log the outgoing chat messages
        # if history:
        #     history = [i for i in history if i[0] is not None]
        #     history = history[-self.history_len:]
        #     message = [[{"role": "user", "content": i[0]}, {"role": "assistant", "content": i[1]}] for i in history]
        #     message = sum(message, [])
        # else:
        #     message = []
        # message.append({"role": "user", "content": prompt})
        params = {"model": self.MODEL_NAME, "messages": message, 'temperature': self.temperature}
        response = requests.post(self.URL, headers=self.HEADERS, json=params, timeout=60).json()
        answer = response['choices'][0]['message']['content']
        if stop is not None:
            answer = enforce_stop_tokens(answer, stop)

        return answer


class OpenAI4(LLM):
    """LangChain wrapper for gpt-4 via the api2d.net relay."""
    max_length: int = 10000
    temperature: float = 0.2
    top_p: float = 0.9
    tokenizer: object = None
    model: object = None
    history_len: int = 10
    history: List[List[str]] = []
    HEADERS: dict = {'Content-Type': 'application/json', 'Authorization': 'Bearer fk198719-Pmvv22OqZiovaxRq6YxCzkTcd6UVVX5O'}
    URL: str = 'https://openai.api2d.net/v1/chat/completions'
    MODEL_NAME: str = "gpt-4"


    @property
    def _llm_type(self) -> str:
        return "OpenAI4"

    def _call(self,
              prompt: str,
              history: Optional[List[List[str]]] = None,
              stop: Optional[List[str]] = None) -> str:
        # Prompt convention: system text and user text separated by '\n\nstop\n\n'.
        system, user = prompt.split('\n\nstop\n\n', 1)
        message = [{"role": "system", "content": system}, {"role": "user", "content": user}]
        # if history:
        #     history = [i for i in history if i[0] is not None]
        #     history = history[-self.history_len:]
        #     message = [[{"role": "user", "content": i[0]}, {"role": "assistant", "content": i[1]}] for i in history]
        #     message = sum(message, [])
        # else:
        #     message = []
        # message.append({"role": "user", "content": prompt})
        print(message)  # debug: log the outgoing chat messages
        params = {"model": self.MODEL_NAME, "messages": message, 'temperature': self.temperature}
        response = requests.post(self.URL, headers=self.HEADERS, json=params, timeout=60).json()
        answer = response['choices'][0]['message']['content']
        if stop is not None:
            answer = enforce_stop_tokens(answer, stop)

        return answer
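

# Usage sketch (illustrative addition, not part of the original module). It
# assumes the endpoint and bearer key above are still live, and an older
# langchain release where calling an LLM instance dispatches to _call. The
# OpenAI wrappers require the '\n\nstop\n\n' separator between the system
# text and the user text, as _call splits on it.
if __name__ == '__main__':
    llm = OpenAI3()
    prompt = 'You are a helpful assistant.\n\nstop\n\nBriefly, what is LangChain?'
    print(llm(prompt))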