File size: 995 Bytes
eb6fd1c
 
 
49e4c6b
eb6fd1c
 
 
 
49e4c6b
06342ef
 
 
 
 
 
 
eb6fd1c
49e4c6b
 
06342ef
eb6fd1c
 
06342ef
 
eb6fd1c
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
import os
import gradio as gr
import time
from langchain.llms import CTransformers

#checkpoint = "bigscience/bloomz" # english
#checkpoint = "cmarkea/bloomz-3b-sft-chat"
#checkpoint = "bigscience/bloomz-7b1-mt" # non english
#checkpoint = os.getenv('HF_BLOOM_MODEL')
# Generation parameters passed to CTransformers.
# Fix: the original used '=' instead of ':' for three keys, which is a
# SyntaxError inside a dict literal and prevented the script from running.
llm_config = {
    'max_new_tokens': 256,      # cap on tokens generated per call
    'temperature': 0.8,         # sampling temperature
    'top_p': 0.5,               # nucleus-sampling threshold
    'num_beams': 2,             # beam-search width
    'repetition_penalty': 1.1,  # discourage repeated tokens
}


# Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
# Loads a 4-bit quantized WizardLM 13B GGUF model via langchain's CTransformers
# wrapper (the model file is fetched from the Hugging Face Hub on first use —
# a large download). `config` supplies the generation parameters from llm_config.
llm = CTransformers(model="TheBloke/WizardLM-1.0-Uncensored-Llama2-13B-GGUF", model_file="wizardlm-1.0-uncensored-llama2-13b.Q4_0.gguf", config=llm_config)

def response(prompt):
    """Return the LLM's completion for *prompt* as plain text."""
    return llm(prompt)

if __name__ == '__main__':

    title = "Chat"

    demo_status = "Demo is running on CPU"

    # Minimal text-in / text-out Gradio UI; blocks serving the app.
    demo = gr.Interface(
        fn=response,
        inputs="text",
        outputs="text",
        title=title,
    )
    demo.launch()