markqiu committed on
Commit
78db486
·
1 Parent(s): 4a25e42

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +134 -106
app.py CHANGED
@@ -1,107 +1,135 @@
1
- import os
2
- from typing import Optional, Tuple
3
-
4
  import gradio as gr
5
- from langchain.chains import ConversationChain
6
- from langchain.llms import OpenAI
7
- from threading import Lock
8
-
9
-
10
- def load_chain():
11
- """Logic for loading the chain you want to use should go here."""
12
- llm = OpenAI(temperature=0)
13
- chain = ConversationChain(llm=llm)
14
- return chain
15
-
16
-
17
- def set_openai_api_key(api_key: str):
18
- """Set the api key and return chain.
19
-
20
- If no api_key, then None is returned.
21
- """
22
- if api_key:
23
- os.environ["OPENAI_API_KEY"] = api_key
24
- chain = load_chain()
25
- os.environ["OPENAI_API_KEY"] = ""
26
- return chain
27
-
28
- class ChatWrapper:
29
-
30
- def __init__(self):
31
- self.lock = Lock()
32
- def __call__(
33
- self, api_key: str, inp: str, history: Optional[Tuple[str, str]], chain: Optional[ConversationChain]
34
- ):
35
- """Execute the chat functionality."""
36
- self.lock.acquire()
37
- try:
38
- history = history or []
39
- # If chain is None, that is because no API key was provided.
40
- if chain is None:
41
- history.append((inp, "Please paste your OpenAI key to use"))
42
- return history, history
43
- # Set OpenAI key
44
- import openai
45
- openai.api_key = api_key
46
- # Run chain and append input.
47
- output = chain.run(input=inp)
48
- history.append((inp, output))
49
- except Exception as e:
50
- raise e
51
- finally:
52
- self.lock.release()
53
- return history, history
54
-
55
- chat = ChatWrapper()
56
-
57
- block = gr.Blocks(css=".gradio-container {background-color: lightgray}")
58
-
59
- with block:
60
- with gr.Row():
61
- gr.Markdown("<h3><center>LangChain Demo</center></h3>")
62
-
63
- openai_api_key_textbox = gr.Textbox(
64
- placeholder="Paste your OpenAI API key (sk-...)",
65
- show_label=False,
66
- lines=1,
67
- type="password",
68
- )
69
-
70
- chatbot = gr.Chatbot()
71
-
72
- with gr.Row():
73
- message = gr.Textbox(
74
- label="What's your question?",
75
- placeholder="What's the answer to life, the universe, and everything?",
76
- lines=1,
77
- )
78
- submit = gr.Button(value="Send", variant="secondary").style(full_width=False)
79
-
80
- gr.Examples(
81
- examples=[
82
- "Hi! How's it going?",
83
- "What should I do tonight?",
84
- "Whats 2 + 2?",
85
- ],
86
- inputs=message,
87
- )
88
-
89
- gr.HTML("Demo application of a LangChain chain.")
90
-
91
- gr.HTML(
92
- "<center>Powered by <a href='https://github.com/hwchase17/langchain'>LangChain 🦜️🔗</a></center>"
93
- )
94
-
95
- state = gr.State()
96
- agent_state = gr.State()
97
-
98
- submit.click(chat, inputs=[openai_api_key_textbox, message, state, agent_state], outputs=[chatbot, state])
99
- message.submit(chat, inputs=[openai_api_key_textbox, message, state, agent_state], outputs=[chatbot, state])
100
-
101
- openai_api_key_textbox.change(
102
- set_openai_api_key,
103
- inputs=[openai_api_key_textbox],
104
- outputs=[agent_state],
105
- )
106
-
107
- block.launch(debug=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ import os, gc, copy, torch
3
+ from datetime import datetime
4
+ from huggingface_hub import hf_hub_download
5
+ from pynvml import *
6
+ nvmlInit()
7
+ gpu_h = nvmlDeviceGetHandleByIndex(0)
8
+ ctx_limit = 2000
9
+ title = "RWKV-5-World-1B5-v2-20231025-ctx4096"
10
+
11
+ os.environ["RWKV_JIT_ON"] = '1'
12
+ os.environ["RWKV_CUDA_ON"] = '1' # if '1' then use CUDA kernel for seq mode (much faster)
13
+
14
+ from rwkv.model import RWKV
15
+ model_path = hf_hub_download(repo_id="BlinkDL/rwkv-5-world", filename=f"{title}.pth")
16
+ model = RWKV(model=model_path, strategy='cuda fp16')
17
+ from rwkv.utils import PIPELINE, PIPELINE_ARGS
18
+ pipeline = PIPELINE(model, "rwkv_vocab_v20230424")
19
+
20
+ def generate_prompt(instruction, input=""):
21
+ instruction = instruction.strip().replace('\r\n','\n').replace('\n\n','\n')
22
+ input = input.strip().replace('\r\n','\n').replace('\n\n','\n')
23
+ if input:
24
+ return f"""Instruction: {instruction}
25
+
26
+ Input: {input}
27
+
28
+ Response:"""
29
+ else:
30
+ return f"""User: hi
31
+
32
+ Assistant: Hi. I am your assistant and I will provide expert full response in full details. Please feel free to ask any question and I will always answer it.
33
+
34
+ User: {instruction}
35
+
36
+ Assistant:"""
37
+
38
+ def evaluate(
39
+ ctx,
40
+ token_count=200,
41
+ temperature=1.0,
42
+ top_p=0.7,
43
+ presencePenalty = 0.1,
44
+ countPenalty = 0.1,
45
+ ):
46
+ args = PIPELINE_ARGS(temperature = max(0.2, float(temperature)), top_p = float(top_p),
47
+ alpha_frequency = countPenalty,
48
+ alpha_presence = presencePenalty,
49
+ token_ban = [], # ban the generation of some tokens
50
+ token_stop = [0]) # stop generation whenever you see any token here
51
+ ctx = ctx.strip()
52
+ all_tokens = []
53
+ out_last = 0
54
+ out_str = ''
55
+ occurrence = {}
56
+ state = None
57
+ for i in range(int(token_count)):
58
+ out, state = model.forward(pipeline.encode(ctx)[-ctx_limit:] if i == 0 else [token], state)
59
+ for n in occurrence:
60
+ out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)
61
+
62
+ token = pipeline.sample_logits(out, temperature=args.temperature, top_p=args.top_p)
63
+ if token in args.token_stop:
64
+ break
65
+ all_tokens += [token]
66
+ for xxx in occurrence:
67
+ occurrence[xxx] *= 0.996
68
+ if token not in occurrence:
69
+ occurrence[token] = 1
70
+ else:
71
+ occurrence[token] += 1
72
+
73
+ tmp = pipeline.decode(all_tokens[out_last:])
74
+ if '\ufffd' not in tmp:
75
+ out_str += tmp
76
+ yield out_str.strip()
77
+ out_last = i + 1
78
+
79
+ gpu_info = nvmlDeviceGetMemoryInfo(gpu_h)
80
+ timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
81
+ print(f'{timestamp} - vram {gpu_info.total} used {gpu_info.used} free {gpu_info.free}')
82
+ del out
83
+ del state
84
+ gc.collect()
85
+ torch.cuda.empty_cache()
86
+ yield out_str.strip()
87
+
88
+ examples = [
89
+ ["Assistant: Sure! Here is a very detailed plan to create flying pigs:", 333, 1, 0.3, 0, 1],
90
+ ["Assistant: Sure! Here are some ideas for FTL drive:", 333, 1, 0.3, 0, 1],
91
+ ["A few light taps upon the pane made her turn to the window. It had begun to snow again.", 333, 1, 0.3, 0, 1],
92
+ [generate_prompt("Écrivez un programme Python pour miner 1 Bitcoin, avec des commentaires."), 333, 1, 0.3, 0, 1],
93
+ [generate_prompt("東京で訪れるべき素晴らしい場所とその紹介をいくつか挙げてください。"), 333, 1, 0.3, 0, 1],
94
+ [generate_prompt("Write a story using the following information.", "A man named Alex chops a tree down."), 333, 1, 0.3, 0, 1],
95
+ ["Assistant: Here is a very detailed plan to kill all mosquitoes:", 333, 1, 0.3, 0, 1],
96
+ ['''Edward: I am Edward Elric from fullmetal alchemist. I am in the world of full metal alchemist and know nothing of the real world.
97
+
98
+ Player: Hello Edward. What have you been up to recently?
99
+
100
+ Edward:''', 333, 1, 0.3, 0, 1],
101
+ [generate_prompt("写一篇关于水利工程的流体力学模型的论文,需要详细全面。"), 333, 1, 0.3, 0, 1],
102
+ ['''“当然可以,大宇宙不会因为这五公斤就不坍缩了。”关一帆说,他还有一个没说出来的想法:也许大宇宙真的会因为相差一个原子的质量而由封闭转为开放。大自然的精巧有时超出想象,比如生命的诞生,就需要各项宇宙参数在几亿亿分之一精度上的精确配合。但程心仍然可以留下她的生态球,因为在那无数文明创造的无数小宇宙中,肯定有相当一部分不响应回归运动的号召,所以,大宇宙最终被夺走的质量至少有几亿吨,甚至可能是几亿亿亿吨。
103
+ 但愿大宇宙能够忽略这个误差。
104
+ 程心和关一帆进入了飞船,智子最后也进来了。她早就不再穿那身华丽的和服了,她现在身着迷彩服,再次成为一名轻捷精悍的战士,她的身上佩带着许多武器和生存装备,最引人注目的是那把插在背后的武士刀。
105
+ “放心,我在,你们就在!”智子对两位人类朋友说。
106
+ 聚变发动机启动了,推进器发出幽幽的蓝光,飞船缓缓地穿过了宇宙之门。
107
+ 小宇宙中只剩下漂流瓶和生态球。漂流瓶隐没于黑暗里,在一千米见方的宇宙中,只有生态球里的小太阳发出一点光芒。在这个小小的生命世界中,几只清澈的水球在零重力环境中静静地飘浮着,有一条小鱼从一只水球中蹦出,跃入另一只水球,轻盈地穿游于绿藻之间。在一小块陆地上的草丛中,有一滴露珠从一片草叶上脱离,旋转着飘起,向太空中折射出一缕晶莹的阳光。''', 333, 1, 0.3, 0, 1],
108
+ ]
109
+
110
+ ##########################################################################
111
+
112
+ with gr.Blocks(title=title) as demo:
113
+ gr.HTML(f"<div style=\"text-align: center;\">\n<h1>RWKV-5 World v2 - {title}</h1>\n</div>")
114
+ with gr.Tab("Raw Generation"):
115
+ gr.Markdown(f"This is [RWKV-5 World v2](https://huggingface.co/BlinkDL/rwkv-5-world) with 1.5B params - a 100% attention-free RNN [RWKV-LM](https://github.com/BlinkDL/RWKV-LM). Supports all 100+ world languages and code. And we have [200+ Github RWKV projects](https://github.com/search?o=desc&p=1&q=rwkv&s=updated&type=Repositories). *** Please try examples first (bottom of page) *** (edit them to use your question). Demo limited to ctxlen {ctx_limit}.")
116
+ with gr.Row():
117
+ with gr.Column():
118
+ prompt = gr.Textbox(lines=2, label="Prompt", value="Assistant: Sure! Here is a very detailed plan to create flying pigs:")
119
+ token_count = gr.Slider(10, 333, label="Max Tokens", step=10, value=333)
120
+ temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=1.0)
121
+ top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.3)
122
+ presence_penalty = gr.Slider(0.0, 1.0, label="Presence Penalty", step=0.1, value=0)
123
+ count_penalty = gr.Slider(0.0, 1.0, label="Count Penalty", step=0.1, value=1)
124
+ with gr.Column():
125
+ with gr.Row():
126
+ submit = gr.Button("Submit", variant="primary")
127
+ clear = gr.Button("Clear", variant="secondary")
128
+ output = gr.Textbox(label="Output", lines=5)
129
+ data = gr.Dataset(components=[prompt, token_count, temperature, top_p, presence_penalty, count_penalty], samples=examples, label="Example Instructions", headers=["Prompt", "Max Tokens", "Temperature", "Top P", "Presence Penalty", "Count Penalty"])
130
+ submit.click(evaluate, [prompt, token_count, temperature, top_p, presence_penalty, count_penalty], [output])
131
+ clear.click(lambda: None, [], [output])
132
+ data.click(lambda x: x, [data], [prompt, token_count, temperature, top_p, presence_penalty, count_penalty])
133
+
134
+ demo.queue(concurrency_count=1, max_size=10)
135
+ demo.launch(share=False)