import gradio as gr
import torch
import gc, os
os.environ["RWKV_V7_ON"] = '1'
os.environ["RWKV_JIT_ON"] = '1'
os.environ["RWKV_CUDA_ON"] = '1' # if '1' then use CUDA kernel for seq mode (much faster)
from rwkv.model import RWKV
from rwkv.utils import PIPELINE, PIPELINE_ARGS
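# Note: the RWKV_* environment variables above must be set before importing
# rwkv.model, because the rwkv package reads them at import time to decide
# whether to enable the v7 path, the TorchScript JIT, and the custom CUDA kernel.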

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # unused: the model strategy below is hard-coded to 'cuda fp16'
ctx_limit = 4096  # max prompt tokens fed to the model
gen_limit = 4096  # max tokens to generate

########################## text rwkv ################################################################

title_v6 = "RWKV_v7_G1a_0.4B_Translate_ctx4096_20250914"
model_path_v6 = "./RWKV_v7_G1a_0.4B_Translate_ctx4096_20250914_95%25.pth"
model_v6 = RWKV(model=model_path_v6.replace('.pth',''), strategy='cuda fp16')
pipeline_v6 = PIPELINE(model_v6, "rwkv_vocab_v20230424")

args = model_v6.args

penalty_decay = 0.996  # per-token decay applied to the accumulated repetition penalties

def evaluate(
    ctx,
    token_count=200,
    temperature=1.0,
    top_p=0.7,
    presencePenalty = 0.1,
    countPenalty = 0.1,
):
    args = PIPELINE_ARGS(temperature = max(0.2, float(temperature)), top_p = float(top_p),
                     alpha_frequency = countPenalty,
                     alpha_presence = presencePenalty,
                     token_ban = [], # ban the generation of some tokens
                     token_stop = [0]) # stop generation whenever you see any token here
    ctx = ctx.strip()
    all_tokens = []
    out_last = 0
    out_str = ''
    occurrence = {}
    state = None
    for i in range(int(token_count)):

        # first step: feed the (truncated) prompt; later steps feed one token at a time
        input_ids = pipeline_v6.encode(ctx)[-ctx_limit:] if i == 0 else [token]
        out, state = model_v6.forward(input_ids, state)
        # penalize tokens already generated (presence + frequency penalties)
        for n in occurrence:
            out[n] -= (args.alpha_presence + occurrence[n] * args.alpha_frequency)

        token = pipeline_v6.sample_logits(out, temperature=args.temperature, top_p=args.top_p)
        if token in args.token_stop:
            break
        all_tokens += [token]
        # decay accumulated penalties so repetition is forgiven over time
        for xxx in occurrence:
            occurrence[xxx] *= penalty_decay
            
        ttt = pipeline_v6.decode([token])
        www = 1
        if ttt in ' \t0123456789':
            www = 0  # exempt whitespace and digits from the repetition penalty
        #elif ttt in '\r\n,.;?!"\':+-*/=#@$%^&_`~|<>\\()[]{},。;“”:?!()【】':
        #    www = 0.5
        if token not in occurrence:
            occurrence[token] = www
        else:
            occurrence[token] += www
            
        tmp = pipeline_v6.decode(all_tokens[out_last:])
        if '\ufffd' not in tmp:  # only emit once the bytes decode to valid UTF-8
            out_str += tmp
            yield out_str.strip()
            out_last = i + 1
    del out
    del state
    gc.collect()
    torch.cuda.empty_cache()
    yield out_str.strip()
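# A minimal sketch of driving evaluate() directly, without the Gradio UI
# (assumption: run after this module has loaded the model). evaluate() is a
# generator that streams the decoded text so far; the final yield is the
# complete output.
#
#   prompt = "English: Hello, world!\n\nChinese:"
#   result = ""
#   for partial in evaluate(prompt, token_count=200, temperature=1.0,
#                           top_p=0.7, presencePenalty=0.1, countPenalty=0.1):
#       result = partial
#   print(result)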

def translate_english_to_chinese(english_text, token_count, temperature, top_p, presence_penalty, count_penalty):
    if not english_text.strip():
        yield "请输入英文内容。"  # "Please enter English text." (must yield, not return: this is a generator)
        return

    full_prompt = f"English: {english_text}\n\nChinese:"
    for output in evaluate(full_prompt, token_count, temperature, top_p, presence_penalty, count_penalty):
        yield output

def translate_chinese_to_english(chinese_text, token_count, temperature, top_p, presence_penalty, count_penalty):
    if not chinese_text.strip():
        yield "请输入中文内容。"  # "Please enter Chinese text." (must yield, not return: this is a generator)
        return

    full_prompt = f"Chinese: {chinese_text}\n\nEnglish:"
    for output in evaluate(full_prompt, token_count, temperature, top_p, presence_penalty, count_penalty):
        yield output
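# Both wrappers build a "<SourceLang>: <text>\n\nTargetLang:" prompt, the
# template the fine-tuned translation checkpoint appears to expect (inferred
# from the prompts above); evaluate() then streams the completion after the colon.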


with gr.Blocks(title=f"{title_v6} English <-> Chinese") as demo:
    with gr.Tab("English To Chinese"):
        gr.HTML(f"<div style='text-align:center;'><h1>{title_v6} English -> Chinese</h1></div>")
        with gr.Row():
            with gr.Column():
                english_input = gr.Textbox(
                    label="英文输入(注意不能有空行)",
                    lines=20,
                    placeholder="请输入英文内容...",
                    value="ROCm is an open-source stack, composed primarily of open-source software, designed for graphics processing unit (GPU) computation. ROCm consists of a collection of drivers, development tools, and APIs that enable GPU programming from low-level kernel to end-user applications.\n"
                        "With ROCm, you can customize your GPU software to meet your specific needs.You can develop, collaborate, test, and deploy your applications in a free, open source, integrated, and secure software ecosystem. ROCm is particularly well-suited to GPU-accelerated high-performance computing (HPC), artificial intelligence (AI), scientific computing, and computer aided design (CAD).\n"
                        "ROCm is powered by AMD’s Heterogeneous-computing Interface for Portability (HIP), an open-source software C++ GPU programming environment and its corresponding runtime. HIP allows ROCm developers to create portable applications on different platforms by deploying code on a range of platforms, from dedicated gaming GPUs to exascale HPC clusters.\n"
                        "ROCm supports programming models, such as OpenMP and OpenCL, and includes all necessary open source software compilers, debuggers, and libraries. ROCm is fully integrated into machine learning (ML) frameworks, such as PyTorch and TensorFlow."
                )

            with gr.Column():
                chinese_output = gr.Textbox(
                    label="中文输出",
                    lines=20,
                    placeholder="翻译结果将显示在此处",
                    value=""
                )

        with gr.Row():
            translate_btn = gr.Button("Translate", variant="primary")
            clear_btn = gr.Button("Clear", variant="secondary")
            stop_btn = gr.Button("Stop", variant="stop")

        with gr.Accordion("Advanced Settings", open=False):
            token_count = gr.Slider(10, gen_limit, label="Max Tokens", step=10, value=gen_limit)
            temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=1.0)
            top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0)
            presence_penalty = gr.Slider(0.0, 1.0, label="Presence Penalty", step=0.1, value=0)
            count_penalty = gr.Slider(0.0, 1.0, label="Count Penalty", step=0.1, value=0)

        translate_event = translate_btn.click(
            fn=translate_english_to_chinese,
            inputs=[english_input, token_count, temperature, top_p, presence_penalty, count_penalty],
            outputs=[chinese_output]
        )

        clear_btn.click(
            fn=lambda: ("", ""),
            inputs=[],
            outputs=[english_input, chinese_output]
        )

        stop_btn.click(
            fn=None,
            inputs=None,
            outputs=None,
            cancels=[translate_event]  # cancel the in-flight streaming translation
        )
    with gr.Tab("Chinses To English"):    
        gr.HTML(f"<div style='text-align:center;'><h1>RWKV_v7_G1_1.5B_Translate_ctx4096 Chinses -> English</h1></div>")
        with gr.Row():
            with gr.Column():
                chinese_input = gr.Textbox(
                    label="中文输入(注意不能有空行)",
                    lines=20,
                    placeholder="请输入中文内容...",
                    value="ROCm是一个开源栈,主要由开源软件组成,旨在用于图形处理单元(GPU)计算。ROCm由一系列驱动程序、开发工具和API组成,这些工具和API允许从低级内核到最终用户应用程序对GPU进行编程。"
                        "使用ROCm,您可以根据您的特定需求定制GPU软件。您可以在一个免费、开源、集成和安全的软件生态系统中开发、协作、测试和部署应用程序。ROCm特别适合GPU加速的高性能计算(HPC)、人工智能(AI)、科学计算和计算机辅助设计(CAD)。"
                        "ROCm由AMD的可移植性图形处理接口(HIP)驱动,这是一个开源的C++ GPU编程环境及其相应的运行时。HIP允许ROCm开发者在不同平台上创建可移植应用程序,通过在从专用游戏GPU到exascale HPC集群的各种平台上部署代码来实现这一目标。"
                        "ROCm支持编程模型,如OpenMP和OpenCL,并包含所有必要的开源软件编译器、调试器和库。ROCm完全集成到机器学习(ML)框架中,如PyTorch和TensorFlow。"
                )

            with gr.Column():
                english_output = gr.Textbox(
                    label="英文输出",
                    lines=20,
                    placeholder="翻译结果将显示在此处",
                    value=""
                )

        with gr.Row():
            translate_btn = gr.Button("Translate", variant="primary")
            clear_btn = gr.Button("Clear", variant="secondary")
            stop_btn = gr.Button("Stop", variant="stop")

        with gr.Accordion("Advanced Settings", open=False):
            token_count = gr.Slider(10, gen_limit, label="Max Tokens", step=10, value=gen_limit)
            temperature = gr.Slider(0.2, 2.0, label="Temperature", step=0.1, value=1.0)
            top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0)
            presence_penalty = gr.Slider(0.0, 1.0, label="Presence Penalty", step=0.1, value=0)
            count_penalty = gr.Slider(0.0, 1.0, label="Count Penalty", step=0.1, value=0)

        translate_event = translate_btn.click(
            fn=translate_chinese_to_english,
            inputs=[chinese_input, token_count, temperature, top_p, presence_penalty, count_penalty],
            outputs=[english_output]
        )

        clear_btn.click(
            fn=lambda: ("", ""),
            inputs=[],
            outputs=[chinese_input, english_output]
        )

        stop_btn.click(
            fn=None,
            inputs=None,
            outputs=None,
            cancels=[translate_event]  
        )

demo.queue(max_size=10, default_concurrency_limit=1)
demo.launch(share=False)
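
# To run locally (assumes a CUDA GPU and the checkpoint at model_path_v6;
# the filename app.py is an assumption for this sketch):
#   pip install torch rwkv gradio
#   python app.py
# Building the custom CUDA kernel (RWKV_CUDA_ON='1') requires a working CUDA
# toolchain; set RWKV_CUDA_ON='0' to fall back to the slower non-kernel path.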