snowkylin committed on
Commit
f1357f5
·
1 Parent(s): 74a347f

add analytics code

Browse files
Files changed (1) hide show
  1. app.py +40 -11
app.py CHANGED
@@ -12,16 +12,20 @@ import base64
12
  from openai import OpenAI
13
 
14
  default_img = None
 
15
  default_base_url = "https://openrouter.ai/api/v1"
16
  default_api_model = "google/gemma-3-27b-it"
17
 
18
  model_id = "google/gemma-3-4b-it"
 
 
19
 
20
- model = Gemma3ForConditionalGeneration.from_pretrained(
21
- model_id, device_map="auto"
22
- ).eval()
 
23
 
24
- processor = AutoProcessor.from_pretrained(model_id)
25
 
26
  generate_kwargs = {
27
  'max_new_tokens': 1000,
@@ -29,6 +33,16 @@ generate_kwargs = {
29
  'temperature': 1.0
30
  }
31
 
 
 
 
 
 
 
 
 
 
 
32
  lang_store = {
33
  "und": {
34
  "confirm": "Confirm",
@@ -36,7 +50,7 @@ lang_store = {
36
  "additional_description": "Character description (optional)",
37
  "description_placeholder": "Information that is not shown in the reference sheet, such as the character's name, personality, past stories and habit of saying.",
38
  "more_imgs": "More reference images of the character (optional)",
39
- "title": "<h1>Chat with a character via reference sheet!</h1>",
40
  "powered_by_gemma": "<p>Powered by <a href='https://blog.google/technology/developers/gemma-3/'>Gemma 3</a></p>",
41
  "upload": "Upload the reference sheet of your character here",
42
  "prompt": "You are the character in the image, use %s. Use a conversational, oral tone. Do not mention the reference images directly. Start without confirmation.",
@@ -69,7 +83,7 @@ lang_store = {
69
  "additional_description": "角色描述(可选)",
70
  "description_placeholder": "未在设定图中包含的角色信息,如角色姓名、性格、言语习惯、过往经历等。",
71
  "more_imgs": "更多角色参考图(可选,可上传多张)",
72
- "title": "<h1>与设定图中的角色聊天!</h1>",
73
  "powered_by_gemma": "<p>由 <a href='https://blog.google/technology/developers/gemma-3/'>Gemma 3</a> 驱动</p>",
74
  "upload": "在这里上传角色设定图",
75
  "prompt": "你的身份是图中的角色,使用%s。使用聊天的,口语化的方式表达。不在回复中直接提及参考图。无需确认。",
@@ -196,7 +210,7 @@ def set_default_character_language(request: gr.Request):
196
 
197
  with gr.Blocks(title="Chat with a character via reference sheet!") as demo:
198
  with Translate(lang_store) as lang:
199
- gr.HTML(_("title"))
200
  img = gr.Image(type="filepath", value=default_img, label=_("upload"), render=False)
201
  description = gr.TextArea(
202
  value=_("default_description"),
@@ -233,7 +247,7 @@ with gr.Blocks(title="Chat with a character via reference sheet!") as demo:
233
  (_("local"), "local"),
234
  (_("API"), "api")
235
  ],
236
- value='api',
237
  label=_("method"),
238
  render=False,
239
  interactive=True
@@ -244,16 +258,30 @@ with gr.Blocks(title="Chat with a character via reference sheet!") as demo:
244
  with gr.Row():
245
  with gr.Column(scale=4):
246
  img.render()
247
- with gr.Tab(_("description")):
 
 
 
 
 
 
 
 
 
 
 
248
  description.render()
249
  character_language.render()
250
  more_imgs.render()
251
- with gr.Tab(_("more_options")):
 
 
 
 
252
  engine.render()
253
  base_url.render()
254
  api_model.render()
255
  api_key.render()
256
- confirm_btn.render()
257
  with gr.Column(scale=6):
258
  chat = gr.ChatInterface(
259
  response,
@@ -263,6 +291,7 @@ with gr.Blocks(title="Chat with a character via reference sheet!") as demo:
263
  )
264
  confirm_btn.click(prefill_chatbot, [img, description, more_imgs, character_language, engine, base_url, api_model, api_key], chat.chatbot)\
265
  .then(lambda x: x, chat.chatbot, chat.chatbot_value)
 
266
  demo.load(set_default_character_language, None, character_language)
267
 
268
 
 
12
  from openai import OpenAI
13
 
14
  default_img = None
15
+ default_engine = "api"
16
  default_base_url = "https://openrouter.ai/api/v1"
17
  default_api_model = "google/gemma-3-27b-it"
18
 
19
  model_id = "google/gemma-3-4b-it"
20
+ huggingface_spaces = "HUGGINGFACE_SPACES" in os.environ and os.environ['HUGGINGFACE_SPACES'] == "1"
21
+ local = "local" in os.environ and os.environ['LOCAL'] == "1"
22
 
23
+ if huggingface_spaces or local:
24
+ model = Gemma3ForConditionalGeneration.from_pretrained(
25
+ model_id, device_map="auto"
26
+ ).eval()
27
 
28
+ processor = AutoProcessor.from_pretrained(model_id)
29
 
30
  generate_kwargs = {
31
  'max_new_tokens': 1000,
 
33
  'temperature': 1.0
34
  }
35
 
36
+ analytics_code = """<!-- Google tag (gtag.js) -->
37
+ <script async src="https://www.googletagmanager.com/gtag/js?id=G-48LQ5P3NNR"></script>
38
+ <script>
39
+ window.dataLayer = window.dataLayer || [];
40
+ function gtag(){dataLayer.push(arguments);}
41
+ gtag('js', new Date());
42
+
43
+ gtag('config', 'G-48LQ5P3NNR');
44
+ </script>"""
45
+
46
  lang_store = {
47
  "und": {
48
  "confirm": "Confirm",
 
50
  "additional_description": "Character description (optional)",
51
  "description_placeholder": "Information that is not shown in the reference sheet, such as the character's name, personality, past stories and habit of saying.",
52
  "more_imgs": "More reference images of the character (optional)",
53
+ "title": """# RefSheet Chat -- Chat with a character via reference sheet!""",
54
  "powered_by_gemma": "<p>Powered by <a href='https://blog.google/technology/developers/gemma-3/'>Gemma 3</a></p>",
55
  "upload": "Upload the reference sheet of your character here",
56
  "prompt": "You are the character in the image, use %s. Use a conversational, oral tone. Do not mention the reference images directly. Start without confirmation.",
 
83
  "additional_description": "角色描述(可选)",
84
  "description_placeholder": "未在设定图中包含的角色信息,如角色姓名、性格、言语习惯、过往经历等。",
85
  "more_imgs": "更多角色参考图(可选,可上传多张)",
86
+ "title": """# RefSheet Chat——与设定图中的角色聊天!""",
87
  "powered_by_gemma": "<p>由 <a href='https://blog.google/technology/developers/gemma-3/'>Gemma 3</a> 驱动</p>",
88
  "upload": "在这里上传角色设定图",
89
  "prompt": "你的身份是图中的角色,使用%s。使用聊天的,口语化的方式表达。不在回复中直接提及参考图。无需确认。",
 
210
 
211
  with gr.Blocks(title="Chat with a character via reference sheet!") as demo:
212
  with Translate(lang_store) as lang:
213
+ gr.Markdown(_("title"))
214
  img = gr.Image(type="filepath", value=default_img, label=_("upload"), render=False)
215
  description = gr.TextArea(
216
  value=_("default_description"),
 
247
  (_("local"), "local"),
248
  (_("API"), "api")
249
  ],
250
+ value=default_engine,
251
  label=_("method"),
252
  render=False,
253
  interactive=True
 
258
  with gr.Row():
259
  with gr.Column(scale=4):
260
  img.render()
261
+ if local or huggingface_spaces:
262
+ with gr.Tab(_("description")):
263
+ description.render()
264
+ character_language.render()
265
+ more_imgs.render()
266
+ confirm_btn.render()
267
+ with gr.Tab(_("more_options")):
268
+ engine.render()
269
+ base_url.render()
270
+ api_model.render()
271
+ api_key.render()
272
+ else:
273
  description.render()
274
  character_language.render()
275
  more_imgs.render()
276
+ confirm_btn.render()
277
+ engine.visible = False
278
+ base_url.visible = False
279
+ api_model.visible = False
280
+ api_key.visible = False
281
  engine.render()
282
  base_url.render()
283
  api_model.render()
284
  api_key.render()
 
285
  with gr.Column(scale=6):
286
  chat = gr.ChatInterface(
287
  response,
 
291
  )
292
  confirm_btn.click(prefill_chatbot, [img, description, more_imgs, character_language, engine, base_url, api_model, api_key], chat.chatbot)\
293
  .then(lambda x: x, chat.chatbot, chat.chatbot_value)
294
+ gr.HTML(analytics_code)
295
  demo.load(set_default_character_language, None, character_language)
296
 
297