Commit 34a13d3
Parent(s): 37f0e68

Update appbkup.py

appbkup.py CHANGED (+45 -8)
@@ -1,7 +1,8 @@
 import os
 from threading import Thread
 from typing import Iterator
-
+import requests
+import json
 import gradio as gr
 import spaces
 import torch
@@ -9,8 +10,8 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 
 HF_TOKEN = "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" ##Llama Write Token
 
-MAX_MAX_NEW_TOKENS =
-DEFAULT_MAX_NEW_TOKENS =
+MAX_MAX_NEW_TOKENS = 8192
+DEFAULT_MAX_NEW_TOKENS = 4096
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 DESCRIPTION = """\
@@ -32,13 +33,14 @@ def generate(
     message: str,
     chat_history: list[tuple[str, str]],
     system_prompt: str,
-    max_new_tokens: int =
+    max_new_tokens: int = 8192,
     temperature: float = 0.6,
     top_p: float = 0.9,
     top_k: int = 50,
     repetition_penalty: float = 1.2,
 ) -> Iterator[str]:
     conversation = []
+
     if system_prompt:
         conversation.append({"role": "system", "content": system_prompt})
     for user, assistant in chat_history:
@@ -66,10 +68,45 @@ def generate(
     t = Thread(target=model.generate, kwargs=generate_kwargs)
     t.start()
 
-
-
-
-
+    concatenated_outputs = "".join([r"{}".format(text) for text in streamer])
+
+    # Mask the output here
+    masked_output = mask_with_protecto(concatenated_outputs)
+    masked_output = format_for_html(masked_output)
+    yield masked_output
+
+
+# Ensuring entity tags are properly rendered
+def format_for_html(text):
+    text = text.replace("<", "&lt;")
+    text = text.replace(">", "&gt;")
+    return text
+
+
+def mask_with_protecto(text_for_prompt):
+    mask_request_url = "https://trial.protecto.ai/api/vault/mask"
+    headers = {
+        "Content-Type": "application/json; charset=utf-8",
+        "Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3N1ZXIiOiJQcm90ZWN0byIsImV4cGlyYXRpb25fZGF0ZSI6IjIwMjMtMTEtMDQiLCJwZXJtaXNzaW9ucyI6WyJyZWFkIiwid3JpdGUiXSwidXNlcl9uYW1lIjoiZGlwYXlhbkBjb2V1c2xlYXJuaW5nLmNvbSIsImRiX25hbWUiOiJwcm90ZWN0b19jb2V1c2xlYXJuaW5nX25ydG1mYmFrIiwiaGFzaGVkX3Bhc3N3b3JkIjoiMjIyMTI2ZWNiZTlkZTRmNWJlODdiY2QyYWFlZWRlM2FmNDc5MzMxZmNhOTUxMWU0MDRiNzkxNDM1MGI4MWUyYiJ9.DeIK00NuhM51lRwWdnUXuQSBA1aBn5AQ8qM3pIeM01U"
+    }
+    mask_input = {
+        "mask": [
+            {
+                "value": text_for_prompt
+            }
+        ]
+    }
+    response = requests.put(mask_request_url, headers=headers, json=mask_input)
+    if response.status_code == 200:
+        # Parse the masked result from the API response and format it for display
+        masked_result = response.json()
+        masked_result_token_value = str(masked_result["data"][0]["token_value"])
+        return masked_result_token_value
+    else:
+        # Return an error message if the API request was not successful.
+        return str(response.status_code)
+
 
 
 chat_interface = gr.ChatInterface(
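A few notes on this change. First, the limits: MAX_MAX_NEW_TOKENS = 8192 now caps the generation-length slider, DEFAULT_MAX_NEW_TOKENS = 4096 seeds it, and max_new_tokens picks up the new ceiling as its default. MAX_INPUT_TOKEN_LENGTH stays environment-driven, so the input window can be widened without editing the file, e.g. MAX_INPUT_TOKEN_LENGTH=8192 python appbkup.py. A minimal sketch of the prompt-trimming guard this value usually feeds in this Space template (the guard itself sits outside the hunks shown, so it is an assumption, not committed code):

import os

MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))

def trim_input_ids(input_ids):
    # Assumed guard: keep only the most recent MAX_INPUT_TOKEN_LENGTH tokens
    # of a (1, seq_len) input tensor so the prompt fits the model's context.
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
    return input_ids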
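Second, the conversation assembly in generate() is unchanged apart from an added blank line, but it is the structure the masked output eventually flows back into. A small illustration of what the loop builds from Gradio's (user, assistant) history tuples (the inputs are hypothetical, and the loop body and final user-turn append follow the standard template, elided from the hunks shown):

system_prompt = "You are a helpful assistant."     # hypothetical
chat_history = [("Hi", "Hello! How can I help?")]  # hypothetical
message = "Summarize our chat."                    # hypothetical

conversation = []
if system_prompt:
    conversation.append({"role": "system", "content": system_prompt})
for user, assistant in chat_history:
    conversation.extend([
        {"role": "user", "content": user},
        {"role": "assistant", "content": assistant},
    ])
conversation.append({"role": "user", "content": message})
# conversation is now: [system, user, assistant, user]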
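Third, the streaming behaviour. The four deleted lines in the last hunk did not survive this page capture, but in the stock template they are the incremental loop that yields partial text as TextIteratorStreamer produces it, roughly (an assumption, shown for comparison):

outputs = []
for text in streamer:
    outputs.append(text)
    yield "".join(outputs)

The replacement drains the streamer completely before its single yield; the r"{}".format(text) wrapper is a no-op (the raw prefix affects only the literal "{}"), so the line is equivalent to "".join(streamer). The chat window therefore no longer streams token by token: it blocks until generation finishes, masks the full reply once, and renders it in one piece, which is the trade-off of masking the complete output in a single API call.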
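Fourth, format_for_html. The page capture HTML-unescapes its replacement strings, making them look like no-ops; the intent, restored in the hunk above, is "&lt;"/"&gt;", so that any angle-bracketed entity tags in the masked text render literally in the chat window instead of being swallowed as HTML. The standard library covers the same case and also escapes "&", which the hand-rolled two-replace version misses; a sketch:

import html

def format_for_html(text):
    # html.escape converts "&", "<" and ">" (quote=False skips quote chars),
    # so a bare "&" in model output cannot corrupt the rendered entities.
    return html.escape(text, quote=False)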
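Fifth, the masking call itself: a PUT to the Protecto vault endpoint with the whole model reply as a single value. How it is meant to be used, with the response shape inferred from the parsing code rather than from API documentation:

# Hypothetical usage; the success value is the "token_value" string from
# response.json()["data"][0], i.e. the input with PII replaced by mask tokens.
masked = mask_with_protecto("My name is John and I live in Paris.")
print(masked)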
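Finally, two rough edges worth noting. On failure the function returns str(response.status_code), so the user sees a bare "401" or "500" as the assistant's reply; and both the HF token and the Protecto bearer token are committed in plain text, where environment variables (as already used for MAX_INPUT_TOKEN_LENGTH) would keep credentials out of the repo. A defensive variant of the request (a sketch; the _safe name, the PROTECTO_TOKEN variable, and the error format are hypothetical, not the committed code):

import os
import requests

def mask_with_protecto_safe(text_for_prompt):
    # Same endpoint and payload as the committed function; only the error
    # handling and the source of the credential differ.
    try:
        response = requests.put(
            "https://trial.protecto.ai/api/vault/mask",
            headers={
                "Content-Type": "application/json; charset=utf-8",
                "Authorization": f"Bearer {os.environ['PROTECTO_TOKEN']}",
            },
            json={"mask": [{"value": text_for_prompt}]},
            timeout=30,
        )
        response.raise_for_status()
        return str(response.json()["data"][0]["token_value"])
    except (requests.RequestException, KeyError, IndexError) as exc:
        return f"[masking failed: {exc}]"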
|