Update app.py
app.py CHANGED
@@ -15,15 +15,6 @@ model = AutoModelForCausalLM.from_pretrained("Rorical/0-roleplay", return_dict=T
 tokenizer = AutoTokenizer.from_pretrained("Rorical/0-roleplay", trust_remote_code=True)
 tokenizer.chat_template = "{% for message in messages %}{{'<|im_start|>' + ((message['role'] + ':\n') if message['role'] != '' else '') + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>星野:\n' }}{% endif %}" # Be careful: this model uses a custom chat template.
 
-if torch.cuda.is_available():
-    device = torch.device("cuda")
-    print(f"Using GPU: {torch.cuda.get_device_name(device)}")
-else:
-    device = torch.device("cpu")
-    print("Using CPU")
-
-model = model.to(device)
-
 # Define the response function
 @spaces.GPU
 def respond(
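
The deleted block picked a device at import time and moved the model there. Under the `@spaces.GPU` decorator (Hugging Face ZeroGPU), a GPU is attached only while a decorated call runs, which is presumably why this commit drops the startup device logic. A minimal sketch of the resulting pattern, assuming a ZeroGPU Space; the real body of `respond` is not shown in this hunk, and `max_new_tokens` is an arbitrary placeholder:

import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Startup: load the weights once; on ZeroGPU hardware no GPU is attached yet.
model = AutoModelForCausalLM.from_pretrained(
    "Rorical/0-roleplay", return_dict=True, trust_remote_code=True
)
tokenizer = AutoTokenizer.from_pretrained("Rorical/0-roleplay", trust_remote_code=True)

@spaces.GPU  # a GPU is attached for the duration of each decorated call
def respond(message):
    # Resolve the device inside the GPU context instead of at import time.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    inputs = tokenizer(message, return_tensors="pt").to(device)
    output = model.to(device).generate(**inputs, max_new_tokens=128)
    return tokenizer.decode(output[0], skip_special_tokens=True)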
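
For reference, the custom template kept by this hunk wraps each message as <|im_start|>role:\n…<|im_end|> and, when add_generation_prompt is set, appends <|im_start|>星野:\n so the model answers in character. A short sketch of how it renders through tokenizer.apply_chat_template; the role name and message content here are made-up examples:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Rorical/0-roleplay", trust_remote_code=True)
# Same template string as in the diff above.
tokenizer.chat_template = "{% for message in messages %}{{'<|im_start|>' + ((message['role'] + ':\n') if message['role'] != '' else '') + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>星野:\n' }}{% endif %}"

messages = [{"role": "老师", "content": "Hello"}]  # hypothetical turn ("老师" = "teacher")
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
# prompt == "<|im_start|>老师:\nHello<|im_end|>\n<|im_start|>星野:\n"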