Update app.py
app.py CHANGED
@@ -5,8 +5,10 @@ DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 
 model = GPTLanguageModel().to(DEVICE)
 model.load_state_dict(torch.load("mini-gpt.pth",map_location=DEVICE), strict=False)
+print("Model Loaded")
 model.eval()
 answer = decode(model.generate(context, max_new_tokens=1000)[0].tolist())
+print("Answer Generated")
 
 def display(text,number):
     combined_text = text + answer[:number + 1]
@@ -15,4 +17,4 @@ def display(text,number):
 input_box = gr.Textbox(label="Story Lines",value="Once Upon a Time")
 input_slider = gr.Slider(minimum=500, maximum=1000, label="Select the maxium number of tokens/words:",step=100)
 output_text = gr.Textbox()
-gr.Interface(fn=display, inputs=[input_box,input_slider], outputs=output_text).launch()
+gr.Interface(fn=display, inputs=[input_box,input_slider], outputs=output_text).launch(debug=True)
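
For context, below is a minimal sketch of how app.py might read after this commit. Only the lines visible in the diff above are taken from the commit; GPTLanguageModel, decode, the seed context, the return statement of display, and the model.py module they are imported from here are assumptions for illustration, not part of the diff.

# Sketch of the full app.py after this commit (assumptions marked in comments).
import torch
import gradio as gr

# Assumed: the Space defines GPTLanguageModel and decode in a separate module
# (called model.py here purely for illustration; not shown in the diff).
from model import GPTLanguageModel, decode

DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Load the checkpoint named in the diff; strict=False tolerates minor
# mismatches between the checkpoint keys and the current architecture.
model = GPTLanguageModel().to(DEVICE)
model.load_state_dict(torch.load("mini-gpt.pth", map_location=DEVICE), strict=False)
print("Model Loaded")
model.eval()

# Assumed seed context (not shown in the diff): generate one long sample up
# front, and let the UI slice a prefix of it later.
context = torch.zeros((1, 1), dtype=torch.long, device=DEVICE)
answer = decode(model.generate(context, max_new_tokens=1000)[0].tolist())
print("Answer Generated")

def display(text, number):
    # Append the first number + 1 generated characters to the user's prompt.
    combined_text = text + answer[:number + 1]
    return combined_text  # assumed; the function body after this line is elided in the diff

input_box = gr.Textbox(label="Story Lines", value="Once Upon a Time")
input_slider = gr.Slider(minimum=500, maximum=1000, label="Select the maxium number of tokens/words:", step=100)
output_text = gr.Textbox()

# debug=True keeps the process attached so the prints above show up in the Space logs.
gr.Interface(fn=display, inputs=[input_box, input_slider], outputs=output_text).launch(debug=True)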