import torch
import gradio as gr

from model import *  # expects model.py to provide GPTLanguageModel, decode, and context

DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

# Load the trained mini-GPT weights and switch to inference mode.
model = GPTLanguageModel().to(DEVICE)
model.load_state_dict(torch.load("mini-gpt.pth", map_location=DEVICE), strict=False)
model.eval()

# Generate up to 3000 new tokens once at startup and decode them into text.
answer = decode(model.generate(context, max_new_tokens=3000)[0].tolist())


def display(text, number):
    # Append the first `number + 1` characters of the pre-generated text to the user's input.
    combined_text = text + answer[:number + 1]
    return combined_text


input_box = gr.Textbox()
input_slider = gr.Slider(
    minimum=500,
    maximum=2000,
    value=500,
    step=100,
    label="Select the maximum number of tokens/words:",
)
output_text = gr.Textbox()

gr.Interface(fn=display, inputs=[input_box, input_slider], outputs=output_text).launch()