import gradio as gr
import torch
from transformers import (
    BartForConditionalGeneration,
    BartTokenizer
)


def get_device():
    # Pick the GPU if one is available, otherwise fall back to the CPU.
    if torch.cuda.is_available():
        device = torch.device("cuda")
        n_gpus = torch.cuda.device_count()
        first_gpu = torch.cuda.get_device_name(0)
        print(f'There are {n_gpus} GPU(s) available.')
        print(f'GPU to be used: {first_gpu}')
    else:
        print('No GPU available, using the CPU instead.')
        device = torch.device("cpu")
    return device


# Load the tokenizer and model once at startup and move the model to the
# selected device.
model_name = 'unlisboa/bart_qa_assistant'
tokenizer = BartTokenizer.from_pretrained(model_name)
device = get_device()
model = BartForConditionalGeneration.from_pretrained(model_name).to(device)
model.eval()
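# Quick sanity check (an addition, assuming only the standard torch API):
# confirm the weights actually landed on the selected device before wiring
# up the UI.
assert next(model.parameters()).device.type == device.type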
def run_bart(question, censor):
    # Log the incoming request (the `censor` flag is not used yet; see the
    # sketch below run_bart).
    print(question, censor)
    # Tokenize the question argument itself, not the global Gradio Textbox
    # component, and move the tensors to the model's device.
    model_input = tokenizer(question, truncation=True, padding=True, return_tensors="pt")
    generated_answers_encoded = model.generate(input_ids=model_input["input_ids"].to(device),
                                               attention_mask=model_input["attention_mask"].to(device),
                                               # bad_words_ids=bad_words_ids,
                                               force_words_ids=None,
                                               min_length=1,
                                               max_length=100,
                                               do_sample=True,
                                               early_stopping=True,
                                               num_beams=4,
                                               temperature=1.0,
                                               top_k=None,
                                               top_p=None,
                                               # eos_token_id=tokenizer.eos_token_id,
                                               no_repeat_ngram_size=2,
                                               num_return_sequences=1,
                                               return_dict_in_generate=True,
                                               output_scores=True)
    # Decode the first returned sequence into plain text.
    response = tokenizer.batch_decode(generated_answers_encoded['sequences'],
                                      skip_special_tokens=True,
                                      clean_up_tokenization_spaces=True)[0]
    return response
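# A hedged sketch, not part of the original app: the `censor` checkbox is
# currently ignored, and the commented-out `bad_words_ids` argument above
# hints at the intent. One way to wire them together would be to build the
# banned token ids from a blocklist; BLOCKED_WORDS is a hypothetical example.
BLOCKED_WORDS = ["badword1", "badword2"]  # hypothetical placeholder words


def get_bad_words_ids(censor):
    # Return None to disable the filter; otherwise tokenize each blocked word
    # (with a leading space, so the mid-sentence BPE variants are matched)
    # into the List[List[int]] shape that generate() expects for bad_words_ids.
    if not censor:
        return None
    return tokenizer([" " + w for w in BLOCKED_WORDS],
                     add_special_tokens=False).input_ids


# Inside run_bart this could then be passed as
# bad_words_ids=get_bad_words_ids(censor) in the model.generate(...) call.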
examples = [["What's the meaning of life?", True]]
checkbox = gr.Checkbox(value=True, label="Censor output")
question_input = gr.Textbox(lines=2, label='Question:')
answer_output = gr.Textbox(lines=2, label='Answer:')
gr.Interface(fn=run_bart, inputs=[question_input, checkbox], outputs=[answer_output],
             allow_flagging="never", examples=examples).launch()
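# Usage note (assuming a standard Gradio setup): save this file as app.py and
# run `python app.py`; launch() prints a local URL to open in a browser. On
# Hugging Face Spaces the same file is executed automatically on startup.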