import torch
import transformers
import gradio as gr
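
# NOTE: `model`, `tokenizer`, and `prompter` are used below but are created elsewhere
# in the original script. A minimal sketch of that setup (names and repo id are
# assumptions, not taken from this file) could look like:
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("Joaoffg/ELM")
#   model = AutoModelForCausalLM.from_pretrained("Joaoffg/ELM")
#   prompter = Prompter()  # Alpaca-style prompt-template helper from the project
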
def evaluate(instruction):
    # Generate a response:
    input = None
    prompt = prompter.generate_prompt(instruction, input)
    inputs = tokenizer(prompt, return_tensors="pt")
    # inputs = inputs.to("cuda:0")
    input_ids = inputs["input_ids"]
    # Play around with generation strategies for better/more diverse sequences:
    # https://huggingface.co/docs/transformers/generation_strategies
    temperature = 0.2
    top_p = 0.95
    top_k = 25
    num_beams = 1
    # num_beam_groups = num_beams  # see: 'Diverse beam search decoding'
    max_new_tokens = 256
    repetition_penalty = 2.0
    do_sample = True  # allow 'beam sample': do_sample=True, num_beams > 1
    num_return_sequences = 1  # generate multiple candidates, takes longer
    generation_config = transformers.GenerationConfig(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        num_beams=num_beams,
        repetition_penalty=repetition_penalty,
        do_sample=do_sample,
        min_new_tokens=32,
        num_return_sequences=num_return_sequences,
        pad_token_id=0,
        # num_beam_groups=num_beam_groups,
    )
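
    # Example (assumption, not part of the original file): a diverse beam search
    # variant of the config above. Group beam search in transformers requires
    # do_sample=False, num_beams divisible by num_beam_groups, and a nonzero
    # diversity_penalty to push the beam groups apart.
    # diverse_config = transformers.GenerationConfig(
    #     num_beams=4,
    #     num_beam_groups=2,
    #     diversity_penalty=1.0,
    #     do_sample=False,
    #     max_new_tokens=max_new_tokens,
    #     pad_token_id=0,
    # )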
    # (Unused) dict of the same parameters; model.generate below receives them directly.
    generate_params = {
        "input_ids": input_ids,
        "generation_config": generation_config,
        "return_dict_in_generate": True,
        "output_scores": True,
        "max_new_tokens": max_new_tokens,
    }
    with torch.no_grad():
        generation_output = model.generate(
            input_ids=input_ids,
            generation_config=generation_config,
            return_dict_in_generate=True,
            output_scores=True,
            max_new_tokens=max_new_tokens,
        )
    print(f'Instruction: {instruction}')
    # With num_return_sequences=1, only the first decoded sequence is returned.
    for i, s in enumerate(generation_output.sequences):
        output = tokenizer.decode(s, skip_special_tokens=True)
        # print(output)
        return f' {prompter.get_response(output)}'
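
# Quick sanity check (assumed usage, not in the original file): the handler can be
# called directly before wiring up the UI, e.g.
#   print(evaluate("Explain economic growth."))
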
gr.Interface(
    fn=evaluate,
    inputs=[
        gr.components.Textbox(
            lines=2,
            label="Instruction",
            placeholder="Explain economic growth.",
        ),
    ],
    outputs=[
        gr.components.Textbox(
            lines=5,
            label="Output",
        )
    ],
    title="🌲 ELM - Erasmian Language Model",
description="ELM is a 900M parameter language model finetuned to follow instruction. It is trained on Erasmus University academic outputs and the [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) dataset. For more information, please visit [the GitHub repository](https://github.com/Joaoffg/ELM).", # noqa: E501
).queue().gr.load("models/Joaoffg/ELM").launch() |