import gradio as gr
import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import AutoPeftModelForCausalLM
from datasets import load_dataset
from huggingface_hub import login

login(token=os.environ.get('HF_TOKEN', None))

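# Fine-tuned Llama-3-8B checkpoint (presumably trained on the sepsis_cases dataset loaded below)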
model_name = "skaltenp/Meta-Llama-3-8B-sepsis_cases-199900595"

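# 4-bit NF4 quantization so the 8B model fits within a single-GPU memory budget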
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)
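# Load the quantized model onto the GPU and put it into inference mode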
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="cuda",
    trust_remote_code=True,
    #token=True,
)
model.eval()
#model = AutoPeftModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)

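# The Llama-3 tokenizer ships without a dedicated pad token, so reuse EOS for padding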
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
tokenizer.pad_token_id = tokenizer.eos_token_id


def prepare_sample_text(example, tokenizer, remove_indent=False, start=None, end=None):
    """Prepare the prompt text from a sample of the dataset.

    If both start and end are given, only the events in the [start, end) slice
    of the case's event list are included.
    """
    thread = example["event_list"]
    if start is not None and end is not None:
        thread = thread[start:end]
    text = ""
    for message in thread:
        text += f"{message}{tokenizer.eos_token}\n"
    return text
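
# Example: prepare_sample_text(test_data[0], tokenizer, start=0, end=1) yields the first
# event of a case followed by the EOS token; these snippets serve as the UI examples below.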

dataset = load_dataset(
    "skaltenp/sepsis_cases",
    token=True,
    download_mode='force_redownload'
)
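# Split with a fixed seed: 80/20 train/test, then the train part is split again 80/20 into train/validation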
train_data = dataset["train"].train_test_split(train_size=0.8, shuffle=True, seed=199900595)
test_data = train_data["test"]
train_data = train_data["train"].train_test_split(train_size=0.8, shuffle=True, seed=199900595)
valid_data = train_data["test"]
train_data = train_data["train"]

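# Show how many events the three cases used as UI examples contain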
print(len(test_data[0]["event_list"]), len(test_data[4]["event_list"]), len(test_data[50]["event_list"]))

def generate_answer(question):
    """Tokenize the prompt, sample a continuation from the model on the GPU, and decode it."""
    #inputs = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
    inputs = tokenizer(question, return_tensors="pt").to("cuda")
    outputs = model.generate(**inputs, max_length=8192, num_return_sequences=1, do_sample=True)
    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return answer

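# Simple text-to-text Gradio UI; the examples are prompts cut from held-out test cases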
iface = gr.Interface(
    fn=generate_answer,
    inputs="text",
    outputs="text",
    title="Straight Outta Logs",
    examples=[
        prepare_sample_text(test_data[0], tokenizer, start=0, end=1),
        prepare_sample_text(test_data[4], tokenizer, start=0, end=2),
        prepare_sample_text(test_data[50], tokenizer, start=0, end=3),
    ],
    description="Use one of the examples or paste your own sepsis case prompt.",
)

iface.launch(share=True)  # Launch the app; share=True also creates a public share link