---
license: mit
datasets:
- open-thoughts/OpenThoughts-114k
language:
- en
metrics:
- accuracy
base_model:
- deepseek-ai/DeepSeek-R1
new_version: deepseek-ai/DeepSeek-R1
pipeline_tag: question-answering
library_name: adapter-transformers
---
|
# Reproducible GPT-2 sampling demo: seed the RNG, then draw five alternative
# continuations of the same prompt.
from transformers import pipeline, set_seed

generator = pipeline("text-generation", model="gpt2")
set_seed(42)  # fixed seed so the sampled continuations are reproducible
samples = generator(
    "Hello, I'm a language model,",
    max_length=30,
    num_return_sequences=5,
)
|
|
|
# Generation round trip: encode a prompt, generate up to 100 tokens, decode
# and print the result.
# NOTE(review): `tokenizer` and `model` are assumed to be created earlier
# (e.g. via AutoTokenizer / a causal-LM `from_pretrained`) — not visible here.
prompt = "The future of AI is"
encoded = tokenizer(prompt, return_tensors="pt")

generated = model.generate(**encoded, max_length=100)
print(tokenizer.decode(generated[0], skip_special_tokens=True))
|
# Use a pipeline as a high-level helper for chat-style generation.
from transformers import pipeline

chat = [
    {"role": "user", "content": "Who are you?"},
]

# trust_remote_code lets the hub repo supply its own modeling code.
pipe = pipeline(
    "text-generation",
    model="deepseek-ai/DeepSeek-R1",
    trust_remote_code=True,
)
pipe(chat)
|
from adapters import AutoAdapterModel

# Load the base checkpoint and activate the Glo-Bus adapter on top of it.
# BUG FIX: the auto-generated snippet left the literal placeholder "undefined"
# as the checkpoint id; the card metadata lists deepseek-ai/DeepSeek-R1 as the
# base model for this adapter.
model = AutoAdapterModel.from_pretrained("deepseek-ai/DeepSeek-R1")
model.load_adapter("rebekah0302/Glo-Bus", set_active=True)
|
from datasets import load_dataset

# Login using e.g. `huggingface-cli login` to access this dataset.
# "default" selects the standard configuration of OpenThoughts-114k.
repo, config = "open-thoughts/OpenThoughts-114k", "default"
ds = load_dataset(repo, config)
|
from adapters import AutoAdapterModel

# Load the base checkpoint and activate the Glo-Bus adapter on top of it.
# BUG FIX: the auto-generated snippet left the literal placeholder "undefined"
# as the checkpoint id; the card metadata lists deepseek-ai/DeepSeek-R1 as the
# base model for this adapter.
model = AutoAdapterModel.from_pretrained("deepseek-ai/DeepSeek-R1")
model.load_adapter("rebekah0302/Glo-Bus", set_active=True)
|
from transformers import TrainingArguments, Trainer

# Fine-tuning setup: evaluate once per epoch, keep only the two most recent
# checkpoints, log to ./logs.
training_args = TrainingArguments(
    output_dir="./results",
    evaluation_strategy="epoch",
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    num_train_epochs=3,
    save_total_limit=2,
    logging_dir="./logs",
)

# BUG FIX: with evaluation_strategy="epoch" the Trainer requires an eval
# dataset and raises a ValueError in __init__ without one. Carve a held-out
# split from the training data so per-epoch evaluation has something to score.
# NOTE(review): `model` and `tokenized_datasets` are assumed to be defined
# earlier in the notebook — they are not visible in this snippet.
split = tokenized_datasets["train"].train_test_split(test_size=0.1, seed=42)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=split["train"],
    eval_dataset=split["test"],
)

trainer.train()
|
from huggingface_hub import notebook_login

# Authenticate interactively (token prompt in the notebook), then publish the
# fine-tuned model and its tokenizer under the same Hub repo id.
notebook_login()

repo_id = "your-huggingface-username/custom-gpt"
model.push_to_hub(repo_id)
tokenizer.push_to_hub(repo_id)
|
# BUG FIX: `pip install gradio` is a shell command, not Python — left bare it
# is a SyntaxError. Run it in a terminal (or prefix with `!` in a notebook):
# pip install gradio
import gradio as gr
from transformers import pipeline

# Text-generation backend for the demo UI.
generator = pipeline("text-generation", model="your-huggingface-username/custom-gpt")


def chatbot(prompt):
    """Return up to 100 tokens of generated text continuing `prompt`."""
    return generator(prompt, max_length=100)[0]["generated_text"]


# Minimal Gradio app: one text box in, one text box out.
iface = gr.Interface(fn=chatbot, inputs="text", outputs="text")
iface.launch()
|
|