Msp committed on
Commit
e552bec
·
verified ·
1 Parent(s): 1b5a47f

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +15 -14
README.md CHANGED
@@ -77,15 +77,16 @@ MIRA is licensed under apache-2.0. Please refer to the LICENSE file for more det
77
 
78
  To Load this model using unsloth
79
 
80
- ``` from unsloth import FastLanguageModel
81
- model, tokenizer = FastLanguageModel.from_pretrained(
82
- model_name = "Msp/mira-1.0", # YOUR MODEL YOU USED FOR TRAINING
83
- max_seq_length = 4096,
84
- dtype = None,
85
- load_in_4bit = True,
86
- #token="hf.."
87
- )
88
- FastLanguageModel.for_inference(model) # Enable native 2x faster inference
 
89
 
90
  alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
91
 
@@ -100,11 +101,11 @@ alpaca_prompt = """Below is an instruction that describes a task, paired with an
100
 
101
  inputs = tokenizer(
102
  [
103
- alpaca_prompt.format(
104
- "your name is mira", # instruction
105
- "whats your name", # input
106
- "", # output - leave this blank for generation!
107
- )
108
  ], return_tensors = "pt").to("cuda")
109
 
110
  from transformers import TextStreamer
 
77
 
78
  To Load this model using unsloth
79
 
80
+ ```
81
+ from unsloth import FastLanguageModel
82
+ model, tokenizer = FastLanguageModel.from_pretrained(
83
+ model_name = "Msp/mira-1.0", # YOUR MODEL YOU USED FOR TRAINING
84
+ max_seq_length = 4096,
85
+ dtype = None,
86
+ load_in_4bit = True,
87
+ #token="hf.."
88
+ )
89
+ FastLanguageModel.for_inference(model) # Enable native 2x faster inference
90
 
91
  alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
92
 
 
101
 
102
  inputs = tokenizer(
103
  [
104
+ alpaca_prompt.format(
105
+ "your name is mira", # instruction
106
+ "whats your name", # input
107
+ "", # output - leave this blank for generation!
108
+ )
109
  ], return_tensors = "pt").to("cuda")
110
 
111
  from transformers import TextStreamer