crpatel committed
Commit 2a5668e · verified · 1 Parent(s): f22980e

Upload model_testing.py with huggingface_hub

Files changed (1)
  1. model_testing.py +129 -0
model_testing.py ADDED
@@ -0,0 +1,129 @@
+ import argparse
+ from SmolLm3 import LlamaModel
+ import yaml
+ import torch
+ from transformers import AutoTokenizer
+
+
+ def generate_helper(model, idx, max_new_tokens, context_length, temperature=1.0, top_k=None, eos_token=None, device=None):
+     model = model.to(device)
+     idx = idx.to(device)
+     model.eval()
+     for _ in range(max_new_tokens):
+         idx_cond = idx[:, -context_length:]
+         with torch.no_grad():
+             logits, _ = model(idx_cond)  # Unpack both logits and loss (ignore loss)
+             logits = logits.view(idx_cond.shape[0], -1, model.config['vocab_size'])  # Reshape to [batch, seq, vocab]
+
+         # Get the logits for the last token only
+         logits = logits[:, -1, :]  # Shape: [batch_size, vocab_size]
+
+         if top_k is not None:
+             # Top-k sampling: mask out every logit below the k-th largest
+             top_logits, _ = torch.topk(logits, top_k)
+             min_logit = top_logits[:, -1].unsqueeze(-1)
+             logits = torch.where(logits < min_logit,
+                                  torch.tensor(float('-inf')).to(logits.device),
+                                  logits)
+
+         # Temperature scaling; fall back to greedy decoding when temperature == 0
+         if temperature > 0.0:
+             logits /= temperature
+             probs = torch.softmax(logits, dim=-1)
+             idx_next = torch.multinomial(probs, num_samples=1)
+         else:
+             idx_next = torch.argmax(logits, dim=-1, keepdim=True)
+
+         if eos_token is not None and idx_next.item() == eos_token:
+             break
+
+         idx = torch.cat((idx, idx_next), dim=1)
+     model.train()
+     return idx
+
+
+ def get_config(config_path):
+     # Load the YAML model/tokenizer configuration
+     with open(config_path, "r") as f:
+         config = yaml.load(f, Loader=yaml.FullLoader)
+     return config
+
+
+ def load_weights(config, weights_path, device):
+     # Build the model and load a plain state dict
+     model = LlamaModel(config['model'])
+     model.load_state_dict(torch.load(weights_path, map_location=torch.device(device)))
+     return model
+
+
+ def load_model_from_checkpoint(config_path, checkpoint_path, device):
+     # Build the model and load weights from a training checkpoint,
+     # stripping the '_orig_mod.' prefix that torch.compile adds to parameter names
+     config = get_config(config_path)
+     model = LlamaModel(config['model'])
+     checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
+     state_dict = checkpoint['model_state_dict']
+     state_dict = {k.replace('_orig_mod.', ''): v for k, v in state_dict.items()}
+     model.load_state_dict(state_dict)
+     return model
+
+
+ def get_tokenizer(config):
+     tokenizer_path = config['tokenizer']['tokenizer_name_or_path']
+     tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
+     tokenizer.pad_token = tokenizer.eos_token
+     vocab_size = tokenizer.vocab_size
+     return tokenizer, vocab_size
+
+
+ def generate_text(model, tokenizer, input_text, max_new_tokens, context_length, temperature, top_k, eos_token, device):
+     encoded_text = tokenizer.encode(input_text, return_tensors="pt").to(device)
+
+     generated_text = generate_helper(model,
+                                      idx=encoded_text,
+                                      max_new_tokens=max_new_tokens,
+                                      context_length=context_length,
+                                      temperature=temperature,
+                                      top_k=top_k,
+                                      eos_token=eos_token,
+                                      device=device)
+     return tokenizer.decode(generated_text.squeeze(0))
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser(description='Generate text using the SmolLM model')
+     parser.add_argument('--config_path', type=str, default="config_smollm2_135M.yaml",
+                         help='Path to the config file')
+     parser.add_argument('--checkpoint_path', type=str, required=True,
+                         help='Path to the model checkpoint')
+     parser.add_argument('--input_text', type=str, default="Once upon a time in far far away",
+                         help='Input text prompt for generation')
+     parser.add_argument('--max_new_tokens', type=int, default=100,
+                         help='Maximum number of new tokens to generate')
+     parser.add_argument('--context_length', type=int, default=256,
+                         help='Context length for generation')
+     parser.add_argument('--temperature', type=float, default=0.9,
+                         help='Temperature for sampling')
+     parser.add_argument('--top_k', type=int, default=2,
+                         help='Top-k value for sampling')
+     parser.add_argument('--device', type=str, default="cuda" if torch.cuda.is_available() else "cpu",
+                         help='Device to run the model on (cuda/cpu)')
+
+     args = parser.parse_args()
+
+     config = get_config(args.config_path)
+     model = load_weights(config, args.checkpoint_path, args.device)
+     print(model)
+     tokenizer, vocab_size = get_tokenizer(config)
+     print(tokenizer)
+     print(vocab_size)
+
+     generated_text = generate_text(
+         model,
+         tokenizer,
+         args.input_text,
+         args.max_new_tokens,
+         args.context_length,
+         args.temperature,
+         args.top_k,
+         tokenizer.eos_token_id,
+         args.device
+     )
+     print(generated_text)
+     print("--------------------------------")
+
+     # Second pass: exercise the model's own generate() method for comparison
+     encoded_text = tokenizer.encode(args.input_text, return_tensors="pt").to(args.device)
+     print(encoded_text)
+     generated_text2 = model.generate(idx=encoded_text, max_new_tokens=args.max_new_tokens, context_length=args.context_length, temperature=args.temperature, top_k=args.top_k, eos_token=tokenizer.eos_token_id, device=args.device)
+     decoded_text2 = tokenizer.decode(generated_text2.squeeze(0))
+     print(decoded_text2)
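
For reference, a minimal sketch of how the helpers in this file might be driven from another script. It assumes the committed defaults are present (config_smollm2_135M.yaml, the SmolLm3 package) and uses a hypothetical checkpoint path; none of this is part of the commit itself.

# Hypothetical driver script; config and checkpoint paths are assumptions,
# not files included in this commit.
import torch
from model_testing import get_config, get_tokenizer, load_model_from_checkpoint, generate_text

device = "cuda" if torch.cuda.is_available() else "cpu"
config = get_config("config_smollm2_135M.yaml")              # default config from the script
tokenizer, _ = get_tokenizer(config)
model = load_model_from_checkpoint("config_smollm2_135M.yaml",
                                   "checkpoints/last.pt",    # hypothetical checkpoint path
                                   device)

text = generate_text(model, tokenizer,
                     input_text="Once upon a time",
                     max_new_tokens=50,
                     context_length=256,
                     temperature=0.9,
                     top_k=2,
                     eos_token=tokenizer.eos_token_id,
                     device=device)
print(text)

load_model_from_checkpoint is used here rather than load_weights because it unwraps the 'model_state_dict' entry and strips the '_orig_mod.' prefix that torch.compile adds to training checkpoints.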