Update README.md
README.md CHANGED
@@ -108,8 +108,8 @@ class SiameseNetworkMPNet(nn.Module):
         self.normalize = normalize
         self.tokenizer = tokenizer
 
-    def apply_lora_weights(self,
-        self.model = PeftModel.from_pretrained(self.model,
+    def apply_lora_weights(self, finetuned_model):
+        self.model = PeftModel.from_pretrained(self.model, finetuned_model)
         self.model = self.model.merge_and_unload()
         return self
 
@@ -132,8 +132,8 @@ tokenizer = AutoTokenizer.from_pretrained(base_model_name)
 base_model = SiameseNetworkMPNet(model_name=base_model_name, tokenizer=tokenizer)
 
 # Load and apply LoRA weights
-
-
+finetuned_model = SiameseNetworkMPNet(model_name=base_model_name, tokenizer=tokenizer)
+finetuned_model.apply_lora_weights("vahidthegreat/StanceAware-SBERT")
 ```
 
 #### Example Usage for Two-Sentence Similarity
@@ -163,7 +163,7 @@ text2 = "I hate pineapple on pizza"
 print(f"For Base Model sentences: '{text1}' and '{text2}'")
 two_sentence_similarity(base_model, tokenizer, text1, text2)
 print(f"\nFor FineTuned Model sentences: '{text1}' and '{text2}'")
-two_sentence_similarity(
+two_sentence_similarity(finetuned_model, tokenizer, text1, text2)
 
 print('\n\n')
 
@@ -175,7 +175,7 @@ text2 = "I like pineapple on pizza"
 print(f"For Base Model sentences: '{text1}' and '{text2}'")
 two_sentence_similarity(base_model, tokenizer, text1, text2)
 print(f"\n\nFor FineTuned Model sentences: '{text1}' and '{text2}'")
-two_sentence_similarity(
+two_sentence_similarity(finetuned_model, tokenizer, text1, text2)
 ```
 ```output
 For Base Model sentences: 'I love pineapple on pizza' and 'I hate pineapple on pizza'