Update README.md
Browse files
README.md
CHANGED
@@ -62,7 +62,7 @@ class TextGenerationAssistant:
|
|
62 |
def generate_response(self, user_query):
|
63 |
prompt = self.format_prompt(user_query)
|
64 |
outputs = self.pipe(prompt, **self.sampling_params)
|
65 |
-
return outputs[0]["generated_text"].split("[/INST]")[
|
66 |
|
67 |
|
68 |
assistant = TextGenerationAssistant(model_id="Commencis/Commencis-LLM")
|
@@ -94,7 +94,7 @@ pipeline = transformers.pipeline(
|
|
94 |
)
|
95 |
|
96 |
outputs = pipeline(prompt, max_new_tokens=1024, do_sample=True, temperature=0.5, top_k=50, top_p=0.9)
|
97 |
-
print(outputs[0]["generated_text"])
|
98 |
```
|
99 |
|
100 |
## Bias, Risks, and Limitations
|
|
|
62 |
def generate_response(self, user_query):
|
63 |
prompt = self.format_prompt(user_query)
|
64 |
outputs = self.pipe(prompt, **self.sampling_params)
|
65 |
+
return outputs[0]["generated_text"].split("[/INST]")[1].strip()
|
66 |
|
67 |
|
68 |
assistant = TextGenerationAssistant(model_id="Commencis/Commencis-LLM")
|
|
|
94 |
)
|
95 |
|
96 |
outputs = pipeline(prompt, max_new_tokens=1024, do_sample=True, temperature=0.5, top_k=50, top_p=0.9)
|
97 |
+
print(outputs[0]["generated_text"].split("[/INST]")[1].strip())
|
98 |
```
|
99 |
|
100 |
## Bias, Risks, and Limitations
|