Create app_LlamaDutch

app_LlamaDutch ADDED (+19 -0)

@@ -0,0 +1,19 @@
+from langchain_community.llms import HuggingFaceHub
+from langchain.callbacks.manager import CallbackManager
+from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
+from langchain.chains import LLMChain
+from langchain.prompts import PromptTemplate
+from langchain_community.llms import LlamaCpp
+
+question = "Who won the FIFA World Cup in the year 1994? "
+template = """Question: {question}
+Answer: Let's think step by step."""
+
+prompt = PromptTemplate.from_template(template)
+
+llm = HuggingFaceHub(
+    repo_id="BramVanroy/Llama-2-13b-chat-dutch", model_kwargs={"temperature": 0.5, "max_length": 64}
+)
+llm_chain = LLMChain(prompt=prompt, llm=llm)
+
+print(llm_chain.invoke(question))
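
Note: as written, the app relies on the HUGGINGFACEHUB_API_TOKEN environment variable for the Hub call, and the CallbackManager, StreamingStdOutCallbackHandler, and LlamaCpp imports are never used. A minimal sketch of how those unused imports could drive the same prompt against a locally downloaded model with token streaming instead of the Hub endpoint; the model_path below is a hypothetical placeholder, not a file referenced by the commit:

    from langchain.callbacks.manager import CallbackManager
    from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
    from langchain.chains import LLMChain
    from langchain.prompts import PromptTemplate
    from langchain_community.llms import LlamaCpp

    template = """Question: {question}
    Answer: Let's think step by step."""
    prompt = PromptTemplate.from_template(template)

    # Stream generated tokens to stdout as they arrive.
    callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])

    llm = LlamaCpp(
        model_path="./llama-2-13b-chat-dutch.Q4_K_M.gguf",  # hypothetical local GGUF file
        temperature=0.5,
        max_tokens=64,  # LlamaCpp takes max_tokens rather than max_length
        callback_manager=callback_manager,
        verbose=True,
    )

    llm_chain = LLMChain(prompt=prompt, llm=llm)
    print(llm_chain.invoke("Who won the FIFA World Cup in the year 1994? "))

In either variant, llm_chain.invoke(question) returns a dict holding the input plus a "text" key with the completion (LLMChain's default output key), which is why the app prints the whole result rather than a bare string.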