Adding llama
- llama_groq.py +40 -0
- requirements.txt +3 -1
llama_groq.py
ADDED
@@ -0,0 +1,40 @@
+from langchain_groq import ChatGroq
+from dotenv import load_dotenv
+from typing import Optional
+import os
+
+
+class LlamaModel:
+    """
+    This class is used to interact with the Llama LLM models (served via Groq)
+    for text generation.
+
+    Args:
+        model: The name of the model to be used. Defaults to 'llama3-70b-8192'.
+    """
+
+    def __init__(self,
+                 model: Optional[str] = 'llama3-70b-8192',
+                 ):
+        # Read the Groq API key from the environment (.env file).
+        load_dotenv()
+        self.model = ChatGroq(groq_api_key=os.getenv('groq_llama70_key'), model=model)
+
+    def execute(self, prompt: str) -> str:
+        """Send the prompt to the model and return the generated text."""
+        try:
+            response = self.model.invoke(prompt)
+            # invoke() returns an AIMessage; .content holds the generated text,
+            # matching the declared str return type.
+            return response.content
+        except Exception as e:
+            return f"An error occurred: {e}"
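A minimal usage sketch for the new class (it assumes a .env file at the project root defining groq_llama70_key, the variable name the constructor reads):

from llama_groq import LlamaModel

llama = LlamaModel()  # defaults to 'llama3-70b-8192'
answer = llama.execute("Summarize what Groq is in one sentence.")
print(answer)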
requirements.txt
CHANGED
@@ -1,4 +1,6 @@
 streamlit
 google-generativeai
 python-dotenv
-langchain
+langchain
+groq
+langchain_groq
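With these additions, the environment can be rebuilt with pip install -r requirements.txt; groq and langchain_groq are the packages that provide the ChatGroq client used above.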