import os

import streamlit as st
import chromadb
from chromadb.utils import embedding_functions
from huggingface_hub import login
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, AutoModelForCausalLM

# Authenticate with Hugging Face (Llama 2 is a gated model: request access to
# meta-llama/Llama-2-7b-chat-hf and set the HF_TOKEN environment variable
# before starting the app)
hf_token = os.getenv("HF_TOKEN")
if hf_token:
    login(token=hf_token)

# Load the Llama model using Hugging Face Transformers
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
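# Note: full-precision weights for the 7B model need roughly 28 GB of memory.
# A common alternative on GPU is half precision (a hedged sketch, not part of
# the original app; requires torch and accelerate to be installed):
#   model = AutoModelForCausalLM.from_pretrained(
#       "meta-llama/Llama-2-7b-chat-hf", torch_dtype=torch.float16, device_map="auto"
#   )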

# Initialize a persistent ChromaDB client (the legacy
# Settings(chroma_db_impl="duckdb+parquet") config was removed in chromadb 0.4+)
client = chromadb.PersistentClient(path="./chroma_db")

# Create a collection for storing supply chain and green environment data
collection = client.get_or_create_collection(
    name="supply_chain_green_environment",
    embedding_function=embedding_functions.SentenceTransformerEmbeddingFunction(
        model_name="all-mpnet-base-v2"
    ),
)
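
# The collection starts out empty, so queries would return no context. A
# minimal seeding sketch (the ids and texts below are placeholders, not part
# of the original app; in practice, load your own supply chain corpus here):
if collection.count() == 0:
    collection.add(
        ids=["doc-1", "doc-2"],
        documents=[
            "Green logistics reduces emissions by consolidating shipments.",
            "Supplier audits help verify environmental compliance upstream.",
        ],
    )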

# Sentence transformer used to embed user questions (same model as the
# collection; collection.query(query_texts=...) would avoid this second load)
embedding_model = SentenceTransformer("all-mpnet-base-v2")

# Streamlit app title
st.title("Supply Chain & Green Environment Chatbot")

# User input for questions
user_question = st.text_input("Enter your question:")

# Chat history
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

# Process user input and generate response
if user_question:
    # Generate embedding for the user question
    question_embedding = embedding_model.encode(user_question).tolist()

    # Search for relevant information in the ChromaDB collection
    results = collection.query(
        query_embeddings=[question_embedding],
        n_results=3,
    )

    # Build the retrieval context (results["documents"] holds one list of
    # documents per query embedding, so take the first)
    context = "\n".join(results["documents"][0])

    # Generate a response; max_new_tokens bounds the answer length (max_length
    # would count the prompt tokens too and can truncate generation when the
    # retrieved context is long)
    prompt = f"Context: {context}\n\nQuestion: {user_question}\n\nAnswer:"
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=256)
    # Decode only the newly generated tokens, not the echoed prompt
    response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)

    # Add user question and bot response to chat history
    st.session_state.chat_history.append({"user": user_question, "bot": response})

# Display chat history
for message in st.session_state.chat_history:
    st.write(f"**User:** {message['user']}")
    st.write(f"**Bot:** {message['bot']}")