Harshhp24 commited on
Commit
6b12e36
·
verified ·
1 Parent(s): bb691ff

Upload HF(Hugging_Face).py

Browse files
Files changed (1) hide show
  1. HF(Hugging_Face).py +42 -0
HF(Hugging_Face).py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Interactive CLI chatbot about electronics components.

Runs Mistral-7B-Instruct locally through the Hugging Face `transformers`
text-generation pipeline. Type 'exit' at the prompt to quit.
"""

from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer from Hugging Face.
model_name = "mistralai/Mistral-7B-Instruct-v0.3"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# FIX: the "conversational" pipeline task was deprecated and removed from
# transformers (gone as of v4.42). Chat-style `messages` input is handled
# by the "text-generation" pipeline instead.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)


def electronics_chatbot():
    """Run an interactive question/answer loop about electronics components.

    Reads user questions from stdin, sends the full chat history to the
    model each turn (so follow-up questions have context), and prints the
    model's reply. Exits when the user types 'exit' (case-insensitive).
    """
    print("Welcome to the Electronics Components Chatbot! Type 'exit' to quit.")
    print("Ask me about any electronics component, such as 'What is a resistor?' or 'How does a capacitor work in a circuit?'")

    # Chat history in the role/content message format the pipeline expects.
    # FIX: the original accumulated history but never passed it to the model,
    # so every turn was answered with no context.
    conversation_history = []

    while True:
        user_input = input("You: ")

        if user_input.lower() == 'exit':
            print("Goodbye!")
            break

        conversation_history.append({"role": "user", "content": user_input})

        # Generate the response from the entire conversation so far.
        response = pipe(conversation_history)

        # FIX: with chat-message input, `generated_text` is the whole updated
        # message list; the assistant's reply is the last message's content.
        reply = response[0]["generated_text"][-1]["content"]

        # FIX: label corrected — this is a Hugging Face Mistral model, not Ollama.
        print(f"Bot: {reply}\n")

        # Keep the assistant's turn so the next question has full context.
        conversation_history.append({"role": "assistant", "content": reply})


# Guard the entry point so importing this module doesn't start the REPL.
if __name__ == "__main__":
    electronics_chatbot()