File size: 1,281 Bytes
efccbe2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
import os

from langchain_community.llms import HuggingFaceEndpoint
from langchain.chains import LLMChain
from langchain_core.prompts import PromptTemplate
from dotenv import load_dotenv

# Load variables from a local .env file into the process environment
# (expects HUGGINGFACEHUB_API_TOKEN to be defined there or in the shell).
load_dotenv()



# HuggingFace Hub API token; None if the variable is unset — no validation
# is performed here, so a missing token only fails later at endpoint call time.
model_Api = os.getenv("HUGGINGFACEHUB_API_TOKEN")
# os.environ["HUGGINGFACEHUB_API_TOKEN"] = model_Api

# Hub repository of the instruction-tuned model served via the Inference Endpoint.
repo_id =  "mistralai/Mistral-7B-Instruct-v0.3"

def QueryBuilding():
    """Return the raw prompt template for the medical-assistant chain.

    The template contains a single placeholder, ``{query}``, which
    ``PromptTemplate.from_template`` turns into the chain's input variable.
    The blank and indented lines inside the literal are part of the string
    and are sent to the model verbatim.
    """

    Query_template = """Consider yourself as a personalized professional medical assistant  for the user {query},

                    

                    Answer: provide guidance and support to  the user in a more detailed, simple and straightforward manner. """
    return Query_template

def PromptEngineering():
    """Build a PromptTemplate from the medical-assistant query template."""
    template_text = QueryBuilding()
    prompt = PromptTemplate.from_template(template_text)
    return prompt



def LLM_building():
    """Create the HuggingFaceEndpoint client for the configured Mistral repo.

    Uses the module-level ``repo_id`` and ``model_Api`` (read from
    HUGGINGFACEHUB_API_TOKEN at import time).

    NOTE(review): HuggingFaceEndpoint's documented auth parameter is
    ``huggingfacehub_api_token``; confirm that ``token`` is actually
    forwarded for authentication rather than passed through as a model kwarg.
    """
    llm_model = HuggingFaceEndpoint(
    repo_id=repo_id,
        max_length =  128,  # NOTE(review): max_length bounds *generated* text, not input length — confirm intent
        token =  model_Api # API token loaded from the environment at module import
    
    )
    return llm_model

def langchainning():
    """Assemble the prompt template and the endpoint LLM into an LLMChain."""
    prompt = PromptEngineering()
    model = LLM_building()
    return LLMChain(prompt=prompt, llm=model)

# def user_input(user):
#     # user = input()
#     ans = langchainning().run(user)
#     return ans