sid_racha committed
Commit 0b86375 · 1 Parent(s): 24fdbf8

modified endpoints

Files changed (3)
  1. app.py +11 -1
  2. app/chains.py +2 -2
  3. app/prompts.py +20 -8
app.py CHANGED
@@ -1,5 +1,15 @@
-from fastapi import FastAPI
+from fastapi import FastAPI, Request
+from app.main import app as main_app
+
+
 app = FastAPI()
+
+
 @app.get("/")
 def greet_json():
     return {"Hello": "World!"}
+
+# Mount the main app
+app.mount("/", main_app)
+
+
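The ordering here matters: the wrapper's "/" route is registered before the mount, so Starlette matches it first and forwards every other path to the mounted app. A minimal sketch of the pattern, assuming a stand-in main_app (the commit does not show app/main.py) and a hypothetical /health route:

# Sketch only: "main_app" and "/health" below are hypothetical stand-ins
# for the Space's real app/main.py, which this commit does not include.
from fastapi import FastAPI
from fastapi.testclient import TestClient

main_app = FastAPI()

@main_app.get("/health")
def health():
    return {"status": "ok"}

app = FastAPI()

@app.get("/")
def greet_json():
    return {"Hello": "World!"}

# Routes are matched in registration order: "/" above wins,
# everything else falls through to the mounted app.
app.mount("/", main_app)

client = TestClient(app)
assert client.get("/").json() == {"Hello": "World!"}
assert client.get("/health").json() == {"status": "ok"}

Because a mount at "/" matches every remaining path, any route added to the wrapper after the mount would be unreachable.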
app/chains.py CHANGED
@@ -6,7 +6,7 @@ from prompts import (
     raw_prompt,
     raw_prompt_formatted,
     format_context,
-    # tokenizer
+    tokenizer
 )
 from data_indexing import DataIndexer
 
@@ -16,7 +16,7 @@ llm = HuggingFaceEndpoint(
     model="meta-llama/Llama-3.1-8B-Instruct",
     huggingfacehub_api_token=os.environ['HF_TOKEN'],
     max_new_tokens=512,
-    # stop_sequences=[tokenizer.eos_token],
+    stop_sequences=[tokenizer.eos_token],
     streaming=True,
 )
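With stop_sequences set, the endpoint stops generating as soon as the model emits its end-of-turn token instead of running past the assistant's reply. The tokenizer comes from app/prompts.py below; a quick sanity check of what is actually being passed (the repo is gated, so this assumes an accepted license and a valid HF token):

# Inspect the stop token now passed to HuggingFaceEndpoint; for the
# Llama 3.1 Instruct tokenizer this is the end-of-turn marker
# (<|eot_id|> at the time of writing).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-8B-Instruct")
print(repr(tokenizer.eos_token))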
 
app/prompts.py CHANGED
@@ -1,12 +1,29 @@
 from langchain_core.prompts import PromptTemplate
 from typing import List
 import models
+from transformers import AutoTokenizer
+
+model_name = "meta-llama/Llama-3.1-8B-Instruct"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+
 
 
 def format_prompt(prompt) -> PromptTemplate:
     # TODO: format the input prompt by using the model specific instruction template
     # TODO: return a langchain PromptTemplate
-    return PromptTemplate.from_template(prompt)
+    # Test with a simple question
+    chat = [
+        {"role": "system", "content": "You are a helpful AI assistant."},
+        {"role": "user", "content": prompt},
+    ]
+
+    formatted_prompt = tokenizer.apply_chat_template(
+        chat,
+        tokenize=False,
+        add_generation_prompt=True
+    )
+
+    return PromptTemplate.from_template(formatted_prompt)
 
 def format_chat_history(messages: List[models.Message]):
     # TODO: implement format_chat_history to format
@@ -35,7 +52,7 @@ standalone_prompt: str = None
 rag_prompt: str = None
 
 # TODO: create raw_prompt_formatted by using format_prompt
-raw_prompt_formatted = None
+raw_prompt_formatted = format_prompt(raw_prompt)
 raw_prompt = PromptTemplate.from_template(raw_prompt)
 
 # TODO: use format_prompt to create history_prompt_formatted
@@ -43,9 +60,4 @@ history_prompt_formatted: PromptTemplate = None
 # TODO: use format_prompt to create standalone_prompt_formatted
 standalone_prompt_formatted: PromptTemplate = None
 # TODO: use format_prompt to create rag_prompt_formatted
-rag_prompt_formatted: PromptTemplate = None
-
-
-
-
-
+rag_prompt_formatted: PromptTemplate = None
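Because apply_chat_template(..., tokenize=False) returns plain text, any LangChain-style {placeholders} in the incoming prompt string survive the wrapping and become the resulting PromptTemplate's input variables. A sketch with a hypothetical raw prompt (the real raw_prompt string is defined earlier in the file and is not shown in this diff):

# Hypothetical raw prompt; the Space's actual raw_prompt is not visible here.
from transformers import AutoTokenizer
from langchain_core.prompts import PromptTemplate

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-8B-Instruct")

chat = [
    {"role": "system", "content": "You are a helpful AI assistant."},
    {"role": "user", "content": "Answer the question: {question}"},
]
wrapped = tokenizer.apply_chat_template(
    chat, tokenize=False, add_generation_prompt=True
)

template = PromptTemplate.from_template(wrapped)
print(template.input_variables)                  # ['question'] -- the placeholder survives
print(template.format(question="What is RAG?"))  # full Llama 3.1 chat-formatted string

Note the ordering in the second hunk: raw_prompt_formatted is built while raw_prompt is still a plain string, and only afterwards is raw_prompt rebound to a PromptTemplate, so format_prompt receives the string it expects.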