ParthSadaria committed
Commit 9422734 · verified · 1 Parent(s): 1881562

Update main.py

Files changed (1):
  1. main.py +16 -8
main.py CHANGED
@@ -39,14 +39,20 @@ async def favicon():
     favicon_path = Path(__file__).parent / "favicon.ico"
     return FileResponse(favicon_path, media_type="image/x-icon")
 
-def generate_search(query: str, stream: bool = True) -> str:
+def generate_search(query: str, systemprompt: Optional[str] = None, stream: bool = True) -> str:
     headers = {"User-Agent": ""}
+
+    # Use the provided system prompt, or default to "Be Helpful and Friendly"
+    system_message = systemprompt or "Be Helpful and Friendly"
+
+    # Create the prompt history with the user query and system message
     prompt = [
         {"role": "user", "content": query},
     ]
 
-    prompt.insert(0, {"content": "Be Helpful and Friendly", "role": "system"})
+    prompt.insert(0, {"content": system_message, "role": "system"})
 
+    # Prepare the payload for the API request
     payload = {
         "is_vscode_extension": True,
         "message_history": prompt,
@@ -54,10 +60,12 @@ def generate_search(query: str, stream: bool = True) -> str:
         "user_input": prompt[-1]["content"],
     }
 
-    chat_endpoint = secret_api_endpoint_3
-    response = requests.post(chat_endpoint, headers=headers, json=payload, stream=True)
+    # Send the request to the chat endpoint
+    response = requests.post(secret_api_endpoint_3, headers=headers, json=payload, stream=True)
 
     streaming_text = ""
+
+    # Process the streaming response
     for value in response.iter_lines(decode_unicode=True):
         if value.startswith("data: "):
             try:
@@ -89,21 +97,21 @@ def generate_search(query: str, stream: bool = True) -> str:
     if not stream:
         yield streaming_text
 
+
 @app.get("/searchgpt")
-async def search_gpt(q: str, stream: Optional[bool] = False):
+async def search_gpt(q: str, stream: Optional[bool] = False, systemprompt: Optional[str] = None):
     if not q:
         raise HTTPException(status_code=400, detail="Query parameter 'q' is required")
 
     if stream:
         return StreamingResponse(
-            generate_search(q, stream=True),
+            generate_search(q, systemprompt=systemprompt, stream=True),
             media_type="text/event-stream"
         )
     else:
         # For non-streaming, collect the text and return as JSON response
-        response_text = "".join([chunk for chunk in generate_search(q, stream=False)])
+        response_text = "".join([chunk for chunk in generate_search(q, systemprompt=systemprompt, stream=False)])
         return JSONResponse(content={"response": response_text})
-
 @app.get("/", response_class=HTMLResponse)
 async def root():
     # Open and read the content of index.html (in the same folder as the app)
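
For context, a minimal sketch of how the updated /searchgpt endpoint could be exercised after this change. The base URL and port are assumptions for a locally running instance (not values taken from this repository), and the query text and system prompt are placeholders.

import requests

# Hypothetical base URL for a local run of the app (assumption, not from the repo).
BASE_URL = "http://localhost:7860"

# Non-streaming request: the new optional `systemprompt` query parameter
# overrides the default "Be Helpful and Friendly" system message.
resp = requests.get(
    f"{BASE_URL}/searchgpt",
    params={"q": "What is FastAPI?", "systemprompt": "Answer in one short sentence."},
)
print(resp.json()["response"])

# Streaming request: with stream=true the endpoint returns a text/event-stream body,
# so lines are consumed incrementally as they arrive.
with requests.get(
    f"{BASE_URL}/searchgpt",
    params={"q": "What is FastAPI?", "stream": "true"},
    stream=True,
) as streamed:
    for line in streamed.iter_lines(decode_unicode=True):
        if line:
            print(line)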