David Ko committed on
Commit
5c810f6
·
1 Parent(s): 637dbbd

feat(api): add debug logging to vision_rag_query (VRAG)

Browse files
Files changed (1) hide show
  1. api.py +32 -0
api.py CHANGED
@@ -1605,6 +1605,17 @@ def vision_rag_query():
1605
  return jsonify({"error": "LangChain not installed on server"}), 500
1606
 
1607
  data = request.get_json(silent=True) or {}
 
 
 
 
 
 
 
 
 
 
 
1608
  user_query = (data.get('userQuery') or '').strip()
1609
  if not user_query:
1610
  return jsonify({"error": "Missing 'userQuery'"}), 400
@@ -1615,6 +1626,7 @@ def vision_rag_query():
1615
 
1616
  search_type = data.get('searchType', 'image')
1617
  n_results = int(data.get('n_results', 5))
 
1618
 
1619
  # Build query embedding or filtered fetch similar to /api/search-similar-objects
1620
  results = None
@@ -1657,6 +1669,18 @@ def vision_rag_query():
1657
 
1658
  # Format results using existing helper
1659
  formatted = format_object_results(results) if results else []
 
 
 
 
 
 
 
 
 
 
 
 
1660
 
1661
  # Build concise context for LLM
1662
  def _shorten(md):
@@ -1692,6 +1716,14 @@ def vision_rag_query():
1692
  context_text = json.dumps(context_items, ensure_ascii=False, indent=2)
1693
  user_text = f"User question: {user_query}\n\nDetected context (top {len(context_items)}):\n{context_text}"
1694
 
 
 
 
 
 
 
 
 
1695
  try:
1696
  start = time.time()
1697
  llm = ChatOpenAI(api_key=api_key, model=os.environ.get('OPENAI_MODEL', 'gpt-4o'))
 
1605
  return jsonify({"error": "LangChain not installed on server"}), 500
1606
 
1607
  data = request.get_json(silent=True) or {}
1608
+ # Debug: log incoming payload keys and basic info (without sensitive data)
1609
+ try:
1610
+ print("[VRAG][REQ] keys=", list(data.keys()))
1611
+ print("[VRAG][REQ] has_api_key=", bool(data.get('api_key') or os.environ.get('OPENAI_API_KEY')))
1612
+ _img = data.get('image')
1613
+ if isinstance(_img, str):
1614
+ print("[VRAG][REQ] image_str_len=", len(_img), "prefix=", _img[:30] if len(_img) > 30 else _img)
1615
+ print("[VRAG][REQ] searchType=", data.get('searchType'), "objectId=", data.get('objectId'), "class_name=", data.get('class_name'))
1616
+ except Exception as _e:
1617
+ print("[VRAG][WARN] failed to log request payload:", _e)
1618
+
1619
  user_query = (data.get('userQuery') or '').strip()
1620
  if not user_query:
1621
  return jsonify({"error": "Missing 'userQuery'"}), 400
 
1626
 
1627
  search_type = data.get('searchType', 'image')
1628
  n_results = int(data.get('n_results', 5))
1629
+ print(f"[VRAG] user_query='{user_query}' | search_type={search_type} | n_results={n_results}")
1630
 
1631
  # Build query embedding or filtered fetch similar to /api/search-similar-objects
1632
  results = None
 
1669
 
1670
  # Format results using existing helper
1671
  formatted = format_object_results(results) if results else []
1672
+ # Debug: log retrieval summary
1673
+ try:
1674
+ cnt = len(formatted)
1675
+ print(f"[VRAG][RETRIEVE] items={cnt}")
1676
+ if cnt:
1677
+ print("[VRAG][RETRIEVE] first_item=", {
1678
+ 'id': formatted[0].get('id'),
1679
+ 'distance': formatted[0].get('distance'),
1680
+ 'meta_keys': list((formatted[0].get('metadata') or {}).keys())
1681
+ })
1682
+ except Exception as _e:
1683
+ print("[VRAG][WARN] failed to log retrieval summary:", _e)
1684
 
1685
  # Build concise context for LLM
1686
  def _shorten(md):
 
1716
  context_text = json.dumps(context_items, ensure_ascii=False, indent=2)
1717
  user_text = f"User question: {user_query}\n\nDetected context (top {len(context_items)}):\n{context_text}"
1718
 
1719
+ # Debug: show compact context preview
1720
+ try:
1721
+ print("[VRAG][CTX] context_items_count=", len(context_items))
1722
+ if context_items:
1723
+ print("[VRAG][CTX] sample=", json.dumps(context_items[0], ensure_ascii=False))
1724
+ except Exception as _e:
1725
+ print("[VRAG][WARN] failed to log context:", _e)
1726
+
1727
  try:
1728
  start = time.time()
1729
  llm = ChatOpenAI(api_key=api_key, model=os.environ.get('OPENAI_MODEL', 'gpt-4o'))