jdasic committed on
Commit
99e1642
·
verified ·
1 Parent(s): ec3505f

create app

Browse files
Files changed (1) hide show
  1. app.py +120 -0
app.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import pipeline
3
+ import json
4
+ from py2neo import Graph
5
+ import time
6
+ import requests
7
+ import json
8
+ import os
9
+
10
# Hugging Face Inference API endpoints: GPT-2 for text generation and a
# CoNLL-03 fine-tuned BERT for named-entity recognition.
llm_endpoint = "https://api-inference.huggingface.co/models/gpt2"
ner_endpoint = "https://api-inference.huggingface.co/models/dbmdz/bert-large-cased-finetuned-conll03-english"

# Authorization header for the Hugging Face API; the token is read from the
# environment so it never lands in source control.
hf_token = os.getenv('HF_TOKEN')
headers = {
    "Authorization": f"Bearer {hf_token}"
}

# Connect to the Neo4j database.
# SECURITY: the URI/username/password were previously hard-coded here (and the
# password was committed to the repo). They now come from the environment,
# falling back to the old values so existing deployments keep working.
# TODO: rotate the exposed password and set NEO4J_URI/NEO4J_USER/NEO4J_PASSWORD.
uri = os.getenv("NEO4J_URI", "bolt://134.122.123.86:7687")
username = os.getenv("NEO4J_USER", "neo4j")
password = os.getenv("NEO4J_PASSWORD", "cVW8Db2D")
graph = Graph(uri, auth=(username, password))
print("DB connected")
24
+
25
def query_neo4j(question):
    """Extract named entities from *question* and fetch their outgoing
    relationships from Neo4j.

    Returns a newline-joined string of "source - REL -> target" rows,
    ``None`` when the graph had no matching rows, or a human-readable
    error-message string on failure.
    """
    try:
        # Run NER over the question via the Hugging Face API.
        ner_response = query_with_retry(ner_endpoint, {"inputs": question})

        # query_with_retry returns a dict with an 'error' key on failure and
        # a list of entity dicts on success. The original `'error' in
        # ner_response` check would, for a list response, match a literal
        # 'error' element and then crash on `.get` — guard with isinstance.
        if isinstance(ner_response, dict) and 'error' in ner_response:
            return f"Error in NER response: {ner_response.get('error')}"

        # Empty response: nothing to look up.
        if not ner_response:
            return "No entities extracted from the question."

        # Collect the recognized entity surface forms, deduplicated with
        # order preserved so each entity hits Neo4j at most once.
        entities = []
        for item in ner_response:
            word = item.get('word') if isinstance(item, dict) else None
            if word and word not in entities:
                entities.append(word)

        # Query Neo4j for the relationships of each extracted entity.
        result = []
        try:
            for entity in entities:
                query_response = graph.run(
                    """
                    CALL {
                        WITH $entity AS entity
                        MATCH (p:Place {id: entity})-[r]->(e)
                        RETURN p.id AS source_id, type(r) AS relationship, e.id AS target_id
                        LIMIT 50
                    }
                    RETURN source_id, relationship, target_id
                    """,
                    entity=entity
                )
                result.extend(
                    f"{row['source_id']} - {row['relationship']} -> {row['target_id']}"
                    for row in query_response
                )
        except Exception as e:
            return f"Error with querying Neo4j: {str(e)}"

        return "\n".join(result) if result else None

    except Exception as e:
        return f"An error occurred while processing the question: {str(e)}"
68
+
69
+
70
def query_llm(question):
    """Answer *question* by fetching related graph data from Neo4j and
    feeding it as context to the GPT-2 text-generation endpoint.

    Returns the generated text, an error-message string, or
    "No relevant data found" when the graph yielded nothing.
    """
    graph_data = query_neo4j(question)
    if graph_data is None:
        return "No relevant data found"

    # query_neo4j reports failures as strings. The original lowercase
    # substring test (`'error' in graph_data`) missed messages that start
    # with a capitalized "Error ..."; compare case-insensitively.
    if 'error' in graph_data.lower():
        return graph_data

    context = f"Graph data:\n{graph_data}"
    llm_response = query_with_retry(llm_endpoint, {"inputs": context})

    # A dict from query_with_retry always signals failure via 'error';
    # a successful generation is a list of {'generated_text': ...} dicts.
    if isinstance(llm_response, dict) and 'error' in llm_response:
        return f"Error in LLM response: {llm_response.get('error')}"

    if not llm_response or 'generated_text' not in llm_response[0]:
        return "LLM generated an empty response."

    return llm_response[0]["generated_text"]
89
+
90
# Query the Hugging Face API, retrying while the hosted model is loading.
def query_with_retry(endpoint, payload, max_retries=5):
    """POST *payload* to *endpoint*, retrying up to *max_retries* times
    while the hosted model is still warming up.

    Returns the decoded JSON response on success, or a dict carrying an
    'error' key on failure (so callers can uniformly check for 'error').
    """
    for _ in range(max_retries):
        response = requests.post(endpoint, headers=headers, json=payload)
        try:
            response_data = response.json()
        except ValueError:
            # Body was not JSON (e.g. gateway error page) — surface it.
            return {"error": f"Non-JSON response (HTTP {response.status_code})"}

        if isinstance(response_data, dict) and 'error' in response_data:
            if 'currently loading' in response_data['error']:
                # The API reports the remaining warm-up time as a separate
                # 'estimated_time' JSON field — NOT inside the error string.
                # The old string-splitting parse raised IndexError here.
                estimated_time = float(response_data.get('estimated_time', 10))
                print(f"Model loading... waiting for {estimated_time} seconds.")
                time.sleep(estimated_time)
            else:
                # A real (non-loading) error: return it instead of the
                # misleading "Max retries exceeded" message.
                print(f"Error: {response_data['error']}")
                return response_data
        else:
            return response_data
    return {"error": "Max retries exceeded, model still loading."}
110
+
111
# Gradio front-end: a plain-text question in, the generated answer out.
demo_title = "RAG - Neo4j & LLM Integration"
demo_description = (
    "Fetches data from Neo4j and generates a response using a "
    "Hugging Face GPT-2 model and NER."
)
iface = gr.Interface(
    fn=query_llm,
    inputs="text",
    outputs="text",
    live=False,
    title=demo_title,
    description=demo_description,
)
iface.launch(share=True)