import os
import time

import gradio as gr
import requests
from py2neo import Graph

# Hugging Face Inference API endpoints: GPT-2 for text generation and a BERT
# model fine-tuned on CoNLL-2003 English for named-entity recognition (NER)
llm_endpoint = "https://api-inference.huggingface.co/models/gpt2"
ner_endpoint = "https://api-inference.huggingface.co/models/dbmdz/bert-large-cased-finetuned-conll03-english"

# Authorization header; the token is read from the HF_TOKEN environment variable
hf_token = os.getenv("HF_TOKEN")
headers = {"Authorization": f"Bearer {hf_token}"}
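
# Optional guard: every Inference API call below requires the token, so fail
# fast at startup if the HF_TOKEN secret is missing.
if not hf_token:
    raise RuntimeError("HF_TOKEN is not set; add it as a secret before launching.")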

# Connect to the Neo4j database
uri = "neo4j://143.198.173.3:7687"  # Replace with your database URI
username = "neo4j"                  # Replace with your Neo4j username
password = "pickumaterinu"          # Replace with your Neo4j password
graph = Graph(uri, auth=(username, password))
print("DB connected")

def query_neo4j(question):
    """Extract entities from a question and retrieve their relationships from Neo4j."""
    try:
        # Run NER over the question to extract entity mentions
        ner_response = query_with_retry(ner_endpoint, {"inputs": question})
        if 'error' in ner_response:
            return f"Error in NER response: {ner_response.get('error')}"

        # If the NER response is empty, handle it gracefully
        if not ner_response:
            return "No entities extracted from the question."

        # Collect the surface form ("word") of every detected entity
        entities = [item['word'] for item in ner_response if 'word' in item]

        # Query Neo4j for the outgoing relationships of each entity
        result = []
        try:
            for entity in entities:
                query_response = graph.run(
                    """
                    MATCH (p:Place {id: $entity})-[r]->(e)
                    RETURN p.id AS source_id, type(r) AS relationship, e.id AS target_id
                    LIMIT 50
                    """,
                    entity=entity
                )
                result.extend(
                    f"{row['source_id']} - {row['relationship']} -> {row['target_id']}"
                    for row in query_response
                )
        except Exception as e:
            return f"Error with querying Neo4j: {str(e)}"

        return "\n".join(result) if result else None
    except Exception as e:
        return f"An error occurred while processing the question: {str(e)}"

def query_llm(question):
    """Fetch data from Neo4j and generate an answer with GPT-2."""
    graph_data = query_neo4j(question)
    if graph_data is None:
        return "No relevant data found"

    # query_neo4j reports failures as strings such as "Error in NER response:
    # ..." or "An error occurred ...", so match case-insensitively and pass
    # the message straight through
    if 'error' in graph_data.lower():
        return graph_data

    context = f"Graph data:\n{graph_data}"
    llm_response = query_with_retry(llm_endpoint, {"inputs": context})
    if 'error' in llm_response:
        return f"Error in LLM response: {llm_response.get('error')}"
    if not llm_response or 'generated_text' not in llm_response[0]:
        return "LLM generated an empty response."
    return llm_response[0]["generated_text"]

# Query the Hugging Face API, retrying while the model is still loading
def query_with_retry(endpoint, payload, max_retries=5):
    """Send a request to the Hugging Face API with retry logic in case of model loading delays."""
    for _ in range(max_retries):
        response = requests.post(endpoint, headers=headers, json=payload)
        response_data = response.json()

        # Check whether the model is ready or an error occurred
        if 'error' in response_data:
            if 'currently loading' in response_data['error']:
                # The API reports the load time in a separate "estimated_time"
                # field; wait that long (falling back to 10 s) before retrying
                estimated_time = response_data.get('estimated_time', 10)
                print(f"Model loading... waiting for {estimated_time} seconds.")
                time.sleep(float(estimated_time))
            else:
                print(f"Error: {response_data['error']}")
                break
        else:
            return response_data
    return {"error": "Max retries exceeded, model still loading."}

# Gradio interface
iface = gr.Interface(
    fn=query_llm,
    inputs="text",
    outputs="text",
    live=False,
    title="RAG - Neo4j & LLM Integration",
    description="Fetches data from Neo4j and generates a response using a Hugging Face GPT-2 model and NER."
)

iface.launch(share=True)
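
# Running this file with `python app.py` starts the interface; share=True
# asks Gradio for a temporary public link in addition to the local URL.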