# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
import asyncio
import os

from graphrag.index import run_pipeline, run_pipeline_with_config
from graphrag.index.config import PipelineCSVInputConfig, PipelineWorkflowReference
from graphrag.index.input import load_input
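
# This example runs the same indexing pipeline two ways: run_with_config()
# drives it from pipeline.yml, while run_python() builds the equivalent
# workflow list in code; both should print matching entity-extraction output.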

sample_data_dir = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), "../_sample_data/"
)

# Load our dataset once
shared_dataset = asyncio.run(
    load_input(
        PipelineCSVInputConfig(
            file_pattern=".*\\.csv$",
            base_dir=sample_data_dir,
            source_column="author",
            text_column="message",
            timestamp_column="date(yyyyMMddHHmmss)",
            timestamp_format="%Y%m%d%H%M%S",
            title_column="message",
        ),
    )
)
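
# Note: the functions below call .head() on shared_dataset, so load_input is
# expected to return a pandas DataFrame whose rows carry the configured
# source, text, timestamp, and title columns.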


async def run_with_config():
    """Run a pipeline with a config file"""
    # We're cheap, and this is an example, so let's just do 10
    dataset = shared_dataset.head(10)

    # Load pipeline.yml from this directory
    config_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "./pipeline.yml"
    )
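
    # pipeline.yml itself isn't shown in this file. Since its output is meant
    # to match run_python() (see the note below), a plausible sketch of its
    # contents (an assumption, not the verbatim file) is:
    #
    #   workflows:
    #     - name: entity_extraction
    #       config:
    #         entity_extract:
    #           strategy:
    #             type: nltk
    #     - name: entity_graph
    #       config:
    #         cluster_graph:
    #           strategy:
    #             type: leiden
    #         embed_graph:
    #           strategy:
    #             type: node2vec
    #             num_walks: 10
    #             walk_length: 40
    #             window_size: 2
    #             iterations: 3
    #             random_seed: 597832
    #         layout_graph:
    #           strategy:
    #             type: umap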

    # Grab the last result from the pipeline; it should be our entity
    # extraction. The pipeline yields one result table per workflow, in
    # order, so tables[-1] is the final workflow's output.
    tables = []
    async for table in run_pipeline_with_config(
        config_or_path=config_path, dataset=dataset
    ):
        tables.append(table)
    pipeline_result = tables[-1]

    if pipeline_result.result is not None:
        # The output of this should match the run_python() example
        first_result = pipeline_result.result.head(1)
        print(f"level: {first_result['level'][0]}")
        print(f"embeddings: {first_result['embeddings'][0]}")
        print(f"entity_graph_positions: {first_result['node_positions'][0]}")
    else:
        print("No results!")


async def run_python():
    """Run a pipeline with workflows defined in Python"""
    # We're cheap, and this is an example, so let's just do 10
    dataset = shared_dataset.head(10)

    workflows: list[PipelineWorkflowReference] = [
        # This workflow reference is only necessary because we want to
        # customize how the entity_extraction workflow is configured;
        # otherwise it can be omitted and the default entity_extraction
        # configuration is used.
        PipelineWorkflowReference(
            name="entity_extraction",
            config={
                "entity_extract": {
                    "strategy": {
                        "type": "nltk",
                    }
                }
            },
        ),
        PipelineWorkflowReference(
            name="entity_graph",
            config={
                "cluster_graph": {"strategy": {"type": "leiden"}},
                "embed_graph": {
                    "strategy": {
                        "type": "node2vec",
                        "num_walks": 10,
                        "walk_length": 40,
                        "window_size": 2,
                        "iterations": 3,
                        "random_seed": 597832,
                    }
                },
                "layout_graph": {
                    "strategy": {
                        "type": "umap",
                    },
                },
            },
        ),
    ]
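
    # Roughly what the entity_graph config above asks for (standard algorithm
    # behavior, not documented in this file): leiden clusters the extracted
    # entity graph into hierarchical communities, node2vec learns an embedding
    # vector per entity from random walks, and umap projects the graph to 2D
    # positions for layout.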

    # Grab the last result from the pipeline; it should be our entity extraction
    tables = []
    async for table in run_pipeline(dataset=dataset, workflows=workflows):
        tables.append(table)
    pipeline_result = tables[-1]

    # The output will contain entity graphs per hierarchical level,
    # with embeddings per entity
    if pipeline_result.result is not None:
        first_result = pipeline_result.result.head(1)
        print(f"level: {first_result['level'][0]}")
        print(f"embeddings: {first_result['embeddings'][0]}")
        print(f"entity_graph_positions: {first_result['node_positions'][0]}")
    else:
        print("No results!")

if __name__ == "__main__":
    asyncio.run(run_python())
    asyncio.run(run_with_config())