EnjunDu committed
Commit 593899c · 1 Parent(s): fc2dd56

Synthetic dataset of text-attributed graph data, compiled by Du Enjun
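Each dataset file added in this commit is a JSON array of node records; the scripts further down read the fields node_id, label, text, neighbors, and mask from every record. A minimal loading sketch, under the assumption that Children.json follows that same schema:

import json
from collections import Counter

# Assumption: the file is a JSON array of node records with the fields
# node_id, label, text, neighbors, mask (as used by the scripts in this commit).
with open("Children.json", "r", encoding="utf-8") as f:
    nodes = json.load(f)

print("nodes:", len(nodes))
print("mask split:", Counter(node["mask"] for node in nodes))
print("top labels:", Counter(node["label"] for node in nodes).most_common(5))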

.gitattributes CHANGED
@@ -57,3 +57,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  # Video files - compressed
  *.mp4 filter=lfs diff=lfs merge=lfs -text
  *.webm filter=lfs diff=lfs merge=lfs -text
+ Children.json filter=lfs diff=lfs merge=lfs -text
+ History.json filter=lfs diff=lfs merge=lfs -text
+ arxiv2023.json filter=lfs diff=lfs merge=lfs -text
+ wikics.json filter=lfs diff=lfs merge=lfs -text
+ wikics_cleaned.json filter=lfs diff=lfs merge=lfs -text
.gitattributes copy ADDED
@@ -0,0 +1,59 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
+ *.mds filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # Audio files - uncompressed
+ *.pcm filter=lfs diff=lfs merge=lfs -text
+ *.sam filter=lfs diff=lfs merge=lfs -text
+ *.raw filter=lfs diff=lfs merge=lfs -text
+ # Audio files - compressed
+ *.aac filter=lfs diff=lfs merge=lfs -text
+ *.flac filter=lfs diff=lfs merge=lfs -text
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
+ *.ogg filter=lfs diff=lfs merge=lfs -text
+ *.wav filter=lfs diff=lfs merge=lfs -text
+ # Image files - uncompressed
+ *.bmp filter=lfs diff=lfs merge=lfs -text
+ *.gif filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
+ *.tiff filter=lfs diff=lfs merge=lfs -text
+ # Image files - compressed
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
+ *.webp filter=lfs diff=lfs merge=lfs -text
+ # Video files - compressed
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
+ *.webm filter=lfs diff=lfs merge=lfs -text
Children.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3bbfa9e9ddf60d5bde95425c1713dde2087224bd1080e9e1b2c7a8087c286097
+ size 125909900
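The three lines above are a Git LFS pointer, not the dataset itself: the actual Children.json (roughly 126 MB, per the size field) lives in LFS storage and is referenced by its sha256 oid. A minimal sketch of reading such a pointer into a dict, assuming the standard three-line pointer layout and a checkout where the smudge filter has not replaced the pointer (e.g. cloned with GIT_LFS_SKIP_SMUDGE=1):

def read_lfs_pointer(path):
    """Parse a Git LFS pointer file into a dict of its key/value lines."""
    with open(path, "r", encoding="utf-8") as f:
        return dict(line.strip().split(" ", 1) for line in f if line.strip())

# Example: read_lfs_pointer("Children.json")
# -> {"version": "https://git-lfs.github.com/spec/v1", "oid": "sha256:...", "size": "125909900"}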
History.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0226010aa829e8f2f723bc625920e92d7cd462d1a628f5479f0ea672f90531e8
+ size 68660141
README copy.md ADDED
@@ -0,0 +1,3 @@
+ ---
+ license: mit
+ ---
SubChildren.json ADDED
The diff for this file is too large to render. See raw diff
 
SubCiteseer.json ADDED
The diff for this file is too large to render. See raw diff
 
SubCora.json ADDED
The diff for this file is too large to render. See raw diff
 
SubHistory.json ADDED
The diff for this file is too large to render. See raw diff
 
SubWikics.json ADDED
The diff for this file is too large to render. See raw diff
 
Subarxiv2023.json ADDED
The diff for this file is too large to render. See raw diff
 
arxiv2023.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae1fb88d617182abedf2c8a01dd54288471499e0c2921a7c39981d86bafb31da
+ size 66487587
citeseer.json ADDED
The diff for this file is too large to render. See raw diff
 
cora.json ADDED
The diff for this file is too large to render. See raw diff
 
data_clear.py ADDED
@@ -0,0 +1,52 @@
+ import json
+
+ def clean_graph_data(input_file, output_file):
+     """
+     Clean graph data by removing nodes with mask="None", ensuring sequential node_ids,
+     and updating all neighbor references accordingly.
+     """
+     # Load the JSON data
+     with open(input_file, 'r', encoding='utf-8') as f:
+         data = json.load(f)
+
+     # Identify valid nodes (mask != "None") and their IDs
+     valid_nodes = []
+     valid_node_ids = set()
+
+     for node in data:
+         if 'mask' in node and node['mask'] != "None":
+             valid_nodes.append(node)
+             valid_node_ids.add(node['node_id'])
+
+     # Create mapping from old node_id to new node_id
+     old_to_new_mapping = {}
+     new_id = 0
+
+     for node in sorted(valid_nodes, key=lambda x: x['node_id']):
+         old_to_new_mapping[node['node_id']] = new_id
+         new_id += 1
+
+     # Update node_ids and neighbors based on the mapping
+     for node in valid_nodes:
+         # Update neighbors first (while node_id is still the old one)
+         new_neighbors = []
+         for neighbor in node['neighbors']:
+             if neighbor in valid_node_ids:  # Only keep neighbors that weren't removed
+                 new_neighbors.append(old_to_new_mapping[neighbor])
+         node['neighbors'] = new_neighbors
+
+         # Update node_id
+         node['node_id'] = old_to_new_mapping[node['node_id']]
+
+     # Sort nodes by new node_id for better readability
+     valid_nodes.sort(key=lambda x: x['node_id'])
+
+     # Save the cleaned data
+     with open(output_file, 'w') as f:
+         json.dump(valid_nodes, f, indent=2)
+
+     return f"Successfully cleaned the graph data. Removed {len(data) - len(valid_nodes)} nodes with mask='None'."
+
+ # Usage
+ result = clean_graph_data('wikics.json', 'wikics_cleaned.json')
+ print(result)
data_statistics.py ADDED
@@ -0,0 +1,67 @@
+ import json
+ import networkx as nx
+ import community as community_louvain  # You may need to install this package using: pip install python-louvain
+
+ def main():
+     # Load the JSON file
+     with open('arxiv2023_1624-10.json', 'r', encoding='utf-8') as f:
+         data = json.load(f)
+
+     # Initialize counters and sets
+     nodes_count = len(data)
+     classes = set()
+     train_nodes_count = 0
+     validation_nodes_count = 0
+     test_nodes_count = 0
+
+     # Build an undirected graph
+     G = nx.Graph()
+
+     # Iterate over each element in the dataset
+     for entry in data:
+         node_id = entry['node_id']
+         label = entry['label']
+         mask = entry['mask']
+
+         # Add label to the classes set
+         classes.add(label)
+
+         # Add the node with its attributes to the graph
+         G.add_node(node_id, label=label, mask=mask)
+
+         # Count nodes by mask type
+         if mask == 'Train':
+             train_nodes_count += 1
+         elif mask == 'Validation':
+             validation_nodes_count += 1
+         elif mask == 'Test':
+             test_nodes_count += 1
+
+         # Process neighbors and add edges (using set to remove duplicates)
+         neighbors = set(entry['neighbors'])
+         for neighbor in neighbors:
+             # Avoid self-loop if desired (optional)
+             if neighbor != node_id:
+                 G.add_edge(node_id, neighbor)
+             # If you want to add self-loops, remove the above condition
+
+     # Compute the number of edges in the graph
+     edge_count = G.number_of_edges()
+     classes_count = len(classes)
+
+     # Perform Louvain community detection
+     partition = community_louvain.best_partition(G)
+     communities = set(partition.values())
+     community_count = len(communities)
+
+     # Print out the statistics
+     print("Nodes count:", nodes_count)
+     print("Edges count:", edge_count)
+     print("Classes count:", classes_count)
+     print("Train nodes count:", train_nodes_count)
+     print("Validation nodes count:", validation_nodes_count)
+     print("Test nodes count:", test_nodes_count)
+     print("Louvain community count:", community_count)
+
+ if __name__ == '__main__':
+     main()
few_shot.py ADDED
@@ -0,0 +1,77 @@
+ import json
+ import random
+
+ path = r"C:\Code_Compiling\02_bit_Li\07_LLM4GDA\data\arxiv2023_label_16_10.json"
+ # Load the JSON file
+ with open(path, 'r', encoding='utf-8') as f:
+     data = json.load(f)
+
+ # Count the number of Train nodes for each label
+ label_counts = {}
+ for node in data:
+     label = node['label']
+     if node['mask'] == 'Train':
+         if label not in label_counts:
+             label_counts[label] = 0
+         label_counts[label] += 1
+
+ # Print the node count for each label
+ print("Train Label counts:", label_counts)
+
+ # Get x and y from user input
+ x = int(input("Enter label value (x): "))
+ y = int(input("Enter number of nodes to keep (y): "))
+
+ # Collect all nodes whose mask is 'Train' and label is x
+ train_x_nodes = [node for node in data if node['label'] == x and node['mask'] == 'Train']
+
+ # Make sure train_x_nodes contains at least y nodes
+ if len(train_x_nodes) < y:
+     print(f"Warning: There are fewer than {y} nodes with label {x} and mask 'train'. All {len(train_x_nodes)} nodes will be kept.")
+     selected_nodes = train_x_nodes  # If there are fewer than y nodes, keep all nodes matching this label and mask
+ else:
+     # Randomly select y nodes
+     selected_nodes = random.sample(train_x_nodes, y)
+
+ # Build the set of node_ids that will be removed
+ deleted_nodes = set(node['node_id'] for node in train_x_nodes if node not in selected_nodes)
+
+ # Build the new data list: keep only the randomly selected label-x 'Train' nodes; all other nodes stay unchanged
+ new_data = []
+ for node in data:
+     # Keep every node whose mask is not 'Train' unchanged
+     if node['label'] != x or (node['mask'] != 'Train' or node in selected_nodes):
+         new_data.append(node)
+
+ # Walk every node's neighbors and drop references to removed nodes
+ for node in new_data:
+     if 'neighbors' in node:
+         # Filter out removed nodes
+         node['neighbors'] = [neighbor for neighbor in node['neighbors'] if neighbor not in deleted_nodes]
+
+ # Re-index node_ids so they are consecutive starting from 0
+ id_mapping = {}
+ new_node_id = 0
+
+ # Renumber all nodes in new_data
+ for node in new_data:
+     id_mapping[node['node_id']] = new_node_id
+     node['node_id'] = new_node_id
+     new_node_id += 1
+
+ # Update every node's neighbors to use the new node_ids
+ for node in new_data:
+     if 'neighbors' in node:
+         # Map the neighbor node_ids through id_mapping
+         updated_neighbors = []
+         for neighbor in node['neighbors']:
+             if neighbor in id_mapping:  # Only keep neighbors present in id_mapping
+                 updated_neighbors.append(id_mapping[neighbor])
+         node['neighbors'] = updated_neighbors
+
+ # Save the modified data as arxiv2023_label_{x}_{y}.json
+ output_filename = f"arxiv2023_label_{x}_{y}.json"
+ with open(output_filename, 'w', encoding='utf-8') as f:
+     json.dump(new_data, f, indent=4)
+
+ print(f"Modified data saved to {output_filename}")
sample.py ADDED
@@ -0,0 +1,396 @@
+ import json
+ import numpy as np
+ import networkx as nx
+ from collections import Counter, defaultdict
+ import random
+ import scipy.sparse as sp
+ from scipy.sparse.linalg import eigsh
+ import sys
+ import os
+
+ try:
+     import community as community_louvain
+ except ImportError:
+     print("Warning: python-louvain package not found. Installing...")
+     import subprocess
+     subprocess.check_call([sys.executable, "-m", "pip", "install", "python-louvain"])
+     import community as community_louvain
+
+ def load_graph_from_json(json_file):
+     """Load graph from a JSON file with nodes."""
+     nodes = []
+
+     try:
+         # First try to parse as a single JSON array or object
+         with open(json_file, 'r', encoding='utf-8') as f:
+             content = f.read().strip()
+             try:
+                 data = json.loads(content)
+                 if isinstance(data, list):
+                     nodes = data
+                 else:
+                     nodes = [data]
+             except json.JSONDecodeError:
+                 # Reset and try parsing line by line
+                 nodes = []
+                 with open(json_file, 'r') as f:
+                     for line in f:
+                         line = line.strip()
+                         if line:  # Skip empty lines
+                             try:
+                                 node_data = json.loads(line)
+                                 nodes.append(node_data)
+                             except json.JSONDecodeError:
+                                 continue
+     except Exception as e:
+         print(f"Error loading graph: {e}")
+         return []
+
+     return nodes
+
+ def build_networkx_graph(nodes):
+     """Build a NetworkX graph from the loaded node data."""
+     G = nx.Graph()
+
+     # Add nodes with attributes
+     for node in nodes:
+         G.add_node(
+             node['node_id'],
+             label=node['label'],
+             text=node['text'],
+             mask=node['mask']
+         )
+
+     # Add edges
+     for node in nodes:
+         node_id = node['node_id']
+         for neighbor_id in node['neighbors']:
+             if G.has_node(neighbor_id):  # Only add edge if both nodes exist
+                 G.add_edge(node_id, neighbor_id)
+
+     return G
+
+ def analyze_graph_properties(G):
+     """Analyze the properties of the graph as specified in the requirements."""
+     properties = {}
+
+     # Mask distribution (Train/Validation/Test)
+     masks = [G.nodes[n]['mask'] for n in G.nodes]
+     mask_distribution = Counter(masks)
+     properties['mask_distribution'] = {k: v/len(G.nodes) for k, v in mask_distribution.items()}
+
+     # Label distribution
+     labels = [G.nodes[n]['label'] for n in G.nodes]
+     label_distribution = Counter(labels)
+     properties['label_distribution'] = {k: v/len(G.nodes) for k, v in label_distribution.items()}
+
+     # Graph density
+     properties['density'] = nx.density(G)
+
+     # Degree distribution
+     degrees = [d for n, d in G.degree()]
+     degree_counts = Counter(degrees)
+     properties['degree_distribution'] = {k: v/len(G.nodes) for k, v in degree_counts.items()}
+
+     # Community structure (using Louvain algorithm)
+     try:
+         communities = community_louvain.best_partition(G)
+         community_counts = Counter(communities.values())
+         properties['community_distribution'] = {k: v/len(G.nodes) for k, v in community_counts.items()}
+     except:
+         properties['community_distribution'] = {}
+
+     # Spectral characteristics
+     if len(G) > 1:
+         try:
+             laplacian = nx.normalized_laplacian_matrix(G)
+             if sp.issparse(laplacian) and laplacian.shape[0] > 1:
+                 try:
+                     k = min(5, laplacian.shape[0]-1)
+                     if k > 0:
+                         eigenvalues = eigsh(laplacian, k=k, which='SM', return_eigenvectors=False)
+                         properties['spectral_eigenvalues'] = sorted(eigenvalues.tolist())
+                     else:
+                         properties['spectral_eigenvalues'] = []
+                 except:
+                     properties['spectral_eigenvalues'] = []
+             else:
+                 properties['spectral_eigenvalues'] = []
+         except:
+             properties['spectral_eigenvalues'] = []
+     else:
+         properties['spectral_eigenvalues'] = []
+
+     # Connectivity characteristics
+     properties['connected_components'] = nx.number_connected_components(G)
+     largest_cc = max(nx.connected_components(G), key=len)
+     properties['largest_cc_ratio'] = len(largest_cc) / len(G.nodes)
+
+     return properties
+
+ def sample_graph_preserving_properties(G, percentage, original_properties):
+     """Sample a percentage of nodes while preserving graph properties."""
+     num_nodes = len(G.nodes)
+     num_nodes_to_sample = max(1, int(num_nodes * percentage / 100))
+
+     # If the graph is too small, just return it
+     if num_nodes <= num_nodes_to_sample:
+         return G, {n: n for n in G.nodes}
+
+     # 1. Preserve label and mask distribution (top priority per requirements)
+     mask_label_groups = defaultdict(list)
+     for node in G.nodes:
+         mask = G.nodes[node]['mask']
+         label = G.nodes[node]['label']
+         mask_label_groups[(mask, label)].append(node)
+
+     # Calculate how many nodes to sample from each mask-label group
+     group_counts = {}
+     for (mask, label), nodes in mask_label_groups.items():
+         mask_ratio = original_properties['mask_distribution'].get(mask, 0)
+         label_ratio = original_properties['label_distribution'].get(label, 0)
+
+         # Calculate joint probability
+         joint_ratio = mask_ratio * label_ratio / sum(
+             original_properties['mask_distribution'].get(m, 0) *
+             original_properties['label_distribution'].get(l, 0)
+             for m in original_properties['mask_distribution']
+             for l in original_properties['label_distribution']
+         )
+
+         target_count = int(num_nodes_to_sample * joint_ratio)
+         # Ensure at least one node from non-empty groups
+         group_counts[(mask, label)] = max(1, target_count) if nodes else 0
+
+     # Adjust to match the exact sample size
+     total_count = sum(group_counts.values())
+     if total_count != num_nodes_to_sample:
+         diff = num_nodes_to_sample - total_count
+         groups = list(group_counts.keys())
+
+         if diff > 0:
+             # Add nodes to groups proportionally to their size
+             group_sizes = [len(mask_label_groups[g]) for g in groups]
+             group_probs = [s/sum(group_sizes) for s in group_sizes]
+
+             for _ in range(diff):
+                 group = random.choices(groups, weights=group_probs)[0]
+                 if len(mask_label_groups[group]) > group_counts[group]:
+                     group_counts[group] += 1
+         else:
+             # Remove nodes from groups with excess
+             groups_with_excess = [(g, c) for g, c in group_counts.items()
+                                   if c > 1 and c > len(mask_label_groups[g]) * 0.2]
+             groups_with_excess.sort(key=lambda x: x[1], reverse=True)
+
+             for i in range(min(-diff, len(groups_with_excess))):
+                 group_counts[groups_with_excess[i][0]] -= 1
+
+     # 2. Sample nodes from each group, prioritizing connectivity and community structure
+     sampled_nodes = []
+
+     # First try to get community structure
+     try:
+         communities = community_louvain.best_partition(G)
+     except:
+         communities = {node: 0 for node in G.nodes}  # Fallback if community detection fails
+
+     # Sample from each mask-label group
+     for (mask, label), count in group_counts.items():
+         candidates = mask_label_groups[(mask, label)]
+
+         if len(candidates) <= count:
+             # Take all nodes in this group
+             sampled_nodes.extend(candidates)
+         else:
+             # Score nodes based on degree and community representation
+             node_scores = {}
+             for node in candidates:
+                 # Higher score for higher degree nodes (connectivity)
+                 degree_score = G.degree(node) / max(1, max(d for n, d in G.degree()))
+
+                 # Higher score for nodes in underrepresented communities
+                 comm = communities.get(node, 0)
+                 comm_sampled = sum(1 for n in sampled_nodes if communities.get(n, -1) == comm)
+                 comm_total = sum(1 for n in G.nodes if communities.get(n, -1) == comm)
+                 comm_score = 1 - (comm_sampled / max(1, comm_total))
+
+                 # Combined score (prioritize connectivity slightly more)
+                 node_scores[node] = 0.6 * degree_score + 0.4 * comm_score
+
+             # Sort candidates by score and select the top ones
+             sorted_candidates = sorted(candidates, key=lambda n: node_scores.get(n, 0), reverse=True)
+             sampled_nodes.extend(sorted_candidates[:count])
+
+     # 3. Create the sampled subgraph
+     sampled_G = G.subgraph(sampled_nodes).copy()
+
+     # 4. Improve connectivity if needed
+     if nx.number_connected_components(sampled_G) > original_properties['connected_components']:
+         # Try to improve connectivity by swapping nodes
+         non_sampled = [n for n in G.nodes if n not in sampled_nodes]
+
+         # Calculate betweenness centrality for non-sampled nodes
+         betweenness = {}
+         for node in non_sampled:
+             # Count how many different components this node would connect
+             neighbors = list(G.neighbors(node))
+             sampled_neighbors = [n for n in neighbors if n in sampled_nodes]
+
+             if not sampled_neighbors:
+                 continue
+
+             components_connected = set()
+             for n in sampled_neighbors:
+                 for comp_idx, comp in enumerate(nx.connected_components(sampled_G)):
+                     if n in comp:
+                         components_connected.add(comp_idx)
+                         break
+
+             betweenness[node] = len(components_connected)
+
+         # Sort non-sampled nodes by how many components they would connect
+         connector_nodes = [(n, b) for n, b in betweenness.items() if b > 1]
+         connector_nodes.sort(key=lambda x: x[1], reverse=True)
+
+         # Try to improve connectivity by swapping nodes
+         for connector, _ in connector_nodes:
+             # Find a node to swap out (prefer low degree nodes from well-represented groups)
+             mask = G.nodes[connector]['mask']
+             label = G.nodes[connector]['label']
+
+             # Find nodes with the same mask and label
+             same_group = [n for n in sampled_nodes
+                           if G.nodes[n]['mask'] == mask and G.nodes[n]['label'] == label]
+
+             if not same_group:
+                 continue
+
+             # Sort by degree (ascending)
+             same_group.sort(key=lambda n: sampled_G.degree(n))
+
+             # Swap the node with lowest degree
+             to_remove = same_group[0]
+             sampled_nodes.remove(to_remove)
+             sampled_nodes.append(connector)
+
+             # Update the sampled subgraph
+             sampled_G = G.subgraph(sampled_nodes).copy()
+
+             # Stop if we've reached the desired connectivity
+             if nx.number_connected_components(sampled_G) <= original_properties['connected_components']:
+                 break
+
+     # 5. Relabel nodes to have consecutive IDs starting from 0
+     node_mapping = {old_id: new_id for new_id, old_id in enumerate(sorted(sampled_nodes))}
+     relabeled_G = nx.relabel_nodes(sampled_G, node_mapping)
+
+     # Return the sampled graph and the inverse mapping (new_id -> original_id)
+     inverse_mapping = {new_id: old_id for old_id, new_id in node_mapping.items()}
+     return relabeled_G, inverse_mapping
+
+ def graph_to_json_format(G):
+     """Convert a NetworkX graph to the required JSON format."""
+     result = []
+
+     for node_id in sorted(G.nodes):
+         node_data = {
+             "node_id": int(node_id),
+             "label": G.nodes[node_id]['label'],
+             "text": G.nodes[node_id]['text'],
+             "neighbors": sorted([int(n) for n in G.neighbors(node_id)]),
+             "mask": G.nodes[node_id]['mask']
+         }
+
+         result.append(node_data)
+
+     return result
+
+ def sample_text_attribute_graph(input_file, output_file, percentage):
+     """Main function to sample a text attribute graph and preserve its properties."""
+     # Load the graph data
+     print(f"Loading graph from {input_file}...")
+     nodes = load_graph_from_json(input_file)
+
+     if not nodes:
+         print("Failed to load nodes from the input file.")
+         return None, None, None
+
+     print(f"Loaded {len(nodes)} nodes.")
+
+     # Build the NetworkX graph
+     print("Building graph...")
+     G = build_networkx_graph(nodes)
+     print(f"Built graph with {len(G.nodes)} nodes and {len(G.edges)} edges.")
+
+     # Analyze the original graph properties
+     print("Analyzing original graph properties...")
+     original_properties = analyze_graph_properties(G)
+
+     # Sample the graph
+     print(f"Sampling {percentage}% of the nodes...")
+     sampled_G, inverse_mapping = sample_graph_preserving_properties(G, percentage, original_properties)
+     print(f"Sampled graph has {len(sampled_G.nodes)} nodes and {len(sampled_G.edges)} edges.")
+
+     # Convert the sampled graph to JSON format
+     print("Converting sampled graph to JSON format...")
+     sampled_data = graph_to_json_format(sampled_G)
+
+     # Save the sampled graph
+     print(f"Saving sampled graph to {output_file}...")
+     with open(output_file, 'w') as f:
+         json.dump(sampled_data, f, indent=2)
+
+     # Analyze the sampled graph properties
+     print("Analyzing sampled graph properties...")
+     sampled_properties = analyze_graph_properties(sampled_G)
+
+     # Print comparison of original and sampled properties
+     print("\nComparison of Graph Properties:")
+     print(f"{'Property':<25} {'Original':<15} {'Sampled':<15}")
+     print("-" * 55)
+     print(f"{'Number of nodes':<25} {len(G.nodes):<15} {len(sampled_G.nodes):<15}")
+     print(f"{'Number of edges':<25} {len(G.edges):<15} {len(sampled_G.edges):<15}")
+     print(f"{'Density':<25} {original_properties['density']:.4f}{'':>10} {sampled_properties['density']:.4f}{'':>10}")
+
+     print("\nMask Distribution:")
+     print(f"{'Mask':<10} {'Original %':<15} {'Sampled %':<15}")
+     print("-" * 40)
+     for mask in sorted(set(original_properties['mask_distribution'].keys()) | set(sampled_properties['mask_distribution'].keys())):
+         orig_pct = original_properties['mask_distribution'].get(mask, 0) * 100
+         sampled_pct = sampled_properties['mask_distribution'].get(mask, 0) * 100
+         print(f"{mask:<10} {orig_pct:.2f}%{'':>9} {sampled_pct:.2f}%{'':>9}")
+
+     print("\nLabel Distribution:")
+     print(f"{'Label':<10} {'Original %':<15} {'Sampled %':<15}")
+     print("-" * 40)
+     for label in sorted(set(original_properties['label_distribution'].keys()) | set(sampled_properties['label_distribution'].keys())):
+         orig_pct = original_properties['label_distribution'].get(label, 0) * 100
+         sampled_pct = sampled_properties['label_distribution'].get(label, 0) * 100
+         print(f"{label:<10} {orig_pct:.2f}%{'':>9} {sampled_pct:.2f}%{'':>9}")
+
+     print("\nConnectivity:")
+     print(f"Connected components: {original_properties['connected_components']} (original) vs {sampled_properties['connected_components']} (sampled)")
+
+     return sampled_G, original_properties, sampled_properties
+
+ def main():
+     """Command-line interface."""
+     if len(sys.argv) != 4:
+         print("Usage: python sample_graph.py input_file output_file percentage")
+         sys.exit(1)
+
+     input_file = sys.argv[1]
+     output_file = sys.argv[2]
+     try:
+         percentage = float(sys.argv[3])
+         if percentage <= 0 or percentage > 100:
+             raise ValueError("Percentage must be between 0 and 100")
+     except ValueError:
+         print("Error: Percentage must be a number between 0 and 100")
+         sys.exit(1)
+
+     sample_text_attribute_graph(input_file, output_file, percentage)
+
+ if __name__ == "__main__":
+     main()
wikics.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ba9dd08bf0b5b50f3170d8e1ef794d1f6948d0a38be407a3217f3a830c483d65
+ size 44635743
wikics_cleaned.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4567da141dd5c2f64e6b059bdc4db24bf974e655c803d3787ce3b569b392f406
+ size 29728898