-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathpreprocessing.py
More file actions
204 lines (163 loc) · 7.61 KB
/
preprocessing.py
File metadata and controls
204 lines (163 loc) · 7.61 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
import numpy as np
import scipy.sparse as sp
import networkx as nx
# Convert sparse matrix to tuple
def sparse_to_tuple(sparse_mx):
    """Decompose a scipy sparse matrix into its COO pieces.

    Returns a 3-tuple (coords, values, shape) where coords is an
    (nnz, 2) array of (row, col) indices, values is the (nnz,) data
    array, and shape is the matrix shape.
    """
    coo = sparse_mx if sp.isspmatrix_coo(sparse_mx) else sparse_mx.tocoo()
    indices = np.column_stack((coo.row, coo.col))
    return indices, coo.data, coo.shape
# Get normalized adjacency matrix: A_norm
def preprocess_graph(adj):
    """Symmetrically normalize an adjacency matrix with self-loops.

    Computes D^{-1/2} (A + I)^T D^{-1/2} where D is the degree matrix of
    A + I, and returns it in the (coords, values, shape) tuple form
    produced by sparse_to_tuple.
    """
    adj_with_loops = sp.coo_matrix(adj) + sp.eye(adj.shape[0])
    degrees = np.array(adj_with_loops.sum(1))
    inv_sqrt_degree = sp.diags(np.power(degrees, -0.5).flatten())
    # Keep the dot/transpose/dot order of the reference implementation.
    normalized = adj_with_loops.dot(inv_sqrt_degree).transpose().dot(inv_sqrt_degree).tocoo()
    return sparse_to_tuple(normalized)
# Prepare feed-dict for Tensorflow session
def construct_feed_dict(adj_normalized, adj, features, placeholders):
    """Map the graph placeholders to their concrete values.

    placeholders must contain the keys 'features', 'adj' and 'adj_orig';
    the returned dict is suitable for session.run(..., feed_dict=...).
    """
    return {
        placeholders['features']: features,
        placeholders['adj']: adj_normalized,
        placeholders['adj_orig']: adj,
    }
# Perform train-test split
# Takes in adjacency matrix in sparse format
# Returns: adj_train, train_edges, val_edges, val_edges_false,
# test_edges, test_edges_false
def mask_test_edges(adj, test_frac=.1, val_frac=.05, prevent_disconnect=True, verbose=False):
    """Split the edges of an undirected graph into train/val/test sets.

    Parameters
    ----------
    adj : scipy sparse matrix
        Adjacency matrix; assumed symmetric (undirected graph). Self-loops
        are removed before splitting.
    test_frac : float
        Fraction of edges to place in the test set.
    val_frac : float
        Fraction of edges to place in the validation set.
    prevent_disconnect : bool
        If True, never remove an edge whose removal would increase the
        number of connected components of the training graph.
    verbose : bool
        Print progress messages.

    Returns
    -------
    tuple
        (adj_train, train_edges, train_edges_false, val_edges,
        val_edges_false, test_edges, test_edges_false). adj_train is the
        sparse adjacency of the training graph; each edge array lists an
        undirected edge once as [node1, node2] with node1 < node2.
    """
    # NOTE: Splits are randomized and results might slightly deviate from reported numbers in the paper.
    if verbose == True:
        print('preprocessing...')

    # Remove diagonal elements (self-loops) and verify none remain.
    adj = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)
    adj.eliminate_zeros()
    # Check that diag is zero:
    assert np.diag(adj.todense()).sum() == 0

    # NOTE(review): nx.from_scipy_sparse_matrix was removed in networkx 3.0
    # (renamed to from_scipy_sparse_array) -- this code assumes networkx < 3.
    g = nx.from_scipy_sparse_matrix(adj)
    orig_num_cc = nx.number_connected_components(g)

    adj_triu = sp.triu(adj)  # upper triangular portion: each undirected edge listed once
    edges = sparse_to_tuple(adj_triu)[0]
    num_test = int(np.floor(edges.shape[0] * test_frac))  # controls how large the test set should be
    num_val = int(np.floor(edges.shape[0] * val_frac))  # controls how large the validation set should be

    # Store edges in list of ordered tuples (node1, node2) where node1 < node2
    edge_tuples = [(min(edge[0], edge[1]), max(edge[0], edge[1])) for edge in edges]
    all_edge_tuples = set(edge_tuples)
    train_edges = set(edge_tuples)  # initialize train_edges to have all edges
    test_edges = set()
    val_edges = set()

    if verbose == True:
        print('generating test/val sets...')

    # Iterate over shuffled edges; move edges into test first, then val.
    np.random.shuffle(edge_tuples)
    for edge in edge_tuples:
        # BUG FIX: check whether both sets are full BEFORE touching g. The
        # original removed the edge from g first and then broke out of the
        # loop, so one edge still listed in train_edges was silently
        # missing from adj_train (rebuilt from g below).
        if len(test_edges) == num_test and len(val_edges) == num_val:
            break
        node1 = edge[0]
        node2 = edge[1]
        # If removing edge would disconnect a connected component, backtrack and move on
        g.remove_edge(node1, node2)
        if prevent_disconnect == True:
            if nx.number_connected_components(g) > orig_num_cc:
                g.add_edge(node1, node2)
                continue
        # Fill test_edges first; once full, fill val_edges (val cannot be
        # full here, or the loop would have broken above).
        if len(test_edges) < num_test:
            test_edges.add(edge)
        else:
            val_edges.add(edge)
        train_edges.remove(edge)

    if (len(val_edges) < num_val or len(test_edges) < num_test):
        print("WARNING: not enough removable edges to perform full train-test split!")
        print("Num. (test, val) edges requested: (", num_test, ", ", num_val, ")")
        print("Num. (test, val) edges returned: (", len(test_edges), ", ", len(val_edges), ")")

    if prevent_disconnect == True:
        assert nx.number_connected_components(g) == orig_num_cc

    if verbose == True:
        print('creating false test edges...')
    # NOTE(review): rejection sampling below can be slow (or spin) on very
    # dense graphs, where few non-edges exist -- unchanged from original.
    test_edges_false = set()
    while len(test_edges_false) < num_test:
        idx_i = np.random.randint(0, adj.shape[0])
        idx_j = np.random.randint(0, adj.shape[0])
        if idx_i == idx_j:
            continue
        false_edge = (min(idx_i, idx_j), max(idx_i, idx_j))
        # Make sure false_edge not an actual edge, and not a repeat
        if false_edge in all_edge_tuples or false_edge in test_edges_false:
            continue
        test_edges_false.add(false_edge)

    if verbose == True:
        print('creating false val edges...')
    val_edges_false = set()
    while len(val_edges_false) < num_val:
        idx_i = np.random.randint(0, adj.shape[0])
        idx_j = np.random.randint(0, adj.shape[0])
        if idx_i == idx_j:
            continue
        false_edge = (min(idx_i, idx_j), max(idx_i, idx_j))
        # Make sure false_edge is not an actual edge, not in test_edges_false, not a repeat
        if false_edge in all_edge_tuples or \
            false_edge in test_edges_false or \
            false_edge in val_edges_false:
            continue
        val_edges_false.add(false_edge)

    if verbose == True:
        print('creating false train edges...')
    train_edges_false = set()
    while len(train_edges_false) < len(train_edges):
        idx_i = np.random.randint(0, adj.shape[0])
        idx_j = np.random.randint(0, adj.shape[0])
        if idx_i == idx_j:
            continue
        false_edge = (min(idx_i, idx_j), max(idx_i, idx_j))
        # Make sure false_edge is not an actual edge, not in test_edges_false,
        # not in val_edges_false, not a repeat
        if false_edge in all_edge_tuples or \
            false_edge in test_edges_false or \
            false_edge in val_edges_false or \
            false_edge in train_edges_false:
            continue
        train_edges_false.add(false_edge)

    if verbose == True:
        print('final checks for disjointness...')
    # assert: false_edges are actually false (not in all_edge_tuples)
    assert test_edges_false.isdisjoint(all_edge_tuples)
    assert val_edges_false.isdisjoint(all_edge_tuples)
    assert train_edges_false.isdisjoint(all_edge_tuples)
    # assert: test, val, train false edges disjoint
    assert test_edges_false.isdisjoint(val_edges_false)
    assert test_edges_false.isdisjoint(train_edges_false)
    assert val_edges_false.isdisjoint(train_edges_false)
    # assert: test, val, train positive edges disjoint
    assert val_edges.isdisjoint(train_edges)
    assert test_edges.isdisjoint(train_edges)
    assert val_edges.isdisjoint(test_edges)

    if verbose == True:
        print('creating adj_train...')
    # Re-build adj matrix using remaining graph
    adj_train = nx.adjacency_matrix(g)

    # Convert edge-lists to numpy arrays
    train_edges = np.array([list(edge_tuple) for edge_tuple in train_edges])
    train_edges_false = np.array([list(edge_tuple) for edge_tuple in train_edges_false])
    val_edges = np.array([list(edge_tuple) for edge_tuple in val_edges])
    val_edges_false = np.array([list(edge_tuple) for edge_tuple in val_edges_false])
    test_edges = np.array([list(edge_tuple) for edge_tuple in test_edges])
    test_edges_false = np.array([list(edge_tuple) for edge_tuple in test_edges_false])

    if verbose == True:
        print('Done with train-test split!')
        print('')

    # NOTE: these edge lists only contain single direction of edge!
    return adj_train, train_edges, train_edges_false, \
        val_edges, val_edges_false, test_edges, test_edges_false