@@ -66,7 +66,14 @@ def _populate_cx_edges_with_timestamps(
     for node, node_ts in zip(nodes, nodes_ts):
         CX_EDGES[node] = {}
         timestamps = timestamps_d[node]
-        timestamps.add(node_ts)
+        cx_edges_d_node_ts = _get_cx_edges_at_timestamp(node, response, node_ts)
+
+        edges = np.concatenate([empty_2d] + list(cx_edges_d_node_ts.values()))
+        partner_parent_ts_d = get_parent_timestamps(cg, edges[:, 1])
+        for v in partner_parent_ts_d.values():
+            timestamps.update(v)
+        CX_EDGES[node][node_ts] = cx_edges_d_node_ts
+
         for ts in sorted(timestamps):
             if ts < earliest_ts:
                 ts = earliest_ts
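The hunk above replaces the single `timestamps.add(node_ts)` with a pass that reads the node's cross-chunk edges at its own timestamp and unions in the parent timestamps of every edge partner, so `CX_EDGES[node]` can be populated for each timestamp at which a partner may have changed. A minimal sketch of that set-union pattern, using hypothetical stand-ins for the chunkedgraph helpers (`_get_cx_edges_at_timestamp`, `get_parent_timestamps`), might look like this:

```python
import numpy as np

# Placeholder for an empty Nx2 edge array, mirroring the empty_2d used in the diff.
empty_2d = np.empty((0, 2), dtype=np.uint64)


def collect_relevant_timestamps(own_timestamps, cx_edges_at_node_ts, partner_parent_ts):
    """
    own_timestamps: set of timestamps for the node itself.
    cx_edges_at_node_ts: dict of layer -> Nx2 array of [node, partner] IDs.
    partner_parent_ts: dict of partner ID -> set of parent timestamps
        (hypothetical stand-in for a get_parent_timestamps() lookup).
    """
    timestamps = set(own_timestamps)
    # Concatenate all cross-chunk edges and union in each partner's timestamps.
    edges = np.concatenate([empty_2d] + list(cx_edges_at_node_ts.values()))
    for partner in np.unique(edges[:, 1]):
        timestamps.update(partner_parent_ts.get(int(partner), set()))
    return timestamps
```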
@@ -152,21 +159,25 @@ def update_chunk(
     nodes_ts = cg.get_node_timestamps(nodes, return_numpy=False, normalize=True)
     _populate_cx_edges_with_timestamps(cg, layer, nodes, nodes_ts, earliest_ts)
 
-    task_size = int(math.ceil(len(nodes) / mp.cpu_count() / 2))
-    chunked_nodes = chunked(nodes, task_size)
-    chunked_nodes_ts = chunked(nodes_ts, task_size)
-    cg_info = cg.get_serialized_info()
-
-    tasks = []
-    for chunk, ts_chunk in zip(chunked_nodes, chunked_nodes_ts):
-        args = (cg_info, layer, chunk, ts_chunk, earliest_ts)
-        tasks.append(args)
-
-    with mp.Pool(min(mp.cpu_count(), len(tasks))) as pool:
-        _ = list(
-            tqdm(
-                pool.imap_unordered(_update_cross_edges_helper, tasks),
-                total=len(tasks),
+    if nodes:
+        for node, node_ts in zip(nodes, nodes_ts):
+            update_cross_edges(cg, layer, node, node_ts, earliest_ts)
+    else:
+        task_size = int(math.ceil(len(nodes) / mp.cpu_count() / 2))
+        chunked_nodes = chunked(nodes, task_size)
+        chunked_nodes_ts = chunked(nodes_ts, task_size)
+        cg_info = cg.get_serialized_info()
+
+        tasks = []
+        for chunk, ts_chunk in zip(chunked_nodes, chunked_nodes_ts):
+            args = (cg_info, layer, chunk, ts_chunk, earliest_ts)
+            tasks.append(args)
+
+        with mp.Pool(min(mp.cpu_count(), len(tasks))) as pool:
+            _ = list(
+                tqdm(
+                    pool.imap_unordered(_update_cross_edges_helper, tasks),
+                    total=len(tasks),
+                )
             )
-        )
     print(f"total elaspsed time: {time.time() - start}")
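The second hunk keeps the existing multiprocessing fan-out in an `else` branch and adds a serial path that calls `update_cross_edges` per node when `nodes` is non-empty. The pool path follows a common pattern: size tasks so each CPU gets roughly two chunks, zip the node and timestamp chunks together, and drain `imap_unordered` through `tqdm` for progress. A self-contained sketch of that pattern, with a toy worker in place of `_update_cross_edges_helper` and assuming a non-empty node list, could be:

```python
import math
import multiprocessing as mp

from tqdm import tqdm


def _toy_worker(args):
    # Stand-in for _update_cross_edges_helper: receives one (chunk, ts_chunk) task.
    chunk, ts_chunk = args
    return len(chunk)


def fan_out(nodes, nodes_ts):
    # Roughly two tasks per CPU, as in the diff; assumes nodes is non-empty.
    task_size = int(math.ceil(len(nodes) / mp.cpu_count() / 2))
    tasks = [
        (nodes[i : i + task_size], nodes_ts[i : i + task_size])
        for i in range(0, len(nodes), task_size)
    ]
    with mp.Pool(min(mp.cpu_count(), len(tasks))) as pool:
        # imap_unordered yields results as workers finish; tqdm shows progress.
        return list(
            tqdm(pool.imap_unordered(_toy_worker, tasks), total=len(tasks))
        )


if __name__ == "__main__":
    print(fan_out(list(range(100)), list(range(100))))
```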