@@ -112,23 +112,23 @@ def arg_from_proto(arg: bloq_pb2.BloqArg) -> Dict[str, Any]:
 
 class _BloqLibDeserializer:
     def __init__(self, lib: bloq_pb2.BloqLibrary):
-        self.idx_to_proto: Dict[int, bloq_pb2.BloqLibrary.BloqWithDecomposition] = {
+        self.id_to_proto: Dict[int, bloq_pb2.BloqLibrary.BloqWithDecomposition] = {
             b.bloq_id: b for b in lib.table
         }
-        self.idx_to_bloq: Dict[int, Bloq] = {}
+        self.id_to_bloq: Dict[int, Bloq] = {}
         self.dangling_to_singleton = {"LeftDangle": LeftDangle, "RightDangle": RightDangle}
 
     def bloq_id_to_bloq(self, bloq_id: int):
-        """Constructs a Bloq corresponding to a `bloq_id` given an `idx_to_proto` mapping.
+        """Constructs a Bloq corresponding to a `bloq_id` given an `id_to_proto` mapping.
 
-        The `idx_to_proto` mappping is constructed using a `BloqLibrary`.
-        The `idx_to_bloq` mapping acts as a cache to avoid redundant deserialization of Bloqs.
+        The `id_to_proto` mapping is constructed using a `BloqLibrary`.
+        The `id_to_bloq` mapping acts as a cache to avoid redundant deserialization of Bloqs.
126126 """
127- if bloq_id in self .idx_to_bloq :
128- return self .idx_to_bloq [bloq_id ]
129- bloq_proto : bloq_pb2 .BloqLibrary .BloqWithDecomposition = self .idx_to_proto [bloq_id ]
127+ if bloq_id in self .id_to_bloq :
128+ return self .id_to_bloq [bloq_id ]
129+ bloq_proto : bloq_pb2 .BloqLibrary .BloqWithDecomposition = self .id_to_proto [bloq_id ]
130130 if bloq_proto .bloq .name .endswith ('.CompositeBloq' ):
131- self .idx_to_bloq [bloq_id ] = CompositeBloq (
131+ self .id_to_bloq [bloq_id ] = CompositeBloq (
132132 connections = tuple (
133133 self ._connection_from_proto (cxn ) for cxn in bloq_proto .decomposition
134134 ),
@@ -141,10 +141,10 @@ def bloq_id_to_bloq(self, bloq_id: int):
                     kwargs[arg.name] = self.bloq_id_to_bloq(arg.subbloq)
                 else:
                     kwargs.update(arg_from_proto(arg))
-            self.idx_to_bloq[bloq_id] = self._construct_bloq(bloq_proto.bloq.name, **kwargs)
+            self.id_to_bloq[bloq_id] = self._construct_bloq(bloq_proto.bloq.name, **kwargs)
         else:
             raise ValueError(f"Unable to find a Bloq corresponding to {bloq_proto.bloq.name=}")
-        return self.idx_to_bloq[bloq_id]
+        return self.id_to_bloq[bloq_id]
 
     def _construct_bloq(self, name: str, **kwargs):
         """Construct a Bloq using serialized name and BloqArgs."""
@@ -181,30 +181,38 @@ def bloqs_to_proto(
     pred: Callable[[BloqInstance], bool] = lambda _: True,
     max_depth: int = 1,
 ) -> bloq_pb2.BloqLibrary:
-    """Serializes one or more Bloqs as a `BloqLibrary`."""
+    """Serializes one or more Bloqs as a `BloqLibrary`.
 
-    bloq_to_idx: Dict[Bloq, int] = {}
+    A `BloqLibrary` contains multiple bloqs and their hierarchical decompositions. Since
+    decompositions can use bloq objects that are not explicitly listed in the `bloqs` argument to
+    this function, this routine recursively adds any bloq objects it encounters in decompositions
+    to the bloq library.
+    """
+
+    # The bloq library uses a unique integer index as a simple address for each bloq object.
+    # Set up this mapping and populate it by recursively searching for subbloqs.
+    bloq_to_id: Dict[Bloq, int] = {}
     for bloq in bloqs:
-        _add_bloq_to_dict(bloq, bloq_to_idx)
-        _populate_bloq_to_idx(bloq, bloq_to_idx, pred, max_depth)
+        _assign_bloq_an_id(bloq, bloq_to_id)
+        _search_for_subbloqs(bloq, bloq_to_id, pred, max_depth)
 
     # Decompose[..]Error is raised if `bloq` does not have a decomposition.
     # KeyError is raised if `bloq` has a decomposition, but we do not wish to serialize it
     # because of conditions checked by `pred` and `max_depth`.
     stop_recursing_exceptions = (DecomposeNotImplementedError, DecomposeTypeError, KeyError)
 
-    # `bloq_to_idx` would now contain a list of all bloqs that should be serialized.
+    # `bloq_to_id` now contains all of the bloqs that should be serialized.
     library = bloq_pb2.BloqLibrary(name=name)
-    for bloq, bloq_id in bloq_to_idx.items():
+    for bloq, bloq_id in bloq_to_id.items():
         try:
             cbloq = bloq if isinstance(bloq, CompositeBloq) else bloq.decompose_bloq()
-            decomposition = [_connection_to_proto(cxn, bloq_to_idx) for cxn in cbloq.connections]
+            decomposition = [_connection_to_proto(cxn, bloq_to_id) for cxn in cbloq.connections]
         except stop_recursing_exceptions:
             decomposition = None
 
         try:
             bloq_counts = {
-                bloq_to_idx[b]: args.int_or_sympy_to_proto(c)
+                bloq_to_id[b]: args.int_or_sympy_to_proto(c)
                 for b, c in sorted(bloq.bloq_counts().items(), key=lambda x: type(x[0]).__name__)
             }
         except stop_recursing_exceptions:
@@ -214,7 +222,7 @@ def bloqs_to_proto(
             bloq_id=bloq_id,
             decomposition=decomposition,
             bloq_counts=bloq_counts,
-            bloq=_bloq_to_proto(bloq, bloq_to_idx=bloq_to_idx),
+            bloq=_bloq_to_proto(bloq, bloq_to_id=bloq_to_id),
         )
     return library
 
@@ -240,40 +248,40 @@ def _iter_fields(bloq: Bloq):
             yield field
 
 
-def _connection_to_proto(cxn: Connection, bloq_to_idx: Dict[Bloq, int]):
+def _connection_to_proto(cxn: Connection, bloq_to_id: Dict[Bloq, int]):
     return bloq_pb2.Connection(
-        left=_soquet_to_proto(cxn.left, bloq_to_idx), right=_soquet_to_proto(cxn.right, bloq_to_idx)
+        left=_soquet_to_proto(cxn.left, bloq_to_id), right=_soquet_to_proto(cxn.right, bloq_to_id)
     )
 
 
-def _soquet_to_proto(soq: Soquet, bloq_to_idx: Dict[Bloq, int]) -> bloq_pb2.Soquet:
+def _soquet_to_proto(soq: Soquet, bloq_to_id: Dict[Bloq, int]) -> bloq_pb2.Soquet:
     if isinstance(soq.binst, DanglingT):
         return bloq_pb2.Soquet(
             dangling_t=repr(soq.binst), register=registers.register_to_proto(soq.reg), index=soq.idx
         )
     else:
         return bloq_pb2.Soquet(
-            bloq_instance=_bloq_instance_to_proto(soq.binst, bloq_to_idx),
+            bloq_instance=_bloq_instance_to_proto(soq.binst, bloq_to_id),
             register=registers.register_to_proto(soq.reg),
             index=soq.idx,
         )
 
 
 def _bloq_instance_to_proto(
-    binst: BloqInstance, bloq_to_idx: Dict[Bloq, int]
+    binst: BloqInstance, bloq_to_id: Dict[Bloq, int]
 ) -> bloq_pb2.BloqInstance:
-    return bloq_pb2.BloqInstance(instance_id=binst.i, bloq_id=bloq_to_idx[binst.bloq])
+    return bloq_pb2.BloqInstance(instance_id=binst.i, bloq_id=bloq_to_id[binst.bloq])
 
 
-def _add_bloq_to_dict(bloq: Bloq, bloq_to_idx: Dict[Bloq, int]):
-    """Adds `{bloq: len(bloq_to_idx)}` to `bloq_to_idx` dictionary if it doesn't exist already."""
-    if bloq not in bloq_to_idx:
-        next_idx = len(bloq_to_idx)
-        bloq_to_idx[bloq] = next_idx
+def _assign_bloq_an_id(bloq: Bloq, bloq_to_id: Dict[Bloq, int]):
277+ """Assigns a new index for `bloq` and records it into the `bloq_to_id` mapping ."""
+    if bloq not in bloq_to_id:
+        next_idx = len(bloq_to_id)
+        bloq_to_id[bloq] = next_idx
 
 
-def _cbloq_dot_bloq_instances(cbloq: CompositeBloq) -> List[BloqInstance]:
-    """Equivalent to `cbloq.bloq_instances`, but preserves insertion order among Bloq instances."""
+def _cbloq_ordered_bloq_instances(cbloq: CompositeBloq) -> List[BloqInstance]:
+    """Equivalent to `cbloq.bloq_instances`, but preserves insertion order among bloq instances."""
     ret = {}
     for cxn in cbloq.connections:
         for soq in [cxn.left, cxn.right]:
@@ -282,47 +290,60 @@ def _cbloq_dot_bloq_instances(cbloq: CompositeBloq) -> List[BloqInstance]:
     return list(ret.keys())
 
 
-def _populate_bloq_to_idx(
-    bloq: Bloq, bloq_to_idx: Dict[Bloq, int], pred: Callable[[BloqInstance], bool], max_depth: int
-):
-    """Recursively track all primitive Bloqs to be serialized, as part of `bloq_to_idx` dictionary."""
+def _search_for_subbloqs(
+    bloq: Bloq, bloq_to_id: Dict[Bloq, int], pred: Callable[[BloqInstance], bool], max_depth: int
+) -> None:
296+ """Recursively finds all bloqs.
297+
298+ This function inspects `bloq`'s 1) decomposition, 2) call graph, and 3) attributes list for
299+ any bloq objects. For each bloq object that we discover, we will recurse on it.
300+
301+ All bloqs are stored in `bloq_to_id` as we find them.
302+
303+ `max_depth` will be decremented for each level of recursion. If `max_depth` reaches zero,
304+ only the bloqs attributes will be searched.
289305
290- assert bloq in bloq_to_idx
306+ `pred` is evaluated on each bloq instance in the bloq's decomposition. If it evaluates to
307+ `False`, recursion will stop *after* processing the sub-bloq and its attributes.
308+
309+ `pred` is not used when querying the call graph nor when inspecting the bloq's attributes.
310+ """
311+
312+ assert bloq in bloq_to_id
291313 if max_depth > 0 :
-        # Decompose the current Bloq and track it's decomposed Bloqs.
+        # Search the bloq's decomposition
         try:
             cbloq = bloq if isinstance(bloq, CompositeBloq) else bloq.decompose_bloq()
-            for binst in _cbloq_dot_bloq_instances(cbloq):
-                _add_bloq_to_dict(binst.bloq, bloq_to_idx)
+            for binst in _cbloq_ordered_bloq_instances(cbloq):
+                subbloq = binst.bloq
+                _assign_bloq_an_id(subbloq, bloq_to_id)
                 if pred(binst):
-                    _populate_bloq_to_idx(binst.bloq, bloq_to_idx, pred, max_depth - 1)
+                    _search_for_subbloqs(subbloq, bloq_to_id, pred, max_depth - 1)
                 else:
-                    _populate_bloq_to_idx(binst.bloq, bloq_to_idx, pred, 0)
+                    _search_for_subbloqs(subbloq, bloq_to_id, pred, 0)
         except (DecomposeTypeError, DecomposeNotImplementedError) as e:
-            # DecomposeTypeError/DecomposeNotImplementedError are raised if `bloq` does not have a
-            # decomposition.
-            ...
+            # No decomposition, nothing to recurse on.
+            pass
 
-    # Approximately decompose the current Bloq and its decomposed Bloqs.
+    # Search the bloq's call graph
     try:
         for subbloq, _ in bloq.bloq_counts().items():
-            _add_bloq_to_dict(subbloq, bloq_to_idx)
-            _populate_bloq_to_idx(subbloq, bloq_to_idx, pred, 0)
-
+            _assign_bloq_an_id(subbloq, bloq_to_id)
+            _search_for_subbloqs(subbloq, bloq_to_id, pred, 0)
     except NotImplementedError:
-        # NotImplementedError is raised if `bloq` does not implement bloq_counts.
-        ...
+        # No call graph, nothing to recurse on.
+        pass
 
-    # If the current Bloq contains other Bloqs as sub-bloqs, add them to the `bloq_to_idx` dict.
+    # Search the bloq's attributes.
     # This is only supported for Bloqs implemented as dataclasses / attrs.
     for field in _iter_fields(bloq):
         subbloq = getattr(bloq, field.name)
         if isinstance(subbloq, Bloq):
-            _add_bloq_to_dict(subbloq, bloq_to_idx)
-            _populate_bloq_to_idx(subbloq, bloq_to_idx, pred, 0)
+            _assign_bloq_an_id(subbloq, bloq_to_id)
+            _search_for_subbloqs(subbloq, bloq_to_id, pred, 0)
 
 
-def _bloq_to_proto(bloq: Bloq, *, bloq_to_idx: Dict[Bloq, int]) -> bloq_pb2.Bloq:
+def _bloq_to_proto(bloq: Bloq, *, bloq_to_id: Dict[Bloq, int]) -> bloq_pb2.Bloq:
     try:
         t_complexity = annotations.t_complexity_to_proto(bloq.t_complexity())
     except (DecomposeTypeError, DecomposeNotImplementedError, TypeError):
@@ -333,25 +354,25 @@ def _bloq_to_proto(bloq: Bloq, *, bloq_to_idx: Dict[Bloq, int]) -> bloq_pb2.Bloq
         name=name,
         registers=registers.registers_to_proto(bloq.signature),
         t_complexity=t_complexity,
-        args=_bloq_args_to_proto(bloq, bloq_to_idx=bloq_to_idx),
+        args=_bloq_args_to_proto(bloq, bloq_to_id=bloq_to_id),
     )
 
 
 def _bloq_args_to_proto(
-    bloq: Bloq, *, bloq_to_idx: Dict[Bloq, int]
+    bloq: Bloq, *, bloq_to_id: Dict[Bloq, int]
 ) -> Optional[List[bloq_pb2.BloqArg]]:
     if isinstance(bloq, CompositeBloq):
         return None
 
     ret = [
-        _bloq_arg_to_proto(name=field.name, val=getattr(bloq, field.name), bloq_to_idx=bloq_to_idx)
+        _bloq_arg_to_proto(name=field.name, val=getattr(bloq, field.name), bloq_to_id=bloq_to_id)
         for field in _iter_fields(bloq)
         if getattr(bloq, field.name) is not None
     ]
     return ret if ret else None
 
 
-def _bloq_arg_to_proto(name: str, val: Any, bloq_to_idx: Dict[Bloq, int]) -> bloq_pb2.BloqArg:
+def _bloq_arg_to_proto(name: str, val: Any, bloq_to_id: Dict[Bloq, int]) -> bloq_pb2.BloqArg:
     if isinstance(val, Bloq):
-        return bloq_pb2.BloqArg(name=name, subbloq=bloq_to_idx[val])
+        return bloq_pb2.BloqArg(name=name, subbloq=bloq_to_id[val])
     return arg_to_proto(name=name, val=val)
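
For orientation, a rough usage sketch of the serialization entry points touched by this diff follows. It is a sketch only, not part of the change: the import paths, the `CNOT` example bloq, the exact `bloqs_to_proto` call signature, and the `bloqs_from_proto` helper are assumptions about the surrounding qualtran API and may differ from the actual code.

    # Sketch only, assuming qualtran's serialization module layout; names marked
    # "assumed" are not confirmed by the diff above.
    from qualtran.bloqs.basic_gates import CNOT  # assumed example bloq
    from qualtran.serialization import bloq as bloq_serialization  # assumed module path

    # Serialize a bloq into a BloqLibrary proto. bloqs_to_proto recursively adds any
    # bloq objects found in decompositions, call graphs, and attributes (see
    # _search_for_subbloqs). max_depth bounds how deep decompositions are followed;
    # pred can veto recursing into a particular BloqInstance.
    library = bloq_serialization.bloqs_to_proto(
        CNOT(), name='my_library', max_depth=1, pred=lambda binst: True
    )

    # Each entry in the library table carries a unique integer bloq_id plus the bloq's
    # args and, when available, its decomposition and bloq counts.
    for entry in library.table:
        print(entry.bloq_id, entry.bloq.name)

    # Deserialization resolves bloq_ids back into Bloq objects. bloqs_from_proto is
    # assumed to be the public wrapper around _BloqLibDeserializer.bloq_id_to_bloq.
    bloqs = bloq_serialization.bloqs_from_proto(library)

The `pred` and `max_depth` arguments mirror the behavior documented in `_search_for_subbloqs` above: lowering `max_depth` or returning `False` from `pred` keeps a sub-bloq in the library but stops further recursion into its decomposition.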