From b7aa7767befe379a770bccc3462dc402c3c7c234 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Wed, 21 Aug 2024 14:09:27 +0900 Subject: [PATCH 01/79] Export to onnx --- export_onnx.py | 162 +++++++++++++++++++++++++++++++++++ sam2/modeling/sam2_base.py | 30 +++++-- sam2/sam2_image_predictor.py | 37 +++++--- 3 files changed, 214 insertions(+), 15 deletions(-) create mode 100644 export_onnx.py diff --git a/export_onnx.py b/export_onnx.py new file mode 100644 index 000000000..2ad5575de --- /dev/null +++ b/export_onnx.py @@ -0,0 +1,162 @@ +import os +# if using Apple MPS, fall back to CPU for unsupported ops +os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" +import numpy as np +import torch +import matplotlib.pyplot as plt +from PIL import Image + +# %% +# select the device for computation +if torch.cuda.is_available(): + device = torch.device("cuda") +#elif torch.backends.mps.is_available(): # low accuracy +# device = torch.device("mps") +else: + device = torch.device("cpu") +print(f"using device: {device}") + +if device.type == "cuda": + # use bfloat16 for the entire notebook + torch.autocast("cuda", dtype=torch.bfloat16).__enter__() + # turn on tfloat32 for Ampere GPUs (https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices) + if torch.cuda.get_device_properties(0).major >= 8: + torch.backends.cuda.matmul.allow_tf32 = True + torch.backends.cudnn.allow_tf32 = True +elif device.type == "mps": + print( + "\nSupport for MPS devices is preliminary. SAM 2 is trained with CUDA and might " + "give numerically different outputs and sometimes degraded performance on MPS. " + "See e.g. https://github.com/pytorch/pytorch/issues/84936 for a discussion." + ) + +# %% +np.random.seed(3) + +def show_mask(mask, ax, random_color=False, borders = True): + if random_color: + color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0) + else: + color = np.array([30/255, 144/255, 255/255, 0.6]) + h, w = mask.shape[-2:] + mask = mask.astype(np.uint8) + mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1) + if borders: + import cv2 + contours, _ = cv2.findContours(mask,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) + # Try to smooth contours + contours = [cv2.approxPolyDP(contour, epsilon=0.01, closed=True) for contour in contours] + mask_image = cv2.drawContours(mask_image, contours, -1, (1, 1, 1, 0.5), thickness=2) + ax.imshow(mask_image) + +def show_points(coords, labels, ax, marker_size=375): + pos_points = coords[labels==1] + neg_points = coords[labels==0] + ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25) + ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25) + +def show_box(box, ax): + x0, y0 = box[0], box[1] + w, h = box[2] - box[0], box[3] - box[1] + ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0, 0, 0, 0), lw=2)) + +def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_labels=None, borders=True): + for i, (mask, score) in enumerate(zip(masks, scores)): + plt.figure(figsize=(10, 10)) + plt.imshow(image) + show_mask(mask, plt.gca(), borders=borders) + if point_coords is not None: + assert input_labels is not None + show_points(point_coords, input_labels, plt.gca()) + if box_coords is not None: + # boxes + show_box(box_coords, plt.gca()) + if len(scores) > 1: + plt.title(f"Mask {i+1}, Score: {score:.3f}", fontsize=18) + plt.axis('off') + plt.show() + +# %% [markdown] 
+# ## Example image + +show = False + +# %% +image = Image.open('notebooks/images/truck.jpg') +image = np.array(image.convert("RGB")) + +# %% +if False: + plt.figure(figsize=(10, 10)) + plt.imshow(image) + plt.axis('on') + plt.show() + +# %% [markdown] +# ## Selecting objects with SAM 2 + +# %% [markdown] +# First, load the SAM 2 model and predictor. Change the path below to point to the SAM 2 checkpoint. Running on CUDA and using the default model are recommended for best results. + +# %% +from sam2.build_sam import build_sam2 +from sam2.sam2_image_predictor import SAM2ImagePredictor + +sam2_checkpoint = "./checkpoints/sam2_hiera_large.pt" +model_cfg = "sam2_hiera_l.yaml" + +sam2_model = build_sam2(model_cfg, sam2_checkpoint, device=device) + +predictor = SAM2ImagePredictor(sam2_model) + +# %% [markdown] +# Process the image to produce an image embedding by calling `SAM2ImagePredictor.set_image`. `SAM2ImagePredictor` remembers this embedding and will use it for subsequent mask prediction. + +# %% +export_to_onnx = True +export_to_tflite = False +predictor.set_image(image, export_to_onnx = export_to_onnx, export_to_tflite = export_to_tflite) + +# %% [markdown] +# To select the truck, choose a point on it. Points are input to the model in (x,y) format and come with labels 1 (foreground point) or 0 (background point). Multiple points can be input; here we use only one. The chosen point will be shown as a star on the image. + +# %% +input_point = np.array([[500, 375]]) +input_label = np.array([1]) + +# %% +if False: + plt.figure(figsize=(10, 10)) + plt.imshow(image) + show_points(input_point, input_label, plt.gca()) + plt.axis('on') + plt.show() + +# %% +print(predictor._features["image_embed"].shape, predictor._features["image_embed"][-1].shape) + +# %% [markdown] +# Predict with `SAM2ImagePredictor.predict`. The model returns masks, quality predictions for those masks, and low resolution mask logits that can be passed to the next iteration of prediction. + +# %% +masks, scores, logits = predictor.predict( + point_coords=input_point, + point_labels=input_label, + multimask_output=True, + export_to_onnx=export_to_onnx, + export_to_tflite=export_to_tflite +) +sorted_ind = np.argsort(scores)[::-1] +masks = masks[sorted_ind] +scores = scores[sorted_ind] +logits = logits[sorted_ind] + +# %% [markdown] +# With `multimask_output=True` (the default setting), SAM 2 outputs 3 masks, where `scores` gives the model's own estimation of the quality of these masks. This setting is intended for ambiguous input prompts, and helps the model disambiguate different objects consistent with the prompt. When `False`, it will return a single mask. For ambiguous prompts such as a single point, it is recommended to use `multimask_output=True` even if only a single mask is desired; the best single mask can be chosen by picking the one with the highest score returned in `scores`. This will often result in a better mask. + +# %% +masks.shape # (number_of_masks) x H x W + +# %% +if show: + show_masks(image, masks, scores, point_coords=input_point, input_labels=input_label, borders=True) diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 224a8c1bb..dc44640d7 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -192,11 +192,31 @@ def __init__( def device(self): return next(self.parameters()).device - def forward(self, *args, **kwargs): - raise NotImplementedError( - "Please use the corresponding methods in SAM2VideoPredictor for inference." 
- "See notebooks/video_predictor_example.ipynb for an example." - ) + def forward(self, input_image): + backbone_out = self.forward_image(input_image) + _, vision_feats, _, _ = self._prepare_backbone_features(backbone_out) + + # Add no_mem_embed, which is added to the lowest rest feat. map during training on videos + if self.directly_add_no_mem_embed: + vision_feats[-1] = vision_feats[-1] + self.no_mem_embed + + # Spatial dim for backbone feature maps + _bb_feat_sizes = [ + (256, 256), + (128, 128), + (64, 64), + ] + + feats = [ + feat.permute(1, 2, 0).view(1, -1, *feat_size) + for feat, feat_size in zip(vision_feats[::-1], _bb_feat_sizes[::-1]) + ][::-1] + return feats[-1], feats[:-1] + + #raise NotImplementedError( + # "Please use the corresponding methods in SAM2VideoPredictor for inference." + # "See notebooks/video_predictor_example.ipynb for an example." + #) def _build_sam_heads(self): """Build SAM-style prompt encoder and mask decoder.""" diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index 41ce53af5..63ef17b1e 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -86,6 +86,8 @@ def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2ImagePredictor": def set_image( self, image: Union[np.ndarray, Image], + export_to_onnx = False, + export_to_tflite = False ) -> None: """ Calculates the image embeddings for the provided image, allowing @@ -114,16 +116,29 @@ def set_image( len(input_image.shape) == 4 and input_image.shape[1] == 3 ), f"input_image must be of size 1x3xHxW, got {input_image.shape}" logging.info("Computing image embeddings for the provided image...") - backbone_out = self.model.forward_image(input_image) - _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out) - # Add no_mem_embed, which is added to the lowest rest feat. map during training on videos - if self.model.directly_add_no_mem_embed: - vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed - - feats = [ - feat.permute(1, 2, 0).view(1, -1, *feat_size) - for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1]) - ][::-1] + if export_to_onnx: + print("input_image", input_image.shape) + torch.onnx.export( + self.model, (input_image), 'forward_image.onnx', + input_names=["input_image"], + output_names=["feats"], + dynamic_axes={ + 'input_image': {2: 'width', 3: 'height'} + }, + verbose=False, opset_version=17 + ) + feats = self.model(input_image) + else: + backbone_out = self.model.forward_image(input_image) + _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out) + # Add no_mem_embed, which is added to the lowest rest feat. map during training on videos + if self.model.directly_add_no_mem_embed: + vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed + + feats = [ + feat.permute(1, 2, 0).view(1, -1, *feat_size) + for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1]) + ][::-1] self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]} self._is_image_set = True logging.info("Image embeddings computed.") @@ -243,6 +258,8 @@ def predict( multimask_output: bool = True, return_logits: bool = False, normalize_coords=True, + export_to_onnx=False, + export_to_tflite=False ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ Predict masks for the given input prompts, using the currently set image. 
From 5c8c6cdff0159e5caaa57d70fbdab7ff4ea70935 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Wed, 21 Aug 2024 14:46:32 +0900 Subject: [PATCH 02/79] Export to onnx --- export_onnx.py | 6 ++++ sam2/modeling/sam2_base.py | 2 +- sam2/sam2_image_predictor.py | 70 +++++++++++++++++++++++++----------- 3 files changed, 57 insertions(+), 21 deletions(-) diff --git a/export_onnx.py b/export_onnx.py index 2ad5575de..50f7fdac6 100644 --- a/export_onnx.py +++ b/export_onnx.py @@ -115,8 +115,12 @@ def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_l # %% export_to_onnx = True export_to_tflite = False + predictor.set_image(image, export_to_onnx = export_to_onnx, export_to_tflite = export_to_tflite) +export_to_onnx = True +export_to_tflite = False + # %% [markdown] # To select the truck, choose a point on it. Points are input to the model in (x,y) format and come with labels 1 (foreground point) or 0 (background point). Multiple points can be input; here we use only one. The chosen point will be shown as a star on the image. @@ -160,3 +164,5 @@ def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_l # %% if show: show_masks(image, masks, scores, point_coords=input_point, input_labels=input_label, borders=True) + +print("Success!") \ No newline at end of file diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index dc44640d7..c485dddc6 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -211,7 +211,7 @@ def forward(self, input_image): feat.permute(1, 2, 0).view(1, -1, *feat_size) for feat, feat_size in zip(vision_feats[::-1], _bb_feat_sizes[::-1]) ][::-1] - return feats[-1], feats[:-1] + return feats #raise NotImplementedError( # "Please use the corresponding methods in SAM2VideoPredictor for inference." diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index 63ef17b1e..c23efc4f4 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -119,12 +119,9 @@ def set_image( if export_to_onnx: print("input_image", input_image.shape) torch.onnx.export( - self.model, (input_image), 'forward_image.onnx', + self.model, (input_image), 'image_encoder.onnx', input_names=["input_image"], - output_names=["feats"], - dynamic_axes={ - 'input_image': {2: 'width', 3: 'height'} - }, + output_names=["feats1", "feats2", "feats3"], verbose=False, opset_version=17 ) feats = self.model(input_image) @@ -300,7 +297,6 @@ def predict( ) # Transform input prompts - mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts( point_coords, point_labels, box, mask_input, normalize_coords ) @@ -312,6 +308,8 @@ def predict( mask_input, multimask_output, return_logits=return_logits, + export_to_onnx=export_to_onnx, + export_to_tflite=export_to_tflite ) masks_np = masks.squeeze(0).float().detach().cpu().numpy() @@ -360,6 +358,8 @@ def _predict( multimask_output: bool = True, return_logits: bool = False, img_idx: int = -1, + export_to_onnx = False, + export_to_tflite = False ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Predict masks for the given input prompts, using the currently set image. 
@@ -420,11 +420,26 @@ def _predict( else: concat_points = (box_coords, box_labels) - sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder( - points=concat_points, - boxes=None, - masks=mask_input, - ) + + if export_to_onnx: + #print("concat_points", concat_points.shape) + #print("mask_input", mask_input.shape) + torch.onnx.export( + self.model.sam_prompt_encoder, (concat_points, None, mask_input), 'prompt_encoder.onnx', + input_names=["concat_points", "mask_input"], + output_names=["sparse_embeddings", "dense_embeddings"], + dynamic_axes={ + 'concat_points': {1: 'n'}, + }, + verbose=False, opset_version=17 + ) + sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder(concat_points, None, mask_input) + else: + sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder( + points=concat_points, + boxes=None, + masks=mask_input, + ) # Predict masks batched_mode = ( @@ -434,15 +449,30 @@ def _predict( feat_level[img_idx].unsqueeze(0) for feat_level in self._features["high_res_feats"] ] - low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder( - image_embeddings=self._features["image_embed"][img_idx].unsqueeze(0), - image_pe=self.model.sam_prompt_encoder.get_dense_pe(), - sparse_prompt_embeddings=sparse_embeddings, - dense_prompt_embeddings=dense_embeddings, - multimask_output=multimask_output, - repeat_image=batched_mode, - high_res_features=high_res_features, - ) + if export_to_onnx: + print("sparse_embeddings", sparse_embeddings.shape) + print("dense_embeddings", dense_embeddings.shape) + torch.onnx.export( + self.model.sam_mask_decoder, (self._features["image_embed"][img_idx].unsqueeze(0), self.model.sam_prompt_encoder.get_dense_pe(), sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features), + 'mask_decoder.onnx', + input_names=["image_embeddings", "image_pe", "sparse_prompt_embeddings", "dense_prompt_embeddings", "multimask_output", "repeat_image", "high_res_features"], + output_names=["low_res_masks", "iou_predictions"], + #dynamic_axes={ + # 'unnorm_coords': {2: 'width', 3: 'height'} + #}, + verbose=False, opset_version=17 + ) + low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder(self._features["image_embed"][img_idx].unsqueeze(0), self.model.sam_prompt_encoder.get_dense_pe(), sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features) + else: + low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder( + image_embeddings=self._features["image_embed"][img_idx].unsqueeze(0), + image_pe=self.model.sam_prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + repeat_image=batched_mode, + high_res_features=high_res_features, + ) # Upscale the masks to the original image resolution masks = self._transforms.postprocess_masks( From baa5202b8b266e8e52b7c2cf2e88cfbdb5689583 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Wed, 21 Aug 2024 14:50:59 +0900 Subject: [PATCH 03/79] Export to tflite --- sam2/sam2_image_predictor.py | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index c23efc4f4..b3170305a 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -124,8 +124,12 @@ def set_image( output_names=["feats1", "feats2", "feats3"], verbose=False, opset_version=17 ) - feats = self.model(input_image) - else: + if export_to_tflite: + import 
ai_edge_torch + sample_inputs = (input_image,) + edge_model = ai_edge_torch.convert(self.model, sample_inputs) + edge_model.export("image_encoder.tflite") + if True: backbone_out = self.model.forward_image(input_image) _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out) # Add no_mem_embed, which is added to the lowest rest feat. map during training on videos @@ -433,8 +437,12 @@ def _predict( }, verbose=False, opset_version=17 ) - sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder(concat_points, None, mask_input) - else: + if export_to_tflite: + import ai_edge_torch + sample_inputs = (concat_points, None, mask_input) + edge_model = ai_edge_torch.convert(self.model.sam_prompt_encoder, sample_inputs) + edge_model.export("prompt_encoder.tflite") + if True: sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder( points=concat_points, boxes=None, @@ -462,8 +470,12 @@ def _predict( #}, verbose=False, opset_version=17 ) - low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder(self._features["image_embed"][img_idx].unsqueeze(0), self.model.sam_prompt_encoder.get_dense_pe(), sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features) - else: + if export_to_tflite: + import ai_edge_torch + sample_inputs = (self._features["image_embed"][img_idx].unsqueeze(0), self.model.sam_prompt_encoder.get_dense_pe(), sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features,) + edge_model = ai_edge_torch.convert(self.model.sam_mask_decoder, sample_inputs) + edge_model.export("mask_decoder.tflite") + if True: low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder( image_embeddings=self._features["image_embed"][img_idx].unsqueeze(0), image_pe=self.model.sam_prompt_encoder.get_dense_pe(), From 3c72e9062465f08a2c27c1d7cfd062b0cb489f26 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Wed, 21 Aug 2024 15:51:37 +0900 Subject: [PATCH 04/79] Implement tflite export --- export_onnx.py | 3 ++- sam2/sam2_image_predictor.py | 34 ++++++++++++++++++++++++++++++++-- 2 files changed, 34 insertions(+), 3 deletions(-) diff --git a/export_onnx.py b/export_onnx.py index 50f7fdac6..abf077bfc 100644 --- a/export_onnx.py +++ b/export_onnx.py @@ -8,12 +8,13 @@ # %% # select the device for computation -if torch.cuda.is_available(): +if False:#torch.cuda.is_available(): device = torch.device("cuda") #elif torch.backends.mps.is_available(): # low accuracy # device = torch.device("mps") else: device = torch.device("cpu") + # Require PJRT_DEVICE=CPU for tflite print(f"using device: {device}") if device.type == "cuda": diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index b3170305a..3885d4767 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -126,9 +126,39 @@ def set_image( ) if export_to_tflite: import ai_edge_torch + import tensorflow as tf sample_inputs = (input_image,) - edge_model = ai_edge_torch.convert(self.model, sample_inputs) - edge_model.export("image_encoder.tflite") + + export_float = False + export_int8 = True + + if export_float: + tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]}} + edge_model = ai_edge_torch.convert(self.model, sample_inputs, _ai_edge_converter_flags=tfl_converter_flags) + edge_model.export("image_encoder.tflite") + + if export_int8: + from ai_edge_torch.quantize import pt2e_quantizer + from ai_edge_torch.quantize import quant_config + from 
torch.ao.quantization import quantize_pt2e + + quantizer = pt2e_quantizer.PT2EQuantizer().set_global( + pt2e_quantizer.get_symmetric_quantization_config() + ) + model = torch._export.capture_pre_autograd_graph(self.model, sample_inputs) + model = quantize_pt2e.prepare_pt2e(model, quantizer) + #model(input_image.type(torch.FloatTensor)) # calibration + model = quantize_pt2e.convert_pt2e(model, fold_quantize=False) + + tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]}} + with_quantizer = ai_edge_torch.convert( + model, + sample_inputs, + quant_config=quant_config.QuantConfig(pt2e_quantizer=quantizer), + _ai_edge_converter_flags=tfl_converter_flags + ) + with_quantizer.export("image_encoder_int8.tflite") + if True: backbone_out = self.model.forward_image(input_image) _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out) From f1e5ab5fcd679d8500acf89ae334698450dad032 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Wed, 21 Aug 2024 17:23:48 +0900 Subject: [PATCH 05/79] Remove tupple --- export_onnx.py | 11 ++++++--- sam2/modeling/sam/prompt_encoder.py | 24 +++++++----------- sam2/sam2_image_predictor.py | 38 +++++++++++++++++------------ 3 files changed, 38 insertions(+), 35 deletions(-) diff --git a/export_onnx.py b/export_onnx.py index abf077bfc..952a05059 100644 --- a/export_onnx.py +++ b/export_onnx.py @@ -80,7 +80,7 @@ def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_l # %% [markdown] # ## Example image -show = False +show = True # %% image = Image.open('notebooks/images/truck.jpg') @@ -114,10 +114,12 @@ def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_l # Process the image to produce an image embedding by calling `SAM2ImagePredictor.set_image`. `SAM2ImagePredictor` remembers this embedding and will use it for subsequent mask prediction. # %% -export_to_onnx = True +model_id = "hiera_l" + +export_to_onnx = False export_to_tflite = False -predictor.set_image(image, export_to_onnx = export_to_onnx, export_to_tflite = export_to_tflite) +predictor.set_image(image, export_to_onnx = export_to_onnx, export_to_tflite = export_to_tflite, model_id = model_id) export_to_onnx = True export_to_tflite = False @@ -149,7 +151,8 @@ def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_l point_labels=input_label, multimask_output=True, export_to_onnx=export_to_onnx, - export_to_tflite=export_to_tflite + export_to_tflite=export_to_tflite, + model_id=model_id ) sorted_ind = np.argsort(scores)[::-1] masks = masks[sorted_ind] diff --git a/sam2/modeling/sam/prompt_encoder.py b/sam2/modeling/sam/prompt_encoder.py index 6b3bbb95b..7fec87b33 100644 --- a/sam2/modeling/sam/prompt_encoder.py +++ b/sam2/modeling/sam/prompt_encoder.py @@ -118,17 +118,15 @@ def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor: def _get_batch_size( self, - points: Optional[Tuple[torch.Tensor, torch.Tensor]], - boxes: Optional[torch.Tensor], + coords: Optional[torch.Tensor], + labels: Optional[torch.Tensor], masks: Optional[torch.Tensor], ) -> int: """ Gets the batch size of the output given the batch size of the input prompts. 
""" - if points is not None: - return points[0].shape[0] - elif boxes is not None: - return boxes.shape[0] + if coords is not None and labels is not None: + return coords.shape[0] elif masks is not None: return masks.shape[0] else: @@ -139,8 +137,8 @@ def _get_device(self) -> torch.device: def forward( self, - points: Optional[Tuple[torch.Tensor, torch.Tensor]], - boxes: Optional[torch.Tensor], + coords: Optional[torch.Tensor], + labels: Optional[torch.Tensor], masks: Optional[torch.Tensor], ) -> Tuple[torch.Tensor, torch.Tensor]: """ @@ -160,17 +158,13 @@ def forward( torch.Tensor: dense embeddings for the masks, in the shape Bx(embed_dim)x(embed_H)x(embed_W) """ - bs = self._get_batch_size(points, boxes, masks) + bs = self._get_batch_size(coords, labels, masks) sparse_embeddings = torch.empty( (bs, 0, self.embed_dim), device=self._get_device() ) - if points is not None: - coords, labels = points - point_embeddings = self._embed_points(coords, labels, pad=(boxes is None)) + if coords is not None and labels is not None: + point_embeddings = self._embed_points(coords, labels, pad=True) sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1) - if boxes is not None: - box_embeddings = self._embed_boxes(boxes) - sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1) if masks is not None: dense_embeddings = self._embed_masks(masks) diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index 3885d4767..e11220ac5 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -87,7 +87,8 @@ def set_image( self, image: Union[np.ndarray, Image], export_to_onnx = False, - export_to_tflite = False + export_to_tflite = False, + model_id=None ) -> None: """ Calculates the image embeddings for the provided image, allowing @@ -119,7 +120,7 @@ def set_image( if export_to_onnx: print("input_image", input_image.shape) torch.onnx.export( - self.model, (input_image), 'image_encoder.onnx', + self.model, (input_image), 'image_encoder'+model_id+'.onnx', input_names=["input_image"], output_names=["feats1", "feats2", "feats3"], verbose=False, opset_version=17 @@ -135,7 +136,7 @@ def set_image( if export_float: tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]}} edge_model = ai_edge_torch.convert(self.model, sample_inputs, _ai_edge_converter_flags=tfl_converter_flags) - edge_model.export("image_encoder.tflite") + edge_model.export("image_encoder_"+model_id+".tflite") if export_int8: from ai_edge_torch.quantize import pt2e_quantizer @@ -157,7 +158,7 @@ def set_image( quant_config=quant_config.QuantConfig(pt2e_quantizer=quantizer), _ai_edge_converter_flags=tfl_converter_flags ) - with_quantizer.export("image_encoder_int8.tflite") + with_quantizer.export("image_encoder_int8_"+model_id+".tflite") if True: backbone_out = self.model.forward_image(input_image) @@ -290,7 +291,8 @@ def predict( return_logits: bool = False, normalize_coords=True, export_to_onnx=False, - export_to_tflite=False + export_to_tflite=False, + model_id=None ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ Predict masks for the given input prompts, using the currently set image. 
@@ -343,7 +345,8 @@ def predict( multimask_output, return_logits=return_logits, export_to_onnx=export_to_onnx, - export_to_tflite=export_to_tflite + export_to_tflite=export_to_tflite, + model_id=model_id ) masks_np = masks.squeeze(0).float().detach().cpu().numpy() @@ -393,7 +396,8 @@ def _predict( return_logits: bool = False, img_idx: int = -1, export_to_onnx = False, - export_to_tflite = False + export_to_tflite = False, + model_id = None ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Predict masks for the given input prompts, using the currently set image. @@ -459,23 +463,25 @@ def _predict( #print("concat_points", concat_points.shape) #print("mask_input", mask_input.shape) torch.onnx.export( - self.model.sam_prompt_encoder, (concat_points, None, mask_input), 'prompt_encoder.onnx', - input_names=["concat_points", "mask_input"], + self.model.sam_prompt_encoder, (concat_points[0], concat_points[1], mask_input), 'prompt_encoder_'+model_id+'.onnx', + input_names=["coords", "labels", "mask_input"], output_names=["sparse_embeddings", "dense_embeddings"], dynamic_axes={ - 'concat_points': {1: 'n'}, + 'coords': {0: 'n'}, + 'labels': {0: 'n'}, }, verbose=False, opset_version=17 ) if export_to_tflite: import ai_edge_torch - sample_inputs = (concat_points, None, mask_input) + sample_inputs = (concat_points[0], concat_points[1], mask_input) edge_model = ai_edge_torch.convert(self.model.sam_prompt_encoder, sample_inputs) - edge_model.export("prompt_encoder.tflite") + edge_model.export("prompt_encoder_"+model_id+".tflite") if True: sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder( - points=concat_points, - boxes=None, + coords=concat_points[0], + labels=concat_points[1], + #boxes=None, masks=mask_input, ) @@ -492,7 +498,7 @@ def _predict( print("dense_embeddings", dense_embeddings.shape) torch.onnx.export( self.model.sam_mask_decoder, (self._features["image_embed"][img_idx].unsqueeze(0), self.model.sam_prompt_encoder.get_dense_pe(), sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features), - 'mask_decoder.onnx', + 'mask_decoder_'+model_id+'.onnx', input_names=["image_embeddings", "image_pe", "sparse_prompt_embeddings", "dense_prompt_embeddings", "multimask_output", "repeat_image", "high_res_features"], output_names=["low_res_masks", "iou_predictions"], #dynamic_axes={ @@ -504,7 +510,7 @@ def _predict( import ai_edge_torch sample_inputs = (self._features["image_embed"][img_idx].unsqueeze(0), self.model.sam_prompt_encoder.get_dense_pe(), sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features,) edge_model = ai_edge_torch.convert(self.model.sam_mask_decoder, sample_inputs) - edge_model.export("mask_decoder.tflite") + edge_model.export("mask_decoder_"+model_id+".tflite") if True: low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder( image_embeddings=self._features["image_embed"][img_idx].unsqueeze(0), From 4ce30eb518000d6d66519ea916a5139a3a3a96c9 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Wed, 21 Aug 2024 17:39:48 +0900 Subject: [PATCH 06/79] Change dynamic axis --- sam2/sam2_image_predictor.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index e11220ac5..93248c0cc 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -467,8 +467,8 @@ def _predict( input_names=["coords", "labels", "mask_input"], output_names=["sparse_embeddings", "dense_embeddings"], dynamic_axes={ - 'coords': {0: 
'n'}, - 'labels': {0: 'n'}, + 'coords': {1: 'n'}, + 'labels': {1: 'n'}, }, verbose=False, opset_version=17 ) From 7627c288863a4491ecfe373cc7090dfe21ad3c14 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Wed, 21 Aug 2024 18:16:41 +0900 Subject: [PATCH 07/79] Improve broadcast error --- sam2/modeling/sam/prompt_encoder.py | 43 +++++++++++++++++++++++++++-- sam2/sam2_image_predictor.py | 33 +++++++++++++++++----- 2 files changed, 66 insertions(+), 10 deletions(-) diff --git a/sam2/modeling/sam/prompt_encoder.py b/sam2/modeling/sam/prompt_encoder.py index 7fec87b33..8efec601f 100644 --- a/sam2/modeling/sam/prompt_encoder.py +++ b/sam2/modeling/sam/prompt_encoder.py @@ -92,8 +92,14 @@ def _embed_points( point_embedding = self.pe_layer.forward_with_coords( points, self.input_image_size ) - point_embedding[labels == -1] = 0.0 - point_embedding[labels == -1] += self.not_a_point_embed.weight + + # こっちだとonnxでbroadcast error + #point_embedding[labels == -1] = 0.0 + #point_embedding[labels == -1] += self.not_a_point_embed.weight + + # こっちだと動く + point_embedding[labels == -1] = self.not_a_point_embed.weight + point_embedding[labels == 0] += self.point_embeddings[0].weight point_embedding[labels == 1] += self.point_embeddings[1].weight point_embedding[labels == 2] += self.point_embeddings[2].weight @@ -135,7 +141,7 @@ def _get_batch_size( def _get_device(self) -> torch.device: return self.point_embeddings[0].weight.device - def forward( + def forward_normal( self, coords: Optional[torch.Tensor], labels: Optional[torch.Tensor], @@ -174,3 +180,34 @@ def forward( ) return sparse_embeddings, dense_embeddings + + def forward_sparse( + self, + coords: torch.Tensor, + labels: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor]: + bs = coords.shape[0] + + sparse_embeddings = torch.empty( + (bs, 0, self.embed_dim), device=self._get_device() + ) + + point_embeddings = self._embed_points(coords, labels, pad=True) + sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1) + + dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand( + bs, -1, self.image_embedding_size[0], self.image_embedding_size[1] + ) + + return sparse_embeddings, dense_embeddings + + def forward_dense( + self, + masks: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor]: + bs = masks.shape[0] + sparse_embeddings = torch.empty( + (bs, 0, self.embed_dim), device=self._get_device() + ) + dense_embeddings = self._embed_masks(masks) + return sparse_embeddings, dense_embeddings \ No newline at end of file diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index 93248c0cc..4cd2e213b 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -462,23 +462,42 @@ def _predict( if export_to_onnx: #print("concat_points", concat_points.shape) #print("mask_input", mask_input.shape) + self.model.sam_prompt_encoder.forward = self.model.sam_prompt_encoder.forward_sparse torch.onnx.export( - self.model.sam_prompt_encoder, (concat_points[0], concat_points[1], mask_input), 'prompt_encoder_'+model_id+'.onnx', - input_names=["coords", "labels", "mask_input"], + self.model.sam_prompt_encoder, (concat_points[0], concat_points[1]), 'prompt_encoder_sparse_'+model_id+'.onnx', + input_names=["coords", "labels"], output_names=["sparse_embeddings", "dense_embeddings"], dynamic_axes={ - 'coords': {1: 'n'}, - 'labels': {1: 'n'}, + 'coords': {0: 'b', 1: 'n'}, + 'labels': {0: 'b', 1: 'n'}, }, verbose=False, opset_version=17 ) + + import onnxruntime + model = 
onnxruntime.InferenceSession("prompt_encoder_sparse_hiera_l.onnx") + sparse_embeddings, dense_embeddings = model.run(None, {"coords":concat_points[0].numpy(), "labels":concat_points[1].numpy()}) + + #self.model.sam_prompt_encoder.forward = self.model.sam_prompt_encoder.forward_dense + #if mask_input is None: + # mask_input_non_zero = np.zeros((1, 1024, 1024)) + #else: + # mask_input_non_zero = mask_input + #torch.onnx.export( + # self.model.sam_prompt_encoder, (mask_input_non_zero), 'prompt_encoder_dense_'+model_id+'.onnx', + # input_names=["mask_input"], + # output_names=["sparse_embeddings", "dense_embeddings"], + # verbose=False, opset_version=17 + #) + if export_to_tflite: import ai_edge_torch - sample_inputs = (concat_points[0], concat_points[1], mask_input) + sample_inputs = (concat_points[0], concat_points[1]) + self.model.sam_prompt_encoder.forward = self.model.sam_prompt_encoder.forward_sparse edge_model = ai_edge_torch.convert(self.model.sam_prompt_encoder, sample_inputs) - edge_model.export("prompt_encoder_"+model_id+".tflite") + edge_model.export("prompt_encoder_sparse_"+model_id+".tflite") if True: - sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder( + sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder.forward_normal( coords=concat_points[0], labels=concat_points[1], #boxes=None, From 40de3d8c3bd0f284801a8728e50074337ce1442d Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Wed, 21 Aug 2024 18:40:38 +0900 Subject: [PATCH 08/79] Fix export for mask decoder --- sam2/modeling/sam/mask_decoder.py | 11 +++++++---- sam2/sam2_image_predictor.py | 20 +++++++++++++++----- 2 files changed, 22 insertions(+), 9 deletions(-) diff --git a/sam2/modeling/sam/mask_decoder.py b/sam2/modeling/sam/mask_decoder.py index b7c7dfdb3..7b0868c9c 100644 --- a/sam2/modeling/sam/mask_decoder.py +++ b/sam2/modeling/sam/mask_decoder.py @@ -115,7 +115,8 @@ def forward( dense_prompt_embeddings: torch.Tensor, multimask_output: bool, repeat_image: bool, - high_res_features: Optional[List[torch.Tensor]] = None, + high_res_features1: Optional[torch.Tensor] = None, + high_res_features2: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: """ Predict masks given image and prompt embeddings. @@ -139,7 +140,8 @@ def forward( sparse_prompt_embeddings=sparse_prompt_embeddings, dense_prompt_embeddings=dense_prompt_embeddings, repeat_image=repeat_image, - high_res_features=high_res_features, + high_res_features1=high_res_features1, + high_res_features2=high_res_features2, ) # Select the correct mask or masks for output @@ -172,7 +174,8 @@ def predict_masks( sparse_prompt_embeddings: torch.Tensor, dense_prompt_embeddings: torch.Tensor, repeat_image: bool, - high_res_features: Optional[List[torch.Tensor]] = None, + high_res_features1: Optional[torch.Tensor] = None, + high_res_features2: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, torch.Tensor]: """Predicts masks. 
See 'forward' for more details.""" # Concatenate output tokens @@ -220,7 +223,7 @@ def predict_masks( upscaled_embedding = self.output_upscaling(src) else: dc1, ln1, act1, dc2, act2 = self.output_upscaling - feat_s0, feat_s1 = high_res_features + feat_s0, feat_s1 = high_res_features1, high_res_features2 upscaled_embedding = act1(ln1(dc1(src) + feat_s1)) upscaled_embedding = act2(dc2(upscaled_embedding) + feat_s0) diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index 4cd2e213b..51880bbe7 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -475,7 +475,7 @@ def _predict( ) import onnxruntime - model = onnxruntime.InferenceSession("prompt_encoder_sparse_hiera_l.onnx") + model = onnxruntime.InferenceSession("prompt_encoder_sparse_"+model_id+".onnx") sparse_embeddings, dense_embeddings = model.run(None, {"coords":concat_points[0].numpy(), "labels":concat_points[1].numpy()}) #self.model.sam_prompt_encoder.forward = self.model.sam_prompt_encoder.forward_dense @@ -516,18 +516,27 @@ def _predict( print("sparse_embeddings", sparse_embeddings.shape) print("dense_embeddings", dense_embeddings.shape) torch.onnx.export( - self.model.sam_mask_decoder, (self._features["image_embed"][img_idx].unsqueeze(0), self.model.sam_prompt_encoder.get_dense_pe(), sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features), + self.model.sam_mask_decoder, (self._features["image_embed"][img_idx].unsqueeze(0), self.model.sam_prompt_encoder.get_dense_pe(), sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features[0], high_res_features[1]), 'mask_decoder_'+model_id+'.onnx', - input_names=["image_embeddings", "image_pe", "sparse_prompt_embeddings", "dense_prompt_embeddings", "multimask_output", "repeat_image", "high_res_features"], + input_names=["image_embeddings", "image_pe", "sparse_prompt_embeddings", "dense_prompt_embeddings", "multimask_output", "repeat_image", "high_res_features1", "high_res_features2"], output_names=["low_res_masks", "iou_predictions"], #dynamic_axes={ # 'unnorm_coords': {2: 'width', 3: 'height'} #}, verbose=False, opset_version=17 ) + import onnxruntime + model = onnxruntime.InferenceSession("mask_decoder_"+model_id+".onnx") + low_res_masks, iou_predictions, _, _ = model.run(None, { + "image_embeddings":self._features["image_embed"][img_idx].unsqueeze(0).numpy(), + "image_pe": self.model.sam_prompt_encoder.get_dense_pe().numpy(), + "sparse_prompt_embeddings": sparse_embeddings.numpy(), + "dense_prompt_embeddings": dense_embeddings.numpy(), + "high_res_features1":high_res_features[0].numpy(), + "high_res_features2":high_res_features[1].numpy()}) if export_to_tflite: import ai_edge_torch - sample_inputs = (self._features["image_embed"][img_idx].unsqueeze(0), self.model.sam_prompt_encoder.get_dense_pe(), sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features,) + sample_inputs = (self._features["image_embed"][img_idx].unsqueeze(0), self.model.sam_prompt_encoder.get_dense_pe(), sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features[0], high_res_features[1]) edge_model = ai_edge_torch.convert(self.model.sam_mask_decoder, sample_inputs) edge_model.export("mask_decoder_"+model_id+".tflite") if True: @@ -538,7 +547,8 @@ def _predict( dense_prompt_embeddings=dense_embeddings, multimask_output=multimask_output, repeat_image=batched_mode, - high_res_features=high_res_features, + high_res_features1=high_res_features[0], + 
high_res_features2=high_res_features[1], ) # Upscale the masks to the original image resolution From 68613b000297794d0de7b36ef2880d35ede989c6 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Thu, 22 Aug 2024 13:35:35 +0900 Subject: [PATCH 09/79] Implement onnx inference --- export_onnx.py | 119 ++++++++--------------------------- sam2/sam2_image_predictor.py | 33 ++++++++-- 2 files changed, 54 insertions(+), 98 deletions(-) diff --git a/export_onnx.py b/export_onnx.py index 952a05059..8ee94fd15 100644 --- a/export_onnx.py +++ b/export_onnx.py @@ -1,37 +1,29 @@ import os -# if using Apple MPS, fall back to CPU for unsupported ops -os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1" import numpy as np import torch import matplotlib.pyplot as plt from PIL import Image -# %% -# select the device for computation -if False:#torch.cuda.is_available(): - device = torch.device("cuda") -#elif torch.backends.mps.is_available(): # low accuracy -# device = torch.device("mps") -else: - device = torch.device("cpu") - # Require PJRT_DEVICE=CPU for tflite -print(f"using device: {device}") - -if device.type == "cuda": - # use bfloat16 for the entire notebook - torch.autocast("cuda", dtype=torch.bfloat16).__enter__() - # turn on tfloat32 for Ampere GPUs (https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices) - if torch.cuda.get_device_properties(0).major >= 8: - torch.backends.cuda.matmul.allow_tf32 = True - torch.backends.cudnn.allow_tf32 = True -elif device.type == "mps": - print( - "\nSupport for MPS devices is preliminary. SAM 2 is trained with CUDA and might " - "give numerically different outputs and sometimes degraded performance on MPS. " - "See e.g. https://github.com/pytorch/pytorch/issues/84936 for a discussion." - ) - -# %% +from sam2.build_sam import build_sam2 +from sam2.sam2_image_predictor import SAM2ImagePredictor + +# export settings +export_to_onnx_image_encoder = False +export_to_onnx_mask_decoder = False +export_to_tflite = False +import_from_onnx = True +import_from_tflite = False +show = True + +# model settings +sam2_checkpoint = "./checkpoints/sam2_hiera_large.pt" +model_cfg = "sam2_hiera_l.yaml" +model_id = "hiera_l" + +# use cpu for export +device = torch.device("cpu") + +# utility np.random.seed(3) def show_mask(mask, ax, random_color=False, borders = True): @@ -77,81 +69,27 @@ def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_l plt.axis('off') plt.show() -# %% [markdown] -# ## Example image - -show = True - -# %% +# logic image = Image.open('notebooks/images/truck.jpg') image = np.array(image.convert("RGB")) -# %% -if False: - plt.figure(figsize=(10, 10)) - plt.imshow(image) - plt.axis('on') - plt.show() - -# %% [markdown] -# ## Selecting objects with SAM 2 - -# %% [markdown] -# First, load the SAM 2 model and predictor. Change the path below to point to the SAM 2 checkpoint. Running on CUDA and using the default model are recommended for best results. - -# %% -from sam2.build_sam import build_sam2 -from sam2.sam2_image_predictor import SAM2ImagePredictor - -sam2_checkpoint = "./checkpoints/sam2_hiera_large.pt" -model_cfg = "sam2_hiera_l.yaml" - sam2_model = build_sam2(model_cfg, sam2_checkpoint, device=device) predictor = SAM2ImagePredictor(sam2_model) -# %% [markdown] -# Process the image to produce an image embedding by calling `SAM2ImagePredictor.set_image`. `SAM2ImagePredictor` remembers this embedding and will use it for subsequent mask prediction. 
- -# %% -model_id = "hiera_l" - -export_to_onnx = False -export_to_tflite = False - -predictor.set_image(image, export_to_onnx = export_to_onnx, export_to_tflite = export_to_tflite, model_id = model_id) - -export_to_onnx = True -export_to_tflite = False +predictor.set_image(image, export_to_onnx = export_to_onnx_image_encoder, export_to_tflite = export_to_tflite, import_from_onnx = import_from_onnx, import_from_tflite = import_from_tflite, model_id = model_id) -# %% [markdown] -# To select the truck, choose a point on it. Points are input to the model in (x,y) format and come with labels 1 (foreground point) or 0 (background point). Multiple points can be input; here we use only one. The chosen point will be shown as a star on the image. - -# %% input_point = np.array([[500, 375]]) input_label = np.array([1]) -# %% -if False: - plt.figure(figsize=(10, 10)) - plt.imshow(image) - show_points(input_point, input_label, plt.gca()) - plt.axis('on') - plt.show() - -# %% -print(predictor._features["image_embed"].shape, predictor._features["image_embed"][-1].shape) - -# %% [markdown] -# Predict with `SAM2ImagePredictor.predict`. The model returns masks, quality predictions for those masks, and low resolution mask logits that can be passed to the next iteration of prediction. - -# %% masks, scores, logits = predictor.predict( point_coords=input_point, point_labels=input_label, multimask_output=True, - export_to_onnx=export_to_onnx, + export_to_onnx=export_to_onnx_mask_decoder, export_to_tflite=export_to_tflite, + import_from_onnx=import_from_onnx, + import_from_tflite=import_from_tflite, model_id=model_id ) sorted_ind = np.argsort(scores)[::-1] @@ -159,13 +97,6 @@ def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_l scores = scores[sorted_ind] logits = logits[sorted_ind] -# %% [markdown] -# With `multimask_output=True` (the default setting), SAM 2 outputs 3 masks, where `scores` gives the model's own estimation of the quality of these masks. This setting is intended for ambiguous input prompts, and helps the model disambiguate different objects consistent with the prompt. When `False`, it will return a single mask. For ambiguous prompts such as a single point, it is recommended to use `multimask_output=True` even if only a single mask is desired; the best single mask can be chosen by picking the one with the highest score returned in `scores`. This will often result in a better mask. 
- -# %% -masks.shape # (number_of_masks) x H x W - -# %% if show: show_masks(image, masks, scores, point_coords=input_point, input_labels=input_label, borders=True) diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index 51880bbe7..74bab6a78 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -12,6 +12,8 @@ import torch from PIL.Image import Image +import onnxruntime + from sam2.modeling.sam2_base import SAM2Base from sam2.utils.transforms import SAM2Transforms @@ -88,6 +90,8 @@ def set_image( image: Union[np.ndarray, Image], export_to_onnx = False, export_to_tflite = False, + import_from_onnx = False, + import_from_tflite = False, model_id=None ) -> None: """ @@ -117,26 +121,36 @@ def set_image( len(input_image.shape) == 4 and input_image.shape[1] == 3 ), f"input_image must be of size 1x3xHxW, got {input_image.shape}" logging.info("Computing image embeddings for the provided image...") + if export_to_onnx: print("input_image", input_image.shape) torch.onnx.export( - self.model, (input_image), 'image_encoder'+model_id+'.onnx', + self.model, (input_image), 'image_encoder_'+model_id+'.onnx', input_names=["input_image"], output_names=["feats1", "feats2", "feats3"], verbose=False, opset_version=17 ) + + if import_from_onnx: + model = onnxruntime.InferenceSession("image_encoder_"+model_id+".onnx") + vision_feat1, vision_feat2, vision_feat3 = model.run(None, {"input_image":input_image.numpy()}) + feats = [torch.Tensor(vision_feat1), torch.Tensor(vision_feat2), torch.Tensor(vision_feat3)] + if export_to_tflite: import ai_edge_torch import tensorflow as tf sample_inputs = (input_image,) - export_float = False - export_int8 = True + export_float = True + export_int8 = False if export_float: tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]}} edge_model = ai_edge_torch.convert(self.model, sample_inputs, _ai_edge_converter_flags=tfl_converter_flags) edge_model.export("image_encoder_"+model_id+".tflite") + if import_from_tflite: + vision_feat1, vision_feat2, vision_feat3 = edge_model(sample_inputs) + feats = [torch.Tensor(vision_feat1), torch.Tensor(vision_feat2), torch.Tensor(vision_feat3)] if export_int8: from ai_edge_torch.quantize import pt2e_quantizer @@ -160,7 +174,11 @@ def set_image( ) with_quantizer.export("image_encoder_int8_"+model_id+".tflite") - if True: + if import_from_tflite: + vision_feat1, vision_feat2, vision_feat3 = model(sample_inputs) + feats = [torch.Tensor(vision_feat1), torch.Tensor(vision_feat2), torch.Tensor(vision_feat3)] + + if not import_from_onnx and not import_from_tflite: backbone_out = self.model.forward_image(input_image) _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out) # Add no_mem_embed, which is added to the lowest rest feat. 
map during training on videos @@ -171,6 +189,7 @@ def set_image( feat.permute(1, 2, 0).view(1, -1, *feat_size) for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1]) ][::-1] + self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]} self._is_image_set = True logging.info("Image embeddings computed.") @@ -292,6 +311,8 @@ def predict( normalize_coords=True, export_to_onnx=False, export_to_tflite=False, + import_from_onnx = False, + import_from_tflite = False, model_id=None ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ @@ -346,6 +367,8 @@ def predict( return_logits=return_logits, export_to_onnx=export_to_onnx, export_to_tflite=export_to_tflite, + import_from_onnx=import_from_onnx, + import_from_tflite=import_from_tflite, model_id=model_id ) @@ -397,6 +420,8 @@ def _predict( img_idx: int = -1, export_to_onnx = False, export_to_tflite = False, + import_from_onnx = False, + import_from_tflite = False, model_id = None ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ From dc014edc73166076d653f4e1e387ebd0eeaf95d5 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Thu, 22 Aug 2024 13:53:10 +0900 Subject: [PATCH 10/79] Infer using onnx runtime --- sam2/sam2_image_predictor.py | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index 74bab6a78..9901a2981 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -135,6 +135,7 @@ def set_image( model = onnxruntime.InferenceSession("image_encoder_"+model_id+".onnx") vision_feat1, vision_feat2, vision_feat3 = model.run(None, {"input_image":input_image.numpy()}) feats = [torch.Tensor(vision_feat1), torch.Tensor(vision_feat2), torch.Tensor(vision_feat3)] + #print("feats", vision_feat1.shape, vision_feat2.shape, vision_feat3.shape) if export_to_tflite: import ai_edge_torch @@ -499,10 +500,13 @@ def _predict( verbose=False, opset_version=17 ) - import onnxruntime + if import_from_onnx: model = onnxruntime.InferenceSession("prompt_encoder_sparse_"+model_id+".onnx") sparse_embeddings, dense_embeddings = model.run(None, {"coords":concat_points[0].numpy(), "labels":concat_points[1].numpy()}) + sparse_embeddings = torch.Tensor(sparse_embeddings) + dense_embeddings = torch.Tensor(dense_embeddings) + #if export_to_onnx: #self.model.sam_prompt_encoder.forward = self.model.sam_prompt_encoder.forward_dense #if mask_input is None: # mask_input_non_zero = np.zeros((1, 1024, 1024)) @@ -521,7 +525,8 @@ def _predict( self.model.sam_prompt_encoder.forward = self.model.sam_prompt_encoder.forward_sparse edge_model = ai_edge_torch.convert(self.model.sam_prompt_encoder, sample_inputs) edge_model.export("prompt_encoder_sparse_"+model_id+".tflite") - if True: + + if not import_from_onnx and not import_from_tflite: sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder.forward_normal( coords=concat_points[0], labels=concat_points[1], @@ -537,20 +542,20 @@ def _predict( feat_level[img_idx].unsqueeze(0) for feat_level in self._features["high_res_feats"] ] + + #print("sparse_embeddings", sparse_embeddings.shape) + #print("dense_embeddings", dense_embeddings.shape) + if export_to_onnx: - print("sparse_embeddings", sparse_embeddings.shape) - print("dense_embeddings", dense_embeddings.shape) torch.onnx.export( self.model.sam_mask_decoder, (self._features["image_embed"][img_idx].unsqueeze(0), self.model.sam_prompt_encoder.get_dense_pe(), sparse_embeddings, dense_embeddings, multimask_output, 
batched_mode, high_res_features[0], high_res_features[1]), 'mask_decoder_'+model_id+'.onnx', input_names=["image_embeddings", "image_pe", "sparse_prompt_embeddings", "dense_prompt_embeddings", "multimask_output", "repeat_image", "high_res_features1", "high_res_features2"], output_names=["low_res_masks", "iou_predictions"], - #dynamic_axes={ - # 'unnorm_coords': {2: 'width', 3: 'height'} - #}, verbose=False, opset_version=17 ) - import onnxruntime + + if import_from_onnx: model = onnxruntime.InferenceSession("mask_decoder_"+model_id+".onnx") low_res_masks, iou_predictions, _, _ = model.run(None, { "image_embeddings":self._features["image_embed"][img_idx].unsqueeze(0).numpy(), @@ -559,12 +564,16 @@ def _predict( "dense_prompt_embeddings": dense_embeddings.numpy(), "high_res_features1":high_res_features[0].numpy(), "high_res_features2":high_res_features[1].numpy()}) + low_res_masks = torch.Tensor(low_res_masks) + iou_predictions = torch.Tensor(iou_predictions) + if export_to_tflite: import ai_edge_torch sample_inputs = (self._features["image_embed"][img_idx].unsqueeze(0), self.model.sam_prompt_encoder.get_dense_pe(), sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features[0], high_res_features[1]) edge_model = ai_edge_torch.convert(self.model.sam_mask_decoder, sample_inputs) edge_model.export("mask_decoder_"+model_id+".tflite") - if True: + + if not import_from_onnx and not import_from_tflite: low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder( image_embeddings=self._features["image_embed"][img_idx].unsqueeze(0), image_pe=self.model.sam_prompt_encoder.get_dense_pe(), From 8438f9f5cbc45531a9f07c14165be473665560d1 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Thu, 22 Aug 2024 14:25:33 +0900 Subject: [PATCH 11/79] Export dense pe --- sam2/modeling/sam/prompt_encoder.py | 2 +- sam2/sam2_image_predictor.py | 14 ++++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/sam2/modeling/sam/prompt_encoder.py b/sam2/modeling/sam/prompt_encoder.py index 8efec601f..20a2f8210 100644 --- a/sam2/modeling/sam/prompt_encoder.py +++ b/sam2/modeling/sam/prompt_encoder.py @@ -199,7 +199,7 @@ def forward_sparse( bs, -1, self.image_embedding_size[0], self.image_embedding_size[1] ) - return sparse_embeddings, dense_embeddings + return sparse_embeddings, dense_embeddings, self.get_dense_pe() def forward_dense( self, diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index 9901a2981..2ef6a12bd 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -492,7 +492,7 @@ def _predict( torch.onnx.export( self.model.sam_prompt_encoder, (concat_points[0], concat_points[1]), 'prompt_encoder_sparse_'+model_id+'.onnx', input_names=["coords", "labels"], - output_names=["sparse_embeddings", "dense_embeddings"], + output_names=["sparse_embeddings", "dense_embeddings", "dense_pe"], dynamic_axes={ 'coords': {0: 'b', 1: 'n'}, 'labels': {0: 'b', 1: 'n'}, @@ -502,9 +502,10 @@ def _predict( if import_from_onnx: model = onnxruntime.InferenceSession("prompt_encoder_sparse_"+model_id+".onnx") - sparse_embeddings, dense_embeddings = model.run(None, {"coords":concat_points[0].numpy(), "labels":concat_points[1].numpy()}) + sparse_embeddings, dense_embeddings, dense_pe = model.run(None, {"coords":concat_points[0].numpy(), "labels":concat_points[1].numpy()}) sparse_embeddings = torch.Tensor(sparse_embeddings) dense_embeddings = torch.Tensor(dense_embeddings) + dense_pe = torch.Tensor(dense_pe) #if export_to_onnx: 
#self.model.sam_prompt_encoder.forward = self.model.sam_prompt_encoder.forward_dense @@ -533,6 +534,7 @@ def _predict( #boxes=None, masks=mask_input, ) + dense_pe = self.model.sam_prompt_encoder.get_dense_pe() # Predict masks batched_mode = ( @@ -548,7 +550,7 @@ def _predict( if export_to_onnx: torch.onnx.export( - self.model.sam_mask_decoder, (self._features["image_embed"][img_idx].unsqueeze(0), self.model.sam_prompt_encoder.get_dense_pe(), sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features[0], high_res_features[1]), + self.model.sam_mask_decoder, (self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features[0], high_res_features[1]), 'mask_decoder_'+model_id+'.onnx', input_names=["image_embeddings", "image_pe", "sparse_prompt_embeddings", "dense_prompt_embeddings", "multimask_output", "repeat_image", "high_res_features1", "high_res_features2"], output_names=["low_res_masks", "iou_predictions"], @@ -559,7 +561,7 @@ def _predict( model = onnxruntime.InferenceSession("mask_decoder_"+model_id+".onnx") low_res_masks, iou_predictions, _, _ = model.run(None, { "image_embeddings":self._features["image_embed"][img_idx].unsqueeze(0).numpy(), - "image_pe": self.model.sam_prompt_encoder.get_dense_pe().numpy(), + "image_pe": dense_pe.numpy(), "sparse_prompt_embeddings": sparse_embeddings.numpy(), "dense_prompt_embeddings": dense_embeddings.numpy(), "high_res_features1":high_res_features[0].numpy(), @@ -569,14 +571,14 @@ def _predict( if export_to_tflite: import ai_edge_torch - sample_inputs = (self._features["image_embed"][img_idx].unsqueeze(0), self.model.sam_prompt_encoder.get_dense_pe(), sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features[0], high_res_features[1]) + sample_inputs = (self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features[0], high_res_features[1]) edge_model = ai_edge_torch.convert(self.model.sam_mask_decoder, sample_inputs) edge_model.export("mask_decoder_"+model_id+".tflite") if not import_from_onnx and not import_from_tflite: low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder( image_embeddings=self._features["image_embed"][img_idx].unsqueeze(0), - image_pe=self.model.sam_prompt_encoder.get_dense_pe(), + image_pe=dense_pe, sparse_prompt_embeddings=sparse_embeddings, dense_prompt_embeddings=dense_embeddings, multimask_output=multimask_output, From 1bf6439ad7bb9c06b3bc006186a2c249081734ce Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Fri, 23 Aug 2024 11:06:30 +0900 Subject: [PATCH 12/79] Export image encoder to tflite --- export_onnx.py | 14 +++++++++----- sam2/sam2_image_predictor.py | 8 ++++---- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/export_onnx.py b/export_onnx.py index 8ee94fd15..a39c76c16 100644 --- a/export_onnx.py +++ b/export_onnx.py @@ -10,11 +10,14 @@ # export settings export_to_onnx_image_encoder = False export_to_onnx_mask_decoder = False -export_to_tflite = False -import_from_onnx = True -import_from_tflite = False +export_to_tflite_image_encoder = True +export_to_tflite_mask_decoder = False +import_from_onnx = False +import_from_tflite = True show = True +# export PJRT_DEVICE=CPU + # model settings sam2_checkpoint = "./checkpoints/sam2_hiera_large.pt" model_cfg = "sam2_hiera_l.yaml" @@ -68,6 +71,7 @@ def show_masks(image, masks, scores, point_coords=None, box_coords=None, 
input_l plt.title(f"Mask {i+1}, Score: {score:.3f}", fontsize=18) plt.axis('off') plt.show() + plt.savefig(f'output{i+1}.png') # logic image = Image.open('notebooks/images/truck.jpg') @@ -77,7 +81,7 @@ def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_l predictor = SAM2ImagePredictor(sam2_model) -predictor.set_image(image, export_to_onnx = export_to_onnx_image_encoder, export_to_tflite = export_to_tflite, import_from_onnx = import_from_onnx, import_from_tflite = import_from_tflite, model_id = model_id) +predictor.set_image(image, export_to_onnx = export_to_onnx_image_encoder, export_to_tflite = export_to_tflite_image_encoder, import_from_onnx = import_from_onnx, import_from_tflite = import_from_tflite, model_id = model_id) input_point = np.array([[500, 375]]) input_label = np.array([1]) @@ -87,7 +91,7 @@ def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_l point_labels=input_label, multimask_output=True, export_to_onnx=export_to_onnx_mask_decoder, - export_to_tflite=export_to_tflite, + export_to_tflite=export_to_tflite_mask_decoder, import_from_onnx=import_from_onnx, import_from_tflite=import_from_tflite, model_id=model_id diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index 2ef6a12bd..84b10deb5 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -150,7 +150,7 @@ def set_image( edge_model = ai_edge_torch.convert(self.model, sample_inputs, _ai_edge_converter_flags=tfl_converter_flags) edge_model.export("image_encoder_"+model_id+".tflite") if import_from_tflite: - vision_feat1, vision_feat2, vision_feat3 = edge_model(sample_inputs) + vision_feat1, vision_feat2, vision_feat3 = edge_model(input_image) feats = [torch.Tensor(vision_feat1), torch.Tensor(vision_feat2), torch.Tensor(vision_feat3)] if export_int8: @@ -179,7 +179,7 @@ def set_image( vision_feat1, vision_feat2, vision_feat3 = model(sample_inputs) feats = [torch.Tensor(vision_feat1), torch.Tensor(vision_feat2), torch.Tensor(vision_feat3)] - if not import_from_onnx and not import_from_tflite: + if not import_from_onnx and (not import_from_tflite or not export_to_tflite): backbone_out = self.model.forward_image(input_image) _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out) # Add no_mem_embed, which is added to the lowest rest feat. 
map during training on videos @@ -527,7 +527,7 @@ def _predict( edge_model = ai_edge_torch.convert(self.model.sam_prompt_encoder, sample_inputs) edge_model.export("prompt_encoder_sparse_"+model_id+".tflite") - if not import_from_onnx and not import_from_tflite: + if not import_from_onnx and (not import_from_tflite or not export_to_tflite): sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder.forward_normal( coords=concat_points[0], labels=concat_points[1], @@ -575,7 +575,7 @@ def _predict( edge_model = ai_edge_torch.convert(self.model.sam_mask_decoder, sample_inputs) edge_model.export("mask_decoder_"+model_id+".tflite") - if not import_from_onnx and not import_from_tflite: + if not import_from_onnx and (not import_from_tflite or not export_to_tflite): low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder( image_embeddings=self._features["image_embed"][img_idx].unsqueeze(0), image_pe=dense_pe, From 98e8297da10c27714c396b2b4b95f9ed127960c9 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Fri, 23 Aug 2024 12:56:22 +0900 Subject: [PATCH 13/79] Export to tflite --- export_onnx.py | 4 +-- sam2/modeling/sam/mask_decoder.py | 4 +-- sam2/modeling/sam/prompt_encoder.py | 24 ++++++++++++----- sam2/modeling/sam/transformer.py | 42 +++++++++++++++-------------- sam2/sam2_image_predictor.py | 11 ++++++++ 5 files changed, 55 insertions(+), 30 deletions(-) diff --git a/export_onnx.py b/export_onnx.py index a39c76c16..5a2ec6eab 100644 --- a/export_onnx.py +++ b/export_onnx.py @@ -10,8 +10,8 @@ # export settings export_to_onnx_image_encoder = False export_to_onnx_mask_decoder = False -export_to_tflite_image_encoder = True -export_to_tflite_mask_decoder = False +export_to_tflite_image_encoder = False +export_to_tflite_mask_decoder = True import_from_onnx = False import_from_tflite = True show = True diff --git a/sam2/modeling/sam/mask_decoder.py b/sam2/modeling/sam/mask_decoder.py index 7b0868c9c..1d6a31e1f 100644 --- a/sam2/modeling/sam/mask_decoder.py +++ b/sam2/modeling/sam/mask_decoder.py @@ -195,7 +195,7 @@ def predict_masks( [self.iou_token.weight, self.mask_tokens.weight], dim=0 ) output_tokens = output_tokens.unsqueeze(0).expand( - sparse_prompt_embeddings.size(0), -1, -1 + sparse_prompt_embeddings.shape[0], -1, -1 ) tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1) @@ -207,7 +207,7 @@ def predict_masks( src = image_embeddings src = src + dense_prompt_embeddings assert ( - image_pe.size(0) == 1 + image_pe.shape[0] == 1 ), "image_pe should have size 1 in batch dim (from `get_dense_pe()`)" pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0) b, c, h, w = src.shape diff --git a/sam2/modeling/sam/prompt_encoder.py b/sam2/modeling/sam/prompt_encoder.py index 20a2f8210..2834a4093 100644 --- a/sam2/modeling/sam/prompt_encoder.py +++ b/sam2/modeling/sam/prompt_encoder.py @@ -97,13 +97,25 @@ def _embed_points( #point_embedding[labels == -1] = 0.0 #point_embedding[labels == -1] += self.not_a_point_embed.weight - # This one works - point_embedding[labels == -1] = self.not_a_point_embed.weight + # This works with ONNX, but does not work with TFLite + #point_embedding[labels == -1] = self.not_a_point_embed.weight - point_embedding[labels == 0] += self.point_embeddings[0].weight - point_embedding[labels == 1] += self.point_embeddings[1].weight - point_embedding[labels == 2] += self.point_embeddings[2].weight - point_embedding[labels == 3] += self.point_embeddings[3].weight + #point_embedding[labels == 0] += self.point_embeddings[0].weight + #point_embedding[labels == 1] +=
self.point_embeddings[1].weight + #point_embedding[labels == 2] += self.point_embeddings[2].weight + #point_embedding[labels == 3] += self.point_embeddings[3].weight + + # This one also works with TFLite + labels = labels.int() + table = torch.zeros((5, self.point_embeddings[0].weight.shape[1])) + table[0] = self.not_a_point_embed.weight + table[1] = self.point_embeddings[0].weight + table[2] = self.point_embeddings[1].weight + table[3] = self.point_embeddings[2].weight + table[4] = self.point_embeddings[3].weight + for i in range(labels.shape[0]): + point_embedding[i] = point_embedding[i] + table[labels[i] + 1] + return point_embedding def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor: diff --git a/sam2/modeling/sam/transformer.py b/sam2/modeling/sam/transformer.py index b5b6fa2f8..3df4843d9 100644 --- a/sam2/modeling/sam/transformer.py +++ b/sam2/modeling/sam/transformer.py @@ -265,17 +265,18 @@ def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor: dropout_p = self.dropout_p if self.training else 0.0 # Attention - try: - with sdp_kernel_context(dropout_p): - out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p) - except Exception as e: + #try: + # with sdp_kernel_context(dropout_p): + # out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p) + #except Exception as e: + if True: # Fall back to all kernels if the Flash attention kernel fails - warnings.warn( - f"Flash Attention kernel failed due to: {e}\nFalling back to all available " - f"kernels for scaled_dot_product_attention (which may have a slower speed).", - category=UserWarning, - stacklevel=2, - ) + #warnings.warn( + # f"Flash Attention kernel failed due to: {e}\nFalling back to all available " + # f"kernels for scaled_dot_product_attention (which may have a slower speed).", + # category=UserWarning, + # stacklevel=2, + #) global ALLOW_ALL_KERNELS ALLOW_ALL_KERNELS = True out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p) @@ -339,17 +340,18 @@ def forward( dropout_p = self.dropout_p if self.training else 0.0 # Attention - try: - with sdp_kernel_context(dropout_p): - out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p) - except Exception as e: + #try: + # with sdp_kernel_context(dropout_p): + # out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p) + #except Exception as e: + if True: # Fall back to all kernels if the Flash attention kernel fails - warnings.warn( - f"Flash Attention kernel failed due to: {e}\nFalling back to all available " - f"kernels for scaled_dot_product_attention (which may have a slower speed).", - category=UserWarning, - stacklevel=2, - ) + #warnings.warn( + # f"Flash Attention kernel failed due to: {e}\nFalling back to all available " + # f"kernels for scaled_dot_product_attention (which may have a slower speed).", + # category=UserWarning, + # stacklevel=2, + #) global ALLOW_ALL_KERNELS ALLOW_ALL_KERNELS = True out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p) diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index 84b10deb5..e124baf35 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -527,6 +527,12 @@ def _predict( edge_model = ai_edge_torch.convert(self.model.sam_prompt_encoder, sample_inputs) edge_model.export("prompt_encoder_sparse_"+model_id+".tflite") + if import_from_tflite: + sparse_embeddings, dense_embeddings, dense_pe = edge_model(concat_points[0], concat_points[1]) + sparse_embeddings = torch.Tensor(sparse_embeddings) + dense_embeddings =
torch.Tensor(dense_embeddings) + dense_pe = torch.Tensor(dense_pe) + if not import_from_onnx and (not import_from_tflite or not export_to_tflite): sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder.forward_normal( coords=concat_points[0], @@ -574,6 +580,11 @@ def _predict( sample_inputs = (self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features[0], high_res_features[1]) edge_model = ai_edge_torch.convert(self.model.sam_mask_decoder, sample_inputs) edge_model.export("mask_decoder_"+model_id+".tflite") + multimask_output_np = np.zeros((1), dtype=bool) + batched_mode_np = np.zeros((1), dtype=bool) + low_res_masks, iou_predictions, _, _ = edge_model(self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, multimask_output_np, batched_mode_np, high_res_features[0], high_res_features[1]) + low_res_masks = torch.Tensor(low_res_masks) + iou_predictions = torch.Tensor(iou_predictions) if not import_from_onnx and (not import_from_tflite or not export_to_tflite): low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder( From ed87f6a76b46683c3cd42a24d7b8b6fc67a73a98 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Fri, 23 Aug 2024 13:53:06 +0900 Subject: [PATCH 14/79] Int8 quantization --- export_onnx.py | 7 ++- sam2/sam2_image_predictor.py | 97 +++++++++++++++++++++++++++--------- 2 files changed, 79 insertions(+), 25 deletions(-) diff --git a/export_onnx.py b/export_onnx.py index 5a2ec6eab..2e46954c7 100644 --- a/export_onnx.py +++ b/export_onnx.py @@ -14,6 +14,7 @@ export_to_tflite_mask_decoder = True import_from_onnx = False import_from_tflite = True +tflite_int8 = True show = True # export PJRT_DEVICE=CPU @@ -81,7 +82,10 @@ def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_l predictor = SAM2ImagePredictor(sam2_model) -predictor.set_image(image, export_to_onnx = export_to_onnx_image_encoder, export_to_tflite = export_to_tflite_image_encoder, import_from_onnx = import_from_onnx, import_from_tflite = import_from_tflite, model_id = model_id) +predictor.set_image(image, export_to_onnx = export_to_onnx_image_encoder, + export_to_tflite = export_to_tflite_image_encoder, + import_from_onnx = import_from_onnx, import_from_tflite = import_from_tflite, + tflite_int8 = tflite_int8, model_id = model_id) input_point = np.array([[500, 375]]) input_label = np.array([1]) @@ -94,6 +98,7 @@ def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_l export_to_tflite=export_to_tflite_mask_decoder, import_from_onnx=import_from_onnx, import_from_tflite=import_from_tflite, + tflite_int8=tflite_int8, model_id=model_id ) sorted_ind = np.argsort(scores)[::-1] diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index e124baf35..c37d97b2f 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -92,6 +92,7 @@ def set_image( export_to_tflite = False, import_from_onnx = False, import_from_tflite = False, + tflite_int8=False, model_id=None ) -> None: """ @@ -142,18 +143,12 @@ def set_image( import tensorflow as tf sample_inputs = (input_image,) - export_float = True - export_int8 = False - - if export_float: + if not tflite_int8: tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]}} edge_model = ai_edge_torch.convert(self.model, sample_inputs, _ai_edge_converter_flags=tfl_converter_flags) 
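# Note: TFLITE_BUILTINS plus SELECT_TF_OPS lets the converter fall back to TensorFlow (Flex)
# ops for operators that have no TFLite builtin; running such a model later needs a runtime
# with the Flex delegate (the full tensorflow package provides it). The helper below is only
# a sketch of how the file written by the export on the next line could be driven with the
# plain TFLite Interpreter instead of the ai_edge_torch wrapper; the output ordering is an
# assumption and should be checked against get_output_details().
def _run_image_encoder_tflite(tflite_path, image_np):
    interpreter = tf.lite.Interpreter(model_path=tflite_path)
    interpreter.allocate_tensors()
    input_detail = interpreter.get_input_details()[0]
    interpreter.set_tensor(input_detail["index"], image_np.astype(np.float32))
    interpreter.invoke()
    return [interpreter.get_tensor(o["index"]) for o in interpreter.get_output_details()]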
edge_model.export("image_encoder_"+model_id+".tflite") - if import_from_tflite: - vision_feat1, vision_feat2, vision_feat3 = edge_model(input_image) - feats = [torch.Tensor(vision_feat1), torch.Tensor(vision_feat2), torch.Tensor(vision_feat3)] - if export_int8: + if tflite_int8: from ai_edge_torch.quantize import pt2e_quantizer from ai_edge_torch.quantize import quant_config from torch.ao.quantization import quantize_pt2e @@ -163,7 +158,7 @@ def set_image( ) model = torch._export.capture_pre_autograd_graph(self.model, sample_inputs) model = quantize_pt2e.prepare_pt2e(model, quantizer) - #model(input_image.type(torch.FloatTensor)) # calibration + model(input_image) # calibration (you need to edit reset_histogram function) model = quantize_pt2e.convert_pt2e(model, fold_quantize=False) tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]}} @@ -173,11 +168,11 @@ def set_image( quant_config=quant_config.QuantConfig(pt2e_quantizer=quantizer), _ai_edge_converter_flags=tfl_converter_flags ) - with_quantizer.export("image_encoder_int8_"+model_id+".tflite") + with_quantizer.export("image_encoder_"+model_id+"_int8.tflite") - if import_from_tflite: - vision_feat1, vision_feat2, vision_feat3 = model(sample_inputs) - feats = [torch.Tensor(vision_feat1), torch.Tensor(vision_feat2), torch.Tensor(vision_feat3)] + if import_from_tflite: + vision_feat1, vision_feat2, vision_feat3 = model(input_image) + feats = [torch.Tensor(vision_feat1), torch.Tensor(vision_feat2), torch.Tensor(vision_feat3)] if not import_from_onnx and (not import_from_tflite or not export_to_tflite): backbone_out = self.model.forward_image(input_image) @@ -314,6 +309,7 @@ def predict( export_to_tflite=False, import_from_onnx = False, import_from_tflite = False, + tflite_int8=False, model_id=None ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ @@ -370,6 +366,7 @@ def predict( export_to_tflite=export_to_tflite, import_from_onnx=import_from_onnx, import_from_tflite=import_from_tflite, + tflite_int8=tflite_int8, model_id=model_id ) @@ -421,6 +418,7 @@ def _predict( img_idx: int = -1, export_to_onnx = False, export_to_tflite = False, + tflite_int8 = False, import_from_onnx = False, import_from_tflite = False, model_id = None @@ -524,8 +522,33 @@ def _predict( import ai_edge_torch sample_inputs = (concat_points[0], concat_points[1]) self.model.sam_prompt_encoder.forward = self.model.sam_prompt_encoder.forward_sparse - edge_model = ai_edge_torch.convert(self.model.sam_prompt_encoder, sample_inputs) - edge_model.export("prompt_encoder_sparse_"+model_id+".tflite") + + if not tflite_int8: + edge_model = ai_edge_torch.convert(self.model.sam_prompt_encoder, sample_inputs) + edge_model.export("prompt_encoder_sparse_"+model_id+".tflite") + + if tflite_int8: + from ai_edge_torch.quantize import pt2e_quantizer + from ai_edge_torch.quantize import quant_config + from torch.ao.quantization import quantize_pt2e + #import tensorflow as tf + + quantizer = pt2e_quantizer.PT2EQuantizer().set_global( + pt2e_quantizer.get_symmetric_quantization_config() + ) + model = torch._export.capture_pre_autograd_graph(self.model.sam_prompt_encoder, sample_inputs) + model = quantize_pt2e.prepare_pt2e(model, quantizer) + model(concat_points[0], concat_points[1]) # calibration + model = quantize_pt2e.convert_pt2e(model, fold_quantize=False) + + #tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS]}}#, tf.lite.OpsSet.SELECT_TF_OPS]}} + with_quantizer = 
ai_edge_torch.convert( + model, + sample_inputs, + quant_config=quant_config.QuantConfig(pt2e_quantizer=quantizer), + #_ai_edge_converter_flags=tfl_converter_flags + ) + with_quantizer.export("prompt_encoder_sparse_"+model_id+"_int8.tflite") if import_from_tflite: sparse_embeddings, dense_embeddings, dense_pe = edge_model(concat_points[0], concat_points[1]) @@ -576,15 +599,41 @@ def _predict( iou_predictions = torch.Tensor(iou_predictions) if export_to_tflite: - import ai_edge_torch - sample_inputs = (self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features[0], high_res_features[1]) - edge_model = ai_edge_torch.convert(self.model.sam_mask_decoder, sample_inputs) - edge_model.export("mask_decoder_"+model_id+".tflite") - multimask_output_np = np.zeros((1), dtype=bool) - batched_mode_np = np.zeros((1), dtype=bool) - low_res_masks, iou_predictions, _, _ = edge_model(self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, multimask_output_np, batched_mode_np, high_res_features[0], high_res_features[1]) - low_res_masks = torch.Tensor(low_res_masks) - iou_predictions = torch.Tensor(iou_predictions) + if not tflite_int8: + import ai_edge_torch + sample_inputs = (self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features[0], high_res_features[1]) + edge_model = ai_edge_torch.convert(self.model.sam_mask_decoder, sample_inputs) + edge_model.export("mask_decoder_"+model_id+".tflite") + multimask_output_np = np.zeros((1), dtype=bool) + batched_mode_np = np.zeros((1), dtype=bool) + + if tflite_int8: + from ai_edge_torch.quantize import pt2e_quantizer + from ai_edge_torch.quantize import quant_config + from torch.ao.quantization import quantize_pt2e + #import tensorflow as tf + + quantizer = pt2e_quantizer.PT2EQuantizer().set_global( + pt2e_quantizer.get_symmetric_quantization_config() + ) + model = torch._export.capture_pre_autograd_graph(self.model.sam_mask_decoder, sample_inputs) + model = quantize_pt2e.prepare_pt2e(model, quantizer) + model(self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features[0], high_res_features[1]) # calibration + model = quantize_pt2e.convert_pt2e(model, fold_quantize=False) + + #tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]}} + with_quantizer = ai_edge_torch.convert( + model, + sample_inputs, + quant_config=quant_config.QuantConfig(pt2e_quantizer=quantizer), + # _ai_edge_converter_flags=tfl_converter_flags + ) + with_quantizer.export("mask_decoder_"+model_id+"_int8.tflite") + + if import_from_tflite: + low_res_masks, iou_predictions, _, _ = edge_model(self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, multimask_output_np, batched_mode_np, high_res_features[0], high_res_features[1]) + low_res_masks = torch.Tensor(low_res_masks) + iou_predictions = torch.Tensor(iou_predictions) if not import_from_onnx and (not import_from_tflite or not export_to_tflite): low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder( From 2e8d09637d0d05cd38af564439d7fedb1ad5b6dc Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Fri, 23 Aug 2024 14:01:56 +0900 Subject: [PATCH 15/79] Quantize mask decoder --- sam2/sam2_image_predictor.py | 30 
+++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index c37d97b2f..1f2a3808e 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -169,9 +169,10 @@ def set_image( _ai_edge_converter_flags=tfl_converter_flags ) with_quantizer.export("image_encoder_"+model_id+"_int8.tflite") + edge_model = model if import_from_tflite: - vision_feat1, vision_feat2, vision_feat3 = model(input_image) + vision_feat1, vision_feat2, vision_feat3 = edge_model(input_image) feats = [torch.Tensor(vision_feat1), torch.Tensor(vision_feat2), torch.Tensor(vision_feat3)] if not import_from_onnx and (not import_from_tflite or not export_to_tflite): @@ -527,11 +528,10 @@ def _predict( edge_model = ai_edge_torch.convert(self.model.sam_prompt_encoder, sample_inputs) edge_model.export("prompt_encoder_sparse_"+model_id+".tflite") - if tflite_int8: + if False:#tflite_int8: # labels are int64, so this cannot be quantized from ai_edge_torch.quantize import pt2e_quantizer from ai_edge_torch.quantize import quant_config from torch.ao.quantization import quantize_pt2e - #import tensorflow as tf quantizer = pt2e_quantizer.PT2EQuantizer().set_global( pt2e_quantizer.get_symmetric_quantization_config() @@ -541,22 +541,22 @@ def _predict( model(concat_points[0], concat_points[1]) # calibration model = quantize_pt2e.convert_pt2e(model, fold_quantize=False) - #tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS]}}#, tf.lite.OpsSet.SELECT_TF_OPS]}} with_quantizer = ai_edge_torch.convert( model, sample_inputs, quant_config=quant_config.QuantConfig(pt2e_quantizer=quantizer), - #_ai_edge_converter_flags=tfl_converter_flags ) with_quantizer.export("prompt_encoder_sparse_"+model_id+"_int8.tflite") - if import_from_tflite: + edge_model = model + + if import_from_tflite and not tflite_int8: sparse_embeddings, dense_embeddings, dense_pe = edge_model(concat_points[0], concat_points[1]) sparse_embeddings = torch.Tensor(sparse_embeddings) dense_embeddings = torch.Tensor(dense_embeddings) dense_pe = torch.Tensor(dense_pe) - if not import_from_onnx and (not import_from_tflite or not export_to_tflite): + if not import_from_onnx and (not import_from_tflite or not export_to_tflite or tflite_int8): sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder.forward_normal( coords=concat_points[0], labels=concat_points[1], @@ -599,19 +599,17 @@ def _predict( iou_predictions = torch.Tensor(iou_predictions) if export_to_tflite: + sample_inputs = (self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features[0], high_res_features[1]) + if not tflite_int8: import ai_edge_torch - sample_inputs = (self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features[0], high_res_features[1]) edge_model = ai_edge_torch.convert(self.model.sam_mask_decoder, sample_inputs) edge_model.export("mask_decoder_"+model_id+".tflite") - multimask_output_np = np.zeros((1), dtype=bool) - batched_mode_np = np.zeros((1), dtype=bool) if tflite_int8: from ai_edge_torch.quantize import pt2e_quantizer from ai_edge_torch.quantize import quant_config from torch.ao.quantization import quantize_pt2e - #import tensorflow as tf quantizer = pt2e_quantizer.PT2EQuantizer().set_global( pt2e_quantizer.get_symmetric_quantization_config() ) model = torch._export.capture_pre_autograd_graph(self.model.sam_mask_decoder, sample_inputs) model = quantize_pt2e.prepare_pt2e(model, quantizer)
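# Descriptive note on the PT2E flow used here: capture_pre_autograd_graph traces the module
# into an exported FX graph, prepare_pt2e inserts observers according to the symmetric
# quantization config, the call on the next line runs one representative forward pass so the
# observers record activation ranges (a minimal calibration set of a single sample), and
# convert_pt2e then folds those ranges into quantize/dequantize ops before ai_edge_torch
# lowers the result to an int8 .tflite.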
model(self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features[0], high_res_features[1]) # calibration model = quantize_pt2e.convert_pt2e(model, fold_quantize=False) - #tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]}} with_quantizer = ai_edge_torch.convert( model, sample_inputs, quant_config=quant_config.QuantConfig(pt2e_quantizer=quantizer), - # _ai_edge_converter_flags=tfl_converter_flags ) with_quantizer.export("mask_decoder_"+model_id+"_int8.tflite") + edge_model = model + if import_from_tflite: + multimask_output_np = np.zeros((1), dtype=bool) + batched_mode_np = np.zeros((1), dtype=bool) + if multimask_output: + multimask_output_np[0] = True + if batched_mode: + batched_mode_np[0] = True low_res_masks, iou_predictions, _, _ = edge_model(self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, multimask_output_np, batched_mode_np, high_res_features[0], high_res_features[1]) low_res_masks = torch.Tensor(low_res_masks) iou_predictions = torch.Tensor(iou_predictions) From f9c8a69e22093fef83481fc391947b0feb3b776c Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Fri, 23 Aug 2024 21:30:34 +0900 Subject: [PATCH 16/79] Implement video predictor --- export_onnx.py => export_image_predictor.py | 0 export_video_predictor.py | 491 ++++++++++++++++++++ sam2/modeling/sam2_base.py | 9 +- 3 files changed, 496 insertions(+), 4 deletions(-) rename export_onnx.py => export_image_predictor.py (100%) create mode 100644 export_video_predictor.py diff --git a/export_onnx.py b/export_image_predictor.py similarity index 100% rename from export_onnx.py rename to export_image_predictor.py diff --git a/export_video_predictor.py b/export_video_predictor.py new file mode 100644 index 000000000..a9a78d668 --- /dev/null +++ b/export_video_predictor.py @@ -0,0 +1,491 @@ +# %% +using_colab = False + + +# %% +import os +import numpy as np +import torch +import matplotlib.pyplot as plt +from PIL import Image + +device = torch.device("cpu") +print(f"using device: {device}") + +from sam2.build_sam import build_sam2_video_predictor + +sam2_checkpoint = "./checkpoints/sam2_hiera_large.pt" +model_cfg = "sam2_hiera_l.yaml" + +predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device=device) + +# %% +def show_mask(mask, ax, obj_id=None, random_color=False): + if random_color: + color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0) + else: + cmap = plt.get_cmap("tab10") + cmap_idx = 0 if obj_id is None else obj_id + color = np.array([*cmap(cmap_idx)[:3], 0.6]) + h, w = mask.shape[-2:] + mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1) + ax.imshow(mask_image) + + +def show_points(coords, labels, ax, marker_size=200): + pos_points = coords[labels==1] + neg_points = coords[labels==0] + ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25) + ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25) + + +def show_box(box, ax): + x0, y0 = box[0], box[1] + w, h = box[2] - box[0], box[3] - box[1] + ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0, 0, 0, 0), lw=2)) + +video_dir = "./notebooks/videos/bedroom" + +# scan all the JPEG frame names in this directory +frame_names = [ + p for p in os.listdir(video_dir) + if 
os.path.splitext(p)[-1] in [".jpg", ".jpeg", ".JPG", ".JPEG"] +] +frame_names.sort(key=lambda p: int(os.path.splitext(p)[0])) + +# take a look the first video frame +frame_idx = 0 +plt.figure(figsize=(9, 6)) +plt.title(f"frame {frame_idx}") +plt.imshow(Image.open(os.path.join(video_dir, frame_names[frame_idx]))) + +# %% [markdown] +# #### Initialize the inference state + +# %% [markdown] +# SAM 2 requires stateful inference for interactive video segmentation, so we need to initialize an **inference state** on this video. +# +# During initialization, it loads all the JPEG frames in `video_path` and stores their pixels in `inference_state` (as shown in the progress bar below). + +# %% +inference_state = predictor.init_state(video_path=video_dir) + +# %% [markdown] +# ### Example 1: Segment & track one object + +# %% [markdown] +# Note: if you have run any previous tracking using this `inference_state`, please reset it first via `reset_state`. +# +# (The cell below is just for illustration; it's not needed to call `reset_state` here as this `inference_state` is just freshly initialized above.) + +# %% +predictor.reset_state(inference_state) + +# %% [markdown] +# #### Step 1: Add a first click on a frame + +# %% [markdown] +# To get started, let's try to segment the child on the left. +# +# Here we make a **positive click** at (x, y) = (210, 350) with label `1`, by sending their coordinates and labels into the `add_new_points_or_box` API. +# +# Note: label `1` indicates a *positive click (to add a region)* while label `0` indicates a *negative click (to remove a region)*. + +# %% +ann_frame_idx = 0 # the frame index we interact with +ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers) + +# Let's add a positive click at (x, y) = (210, 350) to get started +points = np.array([[210, 350]], dtype=np.float32) +# for labels, `1` means positive click and `0` means negative click +labels = np.array([1], np.int32) +_, out_obj_ids, out_mask_logits = predictor.add_new_points_or_box( + inference_state=inference_state, + frame_idx=ann_frame_idx, + obj_id=ann_obj_id, + points=points, + labels=labels, +) + +# show the results on the current (interacted) frame +plt.figure(figsize=(9, 6)) +plt.title(f"frame {ann_frame_idx}") +plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx]))) +show_points(points, labels, plt.gca()) +show_mask((out_mask_logits[0] > 0.0).cpu().numpy(), plt.gca(), obj_id=out_obj_ids[0]) + +# %% [markdown] +# #### Step 2: Add a second click to refine the prediction + +# %% [markdown] +# Hmm, it seems that although we wanted to segment the child on the left, the model predicts the mask for only the shorts -- this can happen since there is ambiguity from a single click about what the target object should be. We can refine the mask on this frame via another positive click on the child's shirt. +# +# Here we make a **second positive click** at (x, y) = (250, 220) with label `1` to expand the mask. +# +# Note: we need to send **all the clicks and their labels** (i.e. not just the last click) when calling `add_new_points_or_box`. 
+ +# %% +ann_frame_idx = 0 # the frame index we interact with +ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers) + +# Let's add a 2nd positive click at (x, y) = (250, 220) to refine the mask +# sending all clicks (and their labels) to `add_new_points_or_box` +points = np.array([[210, 350], [250, 220]], dtype=np.float32) +# for labels, `1` means positive click and `0` means negative click +labels = np.array([1, 1], np.int32) +_, out_obj_ids, out_mask_logits = predictor.add_new_points_or_box( + inference_state=inference_state, + frame_idx=ann_frame_idx, + obj_id=ann_obj_id, + points=points, + labels=labels, +) + +# show the results on the current (interacted) frame +plt.figure(figsize=(9, 6)) +plt.title(f"frame {ann_frame_idx}") +plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx]))) +show_points(points, labels, plt.gca()) +show_mask((out_mask_logits[0] > 0.0).cpu().numpy(), plt.gca(), obj_id=out_obj_ids[0]) + +# %% [markdown] +# With this 2nd refinement click, now we get a segmentation mask of the entire child on frame 0. + +# %% [markdown] +# #### Step 3: Propagate the prompts to get the masklet across the video + +# %% [markdown] +# To get the masklet throughout the entire video, we propagate the prompts using the `propagate_in_video` API. + +# %% +# run propagation throughout the video and collect the results in a dict +video_segments = {} # video_segments contains the per-frame segmentation results +for out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(inference_state): + video_segments[out_frame_idx] = { + out_obj_id: (out_mask_logits[i] > 0.0).cpu().numpy() + for i, out_obj_id in enumerate(out_obj_ids) + } + +# render the segmentation results every few frames +vis_frame_stride = 30 +plt.close("all") +for out_frame_idx in range(0, len(frame_names), vis_frame_stride): + plt.figure(figsize=(6, 4)) + plt.title(f"frame {out_frame_idx}") + plt.imshow(Image.open(os.path.join(video_dir, frame_names[out_frame_idx]))) + for out_obj_id, out_mask in video_segments[out_frame_idx].items(): + show_mask(out_mask, plt.gca(), obj_id=out_obj_id) + +# %% [markdown] +# #### Step 4: Add new prompts to further refine the masklet + +# %% [markdown] +# It appears that in the output masklet above, there are some imperfections in boundary details on frame 150. +# +# With SAM 2 we can fix the model predictions interactively. We can add a **negative click** at (x, y) = (82, 415) on this frame with label `0` to refine the masklet. Here we call the `add_new_points_or_box` API with a different `frame_idx` argument to indicate the frame index we want to refine. 
+ +# %% +ann_frame_idx = 150 # further refine some details on this frame +ann_obj_id = 1 # give a unique id to the object we interact with (it can be any integers) + +# show the segment before further refinement +plt.figure(figsize=(12, 8)) +plt.title(f"frame {ann_frame_idx} -- before refinement") +plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx]))) +show_mask(video_segments[ann_frame_idx][ann_obj_id], plt.gca(), obj_id=ann_obj_id) + +# Let's add a negative click on this frame at (x, y) = (82, 415) to refine the segment +points = np.array([[82, 415]], dtype=np.float32) +# for labels, `1` means positive click and `0` means negative click +labels = np.array([0], np.int32) +_, _, out_mask_logits = predictor.add_new_points_or_box( + inference_state=inference_state, + frame_idx=ann_frame_idx, + obj_id=ann_obj_id, + points=points, + labels=labels, +) + +# show the segment after the further refinement +plt.figure(figsize=(9, 6)) +plt.title(f"frame {ann_frame_idx} -- after refinement") +plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx]))) +show_points(points, labels, plt.gca()) +show_mask((out_mask_logits > 0.0).cpu().numpy(), plt.gca(), obj_id=ann_obj_id) + +# %% [markdown] +# #### Step 5: Propagate the prompts (again) to get the masklet across the video + +# %% [markdown] +# Let's get an updated masklet for the entire video. Here we call `propagate_in_video` again to propagate all the prompts after adding the new refinement click above. + +# %% +# run propagation throughout the video and collect the results in a dict +video_segments = {} # video_segments contains the per-frame segmentation results +for out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(inference_state): + video_segments[out_frame_idx] = { + out_obj_id: (out_mask_logits[i] > 0.0).cpu().numpy() + for i, out_obj_id in enumerate(out_obj_ids) + } + +# render the segmentation results every few frames +vis_frame_stride = 30 +plt.close("all") +for out_frame_idx in range(0, len(frame_names), vis_frame_stride): + plt.figure(figsize=(6, 4)) + plt.title(f"frame {out_frame_idx}") + plt.imshow(Image.open(os.path.join(video_dir, frame_names[out_frame_idx]))) + for out_obj_id, out_mask in video_segments[out_frame_idx].items(): + show_mask(out_mask, plt.gca(), obj_id=out_obj_id) + +# %% [markdown] +# The segments now look good on all frames. + +# %% [markdown] +# ### Example 2: Segment an object using box prompt + +# %% [markdown] +# Note: if you have run any previous tracking using this `inference_state`, please reset it first via `reset_state`. + +# %% +predictor.reset_state(inference_state) + +# %% [markdown] +# In addition to using clicks as inputs, SAM 2 also supports segmenting and tracking objects in a video via **bounding boxes**. +# +# In the example below, we segment the child on the right using a **box prompt** of (x_min, y_min, x_max, y_max) = (300, 0, 500, 400) on frame 0 as input into the `add_new_points_or_box` API. 
+ +# %% +ann_frame_idx = 0 # the frame index we interact with +ann_obj_id = 4 # give a unique id to each object we interact with (it can be any integers) + +# Let's add a box at (x_min, y_min, x_max, y_max) = (300, 0, 500, 400) to get started +box = np.array([300, 0, 500, 400], dtype=np.float32) +_, out_obj_ids, out_mask_logits = predictor.add_new_points_or_box( + inference_state=inference_state, + frame_idx=ann_frame_idx, + obj_id=ann_obj_id, + box=box, +) + +# show the results on the current (interacted) frame +plt.figure(figsize=(9, 6)) +plt.title(f"frame {ann_frame_idx}") +plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx]))) +show_box(box, plt.gca()) +show_mask((out_mask_logits[0] > 0.0).cpu().numpy(), plt.gca(), obj_id=out_obj_ids[0]) + +# %% [markdown] +# Here, SAM 2 gets a pretty good segmentation mask of the entire child, even though the input bounding box is not perfectly tight around the object. +# +# Similar to the previous example, if the returned mask from is not perfect when using a box prompt, we can also further **refine** the output using positive or negative clicks. To illustrate this, here we make a **positive click** at (x, y) = (460, 60) with label `1` to expand the segment around the child's hair. +# +# Note: to refine the segmentation mask from a box prompt, we need to send **both the original box input and all subsequent refinement clicks and their labels** when calling `add_new_points_or_box`. + +# %% +ann_frame_idx = 0 # the frame index we interact with +ann_obj_id = 4 # give a unique id to each object we interact with (it can be any integers) + +# Let's add a positive click at (x, y) = (460, 60) to refine the mask +points = np.array([[460, 60]], dtype=np.float32) +# for labels, `1` means positive click and `0` means negative click +labels = np.array([1], np.int32) +# note that we also need to send the original box input along with +# the new refinement click together into `add_new_points_or_box` +box = np.array([300, 0, 500, 400], dtype=np.float32) +_, out_obj_ids, out_mask_logits = predictor.add_new_points_or_box( + inference_state=inference_state, + frame_idx=ann_frame_idx, + obj_id=ann_obj_id, + points=points, + labels=labels, + box=box, +) + +# show the results on the current (interacted) frame +plt.figure(figsize=(9, 6)) +plt.title(f"frame {ann_frame_idx}") +plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx]))) +show_box(box, plt.gca()) +show_points(points, labels, plt.gca()) +show_mask((out_mask_logits[0] > 0.0).cpu().numpy(), plt.gca(), obj_id=out_obj_ids[0]) + +# %% [markdown] +# Then, to get the masklet throughout the entire video, we propagate the prompts using the `propagate_in_video` API. 
+ +# %% +# run propagation throughout the video and collect the results in a dict +video_segments = {} # video_segments contains the per-frame segmentation results +for out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(inference_state): + video_segments[out_frame_idx] = { + out_obj_id: (out_mask_logits[i] > 0.0).cpu().numpy() + for i, out_obj_id in enumerate(out_obj_ids) + } + +# render the segmentation results every few frames +vis_frame_stride = 30 +plt.close("all") +for out_frame_idx in range(0, len(frame_names), vis_frame_stride): + plt.figure(figsize=(6, 4)) + plt.title(f"frame {out_frame_idx}") + plt.imshow(Image.open(os.path.join(video_dir, frame_names[out_frame_idx]))) + for out_obj_id, out_mask in video_segments[out_frame_idx].items(): + show_mask(out_mask, plt.gca(), obj_id=out_obj_id) + +# %% [markdown] +# Note that in addition to clicks or boxes, SAM 2 also supports directly using a **mask prompt** as input via the `add_new_mask` method in the `SAM2VideoPredictor` class. This can be helpful in e.g. semi-supervised VOS evaluations (see [tools/vos_inference.py](https://github.com/facebookresearch/segment-anything-2/blob/main/tools/vos_inference.py) for an example). + +# %% [markdown] +# ### Example 3: Segment multiple objects simultaneously + +# %% [markdown] +# Note: if you have run any previous tracking using this `inference_state`, please reset it first via `reset_state`. + +# %% +predictor.reset_state(inference_state) + +# %% [markdown] +# #### Step 1: Add two objects on a frame + +# %% [markdown] +# SAM 2 can also segment and track two or more objects at the same time. One way, of course, is to do them one by one. However, it would be more efficient to batch them together (e.g. so that we can share the image features between objects to reduce computation costs). +# +# This time, let's focus on object parts and segment **the shirts of both childen** in this video. Here we add prompts for these two objects and assign each of them a unique object id. + +# %% +prompts = {} # hold all the clicks we add for visualization + +# %% [markdown] +# Add the first object (the left child's shirt) with a **positive click** at (x, y) = (200, 300) on frame 0. +# +# We assign it to object id `2` (it can be arbitrary integers, and only needs to be unique for each object to track), which is passed to the `add_new_points_or_box` API to distinguish the object we are clicking upon. 
+ +# %% +ann_frame_idx = 0 # the frame index we interact with +ann_obj_id = 2 # give a unique id to each object we interact with (it can be any integers) + +# Let's add a positive click at (x, y) = (200, 300) to get started on the first object +points = np.array([[200, 300]], dtype=np.float32) +# for labels, `1` means positive click and `0` means negative click +labels = np.array([1], np.int32) +prompts[ann_obj_id] = points, labels +_, out_obj_ids, out_mask_logits = predictor.add_new_points_or_box( + inference_state=inference_state, + frame_idx=ann_frame_idx, + obj_id=ann_obj_id, + points=points, + labels=labels, +) + +# show the results on the current (interacted) frame +plt.figure(figsize=(9, 6)) +plt.title(f"frame {ann_frame_idx}") +plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx]))) +show_points(points, labels, plt.gca()) +for i, out_obj_id in enumerate(out_obj_ids): + show_points(*prompts[out_obj_id], plt.gca()) + show_mask((out_mask_logits[i] > 0.0).cpu().numpy(), plt.gca(), obj_id=out_obj_id) + +# %% [markdown] +# Hmm, this time we just want to select the child's shirt, but the model predicts the mask for the entire child. Let's refine the prediction with a **negative click** at (x, y) = (275, 175). + +# %% +# add the first object +ann_frame_idx = 0 # the frame index we interact with +ann_obj_id = 2 # give a unique id to each object we interact with (it can be any integers) + +# Let's add a 2nd negative click at (x, y) = (275, 175) to refine the first object +# sending all clicks (and their labels) to `add_new_points_or_box` +points = np.array([[200, 300], [275, 175]], dtype=np.float32) +# for labels, `1` means positive click and `0` means negative click +labels = np.array([1, 0], np.int32) +prompts[ann_obj_id] = points, labels +_, out_obj_ids, out_mask_logits = predictor.add_new_points_or_box( + inference_state=inference_state, + frame_idx=ann_frame_idx, + obj_id=ann_obj_id, + points=points, + labels=labels, +) + +# show the results on the current (interacted) frame +plt.figure(figsize=(9, 6)) +plt.title(f"frame {ann_frame_idx}") +plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx]))) +show_points(points, labels, plt.gca()) +for i, out_obj_id in enumerate(out_obj_ids): + show_points(*prompts[out_obj_id], plt.gca()) + show_mask((out_mask_logits[i] > 0.0).cpu().numpy(), plt.gca(), obj_id=out_obj_id) + +# %% [markdown] +# After the 2nd negative click, now we get the left child's shirt as our first object. +# +# Let's move on to the second object (the right child's shirt) with a positive click at (x, y) = (400, 150) on frame 0. Here we assign object id `3` to this second object (it can be arbitrary integers, and only needs to be unique for each object to track). +# +# Note: when there are multiple objects, the `add_new_points_or_box` API will return a list of masks for each object. 
+ +# %% +ann_frame_idx = 0 # the frame index we interact with +ann_obj_id = 3 # give a unique id to each object we interact with (it can be any integers) + +# Let's now move on to the second object we want to track (giving it object id `3`) +# with a positive click at (x, y) = (400, 150) +points = np.array([[400, 150]], dtype=np.float32) +# for labels, `1` means positive click and `0` means negative click +labels = np.array([1], np.int32) +prompts[ann_obj_id] = points, labels + +# `add_new_points_or_box` returns masks for all objects added so far on this interacted frame +_, out_obj_ids, out_mask_logits = predictor.add_new_points_or_box( + inference_state=inference_state, + frame_idx=ann_frame_idx, + obj_id=ann_obj_id, + points=points, + labels=labels, +) + +# show the results on the current (interacted) frame on all objects +plt.figure(figsize=(9, 6)) +plt.title(f"frame {ann_frame_idx}") +plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx]))) +show_points(points, labels, plt.gca()) +for i, out_obj_id in enumerate(out_obj_ids): + show_points(*prompts[out_obj_id], plt.gca()) + show_mask((out_mask_logits[i] > 0.0).cpu().numpy(), plt.gca(), obj_id=out_obj_id) + +# %% [markdown] +# This time the model predicts the mask of the shirt we want to track in just one click. Nice! + +# %% [markdown] +# #### Step 2: Propagate the prompts to get masklets across the video + +# %% [markdown] +# Now, we propagate the prompts for both objects to get their masklets throughout the video. +# +# Note: when there are multiple objects, the `propagate_in_video` API will return a list of masks for each object. + +# %% +# run propagation throughout the video and collect the results in a dict +video_segments = {} # video_segments contains the per-frame segmentation results +for out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(inference_state): + video_segments[out_frame_idx] = { + out_obj_id: (out_mask_logits[i] > 0.0).cpu().numpy() + for i, out_obj_id in enumerate(out_obj_ids) + } + +# render the segmentation results every few frames +vis_frame_stride = 30 +plt.close("all") +for out_frame_idx in range(0, len(frame_names), vis_frame_stride): + plt.figure(figsize=(6, 4)) + plt.title(f"frame {out_frame_idx}") + plt.imshow(Image.open(os.path.join(video_dir, frame_names[out_frame_idx]))) + for out_obj_id, out_mask in video_segments[out_frame_idx].items(): + show_mask(out_mask, plt.gca(), obj_id=out_obj_id) + +# %% [markdown] +# Looks like both children's shirts are well segmented in this video. +# +# Now you can try SAM 2 on your own videos and use cases! + + diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index c485dddc6..4760cf4b9 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -351,9 +351,9 @@ def _forward_sam_heads( # a learned `no_mask_embed` to indicate no mask input in this case). 
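        # Note on the change just below: the prompt encoder is now called through forward_normal
        # with explicit coords/labels (instead of a points tuple plus boxes), and the mask decoder
        # receives high_res_features as two separate tensors, presumably so the video predictor
        # follows the same flattened, export-friendly signatures used for the ONNX/TFLite image
        # predictor exports in the earlier patches.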
sam_mask_prompt = None - sparse_embeddings, dense_embeddings = self.sam_prompt_encoder( - points=(sam_point_coords, sam_point_labels), - boxes=None, + sparse_embeddings, dense_embeddings = self.sam_prompt_encoder.forward_normal( + coords=sam_point_coords, + labels=sam_point_labels, masks=sam_mask_prompt, ) ( @@ -368,7 +368,8 @@ def _forward_sam_heads( dense_prompt_embeddings=dense_embeddings, multimask_output=multimask_output, repeat_image=False, # the image is already batched - high_res_features=high_res_features, + high_res_features1=high_res_features[0], + high_res_features2=high_res_features[1], ) if self.pred_obj_scores: is_obj_appearing = object_score_logits > 0 From 98a19fb961e0188b332c35ba9a39faafb9920687 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Fri, 23 Aug 2024 21:41:07 +0900 Subject: [PATCH 17/79] Added short video --- export_video_predictor.py | 408 +---------------------- notebooks/videos/bedroom_short/00000.jpg | Bin 0 -> 68696 bytes notebooks/videos/bedroom_short/00001.jpg | Bin 0 -> 69250 bytes notebooks/videos/bedroom_short/00002.jpg | Bin 0 -> 69456 bytes notebooks/videos/bedroom_short/00003.jpg | Bin 0 -> 66813 bytes notebooks/videos/bedroom_short/00004.jpg | Bin 0 -> 64859 bytes 6 files changed, 6 insertions(+), 402 deletions(-) create mode 100644 notebooks/videos/bedroom_short/00000.jpg create mode 100644 notebooks/videos/bedroom_short/00001.jpg create mode 100644 notebooks/videos/bedroom_short/00002.jpg create mode 100644 notebooks/videos/bedroom_short/00003.jpg create mode 100644 notebooks/videos/bedroom_short/00004.jpg diff --git a/export_video_predictor.py b/export_video_predictor.py index a9a78d668..62dad2977 100644 --- a/export_video_predictor.py +++ b/export_video_predictor.py @@ -1,9 +1,4 @@ -# %% -using_colab = False - - -# %% -import os +import os import numpy as np import torch import matplotlib.pyplot as plt @@ -19,7 +14,7 @@ predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device=device) -# %% + def show_mask(mask, ax, obj_id=None, random_color=False): if random_color: color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0) @@ -44,7 +39,7 @@ def show_box(box, ax): w, h = box[2] - box[0], box[3] - box[1] ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0, 0, 0, 0), lw=2)) -video_dir = "./notebooks/videos/bedroom" +video_dir = "./notebooks/videos/bedroom_short" # scan all the JPEG frame names in this directory frame_names = [ @@ -53,78 +48,9 @@ def show_box(box, ax): ] frame_names.sort(key=lambda p: int(os.path.splitext(p)[0])) -# take a look the first video frame -frame_idx = 0 -plt.figure(figsize=(9, 6)) -plt.title(f"frame {frame_idx}") -plt.imshow(Image.open(os.path.join(video_dir, frame_names[frame_idx]))) - -# %% [markdown] -# #### Initialize the inference state - -# %% [markdown] -# SAM 2 requires stateful inference for interactive video segmentation, so we need to initialize an **inference state** on this video. -# -# During initialization, it loads all the JPEG frames in `video_path` and stores their pixels in `inference_state` (as shown in the progress bar below). - -# %% inference_state = predictor.init_state(video_path=video_dir) - -# %% [markdown] -# ### Example 1: Segment & track one object - -# %% [markdown] -# Note: if you have run any previous tracking using this `inference_state`, please reset it first via `reset_state`. -# -# (The cell below is just for illustration; it's not needed to call `reset_state` here as this `inference_state` is just freshly initialized above.) 
- -# %% predictor.reset_state(inference_state) -# %% [markdown] -# #### Step 1: Add a first click on a frame - -# %% [markdown] -# To get started, let's try to segment the child on the left. -# -# Here we make a **positive click** at (x, y) = (210, 350) with label `1`, by sending their coordinates and labels into the `add_new_points_or_box` API. -# -# Note: label `1` indicates a *positive click (to add a region)* while label `0` indicates a *negative click (to remove a region)*. - -# %% -ann_frame_idx = 0 # the frame index we interact with -ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers) - -# Let's add a positive click at (x, y) = (210, 350) to get started -points = np.array([[210, 350]], dtype=np.float32) -# for labels, `1` means positive click and `0` means negative click -labels = np.array([1], np.int32) -_, out_obj_ids, out_mask_logits = predictor.add_new_points_or_box( - inference_state=inference_state, - frame_idx=ann_frame_idx, - obj_id=ann_obj_id, - points=points, - labels=labels, -) - -# show the results on the current (interacted) frame -plt.figure(figsize=(9, 6)) -plt.title(f"frame {ann_frame_idx}") -plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx]))) -show_points(points, labels, plt.gca()) -show_mask((out_mask_logits[0] > 0.0).cpu().numpy(), plt.gca(), obj_id=out_obj_ids[0]) - -# %% [markdown] -# #### Step 2: Add a second click to refine the prediction - -# %% [markdown] -# Hmm, it seems that although we wanted to segment the child on the left, the model predicts the mask for only the shorts -- this can happen since there is ambiguity from a single click about what the target object should be. We can refine the mask on this frame via another positive click on the child's shirt. -# -# Here we make a **second positive click** at (x, y) = (250, 220) with label `1` to expand the mask. -# -# Note: we need to send **all the clicks and their labels** (i.e. not just the last click) when calling `add_new_points_or_box`. - -# %% ann_frame_idx = 0 # the frame index we interact with ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers) @@ -147,79 +73,8 @@ def show_box(box, ax): plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx]))) show_points(points, labels, plt.gca()) show_mask((out_mask_logits[0] > 0.0).cpu().numpy(), plt.gca(), obj_id=out_obj_ids[0]) +plt.show() -# %% [markdown] -# With this 2nd refinement click, now we get a segmentation mask of the entire child on frame 0. - -# %% [markdown] -# #### Step 3: Propagate the prompts to get the masklet across the video - -# %% [markdown] -# To get the masklet throughout the entire video, we propagate the prompts using the `propagate_in_video` API. 
- -# %% -# run propagation throughout the video and collect the results in a dict -video_segments = {} # video_segments contains the per-frame segmentation results -for out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(inference_state): - video_segments[out_frame_idx] = { - out_obj_id: (out_mask_logits[i] > 0.0).cpu().numpy() - for i, out_obj_id in enumerate(out_obj_ids) - } - -# render the segmentation results every few frames -vis_frame_stride = 30 -plt.close("all") -for out_frame_idx in range(0, len(frame_names), vis_frame_stride): - plt.figure(figsize=(6, 4)) - plt.title(f"frame {out_frame_idx}") - plt.imshow(Image.open(os.path.join(video_dir, frame_names[out_frame_idx]))) - for out_obj_id, out_mask in video_segments[out_frame_idx].items(): - show_mask(out_mask, plt.gca(), obj_id=out_obj_id) - -# %% [markdown] -# #### Step 4: Add new prompts to further refine the masklet - -# %% [markdown] -# It appears that in the output masklet above, there are some imperfections in boundary details on frame 150. -# -# With SAM 2 we can fix the model predictions interactively. We can add a **negative click** at (x, y) = (82, 415) on this frame with label `0` to refine the masklet. Here we call the `add_new_points_or_box` API with a different `frame_idx` argument to indicate the frame index we want to refine. - -# %% -ann_frame_idx = 150 # further refine some details on this frame -ann_obj_id = 1 # give a unique id to the object we interact with (it can be any integers) - -# show the segment before further refinement -plt.figure(figsize=(12, 8)) -plt.title(f"frame {ann_frame_idx} -- before refinement") -plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx]))) -show_mask(video_segments[ann_frame_idx][ann_obj_id], plt.gca(), obj_id=ann_obj_id) - -# Let's add a negative click on this frame at (x, y) = (82, 415) to refine the segment -points = np.array([[82, 415]], dtype=np.float32) -# for labels, `1` means positive click and `0` means negative click -labels = np.array([0], np.int32) -_, _, out_mask_logits = predictor.add_new_points_or_box( - inference_state=inference_state, - frame_idx=ann_frame_idx, - obj_id=ann_obj_id, - points=points, - labels=labels, -) - -# show the segment after the further refinement -plt.figure(figsize=(9, 6)) -plt.title(f"frame {ann_frame_idx} -- after refinement") -plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx]))) -show_points(points, labels, plt.gca()) -show_mask((out_mask_logits > 0.0).cpu().numpy(), plt.gca(), obj_id=ann_obj_id) - -# %% [markdown] -# #### Step 5: Propagate the prompts (again) to get the masklet across the video - -# %% [markdown] -# Let's get an updated masklet for the entire video. Here we call `propagate_in_video` again to propagate all the prompts after adding the new refinement click above. 
- -# %% # run propagation throughout the video and collect the results in a dict video_segments = {} # video_segments contains the per-frame segmentation results for out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(inference_state): @@ -229,7 +84,7 @@ def show_box(box, ax): } # render the segmentation results every few frames -vis_frame_stride = 30 +vis_frame_stride = 1 plt.close("all") for out_frame_idx in range(0, len(frame_names), vis_frame_stride): plt.figure(figsize=(6, 4)) @@ -237,255 +92,4 @@ def show_box(box, ax): plt.imshow(Image.open(os.path.join(video_dir, frame_names[out_frame_idx]))) for out_obj_id, out_mask in video_segments[out_frame_idx].items(): show_mask(out_mask, plt.gca(), obj_id=out_obj_id) - -# %% [markdown] -# The segments now look good on all frames. - -# %% [markdown] -# ### Example 2: Segment an object using box prompt - -# %% [markdown] -# Note: if you have run any previous tracking using this `inference_state`, please reset it first via `reset_state`. - -# %% -predictor.reset_state(inference_state) - -# %% [markdown] -# In addition to using clicks as inputs, SAM 2 also supports segmenting and tracking objects in a video via **bounding boxes**. -# -# In the example below, we segment the child on the right using a **box prompt** of (x_min, y_min, x_max, y_max) = (300, 0, 500, 400) on frame 0 as input into the `add_new_points_or_box` API. - -# %% -ann_frame_idx = 0 # the frame index we interact with -ann_obj_id = 4 # give a unique id to each object we interact with (it can be any integers) - -# Let's add a box at (x_min, y_min, x_max, y_max) = (300, 0, 500, 400) to get started -box = np.array([300, 0, 500, 400], dtype=np.float32) -_, out_obj_ids, out_mask_logits = predictor.add_new_points_or_box( - inference_state=inference_state, - frame_idx=ann_frame_idx, - obj_id=ann_obj_id, - box=box, -) - -# show the results on the current (interacted) frame -plt.figure(figsize=(9, 6)) -plt.title(f"frame {ann_frame_idx}") -plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx]))) -show_box(box, plt.gca()) -show_mask((out_mask_logits[0] > 0.0).cpu().numpy(), plt.gca(), obj_id=out_obj_ids[0]) - -# %% [markdown] -# Here, SAM 2 gets a pretty good segmentation mask of the entire child, even though the input bounding box is not perfectly tight around the object. -# -# Similar to the previous example, if the returned mask from is not perfect when using a box prompt, we can also further **refine** the output using positive or negative clicks. To illustrate this, here we make a **positive click** at (x, y) = (460, 60) with label `1` to expand the segment around the child's hair. -# -# Note: to refine the segmentation mask from a box prompt, we need to send **both the original box input and all subsequent refinement clicks and their labels** when calling `add_new_points_or_box`. 
- -# %% -ann_frame_idx = 0 # the frame index we interact with -ann_obj_id = 4 # give a unique id to each object we interact with (it can be any integers) - -# Let's add a positive click at (x, y) = (460, 60) to refine the mask -points = np.array([[460, 60]], dtype=np.float32) -# for labels, `1` means positive click and `0` means negative click -labels = np.array([1], np.int32) -# note that we also need to send the original box input along with -# the new refinement click together into `add_new_points_or_box` -box = np.array([300, 0, 500, 400], dtype=np.float32) -_, out_obj_ids, out_mask_logits = predictor.add_new_points_or_box( - inference_state=inference_state, - frame_idx=ann_frame_idx, - obj_id=ann_obj_id, - points=points, - labels=labels, - box=box, -) - -# show the results on the current (interacted) frame -plt.figure(figsize=(9, 6)) -plt.title(f"frame {ann_frame_idx}") -plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx]))) -show_box(box, plt.gca()) -show_points(points, labels, plt.gca()) -show_mask((out_mask_logits[0] > 0.0).cpu().numpy(), plt.gca(), obj_id=out_obj_ids[0]) - -# %% [markdown] -# Then, to get the masklet throughout the entire video, we propagate the prompts using the `propagate_in_video` API. - -# %% -# run propagation throughout the video and collect the results in a dict -video_segments = {} # video_segments contains the per-frame segmentation results -for out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(inference_state): - video_segments[out_frame_idx] = { - out_obj_id: (out_mask_logits[i] > 0.0).cpu().numpy() - for i, out_obj_id in enumerate(out_obj_ids) - } - -# render the segmentation results every few frames -vis_frame_stride = 30 -plt.close("all") -for out_frame_idx in range(0, len(frame_names), vis_frame_stride): - plt.figure(figsize=(6, 4)) - plt.title(f"frame {out_frame_idx}") - plt.imshow(Image.open(os.path.join(video_dir, frame_names[out_frame_idx]))) - for out_obj_id, out_mask in video_segments[out_frame_idx].items(): - show_mask(out_mask, plt.gca(), obj_id=out_obj_id) - -# %% [markdown] -# Note that in addition to clicks or boxes, SAM 2 also supports directly using a **mask prompt** as input via the `add_new_mask` method in the `SAM2VideoPredictor` class. This can be helpful in e.g. semi-supervised VOS evaluations (see [tools/vos_inference.py](https://github.com/facebookresearch/segment-anything-2/blob/main/tools/vos_inference.py) for an example). - -# %% [markdown] -# ### Example 3: Segment multiple objects simultaneously - -# %% [markdown] -# Note: if you have run any previous tracking using this `inference_state`, please reset it first via `reset_state`. - -# %% -predictor.reset_state(inference_state) - -# %% [markdown] -# #### Step 1: Add two objects on a frame - -# %% [markdown] -# SAM 2 can also segment and track two or more objects at the same time. One way, of course, is to do them one by one. However, it would be more efficient to batch them together (e.g. so that we can share the image features between objects to reduce computation costs). -# -# This time, let's focus on object parts and segment **the shirts of both childen** in this video. Here we add prompts for these two objects and assign each of them a unique object id. - -# %% -prompts = {} # hold all the clicks we add for visualization - -# %% [markdown] -# Add the first object (the left child's shirt) with a **positive click** at (x, y) = (200, 300) on frame 0. 
-# -# We assign it to object id `2` (it can be arbitrary integers, and only needs to be unique for each object to track), which is passed to the `add_new_points_or_box` API to distinguish the object we are clicking upon. - -# %% -ann_frame_idx = 0 # the frame index we interact with -ann_obj_id = 2 # give a unique id to each object we interact with (it can be any integers) - -# Let's add a positive click at (x, y) = (200, 300) to get started on the first object -points = np.array([[200, 300]], dtype=np.float32) -# for labels, `1` means positive click and `0` means negative click -labels = np.array([1], np.int32) -prompts[ann_obj_id] = points, labels -_, out_obj_ids, out_mask_logits = predictor.add_new_points_or_box( - inference_state=inference_state, - frame_idx=ann_frame_idx, - obj_id=ann_obj_id, - points=points, - labels=labels, -) - -# show the results on the current (interacted) frame -plt.figure(figsize=(9, 6)) -plt.title(f"frame {ann_frame_idx}") -plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx]))) -show_points(points, labels, plt.gca()) -for i, out_obj_id in enumerate(out_obj_ids): - show_points(*prompts[out_obj_id], plt.gca()) - show_mask((out_mask_logits[i] > 0.0).cpu().numpy(), plt.gca(), obj_id=out_obj_id) - -# %% [markdown] -# Hmm, this time we just want to select the child's shirt, but the model predicts the mask for the entire child. Let's refine the prediction with a **negative click** at (x, y) = (275, 175). - -# %% -# add the first object -ann_frame_idx = 0 # the frame index we interact with -ann_obj_id = 2 # give a unique id to each object we interact with (it can be any integers) - -# Let's add a 2nd negative click at (x, y) = (275, 175) to refine the first object -# sending all clicks (and their labels) to `add_new_points_or_box` -points = np.array([[200, 300], [275, 175]], dtype=np.float32) -# for labels, `1` means positive click and `0` means negative click -labels = np.array([1, 0], np.int32) -prompts[ann_obj_id] = points, labels -_, out_obj_ids, out_mask_logits = predictor.add_new_points_or_box( - inference_state=inference_state, - frame_idx=ann_frame_idx, - obj_id=ann_obj_id, - points=points, - labels=labels, -) - -# show the results on the current (interacted) frame -plt.figure(figsize=(9, 6)) -plt.title(f"frame {ann_frame_idx}") -plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx]))) -show_points(points, labels, plt.gca()) -for i, out_obj_id in enumerate(out_obj_ids): - show_points(*prompts[out_obj_id], plt.gca()) - show_mask((out_mask_logits[i] > 0.0).cpu().numpy(), plt.gca(), obj_id=out_obj_id) - -# %% [markdown] -# After the 2nd negative click, now we get the left child's shirt as our first object. -# -# Let's move on to the second object (the right child's shirt) with a positive click at (x, y) = (400, 150) on frame 0. Here we assign object id `3` to this second object (it can be arbitrary integers, and only needs to be unique for each object to track). -# -# Note: when there are multiple objects, the `add_new_points_or_box` API will return a list of masks for each object. 
- -# %% -ann_frame_idx = 0 # the frame index we interact with -ann_obj_id = 3 # give a unique id to each object we interact with (it can be any integers) - -# Let's now move on to the second object we want to track (giving it object id `3`) -# with a positive click at (x, y) = (400, 150) -points = np.array([[400, 150]], dtype=np.float32) -# for labels, `1` means positive click and `0` means negative click -labels = np.array([1], np.int32) -prompts[ann_obj_id] = points, labels - -# `add_new_points_or_box` returns masks for all objects added so far on this interacted frame -_, out_obj_ids, out_mask_logits = predictor.add_new_points_or_box( - inference_state=inference_state, - frame_idx=ann_frame_idx, - obj_id=ann_obj_id, - points=points, - labels=labels, -) - -# show the results on the current (interacted) frame on all objects -plt.figure(figsize=(9, 6)) -plt.title(f"frame {ann_frame_idx}") -plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx]))) -show_points(points, labels, plt.gca()) -for i, out_obj_id in enumerate(out_obj_ids): - show_points(*prompts[out_obj_id], plt.gca()) - show_mask((out_mask_logits[i] > 0.0).cpu().numpy(), plt.gca(), obj_id=out_obj_id) - -# %% [markdown] -# This time the model predicts the mask of the shirt we want to track in just one click. Nice! - -# %% [markdown] -# #### Step 2: Propagate the prompts to get masklets across the video - -# %% [markdown] -# Now, we propagate the prompts for both objects to get their masklets throughout the video. -# -# Note: when there are multiple objects, the `propagate_in_video` API will return a list of masks for each object. - -# %% -# run propagation throughout the video and collect the results in a dict -video_segments = {} # video_segments contains the per-frame segmentation results -for out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(inference_state): - video_segments[out_frame_idx] = { - out_obj_id: (out_mask_logits[i] > 0.0).cpu().numpy() - for i, out_obj_id in enumerate(out_obj_ids) - } - -# render the segmentation results every few frames -vis_frame_stride = 30 -plt.close("all") -for out_frame_idx in range(0, len(frame_names), vis_frame_stride): - plt.figure(figsize=(6, 4)) - plt.title(f"frame {out_frame_idx}") - plt.imshow(Image.open(os.path.join(video_dir, frame_names[out_frame_idx]))) - for out_obj_id, out_mask in video_segments[out_frame_idx].items(): - show_mask(out_mask, plt.gca(), obj_id=out_obj_id) - -# %% [markdown] -# Looks like both children's shirts are well segmented in this video. -# -# Now you can try SAM 2 on your own videos and use cases! - - + plt.show() diff --git a/notebooks/videos/bedroom_short/00000.jpg b/notebooks/videos/bedroom_short/00000.jpg new file mode 100644 index 0000000000000000000000000000000000000000..26c7b234cdc80cd20591e08b9713076c4c23024a GIT binary patch literal 68696 zcmaHS1yCGK7wzH%4IW$^z&&J8k!^X+M0eD>kNB~d~|7l47Hst@k zp`f6kqN2ijWaNKmsBhl9ft`R2XlQ5{80hF&SXekX_^-WyUH||d0S?vx|80l}NC0>^ zWE505!2fvx0Dyx>Ktw`*3xGp_M?gSELqo4Icy0;99(K7PBA<#8Z~DVciNH5>#FB2M*avV`?^>mm##g?i;KIrz-#y(J+k2+8&_zlgx&s0Ck%_Ye& zIQyH<7X$1-l3u3>S5J=&VBb)i*EH77@FwiX@J+OHuKPA5UhOhvcOynXD0O)5uW7dC zh^tK2n#4Ya~#o45}r))^z{-#HGyMg#Z%6DC)w=_tW$&$gU&`;{D`KjI! 
[... base85-encoded JPEG data for notebooks/videos/bedroom_short/00000.jpg truncated ...]
zUq;H{PN+J@IwlQ?JGI zxM6W_s_6OVDT16np{PbDj(%${z*^hvye_beaMh8aug zbL}7O!E#(l>$Y2#>Ls}&2(FE09?RpyA(tK5atl>&0c$RM8dW0SB+7qz$xkjnhRS7()bp_T`azlJ!@A@8NQ}h&(e|< zhk^#E{X&AFxT3iwO5xs-pluhq36e^qHfSx%NyN&1Ov#Z$VkfxTAfa&* zZw{bP5b8kMmoB)i)S4tZnBC|j4rnSr@exLy>X8#ETsxOa=Hwy}1#E$+pOQ`PQ!2gt zi6}LPNzzlPPKr9oeb`6ibU8#u(86ClWau9O{{Rw!FaUe<*jDNDnp93@I+w;as9T7f z!aI;BluxNc-(5p}ZtDK@j5{T;H`KS5pt@hImxn z{{VWfsm;GrPyJOUqxvNd&!f~A-zZLJ_1u)dH6VmDC1kFWxJm%YD-x2?D+L>YZn#R} zlF2BbEQ-lgmCK|E;ZdC`VAbFln-SjSB}72$(JOq^nJ9PT7a<`bc$j^CA}Jpg(rCf|0CGqtAC;!z�c<$Fm`7Sje580Nuo*R_z zAt8`RmAEqz*7QdsbY$|0JxF>e{cH6U_wsC`uuSz;PWKJDAp1aMR_flfME?Ly$QV=- zO3{iD9Q&mkfjjhF{C|*9Pr7W4z@7R|{vrL4h`(|iqwOXj;8}WONlT2v%k%CnEY7tx z2Xa~tp#!!aYrUyT$HYD^|Xa_DewQ=tnBsEgFLj8{tSs0=~Q_4?H1Ssj_`UT+X| zYV#^HzUG*aDR=mZt%;$#NcPfBVsUy=X|s1HEioMAcLk3lZp6<-@Yz?^_QoK!PIb=v zuVbWbx}C)7!{8Y5Tf^wv!sua14e^cUc+N+~Bj&L5&Xv=K6$^Wuxo3GP5H(b$GnIw& zLQa};;8e{}?-GQ(Njs9sSD{gqfSbek$!flsH-$FtXuN7ReZlvStw*wLP~TMJ>s(7rpHtMN@qOaY zSqTDDV25uzB3g+gE~xBzlhRM_M`M;XxM=?XQh)VSW+ zo-0mYa^=wyUWA^>zruZMb&@@SN}-q1hv7c8y0i2=*0DLI`hYG^ByAD;K>+C%MbXdc zB_m06ggfq~u?HFCiFSWRTM7$W0~sMfJH|(?6eW@hf?d*z;F&Y(5a+y-!2-*!PwGih zJBC}&KV`ALQjz2$3DeY?3w7Lyu3|DSM%f#$BV9vmNytVgPObbGwtKU6*$5Fm`?qAg z<}lQv^PPkQ_13x0WeOS0A+PXP7H#he@y;ER0Bi3v>u7rBZO17Rx{n82s zbkip=x$=JFiVdqeynoxW7Rx=TKAjm1I+omO0QoKSW#DMy0Z!>1EUW04gmaW$ z)(1TxF6a&FNlF&7hpxb?>hTIDS~e~}5yx**^wVIGlGYZ}3}STeKOIpLw!&>< zvBl(N^;V_l#ff5;jQR!zDAM6{bm058_bt^{gFWF!&|_3}&324T9vK|RQYog{sXgsp zrPmvzd9SIA^sKk7(^bY!3mfIZI-3ii_QtKGqTnd0<7l?n>#+Vxgx!Po(l-* z&osbgpvlA7#@R|)+#VvySgUweD@!U$y+xxa%*D4HZ0d$_??T5T!*Sw)#4%O`OnbmU z0!i@cfaE%YFY-!G##LfeZ5YB2UUPCSw|{P1%G-5uJJ*xNxqGh01(S6{3`zs)Q)Ufxvn36dYC^TrHYL9&sP<2PXor@n(vQ||qmqDOw9_A}cEc>WzRPRpArPRy0 z1yX*gMic|)?@c1H^$U?%y6lR{C^Tz8>T7m!S}PRC=)q`|(Tuv8N)r(yTnADq35Xx4 zKMDtrs2OomR|!hZD0Lsdia4NXPu2+86wwnYvdXtyNQJo{Qhv1dr7hGSt3SkjX=$3r zW4BLy=)xdA)001v7?k?{?jK1Gag@F?M`C5T2v4LP;E*>2MI3ve&U zZ;FDfpJh*3Ke0&p^F7d}@d6vc%zn{sKi*Inrg(jK-)*+X{KssreTk0epuwAqI86gX z*`{mf8~$jH1!kHZn+(J3w;22Y3$iF%4B=QSMJA@9x>g*0!bBT{oS6dWOhJ*$U_crq zEE2pubq9>H43y(d^(NN=fRxB?;TTfIis_i)$W#mu+MCQ45g5c(_Xn;CR z?w38wra^F!fIRO=<5PfQw4EoUftaU>r^K3w&FVQDnG0`|Ci55My#cPK#9C&imDs$) zt-;3JiMX6*E)Vk@3d8V&u#_e>LO_W}nDbs#8sFXhT%3S%s{npxPBj2#<`+4enL;Bn z!8zD?P10ypsqH9-id=Us)yqpVDL6${HFX>+X`O@YyJt8wNxtzr_hR@jiA zQ8`@y0MbFU_GMb?k=dZ3QU&u)QYt$H)IOv-KJY9#_|zRkNkaTIq|M>MBex3@bd>pmh+G~Cv3WQ+7f3379+9bYbPtFr@Rj!{u9&@|ft>#U&yT35JtE)2COv5#rmkvM)Vqme z)U+)O<9VA$HRr`rw|Kwj^74UuVT-msp!%RW6vE&l+Chg75MQ{5w{ zoL-r-fp5E24O_Jf{8uAYlj?eK*An|4t)1Z(zz8~dwHDgEfbNX$hs63%h2fDg^oULA zcVP&fR+rwl1<@9$^QRZ;HTfA+Cgsx&5jXUD7+>+ve0+v#LNhv-oaobzRAN=Nj>}Zu zo}zb*#y}oGSSk282~`JdrxzL~s3r8Fu=>*~<@5?g1iq9uerc6*`W|aoobUBOT$V_> zBlM6i%OqVCzM4`k>=N&}C@v_+XY@)6DF!`*sC*?C!3RU(D7f%UiS>g$?>R2x^)%E} z5vqK`Ei(;GGiwQ*)|rlX)to}Z=jzXD<|jVZmA9tMZ3za1vXIPajQidm{jPw?`ec8k zyGuIY`D$-T==P3-ltjy6(-DyRaGaVP4290+V+zJd6^zr>Q`AMsFymKk__oWYGG^8~ zoo*s7MmG{7GSZ>&EE5xQb9$)UIjSzU?aT}e-4h$~GY&a&GY3qD~3eEcma^i!~vG8XR&x{&b+;rH+M_}TKZTB7$Bs&XAQSY3{9?-GecrTqXQuQq?1bfc-n!n1$jVVB*ZHUXi7$crG5ns5s_Jssk82#d(_-vS=>oiihJB%wWMjwzS3hXt z42?Rc6Jw|EmW}?Uw%XqkL4lb@g)~_gw@y8;q`5~smR34LSWxbj@}@tYD`JGm;(|ymFMxS zO}g4=rwHf~8ttC&vxL`>)Fph<$C0^iUQBC&?rCMIh#sWEqwwnSDt1RWgkNk5-;$+A zB;)R8$w1dn>iHtIpsSD7@(2uT+$#4~F6*}?F6O}$C+qA8(oi^my_UpbDv_2>qpO8g z!g4_iay9eyWwfjktFpZ1w5%B*)+%@@ z&AB&q|)bZx*ep#qGxb~E=Z;O@Kdt(DRD>F?VU97+C z$R9NjW2(0bYwcp1yqavE$yDB?$79o61N}Ix6LPgBf}dfdGAJy1dN6E8T&U_#{glyi zDl4WnkfG?lGgCWRx*C(_U}8RH1GDq3TgFni0hd|{%!wy)MHs~9cA+Zg8|jJ}HpR)& zBN&{3JJ+`T%hrC)#A|B^88H?>j_p&G?gs1g;=C&MJpet97urL_2 
zC0*>i{Vh%71Acc$)6`#zamgV|97*bWy!ZJc8LAR06^fj*MAOxQE$c|+GjS%M_4uxr zZcuGiG-JJVRNh%`Rm#cY79HzPZQO#7?U4+DQL(2(A2$ULIaw#U zQ7N8&X^@&9R%wDtN+)_XRrKnqZ{r%{v~LJ1s)v}WIpxdE2~ANp9gO3=6irKk;2f|w z-gtzj-DnCa$krv&TFL_4m~uk#1dHWxtuIZVaz;*=XJc<#kF068VX9`}a8Xr`G|Bo< zDi&xZsB{LAXe(=`Ix9?+4&`@gB9~Wc#6`Yl2Y4x{A*1r6|O2 zH#w^QRH~RS9tqj)@weNoq^7E7(rxB-33IyhH6ufWjtX0%^MytV{RX2iMsibxiOh-I z?%*;CFA<50vf8tgw`W!jVP6DRHPRe&vklYFvV?LbH`Vdrq1%3?_e|V!bCKgCa=j2~ zmkEop1}5WTjm2=GMfD+^PI)U+(u^|Vtc}q*&}zxX6K%tg$~!myYt%PBq@e-2knPFMQC zPERCV6pv(pa%kEKe-Gx9HG2d*KcgfhCvb^(+)rUZ!dW0tB(V8+PSjA62M;~civ=<# z)FIh@S?x}veTOIL(-W5GV-ndg)fo-a8EPBscp+Vv)t=Id!kvU>Xh8D?_Yq+O^GB|>6gLQ$!2 zaWb5(&i??K=Rq9){95$;5r11(aBwbXO? zUNweRRk2e;S5G+3Mq1f$M>E~;h?rRLH?sOi?mG_u0Mqd-T$84$*i*M{#4;}~z2?i? zRI>*mV+czB0BRuL;umM9qGt+vv-9;GWWR`n$p=J%zQikax^Va730hEpP(63aeZLmy zFGjI$(aM}Mnr})nk7*e8QWxWP^yj~N@uT+;!E(1=G~+Bd=qxuDBPSscJ*g9>M&#}T za+yMy(>xU=OVeT#oXMALGd*?%NXl_?q1Md6fQ{fK_P;gkD1UKuM1pa*eHF92(V`!l zgX~=B1DUqP>F^$a^jiTvqf~;nsE|!nXp626LJ{|H1E^oEIKWcKe73jto+jNjJ{^Zv zRY|tUamFc}mXc%P4hVs3Z_P?{AE8|=#VM*OeG%#?fv7uK+A`EH#^b#?ebBzMIa#|v zMgx>$B0stUK&x?^h|0rxPlG4a{2pfj{nJYb#%OVB%yy!!`bimzc3SMI7BN=WaCoLm(ID44NtV&SJR~6hP+0=H9NXt#kW6L3O zerg%lwN6IodX>Nll_|F2Dc|4%$DPWJeLg5MULtOg`A%{=MqAL&k^#9?(BP?n)mj&o z&=PdYN0KhI>WzX9Q4v_rb65#>bCKPWUGq(u5lEEcvwNDOdg3^%C!?p$B2&3onUV5S z$SB*DLv^wlEP7N@r>vf(ohe%#5A>)k%Bz!>L&_rSISu0JSm+?TtJ@n^B`L<_gE7n@ z%T3S0GXu)&nrqt14n}d{2~NU?2U;13ka(;;8fV9NdGQHTa=Y7rkmd!_ovM>?GH`Zr z0{0@@kd~kej!mj6ImR<4Pae)7Z?_V5aZAG8lv@L+D9lE*VilU=dywwLdFGCwks1cY zXqjq{V?_CjEobb7BTrT&EL1f-{*z;rLM4}oT0xxhT9XlaB?VgT-_zH8;gavo7G$Xy&#ju&xj9-A*nLYkS()xj zVf7$OayiL&j?$J&?B#NHrRqr4bd9I)KGb>6KD69gdaE4jHux-IpiZ@${j2-|D;3f% zRLuSIhC|TDA0;7%4*YzSba$ytDmll=5hhz3$7l5^P?@EiB1U3zqfc{{Ve8 z=8K@UkdKP3=4t-`RLn|8^kP@Al1)uDJzFwrp{Cw$Lehhg5{cRh27Y9!gOH7)3Ao8e zP7X`FM6I6if0F$H((ZVaB;)x&(@6{0;k2tdL^>m$*=>Ac%uU33AvQHwMbR4kybKc1sE8060lLsUnJv5+;ZX_twq{5pyC$l>h zA5mRRK-^uab_~GCM^w#)_o65(DeD`H)bBR)WRa=42}a|E!eV5TZX#KkC>SD{RTe3r zsLWf6?d}f5sFkozld)Q4G;!DN5VLzV$-9ysZjx3`OwkVQiiOABz9v7ahvuZ?H4BbT z_@Dfy{{S?k?j=WaY&n&apzV-tOCVDtE2(27De4S(gEbqKMnpK0A>+*zPW6Sti;KY(PV&++ zC8A0d0eQdP6;;Iqc{=CH04bp8vhy81co%5L-=C=&rR8 zZJlyzI!7BRHyV}u+tzAngy29NG3g{yTE`5{JsgrDm240&$!ir995}iH58F%S)TQd z(YYUr;Zmgq=$kYf+hU%BC$Er0($>2tG{y!tm9AB;sK0}sCMT|k&~V2;hHK3_s$av&1A#d=C>WHE_}^vG zb!YffwRS=1a7ll=>H#v2!{z)x>lGb2Bd8DX-e_izV3#NKC$OMlG+T%0PhmheBnkwU z8$60IPb3B%xTBhc<(LRWWq#l%1xv2E3TAs}1?8tuY+LcmyK;OZc_>^mc!n8{!xqFJ zFvEQr*3H~<5vO!o{i`HgL_*_L$iDGqF-Zqno$3oSW>AZ;mYba(DYEImv29JxA|}S! 
zp|NVY)jIP~&>E+w4ph#IqRl)6q#+$bU91JQ zHz^4bt;mxbTEv@OP2$gXY{oIT1YXC5vasL)+yGg5Q+x~kyuflA$}?@d{I^N z)j1R#?zr6fNv+ROCm5~l5&Jb>&{-xZbi+qM#h26e4H*0%29SvsjZ7~ z?3{xavu(}Bhj&=Xh+n!p^GxWT5j;Yu_M+{Wh)y>nP?Y19!OM4^<>;`?COBN{Y9>c* zRXtD0WHiGlMmESqL|hJJt^8U}smVWe8~Bjnk5iG7rQ32`j!AEr2HZ28 zAU9t2w_2Qd+_FdRR$Q^i2PfGGt*9ODDmJm`TSTBwblelesB!8|ey<9mjI}=F^E-q` zbSU~q+r}}PDi}g-#;UHJiO&7##ph!KFCcEaVHFgTJlwK#laoH%`=g=#o%_DQn`$?jBMbNP<9m(ysMl-WcD0tGeeA?ZG9BCRI(OfakZp!)>Fpz~ zFpQ^!t|ItAl@F%o!SMBt#M#{^S)~=eP>hHbJG#tfvR@or`1oq zgmwl%t85?#Q0A#w4Czm)?Ay5Fgq+NZ!e+R|KO#~jN^{mooPy1}EgZRAla^OpIF+4D z^9fkaN=K5;sIJOw{iLE{%p}0wb5e9O z??XRmde(?i*!2{yeK-%g5ipttnqy3M1WYG%BN=WM%cxiCFH+*%WI+fQo&F`=sn|ns zxVzSw7f^9`tuhX(jEZQM!snZwNWE%544-m)LM%;MFv}l!-AgY^=}+*>)U&N+`F#gJ z2~9FqF6t)sO!9_RX3SSC|xJEt6mcV$-?^gZ2oiYCD{%gG*jrv205uam6M#jxonM=fF z+SR@HpFXe?`X%13$6mJjmf64cO#qyqxLYD*g0Urs9mFE!gLLgV4i}!8eDYW|RZ;QK zzJIHmZV#1_VP>Q=Sy(ugKeq?k{B2Lx0_Uet$lKBz1n?%u;8PB0^MAbet%I{lFRG@Z zBQLVpGMD7L(V9NP2%~|Ij+?ZX>&%~0cS^82{C26CQ#ngEnPu2QJ+FVXaF3`*RB{N~ zdS~ewZL?144Yb=sMWihl*zV?TS3Gg#mytP3W1?B{e;*tZ*6i{$RFGpCtESGvZh?MR zxEb(IdWIrrl71;iP|rw3P^PF2xMF24%349SjqDShgeG^}%0t!><3>x=O+KvyJmc(X zrvRNmV!v@$_#=%RK)6~oPB&}Bj_8+>+@z_fduoOokjidAc^M0*<8Wd)ol8wbMiG=7 zu-)CTSH#bnv(0peqRnI2Yt(fWPf-xmon~#a9ujoNxe377={L?m82QG}^B7xU? zN%pgF&c+~<)OTdPO>7u0ix}k?jHSpS>L}ZjapG=s9EnY@Z3~v5@v4xQG<_q2Q5m44 zL?UuGPp1-u?a3sml;0#sLp88Fr7fJoZvw3n$f>)@CUeblPGl0yuCX;kV?2~LGcWy2 z{{UAH%}VM}*u1~>BmG=IG^McC=9-pCkeVI2Es3PY8q1bOQ@Ldp)X}u=TqFd9AG)G< zk`@kToe2vjnJop=&cz*0_DV6S=89>{Ct%4ax`<)pbr~1Y3_P@qjO7ASFN-fR(}(p{ z(hjK=I3B!Lm+5=@>)ITko>y@mDAa(1iH*2~6HSAQ9_HPGOPM7~S1W@0axa*RY|%FwTqN;#Ez;vb>0w z3AUEZh4e&y(Y;3E9*K!KbyLPCI~Kr!9}VeAsFfUctn>PTTV|qc8-k@{(40c2>Ql&Q zmS9L`FTk6vkKOh(ax-8Kxg&~@pR`3Ormv@qx6?8+9cyk<=xSEu6Yx_QU+(!T8gE~X^aGKE&WS2xVkSCi5xei^zX)W)f=V&grsr@;l*lA z2Icn;gzr%-;qYX4yV695p6ue;CehrLofvXGbPVj}AWU~9w@WA!D_c9RQ`F72$iDRU zoilGWNt$GgWe2v$py;(^o~M&Dk<|b!4-4nDNmfhvZI)hpqK}VHZMl%&YD7umz2ki! 
zr4h62#-v{R=M(S-)?I!z&KsUrmlcOT+n!Ex;*Z1UA4eID=ZIdFyfX)24LEdSd6;S+ zJEy{$jK2IEfL@Q#sfF~z zPh1I)sbkTef$mj1ZQ5KBoJPZuPk_Bw*o>&d&Krk7F-+}ERNZB%$PH10bC?dhZ)8Ec zob$OrVqGaTSd1uPLn0h;wxJ=a5;Aa+9`we7GPyU(;d(JTxDNn$@$Al%@RszqUZ#3o zlhKXH@H+vY47G3aH%A&Jw$sWGt@ zdfK%g6`apeHe1JBwEj2hT>e&(I=^aK$^D2*qnaPzITpvYD97SLZA7lMg0b7;)X|XD z4RDc<84W|=ILb#L7c67%Uvto#xIeM%@J4X_B8|4@V;gBMo@Uvfnocc@H=x3qnMj>P zTptbh>_vJ;9L+1`=;bEl2^3;x zOK|u#Od(H6FHCvNiTW0tBfBkA(|)A~O+wRA?H-6X9o<~{o~8A#-ChLN7tu!RGzQTf+FIu7$et@l?Xa_*7ll@7(tmj{P5nN&ryX~t>g$41~hiCpbmJ}5;008NRZ z1Nv1XnMlqHmv8*Hqb<27$+lhbF&OoPhv*mwRAne23u*OyR8LE$BZjvd=3i`l0VPPv zo?|fKkmd@2>5HZ=Q8B7S-Ny(%Q6HLd)rp`=S)8*{R8sno{89C&s#Cd1(4VN^;*YIr zh<(pXPdtJpxgc1cNVes0rEX;{!XZ8fdI9PEcRvy`5(P%PKdV2*FIuF>i*eAJY$k+@ ziLi4d(Q+m*nf)1lA?ikUiaQFQ@aJkUO5t2ie7$4CmsqND)b4Cp5=*R~@Xl-=wRR}4 za1n8HT3L`>)Pz|lh4*NyXUDl8i9>_lD2zWWA?ba$!1x3>7m-SA&-|FyvzXeZu)(GV zT!sD$7N5iv#V2x&+46xuBAz|Pkc*k^UmtnWf4qN^FQI+ofj^o*p+iOiPEK94o^<}RhRI#z6WEcftzv?m>Z*x$Y_%OuOzw!;y#&ununLUf zh%iC>M90>V)3V?zG~6HK{?F z?}TU9q#wzA+EFmhGMsiuk5b!p*n=MGvZ^2RWp9~GT+e>x6%?dTV&2d+%jIObVbu5T zms{(Dk{_D!FzGi)Y3?Ijv~D`tuxGI!Bxff5w- zt`V3p)dQCt!bL?3Za8N49O0CWLdBx zNlHNvVHjkI*3DyA;$bib%Vm;S!*KXD*|%B*uY0?3mn`_FHPak5!EZ?YAtU_NtLY^V zqs~yBGv+Wd(P64zcQBQWFmr}xJ35+q{<^=jDxz;_k<3UH#xX29x**Ptwxn(B+Wun6 z-?OM37l@m8`wqB2>sci$A0~B{T!6HrWxfR{&`Lh)wz0q06CVi$iLr^oJ<9k?`r3G( zm??)c7E|tpoogmpEQu^mg%QGP6BBd)08^gGdX&DaC_GAk`iq+*>PhN2j%?(#3QI=e zF(IMt$rnvLlzeAoRV>f}&?%6T9y3gY&?XYonW7tZ$S6-V=+7j z+*KA$2Z!uBmzwD-y6dmhrYBHi1?REHb=DukJ!#~YgU8ekB$)~1htw9KH5AP`1PQGf zTqMFt%u`j>OHDmH)dz@axj07MAfcxF3ChgfdOzLoMqRJO+r+A1V@*nRLgOb~;4|I_ z6^s+0IF#HqIJ0JvhK^)kJ<`gSiE5r5gd6UN!~i2~xC>^DyOkVLmvGLN zITI|TE>C;OM$&pbCMP+@z+940FXH}{J3k{LPV>Co9fx7~WmIF@i{8e^j@;IBl5#p@ ziAEZdH2ol_iO}5OPa(hLifa{p7vuLv(D-ks|jJkZy2m+Qg&|-9=-^yKcxn; zu26BkF_G?WQ;mS+M3i)}N@&hAi(pD7;@46X(3D~aB`%F4jiXWra@#H#*e*yEzSB&( zTY5Drkb4yYqX`Q}MnD6{HT5hp`jJCPCnO#+VYw1}x78hAE7^SGqqrm0m26a1Hsold zG1@vIa5KnXD-*+d`b;ONMBd~W`-PB+HwPl)FUc@??J!sR{s9F$KmZaxPk zlUDm)^}TTWZRSVfkLDH_cA8)rx!9gg#!w^VpL%;`Fh>FivDumgWv8O}*4)I0QQg#e z71tVV=;?A1aB`5p!|6p+B14==BjOgj zGT<>0z4wj>Y6ZA0U|_IkskeoWl0cS|&L@G`yly_{eC;dORpG3#9eck|Xh;~tHsvn3 zTt@wS*M`L^3$I9>%a_eNzif#WRQ6;h;rM3tOG1M(8eY?hy= zb1cjpjIUw4yf2cibf*w(7A1wwlP&J+A3Y#0$wQ25O^zQHT&CN-##gp`X`Zs9k9ER` zZxuBXFpwwItIP?FaPwSQpw#d3dRQ_>aiseY*t<2jjN#prO+-kMVPXe66^7`lnPcg~ z1Mf?A(vPa`W7=poUUwLO6s$dvc2R)dFOjn7!z*=1FPVi#=#z~NPAiGh&^J=VXHxuF zZZS78GLQgUi2&U4viqA9t>L&`9SbpxAxlo%5CZ$4Etxz?xhv#fxIT|mR&=(n7k#L_ z-3=?2eq&TxextfH31!-864(vb)}>e9lTXM>=Fm0~BdGAtn%^12JFt1bPzeA=@ZFy9 za#?nw4n$jAZQVcqB)XF*xt^GlI|H8UZsdk>_`scH#GH(V;&~CVXk_m8A3BM_*~U7J zjibne#Vy*S5=70nb#mjx6sI2`fI-ou+d&es=bnB^E>_+Wya6ze6N)543rka$PlguO{N`=4><=+CF?O$SkLhEKBbm#B zYc%@fKGCBb;sxivKG%gNR1=c}h99ZGbWTx}2_(wE6X7-n`JziwZMeqSL|AnMNQ7bq zfEE_=2~W&eta3=`!x|Aw-o$Am&Gv`)*Q=e3GhSAYL~%Mv>$nbyosk5S7*?iP3URh7O2@b}vL@(PrJ7@|7@?eSlCYGh z+9>3=tjLkUa!yDr_NS7IDvZg=Uvi=Ly{6XtR?hIULsCG_YNnS19%`C)XW(%8g_$xA z;+H7AJSTEOp`L{J&N7J$zKc=zLPol{DJ`GLF|p&Iy10{+`Okrs!dKYF`bxh4Rg=LZTd=i>|K#e>+c;J z1<02DTqAD23&+Z-%8Hj=nYnUupTwJd0=#jpNS{b;G`A@%K3`eAp+!^6a+1U4^_$wQ zka{L2ifVZvDdmzanWVQOTZO`TD8oMV{{Rt2iT6&zqqX?gD-HO_wvJ)v+oQmfql80`w8?3ky?0&7=VR<+SJ zMY3v=(7@XN08!^9@JNZwrk|&S&`0e;Q$MUyAeK(XPpOZm&Ou9R?6Bvr{@GDfyTX#I zkQi&7VlnuYSoRdPBC1>ru~RRxO!4?a%3Gq)5WwhSJ(e5!i~ed06PNy-UoW)QkNQU! 
zro(BYOGMNL{>{CD(E%maCPM!59@By_*ZZ|?d?E~&mj>Zae)8VS3u61-anFQw{%Qsg zl+Kvuv_@GB%-pQ4I?Ih7^hj8`ra{Ssq7%2S7Ve~M8OZ07RO56^p<sp0y>sg~*6F%cFyB|S?KxFy97Ghj_) z_~kXU)oTMQO|dWJji>4UJjZry(Y+m$guWpP?LK*3A3ewNfmE=LNfgJg?1?0YZZNV+o`~P7F-KB#R{^20Zw#b!?>G#n=c^@O#mU~1 zC;3?3+MhdXw3cEMBc75$_$J0vj`&P%iC<0)Q$$!W%s|`(BVqDXvr9h{44zkRKgnEt zVn2|4#q#801fe+oD3R+5K3yEg@tU*fcy&{;$~yEIL-eI##4oJYQkUa+#krA~gU!Zp zmA#zB<9=gzB&XnN+A%S9j(G`2!%c>L!U+aBX~u0;M9DgAZ^#0nR5*dccApOb0GZe76hu}yz3uq z43MG0Y-#FxpMpL;4d|T&wTdB82!duRUXhUlgbYh@zf`hat?6wW6y}nRx_VY(PLR}K z%mvTb2jRP7Dz1t8<_k_i%>>@o8ge5%#{85Um_`Xphzj=@FG+_9i%v>XY)jcW+r!Ca zqH`_>bb+U|8>#}DzU33@Q?E($iA5iPfWdq$Uw!H|behF7ArOhGAsYxm=)Vb`-;I=4gXSyfg zm!+5T9;QG2ztszM(07oBl%h^X=;~}HvFF4mPdxU8HAm50HMxfB=-$7w`%}_BAT2TA zmQKwRo6N_Dx^M7#aTv>hOlF3u!*<%7f#nnHUf!kWsHv@nQZir(5$wgJPcz2niu3r} zSmr1Qs&1i+X4|b9LC7DuF)j+fhL-J^zufXYIIrpthxBI_t#~qNU1%Ndxf7-%d6BmO zGhVZ!pS$cE4U<)FyBfNjq}YhNZCLoPt0E3WS$y^EL#J3{z&zzfElh0@CgY}|kT=S9 zHkczW2I@kS{{S=AQ%15h^}?E0@0<3P979^QA0V$DJb3$AIkV==2k%k*BkIu&0Vv85 zn3ntIJ_#O$OxvP#=de6s9NRytavMVMRJhhBPuw-Q)bz$Z*w8tmHyf`#H@6#IA4$>- zPDkF>ZFz~ndD`~ty52ccRY@GO#i{HZeobFNYErj_$!G^;wG8&9)8kB3x!6gkDc%O! z9F4X8=u8tC4?C|g?k9=X!cdTH=nzJ@kt=ZSkt>2Z?+P#W_ei=qR@-%wsI9}Ca?C#{ zN!RIQcSXYxNr>Ss;V)EPi}^l@F81&a(RGsBXDtvPsuz}zNO2+~A=;54aouxv!V_VS z*PZ53+9p3F{bWBxov3Q9v8tMJmAAI&%3rQCj@uE+?8+rrkLO>Xd?cJP7 zjG}+{Q82}7-1h$fv{0CnSfu1C@W*``sypG{^lGT*cs&jT`z{RQu^y@qMD$&iIhM9s zn^L}VgzXt^hUC`jU27v9T#EC!W;0n@(S@I4rL3wc-H8r2E>{CgqxNSLVi8T$;$-Kq z%|1f(i9AxQxkBJEw&Qj4{Awk+*mEn{y*h(R;CGfw$>XhoGk&$r=DRxl1zHdf>X9IC z;l+JF2Mm74pfN7CLpL@x=wH8uic|~V81pMQHj=3M9jTD#0=am35fnK%!%8atXAB5lC_WG zTcWONV(~I=54RwA$5=Zp!E5P}w=@*+PR+vp9PeOcZtXmVrXK5v{{SV=y&R7ayCm%M z(xDZd6zGm1ZtfFF3D}4`PGwIxTyQPe0o7|LOh=;B_CZbi>tximz`5awjI0TWh!#a+wNetcMiSqaVDKRCPp-tvVlkX$5bDJii$c5l86 zW8svAz#Z+}3Egm&+uWZ1?2Yozmet8tzC3hTgaEyXVs`lis8&-vWf-3L?@k0AW30y4 z`6~5E^ib0&>(C`5EQybYE_{~Y#7LBcyZQUb_kZ{6NEJ+8{iJW!bNR0Na5-xPby%tO zzXOkt78u)9(a>_dZDYfC9t4xPrkO?~A;AXSNV4gvB`1}=%Z@0ajyP|UQ_=!C?<$&z zE=mx8#VaUmwW9-+q}#7K0t}+m;@BAwT>5tx$e#8?nxC!3BTCbBl+De_G~62`<~y;r zWMot$^i=LbrK32pshJk)9d;0eX56Ad8i*%7>Ae|o>i*L1hbF}PIZ6k#YEl`UBxFI} zrLaGRCPis77Abwu$HY!Fa6` zVd*z9rg4^;4n%KSMVZYo+I{Lv4L@2R)0&E>p6jVBE?0g((}7HqeVKkpwxob!St8n+ zM6jJ-PG5xjlyH{)sM$;Cx%egODm+Ov#{qz?##yBaEk;Nvm!X&N@J{Ut7LrHN;F>0y zGM`XDbt4gX_;L8xb%Zt~@6m|+$;F9oBsjDthYk*FZOc^SAGIJpCd$(-o{qG3bhOR= zh8p?C{{WgEIU+GrXG}iKAs>W79BvyDo~+5OLTNLyHxZVKCgc93FZ#KlsB=DOtYTJW zi1o^6^naSTX&3e-kvNdoMcHw7s*$a?m?e7R1OSNO03QY9-iK*XgtNc2#GxaxxZQe} zCz>O;Ph-XY@gFW@R19zI>D-^3=V9tYVO^6ZZoL_B?D-PC9yQS_Ok%iJxlr?NhQneY z0^NZoTlINWx0l4_{C!-pE*R2uZ*W3T{0c#OdKE%9;;c zc%dPE=n9-Fv773rb~*i%B*nxCWK|_~*p~7jgVrtUNt2R5Gjaf>xe&Nqn&z`3ngCql zl`iSuWJKbIc?)nJ!|{){nr8`x8zXJYA@^c(kxii>>9_Dpn?Yt3@gNC{ChCmy2I(?{ zmQrTX%@#sj+qytYQ{i}xT;SUjbxz7>Ol-`+B>m|C6qo-1Z8~FE1Vs~c;DO($ehrNN zC4E9|H#itd5ivPwumk10$tTesWFJRz@!;FN>vcw3z}lCg183#r?0ou{_a%!w$rYHT zB{4Z!m^k&v;uZ+pKlcrRwViG!Ur_!$^GnGG}N?U1}Q1&?T`>7A>S$m%0%$79)> zwBwEM?%!N3vR}<&b+f0rdID{X^|TG+_@Text3(#rhbSEm7pW!~aQNUy(RcAj*_Ymm z({5Ce^;&VG$l6fb8OL}#6C+!P3Z;0HNBsij9PQ^e# z(^fTB)Lvp@aFEp*4j`vOPe(i6{%LXwH&I;WE^fzLe{LeiKa5*<-dEFME%6cujrc4* zs%^^l@Sd^e5OnNT)6Yq6PE)u?XCO66gT^FW64~}ndQr$Cv<`XtT{n|sXc>hQSB^D? 
zx*QFO(#B#hMpx}yiQMc+-x!nLk1Lz0A3|}=jP104#4diqqINTlL?Y(6n9A19BwE?( zA0(u#s5*VcJB-uV$3#yHgO|w&`}j3PB^T(McuRkXqgm+Q7{hRC0nW|?OaYlBL~a-A z*qQqa3yyka{{TrPHGPNmJYFl&&WcUg(zk_rJnBJ{4#saW0jE7Pg^VvR^5iYSMAd%b zaePLyFVggb&?$9;CZ3FCQDMABQYQAycZXW~hoMnQ0urZcaf!GRh?acw8`hksrf`Fx zMnGjK)wU?rWs@E7z4h zHX%nH@6*0w^fKOcT~ zRjQVjb#c)!+)Rs8{{S*0Gp1bmBlyZXp|9LCz4(9eA=~KO=5+QGW38v*m+3~yc$Obg zf>JpA>YyOlGXeHkbZ_4&G-EBY-^DtkrSw>Z3ocPb*0tvu#!(r>n94F4oTLaK@=@KD zUM}a>oSts|;Um5$#4;R4NOiRm@=xU;x+=<}lk_H0)2q^GL&~5&fqRdfrvG&yiygF5dw0P za~5tq0P8;`8AdDFtDOt*l$>`e7rQw9XdTfu7VRQ3x!%162S=U4sb%Q(H1K)M-e|@n zKV%v3Aaz(Na78(5xf1nNUKukNoRgIDxz!>GW>&Ul-X6XOdeq%!pj=7^t_M&?xkliT z4?x`I!cjczMpjEb9ig$DVC*DwyK|In<$HXTacksk8WgdS1~QX#;N%I7&kb=F6CA<# zr3_L0K7E}-hMdc~9hLnZ(|oNn9Wg(4WFAEm3QR$n=>ion;o5x{2H8R;65FO7WpUb> zw=T}2j;XlIEnSg$y!aBS1E zoQrb*0K2)^IltP7F_!)kieSR(pnjEP{{Rc@wCBb|2^5+bs8llwfctQEo>}UPa|NmX zCg&3$(bA1?8*U>uukat5#BkcJ^Yl~B&~RVNU5Ap

`wPu|GE;e2;k=fcEjjKJJka za^6!0Ard@D2YnT-F?33tlVD~EfjqT~f%?`=3~7#_1cx}CL*$#20Jmy`tdV=KIgbm@ z%4O*msm@l{l7YnEFFxVmBy6$rUD46uM*f5~KsxL$dV=>DWouP5tuX!{P}7048OEXl za6ftu!!0L)2J6JU4`@ipf~PRWUAGcd#{9*YF&6^$7=NiV%S_nRk(BHd{4ojH9A z{n2TM?LYk|sil)_G#eW9jljscZm5%KXlCF%-@r#jc)G3w}MB_(_POg&jwL6s>U8C!SD#m=Y#p@e!Qt_-} zU>M91kpvJy2XPi99L%`0FXC9u0M#)n>U;O5&;q5g_N34ja1JImmr`XmFX^pzl=UId zcZi(gc}h7Lo}#rHrsM5dDxs_@qFtn*++F_wb-5Ou{U+^y*1CL<#~RwL*O~oe{Ls}@ zRQQ3r>{?^O>t9Xi?3{D>-8H8m)r4zYoJX?gZNvI`Nk7vYBEZD#vUUVq>9LAg0X7@P zX|nM}s8_Z(ca5V5;03O7nBJd0j!;xby+;^G=5$I;&LoZ9)5DT=U2QZ#?JA$8KWaA+ z^Ol)JdD(DOVlj@ko@%|Lx@~C^K5-@z;5MW+X@I-XxY(LpLb{dA>?I)cLzYs_0X!=d z!)GI1Q&8&zDO6HZMd-FZ)3Cp20QQVQ04bkOl;@^%n@)X)Zrr|kA0oSYf^H;!Kht_r zMOhfl4J(lCV~84v9n*_(h}pMpYp%0RPaS5V+1ouL13PfI-L(?t5*>8B(DY|c`L+g` z{{UFuf>-Cg4~S7>P-y6^x2}8M%~E>d1)XfLk{UR zP_*deEnG--N_U~WWsNO(zo}|EWHA@aY95t2CnnqmAaJmd$gWB7MfC%IAm7x5 z#Ci{o)@iV$ZrjZ2KnH&vJJK-zf%MIZ&9RALeTgSTBQ(3`r*L$r-iSjtNTEQpv zBx64-DfIIE2l6og0C4X~Ji?-&o*{7HT_9)PP(5wk&?WHCHQ(7-uopBHU-WjvO4tu(iDjNg|EeV^1h`6<_B8i%5TAjU%deRw} z(TCv+Vn{}E>Km;MiO;&cD-L@A^mn33z|A zWU z3!8D?hzdoKlI6(p1qjFQ52KFGaGQ@YC{AsiZIc_s*xFIb*GDnZ92T^F3i^3<8HoP? zYRQloNZImPIjScK=Mx!@aUw(HxK3$m4OpkBsU7!>H9oTlpUDLO07kH9^OYoEdBR2~ z;G|?$ZkQ0S^eYXYx^ecr#D~oa??K&$Y}LYjQ-O`V!t{V$QyE)3qmd_%s_2$83+ZjQ z2i=&@;-f0?OjU#RDION(9bTpM5&++l#K1gIr9s5`tf?Hmr3pvz0v3H5BY7+6?o!jc zlZ`b!u#fUV)ZiFBK|1K#&x~&q@=w$k6TwOh$BO4=hADBlV;z7mAhKBuHb6xbgdqE!eL$T(MbhP*3k^-j&$Jo>-Kf@rS0^sh2M1I>RjlH<<3qtslJ-76_T>A-MBNNUykIW%Mv->4#GM}}OKygKX z5vZa%$n=IHKA^YOXnnEv;y8`z?c!ALwHI2DoFp$918{uOG{M8-vF3po9bO$&T%vPb zBgDztYuZgXf6QX}T3*x;97#FWYTRs*HzZ|yo$E1S!ZIYDQpo%h8>6W^z_OiK_9E*g zQG(POwjgIj?1`#m5Afvw0G!bKKki4&BE+4j8rwIFvy7wRAnqX?C}u$u0>>=#2gzZK zG2A}lguu1Jnak*jHWjC1y z&5QV_6P}UALvm~I*J76(F{y*B>JqpykP;R=2svlPIs=lfrf_O%@`2qfw9FmSi5v>Z z#Ed(zY*=p0t3`n^MXoS)!pSmVG^;Al*QnJmukwo2S(Y4(yLs z^iUgd*hhTgJi-dLwiN#WP?cYDyJ&HzsazRZ*!M~MvyI{N2dOXv#c=6X+4fzPEn8mZqB@SBxj2L0Zd;@+en=X8 zMf$n9PVk#`Y#k4@HR$SbvV2X?fg6rRPqP@vYsy1tBju9Bu*!Ouds5hou<$z!rxD8h zj}UT206rIny8MLQ1P-)TIfqKG50x_@10Y4oG2Sh_XYfyHs{ZS7d6>lHW*e7iHUl#Y z?f5RaNeDA7b9vckb2*=un_I~hqYmitI3Gc=npnF`{{UdqaHx2*IXWe+w%{8Y9ZLdF|8HB#-Zz0>Zu?$igu_PX97i&Wv0kqyeGmgGLz)jEb__Srkhw_y2IVRA1JhQGiE~UJG2bpFTEcyRq!L0qMaC= z9u1myoMgo5d7UvLS2;M@ICwnK)7MB-}x;yjKU9wD>OBh1+b-AxFzh%tgNJwyX?4|7!(5E2 z#3IQyFE(X3w=LJ0)2K&vzxV5Up3aCapc1I8o~@d;&!$VxH-K<@-xBEJD|GN#1mYu~b}rfQ-a1mIb8ew3G6k4T|@ z(Y!V8Mm@bb^VA=YmP4%*EK9JJs+w_u!ND0#gZkiidbNn6Amkm5;UOS-8={bMerzHz6hlSceOYT5c`4G%wR$p)frupJa$chYX}{UcXo0aA z<|P>ep8`hZ-`MTB8^}wR=-cE6#H@eiOfGS|Vy2DD-?gKtcAi)35SD|V0~BKMo;SGs zzbZMDE4AO$A{`#V^~u;%npY>|l`E5{)OpG+kqI8TM3mn^lrn`=qCBStoR^%8E>KXx zvHCCYxYX)mJT2F;2XD3Jdl~$S>~S8D(|{_lfm6QjP`?=qe^&m9d?Z%93Fv-lf)5#6EEGnKe+x0eNnDOjigINT&U z#(xo?!5l>5A~Xj$ySIziUq`LBq+`Bui2Ujesge#-8TT z+bJJVNS!0X=R*#E&n^D|YAKb;5pQ&d5W=NwYMwKvIX7QV674|f!tzam6jfysiPw97gGEs5+o^IYNQdw7@a%$ak`GBccBz6%20m0 z1u{LPnRYc4tO;@MPBUs3w|GWY-jtPv*)a@4cJKt&@e}q|7#LkcLB^krL?kXJ2cs}Y zKIOpkve#jU^OxzAxPhtF!hhxbKqtu9q|{eGPXjMPamYjxO8`3No%ihRs#fD<-p39e z$e!8D@+fMYHjbylU8m(HYF&hfUj7K;2Wc@{8sYSxV^-qZwrSzDr$CVMx_qgbdd0?@ zByK(roJ>@6_%o7N5`(i#B~iD2!x*|pun9TgCW zKGd0+4uE&aXJm0FRq`J!ECOcDU2QDS7OD)QIglU8-B90SKl(9<_um1qIl>Y#+BC{M%44m|uwj(oc zDp7cQpPvF$mXI7cOn_Rame|A@i^9U)}V307dkdzMrAt?NbiXN*A z!)piEqNQ_<%P zdauAcGbs6+Big0Ho3Qj|1g*igtVdC?zmZYzRR@HlBNG$8BwiL6*h{O`YV7N4|}ouixg#Bd1YDt{z{Fp!1hGnRHafqlUFZ%~-+ z51~EVwI8J6?+^Cn)%Qx5^t(paLgEia_9oe})Cfft2IBW2+ISVXxDYh&9Y!SXBo88p zgux?dQq3XIeh{@65Quw@l+bvb^M=IY=4d=dedz2*aE{`&l_}8i zuN!2Y&9?KO2v~f|h1OKxLB^~?slqNBq;o!6hzg^*QrMVwTBdfZ6^&i=$RRyUp5fSd 
z??-4!#ig#T?w|+vQ}FSee2|s0GOBcOla~CJo57LXZ%0O~dcI%nQPa2jS*cpbbW=Sq zvfGmQM3ouQaeBOI-}TyS$`A;geeXxX>)gI^rhNYZbcEy5DIYkah%5sxJ@3-Zc5F6I z?}kU@yH6b~z#c(tg?dslpHY@yuB11ot*c}3y(@ePD7N}r(uz@NS9Zgb&50k15Uxli zL2VNh{{XyvX}qc^XT4Nog}(UV8#l4E@?!}7ItG_S{4m%_{E^KLWBKuc;vv`udj)A(}rCB$i;QD+{b zcwBZ4j$&ifo9PrU=3-&`#Cn&=rrm2fu~6(SyEFXL8R;KMXzU?a2xfvN7 z!&F7i>yAoUTu*dgav+k}m*9}~6^T{1?ZMXe!&mlHO$SUcD!4>`&}dJHnb2>rsTS&( zcxq|pofh^SjJ-t`CTLXCNT00U!gQbS1!ZZ2rdQv%i!4k+UN{4^1M3M7Q!yZI0UVEn znEwD|gu;)Y3TFkf;FVW}!n!ewn&Bqpt*2w+ZhRy^K4C})FA-tOJcwNr>K_HMHoz&R zQBMI1@tDif8U(j4Mo}5So;(q7Ww&Fh64p7y+$QTk&@6g82RM$*^MTXq$pLJzqHgy$ zObx;y01996O%Cj${C>>)v+|h8`GsRw0z_OrjE>v}}81n-#mm;|S zNrN8DcgVlp{{V{YPb|=FyHxS-4zycvkQsz6bs36^sngi{JFdj>YSJTYH`sGXmNLYf z*3Z?6Hn_;d@-aDrID}lBi$ghBsC-&FoHZ1JJ+Se&a0Ntp9!?GMmpqCj#S?GiwE*`4 z_#kVl-^AH+o$X*u8%Sj+AGaP-914-csF}oDi_w&25AR%Xly}5~z@uY?(m-bbdeqgI zXPUvW3`$QNGO&r&vM&cWMx~w6bRILy>O^iTdWtVxc7W{R^ICrkppk(r#L5JiOED-D zyv9Q@eksv|pbZY`(g4#Ts2@G|Pm8i!9T8!5$(YD#o?!4fkAYa%Ez*{KTfCAU{iyX{bjq38N!d}z=`Fb{YMU~UBGNtL*|51blFdyx!s>f`{Tgx|rhejA_m=+v zY~Q_W={c#IcLS}7LPAlH1~M6fIfXsOa+0Dh0Na9XOmBq7nR#bpi_<{gVx(z$!t>H1R#sJ7&8 zHo|dAi=8Q^j;g9kO5qWkw66wSIq_xv5&ahmk({gPx5QLOoN{x_Za)>)D%+|y1P3F% zv)ytBjo~Xx&vD>X&~gB4q*QP^YhzJ{Pf(4k8k0~m%E9cd=xU9)u90$cNT;w| zou`y^$i`!o?IG|0&_FN0kRb^37%WZaK}K!u24k1#M*jdM!yfKyBuA_uR_vmm1gnjp z%{dU3_!-7gBk;&bd>3Ox+kfF}E7%;PAoMTzUmu=mXJ~Nuq+D_aE?9u;N3(crlx^2* zf}2j^Xb8pUTpPZr24AE%sqRql<|y1rnWU_b07s&|ZMQ9xu?tg-P#JbOo*PumM(ICG z+tY)<#pbmP_(%YX@$b!O6A@rY*r=C(kP1j(?mQ)iSC&cM-92Wg$lr5})DvPk3n?hc z=HsQqDIc_ABY)A$%>!40VOWQDs*0V#W8TmqJMizfrdxBm8eJNlHj{;7mH2Q364zv^#6($rfB><-JCl?v7(Ei90+tsy4i2=91vqDG0e0x@wFaf?&l(3@&^n z_zx(F%j6IN!lIf3!m#Z{Bazc4<1zMU5h(l)OF}H#=SILmA0?A2VeG{KMn8by^KiRM zO63ED>XX=f%{T=Y8UmH@()gV@2ba}|A2h<;zuai`hhjnbqMw^@_{fm_F+bXj zkdIS1K{VC%RMX+yVb}r7MBo(L1Ed)5nDUT5rNJoQHwX9c{gf>69+Kgt8Pd^W>~WPm zJsPe(WvTrs{-I5z>An*Cy^D^T8iC@bt#8qt2awm?mr;zfP3|)_m`)R1=#z}qMZk4> zMI#QgR0z)ftZZencMC1bzKj0=>MztlWf)>uTer8~ew#lsH>oFac+SMQ0O9o`Z!1Y) z;?00gCjqx*joR2Dq4Elja!Z9Ed+4BYx)x8e~Y3pE2+J#gz zzEFB6nYh26X)j?^Zo^C|;?RliX^f&^IO&vR2)YR*<7mer1Nep3OU@4nryFBG%1Roj zV{|rhf}mBz04GUV-nayxr2bQZ-~_J~+$*Itn3I#Gakpsen!jyE_|oh}yl~*-WcyJj z+)-$xNmOFEixE00=^p!m^4TMewM6hqW2G2%T+J(xdXBCmQKozd%#80gcF;S3;)J1yro{0^MZk7# zRm5yyLWb)J-d!y7!~$=ii3d z`qE~dk3$y*L#b#?Aw#()Mt4FZxYINheD65y{Luz{5LNQ}zDOF}#-->w5NysVKlIsv zJ7i713(ad@qoI*7!K+>Sbr6U@h9ds8Q3>?0@#0KwS%8!ec&0VtLfp-gcz#)^_YZ4P z_?-U$tWng4%*tG#PsphFO3sO%8l~_TR{9BBgJGD7r=4A=C>Ltolnv}Broe&L0yW%H zlNF3%Y^iKh(ghf4T%*c-H9n)|>KUyB6p!E?ZO;)h%cyAvhzf3Gj5|@3coeqQS9ugmC zKWHY5N~yU9gZ7yBQji*4i2ndB!NiV!YU7>vB3MubUq>+-QL8|D19@*UHpJ$moEk|6 z;Dg0_DC07lcW`7RLx2;hqBFcn;md&t96^?9BuO#XEa2!Ci=%&f+om|#Qp~JU!%Jcw zYYFu<^JZo;7UjtGf|F%w+A?XynUc)2U63mzCqbc!i=@q%$vR6YpifYF(}bf-9!R-w zSUb|8^I+bH*&ZDF!s~37jo~L+JmBPk`4mhe34?vhqM4}-jmmsOlRC~VkJ5lC@khWL zqdPb}^<5C(O}b4?2W<*kp~mo&a<}T`eji~Le?8g_l`vilwCGDL5F z!NN=k z&Mm(ix8s4zaoZE0m|B!sBi@3L9B9)saMeT5A7sl^b`4vWDl4lP`i=H=ZTnDP%*UEL4t!L$4`VhaJ>yPB(}5fb-7caxrA?F}j*rH`&S&#Ya{-`p zvXq4Nv`KBq%F}^6?0Fs|ax_XHW>+=0GhsNB%Hsb34eGdwy+qHnZWd|HGPXugjD}q8 z^FqrQsf&@PsEk;6j_!AD?q#`suex(a&Jh>iA5uf~tkl54z$FvF^X3llc@UETH(aW8 z-sW7hT4ssY4sk!ziqmkd!~AiTdpm@nBk3)U8l#tK5%!098&6$G=slBwk-jn-W;VuF zC{eN4wy=f_yHZ?X-Y&B(GMD2X8F`DXzZcsdPP~E!Vr|AzJ9i&~)Z8-NA{Glmd~S__ zk@M*Usj7uFb6tsWj>4uVRWrnUI<55Pct%_JBb%&=1G;lvl?x8Q!N^8%JL4gEpCy*? 
zrz!i?&&n+i^4$8dj>1jG%cGJILsHkClkh2kf;AIiyMj!rV9ZFIyF=C8kH8^cho&8D zOt9wz4acf*IUAzT20K|zPu|t2@d`S&ww3dE0SS)_vAkT5JjO?4t`|KS_b&&*gvM7M z33bH&MKU-<4Tp1Jm?x~;f5|H;2%yC~dXGe_5R1eT+=1>?K!2G4a~!1(1xZ1$Hv}u# z4HMzM*Eu&KC`?MzZeMB=8;{GHVD!3C820T{N1K;}w%ZGd9kxY*x`tyopFM|a8k&MJ zm6~#*j>j;>z=1sUPBM?cBSF;=S3DMx5+eS{GCu*D1XgGad{x!13wu+JUkI&wJCX3d z)xJG$#N2@kjTiYdP!f86$(O$~*e)WXPCEYJ}l0!a=s%A~TeCi4Ha( zgtJRcEaec{vCacY`ENkQJkxK29&QfCV&YKs;85|)uVEhKo)&^fR$k5|ZiPMVqnIC^k1AovV7wQW^ z-FXQEZ;Ai_#~5f`=BeuWb_B6Awl3UX@Z^>%*fY@V9j2ezdIkf^egouGl(lhOkQ0uX zfhhf%4MX5^KmrC}O#ZWafH#D$V>lnR3B-v+>`Y>YrMrI_+rx)%1KNSe&KXMF2^UgxVYzP$9*+T# z1w%zfKxUQSw-13>G;SYm9|Dgw>pqW~D13NEr{EIM-q7vB2E3UI;IQ{%qKS9xbX5Q`9?F!QH-{LQsm4A=m_k|90sNeTC(S6 zQC*KxrHqlpYDt1=kF>DuLkx=<#9xG`Z-9}p*ol=XB`U8AX05)>=39C^q&I9MH9SNv zFBwNuac+0wArtemO;sqYfoi%6DtfmB2FOa`bYR%? zV$9<+nBXeu+jhg2sA-qQ4gmK8Jr1j-k#`2fsxZe1xocFaX(#t&%f#3dyu^Zyprz>n zj+{hMN<;kGdCDSvWx0Z5vgz$N(pIb{Ft@VydvVj5?nb zGXDU8LgF;^n2QCfhC_qdk;-kKwA{s|2@karqv}Lz6#oCK+Mp2t&PM)EwMxve3xyGl=eUsfgh*-g@-EMU_T}1mwXl14joQw4G_fAXi z0o;|i#Th)(7#|-0yI(b4SG!E-M(#AMFe_u9o0Q)@K+>*V8**67rA*D-Nsl+6rom z+`7ysKB?QjJf(Dofbl7MHYH(22{^6 zSH|{X!832wx-%mYAz~%o1RLBO!T2JE1zjk&h)}tE7dv`$*LZf3_!u9GEtwE)dnoRZ z%duvfpp2NUrIe2m88{o4%4q1y$93f@lIIw` zoKJgKjJS!y+-;JH=><7BRa=fk7{&m` z`3v7ET=k?_BreR+9$lS78)3zA;PqLzn7q0P#NrTQmI@psL8Yz7f)qqu0}Hr#Sw ztumZl1lP29@f60qNnX-x9xpUUb23H~(Ma+C08IY?B^x>EZPWG{c3<4GJ;`XGbn^l~ z5)@r@`#L(wf1_{GHdNk>NQ|fr{>@~J!5L8|<9+9k9PGTasO#nhAeb zWDX}&Hn8Sp_FQV#>zqQt?#qaCt^)InPr*2m@!qi8l6IW8DP8Jwq#wiKBWyA$7^8)! zT6As?HYKuG3lz_vOREt#8sQ)A<`6K?NCpi)c-k}NAWeZh>~dLneK}Hzcc6P#BuVGE z^#hRVK?;cq2d;97bVv1N{MMeS)b$*=$mu_ZP#3?RVKl<5Bsm(!qZy4RSJQp)oP2Hk zd_6@9^F5upbM=FkF+hWogl#Txpz>5J_!;shEt57c}NlKTlP8HrY*=N9FrVzFUD++z+jx# zHD;dWGp)w*ZeVk=5z{ChHbBNkKprZWV;*TRs{%6C2|N;{S^SwOp zCi6EsPWbg%5Zs-B0oEoD-S2v{R5W&4{?S-4c3FZ}y)uq7(>Zo#_OpP;KRl5d+K2}$ zA3OXIK15a_gTGfeR8zSsdd9um!uni~7Tq4yPsAruu~Ou8)iU)vgME&+&)|^G$B{-+1I!pqMJI#?`Xu!X5^ZRNLI5pDkB%0 zn6#lWs2`6WM|eWQhPMWYjVN-ulpNWS9+1%clRhv$$<0UydPBa-W9w>r1Ap zr*!EOFmk~5Q|`DwRPXXm^m$Kx`nRTsVx?GOP0p=>4OGnW5vMl(%Z>LPf#xJVMMy@1 z9@a~`SYUX-P3j^h75KgsBMeSnkj3=EvACY^AyEzSHulGL9NkB27|J9{G6Fz@a7O|N zTb*4TfsC^oh#(Mrxa?8UrSvDH?jQ9Su`{B+p7+|Pl&`0)vytGWWUL95jqG?8*|Nlr zw;>r%?Tn^8!XB2knx(&{?t(l}a2DK$Jy9WUa%vGLO+(h-*~c#{oYg0wcNk0nv^9JT zzv8c?7De3bZPk5$4hVL!T3V4fQ#eH1$;PGJalgeAnc6Z+ESKeTbLLdAVJ>2=Na=)b zWs(-Qo^?{~8ZOd4H$2RZugYy6?7`!7`fNHdB1be#nh}ogf&he#Wm$Kys~wx!=`r_j z0qr-(@kHh}B5B=}Zn27XZUWrkWC8BT5)38IHp~&e{{SRC9v~ZfG<8NIa`zAJjZAqF zx-O0}+(zQ2*-=4{*LD#yIg^l47{x_iAk2d=nreaGVx_z5Zew-+)DG!Hj&{_3C0cg6@ifp-Lc zPkL<>rA=0RTl*TA@Pn=8pWlt7I@IhaVJ+}Pw!=*XP{?`=Mxn^)s>7;i-G`1w5^;%r z=G}-O#Qy;9kEe2yM)IhD>XqU@yc6DkPlF_)VEjy7gd7JeP9k2gpAZRvhMT~NN0>JW zPd@>1d)Y@+;f>!_Gu0F>ce~W?9VX(|lXE5pBVa*C5XIc`;lr)6Kab#3FtYu}*~yM)=>)B&n2$oI+I&4ODdx z(bW%j<3t$hpN}w`v@3u5HLfd5fmEHry9!$T`;uY6J85packFF+JwlczwzCQab~SldEN; z_2BJ4T-ua@**5hHc_Mo!CU=Fl*bv@I02R85SEF{`YGCr1AKJ2DWSsbQ5Pvk+Sz^cN zwsyiF$sjRKG8FI|49BWS0in3ta_)%oON$~R(d{F#*&mTyH1$`!DyJT&8-s)6ym=G> zvd<;Xz@)o8WxsU_C;41F+5XWB0sfOn=@gh_SN7!N&Ux4nfUu6lg~J;xdJZ)8yL?5K zhuNj3h&TLhnaayGSX1+W?6C@%xg75y0|_2PLLk`jvAP1z+VWZuP{XA()HsxI>}oGi zIP9ewT9`WIsj3Wjf=+Euj+yfoDrZ9^3slsQc{+D|6qK1{+)ChRrOt+p%j$Bd-~RIW z{>y{phJJ#ryBZu)t%5z5$f{;sfesGP55qg5fN6|kd&WBo#G_7kw%OYVsG6K-62ndL z76Z-uRdi6C+lTdxold}UG8%^dNSb^_nzn6Y&24-5?c-&`>ygNJ3Pu z4C3`PQ<7X%T^DYj5HBZaq8@1@gE&}*aN zsA?k9`?U@3dIyfK@ZeR$F^6iWIx5Ed3hIA<2;8*fPle_tY$n`>@xCV_iYOdff$FHW zQAm3_+F2RXIn4V^&zo+J=?8Z3fFvY<$8gs%ItjMSi**giKT9xdVNl*}oAM_q4j`>z zIIDQQ19eb=ZbwufX!x0W2Ged`aXYz-B5r^Jz%hT@SZj3>hh+3jn5t|2s)o&+hI!il 
z9%986H4#;O2c$5ryuAw&4bxPQWtymy*uF>lkv){<)fFye3Is*t0(8}MID>K^G~r^X zaGMX>G1dKB+GaUsj|A~pt)ytK!#EHRkKUO7MoqTP9YyzVqqLNo61wxV+2fF)mq9d2#8V+qDkkU4@R2q5_=zKlCL zVG*Zt{iO@9P|o?EpyZ<){-&ELIh@pOG(|T^F($?18kT7*I9gM*+MS03X5*`BOlUSn z$}b6sr!~~w>or#FqkX~;+311P4CDanQZCFyD7a48?Nl$Dlq|Y;?}L+%JA9HFVQ)7V zn{lalT#TER$qgsuP#6=A94)q;H=Q({rIp2x(V2+8(PUiQApwVJVcbQy?)1q|&B%$L znMh+X#zjsOT@B~3oQBv9WBV&~wGeWeozicqrsuc1J!-u;&AGttlw}jJixdtJ;W$ku zeipQikt7e2Xc;JL=$#r|jG{*S#E+U=l=QESP;m8HTfa0LmDT6zw>VpdmgB49erhig zs&q?1sB&{2A_xO#bOoeL`m!*NkK)0d(V`*k1L7ew|FBdVMMfgUD8Abijm z`+_Y}3-76?ANH|DJv~p-H-8h3NQ-@z{{Z5E=?0k^8|P}ue~zoHO%WlAjZDw+wtv2b zpq`-Pwzp{Gvi%upMPf$u?jE5pfw~Hl>Cs2}smt`QBmt2t;nF{Av_Gchg4CsV+H34a zQYV$8qy?9Ng)KY>wpxw4uc)lH;gwZOm%L#S47uA;Ko#c?Zb&}VBlczMGI-8rXZ5r7BikZznCHLDAqMB7=%ytqnjudjb+Zv}dVk=go@Sls_u$m; zzgz)6rQ_pV`X9cLfs(!?uX}K8uw;&zpWjL0qYMI8_faugdG0l1@&{xFe79JenbQ9|d zGVNL?w5NI=T8H;l*`1LS!JpBf_HK(j73kFPYGe=r&cNR4dB)k1@4^KB5&V*-#WD6# z&5DmdS%Oi_cs#~LD3|aeBzq?XzW)IH7q6WJCAq_HM1LeR_&`1w!hK^S@F}UW{SUB* zhE`O+J>emT*m?l8I-B(tp5P~O1Sv|80U06`&T8n zp~{EE!_L*Qc~dDSY7A>xFxoDV4j{{XURVZwJ6$FP|` zdq}w^9s|1UFRNs}+Q)ct^&}0BKn8edN;J-ej|c?BlQe6KWH!+kTx!}smgiSUr?~s z8~u_pMAOn+#d}#37U2=O04b`vNk-zKt8M(Pb(bq?=Lz<1!cqe`-@G_x6CZc8k9Na8 z87ORFp;r?qPq3(s`zPUjQI)dQVl_bfQ6|J73s&Kqnnrx~t$xI(uVPSs2E;^(1Ez40 z%t{A=9ME`0WU8MmRfx4%)^0T&(;I`WbAkASbw0qf0-og{I$S>xql{ZGMyzxuGh z!uMU*ebytG%zTnRXJPZ6xl&WgUu(05ua29pOAcUMvRz=`{I@3&`Z~0qFsK4yEdDDJ zp`?_AyC}k6X}h;Cj4gjVq4eT3^7oC@L#{oWvFq-G32f69sbXxkVNtH0=GvSDHcwQ= z;qy|^4wFJ2?bjjt;$PYsq`LicU}=*zG2#!?clA$%`PpdQ=<0zIk2st9m5nrao#>zv zE82E?3GAo#9PUfX`}~vO-1y|XDq44J9BLCEdLaA>-Lxgbnw5zj%(~xDc4i}BmFzl` zj>8$*l5<;(bnZh`I{YK_A0l7sMl+A(P_A@|`XWDQQ%SgPHB5s#arphHU#wfLArdpD zrF-76)cVXt{x-kOE_77!A5GWTAX~WQ;&#S*hD$7B4AwRrVc0tZx=m&$EoQxCeQs28ScIGT*dZ6=kVrSX(u*0r&%N42DyQ=NW<| zJ~l;+C`#*bC`PdQ`fQo3FSa*Esv4$0=C_#xt|X@Fs%OT_a~w-^{H6i~I{7Z|Lr)W@ zWZCfJ9dm&&Kd&3d;++>@}EUHElT@`?W0+M=Lnrc>b_V z6UM=_l(e!RX{W?~P7B-SsH?ylV3uo=#U2Cd;kb-#yoO@szeY)jrQAX% zyu~RTZBTFJ^!;gbsI7)#gv=kYP}%jyD^jke({}^SWqUZx$IJpR=BrP#{{Xw3XRx4R z{{U-AscC+KxuI+8i*b5C_H)0rO+=lamM%@Y7eQtYA*@D9)qtsoH` zPYjU&R?~mdzntZELlL6q!%bW?*oT2uhZV2F$g5wTVhbZd+g?%Z}QxJQykFfh& z{+okvy}`=KIaq8+8xTRUzC;mlMjpI*OzEqlyVh(tk z1mtoGRFpm?htuMej#Io2edgv3q}#F0w<3AMS)c-7=Afxj_$7PQOwO9GOl#=n*;K@2 z9OD{@5BA*|jzkcS61o75b`rtylT{3;@goMRM+U0nlP`4#WAxlS#0gSYDOL1s;uLSv zIh0?4Y@6EGy@)&rIfJ}eBMGgYT?=z}C*gJ~BYT*<&vJUY{GWfvi%A`ZsPO z^bACYiW2r`AH5TpNWp7dtHUXraNW@o+q^!jGm~@Omx?EHFSM~h;qVOAVoXNB3Zn1> z?7rL_$Z+qO3j_dUr{<#Q+zn&ctu@V^Zl1Ox_W;Nir(we8AmpS8x`?VJu9;DA58-uC zm)SrJXjhQCOIiI!A(34bC@POD&Rw zTpuuOfF$HpbzDqwx+h1sO;pM9FqrsV9!dui%C8KUDu*IZ<7oc?_);kib1rJ{rMQ!qVM3Jc0Ef@QDWSiW}xV9x}HuZhc9(jLayi<@Q)~{2?NRV$EPJYT<_g z%HKx!P9Y$CU!-i)wKy~=?T;Wj(&xvZVbP;8TOu87sJ zstIJJ@Wb`v{WvJ9SjHhqVZQimQ6pIFiFipiGD5G>%dscIvrD4coAILvqbE8DEdqGOKad64w_ zarRqupF0EwHg(j{$Nv1l{?awGuC7!24MrcWDH48GKw_r#)E#xLAApj`vP%~S(543Y zX@}_>A>JYs^N#N9K=}o{Qf5Vn=bP}p!J189c(oCkldy@{2`B>9kRPYz5aIYn-lBzx zF~hEMokN!v-b<6VU#gv7F?Yz{7zpexWPU{|WB7_Tg|xRFWOC9i6Y(VhMXR)3h#PEjVN;Yto|jAKt-t1{Fz%mW_4-Ry^zIAzW<(|Dee2~@!{hAb)N=`Pp_EPIEhnob zwHZ)gUd4Q}mbVzBm(r<~hZzlC zwKIkO@G+W8LZX$I;5V4dvymQoT}zIj;(Lif`1mR$PXO#=DE>ucyIBkBZMQ$Fm5-R0 z7^YFUgoUQUUSF{rsjIIvj5(qwG>khQjgTC8y-M%1{4v(QffKfz;XbY52M0ZRL z;7=(=_8~32V-TQ}VTjXDK{|aYs1nxMTKauTUQ=ugqPZv95hB2Yyjvr*9qXWj3|CS+ zZ8a=jK#&W9GC!(wPX<3|AZnusOlz?uJR;s zj2z$xT0nXMD2^)j*4`uEq)kDrjwPFz<-9EuKRz~$oPldIin~sN(#9a z;N_l=jh6v~usv}(ouPL`qu_8K>C}P6a42H2ziBFZh#gGTvUI-8$FvCX4sn)9Zoro> zH5JcEZ9!AlVw40^;_%fGlq=bZlTPw3#0$4Bd=!gK#rSH2k(3VU18{n}k_0{6L`NZH zfl$>@lXGZ01H4RLD?<%tM)M~7Q7uxm+-(q^wpEeSYPyfVhrO;M=2VZ8VJ@cJzNXJD 
z@#;*&xfruWSN>r!%C zkoA-`)DrFR+KHHpmndo_t(lHZO!u0XE%EA1!tt7HN-`FkFG5;fr*vs`xoqPeQW+_h z;4t9ywULKxCmBHMZPw10o0m9C);APOO=3)1)eTg@4bH=|b1%N#;!jzCzZIxD4S_qi zErN>!60eJ3dP{R+{?vur{W#owY?%qhU`jHKqIVdOLHH}G;>R(_?nyK>@{yBroixw1 z)Qn?(w$0m@oKOL9DUksN+gO7;-(sRX=G>c~#utJ85Vz@dam&$!$2Q#0`|%2ZbCyVX z;T((vfyja`4%NwG4ZvOYD%$IlGBNnWSWEP(TDaYZAn)NK<7nUU3RnuAJv@n%VajO} zeM~tf{xKOJG?QJ7(z_z&ipp$+WW;n-z;l~-(>}RE#PUUGy6kDeH@Uh)7fp#mt{T~R zPT5EU-hmtAA-*JXD|+WHDQ@62&OPZ3IPE1z7g@0F2PYtVvl~UzG}qH<`fSw=(lH&I zo-5IfJ}!2WUAY zFsAG2-)b%}>u$j2B;16f@ruE4lXVvFMycabI;sbr!yle=>?L*_KY59LJou4fwkol@ zC?86rgc!S!7nkI<2bDg zlMr$4!en1V(3@l)UEl@$(SXFn80I;R-xL5GXz_L=OI2)D3uPLn2$vkSvV5;@wWvLg zHSaLSWJdiJaIqWm5*N?nixaZ%Xn+B88m(vE;FyiMv)R2LD@YQT{{Wb9(|*)eQp@bW zO7wnl5(-35lJ99FmCwfhw7>_N5AAUbZ4(^S9c)dA`n zo($APL$u~va1#dr`=}CNLwJxX1YU@MQ`nnkJ-0`jH+n{O;$(R?(Pm-aoDBsOl_QEfx3ulZh67> zP4zPLa-0a^W4oysPOh~UHh^#rkkIskm$j!RE=Ria6UoM=H^xJL^m&mB&f~ostfQ+q z)&VZcwmH2|eJ$0%W@ByIj}B@}quBjFNGVLjB+KphYANO{<5L{t$et(~4(()Ab~wdF zGmPU@a5H|6CN&KVy(*4bxemEEmR*fxWMdr*w-3Nzy1B9-o2?a4w1~W|iE;EZzwr5jG?^}Sm zs-(=i(F1&K$K2#mg@K8w;k%!FGzRED(?(g%$uFVXs| z5)D(-lg`6S?PaSi+M|`Tx&%qc<#aZ?Vr|R)`dP!xhXjYo!|+6#gJOcJlv}Bync-67 zM>}NeGq?BQ9FQ=(sEl$!o{;o);yvS)l-j=&LRR>NDx$hhbh(4icAQR}MqG@d;2_*_ z8C)O<2hHN6@u-f^4ye)$sqqFR57`b0H!qH$q$Qog0~Wln>XB>c5O8%C-Gr~@WPmJ` z7A$+Ta^o7-ZRZ(;NuPShen;XigvNiuw0TuiA3b*UAryv_e~OK+2loCPf9}=4^%RLY zovJ?*o}5n$w)5#?{{VNhjnV^d%eqv48!zH9U;K2xu@q~B7})`Z%I;!+`1eP^1cqZD zaDK?Ij~O&!`a}eredyERHRtTn$}#Hh0K3vdrwo<7x7dl<+C>Y~M`F)Lu_?&f&Bh~{ zyv9K|ZiOw61=?L^1}?jx9`*NiA^j?D5g6n?c?8TXYpXIlz?HpArEFmO!b?4w361Vm z;on_=QsikAhp>;s=V{-m0Masp#pIKAQ6(2HnFkQjHcX6?5Jzc}0METGr4mj@VL@K^ z+JrkMx)UDPqzqqlV&uLNUe;bBymvq&s~C1)Vw|7Sm6xz4m8Jl)@ zl844;s|FVYSk|T`=pBw!!}Z6t_?DZ4N}s;0NmQii7&h1IXD^0o zz|6cZGK+TMk(s}$%QT1SyFd3HazkJ`WAX_BUEiNS$!iFhIIU#3MV2AwnOUEBl)MgK z#NraZMeqj5#1D}|*a9lxBU0?R%*N-PhDtWM$<4tN>9CafvIMIfPmR zjJsV+ojp}k7W*#CPn=1zGMlCSBc-sFLp39l(6Rj}=7f||!Ez*IwDnOAlYB^h0(R-yumI*d) z5Wm?PeMn(Y^S&E|=0oR-e1nhV*P_k4_El~|`S*Zu4~}aUCcXd`D&aCW!PZ8JezXpt zpP{&p^ob~E-i!})8z*)!c5%+x!%q8hr>TwZ(acUiZK-aYZJz=-gxNS{=7X*bSAiD+ ziHRl-!zWrlX}NGeBgHPC5vZV)7{o!8479gd2oTZb+Yj>QeBw_PHo%sq)Hr0FBVd-y zqc-Cx$VeQ)2H>qs zpx$xQE#b)bTnC<^o=6eB>sn(S;F3_VHS}|_wPRceuzn-&41}%KVw~0~hq*l+wwFG` zmu_F9JvZ{Y0NIATiOZ7XlF=O@YHk6a<`1bRQ!LCoBgxi-J#_;l-ZxWkz!g5Fx zH5o=@R7jBK4gE{EvX85!j3?Vuy*fv})D7T&8%Mh@S{5B3a+YwX#NpUEGD9yzI%V20H;4B4vC4?w+q@?@j^dcDVqqwTVf5;9C%N_4tf6o#dVfMjm^+LC5JnmPwto{-CIlX< zAPHXJ$k}kY2%L?!C`e+M-3-ieDt}34C;Vj?xe?(fSv}!zXu{zgM~PwhhD=np>EEW} zE)B3Ykn;%S975fbF2_ra!O_3J5ijjs3}|T#wKN>3mo{9Kengo3lT0jltEB)9Ch=%{ znwB2`=NH=3D|I;D^R!!v;l*F42hyl2o3Oc)AXG>DmXN<9E{YAU#v~iu{7?X_<9I{1 zwQCV4*>skT@4C(ai-V218@DJpIl^(6{_WNrrlx-t!(2X{MGE?S{{V`h z$;k39Q^?%yfwD{qnHy$GT#e8ac6Mjts|qLL;?RNURC1e2s?sj-(3m#9BX^|dU` z$QYgBgRro%+GZh04mds^e_}C#(Me zWiy|&aE}&1`b@eo>rk1KxI?aJjOCk{eB^S3j(V8;GA7{UrQ+~05`?Y@$c@Kh5~;Yj z(~UV0<5P)@;By2O4TIt@V0e%0I=3%Q3KWYy44jcUtWPz; zJIczGfP}1{{#)q-c!p+&6ER}4Aa_})88rU@R-xS_spz@z;Dp66<}qA8qK)qhupkk> zGa15Tt}@L9(R@w!sa(VBuNv9{7<&8Mi1%-Gi4Hu|Kun(axc>mzL*tk=RBB<2;YKau zZ0~ONZndG}YGZb*cxFaLN$c$9CG3%7wJ^>yh_Y4`FAv0+#pv3sXSyaWC>v{Pamr21 z4bq9;oplb&vBP>#QA+`#tWulPp$*X$Ovpvk9s^u#Nb^#K)Cq%Q)L*pl%7!5k+r&lc zX&vx=qpAcSmzvfCihk3=X=2XHlr%p40JGhtXStppAH?i}MNV4Js82Sv?bNbiu<@y$nL z&h}KDpf)h8q>%prri^$I!bLL*sx^3B1x3%*rD9p+LC^sQVFeA+S%2xr{{Rz5^QmnE zB(>&bqdHJ^QqfHR0GEl-&*L*6%}7gpwy3U^ckrlnS5Y5>@jw!Wd7Y&Ph@ar=qkr-p zKM0bEl;d-f6A&ZOD}V6MfAr*#MARJD7Tn$oO_BNmCl#OY^#1_(%3tj*wJTDw{Yzij z2^gD^H0(!@rPJH=iV;ZJbFD{L$ee2r&;J11R-Zql5k^G;Q9jciWFPYAU69*1pgmDY z2brB<9$`n|6V*cNpYB#x&iwBD#r{@D$Y*|8sQd;xaC%er`)QrBeZqm49kl7`qIXKN 
zo+tFKq5lARZy5Ax=z<06S%t5h#m^3O(JDsC6&6p2v)*-VwM+`i@JI37m*hI7CK=58krf>|nZwJK|k!SA14of{7Z$b6b&^ ze~Nno4ZXn^LTqQTA%+Pp#z_K8agsoWPJP5KuJKNsyM+s@JEjM!{oO(Z%t7C1LAV3e zb?U5bb>_c!kRP+EoIcayKM1c!tE{Od?sn?yvdz|IBAQ~8vu0`o5_ckSjdl=N<&)@nXtDkkPn;UxNr5&e4vVcM973A!2Z<3 z2j6`E0NJP-2xCdBtevwjL=(?UED|awqIDA{+TI!ULMfCo?Hio!ZErg-m>-f3u2XX( z-0f#QJfq*SDUA#+7TA5+ag3+=1!>1+M#MOV<9(!D{K`%xAk`0Ltapn{%Ex`@5J#Cr zPQV*(u=OAe8Q7%YB!*=^ECC;MDgKm+MKB+V&qjR0 z371Nja|o6u*qmI_Mph_Cx<821W*ROIa5woa@aY2_olK6Z@n4!!Cbph!N9s2=&)x(e z_1k)(Aclq3v1*9yV6Ae?~1pgR*80Ag|=i-I;4 z8ag=r1CoCRd(lZZog<%8mBZEk-TB9Imxv2uL?i@jJt6&{+{P$gq?Yd8uwAE@>%#W) z0ZLP1G?i@$(>&X;KV*V@#s2^$pBc2QZ;`%tQFy;caNAC$n7K&k&s8-0EA0Rfeh30y zZeHZ1kSD!)d<&!)pY3Px*BlSC34s~A8@vEE@yAaNrRj;X0PIB03>q?MhQB4+F%YI?{t6rT8SAnM-c}zatD>n zqB#YsE|5~xRGLbNPL8N#T69`6{;kXDnCl4Ww;oHc;dC|Z=&5Edi{wv=J#jn190)*- z#1Un&b?4l!timcg7GH7J#^}@@*4LEuLM6oZqID0!UHWgsT0>u}3mHlBE<=X zowB7zR=-n)wI>;~}3#7H$1aGc^rrf&xU8;G1OEtf_rfFfswV^lYu7l_H> z{{Y0qexqGN4#(<8&1sBV8e$*%h7R#D9qG|XJq)<%HkRzfcaTJm?IS8jCcy*??gYewb0CW#2^l*W-sLCLn!U4LTcdp>#VfH2iVB&**fS#- zxMOFf-M!>uL`nTXrv~J#Vg0)k8Ax3%$xGPNw(33NW0=V=(q8*XSm^F6gi~T9P1wn& z#&D@%_HRO54ygEt-Ic*nF4k6IPxg3(r0qG%ID}znUz6>}I2-$8yTwa-OQt!Sq1|}O z@st~oBE%Tn5ON~Ksbdl)ctzId7a=it&IeRvI*R7IW>=qTx%?}oXAGv&DJmfc3o;jr zbDWN;HJCBkoO+bwrqL@iM0LlahIolw9`GXFS*%IPEI8zpaw#GJ#qNt^V@3Y}W#SXk zBYddpN%nrrZsmR#Q^KNIW`o4pYY@Zgsj49vw`&Cj>}ZU zObocoM2ta~GhvxZQqXiw2V*>ni|IZIEK-2QvLO3X57Rrw4#PO-$eg5#ZgMAjbmV(0 z@3Lq_e?@V*;&ifZs?FBY%&}D09dY)+`!X5dnwHgd&iu>{W0I8BYKDERHY^{8J*`7thw1cClMZn z1&T^OmxILr0Q0Z^0AT7PT#&WXk*~sI!_uWIb;h0I6A*j17q>N`XIn8mK?jCMB$sC_ z*BP9%%^(Ko(Vy%}Hry7QFq;zOdnYFgj;xY8Om#6x$j(n<6fhsFGcr!W6zmj{Lm%s^ zzyAQG)E`Kkn5@BoJMlVu{)>r$Fi!Ow(oE^7oBsfF6QQ=)yQ|@rYC>d%9gZ8hg#*J7 zYF=U!{z(9AiU$~-u)6-bSI7OFlZ%tjJ}4Y+a{ZNCpZOE@AOoXsM`Utoul}4+c!Bzq zL;msq08pZdIwSZY{eXh6C$^*hzy7I5Vv)BF^&b)^jn+sL^yz7#p9{)TDCurKXbL$Q zSK>sQooo;LAdn7zR%z4967>`Y26W(ffAs+TWJ)r#{3TEIF8=^zpzxpn0MXI^0O8pA z#1sLFiFTNkG*0NU`=jw#L>1+KdUa9<4W>RFPr5!)E7Q#X0P7I{0K!ZE0I0lo?>v)> zQj6pn796jcRgd)$PxQ|}17H-c5-m&*Gb8g+QQW2Q{{ZmN^YyLSg6cHyyeqIPj(E@X z;H!Jon#6QBBB;(q2^38!$f+1mD?&+ObpUDE{WGx*uEUaZhv~@*c_4IqzJb6;0@wcI z(w5r`T}=#>KSDqLT%+3W_R5z@q9o|;*DrO#Rzb}IZq3Muw=>ETKLEVT?ulY_#v*bp zH!jyqZXjj`o1V4ksw71_-__6iC3wD$*3`MuOnwt^&ONhn5td>CF#u`hk_1Uh5u=pM zOR)qJ2sgMRf*z`>niz%}?`sniHyRysoOm96NIFmq9zW~5{iP3&RX+@W2yQa8VIbs3 zDHFXV8)g3hjg#2cE;=gIBIG-5bCs-gk$lGjypA!_>ik8|qbXCvKIkI<02?QC_YkV8 z#3{qFxa|J`l7Zs1hBZV=Y?Gzw4kXM3Wib3i^#JdB#BL*DQnY5A-Z~D2saH=H`5R=Vbq^SK*Av4j0B@jNIFLG5<%RSp9LvfYvA@+MQt<9 zS*IgC*^S7~(%e_d@Sd0Bv@b>tKNAxx5jY8CW^&-Ywh52dSI6rHcVD|15BlW^@iUP< zGTWlgLJkeYN+Mpg9YzC#)SJqfK%Z%{WBuLBeYE$jLD`+%A^m>CHrv?82 zvVp1Swk=LOt;O;Fl*Ru5MCQFM%H_z+Z%s?MBgQ(-RV=LYPPcEj10Psj59Pgk835!? 
zup~K10zL#G9l`L*w=~L?rpW~BYVFJWrr#w>VrQA{mwQlAIKCdm6Vi)jWPRyh@=?cn Ir0hrk*brMnXc!n6*w}cl!+<^j02U7BEdl;F!NbAA zAOH}NkYND-&k6tl1{Mw;0TBxT1NT<=_j^bPh=?!jd_d&I zE2C{pv+x*{(MF=fFAtwdm-TGY;j_H>9g*>czPUn<;L>VVo*(3eEU@*Hp~V{;01E>L z2M3D)4+Db?_wUcZz;eT35t42J*q)Rwj2qqDqt9ROe> z!;--bz`@V}V6g#k*nrn%z`M8WVX@!Jl7Pn{qc!%kq@mQ!(!I84;1#O#QM>s)Q@8&t z19Ae%${EUFV!LjQn#Lof|j4@tM|M}Ci8Dxc^Jhg0;;`N>oyJ6FW|>2-9IPQd@` zAe(bLzsFW6tL#U?otkeh8EdZNM%6CrAeHh&=Z3SCRF#^feD%-=#%>SL)+W@8gP(Ve zXj3wiV|vZ`tbO-MhhkNwDj^FYbg#};>HERjV2_q{#6@JV-TJi;)ubDcC4UH<_>QCt z(n;$Qwzd-!Q@0!+{JRk~JWDNbg8J_yX9wxt?v0L`8d@{=D*!mz*YXNj()%5cDpNwV zDWU8S*erZfF%CxrOSQOB(o2^=#*C|-fds+uUTC+9GN*f^?dqinzXtD|1uFbanpN#j?{p%RuT6z@Knj7oZhHOL7em@Jf5qGQ3RRtarh&s}D-RQb|Lub>g1f?c zTJH)}aDF)t)3k# z0iU#c#WAJ{K-@k0MkE81`axj9ejJr$iVRQGbL((F5*r7fX9vy{oAX& zQm?uB{C8Z5&onk018yh2!qOjm&E+HUBY2ULUWfj61H2rnZzR2SSjh*+ zZpbwMo*4OLzzq8~Rj*NLVU3ua{q3Aj{O#m3Jsx@;uB3`1a9$yBQv7Y?GgqL4m4U#_ z=*^BZwULC`jkovbf7h^Nxya=o#BfCgfrJp(TL__9 zR)Z41kvxGW(@f#BbdJEx`}U~eECuwjeA4m8T(LOg9ewVPTDGJ{tB=-i++rFdH(nqMKAV+oX%Kpvnn@jqYYwce?$sN*g)6$SSMhPp5v{@Z+R(;zUA9w< zx1&9hC-vd4IMGueGS8Mp^gU)C`(x<*fh^&sReoN0`>RJZ0p>M*=K~r$4xaKLF~Pfi zLCKu+8J&AX;r%@jUB%Q6L9lhjU&sK(JXqT-TQTDJe-SnH{~+rx;)?v-Y+StWr5?MJ znTflTIaG=qZ}9o{EftA{Wgd^pmP0g$gERl2x1bTp3A+<=E@<%%0q2M>2{q9o-e)t$1w&5AS6Us8*HiNH!SAnj%95VGjm+&s z$Zh&7;Dhp)F&GULD~1jxAu31$wRTaKy;3=AR+_amvL(|qwT8bbBUlvIYQ07)8&~}x zJa9ei5y!q4S7+}AV<|7=$R@BmJctm;U^*y~GDBWNqWX9GXGdA|Rs4?Dke@U+rTii0 zJX2aPrkwcizm<5QMRFkT@X*z2odqpqEK89J^LvCJMEFxjf^P*|ceOZgxKzCDw#HVe zw4kFd8}31x1rr&xlg_%r>X+8LR&2ch#v%Or5TH_8>D6c*=dpgZp9zO_}(FxIStmP`1}c*Kz9B%908;C z!osPp`^#2mMBiE&&6fuj{i+ddn~+mn-FcUd#&$uc_IkbuA8s(q<`p*`n zG77ba1VAs#HF)mzgi4Cv5c~-oc3o@QPVIMtwOw^9cT}s`eo2q9#;(fwD`oiZw=1n? zS#BwTKR#gzDqdii_;T#&laJ>%(WAUybQw0n>8{5Tih$07J86~&RW-MBwf`{f_#z6uObl1J2KAQUw=;H6 zKuy0ehQR;UDzU^f2FG4mn!N%t(E8{#C$M6#luSQB6{ncRynDxSDUwBELVT6ALe5UG zAK)3-vZH#QO>>srAGL#|4shIA=$Ijy7s^QQ@IVt0?eONvSx7ZmIJjeIEcDz1hEkQG zR3KzEYUwdp4AlTP#(^p!CW$*HxMTGH@tjZK^;#dy@Pw{OrM&XaYC$(8(JyYR0oW1) z?(3Z%yW-Jr9+VE3!(oz}Y|`lg_~%#{AYCYv6i_wC!y>W*!8C>>TxC_oG`h5iX@q5M zGsnvaVM?&s0-HE5(XbK4nzxoO2{|mDeoZdHqQvDuS&IlEK(P*3a!vUh^#j#=>d#Hd zqUa+krvpGI+&J7ZJPetI4X6j+gMjsvI_1@%OsI%h7(;pD11ky~Hgje+P(gX3ufvxX+Gu3VO?#kj78q1X8gUe-TFcq*yJR*y{7d!O2 z#;8y$X7MppbP9S-S<>HnLHnm9IK&efK8wViwODBpJef+G8Xl~7>uIk5gD33?tG=`q zl`s?%juD!}USt_s`>&nIXqoIf?X{rO173u0{wIGSRvMOzw)3)dQ_j;BQ(hEBB?cT~ z^FROEI7_J{3a`v_ux9WGBfLK(Z6pb=yBQ-BiR93$p&1waxJ;1YU$;cysQA&D5+5wc zp#FW-HjnO)v%`?lx@Aa@ouPxNA_@HN))hjd>DmN6nHOeW)5N6lIg-WCsW`lMu8c~P zcb0c72{-9S=6lI(lh=&sdj*P4=>`D=Ypfz7A~u-ac+L-e2vK>ab2z+(^L$$Ow`Vdp z0<(aV2L$dT6s_=DR?&HUxio6|K{be|hZ-_IP8CSS4q1o^?@Os~Sr}}&Odcvd6~S+- zb{FQ)s=-^K5}*FZGuvwfp=%MF<)_C=c{E5N4%CH@SZ8u>OPNQ!v7q|Jy&9$OZ=PxX z-$?g1l<+5G-atK zAz#Y{O{Xd_Z$kyB;X|7_RTZB@mTHdtA~=pLU)=8Ti4jqfxHYs`tfj!Cu~2psnU;7e znObHcreAQfkeHwq+|>;EMW5k)77&9PN5Ks5hx;n^gYUy#@8zcAGW>ZP`DF#?5IE`n zN-8d=y&3MaM{SPx3(?)skVQJCb)WL0dCB8+{O$?(+`s=hWf|m|`fnvpm`b_rF^8>z zv%bJBfp<6}?zl`KjYsYy7V(+YG7Iy22%5kvfF_Qg=t|9ES3>+pVX(_?)}pl&RmZZm%+p~p3jYrs!RzFCrgeM?+$XkcMS#K-4@#@1*G#2vRx zP+m83_EwQP-FY0AurJNQC>AXm(oz8KdtKuYU3i^;#mv9Q3%2@J7GS~I zIF&tSZp?||*jG#s6rPnCRcDZJ1zY=aeTq^#(hbC zeD)O8`s&Z{`OK#*-Fl!rE;OJth6y5H?f~n>F_{IG7J|vCKmVl5ti0&Ez=A-tE!!Wf z;Iz(#iBQU{#Zo23%>pzo^9;7~3<2{m+XA=dh79 zciKm&v7$^aHO4Kpp4(Lv!sV{leHt6D^jG!cD0A+ITa&4^k%6<>WKlHtTAl+WQ6G{X^S8Z3_Zlpo^m4&n;1-sbes&*(&j46Mw zR$Mr{$L<8;|N8oW;qAM(F`0q(pTjdxFnVNUYV1&9b8slrj9~L>llK~yW9NFqBG~Dp zHBdzee%doZ#{1ntFJvfO5U%7z$VK`46;SUwP&vj1>+IGqBcwKTsg=n{Zlt>+kk**RY=>5dD zz`FBe3*MyEmO}OpJDs#`-#jj?)ZaDt>tg^ohWb`X1i{%gn626JiP5u(EpI(zh 
zVL{|(c^dzhZH}vtox1e+!){3Y?6QZU@aLD+hQLSq`s`vbkCsuO{&bT{Ze_9{T#tFr znf)%OLD47`=?|an&DgO6Pqn&bN~QqB<8b7=ET8MzPTY~P0E%@&Umfy;2{`teteF-S zHm9V1cW;FlvY*;G`W&K*jA4<@5P`R`hWB6o1z&AL2dU;5zNtWGQH6P&kOU{;6Yc~B z?)s@FGqn;}TobJM3UHtZ?v#@K6rL%O%2YFW2rJ)(DH^`rYqaz2F%}A+8$C~K@6SZc zQM!30U)MF^P5$vM;6k{%7wF$AU=}lnuV7v>MWH=QD2kOPE#~L1T<9FcN02odCj}>P zm)?Per<=;R&3%|D>Sk?xkUl7}Rx16lMu3!PAU|FC+wxEdHcC={guup2i=pMe)r zH;pY@QVJuq6oZumn}Up4XU!+L+Jp2e^LTl=_`X%o)2T(qhsndd8*$Pt{y?iJ5kWKe zTNUyjf*Pf6yun8ZTwhM~9f0(@KgY9Cs2tSy{CN&gdU>Zo$n{O7x?rsdSCzF|<&LNV z)x(PA7MR&Z3IJ42qq_Lp)Y1CK4lqo-vt#R=*fUKQ1&3i*Qx>{dSv8pQI6XVq)sSds zso=RG7)F!9HV`lIVWV34#$e{ka-wGoem-=@^kL6r)I2er-tux;hSK0BkTP1F7Cl>vvK1s90x76Woi$`Zm@HHYQ;=02 zhNO2+a4?Ns#P*K6`rOi#U#>A*M`Pc9n0FZ{zoq^!?-HJFcVPIu za6gdV;s!C8$s7(PjJHq%rm)jvXZSyi!H2B^>FDC%v&gnrkN3CIZ{y1k%ez*%3=^#b zv-%RLMz6|MGn6Ed-|$P*i(a`?y*u2H(%svsUm@h1gIf0lL!e;_4XRuPPCg9AdZ}+s zr_{L;Sbl;<1IToyEv((mAuS7O@3wIKQwj7gYUF?X3F6I?IrL9jDgYj&*ZhyoFlGC3 z1e1{-#8`2v^gl-1o+DHJS!X#~t{)PhHAW8)40;0*4|T%zUw74|+K@Q^iGp5}B5VCd zQW;dKhgPlX%C!BnhSZ*t{3CMB%=rmqas)oNa7ogY>aF(XqwwEEd;1N>qsxk|e(%LV z#2yTzGfAd8B!OOQU-!emJl>KlMbl>rwNN@fesdyLawb|I*HDjs)|iZh^|=f-0?XjF#1qulUnoL@*{(Wh=6&s1ItZM z0%TMp&w_2n&M}l4nA9$-JtwjvQWu+BoSN~>RUaXieka)y>$l_r^D1(-It zblvY{eRrZ?C7ka`%0oTLFN%I%YB76?H6>O2j-h_E%}~HB^>J<`No6hbEBn|$Iqq+? zS{0$zW(g5xW^0)Cp8@ZLZ+ko7?cH6Tuo03(2R6JAZH{pr6NNU53u(wYWkn1c&zm1& zF7;-~J0hC#<83TCyZvV-cC3=zZH^(kAu4K$TvJWn!M zXf3>Vl>Y`Bj$+hzF$}dYgJ4?nYoyfaaX`AKoR1tB-Fi=2`;a^ z;NYH285W73ZMy6cuBU39610bRMGCJeBIk9W_!Hf5ND|kudj%IiILTXWJtH9$mlaBJ z61jHu3Q(SoU8#(Ghu94MB~vTl;;6}L) zpQ5{;89(h&CokXrW=>W}oa?-Yi(`aI)xPRIiEIs#HxJepN77ia>CbRiXMqdDTXB!1 ztB28$HIqBzYYuge&nq@*AeDB*?m4pG*0Y|Qh@CGtWpqIq$M4T6!qe{QoH4ZJyL3f} z6*YpP_-7+3aWA*80R2-XMharcFGc(yDw&vK7Uzt3Kb`?uGmg--m&>>5Sb4-Wr*F`T zihVL&ef<$tZ5N_M0`aewC+yE67OBs$;bk{dr}s2R>NO0Ka*RuH-W0+54p}QIiQ?YF zre~tTELvz|m7q!mr=280!-wX{qoW>c&tU0@7wg*ec2(B_w)luy5M`zI_I1uZzs@0z zxV$w$RB&o%(|(Jxn}Sv9-0;QhT#~QgapF*FX@H~!6b|X5>t<9}*Lm(dcl=^>B6_TgXmueA$ z9l7A7?zah);=xLzXeP0_saclFSu?!$?qmjp><#!r|4|&oZ&;}IAA#t;S`PMQs)AB$ zpQ{A#9QuO$J<7KjnxS)qeBSyXs^nW9J~;|59kNW5Twg(pR10VjZnp$Ai~7F38dM-` z=N0ft#yEy=dU(8-dHl1g=&Yq$7ziw_Yvn5|&!P>=3yJ~W7BvdUbxAb0F-ZF3BYUG( z9tm1FE0(MLvrg-YEJ?&AbMP-vhJqPE3ljCQd{;TVu6)%p+ziNRtcEl`XNAu&3jzsd z`TEUGJokd8G!H~vx+Ts-?9f#E72pq|_w8h{vUeK3CO6)KR9gT2yWY@@`hLM}R*u`s zoA4B!_C$Wa>85jzKoC7yEihy$YMc>A_XfupY3%ov&>e^B4;~R`MJ%VO>nWmV-OwR0 z`iovvUe4@cT!hA6)S=ieoI0MsFfo7de6}th6_*p5FuACNp48FdWWFA~F79x9JO81B z5#Ohy)Grz+3|=b2u;GlYZEd^S!&g9dJ(5Ka-ER;q$9p8^u{?*2gPX?P7CiD zlF_-p54G8e&T-LsJpLIouYnJv+AAP9(QzS&wO{FkX`z#D#G~S~yO&t~pJWXSg7+%V zH9J~zXL*{33d5mp8kPO(OJS`8Ku`yCF0aJR;rKA9J^Q(qmFrxaN3dZO-YPqN`lqj} z^LHGVt_5hLJ5uOzX;JsX`_?ztfHLsQ|j1vkf4 zBr}e*s!ho}q0HRtaYJ>W1`U@quvSgeRpo;{?b@;X(URE^v|=ybelPW~Dyer1y`swPE0N7On~%80`@ONvI$JfaRMd~L?_5f` zn+Nt?0v2ko8Rmxf70qX7mDhU+G*KW?Kj^QCsw~g;$kl>ksoTiSkn(TlQnFf{&8=eA z%;CrbiuIaiF@W1R&nOP;^;&AKU;Sx>*&?dqD^D;E7+(RAuYk>j_yM!JqfSk}0Ixy?*3zg%M^oR%zfN8y7V7e>yMj1E&z3L4)x`Ya z6?A!NS_Az8jE0^Sn{|^7_^jPReK8TJ8`TKLe`}s8Nj@8Al8quMw zRmV&puJ6F!1y(J6fiQinf>!tJL3B8$`J?wjHVyodVBb}{T;lQ( zX}Ij&&lF{}yn*%2YOM+WzI#$ZxUhBYIh|dk7zC=FFq%|HYaDz^j}U+)5W=fywE;>pTCb)6BS>w4(Fl z-FMXt#&dC%+<@Inq!_~0;1Iq9bSO1Ti4`CZC?)P;NOvRjUuYjwf z3xf~Kq8s5TCLuo5FZ19oY_20qvHF!=>sP=!Xs&;s`UOp@6=k=|Zp_ZLcg8EmadrEy zB9kU6%^C-9qgP%G2B#bpAE$(jFUKtW2JTp;o4Ft|EO^w2@H`+j%&H-Rj2g^RVGh$k zDJJwk8Z7&z$?}cKlR$AjX16ep6_=n?n2V_%I>;}YfT5qvVS;~)TmR#$L;vw$|7OY= z*l%DiMOH48ULHztlXI@&ullE!h32R$meEyV-Wy9}y-I#KBJ%+~On&0~?*?X4T+opoZk$QIz9%KTO*vTO39bS%DI3mMV7Im-R9b0KI}8uS;maU+A* zb@|(JRq~)m?&Q~MV+%a`C16Qs6@y|*=H9UCLQ;i<9;ZwdnM;b{PFV5%8s@9M!sV&M 
zw&5PkE8siBTSLCtG>RXbwpSsWks)S!aF?tlyPRBo&!o%-&N0digqO2;fj9 z;_g>Ken&A!b?7T#(kqZ3fBn;FUs<|P^5sEjZ%XKwBgzNL@5vCyXJ4jx>fU^ct7DJe zsJN|`oiXdm>OaXzKO6&o7la88n|$a-D9miQ=ZpNEnYMLI_Q0E46ePXI#3W*`@jhSp zyu+Tu_S+IZ)K0lBZsnAAfiurp)r_AM@GS4HVOHfm=pU?*9DH95u9|~scH#V%7AU}$ zMrAW1_LU!3TgIDo#d{&(m_MXZ?N{4kq(qgvy5YQF=W^U({);t-f$U|MauKRj`EMR7 zlU-`$-ZT(jS4r1Vll)|^f{&k!ZTXVy4~xHwyF=c7+Q^~3p$6Pm&Lxaf8kE23+oxP! zbkxYRreMC72W8)hlKGzP1M6fKXZFx7lkx=6_t$qs_146{(&y|67vxN3OssIv+F;A6 z_Z^3Ityq#EqCqi{@L${*Y(a--tySwpcL+tM2;S2D(zI=`>@K8wv&Y5spo%OV`XRCA zE0PDwx)x8f%?6W5z8tOlYmjhMDE@Tv)C+Ym6j%PC-R2*1V=BueRJvi z8ZV8ui$Y!2pOI{Bi4Rx5D*Z(Au2qHEzW4VgQp~ojjmTAk9d>4U%f{Bo^4GLbHc5(^ z053U9v7IKsQjMM5F!kzl2w88kJ4e7EV^$iyHedZ_C0B@6WYB&|dI9rx;woGS-Xy-# zO$sQsQ#%cn$z1-MWik~$p)^|i=+74B;(0x0bDrlB5>(n(Ky>J7QuS5`a29>;BIHZT zDSumZ4=`hu$X?4j%9>2^vj#@W#Hvg^Q7ubV-* z?Rt%Xb6@ah$@OxXvtr6~;-^6%BM(u@LZ=RZf0q@R&kpUk}5e+5)ewf5sjxAKa8Pxn_G{+hmTwB#nLr@P?cD29(Wj(#-z zY*QGbw2^;qRQ+;U!p~l1^rMlaR)i34k}^$+hjIb_&8dj*YUZa%BWRr>`kdVq02bQA zu0fWogAgw@=TXr(v2;H*zf?=gK+vFSvLK9;jv;AhB#p69cgJbWW^*1jnt~pI!uE~w z9ozJ5vD-w8aQfS1O4JB6GH=y`WPN_#bI-V!{EQuOX;S9#GvPY)-d%3+kELa>x=AIT z)K+<}OqfGp*xR@@24sdoLcr%Kmy*JwmY%SdD@Dk{d8?Jea?{H2u*{^46~zvf_&@8b z|Nm7SgIsD?BD{NXH~~7*5(h7dxSG{coHyDf)Z`!U^8d-gs7!#jwWwa~c^H+_IBwKObrfJ0Xf`kVwl}qnvtdtkR?_%FWwP9BY-s~b zpjM9?X;1Sxem~iqz20$u`{@An6%vv;lkQ!U6Ac!vZ;z1m1&9LPlRuSygNpU;2@Kz5 zsOx=gGJQ7R@Nwc2KVv;+|5kk7K^VNSS8M3XSkTvSx;`pu#j@*jI;OaXE9gnQtfD*x zeFebF?zJo=R-ojo)@xzw>q-1Mcu~Q}Z~a6h7Q(_WS4nI&+eOg1Q9gG$9Xyi7;b}td zY`K&DGzwKvu3PMHVLO|mJCxZ*-05Qw7Z)uu#BR-lDvJtnvhV#h)YG50NZbMb*p&;ejLuZrQbs(q z-l6IFA{``941Z$yy`a|xl^zZ{yby^^`cp(3R-GG@dA0#2ywLn7O$lgJ$RuISe&>f{ zg6UXV{Izfoj)a1-ZVWM)>Cp)TJ z#yv1eid33+_K%~1jTV$^64q+xex*msV&>X)IzDhvm{+WH>K zRZiLCD$uq=wyOWme!ghleVN0D3)=33vW$>ap2<~^HQDmjKCzKUp+5cXt*2ev3_%Uf zaLxabYI?Su{l4Q=fj!A5B-f*Tj9I#154R_wwYMNODz1f1oVJaYt!5=Twb{@lq>6H) z+@ZF=kFkN-?!vT8T;ZdX&Yocc9c`7s16?=YxBfogYs$?$gP%X~(!YR?Y#PI7JLD55 zdeZ!g;^02**z*(Id~f^tq=lZ+vsSUy(vmsX!LXwZk}>)93h4EHi@ovkt0tDr$^dMzA zz1TN%_mSXcFft=7!AZ1^?Sb_b(<&!c=|dNaF;i=Ngf zG(69#FKsoxIjg=EVQOLnWk3t_9{7(0^rSIY(oxrD@8{u#?$Sh%dCHqc$TMKbPDV6c zl-<>@TRTMyOkR+7#OS*E?p|7VTRXsGjhRJVr{MO=->Y5`KYgD(@0V-ANF z-9P63|0mlCwSD9Ex&IMcFcV^PnSNz38cJ#6Uu}PrmS4y{a&RfdY+~!hg(N(I=?g07Iwd0>I*xysn!llXVwMCx zc1e{4vzcoy|MxFDa54D)31&ls15fcOg@+sbeE8V6CfuSyP>vh!p*l*QYZDPu+CJ!& zHo&Ssp^tTWKhIm;nBox4!m-fki#zAf&7LAXeR{ExvgJGU$)q1h6)q!Roi#Nn_I2z9 z6k0=DkyPQsrQO44l6heBc#Yk@ez3qE#MT(ifw4w2)F_ss5UAPBUcl7dCEyWRwna?H zBNDLaOI`TY&wt&|IC|*FJVshb))%|z&}mvgPfu3tx>HH$8O|vbIK5L)J>CH}@iZPU zyf8$r6#r6n%CXKF9>u6tlkK{cJUkgkoP{068WSIe-bB;bp0}bh^1b=vp^?$7Yy9Z7 zh``;hJ2)+`V4t>{Eoo6C_G|dJ;@(-)sO8U5(VL;)SzA$*5W#Wf!If#DEE|}lA`MNUN-Ek z6pOe?^+B;Q8DLub8cR0sn04O~1%D(js|&5WAC2?mH}pfB=s&_f4a2zwB?#E_icBmW zZki{vtDl?j8IZX+Um0`lmfM~EjxI+WRw@n8CPhR$Q5_M=TU{OXvcpIbEZ~49rO@B8 z?GIw_I$R!S@k>5d@!py3AOtbbJ}4S6S-H*2t*3?r)X^74wdd~hOQfagJ(D_-Z*{ZnDebPg)A(y26`Z9JsMJ!N8YVg*R2q|RxXe>= zRN}hQ`hbnq;q3>3C|z#A**xN%A2qhU9{LK{yzLvpvMT6!9fNbg$Dv!G2+qOEhA1{X zZ6_|Iofodrwzp{CnN=+j3)436RLy63EPQyG=zBFK%%zDzk=%OQaD&MP*d}TjmoLi5 zT@_KK%G%hK_pb6l4%EIeew%(%R$(`-LwZgkk$PW8v#O0#!=VJUP&fF4 z<3kgx^vSiu*Mdt;Byozhr7D|cHp0pjj#IdwA=4%@p5Sr&7L`>Q2GLg}w|q9{v+KeB zF+4B;N%Wr26f+koR{^F%?>cV4O`5Bw(9jwQeqT;iepR`1EtED46(L(|Dq|C>P-1A^ zAh^*tb{5X1EK=(U0x@Q9$J?5;Ee{U+T3G8N7-Cj z>WWV@Pe4Pr9?r{PWCnh_-IG6X$D&*LHH7}k*(}%=g+v{KgW?Q^=C|(4NGFEBJ5G#R zlm1}-y8E=bs$#Iy?G&BmgKDI2uZa+~tKK1LKz(fqg;aR>_@Z1I-k!~CpH^=blx3Dc zH2EvDn$BG23q>Z484`0^nLygCVmH(6PyGp@hM9}Oz8TFCjyB>EN)Jay!pEmZuYfW# z%dOyH0)v%@coFYeiW8L9F&0v5xicN0XetjXqknoi7VK)N4jF8i6gCUJQh2$H 
literal 0
HcmV?d00001

diff --git a/notebooks/videos/bedroom_short/00003.jpg b/notebooks/videos/bedroom_short/00003.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..1066831d249a122de593c08faea0ec6543b55442
GIT binary patch
literal 66813
z4MkObQ5$Ub#?#a`FGJ{&NL*nKF52%6;r7ez0eg4dE!tH<&9af)K~LjR*#p0*!k^1} z4vN>!OQb=mLKeDs6A13MAti{f>HB2D3rWY8h8-0fD%!9Dv1RaRSG(vN2RfmadzyYz-mW|Od zp}!=lbt$43sH6@3jlE$`n@!iWk>I?;sKho-@?ffMw>S_O{y8bsR59$sQybyfJ>WQQ zcVA+pxO^ct{onI5!4ss45!{s~aJO-m1Bnn+%6@Ww`7+ zmLdYG${GnfQQ&uYLdmDnPZQvLYEcGi(`n)&e3^`9$-wwd#4x_KmcPB*HfZto16nc27kakrU8(^s~sn4u(%mLRpw zBWQC@V1+JCIuesTqg_F3($zW(?P$G^w(F$SC#P+UaMeX?UMFFf^>Eny7m{G8%;r6* zvAU>di!brspUvbN4@m@&%HuYjEN%>Qni?qjt$M2%)p)A=YUCyWt?B9j5D=xcDtDd9M=*Oc~Gd{;=I-ozwm~Ou3TP33oqXd!R61CR8ScS9s zA*(Cv*a)PYHcnL#hr_pXnp#gQA7qS zivIvJ`GzNkPouJ?BYV1*qCCIZ&KZ0034iw1frt2rH61?RW}=cn&ZkCXZ??#CLrJ`w za#K^(M}k<|@w($SpS;txt{a>)2~pG3Kx}IrG=F4I+L689E$-pV72V#4q1W`yK-!+J z30-TJwc`-tHlklUEw<>?PL_1FPNr*~Vl%O}F7VH3qj|tjYpGo}r$t`ObnXro?KHHt z?Cz0{&8N8!HAIXa?hh1OZ}`iC9iCv*rk}y6fWuk-y9{za@IURPpEABS%@G|V=^ z#5Tv_WzMBHyCjx9_-;#2Q;>$RHsyeW%|rRD!xzBcKb4bm;1#CW`iiQGGm1zZJVR^Y zyPn?nM{LJ?Nj=JVwA%Iuhq7k<=HBf;2Q|Z%NC#@uy`&t$$n0tPgX~$=n%1kP%IV{! zWo0X!dWSK9Rnv@vU)i~yHjCe5+B=l|iLv}l^bKR6Hj1J0H||K%OL#wN>$)Ne)-$Fs zm5EEQKCYdUQ%M}rxw~7Qd=1>vH?-yxW38!?-KJ&2$?6ic@<+|HHlma^FmYXY3W9D| zs`D(DbB+Gb2!VcIKY6@xwhF)l}6Ih$?8PC3A>q5;d2S`EAs_pGVXPkpV@EZ$C>N(IP0b4aasOi@<*KO0IHzUB(8GP=beMdx<;^=tj961sv`BZpXLhA~t+mC*#P=a9Yo5xSn$TEo!amU6*KXDM* z94^~}Hd@?P9=GN#Cr^#CRa8e6+!`EC!hZ1^2YTw8bXizD*4p&S5YeMqtPaa@2#oK~Vt!FuAI+aw&O* z#mVGHNfVv6V$g=GiWq5UadeW}ClhAhcO>UJA zJMKC2OQu`CZSusC__~!d%i{+e>~~4Hjj=bIdGA;dID)i|45pc(PFA*%ec|ExtZH}U zwX|nkxTk?^ZNmMCsNIqV7i>jDXaajC*YMwmQZLY1F- zX#RIVhok(t+tz6Z-E*J%ttz6%=xXY3#ZcqT$L5qWT~_RJpT?;t%#7G|18(N4)?Im{ zZzzZKoZJ6pZ@^eNYTN7H{)b}pXjvoqDdSJoDXK(5mWgV zxK`BEvZfgX(>`F*3kz`@*qd09%__6Y`n-7xe0eloj%&VOnzUlpOu4|3?r<3rlv`z~ zHk$b21Hhg)eXFqN)_|HK*IQ>4#exhODmGN}hh}MJS0o#fa}uOtmyA_k5&%P*pu>gd zJ10m28cvZ;Jb-jkj5fpysY6~eD>%yLY$|bA$k%qW2xuZb(dU|5;R%hZic_}7lCqN+ zGoS-IB|a^=BM3`;cSsEgVVb4*A!FuuDB50r0W|qf%{r0(8%eHvLFpKa8|`(1`rCAh zI1V~Uqob4W!p5@w&Hz}2W=gni<3?m?Ileobxt48Gy74!Cxona11NA-U8pxQ^+B$Ze z$dY!K4k5QaNctbBYSy$mt6G7F?KU`G=JyOvBjmo16!6U3#`e3oZ5&3`FsXImpCbL$ z&m-rwzf)9{)BgbC8l6dqoQx0v%W^MqHa$ynhw7uruok`W)z(`v;>&o?5Wqfb)1Iyt z%h;l6*fWjKg#81N>Ng6Nn@RXZ`-#8Q1xMw-Ea|rNZ6=00W)FhbxOd(7k#C9#Tk4wI z3pBC585|tT5!}1q$9wp546S~{xL=~WI{I@UiT>tSbYqZ#i}0O}>Q50{B6Uhzu>jZ6 zUluC3H#`f}N8I-R0GQGDhJ4ebpTrjqX2H7;z}Yj>Bfm+$-b$T3PB$f|;-3f_$}D)& z3yTqbpn|wY@wM0PNT_TQ3`}u_`=pHFaQu$~fo8P$s>Ky!?Dq@P&H-yG2i+rMWp44m za$2G^xHYiZb|@TH9xF0|4t8q?ki7JAP~X=|F{b8uVIY24x7Hie=5nuzVi?N-eyhBJ_L z``nf?#$f5%uBBP2e=VMAAd(C&WUT-)t_NblF5aHjy_e>5UXuxqF|f3Zj!p$of&*aY zL~L@q8*$zZt`B&zQ967L1j97Z=YI?v$8)rRPD-MZ$vHEtbV_QMnIUtjXBjqFYz?_N zq!u_Iq|$_C9;@PrYH35y*{P)$YySWf4V=e+MQVXl47g;4&SBXjGb4+CLTr@6*o1@* z{F2a$`D9`W9Y!O63D366jKbz=f>K=iD?IIUY{z^SwhZuzZ}Cg1!KXxOXAW!wC(6Jr z7kj|_J?dOAQn@c9GHn{Q*6<9GeHkNctUn@`hg74LtT0g3xZkkXI-WoP&caMhC%WT` z*pjL!n|R+8$ClEwYp7qW5-|7=Z)z!`yB`d#&5{Q*=vH~ZNuwrOyxVs1YrDs%6sQBg4(qnJX znmk9UPYirkjI{GNGTXPiH5ddW^xD(O#|yty*~8eAL(U9Jnz{#^RhoR1?j0iPSKp71 z03zK;y+2o`p3kSNi!ZV#63Of(YnTV$2dngiHBObL)b^%=nrH6D#$p!aTVvF6?na01 z$MP>?P1PJWJRU6yGD%NVJ^V3FKI_=h4^@&~q0Y9|Z)qlc=h|mc z3Gl}go8m@ea6jXkX_S#S7{~|~btTxVnsmaMj zaums&#*9wIZLYPkciVTh535(mDV!T4TEfx}P7coE*BMx;5Y}ywjwa|0c48^4AA5t# zWtT*!!~7i9VVOsb(d0Eb@yc`FhGFe0s(vX-sN60W9mkm-owcS5@HwCwi0}8?mpk$NO;hJ70wm$uXM~{{U9@E}Wc758OpQrKi$N%4zi4JvK*rzv1B& z^IP{)wV|Z_7h!jDJ=IqK045*(rhbH(89_WwilLE8$8?(`Lz;b~;xkExl9oGq6>(#K zbn+@89l@_5?do|)9>6+RQOessmYnx^5s!#yD>`iwn3qXIhMC7R6WQX^y!|GZ2Q(j| z;bo~7Hk>jxo;NsTV>vG!6-YffHyao6)KvthrR zm|Zh*G`WS6GDa5WFK{k7SevPN!FJMeM@;NdgHufUtU<4n4NUN}Ww5o8*kQIp_wOjW zFFNTuTrty%8EIO^*^SNY1HJkDO3is!jGe-zV5CPfRClAFR9nWF>*1gyBHO*M$tJ1rc zd`;G;ZYEI4C-+5?+lY$hw~MTkt7-IFtv7{GBbMH+ 
z*B&Dd>?iWrt`D8anmk-m0JZd2j^2BbE%Jt&AU+(@w+`GdX}24Uk@{znuomE~#Mfp< z8|B>LVRfQLPDEmqBBGaK(xvi^9Bs4TE6+0PAAn4#&@}eM{11{j8Zzx-1dYU5Jr5Zr zvnsMd=>Gt=)E+d=NKFGH#@7ayXUPm6EwxU?#)HGkWDdNo;t=h6Wi!E@oKQ1ly>NO_ zdhAJ~Css5)1E@dvgTNwj+pTBT$W2rGtePBA*p{!A4fSB}-P6wQvMd%ih_*bRob}Ru z+H5!cPFAQn@Qn#Gef#dLX5gUbc9f&(1uCW{j-AtL8&q~GiT?l}`>h|ECr#qBJIBOqSfM5aHKkB{!IKYQPK?)X%>;15dHmvLrdL&A47Uw1PdM zGD{}n+eagx%_3SVS%D9NCyn=DO(X)k=*tGVGpanZqRo5L;rs-&pr-Ohp6^#ZM0{-E zw+oa>x-!9|%}wNLk?LyVe=Kb+LqE!zyg023CGU;Fdh8@BBdHpR#!m@Vq-t>v-5le* zGXDT2pENQx)_We_;?h4Q^rA1m=@U}Z&2c1AJbfk)(3+MQX+CUe-0atSH`22A(|L17 zn-O&+ZNA(WY67RpT2$A-`iOWqIDAvrk!fd+@PIi#HFv3I$qL@8`o547Y3ur*ALbBJ z`8&)I`m+S~h6M$F8O~c?0jlc7ivze&kI)rv+>*o}KvPeqqDn z2;4#14q<3skoPj3=I&s>Ff4tcM7suYN^;!b9ylF`;)V2rd8y)ahG-2aoI12loYT}d zz?bZ2c;y_}x*4%Kr=s9(Bu?EVZW*=BIr1u2oMnKO zCPg#<0LyUdQ_CI@1rDWC9PiVsMkW=mP#6~hVpz;3@S z%0`{BIAMdt;QJBn$ru8gR`)xw+jkx5u}&o$9MilzcV5HAFViU**^!b!c|Iqf=`cCJ z6yI|@No0&UZX++vJO?Gbn^+Vns)jm-$2|T4akD&221`uXY zhBR)Ozi5q^+qZf+R8!79%kgt*1+N@h>lA>^u9sHRW_4vWlGM|dO?Ys%$iD7d*GBC{-s~DUM)@0!?%){V zVZ^Jcie|%!^$sq!)-kQO0CGKO*f%rjv`R{7*&9QEXbl~}Yzk5^!feKeTkj$2^_Nk* z4MUvRNx~}t9_7a%CvVvtq;6N*6X0aRa2)o^*EnBzJdV%tbveTx8B$NCO4Ai%TMj&} zGi=A(mF^9a#+fT55^3|r;O0aUH^pZbKKPr%HunXgEj)8DvQ{Ehz{bmQZgOr_NH`p1zCL~eM=(4NH1?tE^uf$Bz*^$h`!=((=$ z1x57mxb~T!1t(EcJ>lV)95y+fgP%JL`qvn|F_WbIZ}=sFY8B}A*ws&>fxu#DcJY0n z_Z_O_^w4`A1f0A^;5GwhCIWNVGD(+Jj*8x$1kf}0@q;`g@nG$=cb2Ds&PtO{k%sU| zF9-J8mK#vRhDO`GY$(xD6w!y%%GnxswEjvCCEH4wM5HBzY&$t5&85*yaW*dv{{VV7 zzBBz5Ihy8X3RkCKb)EB4(AGqn9YO~m2Cv2&*;df}$0;KXus?xE(xiytorR~E*o$Rc zGHiQ2p;P=RvLHDOd14tfQVfXv(ls{^1;lbFiK!z86kxRXAa!XBa5xdUXL^RcT)T+# zI*g_ByFKLtIYd=H^E-+XcV<^4>Ut638lFq1WA#VLA+TdXZyeXCoy;Q4c^2fn- zX?-Jm&R)Q^V5`L939{R-#7<7%s$YPJnEUqJe_j6or6*pD`U&7uM8K$Pa)05yOGfx+ zRzIsB$>Qo93*BWH^C~l3NbV|8ZCtXc=<%f+K2(TFsWHyj){?!4j|IEemh!;xqLx#< zfI61MGKZTUrN3EW96k4{?hPSY*!Fx^e>7>y5l9}Z(3=Z zMw>|!!R9PSz?G9ts_Iz0GQxM_2ma(wk6c@d{Z>pgnjzZ?g+*m7bni2f;f|HsU$?of zC*Xh+*1Ij8O4}7n&B;0OKxrFG=njZfY1JYYJEPznP!aes7Hr%_#`N~eM@~p&cK2Jt z`C_`qtMcsam{#d^Fj3UFmq{C)ijJ!+%(2INIR5fG5qp$HJ7>VbSR#ei%*62cffgN- z>@K$~vI8`vGe^g0+UQ5^{{a3fGf5NM+lH2++ZyNFfxMZh-oNbk( ztSQfs9M|ZLAXHKcbG+fk&Zjlmgy_bRFRF?c4qzpbr;%df?jRBKLrbOUZg_kXPfYFp z9vlbuU3+yESzgF_eqp|eOww>-X9p4TRp_SW(bZ2K_BXKhgZ|NuJy?*B6?DzKHQtf* zDjU0&HfQic)a&Mn^&PJ08~w8tw+}P5xr* zbXu4xHb_@Ax3x=L!^qOgejr&;)M>ucWoP1sQU2nGr#tR}?Oo}w%+p)q%?d-fr53rOR&TUIDlIGu#Vg9>ZfPRrPDm@}7>E?JYc0yV? zP0jG$k>VqN?m!kxk05z8r_yw~lX_;5edzf!EuLLVhl{ z!3kK_)so=%!7Z%W;W#E|elN5ywXV1*dJSf%$t@*YuvI~sb4OyPGm~a#$?f|kPp8-% zruSx>Gz}6N9QbId!`;$#ou3e0Z-SemjjY2QakR7?J)Xcjt_8B;WQc3sEy zp;1=Y=lmNgO?wjcdxX_EQotm;NwaUu7XGX3ShbbTmP}ZPCHVnv**|1=C~BJY zZpTi?lYzq!7urR@9Q(*ZQa$gGPbO|15%+k0l=TbM?iVdAbJLh*ZXjEp;t$>(;IBDV z6topCf=un3UAgT$4<2E9CsmGNWciG;T2m#4D6HUi^(nzt^aK>-BV znu?-#@lo+o%^Tp^@f5M77wqd+BuyFnjmMhKt$*Zz_yFU#NK*$yZ=`;bNo{pk?mjrxs)-Fy85~bcYn;`7$)9J2~

$kk=&T$@L7S_U5fU?$GHBb)xQXhA1Dv}B1wOHSyKN1=~)f+ zhbV8BD%pQp=wtTCKP#g=u-^~M)I(-AzE9V2D<|tJTWHkre)6(E=rztwQ4uPhOVQ&k z1ht^pY|G@O9QD-BycLx&+wMy5%&y3VL@LJyZTf>L{MqJuT!x@#9Hxvr;t_EDWNQ$( z3W^$ez}w-u(ULo_mDV{5E(pMCXQzYfGfr?Gl|xrg%(l}sdoxDU`RmH{HUm&(PCsT= zzCU5I@Wd!Nf!jk*5DGMgqvSmT&)$byqVxjvfsWDRj-vAv*N+}MscLq10|bXxxa!tlHs{;~~K zJK&ZF)49NV-q8*C<`#~T=PCjj8LohgfyZ`{&u+yorD;7gs-tDGwnqNRKP4YXr+&#; z4dWte^ z*Z%;i7RI(tV<2~}mKg91h3utvpRyL@>R?o+Dv{YZANHeo!uFm-8y>NeccVD(OqL31 zz{eYIHaqP%yd9x%j%ESjON!eBqT|{Ccg+E*l@%MCnRy<>!xIRx?QYFL)w(-)(K(y3 zx3?czur2EejF3n!($YAC^11;@ihM6`sqCG=j7;`&RUK^1HmQNoka|t+#No_1W(RRk z_q(jD2@c)-k%6*=jg(?x4;0yvZBm6fVnA%#?!N)5oO{qZ!S>J`ibmA98_Da@0t`=}` z2IH1%4yx2swV>XW?!wnN-_e|Q2NSk4v8Qp`l%FGrT>fG#xNAbo)oZKk8YiJ5I=F~) zK^AP`xtL_!L&iXy!CJ6r-ZyQ+z}nY4Z@(n*p;7>EaCfQ0Wj2hRD_WCPh)o+F91a6%)+I2kL zvGjvwYcIeRS>#ELWKV!$1=F{1akIs@V%|9BoeRNXymCcjVjpXD5P@#949y-JyaIA) zsA-AU{jdf4)a_E|VtbEy1Q30Q7;B`Fqun|&md6Jh`!+1?S5%$`V5yC<-zG@HOmbg$ zHoLg>B79~<-@NU~Gqcm8b0lkUxLCKJUq19Y0h>=N>LwWy9c%9dAIPUs={0&Y-RM|l zr6jqOrPy6gTy8{8a3RdOHe-mxuoVbPG#yWblzjdYxZRw}yQ_A<>QaBdBQnVS%z=>6Y5rUg`8&{59p# z#4$+QVutu-W$$cEcn$>E?7tMS89Vu%Fq6WY#H5pE8ip9bJcF)U?xL7%Ma`TK0topl zl21rsj#e^89nJuxHYn+a7{(U;+nWv?lH?Nyo6Gd9kkn8}?LGTmBlR+VIL~ADn}tc0 zHBk0ya5(dBK{V7^4N)Ux%}XjQl+|s1#0^Y@lA|q9Y?wlwNCl13HF*oE-=4)cAElSq zSh$ZZQv1&CC~3-U&-c#alPj}auD^58RQn#QPo(g)$U4$0L!)*kl8%Bw=**A1@(DFm z_=VR*d?@0(;$&$m@ma1prfTxvO?c#h^)g*DuBXMIlyLmgbQQ1aBhOd1(>X#Te7CNw zmlW>eo1?@kH7FV0783D&&gT6K@OoM(TNF%SBEh3(#B41ALMU!YO=t|#Vsm!QkX2m?P9Li{0os|4Y z8J0l@Q>ih7va{+n2_z=OrlbuUGF4K?o+kSeq}A!8*h0r>bjP$xV4`8$biPj^X$+TK zk8uf7YL1gTD91?$?#0uY0~@#mvdYqB*6GmHIL%IEp{X4SpvZTLBEAV)c{`}2>E2+g zZAhTiX@xBR0Od^4YHF)(@SJRM546_-Ywug0134@3#jJrif*Ej|W~;=R$FkQHOQFJU zT7s(3;5U+vTFo|0&MH~nBPZ#?^5#4aU7jgmZwPi8qnoZi3U+hs3&$K=7F+etGiaoL zFw>*dsYcqmJ9w?{eKdqPUz;34)6@LJrfMSewG`5k(u`S?F!FRyw2eK)D!{liCeWgy zk<#eegc@9%WTtn$qL_}2IhsM>ZBbTs8jys((ekDUGHd_n<$Q&@f(}T8bllL(!@q8wm$| zZZC}Y_Ed{?M$3*G%x(L%EM$z}3u$Bsq^#`e9WVrd58j$s6npIg%@2=vG* zDB6F`ng_Na-su1=Xzy_WHSW{rh(13?k)&KZ1a7~6#3Yiu?{~DH&0Ztw;ZF-&6!D$LIU@nhJ$zPFTAg(j zV|498XG@G9L9I3(lWg4#z;;_u!I&c{H)f1A8Sg#$Sn?vc3cTjDUSm^DbLRx`7FIK}@vl9*aLBj1pR2@dFp4W!bd#_|Zg82rQ1Tq{?r!rHx@Apq2 z57n~L2P=f*ERw+Cpk!H|)47#9VN86DxyByPl2nduvL}tgvNFg@^_{FuhKhbz3N}Y9 zk1@<5pnuaXVCy-gR2U63-IbxH$$EzHD;k`T^Y9iJ@H55F|;JT3q$$KzhlPG^Il>1^0c7|-n+ zt5ge~5^OtDpf_+xX~{aDq!6lP%>wKc5}%`QZc8e6V5X~o-KUn{t7WB9>^ggg-g8^= zSvs4)@Y=tm#xwP#ACjrRPuL)Ww5?>*P*O%@>_rnA=8!JTxouVE3Mwx#Hjv9$`ewx; zdtAl|BYT`O#CC<16wQ|JXcl=sBR3awgKsH|#WcHXw(cj#vH&b;K?BioO>L+5G+Cko;b}KWWvt<`psW=q_bOsQ)SR9i2 zaELoq6krsv1tlX+M={Am(Bs%fToHtAQOPYRdgKnucS3IyyM4D7_@*W|uiBF87=w(E%4y{0&Vl?)IUc1k8;HIp+je_@ z$8uco64G&wq=0iIsB3#qf@^O8u#|`E41!MzrZ-bP-mT6zic9PO#y~m04Sv9ToRlf# zJ0vToqh!#!DZrVI9i}^B{Rg}ysHBKh*VMl4VZ+Fc=phE7Q2d_aJJqnADea1=$t5N2 z4NZ%m75@Orp4+d{m)f>zn-~DH#B&*g){a>C0E0xTHZG90N|(#A&{fG%1c8m1hfNL} z#`pd9fzfgMR@${LIjVDSmT1x@`3I6`Qa2QI;$Sl^V~#)BYkZwKH|by{h2-%$n_=eFF-Wwyr3(X@RbXAWp>ha%;zsFlX|7v#Ds zXHS!xh&v+J3{7z4K}@Bsrj6~2x*O6ze zX_L~lcolDsJ-!6K^`p73Clf&AZmyzzLJN`T)3jzoNsdq1#n~sg*-lv`c0t0)CY{gP zQ=l;vdi^DR!wZ=#M+ERl+!I9pPHx+BaLA&YQyaDthMPgAsgbg~6mv9Lv&Y$#4n+N? 
z_c=bEmbL?FOcIUGb6m%D+bg3|5a`Q#NrSwA4_D1C^TQh~@;k$OE%xm~)EdHZ)4kHi zwa`4=f&P>Ca45RuOj(yCY6dZ*keUq8TkqBoWBy9ySNCxvuMB1-{4iBOqNw0qw{Fc^!v} zMKuMoih7z*GQk_cVBpt+jUwSk)sW&HZ=wnLAVo(GpwZi@fycTMKi(?OrEGX(``g+1 zsMPAzlyT#ovqsE;d11mY&etG!sEt;dNZTwDGFG@-JxN!(r{- zxR`;l9|V``=GGB}v0C}BiK5CfgA6c;?SBsFYOOMcwBnK+5)O=vI3#ZeH#E57caP%1 z;>Y*n=|cYi**zcF!me4TrT3KrY#i7N{{WyfO>zM9L#xm?_9lw@4G0#;CWeS$6HG)1DfZ%gm2s5a0oq1kAowz7~12q?rwh(blO8e zzJTqob5vbH9(}ZQo)#PWJ5Y5ZQ=3{N9Cj&`e8&?TRiZFT*Y}}fd*YVzdbji)xogx@ zx}}6kJa0R>toiiEG@A>KTSCfNHHpRTaLY6$bvtHh%RXtYWYFzt0YTv!rtx5#f(KJ&*jxu`xv0?AdPP5mb0A@N6`joan%HYQ7F(+;*-^CZ9gB4kS+6@?fDa@cVOJK|0 z5lx_0iG!Fh1s@!n;TL9X;*Mt3sbiF^jn~&}7`}D^{>)tj>~-dt?&*2CVK{Zi1#Wn8X(bPxn#r zhO-)N8oZ92X$MEH2R4KFT)hW#npI(7+>ID1OC}$LDMxN8@4<6gCmo4i*ZtD`2lXHM zEvX@7g2V!l{U)8~wH=ho1Ie1&eQG;kY!VH#hq}AbdqkTNg&(=2y_S~nV%$;OQCm&}0VMMyLG1zGX&hLKK{Mwpd zsW;YX{xA0vv-+fg@liGIiMXZHwHV0SSr3c`duRC-t%o9fxt>pGK!#*IOB+)1u?-H9G z9Ghz~x6wg_*-qI}iGgbnX3DyW~)Qlc?$M zYiiutVVlY}@v8=bMd?ERNzk~!xjF{3X-sFr;>jGOyKF*K8nq&_Tx zKZ%wotDa~#pF#5*Ny;5k0~j~RRnv?@e?-q zDE-U$5(5plqb`8d++fx8Y4Yt$Q%LW@EM@H<`;5pQXx+(RU~q#5qgfc}%pcD6li&WqSnxPlEgdP=naAkyg9`xO0qS08M;Vhx9p3oIW16Rv$^s3jLJwKh&S&ycs;~! zQhedBWkWM;fvtBUH(%t0GMMWmj`=zw@x&Ck>K**Am6~1baxyUYc8lQ-EDjjqKf563 zVcd5m)Kw7Z>T?00MNk3g*xMNAgy+DdKCRw08=YWF?C;ub%IM{3s;G?S9>%zXvhEmN z?fI%qF_bc<7SD;|&3i}^ebar9A4KiC5%CtdgOMAPsj8b9Y3T+J4fo$2@aOn)vW;QJ z;Mm&dy^jpDMs8!eF5|eZEZye@H?3|V%*{ZtN&YNuw2{4>K?)_~n%Kls+hqrAOp*7x zUIP7>JQLUv0#t6*DHvPb+-`?-u4gU?OOcPeaEq1RJ|DDFWU7or2nEhNKu9yW%7h*1 zP2Rc8hEZ@pS^kvZY;y^Wown?{-Nd1+jW&HbQ3lK10jOx^zDCa!uA{1FMtJT6hW5(AliGXp8<#^^ z=p=15e0~Im!IHE%)62pC0A&3aDFhqoQP{TtAml+81addrt}2fWxFe0MZy+oc1_CYs zJggNOc^}+4CmIhZlj~b-_VPhhO{+HZY0e?du`o(Yp_{ur<0o+$BipB3T%T&6g%mB! zv;^KnjQTBf)ktBeh|<)wUG(p;cNQIwZ_;Wtvs|I|3LQI8U=lb7v(?DMhFFg_J7_)I z4)i<~GfUA33KaT$Z5;+B#EDr^;Lh+Gl8^!iG6LDg;=_w?*E-vPr{T_t7 z8)0iCwY*jU*$DhYH4h6=rf@Mx+L*@H$!1O6h3sY%`$Mf#i^WBd%!5WGB}Hq6z{`@C zr$Mr%veLW#R`yQ^x3a6z;*ersW5cr!W5bVKkRTc4mO^nk*5n2*t7nnllFk}MZ3L#P zOa|{I)6l)vf@8N7MEaLn`v76y)#ziOjoVm>WH81pl6Ye=bp29>wYKo(r;)i!9Yk)8 z%y%o{!bc(viZzU*BzyXBIR_%3IR?OvN967{) z+&TPJ8oE(%`7jaoUJDQOl5qti&9hB&WWhZo@$W7GJ-tqE<`hnqrNt&qbZ$FZ;O6`n zs>`V+uAVA^?;{d%AD+~u6xJ{}&Bf&N-^pp^ds@~U`_yWDp-($7mf_#9ADLQ=(&}j4 zj*dWB3A7T7L8I=m_rw6)4bf~hudsb2W1EEEmxiO^hU+E%^5UW8DtxmfOeQPkxc-#5 zbeSTvowt(plK3ni#4yA+k}j9%w6fXEz~FWqk?}`lAGw>aCKD(bBW);qE!?^-A4(@V z*u4}K5dGGpV}+5nzyy3~pWI8>^N?x!JURHxMnH5i{{Tye{B3ZcK!f;5KP1<|3x}&|{f2z)bTpc6G0zJJ1^|7bQ8{Bt zYK__w{S*C1dsX@ikK1wpmN=yQZ2fksvv(*bukXkeE`G(MMx~UMu^6hLie~bALG~Po_Ml%$S3pasXLOOD`aHzE z55n`>9@Jnhb3p_pl5VFg1=4D5o!%6!5X=pW3E$n!6iFy5+BQ`TL$fd)i3ZSn)F>-t zd(AaNa8AyD<_>Mq1MZl?3*Q|1&-*r7PxZ+SdOuh%ktoTKA$B6 z^E4F`Qipx$+HpirBjLHXNrDTlg_Ts55I>_VPmJ(;Nc_-RJkrml(z;uCqA?tn&$l08 z+k4tkvp6(VgyF zowYE>J32D4k`4}ND&=^49N)as+{RgpAaqBT=@gN;=Nmi|#wQkkx{$@|ti$}er}q`` z-12OfjNiL7aFRIPiRU5Re}V(d4#%X_=bHm19AkKJO?1C(0iB&=-<^|H6AW|cr}c9I zz@$q_EO&HsxLn(>}v)@m#DPe+nWQPY!!pkyBImFl@rThCq+wOC^X#`p5AbpmOmnGypKQ)U4&aN!4OBgWMzS@)`;7U$Xp zd#aL5^8PdPUL(q2N#-3o`Qe)%>a^y)td7R=d6HAun~#%*#QI*4L$~xN(wjIZ0vSC? 
z_k1JnkIiFikGP^8^1S1PxN~=^<%6-EQrO zN~u5b?K!vmGyed~DaF|8?QVnt+jio8kX1H5W77@x3G(OIvc;CD6ISCDV1DOAD+uA* zN{!dj&hK`9ChB_3f+*#aly0(BjC5EIe(jCxus|0*@lx{+Vf|KSxJiz!#iuqf0)G>) zWf`4nn48J6Kjh!dDk+GHs#^iuID5d$=71*zgvwido+Q5lQ>LCud1-1`P5Uf7K@Mps zD4&L@joW4ceq|W&9Sd*vNIt~k+G1%sp1z8rpH!k`u8t?-7I(ZvOJq#im%XQs1P<{a9-GaM(1q#uTbqay|%z$oM&<_&(ap~xa5p3!9}yrRCy+X zKuk3=KzTR^geyFyTS+rt9I(#NdH8O(0=*=WRI>ghEUmWE+ekl3QJM=_xU~H?mi*Of zA$|-xy%qA0c>%cI+aHq6t@7($dnCqt^7*e=WFKUQ4&s+187S?>$9^GnYa9F?FaFL| zwpNbIn{aUhlF@dL1dOxejARDP*5RR}2H}B+=-egAMG@${gWsAyMkKjBwRe|d7x3SJ z@VF~JT^_bRi$nPke8GbT772L~uBJ1&^@}{Kl?^MzX!Ga~+c^nBK-)rD(Zf z`dd5Jr(|wa>3m3= z&GASYIGi>eWjY#~dDtHmjFG2%NGQN^EyHL34LCq!ttSWNU!^n<+@w#xB~Y z1K%8Ee22|+wHR&yLpjns??Y(~CPr;mrk28^3!*uCLoNeJ+2S)iKZsqboarVv1h^hX z;PvcM5k1!y$mB(V1b2cGS*BOfJU)*Bj}_fa^VzpJ+;eIAJrI_1#uE;f=jP{;+=`Bh z2-sRcaNZm)4-itDT~Nkd)D~Q8|aRsNC1dd7Z>J%lx zre;R}0J$%_f3fzzQ7Te8)=)7;D71`Y$I@tWrBZUjZ*i7=WAZ8(x#h(T9+N{I%tE3Y z?vA2x(0iKc90KD_R~~Uxm)q>l*1f0pni~HA=@md0(jL;&VhFJZFnEHNXlTQ+2>c_? zCm8S4J1ACFYB0YIMIdudVp@0O%aM)PTn`I`!KqS8>@A&v{{V6QhyJUqlYkC;3kPC% zxA73~$q7+cs6atzj;VvB8`zW%;Bsv8mc6_j7cvGzd^Hso+#de`5DwvpUdgI3C{o}+NY{^ib(J`FZh`gTNb4OK%eo(B;lFUz%^y4QM8c)Lg->>%7F$mbiO$$J7I`nu>` zT>DhB=m6P3?rp$zxeUtcZB822UKh32+6GQ%bAiltg%a15&e0&Kc$>Ej!(_1>FYDaz z+Eci~VwKXyN`Zu@fzDK6O>*#XMV%pYpb{$5#EdI^m&89o=g z#)$Eb&UAl`xKjTBmFg7F{t8i5Pi)|+kg`~tB6?c6OhjjPpf*&~aPS^~{zND9I;flk`egWi^(&iDHvQ6D zFToJ{>N4wiws$AB>_5|JQ-p~#8IEIO7|6k_X>)*CTTgTLUhfUVh>$fCVD3kTX;RTr z)C)~d1mE67#~*10DCoLn?kU7}{nO-#dn0>N0Iv_ZSQg0#{F;C^&1~%a6;({PvTJ)m zQ2tivkGj-#IP$f;aU*zUNb z>8MU_fF1PbX&*&Io(oaRJ-L*fi)LP#Y6CT5gDWkJyh5Mfy>}*FPa8h%~ z^dO0^a+NG_&kLfKHptgorQOK`N2^awQq40|%5(jms|ajWplu8n%2D>2>_^_p9syw) zjgq59k&#I65|!@59EWUQMg86DO|ArAnq2TjRTH9>xSHW&3I>jOwEC~*^(O2h^pg^7 z=KkoD_L??tNM?~7kUE_trwwI4_KJSc1N_GQrXC?04VC186-ieeWFki7Y%w(LYqx{e zrbs}vjW)Q)e{)iCGm~Gub%ax#fB|zxm8gd@;tHLu`z&_*a^{b7(0ZdhYEtjD5yswV zhQAvc?qC!Eok{^yNh>Yd!*-rd8HB1Dht*L*Dd_=${my1cnJhtBdJK*Z)M?y(O$<%F_pd<{fe!jgZq- zG=4T=C)$8CoK|%k{4HNuj9drFO;82@0C@mmw_Ha3>p|NVdw^!OCSJ0NH+9D=!T2~N zG=|J>vo!fFs%wn@0A7**0If>P{Fa;6R2FWxs}H=&7ykg&$lu8!g+10N4JJ#`1D~#{ z{{a1oQIZ>5iqFvO4P44);^0x)GArhj0+BTHp{&@)Fdlc^8`>u;le~w=ZWq;rw(PuV)B}o znz2Y?q9$8j@BaWV^|!3Liki(q<@FD9MnFe%gmDTDY;lVVa2poTft04XCs66Do+PMn zl#7;v1_6FU^xacz)60UQ@>HXYlbWW8%gLccdktF}QS85|V^Xv<1+AtyhB-s2s-dGB zlS7crdSH_zm~J*l+86#3fDRT#O#c9K{s;i{BBMid$xX$tM(^FhJG$`LFJpb20`fjU zW-UslP0`bl`tGvzc$3(Ri!7ciA}L*(68VHhIkIphs<&QO`xY*^@O2aPV`DvwUSoMr zd00BL{7puF=?L}!6wh+4)Ko;ex?K7+(SSI}@`0Jyyv3%DO{)H@KT0l~{{RzI{{Zge z_Aoe)=>!yJ{$im#qN_g$)@5KTTYq7!wacZAym2^Rs>`Q2FmsiI*lvCSF0xYnMmFCj zm%G?36-w*WKt7Y6cs0MIpfE1BPYz0%2iT?na=9e@+5%fc#T_hXr8Aj~@i^Rlit3Iy z=F2&xV+R4!ROe&VHXB0=i+Nq-Gvv~NV5V4A0mImgY5owqa1wkjv%{@a4}a#1)7~be zlow(GSzFz;0f1^clFd?w2{&YKox^5(1=g&8S`?I3Po=Ahq7xgi_>n&0*85mW#G!C? zb!AW9kVP5#5#^T>fb#_`GuJ)XdO@V|veLun1YJn_2A}npq*WV7smJ(cZUe&WRv$1@ z)wH?If@T(+<8vH2?JMHX)7%bLKLhYmicM08-kzpdt*{5W-o>sA^q*H6j*+Pscueq) zCuNAS+j)a!!Z{i0s$>I`?Dh_`n)$5V<3Ajf9aLHCV3IzpW7^f9N#`Fg){q%p6C8fb#KiCEBjTx! 
zKn!w4AUorGTs{RD77J}%GtGpJzkg@o;1rAU$lv^NOMV$H9M(8;Egzm~52S=Y) z+K09B!x?*eFCCZuvusaV=^!Pcobdku6}<_VOJ2^suDn=Q zmC!TA`_bZ!$GG4i9<=a2SX`97FFm#lFy` zfVVa_k991tox~frt*0V|ey^#}1L>kU;=WZh%_9i^09HM%?;|8jqAvdcxlpAp$m}^0 zyRW$+mfFY4!1hQU(g6joYk>#60v89!@{u2xjU=v{PpI^8?Bu3db9Ujq%CQdmq8g{V9k5Xbpq^08Xo#gkKkjl+Y9SAJHAnDeQ|;U;*r>n=IVN7@dE> z>_g%acuRUh$}&qE2GD>LcQ*H?n=J0vMtz$U8|Do(7RND?ib8OP)i=Tp?rYDxCxews zJKl2Gl_fK3n}T@TGY-}~VsEgdFxORrJ0{@o8G@8%E-Nvb4Q85erdGeh-$_^^<)uHoj2en_`)n6)?KJ=x>%SwIIS_UfujO&##{|>TJnxC!j<}t6 zAA)T)MJ-MIIpKY>A+H4cKT3W<00h`Is0qO>Env1nRz4T|SX|ql@S4f>D8=N{fwtTg z3_if-Wt&ErvY%H*gP*)Jk9PQt{)u!JxL5G00v2NPlGMI zQVm69ds`IaxR{;nA>-l_C98QibTY+#6J ztm$oZo|8v0qO2DznlagRwl-#TWNG%HwBIyTI(Ja&3}roI1@K<%oxi<>#P*tZw`fZx zVM$8i;uLA^q6X65=Q2|j{vppA;6t; z@{0W~Q$bxGZIOfBybSP2*7Sj0qj|DsPTex43lse@c zl+`hU9no;f(YS1LPS8g%rz63jTx_^WW#*`L9Y@NOvWHiZ^*VEo8f`;sp}ERA$04&4 z^dPoY2^d3~;2a#&a6#BXK+z3Ok_}Fg^zz=vHav9Vc<9K(xxk^ceMWb=+I?yMGMEfb z*xIPR%VyR%mh!(wvCq*kwq=~@&raK~>Nd*CbJs(h(LoT0K zFji9r?b%Td#>&ILlZNDQcIBO~(#Y9ef&%Bo%P< zCSY~9wC`!}X}CZU)}b?L(-vREgn00l+Y@AqvQ@Jxg*EM9D4paSN87 z@Ny}xII=tLJ-IB8EYwTjRnf?9Xsaiu-;;5dG;si+Mxn90PQnQG(?4+F5)RJQ#;4p2IutS?mWq>MSobzd4TR9i~B;DZPK5J{rJn}7@oxcz*DAToLrmlQ6G{>RvO8xLpP?W8A|5mU0MC&dDcQvVp*VjirXzTe`($}Y zVzuU%Qd6*hnO{cF`pch^&-3(wzFw65`Ql}{=1q~h%dpF(vi&;v-TI-$WPM}G*tA%j z<7?G00nW`ORlgGaigag>bMNt06FgUTYUzCp3!e`51>Qs9a1%HTYXCuP*hl@jUSXkO#B&yb${ z>^=blNPqo|_ffm5@*`T&3=ClVENJVp28TU6VK>N zr+Ff&+e7Dh2c5E9WV33d*=yUpF}Zt6>T9-+5O$VgJYI1WfR0`(N$&Zld42e(XE(ko ziOYCnvL0TGoyZgH37%bX{#Q?ZrLJ+};z|_&SZ9FTEXu{lth(g^Qz@Efyze=1N7O$F zapU=*d>gUO-nQ#TvNOE$ggJnyx-deS$f~;7$z)N44hp;xxCHPnth9wK@-TA?Pe=pA z56xRx_((9xEJS7uZ*zV{FtnAyyinQzm^{tqN>DIYK4y=xJ6iGj*19^I>jP15W~zoM zdAb=eA7k=Z?gcE2(p-W1NXqATDBVm(T&zQ7u{cCn4=aLr>MB^eUSSE;sNd?~X%};dQm( z6SYFCMWfVU@M_^?%-kF^2beoS;F_ooWakJz!ZQk3ob1+Y9$u}|+~4KZq{0q#)mWa~ zJevNH?kK0#YBWv6`X5T!e*#wG@3S5QoKGN!Kn#tpBQ(ZDH=LGqerMC>m_Y1wbCK#Q zT4Hxmb((sLd9K5@-AMpNMJ)v23E_@X-idJSH-m;@Qz=`A3{pLVcWV{6T_9A*^Vu0E zL}qoew|Z(wF#XOER>xo_rf?fv_7Lxe^qRW;MvoKBF;&LVbKqiZ!-E}@xFTG}w058D zLd{UdTxZss!x3(4HzL2H=p?0ZgacNHf*xa?WgaCp-inm4wdr!4TF3ZM`2 z=4jk^Kco}rHCXB;@G2YVwM_uJC-hz46VGMs;hLPka5O2&!(s|#uEN5pqROT>1vK~o z3aMO8t?d{2pjo3=Q%LHqGG@Z(dfG`F{3H9?8Uc;{?65qRoRMO1-^p_SyOxUxb3p_UMZpSAV;;iCwXbRWE&z{VLOV@tZZ4#d z(z}rDMD6{i6K)u7WC$n=sadVaT$Cgg(?D?6#o^oh=)BM2mXU_zfmZoLZj{#&>!1lqX_{Lr8z3UYJ*{`-hZ-vyL8X#PJ7_AI)FR?!zeihM)=0H&xoqBi~8x@>Xhm`kSf3{^zIFnujkTX`vE z7LlWKn{1xLx$VoE29q^qo)&|AFZ@6|=X4a+Fu2%L#B;t)x5~*9%p7|q*`8lMN>IXF zZ*q|Ii!}O-4Q#r6?rt06rKl!IWWMn8+l-CYxKnPV*T2K9Yp7mD+v4XN?Y9hoTHI`m zjkemEeg}xS_7ub_CW4QV=Fb9Lk5PQl1)xyr@LG2k|E#qh_9-quMA)1^rX|i*WW2brg5{$S4 z0a`ISDu!c??RfjlAdhGyBO!w4rl6VFq^F6_11EuJj_~H$!1bh>M@Mr7O!4w--Q%-? 
z@owvIa7`^;fRq3$s`XesZ6tHI?^8du*BqpK1B#P1%|8%i(mIvr?9@i+el8e(D4^F3 zgsyZ=k+@`QUO6}i4`DzOib$mwj!5Jj*!DPl9HdGLO$SGL4is@Q*ST~>@VkxUAw~K* zji$P+iou&{ect_!V$9h2KO?2@V#@*J(mG z%@R}Hnzba_WS7cEd&*b$vUY+1JF$k8m>Uju9qDdx`&&2WhxF*Ajj}dC@ZoS;K_1b~ zE%oBox|c2nqB2i>=M{&zh%VuPvAp2 z-7M5yrwO;g4*5$??kavnTDHv^U0+YGCMf0br?+aFpm}{^^xg%#F)&74JB?6Bd8mhA=t@A&!x2=Abe+` ztznUeZ+YIZbv-_tQfGrw9M`es@YFEcXdSunyLSzXTlvIt48`t)t8`ME$B|jid>05{ z&eOGd;HqwyskyDF!+Lswlr^jy+J4NV?Xd6Iv`tKaTM@S_v|2Br>qF zMmuAHQ^$OQqEvyA<%+4Oswy-Z$DbCK&!cmlb3H4%Ii02>b{+N$Y&;c<=2|&pZ9cO| z%vD-*@wk1dX~_ae^4ta8WuZe|2BAp}RXedqD?^*I;`@!VxH%O_2USzURSTh+jg}{Z z*RjIwa6QFkX@Y+&S9$}UAEebf)UrL<(oFC2Tux=LJrmTmW0uY=cMwuFYDo1BKBk$& zC&uE_o!cXOOn4xR7PtbIKrc1dLFl@#^G!CIrTv*)*GDr)*_tO{liJ~av`d~4CT0nL zlKGjeVRxeILmm^8W37x?*m)UQv~vq!zUu?dZ!zf+RJSKl*HP3o?;h|p*zU;Ywi^7l z-ue4;Kr!AT%<|v3gHWX1O{ml{oUl62XdH%n5xCZVn*2KEy&v9v9N(=4za+%XGa|>L zc&77~s|mOjRCLu`T+&!i#c5;hEX{4+2bes&Rgi3wRM9-0##oD)ekG_8uek*z-X5t$ z5Bbnk#rlP^0q`iJCrEJtW@-;3RneP#jJ0F`0Qua3nppR&y;J`H$h0Xwj-em67QBB| zXp`AlTF!dMmz{C|sj0tREP()Nk_fT_zChY|+(;NI=?N$M_K(iU#}?ZqoveRgsQhq$ z&y65KS1obkhSVMWZ2|77{-S)Zv%1ig?`x{)M)Bz%)JDZ2rbU}8$=9kowD zbN>KV%H6JYsU|o3 zHumJa^iqj67qlFqgj{_mxc)&Y4G)>yP&5z2sA`YCY>ogsyCkAzc_f4bB=F|eUCp%Y z2k97y1KOi;pvp%08gm8iTL3rj!*lUNIsP3yQ4G(7(fd!e{{S@U$cmc<$n3T;*Jh7u z6n5%bHs2XPL^HAh>r7;-ujrA-u*oEGp0^BN1le7hMMI10sUp}hODXTOKpzsPA+SIQ zuagB8NgG5@_=_TRk9SK9f9bR2fYs!|Lol_;0lwD&v?$$ytQ#yEJ_rD9N=yzd?@5YF zg`roWRaPGwBF6)Bhk6Q4I<}DEk_lVigputFqSp{Glgxmn$>m3SWm7`Czg zc?FCFM%#sM32{^~3lz^5BfTfcae}a_ELVsrVrdCtLf}h**r4X8yAI^HBNFx=7rFO% zeAVcV@U>cZY4YxQ1@Jq(JY{3vwIHmHrJ?c4$r?q&m~K7_rh*E{p9MrrnkS1nk7#p9 zJ;izbYLY)K#wF7qEEhhn8GKo1Y!1J}HMy2q>9(mX-4{}*uci`9gB8BWBl9akt|>YV z6BgQ~;*RR~dfxJY_2#&5qk}goZtH zMQCHA{&7g$n&$RvBX81gMfT^lY%#6mnX_U56HeE(TAM~D@Fv*kCk81Rd0~DN?7Go1 z05aLTGzETHos~gPOO%?jg!h49_-ElLwbQFIK9a*MaNATH?T~Sii)N%*`Z{X&7|b!AENP? 
z>M0&8!A*`=-)a!WdKGOq_Bz)Fw`4UV#b*HV+X@F-ngik2(mI;^CXAUW-TOl}-;BQl#>Z9(iSayPbV%NJV%D* zn_H$qsF?AWM=Vao49&_|j}x`0drnJMR}+aL*1o`#DFM?|$wwfIQH+Ltn#h54F61R| zN;BV^RMAulT`UaoY4OuLKT_;1$X}si z-+~hik5saB{bIG%veLcGds+6}z$7_uEyCYpLe^`$7qCwm5yxUn%%JSaJ#sKSgZOQk zrIN9(YlVwk(BOE3wF>_LFVYD-N(!2tIr|Tyy3wD-6YkGo3qZ6k4FI4W%rD24sKlUZc9XDQ!Lq7TK2Wz z+Tp_B@dqRTnr%XYr~?g55`r?q_Me6Bnn*%a`FlmEeceS1U29{Acu)4ng<|=tYR@dP zC(u>YmgRV6c$O!38?-*$91sZfZD}4n#Z$1AgLyGRVhdRr*Y}+LL2TkSfg;(7es2P3TX|x(5tYk`DOx zwcMP`A!#);Jg%jvX{M@e{nVKWE%u>t?7h44Hw#qC=k2lwS)iq^rIEA79fhULaJYPK z0FdT5X3`3oJ{Z+XbL3`6B^+RFD>KU)onEC!H>9{d&`l&wet6v08w_UlC%(wp+(TAx zglLJrLAb2_UWT7lrIMDUV{65M`%F8W!@0w8PIJjimE>|$sg#k8plTJ`bzHh7d))aU z?b1%~`b9S2ew=#>+HFz3YZuNyN2bMI&dp6FL?#&qL}PXrH1lhQ(2it|b6V_%Ak$&9 zFb+y*E?1afIoM@t!Shy|Qy+t?(A-u~pAy3U{vY!qHrtk9EqGq+l;!PO>3}|qszIwz zLs<#z%G=v}95QncdtH()_g-hqr_lV%tI)&Qr?V#!?rYvVSa||-vh*0-4%Hwu+0AoK z!;uRQPILUfug#^AY8va+P~T*8A6*FgC!`0lYvXp+jpt_=^tdY9l+^XqZjIcrN*LgK zoYFxGli&@x-D@~)3Q^428}KN2By_{IY}gK6;Z!8Dq0Jizzgm@0GyJhIhf(tepRh$< zY8c%aO_V!iJ6{ocmDvb>VMpPoDte&rTTR< ziNCpv{#aR_ba5VC)O+FssJJ8aqiPCayTOb4fl>gglq4hn0N85X&rhZJODZb)AYpMn zVwivZdq3wr0uC;-f(y+panao~1}9U`OHeb};m*S2-`Tn*SGW{DwD&titH17CkOr3r z_J+fl>&<0qj>oNNaSfuTqGEYAI6(N9Edb+XlUXDG0PR+c+vS40{#z3RV3nDx=c?#Y z?-SKXI5wHU-`cbsP1bgxZ?0))?!{Yg)Mx2P1DtYM+S`cJXYX_$^W#RD9c7)ay#D|% zc>e%apZ;r50n+X3LDl~NZKM1X{E<=WM=(T^a zKc*9;^-=pi2mnmx)g+VmH0+D6N4ft1CWYa?v1+`3F$n(vb4v_e+ueS!M_TsuhV`}o z0FD9djDD5#&pAlxJkMHOZ)XOM4Hh2&uFs5Dcz_E{nzpH%f}MktLdK6W7Slag;66)f z8AHS@^opAbQRx)O{{S^6h;vRz+H>xJK(!eMs|rRq6(HoWuIL1HQ~L=R#l~ZBQ>e|Y z>qy#1;&06Z#|P7NnEWj+hCjJa5dMw-05nJ`6rb6l{^~#K1k0q+5_l#`isGrKB7sVx zbyr}c{8BI;7rFQa=c#9nvR2P$h@T*HBqGf%XtC-AY1la6E{O_AAS1 zbrQ?uk|c3?Y%+G8n~mMP??VLe%~4fI^OUjvvKF2;)K+SAt$PdH=qG5u*=ui@+pule zg=SF4v!KuFg`1~pRc!NKttfEC9WjZw8DIcdSt!RWxq4!lcE9H89VS^Q-xP+XKMah6 znSZ_9J}b)4=FMVRT>UX^PlzY|^XqR-IbtBNu1BiRTzLEor&A*?#4_N2_VIBw(t7{{VH)4L-uW zo~ubqADAJ5kVe?CFctyXIi0!hMr$!s>Dqkr)4>ZQZScDV@a+N7051Q`UwT+gP7IG-uz_%+CBbZ?tY+#BWh9AaUD#{%Vs0#Wq4W{{W5(em8pcH5z>dSFNRm v$DSJ>=-Fj!IvoWpO_C8KF<9Zpc8l@^ADX2hGbl~oxX21i@^Jx0F(3cgVk?LS literal 0 HcmV?d00001 diff --git a/notebooks/videos/bedroom_short/00004.jpg b/notebooks/videos/bedroom_short/00004.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b3da8f7943b968b576327be142dbafc6f55cac4c GIT binary patch literal 64859 zcmZs?Wl&sA)HRB`1b252?h;%FcMa}t!QI_m2Y0vN?lAZu0Yb3g4gqq>^M3EW-;di> zXYW39YO3ed^wzcd@A}_&2n@xqo@U&9tn8dztn8mZL;T%@kbppd`8Qzy8*u;kfQN@i zKtTAAaB%<45D*a&KR)G&~{L3%YS>gEri1V)CF(d5i_~0ZZdc8F*EF~-8ty{~lHBH@sih#ipZh$t!&0qj9vBc% zkkHUjFt9Km%m26T$7&etUoa_PI8?t1q=bONfPlt;_zQ+Wfrk9J`Iw7B{25ZqV>pZt4LWvKQx8PAgXelu@AM_sY&*e% z(Xdf^fCGj3G1$f0*=7sH)gm4%oPAtJb7KG0f72Q0M!I6Qe^=pWw1I-KzJHhB0!gqi zqPt2#SdX8+*u<{#P@zQCr#unt7wFWoGxug>$~f!(Ftma3+IXG`bTJFEPiD)=A(JL9 z6Sy!xO#LCjpYr4nE3Cv`4Wj&#b5m9obM7h*sSgt|c17XR$^9tOjSB1Q&XJTtBQqR` zDczGpWVpLaxQ2!laA^wGi7AHqQFrPGTOp7mN4D$ zC)ivjJR^*@6}(`j$dMD1Y!eRZ@`=OvJTHM-sKtXqi9CI*PQKN+~dM^;i5)NG0OVOcWhOyv2x}6I_eF zZBFJiA(_t1(Ex5Y>7~IfIc1#$ThBXq5OVFMYnZ>}H$|}_=ZZb$B0l-Fy{kkChdkOV z+=-XqM)K|Ubi|Qwy{I}&72d%e4yis5HgcC z7Kn%~9u>AWkjXvCko?j9?TtGT^<7R>DkUh)AWS&yGh&jxe@~Vg9X&U)SoEaXX905& zwN!hg5ikAtTc9OY>LS_S>a(Ol6Nt*6gmJAycOoRL)hBgS7liRlmuX0Brh|3~JWZE< zhXyKU`k>O%{6tJHV92KutT(SB=T!cTw5kuti3eaDy9^YD@ppgCfvc1MmW>)-GI@-8 z-=o^u%4OZC79xAqyesbmv=~l~nTp(|b2H7hU^pfX+HYiby@eZEtqTv)g-WL)IE*hF zMmR=oeww@4U38h8gvNBekCaj1L^vk3B=nQJ3FQu3p45>U5{3k&EX?1nzJO%ni3NlSY-TI^$i`FJWQS0Z?bA?5>bxWkERE$lmVi8HDqALx!5 zb8^NzpgadD#$e*t1!0t&>u)kC_-EXaHpCy#i5M^i?hg2_ob7>+C+9XWe{?KiKE4_) 
zE>kUxChxwU8@Yvwe;YESgX4+cbR>@;wQcIP6V|-M(A82LsxymA%FjJ zHV!}`-x)xo{1H2~6n@}y9|@PvzUh7So}pMym#hofep%-{3@Amxn5a%KLD&T_P-}-F zZ*r{sh2T7Hw!l$L`3n(_QUmXT*y5W$vRFDes6SsE=HXtytw~zU#-<$<(Byv$j_=CR z*!-17w+^<3ij8@tiCwd+xdN$5{gT!48yp$lfz3O$F@@ia#W?G-JYYg6zax|Qjmy}2 z4)}84CReveC@zWzU0f`2y0SX`3n0@7SC5O7Rt9ydhbeLc62%ot&+!YmY0ekSNGB6x zjR-CSTA{JA*lD)`te3cs6L~Tqe4NooYr=ywYflh`z@-$&l1$o7wZAnYw+HnZ4#LX+ zLI8CSsi}s-P$!|+Nl5C#gAz*h8obIhTnVt#BC4#`$|kwel0!$;aLCxvrIhTzdYw8i zQ+^Cv!U=RE2SWd8yA6U8S@0n-Mk{|L<^)xth~L7T-}5bu+tGX$yold(kXy<5qA8qP znZ4)JnZ2woctv%zvrT#2txg11hTIm3m~@ zdlp-cOVOuP_t`t*;TakiW~TTrM78!!Il7r?9%k-o`i>;&(0chGl8Q4$)0cq=j(xF6 zfTz3K!bG1S8F79Sia%Yq;8w3o?+RUy;C3SZ&Tny};%lFJ?mieNw^&<4ttYJB78Wt{ z>lTVfo2`-5k(t=-3ODKW&oQ~%8Np4-qCG5BX0Q;|qV9g>yDL%`iD_Gd3Y~Bbl*ms&==Q)LTERP9$wZ%vR5~ZYs!D1kZ8$wmG`G3-Qn%(Od!jWPar0mcx-jF^uWJ#No%WM1SU)zkN*Mf_ zvWYoaP_(~(+-lw25A_s(c86xNH3ZGxn|~VX4u_>KUi&gPUH%DP|I!zKilAfFxIr+d zE>&@r1V~i*cB`x4fbdd%qZ8s^%diMOzXI!XQ0m}_JS*M|R6bUlF%sZPZGUz3(_^@j zFZ@yJ3nDV|pno~>p&C6lqbyfV-Qmpdf*xyGzNAc?fw+a}vUWjUQokpyuXMJF{sWR0 zx)fAYIzV}I-UF5^k5as8q?|%!x(Z!%7j!7pkhN#zFu5`+WcDa2qkJkxh$p8(l$|aT z(!2W^lOYPWbw(SB%{>0R2|7AO{DwiXhz6ipi6$@lX~nTyR|ko?T08;XFCS2A=i8W- zpu=1V3duuoqZn1hfaR{^$C}XBAYA5PCb)Om1MD96kiuM`#eP`H&uh5VtXGFT=x8YZ z+oM^OqxnLve-_e2{Cs-|IbSGZLVVogt`ih(mu1*iubSE`0-YeXoVQKxwsk(1= zn?ohgX;kbBc5Y^hyhqwuvcjC^(vcDuDRNBX9{-8jY|LX#m&!*Aj@M>(31D3%jtOii ze8ulA1Eq{WEfX#iX%Hirv9S9Y_NWK0|v( z50Cme3&k{ap5>$M*1=ES9<9Sk*l9X0@6h=0ywdCVbIMHwBIu6!TynxdZv(V({E5u`@!pbHsBi;!yj! zZ_;r6v;rjp_=w=CmGqb<-}Om?NH&^ULOpTEqRF-S2aUpT+uPu$HtXXV!L^or@|s{fUujA@>%66wcu(+tAWIf8u{tmHl8@8 zL@>b7Vx%b;YTqEX1O$bI2E)TJ&^{3COwkyLxqvK4jMMng&Sa+7PBH_1I(b#${J>XZ zm-1g&VFcer=Ps)QRU^%HfE4U7aNWy%5{+2-loz~YkQW{JBub+ zsdZENd4E~ort>dE0W;Asp{Hlt?8Wrg^`D|WXQr4_a6}}fW4wJW2WZ|qAckF64bS|cZWM)*!Ye*Q}E@(CXS&0y*Knn3O4X>g7qP0 zgXd$9MuTJ$?aHpb#@j&ol6Le2rjK^YQBDr&JS=7-MBXfrry|#J(+zm~#B_9N%{C|) zq!hiaZIBib7w~S@_}IfQKi`o8SATx&3Hn}#zYtv=zYT}Jp#>lz;I)Tu%JJMt@^@R-NtTUgO#QzWlEf% zUhr{F?hRO-LT0|!bQM46gpPL`sk?{6qQ|1vKWvy9it#Q)Z z8@Ie|)qawdduTvjZkfJk*m7x$YN#1y;lMZ%0roq531-um9$vrS>OMXtZdK7Os?_)j8m2Tho}XE*Cp!rHKnd$Snx&Q&KKzyF@I zdWPP>I!`-pwde3?vqfqst}=mn2FI(^zpylj3wFY=btt>WO;#JYU}aVQh-mIObFB%= zW?@&XuwQkW8U^(=FW0^Yp!syZJpPG(;8}F>HWG`vj`%7!gc{$iV+|pnn~MxS+iB-k zFM>7BYcLZp2tzg$uS3ekW*)6KEYeq0oe1|XG`0Dy;oUb@pOIai2+4JLOV^g}35ywB z*pxq*gQ4=fQ=_n`71i*>$wPabJ_A&M*RxJ%0H3LJ&jy7_b;+H8-v$fo5hL6VfWK~ck9-w}h zV`Yzx?_6+NL}EtksBr0fQ#uNXmzO&km*%<>eB1>}&i5({QIGg9`$oH)M&Gg3TqE@` zSbxqn@Z+3#ed-=Avn(+<8C|_yu0yROphJBW>i$us&PJ?>dpD8nwBk#s<~&s9ig}$V zQU<@D&UnXQ>p^!SIi6y;bu?K$eXTtq_$e*2yyRA}K5#EZ?vH#>apsMx2q84U-U}O`-o*a|HZL? 
z0Zs8k@cdUcO=QZipLH7zZMh;-((JGDv@_WmcFh&iNaFU4eQZJQ9iGqY`wFSDeMqEN zJo)jWBdEohq(wI+E4=T=e<7;MzBOAy0iS!E?JfvtUPM0+vr=X{hpb zw+lZPReu?Huzn4SBI69*BPjs5}kF{3or8a+|pMeY#!0 z9GDyadW_IuOt4;QEb6L!7;QG`&`_AUdV9J|VK6V?TLJO(=i3VJYvj}pFvs4sKV~p) zco5Fh-&tjQ2X=DjRL|!OIBYET-cISy9k(g@p=)lPPBOOP45S-33}VQw7u9OtiFPjy zFxV7@1)^~kJ&2|UNP1~87Od4FWNBy9N{I%C zuf=u@3WqM@Peb3*R0n?YS_4}7vLDrkVa?B+O?0?Z*8|QQy>+4r@t9aMheoDsho88Y zk)=2M3M@-C1zdo#Mf@bZ)?2fJJurf?WBHdfIIftORR?5t$tMeyb?lfYEEr&Bb&5$xzoo2|%COP7|I1BICZ)SxUW(Z8gn+mOM zsCCPmIA9QQBUhjQfQWrC(&O}wD7>)o4~$2ZMNKx%&q65C@G}FKTBR&fT7)(#QJ~7k z$*7RF_+ni*b%J>-K{K@!)xeZJqR9YMK2X_6E@bm#2l1_Q!ag57#E(8~mZzJex%^72A)hkTNab=hqpgiqLXkAXYI7@uEPGl=imYaO)U7?I9QbG_ zF-o5KN4oe7s+4Div01m0w7{u{9LX8A8nb5zJgE_AY=ur)1$YzTxJ!Z81ziHr8u;Gc zSej+D=7{Xpxm|P$eV=eSTEL0U3D92APJQadt|31?Vq+XzRw?MlF{~;%y!YZ1cTucv{w$md#6}WjLgr z&=mIBoaE;sNPfhQm!;+XU_j4S#(*ypLp3^*II&L* zbhkmtXC5LTh6B1Do8e-R|3OtCutYarK+zwNDMKbD6nFyLH8p5~hk0nA01@phsE-_t z0tLgD$w4e@f}|d8uj0(yh{0lO-x30Dbd`vW8I#{Km~b{4?rINuY5p$(N7_FuMDjv6 z3+e3rnwpOhyHnxoD`|8uxVfQWzsq_SE8yjq2gUG_SRDNGSz`aXJhUGo_rI)z0g&F< zqh4muGES+B*;{8dLWIj{CqjM! zTL3!q9#0z$!FM;F9BK*8GoBo4xN`52H5{nEjxhE7JZFb8a)#!?>MefSyH;G>SF`=l z%BiA*RQ3q4y~;!~c0X(}Cz+vngsL;x{{`r&o@GkSGgfanWzR^pfzi|)heD(l<=ydj zMbF4&-(yC5wDWLXWz5Ofbw;1HOVzknEw=4f7;6(h0Ug|WfEeXRv05;w`|345tC%9m zYxTYfqiHs7Vvb|vPE^J1)9jYJ`+A8)K70M}XOdYoEmk{gVXju`YBXY5z3u<<7eWxE z%-&l3YRukzF;=0i+$8@-dFkC7dR};I$UC-y?q`B`6nX9Ac%?<$R0Ddv3$3)p-VF_i z@ICkg4-!3ms^F2&#`f?K2p1#Y`NMA<3*y?=$F*010_ zBptdDCSl~`ta7ju6-8^q9`I0rdsQ>4&(~a|wsbo8S*kj)(866_Fy6iP_$E7=$>t=n zI_{NRb0|lh8;nhHIbq>4UYaH<1GIwCFU&zQO!sfc?VJAb z8GU%+jW#Z@Jl5rwuY7x!&d!-sDqoQHg1U!u8LHSTSs#pDSMOGH%H%7)(l*TW5sDf1HUfb^uNjnnDEsZO!J) z6VXQvV|6MCk(&;g$dNiXv&m`{nY@Ed>Bcw|B9)eBEr$o;jm@%8B;PRgaEcYL?~uKV zPO6<=w={~6tXFM5f~x_k3eOq_dGt;D+_H6EShY^I27yAuP=f(k!#P@$TihS4>25&) zb}>3Cp0LxHib?XokgmioQ^cbE)KfB|iy*X;I)LJERx(+aw{6TS_PX2Hcx{wl?^+JqS_Nj0h6$YiDqvh(Q0u{f*C8^mGrwe)QheGe zsld-+ma-R$9SY>2QhACjTXr=*SOUc#7^es~NC}cU#{0PvpMNzQwp)Y+z2dLfx#6d# z8k&4JES}-kz+r-NaP)k5rZA_Ho8s?CG{e&R37&|@TpDXFwmu>{@_o+YZdz`@P;+tY za?_L>>6Bw|wUle^jc4(uHx-Hm-vCSNRS%aYe3>_iyI()k;@hZ(Q*Qk>2H%{VOy%X; z+{D)K&GB&2)XcqG!D_-vmQLI-{#sLMt@hqaBpIL`vFM<#>``)%r_kd6dX<+k!pf8?+HbNYT>D8hIB!0 zEqkwOS9W=V_s%bK`{iXc5)z4hHLMer0ACHBrJ9MKNM=N9Y&X6ylY zbH(&;br#tAW7H`ZPbWJD?6p~%65kBop zH_F$6*15a z1&3+ke^+>$Hy3e!)PV;WA3YQ6|DWtq)rE?VW8agPI0U!7LATApzz~NRTFZgiQpplD z<~&O&={@$y**4-mc6D}cjn8dEpOcip!9dq0dgej*>5uC5;$H|b{ENnm!6Zv@v4XRk zeyE~#Yy~+^`;2_PJiVp~c0V<;kK>XJWl8>!Pg{eT#u8;VZ&K&U%Xq6dx3e$TD& z>uNH-nrTvkPu;?8z7y0>c(-M>-(Uu*gi?hAk4lR{btR6a6(ywCX!zSe{7kKAdC_J4 zv!sj6-nJL}OkhC?hp_oX;nAFHcSZs71D43KEhVr#rNmUyxmZnBtZS$sc;pVQim{?f zUK)kk9bAS1^jMP_U@!bNKj>$O1Q+~Q&P((`{i2a$Zp;64XelS4CFfq z2v^l8H%+Ukg{GnX!%lm+*0K|CJV73Y-?RNJ&kz|=66N+YZ`&7^CS8$izklp$=l z4{-V1)6ec1lMgxR`<2x-<+FgJsz_hN$6U!#BZ2)ixK-tI4yclDI%#j*G~Yr^`Ktv9 z(^RAZgD#J$-(BbsOP8pj1o580@ z22q`&_dy3@sZ8ZDrrv;C0M&yKkz9^4NzKQVAl9O-U%fab1qTrh%h8$f7N!0Bre!1e zNgCE&du3K`JxmDg8X)4Yv3P|JvSYw@o@Sg6MN(#?x=zPIRFaaCe58@1N6%@UJz0Wp z_@XE!(gOeZEx8aoq)aF6$MX2X&5oyP*@?9AMVdR8h7F?T8R=Ctd}z$cm-W?~x0ab2 z`3xFEh!?VjzYuJ9z-ky|XR_GaAJoYAy`$Rd%+5c8JP>zm^yx%ZQJY+aHCa=M<*u6S z{2oW=XO(Q{XPe(%+Uh}d_2o>jQHj8rlXmR=yn$pHo(Y5^P zE-andBoShdg^6@7vC^*OT;W*EjyK3YxaEc=kn0`Wv~WhJo!f3$Oe=b+5*+TXXQ3ppX27YTH>QpYGJ(6E~fkYcqJvFzVKJMsw}Sx0AnMncPkR|m@IU6 z*?QxE#EIOzTGJui`lj(677Y5%Pn|#e>&l}enBT>X(pEm5IhnA<)+pb=Q>^sRS5kM8 z#o>mqj@m-aCVg?ADGvE$4e3mh3dUL!AU5_oqyLjv?+t z-4}o?ii#{xok++wim)sd&IQqQ9*k7%v*qKI0a(=S=gq_G?w~TjTP5%O zXg0eSZ;uKsmsEu`7Fm#4vW{Ed3Vp_Pk#&_)pEOqfZP!CfY%Q;oxgXwpsNZ)TauCD*Ly%>Nt3v4H7nz;GaJ1Qcdi0}SFxF#}` 
zYH2Zs^ZZe?NYc0Dg$u|itc*V+)A1B0VRjpTKDH&EP(!$c>dR;Jaj97dxvo~je_$5>|BLw+{a@|J4_FKPcW8&wIC&Hc@Z2%E z==|JW!FLRi zifFXj($?myH$mML@5+IuFso_lcnXNAz({ka+fafsdG@V4crs(7%=rT?eeMGc5+ zYF`5l?NCNEkjzZabd}Pr$|q$GYI?HX*6%D{dX=>?Fj#Z2Uc$VyXOEG}nrs>&LiC6M zR@xIqu6;HNsr4B8s<|Q@rP*eqa&&qC8e-|^D<9b)HeN1Ow+2h2>6CQgb;xm5Sdi8? z0w3OQOkB~6%$SS}m`=%*yUwnu04Bq^In=0yk&wN)$DWBmmN8a;x10XH92vi#T;MRP z#~dnx%JLP@`xdz&HF&-dC;x0w=X^YQ<#zyCYf^*V6S^mk{`R(7Okcq-_Zw4OJ}3W@ z=RagWsyuAushB&&9BHwmBQHm%z4+(rR|13jajgdQACE>JxR8sP8Azm|hHJ#*t@ zV)AF<_y5dmsZ(CKN-cZ+pz9Lp;gFUK;Ot!)v(O?ZJI6d}jr@QY0(^%nt4?@MY1vN&BS2z+Cq>5n0il zp*T{O89O}#lPs?fUkYF3jq>!)jgl&vDmBPWcIt3I)H)o89)ogrT;c5GC6dN$wqXfe zx=5PY5c^Z+1b(cX3`_+g%NuG`<~K^2E{|g*SGu&n1>fz=Sjet&t3Y02ow#t{Rb9&` z_a0VrZJH6*lVo%HnpBmVpK3@vO|bR*sJ<_Yimvd0WNaM1W?4R{m)E75pQf3u-3n+m zNhj}%L_kJ4qow0Y<^G6D`=r+PLhhz|Cm`?!x!)i$p3bYfv$xXX9{WJm3MSm<_>QR8 zOE&v>nbmLE&wKmrj-Eu7=RWreEuv$3BoaV%u)2_z?jcX^8noa$mGFslC9tX9Js7ju zB+Zq5%7{k2f`AbI`>>Vj5v!_Qvm&A-bZgShl>Jo62kHw}Gs~wRb;hmJ0{(@7B?B7Z z)Mx;)R;cD*x19Ovh=;p6SP|rqw7pMTUaZgXUiId`F$KW9N4?u?Qoa)Vw0HDlY0C2q zp@kaO7l_3|`W7O2^X9y~CkbM`W?{>=iF+~RNciSQRl(xcCJ027crs8m2GR-})m05( zSr|Qjgunn?T3`qk>CNE{0)0m?(rTL%IH2PXF`%i2=_zvQeH@x(y1ptmJ77x)%7*dx zDZ6RD-=>R@KWdq#)zj}cs#oycfiB0h#QMb0$_Az!4|Pn5|IHiYyS=cuUT0cTwq_RZ ztfAmev;wla)N1@Vc-&=4{)uyq+q6HfLGD!{*wFYqZOe?;g*|D>esyY{Mw~X?4msT% zX3MFEUcBj5>N$^V#L$(jTe!B39L`8_uG}8QL{L6@XGxntieniOHk}d_8p+MJE8F!K z!pcs{)KZgU3Y0Yt^8+(+SLocD-iwMtA)DuA3i|bm$UvP%K71s0GZRj{L~PFu&nt7X5{p? z>Ymayy)jO`!^EA3f8EZtGj4re+e(dS^}xx zW4(KSAI#ZlPh*x~IG~m>RN=1v(Ai-<-34dYCm$xgjn03vBv z&2yU#0^=pDK^{d>S!A1?T4Ga*m#OSpJAl;@wv~fsDH)9!iqVA8EvZ+4&3|Aq+sJ)%2)Uo!4gs^-KHzH7 zLb1r|gGVuu{88*!hWQ|1Vw)vHv@?`FAK)8eweeSL^#wzg%RwYe7anEUjVY zsdj>oaz|zrP2rmFWpK)vNNmu<_^eVG=Tl(SRxXKc|9bRs3f9Zs#`*{s0|4I6^r7wA zj9@g=K<2{Sxaf~g43AR{Qm&XPky+<_FyJ&P_Uzs1 zhgYyG^SutXMzaP4=?{f#!oFO6lvBOZX79{N20g{gVnjfWr-OH>I;6>b2zA<-i4<{A zTtU8&<={wm8@;}Gck)&H2mA={t#D8a%QsoSNkm8^BxTL0be{W?5nhlf@8AT?^)a5| zLazh{gP>m`;@xeQo4}*=P5#1|833|v8a#6Whj1Vf?#Z-VTm6fE(>RVh9(s>L+#-#f z>iz|~d#2qL?hfM1%oGR`N;6TN6!0>zi)27)JEt$u~`RTk)= zt%u(R^fGbXX3axq94##o`^%MMPM0f#l z!J1P*jN*Zt(s(l(`i<;B8Bk#+)zsQ3(+uVEP_$iHm- zyygW>U)n0;>j|$B{bE>`VLF-CjINegQjUf!%$V$*HZ=G_CjqWeV&`SMTT`-vmVwPx zNdn2=+rBObq&9XZ3ynImbP}5@gN##jO>q^=D)J1Rd9e?V%i;-ZNyKB9{nI&}n<_DE z6PtCz*jW)OFX)S-E1%4EbWc%)zldD6W#EYH60=3MIKoSz>a2aa!vNEerU#&oqH)Q+ zkXg4UPi(}6)4P85pf-}cp}Y2B8p`iW5Q(ov6|0<{RXR)V$UWq5T{t;FM~+6ViT7@g zwP=3#?M&<9>4)YzXgG!0+i26C77_bF!?8w~y4rvN1C?834EDk!;nP+7HAscFNxVR@=w)^Q6~@Ism^ zDAY>80$~*?HCjkUx$JI|LZ5`1@j|r)A;pCYqo~MDK8lXhY`A1FA1NTtNbK8$!fHJ6 zU?)#5xOHDdx2D0 zn(wZT2t*%z;oM@Q2xg0X>`SytZUq8p> z1L{8v`iB<&YwVc*%e6@(`7-G+Cw?KTVcN4GUBf-|;HK`M``{2GR&T5iYpr@{&5fZH zP~Ij3WnTuj`a~rfbOdw9kF9+|Z41}67ZWIzrGRK^4lXN!ci;p`;0tc%6mR?9nJH<= zO51R3PJ1Wj7(&^Zg-!DgQkHUUTvp8N76-m|)Mu-#OKCzrCT-O21r_)w8kyPvHjQLV zKp4^2o)N zg}yg{v6RFN>?%j$ne2tM;6HLG+ri7EZLYxYFtJfL(TPzT2|k`CSwAi7Zg#+8ChDCR zODVyw`Kl7AULHq@tzZ1w(cOe!P0?p?_s4jd{@^`DBo}^T(d1Ut?cym@WC%oL#Vcov z{_P`wHekiN16>$Hhl!lC{<>n{o&U|@;d{^Vjcrr`_M?sT=f~_H2l+7qq0EDq!uLa` z*qX@=P=Q)KCybf{2^yh;i(J$0z3+5|hsu7t)b7ydbWb7Hxe1*OZdr!j^(E%&GjnCj zk!jbF*{_svc-E0Tqs+R%tDXYIZarmbBd>n^7m9oJ!ExhgE={oA z$AJMN*>rBrao7CWX0s$;9uQrD7kBjuVvh$+QxfO2oz&zuT1#ALR^6Pod#O8h^OxO70`+IfxU(mN$IkcxE-}`6~+}oBJ}GQL1JuX zJ#IzHY`cVXTXFWJnq*%h{RkPJ{n*$l$q-0rtH$m6IMQOR{gO>WY)s5jSK&C044}9Q z4>cT#_ozznBYaz4>pCv1UVr6AfW`MS5~1j1L|z8owod03Bjk+zn(}pr_q9)X;11eK z2h7jm1u9!VW#d%o-!2E|YcwA?#@sF}>}?wG$^}6a13rXyOK`Dl{K2 z_S$K}!7CZ$U6Kv3dai_1sI(oOQfeCYx6o1Tr~>U4qxuFwhkS2__c(_xCl^PW;nx>b zuCKs!LqyeEAtd&D%r}@oWw}c2N81NpIVN_cEIGRRu(wyMCPwZ3iHi`2V(A93vO4Qk 
zSDFN&l!r}vU`ttkKvWQ^^ZdoKzfo`nUE*}H%(+`XuB|XMa!flvJzqmCv>JNC_yn!L?_{JvlmC`K2GSpZFtShp)HO;8lG<;P)zk5c`E0EMXt+To*dC7?k6w=&C?4?J| zs*1ISUF~u;1uzH)rpoq+adIBTEU$(L&EcWH(5vS*M+Z=d8-y9PT#_?YMWw6oX)L5X{PmFVCpy%F3?qWH($_0tSq0pZjZfsM3PF|3XAP5_%RR zEfn=QIOzRkJj^T1s928qDZWtm;FA+jy1IuXv~SE@*;$ZsinY9yvTk6rv^s@(n59jr zn)8$6Z{ z$ntNcYR3OnpslUFUHRPIMVN>zuU~Q1idw&sGhi+P@l$n|t@sL_uxw9)6!9ZLB*=;B zi_0xQ!On_Sy&5IuZK+OOX1@7cLZXP=fA)h-mE@*tMeZy<4%H~-E}cK{Fgn1q1g7X9 zmqB6B<*8r0QfCP0y8S#AMg2SQ8MpDqJ^pxwvpm>k(xKfooQ~%?#b>ngj;q2hzK#U@ z7jCCT?ZGwoH@w?*VL-!u0qD{y*HF?jsn%hCB*Y*$e|s7J0-JpEQ@qLDmfUM$>%|x4 z3&G4=e*r0p5Xcrt_>#wf{%*o$6{8F(V#Y)|8DX)4dx;yLi&Q!StQMcba$T~XYPLVz zzGUf_Qcjm5_cp^}{_1x@*^wHeQ4c3~9F@G?s`q*(e?V;D$4?@eX2{BAO2W)8agCA1 z2ro|M+0(|%KxrymB0}5t2nAB0TRsORs=&8;cL~mbNONOKA_f&!Jt+m4F`}y4C+)7D z8iGz1T=@>%Gx-Tx6iLX@Rao-8;I?OjbE}QRC8^XhRrVv0zS7wWAOw>rbGOBX>J&!} zT?KGRug+8b7lQt_r6(H?%I>$Ys^91%t|H#2vT|-6dQzTSTv^D^s59aP2t=uE%Y%lx zGYN$f$F~$)MM<@zS=3NPHP_C)*^IK&w2T)x;9+ARZ5(FJ3jq=VHwZZ_*@`h78<@b% znIVgH?wqaTLOYz7oDJp`2&yt`TTvPs+a1vT1VqM!CRE0-Xk#SWPT`8(^+10J0PW-_ zU?=TW5PP@7Y=rT8N`GKd zHI|J9>5;DPLwj;$=+rX%ONmN}s8}S8H#}P3#4=+{7Dzir+21Dsc&wL?r4D4$NRuZ+ z6=P?DA47$|M9mfBt$JC_Cc#%CPw>LAD=_x&c%qLc4C{GQ1ogz@>3MbGEj}&vW*iWw zhj_TEOW;2>k(o^pk=FNp|1^-;eG@Ux%@M6bCMi<1^T?>IT>|h?bpj|80q}Ho9y}Z^ zW8g#e|EQn;pZjET6rO;sD{%<*cGT^%DI5k1*H0xNXq=f$ zwze?xd@chUxlRIQfrq?3l|n}ttxo>@N1j|`lpO+c-@`?E7kJUlPUB8~-5?j|V19#d zN}jl5K^(K^w6AYi`cqZWpxL0K6wE&6r@*sexp7!@aStn9T*dHN3WAO^n{EH?>~*T@ zGNV^jAOpBK(ktKXwy>^6yBhN4z3k7*t%sVM~_DWi1l**mr z9rx($^R~OJNiDgx8G6&ki8BG%+N?T}jp~{DX;%#sHEDp4lv|_u!q-(V?s?|>Bf}qD zddcf;o&ZcJ&whjXby-R?h%78%GvZAW`xDr%Jxtr5|{HvWe zywTKv(w)w`FLDB(EWS^wrHCL*$+ztJCZL~eA6_IB?(AI30a@79Wy63AGhw=1&6E#_ zP^)t5?@T)+Sh`ELfLvqZstlYg%HLQsnMEVLvEz1U-~QN596wg;(;e5Jdg=F&TrALY z*OAtLRn~^0rq_tB{9Hdw%*dlG!kH39a0g#e@yJMslRil+$U5+~Z5-H-{LI&Vu}jsh zr|F_+x&~Zk_{GSRAP@v~vXmr3jthZrymIQPyCR_4JM(T@Tc0}-j)}{!tLiO*@@Iq- z(?03EM6V0!pVPKQ=dNNlGpuAJgRrZDDN{;%f(|!m8YOIU=l7;@Qzh`Y4yU0P*UQ1EMmZ@-aU|gfIn!nEd$8?@>{WW^}!Y}mO5w&1u(AHnDHNu6+k#; z*ef9cYn90c*xylJr#szTwf3Sq*6@A4 zK%pXu<*C?yhzNnsOxa~dA506|k3y4X2T#Y+FLmX>Dq>(zi)3yy(eWa_fwJT$2FEx{DSu z3#{4X>S${tf)(#GWVkx>YIT;VIxnMno}X=dUmo94ageuE(P(BmH%g_Pt3{o8+xb9= zMZ8@}I}xsvuy8y@XxL7oq0CL(Bu#G(gK<Zr-Z!wGW2@5xXU{~R z1GRxq7f!$ciIg65W-8owk4Qie@;$hXLs=@hz`p^Mc_i*LYb-AHU2_HN1|sT$4g7}T zf-+&Q5beeaZ&$PslV^~&KbTel&#|imgS~%Hfg7gkqBZ`(bejytH562J*I~le`=HI@ zwq2Xdm-|%R$TU^?Dp*zL4}PeBj#NwTu7&Cfbw=v!{Dam?lua>@kou8`rhKHJIsY=x z^*O)iYwHYE&way436^WWr`Dhf9DcsZrp|*;rLC>gPi6m!4b?%!2hYzIVBOSr6avrh zzfxr};PXf{n0SpE{Z$7`!fSW<;s0gg)>ni%m8tnjQ1jBWacX|PQFkHtt+L%QoT9Zg z`l+D|H8V6L=xKi;_R0EEYQBR-z0^vwsDK&WmG&drgkd(qREc)aw21~SnuKrzvJd^5 z&r7!ww_e)6U9ewcv>{Y^PG?9P#Cjh@iHh>7X{ol)&wU-6Vhs+KE^$E6zyGB`Ayl{I*+mT$ZV?rg&0^f_!$ZUiY zr_;CLS=rofI^-%>1@BAC*=H(}R%CqyN=lDf=5$!uQqS*P!f7NAQ=5NUyW~nP)f`Hu zb2S1?wYWT@U4g)a-BPse!8Q>1 zr#6ZD1jNktYQ>IhE04b4F}!lci&o6=r4+^L$Xe3*Wkbhl+=H&{mBeAs7M43-4 zN-ub_u}h|~Y{~vYWKwmq>uic#6K&XBMNo*Uq5!=m$`!+xz4aD0)c0&96wAN_=s}{q zKZNbYijg!6ygDom6CIno85wCN%5>H@UN4Q7adh=5qp1xx_Lf&R;M@Z?LDi|-6x`i$ z>5SA>(R6}#Vbx7`&%pHE{bycz#k>4c5A(8gl($jHF)rYiu=`o%}xfONAej@{z z2YZx7Qsw%{O_|KKHrC>G#ma73=MX_S@lqEA$m6fBd|%v>ty^7Q=Ht(HF%ryc_Rzu< zDqR&JoZX~Xmm}JP;OZg@VsrsIQtPeU5e3%eU#9jd`qTaY9{|ZfHosjx6*^Q$Suk9K zq$c0Zk3}g|VJhr=-AAUXeg~oZWPgHJ=bQfklj@o+U(Hd#yW+YAZ%!Wwen+_4s8nAp zwO-kx{0Bqz(D*~@>F2(ZXCGG$l`KrL=kfM33m$(TW3wm<;@tyJ7U&v7cpBj_yk*VC zZvkwDkV`uO@h_NZ&4MBKVaU3Rz#Wz&H<453hB}!O_)9{XWx#_fKLU3#SyA`~=JX1f zB;ir+SJ>v^Q|?vR(J{yy;Q8|n2homd+ghnSDs5pB$rUi_ee$;@n5G?1y;kI~aLW{3 zZFZ+Yg$6EAzy;MF}aBkDUHIZMWpFnMZCakP=lX*wX 
z$4^6LalKrpq@Qb0+VhjVkQ$mYaV;C$d&ohnK=@z-^pqTQB%ZM@9pP2$<{c46Jpo1E z&`ZgmNDB4sAtU&7l~Wz+$QS4>%?0A=2*ut)c8IjN(0K~ff0NrFjJSWj%yP;^ONrX# zTvXVIDa5&`IF<~FaUsaKmRJRsPRphUq8v+`f|vvdaV|y1MI{L;Q3(kbB}4!_OF%@1 zxfC%xy?b3pRZUMpQ%+^8o2e3op?yjMaV=ncML8n;UznHo^ioM$iE3b_3j#tbB~;}& zu=R`Psv*;XAZ=h+{SE4;aB5!`!{AQf)V-zdj8;V}XKOY;0Nlb~;?0l1H#v~!gh}(~ z&Ph4*=gb42K77DA@qGBO3Va_2!LTjScIdQ#r%TxF^0qpTKpsP4SoH?Ld1>~yN_os` zrE_;FC(h}Y5~@@)V9Kn^Dy3N+>^T*+=Pa5)q${2rY8RcKMtSZv1Pi$v7K9mBfjN~w zK`K*Crx?&P2p-p$ph6^_a*>otQSBoqt16PHP56TJq_}LFj|ap*@e|#izg5!v(@Kw_ zE}cdm|#$_?8OTSl8k$Vh+askvy5To^y%}<1G zU=#I_&Pm8RV`Az~O%l%)JSJIG00SY?-BNOXL%iMP#MQa3Y4glZtqN?n^UVq5pDIKB~tQ2wz`(wMfv}o`VN#Rp# zRVRf_t&uE|BJ4_$^%dC5^{m9GH<3&T54exh>9d)2AGwcnvds+MNR}3%ezOte>#L#_{taj#3WJVy9#elyf<}F>q*v8e_i=3wUPkb2=m_U0{O~L- zZCI=858o;DAP;GJItJ0WmzAg}$yYbts3WtiNcAc$PDecnbbZ&;N701i4hJ9@P! z`(DA=@_Vo`PpwVCdV-QSMuen2LT=Po40;q@DcO8j0?UbWE-GLUE|sy!x-0=j)0qx7 zU=eUGc5qfq3lJ|0z9^bV>B&seQzNZ2A|Mu^q5zIV8i)%BJ4fX=5Hd&{4%h2sxmEcCLX&9s zr@)=TrF%TMpwhu5zI^$hIrHbt z1Dz?`r7;0c!>3`k46^9EaVEv3kViUR$JcbTaOq?LW;Rn6tz6UC$f!0fDy1H*{{WsX zycYuIRnR<;!;xECwhw& z+*3u5J7iR#1-$R<8fvX35yd}%ejD=VNQXtypfQ|Es83wA+SgQ;HUj3!7RWzmrmU%b zk2$t!1ZYn#7%_4WJIECC)YT$k75dCwZ&@e14^XQVRypMS-{wDhQQbc7%PcodU0@W{ zs71=k)Im~$Mr0)b-rFu3T#nqH@425RQKzb=nOMSRmYk~ANL(tyw1dYAvU+u9&%}S> z&rnrwa6(qRN{YIs`CB;-GJetE{k3n5y&^*9Ndp6UQ@xyz2V(&q^*h_OTC~t<@=Q*6 zO}CL|Pl&c_?YYS%lh60Fyumo>VOJuQJ&(vwP-I!o{Jkz(jZ)^7M){IU5;H8a^-=iW z1xk*j?0UUCAx46~1knWbQ=%ABRAf7N2lO83_f%05*0m4FdFtynF$_W|!So)+y(XI7G?K!GF);yZxDBH2Y zS7z!_h+{zMTA0E-uGF~@m)3Jww#Np9QA>-HqqTx17|p#KS|i%tjjRLKcWNK_8V4bG zo$Kz@72BnfCr!pmWlDS(MysVX&ydv8)KF9yq)#^a7Zg=0 zIdaTDZQj1>DzQqM6byELDz1uJX;hUYr9)_ZRp~fL^tyWRt4$ld-S1q~lOZxL=-IXV zT_aqj)uBm2vaPb<-`M_H5=usIS3}mP(<&0QsM{;L>Fdc!Me;7Eq>Wg&7@tenTj|To z(=~`SNE0elHVcQPPIA8o*ttobb$V`-vmD_+yDoQdsP|>FIOH6<1<`h$trirI3ST~t z!XGw_^%YaFG#2`ufuOX@flRdcQ@M!B9uc{LGRsv@y6tQ&RX*#rj8VujF1R~%S~Pv3 zG}%SkOqt~W5J_ln-w;gSXL8M=Jul7my=9gm-IeA%1H zO2{40K2I8tD>-WBmJFLqE?qQ;7ZJ7DbT)<-9*=a5!P#{myGZ1A%?#d1mKAru`zdD@ z1$Xv?EEXbkU+=6eFHbBP5nJ4~biw@0)1OWs>`YiG6zp5@)R4O5Wn_XP&L) z#FiRI47TcgsjAh`gs0PgH~X4 z7-3Q9NqD5=s7KzH7wfH0W`y;0GOMCBA3!e&3bN2os+BD6Od$J9bsJER=lv6_3+{j? 
zOPWLcTI#tk)AdS6nJ-X_FgvJ>9o?b6)g^Lo#Fb zcIdDOC=A?OmBqxc2)Hb{vFqyT=xL^^omgXnHOjNdk#+%z-tA_%NlT$*${;N-@Z*NmEj=I_infjzueNiP)WhJm1TGHfoQb?@fA7 z&T7&XL5syrLNbaJ{5Xd>R_T6ec_k9Nu;T1C zdA^XE1LoJn?EOK}68xZj%i8@drYxTw2ukl3{hjb!XklzFZ+4Tnmn+TdN<*rS z(k2)atW6Y9)J(qwaVclA&*MGlrkcK3lOR=ZOWq^G2sgHDU-ASyDMXF>@WinfHCb8;1l^=${OgFZVC6UhFh zU*j*q{zi8ugS)y+mBC7qzH=mVNTJ{y+eQ<>bmslFYKd9R>k=_q-}3^HeyVq|yW21H zB^-_MpQ@egF8Sp~lgRVxKHFy9FW!mZ>b>FK_WuAcv}o&U=B=4IMnX~LJW9WMU#I-H zn$)#1{{VY;$w%&^zplT^yLcno!EUGrIX&5Hfc-G;WAw}KP9e@ zsx3rnnoRPk7;LL;*w_MPjc~8Rp_cZk%|4%|E)-;Jv6m9s=1B>dTEU7nQ^kK(g;7wH zi!|!moO4&J3CY$oQGD)kXR|STi1dFni?Si9MTLvgqCaQuJC0K?K$h%U>Xt^Uq$+3QWy*<%QauqZwvc`FFeuXDC_#}(cLJuDAcqhW48`3!PaS28g%N8RFPwmv*WAN z%9C%v@96s2&O2K*d$q5A?U^ykqkx@{=vs9`YH3iUn~Mt`kajc0n?m6$m%k>U=Cc8#Hi zmf$TL1MgY#HV4ORXvKFw$f~2@+|XNUuXMDXjcRQi7M~E_%k@JEJ|Vr9HZYl%TB>r7 zd2DN|HjO@07^9G4;$4#B(WC7Nr^*}ebGdMX+=nI^yp~Or7*n69rfRmZr}u@u2Tvpm zMl)+V9DYv`Fsl8z*f@6T|AI4lbV3#d|e~n zCwOSLgFzkvx$>VQz&8g)wSyOvo_%TlpzaLfa|{aK?FwO3@-5Tn*9{gvhsIAMAm&)~ zJnUt^VPZ3x&xpVII9tNc>0&0x?~lVx$k zrcu z9_4w%qAYw$LVYFq{ZRh^!)w*V#1KFEWL8f~tBC#yk0$nngBFLUAml|XK6HclvJ0wc z#8j}xJxHQR+>`+1O`1x>B3&0{#ehXlDsoebngTA1w?%+KTwF_%xVV-90&M)(<{Xk$ zQt8?>O0xBO_-WN+We5;)%WlDE=@+oSHLDMr{KD?^Cf28+txqVDI!hv)S`==(6H!qrNa$#Y_X?pGn*9O3bM)l@06bc# z6VF-X4YhbhI@&5{v#FHg8SiRZ?w3=mo&#FfVE1Il1SDTOwe2IqH$Oij`Q4%J#H*xk zX|6?4aEX@^r)1>RrDoh!jK+mNYrB$E+gt%RZXmmHU?`UbUCBkkW!hE*OMrF@jx3)7 zZo!icGpfoh!($e$WJp}D5a78anH;fX#!BwziM03|T!Lx~v3Yeea}}Qhc7CF)-ZfOC z44aF}ay6!|a_7Vb=CdW*Q7zD%_&dfPZbsJNBTk3sE1`{Rz!xe6Q98BPvT_MrH=g=X$!*Uo2^O4=^N zl##LMojOu>k4uW^Bab;O%97+DW&}$nR$=QpL^B$5Dif4aa2!c`)iqSe${6r}o>m^S zE*dE2JYxRsCbU%Gxoc{oW=Q_W3XiY?b!5=%4{*0LPpO?Bte6cQ6DIH4C zv|u=HZ^tf2v#BVVR9o~lVv%6y&B^mqt3Auq&d#H!q^QbURrvn^Cbcw4i68-Mf1ZX& zSOUy+7R2WI6S>CKiqwcYWohVwL;A14MNJSl0sDUP$|BG!2dyRufawY(t3J;=@v!ZA zR(SlKJseYw&!GNQP5%H3sgI&i{S{w{`Jc+q>{XxQqu5`ht32n^L+zuRs#fac{1^HX zll4->@2&nqEzFc3s+SJ*#3j>4=QaM@L+L;IpnUGP*53AiF6XwQxc=kEtr?x{&g0Bt z*9VgMDE-uWU1TppNcwyy>4$Usvf~M~Y25CAc3*PP@t=GAuu`i6UN)<$2|CDu*a#c* zWOsv76coxgu_D3wIy&<_Qpi+7`p@&V^w6b=nboLcD^vcfUN`qq9y=aTsaH~ia&Zgs zeJrN^SOB#dI<-gl zlpKmV(s@ogjD2NIIyb4cel}P@gWh(skgJ`{YINqS{6&`nEH2f6_aB$t;FegvQdMT0 zh0FD^sGGlz)9Q2Sb%>TF6qM(^`cL`7vX+ugt#vWZh!B=J5a8xj(bHj`!u@7)@;%3T%+q?V#&MJdBJ&(%N!>M ze3ba9dgJTNPgyZZ+hk)*Y2-+G?`DV5RhY4O`re00UKR{v5gU4Ty<)tWt6kJ?-a?e6 z30*V8YMs=cjFm@nIvy!Ru+?NrR+D%1+NxOUP(_3D+UIqzH1$!Zo3~@6h% zhKqP%;F~@M@2+sTy3oX{^MeAg{{T?E?EWu0r2S?2MTJwz5~mx!bkS*W7pEMgWN~QI zhcSD@e-;_crx{Kd)_&@oiEF4=(O;5VB`dQysnTB&ldIW=%e1p%*{cwmYDA6@gXsYK zT65spu30ligzQXdPoOmWNML@TWl$%amMHL~f1RxDH8)V+9KmuA28OJwX>g~eX#W78 z;P5NPDn&>8YN{x=-C)`6>*~WzN{&^l(1P)#80s{rzCBVUc0?}jqvVKS^yg!VayuOz zj3|M0U6&O!1t_=`J5de=nF8+UmlCH$vcN>TFNt)(L{|LWN^t&XXr);Rbn2ug`w2>t zZRmcILt)zd-+Pucy<5Aw^1KvUhfz7`hOs?SQR8H&gMERFC4<=H5<4DsYVpW<3(zV` zk%=TJHn<~U7RbEWvs$;7iKmgY3FavDms*!Lom{Q7qYMdS>ZFLkoCyeV>xG`Ju#9*)QjBAG_Pyxj1tW9wOOo8XG5km{rTP#g-0FI=z2gO-oRQN=%uyh2+LX)Y+H~MdZ5y>T0ns4z551gOI(Q zT-q{sKRsC{Yt7?X*_zQ@k%mj8HU;vrVLZW`dKF*^7Z(>DIqZ3+wKaI)^}W^8j-5oQ z<{#8lCgne~K?2G0xTy9rD^+O4w%N*_wi?)xG#&zg(OkPB_0x)kZIhO!`A+YSaA(v^ zw64rL>DC4A-&AiVA+dp7AduScITZj(iKSV|$67e_I(7MNm>p_p>of!R7bC~1`)ujx z{_I)e>sB#0cXeRaC>@_{tC{D62AhY>G-zcN$mnhX`R zas>z_0zf=zO2D^a8RY!M4EYa1cy%9e?dZPO>U}5j&Lb9=QnTwyEcdF;Zx{0K{{TZ* z_>W?!?0r-|+BrI}za}Xv`l;R<^(e&D-BPv?ScyMbJHtAEsTkbc)#A|*dn(1BIM$#s zR7x%D7QF(XQfl!ikOsm}0gE#A`_)DA^Di$EBzj+*Gkm>5WE)aKyHaoMd`|SLU(EHX zeLn6>(&>=cXZV@}}ZDdrcK-Azd~T68jEztc!QRuPNPS@lZ@K`J%?5_<#gqc3$u zo;7H?GLm{Wj~(ConG+~Z(h-W?7vt!D80vFV>k~$$ZNxb<&HMd+m0E_j6ruEy7JU{4 z{I^r8mZM*+nBU(hjl*N~Xrz*^p*w-%_5Z`^5{m 
zS4Y;upmwFm9ir7B!j#`I^$Me$sS5Z;9@iBMt+8J5 zUQuxM*dRzn^%Rt}Quk}*>C`2b5lsnD*r=tQGAU`k)Gjp) zDh9^I`YzyU=%Iq5p$HL~rCQXHvk_T0?M@#<`QpBD717C0Q7kd46hVP1$i^~8V%8`G z@4NaRcC{)&rbLM95plE=g;(-<9}MCA$5c6n)6trNgz(zMmZ3diH{!?ng0$qIZ_4OW zRZOdeBm>ggWUekh=3A-f7!{jTIwXs+l6wv~MZD~|^x8;MY-(4OZ}mBj5pr zsMmFuNS5gXVhw|tz|*Gcs!@D%@jQ-?-CC7R0@kGi84JWnxVb7kasu`Pq5J41`f=v` zr7pCTRn^F3Qt`@^Mv`(waVHO@OTFG4L4`?E zi6FAluwIFnHOQ*JcsD%ikKZ-2bvkg0n*!7%sHo?H0L!aFdU{C@LaA4ivDF?NQYOS= z7sbPM`dXJJl(+yjTD<}>R6vTWTijn6>A26cCS@+ULsXRAeL{{{CgvOTbFAf7LCGm4 zgCz`7sVR~;$gxq&*?uk__~G2A7*8?e*tj{2*!6o7^C^)%EXtBPse%s3m1QIW$VJNP(%c77dy~Drri0vewkWehIUJ(XR(^uN&ep=!Sbl=Hh7%ls65>m; zTuE%(LQ(UiKh_kU!Xr^hJ06CTFDf^@ z~H5AV?57km4$Ruk0gI%@H30;W5kI{?rn(Kc#^ zu9~Sh>F0)Y{831W6Z)z?cQU2{cXUgRAn2$T0n0ae`tF$yv8L3}%aUh}NX76f+|>?H zY;Mmb!9wg4x&*2^ifh?5^v5;CyL!WHYT$Buo!-}Id&5SGnqpFVk1)v=J0E% zt;=s7mri6Taug;6jNIa17nYjtcGmc+u10Msjl+Xk5~L?pivKnX>Ah*1T$Q>C3oW7UD!!vCm1gty0Hnbnlr&bd4YO~ zRiBmBs}!APvc}e0f6$hyy|K7bSCcmB*BQ?c$X%|pRi&pm0){>}J3AgKby*TTR(Kk< zZBd$2iE0baZ-wORD5F@@-0XTyPgRDZGL$S5HrTu>%79e6JlYDGQ6lc6&dA>~+fh-H zo>Qpi&7n_dsr5Nx=EuW}j9Q&y)|^Jbau4L!8UZ=x*b+9|KgGdYPg~s0PC=(stSb_e zP%)$K;o9{Ts6kqV*+3qLD#-QWUW3i0AM zwoWS6jFYjaPexFhZ^Xvc6NPFF8jV(~NT8^xq)HV~gxWB%aI2k`xc>lS)z)DtnRga9 zc@^pYom_e|v*(WzJEpf$lJQnD^$z@Z^>iX+m}%lrQe<3I>FW{?!lh#6xEo$fHI=%8 zgtc<6X_cC#St?OHuh1r@Oev1ZRAD%9kou#$RB}S9jW{I<%J2+|PZzXmaB;eRLiAKm zA;6_ANK$aA9;%Y7z7(pYs#Hqp%am{~dM#ZvM+&kjHZM}5mMuag7|0T&b5;D_QkN#Q z<+e6G&u{>byReRx6X~XTt4j0;ZR)8&_F11=bA7!(+eaOLyu_tnSua5+5<|>E z`pJ4RoP(#4OG5{}R7t5)Rfg+MD;|uWmaowzM2o~TALj4qlT*PVIELPrgWfSm0bI5j zl0VK*pRA{EQQiA>Yo!i<<$?R3t0sn(G_vzlh21y4&-cGhjX@PruIdB?o15_Ie>ktJ z0armOH_FD$?I*m|Q6wT;;n+%ZHtiwLOVqeZDZU5Re%%_1QjWK0rK~_iHFMwiDkJRV z{#YoHP^sk^S#sZGfx_&PO%7DCV0p;GW)gT$>#(yJ)> zl}{0c7oO~Je3iSIttNUMGrrQ(lto%P#EgQ`jtC)eFmRR~Rl!8Yfz01>s<}$o$ z9#%}0ON^-A!@?3U;vFYvE0r}3G*i!1l&H;7hpbxErE^d@s7g;Y2^|R>rN3z{NOF0w zEYMU*CBjo}gPmg8O$?Esh1%x*jH4CCERuRUJ6ct6qsMbojf8?>jb9PHopyzjaq@a z0`QE5&PLo=*D>2SSjG9pN43uRc$cwT1C^-Mr(()^E^X~+QR=m7h^bfz0Buc;_ub}& zKuyX^n(N0@Q5dy|9+oPST9WC-u2MB4)ufcIDu@7)a=ca zR-!CMps88qwZEE6L3<~pD}8YM#C_whhQ)b~-{#zV+da96L03Yr_!fNBACV8Fo`&s3 z)e?e5&9*$PLZH&ajw>xGAQhY8Dn6=h(8fo7qFFIaj-^K{i;x>CwH+p|g(!-mFcESR zK?JDnIUTHhL|WnUT`rz#_dwJd@@k}9G9)5I+hNFqu<8&%RLicx!JuY)KV%Do9yvbM2JK5@0 z(q3$e1eO&^a$VgQ5N!E|uLgwzzJ~VI7Md)n(%4IS7)a~1*B|NC*xFh(1zMkZ0fLJ4 z0NvHy6K4XZ8l`2(PMe)mwBys}ye*yMP- zUapWi=`SfhF!nTb-9wS8c_MH80_&>(0Eko{gd`5g)%@IaBm=M38(c@H(Z&zLRHZ2U z95r)}rm0;ruhD4IJ*~ak1zMK3Cq%UHkFgd10PiusP?imo;Vos!MZrbT(G?f-pZq|| zBhz$Iah**`^g;*Y>3>8~e=+|6#8l|{Z4R6(>MBZ^E%z{?H}tR`50up}tSTp`wU3M- z;;^<^G9*Aqxhy%=iALKl(L9G~J>=H(d5oHVp;b+ZS%~BzDzi z-O9M@2DPEenk`B{9q-guB!3Q}0NCH-3AlSnn(kX&CYd%f^l!|1De6CWYp^Rr)I2Fn zq$-x6{7_V12?yX;JaKv}mO06iA|*nAvXRe8+@#zV7N??Vnyd?ou9g|lt)XZN_Phmm z0>4aIm%F;nc9=6F41Ikq@3X3t6@|DkO$bE&2L#?e?K~`4af8OIf){as*Mzem$fk{-PgxO9dQp{;w_H^2^a=OQB>fKc; zC`w4o{{ScEQ(mw#Q*3pnMJDHqUi^jaE1S?(D;8zwK~<19etMpxJ94w2sUQH{D!oT4 zb~n*h3PHPkH5p`X@ljLF8M7$raf@B)8JW16U2aZ{6=n<~IaM0cq>_@H!5I+Csg*U6_0n9ZoX8i?mp7e^w!=_kJe)&7vMRDFGJZDyMp!314l;cLH0m?$byjHN3lf&Tz&jIilZAfx2demOi`O)k$UwbjlNn4vf0 zYyK}=x@j6-s~IZ=Q-wt)eL3a+b7fV}ae^fEVSu5$6M>YGA!F?&`Fez_5xcEUn7h+x4j?W^D=wN$2q|Gpc$Rt{ z!&RQv&S@(<1?n0V7O8@=AL(j|vc!1<%6K0`GM18}oB(zXJw9eV9E!#|8Cry5wt%Ou zmKhL2qFhTUP$Pk{7YB%PBhdFZRTOiYNivF3R1NY%xd$%y=&MM(`JR&;(VQ!uX|4N3 zm0NYrPugi3kg1`DBCn|$aFNmn5c)b=*7RHAMShy{-Eh<>b;0PNc?Xpm{<3$6rU-lL zVKNn~JkAfv_n^#XH$2pw->B3!CE{wdVx%0dP44sRIf2vre-pXotLbKotC4#a(bh~d z0s58c(N$9QWL%*S4zh(*k{)k0^SUhT_oDFbcD-3NJ>7~qHp~V;B`SEdm_$YDce+_{ zQO76253oFNtUEG($4h@=2Zj5G{{Te4At-THcWO+^OTNA6Vn(Jr#$H+abseu(LR2{( 
zJv$lDD$qx{p|F5l!rD@&Fs6YOR=bOIX3&*c&Z${61!PAdW`OX06S}dc9`EVNPZ+Ts zSmNcHzi2EBPR@RlP6@4-`^>Ilpt+M9;zVJ(Atgr#< z#}zzL{?eip9oVk*AUB0c=mq3guj|mp1@BUm`XHC9OskALF=WIifExJ%6^E6qEGjZN z88!Mvde&tT6(?xJbLI?83yYetQh4GPeH8eDF_XJss>xP-HoK`#1(UU`ECaQ>k3fVc zCuBN~XzjE+B^(P*;l)xg9BH~C% ziFZ45PUo?ZbFlo~2DzxksH%*}C5#&bAjGgPM~D>PXB)laBCvwTfyX27XMJ(KtV0{F zUrX+_Wm{aVj3Ca|xv!I?i`&+|=@EN&fb}Ws>1v`9Q_CEw z=}8CDFPEXmCEO?l0xWNx$1UDAuif1_2~|}%mX1;DQ!lCql%~WVLjc>7Oo?hBDf!;( zcZX7~s+x+WNSP7HtTHJdSB}*J0ol;^)S?&&v_T#3VBH;t#xkT=OOhpBhHWM?QfV}0 zMo7BKNj^Gtb$p!U*r8paimCN|Ki?1D5dP925m1rDn5+`34}^@_oof1AOpEDmFOVvR zrk=Gp;gKsBu>)#I$bvW##hVZhCs3e*Xw^DO>E};5Gr+E{Tk?^TzR)i-N$-BKB}dCh zU|LfkP9qQtIomRl1^XQV>j|)FT;Lltnz>g_7my@yzw>G97IP)0fF#rq)lolOgB*xN zh&xiWg%3duDq6Z-B}AwKs^ znQ2vqO@)M2q;129yy~ctI#(Q#f`yAI1Y$~uLB9rk>rp#c(j45fk(nJ8PK=TA>0Naw z&!{T;@p3|?8!Qi13bE)fXHQ)PTzH_8fU6UY zBLecz#G4i%xilaE<{nLal{!#Q%T-i>R+QmfonmaDPpl5k8NSmsddjJ0j#7dUq5;EY z8yjYNTM?>UB)JZP)KdE7hFC;Ulf+-~W7TN-RciXx()DtUlEkG&C_Ohp41%Ki<%0nE zKJn0kXe;BbN&XdjPBG-ljQ3)4i&xYjK{|$)Qi3+Fn$yO5Yyk_)qR&7cE@)bV017Vf zXws_)t3nQ3*>4K-3X@GrfP%TmqcZpgBQc8j6((y~|h6#7&=2%fH`&*LdwD}Att_$xWTrI4%?zlM>LE$8 zlAsDmBm;2feXMcGBC8}VD@xL!6qTghC>?|s9=Ja}rB_tZ_l~njiC@u#g~t-P-AO6A z5vyny;h1|XBp{a>7KW}-o@`3!p{;^SdC(+|M3ks2FhTzSIXYa599ra&eJ(c?k&mO) zRp>gB^h%YgNLP7u*_3WfTxryLWA%9++G>sSc^&TpO$K>Y?fAY`I7$&eWxVv8Qy(GTQOG)uYWUao5n2jY5d^un-NT1H}@oE<%&A z4%eds)GeE2q~vbI06pKg79=jNulcXdMeNK(kWG1c6lKRKT;9ot1ywW z?_V0aUN?J*hxavF4jE$AAVO95WeQ5tdO;1}fHr1cS5DEj#;P>x8eLoji%Cr~P4;Gj z;{=M8GUB%$4Rq^NYmg+^bToh)xi!+&XcXz0SC)}d(l7r2g=8lDxNP;5yq*Vhl%&-6 ziZFSo)ujm?0|-9c-q9w^xS5ULMc}8?CF35gL`NmmpnW0kqD)pjFV1)AL}}D{{{a2Y zhyIj3=*cKcXc+LZb`>qru4>616-P%X-cY3e42C=(`o($w0NYagR@jDwT8}regQ|G6@&5q7hy4;T5K^*J5?}4}AMRqmotVE91%KP8{{RXv z#hMKgE3?m}UG)uI^i2ePiH83Gi!o(^M1H=ec$(z{_F+Yu7>@C%{_P-o)6%E^0KSGq z>#~)+vAe_ljseqMMi2i0SuNB0oS>E<0;aT**U!aM7`q{jBic*(*Lmu`bG(|2yDv~j z=zyDgL4QQki7?cjnu?S9#r(#-YF?>UwHuXBAt#imy=ZqbYjz@rb{!-=v;df`&@Ay` z7nK{s+VwpyL4+WbE`f1$3NxL#5%p07IjRnjptUkLe1yw6!Ttghl zK=sH?`Y)5}qncl7`RY`6{{XB-{-D5XgXyu1F`+*S5T8Ooa|NtyJ-*jqwPIVdCQY1p zz9rBm%tX9{YcdGw<0rx-Dp!((l3$;ssT9=J6Udnn6q6ZJwGFYZz#N{JeWhG_6GzKJPmJ1B5)MIv2;wgmdz4oG^l4@?E zPnm?2*nx)R5t#>z!ae%c4AGRK0SN#pKNenRi!}1o$hl-Vl&KYR`7c#YHX+TPs*k=j zDyuWpb+HvjBeon!O|!kmerKS*^65r2%kvSK?ghpyck*qsr=j?S=92Wgt4#%ERFF=A zBzRP;CgVPwh-tzoag%1)*L2FgPLQHh;g75tGSy9Ux#rP(QV><^@h zo^N{$Yu=H6O|J8_%IT_LN|q2jkVoQK3YlQ-VwI7Le2J^>){|3mkt7N{P^*pjO9G?0 zvJQ1UN@V-80~!2Y$`({ET+&H&rEtE7m8kCit50+C>sTJNeBte4E3J)KRuq%aq?DgK z_WK4|HO~wcJlW!oYn<_fPw8 Date: Sat, 24 Aug 2024 17:54:00 +0900 Subject: [PATCH 18/79] Implement onnx inference for video decoder --- export_video_predictor.py | 5 ++- sam2/modeling/sam2_base.py | 80 ++++++++++++++++++++++++++---------- sam2/sam2_video_predictor.py | 40 +++++++++++++----- 3 files changed, 91 insertions(+), 34 deletions(-) diff --git a/export_video_predictor.py b/export_video_predictor.py index 62dad2977..29b261b7e 100644 --- a/export_video_predictor.py +++ b/export_video_predictor.py @@ -12,6 +12,8 @@ sam2_checkpoint = "./checkpoints/sam2_hiera_large.pt" model_cfg = "sam2_hiera_l.yaml" +import_onnx = False + predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device=device) @@ -65,6 +67,7 @@ def show_box(box, ax): obj_id=ann_obj_id, points=points, labels=labels, + import_onnx=import_onnx ) # show the results on the current (interacted) frame @@ -77,7 +80,7 @@ def show_box(box, ax): # run propagation throughout the video and collect the results in a dict video_segments = {} # video_segments contains the per-frame segmentation results -for 
out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(inference_state): +for out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(inference_state, import_onnx): video_segments[out_frame_idx] = { out_obj_id: (out_mask_logits[i] > 0.0).cpu().numpy() for i, out_obj_id in enumerate(out_obj_ids) diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 4760cf4b9..5e364fadb 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -275,6 +275,7 @@ def _forward_sam_heads( mask_inputs=None, high_res_features=None, multimask_output=False, + import_onnx=False ): """ Forward SAM prompt encoders and mask heads. @@ -351,26 +352,58 @@ def _forward_sam_heads( # a learned `no_mask_embed` to indicate no mask input in this case). sam_mask_prompt = None - sparse_embeddings, dense_embeddings = self.sam_prompt_encoder.forward_normal( - coords=sam_point_coords, - labels=sam_point_labels, - masks=sam_mask_prompt, - ) - ( - low_res_multimasks, - ious, - sam_output_tokens, - object_score_logits, - ) = self.sam_mask_decoder( - image_embeddings=backbone_features, - image_pe=self.sam_prompt_encoder.get_dense_pe(), - sparse_prompt_embeddings=sparse_embeddings, - dense_prompt_embeddings=dense_embeddings, - multimask_output=multimask_output, - repeat_image=False, # the image is already batched - high_res_features1=high_res_features[0], - high_res_features2=high_res_features[1], - ) + if import_onnx: + print("begin onnx mode") + if sam_mask_prompt != None: + raise("currently not supported mask prompt") + if multimask_output != True: + raise("currently not supported multimask_output True") + import onnxruntime + model_id = "hiera_l" + model = onnxruntime.InferenceSession("prompt_encoder_sparse_"+model_id+".onnx") + sparse_embeddings, dense_embeddings, dense_pe = model.run(None, {"coords":sam_point_coords, "labels":sam_point_labels}) + sparse_embeddings = torch.Tensor(sparse_embeddings) + dense_embeddings = torch.Tensor(dense_embeddings) + dense_pe = torch.Tensor(dense_pe) + + model = onnxruntime.InferenceSession("mask_decoder_"+model_id+".onnx") + low_res_multimasks, ious, sam_output_tokens, object_score_logits = model.run(None, { + "image_embeddings":backbone_features.numpy(), + "image_pe": dense_pe.numpy(), + "sparse_prompt_embeddings": sparse_embeddings.numpy(), + "dense_prompt_embeddings": dense_embeddings.numpy(), + #multimask_output=multimask_output, + #repeat_image=False, # the image is already batched + "high_res_features1":high_res_features[0].numpy(), + "high_res_features2":high_res_features[1].numpy()}) + low_res_masks = torch.Tensor(low_res_masks) + iou_predictions = torch.Tensor(iou_predictions) + sam_output_tokens = torch.Tensor(sam_output_tokens) + object_score_logits = torch.Tensor(object_score_logits) + else: + sparse_embeddings, dense_embeddings = self.sam_prompt_encoder.forward_normal( + coords=sam_point_coords, + labels=sam_point_labels, + masks=sam_mask_prompt, + ) + dense_pe = self.sam_prompt_encoder.get_dense_pe() + + ( + low_res_multimasks, + ious, + sam_output_tokens, + object_score_logits, + ) = self.sam_mask_decoder( + image_embeddings=backbone_features, + image_pe=dense_pe, + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + repeat_image=False, # the image is already batched + high_res_features1=high_res_features[0], + high_res_features2=high_res_features[1], + ) + if self.pred_obj_scores: is_obj_appearing = object_score_logits > 0 @@ 
-429,7 +462,7 @@ def _forward_sam_heads( object_score_logits, ) - def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs): + def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs, import_onnx): """ Directly turn binary `mask_inputs` into a output mask logits without using SAM. (same input and output shapes as in _forward_sam_heads above). @@ -458,6 +491,7 @@ def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs) backbone_features=backbone_features, mask_inputs=self.mask_downsample(mask_inputs_float), high_res_features=high_res_features, + import_onnx=import_onnx ) # In this method, we are treating mask_input as output, e.g. using it directly to create spatial mem; # Below, we follow the same design axiom to use mask_input to decide if obj appears or not instead of relying @@ -742,6 +776,7 @@ def track_step( run_mem_encoder=True, # The previously predicted SAM mask logits (which can be fed together with new clicks in demo). prev_sam_mask_logits=None, + import_onnx=False ): current_out = {"point_inputs": point_inputs, "mask_inputs": mask_inputs} # High-resolution feature maps for the SAM head, reshape (HW)BC => BCHW @@ -758,7 +793,7 @@ def track_step( pix_feat = current_vision_feats[-1].permute(1, 2, 0) pix_feat = pix_feat.view(-1, self.hidden_dim, *feat_sizes[-1]) sam_outputs = self._use_mask_as_output( - pix_feat, high_res_features, mask_inputs + pix_feat, high_res_features, mask_inputs, import_onnx ) else: # fused the visual feature with previous memory features in the memory bank @@ -786,6 +821,7 @@ def track_step( mask_inputs=mask_inputs, high_res_features=high_res_features, multimask_output=multimask_output, + import_onnx=import_onnx ) ( _, diff --git a/sam2/sam2_video_predictor.py b/sam2/sam2_video_predictor.py index 8b2fd6c4d..afd597732 100644 --- a/sam2/sam2_video_predictor.py +++ b/sam2/sam2_video_predictor.py @@ -176,6 +176,7 @@ def add_new_points_or_box( clear_old_points=True, normalize_coords=True, box=None, + import_onnx=False ): """Add new points to a frame.""" obj_idx = self._obj_id_to_idx(inference_state, obj_id) @@ -291,6 +292,7 @@ def add_new_points_or_box( # them into memory. run_mem_encoder=False, prev_sam_mask_logits=prev_sam_mask_logits, + import_onnx=import_onnx ) # Add the output to the output dict (to be used as future memory) obj_temp_output_dict[storage_key][frame_idx] = current_out @@ -320,6 +322,7 @@ def add_new_mask( frame_idx, obj_id, mask, + import_onnx=False ): """Add new mask to a frame.""" obj_idx = self._obj_id_to_idx(inference_state, obj_id) @@ -379,6 +382,7 @@ def add_new_mask( # allows us to enforce non-overlapping constraints on all objects before encoding # them into memory. 
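# --- Illustrative sketch, not part of the patch above -----------------------
# Minimal onnxruntime usage matching the import_onnx branch of
# _forward_sam_heads in this patch: inputs are fed as a dict of numpy arrays
# keyed by the input_names chosen at export time, and every output comes back
# as a numpy array that is wrapped into a torch tensor.  The file name and the
# int32 label dtype are assumptions based on the export code in this series.
import numpy as np
import onnxruntime
import torch

session = onnxruntime.InferenceSession("prompt_encoder_sparse_hiera_l.onnx")
coords = np.array([[[500.0, 375.0]]], dtype=np.float32)  # 1 x num_points x 2
labels = np.array([[1]], dtype=np.int32)                 # 1 = foreground click
sparse_emb, dense_emb, dense_pe = (
    torch.from_numpy(o)
    for o in session.run(None, {"coords": coords, "labels": labels})
)
# ----------------------------------------------------------------------------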
run_mem_encoder=False, + import_onnx=import_onnx ) # Add the output to the output dict (to be used as future memory) obj_temp_output_dict[storage_key][frame_idx] = current_out @@ -426,6 +430,7 @@ def _consolidate_temp_output_across_obj( is_cond, run_mem_encoder, consolidate_at_video_res=False, + import_onnx=False ): """ Consolidate the per-object temporary outputs in `temp_output_dict_per_obj` on @@ -492,7 +497,7 @@ def _consolidate_temp_output_across_obj( if run_mem_encoder: if empty_mask_ptr is None: empty_mask_ptr = self._get_empty_mask_ptr( - inference_state, frame_idx + inference_state, frame_idx, import_onnx ) # fill object pointer with a dummy pointer (based on an empty mask) consolidated_out["obj_ptr"][obj_idx : obj_idx + 1] = empty_mask_ptr @@ -531,13 +536,14 @@ def _consolidate_temp_output_across_obj( batch_size=batch_size, high_res_masks=high_res_masks, is_mask_from_pts=True, # these frames are what the user interacted with + import_onnx=import_onnx ) consolidated_out["maskmem_features"] = maskmem_features consolidated_out["maskmem_pos_enc"] = maskmem_pos_enc return consolidated_out - def _get_empty_mask_ptr(self, inference_state, frame_idx): + def _get_empty_mask_ptr(self, inference_state, frame_idx, import_onnx): """Get a dummy object pointer based on an empty mask on the current frame.""" # A dummy (empty) mask with a single object batch_size = 1 @@ -554,7 +560,7 @@ def _get_empty_mask_ptr(self, inference_state, frame_idx): current_vision_feats, current_vision_pos_embeds, feat_sizes, - ) = self._get_image_feature(inference_state, frame_idx, batch_size) + ) = self._get_image_feature(inference_state, frame_idx, batch_size, import_onnx) # Feed the empty mask and image feature above to get a dummy object pointer current_out = self.track_step( @@ -570,11 +576,12 @@ def _get_empty_mask_ptr(self, inference_state, frame_idx): track_in_reverse=False, run_mem_encoder=False, prev_sam_mask_logits=None, + import_onnx=import_onnx ) return current_out["obj_ptr"] @torch.inference_mode() - def propagate_in_video_preflight(self, inference_state): + def propagate_in_video_preflight(self, inference_state, import_onnx=False): """Prepare inference_state and consolidate temporary outputs before tracking.""" # Tracking has started and we don't allow adding new objects until session is reset. 
inference_state["tracking_has_started"] = True @@ -601,7 +608,7 @@ def propagate_in_video_preflight(self, inference_state): # consolidate the temporary output across all objects on this frame for frame_idx in temp_frame_inds: consolidated_out = self._consolidate_temp_output_across_obj( - inference_state, frame_idx, is_cond=is_cond, run_mem_encoder=True + inference_state, frame_idx, is_cond=is_cond, run_mem_encoder=True, import_onnx=import_onnx ) # merge them into "output_dict" and also create per-object slices output_dict[storage_key][frame_idx] = consolidated_out @@ -650,9 +657,10 @@ def propagate_in_video( start_frame_idx=None, max_frame_num_to_track=None, reverse=False, + import_onnx=False ): """Propagate the input points across frames to track in the entire video.""" - self.propagate_in_video_preflight(inference_state) + self.propagate_in_video_preflight(inference_state, import_onnx) output_dict = inference_state["output_dict"] consolidated_frame_inds = inference_state["consolidated_frame_inds"] @@ -712,6 +720,7 @@ def propagate_in_video( mask_inputs=None, reverse=reverse, run_mem_encoder=True, + import_onnx=import_onnx ) output_dict[storage_key][frame_idx] = current_out # Create slices of per-object outputs for subsequent interaction with each @@ -788,7 +797,7 @@ def _reset_tracking_results(self, inference_state): inference_state["tracking_has_started"] = False inference_state["frames_already_tracked"].clear() - def _get_image_feature(self, inference_state, frame_idx, batch_size): + def _get_image_feature(self, inference_state, frame_idx, batch_size, import_onnx = False): """Compute the image features on a given frame.""" # Look up in the cache first image, backbone_out = inference_state["cached_features"].get( @@ -798,7 +807,14 @@ def _get_image_feature(self, inference_state, frame_idx, batch_size): # Cache miss -- we will run inference on a single image device = inference_state["device"] image = inference_state["images"][frame_idx].to(device).float().unsqueeze(0) - backbone_out = self.forward_image(image) + if import_onnx: + import onnxruntime + model_id = "hiera_l" + model = onnxruntime.InferenceSession("image_encoder_"+model_id+".onnx") + vision_feat1, vision_feat2, vision_feat3 = model.run(None, {"input_image":image.numpy()}) + backbone_out = [torch.Tensor(vision_feat1), torch.Tensor(vision_feat2), torch.Tensor(vision_feat3)] + else: + backbone_out = self.forward_image(image) # Cache the most recent frame's feature (for repeated interactions with # a frame; we can use an LRU cache for more frames in the future). 
inference_state["cached_features"] = {frame_idx: (image, backbone_out)} @@ -833,6 +849,7 @@ def _run_single_frame_inference( reverse, run_mem_encoder, prev_sam_mask_logits=None, + import_onnx=False ): """Run tracking on a single frame based on current inputs and previous memory.""" # Retrieve correct image features @@ -842,7 +859,7 @@ def _run_single_frame_inference( current_vision_feats, current_vision_pos_embeds, feat_sizes, - ) = self._get_image_feature(inference_state, frame_idx, batch_size) + ) = self._get_image_feature(inference_state, frame_idx, batch_size, import_onnx) # point and mask should not appear as input simultaneously on the same frame assert point_inputs is None or mask_inputs is None @@ -859,6 +876,7 @@ def _run_single_frame_inference( track_in_reverse=reverse, run_mem_encoder=run_mem_encoder, prev_sam_mask_logits=prev_sam_mask_logits, + import_onnx=import_onnx ) # optionally offload the output to CPU memory to save GPU space @@ -888,7 +906,7 @@ def _run_single_frame_inference( return compact_current_out, pred_masks_gpu def _run_memory_encoder( - self, inference_state, frame_idx, batch_size, high_res_masks, is_mask_from_pts + self, inference_state, frame_idx, batch_size, high_res_masks, is_mask_from_pts, import_onnx ): """ Run the memory encoder on `high_res_masks`. This is usually after applying @@ -897,7 +915,7 @@ def _run_memory_encoder( """ # Retrieve correct image features _, _, current_vision_feats, _, feat_sizes = self._get_image_feature( - inference_state, frame_idx, batch_size + inference_state, frame_idx, batch_size, import_onnx ) maskmem_features, maskmem_pos_enc = self._encode_new_memory( current_vision_feats=current_vision_feats, From bed89142ddb3834bcd8cb6ceefd1ef7a4e1702ef Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Sat, 24 Aug 2024 18:33:48 +0900 Subject: [PATCH 19/79] Implement multimask_output argument --- export_image_predictor.py | 12 +++---- export_video_predictor.py | 2 +- sam2/modeling/sam/mask_decoder.py | 56 ++++++++++++++++++++++++++++++- sam2/modeling/sam2_base.py | 16 ++++----- sam2/sam2_image_predictor.py | 18 ++++++---- 5 files changed, 81 insertions(+), 23 deletions(-) diff --git a/export_image_predictor.py b/export_image_predictor.py index 2e46954c7..7301ec163 100644 --- a/export_image_predictor.py +++ b/export_image_predictor.py @@ -9,12 +9,12 @@ # export settings export_to_onnx_image_encoder = False -export_to_onnx_mask_decoder = False +export_to_onnx_mask_decoder = True export_to_tflite_image_encoder = False -export_to_tflite_mask_decoder = True -import_from_onnx = False -import_from_tflite = True -tflite_int8 = True +export_to_tflite_mask_decoder = False +import_from_onnx = True +import_from_tflite = False +tflite_int8 = False show = True # export PJRT_DEVICE=CPU @@ -71,7 +71,7 @@ def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_l if len(scores) > 1: plt.title(f"Mask {i+1}, Score: {score:.3f}", fontsize=18) plt.axis('off') - plt.show() + #plt.show() plt.savefig(f'output{i+1}.png') # logic diff --git a/export_video_predictor.py b/export_video_predictor.py index 29b261b7e..060adca51 100644 --- a/export_video_predictor.py +++ b/export_video_predictor.py @@ -12,7 +12,7 @@ sam2_checkpoint = "./checkpoints/sam2_hiera_large.pt" model_cfg = "sam2_hiera_l.yaml" -import_onnx = False +import_onnx = True predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device=device) diff --git a/sam2/modeling/sam/mask_decoder.py b/sam2/modeling/sam/mask_decoder.py index 1d6a31e1f..580dff0f3 100644 --- 
a/sam2/modeling/sam/mask_decoder.py +++ b/sam2/modeling/sam/mask_decoder.py @@ -107,7 +107,8 @@ def __init__( self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh - def forward( + # デフォルト実装 + def forward_normal( self, image_embeddings: torch.Tensor, image_pe: torch.Tensor, @@ -167,6 +168,59 @@ def forward( # Prepare output return masks, iou_pred, sam_tokens_out, object_score_logits + # ONNXに変換するために推論とポスト処理を分離するバージョン + def forward_masks( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + repeat_image: bool, + high_res_features1: Optional[torch.Tensor] = None, + high_res_features2: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + masks, iou_pred, mask_tokens_out, object_score_logits = self.predict_masks( + image_embeddings=image_embeddings, + image_pe=image_pe, + sparse_prompt_embeddings=sparse_prompt_embeddings, + dense_prompt_embeddings=dense_prompt_embeddings, + repeat_image=repeat_image, + high_res_features1=high_res_features1, + high_res_features2=high_res_features2, + ) + return masks, iou_pred, mask_tokens_out, object_score_logits + + def forward_postprocess( + self, + masks: torch.Tensor, + iou_pred: torch.Tensor, + mask_tokens_out: torch.Tensor, + object_score_logits: torch.Tensor, + multimask_output: bool, + ) -> Tuple[torch.Tensor, torch.Tensor]: + # Select the correct mask or masks for output + if multimask_output: + masks = masks[:, 1:, :, :] + iou_pred = iou_pred[:, 1:] + elif self.dynamic_multimask_via_stability and not self.training: + masks, iou_pred = self._dynamic_multimask_via_stability(masks, iou_pred) + else: + masks = masks[:, 0:1, :, :] + iou_pred = iou_pred[:, 0:1] + + if multimask_output and self.use_multimask_token_for_obj_ptr: + sam_tokens_out = mask_tokens_out[:, 1:] # [b, 3, c] shape + else: + # Take the mask output token. Here we *always* use the token for single mask output. + # At test time, even if we track after 1-click (and using multimask_output=True), + # we still take the single mask token here. The rationale is that we always track + # after multiple clicks during training, so the past tokens seen during training + # are always the single mask token (and we'll let it be the object-memory token). 
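# --- Illustrative sketch, not part of the patch above -----------------------
# How the split introduced in this patch is meant to be used: forward_masks()
# is the part that gets exported to ONNX, while forward_postprocess() stays in
# Python so that multimask_output is not baked into the exported graph as a
# constant.  The variables below (decoder, image_embed, dense_pe, sparse_emb,
# dense_emb, high_res_feats) are placeholders for objects built elsewhere in
# the predictor.
raw_masks, iou_pred, mask_tokens, obj_logits = decoder.forward_masks(
    image_embeddings=image_embed,
    image_pe=dense_pe,
    sparse_prompt_embeddings=sparse_emb,
    dense_prompt_embeddings=dense_emb,
    repeat_image=False,
    high_res_features1=high_res_feats[0],
    high_res_features2=high_res_feats[1],
)
masks, ious, tokens, obj_logits = decoder.forward_postprocess(
    raw_masks, iou_pred, mask_tokens, obj_logits, multimask_output=True
)
# ----------------------------------------------------------------------------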
+ sam_tokens_out = mask_tokens_out[:, 0:1] # [b, 1, c] shape + + # Prepare output + return masks, iou_pred, sam_tokens_out, object_score_logits + def predict_masks( self, image_embeddings: torch.Tensor, diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 5e364fadb..80d05f240 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -356,30 +356,28 @@ def _forward_sam_heads( print("begin onnx mode") if sam_mask_prompt != None: raise("currently not supported mask prompt") - if multimask_output != True: - raise("currently not supported multimask_output True") import onnxruntime model_id = "hiera_l" model = onnxruntime.InferenceSession("prompt_encoder_sparse_"+model_id+".onnx") - sparse_embeddings, dense_embeddings, dense_pe = model.run(None, {"coords":sam_point_coords, "labels":sam_point_labels}) + sparse_embeddings, dense_embeddings, dense_pe = model.run(None, {"coords":sam_point_coords.numpy(), "labels":sam_point_labels.numpy()}) sparse_embeddings = torch.Tensor(sparse_embeddings) dense_embeddings = torch.Tensor(dense_embeddings) dense_pe = torch.Tensor(dense_pe) model = onnxruntime.InferenceSession("mask_decoder_"+model_id+".onnx") - low_res_multimasks, ious, sam_output_tokens, object_score_logits = model.run(None, { + masks, iou_pred, sam_tokens_out, object_score_logits, object_score_logits = model.run(None, { "image_embeddings":backbone_features.numpy(), "image_pe": dense_pe.numpy(), "sparse_prompt_embeddings": sparse_embeddings.numpy(), "dense_prompt_embeddings": dense_embeddings.numpy(), - #multimask_output=multimask_output, #repeat_image=False, # the image is already batched "high_res_features1":high_res_features[0].numpy(), "high_res_features2":high_res_features[1].numpy()}) - low_res_masks = torch.Tensor(low_res_masks) - iou_predictions = torch.Tensor(iou_predictions) - sam_output_tokens = torch.Tensor(sam_output_tokens) + masks = torch.Tensor(masks) + iou_pred = torch.Tensor(iou_pred) + sam_tokens_out = torch.Tensor(sam_tokens_out) object_score_logits = torch.Tensor(object_score_logits) + low_res_multimasks, ious, sam_output_tokens, object_score_logits = self.model.sam_mask_decoder.forward_postprocess(masks, iou_pred, sam_tokens_out, object_score_logits, multimask_output) else: sparse_embeddings, dense_embeddings = self.sam_prompt_encoder.forward_normal( coords=sam_point_coords, @@ -393,7 +391,7 @@ def _forward_sam_heads( ious, sam_output_tokens, object_score_logits, - ) = self.sam_mask_decoder( + ) = self.sam_mask_decoder.forward_normal( image_embeddings=backbone_features, image_pe=dense_pe, sparse_prompt_embeddings=sparse_embeddings, diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index 1f2a3808e..bdde03c90 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -124,7 +124,7 @@ def set_image( logging.info("Computing image embeddings for the provided image...") if export_to_onnx: - print("input_image", input_image.shape) + #print("input_image", input_image.shape) torch.onnx.export( self.model, (input_image), 'image_encoder_'+model_id+'.onnx', input_names=["input_image"], @@ -578,27 +578,32 @@ def _predict( #print("dense_embeddings", dense_embeddings.shape) if export_to_onnx: + self.model.sam_mask_decoder.forward = self.model.sam_mask_decoder.forward_masks # multimask_outputが定数になってしまうので分離 torch.onnx.export( - self.model.sam_mask_decoder, (self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features[0], 
high_res_features[1]), + self.model.sam_mask_decoder, (self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, batched_mode, high_res_features[0], high_res_features[1]), 'mask_decoder_'+model_id+'.onnx', - input_names=["image_embeddings", "image_pe", "sparse_prompt_embeddings", "dense_prompt_embeddings", "multimask_output", "repeat_image", "high_res_features1", "high_res_features2"], + input_names=["image_embeddings", "image_pe", "sparse_prompt_embeddings", "dense_prompt_embeddings", "repeat_image", "high_res_features1", "high_res_features2"], output_names=["low_res_masks", "iou_predictions"], verbose=False, opset_version=17 ) if import_from_onnx: model = onnxruntime.InferenceSession("mask_decoder_"+model_id+".onnx") - low_res_masks, iou_predictions, _, _ = model.run(None, { + masks, iou_pred, sam_tokens_out, object_score_logits = model.run(None, { "image_embeddings":self._features["image_embed"][img_idx].unsqueeze(0).numpy(), "image_pe": dense_pe.numpy(), "sparse_prompt_embeddings": sparse_embeddings.numpy(), "dense_prompt_embeddings": dense_embeddings.numpy(), "high_res_features1":high_res_features[0].numpy(), "high_res_features2":high_res_features[1].numpy()}) - low_res_masks = torch.Tensor(low_res_masks) - iou_predictions = torch.Tensor(iou_predictions) + masks = torch.Tensor(masks) + iou_pred = torch.Tensor(iou_pred) + sam_tokens_out = torch.Tensor(sam_tokens_out) + object_score_logits = torch.Tensor(object_score_logits) + low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder.forward_postprocess(masks, iou_pred, sam_tokens_out, object_score_logits, multimask_output) if export_to_tflite: + self.model.sam_mask_decoder.forward = self.model.sam_mask_decoder.forward_normal sample_inputs = (self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features[0], high_res_features[1]) if not tflite_int8: @@ -640,6 +645,7 @@ def _predict( iou_predictions = torch.Tensor(iou_predictions) if not import_from_onnx and (not import_from_tflite or not export_to_tflite): + self.model.sam_mask_decoder.forward = self.model.sam_mask_decoder.forward_normal low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder( image_embeddings=self._features["image_embed"][img_idx].unsqueeze(0), image_pe=dense_pe, From 7fde3f142751023fcb8bedc589443857f60f3cc7 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Sat, 24 Aug 2024 18:36:31 +0900 Subject: [PATCH 20/79] Implement sparse embeddings dynamic axis --- sam2/modeling/sam2_base.py | 4 ++-- sam2/sam2_image_predictor.py | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 80d05f240..b03e17508 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -365,7 +365,7 @@ def _forward_sam_heads( dense_pe = torch.Tensor(dense_pe) model = onnxruntime.InferenceSession("mask_decoder_"+model_id+".onnx") - masks, iou_pred, sam_tokens_out, object_score_logits, object_score_logits = model.run(None, { + masks, iou_pred, sam_tokens_out, object_score_logits = model.run(None, { "image_embeddings":backbone_features.numpy(), "image_pe": dense_pe.numpy(), "sparse_prompt_embeddings": sparse_embeddings.numpy(), @@ -377,7 +377,7 @@ def _forward_sam_heads( iou_pred = torch.Tensor(iou_pred) sam_tokens_out = torch.Tensor(sam_tokens_out) object_score_logits = torch.Tensor(object_score_logits) - low_res_multimasks, ious, sam_output_tokens, 
object_score_logits = self.model.sam_mask_decoder.forward_postprocess(masks, iou_pred, sam_tokens_out, object_score_logits, multimask_output) + low_res_multimasks, ious, sam_output_tokens, object_score_logits = self.sam_mask_decoder.forward_postprocess(masks, iou_pred, sam_tokens_out, object_score_logits, multimask_output) else: sparse_embeddings, dense_embeddings = self.sam_prompt_encoder.forward_normal( coords=sam_point_coords, diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index bdde03c90..a24d5bf1f 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -584,6 +584,9 @@ def _predict( 'mask_decoder_'+model_id+'.onnx', input_names=["image_embeddings", "image_pe", "sparse_prompt_embeddings", "dense_prompt_embeddings", "repeat_image", "high_res_features1", "high_res_features2"], output_names=["low_res_masks", "iou_predictions"], + dynamic_axes={ + 'sparse_prompt_embeddings': {1: 'n'}, + }, verbose=False, opset_version=17 ) From fc6dfda9a4cc701af258f6bd5758b905507c0e5e Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Sat, 24 Aug 2024 18:47:25 +0900 Subject: [PATCH 21/79] Connect import onnx --- sam2/sam2_video_predictor.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sam2/sam2_video_predictor.py b/sam2/sam2_video_predictor.py index afd597732..a75d30008 100644 --- a/sam2/sam2_video_predictor.py +++ b/sam2/sam2_video_predictor.py @@ -305,6 +305,7 @@ def add_new_points_or_box( is_cond=is_cond, run_mem_encoder=False, consolidate_at_video_res=True, + import_onnx=import_onnx ) _, video_res_masks = self._get_orig_video_res_output( inference_state, consolidated_out["pred_masks_video_res"] @@ -395,6 +396,7 @@ def add_new_mask( is_cond=is_cond, run_mem_encoder=False, consolidate_at_video_res=True, + import_onnx=import_onnx ) _, video_res_masks = self._get_orig_video_res_output( inference_state, consolidated_out["pred_masks_video_res"] From cfcd64cb4072fbf6cc670459b045a36a964012c2 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Sat, 24 Aug 2024 21:43:54 +0900 Subject: [PATCH 22/79] Only export image encoder core --- export_image_predictor.py | 4 +-- sam2/modeling/sam2_base.py | 47 +++++++++++++++--------------------- sam2/sam2_image_predictor.py | 42 ++++++++++++++++++++------------ sam2/sam2_video_predictor.py | 21 +++++++++++----- 4 files changed, 62 insertions(+), 52 deletions(-) diff --git a/export_image_predictor.py b/export_image_predictor.py index 7301ec163..042216bbf 100644 --- a/export_image_predictor.py +++ b/export_image_predictor.py @@ -9,10 +9,10 @@ # export settings export_to_onnx_image_encoder = False -export_to_onnx_mask_decoder = True +export_to_onnx_mask_decoder = False export_to_tflite_image_encoder = False export_to_tflite_mask_decoder = False -import_from_onnx = True +import_from_onnx = False import_from_tflite = False tflite_int8 = False show = True diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index b03e17508..2a619f43a 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -192,31 +192,11 @@ def __init__( def device(self): return next(self.parameters()).device - def forward(self, input_image): - backbone_out = self.forward_image(input_image) - _, vision_feats, _, _ = self._prepare_backbone_features(backbone_out) - - # Add no_mem_embed, which is added to the lowest rest feat. 
map during training on videos - if self.directly_add_no_mem_embed: - vision_feats[-1] = vision_feats[-1] + self.no_mem_embed - - # Spatial dim for backbone feature maps - _bb_feat_sizes = [ - (256, 256), - (128, 128), - (64, 64), - ] - - feats = [ - feat.permute(1, 2, 0).view(1, -1, *feat_size) - for feat, feat_size in zip(vision_feats[::-1], _bb_feat_sizes[::-1]) - ][::-1] - return feats - - #raise NotImplementedError( - # "Please use the corresponding methods in SAM2VideoPredictor for inference." - # "See notebooks/video_predictor_example.ipynb for an example." - #) + def forward(self): + raise NotImplementedError( + "Please use the corresponding methods in SAM2VideoPredictor for inference." + "See notebooks/video_predictor_example.ipynb for an example." + ) def _build_sam_heads(self): """Build SAM-style prompt encoder and mask decoder.""" @@ -353,7 +333,7 @@ def _forward_sam_heads( sam_mask_prompt = None if import_onnx: - print("begin onnx mode") + print("begin mask decoder onnx") if sam_mask_prompt != None: raise("currently not supported mask prompt") import onnxruntime @@ -365,6 +345,7 @@ def _forward_sam_heads( dense_pe = torch.Tensor(dense_pe) model = onnxruntime.InferenceSession("mask_decoder_"+model_id+".onnx") + print("backbone_features", backbone_features.shape) masks, iou_pred, sam_tokens_out, object_score_logits = model.run(None, { "image_embeddings":backbone_features.numpy(), "image_pe": dense_pe.numpy(), @@ -378,7 +359,13 @@ def _forward_sam_heads( sam_tokens_out = torch.Tensor(sam_tokens_out) object_score_logits = torch.Tensor(object_score_logits) low_res_multimasks, ious, sam_output_tokens, object_score_logits = self.sam_mask_decoder.forward_postprocess(masks, iou_pred, sam_tokens_out, object_score_logits, multimask_output) + print(low_res_multimasks.shape) + print(ious.shape) + print(sam_output_tokens.shape) + print(object_score_logits.shape) else: + print("begin mask decoder torch") + print("backbone_features", backbone_features.shape) sparse_embeddings, dense_embeddings = self.sam_prompt_encoder.forward_normal( coords=sam_point_coords, labels=sam_point_labels, @@ -401,6 +388,10 @@ def _forward_sam_heads( high_res_features1=high_res_features[0], high_res_features2=high_res_features[1], ) + print(low_res_multimasks.shape) + print(ious.shape) + print(sam_output_tokens.shape) + print(object_score_logits.shape) if self.pred_obj_scores: is_obj_appearing = object_score_logits > 0 @@ -525,7 +516,7 @@ def forward_image(self, img_batch: torch.Tensor): backbone_out["backbone_fpn"][1] = self.sam_mask_decoder.conv_s1( backbone_out["backbone_fpn"][1] ) - return backbone_out + return backbone_out["vision_features"], backbone_out["vision_pos_enc"][0], backbone_out["vision_pos_enc"][1], backbone_out["vision_pos_enc"][2], backbone_out["backbone_fpn"][0], backbone_out["backbone_fpn"][1], backbone_out["backbone_fpn"][2] def _prepare_backbone_features(self, backbone_out): """Prepare and flatten visual features.""" @@ -791,7 +782,7 @@ def track_step( pix_feat = current_vision_feats[-1].permute(1, 2, 0) pix_feat = pix_feat.view(-1, self.hidden_dim, *feat_sizes[-1]) sam_outputs = self._use_mask_as_output( - pix_feat, high_res_features, mask_inputs, import_onnx + pix_feat, high_res_features, mask_inputs, import_onnx=import_onnx ) else: # fused the visual feature with previous memory features in the memory bank diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index a24d5bf1f..de9fb006f 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -125,18 
+125,24 @@ def set_image( if export_to_onnx: #print("input_image", input_image.shape) + self.model.forward = self.model.forward_image torch.onnx.export( self.model, (input_image), 'image_encoder_'+model_id+'.onnx', input_names=["input_image"], - output_names=["feats1", "feats2", "feats3"], + output_names=["vision_features", "vision_pos_enc_0", "vision_pos_enc_1", "vision_pos_enc_2", "backbone_fpn_0", "backbone_fpn_1", "backbone_fpn_2"], verbose=False, opset_version=17 ) if import_from_onnx: model = onnxruntime.InferenceSession("image_encoder_"+model_id+".onnx") - vision_feat1, vision_feat2, vision_feat3 = model.run(None, {"input_image":input_image.numpy()}) - feats = [torch.Tensor(vision_feat1), torch.Tensor(vision_feat2), torch.Tensor(vision_feat3)] - #print("feats", vision_feat1.shape, vision_feat2.shape, vision_feat3.shape) + vision_features, vision_pos_enc_0, vision_pos_enc_1, vision_pos_enc_2, backbone_fpn_0, backbone_fpn_1, backbone_fpn_2 = model.run(None, {"input_image":input_image.numpy()}) + print("vision_features", vision_features.shape) + print("vision_pos_enc_0", vision_pos_enc_0.shape) + print("vision_pos_enc_1", vision_pos_enc_1.shape) + print("vision_pos_enc_2", vision_pos_enc_2.shape) + print("backbone_fpn_0", backbone_fpn_0.shape) + print("backbone_fpn_1", backbone_fpn_1.shape) + print("backbone_fpn_2", backbone_fpn_2.shape) if export_to_tflite: import ai_edge_torch @@ -172,20 +178,24 @@ def set_image( edge_model = model if import_from_tflite: - vision_feat1, vision_feat2, vision_feat3 = edge_model(input_image) - feats = [torch.Tensor(vision_feat1), torch.Tensor(vision_feat2), torch.Tensor(vision_feat3)] + vision_features, vision_pos_enc_0, vision_pos_enc_1, vision_pos_enc_2, backbone_fpn_0, backbone_fpn_1, backbone_fpn_2 = edge_model(input_image) if not import_from_onnx and (not import_from_tflite or not export_to_tflite): - backbone_out = self.model.forward_image(input_image) - _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out) - # Add no_mem_embed, which is added to the lowest rest feat. map during training on videos - if self.model.directly_add_no_mem_embed: - vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed - - feats = [ - feat.permute(1, 2, 0).view(1, -1, *feat_size) - for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1]) - ][::-1] + vision_features, vision_pos_enc_0, vision_pos_enc_1, vision_pos_enc_2, backbone_fpn_0, backbone_fpn_1, backbone_fpn_2 = self.model.forward_image(input_image) + + backbone_out = {"vision_features":torch.Tensor(vision_features), + "vision_pos_enc":[torch.Tensor(vision_pos_enc_0), torch.Tensor(vision_pos_enc_1), torch.Tensor(vision_pos_enc_2)], + "backbone_fpn":[torch.Tensor(backbone_fpn_0), torch.Tensor(backbone_fpn_1), torch.Tensor(backbone_fpn_2)]} + + _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out) + # Add no_mem_embed, which is added to the lowest rest feat. 
map during training on videos + if self.model.directly_add_no_mem_embed: + vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed + + feats = [ + feat.permute(1, 2, 0).view(1, -1, *feat_size) + for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1]) + ][::-1] self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]} self._is_image_set = True diff --git a/sam2/sam2_video_predictor.py b/sam2/sam2_video_predictor.py index a75d30008..6be9cf102 100644 --- a/sam2/sam2_video_predictor.py +++ b/sam2/sam2_video_predictor.py @@ -662,7 +662,7 @@ def propagate_in_video( import_onnx=False ): """Propagate the input points across frames to track in the entire video.""" - self.propagate_in_video_preflight(inference_state, import_onnx) + self.propagate_in_video_preflight(inference_state, import_onnx=import_onnx) output_dict = inference_state["output_dict"] consolidated_frame_inds = inference_state["consolidated_frame_inds"] @@ -810,18 +810,27 @@ def _get_image_feature(self, inference_state, frame_idx, batch_size, import_onnx device = inference_state["device"] image = inference_state["images"][frame_idx].to(device).float().unsqueeze(0) if import_onnx: + print("begin image encoder onnx") + print(image.shape) import onnxruntime model_id = "hiera_l" model = onnxruntime.InferenceSession("image_encoder_"+model_id+".onnx") - vision_feat1, vision_feat2, vision_feat3 = model.run(None, {"input_image":image.numpy()}) - backbone_out = [torch.Tensor(vision_feat1), torch.Tensor(vision_feat2), torch.Tensor(vision_feat3)] + vision_features, vision_pos_enc_0, vision_pos_enc_1, vision_pos_enc_2, backbone_fpn_0, backbone_fpn_1, backbone_fpn_2 = model.run(None, {"input_image":image.numpy()}) else: - backbone_out = self.forward_image(image) + print("begin image encoder torch") + print(image.shape) + vision_features, vision_pos_enc_0, vision_pos_enc_1, vision_pos_enc_2, backbone_fpn_0, backbone_fpn_1, backbone_fpn_2 = self.forward_image(image) + + backbone_out = {"vision_features":torch.Tensor(vision_features), + "vision_pos_enc":[torch.Tensor(vision_pos_enc_0), torch.Tensor(vision_pos_enc_1), torch.Tensor(vision_pos_enc_2)], + "backbone_fpn":[torch.Tensor(backbone_fpn_0), torch.Tensor(backbone_fpn_1), torch.Tensor(backbone_fpn_2)]} + # Cache the most recent frame's feature (for repeated interactions with # a frame; we can use an LRU cache for more frames in the future). 
inference_state["cached_features"] = {frame_idx: (image, backbone_out)} # expand the features to have the same dimension as the number of objects + print("batch_size", batch_size) expanded_image = image.expand(batch_size, -1, -1, -1) expanded_backbone_out = { "backbone_fpn": backbone_out["backbone_fpn"].copy(), @@ -861,7 +870,7 @@ def _run_single_frame_inference( current_vision_feats, current_vision_pos_embeds, feat_sizes, - ) = self._get_image_feature(inference_state, frame_idx, batch_size, import_onnx) + ) = self._get_image_feature(inference_state, frame_idx, batch_size, import_onnx=import_onnx) # point and mask should not appear as input simultaneously on the same frame assert point_inputs is None or mask_inputs is None @@ -917,7 +926,7 @@ def _run_memory_encoder( """ # Retrieve correct image features _, _, current_vision_feats, _, feat_sizes = self._get_image_feature( - inference_state, frame_idx, batch_size, import_onnx + inference_state, frame_idx, batch_size, import_onnx=import_onnx ) maskmem_features, maskmem_pos_enc = self._encode_new_memory( current_vision_feats=current_vision_feats, From 78e1c36f1f9955aba6039e6bb3d9eff86e0e1fae Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Sat, 24 Aug 2024 22:03:09 +0900 Subject: [PATCH 23/79] Fix onnx inference error --- export_video_predictor.py | 4 ++-- sam2/sam2_video_predictor.py | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/export_video_predictor.py b/export_video_predictor.py index 060adca51..d951c01ab 100644 --- a/export_video_predictor.py +++ b/export_video_predictor.py @@ -50,7 +50,7 @@ def show_box(box, ax): ] frame_names.sort(key=lambda p: int(os.path.splitext(p)[0])) -inference_state = predictor.init_state(video_path=video_dir) +inference_state = predictor.init_state(video_path=video_dir, import_onnx=import_onnx) predictor.reset_state(inference_state) ann_frame_idx = 0 # the frame index we interact with @@ -80,7 +80,7 @@ def show_box(box, ax): # run propagation throughout the video and collect the results in a dict video_segments = {} # video_segments contains the per-frame segmentation results -for out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(inference_state, import_onnx): +for out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(inference_state, import_onnx=import_onnx): video_segments[out_frame_idx] = { out_obj_id: (out_mask_logits[i] > 0.0).cpu().numpy() for i, out_obj_id in enumerate(out_obj_ids) diff --git a/sam2/sam2_video_predictor.py b/sam2/sam2_video_predictor.py index 6be9cf102..2fa98c06c 100644 --- a/sam2/sam2_video_predictor.py +++ b/sam2/sam2_video_predictor.py @@ -43,6 +43,7 @@ def init_state( offload_video_to_cpu=False, offload_state_to_cpu=False, async_loading_frames=False, + import_onnx=False ): """Initialize an inference state.""" compute_device = self.device # device of the model @@ -103,7 +104,7 @@ def init_state( inference_state["tracking_has_started"] = False inference_state["frames_already_tracked"] = {} # Warm up the visual backbone and cache the image feature on frame 0 - self._get_image_feature(inference_state, frame_idx=0, batch_size=1) + self._get_image_feature(inference_state, frame_idx=0, batch_size=1, import_onnx=import_onnx) return inference_state @classmethod @@ -499,7 +500,7 @@ def _consolidate_temp_output_across_obj( if run_mem_encoder: if empty_mask_ptr is None: empty_mask_ptr = self._get_empty_mask_ptr( - inference_state, frame_idx, import_onnx + inference_state, frame_idx, import_onnx=import_onnx ) # fill 
object pointer with a dummy pointer (based on an empty mask) consolidated_out["obj_ptr"][obj_idx : obj_idx + 1] = empty_mask_ptr @@ -562,7 +563,7 @@ def _get_empty_mask_ptr(self, inference_state, frame_idx, import_onnx): current_vision_feats, current_vision_pos_embeds, feat_sizes, - ) = self._get_image_feature(inference_state, frame_idx, batch_size, import_onnx) + ) = self._get_image_feature(inference_state, frame_idx, batch_size, import_onnx=import_onnx) # Feed the empty mask and image feature above to get a dummy object pointer current_out = self.track_step( From 37573c8c62dd1ddbc04e08ec6439cd34575fdeba Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Sat, 24 Aug 2024 22:42:23 +0900 Subject: [PATCH 24/79] Add output name --- export_image_predictor.py | 4 ++-- sam2/sam2_image_predictor.py | 21 +++++++++++---------- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/export_image_predictor.py b/export_image_predictor.py index 042216bbf..7301ec163 100644 --- a/export_image_predictor.py +++ b/export_image_predictor.py @@ -9,10 +9,10 @@ # export settings export_to_onnx_image_encoder = False -export_to_onnx_mask_decoder = False +export_to_onnx_mask_decoder = True export_to_tflite_image_encoder = False export_to_tflite_mask_decoder = False -import_from_onnx = False +import_from_onnx = True import_from_tflite = False tflite_int8 = False show = True diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index de9fb006f..5edffaab1 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -593,7 +593,7 @@ def _predict( self.model.sam_mask_decoder, (self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, batched_mode, high_res_features[0], high_res_features[1]), 'mask_decoder_'+model_id+'.onnx', input_names=["image_embeddings", "image_pe", "sparse_prompt_embeddings", "dense_prompt_embeddings", "repeat_image", "high_res_features1", "high_res_features2"], - output_names=["low_res_masks", "iou_predictions"], + output_names=["masks", "iou_pred", "sam_tokens_out", "object_score_logits"], dynamic_axes={ 'sparse_prompt_embeddings': {1: 'n'}, }, @@ -616,8 +616,8 @@ def _predict( low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder.forward_postprocess(masks, iou_pred, sam_tokens_out, object_score_logits, multimask_output) if export_to_tflite: - self.model.sam_mask_decoder.forward = self.model.sam_mask_decoder.forward_normal - sample_inputs = (self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features[0], high_res_features[1]) + self.model.sam_mask_decoder.forward = self.model.sam_mask_decoder.forward_masks + sample_inputs = (self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, batched_mode, high_res_features[0], high_res_features[1]) if not tflite_int8: import ai_edge_torch @@ -634,7 +634,7 @@ def _predict( ) model = torch._export.capture_pre_autograd_graph(self.model.sam_mask_decoder, sample_inputs) model = quantize_pt2e.prepare_pt2e(model, quantizer) - model(self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, multimask_output, batched_mode, high_res_features[0], high_res_features[1]) # calibration + model(self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, batched_mode, high_res_features[0], high_res_features[1]) # calibration model = 
quantize_pt2e.convert_pt2e(model, fold_quantize=False) with_quantizer = ai_edge_torch.convert( @@ -649,17 +649,18 @@ def _predict( if import_from_tflite: multimask_output_np = np.zeros((1), dtype=bool) batched_mode_np = np.zeros((1), dtype=bool) - if multimask_output: - multimask_output_np[0] = True if batched_mode: batched_mode_np[0] = True - low_res_masks, iou_predictions, _, _ = edge_model(self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, multimask_output_np, batched_mode_np, high_res_features[0], high_res_features[1]) - low_res_masks = torch.Tensor(low_res_masks) - iou_predictions = torch.Tensor(iou_predictions) + masks, iou_pred, sam_tokens_out, object_score_logits = edge_model(self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, batched_mode_np, high_res_features[0], high_res_features[1]) + masks = torch.Tensor(masks) + iou_pred = torch.Tensor(iou_pred) + sam_tokens_out = torch.Tensor(sam_tokens_out) + object_score_logits = torch.Tensor(object_score_logits) + low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder.forward_postprocess(masks, iou_pred, sam_tokens_out, object_score_logits, multimask_output) if not import_from_onnx and (not import_from_tflite or not export_to_tflite): self.model.sam_mask_decoder.forward = self.model.sam_mask_decoder.forward_normal - low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder( + masks, iou_pred, sam_tokens_out, object_score_logits = self.model.sam_mask_decoder( image_embeddings=self._features["image_embed"][img_idx].unsqueeze(0), image_pe=dense_pe, sparse_prompt_embeddings=sparse_embeddings, From d388d2fe67cdd74e80b091693cc769c553cfffdd Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Sun, 25 Aug 2024 21:15:07 +0900 Subject: [PATCH 25/79] Export memory attention and encoder --- export_video_predictor.py | 12 +++-- sam2/modeling/sam2_base.py | 90 +++++++++++++++++++++++++++++------- sam2/sam2_video_predictor.py | 72 ++++++++++++++++++----------- 3 files changed, 127 insertions(+), 47 deletions(-) diff --git a/export_video_predictor.py b/export_video_predictor.py index d951c01ab..8c3e75270 100644 --- a/export_video_predictor.py +++ b/export_video_predictor.py @@ -12,7 +12,9 @@ sam2_checkpoint = "./checkpoints/sam2_hiera_large.pt" model_cfg = "sam2_hiera_l.yaml" -import_onnx = True +export_to_onnx = True +model_id = "hiera_l" +import_from_onnx = True predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device=device) @@ -50,7 +52,7 @@ def show_box(box, ax): ] frame_names.sort(key=lambda p: int(os.path.splitext(p)[0])) -inference_state = predictor.init_state(video_path=video_dir, import_onnx=import_onnx) +inference_state = predictor.init_state(video_path=video_dir, import_from_onnx=import_from_onnx) predictor.reset_state(inference_state) ann_frame_idx = 0 # the frame index we interact with @@ -67,7 +69,9 @@ def show_box(box, ax): obj_id=ann_obj_id, points=points, labels=labels, - import_onnx=import_onnx + import_from_onnx=import_from_onnx, + export_to_onnx=export_to_onnx, + model_id=model_id ) # show the results on the current (interacted) frame @@ -80,7 +84,7 @@ def show_box(box, ax): # run propagation throughout the video and collect the results in a dict video_segments = {} # video_segments contains the per-frame segmentation results -for out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(inference_state, import_onnx=import_onnx): +for out_frame_idx, out_obj_ids, out_mask_logits in 
predictor.propagate_in_video(inference_state, import_from_onnx=import_from_onnx, export_to_onnx=export_to_onnx, model_id=model_id): video_segments[out_frame_idx] = { out_obj_id: (out_mask_logits[i] > 0.0).cpu().numpy() for i, out_obj_id in enumerate(out_obj_ids) diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 2a619f43a..992b1b3a7 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -115,10 +115,12 @@ def __init__( # Part 2: memory attention to condition current frame's visual features # with memories (and obj ptrs) from past frames self.memory_attention = memory_attention + self.memory_attention_onnx_exported = False self.hidden_dim = memory_attention.d_model # Part 3: memory encoder for the previous frame's outputs self.memory_encoder = memory_encoder + self.memory_encoder_onnx_exported = False self.mem_dim = self.hidden_dim if hasattr(self.memory_encoder, "out_proj") and hasattr( self.memory_encoder.out_proj, "weight" @@ -255,7 +257,7 @@ def _forward_sam_heads( mask_inputs=None, high_res_features=None, multimask_output=False, - import_onnx=False + import_from_onnx=False ): """ Forward SAM prompt encoders and mask heads. @@ -332,7 +334,7 @@ def _forward_sam_heads( # a learned `no_mask_embed` to indicate no mask input in this case). sam_mask_prompt = None - if import_onnx: + if import_from_onnx: print("begin mask decoder onnx") if sam_mask_prompt != None: raise("currently not supported mask prompt") @@ -451,7 +453,7 @@ def _forward_sam_heads( object_score_logits, ) - def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs, import_onnx): + def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs, import_from_onnx): """ Directly turn binary `mask_inputs` into a output mask logits without using SAM. (same input and output shapes as in _forward_sam_heads above). @@ -480,7 +482,7 @@ def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs, backbone_features=backbone_features, mask_inputs=self.mask_downsample(mask_inputs_float), high_res_features=high_res_features, - import_onnx=import_onnx + import_from_onnx=import_from_onnx ) # In this method, we are treating mask_input as output, e.g. 
using it directly to create spatial mem; # Below, we follow the same design axiom to use mask_input to decide if obj appears or not instead of relying @@ -544,6 +546,9 @@ def _prepare_memory_conditioned_features( output_dict, num_frames, track_in_reverse=False, # tracking in reverse time order (for demo usage) + export_to_onnx=False, + import_from_onnx=False, + model_id=None ): """Fuse the current frame's visual feature map with previous memory.""" B = current_vision_feats[-1].size(1) # batch size on this frame @@ -694,13 +699,34 @@ def _prepare_memory_conditioned_features( memory = torch.cat(to_cat_memory, dim=0) memory_pos_embed = torch.cat(to_cat_memory_pos_embed, dim=0) - pix_feat_with_mem = self.memory_attention( - curr=current_vision_feats, - curr_pos=current_vision_pos_embeds, - memory=memory, - memory_pos=memory_pos_embed, - num_obj_ptr_tokens=num_obj_ptr_tokens, - ) + if export_to_onnx and not self.memory_attention_onnx_exported: + self.memory_attention_onnx_exported = True + print("current_vision_feats", current_vision_feats[0].shape, current_vision_feats[0].dtype) + print("memory", memory.shape, memory.dtype) + print("current_vision_pos_embeds", current_vision_pos_embeds[0].shape, current_vision_pos_embeds[0].dtype) + print("memory_pos_embed", memory_pos_embed.shape, memory_pos_embed.dtype) + print("num_obj_ptr_tokens", num_obj_ptr_tokens) + torch.onnx.export( + self.memory_attention, (current_vision_feats[0], memory, current_vision_pos_embeds[0], memory_pos_embed, num_obj_ptr_tokens), 'memory_attention_'+model_id+'.onnx', + input_names=["curr", "memory", "curr_pos", "memory_pos", "num_obj_ptr_tokens"], + output_names=["pix_feat"], + verbose=False, opset_version=17 + ) + + if import_from_onnx: + import onnxruntime + model = onnxruntime.InferenceSession("memory_attention_"+model_id+".onnx") + pix_feat_with_mem = model.run(None, {"curr":current_vision_feats[0].numpy(), "memory":memory.numpy(), "curr_pos":current_vision_pos_embeds[0].numpy(), "memory_pos":memory_pos_embed.numpy(), "num_obj_ptr_tokens":num_obj_ptr_tokens.numpy()}) + + if not import_from_onnx: + pix_feat_with_mem = self.memory_attention( + curr=current_vision_feats, + curr_pos=current_vision_pos_embeds, + memory=memory, + memory_pos=memory_pos_embed, + num_obj_ptr_tokens=num_obj_ptr_tokens, + ) + # reshape the output (HW)BC => BCHW pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W) return pix_feat_with_mem @@ -711,6 +737,9 @@ def _encode_new_memory( feat_sizes, pred_masks_high_res, is_mask_from_pts, + export_to_onnx = False, + import_from_onnx = False, + model_id = None ): """Encode the current image and its prediction into a memory feature.""" B = current_vision_feats[-1].size(1) # batch size on this frame @@ -737,9 +766,27 @@ def _encode_new_memory( mask_for_mem = mask_for_mem * self.sigmoid_scale_for_mem_enc if self.sigmoid_bias_for_mem_enc != 0.0: mask_for_mem = mask_for_mem + self.sigmoid_bias_for_mem_enc - maskmem_out = self.memory_encoder( - pix_feat, mask_for_mem, skip_mask_sigmoid=True # sigmoid already applied - ) + + if export_to_onnx and not self.memory_encoder_onnx_exported: + self.memory_encoder_onnx_exported = True + torch.onnx.export( + self.memory_encoder, (pix_feat, mask_for_mem, False), 'memory_encoder_'+model_id+'.onnx', + input_names=["pix_feat", "masks"], + output_names=["vision_features", "vision_pos_enc"], + verbose=False, opset_version=17 + ) + + if import_from_onnx: + import onnxruntime + model = onnxruntime.InferenceSession("memory_encoder_"+model_id+".onnx") + 
vision_features, vision_pos_enc = model.run(None, {"pix_feat":pix_feat.numpy(), "masks":mask_for_mem.numpy()}) + maskmem_out = {"vision_features": vision_features, "vision_pos_enc": [vision_pos_enc]} + + if not import_from_onnx: + maskmem_out = self.memory_encoder( + pix_feat, mask_for_mem, skip_mask_sigmoid=True # sigmoid already applied + ) + maskmem_features = maskmem_out["vision_features"] maskmem_pos_enc = maskmem_out["vision_pos_enc"] @@ -765,7 +812,10 @@ def track_step( run_mem_encoder=True, # The previously predicted SAM mask logits (which can be fed together with new clicks in demo). prev_sam_mask_logits=None, - import_onnx=False + # ONNX Export + export_to_onnx=False, + import_from_onnx=False, + model_id=None ): current_out = {"point_inputs": point_inputs, "mask_inputs": mask_inputs} # High-resolution feature maps for the SAM head, reshape (HW)BC => BCHW @@ -782,7 +832,7 @@ def track_step( pix_feat = current_vision_feats[-1].permute(1, 2, 0) pix_feat = pix_feat.view(-1, self.hidden_dim, *feat_sizes[-1]) sam_outputs = self._use_mask_as_output( - pix_feat, high_res_features, mask_inputs, import_onnx=import_onnx + pix_feat, high_res_features, mask_inputs, import_from_onnx=import_from_onnx ) else: # fused the visual feature with previous memory features in the memory bank @@ -795,6 +845,9 @@ def track_step( output_dict=output_dict, num_frames=num_frames, track_in_reverse=track_in_reverse, + export_to_onnx=export_to_onnx, + import_from_onnx=import_from_onnx, + model_id=model_id ) # apply SAM-style segmentation head # here we might feed previously predicted low-res SAM mask logits into the SAM mask decoder, @@ -810,7 +863,7 @@ def track_step( mask_inputs=mask_inputs, high_res_features=high_res_features, multimask_output=multimask_output, - import_onnx=import_onnx + import_from_onnx=import_from_onnx ) ( _, @@ -835,6 +888,9 @@ def track_step( feat_sizes=feat_sizes, pred_masks_high_res=high_res_masks_for_mem_enc, is_mask_from_pts=(point_inputs is not None), + export_to_onnx=export_to_onnx, + import_from_onnx=import_from_onnx, + model_id=model_id ) current_out["maskmem_features"] = maskmem_features current_out["maskmem_pos_enc"] = maskmem_pos_enc diff --git a/sam2/sam2_video_predictor.py b/sam2/sam2_video_predictor.py index 2fa98c06c..49f1f7e15 100644 --- a/sam2/sam2_video_predictor.py +++ b/sam2/sam2_video_predictor.py @@ -43,7 +43,7 @@ def init_state( offload_video_to_cpu=False, offload_state_to_cpu=False, async_loading_frames=False, - import_onnx=False + import_from_onnx=False ): """Initialize an inference state.""" compute_device = self.device # device of the model @@ -104,7 +104,7 @@ def init_state( inference_state["tracking_has_started"] = False inference_state["frames_already_tracked"] = {} # Warm up the visual backbone and cache the image feature on frame 0 - self._get_image_feature(inference_state, frame_idx=0, batch_size=1, import_onnx=import_onnx) + self._get_image_feature(inference_state, frame_idx=0, batch_size=1, import_from_onnx=import_from_onnx) return inference_state @classmethod @@ -177,7 +177,9 @@ def add_new_points_or_box( clear_old_points=True, normalize_coords=True, box=None, - import_onnx=False + import_from_onnx=False, + export_to_onnx=False, + model_id=None ): """Add new points to a frame.""" obj_idx = self._obj_id_to_idx(inference_state, obj_id) @@ -293,7 +295,9 @@ def add_new_points_or_box( # them into memory. 
run_mem_encoder=False, prev_sam_mask_logits=prev_sam_mask_logits, - import_onnx=import_onnx + import_from_onnx=import_from_onnx, + export_to_onnx=export_to_onnx, + model_id=model_id ) # Add the output to the output dict (to be used as future memory) obj_temp_output_dict[storage_key][frame_idx] = current_out @@ -306,7 +310,7 @@ def add_new_points_or_box( is_cond=is_cond, run_mem_encoder=False, consolidate_at_video_res=True, - import_onnx=import_onnx + import_from_onnx=import_from_onnx, export_to_onnx=export_to_onnx, model_id=model_id ) _, video_res_masks = self._get_orig_video_res_output( inference_state, consolidated_out["pred_masks_video_res"] @@ -324,7 +328,9 @@ def add_new_mask( frame_idx, obj_id, mask, - import_onnx=False + import_from_onnx=False, + export_to_onnx=False, + model_id=None ): """Add new mask to a frame.""" obj_idx = self._obj_id_to_idx(inference_state, obj_id) @@ -384,7 +390,9 @@ def add_new_mask( # allows us to enforce non-overlapping constraints on all objects before encoding # them into memory. run_mem_encoder=False, - import_onnx=import_onnx + import_from_onnx=import_from_onnx, + export_to_onnx=export_to_onnx, + model_id=model_id ) # Add the output to the output dict (to be used as future memory) obj_temp_output_dict[storage_key][frame_idx] = current_out @@ -397,7 +405,7 @@ def add_new_mask( is_cond=is_cond, run_mem_encoder=False, consolidate_at_video_res=True, - import_onnx=import_onnx + import_from_onnx=import_from_onnx, export_to_onnx=export_to_onnx, model_id=model_id ) _, video_res_masks = self._get_orig_video_res_output( inference_state, consolidated_out["pred_masks_video_res"] @@ -433,7 +441,9 @@ def _consolidate_temp_output_across_obj( is_cond, run_mem_encoder, consolidate_at_video_res=False, - import_onnx=False + import_from_onnx=False, + export_to_onnx=False, + model_id=None ): """ Consolidate the per-object temporary outputs in `temp_output_dict_per_obj` on @@ -500,7 +510,7 @@ def _consolidate_temp_output_across_obj( if run_mem_encoder: if empty_mask_ptr is None: empty_mask_ptr = self._get_empty_mask_ptr( - inference_state, frame_idx, import_onnx=import_onnx + inference_state, frame_idx, import_from_onnx=import_from_onnx, export_to_onnx=export_to_onnx, model_id=model_id ) # fill object pointer with a dummy pointer (based on an empty mask) consolidated_out["obj_ptr"][obj_idx : obj_idx + 1] = empty_mask_ptr @@ -539,14 +549,14 @@ def _consolidate_temp_output_across_obj( batch_size=batch_size, high_res_masks=high_res_masks, is_mask_from_pts=True, # these frames are what the user interacted with - import_onnx=import_onnx + import_from_onnx=import_from_onnx ) consolidated_out["maskmem_features"] = maskmem_features consolidated_out["maskmem_pos_enc"] = maskmem_pos_enc return consolidated_out - def _get_empty_mask_ptr(self, inference_state, frame_idx, import_onnx): + def _get_empty_mask_ptr(self, inference_state, frame_idx, import_from_onnx, export_to_onnx, model_id): """Get a dummy object pointer based on an empty mask on the current frame.""" # A dummy (empty) mask with a single object batch_size = 1 @@ -563,7 +573,7 @@ def _get_empty_mask_ptr(self, inference_state, frame_idx, import_onnx): current_vision_feats, current_vision_pos_embeds, feat_sizes, - ) = self._get_image_feature(inference_state, frame_idx, batch_size, import_onnx=import_onnx) + ) = self._get_image_feature(inference_state, frame_idx, batch_size, import_from_onnx=import_from_onnx) # Feed the empty mask and image feature above to get a dummy object pointer current_out = self.track_step( @@ -579,12 
+589,14 @@ def _get_empty_mask_ptr(self, inference_state, frame_idx, import_onnx): track_in_reverse=False, run_mem_encoder=False, prev_sam_mask_logits=None, - import_onnx=import_onnx + import_from_onnx=import_from_onnx, + export_to_onnx=export_to_onnx, + model_id=model_id ) return current_out["obj_ptr"] @torch.inference_mode() - def propagate_in_video_preflight(self, inference_state, import_onnx=False): + def propagate_in_video_preflight(self, inference_state, import_from_onnx=False, export_to_onnx=False, model_id=None): """Prepare inference_state and consolidate temporary outputs before tracking.""" # Tracking has started and we don't allow adding new objects until session is reset. inference_state["tracking_has_started"] = True @@ -611,7 +623,7 @@ def propagate_in_video_preflight(self, inference_state, import_onnx=False): # consolidate the temporary output across all objects on this frame for frame_idx in temp_frame_inds: consolidated_out = self._consolidate_temp_output_across_obj( - inference_state, frame_idx, is_cond=is_cond, run_mem_encoder=True, import_onnx=import_onnx + inference_state, frame_idx, is_cond=is_cond, run_mem_encoder=True, import_from_onnx=import_from_onnx, export_to_onnx=export_to_onnx, model_id=model_id ) # merge them into "output_dict" and also create per-object slices output_dict[storage_key][frame_idx] = consolidated_out @@ -660,10 +672,12 @@ def propagate_in_video( start_frame_idx=None, max_frame_num_to_track=None, reverse=False, - import_onnx=False + import_from_onnx=False, + export_to_onnx=False, + model_id=None ): """Propagate the input points across frames to track in the entire video.""" - self.propagate_in_video_preflight(inference_state, import_onnx=import_onnx) + self.propagate_in_video_preflight(inference_state, import_from_onnx=import_from_onnx, export_to_onnx=export_to_onnx, model_id=model_id) output_dict = inference_state["output_dict"] consolidated_frame_inds = inference_state["consolidated_frame_inds"] @@ -723,7 +737,9 @@ def propagate_in_video( mask_inputs=None, reverse=reverse, run_mem_encoder=True, - import_onnx=import_onnx + import_from_onnx=import_from_onnx, + export_to_onnx=export_to_onnx, + model_id=model_id ) output_dict[storage_key][frame_idx] = current_out # Create slices of per-object outputs for subsequent interaction with each @@ -800,7 +816,7 @@ def _reset_tracking_results(self, inference_state): inference_state["tracking_has_started"] = False inference_state["frames_already_tracked"].clear() - def _get_image_feature(self, inference_state, frame_idx, batch_size, import_onnx = False): + def _get_image_feature(self, inference_state, frame_idx, batch_size, import_from_onnx = False): """Compute the image features on a given frame.""" # Look up in the cache first image, backbone_out = inference_state["cached_features"].get( @@ -810,7 +826,7 @@ def _get_image_feature(self, inference_state, frame_idx, batch_size, import_onnx # Cache miss -- we will run inference on a single image device = inference_state["device"] image = inference_state["images"][frame_idx].to(device).float().unsqueeze(0) - if import_onnx: + if import_from_onnx: print("begin image encoder onnx") print(image.shape) import onnxruntime @@ -861,7 +877,9 @@ def _run_single_frame_inference( reverse, run_mem_encoder, prev_sam_mask_logits=None, - import_onnx=False + import_from_onnx=False, + export_to_onnx=False, + model_id=None ): """Run tracking on a single frame based on current inputs and previous memory.""" # Retrieve correct image features @@ -871,7 +889,7 @@ def 
_run_single_frame_inference( current_vision_feats, current_vision_pos_embeds, feat_sizes, - ) = self._get_image_feature(inference_state, frame_idx, batch_size, import_onnx=import_onnx) + ) = self._get_image_feature(inference_state, frame_idx, batch_size, import_from_onnx=import_from_onnx) # point and mask should not appear as input simultaneously on the same frame assert point_inputs is None or mask_inputs is None @@ -888,7 +906,9 @@ def _run_single_frame_inference( track_in_reverse=reverse, run_mem_encoder=run_mem_encoder, prev_sam_mask_logits=prev_sam_mask_logits, - import_onnx=import_onnx + import_from_onnx=import_from_onnx, + export_to_onnx=export_to_onnx, + model_id=model_id ) # optionally offload the output to CPU memory to save GPU space @@ -918,7 +938,7 @@ def _run_single_frame_inference( return compact_current_out, pred_masks_gpu def _run_memory_encoder( - self, inference_state, frame_idx, batch_size, high_res_masks, is_mask_from_pts, import_onnx + self, inference_state, frame_idx, batch_size, high_res_masks, is_mask_from_pts, import_from_onnx ): """ Run the memory encoder on `high_res_masks`. This is usually after applying @@ -927,7 +947,7 @@ def _run_memory_encoder( """ # Retrieve correct image features _, _, current_vision_feats, _, feat_sizes = self._get_image_feature( - inference_state, frame_idx, batch_size, import_onnx=import_onnx + inference_state, frame_idx, batch_size, import_from_onnx=import_from_onnx ) maskmem_features, maskmem_pos_enc = self._encode_new_memory( current_vision_feats=current_vision_feats, From ebbda3db0d0d126ed191d5cf6e77a3c6848fc80f Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Sun, 25 Aug 2024 21:44:57 +0900 Subject: [PATCH 26/79] Disable export memory attention --- sam2/modeling/sam2_base.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 992b1b3a7..fd878f5db 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -699,26 +699,26 @@ def _prepare_memory_conditioned_features( memory = torch.cat(to_cat_memory, dim=0) memory_pos_embed = torch.cat(to_cat_memory_pos_embed, dim=0) - if export_to_onnx and not self.memory_attention_onnx_exported: + if False:#export_to_onnx and not self.memory_attention_onnx_exported: self.memory_attention_onnx_exported = True print("current_vision_feats", current_vision_feats[0].shape, current_vision_feats[0].dtype) print("memory", memory.shape, memory.dtype) print("current_vision_pos_embeds", current_vision_pos_embeds[0].shape, current_vision_pos_embeds[0].dtype) print("memory_pos_embed", memory_pos_embed.shape, memory_pos_embed.dtype) print("num_obj_ptr_tokens", num_obj_ptr_tokens) - torch.onnx.export( + torch.onnx.export( # dynamo_export self.memory_attention, (current_vision_feats[0], memory, current_vision_pos_embeds[0], memory_pos_embed, num_obj_ptr_tokens), 'memory_attention_'+model_id+'.onnx', input_names=["curr", "memory", "curr_pos", "memory_pos", "num_obj_ptr_tokens"], output_names=["pix_feat"], verbose=False, opset_version=17 ) - if import_from_onnx: + if False:#import_from_onnx: import onnxruntime model = onnxruntime.InferenceSession("memory_attention_"+model_id+".onnx") pix_feat_with_mem = model.run(None, {"curr":current_vision_feats[0].numpy(), "memory":memory.numpy(), "curr_pos":current_vision_pos_embeds[0].numpy(), "memory_pos":memory_pos_embed.numpy(), "num_obj_ptr_tokens":num_obj_ptr_tokens.numpy()}) - if not import_from_onnx: + if True:#not import_from_onnx: pix_feat_with_mem = 
self.memory_attention( curr=current_vision_feats, curr_pos=current_vision_pos_embeds, From 08f84c53e068c21b7dc24ec040996a5023531dfa Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Sun, 25 Aug 2024 21:49:07 +0900 Subject: [PATCH 27/79] Fix type error --- sam2/modeling/sam2_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index fd878f5db..eb05bf9f8 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -780,7 +780,7 @@ def _encode_new_memory( import onnxruntime model = onnxruntime.InferenceSession("memory_encoder_"+model_id+".onnx") vision_features, vision_pos_enc = model.run(None, {"pix_feat":pix_feat.numpy(), "masks":mask_for_mem.numpy()}) - maskmem_out = {"vision_features": vision_features, "vision_pos_enc": [vision_pos_enc]} + maskmem_out = {"vision_features": torch.Tensor(vision_features), "vision_pos_enc": [torch.Tensor(vision_pos_enc)]} if not import_from_onnx: maskmem_out = self.memory_encoder( From cc43208de01a14caface52a41c05266764a4c95b Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Mon, 26 Aug 2024 12:04:39 +0900 Subject: [PATCH 28/79] Implemen masks arguments --- sam2/modeling/sam/prompt_encoder.py | 47 ++++++----------------------- sam2/modeling/sam2_base.py | 11 +++++-- sam2/sam2_image_predictor.py | 28 +++++++++++------ 3 files changed, 38 insertions(+), 48 deletions(-) diff --git a/sam2/modeling/sam/prompt_encoder.py b/sam2/modeling/sam/prompt_encoder.py index 2834a4093..d62c9bdf8 100644 --- a/sam2/modeling/sam/prompt_encoder.py +++ b/sam2/modeling/sam/prompt_encoder.py @@ -153,11 +153,12 @@ def _get_batch_size( def _get_device(self) -> torch.device: return self.point_embeddings[0].weight.device - def forward_normal( + def forward( self, coords: Optional[torch.Tensor], labels: Optional[torch.Tensor], masks: Optional[torch.Tensor], + masks_enable: Optional[torch.Tensor] ) -> Tuple[torch.Tensor, torch.Tensor]: """ Embeds different types of prompts, returning both sparse and dense @@ -176,50 +177,22 @@ def forward_normal( torch.Tensor: dense embeddings for the masks, in the shape Bx(embed_dim)x(embed_H)x(embed_W) """ - bs = self._get_batch_size(coords, labels, masks) - sparse_embeddings = torch.empty( - (bs, 0, self.embed_dim), device=self._get_device() - ) - if coords is not None and labels is not None: - point_embeddings = self._embed_points(coords, labels, pad=True) - sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1) + if coords is None or labels is None: + raise("onnx not supported coords is None") - if masks is not None: - dense_embeddings = self._embed_masks(masks) - else: - dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand( - bs, -1, self.image_embedding_size[0], self.image_embedding_size[1] - ) - - return sparse_embeddings, dense_embeddings - - def forward_sparse( - self, - coords: torch.Tensor, - labels: torch.Tensor, - ) -> Tuple[torch.Tensor, torch.Tensor]: - bs = coords.shape[0] - + bs = self._get_batch_size(coords, labels, masks) sparse_embeddings = torch.empty( (bs, 0, self.embed_dim), device=self._get_device() ) point_embeddings = self._embed_points(coords, labels, pad=True) sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1) - - dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand( + + dense_embeddings1 = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand( bs, -1, self.image_embedding_size[0], self.image_embedding_size[1] ) + dense_embeddings2 = 
self._embed_masks(masks) + + dense_embeddings = torch.where(masks_enable[0] == 1, dense_embeddings2, dense_embeddings1) return sparse_embeddings, dense_embeddings, self.get_dense_pe() - - def forward_dense( - self, - masks: torch.Tensor, - ) -> Tuple[torch.Tensor, torch.Tensor]: - bs = masks.shape[0] - sparse_embeddings = torch.empty( - (bs, 0, self.embed_dim), device=self._get_device() - ) - dense_embeddings = self._embed_masks(masks) - return sparse_embeddings, dense_embeddings \ No newline at end of file diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index eb05bf9f8..924aa9d6a 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -340,8 +340,15 @@ def _forward_sam_heads( raise("currently not supported mask prompt") import onnxruntime model_id = "hiera_l" - model = onnxruntime.InferenceSession("prompt_encoder_sparse_"+model_id+".onnx") - sparse_embeddings, dense_embeddings, dense_pe = model.run(None, {"coords":sam_point_coords.numpy(), "labels":sam_point_labels.numpy()}) + model = onnxruntime.InferenceSession("prompt_encoder_"+model_id+".onnx") + if sam_mask_prompt is None: + import numpy as np + mask_input_dummy = torch.Tensor(np.zeros((1, 256, 256))) + masks_enable = torch.tensor([0], dtype=torch.int) + else: + mask_input_dummy = sam_mask_prompt + masks_enable = torch.tensor([1], dtype=torch.int) + sparse_embeddings, dense_embeddings, dense_pe = model.run(None, {"coords":sam_point_coords.numpy(), "labels":sam_point_labels.numpy(), "masks":mask_input_dummy.numpy(), "masks_enable":masks_enable.numpy()}) sparse_embeddings = torch.Tensor(sparse_embeddings) dense_embeddings = torch.Tensor(dense_embeddings) dense_pe = torch.Tensor(dense_pe) diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index 5edffaab1..6507d6b27 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -493,25 +493,35 @@ def _predict( else: concat_points = (box_coords, box_labels) + # New data for onnx + if concat_points is None: + raise ("concat points must be exists") # Noneの場合はtensorサイズが0のテンソルを返さないといけないためwhereで組めない + if mask_input is None: + mask_input_dummy = torch.Tensor(np.zeros((1, 256, 256))) + masks_enable = torch.tensor([0], dtype=torch.int) # boolだとonnxへのエクスポートのwhereでエラーになる + else: + mask_input_dummy = mask_input + masks_enable = torch.tensor([1], dtype=torch.int) + print("mask_input_dummy", mask_input_dummy.shape) if export_to_onnx: #print("concat_points", concat_points.shape) #print("mask_input", mask_input.shape) - self.model.sam_prompt_encoder.forward = self.model.sam_prompt_encoder.forward_sparse torch.onnx.export( - self.model.sam_prompt_encoder, (concat_points[0], concat_points[1]), 'prompt_encoder_sparse_'+model_id+'.onnx', - input_names=["coords", "labels"], + self.model.sam_prompt_encoder, (concat_points[0], concat_points[1], mask_input_dummy, masks_enable), 'prompt_encoder_'+model_id+'.onnx', + input_names=["coords", "labels", "masks", "masks_enable"], output_names=["sparse_embeddings", "dense_embeddings", "dense_pe"], dynamic_axes={ 'coords': {0: 'b', 1: 'n'}, 'labels': {0: 'b', 1: 'n'}, + 'masks': {0: 'b', 1: 'h', 2: 'w'}, }, verbose=False, opset_version=17 ) if import_from_onnx: - model = onnxruntime.InferenceSession("prompt_encoder_sparse_"+model_id+".onnx") - sparse_embeddings, dense_embeddings, dense_pe = model.run(None, {"coords":concat_points[0].numpy(), "labels":concat_points[1].numpy()}) + model = onnxruntime.InferenceSession("prompt_encoder_"+model_id+".onnx") + sparse_embeddings, dense_embeddings, 
dense_pe = model.run(None, {"coords":concat_points[0].numpy(), "labels":concat_points[1].numpy(), "masks": mask_input_dummy.numpy(), "masks_enable":masks_enable.numpy()}) sparse_embeddings = torch.Tensor(sparse_embeddings) dense_embeddings = torch.Tensor(dense_embeddings) dense_pe = torch.Tensor(dense_pe) @@ -531,8 +541,7 @@ def _predict( if export_to_tflite: import ai_edge_torch - sample_inputs = (concat_points[0], concat_points[1]) - self.model.sam_prompt_encoder.forward = self.model.sam_prompt_encoder.forward_sparse + sample_inputs = (concat_points[0], concat_points[1], mask_input_dummy, masks_enable) if not tflite_int8: edge_model = ai_edge_torch.convert(self.model.sam_prompt_encoder, sample_inputs) @@ -561,17 +570,18 @@ def _predict( edge_model = model if import_from_tflite and not tflite_int8: - sparse_embeddings, dense_embeddings, dense_pe = edge_model(concat_points[0], concat_points[1]) + sparse_embeddings, dense_embeddings, dense_pe = edge_model(concat_points[0], concat_points[1], mask_input_dummy, masks_enable) sparse_embeddings = torch.Tensor(sparse_embeddings) dense_embeddings = torch.Tensor(dense_embeddings) dense_pe = torch.Tensor(dense_pe) if not import_from_onnx and (not import_from_tflite or not export_to_tflite or tflite_int8): - sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder.forward_normal( + sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder.forward( coords=concat_points[0], labels=concat_points[1], #boxes=None, masks=mask_input, + masks_enable=masks_enable ) dense_pe = self.model.sam_prompt_encoder.get_dense_pe() From bcfca16c779453f26764a07efd0d754770c7cc84 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Mon, 26 Aug 2024 14:00:50 +0900 Subject: [PATCH 29/79] Replace repeat interleave --- sam2/modeling/sam/mask_decoder.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/sam2/modeling/sam/mask_decoder.py b/sam2/modeling/sam/mask_decoder.py index 580dff0f3..43276f94f 100644 --- a/sam2/modeling/sam/mask_decoder.py +++ b/sam2/modeling/sam/mask_decoder.py @@ -263,7 +263,12 @@ def predict_masks( assert ( image_pe.shape[0] == 1 ), "image_pe should have size 1 in batch dim (from `get_dense_pe()`)" - pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0) + + pos_src = torch.tensor((tokens.shape[0], image_pe.shape[1], image_pe.shape[2])) + pos_src = image_pe # batch broad cast + + #pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0) # one_hotが生成responseえる + b, c, h, w = src.shape # Run the transformer From 6ec15f692fb43d02806aafcfe96b6e16845133f1 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Mon, 26 Aug 2024 14:52:33 +0900 Subject: [PATCH 30/79] Fix tflite export error --- sam2/sam2_image_predictor.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index 6507d6b27..2c2e9f23d 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -148,6 +148,7 @@ def set_image( import ai_edge_torch import tensorflow as tf sample_inputs = (input_image,) + self.model.forward = self.model.forward_image if not tflite_int8: tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]}} @@ -545,7 +546,7 @@ def _predict( if not tflite_int8: edge_model = ai_edge_torch.convert(self.model.sam_prompt_encoder, sample_inputs) - edge_model.export("prompt_encoder_sparse_"+model_id+".tflite") + edge_model.export("prompt_encoder_"+model_id+".tflite") 
if False:#tflite_int8: # labelがint64で量子化できない from ai_edge_torch.quantize import pt2e_quantizer @@ -565,7 +566,7 @@ def _predict( sample_inputs, quant_config=quant_config.QuantConfig(pt2e_quantizer=quantizer), ) - with_quantizer.export("prompt_encoder_sparse_"+model_id+"_int8.tflite") + with_quantizer.export("prompt_encoder_"+model_id+"_int8.tflite") edge_model = model From 9fa36afb22a2ae5011546e0493d52550ddd9b7ad Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Mon, 26 Aug 2024 15:56:36 +0900 Subject: [PATCH 31/79] Fix torch inference error --- sam2/sam2_image_predictor.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index 2c2e9f23d..6af9f1861 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -581,7 +581,7 @@ def _predict( coords=concat_points[0], labels=concat_points[1], #boxes=None, - masks=mask_input, + masks=mask_input_dummy, masks_enable=masks_enable ) dense_pe = self.model.sam_prompt_encoder.get_dense_pe() @@ -658,7 +658,6 @@ def _predict( edge_model = model if import_from_tflite: - multimask_output_np = np.zeros((1), dtype=bool) batched_mode_np = np.zeros((1), dtype=bool) if batched_mode: batched_mode_np[0] = True From a02df7ccf2c28e656651d975fb520844f118bbfd Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Mon, 26 Aug 2024 16:10:15 +0900 Subject: [PATCH 32/79] Fix torch inference error --- sam2/sam2_image_predictor.py | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index 6af9f1861..767659bad 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -527,19 +527,6 @@ def _predict( dense_embeddings = torch.Tensor(dense_embeddings) dense_pe = torch.Tensor(dense_pe) - #if export_to_onnx: - #self.model.sam_prompt_encoder.forward = self.model.sam_prompt_encoder.forward_dense - #if mask_input is None: - # mask_input_non_zero = np.zeros((1, 1024, 1024)) - #else: - # mask_input_non_zero = mask_input - #torch.onnx.export( - # self.model.sam_prompt_encoder, (mask_input_non_zero), 'prompt_encoder_dense_'+model_id+'.onnx', - # input_names=["mask_input"], - # output_names=["sparse_embeddings", "dense_embeddings"], - # verbose=False, opset_version=17 - #) - if export_to_tflite: import ai_edge_torch sample_inputs = (concat_points[0], concat_points[1], mask_input_dummy, masks_enable) @@ -577,14 +564,13 @@ def _predict( dense_pe = torch.Tensor(dense_pe) if not import_from_onnx and (not import_from_tflite or not export_to_tflite or tflite_int8): - sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder.forward( + sparse_embeddings, dense_embeddings, dense_pe = self.model.sam_prompt_encoder.forward( coords=concat_points[0], labels=concat_points[1], #boxes=None, masks=mask_input_dummy, masks_enable=masks_enable ) - dense_pe = self.model.sam_prompt_encoder.get_dense_pe() # Predict masks batched_mode = ( @@ -680,6 +666,7 @@ def _predict( high_res_features1=high_res_features[0], high_res_features2=high_res_features[1], ) + low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder.forward_postprocess(masks, iou_pred, sam_tokens_out, object_score_logits, multimask_output) # Upscale the masks to the original image resolution masks = self._transforms.postprocess_masks( From 6fd0a6b179f38dee3294dda0aa108b6f159be956 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Thu, 29 Aug 2024 10:28:59 +0900 Subject: [PATCH 33/79] Support model size --- 
export_image_predictor.py | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/export_image_predictor.py b/export_image_predictor.py index 7301ec163..eade17315 100644 --- a/export_image_predictor.py +++ b/export_image_predictor.py @@ -8,21 +8,40 @@ from sam2.sam2_image_predictor import SAM2ImagePredictor # export settings -export_to_onnx_image_encoder = False +export_to_onnx_image_encoder = True export_to_onnx_mask_decoder = True +import_from_onnx = True + export_to_tflite_image_encoder = False export_to_tflite_mask_decoder = False -import_from_onnx = True import_from_tflite = False + tflite_int8 = False show = True # export PJRT_DEVICE=CPU # model settings -sam2_checkpoint = "./checkpoints/sam2_hiera_large.pt" -model_cfg = "sam2_hiera_l.yaml" -model_id = "hiera_l" +#model_id = "hiera_l" +model_id = "hiera_t" +#model_id = "hiera_s" +#model_id = "hiera_b+" + +if model_id == "hiera_l": + sam2_checkpoint = "./checkpoints/sam2_hiera_large.pt" + model_cfg = "sam2_hiera_l.yaml" +if model_id == "hiera_b+": + sam2_checkpoint = "./checkpoints/sam2_hiera_base_plus.pt" + model_cfg = "sam2_hiera_b+.yaml" +if model_id == "hiera_s": + sam2_checkpoint = "./checkpoints/sam2_hiera_small.pt" + model_cfg = "sam2_hiera_s.yaml" +elif model_id == "hiera_t": + sam2_checkpoint = "./checkpoints/sam2_hiera_tiny.pt" + model_cfg = "sam2_hiera_t.yaml" +else: + print("unknown model id") + exit() # use cpu for export device = torch.device("cpu") @@ -57,7 +76,7 @@ def show_box(box, ax): w, h = box[2] - box[0], box[3] - box[1] ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0, 0, 0, 0), lw=2)) -def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_labels=None, borders=True): +def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_labels=None, borders=True, model_id=model_id): for i, (mask, score) in enumerate(zip(masks, scores)): plt.figure(figsize=(10, 10)) plt.imshow(image) @@ -72,7 +91,7 @@ def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_l plt.title(f"Mask {i+1}, Score: {score:.3f}", fontsize=18) plt.axis('off') #plt.show() - plt.savefig(f'output{i+1}.png') + plt.savefig(f'output{i+1}_'+model_id+'.png') # logic image = Image.open('notebooks/images/truck.jpg') @@ -107,6 +126,6 @@ def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_l logits = logits[sorted_ind] if show: - show_masks(image, masks, scores, point_coords=input_point, input_labels=input_label, borders=True) + show_masks(image, masks, scores, point_coords=input_point, input_labels=input_label, borders=True, model_id=model_id) print("Success!") \ No newline at end of file From 1f2c9da9a5bc8db69ae73771ed517dc3e0b9c66a Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Thu, 29 Aug 2024 10:30:19 +0900 Subject: [PATCH 34/79] Fix model id selection --- export_image_predictor.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/export_image_predictor.py b/export_image_predictor.py index eade17315..76de23f1e 100644 --- a/export_image_predictor.py +++ b/export_image_predictor.py @@ -22,18 +22,18 @@ # export PJRT_DEVICE=CPU # model settings -#model_id = "hiera_l" -model_id = "hiera_t" +model_id = "hiera_l" +#model_id = "hiera_t" #model_id = "hiera_s" #model_id = "hiera_b+" if model_id == "hiera_l": sam2_checkpoint = "./checkpoints/sam2_hiera_large.pt" model_cfg = "sam2_hiera_l.yaml" -if model_id == "hiera_b+": +elif model_id == "hiera_b+": sam2_checkpoint = 
"./checkpoints/sam2_hiera_base_plus.pt" model_cfg = "sam2_hiera_b+.yaml" -if model_id == "hiera_s": +elif model_id == "hiera_s": sam2_checkpoint = "./checkpoints/sam2_hiera_small.pt" model_cfg = "sam2_hiera_s.yaml" elif model_id == "hiera_t": From cdb71ecda40a755b42f4a2cd76e9cb6cd12ea6fa Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Fri, 30 Aug 2024 09:47:18 +0900 Subject: [PATCH 35/79] Use dynamic quantize --- sam2/sam2_image_predictor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index 767659bad..850cc118d 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -161,7 +161,7 @@ def set_image( from torch.ao.quantization import quantize_pt2e quantizer = pt2e_quantizer.PT2EQuantizer().set_global( - pt2e_quantizer.get_symmetric_quantization_config() + pt2e_quantizer.get_symmetric_quantization_config(is_dynamic=True) ) model = torch._export.capture_pre_autograd_graph(self.model, sample_inputs) model = quantize_pt2e.prepare_pt2e(model, quantizer) From 7d1ff3818851afef0670c3b3acc425f854b269b2 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Mon, 2 Sep 2024 10:10:49 +0900 Subject: [PATCH 36/79] Export to sub folder --- README.md | 187 +++++------------------------------ README_ORIGINAL.md | 186 ++++++++++++++++++++++++++++++++++ export_image_predictor.py | 42 ++++---- export_video_predictor.py | 35 +++++-- sam2/modeling/sam2_base.py | 12 +-- sam2/sam2_image_predictor.py | 12 +-- sam2/sam2_video_predictor.py | 2 +- 7 files changed, 280 insertions(+), 196 deletions(-) create mode 100644 README_ORIGINAL.md diff --git a/README.md b/README.md index bc13c1c9a..adf72845f 100644 --- a/README.md +++ b/README.md @@ -1,46 +1,7 @@ -# SAM 2: Segment Anything in Images and Videos +# SAM 2 Export to ONNX and TFLITE -**[AI at Meta, FAIR](https://ai.meta.com/research/)** +## Download model -[Nikhila Ravi](https://nikhilaravi.com/), [Valentin Gabeur](https://gabeur.github.io/), [Yuan-Ting Hu](https://scholar.google.com/citations?user=E8DVVYQAAAAJ&hl=en), [Ronghang Hu](https://ronghanghu.com/), [Chaitanya Ryali](https://scholar.google.com/citations?user=4LWx24UAAAAJ&hl=en), [Tengyu Ma](https://scholar.google.com/citations?user=VeTSl0wAAAAJ&hl=en), [Haitham Khedr](https://hkhedr.com/), [Roman Rädle](https://scholar.google.de/citations?user=Tpt57v0AAAAJ&hl=en), [Chloe Rolland](https://scholar.google.com/citations?hl=fr&user=n-SnMhoAAAAJ), [Laura Gustafson](https://scholar.google.com/citations?user=c8IpF9gAAAAJ&hl=en), [Eric Mintun](https://ericmintun.github.io/), [Junting Pan](https://junting.github.io/), [Kalyan Vasudev Alwala](https://scholar.google.co.in/citations?user=m34oaWEAAAAJ&hl=en), [Nicolas Carion](https://www.nicolascarion.com/), [Chao-Yuan Wu](https://chaoyuan.org/), [Ross Girshick](https://www.rossgirshick.info/), [Piotr Dollár](https://pdollar.github.io/), [Christoph Feichtenhofer](https://feichtenhofer.github.io/) - -[[`Paper`](https://ai.meta.com/research/publications/sam-2-segment-anything-in-images-and-videos/)] [[`Project`](https://ai.meta.com/sam2)] [[`Demo`](https://sam2.metademolab.com/)] [[`Dataset`](https://ai.meta.com/datasets/segment-anything-video)] [[`Blog`](https://ai.meta.com/blog/segment-anything-2)] [[`BibTeX`](#citing-sam-2)] - -![SAM 2 architecture](assets/model_diagram.png?raw=true) - -**Segment Anything Model 2 (SAM 2)** is a foundation model towards solving promptable visual segmentation in images and videos. 
We extend SAM to video by considering images as a video with a single frame. The model design is a simple transformer architecture with streaming memory for real-time video processing. We build a model-in-the-loop data engine, which improves model and data via user interaction, to collect [**our SA-V dataset**](https://ai.meta.com/datasets/segment-anything-video), the largest video segmentation dataset to date. SAM 2 trained on our data provides strong performance across a wide range of tasks and visual domains. - -![SA-V dataset](assets/sa_v_dataset.jpg?raw=true) - -## Installation - -SAM 2 needs to be installed first before use. The code requires `python>=3.10`, as well as `torch>=2.3.1` and `torchvision>=0.18.1`. Please follow the instructions [here](https://pytorch.org/get-started/locally/) to install both PyTorch and TorchVision dependencies. You can install SAM 2 on a GPU machine using: - -```bash -git clone https://github.com/facebookresearch/segment-anything-2.git - -cd segment-anything-2 & pip install -e . -``` -If you are installing on Windows, it's strongly recommended to use [Windows Subsystem for Linux (WSL)](https://learn.microsoft.com/en-us/windows/wsl/install) with Ubuntu. - -To use the SAM 2 predictor and run the example notebooks, `jupyter` and `matplotlib` are required and can be installed by: - -```bash -pip install -e ".[demo]" -``` - -Note: -1. It's recommended to create a new Python environment via [Anaconda](https://www.anaconda.com/) for this installation and install PyTorch 2.3.1 (or higher) via `pip` following https://pytorch.org/. If you have a PyTorch version lower than 2.3.1 in your current environment, the installation command above will try to upgrade it to the latest PyTorch version using `pip`. -2. The step above requires compiling a custom CUDA kernel with the `nvcc` compiler. If it isn't already available on your machine, please install the [CUDA toolkits](https://developer.nvidia.com/cuda-toolkit-archive) with a version that matches your PyTorch CUDA version. -3. If you see a message like `Failed to build the SAM 2 CUDA extension` during installation, you can ignore it and still use SAM 2 (some post-processing functionality may be limited, but it doesn't affect the results in most cases). - -Please see [`INSTALL.md`](./INSTALL.md) for FAQs on potential issues and solutions. - -## Getting Started - -### Download Checkpoints - -First, we need to download a model checkpoint. All the model checkpoints can be downloaded by running: ```bash cd checkpoints && \ @@ -48,139 +9,45 @@ cd checkpoints && \ cd .. ``` -or individually from: - -- [sam2_hiera_tiny.pt](https://dl.fbaipublicfiles.com/segment_anything_2/072824/sam2_hiera_tiny.pt) -- [sam2_hiera_small.pt](https://dl.fbaipublicfiles.com/segment_anything_2/072824/sam2_hiera_small.pt) -- [sam2_hiera_base_plus.pt](https://dl.fbaipublicfiles.com/segment_anything_2/072824/sam2_hiera_base_plus.pt) -- [sam2_hiera_large.pt](https://dl.fbaipublicfiles.com/segment_anything_2/072824/sam2_hiera_large.pt) - -Then SAM 2 can be used in a few lines as follows for image and video prediction. - -### Image prediction +## Requirements -SAM 2 has all the capabilities of [SAM](https://github.com/facebookresearch/segment-anything) on static images, and we provide image prediction APIs that closely resemble SAM for image use cases. The `SAM2ImagePredictor` class has an easy interface for image prompting. 
+onnx -```python -import torch -from sam2.build_sam import build_sam2 -from sam2.sam2_image_predictor import SAM2ImagePredictor - -checkpoint = "./checkpoints/sam2_hiera_large.pt" -model_cfg = "sam2_hiera_l.yaml" -predictor = SAM2ImagePredictor(build_sam2(model_cfg, checkpoint)) - -with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16): - predictor.set_image() - masks, _, _ = predictor.predict() ``` - -Please refer to the examples in [image_predictor_example.ipynb](./notebooks/image_predictor_example.ipynb) (also in Colab [here](https://colab.research.google.com/github/facebookresearch/segment-anything-2/blob/main/notebooks/image_predictor_example.ipynb)) for static image use cases. - -SAM 2 also supports automatic mask generation on images just like SAM. Please see [automatic_mask_generator_example.ipynb](./notebooks/automatic_mask_generator_example.ipynb) (also in Colab [here](https://colab.research.google.com/github/facebookresearch/segment-anything-2/blob/main/notebooks/automatic_mask_generator_example.ipynb)) for automatic mask generation in images. - -### Video prediction - -For promptable segmentation and tracking in videos, we provide a video predictor with APIs for example to add prompts and propagate masklets throughout a video. SAM 2 supports video inference on multiple objects and uses an inference state to keep track of the interactions in each video. - -```python -import torch -from sam2.build_sam import build_sam2_video_predictor - -checkpoint = "./checkpoints/sam2_hiera_large.pt" -model_cfg = "sam2_hiera_l.yaml" -predictor = build_sam2_video_predictor(model_cfg, checkpoint) - -with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16): - state = predictor.init_state() - - # add new prompts and instantly get the output on the same frame - frame_idx, object_ids, masks = predictor.add_new_points_or_box(state, ): - - # propagate the prompts to get masklets throughout the video - for frame_idx, object_ids, masks in predictor.propagate_in_video(state): - ... +torch 2.2.1 +onnx 1.16.2 ``` -Please refer to the examples in [video_predictor_example.ipynb](./notebooks/video_predictor_example.ipynb) (also in Colab [here](https://colab.research.google.com/github/facebookresearch/segment-anything-2/blob/main/notebooks/video_predictor_example.ipynb)) for details on how to add click or box prompts, make refinements, and track multiple objects in videos. - -## Load from 🤗 Hugging Face - -Alternatively, models can also be loaded from [Hugging Face](https://huggingface.co/models?search=facebook/sam2) (requires `pip install huggingface_hub`). - -For image prediction: - -```python -import torch -from sam2.sam2_image_predictor import SAM2ImagePredictor +tflite -predictor = SAM2ImagePredictor.from_pretrained("facebook/sam2-hiera-large") - -with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16): - predictor.set_image() - masks, _, _ = predictor.predict() ``` - -For video prediction: - -```python -import torch -from sam2.sam2_video_predictor import SAM2VideoPredictor - -predictor = SAM2VideoPredictor.from_pretrained("facebook/sam2-hiera-large") - -with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16): - state = predictor.init_state() - - # add new prompts and instantly get the output on the same frame - frame_idx, object_ids, masks = predictor.add_new_points_or_box(state, ): - - # propagate the prompts to get masklets throughout the video - for frame_idx, object_ids, masks in predictor.propagate_in_video(state): - ... 
+torch 2.4.0 +ai-edge-torch 0.2.0 ``` -## Model Description +## Run -| **Model** | **Size (M)** | **Speed (FPS)** | **SA-V test (J&F)** | **MOSE val (J&F)** | **LVOS v2 (J&F)** | -| :------------------: | :----------: | :--------------------: | :-----------------: | :----------------: | :---------------: | -| sam2_hiera_tiny | 38.9 | 47.2 | 75.0 | 70.9 | 75.3 | -| sam2_hiera_small | 46 | 43.3 (53.0 compiled\*) | 74.9 | 71.5 | 76.4 | -| sam2_hiera_base_plus | 80.8 | 34.8 (43.8 compiled\*) | 74.7 | 72.8 | 75.8 | -| sam2_hiera_large | 224.4 | 24.2 (30.2 compiled\*) | 76.0 | 74.6 | 79.8 | +onnx -\* Compile the model by setting `compile_image_encoder: True` in the config. - -## Segment Anything Video Dataset - -See [sav_dataset/README.md](sav_dataset/README.md) for details. - -## License - -The models are licensed under the [Apache 2.0 license](./LICENSE). Please refer to our research paper for more details on the models. - -## Contributing - -See [contributing](CONTRIBUTING.md) and the [code of conduct](CODE_OF_CONDUCT.md). - -## Contributors +``` +python3 export_image_predictor.py -framework onnx +python3 export_video_predictor.py -framework onnx +``` -The SAM 2 project was made possible with the help of many contributors (alphabetical): +tflite -Karen Bergan, Daniel Bolya, Alex Bosenberg, Kai Brown, Vispi Cassod, Christopher Chedeau, Ida Cheng, Luc Dahlin, Shoubhik Debnath, Rene Martinez Doehner, Grant Gardner, Sahir Gomez, Rishi Godugu, Baishan Guo, Caleb Ho, Andrew Huang, Somya Jain, Bob Kamma, Amanda Kallet, Jake Kinney, Alexander Kirillov, Shiva Koduvayur, Devansh Kukreja, Robert Kuo, Aohan Lin, Parth Malani, Jitendra Malik, Mallika Malhotra, Miguel Martin, Alexander Miller, Sasha Mitts, William Ngan, George Orlin, Joelle Pineau, Kate Saenko, Rodrick Shepard, Azita Shokrpour, David Soofian, Jonathan Torres, Jenny Truong, Sagar Vaze, Meng Wang, Claudette Ward, Pengchuan Zhang. +``` +python3 export_image_predictor.py -framework tflite +python3 export_video_predictor.py -framework tflite +``` -Third-party code: we use a GPU-based connected component algorithm adapted from [`cc_torch`](https://github.com/zsef123/Connected_components_PyTorch) (with its license in [`LICENSE_cctorch`](./LICENSE_cctorch)) as an optional post-processing step for the mask predictions. +## Artifacts -## Citing SAM 2 +``` +output/* +model/* +``` -If you use SAM 2 or the SA-V dataset in your research, please use the following BibTeX entry. 
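The exported ONNX graphs can be sanity-checked with `onnxruntime` before wiring them into the predictors. A minimal sketch, assuming the default `hiera_t` export and the `model/` output folder used by the scripts above, and assuming SAM 2's 1024x1024 preprocessing resolution:

```python
import numpy as np
import onnxruntime

# Illustrative path: image encoder exported with the default --model_id hiera_t.
session = onnxruntime.InferenceSession("model/image_encoder_hiera_t.onnx")

for inp in session.get_inputs():
    print("input ", inp.name, inp.shape, inp.type)
for out in session.get_outputs():
    print("output", out.name, out.shape)

# Run once on a dummy, correctly shaped image tensor.
dummy = np.zeros((1, 3, 1024, 1024), dtype=np.float32)
outputs = session.run(None, {session.get_inputs()[0].name: dummy})
print([o.shape for o in outputs])
```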
+## Original document -```bibtex -@article{ravi2024sam2, - title={SAM 2: Segment Anything in Images and Videos}, - author={Ravi, Nikhila and Gabeur, Valentin and Hu, Yuan-Ting and Hu, Ronghang and Ryali, Chaitanya and Ma, Tengyu and Khedr, Haitham and R{\"a}dle, Roman and Rolland, Chloe and Gustafson, Laura and Mintun, Eric and Pan, Junting and Alwala, Kalyan Vasudev and Carion, Nicolas and Wu, Chao-Yuan and Girshick, Ross and Doll{\'a}r, Piotr and Feichtenhofer, Christoph}, - journal={arXiv preprint arXiv:2408.00714}, - url={https://arxiv.org/abs/2408.00714}, - year={2024} -} -``` +[README_ORIGINAL.md](README_ORIGINAL.md) diff --git a/README_ORIGINAL.md b/README_ORIGINAL.md new file mode 100644 index 000000000..bc13c1c9a --- /dev/null +++ b/README_ORIGINAL.md @@ -0,0 +1,186 @@ +# SAM 2: Segment Anything in Images and Videos + +**[AI at Meta, FAIR](https://ai.meta.com/research/)** + +[Nikhila Ravi](https://nikhilaravi.com/), [Valentin Gabeur](https://gabeur.github.io/), [Yuan-Ting Hu](https://scholar.google.com/citations?user=E8DVVYQAAAAJ&hl=en), [Ronghang Hu](https://ronghanghu.com/), [Chaitanya Ryali](https://scholar.google.com/citations?user=4LWx24UAAAAJ&hl=en), [Tengyu Ma](https://scholar.google.com/citations?user=VeTSl0wAAAAJ&hl=en), [Haitham Khedr](https://hkhedr.com/), [Roman Rädle](https://scholar.google.de/citations?user=Tpt57v0AAAAJ&hl=en), [Chloe Rolland](https://scholar.google.com/citations?hl=fr&user=n-SnMhoAAAAJ), [Laura Gustafson](https://scholar.google.com/citations?user=c8IpF9gAAAAJ&hl=en), [Eric Mintun](https://ericmintun.github.io/), [Junting Pan](https://junting.github.io/), [Kalyan Vasudev Alwala](https://scholar.google.co.in/citations?user=m34oaWEAAAAJ&hl=en), [Nicolas Carion](https://www.nicolascarion.com/), [Chao-Yuan Wu](https://chaoyuan.org/), [Ross Girshick](https://www.rossgirshick.info/), [Piotr Dollár](https://pdollar.github.io/), [Christoph Feichtenhofer](https://feichtenhofer.github.io/) + +[[`Paper`](https://ai.meta.com/research/publications/sam-2-segment-anything-in-images-and-videos/)] [[`Project`](https://ai.meta.com/sam2)] [[`Demo`](https://sam2.metademolab.com/)] [[`Dataset`](https://ai.meta.com/datasets/segment-anything-video)] [[`Blog`](https://ai.meta.com/blog/segment-anything-2)] [[`BibTeX`](#citing-sam-2)] + +![SAM 2 architecture](assets/model_diagram.png?raw=true) + +**Segment Anything Model 2 (SAM 2)** is a foundation model towards solving promptable visual segmentation in images and videos. We extend SAM to video by considering images as a video with a single frame. The model design is a simple transformer architecture with streaming memory for real-time video processing. We build a model-in-the-loop data engine, which improves model and data via user interaction, to collect [**our SA-V dataset**](https://ai.meta.com/datasets/segment-anything-video), the largest video segmentation dataset to date. SAM 2 trained on our data provides strong performance across a wide range of tasks and visual domains. + +![SA-V dataset](assets/sa_v_dataset.jpg?raw=true) + +## Installation + +SAM 2 needs to be installed first before use. The code requires `python>=3.10`, as well as `torch>=2.3.1` and `torchvision>=0.18.1`. Please follow the instructions [here](https://pytorch.org/get-started/locally/) to install both PyTorch and TorchVision dependencies. You can install SAM 2 on a GPU machine using: + +```bash +git clone https://github.com/facebookresearch/segment-anything-2.git + +cd segment-anything-2 & pip install -e . 
+``` +If you are installing on Windows, it's strongly recommended to use [Windows Subsystem for Linux (WSL)](https://learn.microsoft.com/en-us/windows/wsl/install) with Ubuntu. + +To use the SAM 2 predictor and run the example notebooks, `jupyter` and `matplotlib` are required and can be installed by: + +```bash +pip install -e ".[demo]" +``` + +Note: +1. It's recommended to create a new Python environment via [Anaconda](https://www.anaconda.com/) for this installation and install PyTorch 2.3.1 (or higher) via `pip` following https://pytorch.org/. If you have a PyTorch version lower than 2.3.1 in your current environment, the installation command above will try to upgrade it to the latest PyTorch version using `pip`. +2. The step above requires compiling a custom CUDA kernel with the `nvcc` compiler. If it isn't already available on your machine, please install the [CUDA toolkits](https://developer.nvidia.com/cuda-toolkit-archive) with a version that matches your PyTorch CUDA version. +3. If you see a message like `Failed to build the SAM 2 CUDA extension` during installation, you can ignore it and still use SAM 2 (some post-processing functionality may be limited, but it doesn't affect the results in most cases). + +Please see [`INSTALL.md`](./INSTALL.md) for FAQs on potential issues and solutions. + +## Getting Started + +### Download Checkpoints + +First, we need to download a model checkpoint. All the model checkpoints can be downloaded by running: + +```bash +cd checkpoints && \ +./download_ckpts.sh && \ +cd .. +``` + +or individually from: + +- [sam2_hiera_tiny.pt](https://dl.fbaipublicfiles.com/segment_anything_2/072824/sam2_hiera_tiny.pt) +- [sam2_hiera_small.pt](https://dl.fbaipublicfiles.com/segment_anything_2/072824/sam2_hiera_small.pt) +- [sam2_hiera_base_plus.pt](https://dl.fbaipublicfiles.com/segment_anything_2/072824/sam2_hiera_base_plus.pt) +- [sam2_hiera_large.pt](https://dl.fbaipublicfiles.com/segment_anything_2/072824/sam2_hiera_large.pt) + +Then SAM 2 can be used in a few lines as follows for image and video prediction. + +### Image prediction + +SAM 2 has all the capabilities of [SAM](https://github.com/facebookresearch/segment-anything) on static images, and we provide image prediction APIs that closely resemble SAM for image use cases. The `SAM2ImagePredictor` class has an easy interface for image prompting. + +```python +import torch +from sam2.build_sam import build_sam2 +from sam2.sam2_image_predictor import SAM2ImagePredictor + +checkpoint = "./checkpoints/sam2_hiera_large.pt" +model_cfg = "sam2_hiera_l.yaml" +predictor = SAM2ImagePredictor(build_sam2(model_cfg, checkpoint)) + +with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16): + predictor.set_image() + masks, _, _ = predictor.predict() +``` + +Please refer to the examples in [image_predictor_example.ipynb](./notebooks/image_predictor_example.ipynb) (also in Colab [here](https://colab.research.google.com/github/facebookresearch/segment-anything-2/blob/main/notebooks/image_predictor_example.ipynb)) for static image use cases. + +SAM 2 also supports automatic mask generation on images just like SAM. Please see [automatic_mask_generator_example.ipynb](./notebooks/automatic_mask_generator_example.ipynb) (also in Colab [here](https://colab.research.google.com/github/facebookresearch/segment-anything-2/blob/main/notebooks/automatic_mask_generator_example.ipynb)) for automatic mask generation in images. 
+ +### Video prediction + +For promptable segmentation and tracking in videos, we provide a video predictor with APIs for example to add prompts and propagate masklets throughout a video. SAM 2 supports video inference on multiple objects and uses an inference state to keep track of the interactions in each video. + +```python +import torch +from sam2.build_sam import build_sam2_video_predictor + +checkpoint = "./checkpoints/sam2_hiera_large.pt" +model_cfg = "sam2_hiera_l.yaml" +predictor = build_sam2_video_predictor(model_cfg, checkpoint) + +with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16): + state = predictor.init_state() + + # add new prompts and instantly get the output on the same frame + frame_idx, object_ids, masks = predictor.add_new_points_or_box(state, ): + + # propagate the prompts to get masklets throughout the video + for frame_idx, object_ids, masks in predictor.propagate_in_video(state): + ... +``` + +Please refer to the examples in [video_predictor_example.ipynb](./notebooks/video_predictor_example.ipynb) (also in Colab [here](https://colab.research.google.com/github/facebookresearch/segment-anything-2/blob/main/notebooks/video_predictor_example.ipynb)) for details on how to add click or box prompts, make refinements, and track multiple objects in videos. + +## Load from 🤗 Hugging Face + +Alternatively, models can also be loaded from [Hugging Face](https://huggingface.co/models?search=facebook/sam2) (requires `pip install huggingface_hub`). + +For image prediction: + +```python +import torch +from sam2.sam2_image_predictor import SAM2ImagePredictor + +predictor = SAM2ImagePredictor.from_pretrained("facebook/sam2-hiera-large") + +with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16): + predictor.set_image() + masks, _, _ = predictor.predict() +``` + +For video prediction: + +```python +import torch +from sam2.sam2_video_predictor import SAM2VideoPredictor + +predictor = SAM2VideoPredictor.from_pretrained("facebook/sam2-hiera-large") + +with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16): + state = predictor.init_state() + + # add new prompts and instantly get the output on the same frame + frame_idx, object_ids, masks = predictor.add_new_points_or_box(state, ): + + # propagate the prompts to get masklets throughout the video + for frame_idx, object_ids, masks in predictor.propagate_in_video(state): + ... +``` + +## Model Description + +| **Model** | **Size (M)** | **Speed (FPS)** | **SA-V test (J&F)** | **MOSE val (J&F)** | **LVOS v2 (J&F)** | +| :------------------: | :----------: | :--------------------: | :-----------------: | :----------------: | :---------------: | +| sam2_hiera_tiny | 38.9 | 47.2 | 75.0 | 70.9 | 75.3 | +| sam2_hiera_small | 46 | 43.3 (53.0 compiled\*) | 74.9 | 71.5 | 76.4 | +| sam2_hiera_base_plus | 80.8 | 34.8 (43.8 compiled\*) | 74.7 | 72.8 | 75.8 | +| sam2_hiera_large | 224.4 | 24.2 (30.2 compiled\*) | 76.0 | 74.6 | 79.8 | + +\* Compile the model by setting `compile_image_encoder: True` in the config. + +## Segment Anything Video Dataset + +See [sav_dataset/README.md](sav_dataset/README.md) for details. + +## License + +The models are licensed under the [Apache 2.0 license](./LICENSE). Please refer to our research paper for more details on the models. + +## Contributing + +See [contributing](CONTRIBUTING.md) and the [code of conduct](CODE_OF_CONDUCT.md). 
+ +## Contributors + +The SAM 2 project was made possible with the help of many contributors (alphabetical): + +Karen Bergan, Daniel Bolya, Alex Bosenberg, Kai Brown, Vispi Cassod, Christopher Chedeau, Ida Cheng, Luc Dahlin, Shoubhik Debnath, Rene Martinez Doehner, Grant Gardner, Sahir Gomez, Rishi Godugu, Baishan Guo, Caleb Ho, Andrew Huang, Somya Jain, Bob Kamma, Amanda Kallet, Jake Kinney, Alexander Kirillov, Shiva Koduvayur, Devansh Kukreja, Robert Kuo, Aohan Lin, Parth Malani, Jitendra Malik, Mallika Malhotra, Miguel Martin, Alexander Miller, Sasha Mitts, William Ngan, George Orlin, Joelle Pineau, Kate Saenko, Rodrick Shepard, Azita Shokrpour, David Soofian, Jonathan Torres, Jenny Truong, Sagar Vaze, Meng Wang, Claudette Ward, Pengchuan Zhang. + +Third-party code: we use a GPU-based connected component algorithm adapted from [`cc_torch`](https://github.com/zsef123/Connected_components_PyTorch) (with its license in [`LICENSE_cctorch`](./LICENSE_cctorch)) as an optional post-processing step for the mask predictions. + +## Citing SAM 2 + +If you use SAM 2 or the SA-V dataset in your research, please use the following BibTeX entry. + +```bibtex +@article{ravi2024sam2, + title={SAM 2: Segment Anything in Images and Videos}, + author={Ravi, Nikhila and Gabeur, Valentin and Hu, Yuan-Ting and Hu, Ronghang and Ryali, Chaitanya and Ma, Tengyu and Khedr, Haitham and R{\"a}dle, Roman and Rolland, Chloe and Gustafson, Laura and Mintun, Eric and Pan, Junting and Alwala, Kalyan Vasudev and Carion, Nicolas and Wu, Chao-Yuan and Girshick, Ross and Doll{\'a}r, Piotr and Feichtenhofer, Christoph}, + journal={arXiv preprint arXiv:2408.00714}, + url={https://arxiv.org/abs/2408.00714}, + year={2024} +} +``` diff --git a/export_image_predictor.py b/export_image_predictor.py index 76de23f1e..980bbd4af 100644 --- a/export_image_predictor.py +++ b/export_image_predictor.py @@ -1,4 +1,14 @@ -import os +# Export image encoder and prompt encoder and mask decoder +# Implemented by ax Inc. 
2024 + +import argparse +parser = argparse.ArgumentParser() +parser.add_argument('-model_id', default="hiera_t", choices=["hiera_l", "hiera_b+", "hiera_s", "hiera_t"]) +parser.add_argument('-framework', default="onnx", choices=["onnx", "tflite"]) +parser.add_argument('-accuracy', default="float", choices=["float", "int8"]) +args = parser.parse_args() + +import os import numpy as np import torch import matplotlib.pyplot as plt @@ -7,26 +17,25 @@ from sam2.build_sam import build_sam2 from sam2.sam2_image_predictor import SAM2ImagePredictor +# output +os.makedirs("output", exist_ok=True) +os.makedirs("model", exist_ok=True) + # export settings -export_to_onnx_image_encoder = True -export_to_onnx_mask_decoder = True -import_from_onnx = True +export_to_onnx_image_encoder = args.framework == "onnx" +export_to_onnx_mask_decoder = args.framework == "onnx" +import_from_onnx = args.framework == "onnx" -export_to_tflite_image_encoder = False -export_to_tflite_mask_decoder = False -import_from_tflite = False +export_to_tflite_image_encoder = args.framework == "tflite" +export_to_tflite_mask_decoder = args.framework == "tflite" +import_from_tflite = args.framework == "tflite" -tflite_int8 = False -show = True +tflite_int8 = args.accuracy == "int8" # export PJRT_DEVICE=CPU # model settings -model_id = "hiera_l" -#model_id = "hiera_t" -#model_id = "hiera_s" -#model_id = "hiera_b+" - +model_id = args.model_id if model_id == "hiera_l": sam2_checkpoint = "./checkpoints/sam2_hiera_large.pt" model_cfg = "sam2_hiera_l.yaml" @@ -91,7 +100,7 @@ def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_l plt.title(f"Mask {i+1}, Score: {score:.3f}", fontsize=18) plt.axis('off') #plt.show() - plt.savefig(f'output{i+1}_'+model_id+'.png') + plt.savefig(f'output/output{i+1}_'+model_id+'.png') # logic image = Image.open('notebooks/images/truck.jpg') @@ -125,7 +134,6 @@ def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_l scores = scores[sorted_ind] logits = logits[sorted_ind] -if show: - show_masks(image, masks, scores, point_coords=input_point, input_labels=input_label, borders=True, model_id=model_id) +show_masks(image, masks, scores, point_coords=input_point, input_labels=input_label, borders=True, model_id=model_id) print("Success!") \ No newline at end of file diff --git a/export_video_predictor.py b/export_video_predictor.py index 8c3e75270..edb636b50 100644 --- a/export_video_predictor.py +++ b/export_video_predictor.py @@ -1,20 +1,43 @@ -import os +# Export memory attention and memory encoder +# Implemented by ax Inc. 
2024 + +import argparse +parser = argparse.ArgumentParser() +parser.add_argument('model_id', default="hiera_t", choices=["hiera_l", "hiera_b+", "hiera_s", "hiera_t"]) +parser.add_argument('framework', default="onnx", choices=["onnx", "tflite"]) +parser.add_argument('accuracy', default="float", choices=["float", "int8"]) +args = parser.parse_args() + +import os import numpy as np import torch import matplotlib.pyplot as plt from PIL import Image +# output +os.makedirs("output", exist_ok=True) +os.makedirs("model", exist_ok=True) + +# export settings +model_id = args.model_id + +export_to_onnx = args.framework=="onnx" +import_from_onnx = args.framework=="onnx" + +# import +if model_id == "hiera_l": + model_cfg = "sam2_hiera_l.yaml" +elif model_id == "hiera_t": + model_cfg = "sam2_hiera_t.yaml" +else: + raise("unknown model type") + device = torch.device("cpu") print(f"using device: {device}") from sam2.build_sam import build_sam2_video_predictor sam2_checkpoint = "./checkpoints/sam2_hiera_large.pt" -model_cfg = "sam2_hiera_l.yaml" - -export_to_onnx = True -model_id = "hiera_l" -import_from_onnx = True predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device=device) diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 924aa9d6a..ca5044c90 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -340,7 +340,7 @@ def _forward_sam_heads( raise("currently not supported mask prompt") import onnxruntime model_id = "hiera_l" - model = onnxruntime.InferenceSession("prompt_encoder_"+model_id+".onnx") + model = onnxruntime.InferenceSession("model/prompt_encoder_"+model_id+".onnx") if sam_mask_prompt is None: import numpy as np mask_input_dummy = torch.Tensor(np.zeros((1, 256, 256))) @@ -353,7 +353,7 @@ def _forward_sam_heads( dense_embeddings = torch.Tensor(dense_embeddings) dense_pe = torch.Tensor(dense_pe) - model = onnxruntime.InferenceSession("mask_decoder_"+model_id+".onnx") + model = onnxruntime.InferenceSession("model/mask_decoder_"+model_id+".onnx") print("backbone_features", backbone_features.shape) masks, iou_pred, sam_tokens_out, object_score_logits = model.run(None, { "image_embeddings":backbone_features.numpy(), @@ -714,7 +714,7 @@ def _prepare_memory_conditioned_features( print("memory_pos_embed", memory_pos_embed.shape, memory_pos_embed.dtype) print("num_obj_ptr_tokens", num_obj_ptr_tokens) torch.onnx.export( # dynamo_export - self.memory_attention, (current_vision_feats[0], memory, current_vision_pos_embeds[0], memory_pos_embed, num_obj_ptr_tokens), 'memory_attention_'+model_id+'.onnx', + self.memory_attention, (current_vision_feats[0], memory, current_vision_pos_embeds[0], memory_pos_embed, num_obj_ptr_tokens), 'model/memory_attention_'+model_id+'.onnx', input_names=["curr", "memory", "curr_pos", "memory_pos", "num_obj_ptr_tokens"], output_names=["pix_feat"], verbose=False, opset_version=17 @@ -722,7 +722,7 @@ def _prepare_memory_conditioned_features( if False:#import_from_onnx: import onnxruntime - model = onnxruntime.InferenceSession("memory_attention_"+model_id+".onnx") + model = onnxruntime.InferenceSession("model/memory_attention_"+model_id+".onnx") pix_feat_with_mem = model.run(None, {"curr":current_vision_feats[0].numpy(), "memory":memory.numpy(), "curr_pos":current_vision_pos_embeds[0].numpy(), "memory_pos":memory_pos_embed.numpy(), "num_obj_ptr_tokens":num_obj_ptr_tokens.numpy()}) if True:#not import_from_onnx: @@ -777,7 +777,7 @@ def _encode_new_memory( if export_to_onnx and not self.memory_encoder_onnx_exported: 
self.memory_encoder_onnx_exported = True torch.onnx.export( - self.memory_encoder, (pix_feat, mask_for_mem, False), 'memory_encoder_'+model_id+'.onnx', + self.memory_encoder, (pix_feat, mask_for_mem, False), 'model/memory_encoder_'+model_id+'.onnx', input_names=["pix_feat", "masks"], output_names=["vision_features", "vision_pos_enc"], verbose=False, opset_version=17 @@ -785,7 +785,7 @@ def _encode_new_memory( if import_from_onnx: import onnxruntime - model = onnxruntime.InferenceSession("memory_encoder_"+model_id+".onnx") + model = onnxruntime.InferenceSession("model/memory_encoder_"+model_id+".onnx") vision_features, vision_pos_enc = model.run(None, {"pix_feat":pix_feat.numpy(), "masks":mask_for_mem.numpy()}) maskmem_out = {"vision_features": torch.Tensor(vision_features), "vision_pos_enc": [torch.Tensor(vision_pos_enc)]} diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index 850cc118d..a5ee2587a 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -127,14 +127,14 @@ def set_image( #print("input_image", input_image.shape) self.model.forward = self.model.forward_image torch.onnx.export( - self.model, (input_image), 'image_encoder_'+model_id+'.onnx', + self.model, (input_image), 'model/image_encoder_'+model_id+'.onnx', input_names=["input_image"], output_names=["vision_features", "vision_pos_enc_0", "vision_pos_enc_1", "vision_pos_enc_2", "backbone_fpn_0", "backbone_fpn_1", "backbone_fpn_2"], verbose=False, opset_version=17 ) if import_from_onnx: - model = onnxruntime.InferenceSession("image_encoder_"+model_id+".onnx") + model = onnxruntime.InferenceSession("model/image_encoder_"+model_id+".onnx") vision_features, vision_pos_enc_0, vision_pos_enc_1, vision_pos_enc_2, backbone_fpn_0, backbone_fpn_1, backbone_fpn_2 = model.run(None, {"input_image":input_image.numpy()}) print("vision_features", vision_features.shape) print("vision_pos_enc_0", vision_pos_enc_0.shape) @@ -509,7 +509,7 @@ def _predict( #print("concat_points", concat_points.shape) #print("mask_input", mask_input.shape) torch.onnx.export( - self.model.sam_prompt_encoder, (concat_points[0], concat_points[1], mask_input_dummy, masks_enable), 'prompt_encoder_'+model_id+'.onnx', + self.model.sam_prompt_encoder, (concat_points[0], concat_points[1], mask_input_dummy, masks_enable), 'model/prompt_encoder_'+model_id+'.onnx', input_names=["coords", "labels", "masks", "masks_enable"], output_names=["sparse_embeddings", "dense_embeddings", "dense_pe"], dynamic_axes={ @@ -521,7 +521,7 @@ def _predict( ) if import_from_onnx: - model = onnxruntime.InferenceSession("prompt_encoder_"+model_id+".onnx") + model = onnxruntime.InferenceSession("model/prompt_encoder_"+model_id+".onnx") sparse_embeddings, dense_embeddings, dense_pe = model.run(None, {"coords":concat_points[0].numpy(), "labels":concat_points[1].numpy(), "masks": mask_input_dummy.numpy(), "masks_enable":masks_enable.numpy()}) sparse_embeddings = torch.Tensor(sparse_embeddings) dense_embeddings = torch.Tensor(dense_embeddings) @@ -588,7 +588,7 @@ def _predict( self.model.sam_mask_decoder.forward = self.model.sam_mask_decoder.forward_masks # multimask_outputが定数になってしまうので分離 torch.onnx.export( self.model.sam_mask_decoder, (self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, batched_mode, high_res_features[0], high_res_features[1]), - 'mask_decoder_'+model_id+'.onnx', + 'model/mask_decoder_'+model_id+'.onnx', input_names=["image_embeddings", "image_pe", "sparse_prompt_embeddings", 
"dense_prompt_embeddings", "repeat_image", "high_res_features1", "high_res_features2"], output_names=["masks", "iou_pred", "sam_tokens_out", "object_score_logits"], dynamic_axes={ @@ -598,7 +598,7 @@ def _predict( ) if import_from_onnx: - model = onnxruntime.InferenceSession("mask_decoder_"+model_id+".onnx") + model = onnxruntime.InferenceSession("model/mask_decoder_"+model_id+".onnx") masks, iou_pred, sam_tokens_out, object_score_logits = model.run(None, { "image_embeddings":self._features["image_embed"][img_idx].unsqueeze(0).numpy(), "image_pe": dense_pe.numpy(), diff --git a/sam2/sam2_video_predictor.py b/sam2/sam2_video_predictor.py index 49f1f7e15..c3425e340 100644 --- a/sam2/sam2_video_predictor.py +++ b/sam2/sam2_video_predictor.py @@ -831,7 +831,7 @@ def _get_image_feature(self, inference_state, frame_idx, batch_size, import_from print(image.shape) import onnxruntime model_id = "hiera_l" - model = onnxruntime.InferenceSession("image_encoder_"+model_id+".onnx") + model = onnxruntime.InferenceSession("model/image_encoder_"+model_id+".onnx") vision_features, vision_pos_enc_0, vision_pos_enc_1, vision_pos_enc_2, backbone_fpn_0, backbone_fpn_1, backbone_fpn_2 = model.run(None, {"input_image":image.numpy()}) else: print("begin image encoder torch") From a224ae5e7e13d2fc1fc2fa099ff259e4d6141a53 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Mon, 2 Sep 2024 10:14:07 +0900 Subject: [PATCH 37/79] Update checkpoint --- export_video_predictor.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/export_video_predictor.py b/export_video_predictor.py index edb636b50..b3a9dff80 100644 --- a/export_video_predictor.py +++ b/export_video_predictor.py @@ -3,9 +3,9 @@ import argparse parser = argparse.ArgumentParser() -parser.add_argument('model_id', default="hiera_t", choices=["hiera_l", "hiera_b+", "hiera_s", "hiera_t"]) -parser.add_argument('framework', default="onnx", choices=["onnx", "tflite"]) -parser.add_argument('accuracy', default="float", choices=["float", "int8"]) +parser.add_argument('-model_id', default="hiera_t", choices=["hiera_l", "hiera_b+", "hiera_s", "hiera_t"]) +parser.add_argument('-framework', default="onnx", choices=["onnx", "tflite"]) +parser.add_argument('-accuracy', default="float", choices=["float", "int8"]) args = parser.parse_args() import os @@ -26,9 +26,17 @@ # import if model_id == "hiera_l": - model_cfg = "sam2_hiera_l.yaml" + model_cfg = "sam2_hiera_l.yaml" + sam2_checkpoint = "./checkpoints/sam2_hiera_large.pt" +elif model_id == "hiera_s": + model_cfg = "sam2_hiera_s.yaml" + sam2_checkpoint = "./checkpoints/sam2_hiera_small.pt" +elif model_id == "hiera_b+": + model_cfg = "sam2_hiera_b+.yaml" + sam2_checkpoint = "./checkpoints/sam2_hiera_base+.pt" elif model_id == "hiera_t": model_cfg = "sam2_hiera_t.yaml" + sam2_checkpoint = "./checkpoints/sam2_hiera_tiny.pt" else: raise("unknown model type") @@ -37,8 +45,6 @@ from sam2.build_sam import build_sam2_video_predictor -sam2_checkpoint = "./checkpoints/sam2_hiera_large.pt" - predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device=device) From 7506a21a940557dd482cc0bd27791d84ebc0bbfc Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Mon, 2 Sep 2024 10:18:56 +0900 Subject: [PATCH 38/79] Excport memory encoder --- README.md | 8 ++++---- export_image_predictor.py | 6 +++--- export_video_predictor.py | 12 +++++++----- sam2/modeling/sam2_base.py | 6 +++--- 4 files changed, 17 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index adf72845f..9b9557833 
100644 --- a/README.md +++ b/README.md @@ -30,15 +30,15 @@ ai-edge-torch 0.2.0 onnx ``` -python3 export_image_predictor.py -framework onnx -python3 export_video_predictor.py -framework onnx +python3 export_image_predictor.py --framework onnx +python3 export_video_predictor.py --framework onnx ``` tflite ``` -python3 export_image_predictor.py -framework tflite -python3 export_video_predictor.py -framework tflite +python3 export_image_predictor.py --framework tflite +python3 export_video_predictor.py --framework tflite ``` ## Artifacts diff --git a/export_image_predictor.py b/export_image_predictor.py index 980bbd4af..7b6ac6ca1 100644 --- a/export_image_predictor.py +++ b/export_image_predictor.py @@ -3,9 +3,9 @@ import argparse parser = argparse.ArgumentParser() -parser.add_argument('-model_id', default="hiera_t", choices=["hiera_l", "hiera_b+", "hiera_s", "hiera_t"]) -parser.add_argument('-framework', default="onnx", choices=["onnx", "tflite"]) -parser.add_argument('-accuracy', default="float", choices=["float", "int8"]) +parser.add_argument('--model_id', default="hiera_t", choices=["hiera_l", "hiera_b+", "hiera_s", "hiera_t"]) +parser.add_argument('--framework', default="onnx", choices=["onnx", "tflite"]) +parser.add_argument('--accuracy', default="float", choices=["float", "int8"]) args = parser.parse_args() import os diff --git a/export_video_predictor.py b/export_video_predictor.py index b3a9dff80..872abf675 100644 --- a/export_video_predictor.py +++ b/export_video_predictor.py @@ -3,9 +3,9 @@ import argparse parser = argparse.ArgumentParser() -parser.add_argument('-model_id', default="hiera_t", choices=["hiera_l", "hiera_b+", "hiera_s", "hiera_t"]) -parser.add_argument('-framework', default="onnx", choices=["onnx", "tflite"]) -parser.add_argument('-accuracy', default="float", choices=["float", "int8"]) +parser.add_argument('--model_id', default="hiera_t", choices=["hiera_l", "hiera_b+", "hiera_s", "hiera_t"]) +parser.add_argument('--framework', default="onnx", choices=["onnx", "tflite"]) +parser.add_argument('--accuracy', default="float", choices=["float", "int8"]) args = parser.parse_args() import os @@ -109,7 +109,8 @@ def show_box(box, ax): plt.imshow(Image.open(os.path.join(video_dir, frame_names[ann_frame_idx]))) show_points(points, labels, plt.gca()) show_mask((out_mask_logits[0] > 0.0).cpu().numpy(), plt.gca(), obj_id=out_obj_ids[0]) -plt.show() +#plt.show() +plt.savefig(f'output/video_'+model_id+'.png') # run propagation throughout the video and collect the results in a dict video_segments = {} # video_segments contains the per-frame segmentation results @@ -128,4 +129,5 @@ def show_box(box, ax): plt.imshow(Image.open(os.path.join(video_dir, frame_names[out_frame_idx]))) for out_obj_id, out_mask in video_segments[out_frame_idx].items(): show_mask(out_mask, plt.gca(), obj_id=out_obj_id) - plt.show() + #plt.show() + plt.savefig(f'output/video{out_frame_idx+1}_'+model_id+'.png') diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index ca5044c90..44dcabfe6 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -706,7 +706,7 @@ def _prepare_memory_conditioned_features( memory = torch.cat(to_cat_memory, dim=0) memory_pos_embed = torch.cat(to_cat_memory_pos_embed, dim=0) - if False:#export_to_onnx and not self.memory_attention_onnx_exported: + if export_to_onnx and not self.memory_attention_onnx_exported: self.memory_attention_onnx_exported = True print("current_vision_feats", current_vision_feats[0].shape, current_vision_feats[0].dtype) 
print("memory", memory.shape, memory.dtype) @@ -720,12 +720,12 @@ def _prepare_memory_conditioned_features( verbose=False, opset_version=17 ) - if False:#import_from_onnx: + if import_from_onnx: import onnxruntime model = onnxruntime.InferenceSession("model/memory_attention_"+model_id+".onnx") pix_feat_with_mem = model.run(None, {"curr":current_vision_feats[0].numpy(), "memory":memory.numpy(), "curr_pos":current_vision_pos_embeds[0].numpy(), "memory_pos":memory_pos_embed.numpy(), "num_obj_ptr_tokens":num_obj_ptr_tokens.numpy()}) - if True:#not import_from_onnx: + if not import_from_onnx: pix_feat_with_mem = self.memory_attention( curr=current_vision_feats, curr_pos=current_vision_pos_embeds, From ba20e684e15b6425afe5c0c5db176b2b893a013c Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Mon, 2 Sep 2024 10:42:03 +0900 Subject: [PATCH 39/79] Implement matmul version of memory attention --- sam2/modeling/position_encoding.py | 40 ++++++++++++++++++++++++++++++ sam2/modeling/sam/transformer.py | 35 +++++++++++++++++++++----- sam2/modeling/sam2_base.py | 2 +- 3 files changed, 70 insertions(+), 7 deletions(-) diff --git a/sam2/modeling/position_encoding.py b/sam2/modeling/position_encoding.py index 52ac22674..3d18dee1a 100644 --- a/sam2/modeling/position_encoding.py +++ b/sam2/modeling/position_encoding.py @@ -219,3 +219,43 @@ def apply_rotary_enc( freqs_cis = freqs_cis.unsqueeze(2).expand(-1, -1, r, -1, -1).flatten(2, 3) xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3) return xq_out.type_as(xq).to(xq.device), xk_out.type_as(xk).to(xk.device) + + +# Matrix version of rotary enc +# https://github.com/facebookresearch/segment-anything-2/issues/186 + +def get_rotation_matrices(dim, end_x, end_y, theta=10000.0, device=None, dtype=None): + + powers = torch.linspace(0, 1, 1 + (dim // 4), device=device, dtype=dtype)[:-1] + base_angles = torch.pow(theta, -powers) + + end_x, end_y = int(end_x), int(end_y) + x_mults = torch.arange(end_x, device=device, dtype=dtype).repeat(end_y) + y_mults = torch.arange(end_y, device=device, dtype=dtype).repeat_interleave(end_x) + angles_xy = (torch.outer(mults, base_angles) for mults in (x_mults, y_mults)) + + rotmats_list = [] + for angles in angles_xy: + sterm, cterm = torch.sin(-angles), torch.cos(-angles) + rotmat = torch.stack( + [ + torch.stack([cterm, -sterm], dim=-1), + torch.stack([sterm, cterm], dim=-1), + ], + dim=-1, + ) + rotmats_list.append(rotmat) + + return torch.cat(rotmats_list, dim=1).unsqueeze(0).unsqueeze(0) + + +def apply_rotary_matenc(xq, xk, rotmats, repeat_freqs_k=False): + + bq, hq, nq, cq = xq.shape + bk, hk, nk, ck = xk.shape + + q_out = torch.matmul(rotmats, xq.reshape(bq, hq, nq, cq // 2, 2, 1)).flatten(3) + k_rotmat = rotmats.repeat(1, 1, nk // nq, 1, 1, 1) if repeat_freqs_k else rotmats + k_out = torch.matmul(k_rotmat, xk.reshape(bk, hk, nk, ck // 2, 2, 1)).flatten(3) + + return q_out, k_out diff --git a/sam2/modeling/sam/transformer.py b/sam2/modeling/sam/transformer.py index 3df4843d9..b836c5245 100644 --- a/sam2/modeling/sam/transformer.py +++ b/sam2/modeling/sam/transformer.py @@ -15,6 +15,7 @@ from torch import nn, Tensor from sam2.modeling.position_encoding import apply_rotary_enc, compute_axial_cis +from sam2.modeling.position_encoding import apply_rotary_matenc, get_rotation_matrices from sam2.modeling.sam2_utils import MLP from sam2.utils.misc import get_sdpa_settings @@ -24,6 +25,8 @@ # A fallback setting to allow all available kernels if Flash Attention fails ALLOW_ALL_KERNELS = False +# Use matrix version of rotrary enc 
+USE_MAT_ROTARY_ENC = True def sdp_kernel_context(dropout_p): """ @@ -309,6 +312,11 @@ def __init__( self.freqs_cis = freqs_cis self.rope_k_repeat = rope_k_repeat + if USE_MAT_ROTARY_ENC: + rotmats = get_rotation_matrices(dim=self.internal_dim // self.num_heads, end_x=feat_sizes[0], end_y=feat_sizes[1], theta=rope_theta) + self.rotmats = rotmats + self.rope_theta = rope_theta + def forward( self, q: Tensor, k: Tensor, v: Tensor, num_k_exclude_rope: int = 0 ) -> Tensor: @@ -324,19 +332,34 @@ def forward( # Apply rotary position encoding w = h = math.sqrt(q.shape[-2]) + self.freqs_cis = self.freqs_cis.to(q.device) if self.freqs_cis.shape[0] != q.shape[-2]: self.freqs_cis = self.compute_cis(end_x=w, end_y=h).to(q.device) + + if USE_MAT_ROTARY_ENC: + self.rotmats = self.rotmats.to(q.device) + if self.rotmats.shape[0] != q.shape[-2]: + self.rotmats = get_rotation_matrices(dim=self.internal_dim // self.num_heads, end_x=w, end_y=h, theta=self.rope_theta) + if q.shape[-2] != k.shape[-2]: assert self.rope_k_repeat num_k_rope = k.size(-2) - num_k_exclude_rope - q, k[:, :, :num_k_rope] = apply_rotary_enc( - q, - k[:, :, :num_k_rope], - freqs_cis=self.freqs_cis, - repeat_freqs_k=self.rope_k_repeat, - ) + if USE_MAT_ROTARY_ENC: + q, k[:, :, :num_k_rope] = apply_rotary_matenc( + q, + k[:, :, :num_k_rope], + rotmats=self.rotmats, + repeat_freqs_k=self.rope_k_repeat, + ) + else: + q, k[:, :, :num_k_rope] = apply_rotary_enc( + q, + k[:, :, :num_k_rope], + freqs_cis=self.freqs_cis, + repeat_freqs_k=self.rope_k_repeat, + ) dropout_p = self.dropout_p if self.training else 0.0 # Attention diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 44dcabfe6..1ffa4d045 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -723,7 +723,7 @@ def _prepare_memory_conditioned_features( if import_from_onnx: import onnxruntime model = onnxruntime.InferenceSession("model/memory_attention_"+model_id+".onnx") - pix_feat_with_mem = model.run(None, {"curr":current_vision_feats[0].numpy(), "memory":memory.numpy(), "curr_pos":current_vision_pos_embeds[0].numpy(), "memory_pos":memory_pos_embed.numpy(), "num_obj_ptr_tokens":num_obj_ptr_tokens.numpy()}) + pix_feat_with_mem = model.run(None, {"curr":current_vision_feats[0].numpy(), "memory":memory.numpy(), "curr_pos":current_vision_pos_embeds[0].numpy(), "memory_pos":memory_pos_embed.numpy(), "num_obj_ptr_tokens":num_obj_ptr_tokens}) if not import_from_onnx: pix_feat_with_mem = self.memory_attention( From 0185597b811b1c75f4fc08d367015243e2a45033 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Mon, 2 Sep 2024 10:53:43 +0900 Subject: [PATCH 40/79] Fix inference code --- sam2/modeling/sam2_base.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 1ffa4d045..609a3b7e8 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -717,13 +717,20 @@ def _prepare_memory_conditioned_features( self.memory_attention, (current_vision_feats[0], memory, current_vision_pos_embeds[0], memory_pos_embed, num_obj_ptr_tokens), 'model/memory_attention_'+model_id+'.onnx', input_names=["curr", "memory", "curr_pos", "memory_pos", "num_obj_ptr_tokens"], output_names=["pix_feat"], + dynamic_axes={ + 'memory': {0: 'n'}, + 'memory_pos': {0: 'n'} + }, verbose=False, opset_version=17 ) if import_from_onnx: import onnxruntime model = onnxruntime.InferenceSession("model/memory_attention_"+model_id+".onnx") - pix_feat_with_mem = model.run(None, 
{"curr":current_vision_feats[0].numpy(), "memory":memory.numpy(), "curr_pos":current_vision_pos_embeds[0].numpy(), "memory_pos":memory_pos_embed.numpy(), "num_obj_ptr_tokens":num_obj_ptr_tokens}) + import numpy as np + num_obj_ptr_tokens_numpy = np.array((num_obj_ptr_tokens)).astype(np.int64) + pix_feat_with_mem = model.run(None, {"curr":current_vision_feats[0].numpy(), "memory":memory.numpy(), "curr_pos":current_vision_pos_embeds[0].numpy(), "memory_pos":memory_pos_embed.numpy(), "num_obj_ptr_tokens":num_obj_ptr_tokens_numpy}) + pix_feat_with_mem = torch.Tensor(pix_feat_with_mem[0]) if not import_from_onnx: pix_feat_with_mem = self.memory_attention( From e49a1e7a0237bb700f9e8a1017a9764a3f7095b4 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Mon, 2 Sep 2024 11:05:43 +0900 Subject: [PATCH 41/79] Test rotary enc --- README.md | 8 ++++++++ export_image_predictor.py | 2 +- export_video_predictor.py | 2 +- sam2/modeling/sam2_base.py | 13 ++++++++++--- 4 files changed, 20 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 9b9557833..a07a98ee1 100644 --- a/README.md +++ b/README.md @@ -41,6 +41,14 @@ python3 export_image_predictor.py --framework tflite python3 export_video_predictor.py --framework tflite ``` +## Test + +Replacing the complex tensor of RotaryEnc with matmul. To test this behavior, you can also run it with torch. + +``` +python3 export_video_predictor.py --framework torch +``` + ## Artifacts ``` diff --git a/export_image_predictor.py b/export_image_predictor.py index 7b6ac6ca1..162dc4490 100644 --- a/export_image_predictor.py +++ b/export_image_predictor.py @@ -4,7 +4,7 @@ import argparse parser = argparse.ArgumentParser() parser.add_argument('--model_id', default="hiera_t", choices=["hiera_l", "hiera_b+", "hiera_s", "hiera_t"]) -parser.add_argument('--framework', default="onnx", choices=["onnx", "tflite"]) +parser.add_argument('--framework', default="onnx", choices=["onnx", "tflite", "torch"]) parser.add_argument('--accuracy', default="float", choices=["float", "int8"]) args = parser.parse_args() diff --git a/export_video_predictor.py b/export_video_predictor.py index 872abf675..a890715e7 100644 --- a/export_video_predictor.py +++ b/export_video_predictor.py @@ -4,7 +4,7 @@ import argparse parser = argparse.ArgumentParser() parser.add_argument('--model_id', default="hiera_t", choices=["hiera_l", "hiera_b+", "hiera_s", "hiera_t"]) -parser.add_argument('--framework', default="onnx", choices=["onnx", "tflite"]) +parser.add_argument('--framework', default="onnx", choices=["onnx", "tflite", "torch"]) parser.add_argument('--accuracy', default="float", choices=["float", "int8"]) args = parser.parse_args() diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 609a3b7e8..82b637492 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -375,12 +375,19 @@ def _forward_sam_heads( else: print("begin mask decoder torch") print("backbone_features", backbone_features.shape) - sparse_embeddings, dense_embeddings = self.sam_prompt_encoder.forward_normal( + if sam_mask_prompt is None: + import numpy as np + mask_input_dummy = torch.Tensor(np.zeros((1, 256, 256))) + masks_enable = torch.tensor([0], dtype=torch.int) + else: + mask_input_dummy = sam_mask_prompt + masks_enable = torch.tensor([1], dtype=torch.int) + sparse_embeddings, dense_embeddings, dense_pe = self.sam_prompt_encoder.forward( coords=sam_point_coords, labels=sam_point_labels, - masks=sam_mask_prompt, + masks=mask_input_dummy, + masks_enable=masks_enable ) - dense_pe = 
self.sam_prompt_encoder.get_dense_pe() ( low_res_multimasks, From 249c7a14877f018c3a8e26fd64703ef9eb0670e7 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Mon, 2 Sep 2024 11:11:22 +0900 Subject: [PATCH 42/79] Connect model_id --- export_video_predictor.py | 2 +- sam2/modeling/sam2_base.py | 14 ++++++++------ sam2/sam2_video_predictor.py | 19 ++++++++++--------- 3 files changed, 19 insertions(+), 16 deletions(-) diff --git a/export_video_predictor.py b/export_video_predictor.py index a890715e7..fa92a52c7 100644 --- a/export_video_predictor.py +++ b/export_video_predictor.py @@ -81,7 +81,7 @@ def show_box(box, ax): ] frame_names.sort(key=lambda p: int(os.path.splitext(p)[0])) -inference_state = predictor.init_state(video_path=video_dir, import_from_onnx=import_from_onnx) +inference_state = predictor.init_state(video_path=video_dir, import_from_onnx=import_from_onnx, model_id=model_id) predictor.reset_state(inference_state) ann_frame_idx = 0 # the frame index we interact with diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 82b637492..480eb848d 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -257,7 +257,8 @@ def _forward_sam_heads( mask_inputs=None, high_res_features=None, multimask_output=False, - import_from_onnx=False + import_from_onnx=False, + model_id=None ): """ Forward SAM prompt encoders and mask heads. @@ -339,7 +340,6 @@ def _forward_sam_heads( if sam_mask_prompt != None: raise("currently not supported mask prompt") import onnxruntime - model_id = "hiera_l" model = onnxruntime.InferenceSession("model/prompt_encoder_"+model_id+".onnx") if sam_mask_prompt is None: import numpy as np @@ -467,7 +467,7 @@ def _forward_sam_heads( object_score_logits, ) - def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs, import_from_onnx): + def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs, import_from_onnx, model_id): """ Directly turn binary `mask_inputs` into a output mask logits without using SAM. (same input and output shapes as in _forward_sam_heads above). @@ -496,7 +496,8 @@ def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs, backbone_features=backbone_features, mask_inputs=self.mask_downsample(mask_inputs_float), high_res_features=high_res_features, - import_from_onnx=import_from_onnx + import_from_onnx=import_from_onnx, + model_id=model_id ) # In this method, we are treating mask_input as output, e.g. 
using it directly to create spatial mem; # Below, we follow the same design axiom to use mask_input to decide if obj appears or not instead of relying @@ -853,7 +854,7 @@ def track_step( pix_feat = current_vision_feats[-1].permute(1, 2, 0) pix_feat = pix_feat.view(-1, self.hidden_dim, *feat_sizes[-1]) sam_outputs = self._use_mask_as_output( - pix_feat, high_res_features, mask_inputs, import_from_onnx=import_from_onnx + pix_feat, high_res_features, mask_inputs, import_from_onnx=import_from_onnx, model_id=model_id ) else: # fused the visual feature with previous memory features in the memory bank @@ -884,7 +885,8 @@ def track_step( mask_inputs=mask_inputs, high_res_features=high_res_features, multimask_output=multimask_output, - import_from_onnx=import_from_onnx + import_from_onnx=import_from_onnx, + model_id=model_id ) ( _, diff --git a/sam2/sam2_video_predictor.py b/sam2/sam2_video_predictor.py index c3425e340..d5420675f 100644 --- a/sam2/sam2_video_predictor.py +++ b/sam2/sam2_video_predictor.py @@ -43,7 +43,8 @@ def init_state( offload_video_to_cpu=False, offload_state_to_cpu=False, async_loading_frames=False, - import_from_onnx=False + import_from_onnx=False, + model_id=None ): """Initialize an inference state.""" compute_device = self.device # device of the model @@ -104,7 +105,7 @@ def init_state( inference_state["tracking_has_started"] = False inference_state["frames_already_tracked"] = {} # Warm up the visual backbone and cache the image feature on frame 0 - self._get_image_feature(inference_state, frame_idx=0, batch_size=1, import_from_onnx=import_from_onnx) + self._get_image_feature(inference_state, frame_idx=0, batch_size=1, import_from_onnx=import_from_onnx, model_id=model_id) return inference_state @classmethod @@ -549,7 +550,8 @@ def _consolidate_temp_output_across_obj( batch_size=batch_size, high_res_masks=high_res_masks, is_mask_from_pts=True, # these frames are what the user interacted with - import_from_onnx=import_from_onnx + import_from_onnx=import_from_onnx, + model_id=model_id ) consolidated_out["maskmem_features"] = maskmem_features consolidated_out["maskmem_pos_enc"] = maskmem_pos_enc @@ -573,7 +575,7 @@ def _get_empty_mask_ptr(self, inference_state, frame_idx, import_from_onnx, expo current_vision_feats, current_vision_pos_embeds, feat_sizes, - ) = self._get_image_feature(inference_state, frame_idx, batch_size, import_from_onnx=import_from_onnx) + ) = self._get_image_feature(inference_state, frame_idx, batch_size, import_from_onnx=import_from_onnx, model_id=model_id) # Feed the empty mask and image feature above to get a dummy object pointer current_out = self.track_step( @@ -816,7 +818,7 @@ def _reset_tracking_results(self, inference_state): inference_state["tracking_has_started"] = False inference_state["frames_already_tracked"].clear() - def _get_image_feature(self, inference_state, frame_idx, batch_size, import_from_onnx = False): + def _get_image_feature(self, inference_state, frame_idx, batch_size, import_from_onnx = False, model_id = None): """Compute the image features on a given frame.""" # Look up in the cache first image, backbone_out = inference_state["cached_features"].get( @@ -830,7 +832,6 @@ def _get_image_feature(self, inference_state, frame_idx, batch_size, import_from print("begin image encoder onnx") print(image.shape) import onnxruntime - model_id = "hiera_l" model = onnxruntime.InferenceSession("model/image_encoder_"+model_id+".onnx") vision_features, vision_pos_enc_0, vision_pos_enc_1, vision_pos_enc_2, backbone_fpn_0, backbone_fpn_1, 
backbone_fpn_2 = model.run(None, {"input_image":image.numpy()}) else: @@ -889,7 +890,7 @@ def _run_single_frame_inference( current_vision_feats, current_vision_pos_embeds, feat_sizes, - ) = self._get_image_feature(inference_state, frame_idx, batch_size, import_from_onnx=import_from_onnx) + ) = self._get_image_feature(inference_state, frame_idx, batch_size, import_from_onnx=import_from_onnx, model_id=model_id) # point and mask should not appear as input simultaneously on the same frame assert point_inputs is None or mask_inputs is None @@ -938,7 +939,7 @@ def _run_single_frame_inference( return compact_current_out, pred_masks_gpu def _run_memory_encoder( - self, inference_state, frame_idx, batch_size, high_res_masks, is_mask_from_pts, import_from_onnx + self, inference_state, frame_idx, batch_size, high_res_masks, is_mask_from_pts, import_from_onnx, model_id ): """ Run the memory encoder on `high_res_masks`. This is usually after applying @@ -947,7 +948,7 @@ def _run_memory_encoder( """ # Retrieve correct image features _, _, current_vision_feats, _, feat_sizes = self._get_image_feature( - inference_state, frame_idx, batch_size, import_from_onnx=import_from_onnx + inference_state, frame_idx, batch_size, import_from_onnx=import_from_onnx, model_id=model_id ) maskmem_features, maskmem_pos_enc = self._encode_new_memory( current_vision_feats=current_vision_feats, From a22a6187750d276f95137ff78e76c82b85c124ea Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Mon, 2 Sep 2024 11:35:03 +0900 Subject: [PATCH 43/79] Implement tflite import --- export_video_predictor.py | 6 ++- sam2/modeling/sam2_base.py | 81 +++++++++++++++++++++++++++++++----- sam2/sam2_video_predictor.py | 66 +++++++++++++++++++++++------ 3 files changed, 128 insertions(+), 25 deletions(-) diff --git a/export_video_predictor.py b/export_video_predictor.py index fa92a52c7..08b815d5d 100644 --- a/export_video_predictor.py +++ b/export_video_predictor.py @@ -23,6 +23,8 @@ export_to_onnx = args.framework=="onnx" import_from_onnx = args.framework=="onnx" +export_to_tflite = args.framework=="tflite" +import_from_tflite = args.framework=="tflite" # import if model_id == "hiera_l": @@ -81,7 +83,7 @@ def show_box(box, ax): ] frame_names.sort(key=lambda p: int(os.path.splitext(p)[0])) -inference_state = predictor.init_state(video_path=video_dir, import_from_onnx=import_from_onnx, model_id=model_id) +inference_state = predictor.init_state(video_path=video_dir, import_from_onnx=import_from_onnx, import_from_tflite=import_from_tflite, model_id=model_id) predictor.reset_state(inference_state) ann_frame_idx = 0 # the frame index we interact with @@ -114,7 +116,7 @@ def show_box(box, ax): # run propagation throughout the video and collect the results in a dict video_segments = {} # video_segments contains the per-frame segmentation results -for out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(inference_state, import_from_onnx=import_from_onnx, export_to_onnx=export_to_onnx, model_id=model_id): +for out_frame_idx, out_obj_ids, out_mask_logits in predictor.propagate_in_video(inference_state, import_from_onnx=import_from_onnx, export_to_onnx=export_to_onnx, import_from_tflite=import_from_tflite, export_to_tflite=export_to_tflite, model_id=model_id): video_segments[out_frame_idx] = { out_obj_id: (out_mask_logits[i] > 0.0).cpu().numpy() for i, out_obj_id in enumerate(out_obj_ids) diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 480eb848d..9a0072db5 100644 --- a/sam2/modeling/sam2_base.py +++ 
b/sam2/modeling/sam2_base.py @@ -258,6 +258,7 @@ def _forward_sam_heads( high_res_features=None, multimask_output=False, import_from_onnx=False, + import_from_tflite=False, model_id=None ): """ @@ -335,19 +336,20 @@ def _forward_sam_heads( # a learned `no_mask_embed` to indicate no mask input in this case). sam_mask_prompt = None + if sam_mask_prompt is None: + import numpy as np + mask_input_dummy = torch.Tensor(np.zeros((1, 256, 256))) + masks_enable = torch.tensor([0], dtype=torch.int) + else: + mask_input_dummy = sam_mask_prompt + masks_enable = torch.tensor([1], dtype=torch.int) + if import_from_onnx: print("begin mask decoder onnx") if sam_mask_prompt != None: raise("currently not supported mask prompt") import onnxruntime model = onnxruntime.InferenceSession("model/prompt_encoder_"+model_id+".onnx") - if sam_mask_prompt is None: - import numpy as np - mask_input_dummy = torch.Tensor(np.zeros((1, 256, 256))) - masks_enable = torch.tensor([0], dtype=torch.int) - else: - mask_input_dummy = sam_mask_prompt - masks_enable = torch.tensor([1], dtype=torch.int) sparse_embeddings, dense_embeddings, dense_pe = model.run(None, {"coords":sam_point_coords.numpy(), "labels":sam_point_labels.numpy(), "masks":mask_input_dummy.numpy(), "masks_enable":masks_enable.numpy()}) sparse_embeddings = torch.Tensor(sparse_embeddings) dense_embeddings = torch.Tensor(dense_embeddings) @@ -372,7 +374,63 @@ def _forward_sam_heads( print(ious.shape) print(sam_output_tokens.shape) print(object_score_logits.shape) - else: + + if import_from_tflite: + import tensorflow as tf + prompt_encoder = tf.lite.Interpreter(model_path="model/prompt_encoder_"+model_id+".tflite") + mask_decoder = tf.lite.Interpreter(model_path="model/mask_decoder_"+model_id+".tflite") + + prompt_encoder.allocate_tensors() + input_details = prompt_encoder.get_input_details() + output_details = prompt_encoder.get_output_details() + prompt_encoder.resize_tensor_input( + input_details[2]["index"], + [1, sam_point_coords.shape[1], 2] + ) + prompt_encoder.allocate_tensors() + + prompt_encoder.set_tensor(input_details[2]["index"], sam_point_coords) + prompt_encoder.set_tensor(input_details[3]["index"], sam_point_labels) + prompt_encoder.set_tensor(input_details[0]["index"], mask_input_dummy) + prompt_encoder.set_tensor(input_details[1]["index"], masks_enable) + prompt_encoder.invoke() + + sparse_embeddings = prompt_encoder.get_tensor(output_details[1]["index"]) + dense_embeddings = prompt_encoder.get_tensor(output_details[2]["index"]) + dense_pe = prompt_encoder.get_tensor(output_details[0]["index"]) + + mask_decoder.allocate_tensors() + input_details = mask_decoder.get_input_details() + output_details = mask_decoder.get_output_details() + mask_decoder.resize_tensor_input( + input_details[1]["index"], + [1, sparse_embeddings.shape[1], 256] + ) + mask_decoder.allocate_tensors() + + batched_mode = False + + mask_decoder.set_tensor(input_details[3]["index"], backbone_features.numpy()) + mask_decoder.set_tensor(input_details[6]["index"], dense_pe.numpy()) + mask_decoder.set_tensor(input_details[1]["index"], sparse_embeddings.numpy()) + mask_decoder.set_tensor(input_details[2]["index"], dense_embeddings.numpy()) + mask_decoder.set_tensor(input_details[5]["index"], batched_mode) + mask_decoder.set_tensor(input_details[0]["index"], high_res_features[0].numpy()) + mask_decoder.set_tensor(input_details[4]["index"], high_res_features[1].numpy()) + mask_decoder.invoke() + + masks = mask_decoder.get_tensor(output_details[2]["index"]) + iou_pred = 
mask_decoder.get_tensor(output_details[0]["index"]) + sam_tokens_out = mask_decoder.get_tensor(output_details[3]["index"]) + object_score_logits = mask_decoder.get_tensor(output_details[1]["index"]) + + low_res_multimasks, ious, sam_output_tokens, object_score_logits = self.sam_mask_decoder.forward_postprocess(masks, iou_pred, sam_tokens_out, object_score_logits, multimask_output) + print(low_res_multimasks.shape) + print(ious.shape) + print(sam_output_tokens.shape) + print(object_score_logits.shape) + + if not import_from_onnx and not import_from_tflite: print("begin mask decoder torch") print("backbone_features", backbone_features.shape) if sam_mask_prompt is None: @@ -467,7 +525,7 @@ def _forward_sam_heads( object_score_logits, ) - def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs, import_from_onnx, model_id): + def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs, import_from_onnx, import_from_tflite, model_id): """ Directly turn binary `mask_inputs` into a output mask logits without using SAM. (same input and output shapes as in _forward_sam_heads above). @@ -497,6 +555,7 @@ def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs, mask_inputs=self.mask_downsample(mask_inputs_float), high_res_features=high_res_features, import_from_onnx=import_from_onnx, + import_from_tflite=import_from_tflite, model_id=model_id ) # In this method, we are treating mask_input as output, e.g. using it directly to create spatial mem; @@ -837,6 +896,8 @@ def track_step( # ONNX Export export_to_onnx=False, import_from_onnx=False, + export_to_tflite=False, + import_from_tflite=False, model_id=None ): current_out = {"point_inputs": point_inputs, "mask_inputs": mask_inputs} @@ -854,7 +915,7 @@ def track_step( pix_feat = current_vision_feats[-1].permute(1, 2, 0) pix_feat = pix_feat.view(-1, self.hidden_dim, *feat_sizes[-1]) sam_outputs = self._use_mask_as_output( - pix_feat, high_res_features, mask_inputs, import_from_onnx=import_from_onnx, model_id=model_id + pix_feat, high_res_features, mask_inputs, import_from_onnx=import_from_onnx, import_from_tflite=import_from_tflite, model_id=model_id ) else: # fused the visual feature with previous memory features in the memory bank diff --git a/sam2/sam2_video_predictor.py b/sam2/sam2_video_predictor.py index d5420675f..e5e8ea6ed 100644 --- a/sam2/sam2_video_predictor.py +++ b/sam2/sam2_video_predictor.py @@ -44,6 +44,7 @@ def init_state( offload_state_to_cpu=False, async_loading_frames=False, import_from_onnx=False, + import_from_tflite=False, model_id=None ): """Initialize an inference state.""" @@ -105,7 +106,7 @@ def init_state( inference_state["tracking_has_started"] = False inference_state["frames_already_tracked"] = {} # Warm up the visual backbone and cache the image feature on frame 0 - self._get_image_feature(inference_state, frame_idx=0, batch_size=1, import_from_onnx=import_from_onnx, model_id=model_id) + self._get_image_feature(inference_state, frame_idx=0, batch_size=1, import_from_onnx=import_from_onnx, import_from_tflite=import_from_tflite, model_id=model_id) return inference_state @classmethod @@ -180,6 +181,8 @@ def add_new_points_or_box( box=None, import_from_onnx=False, export_to_onnx=False, + import_from_tflite=False, + export_to_tflite=False, model_id=None ): """Add new points to a frame.""" @@ -311,7 +314,9 @@ def add_new_points_or_box( is_cond=is_cond, run_mem_encoder=False, consolidate_at_video_res=True, - import_from_onnx=import_from_onnx, 
export_to_onnx=export_to_onnx, model_id=model_id + import_from_onnx=import_from_onnx, export_to_onnx=export_to_onnx, + import_from_tflite=import_from_tflite, export_to_tflite=export_to_tflite, + model_id=model_id ) _, video_res_masks = self._get_orig_video_res_output( inference_state, consolidated_out["pred_masks_video_res"] @@ -331,6 +336,8 @@ def add_new_mask( mask, import_from_onnx=False, export_to_onnx=False, + import_from_tflite=False, + export_to_tflite=False, model_id=None ): """Add new mask to a frame.""" @@ -406,7 +413,9 @@ def add_new_mask( is_cond=is_cond, run_mem_encoder=False, consolidate_at_video_res=True, - import_from_onnx=import_from_onnx, export_to_onnx=export_to_onnx, model_id=model_id + import_from_onnx=import_from_onnx, export_to_onnx=export_to_onnx, + import_from_tflite=import_from_tflite, export_to_tflite=export_to_tflite, + model_id=model_id ) _, video_res_masks = self._get_orig_video_res_output( inference_state, consolidated_out["pred_masks_video_res"] @@ -444,6 +453,8 @@ def _consolidate_temp_output_across_obj( consolidate_at_video_res=False, import_from_onnx=False, export_to_onnx=False, + import_from_tflite=False, + export_to_tflite=False, model_id=None ): """ @@ -551,6 +562,7 @@ def _consolidate_temp_output_across_obj( high_res_masks=high_res_masks, is_mask_from_pts=True, # these frames are what the user interacted with import_from_onnx=import_from_onnx, + import_from_tflite=import_from_tflite, model_id=model_id ) consolidated_out["maskmem_features"] = maskmem_features @@ -558,7 +570,7 @@ def _consolidate_temp_output_across_obj( return consolidated_out - def _get_empty_mask_ptr(self, inference_state, frame_idx, import_from_onnx, export_to_onnx, model_id): + def _get_empty_mask_ptr(self, inference_state, frame_idx, import_from_onnx, export_to_onnx, import_from_tflite, export_to_tflite, model_id): """Get a dummy object pointer based on an empty mask on the current frame.""" # A dummy (empty) mask with a single object batch_size = 1 @@ -575,7 +587,7 @@ def _get_empty_mask_ptr(self, inference_state, frame_idx, import_from_onnx, expo current_vision_feats, current_vision_pos_embeds, feat_sizes, - ) = self._get_image_feature(inference_state, frame_idx, batch_size, import_from_onnx=import_from_onnx, model_id=model_id) + ) = self._get_image_feature(inference_state, frame_idx, batch_size, import_from_onnx=import_from_onnx, import_from_tflite=import_from_tflite, model_id=model_id) # Feed the empty mask and image feature above to get a dummy object pointer current_out = self.track_step( @@ -593,12 +605,14 @@ def _get_empty_mask_ptr(self, inference_state, frame_idx, import_from_onnx, expo prev_sam_mask_logits=None, import_from_onnx=import_from_onnx, export_to_onnx=export_to_onnx, + import_from_tflite=import_from_tflite, + export_to_tflite=export_to_tflite, model_id=model_id ) return current_out["obj_ptr"] @torch.inference_mode() - def propagate_in_video_preflight(self, inference_state, import_from_onnx=False, export_to_onnx=False, model_id=None): + def propagate_in_video_preflight(self, inference_state, import_from_onnx=False, export_to_onnx=False, import_from_tflite=False, export_to_tflite=False, model_id=None): """Prepare inference_state and consolidate temporary outputs before tracking.""" # Tracking has started and we don't allow adding new objects until session is reset. 
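(Editor's sketch.) The TFLite import path added above in this patch drives tf.lite.Interpreter by hand: resize the dynamic axes, allocate, set_tensor, invoke, get_tensor. Below is a self-contained sketch of that pattern against the exported prompt encoder. The model path, the hiera_t model id, the tensor dtypes, and the index order (copied from the hunks above) are assumptions; they should be re-checked against get_input_details()/get_output_details() for a given export.

```
import numpy as np
import tensorflow as tf

# Indices mirror the ordering observed in the patch (coords=2, labels=3,
# masks=0, masks_enable=1); the ordering is not guaranteed across exports.
interpreter = tf.lite.Interpreter(model_path="model/prompt_encoder_hiera_t.tflite")
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

coords = np.zeros((1, 1, 2), dtype=np.float32)      # one (x, y) prompt point
labels = np.ones((1, 1), dtype=np.int32)            # 1 = foreground, 0 = background
masks = np.zeros((1, 256, 256), dtype=np.float32)   # unused mask prompt
masks_enable = np.zeros((1,), dtype=np.int32)       # 0 = ignore the mask input

# The point axis is dynamic: resize it first, then allocate and feed tensors.
interpreter.resize_tensor_input(input_details[2]["index"], coords.shape)
interpreter.allocate_tensors()
interpreter.set_tensor(input_details[2]["index"], coords)
interpreter.set_tensor(input_details[3]["index"], labels)
interpreter.set_tensor(input_details[0]["index"], masks)
interpreter.set_tensor(input_details[1]["index"], masks_enable)
interpreter.invoke()

sparse_embeddings = interpreter.get_tensor(output_details[1]["index"])
dense_embeddings = interpreter.get_tensor(output_details[2]["index"])
dense_pe = interpreter.get_tensor(output_details[0]["index"])
print(sparse_embeddings.shape, dense_embeddings.shape, dense_pe.shape)
```

Resizing before allocate_tensors() is what allows a varying number of prompt points; TFLite rejects set_tensor on a shape that was not allocated.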
inference_state["tracking_has_started"] = True @@ -625,7 +639,7 @@ def propagate_in_video_preflight(self, inference_state, import_from_onnx=False, # consolidate the temporary output across all objects on this frame for frame_idx in temp_frame_inds: consolidated_out = self._consolidate_temp_output_across_obj( - inference_state, frame_idx, is_cond=is_cond, run_mem_encoder=True, import_from_onnx=import_from_onnx, export_to_onnx=export_to_onnx, model_id=model_id + inference_state, frame_idx, is_cond=is_cond, run_mem_encoder=True, import_from_onnx=import_from_onnx, export_to_onnx=export_to_onnx, import_from_tflite=import_from_tflite, export_to_tflite=export_to_tflite, model_id=model_id ) # merge them into "output_dict" and also create per-object slices output_dict[storage_key][frame_idx] = consolidated_out @@ -676,10 +690,12 @@ def propagate_in_video( reverse=False, import_from_onnx=False, export_to_onnx=False, + import_from_tflite=False, + export_to_tflite=False, model_id=None ): """Propagate the input points across frames to track in the entire video.""" - self.propagate_in_video_preflight(inference_state, import_from_onnx=import_from_onnx, export_to_onnx=export_to_onnx, model_id=model_id) + self.propagate_in_video_preflight(inference_state, import_from_onnx=import_from_onnx, export_to_onnx=export_to_onnx, import_from_tflite=import_from_tflite, export_to_tflite=export_to_tflite, model_id=model_id) output_dict = inference_state["output_dict"] consolidated_frame_inds = inference_state["consolidated_frame_inds"] @@ -818,7 +834,7 @@ def _reset_tracking_results(self, inference_state): inference_state["tracking_has_started"] = False inference_state["frames_already_tracked"].clear() - def _get_image_feature(self, inference_state, frame_idx, batch_size, import_from_onnx = False, model_id = None): + def _get_image_feature(self, inference_state, frame_idx, batch_size, import_from_onnx = False, import_from_tflite = False, model_id = None): """Compute the image features on a given frame.""" # Look up in the cache first image, backbone_out = inference_state["cached_features"].get( @@ -834,7 +850,27 @@ def _get_image_feature(self, inference_state, frame_idx, batch_size, import_from import onnxruntime model = onnxruntime.InferenceSession("model/image_encoder_"+model_id+".onnx") vision_features, vision_pos_enc_0, vision_pos_enc_1, vision_pos_enc_2, backbone_fpn_0, backbone_fpn_1, backbone_fpn_2 = model.run(None, {"input_image":image.numpy()}) - else: + + if import_from_tflite: + print("begin image encoder tflite") + import tensorflow as tf + image_encoder = tf.lite.Interpreter(model_path="model/image_encoder_"+model_id+".tflite") + image_encoder.allocate_tensors() + input_details = image_encoder.get_input_details() + output_details = image_encoder.get_output_details() + + image_encoder.set_tensor(input_details[0]["index"], image.numpy()) + image_encoder.invoke() + + vision_features = image_encoder.get_tensor(output_details[4]["index"]) + vision_pos_enc_0 = image_encoder.get_tensor(output_details[1]["index"]) + vision_pos_enc_1 = image_encoder.get_tensor(output_details[5]["index"]) + vision_pos_enc_2 = image_encoder.get_tensor(output_details[3]["index"]) + backbone_fpn_0 = image_encoder.get_tensor(output_details[0]["index"]) + backbone_fpn_1 = image_encoder.get_tensor(output_details[2]["index"]) + backbone_fpn_2 = image_encoder.get_tensor(output_details[6]["index"]) + + if not import_from_onnx and not import_from_tflite: print("begin image encoder torch") print(image.shape) vision_features, vision_pos_enc_0, 
vision_pos_enc_1, vision_pos_enc_2, backbone_fpn_0, backbone_fpn_1, backbone_fpn_2 = self.forward_image(image) @@ -880,6 +916,8 @@ def _run_single_frame_inference( prev_sam_mask_logits=None, import_from_onnx=False, export_to_onnx=False, + import_from_tflite=False, + export_to_tflite=False, model_id=None ): """Run tracking on a single frame based on current inputs and previous memory.""" @@ -890,7 +928,7 @@ def _run_single_frame_inference( current_vision_feats, current_vision_pos_embeds, feat_sizes, - ) = self._get_image_feature(inference_state, frame_idx, batch_size, import_from_onnx=import_from_onnx, model_id=model_id) + ) = self._get_image_feature(inference_state, frame_idx, batch_size, import_from_onnx=import_from_onnx, import_from_tflite=import_from_tflite, model_id=model_id) # point and mask should not appear as input simultaneously on the same frame assert point_inputs is None or mask_inputs is None @@ -909,6 +947,8 @@ def _run_single_frame_inference( prev_sam_mask_logits=prev_sam_mask_logits, import_from_onnx=import_from_onnx, export_to_onnx=export_to_onnx, + import_from_tflite=import_from_tflite, + export_to_tflite=export_to_tflite, model_id=model_id ) @@ -939,7 +979,7 @@ def _run_single_frame_inference( return compact_current_out, pred_masks_gpu def _run_memory_encoder( - self, inference_state, frame_idx, batch_size, high_res_masks, is_mask_from_pts, import_from_onnx, model_id + self, inference_state, frame_idx, batch_size, high_res_masks, is_mask_from_pts, import_from_onnx, import_from_tflite, model_id ): """ Run the memory encoder on `high_res_masks`. This is usually after applying @@ -948,7 +988,7 @@ def _run_memory_encoder( """ # Retrieve correct image features _, _, current_vision_feats, _, feat_sizes = self._get_image_feature( - inference_state, frame_idx, batch_size, import_from_onnx=import_from_onnx, model_id=model_id + inference_state, frame_idx, batch_size, import_from_onnx=import_from_onnx, import_from_tflite=import_from_tflite, model_id=model_id ) maskmem_features, maskmem_pos_enc = self._encode_new_memory( current_vision_feats=current_vision_feats, From 3063ba53a13c9a7749fc5a312a2fb192f91161e9 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Mon, 2 Sep 2024 11:44:40 +0900 Subject: [PATCH 44/79] Export memory attention to tflite --- sam2/modeling/sam2_base.py | 40 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 9a0072db5..ae2614da5 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -116,11 +116,13 @@ def __init__( # with memories (and obj ptrs) from past frames self.memory_attention = memory_attention self.memory_attention_onnx_exported = False + self.memory_attention_tflite_exported = False self.hidden_dim = memory_attention.d_model # Part 3: memory encoder for the previous frame's outputs self.memory_encoder = memory_encoder self.memory_encoder_onnx_exported = False + self.memory_encoder_tflite_exported = False self.mem_dim = self.hidden_dim if hasattr(self.memory_encoder, "out_proj") and hasattr( self.memory_encoder.out_proj, "weight" @@ -622,6 +624,8 @@ def _prepare_memory_conditioned_features( track_in_reverse=False, # tracking in reverse time order (for demo usage) export_to_onnx=False, import_from_onnx=False, + export_to_tflite=False, + import_from_tflite=False, model_id=None ): """Fuse the current frame's visual feature map with previous memory.""" @@ -799,7 +803,20 @@ def _prepare_memory_conditioned_features( 
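(Editor's sketch.) The hunks that follow convert the memory attention and memory encoder with ai_edge_torch. Below is a minimal, standalone sketch of that conversion flow on a toy module; the module, shapes, and output path are illustrative, and ai_edge_torch 0.2.0 with its _ai_edge_converter_flags argument (as used in this patch and pinned in the README) is assumed.

```
import os
import ai_edge_torch
import tensorflow as tf
import torch

# Toy stand-in for the modules converted in this patch; shapes are illustrative.
class TinyHead(torch.nn.Module):
    def forward(self, x):
        return torch.nn.functional.relu(x) * 2.0

module = TinyHead().eval()
sample_inputs = (torch.randn(1, 16),)

# Limiting supported_ops to TFLITE_BUILTINS makes the conversion fail early
# if an op would need Flex (TF select) kernels, so the exported model stays
# runnable on plain TFLite runtimes. The README sets PJRT_DEVICE=CPU before
# running the tflite export.
tfl_converter_flags = {"target_spec": {"supported_ops": [tf.lite.OpsSet.TFLITE_BUILTINS]}}
edge_model = ai_edge_torch.convert(module, sample_inputs,
                                   _ai_edge_converter_flags=tfl_converter_flags)

print(edge_model(*sample_inputs))   # quick parity check against the torch module
os.makedirs("model", exist_ok=True)
edge_model.export("model/tiny_head.tflite")
```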
pix_feat_with_mem = model.run(None, {"curr":current_vision_feats[0].numpy(), "memory":memory.numpy(), "curr_pos":current_vision_pos_embeds[0].numpy(), "memory_pos":memory_pos_embed.numpy(), "num_obj_ptr_tokens":num_obj_ptr_tokens_numpy}) pix_feat_with_mem = torch.Tensor(pix_feat_with_mem[0]) - if not import_from_onnx: + if export_to_tflite and not self.memory_attention_tflite_exported: + self.memory_attention_tflite_exported = True + import ai_edge_torch + import tensorflow as tf + sample_inputs = (current_vision_feats[0], memory, current_vision_pos_embeds[0], memory_pos_embed, num_obj_ptr_tokens) + tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS]}} + edge_model = ai_edge_torch.convert(self.memory_attention, sample_inputs, _ai_edge_converter_flags=tfl_converter_flags) + edge_model.export("memory_attention_"+model_id+".tflite") + + if import_from_tflite: + pix_feat_with_mem = edge_model(sample_inputs) + pix_feat_with_mem = torch.Tensor(pix_feat_with_mem[0]) + + if not import_from_onnx and not import_from_tflite: pix_feat_with_mem = self.memory_attention( curr=current_vision_feats, curr_pos=current_vision_pos_embeds, @@ -820,6 +837,8 @@ def _encode_new_memory( is_mask_from_pts, export_to_onnx = False, import_from_onnx = False, + export_to_tflite = False, + import_from_tflite = False, model_id = None ): """Encode the current image and its prediction into a memory feature.""" @@ -863,7 +882,20 @@ def _encode_new_memory( vision_features, vision_pos_enc = model.run(None, {"pix_feat":pix_feat.numpy(), "masks":mask_for_mem.numpy()}) maskmem_out = {"vision_features": torch.Tensor(vision_features), "vision_pos_enc": [torch.Tensor(vision_pos_enc)]} - if not import_from_onnx: + if export_to_tflite and not self.memory_encoder_tflite_exported: + self.memory_encoder_tflite_exported = True + import ai_edge_torch + import tensorflow as tf + sample_inputs = (pix_feat, mask_for_mem, False) + tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS]}} + edge_model = ai_edge_torch.convert(self.memory_encoder, sample_inputs, _ai_edge_converter_flags=tfl_converter_flags) + edge_model.export("memory_encoder"+model_id+".tflite") + + if import_from_tflite: + vision_features, vision_pos_enc = edge_model(sample_inputs) + maskmem_out = {"vision_features": torch.Tensor(vision_features), "vision_pos_enc": [torch.Tensor(vision_pos_enc)]} + + if not import_from_onnx and not import_from_tflite: maskmem_out = self.memory_encoder( pix_feat, mask_for_mem, skip_mask_sigmoid=True # sigmoid already applied ) @@ -930,6 +962,8 @@ def track_step( track_in_reverse=track_in_reverse, export_to_onnx=export_to_onnx, import_from_onnx=import_from_onnx, + export_to_tflite=export_to_tflite, + import_from_tflite=import_from_tflite, model_id=model_id ) # apply SAM-style segmentation head @@ -974,6 +1008,8 @@ def track_step( is_mask_from_pts=(point_inputs is not None), export_to_onnx=export_to_onnx, import_from_onnx=import_from_onnx, + export_to_tflite=export_to_tflite, + import_from_tflite=import_from_tflite, model_id=model_id ) current_out["maskmem_features"] = maskmem_features From 1becd1793cb94744b5ee4d380be2738d360aaf21 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Mon, 2 Sep 2024 12:47:46 +0900 Subject: [PATCH 45/79] Fix checkpoint name --- export_video_predictor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/export_video_predictor.py b/export_video_predictor.py index 08b815d5d..e172deb0e 100644 --- a/export_video_predictor.py +++ 
b/export_video_predictor.py @@ -35,7 +35,7 @@ sam2_checkpoint = "./checkpoints/sam2_hiera_small.pt" elif model_id == "hiera_b+": model_cfg = "sam2_hiera_b+.yaml" - sam2_checkpoint = "./checkpoints/sam2_hiera_base+.pt" + sam2_checkpoint = "./checkpoints/sam2_hiera_base_plus.pt" elif model_id == "hiera_t": model_cfg = "sam2_hiera_t.yaml" sam2_checkpoint = "./checkpoints/sam2_hiera_tiny.pt" From 340fff0688bf6bb1829b978ada17ff40235cf331 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Mon, 2 Sep 2024 16:59:27 +0900 Subject: [PATCH 46/79] Export mlp --- sam2/modeling/sam2_base.py | 39 +++++++++++++++++++++++++++++++++++--- 1 file changed, 36 insertions(+), 3 deletions(-) diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index ae2614da5..738d92919 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -179,6 +179,8 @@ def __init__( self.add_all_frames_to_correct_as_cond = add_all_frames_to_correct_as_cond self.max_cond_frames_in_attn = max_cond_frames_in_attn + self.mlp_onnx_exported = False + # Model compilation if compile_image_encoder: # Compile the forward function (not the full module) to allow loading checkpoints. @@ -259,7 +261,9 @@ def _forward_sam_heads( mask_inputs=None, high_res_features=None, multimask_output=False, + export_to_onnx=False, import_from_onnx=False, + export_to_tflite=False, import_from_tflite=False, model_id=None ): @@ -503,7 +507,30 @@ def _forward_sam_heads( low_res_masks, high_res_masks = low_res_multimasks, high_res_multimasks # Extract object pointer from the SAM output token (with occlusion handling) - obj_ptr = self.obj_ptr_proj(sam_output_token) + if export_to_onnx and not self.mlp_onnx_exported: + print("x", sam_output_token.shape) + self.mlp_onnx_exported = True + torch.onnx.export( + self.obj_ptr_proj, (sam_output_token), 'model/mlp_'+model_id+'.onnx', + input_names=["x"], + output_names=["x_out"], + dynamic_axes={ + 'x': {0: 'n'}, + 'obj_ptr': {0: 'n'} + }, + verbose=False, opset_version=17 + ) + + if import_from_onnx: + import onnxruntime + model = onnxruntime.InferenceSession("model/mlp_"+model_id+".onnx") + import numpy as np + obj_ptr = model.run(None, {"x":sam_output_token.numpy()})[0] + obj_ptr = torch.Tensor(obj_ptr) + + if not import_from_onnx: + obj_ptr = self.obj_ptr_proj(sam_output_token) + if self.pred_obj_scores: # Allow *soft* no obj ptr, unlike for masks if self.soft_no_obj_ptr: @@ -527,7 +554,7 @@ def _forward_sam_heads( object_score_logits, ) - def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs, import_from_onnx, import_from_tflite, model_id): + def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs, export_to_onnx, import_from_onnx, export_to_tflite, import_from_tflite, model_id): """ Directly turn binary `mask_inputs` into a output mask logits without using SAM. (same input and output shapes as in _forward_sam_heads above). 
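(Editor's sketch.) PATCH 46 above exports the object-pointer MLP (obj_ptr_proj) to ONNX with a dynamic batch axis and runs it back through onnxruntime. The sketch below reproduces that export/verify loop on a stand-in MLP; the file name, the 256-wide token size, and the tolerance are assumptions, and the dynamic axis is keyed by the declared input/output names so both sides stay dynamic.

```
import os
import numpy as np
import onnxruntime
import torch

# Stand-in for obj_ptr_proj; the real export passes the module itself.
mlp = torch.nn.Sequential(
    torch.nn.Linear(256, 256),
    torch.nn.ReLU(),
    torch.nn.Linear(256, 256),
).eval()

os.makedirs("model", exist_ok=True)
x = torch.randn(2, 256)
torch.onnx.export(
    mlp, (x,), "model/mlp_example.onnx",
    input_names=["x"],
    output_names=["x_out"],
    # Keyed by the declared tensor names so the token batch axis stays dynamic.
    dynamic_axes={"x": {0: "n"}, "x_out": {0: "n"}},
    opset_version=17,
)

# Parity check across batch sizes, mirroring the onnxruntime import path above.
session = onnxruntime.InferenceSession("model/mlp_example.onnx")
for n in (1, 3):
    inp = torch.randn(n, 256)
    onnx_out = session.run(None, {"x": inp.numpy()})[0]
    torch_out = mlp(inp).detach().numpy()
    assert np.allclose(onnx_out, torch_out, atol=1e-5)
```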
@@ -556,7 +583,9 @@ def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs, backbone_features=backbone_features, mask_inputs=self.mask_downsample(mask_inputs_float), high_res_features=high_res_features, + export_to_onnx=export_to_onnx, import_from_onnx=import_from_onnx, + export_to_tflite=export_to_tflite, import_from_tflite=import_from_tflite, model_id=model_id ) @@ -947,7 +976,8 @@ def track_step( pix_feat = current_vision_feats[-1].permute(1, 2, 0) pix_feat = pix_feat.view(-1, self.hidden_dim, *feat_sizes[-1]) sam_outputs = self._use_mask_as_output( - pix_feat, high_res_features, mask_inputs, import_from_onnx=import_from_onnx, import_from_tflite=import_from_tflite, model_id=model_id + pix_feat, high_res_features, mask_inputs, + export_to_onnx=export_to_onnx, import_from_onnx=import_from_onnx, export_to_tflite=export_to_tflite, import_from_tflite=import_from_tflite, model_id=model_id ) else: # fused the visual feature with previous memory features in the memory bank @@ -980,7 +1010,10 @@ def track_step( mask_inputs=mask_inputs, high_res_features=high_res_features, multimask_output=multimask_output, + export_to_onnx=export_to_onnx, import_from_onnx=import_from_onnx, + export_to_tflite=export_to_tflite, + import_from_tflite=import_from_tflite, model_id=model_id ) ( From 08312115e02258c221abf7c7a8babdfb3d32114c Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Mon, 2 Sep 2024 17:35:13 +0900 Subject: [PATCH 47/79] Added assertion --- sam2/modeling/sam2_base.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 738d92919..82befde15 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -194,6 +194,37 @@ def __init__( dynamic=False, ) + # Check decoder sample parameter + assert(self.image_size == 1024) + assert(self.num_feature_levels == 3) + assert(self.hidden_dim == 256) + assert(self.num_maskmem == 7) + assert(self.directly_add_no_mem_embed == True) + #assert(self.training == False) + assert(self.mem_dim == 64) + assert(self.add_tpos_enc_to_obj_ptrs == False) + assert(self.use_obj_ptrs_in_encoder == True) + assert(self.add_all_frames_to_correct_as_cond == False) + assert(self.multimask_output_in_sam == True) + assert(self.multimask_min_pt_num == 0) + assert(self.multimask_max_pt_num == 1) + assert(self.sam_prompt_embed_dim == self.hidden_dim) + assert(self.backbone_stride == 16) + assert(self.sam_image_embedding_size == self.image_size // self.backbone_stride) + assert(self.pred_obj_scores == True) + assert(self.use_obj_ptrs_in_encoder == True) + assert(self.use_mlp_for_obj_ptr_proj == True) + assert(self.proj_tpos_enc_in_obj_ptrs == False) + assert(self.soft_no_obj_ptr == False) + assert(self.fixed_no_obj_ptr == True) + assert(self.non_overlap_masks_for_mem_enc == False) + assert(self.binarize_mask_from_pts_for_mem_enc == False or self.binarize_mask_from_pts_for_mem_enc == True) # True for video + assert(self.sigmoid_scale_for_mem_enc == 20) + assert(self.sigmoid_bias_for_mem_enc == -10.0) + assert(self.sam_mask_decoder.dynamic_multimask_via_stability == True) + assert(self.sam_mask_decoder.dynamic_multimask_stability_delta == 0.05) + assert(self.sam_mask_decoder.dynamic_multimask_stability_thresh == 0.98) + @property def device(self): return next(self.parameters()).device From c724630e1a68745836ca3f98314648f4e1f64dee Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Mon, 2 Sep 2024 18:01:09 +0900 Subject: [PATCH 48/79] Added assertion --- 
sam2/modeling/sam2_base.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 82befde15..80f209b5e 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -224,6 +224,12 @@ def __init__( assert(self.sam_mask_decoder.dynamic_multimask_via_stability == True) assert(self.sam_mask_decoder.dynamic_multimask_stability_delta == 0.05) assert(self.sam_mask_decoder.dynamic_multimask_stability_thresh == 0.98) + assert(self.max_cond_frames_in_attn == -1) + assert(self.memory_temporal_stride_for_eval == 1) + assert(self.max_obj_ptrs_in_encoder == 16) + assert(self.only_obj_ptrs_in_the_past_for_eval == True) + assert(self.multimask_output_for_tracking == True) + assert(self.use_multimask_token_for_obj_ptr == True) @property def device(self): From 19afdf034cb8de0e518d395ceab5765b23d1ef1d Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Mon, 2 Sep 2024 21:12:29 +0900 Subject: [PATCH 49/79] Load onnx at once --- sam2/modeling/sam2_base.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 80f209b5e..c4052b1c6 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -193,6 +193,12 @@ def __init__( fullgraph=True, dynamic=False, ) + + # onnx + self.image_encoder_onnx = False + self.prompt_encoder_onnx = False + self.mask_decoder_onnx = False + self.mlp_onnx = None # Check decoder sample parameter assert(self.image_size == 1024) @@ -392,15 +398,17 @@ def _forward_sam_heads( if sam_mask_prompt != None: raise("currently not supported mask prompt") import onnxruntime - model = onnxruntime.InferenceSession("model/prompt_encoder_"+model_id+".onnx") - sparse_embeddings, dense_embeddings, dense_pe = model.run(None, {"coords":sam_point_coords.numpy(), "labels":sam_point_labels.numpy(), "masks":mask_input_dummy.numpy(), "masks_enable":masks_enable.numpy()}) + if self.image_encoder_onnx == None: + self.image_encoder_onnx = onnxruntime.InferenceSession("model/prompt_encoder_"+model_id+".onnx") + sparse_embeddings, dense_embeddings, dense_pe = self.image_encoder_onnx.run(None, {"coords":sam_point_coords.numpy(), "labels":sam_point_labels.numpy(), "masks":mask_input_dummy.numpy(), "masks_enable":masks_enable.numpy()}) sparse_embeddings = torch.Tensor(sparse_embeddings) dense_embeddings = torch.Tensor(dense_embeddings) dense_pe = torch.Tensor(dense_pe) - model = onnxruntime.InferenceSession("model/mask_decoder_"+model_id+".onnx") + if self.mask_decoder_onnx == None: + self.mask_decoder_onnx = onnxruntime.InferenceSession("model/mask_decoder_"+model_id+".onnx") print("backbone_features", backbone_features.shape) - masks, iou_pred, sam_tokens_out, object_score_logits = model.run(None, { + masks, iou_pred, sam_tokens_out, object_score_logits = self.mask_decoder_onnx.run(None, { "image_embeddings":backbone_features.numpy(), "image_pe": dense_pe.numpy(), "sparse_prompt_embeddings": sparse_embeddings.numpy(), @@ -560,9 +568,10 @@ def _forward_sam_heads( if import_from_onnx: import onnxruntime - model = onnxruntime.InferenceSession("model/mlp_"+model_id+".onnx") + if self.mlp_onnx == None: + self.mlp_onnx = onnxruntime.InferenceSession("model/mlp_"+model_id+".onnx") import numpy as np - obj_ptr = model.run(None, {"x":sam_output_token.numpy()})[0] + obj_ptr = self.mlp_onnx.run(None, {"x":sam_output_token.numpy()})[0] obj_ptr = torch.Tensor(obj_ptr) if not import_from_onnx: From 7a3772ce66b191d1a27a72e36675970f9150694c Mon Sep 
17 00:00:00 2001 From: Kazuki Kyakuno Date: Mon, 2 Sep 2024 21:26:51 +0900 Subject: [PATCH 50/79] Load onnx at once --- sam2/modeling/sam2_base.py | 38 ++++++++++++++++++++++-------------- sam2/sam2_video_predictor.py | 6 ++++-- 2 files changed, 27 insertions(+), 17 deletions(-) diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index c4052b1c6..fc6ee09e4 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -195,10 +195,12 @@ def __init__( ) # onnx - self.image_encoder_onnx = False - self.prompt_encoder_onnx = False - self.mask_decoder_onnx = False + self.image_encoder_onnx = None + self.prompt_encoder_onnx = None + self.mask_decoder_onnx = None self.mlp_onnx = None + self.memory_attention_onnx = None + self.memory_encoder_onnx = None # Check decoder sample parameter assert(self.image_size == 1024) @@ -398,9 +400,9 @@ def _forward_sam_heads( if sam_mask_prompt != None: raise("currently not supported mask prompt") import onnxruntime - if self.image_encoder_onnx == None: - self.image_encoder_onnx = onnxruntime.InferenceSession("model/prompt_encoder_"+model_id+".onnx") - sparse_embeddings, dense_embeddings, dense_pe = self.image_encoder_onnx.run(None, {"coords":sam_point_coords.numpy(), "labels":sam_point_labels.numpy(), "masks":mask_input_dummy.numpy(), "masks_enable":masks_enable.numpy()}) + if self.prompt_encoder_onnx == None: + self.prompt_encoder_onnx = onnxruntime.InferenceSession("model/prompt_encoder_"+model_id+".onnx") + sparse_embeddings, dense_embeddings, dense_pe = self.prompt_encoder_onnx.run(None, {"coords":sam_point_coords.numpy(), "labels":sam_point_labels.numpy(), "masks":mask_input_dummy.numpy(), "masks_enable":masks_enable.numpy()}) sparse_embeddings = torch.Tensor(sparse_embeddings) dense_embeddings = torch.Tensor(dense_embeddings) dense_pe = torch.Tensor(dense_pe) @@ -854,11 +856,11 @@ def _prepare_memory_conditioned_features( if export_to_onnx and not self.memory_attention_onnx_exported: self.memory_attention_onnx_exported = True - print("current_vision_feats", current_vision_feats[0].shape, current_vision_feats[0].dtype) - print("memory", memory.shape, memory.dtype) - print("current_vision_pos_embeds", current_vision_pos_embeds[0].shape, current_vision_pos_embeds[0].dtype) - print("memory_pos_embed", memory_pos_embed.shape, memory_pos_embed.dtype) - print("num_obj_ptr_tokens", num_obj_ptr_tokens) + #print("current_vision_feats", current_vision_feats[0].shape, current_vision_feats[0].dtype) + #print("memory", memory.shape, memory.dtype) + #print("current_vision_pos_embeds", current_vision_pos_embeds[0].shape, current_vision_pos_embeds[0].dtype) + #print("memory_pos_embed", memory_pos_embed.shape, memory_pos_embed.dtype) + #print("num_obj_ptr_tokens", num_obj_ptr_tokens) torch.onnx.export( # dynamo_export self.memory_attention, (current_vision_feats[0], memory, current_vision_pos_embeds[0], memory_pos_embed, num_obj_ptr_tokens), 'model/memory_attention_'+model_id+'.onnx', input_names=["curr", "memory", "curr_pos", "memory_pos", "num_obj_ptr_tokens"], @@ -871,11 +873,13 @@ def _prepare_memory_conditioned_features( ) if import_from_onnx: + print("begin memory attention onnx") import onnxruntime - model = onnxruntime.InferenceSession("model/memory_attention_"+model_id+".onnx") + if self.memory_attention_onnx == None: + self.memory_attention_onnx = onnxruntime.InferenceSession("model/memory_attention_"+model_id+".onnx") import numpy as np num_obj_ptr_tokens_numpy = np.array((num_obj_ptr_tokens)).astype(np.int64) - pix_feat_with_mem = 
model.run(None, {"curr":current_vision_feats[0].numpy(), "memory":memory.numpy(), "curr_pos":current_vision_pos_embeds[0].numpy(), "memory_pos":memory_pos_embed.numpy(), "num_obj_ptr_tokens":num_obj_ptr_tokens_numpy}) + pix_feat_with_mem = self.memory_attention_onnx.run(None, {"curr":current_vision_feats[0].numpy(), "memory":memory.numpy(), "curr_pos":current_vision_pos_embeds[0].numpy(), "memory_pos":memory_pos_embed.numpy(), "num_obj_ptr_tokens":num_obj_ptr_tokens_numpy}) pix_feat_with_mem = torch.Tensor(pix_feat_with_mem[0]) if export_to_tflite and not self.memory_attention_tflite_exported: @@ -892,6 +896,7 @@ def _prepare_memory_conditioned_features( pix_feat_with_mem = torch.Tensor(pix_feat_with_mem[0]) if not import_from_onnx and not import_from_tflite: + print("begin memory attention torch") pix_feat_with_mem = self.memory_attention( curr=current_vision_feats, curr_pos=current_vision_pos_embeds, @@ -952,9 +957,11 @@ def _encode_new_memory( ) if import_from_onnx: + print("begin memory encoder onnx") import onnxruntime - model = onnxruntime.InferenceSession("model/memory_encoder_"+model_id+".onnx") - vision_features, vision_pos_enc = model.run(None, {"pix_feat":pix_feat.numpy(), "masks":mask_for_mem.numpy()}) + if self.memory_encoder_onnx == None: + self.memory_encoder_onnx = onnxruntime.InferenceSession("model/memory_encoder_"+model_id+".onnx") + vision_features, vision_pos_enc = self.memory_encoder_onnx.run(None, {"pix_feat":pix_feat.numpy(), "masks":mask_for_mem.numpy()}) maskmem_out = {"vision_features": torch.Tensor(vision_features), "vision_pos_enc": [torch.Tensor(vision_pos_enc)]} if export_to_tflite and not self.memory_encoder_tflite_exported: @@ -971,6 +978,7 @@ def _encode_new_memory( maskmem_out = {"vision_features": torch.Tensor(vision_features), "vision_pos_enc": [torch.Tensor(vision_pos_enc)]} if not import_from_onnx and not import_from_tflite: + print("begin memory encoder torch") maskmem_out = self.memory_encoder( pix_feat, mask_for_mem, skip_mask_sigmoid=True # sigmoid already applied ) diff --git a/sam2/sam2_video_predictor.py b/sam2/sam2_video_predictor.py index e5e8ea6ed..104c377dd 100644 --- a/sam2/sam2_video_predictor.py +++ b/sam2/sam2_video_predictor.py @@ -35,6 +35,7 @@ def __init__( self.non_overlap_masks = non_overlap_masks self.clear_non_cond_mem_around_input = clear_non_cond_mem_around_input self.clear_non_cond_mem_for_multi_obj = clear_non_cond_mem_for_multi_obj + self.image_encoder_onnx = None @torch.inference_mode() def init_state( @@ -848,8 +849,9 @@ def _get_image_feature(self, inference_state, frame_idx, batch_size, import_from print("begin image encoder onnx") print(image.shape) import onnxruntime - model = onnxruntime.InferenceSession("model/image_encoder_"+model_id+".onnx") - vision_features, vision_pos_enc_0, vision_pos_enc_1, vision_pos_enc_2, backbone_fpn_0, backbone_fpn_1, backbone_fpn_2 = model.run(None, {"input_image":image.numpy()}) + if self.image_encoder_onnx == None: + self.image_encoder_onnx = onnxruntime.InferenceSession("model/image_encoder_"+model_id+".onnx") + vision_features, vision_pos_enc_0, vision_pos_enc_1, vision_pos_enc_2, backbone_fpn_0, backbone_fpn_1, backbone_fpn_2 = self.image_encoder_onnx.run(None, {"input_image":image.numpy()}) if import_from_tflite: print("begin image encoder tflite") From a9f2d18ff79fd526e4133d76101aa277105f8b76 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Tue, 3 Sep 2024 09:52:32 +0900 Subject: [PATCH 51/79] Fix export memory encoder --- sam2/modeling/memory_encoder.py | 3 ++- 
sam2/modeling/sam2_base.py | 32 +++++++++++++++++++++++--------- sam2/sam2_video_predictor.py | 9 ++++++++- 3 files changed, 33 insertions(+), 11 deletions(-) diff --git a/sam2/modeling/memory_encoder.py b/sam2/modeling/memory_encoder.py index f60202dfa..fc521e916 100644 --- a/sam2/modeling/memory_encoder.py +++ b/sam2/modeling/memory_encoder.py @@ -178,4 +178,5 @@ def forward( pos = self.position_encoding(x).to(x.dtype) - return {"vision_features": x, "vision_pos_enc": [pos]} + return x, pos + diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index fc6ee09e4..3997237e1 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -396,7 +396,7 @@ def _forward_sam_heads( masks_enable = torch.tensor([1], dtype=torch.int) if import_from_onnx: - print("begin mask decoder onnx") + print("begin prompt encoder onnx") if sam_mask_prompt != None: raise("currently not supported mask prompt") import onnxruntime @@ -409,7 +409,15 @@ def _forward_sam_heads( if self.mask_decoder_onnx == None: self.mask_decoder_onnx = onnxruntime.InferenceSession("model/mask_decoder_"+model_id+".onnx") - print("backbone_features", backbone_features.shape) + # print("backbone_features", backbone_features.shape) + print("begin mask decoder onnx") + print("begin mask decoder onnx") + print("backbone_features", np.sum(backbone_features.numpy())) + print("image_pe", np.sum(dense_pe.numpy())) + print("sparse_embeddings", np.sum(sparse_embeddings.numpy())) + print("dense_embeddings", np.sum(dense_embeddings.numpy())) + print("high_res_features", np.sum(high_res_features[0].numpy())) + print("high_res_features", np.sum(high_res_features[1].numpy())) masks, iou_pred, sam_tokens_out, object_score_logits = self.mask_decoder_onnx.run(None, { "image_embeddings":backbone_features.numpy(), "image_pe": dense_pe.numpy(), @@ -879,6 +887,12 @@ def _prepare_memory_conditioned_features( self.memory_attention_onnx = onnxruntime.InferenceSession("model/memory_attention_"+model_id+".onnx") import numpy as np num_obj_ptr_tokens_numpy = np.array((num_obj_ptr_tokens)).astype(np.int64) + print("curr", np.sum(current_vision_feats[0].numpy())) + print("memory", np.sum(memory.numpy())) + print("curr_pos", np.sum(current_vision_pos_embeds[0].numpy())) + print("memory_pos", np.sum(memory_pos_embed.numpy())) + print("num_obj_ptr_tokens", np.sum(num_obj_ptr_tokens_numpy)) + pix_feat_with_mem = self.memory_attention_onnx.run(None, {"curr":current_vision_feats[0].numpy(), "memory":memory.numpy(), "curr_pos":current_vision_pos_embeds[0].numpy(), "memory_pos":memory_pos_embed.numpy(), "num_obj_ptr_tokens":num_obj_ptr_tokens_numpy}) pix_feat_with_mem = torch.Tensor(pix_feat_with_mem[0]) @@ -950,7 +964,7 @@ def _encode_new_memory( if export_to_onnx and not self.memory_encoder_onnx_exported: self.memory_encoder_onnx_exported = True torch.onnx.export( - self.memory_encoder, (pix_feat, mask_for_mem, False), 'model/memory_encoder_'+model_id+'.onnx', + self.memory_encoder, (pix_feat, mask_for_mem, True), 'model/memory_encoder_'+model_id+'.onnx', input_names=["pix_feat", "masks"], output_names=["vision_features", "vision_pos_enc"], verbose=False, opset_version=17 @@ -962,29 +976,29 @@ def _encode_new_memory( if self.memory_encoder_onnx == None: self.memory_encoder_onnx = onnxruntime.InferenceSession("model/memory_encoder_"+model_id+".onnx") vision_features, vision_pos_enc = self.memory_encoder_onnx.run(None, {"pix_feat":pix_feat.numpy(), "masks":mask_for_mem.numpy()}) - maskmem_out = {"vision_features": torch.Tensor(vision_features), 
"vision_pos_enc": [torch.Tensor(vision_pos_enc)]} + vision_features = torch.Tensor(vision_features) + vision_pos_enc = torch.Tensor(vision_pos_enc) if export_to_tflite and not self.memory_encoder_tflite_exported: self.memory_encoder_tflite_exported = True import ai_edge_torch import tensorflow as tf - sample_inputs = (pix_feat, mask_for_mem, False) + sample_inputs = (pix_feat, mask_for_mem, True) tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS]}} edge_model = ai_edge_torch.convert(self.memory_encoder, sample_inputs, _ai_edge_converter_flags=tfl_converter_flags) edge_model.export("memory_encoder"+model_id+".tflite") if import_from_tflite: vision_features, vision_pos_enc = edge_model(sample_inputs) - maskmem_out = {"vision_features": torch.Tensor(vision_features), "vision_pos_enc": [torch.Tensor(vision_pos_enc)]} if not import_from_onnx and not import_from_tflite: print("begin memory encoder torch") - maskmem_out = self.memory_encoder( + vision_features, vision_pos_enc = self.memory_encoder( pix_feat, mask_for_mem, skip_mask_sigmoid=True # sigmoid already applied ) - maskmem_features = maskmem_out["vision_features"] - maskmem_pos_enc = maskmem_out["vision_pos_enc"] + maskmem_features = vision_features + maskmem_pos_enc = [vision_pos_enc] return maskmem_features, maskmem_pos_enc diff --git a/sam2/sam2_video_predictor.py b/sam2/sam2_video_predictor.py index 104c377dd..fef6713b9 100644 --- a/sam2/sam2_video_predictor.py +++ b/sam2/sam2_video_predictor.py @@ -562,7 +562,9 @@ def _consolidate_temp_output_across_obj( batch_size=batch_size, high_res_masks=high_res_masks, is_mask_from_pts=True, # these frames are what the user interacted with + export_to_onnx=export_to_tflite, import_from_onnx=import_from_onnx, + export_to_tflite=export_to_tflite, import_from_tflite=import_from_tflite, model_id=model_id ) @@ -981,7 +983,7 @@ def _run_single_frame_inference( return compact_current_out, pred_masks_gpu def _run_memory_encoder( - self, inference_state, frame_idx, batch_size, high_res_masks, is_mask_from_pts, import_from_onnx, import_from_tflite, model_id + self, inference_state, frame_idx, batch_size, high_res_masks, is_mask_from_pts, export_to_onnx, import_from_onnx, export_to_tflite, import_from_tflite, model_id ): """ Run the memory encoder on `high_res_masks`. 
This is usually after applying @@ -997,6 +999,11 @@ def _run_memory_encoder( feat_sizes=feat_sizes, pred_masks_high_res=high_res_masks, is_mask_from_pts=is_mask_from_pts, + export_to_onnx=export_to_onnx, + import_from_onnx=import_from_onnx, + export_to_tflite=export_to_tflite, + import_from_tflite=import_from_tflite, + model_id=model_id ) # optionally offload the output to CPU memory to save GPU space From cc9acbea9ca331a7f9bf788389b0b2648678e676 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Tue, 3 Sep 2024 11:38:01 +0900 Subject: [PATCH 52/79] Implement inference mode --- README.md | 18 +++++++++++++++++- export_image_predictor.py | 13 +++++++------ export_video_predictor.py | 9 +++++---- 3 files changed, 29 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index a07a98ee1..00ad409d5 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ torch 2.4.0 ai-edge-torch 0.2.0 ``` -## Run +## Export and Inference onnx @@ -41,6 +41,22 @@ python3 export_image_predictor.py --framework tflite python3 export_video_predictor.py --framework tflite ``` +## Inference only + +onnx + +``` +python3 export_image_predictor.py --framework onnx --mode import +python3 export_video_predictor.py --framework onnx --mode import +``` + +tflite + +``` +python3 export_image_predictor.py --framework tflite --mode import +python3 export_video_predictor.py --framework tflite --mode import +``` + ## Test Replacing the complex tensor of RotaryEnc with matmul. To test this behavior, you can also run it with torch. diff --git a/export_image_predictor.py b/export_image_predictor.py index 162dc4490..109fdda8c 100644 --- a/export_image_predictor.py +++ b/export_image_predictor.py @@ -6,6 +6,7 @@ parser.add_argument('--model_id', default="hiera_t", choices=["hiera_l", "hiera_b+", "hiera_s", "hiera_t"]) parser.add_argument('--framework', default="onnx", choices=["onnx", "tflite", "torch"]) parser.add_argument('--accuracy', default="float", choices=["float", "int8"]) +parser.add_argument('--mode', default="both", choices=["both", "import", "export"]) args = parser.parse_args() import os @@ -22,13 +23,13 @@ os.makedirs("model", exist_ok=True) # export settings -export_to_onnx_image_encoder = args.framework == "onnx" -export_to_onnx_mask_decoder = args.framework == "onnx" -import_from_onnx = args.framework == "onnx" +export_to_onnx_image_encoder = args.framework == "onnx" and (args.mode=="export" or args.mode=="both") +export_to_onnx_mask_decoder = args.framework == "onnx" and (args.mode=="export" or args.mode=="both") +import_from_onnx = args.framework == "onnx" and (args.mode=="import" or args.mode=="both") -export_to_tflite_image_encoder = args.framework == "tflite" -export_to_tflite_mask_decoder = args.framework == "tflite" -import_from_tflite = args.framework == "tflite" +export_to_tflite_image_encoder = args.framework == "tflite" and (args.mode=="export" or args.mode=="both") +export_to_tflite_mask_decoder = args.framework == "tflite" and (args.mode=="export" or args.mode=="both") +import_from_tflite = args.framework == "tflite" and (args.mode=="import" or args.mode=="both") tflite_int8 = args.accuracy == "int8" diff --git a/export_video_predictor.py b/export_video_predictor.py index e172deb0e..00dde3f3c 100644 --- a/export_video_predictor.py +++ b/export_video_predictor.py @@ -6,6 +6,7 @@ parser.add_argument('--model_id', default="hiera_t", choices=["hiera_l", "hiera_b+", "hiera_s", "hiera_t"]) parser.add_argument('--framework', default="onnx", choices=["onnx", "tflite", "torch"]) 
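(Editor's sketch.) PATCH 52, whose hunks continue below, splits export and inference behind a --mode flag. A compact sketch of how the four export/import booleans fall out of --framework and --mode, matching the choices declared in these argparse hunks; the argv shown is only an example.

```
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--framework', default="onnx", choices=["onnx", "tflite", "torch"])
parser.add_argument('--mode', default="both", choices=["both", "import", "export"])
args = parser.parse_args(["--framework", "tflite", "--mode", "import"])  # example argv

do_export = args.mode in ("export", "both")
do_import = args.mode in ("import", "both")

export_to_onnx = args.framework == "onnx" and do_export
import_from_onnx = args.framework == "onnx" and do_import
export_to_tflite = args.framework == "tflite" and do_export
import_from_tflite = args.framework == "tflite" and do_import
print(export_to_onnx, import_from_onnx, export_to_tflite, import_from_tflite)
# -> False False False True
```

With --mode import the export branches are skipped and previously exported models are loaded instead; the README change in a later patch notes that this inference-only path is not yet wired up for tflite.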
parser.add_argument('--accuracy', default="float", choices=["float", "int8"]) +parser.add_argument('--mode', default="both", choices=["both", "import", "export"]) args = parser.parse_args() import os @@ -21,10 +22,10 @@ # export settings model_id = args.model_id -export_to_onnx = args.framework=="onnx" -import_from_onnx = args.framework=="onnx" -export_to_tflite = args.framework=="tflite" -import_from_tflite = args.framework=="tflite" +export_to_onnx = args.framework=="onnx" and (args.mode=="export" or args.mode=="both") +import_from_onnx = args.framework=="onnx" and (args.mode=="import" or args.mode=="both") +export_to_tflite = args.framework=="tflite" and (args.mode=="export" or args.mode=="both") +import_from_tflite = args.framework=="tflite" and (args.mode=="import" or args.mode=="both") # import if model_id == "hiera_l": From f2d1dfc853cc4e5dc3d915a7b15298967ebc2069 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Wed, 4 Sep 2024 12:13:51 +0900 Subject: [PATCH 53/79] Export mlp to tflite --- sam2/modeling/sam2_base.py | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 3997237e1..94b417ad0 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -180,6 +180,7 @@ def __init__( self.max_cond_frames_in_attn = max_cond_frames_in_attn self.mlp_onnx_exported = False + self.mlp_tflite_exported = False # Model compilation if compile_image_encoder: @@ -584,7 +585,20 @@ def _forward_sam_heads( obj_ptr = self.mlp_onnx.run(None, {"x":sam_output_token.numpy()})[0] obj_ptr = torch.Tensor(obj_ptr) - if not import_from_onnx: + if export_to_tflite and not self.mlp_tflite_exported: + self.mlp_tflite_exported = True + import ai_edge_torch + import tensorflow as tf + sample_inputs = (sam_output_token) + tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS]}} + edge_model = ai_edge_torch.convert(self.obj_ptr_proj, sample_inputs, _ai_edge_converter_flags=tfl_converter_flags) + edge_model.export("model/mlp_"+model_id+".tflite") + + if import_from_tflite: + obj_ptr = edge_model(sample_inputs) + obj_ptr = torch.Tensor(obj_ptr) + + if not import_from_onnx and not import_from_tflite: obj_ptr = self.obj_ptr_proj(sam_output_token) if self.pred_obj_scores: @@ -903,7 +917,7 @@ def _prepare_memory_conditioned_features( sample_inputs = (current_vision_feats[0], memory, current_vision_pos_embeds[0], memory_pos_embed, num_obj_ptr_tokens) tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS]}} edge_model = ai_edge_torch.convert(self.memory_attention, sample_inputs, _ai_edge_converter_flags=tfl_converter_flags) - edge_model.export("memory_attention_"+model_id+".tflite") + edge_model.export("model/memory_attention_"+model_id+".tflite") if import_from_tflite: pix_feat_with_mem = edge_model(sample_inputs) @@ -986,7 +1000,7 @@ def _encode_new_memory( sample_inputs = (pix_feat, mask_for_mem, True) tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS]}} edge_model = ai_edge_torch.convert(self.memory_encoder, sample_inputs, _ai_edge_converter_flags=tfl_converter_flags) - edge_model.export("memory_encoder"+model_id+".tflite") + edge_model.export("model/memory_encoder_"+model_id+".tflite") if import_from_tflite: vision_features, vision_pos_enc = edge_model(sample_inputs) From bbf23d1df64410037ff3fd65ad950286fe94b9ac Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Wed, 4 Sep 2024 12:42:24 +0900 Subject: 
[PATCH 54/79] Update usage --- README.md | 15 ++++++++------- sam2/modeling/sam2_base.py | 2 ++ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 00ad409d5..c6b939bf1 100644 --- a/README.md +++ b/README.md @@ -37,6 +37,7 @@ python3 export_video_predictor.py --framework onnx tflite ``` +export PJRT_DEVICE=CPU python3 export_image_predictor.py --framework tflite python3 export_video_predictor.py --framework tflite ``` @@ -50,12 +51,7 @@ python3 export_image_predictor.py --framework onnx --mode import python3 export_video_predictor.py --framework onnx --mode import ``` -tflite - -``` -python3 export_image_predictor.py --framework tflite --mode import -python3 export_video_predictor.py --framework tflite --mode import -``` +tflite not supported inference only yet. ## Test @@ -72,6 +68,11 @@ output/* model/* ``` +## Inference Example + +- [ailia-models](https://github.com/axinc-ai/ailia-models/tree/master/image_segmentation/segment-anything-2) +- [ailia-models-tflite](https://github.com/axinc-ai/ailia-models-tflite/pull/90) + ## Original document -[README_ORIGINAL.md](README_ORIGINAL.md) +- [README_ORIGINAL.md](README_ORIGINAL.md) diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 94b417ad0..4c134b741 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -1004,6 +1004,8 @@ def _encode_new_memory( if import_from_tflite: vision_features, vision_pos_enc = edge_model(sample_inputs) + vision_features = torch.Tensor(vision_features) + vision_pos_enc = torch.Tensor(vision_pos_enc) if not import_from_onnx and not import_from_tflite: print("begin memory encoder torch") From 22469594467dca2a2d1131a5857b071c281570ed Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Wed, 4 Sep 2024 12:50:43 +0900 Subject: [PATCH 55/79] Fix memory encoder tflite export --- sam2/modeling/memory_encoder.py | 3 ++- sam2/modeling/sam2_base.py | 8 ++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/sam2/modeling/memory_encoder.py b/sam2/modeling/memory_encoder.py index fc521e916..c2fe6f340 100644 --- a/sam2/modeling/memory_encoder.py +++ b/sam2/modeling/memory_encoder.py @@ -159,8 +159,9 @@ def forward( self, pix_feat: torch.Tensor, masks: torch.Tensor, - skip_mask_sigmoid: bool = False, ) -> Tuple[torch.Tensor, torch.Tensor]: + skip_mask_sigmoid = True # Fix for tflite + ## Process masks # sigmoid, so that less domain shift from gt masks which are bool if not skip_mask_sigmoid: diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 4c134b741..d80cc1f92 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -978,7 +978,7 @@ def _encode_new_memory( if export_to_onnx and not self.memory_encoder_onnx_exported: self.memory_encoder_onnx_exported = True torch.onnx.export( - self.memory_encoder, (pix_feat, mask_for_mem, True), 'model/memory_encoder_'+model_id+'.onnx', + self.memory_encoder, (pix_feat, mask_for_mem), 'model/memory_encoder_'+model_id+'.onnx', input_names=["pix_feat", "masks"], output_names=["vision_features", "vision_pos_enc"], verbose=False, opset_version=17 @@ -997,20 +997,20 @@ def _encode_new_memory( self.memory_encoder_tflite_exported = True import ai_edge_torch import tensorflow as tf - sample_inputs = (pix_feat, mask_for_mem, True) + sample_inputs = (pix_feat, mask_for_mem) tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS]}} edge_model = ai_edge_torch.convert(self.memory_encoder, sample_inputs, 
_ai_edge_converter_flags=tfl_converter_flags) edge_model.export("model/memory_encoder_"+model_id+".tflite") if import_from_tflite: - vision_features, vision_pos_enc = edge_model(sample_inputs) + vision_features, vision_pos_enc = edge_model(pix_feat, mask_for_mem) vision_features = torch.Tensor(vision_features) vision_pos_enc = torch.Tensor(vision_pos_enc) if not import_from_onnx and not import_from_tflite: print("begin memory encoder torch") vision_features, vision_pos_enc = self.memory_encoder( - pix_feat, mask_for_mem, skip_mask_sigmoid=True # sigmoid already applied + pix_feat, mask_for_mem#, skip_mask_sigmoid=True # sigmoid already applied (fixed to constant) ) maskmem_features = vision_features From 41f688ec8a94b5c37278a6301800a6941220d832 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Wed, 4 Sep 2024 14:15:39 +0900 Subject: [PATCH 56/79] Export memory attention --- sam2/sam2_video_predictor.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/sam2/sam2_video_predictor.py b/sam2/sam2_video_predictor.py index fef6713b9..126ffb662 100644 --- a/sam2/sam2_video_predictor.py +++ b/sam2/sam2_video_predictor.py @@ -302,6 +302,8 @@ def add_new_points_or_box( prev_sam_mask_logits=prev_sam_mask_logits, import_from_onnx=import_from_onnx, export_to_onnx=export_to_onnx, + import_from_tflite=import_from_tflite, + export_to_tflite=export_to_tflite, model_id=model_id ) # Add the output to the output dict (to be used as future memory) @@ -401,6 +403,8 @@ def add_new_mask( run_mem_encoder=False, import_from_onnx=import_from_onnx, export_to_onnx=export_to_onnx, + import_from_tflite=import_from_tflite, + export_to_tflite=export_to_tflite, model_id=model_id ) # Add the output to the output dict (to be used as future memory) @@ -760,6 +764,8 @@ def propagate_in_video( run_mem_encoder=True, import_from_onnx=import_from_onnx, export_to_onnx=export_to_onnx, + import_from_tflite=import_from_tflite, + export_to_tflite=export_to_tflite, model_id=model_id ) output_dict[storage_key][frame_idx] = current_out From f36169e87ec302c75279fadc60cda1c3763165eb Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Wed, 4 Sep 2024 14:54:21 +0900 Subject: [PATCH 57/79] Implement tflite inference --- sam2/modeling/sam2_base.py | 54 ++++++++++++++++++++++++++++---------- 1 file changed, 40 insertions(+), 14 deletions(-) diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index d80cc1f92..aea70792e 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -473,9 +473,9 @@ def _forward_sam_heads( batched_mode = False mask_decoder.set_tensor(input_details[3]["index"], backbone_features.numpy()) - mask_decoder.set_tensor(input_details[6]["index"], dense_pe.numpy()) - mask_decoder.set_tensor(input_details[1]["index"], sparse_embeddings.numpy()) - mask_decoder.set_tensor(input_details[2]["index"], dense_embeddings.numpy()) + mask_decoder.set_tensor(input_details[6]["index"], dense_pe) + mask_decoder.set_tensor(input_details[1]["index"], sparse_embeddings) + mask_decoder.set_tensor(input_details[2]["index"], dense_embeddings) mask_decoder.set_tensor(input_details[5]["index"], batched_mode) mask_decoder.set_tensor(input_details[0]["index"], high_res_features[0].numpy()) mask_decoder.set_tensor(input_details[4]["index"], high_res_features[1].numpy()) @@ -486,6 +486,10 @@ def _forward_sam_heads( sam_tokens_out = mask_decoder.get_tensor(output_details[3]["index"]) object_score_logits = mask_decoder.get_tensor(output_details[1]["index"]) + masks = torch.Tensor(masks) + iou_pred = 
torch.Tensor(iou_pred) + sam_tokens_out = torch.Tensor(sam_tokens_out) + object_score_logits = torch.Tensor(object_score_logits) low_res_multimasks, ious, sam_output_tokens, object_score_logits = self.sam_mask_decoder.forward_postprocess(masks, iou_pred, sam_tokens_out, object_score_logits, multimask_output) print(low_res_multimasks.shape) print(ious.shape) @@ -589,14 +593,24 @@ def _forward_sam_heads( self.mlp_tflite_exported = True import ai_edge_torch import tensorflow as tf - sample_inputs = (sam_output_token) + sample_inputs = (sam_output_token,) tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS]}} edge_model = ai_edge_torch.convert(self.obj_ptr_proj, sample_inputs, _ai_edge_converter_flags=tfl_converter_flags) edge_model.export("model/mlp_"+model_id+".tflite") - if import_from_tflite: - obj_ptr = edge_model(sample_inputs) - obj_ptr = torch.Tensor(obj_ptr) + if import_from_tflite: + import tensorflow as tf + mlp = tf.lite.Interpreter(model_path="model/mlp_"+model_id+".tflite") + mlp.allocate_tensors() + input_details = mlp.get_input_details() + output_details = mlp.get_output_details() + mlp.allocate_tensors() + + mlp.set_tensor(input_details[0]["index"], sam_output_token.numpy()) + mlp.invoke() + + obj_ptr = mlp.get_tensor(output_details[0]["index"]) + obj_ptr = torch.Tensor(obj_ptr) if not import_from_onnx and not import_from_tflite: obj_ptr = self.obj_ptr_proj(sam_output_token) @@ -883,7 +897,7 @@ def _prepare_memory_conditioned_features( #print("current_vision_pos_embeds", current_vision_pos_embeds[0].shape, current_vision_pos_embeds[0].dtype) #print("memory_pos_embed", memory_pos_embed.shape, memory_pos_embed.dtype) #print("num_obj_ptr_tokens", num_obj_ptr_tokens) - torch.onnx.export( # dynamo_export + torch.onnx.export( self.memory_attention, (current_vision_feats[0], memory, current_vision_pos_embeds[0], memory_pos_embed, num_obj_ptr_tokens), 'model/memory_attention_'+model_id+'.onnx', input_names=["curr", "memory", "curr_pos", "memory_pos", "num_obj_ptr_tokens"], output_names=["pix_feat"], @@ -910,7 +924,7 @@ def _prepare_memory_conditioned_features( pix_feat_with_mem = self.memory_attention_onnx.run(None, {"curr":current_vision_feats[0].numpy(), "memory":memory.numpy(), "curr_pos":current_vision_pos_embeds[0].numpy(), "memory_pos":memory_pos_embed.numpy(), "num_obj_ptr_tokens":num_obj_ptr_tokens_numpy}) pix_feat_with_mem = torch.Tensor(pix_feat_with_mem[0]) - if export_to_tflite and not self.memory_attention_tflite_exported: + if False:#export_to_tflite and not self.memory_attention_tflite_exported: self.memory_attention_tflite_exported = True import ai_edge_torch import tensorflow as tf @@ -923,7 +937,7 @@ def _prepare_memory_conditioned_features( pix_feat_with_mem = edge_model(sample_inputs) pix_feat_with_mem = torch.Tensor(pix_feat_with_mem[0]) - if not import_from_onnx and not import_from_tflite: + if not import_from_onnx:# and not import_from_tflite: print("begin memory attention torch") pix_feat_with_mem = self.memory_attention( curr=current_vision_feats, @@ -1002,10 +1016,22 @@ def _encode_new_memory( edge_model = ai_edge_torch.convert(self.memory_encoder, sample_inputs, _ai_edge_converter_flags=tfl_converter_flags) edge_model.export("model/memory_encoder_"+model_id+".tflite") - if import_from_tflite: - vision_features, vision_pos_enc = edge_model(pix_feat, mask_for_mem) - vision_features = torch.Tensor(vision_features) - vision_pos_enc = torch.Tensor(vision_pos_enc) + if import_from_tflite: + import tensorflow as tf + 
memory_encoder = tf.lite.Interpreter(model_path="model/memory_encoder_"+model_id+".tflite") + memory_encoder.allocate_tensors() + input_details = memory_encoder.get_input_details() + output_details = memory_encoder.get_output_details() + memory_encoder.allocate_tensors() + + memory_encoder.set_tensor(input_details[0]["index"], pix_feat.numpy()) + memory_encoder.set_tensor(input_details[1]["index"], mask_for_mem.numpy()) + memory_encoder.invoke() + + vision_features = memory_encoder.get_tensor(output_details[1]["index"]) + vision_pos_enc = memory_encoder.get_tensor(output_details[0]["index"]) + vision_features = torch.Tensor(vision_features) + vision_pos_enc = torch.Tensor(vision_pos_enc) if not import_from_onnx and not import_from_tflite: print("begin memory encoder torch") From a4107359cda5c536fc6f14bbccad79d9936c687d Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Wed, 4 Sep 2024 18:17:23 +0900 Subject: [PATCH 58/79] Prepare rotenc weight --- sam2/modeling/memory_attention.py | 31 ++++++++++++++++++--- sam2/modeling/sam/transformer.py | 45 ++++++++++++++++++++----------- sam2/modeling/sam2_base.py | 33 ++++++++++++++++++----- 3 files changed, 84 insertions(+), 25 deletions(-) diff --git a/sam2/modeling/memory_attention.py b/sam2/modeling/memory_attention.py index 0b07f9d87..ab3494297 100644 --- a/sam2/modeling/memory_attention.py +++ b/sam2/modeling/memory_attention.py @@ -59,13 +59,13 @@ def _forward_sa(self, tgt, query_pos): # Self-Attention tgt2 = self.norm1(tgt) q = k = tgt2 + query_pos if self.pos_enc_at_attn else tgt2 - tgt2 = self.self_attn(q, k, v=tgt2) + tgt2 = self.self_attn(q, k, v=tgt2, num_k_exclude_rope=torch.tensor(0)) tgt = tgt + self.dropout1(tgt2) return tgt - def _forward_ca(self, tgt, memory, query_pos, pos, num_k_exclude_rope=0): + def _forward_ca(self, tgt, memory, query_pos, pos, num_k_exclude_rope=torch.tensor(0)): kwds = {} - if num_k_exclude_rope > 0: + if num_k_exclude_rope.item() > 0: assert isinstance(self.cross_attn_image, RoPEAttention) kwds = {"num_k_exclude_rope": num_k_exclude_rope} @@ -116,6 +116,31 @@ def __init__( self.pos_enc_at_input = pos_enc_at_input self.batch_first = batch_first + def allocate_rope_attention_weight( + self, + curr: torch.Tensor, # self-attention inputs + curr_pos: Optional[Tensor] = None, # pos_enc for self-attention inputs + ): + if isinstance(curr, list): + assert isinstance(curr_pos, list) + assert len(curr) == len(curr_pos) == 1 + curr, curr_pos = ( + curr[0], + curr_pos[0], + ) + + output = curr + + if self.batch_first: + # Convert to batch first + output = output.transpose(0, 1) + + for layer in self.layers: + if isinstance(layer.cross_attn_image, RoPEAttention): + layer.cross_attn_image.allocate_rope_attention_weight(output) + if isinstance(layer.self_attn, RoPEAttention): + layer.self_attn.allocate_rope_attention_weight(output) + def forward( self, curr: torch.Tensor, # self-attention inputs diff --git a/sam2/modeling/sam/transformer.py b/sam2/modeling/sam/transformer.py index b836c5245..45a318eb1 100644 --- a/sam2/modeling/sam/transformer.py +++ b/sam2/modeling/sam/transformer.py @@ -305,20 +305,33 @@ def __init__( ): super().__init__(*args, **kwargs) - self.compute_cis = partial( - compute_axial_cis, dim=self.internal_dim // self.num_heads, theta=rope_theta - ) - freqs_cis = self.compute_cis(end_x=feat_sizes[0], end_y=feat_sizes[1]) - self.freqs_cis = freqs_cis self.rope_k_repeat = rope_k_repeat if USE_MAT_ROTARY_ENC: rotmats = get_rotation_matrices(dim=self.internal_dim // self.num_heads, end_x=feat_sizes[0], 
end_y=feat_sizes[1], theta=rope_theta) self.rotmats = rotmats self.rope_theta = rope_theta + else: + self.compute_cis = partial( + compute_axial_cis, dim=self.internal_dim // self.num_heads, theta=rope_theta + ) + freqs_cis = self.compute_cis(end_x=feat_sizes[0], end_y=feat_sizes[1]) + self.freqs_cis = freqs_cis + + def allocate_rope_attention_weight( + self, q: Tensor + ): + # prepare weight of rope attention for dynamo export + w = h = math.sqrt(q.shape[-2]) + if USE_MAT_ROTARY_ENC: + if self.rotmats.shape[2] != q.shape[-2]: + self.rotmats = get_rotation_matrices(dim=self.internal_dim // self.num_heads, end_x=w, end_y=h, theta=self.rope_theta) + else: + if self.freqs_cis.shape[0] != q.shape[-2]: + self.freqs_cis = self.compute_cis(end_x=w, end_y=h).to(q.device) def forward( - self, q: Tensor, k: Tensor, v: Tensor, num_k_exclude_rope: int = 0 + self, q: Tensor, k: Tensor, v: Tensor, num_k_exclude_rope: Tensor ) -> Tensor: # Input projections q = self.q_proj(q) @@ -331,22 +344,22 @@ def forward( v = self._separate_heads(v, self.num_heads) # Apply rotary position encoding - w = h = math.sqrt(q.shape[-2]) - - self.freqs_cis = self.freqs_cis.to(q.device) - if self.freqs_cis.shape[0] != q.shape[-2]: - self.freqs_cis = self.compute_cis(end_x=w, end_y=h).to(q.device) - if USE_MAT_ROTARY_ENC: - self.rotmats = self.rotmats.to(q.device) - if self.rotmats.shape[0] != q.shape[-2]: - self.rotmats = get_rotation_matrices(dim=self.internal_dim // self.num_heads, end_x=w, end_y=h, theta=self.rope_theta) + #self.rotmats = self.rotmats.to(q.device) + if self.rotmats.shape[2] != q.shape[-2]: + raise("rotmat shape error " + str(self.rotmats.shape[2]) + " " + str(q.shape[-2])) + else: + #self.freqs_cis = self.freqs_cis.to(q.device) + if self.freqs_cis.shape[0] != q.shape[-2]: + raise("freqs_cis shape error " + str(self.freqs_cis.shape[0]) + " " + str(q.shape[-2])) if q.shape[-2] != k.shape[-2]: assert self.rope_k_repeat - num_k_rope = k.size(-2) - num_k_exclude_rope + num_k_rope = k.shape[-2] - num_k_exclude_rope.item() if USE_MAT_ROTARY_ENC: + torch._check_is_size(num_k_rope) + torch._check(num_k_rope < k.size(2)) q, k[:, :, :num_k_rope] = apply_rotary_matenc( q, k[:, :, :num_k_rope], diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index aea70792e..90c720233 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -890,6 +890,11 @@ def _prepare_memory_conditioned_features( memory = torch.cat(to_cat_memory, dim=0) memory_pos_embed = torch.cat(to_cat_memory_pos_embed, dim=0) + self.memory_attention.allocate_rope_attention_weight( + curr=current_vision_feats, + curr_pos=current_vision_pos_embeds, + ) + if export_to_onnx and not self.memory_attention_onnx_exported: self.memory_attention_onnx_exported = True #print("current_vision_feats", current_vision_feats[0].shape, current_vision_feats[0].dtype) @@ -924,18 +929,34 @@ def _prepare_memory_conditioned_features( pix_feat_with_mem = self.memory_attention_onnx.run(None, {"curr":current_vision_feats[0].numpy(), "memory":memory.numpy(), "curr_pos":current_vision_pos_embeds[0].numpy(), "memory_pos":memory_pos_embed.numpy(), "num_obj_ptr_tokens":num_obj_ptr_tokens_numpy}) pix_feat_with_mem = torch.Tensor(pix_feat_with_mem[0]) - if False:#export_to_tflite and not self.memory_attention_tflite_exported: + if export_to_tflite and not self.memory_attention_tflite_exported: self.memory_attention_tflite_exported = True import ai_edge_torch import tensorflow as tf - sample_inputs = (current_vision_feats[0], memory, 
current_vision_pos_embeds[0], memory_pos_embed, num_obj_ptr_tokens) + sample_inputs = (current_vision_feats[0], memory, current_vision_pos_embeds[0], memory_pos_embed, torch.tensor(num_obj_ptr_tokens)) tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS]}} edge_model = ai_edge_torch.convert(self.memory_attention, sample_inputs, _ai_edge_converter_flags=tfl_converter_flags) edge_model.export("model/memory_attention_"+model_id+".tflite") - if import_from_tflite: - pix_feat_with_mem = edge_model(sample_inputs) - pix_feat_with_mem = torch.Tensor(pix_feat_with_mem[0]) + if False:#import_from_tflite: + import tensorflow as tf + memory_encoder = tf.lite.Interpreter(model_path="model/memory_attention_"+model_id+".tflite") + memory_encoder.allocate_tensors() + input_details = memory_encoder.get_input_details() + output_details = memory_encoder.get_output_details() + memory_encoder.allocate_tensors() + + memory_encoder.set_tensor(input_details[0]["index"], current_vision_feats[0].numpy()) + memory_encoder.set_tensor(input_details[1]["index"], memory.numpy()) + memory_encoder.set_tensor(input_details[2]["index"], current_vision_pos_embeds[0].numpy()) + memory_encoder.set_tensor(input_details[3]["index"], memory_pos_embed.numpy()) + memory_encoder.set_tensor(input_details[4]["index"], num_obj_ptr_tokens.numpy()) + memory_encoder.invoke() + + pix_feat_with_mem = memory_encoder.get_tensor(output_details[1]["index"]) + pix_feat_with_mem = memory_encoder.get_tensor(output_details[0]["index"]) + pix_feat_with_mem = torch.Tensor(pix_feat_with_mem) + pix_feat_with_mem = torch.Tensor(pix_feat_with_mem) if not import_from_onnx:# and not import_from_tflite: print("begin memory attention torch") @@ -944,7 +965,7 @@ def _prepare_memory_conditioned_features( curr_pos=current_vision_pos_embeds, memory=memory, memory_pos=memory_pos_embed, - num_obj_ptr_tokens=num_obj_ptr_tokens, + num_obj_ptr_tokens=torch.tensor(num_obj_ptr_tokens), ) # reshape the output (HW)BC => BCHW From b17e0f2d93bd22d777bced156124c93aa7438cfa Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Thu, 5 Sep 2024 18:18:38 +0900 Subject: [PATCH 59/79] Separate memory 1 and memory 2 --- sam2/modeling/memory_attention.py | 54 ++++++++--------- sam2/modeling/position_encoding.py | 51 ++++++++++++---- sam2/modeling/sam/transformer.py | 94 ++++++++++++++++++++++++++--- sam2/modeling/sam2_base.py | 96 ++++++++++++++++++++---------- 4 files changed, 212 insertions(+), 83 deletions(-) diff --git a/sam2/modeling/memory_attention.py b/sam2/modeling/memory_attention.py index ab3494297..f3b649fec 100644 --- a/sam2/modeling/memory_attention.py +++ b/sam2/modeling/memory_attention.py @@ -59,23 +59,19 @@ def _forward_sa(self, tgt, query_pos): # Self-Attention tgt2 = self.norm1(tgt) q = k = tgt2 + query_pos if self.pos_enc_at_attn else tgt2 - tgt2 = self.self_attn(q, k, v=tgt2, num_k_exclude_rope=torch.tensor(0)) + tgt2 = self.self_attn.self_attn(q, k = k, v = tgt2) tgt = tgt + self.dropout1(tgt2) return tgt - def _forward_ca(self, tgt, memory, query_pos, pos, num_k_exclude_rope=torch.tensor(0)): - kwds = {} - if num_k_exclude_rope.item() > 0: - assert isinstance(self.cross_attn_image, RoPEAttention) - kwds = {"num_k_exclude_rope": num_k_exclude_rope} - + def _forward_ca(self, tgt, memory_1, memory_2, query_pos, pos_1, pos_2): # Cross-Attention tgt2 = self.norm2(tgt) - tgt2 = self.cross_attn_image( + tgt2 = self.cross_attn_image.cross_attn( q=tgt2 + query_pos if self.pos_enc_at_cross_attn_queries else tgt2, - k=memory + pos if 
self.pos_enc_at_cross_attn_keys else memory, - v=memory, - **kwds, + k_1=memory_1 + pos_1 if self.pos_enc_at_cross_attn_keys else memory_1, + v_1=memory_1, + k_2=memory_2 + pos_2 if self.pos_enc_at_cross_attn_keys else memory_2, + v_2=memory_2 ) tgt = tgt + self.dropout2(tgt2) return tgt @@ -83,15 +79,16 @@ def _forward_ca(self, tgt, memory, query_pos, pos, num_k_exclude_rope=torch.tens def forward( self, tgt, - memory, - pos: Optional[Tensor] = None, + memory_1, + memory_2, + pos_1: Optional[Tensor] = None, + pos_2: Optional[Tensor] = None, query_pos: Optional[Tensor] = None, - num_k_exclude_rope: int = 0, ) -> torch.Tensor: # Self-Attn, Cross-Attn tgt = self._forward_sa(tgt, query_pos) - tgt = self._forward_ca(tgt, memory, query_pos, pos, num_k_exclude_rope) + tgt = self._forward_ca(tgt, memory_1, memory_2, query_pos, pos_1, pos_2) # MLP tgt2 = self.norm3(tgt) tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) @@ -144,10 +141,11 @@ def allocate_rope_attention_weight( def forward( self, curr: torch.Tensor, # self-attention inputs - memory: torch.Tensor, # cross-attention inputs + memory_1: torch.Tensor, # cross-attention inputs + memory_2: torch.Tensor, # cross-attention inputs curr_pos: Optional[Tensor] = None, # pos_enc for self-attention inputs - memory_pos: Optional[Tensor] = None, # pos_enc for cross-attention inputs - num_obj_ptr_tokens: int = 0, # number of object pointer *tokens* + memory_pos_1: Optional[Tensor] = None, # pos_enc for cross-attention inputs + memory_pos_2: Optional[Tensor] = None, # pos_enc for cross-attention inputs ): if isinstance(curr, list): assert isinstance(curr_pos, list) @@ -158,7 +156,7 @@ def forward( ) assert ( - curr.shape[1] == memory.shape[1] + curr.shape[1] == memory_1.shape[1] ), "Batch size must be the same for curr and memory" output = curr @@ -169,20 +167,18 @@ def forward( # Convert to batch first output = output.transpose(0, 1) curr_pos = curr_pos.transpose(0, 1) - memory = memory.transpose(0, 1) - memory_pos = memory_pos.transpose(0, 1) + memory_1 = memory_1.transpose(0, 1) + memory_2 = memory_2.transpose(0, 1) + memory_pos_1 = memory_pos_1.transpose(0, 1) + memory_pos_2 = memory_pos_2.transpose(0, 1) for layer in self.layers: - kwds = {} - if isinstance(layer.cross_attn_image, RoPEAttention): - kwds = {"num_k_exclude_rope": num_obj_ptr_tokens} - output = layer( tgt=output, - memory=memory, - pos=memory_pos, - query_pos=curr_pos, - **kwds, + memory_1=memory_1, + memory_2=memory_2, + pos_1=memory_pos_1, + pos_2=memory_pos_2, ) normed_output = self.norm(output) diff --git a/sam2/modeling/position_encoding.py b/sam2/modeling/position_encoding.py index 3d18dee1a..1ccd0c8dc 100644 --- a/sam2/modeling/position_encoding.py +++ b/sam2/modeling/position_encoding.py @@ -198,16 +198,9 @@ def apply_rotary_enc( repeat_freqs_k: bool = False, ): xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2)) - xk_ = ( - torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2)) - if xk.shape[-2] != 0 - else None - ) + xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2)) freqs_cis = reshape_for_broadcast(freqs_cis, xq_) xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3) - if xk_ is None: - # no keys to rotate, due to dropout - return xq_out.type_as(xq).to(xq.device), xk # repeat freqs along seq_len dim to match k seq_len if repeat_freqs_k: r = xk_.shape[-2] // xq_.shape[-2] @@ -250,12 +243,46 @@ def get_rotation_matrices(dim, end_x, end_y, theta=10000.0, device=None, dtype=N def apply_rotary_matenc(xq, 
xk, rotmats, repeat_freqs_k=False): - + # オリジナル実装 (6次元テンソル処理) + #bq, hq, nq, cq = xq.shape + #bk, hk, nk, ck = xk.shape + #q_out = torch.matmul(rotmats, xq.reshape(bq, hq, nq, cq // 2, 2, 1)).flatten(3) + #k_rotmat = rotmats.repeat(1, 1, nk // nq, 1, 1, 1) if repeat_freqs_k else rotmats + #k_out = torch.matmul(k_rotmat, xk.reshape(bk, hk, nk, ck // 2, 2, 1)).flatten(3) + + # tfliteでは4次元テンソルまでしか扱えないのでバッチサイズに制約をかける + bq, hq, nq, cq = xq.shape + torch._check_is_size(bq) + torch._check_is_size(hq) + torch._check_is_size(nq) + torch._check_is_size(cq) + torch._check(bq == 1) # for dynamo trace + torch._check(hq == 1) # for dynamo trace + torch._check(cq == 256) # for dynamo trace + + #print(rotmats.shape) + + q_rotmat = rotmats.reshape(4096, 128, 2, 2) + q_out = torch.matmul(q_rotmat, xq.reshape(nq, 128, 2, 1)).reshape(1, 1, 4096, 256) + #print(q_out.shape) + + bk, hk, nk, ck = xk.shape + k_rotmat = q_rotmat.repeat(nk // nq, 1, 1, 1)# if repeat_freqs_k else rotmats # for tflite trace, repeat_freqs_k == Falseの場合は nk // nq == 1 なのでrepeatを常に呼び出しても等価になる + bk, hk, nk, ck = xk.shape + torch._check_is_size(bq == 1) + torch._check_is_size(hq == 1) + torch._check(ck == 256) + + #torch._check(xk.size(3) == 256) + + k_in = xk.reshape(nk, ck//2, 2, 1) + k_in = k_in[:k_rotmat.shape[0], :, :, :] + k_out = torch.matmul(k_rotmat, k_in).reshape(1, 1, nk // nq * 4096, 256) - q_out = torch.matmul(rotmats, xq.reshape(bq, hq, nq, cq // 2, 2, 1)).flatten(3) - k_rotmat = rotmats.repeat(1, 1, nk // nq, 1, 1, 1) if repeat_freqs_k else rotmats - k_out = torch.matmul(k_rotmat, xk.reshape(bk, hk, nk, ck // 2, 2, 1)).flatten(3) + #print("k_rotmat", k_rotmat.shape) + #print("k_in", k_in.shape) + #print("k_out", k_out.shape) return q_out, k_out diff --git a/sam2/modeling/sam/transformer.py b/sam2/modeling/sam/transformer.py index 45a318eb1..3ceca919c 100644 --- a/sam2/modeling/sam/transformer.py +++ b/sam2/modeling/sam/transformer.py @@ -330,8 +330,8 @@ def allocate_rope_attention_weight( if self.freqs_cis.shape[0] != q.shape[-2]: self.freqs_cis = self.compute_cis(end_x=w, end_y=h).to(q.device) - def forward( - self, q: Tensor, k: Tensor, v: Tensor, num_k_exclude_rope: Tensor + def self_attn( + self, q: Tensor, k: Tensor, v: Tensor ) -> Tensor: # Input projections q = self.q_proj(q) @@ -356,20 +356,17 @@ def forward( if q.shape[-2] != k.shape[-2]: assert self.rope_k_repeat - num_k_rope = k.shape[-2] - num_k_exclude_rope.item() if USE_MAT_ROTARY_ENC: - torch._check_is_size(num_k_rope) - torch._check(num_k_rope < k.size(2)) - q, k[:, :, :num_k_rope] = apply_rotary_matenc( + q, k = apply_rotary_matenc( q, - k[:, :, :num_k_rope], + k, rotmats=self.rotmats, repeat_freqs_k=self.rope_k_repeat, ) else: - q, k[:, :, :num_k_rope] = apply_rotary_enc( + q, k = apply_rotary_enc( q, - k[:, :, :num_k_rope], + k, freqs_cis=self.freqs_cis, repeat_freqs_k=self.rope_k_repeat, ) @@ -396,3 +393,82 @@ def forward( out = self.out_proj(out) return out + + def cross_attn( + self, q: Tensor, k_1: Tensor, v_1: Tensor, k_2: Tensor = None, v_2: Tensor = None + ) -> Tensor: + # Input projections + q = self.q_proj(q) + k_1 = self.k_proj(k_1) + v_1 = self.v_proj(v_1) + k_2 = self.k_proj(k_2) + v_2 = self.v_proj(v_2) + + # Separate into heads + q = self._separate_heads(q, self.num_heads) + k_1 = self._separate_heads(k_1, self.num_heads) + v_1 = self._separate_heads(v_1, self.num_heads) + k_2 = self._separate_heads(k_2, self.num_heads) + v_2 = self._separate_heads(v_2, self.num_heads) + + # Apply rotary position encoding + if USE_MAT_ROTARY_ENC: + 
#self.rotmats = self.rotmats.to(q.device) + if self.rotmats.shape[2] != q.shape[-2]: + raise("rotmat shape error " + str(self.rotmats.shape[2]) + " " + str(q.shape[-2])) + else: + #self.freqs_cis = self.freqs_cis.to(q.device) + if self.freqs_cis.shape[0] != q.shape[-2]: + raise("freqs_cis shape error " + str(self.freqs_cis.shape[0]) + " " + str(q.shape[-2])) + + if q.shape[-2] != k_1.shape[-2]: + assert self.rope_k_repeat + + if USE_MAT_ROTARY_ENC: + q, k_1 = apply_rotary_matenc( + q, + k_1, + rotmats=self.rotmats, + repeat_freqs_k=self.rope_k_repeat, + ) + else: + q, k_1 = apply_rotary_enc( + q, + k_1, + freqs_cis=self.freqs_cis, + repeat_freqs_k=self.rope_k_repeat, + ) + + #print(k_1.shape, k_2.shape) + #if k_2.shape[2] == 0: + # k = k_1 + #else: + k = torch.concat((k_1, k_2), dim = 2) + #if v_2.shape[2] == 0: + # v = v_1 + #else: + v = torch.concat((v_1, v_2), dim = 2) + + dropout_p = self.dropout_p if self.training else 0.0 + # Attention + #try: + # with sdp_kernel_context(dropout_p): + # out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p) + #except Exception as e: + if True: + # Fall back to all kernels if the Flash attention kernel fails + #warnings.warn( + # f"Flash Attention kernel failed due to: {e}\nFalling back to all available " + # f"kernels for scaled_dot_product_attention (which may have a slower speed).", + # category=UserWarning, + # stacklevel=2, + #) + global ALLOW_ALL_KERNELS + ALLOW_ALL_KERNELS = True + out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p) + + out = self._recombine_heads(out) + out = self.out_proj(out) + + return out + \ No newline at end of file diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 90c720233..aa3df17dc 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -890,11 +890,19 @@ def _prepare_memory_conditioned_features( memory = torch.cat(to_cat_memory, dim=0) memory_pos_embed = torch.cat(to_cat_memory_pos_embed, dim=0) + # 標準の実装ではforwardの中でweightが確保されるが、エクスポート時に固定するために先に確保する self.memory_attention.allocate_rope_attention_weight( curr=current_vision_feats, curr_pos=current_vision_pos_embeds, ) + # 4096の倍数のRoPEAttentionが適用される部分と手協されない部分を事前に分割する + # 動的なsliceがdynamoでエラーになるため + memory_1 = memory[:-num_obj_ptr_tokens,:,:] + memory_2 = memory[-num_obj_ptr_tokens:,:,:] + memory_pos_embed_1 = memory_pos_embed[:-num_obj_ptr_tokens,:,:] + memory_pos_embed_2 = memory_pos_embed[-num_obj_ptr_tokens:,:,:] + if export_to_onnx and not self.memory_attention_onnx_exported: self.memory_attention_onnx_exported = True #print("current_vision_feats", current_vision_feats[0].shape, current_vision_feats[0].dtype) @@ -903,12 +911,14 @@ def _prepare_memory_conditioned_features( #print("memory_pos_embed", memory_pos_embed.shape, memory_pos_embed.dtype) #print("num_obj_ptr_tokens", num_obj_ptr_tokens) torch.onnx.export( - self.memory_attention, (current_vision_feats[0], memory, current_vision_pos_embeds[0], memory_pos_embed, num_obj_ptr_tokens), 'model/memory_attention_'+model_id+'.onnx', - input_names=["curr", "memory", "curr_pos", "memory_pos", "num_obj_ptr_tokens"], + self.memory_attention, (current_vision_feats[0], memory_1, memory_2, current_vision_pos_embeds[0], memory_pos_embed_1, memory_pos_embed_2), 'model/memory_attention_'+model_id+'.onnx', + input_names=["curr", "memory_1", "memory_2", "curr_pos", "memory_pos_1", "memory_pos_2"], output_names=["pix_feat"], dynamic_axes={ - 'memory': {0: 'n'}, - 'memory_pos': {0: 'n'} + 'memory_1': {1: 'n'}, + 'memory_2': {1: 'n'}, + 'memory_pos_1': {1: 
'n'}, + 'memory_pos_2': {1: 'n'} }, verbose=False, opset_version=17 ) @@ -919,53 +929,73 @@ def _prepare_memory_conditioned_features( if self.memory_attention_onnx == None: self.memory_attention_onnx = onnxruntime.InferenceSession("model/memory_attention_"+model_id+".onnx") import numpy as np - num_obj_ptr_tokens_numpy = np.array((num_obj_ptr_tokens)).astype(np.int64) - print("curr", np.sum(current_vision_feats[0].numpy())) - print("memory", np.sum(memory.numpy())) - print("curr_pos", np.sum(current_vision_pos_embeds[0].numpy())) - print("memory_pos", np.sum(memory_pos_embed.numpy())) - print("num_obj_ptr_tokens", np.sum(num_obj_ptr_tokens_numpy)) - - pix_feat_with_mem = self.memory_attention_onnx.run(None, {"curr":current_vision_feats[0].numpy(), "memory":memory.numpy(), "curr_pos":current_vision_pos_embeds[0].numpy(), "memory_pos":memory_pos_embed.numpy(), "num_obj_ptr_tokens":num_obj_ptr_tokens_numpy}) + #num_obj_ptr_tokens_numpy = np.array((num_obj_ptr_tokens)).astype(np.int64) + #print("curr", np.sum(current_vision_feats[0].numpy())) + #print("memory", np.sum(memory.numpy())) + #print("curr_pos", np.sum(current_vision_pos_embeds[0].numpy())) + #print("memory_pos", np.sum(memory_pos_embed.numpy())) + #print("num_obj_ptr_tokens", np.sum(num_obj_ptr_tokens_numpy)) + + pix_feat_with_mem = self.memory_attention_onnx.run(None, {"curr":current_vision_feats[0].numpy(), "memory_1":memory_1.numpy(), "memory_2":memory_2.numpy(), "curr_pos":current_vision_pos_embeds[0].numpy(), "memory_pos_1":memory_pos_embed_1.numpy(), "memory_pos_2":memory_pos_embed_2.numpy()}) pix_feat_with_mem = torch.Tensor(pix_feat_with_mem[0]) if export_to_tflite and not self.memory_attention_tflite_exported: self.memory_attention_tflite_exported = True import ai_edge_torch import tensorflow as tf - sample_inputs = (current_vision_feats[0], memory, current_vision_pos_embeds[0], memory_pos_embed, torch.tensor(num_obj_ptr_tokens)) + sample_inputs = (current_vision_feats[0], memory_1, memory_2, current_vision_pos_embeds[0], memory_pos_embed_1, memory_pos_embed_2) tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS]}} edge_model = ai_edge_torch.convert(self.memory_attention, sample_inputs, _ai_edge_converter_flags=tfl_converter_flags) edge_model.export("model/memory_attention_"+model_id+".tflite") - if False:#import_from_tflite: + if import_from_tflite: import tensorflow as tf - memory_encoder = tf.lite.Interpreter(model_path="model/memory_attention_"+model_id+".tflite") - memory_encoder.allocate_tensors() - input_details = memory_encoder.get_input_details() - output_details = memory_encoder.get_output_details() - memory_encoder.allocate_tensors() + memory_attention = tf.lite.Interpreter(model_path="model/memory_attention_"+model_id+".tflite") + memory_attention.allocate_tensors() + input_details = memory_attention.get_input_details() + output_details = memory_attention.get_output_details() + memory_attention.resize_tensor_input( + input_details[5]["index"], + [memory_1.shape[0], 1, 64] + ) + memory_attention.resize_tensor_input( + input_details[1]["index"], + [memory_2.shape[0], 1, 64] + ) + memory_attention.resize_tensor_input( + input_details[4]["index"], + [memory_pos_embed_1.shape[0], 1, 64] + ) + memory_attention.resize_tensor_input( + input_details[0]["index"], + [memory_pos_embed_2.shape[0], 1, 64] + ) + memory_attention.allocate_tensors() - memory_encoder.set_tensor(input_details[0]["index"], current_vision_feats[0].numpy()) - memory_encoder.set_tensor(input_details[1]["index"], 
memory.numpy()) - memory_encoder.set_tensor(input_details[2]["index"], current_vision_pos_embeds[0].numpy()) - memory_encoder.set_tensor(input_details[3]["index"], memory_pos_embed.numpy()) - memory_encoder.set_tensor(input_details[4]["index"], num_obj_ptr_tokens.numpy()) - memory_encoder.invoke() + memory_attention.set_tensor(input_details[3]["index"], current_vision_feats[0].numpy()) + memory_attention.set_tensor(input_details[5]["index"], memory_1.numpy()) + memory_attention.set_tensor(input_details[1]["index"], memory_2.numpy()) + memory_attention.set_tensor(input_details[2]["index"], current_vision_pos_embeds[0].numpy()) + memory_attention.set_tensor(input_details[4]["index"], memory_pos_embed_1.numpy()) + memory_attention.set_tensor(input_details[0]["index"], memory_pos_embed_2.numpy()) + memory_attention.invoke() - pix_feat_with_mem = memory_encoder.get_tensor(output_details[1]["index"]) - pix_feat_with_mem = memory_encoder.get_tensor(output_details[0]["index"]) - pix_feat_with_mem = torch.Tensor(pix_feat_with_mem) + pix_feat_with_mem = memory_attention.get_tensor(output_details[0]["index"]) pix_feat_with_mem = torch.Tensor(pix_feat_with_mem) - if not import_from_onnx:# and not import_from_tflite: - print("begin memory attention torch") + if not import_from_onnx and not import_from_tflite: + #print("begin memory attention torch") + #print("current_vision_feats", current_vision_feats[0].shape) + #print("current_vision_pos_embeds", current_vision_pos_embeds[0].shape) + #print("memory", memory.shape) + #print("memory_pos_embed", memory_pos_embed.shape) pix_feat_with_mem = self.memory_attention( curr=current_vision_feats, + memory_1=memory_1, + memory_2=memory_2, curr_pos=current_vision_pos_embeds, - memory=memory, - memory_pos=memory_pos_embed, - num_obj_ptr_tokens=torch.tensor(num_obj_ptr_tokens), + memory_pos_1=memory_pos_embed_1, + memory_pos_2=memory_pos_embed_2, ) # reshape the output (HW)BC => BCHW From 1532106d57a1c5077d80d0050ef033b23f7ba17e Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Thu, 5 Sep 2024 18:47:03 +0900 Subject: [PATCH 60/79] Fix onnx dynamic shape --- sam2/modeling/position_encoding.py | 20 ++++++++++---------- sam2/modeling/sam2_base.py | 13 +++++++++---- 2 files changed, 19 insertions(+), 14 deletions(-) diff --git a/sam2/modeling/position_encoding.py b/sam2/modeling/position_encoding.py index 1ccd0c8dc..f262431c9 100644 --- a/sam2/modeling/position_encoding.py +++ b/sam2/modeling/position_encoding.py @@ -253,13 +253,13 @@ def apply_rotary_matenc(xq, xk, rotmats, repeat_freqs_k=False): # tfliteでは4次元テンソルまでしか扱えないのでバッチサイズに制約をかける bq, hq, nq, cq = xq.shape - torch._check_is_size(bq) - torch._check_is_size(hq) - torch._check_is_size(nq) - torch._check_is_size(cq) - torch._check(bq == 1) # for dynamo trace - torch._check(hq == 1) # for dynamo trace - torch._check(cq == 256) # for dynamo trace + #torch._check_is_size(bq) + #torch._check_is_size(hq) + #torch._check_is_size(nq) + #torch._check_is_size(cq) + #torch._check(bq == 1) # for dynamo trace + #torch._check(hq == 1) # for dynamo trace + #torch._check(cq == 256) # for dynamo trace #print(rotmats.shape) @@ -271,9 +271,9 @@ def apply_rotary_matenc(xq, xk, rotmats, repeat_freqs_k=False): k_rotmat = q_rotmat.repeat(nk // nq, 1, 1, 1)# if repeat_freqs_k else rotmats # for tflite trace, repeat_freqs_k == Falseの場合は nk // nq == 1 なのでrepeatを常に呼び出しても等価になる bk, hk, nk, ck = xk.shape - torch._check_is_size(bq == 1) - torch._check_is_size(hq == 1) - torch._check(ck == 256) + #torch._check_is_size(bq == 1) + 
#torch._check_is_size(hq == 1) + #torch._check(ck == 256) #torch._check(xk.size(3) == 256) diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index aa3df17dc..9ea16217f 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -915,13 +915,18 @@ def _prepare_memory_conditioned_features( input_names=["curr", "memory_1", "memory_2", "curr_pos", "memory_pos_1", "memory_pos_2"], output_names=["pix_feat"], dynamic_axes={ - 'memory_1': {1: 'n'}, - 'memory_2': {1: 'n'}, - 'memory_pos_1': {1: 'n'}, - 'memory_pos_2': {1: 'n'} + 'memory_1': {0: 'n_1'}, + 'memory_2': {0: 'n_2'}, + 'memory_pos_1': {0: 'n_1'}, + 'memory_pos_2': {0: 'n_2'} }, verbose=False, opset_version=17 ) + #export_options = torch.onnx.ExportOptions(dynamic_shapes=True) + #onnx_program =torch.onnx.dynamo_export( + # self.memory_attention, current_vision_feats[0], memory_1, memory_2, current_vision_pos_embeds[0], memory_pos_embed_1, memory_pos_embed_2, export_options=export_options + #) + #onnx_program.save('model/memory_attention_'+model_id+'.onnx') if import_from_onnx: print("begin memory attention onnx") From a7a2792f4b59a866908bdfaa9db55e990ac2979e Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Fri, 6 Sep 2024 17:47:17 +0900 Subject: [PATCH 61/79] Improve export code --- sam2/modeling/position_encoding.py | 6 ++--- sam2/modeling/sam/transformer.py | 40 +++++++++++++++--------------- sam2/modeling/sam2_base.py | 18 ++++++++++++-- 3 files changed, 39 insertions(+), 25 deletions(-) diff --git a/sam2/modeling/position_encoding.py b/sam2/modeling/position_encoding.py index f262431c9..9eefe8883 100644 --- a/sam2/modeling/position_encoding.py +++ b/sam2/modeling/position_encoding.py @@ -277,9 +277,9 @@ def apply_rotary_matenc(xq, xk, rotmats, repeat_freqs_k=False): #torch._check(xk.size(3) == 256) - k_in = xk.reshape(nk, ck//2, 2, 1) - k_in = k_in[:k_rotmat.shape[0], :, :, :] - k_out = torch.matmul(k_rotmat, k_in).reshape(1, 1, nk // nq * 4096, 256) + k_in = xk.reshape(nk, 128, 2, 1) + #k_in = k_in[:k_rotmat.shape[0], :, :, :] + k_out = torch.matmul(k_rotmat, k_in).reshape(1, 1, nk, 256) #print("k_rotmat", k_rotmat.shape) #print("k_in", k_in.shape) diff --git a/sam2/modeling/sam/transformer.py b/sam2/modeling/sam/transformer.py index 3ceca919c..e34741a0f 100644 --- a/sam2/modeling/sam/transformer.py +++ b/sam2/modeling/sam/transformer.py @@ -344,17 +344,17 @@ def self_attn( v = self._separate_heads(v, self.num_heads) # Apply rotary position encoding - if USE_MAT_ROTARY_ENC: - #self.rotmats = self.rotmats.to(q.device) - if self.rotmats.shape[2] != q.shape[-2]: - raise("rotmat shape error " + str(self.rotmats.shape[2]) + " " + str(q.shape[-2])) - else: - #self.freqs_cis = self.freqs_cis.to(q.device) - if self.freqs_cis.shape[0] != q.shape[-2]: - raise("freqs_cis shape error " + str(self.freqs_cis.shape[0]) + " " + str(q.shape[-2])) + #if USE_MAT_ROTARY_ENC: + # #self.rotmats = self.rotmats.to(q.device) + # if self.rotmats.shape[2] != q.shape[-2]: + # raise("rotmat shape error " + str(self.rotmats.shape[2]) + " " + str(q.shape[-2])) + #else: + # #self.freqs_cis = self.freqs_cis.to(q.device) + # if self.freqs_cis.shape[0] != q.shape[-2]: + # raise("freqs_cis shape error " + str(self.freqs_cis.shape[0]) + " " + str(q.shape[-2])) - if q.shape[-2] != k.shape[-2]: - assert self.rope_k_repeat + #if q.shape[-2] != k.shape[-2]: + # assert self.rope_k_repeat if USE_MAT_ROTARY_ENC: q, k = apply_rotary_matenc( @@ -412,17 +412,17 @@ def cross_attn( v_2 = self._separate_heads(v_2, self.num_heads) # Apply rotary 
position encoding - if USE_MAT_ROTARY_ENC: - #self.rotmats = self.rotmats.to(q.device) - if self.rotmats.shape[2] != q.shape[-2]: - raise("rotmat shape error " + str(self.rotmats.shape[2]) + " " + str(q.shape[-2])) - else: - #self.freqs_cis = self.freqs_cis.to(q.device) - if self.freqs_cis.shape[0] != q.shape[-2]: - raise("freqs_cis shape error " + str(self.freqs_cis.shape[0]) + " " + str(q.shape[-2])) + #if USE_MAT_ROTARY_ENC: + # #self.rotmats = self.rotmats.to(q.device) + # if self.rotmats.shape[2] != q.shape[-2]: + # raise("rotmat shape error " + str(self.rotmats.shape[2]) + " " + str(q.shape[-2])) + #else: + # #self.freqs_cis = self.freqs_cis.to(q.device) + # if self.freqs_cis.shape[0] != q.shape[-2]: + # raise("freqs_cis shape error " + str(self.freqs_cis.shape[0]) + " " + str(q.shape[-2])) - if q.shape[-2] != k_1.shape[-2]: - assert self.rope_k_repeat + #if q.shape[-2] != k_1.shape[-2]: + # assert self.rope_k_repeat if USE_MAT_ROTARY_ENC: q, k_1 = apply_rotary_matenc( diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 9ea16217f..21437facc 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -950,13 +950,27 @@ def _prepare_memory_conditioned_features( import tensorflow as tf sample_inputs = (current_vision_feats[0], memory_1, memory_2, current_vision_pos_embeds[0], memory_pos_embed_1, memory_pos_embed_2) tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS]}} - edge_model = ai_edge_torch.convert(self.memory_attention, sample_inputs, _ai_edge_converter_flags=tfl_converter_flags) + n_1 = torch.export.Dim("n_1", min=1, max=256) + n_4096 = n_1 * 4096 + n_2 = torch.export.Dim("n_2", min=1, max=256) + n_4 = n_2 * 4 + dynamic_shapes={ + 'curr': None, + 'memory_1': {0: n_4096}, + 'memory_2': {0: n_4}, + 'curr_pos': None, + 'memory_pos_1': {0: n_4096}, + 'memory_pos_2': {0: n_4} + } + edge_model = ai_edge_torch.convert(self.memory_attention, sample_inputs, _ai_edge_converter_flags=tfl_converter_flags, dynamic_shapes=dynamic_shapes) edge_model.export("model/memory_attention_"+model_id+".tflite") if import_from_tflite: import tensorflow as tf + import os + #os.environ['TF_ENABLE_XNNPACK'] = '0' memory_attention = tf.lite.Interpreter(model_path="model/memory_attention_"+model_id+".tflite") - memory_attention.allocate_tensors() + #memory_attention.allocate_tensors() input_details = memory_attention.get_input_details() output_details = memory_attention.get_output_details() memory_attention.resize_tensor_input( From ccc4b4d3c8bae0d058e7ea3a9dc2416c6f6ab04a Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Fri, 6 Sep 2024 18:39:28 +0900 Subject: [PATCH 62/79] Fix shape --- sam2/modeling/position_encoding.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sam2/modeling/position_encoding.py b/sam2/modeling/position_encoding.py index 9eefe8883..6f8666cb9 100644 --- a/sam2/modeling/position_encoding.py +++ b/sam2/modeling/position_encoding.py @@ -268,7 +268,7 @@ def apply_rotary_matenc(xq, xk, rotmats, repeat_freqs_k=False): #print(q_out.shape) bk, hk, nk, ck = xk.shape - k_rotmat = q_rotmat.repeat(nk // nq, 1, 1, 1)# if repeat_freqs_k else rotmats # for tflite trace, repeat_freqs_k == Falseの場合は nk // nq == 1 なのでrepeatを常に呼び出しても等価になる + k_rotmat = q_rotmat.repeat(nk // 4096, 1, 1, 1)# if repeat_freqs_k else rotmats # for tflite trace, repeat_freqs_k == Falseの場合は nk // nq == 1 なのでrepeatを常に呼び出しても等価になる bk, hk, nk, ck = xk.shape #torch._check_is_size(bq == 1) From dfed5fe48ee987df0be37ab58e0b7bad51c5f268 Mon 
Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Fri, 6 Sep 2024 19:35:23 +0900 Subject: [PATCH 63/79] Fix num maskmem for tflite --- export_video_predictor.py | 5 ++++- sam2/modeling/sam2_base.py | 40 ++++++++++++++++++++++++-------------- 2 files changed, 29 insertions(+), 16 deletions(-) diff --git a/export_video_predictor.py b/export_video_predictor.py index 00dde3f3c..0f76eeacd 100644 --- a/export_video_predictor.py +++ b/export_video_predictor.py @@ -48,7 +48,10 @@ from sam2.build_sam import build_sam2_video_predictor -predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device=device) +if export_to_tflite or import_from_tflite: + predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device=device, num_maskmem=1, max_obj_ptrs_in_encoder=1) +else: + predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device=device) def show_mask(mask, ax, obj_id=None, random_color=False): diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 21437facc..69bcbac76 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -207,7 +207,7 @@ def __init__( assert(self.image_size == 1024) assert(self.num_feature_levels == 3) assert(self.hidden_dim == 256) - assert(self.num_maskmem == 7) + assert(self.num_maskmem == 1 or self.num_maskmem == 7) assert(self.directly_add_no_mem_embed == True) #assert(self.training == False) assert(self.mem_dim == 64) @@ -235,7 +235,7 @@ def __init__( assert(self.sam_mask_decoder.dynamic_multimask_stability_thresh == 0.98) assert(self.max_cond_frames_in_attn == -1) assert(self.memory_temporal_stride_for_eval == 1) - assert(self.max_obj_ptrs_in_encoder == 16) + assert(self.max_obj_ptrs_in_encoder == 1 or self.max_obj_ptrs_in_encoder == 16) assert(self.only_obj_ptrs_in_the_past_for_eval == True) assert(self.multimask_output_for_tracking == True) assert(self.use_multimask_token_for_obj_ptr == True) @@ -903,6 +903,13 @@ def _prepare_memory_conditioned_features( memory_pos_embed_1 = memory_pos_embed[:-num_obj_ptr_tokens,:,:] memory_pos_embed_2 = memory_pos_embed[-num_obj_ptr_tokens:,:,:] + print("memory attention shape") + print("curr", current_vision_feats[0].shape) + print("memory", memory.shape) + print("curr_pos", current_vision_pos_embeds[0].shape) + print("memory_pos", memory_pos_embed.shape) + print("num_obj_ptr_tokens", num_obj_ptr_tokens) + if export_to_onnx and not self.memory_attention_onnx_exported: self.memory_attention_onnx_exported = True #print("current_vision_feats", current_vision_feats[0].shape, current_vision_feats[0].dtype) @@ -950,19 +957,22 @@ def _prepare_memory_conditioned_features( import tensorflow as tf sample_inputs = (current_vision_feats[0], memory_1, memory_2, current_vision_pos_embeds[0], memory_pos_embed_1, memory_pos_embed_2) tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS]}} - n_1 = torch.export.Dim("n_1", min=1, max=256) - n_4096 = n_1 * 4096 - n_2 = torch.export.Dim("n_2", min=1, max=256) - n_4 = n_2 * 4 - dynamic_shapes={ - 'curr': None, - 'memory_1': {0: n_4096}, - 'memory_2': {0: n_4}, - 'curr_pos': None, - 'memory_pos_1': {0: n_4096}, - 'memory_pos_2': {0: n_4} - } - edge_model = ai_edge_torch.convert(self.memory_attention, sample_inputs, _ai_edge_converter_flags=tfl_converter_flags, dynamic_shapes=dynamic_shapes) + if self.num_maskmem == 1 and self.max_obj_ptrs_in_encoder == 1: + edge_model = ai_edge_torch.convert(self.memory_attention, sample_inputs, _ai_edge_converter_flags=tfl_converter_flags) + else: + n_1 = 
torch.export.Dim("n_1", min=1, max=256) + n_4096 = n_1 * 4096 + n_2 = torch.export.Dim("n_2", min=1, max=256) + n_4 = n_2 * 4 + dynamic_shapes={ + 'curr': None, + 'memory_1': {0: n_4096}, + 'memory_2': {0: n_4}, + 'curr_pos': None, + 'memory_pos_1': {0: n_4096}, + 'memory_pos_2': {0: n_4} + } + edge_model = ai_edge_torch.convert(self.memory_attention, sample_inputs, _ai_edge_converter_flags=tfl_converter_flags, dynamic_shapes=dynamic_shapes) edge_model.export("model/memory_attention_"+model_id+".tflite") if import_from_tflite: From 1dbe5c6b88e05bab4c2195d3b5bf6594a08bbde6 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Sat, 7 Sep 2024 11:07:43 +0900 Subject: [PATCH 64/79] Added model link --- README.md | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/README.md b/README.md index c6b939bf1..7badaebbc 100644 --- a/README.md +++ b/README.md @@ -23,6 +23,7 @@ tflite ``` torch 2.4.0 ai-edge-torch 0.2.0 +tf-nightly 2.18.0.dev20240905 ``` ## Export and Inference @@ -63,16 +64,46 @@ python3 export_video_predictor.py --framework torch ## Artifacts +The deliverables will be stored below. + ``` output/* model/* ``` +You can also download it from the following. + +### ONNX + +- https://storage.googleapis.com/ailia-models/segment-anything-2/image_encoder_hiera_t.onnx +- https://storage.googleapis.com/ailia-models/segment-anything-2/prompt_encoder_hiera_t.onnx +- https://storage.googleapis.com/ailia-models/segment-anything-2/mask_decoder_hiera_t.onnx +- https://storage.googleapis.com/ailia-models/segment-anything-2/memory_encoder_hiera_t.onnx +- https://storage.googleapis.com/ailia-models/segment-anything-2/memory_attention_hiera_t.onnx +- https://storage.googleapis.com/ailia-models/segment-anything-2/mlp_hiera_t.onnx + +In addition, it is planned to update to use a 6-dimensional MatMul in MemoryAttention in the future. + +### TFLITE + +- https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/image_encoder_hiera_t.tflite +- https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/mask_decoder_hiera_t.tflite +- https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/mlp_hiera_t.tflite +- https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/memory_attention_hiera_t.tflite +- https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/memory_encoder_hiera_t.tflite + +The memory attention in tflite does not support dynamic shapes, so num_maskmem and max_obj_ptrs_in_encoder need to be fixed to 1. + ## Inference Example +Image mode + - [ailia-models](https://github.com/axinc-ai/ailia-models/tree/master/image_segmentation/segment-anything-2) - [ailia-models-tflite](https://github.com/axinc-ai/ailia-models-tflite/pull/90) +Video mode +- [ailia-models PR](https://github.com/axinc-ai/ailia-models/pull/1539) + ## Original document - [README_ORIGINAL.md](README_ORIGINAL.md) From 0950d3e6acdeccfbe5c90afd0dd64c5c4a94a66b Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Mon, 9 Sep 2024 13:10:02 +0900 Subject: [PATCH 65/79] Change model name to opt --- README.md | 8 ++++---- sam2/modeling/sam2_base.py | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 7badaebbc..2097a888d 100644 --- a/README.md +++ b/README.md @@ -79,18 +79,18 @@ You can also download it from the following. 
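A quick way to sanity-check any of the ONNX artifacts listed in the README above, independent of the SAM 2 code base, is to load them with `onnxruntime` and feed zero tensors. The sketch below is illustrative only and not part of the patch: it assumes the file has been downloaded into `model/`, reads input names and shapes from the graph rather than hard-coding them, assumes all inputs are `float32` (true for the image encoder, not necessarily for every exported graph), and only verifies that the model loads and runs — it says nothing about accuracy.

```python
# Minimal smoke test for an exported ONNX artifact (illustrative sketch, not part of the repository).
import numpy as np
import onnxruntime

session = onnxruntime.InferenceSession("model/image_encoder_hiera_t.onnx")

feeds = {}
for inp in session.get_inputs():
    # Replace dynamic dimensions (None or symbolic names) with 1; assume float32 inputs.
    shape = [d if isinstance(d, int) else 1 for d in inp.shape]
    feeds[inp.name] = np.zeros(shape, dtype=np.float32)

for meta, out in zip(session.get_outputs(), session.run(None, feeds)):
    print(meta.name, out.shape)
```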
- https://storage.googleapis.com/ailia-models/segment-anything-2/prompt_encoder_hiera_t.onnx - https://storage.googleapis.com/ailia-models/segment-anything-2/mask_decoder_hiera_t.onnx - https://storage.googleapis.com/ailia-models/segment-anything-2/memory_encoder_hiera_t.onnx -- https://storage.googleapis.com/ailia-models/segment-anything-2/memory_attention_hiera_t.onnx - https://storage.googleapis.com/ailia-models/segment-anything-2/mlp_hiera_t.onnx - -In addition, it is planned to update to use a 6-dimensional MatMul in MemoryAttention in the future. +- https://storage.googleapis.com/ailia-models/segment-anything-2/memory_attention_hiera_t.onnx (6dim matmul, batch = N) +- https://storage.googleapis.com/ailia-models/segment-anything-2/memory_attention_hiera_t.opt.onnx (4dim matmul, batch = 1) ### TFLITE - https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/image_encoder_hiera_t.tflite +- https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/prompt_encoder_hiera_t.tflite - https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/mask_decoder_hiera_t.tflite - https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/mlp_hiera_t.tflite -- https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/memory_attention_hiera_t.tflite - https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/memory_encoder_hiera_t.tflite +- https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/memory_attention_hiera_t.tflite (4dim matmul, batch = 1, num_maskmem = 1) The memory attention in tflite does not support dynamic shapes, so num_maskmem and max_obj_ptrs_in_encoder need to be fixed to 1. diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 69bcbac76..658be1efd 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -918,7 +918,7 @@ def _prepare_memory_conditioned_features( #print("memory_pos_embed", memory_pos_embed.shape, memory_pos_embed.dtype) #print("num_obj_ptr_tokens", num_obj_ptr_tokens) torch.onnx.export( - self.memory_attention, (current_vision_feats[0], memory_1, memory_2, current_vision_pos_embeds[0], memory_pos_embed_1, memory_pos_embed_2), 'model/memory_attention_'+model_id+'.onnx', + self.memory_attention, (current_vision_feats[0], memory_1, memory_2, current_vision_pos_embeds[0], memory_pos_embed_1, memory_pos_embed_2), 'model/memory_attention_'+model_id+'.opt.onnx', input_names=["curr", "memory_1", "memory_2", "curr_pos", "memory_pos_1", "memory_pos_2"], output_names=["pix_feat"], dynamic_axes={ @@ -939,7 +939,7 @@ def _prepare_memory_conditioned_features( print("begin memory attention onnx") import onnxruntime if self.memory_attention_onnx == None: - self.memory_attention_onnx = onnxruntime.InferenceSession("model/memory_attention_"+model_id+".onnx") + self.memory_attention_onnx = onnxruntime.InferenceSession("model/memory_attention_"+model_id+".opt.onnx") import numpy as np #num_obj_ptr_tokens_numpy = np.array((num_obj_ptr_tokens)).astype(np.int64) #print("curr", np.sum(current_vision_feats[0].numpy())) From b571a156a134f5d8d816a8226d6d9636c8935cd8 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Mon, 9 Sep 2024 13:43:35 +0900 Subject: [PATCH 66/79] Fix num maskmem for tflite --- export_video_predictor.py | 7 +++---- sam2/modeling/sam2_base.py | 10 ++++++++-- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/export_video_predictor.py b/export_video_predictor.py index 0f76eeacd..83fbfa07a 100644 --- a/export_video_predictor.py +++ 
b/export_video_predictor.py @@ -48,11 +48,10 @@ from sam2.build_sam import build_sam2_video_predictor -if export_to_tflite or import_from_tflite: - predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device=device, num_maskmem=1, max_obj_ptrs_in_encoder=1) -else: - predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device=device) +predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device=device) +if export_to_tflite or import_from_tflite: + predictor.set_num_maskmem(num_maskmem=1, max_obj_ptrs_in_encoder=1) def show_mask(mask, ax, obj_id=None, random_color=False): if random_color: diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 658be1efd..5d724ca32 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -207,7 +207,7 @@ def __init__( assert(self.image_size == 1024) assert(self.num_feature_levels == 3) assert(self.hidden_dim == 256) - assert(self.num_maskmem == 1 or self.num_maskmem == 7) + assert(self.num_maskmem == 7) assert(self.directly_add_no_mem_embed == True) #assert(self.training == False) assert(self.mem_dim == 64) @@ -235,11 +235,17 @@ def __init__( assert(self.sam_mask_decoder.dynamic_multimask_stability_thresh == 0.98) assert(self.max_cond_frames_in_attn == -1) assert(self.memory_temporal_stride_for_eval == 1) - assert(self.max_obj_ptrs_in_encoder == 1 or self.max_obj_ptrs_in_encoder == 16) + assert(self.max_obj_ptrs_in_encoder == 16) assert(self.only_obj_ptrs_in_the_past_for_eval == True) assert(self.multimask_output_for_tracking == True) assert(self.use_multimask_token_for_obj_ptr == True) + def set_num_maskmem(self, num_maskmem, max_obj_ptrs_in_encoder): + self.num_maskmem = num_maskmem + self.max_obj_ptrs_in_encoder = max_obj_ptrs_in_encoder + assert(self.num_maskmem == 1 or self.num_maskmem == 7) + assert(self.max_obj_ptrs_in_encoder == 1 or self.max_obj_ptrs_in_encoder == 16) + @property def device(self): return next(self.parameters()).device From cfe762f4256fbaeceff290edb8e53b3c4e5ec037 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Tue, 10 Sep 2024 13:17:59 +0900 Subject: [PATCH 67/79] Update inference example --- README.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/README.md b/README.md index 2097a888d..f9f0ac68d 100644 --- a/README.md +++ b/README.md @@ -96,14 +96,9 @@ The memory attention in tflite does not support dynamic shapes, so num_maskmem a ## Inference Example -Image mode - - [ailia-models](https://github.com/axinc-ai/ailia-models/tree/master/image_segmentation/segment-anything-2) - [ailia-models-tflite](https://github.com/axinc-ai/ailia-models-tflite/pull/90) -Video mode -- [ailia-models PR](https://github.com/axinc-ai/ailia-models/pull/1539) - ## Original document - [README_ORIGINAL.md](README_ORIGINAL.md) From f9327435dd8a22abcff5b795da09a24d040039d4 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Thu, 12 Sep 2024 13:52:09 +0900 Subject: [PATCH 68/79] Implement image size option --- export_image_predictor.py | 9 +++++++-- sam2/build_sam.py | 2 ++ sam2/modeling/sam2_base.py | 6 +++--- sam2/sam2_image_predictor.py | 13 ++++++++----- 4 files changed, 20 insertions(+), 10 deletions(-) diff --git a/export_image_predictor.py b/export_image_predictor.py index 109fdda8c..d4dd38b4a 100644 --- a/export_image_predictor.py +++ b/export_image_predictor.py @@ -7,6 +7,7 @@ parser.add_argument('--framework', default="onnx", choices=["onnx", "tflite", "torch"]) parser.add_argument('--accuracy', default="float", choices=["float", "int8"]) 
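A note on the `--image_size` option added in this patch: the later patches derive several fixed tensor sizes from it (the backbone feature maps, the dummy mask prompt, and the RoPE token count that the dedicated 1024 and 512 rotary paths hard-code), which is why only 512 and 1024 are accepted. The helper below is a hypothetical illustration, not part of the repository; it simply restates those relationships.

```python
# Hypothetical helper: sizes derived from --image_size (restates relationships used in these patches).
def derived_sizes(image_size: int) -> dict:
    assert image_size in (512, 1024)
    return {
        # backbone feature maps, cf. SAM2ImagePredictor._bb_feat_sizes
        "bb_feat_sizes": [(image_size // k, image_size // k) for k in (4, 8, 16)],
        # resolution of the dummy mask prompt (image_size // 4)
        "mask_prompt_hw": (image_size // 4, image_size // 4),
        # spatial tokens handled by the RoPE memory attention: 4096 at 1024, 1024 at 512
        "rope_tokens": (image_size // 16) ** 2,
    }

print(derived_sizes(1024))  # bb_feat_sizes [(256, 256), (128, 128), (64, 64)], rope_tokens 4096
print(derived_sizes(512))   # bb_feat_sizes [(128, 128), (64, 64), (32, 32)], rope_tokens 1024
```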
parser.add_argument('--mode', default="both", choices=["both", "import", "export"]) +parser.add_argument('--image_size', default=1024, type=int, choices=[512, 1024]) args = parser.parse_args() import os @@ -53,6 +54,10 @@ print("unknown model id") exit() +# resolution settings +if args.image_size == 512: + model_id = model_id + "_512" + # use cpu for export device = torch.device("cpu") @@ -107,9 +112,9 @@ def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_l image = Image.open('notebooks/images/truck.jpg') image = np.array(image.convert("RGB")) -sam2_model = build_sam2(model_cfg, sam2_checkpoint, device=device) +sam2_model = build_sam2(model_cfg, sam2_checkpoint, device=device, image_size=args.image_size) -predictor = SAM2ImagePredictor(sam2_model) +predictor = SAM2ImagePredictor(sam2_model, image_size = args.image_size) predictor.set_image(image, export_to_onnx = export_to_onnx_image_encoder, export_to_tflite = export_to_tflite_image_encoder, diff --git a/sam2/build_sam.py b/sam2/build_sam.py index 3a29eda3c..0040b87b6 100644 --- a/sam2/build_sam.py +++ b/sam2/build_sam.py @@ -19,6 +19,7 @@ def build_sam2( mode="eval", hydra_overrides_extra=[], apply_postprocessing=True, + image_size=1024, **kwargs, ): @@ -32,6 +33,7 @@ def build_sam2( ] # Read config and init model cfg = compose(config_name=config_file, overrides=hydra_overrides_extra) + cfg.model.image_size = image_size OmegaConf.resolve(cfg) model = instantiate(cfg.model, _recursive_=True) _load_checkpoint(model, ckpt_path) diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 5d724ca32..5264f0725 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -204,7 +204,7 @@ def __init__( self.memory_encoder_onnx = None # Check decoder sample parameter - assert(self.image_size == 1024) + assert(self.image_size == 512 or self.image_size == 1024) assert(self.num_feature_levels == 3) assert(self.hidden_dim == 256) assert(self.num_maskmem == 7) @@ -396,7 +396,7 @@ def _forward_sam_heads( if sam_mask_prompt is None: import numpy as np - mask_input_dummy = torch.Tensor(np.zeros((1, 256, 256))) + mask_input_dummy = torch.Tensor(np.zeros((1, self.image_size // 4, self.image_size // 4))) masks_enable = torch.tensor([0], dtype=torch.int) else: mask_input_dummy = sam_mask_prompt @@ -507,7 +507,7 @@ def _forward_sam_heads( print("backbone_features", backbone_features.shape) if sam_mask_prompt is None: import numpy as np - mask_input_dummy = torch.Tensor(np.zeros((1, 256, 256))) + mask_input_dummy = torch.Tensor(np.zeros((1, self.image_size // 4, self.image_size // 4))) masks_enable = torch.tensor([0], dtype=torch.int) else: mask_input_dummy = sam_mask_prompt diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index a5ee2587a..ee1648c5e 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -26,6 +26,7 @@ def __init__( mask_threshold=0.0, max_hole_area=0.0, max_sprinkle_area=0.0, + image_size=1024, **kwargs, ) -> None: """ @@ -44,7 +45,7 @@ def __init__( super().__init__() self.model = sam_model self._transforms = SAM2Transforms( - resolution=self.model.image_size, + resolution=image_size, mask_threshold=mask_threshold, max_hole_area=max_hole_area, max_sprinkle_area=max_sprinkle_area, @@ -62,11 +63,13 @@ def __init__( # Spatial dim for backbone feature maps self._bb_feat_sizes = [ - (256, 256), - (128, 128), - (64, 64), + (image_size // 4, image_size // 4), + (image_size // 8, image_size // 8), + (image_size // 16, image_size // 16), ] + 
self.image_size = image_size + @classmethod def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2ImagePredictor": """ @@ -498,7 +501,7 @@ def _predict( if concat_points is None: raise ("concat points must be exists") # Noneの場合はtensorサイズが0のテンソルを返さないといけないためwhereで組めない if mask_input is None: - mask_input_dummy = torch.Tensor(np.zeros((1, 256, 256))) + mask_input_dummy = torch.Tensor(np.zeros((1, self.image_size // 4, self.image_size // 4))) masks_enable = torch.tensor([0], dtype=torch.int) # boolだとonnxへのエクスポートのwhereでエラーになる else: mask_input_dummy = mask_input From 285a6bf01fcdcf9f8089d0bfc9cbe94d450e4811 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Thu, 12 Sep 2024 14:15:23 +0900 Subject: [PATCH 69/79] Implement image size 512 --- export_image_predictor.py | 2 +- export_video_predictor.py | 7 ++++- sam2/build_sam.py | 2 ++ sam2/modeling/position_encoding.py | 14 ++++++++++ sam2/modeling/sam/transformer.py | 43 +++++++++++++++++++++--------- sam2/sam2_image_predictor.py | 12 ++++----- sam2/sam2_video_predictor.py | 2 +- 7 files changed, 59 insertions(+), 23 deletions(-) diff --git a/export_image_predictor.py b/export_image_predictor.py index d4dd38b4a..fb202a3ee 100644 --- a/export_image_predictor.py +++ b/export_image_predictor.py @@ -114,7 +114,7 @@ def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_l sam2_model = build_sam2(model_cfg, sam2_checkpoint, device=device, image_size=args.image_size) -predictor = SAM2ImagePredictor(sam2_model, image_size = args.image_size) +predictor = SAM2ImagePredictor(sam2_model) predictor.set_image(image, export_to_onnx = export_to_onnx_image_encoder, export_to_tflite = export_to_tflite_image_encoder, diff --git a/export_video_predictor.py b/export_video_predictor.py index 83fbfa07a..4c06a4a28 100644 --- a/export_video_predictor.py +++ b/export_video_predictor.py @@ -7,6 +7,7 @@ parser.add_argument('--framework', default="onnx", choices=["onnx", "tflite", "torch"]) parser.add_argument('--accuracy', default="float", choices=["float", "int8"]) parser.add_argument('--mode', default="both", choices=["both", "import", "export"]) +parser.add_argument('--image_size', default=1024, type=int, choices=[512, 1024]) args = parser.parse_args() import os @@ -43,12 +44,16 @@ else: raise("unknown model type") +# resolution settings +if args.image_size == 512: + model_id = model_id + "_512" + device = torch.device("cpu") print(f"using device: {device}") from sam2.build_sam import build_sam2_video_predictor -predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device=device) +predictor = build_sam2_video_predictor(model_cfg, sam2_checkpoint, device=device, image_size=args.image_size) if export_to_tflite or import_from_tflite: predictor.set_num_maskmem(num_maskmem=1, max_obj_ptrs_in_encoder=1) diff --git a/sam2/build_sam.py b/sam2/build_sam.py index 0040b87b6..69d165aef 100644 --- a/sam2/build_sam.py +++ b/sam2/build_sam.py @@ -50,6 +50,7 @@ def build_sam2_video_predictor( mode="eval", hydra_overrides_extra=[], apply_postprocessing=True, + image_size=1024, **kwargs, ): hydra_overrides = [ @@ -71,6 +72,7 @@ def build_sam2_video_predictor( # Read config and init model cfg = compose(config_name=config_file, overrides=hydra_overrides) + cfg.model.image_size = image_size OmegaConf.resolve(cfg) model = instantiate(cfg.model, _recursive_=True) _load_checkpoint(model, ckpt_path) diff --git a/sam2/modeling/position_encoding.py b/sam2/modeling/position_encoding.py index 6f8666cb9..41269d6b5 100644 --- a/sam2/modeling/position_encoding.py 
+++ b/sam2/modeling/position_encoding.py @@ -286,3 +286,17 @@ def apply_rotary_matenc(xq, xk, rotmats, repeat_freqs_k=False): #print("k_out", k_out.shape) return q_out, k_out + + +def apply_rotary_matenc_512(xq, xk, rotmats, repeat_freqs_k=False): + bq, hq, nq, cq = xq.shape + q_rotmat = rotmats.reshape(1024, 128, 2, 2) + q_out = torch.matmul(q_rotmat, xq.reshape(nq, 128, 2, 1)).reshape(1, 1, 1024, 256) + + bk, hk, nk, ck = xk.shape + k_rotmat = q_rotmat.repeat(nk // 1024, 1, 1, 1) + + bk, hk, nk, ck = xk.shape + k_in = xk.reshape(nk, 128, 2, 1) + k_out = torch.matmul(k_rotmat, k_in).reshape(1, 1, nk, 256) + return q_out, k_out diff --git a/sam2/modeling/sam/transformer.py b/sam2/modeling/sam/transformer.py index e34741a0f..d94cb05a1 100644 --- a/sam2/modeling/sam/transformer.py +++ b/sam2/modeling/sam/transformer.py @@ -15,7 +15,7 @@ from torch import nn, Tensor from sam2.modeling.position_encoding import apply_rotary_enc, compute_axial_cis -from sam2.modeling.position_encoding import apply_rotary_matenc, get_rotation_matrices +from sam2.modeling.position_encoding import apply_rotary_matenc, get_rotation_matrices, apply_rotary_matenc_512 from sam2.modeling.sam2_utils import MLP from sam2.utils.misc import get_sdpa_settings @@ -317,6 +317,7 @@ def __init__( ) freqs_cis = self.compute_cis(end_x=feat_sizes[0], end_y=feat_sizes[1]) self.freqs_cis = freqs_cis + self.is_512 = feat_sizes[0] == 32 def allocate_rope_attention_weight( self, q: Tensor @@ -357,12 +358,20 @@ def self_attn( # assert self.rope_k_repeat if USE_MAT_ROTARY_ENC: - q, k = apply_rotary_matenc( - q, - k, - rotmats=self.rotmats, - repeat_freqs_k=self.rope_k_repeat, - ) + if self.is_512: + q, k = apply_rotary_matenc_512( + q, + k, + rotmats=self.rotmats, + repeat_freqs_k=self.rope_k_repeat, + ) + else: + q, k = apply_rotary_matenc( + q, + k, + rotmats=self.rotmats, + repeat_freqs_k=self.rope_k_repeat, + ) else: q, k = apply_rotary_enc( q, @@ -425,12 +434,20 @@ def cross_attn( # assert self.rope_k_repeat if USE_MAT_ROTARY_ENC: - q, k_1 = apply_rotary_matenc( - q, - k_1, - rotmats=self.rotmats, - repeat_freqs_k=self.rope_k_repeat, - ) + if self.is_512: + q, k_1 = apply_rotary_matenc_512( + q, + k_1, + rotmats=self.rotmats, + repeat_freqs_k=self.rope_k_repeat, + ) + else: + q, k_1 = apply_rotary_matenc( + q, + k_1, + rotmats=self.rotmats, + repeat_freqs_k=self.rope_k_repeat, + ) else: q, k_1 = apply_rotary_enc( q, diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index ee1648c5e..9d028e317 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -45,7 +45,7 @@ def __init__( super().__init__() self.model = sam_model self._transforms = SAM2Transforms( - resolution=image_size, + resolution=sam_model.image_size, mask_threshold=mask_threshold, max_hole_area=max_hole_area, max_sprinkle_area=max_sprinkle_area, @@ -63,13 +63,11 @@ def __init__( # Spatial dim for backbone feature maps self._bb_feat_sizes = [ - (image_size // 4, image_size // 4), - (image_size // 8, image_size // 8), - (image_size // 16, image_size // 16), + (sam_model.image_size // 4, sam_model.image_size // 4), + (sam_model.image_size // 8, sam_model.image_size // 8), + (sam_model.image_size // 16, sam_model.image_size // 16), ] - self.image_size = image_size - @classmethod def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2ImagePredictor": """ @@ -501,7 +499,7 @@ def _predict( if concat_points is None: raise ("concat points must be exists") # Noneの場合はtensorサイズが0のテンソルを返さないといけないためwhereで組めない if mask_input is None: - 
mask_input_dummy = torch.Tensor(np.zeros((1, self.image_size // 4, self.image_size // 4))) + mask_input_dummy = torch.Tensor(np.zeros((1, self.model.image_size // 4, self.model.image_size // 4))) masks_enable = torch.tensor([0], dtype=torch.int) # boolだとonnxへのエクスポートのwhereでエラーになる else: mask_input_dummy = mask_input diff --git a/sam2/sam2_video_predictor.py b/sam2/sam2_video_predictor.py index 126ffb662..8541053de 100644 --- a/sam2/sam2_video_predictor.py +++ b/sam2/sam2_video_predictor.py @@ -566,7 +566,7 @@ def _consolidate_temp_output_across_obj( batch_size=batch_size, high_res_masks=high_res_masks, is_mask_from_pts=True, # these frames are what the user interacted with - export_to_onnx=export_to_tflite, + export_to_onnx=export_to_onnx, import_from_onnx=import_from_onnx, export_to_tflite=export_to_tflite, import_from_tflite=import_from_tflite, From 8b968867cbc2a416d5d0fee6d5f15cba2acbaa46 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Thu, 12 Sep 2024 14:24:06 +0900 Subject: [PATCH 70/79] Implement image size for position encoder --- sam2/modeling/memory_attention.py | 5 +++-- sam2/modeling/sam/transformer.py | 4 ++-- sam2/modeling/sam2_base.py | 1 + 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/sam2/modeling/memory_attention.py b/sam2/modeling/memory_attention.py index f3b649fec..84c23764f 100644 --- a/sam2/modeling/memory_attention.py +++ b/sam2/modeling/memory_attention.py @@ -117,6 +117,7 @@ def allocate_rope_attention_weight( self, curr: torch.Tensor, # self-attention inputs curr_pos: Optional[Tensor] = None, # pos_enc for self-attention inputs + image_size = 1024, ): if isinstance(curr, list): assert isinstance(curr_pos, list) @@ -134,9 +135,9 @@ def allocate_rope_attention_weight( for layer in self.layers: if isinstance(layer.cross_attn_image, RoPEAttention): - layer.cross_attn_image.allocate_rope_attention_weight(output) + layer.cross_attn_image.allocate_rope_attention_weight(output, image_size = image_size) if isinstance(layer.self_attn, RoPEAttention): - layer.self_attn.allocate_rope_attention_weight(output) + layer.self_attn.allocate_rope_attention_weight(output, image_size = image_size) def forward( self, diff --git a/sam2/modeling/sam/transformer.py b/sam2/modeling/sam/transformer.py index d94cb05a1..42b8bae3c 100644 --- a/sam2/modeling/sam/transformer.py +++ b/sam2/modeling/sam/transformer.py @@ -317,10 +317,9 @@ def __init__( ) freqs_cis = self.compute_cis(end_x=feat_sizes[0], end_y=feat_sizes[1]) self.freqs_cis = freqs_cis - self.is_512 = feat_sizes[0] == 32 def allocate_rope_attention_weight( - self, q: Tensor + self, q: Tensor, image_size ): # prepare weight of rope attention for dynamo export w = h = math.sqrt(q.shape[-2]) @@ -330,6 +329,7 @@ def allocate_rope_attention_weight( else: if self.freqs_cis.shape[0] != q.shape[-2]: self.freqs_cis = self.compute_cis(end_x=w, end_y=h).to(q.device) + self.is_512 = image_size == 512 def self_attn( self, q: Tensor, k: Tensor, v: Tensor diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 5264f0725..92f9bbb0d 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -900,6 +900,7 @@ def _prepare_memory_conditioned_features( self.memory_attention.allocate_rope_attention_weight( curr=current_vision_feats, curr_pos=current_vision_pos_embeds, + image_size=self.image_size, ) # 4096の倍数のRoPEAttentionが適用される部分と手協されない部分を事前に分割する From f9459733c1fd1e0dd93b78f4848382aeefc6592d Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Fri, 13 Sep 2024 16:27:10 +0900 Subject: [PATCH 71/79] Fix 
export model path --- sam2/sam2_image_predictor.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index 9d028e317..5f55bfead 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -154,7 +154,7 @@ def set_image( if not tflite_int8: tfl_converter_flags = {'target_spec': {'supported_ops': [tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS]}} edge_model = ai_edge_torch.convert(self.model, sample_inputs, _ai_edge_converter_flags=tfl_converter_flags) - edge_model.export("image_encoder_"+model_id+".tflite") + edge_model.export("model/image_encoder_"+model_id+".tflite") if tflite_int8: from ai_edge_torch.quantize import pt2e_quantizer @@ -176,7 +176,7 @@ def set_image( quant_config=quant_config.QuantConfig(pt2e_quantizer=quantizer), _ai_edge_converter_flags=tfl_converter_flags ) - with_quantizer.export("image_encoder_"+model_id+"_int8.tflite") + with_quantizer.export("model/image_encoder_"+model_id+"_int8.tflite") edge_model = model if import_from_tflite: @@ -534,7 +534,7 @@ def _predict( if not tflite_int8: edge_model = ai_edge_torch.convert(self.model.sam_prompt_encoder, sample_inputs) - edge_model.export("prompt_encoder_"+model_id+".tflite") + edge_model.export("model/prompt_encoder_"+model_id+".tflite") if False:#tflite_int8: # labelがint64で量子化できない from ai_edge_torch.quantize import pt2e_quantizer @@ -554,7 +554,7 @@ def _predict( sample_inputs, quant_config=quant_config.QuantConfig(pt2e_quantizer=quantizer), ) - with_quantizer.export("prompt_encoder_"+model_id+"_int8.tflite") + with_quantizer.export("model/prompt_encoder_"+model_id+"_int8.tflite") edge_model = model @@ -620,7 +620,7 @@ def _predict( if not tflite_int8: import ai_edge_torch edge_model = ai_edge_torch.convert(self.model.sam_mask_decoder, sample_inputs) - edge_model.export("mask_decoder_"+model_id+".tflite") + edge_model.export("model/mask_decoder_"+model_id+".tflite") if tflite_int8: from ai_edge_torch.quantize import pt2e_quantizer @@ -640,7 +640,7 @@ def _predict( sample_inputs, quant_config=quant_config.QuantConfig(pt2e_quantizer=quantizer), ) - with_quantizer.export("mask_decoder_"+model_id+"_int8.tflite") + with_quantizer.export("model/mask_decoder_"+model_id+"_int8.tflite") edge_model = model From 3ab19301c526aea54f4dd2582d6f9ef16bef29b3 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Tue, 8 Oct 2024 17:52:27 +0900 Subject: [PATCH 72/79] Fix tensor order of tflite --- export_video_predictor.py | 11 +++++++++-- sam2/modeling/sam2_base.py | 4 ++-- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/export_video_predictor.py b/export_video_predictor.py index 4c06a4a28..8856b3173 100644 --- a/export_video_predictor.py +++ b/export_video_predictor.py @@ -99,9 +99,14 @@ def show_box(box, ax): # Let's add a 2nd positive click at (x, y) = (250, 220) to refine the mask # sending all clicks (and their labels) to `add_new_points_or_box` -points = np.array([[210, 350], [250, 220]], dtype=np.float32) # for labels, `1` means positive click and `0` means negative click -labels = np.array([1, 1], np.int32) +if args.framework == "tflite": + points = np.array([[210, 350]], dtype=np.float32) + labels = np.array([1], np.int32) +else: + points = np.array([[210, 350], [250, 220]], dtype=np.float32) + labels = np.array([1, 1], np.int32) + _, out_obj_ids, out_mask_logits = predictor.add_new_points_or_box( inference_state=inference_state, frame_idx=ann_frame_idx, @@ -110,6 +115,8 @@ def show_box(box, ax): 
labels=labels, import_from_onnx=import_from_onnx, export_to_onnx=export_to_onnx, + import_from_tflite=import_from_tflite, + export_to_tflite=export_to_tflite, model_id=model_id ) diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index 92f9bbb0d..c98cad7f7 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -464,8 +464,8 @@ def _forward_sam_heads( prompt_encoder.invoke() sparse_embeddings = prompt_encoder.get_tensor(output_details[1]["index"]) - dense_embeddings = prompt_encoder.get_tensor(output_details[2]["index"]) - dense_pe = prompt_encoder.get_tensor(output_details[0]["index"]) + dense_embeddings = prompt_encoder.get_tensor(output_details[0]["index"]) + dense_pe = prompt_encoder.get_tensor(output_details[2]["index"]) mask_decoder.allocate_tensors() input_details = mask_decoder.get_input_details() From d37e3c66c58268679f3ba698f8480c382e41cb5f Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Tue, 8 Oct 2024 18:47:52 +0900 Subject: [PATCH 73/79] Update required version --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index f9f0ac68d..984d36172 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,8 @@ tflite ``` torch 2.4.0 ai-edge-torch 0.2.0 -tf-nightly 2.18.0.dev20240905 +tf-nightly 2.18.0.dev20240811 for image mode +tf-nightly 2.18.0.dev20240905 for video mode ``` ## Export and Inference From b5cb1f9476a5053fa7c4d070264ff8c51e11acf8 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Thu, 10 Oct 2024 15:55:54 +0900 Subject: [PATCH 74/79] Implement inference only code for tflite --- README.md | 9 +- download_onnx_models.sh | 0 download_tflite_models.sh | 0 sam2/modeling/sam2_base.py | 289 +++++++++++++++++++---------------- sam2/sam2_image_predictor.py | 157 ++++++++++++++----- sam2/sam2_video_predictor.py | 44 +++--- 6 files changed, 313 insertions(+), 186 deletions(-) create mode 100644 download_onnx_models.sh create mode 100644 download_tflite_models.sh diff --git a/README.md b/README.md index 984d36172..6082f405b 100644 --- a/README.md +++ b/README.md @@ -49,11 +49,18 @@ python3 export_video_predictor.py --framework tflite onnx ``` +download_onnx_models.sh python3 export_image_predictor.py --framework onnx --mode import python3 export_video_predictor.py --framework onnx --mode import ``` -tflite not supported inference only yet. 
+tflite + +``` +download_tflite_models.sh +python3 export_image_predictor.py --framework tflite --mode import +python3 export_video_predictor.py --framework tflite --mode import +``` ## Test diff --git a/download_onnx_models.sh b/download_onnx_models.sh new file mode 100644 index 000000000..e69de29bb diff --git a/download_tflite_models.sh b/download_tflite_models.sh new file mode 100644 index 000000000..e69de29bb diff --git a/sam2/modeling/sam2_base.py b/sam2/modeling/sam2_base.py index c98cad7f7..a1e0fbd95 100644 --- a/sam2/modeling/sam2_base.py +++ b/sam2/modeling/sam2_base.py @@ -194,7 +194,10 @@ def __init__( fullgraph=True, dynamic=False, ) - + + # debug + self.debug = False + # onnx self.image_encoder_onnx = None self.prompt_encoder_onnx = None @@ -203,6 +206,14 @@ def __init__( self.memory_attention_onnx = None self.memory_encoder_onnx = None + # tflite + self.image_encoder_tflite = None + self.prompt_encoder_tflite = None + self.mask_decoder_tflite = None + self.mlp_tflite = None + self.memory_attention_tflite = None + self.memory_encoder_tflite = None + # Check decoder sample parameter assert(self.image_size == 512 or self.image_size == 1024) assert(self.num_feature_levels == 3) @@ -403,9 +414,8 @@ def _forward_sam_heads( masks_enable = torch.tensor([1], dtype=torch.int) if import_from_onnx: - print("begin prompt encoder onnx") - if sam_mask_prompt != None: - raise("currently not supported mask prompt") + if self.debug: + print("begin prompt encoder onnx") import onnxruntime if self.prompt_encoder_onnx == None: self.prompt_encoder_onnx = onnxruntime.InferenceSession("model/prompt_encoder_"+model_id+".onnx") @@ -416,15 +426,16 @@ def _forward_sam_heads( if self.mask_decoder_onnx == None: self.mask_decoder_onnx = onnxruntime.InferenceSession("model/mask_decoder_"+model_id+".onnx") - # print("backbone_features", backbone_features.shape) - print("begin mask decoder onnx") - print("begin mask decoder onnx") - print("backbone_features", np.sum(backbone_features.numpy())) - print("image_pe", np.sum(dense_pe.numpy())) - print("sparse_embeddings", np.sum(sparse_embeddings.numpy())) - print("dense_embeddings", np.sum(dense_embeddings.numpy())) - print("high_res_features", np.sum(high_res_features[0].numpy())) - print("high_res_features", np.sum(high_res_features[1].numpy())) + if self.debug: + print("backbone_features", backbone_features.shape) + print("begin mask decoder onnx") + print("begin mask decoder onnx") + print("backbone_features", np.sum(backbone_features.numpy())) + print("image_pe", np.sum(dense_pe.numpy())) + print("sparse_embeddings", np.sum(sparse_embeddings.numpy())) + print("dense_embeddings", np.sum(dense_embeddings.numpy())) + print("high_res_features", np.sum(high_res_features[0].numpy())) + print("high_res_features", np.sum(high_res_features[1].numpy())) masks, iou_pred, sam_tokens_out, object_score_logits = self.mask_decoder_onnx.run(None, { "image_embeddings":backbone_features.numpy(), "image_pe": dense_pe.numpy(), @@ -438,73 +449,81 @@ def _forward_sam_heads( sam_tokens_out = torch.Tensor(sam_tokens_out) object_score_logits = torch.Tensor(object_score_logits) low_res_multimasks, ious, sam_output_tokens, object_score_logits = self.sam_mask_decoder.forward_postprocess(masks, iou_pred, sam_tokens_out, object_score_logits, multimask_output) - print(low_res_multimasks.shape) - print(ious.shape) - print(sam_output_tokens.shape) - print(object_score_logits.shape) + #print(low_res_multimasks.shape) + #print(ious.shape) + #print(sam_output_tokens.shape) + 
#print(object_score_logits.shape) if import_from_tflite: + if self.debug: + print("begin prompt encoder tflite") + import tensorflow as tf - prompt_encoder = tf.lite.Interpreter(model_path="model/prompt_encoder_"+model_id+".tflite") - mask_decoder = tf.lite.Interpreter(model_path="model/mask_decoder_"+model_id+".tflite") - - prompt_encoder.allocate_tensors() - input_details = prompt_encoder.get_input_details() - output_details = prompt_encoder.get_output_details() - prompt_encoder.resize_tensor_input( - input_details[2]["index"], - [1, sam_point_coords.shape[1], 2] - ) - prompt_encoder.allocate_tensors() - - prompt_encoder.set_tensor(input_details[2]["index"], sam_point_coords) - prompt_encoder.set_tensor(input_details[3]["index"], sam_point_labels) - prompt_encoder.set_tensor(input_details[0]["index"], mask_input_dummy) - prompt_encoder.set_tensor(input_details[1]["index"], masks_enable) - prompt_encoder.invoke() - - sparse_embeddings = prompt_encoder.get_tensor(output_details[1]["index"]) - dense_embeddings = prompt_encoder.get_tensor(output_details[0]["index"]) - dense_pe = prompt_encoder.get_tensor(output_details[2]["index"]) - - mask_decoder.allocate_tensors() - input_details = mask_decoder.get_input_details() - output_details = mask_decoder.get_output_details() - mask_decoder.resize_tensor_input( - input_details[1]["index"], - [1, sparse_embeddings.shape[1], 256] - ) - mask_decoder.allocate_tensors() + if self.prompt_encoder_tflite == None: + self.prompt_encoder_tflite = tf.lite.Interpreter(model_path="model/prompt_encoder_"+model_id+".tflite") + input_details = self.prompt_encoder_tflite.get_input_details() + self.prompt_encoder_tflite.resize_tensor_input( + input_details[2]["index"], + [1, sam_point_coords.shape[1], 2] + ) + self.prompt_encoder_tflite.allocate_tensors() + + input_details = self.prompt_encoder_tflite.get_input_details() + output_details = self.prompt_encoder_tflite.get_output_details() + + self.prompt_encoder_tflite.set_tensor(input_details[2]["index"], sam_point_coords) + self.prompt_encoder_tflite.set_tensor(input_details[3]["index"], sam_point_labels) + self.prompt_encoder_tflite.set_tensor(input_details[0]["index"], mask_input_dummy) + self.prompt_encoder_tflite.set_tensor(input_details[1]["index"], masks_enable) + self.prompt_encoder_tflite.invoke() + + sparse_embeddings = self.prompt_encoder_tflite.get_tensor(output_details[1]["index"]) + dense_embeddings = self.prompt_encoder_tflite.get_tensor(output_details[0]["index"]) + dense_pe = self.prompt_encoder_tflite.get_tensor(output_details[2]["index"]) + + if self.mask_decoder_tflite == None: + self.mask_decoder_tflite = tf.lite.Interpreter(model_path="model/mask_decoder_"+model_id+".tflite") + + input_details = self.mask_decoder_tflite.get_input_details() + self.mask_decoder_tflite.resize_tensor_input( + input_details[1]["index"], + [1, sparse_embeddings.shape[1], 256] + ) + self.mask_decoder_tflite.allocate_tensors() + + input_details = self.mask_decoder_tflite.get_input_details() + output_details = self.mask_decoder_tflite.get_output_details() batched_mode = False - mask_decoder.set_tensor(input_details[3]["index"], backbone_features.numpy()) - mask_decoder.set_tensor(input_details[6]["index"], dense_pe) - mask_decoder.set_tensor(input_details[1]["index"], sparse_embeddings) - mask_decoder.set_tensor(input_details[2]["index"], dense_embeddings) - mask_decoder.set_tensor(input_details[5]["index"], batched_mode) - mask_decoder.set_tensor(input_details[0]["index"], high_res_features[0].numpy()) - 
mask_decoder.set_tensor(input_details[4]["index"], high_res_features[1].numpy()) - mask_decoder.invoke() + self.mask_decoder_tflite.set_tensor(input_details[3]["index"], backbone_features.numpy()) + self.mask_decoder_tflite.set_tensor(input_details[6]["index"], dense_pe) + self.mask_decoder_tflite.set_tensor(input_details[1]["index"], sparse_embeddings) + self.mask_decoder_tflite.set_tensor(input_details[2]["index"], dense_embeddings) + self.mask_decoder_tflite.set_tensor(input_details[5]["index"], batched_mode) + self.mask_decoder_tflite.set_tensor(input_details[0]["index"], high_res_features[0].numpy()) + self.mask_decoder_tflite.set_tensor(input_details[4]["index"], high_res_features[1].numpy()) + self.mask_decoder_tflite.invoke() - masks = mask_decoder.get_tensor(output_details[2]["index"]) - iou_pred = mask_decoder.get_tensor(output_details[0]["index"]) - sam_tokens_out = mask_decoder.get_tensor(output_details[3]["index"]) - object_score_logits = mask_decoder.get_tensor(output_details[1]["index"]) + masks = self.mask_decoder_tflite.get_tensor(output_details[2]["index"]) + iou_pred = self.mask_decoder_tflite.get_tensor(output_details[0]["index"]) + sam_tokens_out = self.mask_decoder_tflite.get_tensor(output_details[3]["index"]) + object_score_logits = self.mask_decoder_tflite.get_tensor(output_details[1]["index"]) masks = torch.Tensor(masks) iou_pred = torch.Tensor(iou_pred) sam_tokens_out = torch.Tensor(sam_tokens_out) object_score_logits = torch.Tensor(object_score_logits) low_res_multimasks, ious, sam_output_tokens, object_score_logits = self.sam_mask_decoder.forward_postprocess(masks, iou_pred, sam_tokens_out, object_score_logits, multimask_output) - print(low_res_multimasks.shape) - print(ious.shape) - print(sam_output_tokens.shape) - print(object_score_logits.shape) + #print(low_res_multimasks.shape) + #print(ious.shape) + #print(sam_output_tokens.shape) + #print(object_score_logits.shape) if not import_from_onnx and not import_from_tflite: - print("begin mask decoder torch") - print("backbone_features", backbone_features.shape) + if self.debug: + print("begin mask decoder torch") + print("backbone_features", backbone_features.shape) if sam_mask_prompt is None: import numpy as np mask_input_dummy = torch.Tensor(np.zeros((1, self.image_size // 4, self.image_size // 4))) @@ -534,10 +553,11 @@ def _forward_sam_heads( high_res_features1=high_res_features[0], high_res_features2=high_res_features[1], ) - print(low_res_multimasks.shape) - print(ious.shape) - print(sam_output_tokens.shape) - print(object_score_logits.shape) + if self.debug: + print(low_res_multimasks.shape) + print(ious.shape) + print(sam_output_tokens.shape) + print(object_score_logits.shape) if self.pred_obj_scores: is_obj_appearing = object_score_logits > 0 @@ -606,16 +626,17 @@ def _forward_sam_heads( if import_from_tflite: import tensorflow as tf - mlp = tf.lite.Interpreter(model_path="model/mlp_"+model_id+".tflite") - mlp.allocate_tensors() - input_details = mlp.get_input_details() - output_details = mlp.get_output_details() - mlp.allocate_tensors() + if self.mlp_tflite == None: + self.mlp_tflite = tf.lite.Interpreter(model_path="model/mlp_"+model_id+".tflite") + self.mlp_tflite.allocate_tensors() - mlp.set_tensor(input_details[0]["index"], sam_output_token.numpy()) - mlp.invoke() + input_details = self.mlp_tflite.get_input_details() + output_details = self.mlp_tflite.get_output_details() - obj_ptr = mlp.get_tensor(output_details[0]["index"]) + self.mlp_tflite.set_tensor(input_details[0]["index"], 
sam_output_token.numpy()) + self.mlp_tflite.invoke() + + obj_ptr = self.mlp_tflite.get_tensor(output_details[0]["index"]) obj_ptr = torch.Tensor(obj_ptr) if not import_from_onnx and not import_from_tflite: @@ -910,12 +931,13 @@ def _prepare_memory_conditioned_features( memory_pos_embed_1 = memory_pos_embed[:-num_obj_ptr_tokens,:,:] memory_pos_embed_2 = memory_pos_embed[-num_obj_ptr_tokens:,:,:] - print("memory attention shape") - print("curr", current_vision_feats[0].shape) - print("memory", memory.shape) - print("curr_pos", current_vision_pos_embeds[0].shape) - print("memory_pos", memory_pos_embed.shape) - print("num_obj_ptr_tokens", num_obj_ptr_tokens) + if self.debug: + print("memory attention shape") + print("curr", current_vision_feats[0].shape) + print("memory", memory.shape) + print("curr_pos", current_vision_pos_embeds[0].shape) + print("memory_pos", memory_pos_embed.shape) + print("num_obj_ptr_tokens", num_obj_ptr_tokens) if export_to_onnx and not self.memory_attention_onnx_exported: self.memory_attention_onnx_exported = True @@ -943,7 +965,8 @@ def _prepare_memory_conditioned_features( #onnx_program.save('model/memory_attention_'+model_id+'.onnx') if import_from_onnx: - print("begin memory attention onnx") + if self.debug: + print("begin memory attention onnx") import onnxruntime if self.memory_attention_onnx == None: self.memory_attention_onnx = onnxruntime.InferenceSession("model/memory_attention_"+model_id+".opt.onnx") @@ -983,40 +1006,43 @@ def _prepare_memory_conditioned_features( edge_model.export("model/memory_attention_"+model_id+".tflite") if import_from_tflite: + if self.debug: + print("begin memory attention tflite") import tensorflow as tf - import os - #os.environ['TF_ENABLE_XNNPACK'] = '0' - memory_attention = tf.lite.Interpreter(model_path="model/memory_attention_"+model_id+".tflite") - #memory_attention.allocate_tensors() - input_details = memory_attention.get_input_details() - output_details = memory_attention.get_output_details() - memory_attention.resize_tensor_input( - input_details[5]["index"], - [memory_1.shape[0], 1, 64] - ) - memory_attention.resize_tensor_input( - input_details[1]["index"], - [memory_2.shape[0], 1, 64] - ) - memory_attention.resize_tensor_input( - input_details[4]["index"], - [memory_pos_embed_1.shape[0], 1, 64] - ) - memory_attention.resize_tensor_input( - input_details[0]["index"], - [memory_pos_embed_2.shape[0], 1, 64] - ) - memory_attention.allocate_tensors() + if self.memory_attention_tflite == None: + self.memory_attention_tflite = tf.lite.Interpreter(model_path="model/memory_attention_"+model_id+".tflite") + self.memory_attention_tflite.allocate_tensors() + input_details = self.memory_attention_tflite.get_input_details() + self.memory_attention_tflite.resize_tensor_input( + input_details[5]["index"], + [memory_1.shape[0], 1, 64] + ) + self.memory_attention_tflite.resize_tensor_input( + input_details[1]["index"], + [memory_2.shape[0], 1, 64] + ) + self.memory_attention_tflite.resize_tensor_input( + input_details[4]["index"], + [memory_pos_embed_1.shape[0], 1, 64] + ) + self.memory_attention_tflite.resize_tensor_input( + input_details[0]["index"], + [memory_pos_embed_2.shape[0], 1, 64] + ) + self.memory_attention_tflite.allocate_tensors() + + input_details = self.memory_attention_tflite.get_input_details() + output_details = self.memory_attention_tflite.get_output_details() - memory_attention.set_tensor(input_details[3]["index"], current_vision_feats[0].numpy()) - memory_attention.set_tensor(input_details[5]["index"], 
memory_1.numpy()) - memory_attention.set_tensor(input_details[1]["index"], memory_2.numpy()) - memory_attention.set_tensor(input_details[2]["index"], current_vision_pos_embeds[0].numpy()) - memory_attention.set_tensor(input_details[4]["index"], memory_pos_embed_1.numpy()) - memory_attention.set_tensor(input_details[0]["index"], memory_pos_embed_2.numpy()) - memory_attention.invoke() + self.memory_attention_tflite.set_tensor(input_details[3]["index"], current_vision_feats[0].numpy()) + self.memory_attention_tflite.set_tensor(input_details[5]["index"], memory_1.numpy()) + self.memory_attention_tflite.set_tensor(input_details[1]["index"], memory_2.numpy()) + self.memory_attention_tflite.set_tensor(input_details[2]["index"], current_vision_pos_embeds[0].numpy()) + self.memory_attention_tflite.set_tensor(input_details[4]["index"], memory_pos_embed_1.numpy()) + self.memory_attention_tflite.set_tensor(input_details[0]["index"], memory_pos_embed_2.numpy()) + self.memory_attention_tflite.invoke() - pix_feat_with_mem = memory_attention.get_tensor(output_details[0]["index"]) + pix_feat_with_mem = self.memory_attention_tflite.get_tensor(output_details[0]["index"]) pix_feat_with_mem = torch.Tensor(pix_feat_with_mem) if not import_from_onnx and not import_from_tflite: @@ -1086,7 +1112,8 @@ def _encode_new_memory( ) if import_from_onnx: - print("begin memory encoder onnx") + if self.debug: + print("begin memory encoder onnx") import onnxruntime if self.memory_encoder_onnx == None: self.memory_encoder_onnx = onnxruntime.InferenceSession("model/memory_encoder_"+model_id+".onnx") @@ -1104,24 +1131,28 @@ def _encode_new_memory( edge_model.export("model/memory_encoder_"+model_id+".tflite") if import_from_tflite: + if self.debug: + print("begin memory encoder tflite") import tensorflow as tf - memory_encoder = tf.lite.Interpreter(model_path="model/memory_encoder_"+model_id+".tflite") - memory_encoder.allocate_tensors() - input_details = memory_encoder.get_input_details() - output_details = memory_encoder.get_output_details() - memory_encoder.allocate_tensors() - - memory_encoder.set_tensor(input_details[0]["index"], pix_feat.numpy()) - memory_encoder.set_tensor(input_details[1]["index"], mask_for_mem.numpy()) - memory_encoder.invoke() - - vision_features = memory_encoder.get_tensor(output_details[1]["index"]) - vision_pos_enc = memory_encoder.get_tensor(output_details[0]["index"]) + if self.memory_encoder_tflite == None: + self.memory_encoder_tflite = tf.lite.Interpreter(model_path="model/memory_encoder_"+model_id+".tflite") + self.memory_encoder_tflite.allocate_tensors() + + input_details = self.memory_encoder_tflite.get_input_details() + output_details = self.memory_encoder_tflite.get_output_details() + + self.memory_encoder_tflite.set_tensor(input_details[0]["index"], pix_feat.numpy()) + self.memory_encoder_tflite.set_tensor(input_details[1]["index"], mask_for_mem.numpy()) + self.memory_encoder_tflite.invoke() + + vision_features = self.memory_encoder_tflite.get_tensor(output_details[1]["index"]) + vision_pos_enc = self.memory_encoder_tflite.get_tensor(output_details[0]["index"]) vision_features = torch.Tensor(vision_features) vision_pos_enc = torch.Tensor(vision_pos_enc) if not import_from_onnx and not import_from_tflite: - print("begin memory encoder torch") + if self.debug: + print("begin memory encoder torch") vision_features, vision_pos_enc = self.memory_encoder( pix_feat, mask_for_mem#, skip_mask_sigmoid=True # sigmoid already applied (fixed to constant) ) diff --git a/sam2/sam2_image_predictor.py 
b/sam2/sam2_image_predictor.py index 5f55bfead..de3022fac 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -68,6 +68,19 @@ def __init__( (sam_model.image_size // 16, sam_model.image_size // 16), ] + # debug + self.debug = False + + # onnx + self.image_encoder_onnx = None + self.prompt_encoder_onnx = None + self.mask_decoder_onnx = None + + # tflite + self.image_encoder_tflite = None + self.prompt_encoder_tflite = None + self.mask_decoder_tflite = None + @classmethod def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2ImagePredictor": """ @@ -135,15 +148,17 @@ def set_image( ) if import_from_onnx: - model = onnxruntime.InferenceSession("model/image_encoder_"+model_id+".onnx") - vision_features, vision_pos_enc_0, vision_pos_enc_1, vision_pos_enc_2, backbone_fpn_0, backbone_fpn_1, backbone_fpn_2 = model.run(None, {"input_image":input_image.numpy()}) - print("vision_features", vision_features.shape) - print("vision_pos_enc_0", vision_pos_enc_0.shape) - print("vision_pos_enc_1", vision_pos_enc_1.shape) - print("vision_pos_enc_2", vision_pos_enc_2.shape) - print("backbone_fpn_0", backbone_fpn_0.shape) - print("backbone_fpn_1", backbone_fpn_1.shape) - print("backbone_fpn_2", backbone_fpn_2.shape) + if self.image_encoder_onnx == None: + self.image_encoder_onnx = onnxruntime.InferenceSession("model/image_encoder_"+model_id+".onnx") + vision_features, vision_pos_enc_0, vision_pos_enc_1, vision_pos_enc_2, backbone_fpn_0, backbone_fpn_1, backbone_fpn_2 = self.image_encoder_onnx.run(None, {"input_image":input_image.numpy()}) + if self.debug: + print("vision_features", vision_features.shape) + print("vision_pos_enc_0", vision_pos_enc_0.shape) + print("vision_pos_enc_1", vision_pos_enc_1.shape) + print("vision_pos_enc_2", vision_pos_enc_2.shape) + print("backbone_fpn_0", backbone_fpn_0.shape) + print("backbone_fpn_1", backbone_fpn_1.shape) + print("backbone_fpn_2", backbone_fpn_2.shape) if export_to_tflite: import ai_edge_torch @@ -179,10 +194,27 @@ def set_image( with_quantizer.export("model/image_encoder_"+model_id+"_int8.tflite") edge_model = model - if import_from_tflite: - vision_features, vision_pos_enc_0, vision_pos_enc_1, vision_pos_enc_2, backbone_fpn_0, backbone_fpn_1, backbone_fpn_2 = edge_model(input_image) + if import_from_tflite: + import tensorflow as tf + if self.image_encoder_tflite == None: + self.image_encoder_tflite = tf.lite.Interpreter(model_path="model/image_encoder_"+model_id+".tflite") + self.image_encoder_tflite.allocate_tensors() + + input_details = self.image_encoder_tflite.get_input_details() + output_details = self.image_encoder_tflite.get_output_details() + + self.image_encoder_tflite.set_tensor(input_details[0]["index"], input_image.numpy()) + self.image_encoder_tflite.invoke() - if not import_from_onnx and (not import_from_tflite or not export_to_tflite): + vision_features = self.image_encoder_tflite.get_tensor(output_details[4]["index"]) + vision_pos_enc_0 = self.image_encoder_tflite.get_tensor(output_details[1]["index"]) + vision_pos_enc_1 = self.image_encoder_tflite.get_tensor(output_details[5]["index"]) + vision_pos_enc_2 = self.image_encoder_tflite.get_tensor(output_details[3]["index"]) + backbone_fpn_0 = self.image_encoder_tflite.get_tensor(output_details[0]["index"]) + backbone_fpn_1 = self.image_encoder_tflite.get_tensor(output_details[2]["index"]) + backbone_fpn_2 = self.image_encoder_tflite.get_tensor(output_details[6]["index"]) + + if not import_from_onnx and not import_from_tflite: vision_features, vision_pos_enc_0, 
vision_pos_enc_1, vision_pos_enc_2, backbone_fpn_0, backbone_fpn_1, backbone_fpn_2 = self.model.forward_image(input_image) backbone_out = {"vision_features":torch.Tensor(vision_features), @@ -504,7 +536,6 @@ def _predict( else: mask_input_dummy = mask_input masks_enable = torch.tensor([1], dtype=torch.int) - print("mask_input_dummy", mask_input_dummy.shape) if export_to_onnx: #print("concat_points", concat_points.shape) @@ -522,8 +553,9 @@ def _predict( ) if import_from_onnx: - model = onnxruntime.InferenceSession("model/prompt_encoder_"+model_id+".onnx") - sparse_embeddings, dense_embeddings, dense_pe = model.run(None, {"coords":concat_points[0].numpy(), "labels":concat_points[1].numpy(), "masks": mask_input_dummy.numpy(), "masks_enable":masks_enable.numpy()}) + if self.prompt_encoder_onnx == None: + self.prompt_encoder_onnx = onnxruntime.InferenceSession("model/prompt_encoder_"+model_id+".onnx") + sparse_embeddings, dense_embeddings, dense_pe = self.prompt_encoder_onnx.run(None, {"coords":concat_points[0].numpy(), "labels":concat_points[1].numpy(), "masks": mask_input_dummy.numpy(), "masks_enable":masks_enable.numpy()}) sparse_embeddings = torch.Tensor(sparse_embeddings) dense_embeddings = torch.Tensor(dense_embeddings) dense_pe = torch.Tensor(dense_pe) @@ -558,13 +590,36 @@ def _predict( edge_model = model - if import_from_tflite and not tflite_int8: - sparse_embeddings, dense_embeddings, dense_pe = edge_model(concat_points[0], concat_points[1], mask_input_dummy, masks_enable) - sparse_embeddings = torch.Tensor(sparse_embeddings) - dense_embeddings = torch.Tensor(dense_embeddings) - dense_pe = torch.Tensor(dense_pe) + if import_from_tflite: + import tensorflow as tf + if self.prompt_encoder_tflite == None: + self.prompt_encoder_tflite = tf.lite.Interpreter(model_path="model/prompt_encoder_"+model_id+".tflite") + self.prompt_encoder_tflite.allocate_tensors() + input_details = self.prompt_encoder_tflite.get_input_details() + self.prompt_encoder_tflite.resize_tensor_input( + input_details[2]["index"], + [1, concat_points[0].shape[1], 2] + ) + self.prompt_encoder_tflite.allocate_tensors() + + input_details = self.prompt_encoder_tflite.get_input_details() + output_details = self.prompt_encoder_tflite.get_output_details() - if not import_from_onnx and (not import_from_tflite or not export_to_tflite or tflite_int8): + self.prompt_encoder_tflite.set_tensor(input_details[2]["index"], concat_points[0]) + self.prompt_encoder_tflite.set_tensor(input_details[3]["index"], concat_points[1]) + self.prompt_encoder_tflite.set_tensor(input_details[0]["index"], mask_input_dummy) + self.prompt_encoder_tflite.set_tensor(input_details[1]["index"], masks_enable) + self.prompt_encoder_tflite.invoke() + + sparse_embeddings = self.prompt_encoder_tflite.get_tensor(output_details[1]["index"]) + dense_embeddings = self.prompt_encoder_tflite.get_tensor(output_details[0]["index"]) + dense_pe = self.prompt_encoder_tflite.get_tensor(output_details[2]["index"]) + + sparse_embeddings = torch.Tensor(sparse_embeddings) + dense_embeddings = torch.Tensor(dense_embeddings) + dense_pe = torch.Tensor(dense_pe) + + if not import_from_onnx and not import_from_tflite: sparse_embeddings, dense_embeddings, dense_pe = self.model.sam_prompt_encoder.forward( coords=concat_points[0], labels=concat_points[1], @@ -599,8 +654,9 @@ def _predict( ) if import_from_onnx: - model = onnxruntime.InferenceSession("model/mask_decoder_"+model_id+".onnx") - masks, iou_pred, sam_tokens_out, object_score_logits = model.run(None, { + if 
self.mask_decoder_onnx == None: + self.mask_decoder_onnx = onnxruntime.InferenceSession("model/mask_decoder_"+model_id+".onnx") + masks, iou_pred, sam_tokens_out, object_score_logits = self.mask_decoder_onnx.run(None, { "image_embeddings":self._features["image_embed"][img_idx].unsqueeze(0).numpy(), "image_pe": dense_pe.numpy(), "sparse_prompt_embeddings": sparse_embeddings.numpy(), @@ -644,18 +700,49 @@ def _predict( edge_model = model - if import_from_tflite: - batched_mode_np = np.zeros((1), dtype=bool) - if batched_mode: - batched_mode_np[0] = True - masks, iou_pred, sam_tokens_out, object_score_logits = edge_model(self._features["image_embed"][img_idx].unsqueeze(0), dense_pe, sparse_embeddings, dense_embeddings, batched_mode_np, high_res_features[0], high_res_features[1]) - masks = torch.Tensor(masks) - iou_pred = torch.Tensor(iou_pred) - sam_tokens_out = torch.Tensor(sam_tokens_out) - object_score_logits = torch.Tensor(object_score_logits) - low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder.forward_postprocess(masks, iou_pred, sam_tokens_out, object_score_logits, multimask_output) - - if not import_from_onnx and (not import_from_tflite or not export_to_tflite): + if import_from_tflite: + batched_mode_np = np.zeros((1), dtype=bool) + if batched_mode: + batched_mode_np[0] = True + + import tensorflow as tf + if self.mask_decoder_tflite == None: + self.mask_decoder_tflite = tf.lite.Interpreter(model_path="model/mask_decoder_"+model_id+".tflite") + self.mask_decoder_tflite.allocate_tensors() + input_details = self.mask_decoder_tflite.get_input_details() + self.mask_decoder_tflite.resize_tensor_input( + input_details[1]["index"], + [1, sparse_embeddings.shape[1], 256] + ) + self.mask_decoder_tflite.allocate_tensors() + + input_details = self.mask_decoder_tflite.get_input_details() + output_details = self.mask_decoder_tflite.get_output_details() + + batched_mode = False + + self.mask_decoder_tflite.set_tensor(input_details[3]["index"], self._features["image_embed"][img_idx].unsqueeze(0).numpy()) + self.mask_decoder_tflite.set_tensor(input_details[6]["index"], dense_pe) + self.mask_decoder_tflite.set_tensor(input_details[1]["index"], sparse_embeddings) + self.mask_decoder_tflite.set_tensor(input_details[2]["index"], dense_embeddings) + self.mask_decoder_tflite.set_tensor(input_details[5]["index"], batched_mode) + self.mask_decoder_tflite.set_tensor(input_details[0]["index"], high_res_features[0].numpy()) + self.mask_decoder_tflite.set_tensor(input_details[4]["index"], high_res_features[1].numpy()) + self.mask_decoder_tflite.invoke() + + masks = self.mask_decoder_tflite.get_tensor(output_details[2]["index"]) + iou_pred = self.mask_decoder_tflite.get_tensor(output_details[0]["index"]) + sam_tokens_out = self.mask_decoder_tflite.get_tensor(output_details[3]["index"]) + object_score_logits = self.mask_decoder_tflite.get_tensor(output_details[1]["index"]) + + masks = torch.Tensor(masks) + iou_pred = torch.Tensor(iou_pred) + sam_tokens_out = torch.Tensor(sam_tokens_out) + object_score_logits = torch.Tensor(object_score_logits) + + low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder.forward_postprocess(masks, iou_pred, sam_tokens_out, object_score_logits, multimask_output) + + if not import_from_onnx and not import_from_tflite: self.model.sam_mask_decoder.forward = self.model.sam_mask_decoder.forward_normal masks, iou_pred, sam_tokens_out, object_score_logits = self.model.sam_mask_decoder( image_embeddings=self._features["image_embed"][img_idx].unsqueeze(0), diff 
--git a/sam2/sam2_video_predictor.py b/sam2/sam2_video_predictor.py index 8541053de..a4c40a86a 100644 --- a/sam2/sam2_video_predictor.py +++ b/sam2/sam2_video_predictor.py @@ -854,35 +854,38 @@ def _get_image_feature(self, inference_state, frame_idx, batch_size, import_from device = inference_state["device"] image = inference_state["images"][frame_idx].to(device).float().unsqueeze(0) if import_from_onnx: - print("begin image encoder onnx") - print(image.shape) + if self.debug: + print("begin image encoder onnx") import onnxruntime if self.image_encoder_onnx == None: self.image_encoder_onnx = onnxruntime.InferenceSession("model/image_encoder_"+model_id+".onnx") vision_features, vision_pos_enc_0, vision_pos_enc_1, vision_pos_enc_2, backbone_fpn_0, backbone_fpn_1, backbone_fpn_2 = self.image_encoder_onnx.run(None, {"input_image":image.numpy()}) if import_from_tflite: - print("begin image encoder tflite") + if self.debug: + print("begin image encoder tflite") import tensorflow as tf - image_encoder = tf.lite.Interpreter(model_path="model/image_encoder_"+model_id+".tflite") - image_encoder.allocate_tensors() - input_details = image_encoder.get_input_details() - output_details = image_encoder.get_output_details() - - image_encoder.set_tensor(input_details[0]["index"], image.numpy()) - image_encoder.invoke() - - vision_features = image_encoder.get_tensor(output_details[4]["index"]) - vision_pos_enc_0 = image_encoder.get_tensor(output_details[1]["index"]) - vision_pos_enc_1 = image_encoder.get_tensor(output_details[5]["index"]) - vision_pos_enc_2 = image_encoder.get_tensor(output_details[3]["index"]) - backbone_fpn_0 = image_encoder.get_tensor(output_details[0]["index"]) - backbone_fpn_1 = image_encoder.get_tensor(output_details[2]["index"]) - backbone_fpn_2 = image_encoder.get_tensor(output_details[6]["index"]) + if self.image_encoder_tflite == None: + self.image_encoder_tflite = tf.lite.Interpreter(model_path="model/image_encoder_"+model_id+".tflite") + self.image_encoder_tflite.allocate_tensors() + + input_details = self.image_encoder_tflite.get_input_details() + output_details = self.image_encoder_tflite.get_output_details() + + self.image_encoder_tflite.set_tensor(input_details[0]["index"], image.numpy()) + self.image_encoder_tflite.invoke() + + vision_features = self.image_encoder_tflite.get_tensor(output_details[4]["index"]) + vision_pos_enc_0 = self.image_encoder_tflite.get_tensor(output_details[1]["index"]) + vision_pos_enc_1 = self.image_encoder_tflite.get_tensor(output_details[5]["index"]) + vision_pos_enc_2 = self.image_encoder_tflite.get_tensor(output_details[3]["index"]) + backbone_fpn_0 = self.image_encoder_tflite.get_tensor(output_details[0]["index"]) + backbone_fpn_1 = self.image_encoder_tflite.get_tensor(output_details[2]["index"]) + backbone_fpn_2 = self.image_encoder_tflite.get_tensor(output_details[6]["index"]) if not import_from_onnx and not import_from_tflite: - print("begin image encoder torch") - print(image.shape) + if self.debug: + print("begin image encoder torch") vision_features, vision_pos_enc_0, vision_pos_enc_1, vision_pos_enc_2, backbone_fpn_0, backbone_fpn_1, backbone_fpn_2 = self.forward_image(image) backbone_out = {"vision_features":torch.Tensor(vision_features), @@ -894,7 +897,6 @@ def _get_image_feature(self, inference_state, frame_idx, batch_size, import_from inference_state["cached_features"] = {frame_idx: (image, backbone_out)} # expand the features to have the same dimension as the number of objects - print("batch_size", batch_size) expanded_image = 
image.expand(batch_size, -1, -1, -1) expanded_backbone_out = { "backbone_fpn": backbone_out["backbone_fpn"].copy(), From 14c67f78e933089814f7d5db9d75117eecebe951 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Thu, 10 Oct 2024 16:16:46 +0900 Subject: [PATCH 75/79] Download all models --- README.md | 2 ++ download_onnx_models.sh | 7 +++++++ download_tflite_models.sh | 12 ++++++++++++ 3 files changed, 21 insertions(+) diff --git a/README.md b/README.md index 6082f405b..9a2fca471 100644 --- a/README.md +++ b/README.md @@ -60,6 +60,8 @@ tflite download_tflite_models.sh python3 export_image_predictor.py --framework tflite --mode import python3 export_video_predictor.py --framework tflite --mode import +python3 export_image_predictor.py --framework tflite --mode import --image_size 512 +python3 export_video_predictor.py --framework tflite --mode import --image_size 512 ``` ## Test diff --git a/download_onnx_models.sh b/download_onnx_models.sh index e69de29bb..3a38c895f 100644 --- a/download_onnx_models.sh +++ b/download_onnx_models.sh @@ -0,0 +1,7 @@ +wget https://storage.googleapis.com/ailia-models/segment-anything-2/image_encoder_hiera_t.onnx -P ./model/ +wget https://storage.googleapis.com/ailia-models/segment-anything-2/prompt_encoder_hiera_t.onnx -P ./model/ +wget https://storage.googleapis.com/ailia-models/segment-anything-2/mask_decoder_hiera_t.onnx -P ./model/ +wget https://storage.googleapis.com/ailia-models/segment-anything-2/memory_encoder_hiera_t.onnx -P ./model/ +wget https://storage.googleapis.com/ailia-models/segment-anything-2/mlp_hiera_t.onnx -P ./model/ +wget https://storage.googleapis.com/ailia-models/segment-anything-2/memory_attention_hiera_t.onnx -P ./model/ +wget https://storage.googleapis.com/ailia-models/segment-anything-2/memory_attention_hiera_t.opt.onnx -P ./model/ \ No newline at end of file diff --git a/download_tflite_models.sh b/download_tflite_models.sh index e69de29bb..f295991a9 100644 --- a/download_tflite_models.sh +++ b/download_tflite_models.sh @@ -0,0 +1,12 @@ +wget https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/image_encoder_hiera_t.tflite -P ./model/ +wget https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/prompt_encoder_hiera_t.tflite -P ./model/ +wget https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/mask_decoder_hiera_t.tflite -P ./model/ +wget https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/mlp_hiera_t.tflite -P ./model/ +wget https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/memory_encoder_hiera_t.tflite -P ./model/ +wget https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/memory_attention_hiera_t.tflite -P ./model/ +wget https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/image_encoder_hiera_t_512.tflite -P ./model/ +wget https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/prompt_encoder_hiera_t_512.tflite -P ./model/ +wget https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/mask_decoder_hiera_t_512.tflite -P ./model/ +wget https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/mlp_hiera_t_512.tflite -P ./model/ +wget https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/memory_encoder_hiera_t_512.tflite -P ./model/ +wget https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/memory_attention_hiera_t_512.tflite -P ./model/ \ No newline at end of file From 6bb6b1cca798acaecd6a50645415cd01affdf72f Mon Sep 17 00:00:00 2001 From: 
Kazuki Kyakuno Date: Wed, 4 Dec 2024 11:01:07 +0900 Subject: [PATCH 76/79] Update checkpoint information --- README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/README.md b/README.md index 9a2fca471..d6d8848fd 100644 --- a/README.md +++ b/README.md @@ -112,3 +112,13 @@ The memory attention in tflite does not support dynamic shapes, so num_maskmem a ## Original document - [README_ORIGINAL.md](README_ORIGINAL.md) + +## Tags + +### 4dim matmul + +main + +### 6dim matmul + +https://github.com/axinc-ai/segment-anything-2/tree/f36169e87ec302c75279fadc60cda1c3763165eb From 70b93fae13e630646b1c63d3226b9c4bb0eda6a4 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Sun, 8 Dec 2024 13:16:06 +0900 Subject: [PATCH 77/79] Fix duplicated post process for torch --- sam2/sam2_image_predictor.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/sam2/sam2_image_predictor.py b/sam2/sam2_image_predictor.py index de3022fac..071dca649 100644 --- a/sam2/sam2_image_predictor.py +++ b/sam2/sam2_image_predictor.py @@ -744,7 +744,7 @@ def _predict( if not import_from_onnx and not import_from_tflite: self.model.sam_mask_decoder.forward = self.model.sam_mask_decoder.forward_normal - masks, iou_pred, sam_tokens_out, object_score_logits = self.model.sam_mask_decoder( + low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder( image_embeddings=self._features["image_embed"][img_idx].unsqueeze(0), image_pe=dense_pe, sparse_prompt_embeddings=sparse_embeddings, @@ -754,7 +754,6 @@ def _predict( high_res_features1=high_res_features[0], high_res_features2=high_res_features[1], ) - low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder.forward_postprocess(masks, iou_pred, sam_tokens_out, object_score_logits, multimask_output) # Upscale the masks to the original image resolution masks = self._transforms.postprocess_masks( From 7e6f9056a69af6a8e8c1d3366a4af65dd1565a13 Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Thu, 19 Dec 2024 11:05:09 +0900 Subject: [PATCH 78/79] Fix prompt encoder mismatch --- sam2/modeling/sam/prompt_encoder.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/sam2/modeling/sam/prompt_encoder.py b/sam2/modeling/sam/prompt_encoder.py index d62c9bdf8..49c45b7af 100644 --- a/sam2/modeling/sam/prompt_encoder.py +++ b/sam2/modeling/sam/prompt_encoder.py @@ -106,15 +106,21 @@ def _embed_points( #point_embedding[labels == 3] += self.point_embeddings[3].weight # こっちだと、tfliteでも動く + + # Create the index mask for each label labels = labels.int() - table = torch.zeros((5, self.point_embeddings[0].weight.shape[1])) - table[0] = self.not_a_point_embed.weight - table[1] = self.point_embeddings[0].weight - table[2] = self.point_embeddings[1].weight - table[3] = self.point_embeddings[2].weight - table[4] = self.point_embeddings[3].weight - for i in range(labels.shape[0]): - point_embedding[i] = point_embedding[i] + table[labels[i] + 1] + mask_neg1 = (labels == -1).unsqueeze(-1).expand_as(point_embedding) + mask_0 = (labels == 0).unsqueeze(-1).expand_as(point_embedding) + mask_1 = (labels == 1).unsqueeze(-1).expand_as(point_embedding) + mask_2 = (labels == 2).unsqueeze(-1).expand_as(point_embedding) + mask_3 = (labels == 3).unsqueeze(-1).expand_as(point_embedding) + + # Apply the weights according to the mask + point_embedding = torch.where(mask_neg1, self.not_a_point_embed.weight.expand_as(point_embedding), point_embedding) + point_embedding = torch.where(mask_0, point_embedding + 
self.point_embeddings[0].weight.expand_as(point_embedding), point_embedding) + point_embedding = torch.where(mask_1, point_embedding + self.point_embeddings[1].weight.expand_as(point_embedding), point_embedding) + point_embedding = torch.where(mask_2, point_embedding + self.point_embeddings[2].weight.expand_as(point_embedding), point_embedding) + point_embedding = torch.where(mask_3, point_embedding + self.point_embeddings[3].weight.expand_as(point_embedding), point_embedding) return point_embedding From b898bd6a4295059f5ea26d618eb649b518baec5e Mon Sep 17 00:00:00 2001 From: Kazuki Kyakuno Date: Thu, 19 Dec 2024 11:13:03 +0900 Subject: [PATCH 79/79] Update release note --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index d6d8848fd..46f67e871 100644 --- a/README.md +++ b/README.md @@ -93,6 +93,8 @@ You can also download it from the following. - https://storage.googleapis.com/ailia-models/segment-anything-2/memory_attention_hiera_t.onnx (6dim matmul, batch = N) - https://storage.googleapis.com/ailia-models/segment-anything-2/memory_attention_hiera_t.opt.onnx (4dim matmul, batch = 1) +(The model of the Prompt Encoder was replaced on 2024/12/19 due to a problem found in the Prompt Encoder.) + ### TFLITE - https://storage.googleapis.com/ailia-models-tflite/segment-anything-2/image_encoder_hiera_t.tflite @@ -104,6 +106,8 @@ You can also download it from the following. The memory attention in tflite does not support dynamic shapes, so num_maskmem and max_obj_ptrs_in_encoder need to be fixed to 1. +(The model of the Prompt Encoder was replaced on 2024/12/19 due to a problem found in the Prompt Encoder.) + ## Inference Example - [ailia-models](https://github.com/axinc-ai/ailia-models/tree/master/image_segmentation/segment-anything-2)
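
For reference, the ONNX import path enabled above (`download_onnx_models.sh` followed by `--mode import`) boils down to plain `onnxruntime` calls like the ones the patches add to `sam2_image_predictor.py`. The following sketch is not part of the patch series; the `hiera_t` model id, the 1024 resolution and the zero-filled input are assumptions.

```
# Minimal sketch (not from the patches): run the exported image encoder directly.
# Assumes model_id "hiera_t", image_size 1024, and that download_onnx_models.sh has
# placed the file under ./model/.
import numpy as np
import onnxruntime

session = onnxruntime.InferenceSession("model/image_encoder_hiera_t.onnx")

# Dummy preprocessed frame; the real pipeline feeds the SAM2Transforms output,
# a float32 tensor of shape (1, 3, image_size, image_size).
input_image = np.zeros((1, 3, 1024, 1024), dtype=np.float32)

(vision_features, vision_pos_enc_0, vision_pos_enc_1, vision_pos_enc_2,
 backbone_fpn_0, backbone_fpn_1, backbone_fpn_2) = session.run(
    None, {"input_image": input_image})

for name, feat in (("vision_features", vision_features), ("backbone_fpn_0", backbone_fpn_0)):
    print(name, feat.shape)
```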

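The TFLite import code added in PATCH 74 repeats a single pattern for every sub-model: build a `tf.lite.Interpreter`, resize the inputs whose leading dimensions are dynamic (number of prompt points, number of memory tokens), allocate, set the inputs, `invoke()`, and read the outputs back with `get_tensor()`. A condensed sketch of that pattern for the prompt encoder follows; the file name, the zero-filled dummy inputs and the shape-based lookup of the coordinate input are assumptions (the patches hard-code the tensor indices per model).

```
# Minimal sketch of the tf.lite.Interpreter pattern used throughout the patches.
import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="model/prompt_encoder_hiera_t.tflite")

# The point coordinate input has shape (1, N, 2) with a dynamic N; fix N before allocating.
num_points = 1
coords_index = next(d["index"] for d in interpreter.get_input_details()
                    if d["shape"][-1] == 2)
interpreter.resize_tensor_input(coords_index, [1, num_points, 2])
interpreter.allocate_tensors()

# Feed zero-filled dummies with the dtype/shape the model reports, then run once.
for d in interpreter.get_input_details():
    interpreter.set_tensor(d["index"], np.zeros(d["shape"], dtype=d["dtype"]))
interpreter.invoke()

# sparse_embeddings, dense_embeddings and dense_pe come back via get_tensor().
for d in interpreter.get_output_details():
    print(d["name"], interpreter.get_tensor(d["index"]).shape)
```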
zipl6oA0kw&NbnxWk!&yR+PhOOydH=93)qyL8y#h>DC|J>ciRY6n-KLEnzyy*e2C1i z#0wtp#MbPljUP_~4d`u!c%j(pw_g~i?|(D71mWhXgODvJD}ZOtChn!i9A$%go?qx5 z6tEI(G8wl)b%9@yJXiiGI_-bQUrdE~mhY`$s*I1rk{C;On(67HRqa5SSJo`6f*`!s(nc1Fb zT3i~>h>(jwp*3y$D9p+}19IQyXf->uRSw54;jiLqrR@W`TU=^k zywLVmE+!3e<>v2tiK%>)wS6d5#nD#n1WDjrY1ok_&@VVKGT!fCWi~qUx?CD-6$CT& zl7kICK_E*5%o8yRbbP5ORCjzS2u>f>)~d<^cn02=P%*u|{sol7X32u3Ym!5w3rW{0 z=d~oy6z!4oH7jyD@=qR6nzI(irjXxgFrB<n8; z(*;>k27!KW7vg_RGoyhhHGaP_*ekE2GmdlS`NsOQ^IsCaA8%`dX_wgge;ZNSR*&Pz zN5iFc7@Hd|7I5j%wd%-;5irv_1~+OH;OQVLF=L4lfDMzIHEFPpxx~oz6C*$fmDVP- z6;BrRzUfx`22#`bF^)ovT*?VoG)lj45d9rsQM!!@#DwL9|h}hNopbyQ*o&)|FWTzr}T#Bu%mpoC>rexBO zW73KeftzvILI}rjWGG=J|4xj|2%VIj+Fqu0trykK09UUWcu9;4vBW^cgp#U_%=)SE zR&~&Cx@}fzJV0_+j@