
Commit

Rename core module src.controlnet_aux to src.custom_controlnet_aux to avoid conflict with HuggingFace's controlnet_aux
Fannovel16 committed Aug 17, 2024
1 parent 3f71617 commit c47ccab
Showing 541 changed files with 282 additions and 281 deletions.
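Background: pip installs HuggingFace's controlnet_aux package into site-packages, while this repo previously shipped its own copy as src/controlnet_aux and put that directory on sys.path, so which package `import controlnet_aux` returned depended on path order. A minimal sketch of the collision and the fix, assuming both packages are present (the path below is illustrative, not the repo's exact layout):

import importlib.util
import sys

# Two distinct packages both named "controlnet_aux": whichever sys.path entry
# is searched first silently shadows the other.
sys.path.insert(0, "/path/to/custom_nodes/comfyui_controlnet_aux/src")  # illustrative path
print(importlib.util.find_spec("controlnet_aux"))   # resolves to the vendored copy here

# After this commit the vendored copy answers only to its new name, so the two
# packages can no longer collide:
print(importlib.util.find_spec("custom_controlnet_aux"))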
1 change: 1 addition & 0 deletions README.md
@@ -180,6 +180,7 @@ for o in history['outputs']:

# Testing workflow
https://github.com/Fannovel16/comfyui_controlnet_aux/blob/main/examples/ExecuteAll.png
+Input image: https://github.com/Fannovel16/comfyui_controlnet_aux/blob/main/examples/comfyui-controlnet-aux-logo.png

# Q&A:
## Why some nodes doesn't appear after I installed this repo?
2 changes: 1 addition & 1 deletion __init__.py
@@ -8,7 +8,7 @@
from .hint_image_enchance import NODE_DISPLAY_NAME_MAPPINGS as HIE_NODE_DISPLAY_NAME_MAPPINGS
#Ref: https://github.com/comfyanonymous/ComfyUI/blob/76d53c4622fc06372975ed2a43ad345935b8a551/nodes.py#L17
sys.path.insert(0, str(Path(here, "src").resolve()))
for pkg_name in ["controlnet_aux", "custom_mmpkg"]:
for pkg_name in ["custom_controlnet_aux", "custom_mmpkg"]:
sys.path.append(str(Path(here, "src", pkg_name).resolve()))

#Enable CPU fallback for ops not being supported by MPS like upsample_bicubic2d.out
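With the rename in place, a quick check (a sketch, assuming it runs after this extension's __init__.py has patched sys.path) shows each name resolving to exactly one package:

import importlib.util

spec = importlib.util.find_spec("custom_controlnet_aux")
print(spec.origin)  # .../comfyui_controlnet_aux/src/custom_controlnet_aux/__init__.py

spec = importlib.util.find_spec("controlnet_aux")
print(spec)  # None unless HuggingFace's controlnet_aux is pip-installed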
2 changes: 1 addition & 1 deletion dev_interface.py
@@ -3,4 +3,4 @@
import sys
sys.path.append(str(Path(here, "src")))

-from controlnet_aux import *
+from custom_controlnet_aux import *
Binary file added examples/comfyui-controlnet-aux-logo.png
2 changes: 1 addition & 1 deletion node_wrappers/anime_face_segment.py
@@ -20,7 +20,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Semantic Segmentation"

def execute(self, image, remove_background_using_abg=True, resolution=512, **kwargs):
-from controlnet_aux.anime_face_segment import AnimeFaceSegmentor
+from custom_controlnet_aux.anime_face_segment import AnimeFaceSegmentor

model = AnimeFaceSegmentor.from_pretrained().to(model_management.get_torch_device())
if remove_background_using_abg:
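This wrapper, like every node_wrappers change below, only touches a deferred import: each detector is imported inside execute() rather than at module top level, so the node registers at startup even when its optional dependencies are missing. A simplified, self-contained sketch of the pattern (class name invented; assumes src/ is on sys.path as arranged in __init__.py):

class ExampleLineExtractor:
    def execute(self, image, resolution=512, **kwargs):
        # Heavy dependency imported at call time, not load time, so a missing
        # optional package only breaks this node's execution, not registration.
        from custom_controlnet_aux.canny import CannyDetector
        detector = CannyDetector()
        return (detector(image, low_threshold=100, high_threshold=200),)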
10 changes: 5 additions & 5 deletions node_wrappers/anyline.py
@@ -40,7 +40,7 @@ def __init__(self):
self.device = model_management.get_torch_device()

def get_anyline(self, image, merge_with_lineart="lineart_standard", resolution=512, lineart_lower_bound=0, lineart_upper_bound=1, object_min_size=36, object_connectivity=1):
-from controlnet_aux.teed import TEDDetector
+from custom_controlnet_aux.teed import TEDDetector
from skimage import morphology
pbar = comfy.utils.ProgressBar(3)

@@ -53,14 +53,14 @@ def get_anyline(self, image, merge_with_lineart="lineart_standard", resolution=5

# Process the image with the lineart standard preprocessor
if merge_with_lineart == "lineart_standard":
-from controlnet_aux.lineart_standard import LineartStandardDetector
+from custom_controlnet_aux.lineart_standard import LineartStandardDetector
lineart_standard_detector = LineartStandardDetector()
lineart_result = common_annotator_call(lineart_standard_detector, image, guassian_sigma=2, intensity_threshold=3, resolution=resolution, show_pbar=False).numpy()
del lineart_standard_detector
else:
-from controlnet_aux.lineart import LineartDetector
-from controlnet_aux.lineart_anime import LineartAnimeDetector
-from controlnet_aux.manga_line import LineartMangaDetector
+from custom_controlnet_aux.lineart import LineartDetector
+from custom_controlnet_aux.lineart_anime import LineartAnimeDetector
+from custom_controlnet_aux.manga_line import LineartMangaDetector
lineart_detector = dict(lineart_realisitic=LineartDetector, lineart_anime=LineartAnimeDetector, manga_line=LineartMangaDetector)[merge_with_lineart]
lineart_detector = lineart_detector.from_pretrained().to(self.device)
lineart_result = common_annotator_call(lineart_detector, image, resolution=resolution, show_pbar=False).numpy()
2 changes: 1 addition & 1 deletion node_wrappers/binary.py
@@ -15,7 +15,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Line Extractors"

def execute(self, image, bin_threshold=100, resolution=512, **kwargs):
-from controlnet_aux.binary import BinaryDetector
+from custom_controlnet_aux.binary import BinaryDetector

return (common_annotator_call(BinaryDetector(), image, bin_threshold=bin_threshold, resolution=resolution), )

2 changes: 1 addition & 1 deletion node_wrappers/canny.py
@@ -16,7 +16,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Line Extractors"

def execute(self, image, low_threshold=100, high_threshold=200, resolution=512, **kwargs):
-from controlnet_aux.canny import CannyDetector
+from custom_controlnet_aux.canny import CannyDetector

return (common_annotator_call(CannyDetector(), image, low_threshold=low_threshold, high_threshold=high_threshold, resolution=resolution), )

2 changes: 1 addition & 1 deletion node_wrappers/color.py
@@ -12,7 +12,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/T2IAdapter-only"

def execute(self, image, resolution=512, **kwargs):
-from controlnet_aux.color import ColorDetector
+from custom_controlnet_aux.color import ColorDetector

return (common_annotator_call(ColorDetector(), image, resolution=resolution), )

2 changes: 1 addition & 1 deletion node_wrappers/densepose.py
@@ -16,7 +16,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Faces and Poses Estimators"

def execute(self, image, model="densepose_r50_fpn_dl.torchscript", cmap="Viridis (MagicAnimate)", resolution=512):
-from controlnet_aux.densepose import DenseposeDetector
+from custom_controlnet_aux.densepose import DenseposeDetector
model = DenseposeDetector \
.from_pretrained(filename=model) \
.to(model_management.get_torch_device())
4 changes: 2 additions & 2 deletions node_wrappers/depth_anything.py
@@ -17,7 +17,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

def execute(self, image, ckpt_name="depth_anything_vitl14.pth", resolution=512, **kwargs):
-from controlnet_aux.depth_anything import DepthAnythingDetector
+from custom_controlnet_aux.depth_anything import DepthAnythingDetector

model = DepthAnythingDetector.from_pretrained(filename=ckpt_name).to(model_management.get_torch_device())
out = common_annotator_call(model, image, resolution=resolution)
@@ -38,7 +38,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

def execute(self, image, environment="indoor", resolution=512, **kwargs):
-from controlnet_aux.zoe import ZoeDepthAnythingDetector
+from custom_controlnet_aux.zoe import ZoeDepthAnythingDetector
ckpt_name = "depth_anything_metric_depth_indoor.pt" if environment == "indoor" else "depth_anything_metric_depth_outdoor.pt"
model = ZoeDepthAnythingDetector.from_pretrained(filename=ckpt_name).to(model_management.get_torch_device())
out = common_annotator_call(model, image, resolution=resolution)
4 changes: 2 additions & 2 deletions node_wrappers/depth_anything_v2.py
@@ -18,7 +18,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

def execute(self, image, ckpt_name="depth_anything_v2_vitl.pth", resolution=512, **kwargs):
-from controlnet_aux.depth_anything_v2 import DepthAnythingV2Detector
+from custom_controlnet_aux.depth_anything_v2 import DepthAnythingV2Detector

model = DepthAnythingV2Detector.from_pretrained(filename=ckpt_name).to(model_management.get_torch_device())
out = common_annotator_call(model, image, resolution=resolution, max_depth=1)
@@ -39,7 +39,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"
def execute(self, image, environment, resolution=512, max_depth=20.0, **kwargs):
-from controlnet_aux.depth_anything_v2 import DepthAnythingV2Detector
+from custom_controlnet_aux.depth_anything_v2 import DepthAnythingV2Detector
filename = dict(indoor="depth_anything_v2_metric_hypersim_vitl.pth", outdoor="depth_anything_v2_metric_vkitti_vitl.pth")[environment]
model = DepthAnythingV2Detector.from_pretrained(filename=filename).to(model_management.get_torch_device())
out = common_annotator_call(model, image, resolution=resolution, max_depth=max_depth)
2 changes: 1 addition & 1 deletion node_wrappers/diffusion_edge.py
@@ -24,7 +24,7 @@ def INPUT_TYPES(s):

def execute(self, image, environment="indoor", patch_batch_size=4, resolution=512, **kwargs):
install_deps()
-from controlnet_aux.diffusion_edge import DiffusionEdgeDetector
+from custom_controlnet_aux.diffusion_edge import DiffusionEdgeDetector

model = DiffusionEdgeDetector \
.from_pretrained(filename = f"diffusion_edge_{environment}.pt") \
2 changes: 1 addition & 1 deletion node_wrappers/dsine.py
@@ -16,7 +16,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

def execute(self, image, fov=60.0, iterations=5, resolution=512, **kwargs):
-from controlnet_aux.dsine import DsineDetector
+from custom_controlnet_aux.dsine import DsineDetector

model = DsineDetector.from_pretrained().to(model_management.get_torch_device())
out = common_annotator_call(model, image, fov=fov, iterations=iterations, resolution=resolution)
2 changes: 1 addition & 1 deletion node_wrappers/dwpose.py
@@ -2,7 +2,7 @@
import comfy.model_management as model_management
import numpy as np
import warnings
-from controlnet_aux.dwpose import DwposeDetector, AnimalposeDetector
+from custom_controlnet_aux.dwpose import DwposeDetector, AnimalposeDetector
import os
import json

4 changes: 2 additions & 2 deletions node_wrappers/hed.py
@@ -15,7 +15,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Line Extractors"

def execute(self, image, resolution=512, **kwargs):
-from controlnet_aux.hed import HEDdetector
+from custom_controlnet_aux.hed import HEDdetector

model = HEDdetector.from_pretrained().to(model_management.get_torch_device())
out = common_annotator_call(model, image, resolution=resolution, safe = kwargs["safe"] == "enable")
@@ -36,7 +36,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Line Extractors"

def execute(self, image, resolution=512, **kwargs):
-from controlnet_aux.hed import HEDdetector
+from custom_controlnet_aux.hed import HEDdetector

model = HEDdetector.from_pretrained().to(model_management.get_torch_device())
out = common_annotator_call(model, image, resolution=resolution, scribble=True, safe=kwargs["safe"]=="enable")
2 changes: 1 addition & 1 deletion node_wrappers/leres.py
@@ -17,7 +17,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

def execute(self, image, rm_nearest=0, rm_background=0, resolution=512, boost="disable", **kwargs):
-from controlnet_aux.leres import LeresDetector
+from custom_controlnet_aux.leres import LeresDetector

model = LeresDetector.from_pretrained().to(model_management.get_torch_device())
out = common_annotator_call(model, image, resolution=resolution, thr_a=rm_nearest, thr_b=rm_background, boost=boost == "enable")
2 changes: 1 addition & 1 deletion node_wrappers/lineart.py
@@ -15,7 +15,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Line Extractors"

def execute(self, image, resolution=512, **kwargs):
-from controlnet_aux.lineart import LineartDetector
+from custom_controlnet_aux.lineart import LineartDetector

model = LineartDetector.from_pretrained().to(model_management.get_torch_device())
out = common_annotator_call(model, image, resolution=resolution, coarse = kwargs["coarse"] == "enable")
2 changes: 1 addition & 1 deletion node_wrappers/lineart_anime.py
@@ -12,7 +12,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Line Extractors"

def execute(self, image, resolution=512, **kwargs):
-from controlnet_aux.lineart_anime import LineartAnimeDetector
+from custom_controlnet_aux.lineart_anime import LineartAnimeDetector

model = LineartAnimeDetector.from_pretrained().to(model_management.get_torch_device())
out = common_annotator_call(model, image, resolution=resolution)
2 changes: 1 addition & 1 deletion node_wrappers/lineart_standard.py
@@ -16,7 +16,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Line Extractors"

def execute(self, image, guassian_sigma=6, intensity_threshold=8, resolution=512, **kwargs):
-from controlnet_aux.lineart_standard import LineartStandardDetector
+from custom_controlnet_aux.lineart_standard import LineartStandardDetector
return (common_annotator_call(LineartStandardDetector(), image, guassian_sigma=guassian_sigma, intensity_threshold=intensity_threshold, resolution=resolution), )

NODE_CLASS_MAPPINGS = {
2 changes: 1 addition & 1 deletion node_wrappers/manga_line.py
@@ -12,7 +12,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Line Extractors"

def execute(self, image, resolution=512, **kwargs):
-from controlnet_aux.manga_line import LineartMangaDetector
+from custom_controlnet_aux.manga_line import LineartMangaDetector

model = LineartMangaDetector.from_pretrained().to(model_management.get_torch_device())
out = common_annotator_call(model, image, resolution=resolution)
2 changes: 1 addition & 1 deletion node_wrappers/mediapipe_face.py
@@ -27,7 +27,7 @@ def INPUT_TYPES(s):
def detect(self, image, max_faces=10, min_confidence=0.5, resolution=512):
#Ref: https://github.com/Fannovel16/comfy_controlnet_preprocessors/issues/70#issuecomment-1677967369
install_deps()
-from controlnet_aux.mediapipe_face import MediapipeFaceDetector
+from custom_controlnet_aux.mediapipe_face import MediapipeFaceDetector
return (common_annotator_call(MediapipeFaceDetector(), image, max_faces=max_faces, min_confidence=min_confidence, resolution=resolution), )

NODE_CLASS_MAPPINGS = {
4 changes: 2 additions & 2 deletions node_wrappers/mesh_graphormer.py
@@ -61,7 +61,7 @@ def INPUT_TYPES(s):

def execute(self, image, mask_bbox_padding=30, mask_type="based_on_depth", mask_expand=5, resolution=512, rand_seed=88, detect_thr=0.6, presence_thr=0.6, **kwargs):
install_deps()
-from controlnet_aux.mesh_graphormer import MeshGraphormerDetector
+from custom_controlnet_aux.mesh_graphormer import MeshGraphormerDetector
model = kwargs["model"] if "model" in kwargs \
else MeshGraphormerDetector.from_pretrained(detect_thr=detect_thr, presence_thr=presence_thr).to(model_management.get_torch_device())

@@ -120,7 +120,7 @@ def INPUT_TYPES(s):

def execute(self, image, bbox_detector, bbox_threshold=0.5, bbox_dilation=10, bbox_crop_factor=3.0, drop_size=10, resolution=512, **mesh_graphormer_kwargs):
install_deps()
-from controlnet_aux.mesh_graphormer import MeshGraphormerDetector
+from custom_controlnet_aux.mesh_graphormer import MeshGraphormerDetector
mesh_graphormer_node = Mesh_Graphormer_Depth_Map_Preprocessor()
model = MeshGraphormerDetector.from_pretrained(detect_thr=0.6, presence_thr=0.6).to(model_management.get_torch_device())
mesh_graphormer_kwargs["model"] = model
4 changes: 2 additions & 2 deletions node_wrappers/metric3d.py
@@ -17,7 +17,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

def execute(self, image, backbone="vit-small", fx=1000, fy=1000, resolution=512):
-from controlnet_aux.metric3d import Metric3DDetector
+from custom_controlnet_aux.metric3d import Metric3DDetector
model = Metric3DDetector.from_pretrained(filename=f"metric_depth_{backbone.replace('-', '_')}_800k.pth").to(model_management.get_torch_device())
cb = lambda image, **kwargs: model(image, **kwargs)[0]
out = common_annotator_call(cb, image, resolution=resolution, fx=fx, fy=fy, depth_and_normal=True)
@@ -40,7 +40,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

def execute(self, image, backbone="vit-small", fx=1000, fy=1000, resolution=512):
-from controlnet_aux.metric3d import Metric3DDetector
+from custom_controlnet_aux.metric3d import Metric3DDetector
model = Metric3DDetector.from_pretrained(filename=f"metric_depth_{backbone.replace('-', '_')}_800k.pth").to(model_management.get_torch_device())
cb = lambda image, **kwargs: model(image, **kwargs)[1]
out = common_annotator_call(cb, image, resolution=resolution, fx=fx, fy=fy, depth_and_normal=True)
4 changes: 2 additions & 2 deletions node_wrappers/midas.py
@@ -17,7 +17,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

def execute(self, image, a=np.pi * 2.0, bg_threshold=0.1, resolution=512, **kwargs):
-from controlnet_aux.midas import MidasDetector
+from custom_controlnet_aux.midas import MidasDetector

model = MidasDetector.from_pretrained().to(model_management.get_torch_device())
#Dirty hack :))
@@ -41,7 +41,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

def execute(self, image, a=np.pi * 2.0, bg_threshold=0.1, resolution=512, **kwargs):
-from controlnet_aux.midas import MidasDetector
+from custom_controlnet_aux.midas import MidasDetector

# Ref: https://github.com/lllyasviel/ControlNet/blob/main/gradio_depth2image.py
model = MidasDetector.from_pretrained().to(model_management.get_torch_device())
2 changes: 1 addition & 1 deletion node_wrappers/mlsd.py
@@ -17,7 +17,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Line Extractors"

def execute(self, image, score_threshold, dist_threshold, resolution=512, **kwargs):
-from controlnet_aux.mlsd import MLSDdetector
+from custom_controlnet_aux.mlsd import MLSDdetector

model = MLSDdetector.from_pretrained().to(model_management.get_torch_device())
out = common_annotator_call(model, image, resolution=resolution, thr_v=score_threshold, thr_d=dist_threshold)
2 changes: 1 addition & 1 deletion node_wrappers/normalbae.py
@@ -12,7 +12,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Normal and Depth Estimators"

def execute(self, image, resolution=512, **kwargs):
-from controlnet_aux.normalbae import NormalBaeDetector
+from custom_controlnet_aux.normalbae import NormalBaeDetector

model = NormalBaeDetector.from_pretrained().to(model_management.get_torch_device())
out = common_annotator_call(model, image, resolution=resolution)
4 changes: 2 additions & 2 deletions node_wrappers/oneformer.py
@@ -12,7 +12,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Semantic Segmentation"

def semantic_segmentate(self, image, resolution=512):
-from controlnet_aux.oneformer import OneformerSegmentor
+from custom_controlnet_aux.oneformer import OneformerSegmentor

model = OneformerSegmentor.from_pretrained(filename="150_16_swin_l_oneformer_coco_100ep.pth")
model = model.to(model_management.get_torch_device())
@@ -31,7 +31,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Semantic Segmentation"

def semantic_segmentate(self, image, resolution=512):
-from controlnet_aux.oneformer import OneformerSegmentor
+from custom_controlnet_aux.oneformer import OneformerSegmentor

model = OneformerSegmentor.from_pretrained(filename="250_16_swin_l_oneformer_ade20k_160k.pth")
model = model.to(model_management.get_torch_device())
2 changes: 1 addition & 1 deletion node_wrappers/openpose.py
@@ -18,7 +18,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Faces and Poses Estimators"

def estimate_pose(self, image, detect_hand, detect_body, detect_face, resolution=512, **kwargs):
-from controlnet_aux.open_pose import OpenposeDetector
+from custom_controlnet_aux.open_pose import OpenposeDetector

detect_hand = detect_hand == "enable"
detect_body = detect_body == "enable"
2 changes: 1 addition & 1 deletion node_wrappers/pidinet.py
@@ -15,7 +15,7 @@ def INPUT_TYPES(s):
CATEGORY = "ControlNet Preprocessors/Line Extractors"

def execute(self, image, safe, resolution=512, **kwargs):
-from controlnet_aux.pidi import PidiNetDetector
+from custom_controlnet_aux.pidi import PidiNetDetector

model = PidiNetDetector.from_pretrained().to(model_management.get_torch_device())
out = common_annotator_call(model, image, resolution=resolution, safe = safe == "enable")
2 changes: 1 addition & 1 deletion node_wrappers/pose_keypoint_postprocess.py
@@ -8,7 +8,7 @@
import torch
import itertools

-from ..src.controlnet_aux.dwpose import draw_poses, draw_animalposes, decode_json_as_poses
+from ..src.custom_controlnet_aux.dwpose import draw_poses, draw_animalposes, decode_json_as_poses


"""
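Unlike the wrappers above, this file reaches the vendored package through a package-relative path instead of the sys.path entries. One subtlety worth noting (a sketch; the custom node's package name is assumed): the two routes register the module under different sys.modules keys, so the same source file can end up loaded twice:

import sys

# Route 1, via the sys.path entry added in __init__.py:
#     sys.modules key "custom_controlnet_aux.dwpose"
# Route 2, via this file's relative import:
#     sys.modules key "<custom_node_package>.src.custom_controlnet_aux.dwpose"
# Both keys can coexist, each holding its own module object.
print(sorted(k for k in sys.modules if k.endswith("dwpose")))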