Skip to content

Commit 1a2dbc6

Browse files
committed
Second pass
1 parent e0f1c94 commit 1a2dbc6

File tree

9 files changed

+691
-221
lines changed

9 files changed

+691
-221
lines changed

NOTICE.txt

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
comfyui_jankdiffusehigh
2+
3+
Copyright https://github.com/blepping
4+
5+
This project was referenced from the original implementation at https://github.com/yhyun225/DiffuseHigh

py/config.py

Lines changed: 214 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,214 @@
1+
from __future__ import annotations
2+
3+
from comfy.samplers import ksampler
4+
from pytorch_wavelets import DTCWTForward, DTCWTInverse, DWTForward, DWTInverse
5+
6+
from .tensor_image_ops import (
7+
BLENDING_MODES,
8+
Sharpen,
9+
)
10+
from .upscale import Upscale
11+
from .utils import fallback
12+
from .vae import VAEHelper
13+
14+
15+
class Config:
    """Configuration for a jank DiffuseHigh sampling run.

    Collects all tunable options, builds the helper objects (VAE codec,
    sharpen filter, upscaler, forward/inverse wavelet transforms) and
    optionally constructs per-iteration override ``Config`` instances
    from the ``iteration_override`` mapping.
    """

    # Fields that a per-iteration override dict is allowed to replace.
    _overridable_fields = {  # noqa: RUF012
        "blend_by_mode",
        "blend_mode",
        "denoised_wavelet_multiplier",
        "dtcwt_biort",
        "dtcwt_mode",
        "dtcwt_qshift",
        "dwt_flip_filters",
        "dwt_level",
        "dwt_mode",
        "dwt_wave",
        "fadeout_factor",
        "guidance_factor",
        "guidance_mode",
        "guidance_restart_s_noise",
        "guidance_restart",
        "guidance_steps",
        "iteration_override",
        "reference_wavelet_multiplier",
        "renoise_factor",
        "resample_mode",
        "rescale_increment",
        "scale_factor",
        "sharpen_gaussian_kernel_size",
        "sharpen_gaussian_sigma",
        "sharpen_mode",
        "sharpen_reference",
        "sharpen_strength",
        "sigma_offset",
        "vae_decode_kwargs",
        "vae_encode_kwargs",
        "vae_mode",
    }

    # Attributes skipped by as_dict(): methods, plus composite helper
    # objects whose underlying settings as_dict() re-expands into plain
    # values instead.
    _dict_exclude_keys = {  # noqa: RUF012
        "as_dict",
        "blend_function",
        "dwt",
        "get_iteration_config",
        "idwt",
        "iteration_override",
        "sharpen",
        "upscale",
        "vae",
    }

    def __init__(
        self,
        device,
        dtype,
        latent_format,
        *,
        blend_mode="lerp",
        blend_by_mode="image",
        denoised_wavelet_multiplier=1.0,
        dtcwt_biort="near_sym_a",
        dtcwt_mode=False,
        dtcwt_qshift="qshift_a",
        dwt_flip_filters=False,
        dwt_level=1,
        dwt_mode="symmetric",
        dwt_wave="db4",
        fadeout_factor=0.0,
        guidance_factor=1.0,
        guidance_mode="image",
        guidance_restart_s_noise=1.0,
        guidance_restart=0,
        guidance_sampler=None,
        guidance_steps=5,
        iteration_override=None,
        iterations=1,
        reference_sampler=None,
        reference_wavelet_multiplier=1.0,
        renoise_factor=1.0,
        resample_mode="bicubic",
        rescale_increment=64,
        sampler=None,
        scale_factor=2.0,
        sharpen_gaussian_kernel_size=3,
        sharpen_gaussian_sigma=(0.1, 2.0),
        sharpen_mode="gaussian",
        sharpen_reference=True,
        sharpen_strength=1.0,
        sigma_offset=0,
        upscale_model=None,
        vae_decode_kwargs=None,
        vae_encode_kwargs=None,
        vae_mode="normal",
        vae=None,
    ):
        """Build the config and all helper objects.

        Raises:
            ValueError: if blend_by_mode is not image/latent/wavelet.
            TypeError: if iteration_override (or one of its items) has
                the wrong type.
        """
        # Default sampler is plain (non-ancestral) Euler; built lazily so
        # ksampler() is only called when no sampler was supplied.
        sampler = fallback(
            sampler,
            lambda: ksampler("euler"),
            default_is_fun=True,
        )
        self.sigma_offset = sigma_offset
        self.fadeout_factor = fadeout_factor
        self.scale_factor = scale_factor
        self.guidance_factor = guidance_factor
        self.renoise_factor = renoise_factor
        self.iterations = iterations
        self.guidance_steps = guidance_steps
        self.guidance_mode = guidance_mode
        self.guidance_restart = guidance_restart
        self.guidance_restart_s_noise = guidance_restart_s_noise
        self.sampler = sampler
        # Guidance/reference samplers fall back to the base sampler.
        self.guidance_sampler = fallback(guidance_sampler, sampler)
        self.reference_sampler = fallback(reference_sampler, sampler)
        self.vae = VAEHelper(
            vae_mode,
            latent_format,
            device=device,
            dtype=dtype,
            vae=vae,
            encode_kwargs=fallback(vae_encode_kwargs, {}),
            decode_kwargs=fallback(vae_decode_kwargs, {}),
        )
        # sharpen_reference=False is expressed as zero strength so the
        # Sharpen helper becomes a no-op.
        self.sharpen = Sharpen(
            mode=sharpen_mode,
            strength=sharpen_strength if sharpen_reference else 0,
            gaussian_kernel_size=sharpen_gaussian_kernel_size,
            gaussian_sigma=sharpen_gaussian_sigma,
        )
        self.upscale = Upscale(
            resample_mode=resample_mode,
            rescale_increment=rescale_increment,
            upscale_model=upscale_model,
        )
        self.dwt_mode = dwt_mode
        self.dwt_level = dwt_level
        self.dwt_wave = dwt_wave
        self.dtcwt_mode = dtcwt_mode
        self.dtcwt_biort = dtcwt_biort
        self.dtcwt_qshift = dtcwt_qshift
        # Pick the wavelet transform pair: dual-tree complex (DTCWT) or
        # plain discrete (DWT). Both are moved to the target device once.
        if dtcwt_mode:
            self.dwt = DTCWTForward(
                J=dwt_level,
                mode=dwt_mode,
                biort=dtcwt_biort,
                qshift=dtcwt_qshift,
            ).to(device)
            self.idwt = DTCWTInverse(
                mode=dwt_mode,
                biort=dtcwt_biort,
                qshift=dtcwt_qshift,
            ).to(device)
        else:
            self.dwt = DWTForward(J=dwt_level, wave=dwt_wave, mode=dwt_mode).to(device)
            self.idwt = DWTInverse(wave=dwt_wave, mode=dwt_mode).to(device)
        self.dwt_flip_filters = dwt_flip_filters
        self.reference_wavelet_multiplier = reference_wavelet_multiplier
        self.denoised_wavelet_multiplier = denoised_wavelet_multiplier
        self.blend_mode = blend_mode
        if blend_by_mode not in {"image", "latent", "wavelet"}:
            raise ValueError("Bad blend_by_mode: must be one of image, latent, wavelet")
        self.blend_by_mode = blend_by_mode
        self.blend_function = BLENDING_MODES[blend_mode]
        self.iteration_override = {}
        if iteration_override is None or iteration_override == {}:
            return
        if not isinstance(iteration_override, dict):
            raise TypeError("Iteration override must be an object")
        selfdict = self.as_dict()
        overrides = self.iteration_override
        for k, v in iteration_override.items():
            if not isinstance(k, (int, str)) or not isinstance(v, dict):
                raise TypeError(
                    "Bad type for override item: key must be integer or string, value must be an object",
                )
            # Per-iteration Config: current settings overlaid with the
            # whitelisted override values for this key.
            okwargs = selfdict | {
                ok: ov for ok, ov in v.items() if ok in self._overridable_fields
            }
            overrides[k] = self.__class__(device, dtype, latent_format, **okwargs)

    def as_dict(self) -> dict:
        """Return a plain-dict snapshot of the configuration.

        Composite helpers (vae, sharpen, upscale) are flattened back into
        the keyword-argument form accepted by __init__, so the result can
        be splatted into the constructor to clone/override this config.
        """
        result = {
            k: getattr(self, k)
            for k in dir(self)
            if not k.startswith("_") and k not in self._dict_exclude_keys
        }
        result["vae_mode"] = self.vae.mode.name.lower()
        result["vae"] = self.vae.vae
        result["vae_encode_kwargs"] = self.vae.encode_kwargs
        result["vae_decode_kwargs"] = self.vae.decode_kwargs
        # Zero strength encodes "sharpening disabled" (see __init__).
        result["sharpen_reference"] = self.sharpen.strength != 0
        result["sharpen_strength"] = self.sharpen.strength
        result["sharpen_gaussian_kernel_size"] = self.sharpen.gaussian_kernel_size
        result["sharpen_gaussian_sigma"] = self.sharpen.gaussian_sigma
        result["resample_mode"] = self.upscale.resample_mode
        result["rescale_increment"] = self.upscale.rescale_increment
        result["upscale_model"] = self.upscale.upscale_model
        return result

    def get_iteration_config(self, iteration):
        """Return the Config to use for *iteration*.

        Follows an override chain if one is registered for this iteration
        index, otherwise returns self.
        """
        override = self.iteration_override.get(iteration)
        return override.get_iteration_config(iteration) if override else self

py/external.py

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,3 +7,10 @@
77
EXTERNAL["tiled_diffusion"] = importlib.import_module(
88
"custom_nodes.ComfyUI-TiledDiffusion",
99
)
10+
11+
# Optional integration with the ComfyUI-bleh node pack: register it in the
# EXTERNAL registry only when it is installed and new enough. Any failure
# (not installed, or too old) is silently skipped via suppress().
with contextlib.suppress(ImportError, NotImplementedError):
    bleh = importlib.import_module("custom_nodes.ComfyUI-bleh")
    # A missing BLEH_VERSION attribute is treated as version -1 (too old).
    bleh_version = getattr(bleh, "BLEH_VERSION", -1)
    if bleh_version < 1:
        # Incompatible version: bail out through the suppress() above.
        raise NotImplementedError
    EXTERNAL["bleh"] = bleh.py

py/nodes.py

Lines changed: 95 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -4,52 +4,129 @@
44
from comfy.samplers import KSAMPLER
55

66
from .sampler import diffusehigh_sampler
7+
from .vae import VAEMode
78

89

910
class DiffuseHighSamplerNode:
11+
DESCRIPTION = "Jank DiffuseHigh sampler node, used for generating directly to resolutions higher than what the model was trained for. Can be connected to a SamplerCustom or other sampler node that supports a SAMPLER input."
12+
OUTPUT_TOOLTIPS = (
13+
"SAMPLER that can be connected to a SamplerCustom or other sampler node that supports a SAMPLER input.",
14+
)
15+
CATEGORY = "sampling/custom_sampling/JankDiffuseHigh"
1016
RETURN_TYPES = ("SAMPLER",)
1117
FUNCTION = "go"
1218

1319
@classmethod
1420
def INPUT_TYPES(cls) -> dict:
1521
return {
1622
"required": {
17-
"highres_sigmas": ("SIGMAS",),
18-
"guidance_steps": ("INT", {"default": 5, "min": 0}),
23+
"highres_sigmas": (
24+
"SIGMAS",
25+
{
26+
"tooltip": "Sigmas used for steps after upscaling. Generally should be around 0.3-0.5 denoise. NOTE: I do not recommend plugging in raw 1.0 denoise sigmas here.",
27+
},
28+
),
29+
"guidance_steps": (
30+
"INT",
31+
{
32+
"default": 5,
33+
"min": 0,
34+
"tooltip": "Number of guidance steps after an upscale.",
35+
},
36+
),
1937
"guidance_mode": (
2038
(
2139
"image",
2240
"latent",
2341
),
42+
{
43+
"default": "image",
44+
"tooltip": "The original implementation uses image guidance. This requires a VAE encode/decode per guidance step. Alternatively, you can try using guidance via the latent instead which is much faster.",
45+
},
2446
),
2547
"guidance_factor": (
2648
"FLOAT",
2749
{
2850
"default": 1.0,
2951
"min": 0.0,
3052
"max": 1.0,
53+
"tooltip": "Mix factor used on guidance steps. 1.0 means use 100% DiffuseHigh guidance for those steps (like the original implementation).",
54+
},
55+
),
56+
"fadeout_factor": (
57+
"FLOAT",
58+
{
59+
"default": 0.0,
60+
"tooltip": "Can be enabled to fade out guidance_factor. For example, if guidance_factor is 1 and guidance_steps is 4 then fadeout_factor would use these guidance_factors for the guidance steps: 1.00, 0.75, 0.50, 0.25",
61+
},
62+
),
63+
"scale_factor": (
64+
"FLOAT",
65+
{
66+
"default": 2.0,
67+
"tooltip": "Upscale factor per iteration.",
68+
},
69+
),
70+
"renoise_factor": (
71+
"FLOAT",
72+
{
73+
"default": 1.0,
74+
"tooltip": "Strength of noise added at the start of each iteration. The default of 1.0 (100%) is the normal amount, but you can increase this slightly to add more detail.",
75+
},
76+
),
77+
"iterations": (
78+
"INT",
79+
{
80+
"default": 1,
81+
"min": 0,
82+
"tooltip": "Number of upscale iterations to run. Be careful, this can add up fast - if you start at 512x512 with a 2.0 scale factor then 3 iterations will get you to 4096x4096.",
3183
},
3284
),
33-
"fadeout_factor": ("FLOAT", {"default": 0.0}),
34-
"scale_factor": ("FLOAT", {"default": 2.0}),
35-
"renoise_factor": ("FLOAT", {"default": 1.0}),
36-
"iterations": ("INT", {"default": 1, "min": 0}),
37-
"sampler": ("SAMPLER",),
3885
"vae_mode": (
39-
(
40-
"taesd",
41-
"normal",
42-
"tiled",
43-
"tiled_diffusion",
44-
),
86+
tuple(vm.name.lower() for vm in VAEMode),
87+
{
88+
"default": "normal",
89+
"tooltip": "Mode used for encoding/decoding images. TAESD is fast/low VRAM but may reduce quality (you will also need the TAESD encoders installed). Normal will just use the normal VAE node, tiled with use the tiled VAE node. Alternatively, if you have ComfyUI-TiledDiffusion installed you can use tiled_diffusion here.",
90+
},
4591
),
4692
},
4793
"optional": {
48-
"reference_image_opt": ("IMAGE",),
49-
"guidance_sampler_opt": ("SAMPLER",),
50-
"reference_sampler_opt": ("SAMPLER",),
51-
"vae_opt": ("VAE",),
52-
"upscale_model_opt": ("UPSCALE_MODEL",),
94+
"sampler": (
95+
"SAMPLER",
96+
{
97+
"tooltip": "Default sampler used for steps. If not specified the sampler will default to non-ancestral Euler.",
98+
},
99+
),
100+
"reference_image_opt": (
101+
"IMAGE",
102+
{
103+
"tooltip": "Optional: Image used for the initial pass. If not connected, a low-res initial reference will be generated using the schedule from the normal sigmas.",
104+
},
105+
),
106+
"guidance_sampler_opt": (
107+
"SAMPLER",
108+
{
109+
"tooltip": "Optional: Sampler used for guidance steps. If not specified, will fallback to the base sampler. Note: The sampler is called on individual steps, samplers that keep history will not work well here.",
110+
},
111+
),
112+
"reference_sampler_opt": (
113+
"SAMPLER",
114+
{
115+
"tooltip": "Optional: Sampler used to generate the initial low-resolution reference. Only used if reference_image_opt is not connected.",
116+
},
117+
),
118+
"vae_opt": (
119+
"VAE",
120+
{
121+
"tooltip": "Optional when vae_mode is set to `taesd`, otherwise this is the VAE that will be used for encoding/decoding images.",
122+
},
123+
),
124+
"upscale_model_opt": (
125+
"UPSCALE_MODEL",
126+
{
127+
"tooltip": "Optional: Model used for upscaling. When not attached, simple image scaling will be used. Regardless, the image will be scaled to match the size expected based on scale_factor. For example, if you use scale_factor 2 and a 4x upscale model, the image will get scaled down after the upscale model runs.",
128+
},
129+
),
53130
"yaml_parameters": (
54131
"STRING",
55132
{

0 commit comments

Comments
 (0)