Traceback (most recent call last):
  File "D:\ComfyUI-aki-v1.4\execution.py", line 324, in execute
    output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
  File "D:\ComfyUI-aki-v1.4\execution.py", line 199, in get_output_data
    return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
  File "D:\ComfyUI-aki-v1.4\execution.py", line 170, in _map_node_over_list
    process_inputs(input_dict, i)
  File "D:\ComfyUI-aki-v1.4\execution.py", line 159, in process_inputs
    results.append(getattr(obj, func)(**inputs))
  File "D:\ComfyUI-aki-v1.4\custom_nodes\x-flux-comfyui\nodes.py", line 599, in applymodel
    out = clip(pixel_values=pixel_values)
  File "D:\ComfyUI-aki-v1.4\python\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "D:\ComfyUI-aki-v1.4\python\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
  File "D:\ComfyUI-aki-v1.4\comfy\clip_model.py", line 216, in forward
    x = self.vision_model(*args, **kwargs)
  File "D:\ComfyUI-aki-v1.4\python\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "D:\ComfyUI-aki-v1.4\python\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
  File "D:\ComfyUI-aki-v1.4\comfy\clip_model.py", line 195, in forward
    x = self.embeddings(pixel_values)
  File "D:\ComfyUI-aki-v1.4\python\lib\site-packages\torch\nn\modules\module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "D:\ComfyUI-aki-v1.4\python\lib\site-packages\torch\nn\modules\module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
  File "D:\ComfyUI-aki-v1.4\comfy\clip_model.py", line 171, in forward
    return embeds + comfy.ops.cast_to_input(self.position_embedding.weight, embeds)
RuntimeError: The size of tensor a (256) must match the size of tensor b (729) at non-singleton dimension 1
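
For reference, the mismatch on the last line is between the number of patch tokens produced from the input image (256) and the number of rows in the loaded vision model's position-embedding table (729). Those counts would be consistent with a 224×224 preprocessed image at 14×14 patches (16 × 16 = 256) being added to a position table from a checkpoint trained at 384×384 (27 × 27 = 729), i.e. the CLIP-vision checkpoint and the preprocessing resolution not matching. The snippet below is only a minimal sketch of that arithmetic; the patch size, resolutions, and hidden width are assumptions for illustration, not values confirmed by the log.

```python
# Hypothetical repro of the shape mismatch above (assumed: patch size 14,
# hidden width 1152, no class token). 224 px input -> 16*16 = 256 patch tokens;
# a position table sized for 384 px input -> 27*27 = 729 rows.
import torch

patch = 14
hidden = 1152

# Patch embeddings computed from the image actually passed to the node
embeds = torch.zeros(1, (224 // patch) ** 2, hidden)                   # [1, 256, hidden]

# Position-embedding weight from a vision checkpoint trained at 384 px
position_embedding_weight = torch.zeros((384 // patch) ** 2, hidden)   # [729, hidden]

# Same addition as clip_model.py line 171; broadcasting fails on dim 1 (256 vs 729)
embeds + position_embedding_weight
```

If those assumptions hold, the lengths only agree when the CLIP-vision checkpoint loaded in the workflow matches the resolution the node preprocesses the image to, so loading the vision model the x-flux-comfyui node expects is the first thing to check.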