From 1b24bf9f896d410e5dfe629bfb04be3b3a36321f Mon Sep 17 00:00:00 2001
From: rivessamr
Date: Tue, 27 Aug 2024 22:26:30 +0000
Subject: [PATCH] Initial file moves and renames

Diffs=
25d423274 Initial file moves and renames (#7951)

Co-authored-by: rivessamr
---
.rive_head | 2 +- pls/renderer/pls_factory.cpp | 44 --- {pls => renderer}/LICENSE | 0 {pls => renderer}/README.md | 4 +- {pls => renderer}/glad/.clang-format | 0 {pls => renderer}/glad/glad.c | 0 {pls => renderer}/glad/glad.h | 0 {pls => renderer}/glad/glad_custom.c | 0 {pls => renderer}/glad/glad_custom.h | 0 {pls => renderer}/glad/khrplatform.h | 0 .../include/rive/renderer}/buffer_ring.hpp | 6 +- .../include/rive/renderer}/d3d/d3d11.hpp | 0 .../renderer/d3d/render_context_d3d_impl.hpp | 18 +- .../include/rive/renderer/draw.hpp | 83 ++--- .../include/rive/renderer}/fixed_queue.hpp | 6 +- .../include/rive/renderer}/gl/gl_state.hpp | 6 +- .../include/rive/renderer}/gl/gl_utils.hpp | 2 +- .../include/rive/renderer}/gl/gles3.hpp | 0 .../renderer}/gl/load_store_actions_ext.hpp | 10 +- .../renderer/gl/render_buffer_gl_impl.hpp | 12 +- .../renderer/gl/render_context_gl_impl.hpp | 50 +-- .../rive/renderer/gl/render_target_gl.hpp | 18 +- .../include/rive/renderer/gpu.hpp | 44 +-- .../include/rive/renderer/image.hpp | 6 +- .../metal/render_context_metal_impl.h | 20 +- .../include/rive/renderer/render_context.hpp | 87 ++--- .../renderer/render_context_helper_impl.hpp | 18 +- .../rive/renderer/render_context_impl.hpp | 16 +- .../include/rive/renderer/render_target.hpp | 4 +- .../rive/renderer/rive_render_factory.hpp | 6 +- .../include/rive/renderer/rive_renderer.hpp | 39 +-- .../renderer}/trivial_block_allocator.hpp | 0 .../vulkan/render_context_vulkan_impl.hpp | 16 +- .../include/rive/renderer}/vulkan/vkutil.hpp | 24 +- .../rive/renderer}/vulkan/vulkan_context.hpp | 8 +- .../rive/renderer}/webgpu/em_js_handle.hpp | 0 .../webgpu/render_context_webgpu_impl.hpp | 18 +- {pls => renderer}/make_dawn.sh | 0 {pls => renderer}/make_moltenvk.sh | 0 {pls => renderer}/make_swiftshader.sh | 0 .../path_fiddle/fiddle_context.hpp | 8 +- .../path_fiddle/fiddle_context_d3d.cpp | 16 +- .../path_fiddle/fiddle_context_dawn.cpp | 14 +- .../path_fiddle/fiddle_context_dawn_helper.mm | 0 .../path_fiddle/fiddle_context_gl.cpp | 20 +- .../path_fiddle/fiddle_context_metal.mm | 16 +- .../path_fiddle/fiddle_context_vulkan.cpp | 14 +- {pls => renderer}/path_fiddle/index.html | 0 {pls => renderer}/path_fiddle/path_fiddle.cpp | 4 +- {pls => renderer}/path_fiddle/path_fiddle.hpp | 0 {pls => renderer}/premake5.lua | 0 {pls => renderer}/premake5_pls_renderer.lua | 42 +-- .../rive_vk_bootstrap/bootstrap_project.lua | 2 +- .../rive_vk_bootstrap/rive_vk_bootstrap.hpp | 6 +- .../rive_vk_bootstrap/vulkan_fence_pool.hpp | 8 +- .../rive_vk_bootstrap/rive_vk_bootstrap.cpp | 4 +- .../src}/.vscode/c_cpp_properties.json | 0 .../src}/.vscode/settings.json | 0 .../src/d3d/render_context_d3d_impl.cpp | 146 ++++----- .../pls_draw.cpp => renderer/src/draw.cpp | 132 ++++---- {pls/renderer => renderer/src}/eval_cubic.hpp | 4 +- .../renderer => renderer/src}/gl/gl_state.cpp | 6 +- .../renderer => renderer/src}/gl/gl_utils.cpp | 4 +- .../src}/gl/load_gles_extensions.cpp | 2 +- .../src}/gl/load_store_actions_ext.cpp | 12 +- .../src}/gl/pls_impl_ext_native.cpp | 22 +- .../src}/gl/pls_impl_framebuffer_fetch.cpp | 32 +- .../src}/gl/pls_impl_rw_texture.cpp | 62 ++-- .../src}/gl/pls_impl_webgl.cpp | 22 +- .../src/gl/render_buffer_gl_impl.cpp | 14 +- .../src/gl/render_context_gl_impl.cpp | 298
+++++++++--------- .../src/gl/render_target_gl.cpp | 14 +- .../src}/gr_inner_fan_triangulator.hpp | 2 +- .../src}/gr_triangulator.cpp | 16 +- .../src}/gr_triangulator.hpp | 14 +- .../pls_image.cpp => renderer/src/image.cpp | 6 +- .../src}/intersection_board.cpp | 4 +- .../src}/intersection_board.hpp | 4 +- .../src}/metal/background_shader_compiler.h | 16 +- .../src}/metal/background_shader_compiler.mm | 72 ++--- .../src/metal/render_context_metal_impl.mm | 184 +++++------ {pls/renderer => renderer/src}/path_utils.cpp | 0 {pls/renderer => renderer/src}/path_utils.hpp | 0 {pls/renderer => renderer/src}/pls.cpp | 34 +- .../src/render_context.cpp | 222 ++++++------- .../src/render_context_helper_impl.cpp | 16 +- renderer/src/rive_render_factory.cpp | 44 +++ .../src/rive_render_paint.cpp | 38 +-- .../src/rive_render_paint.hpp | 18 +- .../src/rive_render_path.cpp | 28 +- .../src/rive_render_path.hpp | 10 +- .../src/rive_renderer.cpp | 117 +++---- .../src}/shaders/Makefile | 0 .../src}/shaders/advanced_blend.glsl | 0 .../src}/shaders/atomic_draw.glsl | 0 .../src}/shaders/blit_texture_as_draw.glsl | 0 .../src}/shaders/color_ramp.glsl | 0 .../src}/shaders/common.glsl | 0 .../src}/shaders/constants.glsl | 0 .../src}/shaders/draw_image_mesh.glsl | 0 .../src}/shaders/draw_path.glsl | 0 .../src}/shaders/draw_path_common.glsl | 0 .../src}/shaders/glsl.glsl | 0 .../src}/shaders/hlsl.glsl | 0 .../src}/shaders/metal.glsl | 0 .../src}/shaders/metal/color_ramp.metal | 0 .../src}/shaders/metal/draw.metal | 0 .../metal/generate_draw_combinations.py | 0 .../src}/shaders/metal/tessellate.metal | 0 .../src}/shaders/minify.py | 4 +- .../src}/shaders/pls_load_store_ext.glsl | 0 .../src}/shaders/specialization.glsl | 0 .../src}/shaders/spirv/atomic_base.glsl | 0 .../shaders/spirv/atomic_draw_image_mesh.main | 0 .../shaders/spirv/atomic_draw_image_rect.main | 0 .../spirv/atomic_draw_interior_triangles.main | 0 .../src}/shaders/spirv/atomic_draw_path.main | 0 .../shaders/spirv/atomic_resolve_pls.main | 0 .../src}/shaders/spirv/color_ramp.main | 0 .../src}/shaders/spirv/draw_image_mesh.main | 0 .../spirv/draw_interior_triangles.main | 0 .../src}/shaders/spirv/draw_path.main | 0 .../src}/shaders/spirv/tessellate.main | 0 .../src}/shaders/stencil_draw.glsl | 0 .../src}/shaders/tessellate.glsl | 0 .../src/vulkan/render_context_vulkan_impl.cpp | 212 ++++++------- .../src}/vulkan/vkutil.cpp | 10 +- .../src}/vulkan/vulkan_context.cpp | 10 +- .../src}/vulkan/vulkan_memory_allocator.cpp | 0 .../src}/webgpu/em_js_handle.cpp | 2 +- .../src/webgpu/render_context_webgpu_impl.cpp | 146 ++++----- .../webgpu/render_context_webgpu_vulkan.cpp | 10 +- .../webgpu/render_context_webgpu_vulkan.hpp | 10 +- {pls => renderer}/webgpu_player/icons.html | 0 {pls => renderer}/webgpu_player/index.html | 0 {pls => renderer}/webgpu_player/rive.js | 0 .../webgpu_player/webgpu_player.cpp | 34 +- 137 files changed, 1391 insertions(+), 1383 deletions(-) delete mode 100644 pls/renderer/pls_factory.cpp rename {pls => renderer}/LICENSE (100%) rename {pls => renderer}/README.md (79%) rename {pls => renderer}/glad/.clang-format (100%) rename {pls => renderer}/glad/glad.c (100%) rename {pls => renderer}/glad/glad.h (100%) rename {pls => renderer}/glad/glad_custom.c (100%) rename {pls => renderer}/glad/glad_custom.h (100%) rename {pls => renderer}/glad/khrplatform.h (100%) rename {pls/include/rive/pls => renderer/include/rive/renderer}/buffer_ring.hpp (97%) rename {pls/include/rive/pls => renderer/include/rive/renderer}/d3d/d3d11.hpp (100%) rename 
pls/include/rive/pls/d3d/pls_render_context_d3d_impl.hpp => renderer/include/rive/renderer/d3d/render_context_d3d_impl.hpp (94%) rename pls/include/rive/pls/pls_draw.hpp => renderer/include/rive/renderer/draw.hpp (84%) rename {pls/include/rive/pls => renderer/include/rive/renderer}/fixed_queue.hpp (94%) rename {pls/include/rive/pls => renderer/include/rive/renderer}/gl/gl_state.hpp (94%) rename {pls/include/rive/pls => renderer/include/rive/renderer}/gl/gl_utils.hpp (99%) rename {pls/include/rive/pls => renderer/include/rive/renderer}/gl/gles3.hpp (100%) rename {pls/include/rive/pls => renderer/include/rive/renderer}/gl/load_store_actions_ext.hpp (77%) rename pls/include/rive/pls/gl/pls_render_buffer_gl_impl.hpp => renderer/include/rive/renderer/gl/render_buffer_gl_impl.hpp (82%) rename pls/include/rive/pls/gl/pls_render_context_gl_impl.hpp => renderer/include/rive/renderer/gl/render_context_gl_impl.hpp (86%) rename pls/include/rive/pls/gl/pls_render_target_gl.hpp => renderer/include/rive/renderer/gl/render_target_gl.hpp (95%) rename pls/include/rive/pls/pls.hpp => renderer/include/rive/renderer/gpu.hpp (97%) rename pls/include/rive/pls/pls_image.hpp => renderer/include/rive/renderer/image.hpp (95%) rename pls/include/rive/pls/metal/pls_render_context_metal_impl.h => renderer/include/rive/renderer/metal/render_context_metal_impl.h (93%) rename pls/include/rive/pls/pls_render_context.hpp => renderer/include/rive/renderer/render_context.hpp (91%) rename pls/include/rive/pls/pls_render_context_helper_impl.hpp => renderer/include/rive/renderer/render_context_helper_impl.hpp (89%) rename pls/include/rive/pls/pls_render_context_impl.hpp => renderer/include/rive/renderer/render_context_impl.hpp (91%) rename pls/include/rive/pls/pls_render_target.hpp => renderer/include/rive/renderer/render_target.hpp (94%) rename pls/include/rive/pls/pls_factory.hpp => renderer/include/rive/renderer/rive_render_factory.hpp (93%) rename pls/include/rive/pls/pls_renderer.hpp => renderer/include/rive/renderer/rive_renderer.hpp (75%) rename {pls/include/rive/pls => renderer/include/rive/renderer}/trivial_block_allocator.hpp (100%) rename pls/include/rive/pls/vulkan/pls_render_context_vulkan_impl.hpp => renderer/include/rive/renderer/vulkan/render_context_vulkan_impl.hpp (96%) rename {pls/include/rive/pls => renderer/include/rive/renderer}/vulkan/vkutil.hpp (93%) rename {pls/include/rive/pls => renderer/include/rive/renderer}/vulkan/vulkan_context.hpp (98%) rename {pls/include/rive/pls => renderer/include/rive/renderer}/webgpu/em_js_handle.hpp (100%) rename pls/include/rive/pls/webgpu/pls_render_context_webgpu_impl.hpp => renderer/include/rive/renderer/webgpu/render_context_webgpu_impl.hpp (94%) rename {pls => renderer}/make_dawn.sh (100%) rename {pls => renderer}/make_moltenvk.sh (100%) rename {pls => renderer}/make_swiftshader.sh (100%) rename {pls => renderer}/path_fiddle/fiddle_context.hpp (87%) rename {pls => renderer}/path_fiddle/fiddle_context_d3d.cpp (94%) rename {pls => renderer}/path_fiddle/fiddle_context_dawn.cpp (97%) rename {pls => renderer}/path_fiddle/fiddle_context_dawn_helper.mm (100%) rename {pls => renderer}/path_fiddle/fiddle_context_gl.cpp (95%) rename {pls => renderer}/path_fiddle/fiddle_context_metal.mm (93%) rename {pls => renderer}/path_fiddle/fiddle_context_vulkan.cpp (97%) rename {pls => renderer}/path_fiddle/index.html (100%) rename {pls => renderer}/path_fiddle/path_fiddle.cpp (99%) rename {pls => renderer}/path_fiddle/path_fiddle.hpp (100%) rename {pls => renderer}/premake5.lua (100%) 
rename {pls => renderer}/premake5_pls_renderer.lua (86%) rename {pls => renderer}/rive_vk_bootstrap/bootstrap_project.lua (87%) rename {pls => renderer}/rive_vk_bootstrap/include/rive_vk_bootstrap/rive_vk_bootstrap.hpp (88%) rename {pls => renderer}/rive_vk_bootstrap/include/rive_vk_bootstrap/vulkan_fence_pool.hpp (94%) rename {pls => renderer}/rive_vk_bootstrap/rive_vk_bootstrap.cpp (98%) rename {pls/renderer => renderer/src}/.vscode/c_cpp_properties.json (100%) rename {pls/renderer => renderer/src}/.vscode/settings.json (100%) rename pls/renderer/d3d/pls_render_context_d3d_impl.cpp => renderer/src/d3d/render_context_d3d_impl.cpp (94%) rename pls/renderer/pls_draw.cpp => renderer/src/draw.cpp (95%) rename {pls/renderer => renderer/src}/eval_cubic.hpp (97%) rename {pls/renderer => renderer/src}/gl/gl_state.cpp (98%) rename {pls/renderer => renderer/src}/gl/gl_utils.cpp (98%) rename {pls/renderer => renderer/src}/gl/load_gles_extensions.cpp (98%) rename {pls/renderer => renderer/src}/gl/load_store_actions_ext.cpp (83%) rename {pls/renderer => renderer/src}/gl/pls_impl_ext_native.cpp (91%) rename {pls/renderer => renderer/src}/gl/pls_impl_framebuffer_fetch.cpp (87%) rename {pls/renderer => renderer/src}/gl/pls_impl_rw_texture.cpp (78%) rename {pls/renderer => renderer/src}/gl/pls_impl_webgl.cpp (94%) rename pls/renderer/gl/pls_render_buffer_gl_impl.cpp => renderer/src/gl/render_buffer_gl_impl.cpp (92%) rename pls/renderer/gl/pls_render_context_gl_impl.cpp => renderer/src/gl/render_context_gl_impl.cpp (88%) rename pls/renderer/gl/pls_render_target_gl.cpp => renderer/src/gl/render_target_gl.cpp (97%) rename {pls/renderer => renderer/src}/gr_inner_fan_triangulator.hpp (97%) rename {pls/renderer => renderer/src}/gr_triangulator.cpp (99%) rename {pls/renderer => renderer/src}/gr_triangulator.hpp (98%) rename pls/renderer/pls_image.cpp => renderer/src/image.cpp (75%) rename {pls/renderer => renderer/src}/intersection_board.cpp (99%) rename {pls/renderer => renderer/src}/intersection_board.hpp (98%) rename {pls/renderer => renderer/src}/metal/background_shader_compiler.h (82%) rename {pls/renderer => renderer/src}/metal/background_shader_compiler.mm (74%) rename pls/renderer/metal/pls_render_context_metal_impl.mm => renderer/src/metal/render_context_metal_impl.mm (90%) rename {pls/renderer => renderer/src}/path_utils.cpp (100%) rename {pls/renderer => renderer/src}/path_utils.hpp (100%) rename {pls/renderer => renderer/src}/pls.cpp (96%) rename pls/renderer/pls_render_context.cpp => renderer/src/render_context.cpp (92%) rename pls/renderer/pls_render_context_helper_impl.cpp => renderer/src/render_context_helper_impl.cpp (92%) create mode 100644 renderer/src/rive_render_factory.cpp rename pls/renderer/pls_paint.cpp => renderer/src/rive_render_paint.cpp (89%) rename pls/renderer/pls_paint.hpp => renderer/src/rive_render_paint.hpp (93%) rename pls/renderer/pls_path.cpp => renderer/src/rive_render_path.cpp (85%) rename pls/renderer/pls_path.hpp => renderer/src/rive_render_path.hpp (89%) rename pls/renderer/pls_renderer.cpp => renderer/src/rive_renderer.cpp (81%) rename {pls/renderer => renderer/src}/shaders/Makefile (100%) rename {pls/renderer => renderer/src}/shaders/advanced_blend.glsl (100%) rename {pls/renderer => renderer/src}/shaders/atomic_draw.glsl (100%) rename {pls/renderer => renderer/src}/shaders/blit_texture_as_draw.glsl (100%) rename {pls/renderer => renderer/src}/shaders/color_ramp.glsl (100%) rename {pls/renderer => renderer/src}/shaders/common.glsl (100%) rename {pls/renderer => 
renderer/src}/shaders/constants.glsl (100%) rename {pls/renderer => renderer/src}/shaders/draw_image_mesh.glsl (100%) rename {pls/renderer => renderer/src}/shaders/draw_path.glsl (100%) rename {pls/renderer => renderer/src}/shaders/draw_path_common.glsl (100%) rename {pls/renderer => renderer/src}/shaders/glsl.glsl (100%) rename {pls/renderer => renderer/src}/shaders/hlsl.glsl (100%) rename {pls/renderer => renderer/src}/shaders/metal.glsl (100%) rename {pls/renderer => renderer/src}/shaders/metal/color_ramp.metal (100%) rename {pls/renderer => renderer/src}/shaders/metal/draw.metal (100%) rename {pls/renderer => renderer/src}/shaders/metal/generate_draw_combinations.py (100%) rename {pls/renderer => renderer/src}/shaders/metal/tessellate.metal (100%) rename {pls/renderer => renderer/src}/shaders/minify.py (99%) rename {pls/renderer => renderer/src}/shaders/pls_load_store_ext.glsl (100%) rename {pls/renderer => renderer/src}/shaders/specialization.glsl (100%) rename {pls/renderer => renderer/src}/shaders/spirv/atomic_base.glsl (100%) rename {pls/renderer => renderer/src}/shaders/spirv/atomic_draw_image_mesh.main (100%) rename {pls/renderer => renderer/src}/shaders/spirv/atomic_draw_image_rect.main (100%) rename {pls/renderer => renderer/src}/shaders/spirv/atomic_draw_interior_triangles.main (100%) rename {pls/renderer => renderer/src}/shaders/spirv/atomic_draw_path.main (100%) rename {pls/renderer => renderer/src}/shaders/spirv/atomic_resolve_pls.main (100%) rename {pls/renderer => renderer/src}/shaders/spirv/color_ramp.main (100%) rename {pls/renderer => renderer/src}/shaders/spirv/draw_image_mesh.main (100%) rename {pls/renderer => renderer/src}/shaders/spirv/draw_interior_triangles.main (100%) rename {pls/renderer => renderer/src}/shaders/spirv/draw_path.main (100%) rename {pls/renderer => renderer/src}/shaders/spirv/tessellate.main (100%) rename {pls/renderer => renderer/src}/shaders/stencil_draw.glsl (100%) rename {pls/renderer => renderer/src}/shaders/tessellate.glsl (100%) rename pls/renderer/vulkan/pls_render_context_vulkan_impl.cpp => renderer/src/vulkan/render_context_vulkan_impl.cpp (95%) rename {pls/renderer => renderer/src}/vulkan/vkutil.cpp (97%) rename {pls/renderer => renderer/src}/vulkan/vulkan_context.cpp (98%) rename {pls/renderer => renderer/src}/vulkan/vulkan_memory_allocator.cpp (100%) rename {pls/renderer => renderer/src}/webgpu/em_js_handle.cpp (98%) rename pls/renderer/webgpu/pls_render_context_webgpu_impl.cpp => renderer/src/webgpu/render_context_webgpu_impl.cpp (95%) rename pls/renderer/webgpu/pls_render_context_webgpu_vulkan.cpp => renderer/src/webgpu/render_context_webgpu_vulkan.cpp (98%) rename pls/renderer/webgpu/pls_render_context_webgpu_vulkan.hpp => renderer/src/webgpu/render_context_webgpu_vulkan.hpp (88%) rename {pls => renderer}/webgpu_player/icons.html (100%) rename {pls => renderer}/webgpu_player/index.html (100%) rename {pls => renderer}/webgpu_player/rive.js (100%) rename {pls => renderer}/webgpu_player/webgpu_player.cpp (95%) diff --git a/.rive_head b/.rive_head index 4ac252bf..80699bbb 100644 --- a/.rive_head +++ b/.rive_head @@ -1 +1 @@ -69cffe900faeb0241d20ea8ece3f5cfc5f1563a5 +25d423274ff8e2894c874093b2d5ca86fc2895c2 diff --git a/pls/renderer/pls_factory.cpp b/pls/renderer/pls_factory.cpp deleted file mode 100644 index 630a1e96..00000000 --- a/pls/renderer/pls_factory.cpp +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2022 Rive - */ - -#include "rive/pls/pls_factory.hpp" - -#include "pls_paint.hpp" -#include "pls_path.hpp" -#include 
"rive/pls/pls_renderer.hpp" - -namespace rive::pls -{ -rcp PLSFactory::makeLinearGradient(float sx, - float sy, - float ex, - float ey, - const ColorInt colors[], // [count] - const float stops[], // [count] - size_t count) -{ - - return PLSGradient::MakeLinear(sx, sy, ex, ey, colors, stops, count); -} - -rcp PLSFactory::makeRadialGradient(float cx, - float cy, - float radius, - const ColorInt colors[], // [count] - const float stops[], // [count] - size_t count) -{ - - return PLSGradient::MakeRadial(cx, cy, radius, colors, stops, count); -} - -rcp PLSFactory::makeRenderPath(RawPath& rawPath, FillRule fillRule) -{ - return make_rcp(fillRule, rawPath); -} - -rcp PLSFactory::makeEmptyRenderPath() { return make_rcp(); } - -rcp PLSFactory::makeRenderPaint() { return make_rcp(); } -} // namespace rive::pls diff --git a/pls/LICENSE b/renderer/LICENSE similarity index 100% rename from pls/LICENSE rename to renderer/LICENSE diff --git a/pls/README.md b/renderer/README.md similarity index 79% rename from pls/README.md rename to renderer/README.md index 272c7360..31d9957d 100644 --- a/pls/README.md +++ b/renderer/README.md @@ -2,13 +2,13 @@ The Rive Renderer is a vector and raster graphics renderer custom-built for Rive content, for animation, and for runtime. -This folder contains the renderer code and an example for how to interface with it directly. It contains the best in class concrete implementation of Rive's rendering abstraction layer, which we call the Rive Renderer. +This directory contains the renderer code and an example for how to interface with it directly. It contains the best in class concrete implementation of Rive's rendering abstraction layer, which we call the Rive Renderer. ## Clone the rive-runtime repo ``` git clone https://github.com/rive-app/rive-runtime.git -cd rive-runtime/pls +cd rive-runtime/renderer ``` ## Build GLFW diff --git a/pls/glad/.clang-format b/renderer/glad/.clang-format similarity index 100% rename from pls/glad/.clang-format rename to renderer/glad/.clang-format diff --git a/pls/glad/glad.c b/renderer/glad/glad.c similarity index 100% rename from pls/glad/glad.c rename to renderer/glad/glad.c diff --git a/pls/glad/glad.h b/renderer/glad/glad.h similarity index 100% rename from pls/glad/glad.h rename to renderer/glad/glad.h diff --git a/pls/glad/glad_custom.c b/renderer/glad/glad_custom.c similarity index 100% rename from pls/glad/glad_custom.c rename to renderer/glad/glad_custom.c diff --git a/pls/glad/glad_custom.h b/renderer/glad/glad_custom.h similarity index 100% rename from pls/glad/glad_custom.h rename to renderer/glad/glad_custom.h diff --git a/pls/glad/khrplatform.h b/renderer/glad/khrplatform.h similarity index 100% rename from pls/glad/khrplatform.h rename to renderer/glad/khrplatform.h diff --git a/pls/include/rive/pls/buffer_ring.hpp b/renderer/include/rive/renderer/buffer_ring.hpp similarity index 97% rename from pls/include/rive/pls/buffer_ring.hpp rename to renderer/include/rive/renderer/buffer_ring.hpp index 1238b6a8..86a67bbf 100644 --- a/pls/include/rive/pls/buffer_ring.hpp +++ b/renderer/include/rive/renderer/buffer_ring.hpp @@ -4,9 +4,9 @@ #pragma once -#include "rive/pls/pls.hpp" +#include "rive/renderer/gpu.hpp" -namespace rive::pls +namespace rive::gpu { // API-agnostic implementation of an abstract buffer ring. We use rings to ensure the GPU can render // one frame in parallel while the CPU prepares the next frame. 
@@ -86,4 +86,4 @@ class HeapBufferRing : public BufferRing void* onMapBuffer(int bufferIdx, size_t mapSizeInBytes) override { return shadowBuffer(); } void onUnmapAndSubmitBuffer(int bufferIdx, size_t mapSizeInBytes) override {} }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/include/rive/pls/d3d/d3d11.hpp b/renderer/include/rive/renderer/d3d/d3d11.hpp similarity index 100% rename from pls/include/rive/pls/d3d/d3d11.hpp rename to renderer/include/rive/renderer/d3d/d3d11.hpp diff --git a/pls/include/rive/pls/d3d/pls_render_context_d3d_impl.hpp b/renderer/include/rive/renderer/d3d/render_context_d3d_impl.hpp similarity index 94% rename from pls/include/rive/pls/d3d/pls_render_context_d3d_impl.hpp rename to renderer/include/rive/renderer/d3d/render_context_d3d_impl.hpp index 1fca55f0..5d741f85 100644 --- a/pls/include/rive/pls/d3d/pls_render_context_d3d_impl.hpp +++ b/renderer/include/rive/renderer/d3d/render_context_d3d_impl.hpp @@ -4,12 +4,12 @@ #pragma once -#include "rive/pls/d3d/d3d11.hpp" -#include "rive/pls/pls_render_context_helper_impl.hpp" +#include "rive/renderer/d3d/d3d11.hpp" +#include "rive/renderer/render_context_helper_impl.hpp" #include #include -namespace rive::pls +namespace rive::gpu { class PLSRenderContextD3DImpl; @@ -119,7 +119,7 @@ class PLSRenderContextD3DImpl : public PLSRenderContextHelperImpl std::unique_ptr makeUniformBufferRing(size_t capacityInBytes) override; std::unique_ptr makeStorageBufferRing(size_t capacityInBytes, - pls::StorageBufferStructure) override; + gpu::StorageBufferStructure) override; std::unique_ptr makeVertexBufferRing(size_t capacityInBytes) override; std::unique_ptr makeTextureTransferBufferRing(size_t capacityInBytes) override; @@ -134,9 +134,9 @@ class PLSRenderContextD3DImpl : public PLSRenderContextHelperImpl UINT firstHighLevelStruct); void setPipelineLayoutAndShaders(DrawType, - pls::ShaderFeatures, - pls::InterlockMode, - pls::ShaderMiscFlags pixelShaderMiscFlags); + gpu::ShaderFeatures, + gpu::InterlockMode, + gpu::ShaderMiscFlags pixelShaderMiscFlags); const D3DCapabilities m_d3dCapabilities; @@ -175,7 +175,7 @@ class PLSRenderContextD3DImpl : public PLSRenderContextHelperImpl ComPtr m_patchVertexBuffer; ComPtr m_patchIndexBuffer; - // Vertex/index buffers for drawing image rects. (pls::InterlockMode::atomics only.) + // Vertex/index buffers for drawing image rects. (gpu::InterlockMode::atomics only.) 
ComPtr m_imageRectVertexBuffer; ComPtr m_imageRectIndexBuffer; @@ -198,4 +198,4 @@ class PLSRenderContextD3DImpl : public PLSRenderContextHelperImpl ComPtr m_srcOverBlendState; }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/include/rive/pls/pls_draw.hpp b/renderer/include/rive/renderer/draw.hpp similarity index 84% rename from pls/include/rive/pls/pls_draw.hpp rename to renderer/include/rive/renderer/draw.hpp index 4666bda2..c9449cd7 100644 --- a/pls/include/rive/pls/pls_draw.hpp +++ b/renderer/include/rive/renderer/draw.hpp @@ -6,18 +6,18 @@ #include "rive/math/raw_path.hpp" #include "rive/math/wangs_formula.hpp" -#include "rive/pls/pls.hpp" -#include "rive/pls/pls_render_context.hpp" -#include "rive/pls/fixed_queue.hpp" +#include "rive/renderer/gpu.hpp" +#include "rive/renderer/render_context.hpp" +#include "rive/renderer/fixed_queue.hpp" #include "rive/shapes/paint/stroke_cap.hpp" #include "rive/shapes/paint/stroke_join.hpp" #include "rive/refcnt.hpp" -namespace rive::pls +namespace rive::gpu { class PLSDraw; -class PLSPath; -class PLSPaint; +class RiveRenderPath; +class RiveRenderPaint; class PLSRenderContext; class PLSGradient; @@ -47,28 +47,28 @@ class PLSDraw const Mat2D& matrix() const { return m_matrix; } BlendMode blendMode() const { return m_blendMode; } Type type() const { return m_type; } - pls::DrawContents drawContents() const { return m_drawContents; } - bool isStroked() const { return m_drawContents & pls::DrawContents::stroke; } - bool isEvenOddFill() const { return m_drawContents & pls::DrawContents::evenOddFill; } - bool isOpaque() const { return m_drawContents & pls::DrawContents::opaquePaint; } + gpu::DrawContents drawContents() const { return m_drawContents; } + bool isStroked() const { return m_drawContents & gpu::DrawContents::stroke; } + bool isEvenOddFill() const { return m_drawContents & gpu::DrawContents::evenOddFill; } + bool isOpaque() const { return m_drawContents & gpu::DrawContents::opaquePaint; } uint32_t clipID() const { return m_clipID; } bool hasClipRect() const { return m_clipRectInverseMatrix != nullptr; } - const pls::ClipRectInverseMatrix* clipRectInverseMatrix() const + const gpu::ClipRectInverseMatrix* clipRectInverseMatrix() const { return m_clipRectInverseMatrix; } - pls::SimplePaintValue simplePaintValue() const { return m_simplePaintValue; } + gpu::SimplePaintValue simplePaintValue() const { return m_simplePaintValue; } const PLSGradient* gradient() const { return m_gradientRef; } // Clipping setup. void setClipID(uint32_t clipID); - void setClipRect(const pls::ClipRectInverseMatrix* m) { m_clipRectInverseMatrix = m; } + void setClipRect(const gpu::ClipRectInverseMatrix* m) { m_clipRectInverseMatrix = m; } // Used to allocate GPU resources for a collection of draws. using ResourceCounters = PLSRenderContext::LogicalFlush::ResourceCounters; const ResourceCounters& resourceCounts() const { return m_resourceCounts; } - // Linked list of all PLSDraws within a pls::DrawBatch. + // Linked list of all PLSDraws within a gpu::DrawBatch. void setBatchInternalNeighbor(const PLSDraw* neighbor) { assert(m_batchInternalNeighbor == nullptr); @@ -97,9 +97,9 @@ class PLSDraw const Type m_type; uint32_t m_clipID = 0; - const pls::ClipRectInverseMatrix* m_clipRectInverseMatrix = nullptr; + const gpu::ClipRectInverseMatrix* m_clipRectInverseMatrix = nullptr; - pls::DrawContents m_drawContents = pls::DrawContents::none; + gpu::DrawContents m_drawContents = gpu::DrawContents::none; // Filled in by the subclass constructor. 
ResourceCounters m_resourceCounts; @@ -107,9 +107,9 @@ class PLSDraw // Gradient data used by some draws. Stored in the base class so allocateGradientIfNeeded() // doesn't have to be virtual. const PLSGradient* m_gradientRef = nullptr; - pls::SimplePaintValue m_simplePaintValue; + gpu::SimplePaintValue m_simplePaintValue; - // Linked list of all PLSDraws within a pls::DrawBatch. + // Linked list of all PLSDraws within a gpu::DrawBatch. const PLSDraw* m_batchInternalNeighbor = nullptr; }; @@ -117,57 +117,58 @@ class PLSDraw inline void PLSDrawReleaseRefs::operator()(PLSDraw* draw) { draw->releaseRefs(); } // High level abstraction of a single path to be drawn (midpoint fan or interior triangulation). -class PLSPathDraw : public PLSDraw +class RiveRenderPathDraw : public PLSDraw { public: // Creates either a normal path draw or an interior triangulation if the path is large enough. static PLSDrawUniquePtr Make(PLSRenderContext*, const Mat2D&, - rcp, + rcp, FillRule, - const PLSPaint*, + const RiveRenderPaint*, RawPath* scratchPath); FillRule fillRule() const { return m_fillRule; } - pls::PaintType paintType() const { return m_paintType; } + gpu::PaintType paintType() const { return m_paintType; } float strokeRadius() const { return m_strokeRadius; } - pls::ContourDirections contourDirections() const { return m_contourDirections; } + gpu::ContourDirections contourDirections() const { return m_contourDirections; } void pushToRenderContext(PLSRenderContext::LogicalFlush*) final; void releaseRefs() override; public: - PLSPathDraw(IAABB pathBounds, - const Mat2D&, - rcp, - FillRule, - const PLSPaint*, - Type, - pls::InterlockMode); + RiveRenderPathDraw(IAABB pathBounds, + const Mat2D&, + rcp, + FillRule, + const RiveRenderPaint*, + Type, + gpu::InterlockMode); virtual void onPushToRenderContext(PLSRenderContext::LogicalFlush*) = 0; - const PLSPath* const m_pathRef; - const FillRule m_fillRule; // Bc PLSPath fillRule can mutate during the artboard draw process. - const pls::PaintType m_paintType; + const RiveRenderPath* const m_pathRef; + const FillRule + m_fillRule; // Bc RiveRenderPath fillRule can mutate during the artboard draw process. + const gpu::PaintType m_paintType; float m_strokeRadius = 0; - pls::ContourDirections m_contourDirections; + gpu::ContourDirections m_contourDirections; // Used to guarantee m_pathRef doesn't change for the entire time we hold it. RIVE_DEBUG_CODE(uint64_t m_rawPathMutationID;) }; // Draws a path by fanning tessellation patches around the midpoint of each contour. -class MidpointFanPathDraw : public PLSPathDraw +class MidpointFanPathDraw : public RiveRenderPathDraw { public: MidpointFanPathDraw(PLSRenderContext*, IAABB pixelBounds, const Mat2D&, - rcp, + rcp, FillRule, - const PLSPaint*); + const RiveRenderPaint*); protected: void onPushToRenderContext(PLSRenderContext::LogicalFlush*) override; @@ -219,7 +220,7 @@ class MidpointFanPathDraw : public PLSPathDraw // Draws a path by triangulating the interior into non-overlapping triangles and tessellating the // outer curves. 
-class InteriorTriangulationDraw : public PLSPathDraw +class InteriorTriangulationDraw : public RiveRenderPathDraw { public: enum class TriangulatorAxis @@ -232,9 +233,9 @@ class InteriorTriangulationDraw : public PLSPathDraw InteriorTriangulationDraw(PLSRenderContext*, IAABB pixelBounds, const Mat2D&, - rcp, + rcp, FillRule, - const PLSPaint*, + const RiveRenderPaint*, RawPath* scratchPath, TriangulatorAxis); @@ -353,4 +354,4 @@ class StencilClipReset : public PLSDraw protected: const uint32_t m_previousClipID; }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/include/rive/pls/fixed_queue.hpp b/renderer/include/rive/renderer/fixed_queue.hpp similarity index 94% rename from pls/include/rive/pls/fixed_queue.hpp rename to renderer/include/rive/renderer/fixed_queue.hpp index 3fe1e980..8e3e9f42 100644 --- a/pls/include/rive/pls/fixed_queue.hpp +++ b/renderer/include/rive/renderer/fixed_queue.hpp @@ -4,9 +4,9 @@ #pragma once -#include "rive/pls/trivial_block_allocator.hpp" +#include "rive/renderer/trivial_block_allocator.hpp" -namespace rive::pls +namespace rive::gpu { // Fast, simple queue that operates on a block-allocated array. push_back() may only be called up to // m_capacity times before the queue must be rewound. @@ -69,4 +69,4 @@ template class FixedQueue T* m_end = nullptr; RIVE_DEBUG_CODE(size_t m_capacity = 0;) }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/include/rive/pls/gl/gl_state.hpp b/renderer/include/rive/renderer/gl/gl_state.hpp similarity index 94% rename from pls/include/rive/pls/gl/gl_state.hpp rename to renderer/include/rive/renderer/gl/gl_state.hpp index cb706c4f..21bd6412 100644 --- a/pls/include/rive/pls/gl/gl_state.hpp +++ b/renderer/include/rive/renderer/gl/gl_state.hpp @@ -4,11 +4,11 @@ #pragma once -#include "rive/pls/gl/gles3.hpp" +#include "rive/renderer/gl/gles3.hpp" #include "rive/refcnt.hpp" #include "rive/shapes/paint/blend_mode.hpp" -namespace rive::pls +namespace rive::gpu { // Lightweight wrapper around common GL state. 
class GLState : public RefCnt @@ -57,4 +57,4 @@ class GLState : public RefCnt bool boundPixelUnpackBufferID : 1; } m_validState; }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/include/rive/pls/gl/gl_utils.hpp b/renderer/include/rive/renderer/gl/gl_utils.hpp similarity index 99% rename from pls/include/rive/pls/gl/gl_utils.hpp rename to renderer/include/rive/renderer/gl/gl_utils.hpp index 601b4921..7d3bd8fd 100644 --- a/pls/include/rive/pls/gl/gl_utils.hpp +++ b/renderer/include/rive/renderer/gl/gl_utils.hpp @@ -4,7 +4,7 @@ #pragma once -#include "rive/pls/gl/gles3.hpp" +#include "rive/renderer/gl/gles3.hpp" #include "rive/math/aabb.hpp" #include #include diff --git a/pls/include/rive/pls/gl/gles3.hpp b/renderer/include/rive/renderer/gl/gles3.hpp similarity index 100% rename from pls/include/rive/pls/gl/gles3.hpp rename to renderer/include/rive/renderer/gl/gles3.hpp diff --git a/pls/include/rive/pls/gl/load_store_actions_ext.hpp b/renderer/include/rive/renderer/gl/load_store_actions_ext.hpp similarity index 77% rename from pls/include/rive/pls/gl/load_store_actions_ext.hpp rename to renderer/include/rive/renderer/gl/load_store_actions_ext.hpp index df39e1b6..06521d2f 100644 --- a/pls/include/rive/pls/gl/load_store_actions_ext.hpp +++ b/renderer/include/rive/renderer/gl/load_store_actions_ext.hpp @@ -4,11 +4,11 @@ #pragma once -#include "rive/pls/pls.hpp" +#include "rive/renderer/gpu.hpp" #include "rive/enum_bitset.hpp" #include -namespace rive::pls +namespace rive::gpu { // When using EXT_shader_pixel_local_storage, we have to emulate the render pass load/store actions // using a shader. These bits define specific actions that can be turned on or off in that shader. @@ -25,9 +25,9 @@ RIVE_MAKE_ENUM_BITSET(LoadStoreActionsEXT) // Determines the specific load actions that need to be emulated for the given render pass, and // unpacks the clear color, if required. -LoadStoreActionsEXT BuildLoadActionsEXT(const pls::FlushDescriptor&, +LoadStoreActionsEXT BuildLoadActionsEXT(const gpu::FlushDescriptor&, std::array* clearColor4f); -// Appends pls_load_store_ext.glsl to the stream, with the appropriate #defines prepended. +// Appends load_store_ext.glsl to the stream, with the appropriate #defines prepended. std::ostream& BuildLoadStoreEXTGLSL(std::ostream&, LoadStoreActionsEXT); -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/include/rive/pls/gl/pls_render_buffer_gl_impl.hpp b/renderer/include/rive/renderer/gl/render_buffer_gl_impl.hpp similarity index 82% rename from pls/include/rive/pls/gl/pls_render_buffer_gl_impl.hpp rename to renderer/include/rive/renderer/gl/render_buffer_gl_impl.hpp index 9aab7a97..45d56953 100644 --- a/pls/include/rive/pls/gl/pls_render_buffer_gl_impl.hpp +++ b/renderer/include/rive/renderer/gl/render_buffer_gl_impl.hpp @@ -5,11 +5,11 @@ #pragma once #include "rive/renderer.hpp" -#include "rive/pls/gl/gles3.hpp" -#include "rive/pls/pls.hpp" +#include "rive/renderer/gl/gles3.hpp" +#include "rive/renderer/gpu.hpp" #include -namespace rive::pls +namespace rive::gpu { class GLState; @@ -28,7 +28,7 @@ class PLSRenderBufferGLImpl : public lite_rtti_override); // Used by the android runtime to marshal buffers off to the GL thread for deletion. 
- std::array detachBuffers(); + std::array detachBuffers(); void* onMap() override; void onUnmap() override; @@ -41,9 +41,9 @@ class PLSRenderBufferGLImpl : public lite_rtti_override m_bufferIDs{}; + std::array m_bufferIDs{}; int m_submittedBufferIdx = -1; std::unique_ptr m_fallbackMappedMemory; // Used when canMapBuffer() is false. rcp m_state; }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/include/rive/pls/gl/pls_render_context_gl_impl.hpp b/renderer/include/rive/renderer/gl/render_context_gl_impl.hpp similarity index 86% rename from pls/include/rive/pls/gl/pls_render_context_gl_impl.hpp rename to renderer/include/rive/renderer/gl/render_context_gl_impl.hpp index 7b02be4a..885c53ae 100644 --- a/pls/include/rive/pls/gl/pls_render_context_gl_impl.hpp +++ b/renderer/include/rive/renderer/gl/render_context_gl_impl.hpp @@ -4,15 +4,15 @@ #pragma once -#include "rive/pls/gl/gl_state.hpp" -#include "rive/pls/gl/gl_utils.hpp" -#include "rive/pls/pls_render_context_helper_impl.hpp" +#include "rive/renderer/gl/gl_state.hpp" +#include "rive/renderer/gl/gl_utils.hpp" +#include "rive/renderer/render_context_helper_impl.hpp" #include -namespace rive::pls +namespace rive::gpu { -class PLSPath; -class PLSPaint; +class RiveRenderPath; +class RiveRenderPaint; class PLSRenderTargetGL; // OpenGL backend implementation of PLSRenderContextImpl. @@ -74,26 +74,26 @@ class PLSRenderContextGLImpl : public PLSRenderContextHelperImpl const FlushDescriptor&) = 0; // Depending on how we handle PLS atomic resolves, the PLSImpl may require certain flags. - virtual pls::ShaderMiscFlags shaderMiscFlags(const pls::FlushDescriptor&, - pls::DrawType) const + virtual gpu::ShaderMiscFlags shaderMiscFlags(const gpu::FlushDescriptor&, + gpu::DrawType) const { - return pls::ShaderMiscFlags::none; + return gpu::ShaderMiscFlags::none; } // Called before issuing a plsAtomicResolve draw, so the PLSImpl can make any necessary GL // state changes. 
- virtual void setupAtomicResolve(PLSRenderContextGLImpl*, const pls::FlushDescriptor&) {} + virtual void setupAtomicResolve(PLSRenderContextGLImpl*, const gpu::FlushDescriptor&) {} - virtual void pushShaderDefines(pls::InterlockMode, + virtual void pushShaderDefines(gpu::InterlockMode, std::vector* defines) const = 0; void ensureRasterOrderingEnabled(PLSRenderContextGLImpl*, - const pls::FlushDescriptor&, + const gpu::FlushDescriptor&, bool enabled); - void barrier(const pls::FlushDescriptor& desc) + void barrier(const gpu::FlushDescriptor& desc) { - assert(m_rasterOrderingEnabled == pls::TriState::no); + assert(m_rasterOrderingEnabled == gpu::TriState::no); onBarrier(desc); } @@ -101,9 +101,9 @@ class PLSRenderContextGLImpl : public PLSRenderContextHelperImpl private: virtual void onEnableRasterOrdering(bool enabled) {} - virtual void onBarrier(const pls::FlushDescriptor& desc) {} + virtual void onBarrier(const gpu::FlushDescriptor& desc) {} - pls::TriState m_rasterOrderingEnabled = pls::TriState::unknown; + gpu::TriState m_rasterOrderingEnabled = gpu::TriState::unknown; }; class PLSImplEXTNative; @@ -133,10 +133,10 @@ class PLSRenderContextGLImpl : public PLSRenderContextHelperImpl DrawShader(PLSRenderContextGLImpl* plsContextImpl, GLenum shaderType, - pls::DrawType drawType, + gpu::DrawType drawType, ShaderFeatures shaderFeatures, - pls::InterlockMode interlockMode, - pls::ShaderMiscFlags shaderMiscFlags); + gpu::InterlockMode interlockMode, + gpu::ShaderMiscFlags shaderMiscFlags); ~DrawShader() { glDeleteShader(m_id); } @@ -155,10 +155,10 @@ class PLSRenderContextGLImpl : public PLSRenderContextHelperImpl DrawProgram(const DrawProgram&) = delete; DrawProgram& operator=(const DrawProgram&) = delete; DrawProgram(PLSRenderContextGLImpl*, - pls::DrawType, - pls::ShaderFeatures, - pls::InterlockMode, - pls::ShaderMiscFlags); + gpu::DrawType, + gpu::ShaderFeatures, + gpu::InterlockMode, + gpu::ShaderMiscFlags); ~DrawProgram(); GLuint id() const { return m_id; } @@ -173,7 +173,7 @@ class PLSRenderContextGLImpl : public PLSRenderContextHelperImpl std::unique_ptr makeUniformBufferRing(size_t capacityInBytes) override; std::unique_ptr makeStorageBufferRing(size_t capacityInBytes, - pls::StorageBufferStructure) override; + gpu::StorageBufferStructure) override; std::unique_ptr makeVertexBufferRing(size_t capacityInBytes) override; std::unique_ptr makeTextureTransferBufferRing(size_t capacityInBytes) override; @@ -223,4 +223,4 @@ class PLSRenderContextGLImpl : public PLSRenderContextHelperImpl const rcp m_state; }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/include/rive/pls/gl/pls_render_target_gl.hpp b/renderer/include/rive/renderer/gl/render_target_gl.hpp similarity index 95% rename from pls/include/rive/pls/gl/pls_render_target_gl.hpp rename to renderer/include/rive/renderer/gl/render_target_gl.hpp index 7f13e75b..0344a650 100644 --- a/pls/include/rive/pls/gl/pls_render_target_gl.hpp +++ b/renderer/include/rive/renderer/gl/render_target_gl.hpp @@ -4,13 +4,13 @@ #pragma once -#include "rive/pls/pls.hpp" -#include "rive/pls/pls_render_target.hpp" -#include "rive/pls/gl/gles3.hpp" -#include "rive/pls/gl/gl_utils.hpp" +#include "rive/renderer/gpu.hpp" +#include "rive/renderer/render_target.hpp" +#include "rive/renderer/gl/gles3.hpp" +#include "rive/renderer/gl/gl_utils.hpp" #include "utils/lite_rtti.hpp" -namespace rive::pls +namespace rive::gpu { class PLSRenderContextGLImpl; @@ -25,7 +25,7 @@ class PLSRenderTargetGL : public PLSRenderTarget, public enable_lite_rtti((1 
<< kShaderFeatureCount) - 1); + static_cast((1 << kShaderFeatureCount) - 1); constexpr static ShaderFeatures kVertexShaderFeaturesMask = ShaderFeatures::ENABLE_CLIPPING | ShaderFeatures::ENABLE_CLIP_RECT | ShaderFeatures::ENABLE_ADVANCED_BLEND; @@ -606,7 +606,7 @@ constexpr static ShaderFeatures ShaderFeaturesMaskFor(DrawType drawType, { case DrawType::imageRect: case DrawType::imageMesh: - if (interlockMode != pls::InterlockMode::atomics) + if (interlockMode != gpu::InterlockMode::atomics) { mask = ShaderFeatures::ENABLE_CLIPPING | ShaderFeatures::ENABLE_CLIP_RECT | ShaderFeatures::ENABLE_ADVANCED_BLEND | @@ -619,11 +619,11 @@ constexpr static ShaderFeatures ShaderFeaturesMaskFor(DrawType drawType, case DrawType::midpointFanPatches: case DrawType::outerCurvePatches: case DrawType::interiorTriangulation: - case DrawType::plsAtomicResolve: + case DrawType::gpuAtomicResolve: mask = kAllShaderFeatures; break; - case DrawType::plsAtomicInitialize: - assert(interlockMode == pls::InterlockMode::atomics); + case DrawType::gpuAtomicInitialize: + assert(interlockMode == gpu::InterlockMode::atomics); mask = ShaderFeatures::ENABLE_CLIPPING | ShaderFeatures::ENABLE_ADVANCED_BLEND; break; case DrawType::stencilClipReset: @@ -644,15 +644,15 @@ enum class ShaderMiscFlags : uint32_t // need to read the color buffer when advanced blend is not used. fixedFunctionColorBlend = 1 << 0, - // DrawType::plsAtomicInitialize only. Also store the color clear value to PLS when drawing a + // DrawType::gpuAtomicInitialize only. Also store the color clear value to PLS when drawing a // clear, in addition to clearing the other PLS planes. storeColorClear = 1 << 1, - // DrawType::plsAtomicInitialize only. Swizzle the existing framebuffer contents from BGRA to + // DrawType::gpuAtomicInitialize only. Swizzle the existing framebuffer contents from BGRA to // RGBA. (For when this data had to get copied from a BGRA target.) swizzleColorBGRAToRGBA = 1 << 2, - // DrawType::plsAtomicResolve only. Optimization for when rendering to an offscreen texture. + // DrawType::gpuAtomicResolve only. Optimization for when rendering to an offscreen texture. // // It renders the final "resolve" operation directly to the renderTarget in a single pass, // instead of (1) resolving the offscreen texture, and then (2) copying the offscreen texture to @@ -686,8 +686,8 @@ RIVE_MAKE_ENUM_BITSET(DrawContents) // A nestedClip draw updates the clip buffer while simultaneously clipping against the outerClip // that is currently in the clip buffer. -constexpr static pls::DrawContents kNestedClipUpdateMask = - (pls::DrawContents::activeClip | pls::DrawContents::clipUpdate); +constexpr static gpu::DrawContents kNestedClipUpdateMask = + (gpu::DrawContents::activeClip | gpu::DrawContents::clipUpdate); // Low-level batch of geometry to submit to the GPU. struct DrawBatch @@ -802,7 +802,7 @@ struct FlushDescriptor // Fence that will be signalled once "externalCommandBuffer" finishes executing the entire // frame. (Null if isFinalFlushOfFrame is false.) - pls::CommandBufferCompletionFence* frameCompletionFence = nullptr; + gpu::CommandBufferCompletionFence* frameCompletionFence = nullptr; bool hasTriangleVertices = false; bool wireframe = false; @@ -922,7 +922,7 @@ struct PathData private: WRITEONLY float m_matrix[6]; WRITEONLY float m_strokeRadius; // "0" indicates that the path is filled, not stroked. - WRITEONLY uint32_t m_zIndex; // pls::InterlockMode::depthStencil only. + WRITEONLY uint32_t m_zIndex; // gpu::InterlockMode::depthStencil only. 
}; static_assert(sizeof(PathData) == StorageBufferElementSizeInBytes(PathData::kBufferStructure) * 2); static_assert(256 % sizeof(PathData) == 0); @@ -972,7 +972,7 @@ struct PaintAuxData const PLSTexture*, const ClipRectInverseMatrix*, const PLSRenderTarget*, - const pls::PlatformFeatures&); + const gpu::PlatformFeatures&); private: WRITEONLY float m_matrix[6]; // Maps _fragCoord to paint coordinates. @@ -1052,7 +1052,7 @@ struct ImageDrawUniforms WRITEONLY float m_clipRectInverseMatrix[6]; WRITEONLY uint32_t m_clipID; WRITEONLY uint32_t m_blendMode; - WRITEONLY uint32_t m_zIndex; // pls::InterlockMode::depthStencil only. + WRITEONLY uint32_t m_zIndex; // gpu::InterlockMode::depthStencil only. // Uniform blocks must be multiples of 256 bytes in size. WRITEONLY uint8_t m_padTo256Bytes[256 - 68]; @@ -1163,4 +1163,4 @@ enum class TriState yes, unknown }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/include/rive/pls/pls_image.hpp b/renderer/include/rive/renderer/image.hpp similarity index 95% rename from pls/include/rive/pls/pls_image.hpp rename to renderer/include/rive/renderer/image.hpp index e7386780..960b3e40 100644 --- a/pls/include/rive/pls/pls_image.hpp +++ b/renderer/include/rive/renderer/image.hpp @@ -6,9 +6,9 @@ #include "rive/refcnt.hpp" #include "rive/renderer.hpp" -#include "rive/pls/pls_render_context_impl.hpp" +#include "rive/renderer/render_context_impl.hpp" -namespace rive::pls +namespace rive::gpu { class PLSTexture : public RefCnt { @@ -64,4 +64,4 @@ class PLSImage : public lite_rtti_override private: rcp m_texture; }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/include/rive/pls/metal/pls_render_context_metal_impl.h b/renderer/include/rive/renderer/metal/render_context_metal_impl.h similarity index 93% rename from pls/include/rive/pls/metal/pls_render_context_metal_impl.h rename to renderer/include/rive/renderer/metal/render_context_metal_impl.h index f9e9e8ba..46a8a8c7 100644 --- a/pls/include/rive/pls/metal/pls_render_context_metal_impl.h +++ b/renderer/include/rive/renderer/metal/render_context_metal_impl.h @@ -4,7 +4,7 @@ #pragma once -#include "rive/pls/pls_render_context_helper_impl.hpp" +#include "rive/renderer/render_context_helper_impl.hpp" #include #include @@ -12,7 +12,7 @@ #import #endif -namespace rive::pls +namespace rive::gpu { class BackgroundShaderCompiler; @@ -160,18 +160,18 @@ class PLSRenderContextMetalImpl : public PLSRenderContextHelperImpl void prepareToMapBuffers() override; // Creates a MTLRenderCommandEncoder and sets the common state for PLS draws. - id makeRenderPassForDraws(const pls::FlushDescriptor&, + id makeRenderPassForDraws(const gpu::FlushDescriptor&, MTLRenderPassDescriptor*, id, - pls::ShaderMiscFlags baselineMiscFlags); + gpu::ShaderMiscFlags baselineMiscFlags); // Returns the specific DrawPipeline for the given feature set, if it has been compiled. If it // has not finished compiling yet, this method may return a (potentially slower) DrawPipeline // that can draw a superset of the given features. 
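The fallback behavior this comment describes reduces to a bitmask-superset test: since ShaderFeatures is an enum bitset, any compiled pipeline whose feature bits cover the requested bits can draw the batch, just potentially slower. A hypothetical sketch of that lookup (illustrative names and storage; the Metal backend's actual implementation is not part of this patch):

```
// Sketch of the "superset fallback" lookup described above.
#include <cstdint>
#include <map>

using ShaderFeatures = uint32_t; // stand-in for the real enum bitset

struct DrawPipelineSketch
{
    ShaderFeatures features;
    // ... compiled pipeline state ...
};

const DrawPipelineSketch* findCompatibleSketch(
    const std::map<ShaderFeatures, DrawPipelineSketch>& compiledPipelines,
    ShaderFeatures requested)
{
    // Prefer an exact match: the leanest pipeline for these features.
    if (auto it = compiledPipelines.find(requested); it != compiledPipelines.end())
        return &it->second;
    // Otherwise accept any pipeline enabling a superset of the requested
    // feature bits; it draws correctly, just potentially slower.
    for (const auto& [key, pipeline] : compiledPipelines)
        if ((pipeline.features & requested) == requested)
            return &pipeline;
    return nullptr; // nothing compatible has finished compiling yet
}
```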
- const DrawPipeline* findCompatibleDrawPipeline(pls::DrawType, - pls::ShaderFeatures, - pls::InterlockMode, - pls::ShaderMiscFlags); + const DrawPipeline* findCompatibleDrawPipeline(gpu::DrawType, + gpu::ShaderFeatures, + gpu::InterlockMode, + gpu::ShaderMiscFlags); void flush(const FlushDescriptor&) override; @@ -199,7 +199,7 @@ class PLSRenderContextMetalImpl : public PLSRenderContextHelperImpl id m_pathPatchVertexBuffer; id m_pathPatchIndexBuffer; - // Vertex/index buffers for drawing image rects. (pls::InterlockMode::atomics only.) + // Vertex/index buffers for drawing image rects. (gpu::InterlockMode::atomics only.) id m_imageRectVertexBuffer; id m_imageRectIndexBuffer; @@ -208,4 +208,4 @@ class PLSRenderContextMetalImpl : public PLSRenderContextHelperImpl std::mutex m_bufferRingLocks[kBufferRingSize]; int m_bufferRingIdx = 0; }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/include/rive/pls/pls_render_context.hpp b/renderer/include/rive/renderer/render_context.hpp similarity index 91% rename from pls/include/rive/pls/pls_render_context.hpp rename to renderer/include/rive/renderer/render_context.hpp index e49f80d0..11a5a770 100644 --- a/pls/include/rive/pls/pls_render_context.hpp +++ b/renderer/include/rive/renderer/render_context.hpp @@ -5,10 +5,10 @@ #pragma once #include "rive/math/vec2d.hpp" -#include "rive/pls/pls.hpp" -#include "rive/pls/pls_factory.hpp" -#include "rive/pls/pls_render_target.hpp" -#include "rive/pls/trivial_block_allocator.hpp" +#include "rive/renderer/gpu.hpp" +#include "rive/renderer/rive_render_factory.hpp" +#include "rive/renderer/render_target.hpp" +#include "rive/renderer/trivial_block_allocator.hpp" #include "rive/shapes/paint/color.hpp" #include #include @@ -21,7 +21,7 @@ namespace rive class RawPath; } // namespace rive -namespace rive::pls +namespace rive::gpu { class GradientLibrary; class IntersectionBoard; @@ -32,9 +32,9 @@ class MidpointFanPathDraw; class StencilClipReset; class PLSDraw; class PLSGradient; -class PLSPaint; -class PLSPath; -class PLSPathDraw; +class RiveRenderPaint; +class RiveRenderPath; +class RiveRenderPathDraw; class PLSRenderContextImpl; // Used as a key for complex gradients. @@ -66,7 +66,7 @@ struct PLSDrawReleaseRefs }; using PLSDrawUniquePtr = std::unique_ptr; -// Top-level, API agnostic rendering context for PLSRenderer. This class manages all the GPU +// Top-level, API agnostic rendering context for RiveRenderer. This class manages all the GPU // buffers, context state, and other resources required for Rive's pixel local storage path // rendering algorithm. // @@ -86,7 +86,7 @@ using PLSDrawUniquePtr = std::unique_ptr; // } // } // context->flush(); -class PLSRenderContext : public PLSFactory +class PLSRenderContext : public RiveRenderFactory { public: PLSRenderContext(std::unique_ptr); @@ -95,7 +95,7 @@ class PLSRenderContext : public PLSFactory PLSRenderContextImpl* impl() { return m_impl.get(); } template T* static_impl_cast() { return static_cast(m_impl.get()); } - const pls::PlatformFeatures& platformFeatures() const; + const gpu::PlatformFeatures& platformFeatures() const; // Options for controlling how and where a frame is rendered. struct FrameDescriptor @@ -138,7 +138,7 @@ class PLSRenderContext : public PLSFactory // as rectangular paths with an image paint. 
bool frameSupportsImagePaintForPaths() const; - const pls::InterlockMode frameInterlockMode() const { return m_frameInterlockMode; } + const gpu::InterlockMode frameInterlockMode() const { return m_frameInterlockMode; } // Generates a unique clip ID that is guaranteed to not exist in the current clip buffer, and // assigns a contentBounds to it. @@ -207,7 +207,7 @@ class PLSRenderContext : public PLSFactory void* externalCommandBuffer = nullptr; // Fence that will be signalled once "externalCommandBuffer" finishes executing. - pls::CommandBufferCompletionFence* frameCompletionFence = nullptr; + gpu::CommandBufferCompletionFence* frameCompletionFence = nullptr; }; // Submits all GPU commands that have been built up since beginFrame(). @@ -249,13 +249,13 @@ class PLSRenderContext : public PLSFactory return m_perFrameAllocator.make(std::forward(args)...); } - // Backend-specific PLSFactory implementation. + // Backend-specific RiveRenderFactory implementation. rcp makeRenderBuffer(RenderBufferType, RenderBufferFlags, size_t) override; rcp decodeImage(Span) override; private: friend class PLSDraw; - friend class PLSPathDraw; + friend class RiveRenderPathDraw; friend class MidpointFanPathDraw; friend class InteriorTriangulationDraw; friend class ImageRectDraw; @@ -322,8 +322,8 @@ class PLSRenderContext : public PLSFactory // Per-frame state. FrameDescriptor m_frameDescriptor; - pls::InterlockMode m_frameInterlockMode; - pls::ShaderFeatures m_frameShaderFeaturesMask; + gpu::InterlockMode m_frameInterlockMode; + gpu::ShaderFeatures m_frameShaderFeaturesMask; RIVE_DEBUG_CODE(bool m_didBeginFrame = false;) // Clipping state. @@ -333,18 +333,18 @@ class PLSRenderContext : public PLSFactory std::vector m_indirectDrawList; std::unique_ptr m_intersectionBoard; - WriteOnlyMappedMemory m_flushUniformData; - WriteOnlyMappedMemory m_pathData; - WriteOnlyMappedMemory m_paintData; - WriteOnlyMappedMemory m_paintAuxData; - WriteOnlyMappedMemory m_contourData; + WriteOnlyMappedMemory m_flushUniformData; + WriteOnlyMappedMemory m_pathData; + WriteOnlyMappedMemory m_paintData; + WriteOnlyMappedMemory m_paintAuxData; + WriteOnlyMappedMemory m_contourData; // Simple gradients get written by the CPU. - WriteOnlyMappedMemory m_simpleColorRampsData; + WriteOnlyMappedMemory m_simpleColorRampsData; // Complex gradients get rendered by the GPU. - WriteOnlyMappedMemory m_gradSpanData; - WriteOnlyMappedMemory m_tessSpanData; - WriteOnlyMappedMemory m_triangleVertexData; - WriteOnlyMappedMemory m_imageDrawUniformData; + WriteOnlyMappedMemory m_gradSpanData; + WriteOnlyMappedMemory m_tessSpanData; + WriteOnlyMappedMemory m_triangleVertexData; + WriteOnlyMappedMemory m_imageDrawUniformData; // Simple allocator for trivially-destructible data that needs to persist until the current // frame has completed. All memory in this allocator is dropped at the end of the every frame. @@ -383,10 +383,10 @@ class PLSRenderContext : public PLSFactory // Resets the CPU-side STL containers so they don't have unbounded growth. void resetContainers(); - // Access this flush's pls::FlushDescriptor (which is not valid until layoutResources()). + // Access this flush's gpu::FlushDescriptor (which is not valid until layoutResources()). // NOTE: Some fields in the FlushDescriptor (tessVertexSpanCount, hasTriangleVertices, // drawList, and combinedShaderFeatures) do not become valid until after writeResources(). 
- const pls::FlushDescriptor& desc() + const gpu::FlushDescriptor& desc() { assert(m_hasDoneLayout); return m_flushDesc; @@ -479,7 +479,7 @@ class PLSRenderContext : public PLSFactory // issue a logical flush and try again. [[nodiscard]] bool allocateGradient(const PLSGradient*, ResourceCounters*, - pls::ColorRampLocation*); + gpu::ColorRampLocation*); // Carves out space for this specific flush within the total frame's resource buffers and // lays out the flush-specific resource textures. Updates the total frame running conters @@ -497,14 +497,14 @@ class PLSRenderContext : public PLSFactory // Pushes a record to the GPU for the given path, which will be referenced by future calls // to pushContour() and pushCubic(). - void pushPath(PLSPathDraw*, pls::PatchType, uint32_t tessVertexCount); + void pushPath(RiveRenderPathDraw*, gpu::PatchType, uint32_t tessVertexCount); // Pushes a contour record to the GPU for the given contour, which references the // most-recently pushed path and will be referenced by future calls to pushCubic(). // // The first curve of the contour will be pre-padded with 'paddingVertexCount' tessellation // vertices, colocated at T=0. The caller must use this argument to align the end of the - // contour on a boundary of the patch size. (See pls::PaddingToAlignUp().) + // contour on a boundary of the patch size. (See gpu::PaddingToAlignUp().) void pushContour(Vec2D midpoint, bool closed, uint32_t paddingVertexCount); // Appends a cubic curve and join to the most-recently pushed contour, and reserves the @@ -571,7 +571,7 @@ class PLSRenderContext : public PLSFactory uint32_t contourIDWithFlags); // Functionally equivalent to "pushMirroredTessellationSpans(); pushTessellationSpans();", - // but packs each forward and mirrored pair into a single pls::TessVertexSpan. + // but packs each forward and mirrored pair into a single gpu::TessVertexSpan. RIVE_ALWAYS_INLINE void pushMirroredAndForwardTessellationSpans( const Vec2D pts[4], Vec2D joinTangent, @@ -583,10 +583,13 @@ class PLSRenderContext : public PLSFactory // Either appends a new drawBatch to m_drawList or merges into m_drawList.tail(). // Updates the batch's ShaderFeatures according to the passed parameters. - DrawBatch& pushPathDraw(PLSPathDraw*, DrawType, uint32_t vertexCount, uint32_t baseVertex); + DrawBatch& pushPathDraw(RiveRenderPathDraw*, + DrawType, + uint32_t vertexCount, + uint32_t baseVertex); DrawBatch& pushDraw(PLSDraw*, DrawType, - pls::PaintType, + gpu::PaintType, uint32_t elementCount, uint32_t baseElement); @@ -599,7 +602,7 @@ class PLSRenderContext : public PLSFactory // Simple gradients have one stop at t=0 and one stop at t=1. They're implemented with 2 // texels. std::unordered_map m_simpleGradients; // [color0, color1] -> texelsIdx. - std::vector m_pendingSimpleGradientWrites; + std::vector m_pendingSimpleGradientWrites; // Complex gradients have stop(s) between t=0 and t=1. In theory they should be scaled to a // ramp where every stop lands exactly on a pixel center, but for now we just always scale @@ -610,7 +613,7 @@ class PLSRenderContext : public PLSFactory std::vector m_clips; - // High-level draw list. These get built into a low-level list of pls::DrawBatch objects + // High-level draw list. These get built into a low-level list of gpu::DrawBatch objects // during writeResources(). 
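The pushContour() contract above hinges on padding a contour so its tessellation vertex count lands on a patch-size boundary. A small sketch of the arithmetic gpu::PaddingToAlignUp() implies (assumed behavior; the real helper's signature may differ):

```
// Sketch of the alignment arithmetic implied by gpu::PaddingToAlignUp():
// how many extra vertices are needed so that `count` becomes a multiple of
// the patch size. Illustrative only.
#include <cstdint>

constexpr uint32_t paddingToAlignUp(uint32_t count, uint32_t alignment)
{
    return (alignment - count % alignment) % alignment;
}

// e.g. with a patch size of 8: padding a 13-vertex contour with 3 vertices
// colocated at T=0 ends it on a patch boundary (13 + 3 == 16).
static_assert(paddingToAlignUp(13, 8) == 3, "13 + 3 == 16, a multiple of 8");
```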
std::vector<PLSDrawUniquePtr> m_plsDraws; IAABB m_combinedDrawBounds; @@ -626,15 +629,15 @@ class PLSRenderContext : public PLSFactory uint32_t m_outerCubicTessVertexIdx; uint32_t m_midpointFanTessVertexIdx; - pls::FlushDescriptor m_flushDesc; - pls::GradTextureLayout m_gradTextureLayout; // Not determined until writeResources(). + gpu::FlushDescriptor m_flushDesc; + gpu::GradTextureLayout m_gradTextureLayout; // Not determined until writeResources(). BlockAllocatedLinkedList<DrawBatch> m_drawList; - pls::ShaderFeatures m_combinedShaderFeatures; + gpu::ShaderFeatures m_combinedShaderFeatures; // Most recent path and contour state. bool m_currentPathIsStroked; - pls::ContourDirections m_currentPathContourDirections; + gpu::ContourDirections m_currentPathContourDirections; uint32_t m_currentPathID; uint32_t m_currentContourID; uint32_t m_currentContourPaddingVertexCount; // Padding to add to the first curve. @@ -653,4 +656,4 @@ class PLSRenderContext : public PLSFactory std::vector<std::unique_ptr<LogicalFlush>> m_logicalFlushes; }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/include/rive/pls/pls_render_context_helper_impl.hpp b/renderer/include/rive/renderer/render_context_helper_impl.hpp similarity index 89% rename from pls/include/rive/pls/pls_render_context_helper_impl.hpp rename to renderer/include/rive/renderer/render_context_helper_impl.hpp index be7bf229..481376cf 100644 --- a/pls/include/rive/pls/pls_render_context_helper_impl.hpp +++ b/renderer/include/rive/renderer/render_context_helper_impl.hpp @@ -4,11 +4,11 @@ #pragma once -#include "rive/pls/pls_render_context_impl.hpp" -#include "rive/pls/buffer_ring.hpp" +#include "rive/renderer/render_context_impl.hpp" +#include "rive/renderer/buffer_ring.hpp" #include <chrono> -namespace rive::pls +namespace rive::gpu { // PLSRenderContextImpl that uses BufferRing to manage GPU resources.
class PLSRenderContextHelperImpl : public PLSRenderContextImpl @@ -18,10 +18,10 @@ class PLSRenderContextHelperImpl : public PLSRenderContextImpl void resizeFlushUniformBuffer(size_t sizeInBytes) override; void resizeImageDrawUniformBuffer(size_t sizeInBytes) override; - void resizePathBuffer(size_t sizeInBytes, pls::StorageBufferStructure) override; - void resizePaintBuffer(size_t sizeInBytes, pls::StorageBufferStructure) override; - void resizePaintAuxBuffer(size_t sizeInBytes, pls::StorageBufferStructure) override; - void resizeContourBuffer(size_t sizeInBytes, pls::StorageBufferStructure) override; + void resizePathBuffer(size_t sizeInBytes, gpu::StorageBufferStructure) override; + void resizePaintBuffer(size_t sizeInBytes, gpu::StorageBufferStructure) override; + void resizePaintAuxBuffer(size_t sizeInBytes, gpu::StorageBufferStructure) override; + void resizeContourBuffer(size_t sizeInBytes, gpu::StorageBufferStructure) override; void resizeSimpleColorRampsBuffer(size_t sizeInBytes) override; void resizeGradSpanBuffer(size_t sizeInBytes) override; void resizeTessVertexSpanBuffer(size_t sizeInBytes) override; @@ -74,7 +74,7 @@ class PLSRenderContextHelperImpl : public PLSRenderContextImpl virtual std::unique_ptr makeUniformBufferRing(size_t capacityInBytes) = 0; virtual std::unique_ptr makeStorageBufferRing(size_t capacityInBytes, - pls::StorageBufferStructure) = 0; + gpu::StorageBufferStructure) = 0; virtual std::unique_ptr makeVertexBufferRing(size_t capacityInBytes) = 0; virtual std::unique_ptr makeTextureTransferBufferRing(size_t capacityInBytes) = 0; @@ -91,4 +91,4 @@ class PLSRenderContextHelperImpl : public PLSRenderContextImpl std::unique_ptr m_triangleBuffer; std::chrono::steady_clock::time_point m_localEpoch = std::chrono::steady_clock::now(); }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/include/rive/pls/pls_render_context_impl.hpp b/renderer/include/rive/renderer/render_context_impl.hpp similarity index 91% rename from pls/include/rive/pls/pls_render_context_impl.hpp rename to renderer/include/rive/renderer/render_context_impl.hpp index 27615304..6c0ae4b8 100644 --- a/pls/include/rive/pls/pls_render_context_impl.hpp +++ b/renderer/include/rive/renderer/render_context_impl.hpp @@ -4,9 +4,9 @@ #pragma once -#include "rive/pls/pls_render_context.hpp" +#include "rive/renderer/render_context.hpp" -namespace rive::pls +namespace rive::gpu { class PLSTexture; @@ -33,10 +33,10 @@ class PLSRenderContextImpl // buffer as a storage buffer. 
virtual void resizeFlushUniformBuffer(size_t sizeInBytes) = 0; virtual void resizeImageDrawUniformBuffer(size_t sizeInBytes) = 0; - virtual void resizePathBuffer(size_t sizeInBytes, pls::StorageBufferStructure) = 0; - virtual void resizePaintBuffer(size_t sizeInBytes, pls::StorageBufferStructure) = 0; - virtual void resizePaintAuxBuffer(size_t sizeInBytes, pls::StorageBufferStructure) = 0; - virtual void resizeContourBuffer(size_t sizeInBytes, pls::StorageBufferStructure) = 0; + virtual void resizePathBuffer(size_t sizeInBytes, gpu::StorageBufferStructure) = 0; + virtual void resizePaintBuffer(size_t sizeInBytes, gpu::StorageBufferStructure) = 0; + virtual void resizePaintAuxBuffer(size_t sizeInBytes, gpu::StorageBufferStructure) = 0; + virtual void resizeContourBuffer(size_t sizeInBytes, gpu::StorageBufferStructure) = 0; virtual void resizeSimpleColorRampsBuffer(size_t sizeInBytes) = 0; virtual void resizeGradSpanBuffer(size_t sizeInBytes) = 0; virtual void resizeTessVertexSpanBuffer(size_t sizeInBytes) = 0; @@ -87,7 +87,7 @@ class PLSRenderContextImpl // 3. Execute the draw list. (The Rive renderer shaders read the gradient and tessellation // textures in order to do path rendering.) // - virtual void flush(const pls::FlushDescriptor&) = 0; + virtual void flush(const gpu::FlushDescriptor&) = 0; // Steady clock, used to determine when we should trim our resource allocations. virtual double secondsNow() const = 0; @@ -95,4 +95,4 @@ class PLSRenderContextImpl protected: PlatformFeatures m_platformFeatures; }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/include/rive/pls/pls_render_target.hpp b/renderer/include/rive/renderer/render_target.hpp similarity index 94% rename from pls/include/rive/pls/pls_render_target.hpp rename to renderer/include/rive/renderer/render_target.hpp index 2f3da2e4..9df4b29d 100644 --- a/pls/include/rive/pls/pls_render_target.hpp +++ b/renderer/include/rive/renderer/render_target.hpp @@ -9,7 +9,7 @@ #include "rive/math/aabb.hpp" #include "rive/math/simd.hpp" -namespace rive::pls +namespace rive::gpu { // Wraps a backend-specific buffer that PLSRenderContext draws into. class PLSRenderTarget : public RefCnt @@ -32,4 +32,4 @@ class PLSRenderTarget : public RefCnt uint32_t m_width; uint32_t m_height; }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/include/rive/pls/pls_factory.hpp b/renderer/include/rive/renderer/rive_render_factory.hpp similarity index 93% rename from pls/include/rive/pls/pls_factory.hpp rename to renderer/include/rive/renderer/rive_render_factory.hpp index 781f0be9..b6b176dd 100644 --- a/pls/include/rive/pls/pls_factory.hpp +++ b/renderer/include/rive/renderer/rive_render_factory.hpp @@ -6,10 +6,10 @@ #include "rive/factory.hpp" -namespace rive::pls +namespace rive::gpu { // Partial rive::Factory implementation for the PLS objects that are backend-agnostic. 
-class PLSFactory : public Factory +class RiveRenderFactory : public Factory { public: rcp<RenderShader> makeLinearGradient(float sx, @@ -33,4 +33,4 @@ class PLSFactory rcp<RenderPaint> makeRenderPaint() override; }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/include/rive/pls/pls_renderer.hpp b/renderer/include/rive/renderer/rive_renderer.hpp similarity index 75% rename from pls/include/rive/pls/pls_renderer.hpp rename to renderer/include/rive/renderer/rive_renderer.hpp index fde264e1..6c26f91c 100644 --- a/pls/include/rive/pls/pls_renderer.hpp +++ b/renderer/include/rive/renderer/rive_renderer.hpp @@ -6,9 +6,9 @@ #include "rive/math/raw_path.hpp" #include "rive/renderer.hpp" -#include "rive/pls/pls.hpp" -#include "rive/pls/pls_draw.hpp" -#include "rive/pls/pls_render_context.hpp" +#include "rive/renderer/gpu.hpp" +#include "rive/renderer/draw.hpp" +#include "rive/renderer/render_context.hpp" #include <vector> namespace rive @@ -16,18 +16,18 @@ namespace rive class GrInnerFanTriangulator; }; -namespace rive::pls +namespace rive::gpu { -class PLSPath; -class PLSPaint; +class RiveRenderPath; +class RiveRenderPaint; class PLSRenderContext; // Renderer implementation for Rive's pixel local storage renderer. -class PLSRenderer : public Renderer +class RiveRenderer : public Renderer { public: - PLSRenderer(PLSRenderContext*); - ~PLSRenderer() override; + RiveRenderer(PLSRenderContext*); + ~RiveRenderer() override; void save() override; void restore() override; @@ -54,8 +54,8 @@ class PLSRenderer : public Renderer #endif private: - void clipRectImpl(AABB, const PLSPath* originalPath); - void clipPathImpl(const PLSPath*); + void clipRectImpl(AABB, const RiveRenderPath* originalPath); + void clipPathImpl(const RiveRenderPath*); // Clips and pushes the given draw to m_context. If the clipped draw is too complex to be // supported by the GPU buffers, even after a logical flush, then nothing is drawn. @@ -73,7 +73,7 @@ class PLSRenderer : public Renderer size_t clipStackHeight = 0; AABB clipRect; Mat2D clipRectMatrix; - const pls::ClipRectInverseMatrix* clipRectInverseMatrix = nullptr; + const gpu::ClipRectInverseMatrix* clipRectInverseMatrix = nullptr; bool clipIsEmpty = false; }; std::vector m_stack{1}; @@ -81,17 +81,18 @@ class PLSRenderer : public Renderer struct ClipElement { ClipElement() = default; - ClipElement(const Mat2D&, const PLSPath*, FillRule); + ClipElement(const Mat2D&, const RiveRenderPath*, FillRule); ~ClipElement(); - void reset(const Mat2D&, const PLSPath*, FillRule); - bool isEquivalent(const Mat2D&, const PLSPath*) const; + void reset(const Mat2D&, const RiveRenderPath*, FillRule); + bool isEquivalent(const Mat2D&, const RiveRenderPath*) const; Mat2D matrix; uint64_t rawPathMutationID; AABB pathBounds; - rcp<const PLSPath> path; - FillRule fillRule; // Because PLSPath fillRule can mutate during the artboard draw process. + rcp<const RiveRenderPath> path; + FillRule + fillRule; // Because RiveRenderPath fillRule can mutate during the artboard draw process. uint32_t clipID; }; std::vector<ClipElement> m_clipStack; @@ -101,9 +102,9 @@ class PLSRenderer : public Renderer std::vector<PLSDrawUniquePtr> m_internalDrawBatch; // Path of the rectangle [0, 0, 1, 1]. Used to draw images. - rcp<PLSPath> m_unitRectPath; + rcp<RiveRenderPath> m_unitRectPath; // Used to build coarse path interiors for the "interior triangulation" algorithm.
RawPath m_scratchPath; }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/include/rive/pls/trivial_block_allocator.hpp b/renderer/include/rive/renderer/trivial_block_allocator.hpp similarity index 100% rename from pls/include/rive/pls/trivial_block_allocator.hpp rename to renderer/include/rive/renderer/trivial_block_allocator.hpp diff --git a/pls/include/rive/pls/vulkan/pls_render_context_vulkan_impl.hpp b/renderer/include/rive/renderer/vulkan/render_context_vulkan_impl.hpp similarity index 96% rename from pls/include/rive/pls/vulkan/pls_render_context_vulkan_impl.hpp rename to renderer/include/rive/renderer/vulkan/render_context_vulkan_impl.hpp index bdfb3875..706243c3 100644 --- a/pls/include/rive/pls/vulkan/pls_render_context_vulkan_impl.hpp +++ b/renderer/include/rive/renderer/vulkan/render_context_vulkan_impl.hpp @@ -4,14 +4,14 @@ #pragma once -#include "rive/pls/pls_render_context_impl.hpp" -#include "rive/pls/vulkan/vulkan_context.hpp" +#include "rive/renderer/render_context_impl.hpp" +#include "rive/renderer/vulkan/vulkan_context.hpp" #include #include #include #include -namespace rive::pls +namespace rive::gpu { class PLSTextureVulkanImpl; @@ -70,10 +70,10 @@ class PLSRenderTargetVulkan : public PLSRenderTarget rcp m_offscreenColorTexture; // Used when m_targetTextureView does not have // VK_ACCESS_INPUT_ATTACHMENT_READ_BIT - rcp m_coverageTexture; // pls::InterlockMode::rasterOrdering. + rcp m_coverageTexture; // gpu::InterlockMode::rasterOrdering. rcp m_clipTexture; rcp m_scratchColorTexture; - rcp m_coverageAtomicTexture; // pls::InterlockMode::atomics. + rcp m_coverageAtomicTexture; // gpu::InterlockMode::atomics. rcp m_offscreenColorTextureView; rcp m_coverageTextureView; @@ -128,7 +128,7 @@ class PLSRenderContextVulkanImpl : public PLSRenderContextImpl void unmap##Name() override { m_name.flushMappedContentsAt(m_bufferRingIdx); } #define IMPLEMENT_PLS_STRUCTURED_BUFFER(Name, m_name) \ - void resize##Name(size_t sizeInBytes, pls::StorageBufferStructure) override \ + void resize##Name(size_t sizeInBytes, gpu::StorageBufferStructure) override \ { \ m_name.setTargetSize(sizeInBytes); \ } \ @@ -240,11 +240,11 @@ class PLSRenderContextVulkanImpl : public PLSRenderContextImpl rcp m_imageRectVertexBuffer; rcp m_imageRectIndexBuffer; - rcp m_frameCompletionFences[pls::kBufferRingSize]; + rcp m_frameCompletionFences[gpu::kBufferRingSize]; int m_bufferRingIdx = -1; // Pool of DescriptorSetPools that have been fully released. These can be // recycled once their expirationFrameIdx is reached. 
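The recycling rule referenced here is the one ZombieResource encodes in vkutil.hpp below: a fully released resource becomes reusable gpu::kBufferRingSize frames after its last use. As a sketch, assuming a monotonically increasing counter like VulkanContext's m_currentFrameIdx:

// expirationFrameIdx = lastFrameUsed + gpu::kBufferRingSize, so once the
// current frame index reaches it, no in-flight command buffer can still be
// referencing the resource and it is safe to recycle.
bool canRecycle(uint64_t expirationFrameIdx, uint64_t currentFrameIdx)
{
    return currentFrameIdx >= expirationFrameIdx;
}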
std::deque> m_descriptorSetPoolPool; }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/include/rive/pls/vulkan/vkutil.hpp b/renderer/include/rive/renderer/vulkan/vkutil.hpp similarity index 93% rename from pls/include/rive/pls/vulkan/vkutil.hpp rename to renderer/include/rive/renderer/vulkan/vkutil.hpp index 9b0ef148..ab2cf7ae 100644 --- a/pls/include/rive/pls/vulkan/vkutil.hpp +++ b/renderer/include/rive/renderer/vulkan/vkutil.hpp @@ -5,19 +5,19 @@ #pragma once #include "rive/refcnt.hpp" -#include "rive/pls/pls.hpp" +#include "rive/renderer/gpu.hpp" #include #include #include #include #include -namespace rive::pls +namespace rive::gpu { class VulkanContext; -} // namespace rive::pls +} // namespace rive::gpu -namespace rive::pls::vkutil +namespace rive::gpu::vkutil { inline static void vk_check(VkResult res, const char* file, int line) { @@ -28,7 +28,7 @@ inline static void vk_check(VkResult res, const char* file, int line) } } -#define VK_CHECK(x) ::rive::pls::vkutil::vk_check(x, __FILE__, __LINE__) +#define VK_CHECK(x) ::rive::gpu::vkutil::vk_check(x, __FILE__, __LINE__) constexpr static VkColorComponentFlags kColorWriteMaskRGBA = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | @@ -69,7 +69,7 @@ class RenderingResource : public RefCnt template struct ZombieResource { ZombieResource(T* resource_, uint64_t lastFrameUsed) : - resource(resource_), expirationFrameIdx(lastFrameUsed + pls::kBufferRingSize) + resource(resource_), expirationFrameIdx(lastFrameUsed + gpu::kBufferRingSize) { assert(resource_->debugging_refcnt() == 0); } @@ -102,7 +102,7 @@ class Buffer : public RenderingResource void flushMappedContents(size_t updatedSizeInBytes); private: - friend class ::rive::pls::VulkanContext; + friend class ::rive::gpu::VulkanContext; Buffer(rcp, const VkBufferCreateInfo&, Mappability); @@ -156,7 +156,7 @@ class BufferRing private: size_t m_targetSize; size_t m_pendingFlushSize = 0; - rcp m_buffers[pls::kBufferRingSize]; + rcp m_buffers[gpu::kBufferRingSize]; }; class Texture : public RenderingResource @@ -169,7 +169,7 @@ class Texture : public RenderingResource const VkImage* vkImageAddressOf() const { return &m_vkImage; } private: - friend class ::rive::pls::VulkanContext; + friend class ::rive::gpu::VulkanContext; Texture(rcp, const VkImageCreateInfo&); @@ -190,7 +190,7 @@ class TextureView : public RenderingResource const VkImageView* vkImageViewAddressOf() const { return &m_vkImageView; } private: - friend class ::rive::pls::VulkanContext; + friend class ::rive::gpu::VulkanContext; TextureView(rcp, rcp textureRef, @@ -212,7 +212,7 @@ class Framebuffer : public RenderingResource operator VkFramebuffer() const { return m_vkFramebuffer; } private: - friend class ::rive::pls::VulkanContext; + friend class ::rive::gpu::VulkanContext; Framebuffer(rcp, const VkFramebufferCreateInfo&); @@ -277,4 +277,4 @@ inline VkClearColorValue color_clear_r32ui(uint32_t value) ret.uint32[0] = value; return ret; } -} // namespace rive::pls::vkutil +} // namespace rive::gpu::vkutil diff --git a/pls/include/rive/pls/vulkan/vulkan_context.hpp b/renderer/include/rive/renderer/vulkan/vulkan_context.hpp similarity index 98% rename from pls/include/rive/pls/vulkan/vulkan_context.hpp rename to renderer/include/rive/renderer/vulkan/vulkan_context.hpp index 608f728d..d67c7921 100644 --- a/pls/include/rive/pls/vulkan/vulkan_context.hpp +++ b/renderer/include/rive/renderer/vulkan/vulkan_context.hpp @@ -4,10 +4,10 @@ #pragma once -#include "rive/pls/vulkan/vkutil.hpp" 
+#include "rive/renderer/vulkan/vkutil.hpp" #include -namespace rive::pls +namespace rive::gpu { // Specifies the Vulkan API version and which relevant features have been enabled. // The client should ensure the features get enabled if they are supported. @@ -102,7 +102,7 @@ class VulkanContext : public RefCnt // Called at the beginning of a new frame. This is where we purge // m_resourcePurgatory, so the client is responsible for guaranteeing that all - // command buffers from frame "N + 1 - pls::kBufferRingSize" have finished + // command buffers from frame "N + 1 - gpu::kBufferRingSize" have finished // executing before calling this method. void onNewFrameBegun(); @@ -168,4 +168,4 @@ class VulkanContext : public RefCnt uint64_t m_currentFrameIdx = 0; bool m_shutdown = false; // Indicates that we are in a shutdown cycle. }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/include/rive/pls/webgpu/em_js_handle.hpp b/renderer/include/rive/renderer/webgpu/em_js_handle.hpp similarity index 100% rename from pls/include/rive/pls/webgpu/em_js_handle.hpp rename to renderer/include/rive/renderer/webgpu/em_js_handle.hpp diff --git a/pls/include/rive/pls/webgpu/pls_render_context_webgpu_impl.hpp b/renderer/include/rive/renderer/webgpu/render_context_webgpu_impl.hpp similarity index 94% rename from pls/include/rive/pls/webgpu/pls_render_context_webgpu_impl.hpp rename to renderer/include/rive/renderer/webgpu/render_context_webgpu_impl.hpp index 9b053eac..616d7bfe 100644 --- a/pls/include/rive/pls/webgpu/pls_render_context_webgpu_impl.hpp +++ b/renderer/include/rive/renderer/webgpu/render_context_webgpu_impl.hpp @@ -4,13 +4,13 @@ #pragma once -#include "rive/pls/pls_render_context_helper_impl.hpp" -#include "rive/pls/webgpu/em_js_handle.hpp" -#include "rive/pls/gl/load_store_actions_ext.hpp" +#include "rive/renderer/render_context_helper_impl.hpp" +#include "rive/renderer/webgpu/em_js_handle.hpp" +#include "rive/renderer/gl/load_store_actions_ext.hpp" #include #include -namespace rive::pls +namespace rive::gpu { class PLSRenderContextWebGPUVulkan; @@ -70,7 +70,7 @@ class PLSRenderContextWebGPUImpl : public PLSRenderContextHelperImpl wgpu::Device, wgpu::Queue, const ContextOptions&, - const pls::PlatformFeatures& baselinePlatformFeatures = {}); + const gpu::PlatformFeatures& baselinePlatformFeatures = {}); virtual ~PLSRenderContextWebGPUImpl(); @@ -89,7 +89,7 @@ class PLSRenderContextWebGPUImpl : public PLSRenderContextHelperImpl PLSRenderContextWebGPUImpl(wgpu::Device device, wgpu::Queue queue, const ContextOptions&, - const pls::PlatformFeatures& baselinePlatformFeatures); + const gpu::PlatformFeatures& baselinePlatformFeatures); // Create the BindGroupLayout that binds the PLS attachments as textures. This is not necessary // on all implementations. @@ -100,7 +100,7 @@ class PLSRenderContextWebGPUImpl : public PLSRenderContextHelperImpl } // Create a standard PLS "draw" pipeline for the current implementation.
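The "N + 1 - gpu::kBufferRingSize" contract documented in vulkan_context.hpp above is ordinary frames-in-flight pacing: frame N reuses the ring slot last touched by frame N - kBufferRingSize, so that older frame must be provably finished first. A sketch, assuming a ring size of 3 and one fence per slot (names invented for illustration):

void waitForRingSlot(VkDevice device, uint64_t frameIdx, VkFence fencePerSlot[3])
{
    // Frame N recycles the buffers last used by frame N - 3; block until that
    // frame's fence signals before calling VulkanContext::onNewFrameBegun().
    VkFence fence = fencePerSlot[frameIdx % 3];
    vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
}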
- virtual wgpu::RenderPipeline makePLSDrawPipeline(rive::pls::DrawType drawType, + virtual wgpu::RenderPipeline makePLSDrawPipeline(rive::gpu::DrawType drawType, wgpu::TextureFormat framebufferFormat, wgpu::ShaderModule vertexShader, wgpu::ShaderModule fragmentShader, @@ -126,7 +126,7 @@ class PLSRenderContextWebGPUImpl : public PLSRenderContextHelperImpl std::unique_ptr makeUniformBufferRing(size_t capacityInBytes) override; std::unique_ptr makeStorageBufferRing(size_t capacityInBytes, - pls::StorageBufferStructure) override; + gpu::StorageBufferStructure) override; std::unique_ptr makeVertexBufferRing(size_t capacityInBytes) override; std::unique_ptr makeTextureTransferBufferRing(size_t capacityInBytes) override; @@ -182,4 +182,4 @@ class PLSRenderContextWebGPUImpl : public PLSRenderContextHelperImpl wgpu::Texture m_nullImagePaintTexture; // Bound when there is not an image paint. wgpu::TextureView m_nullImagePaintTextureView; }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/make_dawn.sh b/renderer/make_dawn.sh similarity index 100% rename from pls/make_dawn.sh rename to renderer/make_dawn.sh diff --git a/pls/make_moltenvk.sh b/renderer/make_moltenvk.sh similarity index 100% rename from pls/make_moltenvk.sh rename to renderer/make_moltenvk.sh diff --git a/pls/make_swiftshader.sh b/renderer/make_swiftshader.sh similarity index 100% rename from pls/make_swiftshader.sh rename to renderer/make_swiftshader.sh diff --git a/pls/path_fiddle/fiddle_context.hpp b/renderer/path_fiddle/fiddle_context.hpp similarity index 87% rename from pls/path_fiddle/fiddle_context.hpp rename to renderer/path_fiddle/fiddle_context.hpp index 09a8cb93..219cb6d6 100644 --- a/pls/path_fiddle/fiddle_context.hpp +++ b/renderer/path_fiddle/fiddle_context.hpp @@ -2,7 +2,7 @@ #include -#include "rive/pls/pls_render_context.hpp" +#include "rive/renderer/render_context.hpp" struct GLFWwindow; @@ -25,12 +25,12 @@ class FiddleContext virtual ~FiddleContext() {} virtual float dpiScale(GLFWwindow*) const = 0; virtual rive::Factory* factory() = 0; - virtual rive::pls::PLSRenderContext* plsContextOrNull() = 0; - virtual rive::pls::PLSRenderTarget* plsRenderTargetOrNull() = 0; + virtual rive::gpu::PLSRenderContext* plsContextOrNull() = 0; + virtual rive::gpu::PLSRenderTarget* plsRenderTargetOrNull() = 0; virtual void onSizeChanged(GLFWwindow*, int width, int height, uint32_t sampleCount) {} virtual void toggleZoomWindow() = 0; virtual std::unique_ptr makeRenderer(int width, int height) = 0; - virtual void begin(const rive::pls::PLSRenderContext::FrameDescriptor&) = 0; + virtual void begin(const rive::gpu::PLSRenderContext::FrameDescriptor&) = 0; virtual void flushPLSContext() = 0; // Called by end() virtual void end(GLFWwindow*, std::vector* pixelData = nullptr) = 0; virtual void tick(){}; diff --git a/pls/path_fiddle/fiddle_context_d3d.cpp b/renderer/path_fiddle/fiddle_context_d3d.cpp similarity index 94% rename from pls/path_fiddle/fiddle_context_d3d.cpp rename to renderer/path_fiddle/fiddle_context_d3d.cpp index ff73d2b8..c81dd5ba 100644 --- a/pls/path_fiddle/fiddle_context_d3d.cpp +++ b/renderer/path_fiddle/fiddle_context_d3d.cpp @@ -6,9 +6,9 @@ std::unique_ptr FiddleContext::MakeD3DPLS(FiddleContextOptions) { #else -#include "rive/pls/pls_renderer.hpp" -#include "rive/pls/d3d/pls_render_context_d3d_impl.hpp" -#include "rive/pls/d3d/d3d11.hpp" +#include "rive/renderer/rive_renderer.hpp" +#include "rive/renderer/d3d/render_context_d3d_impl.hpp" +#include "rive/renderer/d3d/d3d11.hpp" #include #include @@ 
-18,7 +18,7 @@ std::unique_ptr FiddleContext::MakeD3DPLS(FiddleContextOptions) { #include using namespace rive; -using namespace rive::pls; +using namespace rive::gpu; class FiddleContextD3DPLS : public FiddleContext { @@ -37,9 +37,9 @@ class FiddleContextD3DPLS : public FiddleContext rive::Factory* factory() override { return m_plsContext.get(); } - rive::pls::PLSRenderContext* plsContextOrNull() override { return m_plsContext.get(); } + rive::gpu::PLSRenderContext* plsContextOrNull() override { return m_plsContext.get(); } - rive::pls::PLSRenderTarget* plsRenderTargetOrNull() override { return m_renderTarget.get(); } + rive::gpu::PLSRenderTarget* plsRenderTargetOrNull() override { return m_renderTarget.get(); } void onSizeChanged(GLFWwindow* window, int width, int height, uint32_t sampleCount) override { @@ -69,10 +69,10 @@ class FiddleContextD3DPLS : public FiddleContext std::unique_ptr makeRenderer(int width, int height) override { - return std::make_unique(m_plsContext.get()); + return std::make_unique(m_plsContext.get()); } - void begin(const rive::pls::PLSRenderContext::FrameDescriptor& frameDescriptor) override + void begin(const rive::gpu::PLSRenderContext::FrameDescriptor& frameDescriptor) override { m_plsContext->beginFrame(frameDescriptor); } diff --git a/pls/path_fiddle/fiddle_context_dawn.cpp b/renderer/path_fiddle/fiddle_context_dawn.cpp similarity index 97% rename from pls/path_fiddle/fiddle_context_dawn.cpp rename to renderer/path_fiddle/fiddle_context_dawn.cpp index 8fe39f88..ba8d1c52 100644 --- a/pls/path_fiddle/fiddle_context_dawn.cpp +++ b/renderer/path_fiddle/fiddle_context_dawn.cpp @@ -12,15 +12,15 @@ std::unique_ptr FiddleContext::MakeDawnPLS(FiddleContextOptions o #include "dawn/native/DawnNative.h" #include "dawn/dawn_proc.h" -#include "rive/pls/pls_factory.hpp" -#include "rive/pls/pls_renderer.hpp" -#include "rive/pls/webgpu/pls_render_context_webgpu_impl.hpp" +#include "rive/renderer/rive_render_factory.hpp" +#include "rive/renderer/rive_renderer.hpp" +#include "rive/renderer/webgpu/render_context_webgpu_impl.hpp" #include #include using namespace rive; -using namespace rive::pls; +using namespace rive::gpu; static void print_device_error(WGPUErrorType errorType, const char* message, void*) { @@ -173,9 +173,9 @@ class FiddleContextDawnPLS : public FiddleContext Factory* factory() override { return m_plsContext.get(); } - rive::pls::PLSRenderContext* plsContextOrNull() override { return m_plsContext.get(); } + rive::gpu::PLSRenderContext* plsContextOrNull() override { return m_plsContext.get(); } - rive::pls::PLSRenderTarget* plsRenderTargetOrNull() override { return m_renderTarget.get(); } + rive::gpu::PLSRenderTarget* plsRenderTargetOrNull() override { return m_renderTarget.get(); } void onSizeChanged(GLFWwindow* window, int width, int height, uint32_t sampleCount) override { @@ -214,7 +214,7 @@ class FiddleContextDawnPLS : public FiddleContext std::unique_ptr makeRenderer(int width, int height) override { - return std::make_unique(m_plsContext.get()); + return std::make_unique(m_plsContext.get()); } void begin(const PLSRenderContext::FrameDescriptor& frameDescriptor) override diff --git a/pls/path_fiddle/fiddle_context_dawn_helper.mm b/renderer/path_fiddle/fiddle_context_dawn_helper.mm similarity index 100% rename from pls/path_fiddle/fiddle_context_dawn_helper.mm rename to renderer/path_fiddle/fiddle_context_dawn_helper.mm diff --git a/pls/path_fiddle/fiddle_context_gl.cpp b/renderer/path_fiddle/fiddle_context_gl.cpp similarity index 95% rename from 
pls/path_fiddle/fiddle_context_gl.cpp rename to renderer/path_fiddle/fiddle_context_gl.cpp index ea29bdb5..49b2c2e2 100644 --- a/pls/path_fiddle/fiddle_context_gl.cpp +++ b/renderer/path_fiddle/fiddle_context_gl.cpp @@ -7,10 +7,10 @@ std::unique_ptr FiddleContext::MakeGLPLS() { return nullptr; } #else #include "path_fiddle.hpp" -#include "rive/pls/gl/gles3.hpp" -#include "rive/pls/pls_renderer.hpp" -#include "rive/pls/gl/pls_render_context_gl_impl.hpp" -#include "rive/pls/gl/pls_render_target_gl.hpp" +#include "rive/renderer/gl/gles3.hpp" +#include "rive/renderer/rive_renderer.hpp" +#include "rive/renderer/gl/render_context_gl_impl.hpp" +#include "rive/renderer/gl/render_target_gl.hpp" #ifdef RIVE_WEBGL #include @@ -21,7 +21,7 @@ std::unique_ptr FiddleContext::MakeGLPLS() { return nullptr; } #include "GLFW/glfw3.h" using namespace rive; -using namespace rive::pls; +using namespace rive::gpu; #ifdef RIVE_DESKTOP_GL #ifdef DEBUG @@ -211,9 +211,9 @@ class FiddleContextGLPLS : public FiddleContextGL rive::Factory* factory() override { return m_plsContext.get(); } - rive::pls::PLSRenderContext* plsContextOrNull() override { return m_plsContext.get(); } + rive::gpu::PLSRenderContext* plsContextOrNull() override { return m_plsContext.get(); } - rive::pls::PLSRenderTarget* plsRenderTargetOrNull() override { return m_renderTarget.get(); } + rive::gpu::PLSRenderTarget* plsRenderTargetOrNull() override { return m_renderTarget.get(); } void onSizeChanged(GLFWwindow* window, int width, int height, uint32_t sampleCount) override { @@ -222,7 +222,7 @@ class FiddleContextGLPLS : public FiddleContextGL std::unique_ptr makeRenderer(int width, int height) override { - return std::make_unique(m_plsContext.get()); + return std::make_unique(m_plsContext.get()); } void begin(const PLSRenderContext::FrameDescriptor& frameDescriptor) override @@ -288,9 +288,9 @@ class FiddleContextGLSkia : public FiddleContextGL rive::Factory* factory() override { return &m_factory; } - rive::pls::PLSRenderContext* plsContextOrNull() override { return nullptr; } + rive::gpu::PLSRenderContext* plsContextOrNull() override { return nullptr; } - rive::pls::PLSRenderTarget* plsRenderTargetOrNull() override { return nullptr; } + rive::gpu::PLSRenderTarget* plsRenderTargetOrNull() override { return nullptr; } std::unique_ptr makeRenderer(int width, int height) override { diff --git a/pls/path_fiddle/fiddle_context_metal.mm b/renderer/path_fiddle/fiddle_context_metal.mm similarity index 93% rename from pls/path_fiddle/fiddle_context_metal.mm rename to renderer/path_fiddle/fiddle_context_metal.mm index c837ed59..f0da876e 100644 --- a/pls/path_fiddle/fiddle_context_metal.mm +++ b/renderer/path_fiddle/fiddle_context_metal.mm @@ -1,9 +1,9 @@ #include "fiddle_context.hpp" -#include "rive/pls/pls_renderer.hpp" -#include "rive/pls/gl/pls_render_context_gl_impl.hpp" -#include "rive/pls/gl/pls_render_target_gl.hpp" -#include "rive/pls/metal/pls_render_context_metal_impl.h" +#include "rive/renderer/rive_renderer.hpp" +#include "rive/renderer/gl/render_context_gl_impl.hpp" +#include "rive/renderer/gl/render_target_gl.hpp" +#include "rive/renderer/metal/render_context_metal_impl.h" #import #import @@ -13,7 +13,7 @@ #include "GLFW/glfw3native.h" using namespace rive; -using namespace rive::pls; +using namespace rive::gpu; class FiddleContextMetalPLS : public FiddleContext { @@ -45,9 +45,9 @@ float dpiScale(GLFWwindow* window) const override Factory* factory() override { return m_plsContext.get(); } - rive::pls::PLSRenderContext* plsContextOrNull() 
override { return m_plsContext.get(); } + rive::gpu::PLSRenderContext* plsContextOrNull() override { return m_plsContext.get(); } - rive::pls::PLSRenderTarget* plsRenderTargetOrNull() override { return m_renderTarget.get(); } + rive::gpu::PLSRenderTarget* plsRenderTargetOrNull() override { return m_renderTarget.get(); } void onSizeChanged(GLFWwindow* window, int width, int height, uint32_t sampleCount) override { @@ -73,7 +73,7 @@ void toggleZoomWindow() override {} std::unique_ptr makeRenderer(int width, int height) override { - return std::make_unique(m_plsContext.get()); + return std::make_unique(m_plsContext.get()); } void begin(const PLSRenderContext::FrameDescriptor& frameDescriptor) override diff --git a/pls/path_fiddle/fiddle_context_vulkan.cpp b/renderer/path_fiddle/fiddle_context_vulkan.cpp similarity index 97% rename from pls/path_fiddle/fiddle_context_vulkan.cpp rename to renderer/path_fiddle/fiddle_context_vulkan.cpp index 1f7d7fec..32361ed6 100644 --- a/pls/path_fiddle/fiddle_context_vulkan.cpp +++ b/renderer/path_fiddle/fiddle_context_vulkan.cpp @@ -15,8 +15,8 @@ std::unique_ptr FiddleContext::MakeVulkanPLS(FiddleContextOptions #include "rive_vk_bootstrap/rive_vk_bootstrap.hpp" #include "rive_vk_bootstrap/vulkan_fence_pool.hpp" -#include "rive/pls/pls_renderer.hpp" -#include "rive/pls/vulkan/pls_render_context_vulkan_impl.hpp" +#include "rive/renderer/rive_renderer.hpp" +#include "rive/renderer/vulkan/render_context_vulkan_impl.hpp" #include #include #include @@ -24,11 +24,11 @@ std::unique_ptr FiddleContext::MakeVulkanPLS(FiddleContextOptions #include using namespace rive; -using namespace rive::pls; +using namespace rive::gpu; // +1 because PLS doesn't wait for the previous fence until partway through flush. // (After we need to acquire a new image from the swapchain.) 
-static constexpr int kResourcePoolSize = pls::kBufferRingSize + 1; +static constexpr int kResourcePoolSize = gpu::kBufferRingSize + 1; class FiddleContextVulkanPLS : public FiddleContext { @@ -144,9 +144,9 @@ class FiddleContextVulkanPLS : public FiddleContext Factory* factory() override { return m_plsContext.get(); } - rive::pls::PLSRenderContext* plsContextOrNull() override { return m_plsContext.get(); } + rive::gpu::PLSRenderContext* plsContextOrNull() override { return m_plsContext.get(); } - rive::pls::PLSRenderTarget* plsRenderTargetOrNull() override { return m_renderTarget.get(); } + rive::gpu::PLSRenderTarget* plsRenderTargetOrNull() override { return m_renderTarget.get(); } void onSizeChanged(GLFWwindow* window, int width, int height, uint32_t sampleCount) override { @@ -232,7 +232,7 @@ class FiddleContextVulkanPLS : public FiddleContext std::unique_ptr makeRenderer(int width, int height) override { - return std::make_unique(m_plsContext.get()); + return std::make_unique(m_plsContext.get()); } void begin(const PLSRenderContext::FrameDescriptor& frameDescriptor) override diff --git a/pls/path_fiddle/index.html b/renderer/path_fiddle/index.html similarity index 100% rename from pls/path_fiddle/index.html rename to renderer/path_fiddle/index.html diff --git a/pls/path_fiddle/path_fiddle.cpp b/renderer/path_fiddle/path_fiddle.cpp similarity index 99% rename from pls/path_fiddle/path_fiddle.cpp rename to renderer/path_fiddle/path_fiddle.cpp index 72089cbd..166610bd 100644 --- a/pls/path_fiddle/path_fiddle.cpp +++ b/renderer/path_fiddle/path_fiddle.cpp @@ -400,13 +400,13 @@ int main(int argc, const char** argv) } else if (!strcmp(argv[i], "--sw") || !strcmp(argv[i], "--swiftshader")) { - // Use the swiftshader built by packages/runtime/pls/make_swiftshader.sh + // Use the swiftshader built by packages/runtime/renderer/make_swiftshader.sh set_environment_variable("VK_ICD_FILENAMES", kSwiftShaderICD); api = API::vulkan; } else if (!strcmp(argv[i], "--swatomic") || !strcmp(argv[i], "--swiftshaderatomic")) { - // Use the swiftshader built by packages/runtime/pls/make_swiftshader.sh + // Use the swiftshader built by packages/runtime/renderer/make_swiftshader.sh set_environment_variable("VK_ICD_FILENAMES", kSwiftShaderICD); api = API::vulkan; s_forceAtomicMode = true; diff --git a/pls/path_fiddle/path_fiddle.hpp b/renderer/path_fiddle/path_fiddle.hpp similarity index 100% rename from pls/path_fiddle/path_fiddle.hpp rename to renderer/path_fiddle/path_fiddle.hpp diff --git a/pls/premake5.lua b/renderer/premake5.lua similarity index 100% rename from pls/premake5.lua rename to renderer/premake5.lua diff --git a/pls/premake5_pls_renderer.lua b/renderer/premake5_pls_renderer.lua similarity index 86% rename from pls/premake5_pls_renderer.lua rename to renderer/premake5_pls_renderer.lua index 0980e1fb..bce45492 100644 --- a/pls/premake5_pls_renderer.lua +++ b/renderer/premake5_pls_renderer.lua @@ -81,7 +81,7 @@ filter({}) local pls_generated_headers = RIVE_BUILD_OUT .. '/include' local pls_shaders_absolute_dir = path.getabsolute(pls_generated_headers .. '/generated/shaders') local makecommand = 'make -C ' - .. path.getabsolute('renderer/shaders') + .. path.getabsolute('src/shaders') .. ' OUT=' .. 
pls_shaders_absolute_dir @@ -154,20 +154,20 @@ do includedirs({ 'include', 'glad', - 'renderer', + 'src', '../include', pls_generated_headers, }) flags({ 'FatalWarnings' }) - files({ 'renderer/*.cpp', 'renderer/decoding/*.cpp' }) + files({ 'src/*.cpp', 'renderer/decoding/*.cpp' }) if _OPTIONS['with_vulkan'] then externalincludedirs({ vulkan_headers .. '/include', vulkan_memory_allocator .. '/include', }) - files({ 'renderer/vulkan/*.cpp' }) + files({ 'src/vulkan/*.cpp' }) end filter({ 'toolset:not msc' }) @@ -203,20 +203,20 @@ do filter({ 'system:not ios' }) do files({ - 'renderer/gl/gl_state.cpp', - 'renderer/gl/gl_utils.cpp', - 'renderer/gl/load_store_actions_ext.cpp', - 'renderer/gl/pls_render_buffer_gl_impl.cpp', - 'renderer/gl/pls_render_context_gl_impl.cpp', - 'renderer/gl/pls_render_target_gl.cpp', + 'src/gl/gl_state.cpp', + 'src/gl/gl_utils.cpp', + 'src/gl/load_store_actions_ext.cpp', + 'src/gl/render_buffer_gl_impl.cpp', + 'src/gl/render_context_gl_impl.cpp', + 'src/gl/render_target_gl.cpp', }) end filter({ 'system:windows or macosx or linux' }) do files({ - 'renderer/gl/pls_impl_webgl.cpp', -- Emulate WebGL with ANGLE. - 'renderer/gl/pls_impl_rw_texture.cpp', + 'src/gl/pls_impl_webgl.cpp', -- Emulate WebGL with ANGLE. + 'src/gl/pls_impl_rw_texture.cpp', 'glad/glad.c', 'glad/glad_custom.c', }) -- GL loader library for ANGLE. @@ -225,15 +225,15 @@ do filter('system:android') do files({ - 'renderer/gl/load_gles_extensions.cpp', - 'renderer/gl/pls_impl_ext_native.cpp', - 'renderer/gl/pls_impl_framebuffer_fetch.cpp', + 'src/gl/load_gles_extensions.cpp', + 'src/gl/pls_impl_ext_native.cpp', + 'src/gl/pls_impl_framebuffer_fetch.cpp', }) end filter({ 'system:macosx or ios', 'options:not nop-obj-c' }) do - files({ 'renderer/metal/*.mm' }) + files({ 'src/metal/*.mm' }) buildoptions({ '-fobjc-arc' }) end @@ -249,14 +249,14 @@ do filter({ 'options:with-webgpu or with-dawn' }) do files({ - 'renderer/webgpu/**.cpp', - 'renderer/gl/load_store_actions_ext.cpp', + 'src/webgpu/**.cpp', + 'src/gl/load_store_actions_ext.cpp', }) end filter({ 'options:nop-obj-c' }) do - files({ 'renderer/metal/pls_metal_nop.cpp' }) + files({ 'src/metal/metal_nop.cpp' }) end filter({ 'options:not no-rive-decoders' }) @@ -268,11 +268,11 @@ do filter('system:windows') do architecture('x64') - files({ 'renderer/d3d/*.cpp' }) + files({ 'src/d3d/*.cpp' }) end filter('system:emscripten') do - files({ 'renderer/gl/pls_impl_webgl.cpp' }) + files({ 'src/gl/pls_impl_webgl.cpp' }) end end diff --git a/pls/rive_vk_bootstrap/bootstrap_project.lua b/renderer/rive_vk_bootstrap/bootstrap_project.lua similarity index 87% rename from pls/rive_vk_bootstrap/bootstrap_project.lua rename to renderer/rive_vk_bootstrap/bootstrap_project.lua index 91f8866c..b83def46 100644 --- a/pls/rive_vk_bootstrap/bootstrap_project.lua +++ b/renderer/rive_vk_bootstrap/bootstrap_project.lua @@ -4,7 +4,7 @@ if not _OPTIONS['with_vulkan'] then end if not vulkan_headers or not vulkan_memory_allocator then - error('Please `dofile` packages/runtime/pls/premake5_pls_renderer.lua first.') + error('Please `dofile` packages/runtime/renderer/premake5_pls_renderer.lua first.') end local dependency = require('dependency') diff --git a/pls/rive_vk_bootstrap/include/rive_vk_bootstrap/rive_vk_bootstrap.hpp b/renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/rive_vk_bootstrap.hpp similarity index 88% rename from pls/rive_vk_bootstrap/include/rive_vk_bootstrap/rive_vk_bootstrap.hpp rename to renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/rive_vk_bootstrap.hpp index 
99ba83fe..26666f26 100644 --- a/pls/rive_vk_bootstrap/include/rive_vk_bootstrap/rive_vk_bootstrap.hpp +++ b/renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/rive_vk_bootstrap.hpp @@ -3,7 +3,7 @@ */ #include -#include "rive/pls/vulkan/vulkan_context.hpp" +#include "rive/renderer/vulkan/vulkan_context.hpp" namespace rive_vkb { @@ -34,11 +34,11 @@ VKAPI_ATTR VkBool32 VKAPI_CALL default_debug_callback(VkDebugUtilsMessageSeverit // Select a GPU, using 'gpuNameFilter' or 'getenv("RIVE_GPU")', otherwise // preferring discrete. Abort if the filter matches more than one name. -std::tuple select_physical_device( +std::tuple select_physical_device( vkb::PhysicalDeviceSelector& selector, const char* gpuNameFilter = nullptr); -inline std::tuple select_physical_device( +inline std::tuple select_physical_device( vkb::Instance instance, const char* gpuNameFilter = nullptr) { diff --git a/pls/rive_vk_bootstrap/include/rive_vk_bootstrap/vulkan_fence_pool.hpp b/renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/vulkan_fence_pool.hpp similarity index 94% rename from pls/rive_vk_bootstrap/include/rive_vk_bootstrap/vulkan_fence_pool.hpp rename to renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/vulkan_fence_pool.hpp index f428ba88..c80553d2 100644 --- a/pls/rive_vk_bootstrap/include/rive_vk_bootstrap/vulkan_fence_pool.hpp +++ b/renderer/rive_vk_bootstrap/include/rive_vk_bootstrap/vulkan_fence_pool.hpp @@ -4,10 +4,10 @@ #pragma once -#include "rive/pls/pls.hpp" -#include "rive/pls/vulkan/vulkan_context.hpp" +#include "rive/renderer/gpu.hpp" +#include "rive/renderer/vulkan/vulkan_context.hpp" -namespace rive::pls +namespace rive::gpu { class VulkanFence; @@ -90,4 +90,4 @@ inline rcp VulkanFencePool::makeFence() assert(fence->debugging_refcnt() == 1); return fence; } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/rive_vk_bootstrap/rive_vk_bootstrap.cpp b/renderer/rive_vk_bootstrap/rive_vk_bootstrap.cpp similarity index 98% rename from pls/rive_vk_bootstrap/rive_vk_bootstrap.cpp rename to renderer/rive_vk_bootstrap/rive_vk_bootstrap.cpp index bdffbac7..8e34b269 100644 --- a/pls/rive_vk_bootstrap/rive_vk_bootstrap.cpp +++ b/renderer/rive_vk_bootstrap/rive_vk_bootstrap.cpp @@ -86,7 +86,7 @@ static const char* physical_device_type_name(VkPhysicalDeviceType type) // Select a GPU name if it contains the substring 'filter' or '$RIVE_GPU'. // Return false if 'filter' and '$RIVE_GPU' are both null. // Abort if the filter matches more than one name. 
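The selection contract spelled out in these comments (substring match against the device name, $RIVE_GPU as the fallback filter, abort on an ambiguous match) can be stated on its own. A standalone sketch of just that rule, with invented names and no vk-bootstrap dependency:

#include <cstdlib>
#include <string>
#include <vector>

std::string pickGpuName(const std::vector<std::string>& deviceNames, const char* gpuNameFilter)
{
    const char* filter = gpuNameFilter != nullptr ? gpuNameFilter : std::getenv("RIVE_GPU");
    if (filter == nullptr)
        return {}; // no filter given anywhere; the caller prefers a discrete GPU instead
    std::string match;
    for (const std::string& name : deviceNames)
    {
        if (name.find(filter) == std::string::npos)
            continue;
        if (!match.empty())
            std::abort(); // the filter matched more than one name
        match = name;
    }
    return match;
}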
-std::tuple select_physical_device( +std::tuple select_physical_device( vkb::PhysicalDeviceSelector& selector, const char* gpuNameFilter) { @@ -153,7 +153,7 @@ std::tuple select_physical_devic rive_vkb::physical_device_type_name(physicalDevice.properties.deviceType), physicalDevice.properties.deviceName); - rive::pls::VulkanFeatures plsVulkanFeatures; + rive::gpu::VulkanFeatures plsVulkanFeatures; physicalDevice.enable_features_if_present({ .independentBlend = VK_TRUE, .fillModeNonSolid = VK_TRUE, diff --git a/pls/renderer/.vscode/c_cpp_properties.json b/renderer/src/.vscode/c_cpp_properties.json similarity index 100% rename from pls/renderer/.vscode/c_cpp_properties.json rename to renderer/src/.vscode/c_cpp_properties.json diff --git a/pls/renderer/.vscode/settings.json b/renderer/src/.vscode/settings.json similarity index 100% rename from pls/renderer/.vscode/settings.json rename to renderer/src/.vscode/settings.json diff --git a/pls/renderer/d3d/pls_render_context_d3d_impl.cpp b/renderer/src/d3d/render_context_d3d_impl.cpp similarity index 94% rename from pls/renderer/d3d/pls_render_context_d3d_impl.cpp rename to renderer/src/d3d/render_context_d3d_impl.cpp index 59c96052..7f87a2ef 100644 --- a/pls/renderer/d3d/pls_render_context_d3d_impl.cpp +++ b/renderer/src/d3d/render_context_d3d_impl.cpp @@ -2,9 +2,9 @@ * Copyright 2023 Rive */ -#include "rive/pls/d3d/pls_render_context_d3d_impl.hpp" +#include "rive/renderer/d3d/render_context_d3d_impl.hpp" -#include "rive/pls/pls_image.hpp" +#include "rive/renderer/image.hpp" #include "shaders/constants.glsl" #include @@ -32,7 +32,7 @@ constexpr static UINT kImageRectVertexDataSlot = 2; constexpr static UINT kImageMeshVertexDataSlot = 3; constexpr static UINT kImageMeshUVDataSlot = 4; -namespace rive::pls +namespace rive::gpu { ComPtr make_simple_2d_texture(ID3D11Device* gpu, DXGI_FORMAT format, @@ -267,9 +267,9 @@ PLSRenderContextD3DImpl::PLSRenderContextD3DImpl(ComPtr gpu, nullptr, &m_tessellatePixelShader)); - m_tessSpanIndexBuffer = makeSimpleImmutableBuffer(sizeof(pls::kTessSpanIndices), + m_tessSpanIndexBuffer = makeSimpleImmutableBuffer(sizeof(gpu::kTessSpanIndices), D3D11_BIND_INDEX_BUFFER, - pls::kTessSpanIndices); + gpu::kTessSpanIndices); } // Set up the path patch rendering buffers. @@ -281,13 +281,13 @@ PLSRenderContextD3DImpl::PLSRenderContextD3DImpl(ComPtr gpu, m_patchIndexBuffer = makeSimpleImmutableBuffer(sizeof(patchIndices), D3D11_BIND_INDEX_BUFFER, patchIndices); - // Set up the imageRect rendering buffers. (pls::InterlockMode::atomics only.) - m_imageRectVertexBuffer = makeSimpleImmutableBuffer(sizeof(pls::kImageRectVertices), + // Set up the imageRect rendering buffers. (gpu::InterlockMode::atomics only.) + m_imageRectVertexBuffer = makeSimpleImmutableBuffer(sizeof(gpu::kImageRectVertices), D3D11_BIND_VERTEX_BUFFER, - pls::kImageRectVertices); - m_imageRectIndexBuffer = makeSimpleImmutableBuffer(sizeof(pls::kImageRectIndices), + gpu::kImageRectVertices); + m_imageRectIndexBuffer = makeSimpleImmutableBuffer(sizeof(gpu::kImageRectIndices), D3D11_BIND_INDEX_BUFFER, - pls::kImageRectIndices); + gpu::kImageRectIndices); // Create buffers for uniforms. 
{ @@ -295,16 +295,16 @@ PLSRenderContextD3DImpl::PLSRenderContextD3DImpl(ComPtr gpu, desc.Usage = D3D11_USAGE_DEFAULT; desc.BindFlags = D3D11_BIND_CONSTANT_BUFFER; - desc.ByteWidth = sizeof(pls::FlushUniforms); - desc.StructureByteStride = sizeof(pls::FlushUniforms); + desc.ByteWidth = sizeof(gpu::FlushUniforms); + desc.StructureByteStride = sizeof(gpu::FlushUniforms); VERIFY_OK(m_gpu->CreateBuffer(&desc, nullptr, m_flushUniforms.ReleaseAndGetAddressOf())); desc.ByteWidth = sizeof(DrawUniforms); desc.StructureByteStride = sizeof(DrawUniforms); VERIFY_OK(m_gpu->CreateBuffer(&desc, nullptr, m_drawUniforms.ReleaseAndGetAddressOf())); - desc.ByteWidth = sizeof(pls::ImageDrawUniforms); - desc.StructureByteStride = sizeof(pls::ImageDrawUniforms); + desc.ByteWidth = sizeof(gpu::ImageDrawUniforms); + desc.StructureByteStride = sizeof(gpu::ImageDrawUniforms); VERIFY_OK( m_gpu->CreateBuffer(&desc, nullptr, m_imageDrawUniforms.ReleaseAndGetAddressOf())); } @@ -677,12 +677,12 @@ std::unique_ptr PLSRenderContextD3DImpl::makeUniformBufferRing(size_ std::unique_ptr PLSRenderContextD3DImpl::makeStorageBufferRing( size_t capacityInBytes, - pls::StorageBufferStructure bufferStructure) + gpu::StorageBufferStructure bufferStructure) { return capacityInBytes != 0 ? std::make_unique( this, capacityInBytes, - pls::StorageBufferElementSizeInBytes(bufferStructure)) + gpu::StorageBufferElementSizeInBytes(bufferStructure)) : nullptr; } @@ -927,7 +927,7 @@ ID3D11ShaderResourceView* PLSRenderContextD3DImpl::replaceStructuredBufferSRV( // Shaders access our storage buffers as arrays of basic types, as opposed to structures. Our // SRV therefore needs to be indexed by the underlying basic type, not the high level structure. constexpr static UINT kUnderlyingTypeSizeInBytes = - pls::StorageBufferElementSizeInBytes(HighLevelStruct::kBufferStructure); + gpu::StorageBufferElementSizeInBytes(HighLevelStruct::kBufferStructure); static_assert(sizeof(HighLevelStruct) % kUnderlyingTypeSizeInBytes == 0); constexpr static UINT kStructIndexMultiplier = sizeof(HighLevelStruct) / kUnderlyingTypeSizeInBytes; @@ -938,14 +938,14 @@ ID3D11ShaderResourceView* PLSRenderContextD3DImpl::replaceStructuredBufferSRV( } void PLSRenderContextD3DImpl::setPipelineLayoutAndShaders(DrawType drawType, - pls::ShaderFeatures shaderFeatures, - pls::InterlockMode interlockMode, - pls::ShaderMiscFlags pixelShaderMiscFlags) + gpu::ShaderFeatures shaderFeatures, + gpu::InterlockMode interlockMode, + gpu::ShaderMiscFlags pixelShaderMiscFlags) { - uint32_t vertexShaderKey = pls::ShaderUniqueKey(drawType, + uint32_t vertexShaderKey = gpu::ShaderUniqueKey(drawType, shaderFeatures & kVertexShaderFeaturesMask, interlockMode, - pls::ShaderMiscFlags::none); + gpu::ShaderMiscFlags::none); auto vertexEntry = m_drawVertexShaders.find(vertexShaderKey); uint32_t pixelShaderKey = @@ -965,7 +965,7 @@ void PLSRenderContextD3DImpl::setPipelineLayoutAndShaders(DrawType drawType, } if (m_d3dCapabilities.supportsRasterizerOrderedViews) { - if ((interlockMode == pls::InterlockMode::rasterOrdering && + if ((interlockMode == gpu::InterlockMode::rasterOrdering && drawType != DrawType::interiorTriangulation) || drawType == DrawType::imageMesh) { @@ -980,11 +980,11 @@ void PLSRenderContextD3DImpl::setPipelineLayoutAndShaders(DrawType drawType, { s << "#define " << GLSL_ENABLE_MIN_16_PRECISION << '\n'; } - if (pixelShaderMiscFlags & pls::ShaderMiscFlags::fixedFunctionColorBlend) + if (pixelShaderMiscFlags & gpu::ShaderMiscFlags::fixedFunctionColorBlend) { s << "#define " << 
GLSL_FIXED_FUNCTION_COLOR_BLEND << '\n'; } - if (pixelShaderMiscFlags & pls::ShaderMiscFlags::coalescedResolveAndTransfer) + if (pixelShaderMiscFlags & gpu::ShaderMiscFlags::coalescedResolveAndTransfer) { s << "#define " << GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER << '\n'; s << "#define " << GLSL_COLOR_PLANE_IDX_OVERRIDE << ' ' @@ -1000,7 +1000,7 @@ void PLSRenderContextD3DImpl::setPipelineLayoutAndShaders(DrawType drawType, s << "#define " << GLSL_DRAW_INTERIOR_TRIANGLES << '\n'; break; case DrawType::imageRect: - assert(interlockMode == pls::InterlockMode::atomics); + assert(interlockMode == gpu::InterlockMode::atomics); s << "#define " << GLSL_DRAW_IMAGE << '\n'; s << "#define " << GLSL_DRAW_IMAGE_RECT << '\n'; break; @@ -1008,12 +1008,12 @@ void PLSRenderContextD3DImpl::setPipelineLayoutAndShaders(DrawType drawType, s << "#define " << GLSL_DRAW_IMAGE << '\n'; s << "#define " << GLSL_DRAW_IMAGE_MESH << '\n'; break; - case DrawType::plsAtomicResolve: - assert(interlockMode == pls::InterlockMode::atomics); + case DrawType::gpuAtomicResolve: + assert(interlockMode == gpu::InterlockMode::atomics); s << "#define " << GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS << '\n'; s << "#define " << GLSL_RESOLVE_PLS << '\n'; break; - case DrawType::plsAtomicInitialize: + case DrawType::gpuAtomicInitialize: case DrawType::stencilClipReset: RIVE_UNREACHABLE(); } @@ -1028,33 +1028,33 @@ void PLSRenderContextD3DImpl::setPipelineLayoutAndShaders(DrawType drawType, { case DrawType::midpointFanPatches: case DrawType::outerCurvePatches: - s << pls::glsl::draw_path_common << '\n'; - s << (interlockMode == pls::InterlockMode::rasterOrdering ? pls::glsl::draw_path - : pls::glsl::atomic_draw) + s << gpu::glsl::draw_path_common << '\n'; + s << (interlockMode == gpu::InterlockMode::rasterOrdering ? gpu::glsl::draw_path + : gpu::glsl::atomic_draw) << '\n'; break; case DrawType::interiorTriangulation: - s << pls::glsl::draw_path_common << '\n'; - s << (interlockMode == pls::InterlockMode::rasterOrdering ? pls::glsl::draw_path - : pls::glsl::atomic_draw) + s << gpu::glsl::draw_path_common << '\n'; + s << (interlockMode == gpu::InterlockMode::rasterOrdering ? gpu::glsl::draw_path + : gpu::glsl::atomic_draw) << '\n'; break; case DrawType::imageRect: - assert(interlockMode == pls::InterlockMode::atomics); - s << pls::glsl::atomic_draw << '\n'; + assert(interlockMode == gpu::InterlockMode::atomics); + s << gpu::glsl::atomic_draw << '\n'; break; case DrawType::imageMesh: - s << (interlockMode == pls::InterlockMode::rasterOrdering - ? pls::glsl::draw_image_mesh - : pls::glsl::atomic_draw) + s << (interlockMode == gpu::InterlockMode::rasterOrdering + ? 
gpu::glsl::draw_image_mesh + : gpu::glsl::atomic_draw) << '\n'; break; - case DrawType::plsAtomicResolve: + case DrawType::gpuAtomicResolve: case DrawType::stencilClipReset: - assert(interlockMode == pls::InterlockMode::atomics); - s << pls::glsl::atomic_draw << '\n'; + assert(interlockMode == gpu::InterlockMode::atomics); + s << gpu::glsl::atomic_draw << '\n'; break; - case DrawType::plsAtomicInitialize: + case DrawType::gpuAtomicInitialize: RIVE_UNREACHABLE(); } @@ -1124,10 +1124,10 @@ void PLSRenderContextD3DImpl::setPipelineLayoutAndShaders(DrawType drawType, 0}; vertexAttribCount = 2; break; - case DrawType::plsAtomicResolve: + case DrawType::gpuAtomicResolve: vertexAttribCount = 0; break; - case DrawType::plsAtomicInitialize: + case DrawType::gpuAtomicInitialize: case DrawType::stencilClipReset: RIVE_UNREACHABLE(); } @@ -1216,22 +1216,22 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc) // All programs use the same storage buffers. ID3D11ShaderResourceView* storageBufferBufferSRVs[] = { - desc.pathCount > 0 ? replaceStructuredBufferSRV( + desc.pathCount > 0 ? replaceStructuredBufferSRV( pathBufferRing(), desc.pathCount, math::lossless_numeric_cast(desc.firstPath)) : nullptr, - desc.pathCount > 0 ? replaceStructuredBufferSRV( + desc.pathCount > 0 ? replaceStructuredBufferSRV( paintBufferRing(), desc.pathCount, math::lossless_numeric_cast(desc.firstPaint)) : nullptr, - desc.pathCount > 0 ? replaceStructuredBufferSRV( + desc.pathCount > 0 ? replaceStructuredBufferSRV( paintAuxBufferRing(), desc.pathCount, math::lossless_numeric_cast(desc.firstPaintAux)) : nullptr, - desc.contourCount > 0 ? replaceStructuredBufferSRV( + desc.contourCount > 0 ? replaceStructuredBufferSRV( contourBufferRing(), desc.contourCount, math::lossless_numeric_cast(desc.firstContour)) @@ -1243,7 +1243,7 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc) m_gpuContext->VSSetShaderResources(PATH_BUFFER_IDX, std::size(storageBufferBufferSRVs), storageBufferBufferSRVs); - if (desc.interlockMode == pls::InterlockMode::atomics) + if (desc.interlockMode == gpu::InterlockMode::atomics) { // Atomic mode accesses the paint buffers from the pixel shader. m_gpuContext->PSSetShaderResources(PAINT_BUFFER_IDX, 2, storageBufferBufferSRVs + 1); @@ -1334,7 +1334,7 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc) m_gpuContext->OMSetRenderTargets(1, m_tessTextureRTV.GetAddressOf(), NULL); m_gpuContext->DrawIndexedInstanced( - std::size(pls::kTessSpanIndices), + std::size(gpu::kTessSpanIndices), desc.tessVertexSpanCount, 0, 0, @@ -1353,7 +1353,7 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc) !(desc.combinedShaderFeatures & ShaderFeatures::ENABLE_ADVANCED_BLEND); switch (desc.colorLoadAction) { - case pls::LoadAction::clear: + case gpu::LoadAction::clear: if (renderDirectToRasterPipeline) { float clearColor4f[4]; @@ -1369,11 +1369,11 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc) } else { - UINT clearColorui[4] = {pls::SwizzleRiveColorToRGBA(desc.clearColor)}; + UINT clearColorui[4] = {gpu::SwizzleRiveColorToRGBA(desc.clearColor)}; m_gpuContext->ClearUnorderedAccessViewUint(renderTarget->targetUAV(), clearColorui); } break; - case pls::LoadAction::preserveRenderTarget: + case gpu::LoadAction::preserveRenderTarget: if (!renderDirectToRasterPipeline && !renderTarget->targetTextureSupportsUAV()) { // We're rendering to an offscreen UAV and preserving the target. 
Copy the target @@ -1384,10 +1384,10 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc) desc.renderTargetUpdateBounds); } break; - case pls::LoadAction::dontCare: + case gpu::LoadAction::dontCare: break; } - if (desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_CLIPPING) + if (desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIPPING) { constexpr static UINT kZero[4]{}; m_gpuContext->ClearUnorderedAccessViewUint(renderTarget->clipUAV(), kZero); @@ -1402,9 +1402,9 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc) m_patchVertexBuffer.Get(), desc.hasTriangleVertices ? submitted_buffer(triangleBufferRing()) : NULL, m_imageRectVertexBuffer.Get()}; - UINT vertexStrides[3] = {sizeof(pls::PatchVertex), - sizeof(pls::TriangleVertex), - sizeof(pls::ImageRectVertex)}; + UINT vertexStrides[3] = {sizeof(gpu::PatchVertex), + sizeof(gpu::TriangleVertex), + sizeof(gpu::ImageRectVertex)}; UINT vertexOffsets[3] = {0, 0, 0}; static_assert(kPatchVertexDataSlot == 0); static_assert(kTriangleVertexDataSlot == 1); @@ -1428,7 +1428,7 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc) ID3D11UnorderedAccessView* plsUAVs[] = { renderDirectToRasterPipeline ? NULL : renderTarget->targetUAV(), renderTarget->clipUAV(), - desc.interlockMode == pls::InterlockMode::rasterOrdering + desc.interlockMode == gpu::InterlockMode::rasterOrdering ? renderTarget->scratchColorUAV() : NULL, // Atomic mode doesn't use the scratchColor. renderTarget->coverageUAV(), @@ -1460,7 +1460,7 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc) const char* const imageDrawUniformData = heap_buffer_contents(imageDrawUniformBufferRing()); bool renderPassHasCoalescedResolveAndTransfer = - desc.interlockMode == pls::InterlockMode::atomics && !renderDirectToRasterPipeline && + desc.interlockMode == gpu::InterlockMode::atomics && !renderDirectToRasterPipeline && !renderTarget->targetTextureSupportsUAV(); for (const DrawBatch& batch : *desc.drawList) @@ -1471,16 +1471,16 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc) } DrawType drawType = batch.drawType; - auto shaderFeatures = desc.interlockMode == pls::InterlockMode::atomics + auto shaderFeatures = desc.interlockMode == gpu::InterlockMode::atomics ? desc.combinedShaderFeatures : batch.shaderFeatures; auto pixelShaderMiscFlags = - drawType == pls::DrawType::plsAtomicResolve && renderPassHasCoalescedResolveAndTransfer - ? pls::ShaderMiscFlags::coalescedResolveAndTransfer - : pls::ShaderMiscFlags::none; + drawType == gpu::DrawType::gpuAtomicResolve && renderPassHasCoalescedResolveAndTransfer + ? 
gpu::ShaderMiscFlags::coalescedResolveAndTransfer + : gpu::ShaderMiscFlags::none; if (renderDirectToRasterPipeline) { - pixelShaderMiscFlags |= pls::ShaderMiscFlags::fixedFunctionColorBlend; + pixelShaderMiscFlags |= gpu::ShaderMiscFlags::fixedFunctionColorBlend; } setPipelineLayoutAndShaders(drawType, shaderFeatures, @@ -1530,7 +1530,7 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc) imageDrawUniformData + batch.imageDrawDataOffset, 0, 0); - m_gpuContext->DrawIndexed(std::size(pls::kImageRectIndices), 0, 0); + m_gpuContext->DrawIndexed(std::size(gpu::kImageRectIndices), 0, 0); break; case DrawType::imageMesh: { @@ -1560,8 +1560,8 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc) m_gpuContext->DrawIndexed(batch.elementCount, batch.baseElement, 0); break; } - case DrawType::plsAtomicResolve: - assert(desc.interlockMode == pls::InterlockMode::atomics); + case DrawType::gpuAtomicResolve: + assert(desc.interlockMode == gpu::InterlockMode::atomics); m_gpuContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP); m_gpuContext->RSSetState(m_backCulledRasterState[0].Get()); if (renderPassHasCoalescedResolveAndTransfer) @@ -1595,13 +1595,13 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc) } m_gpuContext->Draw(4, 0); break; - case DrawType::plsAtomicInitialize: + case DrawType::gpuAtomicInitialize: case DrawType::stencilClipReset: RIVE_UNREACHABLE(); } } - if (desc.interlockMode == pls::InterlockMode::rasterOrdering && + if (desc.interlockMode == gpu::InterlockMode::rasterOrdering && !renderTarget->targetTextureSupportsUAV()) { // We rendered to an offscreen UAV and did not resolve to the renderTarget. Copy back to the @@ -1614,4 +1614,4 @@ void PLSRenderContextD3DImpl::flush(const FlushDescriptor& desc) desc.renderTargetUpdateBounds); } } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/pls_draw.cpp b/renderer/src/draw.cpp similarity index 95% rename from pls/renderer/pls_draw.cpp rename to renderer/src/draw.cpp index 8544fc97..bee84511 100644 --- a/pls/renderer/pls_draw.cpp +++ b/renderer/src/draw.cpp @@ -2,17 +2,17 @@ * Copyright 2023 Rive */ -#include "rive/pls/pls_draw.hpp" +#include "rive/renderer/draw.hpp" #include "gr_inner_fan_triangulator.hpp" #include "path_utils.hpp" -#include "pls_path.hpp" -#include "pls_paint.hpp" +#include "rive_render_path.hpp" +#include "rive_render_paint.hpp" #include "rive/math/wangs_formula.hpp" -#include "rive/pls/pls_image.hpp" +#include "rive/renderer/image.hpp" #include "shaders/constants.glsl" -namespace rive::pls +namespace rive::gpu { namespace { @@ -288,7 +288,7 @@ PLSDraw::PLSDraw(IAABB pixelBounds, { if (m_blendMode != BlendMode::srcOver) { - m_drawContents |= pls::DrawContents::advancedBlend; + m_drawContents |= gpu::DrawContents::advancedBlend; } } @@ -298,15 +298,15 @@ void PLSDraw::setClipID(uint32_t clipID) // For clipUpdates, m_clipID refers to the ID we are writing to the stencil buffer (NOT the ID // we are clipping against). It therefore doesn't affect the activeClip flag in that case. 
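Every m_drawContents update in this file is a plain bit-flag operation. The enumerator names below (opaquePaint, stroke, evenOddFill, activeClip, clipUpdate, advancedBlend) all appear in this diff, but the underlying values and operators are assumptions sketched for illustration:

enum class DrawContents : uint32_t
{
    none          = 0,
    opaquePaint   = 1 << 0,
    stroke        = 1 << 1,
    evenOddFill   = 1 << 2,
    activeClip    = 1 << 3,
    clipUpdate    = 1 << 4,
    advancedBlend = 1 << 5,
};
constexpr DrawContents operator|(DrawContents a, DrawContents b)
{
    return static_cast<DrawContents>(static_cast<uint32_t>(a) | static_cast<uint32_t>(b));
}
constexpr DrawContents operator&(DrawContents a, DrawContents b)
{
    return static_cast<DrawContents>(static_cast<uint32_t>(a) & static_cast<uint32_t>(b));
}
constexpr DrawContents operator~(DrawContents a)
{
    return static_cast<DrawContents>(~static_cast<uint32_t>(a));
}
// ...plus |=, &=, and a truthiness test, so expressions like
// (m_drawContents & gpu::DrawContents::clipUpdate) can be branched on.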
- if (!(m_drawContents & pls::DrawContents::clipUpdate)) + if (!(m_drawContents & gpu::DrawContents::clipUpdate)) { if (m_clipID != 0) { - m_drawContents |= pls::DrawContents::activeClip; + m_drawContents |= gpu::DrawContents::activeClip; } else { - m_drawContents &= ~pls::DrawContents::activeClip; + m_drawContents &= ~gpu::DrawContents::activeClip; } } } @@ -324,17 +324,17 @@ void PLSDraw::releaseRefs() safe_unref(m_gradientRef); } -PLSDrawUniquePtr PLSPathDraw::Make(PLSRenderContext* context, - const Mat2D& matrix, - rcp path, - FillRule fillRule, - const PLSPaint* paint, - RawPath* scratchPath) +PLSDrawUniquePtr RiveRenderPathDraw::Make(PLSRenderContext* context, + const Mat2D& matrix, + rcp path, + FillRule fillRule, + const RiveRenderPaint* paint, + RawPath* scratchPath) { assert(path != nullptr); assert(paint != nullptr); AABB mappedBounds; - if (context->frameInterlockMode() == pls::InterlockMode::atomics) + if (context->frameInterlockMode() == gpu::InterlockMode::atomics) { // In atomic mode, find a tighter bounding box in order to maximize reordering. mappedBounds = matrix.mapBoundingBox(path->getRawPath().points().data(), @@ -369,9 +369,9 @@ PLSDrawUniquePtr PLSPathDraw::Make(PLSRenderContext* context, // it. const AABB& localBounds = path->getBounds(); // FIXME! Implement interior triangulation in depthStencil mode. - if (context->frameInterlockMode() != pls::InterlockMode::depthStencil && + if (context->frameInterlockMode() != gpu::InterlockMode::depthStencil && path->getRawPath().verbs().count() < 1000 && - pls::FindTransformedArea(localBounds, matrix) > 512 * 512) + gpu::FindTransformedArea(localBounds, matrix) > 512 * 512) { return PLSDrawUniquePtr(context->make( context, @@ -394,13 +394,13 @@ PLSDrawUniquePtr PLSPathDraw::Make(PLSRenderContext* context, paint)); } -PLSPathDraw::PLSPathDraw(IAABB pixelBounds, - const Mat2D& matrix, - rcp path, - FillRule fillRule, - const PLSPaint* paint, - Type type, - pls::InterlockMode frameInterlockMode) : +RiveRenderPathDraw::RiveRenderPathDraw(IAABB pixelBounds, + const Mat2D& matrix, + rcp path, + FillRule fillRule, + const RiveRenderPaint* paint, + Type type, + gpu::InterlockMode frameInterlockMode) : PLSDraw(pixelBounds, matrix, paint->getBlendMode(), ref_rcp(paint->getImageTexture()), type), m_pathRef(path.release()), m_fillRule(paint->getIsStroked() ? FillRule::nonZero : fillRule), @@ -411,39 +411,39 @@ PLSPathDraw::PLSPathDraw(IAABB pixelBounds, assert(paint != nullptr); if (m_blendMode == BlendMode::srcOver && paint->getIsOpaque()) { - m_drawContents |= pls::DrawContents::opaquePaint; + m_drawContents |= gpu::DrawContents::opaquePaint; } if (paint->getIsStroked()) { - m_drawContents |= pls::DrawContents::stroke; + m_drawContents |= gpu::DrawContents::stroke; m_strokeRadius = paint->getThickness() * .5f; // Ensure stroke radius is nonzero. (In PLS, zero radius means the path is filled.) m_strokeRadius = fmaxf(m_strokeRadius, std::numeric_limits<float>::min()); - assert(!std::isnan(m_strokeRadius)); // These should get culled in PLSRenderer::drawPath(). + assert(!std::isnan(m_strokeRadius)); // These should get culled in RiveRenderer::drawPath().
assert(m_strokeRadius > 0); } else if (m_fillRule == FillRule::evenOdd) { - m_drawContents |= pls::DrawContents::evenOddFill; + m_drawContents |= gpu::DrawContents::evenOddFill; } - if (paint->getType() == pls::PaintType::clipUpdate) + if (paint->getType() == gpu::PaintType::clipUpdate) { - m_drawContents |= pls::DrawContents::clipUpdate; + m_drawContents |= gpu::DrawContents::clipUpdate; if (paint->getSimpleValue().outerClipID != 0) { - m_drawContents |= pls::DrawContents::activeClip; + m_drawContents |= gpu::DrawContents::activeClip; } } if (isStroked()) { // Stroke triangles are always forward. - m_contourDirections = pls::ContourDirections::forward; + m_contourDirections = gpu::ContourDirections::forward; } - else if (frameInterlockMode != pls::InterlockMode::depthStencil) + else if (frameInterlockMode != gpu::InterlockMode::depthStencil) { // atomic and rasterOrdering fills need reverse AND forward triangles. - m_contourDirections = pls::ContourDirections::reverseAndForward; + m_contourDirections = gpu::ContourDirections::reverseAndForward; } else if (m_fillRule != FillRule::evenOdd) { @@ -452,13 +452,13 @@ PLSPathDraw::PLSPathDraw(IAABB pixelBounds, // counterclockwise triangles twice and clockwise only once. float matrixDeterminant = matrix[0] * matrix[3] - matrix[2] * matrix[1]; m_contourDirections = m_pathRef->getCoarseArea() * matrixDeterminant >= 0 - ? pls::ContourDirections::forward - : pls::ContourDirections::reverse; + ? gpu::ContourDirections::forward + : gpu::ContourDirections::reverse; } else { // "evenOdd" depthStencil fills just get drawn twice, so any direction is fine. - m_contourDirections = pls::ContourDirections::forward; + m_contourDirections = gpu::ContourDirections::forward; } m_simplePaintValue = paint->getSimpleValue(); @@ -468,7 +468,7 @@ PLSPathDraw::PLSPathDraw(IAABB pixelBounds, assert(isStroked() == (strokeRadius() > 0)); } -void PLSPathDraw::pushToRenderContext(PLSRenderContext::LogicalFlush* flush) +void RiveRenderPathDraw::pushToRenderContext(PLSRenderContext::LogicalFlush* flush) { // Make sure the rawPath in our path reference hasn't changed since we began holding! assert(m_rawPathMutationID == m_pathRef->getRawPathMutationID()); @@ -489,7 +489,7 @@ void PLSPathDraw::pushToRenderContext(PLSRenderContext::LogicalFlush* flush) } } -void PLSPathDraw::releaseRefs() +void RiveRenderPathDraw::releaseRefs() { PLSDraw::releaseRefs(); RIVE_DEBUG_CODE(m_pathRef->unlockRawPathMutations();) @@ -499,16 +499,16 @@ MidpointFanPathDraw::MidpointFanPathDraw(PLSRenderContext* context, IAABB pixelBounds, const Mat2D& matrix, - rcp path, + rcp path, FillRule fillRule, - const PLSPaint* paint) : - PLSPathDraw(pixelBounds, - matrix, - std::move(path), - fillRule, - paint, - Type::midpointFanPath, - context->frameInterlockMode()) + const RiveRenderPaint* paint) : + RiveRenderPathDraw(pixelBounds, + matrix, + std::move(path), + fillRule, + paint, + Type::midpointFanPath, + context->frameInterlockMode()) { if (isStroked()) { @@ -999,11 +999,11 @@ MidpointFanPathDraw::MidpointFanPathDraw(PLSRenderContext* context, m_resourceCounts.contourCount = contourCount; // maxTessellatedSegmentCount does not get doubled when we emit both forward and mirrored // contours because the forward and mirrored pair both get packed into a single - // pls::TessVertexSpan. + // gpu::TessVertexSpan.
m_resourceCounts.maxTessellatedSegmentCount = lineCount + unpaddedCurveCount + emptyStrokeCountForCaps; m_resourceCounts.midpointFanTessVertexCount = - m_contourDirections == pls::ContourDirections::reverseAndForward ? tessVertexCount * 2 + m_contourDirections == gpu::ContourDirections::reverseAndForward ? tessVertexCount * 2 : tessVertexCount; } } @@ -1342,18 +1342,18 @@ void MidpointFanPathDraw::pushEmulatedStrokeCapAsJoinBeforeCubic( InteriorTriangulationDraw::InteriorTriangulationDraw(PLSRenderContext* context, IAABB pixelBounds, const Mat2D& matrix, - rcp path, + rcp path, FillRule fillRule, - const PLSPaint* paint, + const RiveRenderPaint* paint, RawPath* scratchPath, TriangulatorAxis triangulatorAxis) : - PLSPathDraw(pixelBounds, - matrix, - std::move(path), - fillRule, - paint, - Type::interiorTriangulationPath, - context->frameInterlockMode()) + RiveRenderPathDraw(pixelBounds, + matrix, + std::move(path), + fillRule, + paint, + Type::interiorTriangulationPath, + context->frameInterlockMode()) { assert(!isStroked()); assert(m_strokeRadius == 0); @@ -1367,7 +1367,7 @@ InteriorTriangulationDraw::InteriorTriangulationDraw(PLSRenderContext* context, void InteriorTriangulationDraw::onPushToRenderContext(PLSRenderContext::LogicalFlush* flush) { processPath(PathOp::submitOuterCubics, nullptr, nullptr, TriangulatorAxis::dontCare, flush); - if (flush->desc().interlockMode == pls::InterlockMode::atomics) + if (flush->desc().interlockMode == gpu::InterlockMode::atomics) { // We need a barrier between the outer cubics and interior triangles in atomic mode. flush->pushBarrier(); @@ -1525,12 +1525,12 @@ void InteriorTriangulationDraw::processPath(PathOp op, m_resourceCounts.contourCount = contourCount; // maxTessellatedSegmentCount does not get doubled when we emit both forward and // mirrored contours because the forward and mirrored pair both get packed into a single - // pls::TessVertexSpan. + // gpu::TessVertexSpan. m_resourceCounts.maxTessellatedSegmentCount = patchCount; // outerCubic patches emit their tessellated geometry twice: once forward and once // mirrored. m_resourceCounts.outerCubicTessVertexCount = - m_contourDirections == pls::ContourDirections::reverseAndForward + m_contourDirections == gpu::ContourDirections::reverseAndForward ? 
patchCount * kOuterCurvePatchSegmentSpan * 2 : patchCount * kOuterCurvePatchSegmentSpan; m_resourceCounts.maxTriangleVertexCount = m_triangulator->maxVertexCount(); @@ -1628,10 +1628,10 @@ StencilClipReset::StencilClipReset(PLSRenderContext* context, switch (resetAction) { case ResetAction::intersectPreviousClip: - m_drawContents |= pls::DrawContents::activeClip; + m_drawContents |= gpu::DrawContents::activeClip; [[fallthrough]]; case ResetAction::clearPreviousClip: - m_drawContents |= pls::DrawContents::clipUpdate; + m_drawContents |= gpu::DrawContents::clipUpdate; break; } m_resourceCounts.maxTriangleVertexCount = 6; @@ -1641,4 +1641,4 @@ void StencilClipReset::pushToRenderContext(PLSRenderContext::LogicalFlush* flush { flush->pushStencilClipReset(this); } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/eval_cubic.hpp b/renderer/src/eval_cubic.hpp similarity index 97% rename from pls/renderer/eval_cubic.hpp rename to renderer/src/eval_cubic.hpp index 592b496f..4e57b5d2 100644 --- a/pls/renderer/eval_cubic.hpp +++ b/renderer/src/eval_cubic.hpp @@ -7,7 +7,7 @@ #include "rive/math/simd.hpp" #include "rive/math/vec2d.hpp" -namespace rive::pls +namespace rive::gpu { // Optimized SIMD helper for evaluating a single cubic at many points. class EvalCubic @@ -52,4 +52,4 @@ class EvalCubic float4 m_C; float4 m_P0; }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/gl/gl_state.cpp b/renderer/src/gl/gl_state.cpp similarity index 98% rename from pls/renderer/gl/gl_state.cpp rename to renderer/src/gl/gl_state.cpp index 7ce7aaf3..0da831c5 100644 --- a/pls/renderer/gl/gl_state.cpp +++ b/renderer/src/gl/gl_state.cpp @@ -2,11 +2,11 @@ * Copyright 2023 Rive */ -#include "rive/pls/gl/gl_state.hpp" +#include "rive/renderer/gl/gl_state.hpp" #include "shaders/constants.glsl" -namespace rive::pls +namespace rive::gpu { void GLState::invalidate() { @@ -260,4 +260,4 @@ void GLState::deleteBuffer(GLuint bufferID) if (m_validState.boundPixelUnpackBufferID && m_boundPixelUnpackBufferID == bufferID) m_boundPixelUnpackBufferID = 0; } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/gl/gl_utils.cpp b/renderer/src/gl/gl_utils.cpp similarity index 98% rename from pls/renderer/gl/gl_utils.cpp rename to renderer/src/gl/gl_utils.cpp index 9493d08f..45954ea9 100644 --- a/pls/renderer/gl/gl_utils.cpp +++ b/renderer/src/gl/gl_utils.cpp @@ -2,7 +2,7 @@ * Copyright 2022 Rive */ -#include "rive/pls/gl/gl_utils.hpp" +#include "rive/renderer/gl/gl_utils.hpp" #include #include @@ -87,7 +87,7 @@ GLuint CompileShader(GLuint type, { shaderSource << "#define " << defines[i] << " true\n"; } - shaderSource << rive::pls::glsl::glsl << "\n"; + shaderSource << rive::gpu::glsl::glsl << "\n"; for (size_t i = 0; i < numInputSources; ++i) { shaderSource << inputSources[i] << "\n"; diff --git a/pls/renderer/gl/load_gles_extensions.cpp b/renderer/src/gl/load_gles_extensions.cpp similarity index 98% rename from pls/renderer/gl/load_gles_extensions.cpp rename to renderer/src/gl/load_gles_extensions.cpp index bab76d7b..0467ea25 100644 --- a/pls/renderer/gl/load_gles_extensions.cpp +++ b/renderer/src/gl/load_gles_extensions.cpp @@ -2,7 +2,7 @@ * Copyright 2023 Rive */ -#include "rive/pls/gl/gles3.hpp" +#include "rive/renderer/gl/gles3.hpp" #include diff --git a/pls/renderer/gl/load_store_actions_ext.cpp b/renderer/src/gl/load_store_actions_ext.cpp similarity index 83% rename from pls/renderer/gl/load_store_actions_ext.cpp rename to 
renderer/src/gl/load_store_actions_ext.cpp index 12de5876..13c59b2b 100644 --- a/pls/renderer/gl/load_store_actions_ext.cpp +++ b/renderer/src/gl/load_store_actions_ext.cpp @@ -2,13 +2,13 @@ * Copyright 2023 Rive */ -#include "rive/pls/gl/load_store_actions_ext.hpp" +#include "rive/renderer/gl/load_store_actions_ext.hpp" #include "generated/shaders/pls_load_store_ext.glsl.hpp" -namespace rive::pls +namespace rive::gpu { -LoadStoreActionsEXT BuildLoadActionsEXT(const pls::FlushDescriptor& desc, +LoadStoreActionsEXT BuildLoadActionsEXT(const gpu::FlushDescriptor& desc, std::array* clearColor4f) { LoadStoreActionsEXT actions = LoadStoreActionsEXT::clearCoverage; @@ -21,7 +21,7 @@ LoadStoreActionsEXT BuildLoadActionsEXT(const pls::FlushDescriptor& desc, { actions |= LoadStoreActionsEXT::loadColor; } - if (desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_CLIPPING) + if (desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIPPING) { actions |= LoadStoreActionsEXT::clearClip; } @@ -51,7 +51,7 @@ std::ostream& BuildLoadStoreEXTGLSL(std::ostream& shader, LoadStoreActionsEXT ac { addDefine(GLSL_CLEAR_CLIP); } - shader << pls::glsl::pls_load_store_ext; + shader << gpu::glsl::pls_load_store_ext; return shader; } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/gl/pls_impl_ext_native.cpp b/renderer/src/gl/pls_impl_ext_native.cpp similarity index 91% rename from pls/renderer/gl/pls_impl_ext_native.cpp rename to renderer/src/gl/pls_impl_ext_native.cpp index 089a34ce..55c2b0b0 100644 --- a/pls/renderer/gl/pls_impl_ext_native.cpp +++ b/renderer/src/gl/pls_impl_ext_native.cpp @@ -2,17 +2,17 @@ * Copyright 2023 Rive */ -#include "rive/pls/gl/pls_render_context_gl_impl.hpp" +#include "rive/renderer/gl/render_context_gl_impl.hpp" -#include "rive/pls/gl/load_store_actions_ext.hpp" -#include "rive/pls/gl/gl_utils.hpp" +#include "rive/renderer/gl/load_store_actions_ext.hpp" +#include "rive/renderer/gl/gl_utils.hpp" #include "rive/math/simd.hpp" -#include "rive/pls/gl/pls_render_target_gl.hpp" +#include "rive/renderer/gl/render_target_gl.hpp" #include #include "generated/shaders/pls_load_store_ext.exports.h" -namespace rive::pls +namespace rive::gpu { // Wraps an EXT_shader_pixel_local_storage load/store program, described by a set of // LoadStoreActions. 
@@ -24,7 +24,7 @@ class PLSLoadStoreProgram PLSLoadStoreProgram(LoadStoreActionsEXT actions, GLuint vertexShader, - pls::ShaderFeatures combinedShaderFeatures, + gpu::ShaderFeatures combinedShaderFeatures, rcp state) : m_state(std::move(state)) { @@ -34,7 +34,7 @@ class PLSLoadStoreProgram std::ostringstream glsl; glsl << "#version 300 es\n"; glsl << "#define " GLSL_FRAGMENT "\n"; - if (combinedShaderFeatures & pls::ShaderFeatures::ENABLE_CLIPPING) + if (combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIPPING) { glsl << "#define " GLSL_ENABLE_CLIPPING "\n"; } @@ -131,16 +131,16 @@ class PLSRenderContextGLImpl::PLSImplEXTNative : public PLSRenderContextGLImpl:: glDisable(GL_SHADER_PIXEL_LOCAL_STORAGE_EXT); } - void pushShaderDefines(pls::InterlockMode, std::vector* defines) const override + void pushShaderDefines(gpu::InterlockMode, std::vector* defines) const override { defines->push_back(GLSL_PLS_IMPL_EXT_NATIVE); } private: const PLSLoadStoreProgram& findLoadStoreProgram(LoadStoreActionsEXT actions, - pls::ShaderFeatures combinedShaderFeatures) + gpu::ShaderFeatures combinedShaderFeatures) { - bool hasClipping = combinedShaderFeatures & pls::ShaderFeatures::ENABLE_CLIPPING; + bool hasClipping = combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIPPING; uint32_t programKey = (static_cast(actions) << 1) | static_cast(hasClipping); @@ -180,4 +180,4 @@ std::unique_ptr PLSRenderContextGLImpl::MakePLS { return std::make_unique(capabilities); } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/gl/pls_impl_framebuffer_fetch.cpp b/renderer/src/gl/pls_impl_framebuffer_fetch.cpp similarity index 87% rename from pls/renderer/gl/pls_impl_framebuffer_fetch.cpp rename to renderer/src/gl/pls_impl_framebuffer_fetch.cpp index a0bf53f9..ce0c4689 100644 --- a/pls/renderer/gl/pls_impl_framebuffer_fetch.cpp +++ b/renderer/src/gl/pls_impl_framebuffer_fetch.cpp @@ -2,15 +2,15 @@ * Copyright 2023 Rive */ -#include "rive/pls/gl/pls_render_context_gl_impl.hpp" +#include "rive/renderer/gl/render_context_gl_impl.hpp" -#include "rive/pls/gl/gl_utils.hpp" -#include "rive/pls/gl/pls_render_target_gl.hpp" +#include "rive/renderer/gl/gl_utils.hpp" +#include "rive/renderer/gl/render_target_gl.hpp" #include "shaders/constants.glsl" #include "generated/shaders/glsl.exports.h" -namespace rive::pls +namespace rive::gpu { using DrawBufferMask = PLSRenderTargetGL::DrawBufferMask; @@ -52,11 +52,11 @@ class PLSRenderContextGLImpl::PLSImplFramebufferFetch : public PLSRenderContextG GLuint coverageClear[4]{desc.coverageClearValue}; auto fbFetchBuffers = DrawBufferMask::color; - if (desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_CLIPPING) + if (desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIPPING) { fbFetchBuffers |= DrawBufferMask::clip; } - if (desc.interlockMode == pls::InterlockMode::rasterOrdering) + if (desc.interlockMode == gpu::InterlockMode::rasterOrdering) { fbFetchBuffers |= DrawBufferMask::coverage | DrawBufferMask::scratchColor; } @@ -85,7 +85,7 @@ class PLSRenderContextGLImpl::PLSImplFramebufferFetch : public PLSRenderContextG UnpackColorToRGBA32F(desc.clearColor, clearColor4f); glClearBufferfv(GL_COLOR, COLOR_PLANE_IDX, clearColor4f); } - if (desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_CLIPPING) + if (desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIPPING) { constexpr static uint32_t kZero[4]{}; glClearBufferuiv(GL_COLOR, CLIP_PLANE_IDX, kZero); @@ -95,13 +95,13 @@ class PLSRenderContextGLImpl::PLSImplFramebufferFetch : public 
PLSRenderContextG glClearBufferuiv(GL_COLOR, COVERAGE_PLANE_IDX, coverageClear); } - if (desc.interlockMode == pls::InterlockMode::atomics && - !(desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_CLIP_RECT)) + if (desc.interlockMode == gpu::InterlockMode::atomics && + !(desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIP_RECT)) { plsContextImpl->state()->setBlendEquation(BlendMode::srcOver); } - if (desc.interlockMode == pls::InterlockMode::atomics) + if (desc.interlockMode == gpu::InterlockMode::atomics) { glMemoryBarrierByRegion(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT); } @@ -109,7 +109,7 @@ class PLSRenderContextGLImpl::PLSImplFramebufferFetch : public PLSRenderContextG void deactivatePixelLocalStorage(PLSRenderContextGLImpl*, const FlushDescriptor& desc) override { - if (desc.interlockMode == pls::InterlockMode::atomics) + if (desc.interlockMode == gpu::InterlockMode::atomics) { glMemoryBarrierByRegion(GL_ALL_BARRIER_BITS); } @@ -131,11 +131,11 @@ class PLSRenderContextGLImpl::PLSImplFramebufferFetch : public PLSRenderContextG } } - void pushShaderDefines(pls::InterlockMode interlockMode, + void pushShaderDefines(gpu::InterlockMode interlockMode, std::vector* defines) const override { defines->push_back(GLSL_PLS_IMPL_FRAMEBUFFER_FETCH); - if (interlockMode == pls::InterlockMode::atomics) + if (interlockMode == gpu::InterlockMode::atomics) { defines->push_back(GLSL_USING_PLS_STORAGE_TEXTURES); } @@ -157,13 +157,13 @@ class PLSRenderContextGLImpl::PLSImplFramebufferFetch : public PLSRenderContextG } } - void onBarrier(const pls::FlushDescriptor& desc) override + void onBarrier(const gpu::FlushDescriptor& desc) override { if (m_capabilities.QCOM_shader_framebuffer_fetch_noncoherent) { glFramebufferFetchBarrierQCOM(); } - if (desc.interlockMode == pls::InterlockMode::atomics) + if (desc.interlockMode == gpu::InterlockMode::atomics) { glMemoryBarrierByRegion(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT); } @@ -178,4 +178,4 @@ std::unique_ptr PLSRenderContextGLImpl:: { return std::make_unique(extensions); } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/gl/pls_impl_rw_texture.cpp b/renderer/src/gl/pls_impl_rw_texture.cpp similarity index 78% rename from pls/renderer/gl/pls_impl_rw_texture.cpp rename to renderer/src/gl/pls_impl_rw_texture.cpp index 7f71a293..96df7455 100644 --- a/pls/renderer/gl/pls_impl_rw_texture.cpp +++ b/renderer/src/gl/pls_impl_rw_texture.cpp @@ -2,27 +2,27 @@ * Copyright 2023 Rive */ -#include "rive/pls/gl/pls_render_context_gl_impl.hpp" +#include "rive/renderer/gl/render_context_gl_impl.hpp" -#include "rive/pls/gl/pls_render_target_gl.hpp" +#include "rive/renderer/gl/render_target_gl.hpp" #include "shaders/constants.glsl" -#include "rive/pls/gl/gl_utils.hpp" +#include "rive/renderer/gl/gl_utils.hpp" #include "generated/shaders/glsl.exports.h" -namespace rive::pls +namespace rive::gpu { using DrawBufferMask = PLSRenderTargetGL::DrawBufferMask; -static bool needs_atomic_fixed_function_color_blend(const pls::FlushDescriptor& desc) +static bool needs_atomic_fixed_function_color_blend(const gpu::FlushDescriptor& desc) { - assert(desc.interlockMode == pls::InterlockMode::atomics); - return !(desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_ADVANCED_BLEND); + assert(desc.interlockMode == gpu::InterlockMode::atomics); + return !(desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_ADVANCED_BLEND); } -static bool needs_coalesced_atomic_resolve_and_transfer(const pls::FlushDescriptor& desc) +static bool 
needs_coalesced_atomic_resolve_and_transfer(const gpu::FlushDescriptor& desc) { - assert(desc.interlockMode == pls::InterlockMode::atomics); + assert(desc.interlockMode == gpu::InterlockMode::atomics); return (desc.combinedShaderFeatures & ShaderFeatures::ENABLE_ADVANCED_BLEND) && lite_rtti_cast( static_cast(desc.renderTarget)) != nullptr; @@ -42,7 +42,7 @@ class PLSRenderContextGLImpl::PLSImplRWTexture : public PLSRenderContextGLImpl:: auto renderTarget = static_cast(desc.renderTarget); renderTarget->allocateInternalPLSTextures(desc.interlockMode); - if (desc.interlockMode == pls::InterlockMode::atomics && + if (desc.interlockMode == gpu::InterlockMode::atomics && needs_atomic_fixed_function_color_blend(desc)) { plsContextImpl->state()->setBlendEquation(BlendMode::srcOver); @@ -53,7 +53,7 @@ class PLSRenderContextGLImpl::PLSImplRWTexture : public PLSRenderContextGLImpl:: // We're targeting an external FBO but can't render to it directly. Make sure to // allocate and attach an offscreen target texture. framebufferRenderTarget->allocateOffscreenTargetTexture(); - if (desc.colorLoadAction == pls::LoadAction::preserveRenderTarget) + if (desc.colorLoadAction == gpu::LoadAction::preserveRenderTarget) { // Copy the framebuffer's contents to our offscreen texture. framebufferRenderTarget->bindDestinationFramebuffer(GL_READ_FRAMEBUFFER); @@ -65,7 +65,7 @@ class PLSRenderContextGLImpl::PLSImplRWTexture : public PLSRenderContextGLImpl:: // Clear the necessary textures. auto rwTexBuffers = DrawBufferMask::coverage; - if (desc.interlockMode == pls::InterlockMode::rasterOrdering) + if (desc.interlockMode == gpu::InterlockMode::rasterOrdering) { rwTexBuffers |= DrawBufferMask::color | DrawBufferMask::scratchColor; } @@ -73,12 +73,12 @@ class PLSRenderContextGLImpl::PLSImplRWTexture : public PLSRenderContextGLImpl:: { rwTexBuffers |= DrawBufferMask::color; } - if (desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_CLIPPING) + if (desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIPPING) { rwTexBuffers |= DrawBufferMask::clip; } renderTarget->bindInternalFramebuffer(GL_FRAMEBUFFER, rwTexBuffers); - if (desc.colorLoadAction == pls::LoadAction::clear && + if (desc.colorLoadAction == gpu::LoadAction::clear && (rwTexBuffers & DrawBufferMask::color)) { // If the color buffer is not a storage texture, we will clear it once the main @@ -87,7 +87,7 @@ class PLSRenderContextGLImpl::PLSImplRWTexture : public PLSRenderContextGLImpl:: UnpackColorToRGBA32F(desc.clearColor, clearColor4f); glClearBufferfv(GL_COLOR, COLOR_PLANE_IDX, clearColor4f); } - if (desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_CLIPPING) + if (desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIPPING) { constexpr static GLuint kZeroClear[4]{}; glClearBufferuiv(GL_COLOR, CLIP_PLANE_IDX, kZeroClear); @@ -99,14 +99,14 @@ class PLSRenderContextGLImpl::PLSImplRWTexture : public PLSRenderContextGLImpl:: switch (desc.interlockMode) { - case pls::InterlockMode::rasterOrdering: + case gpu::InterlockMode::rasterOrdering: // rasterOrdering mode renders by storing to an image texture. Bind a framebuffer // with no color attachments. 
renderTarget->bindHeadlessFramebuffer(plsContextImpl->m_capabilities); break; - case pls::InterlockMode::atomics: + case gpu::InterlockMode::atomics: renderTarget->bindDestinationFramebuffer(GL_FRAMEBUFFER); - if (desc.colorLoadAction == pls::LoadAction::clear && + if (desc.colorLoadAction == gpu::LoadAction::clear && !(rwTexBuffers & DrawBufferMask::color)) { // We're rendering directly to the main framebuffer. Clear it now. @@ -131,29 +131,29 @@ class PLSRenderContextGLImpl::PLSImplRWTexture : public PLSRenderContextGLImpl:: glMemoryBarrierByRegion(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT); } - pls::ShaderMiscFlags shaderMiscFlags(const pls::FlushDescriptor& desc, - pls::DrawType drawType) const final + gpu::ShaderMiscFlags shaderMiscFlags(const gpu::FlushDescriptor& desc, + gpu::DrawType drawType) const final { - auto flags = pls::ShaderMiscFlags::none; - if (desc.interlockMode == pls::InterlockMode::atomics) + auto flags = gpu::ShaderMiscFlags::none; + if (desc.interlockMode == gpu::InterlockMode::atomics) { if (needs_atomic_fixed_function_color_blend(desc)) { - flags |= pls::ShaderMiscFlags::fixedFunctionColorBlend; + flags |= gpu::ShaderMiscFlags::fixedFunctionColorBlend; } - if (drawType == pls::DrawType::plsAtomicResolve && + if (drawType == gpu::DrawType::gpuAtomicResolve && needs_coalesced_atomic_resolve_and_transfer(desc)) { - flags |= pls::ShaderMiscFlags::coalescedResolveAndTransfer; + flags |= gpu::ShaderMiscFlags::coalescedResolveAndTransfer; } } return flags; } void setupAtomicResolve(PLSRenderContextGLImpl* plsContextImpl, - const pls::FlushDescriptor& desc) override + const gpu::FlushDescriptor& desc) override { - assert(desc.interlockMode == pls::InterlockMode::atomics); + assert(desc.interlockMode == gpu::InterlockMode::atomics); if (needs_coalesced_atomic_resolve_and_transfer(desc)) { // Turn the color mask back on now that we're about to resolve. @@ -167,7 +167,7 @@ class PLSRenderContextGLImpl::PLSImplRWTexture : public PLSRenderContextGLImpl:: // atomic mode never needs to copy anything here because it transfers the offscreen texture // during resolve. 
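rasterOrdering mode instead finishes the flush by copying the offscreen color texture back to the destination framebuffer, as the hunk below shows. A generic sketch of that style of copy-back in raw GL (a hypothetical helper, not code from this patch):

static void blitOffscreenToTarget(GLuint offscreenFBO, GLuint targetFBO, int width, int height)
{
    // Read from the offscreen framebuffer, draw into the real target.
    glBindFramebuffer(GL_READ_FRAMEBUFFER, offscreenFBO);
    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, targetFBO);
    // 1:1 copy of the updated region; no scaling, so GL_NEAREST suffices.
    glBlitFramebuffer(0, 0, width, height, 0, 0, width, height, GL_COLOR_BUFFER_BIT, GL_NEAREST);
}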
- if (desc.interlockMode == pls::InterlockMode::rasterOrdering) + if (desc.interlockMode == gpu::InterlockMode::rasterOrdering) { if (auto framebufferRenderTarget = lite_rtti_cast( static_cast(desc.renderTarget))) @@ -183,13 +183,13 @@ class PLSRenderContextGLImpl::PLSImplRWTexture : public PLSRenderContextGLImpl:: } } - void pushShaderDefines(pls::InterlockMode, std::vector* defines) const override + void pushShaderDefines(gpu::InterlockMode, std::vector* defines) const override { defines->push_back(GLSL_PLS_IMPL_STORAGE_TEXTURE); defines->push_back(GLSL_USING_PLS_STORAGE_TEXTURES); } - void onBarrier(const pls::FlushDescriptor&) override + void onBarrier(const gpu::FlushDescriptor&) override { return glMemoryBarrierByRegion(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT); } @@ -199,4 +199,4 @@ std::unique_ptr PLSRenderContextGLImpl::MakePLS { return std::make_unique(); } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/gl/pls_impl_webgl.cpp b/renderer/src/gl/pls_impl_webgl.cpp similarity index 94% rename from pls/renderer/gl/pls_impl_webgl.cpp rename to renderer/src/gl/pls_impl_webgl.cpp index 169e31fb..601d5e8a 100644 --- a/pls/renderer/gl/pls_impl_webgl.cpp +++ b/renderer/src/gl/pls_impl_webgl.cpp @@ -2,10 +2,10 @@ * Copyright 2022 Rive */ -#include "rive/pls/gl/pls_render_context_gl_impl.hpp" +#include "rive/renderer/gl/render_context_gl_impl.hpp" -#include "rive/pls/gl/gl_utils.hpp" -#include "rive/pls/gl/pls_render_target_gl.hpp" +#include "rive/renderer/gl/gl_utils.hpp" +#include "rive/renderer/gl/render_target_gl.hpp" #include "shaders/constants.glsl" #include "generated/shaders/glsl.exports.h" @@ -142,19 +142,19 @@ void glProvokingVertexANGLE(GLenum provokeMode) } #endif // RIVE_WEBGL -namespace rive::pls +namespace rive::gpu { using DrawBufferMask = PLSRenderTargetGL::DrawBufferMask; -static GLenum webgl_load_op(pls::LoadAction loadAction) +static GLenum webgl_load_op(gpu::LoadAction loadAction) { switch (loadAction) { - case pls::LoadAction::clear: + case gpu::LoadAction::clear: return GL_LOAD_OP_CLEAR_ANGLE; - case pls::LoadAction::preserveRenderTarget: + case gpu::LoadAction::preserveRenderTarget: return GL_LOAD_OP_LOAD_ANGLE; - case pls::LoadAction::dontCare: + case gpu::LoadAction::dontCare: return GL_LOAD_OP_ZERO_ANGLE; } RIVE_UNREACHABLE(); @@ -197,7 +197,7 @@ class PLSRenderContextGLImpl::PLSImplWebGL : public PLSRenderContextGLImpl::PLSI UnpackColorToRGBA32F(desc.clearColor, clearColor4f); glFramebufferPixelLocalClearValuefvANGLE(COLOR_PLANE_IDX, clearColor4f); } - GLenum clipLoadAction = (desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_CLIPPING) + GLenum clipLoadAction = (desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIPPING) ? 
GL_LOAD_OP_ZERO_ANGLE : GL_DONT_CARE; GLenum loadOps[4] = {webgl_load_op(desc.colorLoadAction), @@ -235,7 +235,7 @@ class PLSRenderContextGLImpl::PLSImplWebGL : public PLSRenderContextGLImpl::PLSI } } - void pushShaderDefines(pls::InterlockMode, std::vector* defines) const override + void pushShaderDefines(gpu::InterlockMode, std::vector* defines) const override { defines->push_back(GLSL_PLS_IMPL_ANGLE); } @@ -245,4 +245,4 @@ std::unique_ptr PLSRenderContextGLImpl::MakePLS { return std::make_unique(); } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/gl/pls_render_buffer_gl_impl.cpp b/renderer/src/gl/render_buffer_gl_impl.cpp similarity index 92% rename from pls/renderer/gl/pls_render_buffer_gl_impl.cpp rename to renderer/src/gl/render_buffer_gl_impl.cpp index 1ef0f58f..df36b802 100644 --- a/pls/renderer/gl/pls_render_buffer_gl_impl.cpp +++ b/renderer/src/gl/render_buffer_gl_impl.cpp @@ -2,11 +2,11 @@ * Copyright 2023 Rive */ -#include "rive/pls/gl/pls_render_buffer_gl_impl.hpp" +#include "rive/renderer/gl/render_buffer_gl_impl.hpp" -#include "rive/pls/gl/gl_state.hpp" +#include "rive/renderer/gl/gl_state.hpp" -namespace rive::pls +namespace rive::gpu { PLSRenderBufferGLImpl::PLSRenderBufferGLImpl(RenderBufferType type, RenderBufferFlags flags, @@ -41,7 +41,7 @@ void PLSRenderBufferGLImpl::init(rcp state) assert(!m_bufferIDs[0]); m_state = std::move(state); int bufferCount = - (flags() & RenderBufferFlags::mappedOnceAtInitialization) ? 1 : pls::kBufferRingSize; + (flags() & RenderBufferFlags::mappedOnceAtInitialization) ? 1 : gpu::kBufferRingSize; glGenBuffers(bufferCount, m_bufferIDs.data()); m_state->bindVAO(0); for (int i = 0; i < bufferCount; ++i) @@ -55,7 +55,7 @@ void PLSRenderBufferGLImpl::init(rcp state) } } -std::array PLSRenderBufferGLImpl::detachBuffers() +std::array PLSRenderBufferGLImpl::detachBuffers() { auto detachedBuffers = m_bufferIDs; m_bufferIDs.fill(0); @@ -64,7 +64,7 @@ std::array PLSRenderBufferGLImpl::detachBuffers() void* PLSRenderBufferGLImpl::onMap() { - m_submittedBufferIdx = (m_submittedBufferIdx + 1) % pls::kBufferRingSize; + m_submittedBufferIdx = (m_submittedBufferIdx + 1) % gpu::kBufferRingSize; if (!canMapBuffer()) { if (!m_fallbackMappedMemory) @@ -123,4 +123,4 @@ bool PLSRenderBufferGLImpl::canMapBuffer() const return !(flags() & RenderBufferFlags::mappedOnceAtInitialization); #endif } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/gl/pls_render_context_gl_impl.cpp b/renderer/src/gl/render_context_gl_impl.cpp similarity index 88% rename from pls/renderer/gl/pls_render_context_gl_impl.cpp rename to renderer/src/gl/render_context_gl_impl.cpp index 3379e273..0c30bdf1 100644 --- a/pls/renderer/gl/pls_render_context_gl_impl.cpp +++ b/renderer/src/gl/render_context_gl_impl.cpp @@ -2,12 +2,12 @@ * Copyright 2022 Rive */ -#include "rive/pls/gl/pls_render_context_gl_impl.hpp" +#include "rive/renderer/gl/render_context_gl_impl.hpp" -#include "rive/pls/gl/pls_render_buffer_gl_impl.hpp" -#include "rive/pls/gl/pls_render_target_gl.hpp" -#include "rive/pls/pls_draw.hpp" -#include "rive/pls/pls_image.hpp" +#include "rive/renderer/gl/render_buffer_gl_impl.hpp" +#include "rive/renderer/gl/render_target_gl.hpp" +#include "rive/renderer/draw.hpp" +#include "rive/renderer/image.hpp" #include "shaders/constants.glsl" #include "generated/shaders/advanced_blend.glsl.hpp" @@ -24,7 +24,7 @@ #ifdef RIVE_WEBGL // In an effort to save space on web, and since web doesn't have ES 3.1 level support, don't include // the 
atomic sources. -namespace rive::pls::glsl +namespace rive::gpu::glsl { const char atomic_draw[] = ""; } @@ -61,7 +61,7 @@ EM_JS(void, // GL_TEXTURE0 as a scratch texture index. constexpr static int kPLSTexIdxOffset = 1; -namespace rive::pls +namespace rive::gpu { PLSRenderContextGLImpl::PLSRenderContextGLImpl(const char* rendererString, GLCapabilities capabilities, @@ -160,8 +160,8 @@ PLSRenderContextGLImpl::PLSRenderContextGLImpl(const char* rendererString, m_state->bindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_tessSpanIndexBuffer); glBufferData(GL_ELEMENT_ARRAY_BUFFER, - sizeof(pls::kTessSpanIndices), - pls::kTessSpanIndices, + sizeof(gpu::kTessSpanIndices), + gpu::kTessSpanIndices, GL_STATIC_DRAW); m_state->bindVAO(m_drawVAO); @@ -198,17 +198,17 @@ PLSRenderContextGLImpl::PLSRenderContextGLImpl(const char* rendererString, m_state->bindBuffer(GL_ARRAY_BUFFER, m_imageRectVertexBuffer); glBufferData(GL_ARRAY_BUFFER, - sizeof(pls::kImageRectVertices), - pls::kImageRectVertices, + sizeof(gpu::kImageRectVertices), + gpu::kImageRectVertices, GL_STATIC_DRAW); glEnableVertexAttribArray(0); - glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, sizeof(pls::ImageRectVertex), nullptr); + glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, sizeof(gpu::ImageRectVertex), nullptr); m_state->bindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_imageRectIndexBuffer); glBufferData(GL_ELEMENT_ARRAY_BUFFER, - sizeof(pls::kImageRectIndices), - pls::kImageRectIndices, + sizeof(gpu::kImageRectIndices), + gpu::kImageRectIndices, GL_STATIC_DRAW); } @@ -401,45 +401,45 @@ class BufferRingGLImpl : public BufferRing }; // GL internalformat to use for a texture that polyfills a storage buffer. -static GLenum storage_texture_internalformat(pls::StorageBufferStructure bufferStructure) +static GLenum storage_texture_internalformat(gpu::StorageBufferStructure bufferStructure) { switch (bufferStructure) { - case pls::StorageBufferStructure::uint32x4: + case gpu::StorageBufferStructure::uint32x4: return GL_RGBA32UI; - case pls::StorageBufferStructure::uint32x2: + case gpu::StorageBufferStructure::uint32x2: return GL_RG32UI; - case pls::StorageBufferStructure::float32x4: + case gpu::StorageBufferStructure::float32x4: return GL_RGBA32F; } RIVE_UNREACHABLE(); } // GL format to use for a texture that polyfills a storage buffer. -static GLenum storage_texture_format(pls::StorageBufferStructure bufferStructure) +static GLenum storage_texture_format(gpu::StorageBufferStructure bufferStructure) { switch (bufferStructure) { - case pls::StorageBufferStructure::uint32x4: + case gpu::StorageBufferStructure::uint32x4: return GL_RGBA_INTEGER; - case pls::StorageBufferStructure::uint32x2: + case gpu::StorageBufferStructure::uint32x2: return GL_RG_INTEGER; - case pls::StorageBufferStructure::float32x4: + case gpu::StorageBufferStructure::float32x4: return GL_RGBA; } RIVE_UNREACHABLE(); } // GL type to use for a texture that polyfills a storage buffer. 
-static GLenum storage_texture_type(pls::StorageBufferStructure bufferStructure) +static GLenum storage_texture_type(gpu::StorageBufferStructure bufferStructure) { switch (bufferStructure) { - case pls::StorageBufferStructure::uint32x4: + case gpu::StorageBufferStructure::uint32x4: return GL_UNSIGNED_INT; - case pls::StorageBufferStructure::uint32x2: + case gpu::StorageBufferStructure::uint32x2: return GL_UNSIGNED_INT; - case pls::StorageBufferStructure::float32x4: + case gpu::StorageBufferStructure::float32x4: return GL_FLOAT; } RIVE_UNREACHABLE(); @@ -449,7 +449,7 @@ class StorageBufferRingGLImpl : public BufferRingGLImpl { public: StorageBufferRingGLImpl(size_t capacityInBytes, - pls::StorageBufferStructure bufferStructure, + gpu::StorageBufferStructure bufferStructure, rcp state) : BufferRingGLImpl( // If we don't support storage buffers, instead make a pixel-unpack buffer that @@ -472,24 +472,24 @@ class StorageBufferRingGLImpl : public BufferRingGLImpl } protected: - const pls::StorageBufferStructure m_bufferStructure; + const gpu::StorageBufferStructure m_bufferStructure; }; class TexelBufferRingWebGL : public BufferRing { public: TexelBufferRingWebGL(size_t capacityInBytes, - pls::StorageBufferStructure bufferStructure, + gpu::StorageBufferStructure bufferStructure, rcp state) : - BufferRing(pls::StorageTextureBufferSize(capacityInBytes, bufferStructure)), + BufferRing(gpu::StorageTextureBufferSize(capacityInBytes, bufferStructure)), m_bufferStructure(bufferStructure), m_state(std::move(state)) { - auto [width, height] = pls::StorageTextureSize(capacityInBytes, m_bufferStructure); + auto [width, height] = gpu::StorageTextureSize(capacityInBytes, m_bufferStructure); GLenum internalformat = storage_texture_internalformat(m_bufferStructure); - glGenTextures(pls::kBufferRingSize, m_textures); + glGenTextures(gpu::kBufferRingSize, m_textures); glActiveTexture(GL_TEXTURE0); - for (size_t i = 0; i < pls::kBufferRingSize; ++i) + for (size_t i = 0; i < gpu::kBufferRingSize; ++i) { glBindTexture(GL_TEXTURE_2D, m_textures[i]); glTexStorage2D(GL_TEXTURE_2D, 1, internalformat, width, height); @@ -498,7 +498,7 @@ class TexelBufferRingWebGL : public BufferRing glBindTexture(GL_TEXTURE_2D, 0); } - ~TexelBufferRingWebGL() { glDeleteTextures(pls::kBufferRingSize, m_textures); } + ~TexelBufferRingWebGL() { glDeleteTextures(gpu::kBufferRingSize, m_textures); } void* onMapBuffer(int bufferIdx, size_t mapSizeInBytes) override { return shadowBuffer(); } void onUnmapAndSubmitBuffer(int bufferIdx, size_t mapSizeInBytes) override {} @@ -508,7 +508,7 @@ class TexelBufferRingWebGL : public BufferRing size_t offsetSizeInBytes) const { auto [updateWidth, updateHeight] = - pls::StorageTextureSize(bindingSizeInBytes, m_bufferStructure); + gpu::StorageTextureSize(bindingSizeInBytes, m_bufferStructure); m_state->bindBuffer(GL_PIXEL_UNPACK_BUFFER, 0); glActiveTexture(GL_TEXTURE0 + kPLSTexIdxOffset + bindingIdx); glBindTexture(GL_TEXTURE_2D, m_textures[submittedBufferIdx()]); @@ -524,9 +524,9 @@ class TexelBufferRingWebGL : public BufferRing } protected: - const pls::StorageBufferStructure m_bufferStructure; + const gpu::StorageBufferStructure m_bufferStructure; const rcp m_state; - GLuint m_textures[pls::kBufferRingSize]; + GLuint m_textures[gpu::kBufferRingSize]; }; std::unique_ptr PLSRenderContextGLImpl::makeUniformBufferRing(size_t capacityInBytes) @@ -536,7 +536,7 @@ std::unique_ptr PLSRenderContextGLImpl::makeUniformBufferRing(size_t std::unique_ptr PLSRenderContextGLImpl::makeStorageBufferRing( size_t 
capacityInBytes, - pls::StorageBufferStructure bufferStructure) + gpu::StorageBufferStructure bufferStructure) { if (capacityInBytes == 0) { @@ -611,13 +611,13 @@ void PLSRenderContextGLImpl::resizeTessellationTexture(uint32_t width, uint32_t PLSRenderContextGLImpl::DrawShader::DrawShader(PLSRenderContextGLImpl* plsContextImpl, GLenum shaderType, - pls::DrawType drawType, + gpu::DrawType drawType, ShaderFeatures shaderFeatures, - pls::InterlockMode interlockMode, - pls::ShaderMiscFlags shaderMiscFlags) + gpu::InterlockMode interlockMode, + gpu::ShaderMiscFlags shaderMiscFlags) { #ifdef DISABLE_PLS_ATOMICS - if (interlockMode == pls::InterlockMode::atomics) + if (interlockMode == gpu::InterlockMode::atomics) { // Don't draw anything in atomic mode if support for it isn't compiled in. return; @@ -629,12 +629,12 @@ PLSRenderContextGLImpl::DrawShader::DrawShader(PLSRenderContextGLImpl* plsContex { plsContextImpl->m_plsImpl->pushShaderDefines(interlockMode, &defines); } - if (interlockMode == pls::InterlockMode::atomics) + if (interlockMode == gpu::InterlockMode::atomics) { // Atomics are currently always done on storage textures. defines.push_back(GLSL_USING_PLS_STORAGE_TEXTURES); } - if (shaderMiscFlags & pls::ShaderMiscFlags::fixedFunctionColorBlend) + if (shaderMiscFlags & gpu::ShaderMiscFlags::fixedFunctionColorBlend) { defines.push_back(GLSL_FIXED_FUNCTION_COLOR_BLEND); } @@ -644,8 +644,8 @@ PLSRenderContextGLImpl::DrawShader::DrawShader(PLSRenderContextGLImpl* plsContex if (shaderFeatures & feature) { assert((kVertexShaderFeaturesMask & feature) || shaderType == GL_FRAGMENT_SHADER); - if (interlockMode == pls::InterlockMode::depthStencil && - feature == pls::ShaderFeatures::ENABLE_ADVANCED_BLEND && + if (interlockMode == gpu::InterlockMode::depthStencil && + feature == gpu::ShaderFeatures::ENABLE_ADVANCED_BLEND && plsContextImpl->m_capabilities.KHR_blend_equation_advanced_coherent) { defines.push_back(GLSL_ENABLE_KHR_BLEND); @@ -656,7 +656,7 @@ PLSRenderContextGLImpl::DrawShader::DrawShader(PLSRenderContextGLImpl* plsContex } } } - if (interlockMode == pls::InterlockMode::depthStencil) + if (interlockMode == gpu::InterlockMode::depthStencil) { defines.push_back(GLSL_USING_DEPTH_STENCIL); } @@ -679,8 +679,8 @@ PLSRenderContextGLImpl::DrawShader::DrawShader(PLSRenderContextGLImpl* plsContex } switch (drawType) { - case pls::DrawType::midpointFanPatches: - case pls::DrawType::outerCurvePatches: + case gpu::DrawType::midpointFanPatches: + case gpu::DrawType::outerCurvePatches: if (shaderType == GL_VERTEX_SHADER) { defines.push_back(GLSL_ENABLE_INSTANCE_INDEX); @@ -690,46 +690,46 @@ PLSRenderContextGLImpl::DrawShader::DrawShader(PLSRenderContextGLImpl* plsContex } } defines.push_back(GLSL_DRAW_PATH); - sources.push_back(pls::glsl::draw_path_common); - sources.push_back(interlockMode == pls::InterlockMode::atomics ? pls::glsl::atomic_draw - : pls::glsl::draw_path); + sources.push_back(gpu::glsl::draw_path_common); + sources.push_back(interlockMode == gpu::InterlockMode::atomics ? 
gpu::glsl::atomic_draw + : gpu::glsl::draw_path); break; - case pls::DrawType::stencilClipReset: - assert(interlockMode == pls::InterlockMode::depthStencil); - sources.push_back(pls::glsl::stencil_draw); + case gpu::DrawType::stencilClipReset: + assert(interlockMode == gpu::InterlockMode::depthStencil); + sources.push_back(gpu::glsl::stencil_draw); break; - case pls::DrawType::interiorTriangulation: + case gpu::DrawType::interiorTriangulation: defines.push_back(GLSL_DRAW_INTERIOR_TRIANGLES); - sources.push_back(pls::glsl::draw_path_common); - sources.push_back(interlockMode == pls::InterlockMode::atomics ? pls::glsl::atomic_draw - : pls::glsl::draw_path); + sources.push_back(gpu::glsl::draw_path_common); + sources.push_back(interlockMode == gpu::InterlockMode::atomics ? gpu::glsl::atomic_draw + : gpu::glsl::draw_path); break; - case pls::DrawType::imageRect: - assert(interlockMode == pls::InterlockMode::atomics); + case gpu::DrawType::imageRect: + assert(interlockMode == gpu::InterlockMode::atomics); defines.push_back(GLSL_DRAW_IMAGE); defines.push_back(GLSL_DRAW_IMAGE_RECT); - sources.push_back(pls::glsl::atomic_draw); + sources.push_back(gpu::glsl::atomic_draw); break; - case pls::DrawType::imageMesh: + case gpu::DrawType::imageMesh: defines.push_back(GLSL_DRAW_IMAGE); defines.push_back(GLSL_DRAW_IMAGE_MESH); - sources.push_back(interlockMode == pls::InterlockMode::atomics - ? pls::glsl::atomic_draw - : pls::glsl::draw_image_mesh); + sources.push_back(interlockMode == gpu::InterlockMode::atomics + ? gpu::glsl::atomic_draw + : gpu::glsl::draw_image_mesh); break; - case pls::DrawType::plsAtomicResolve: - assert(interlockMode == pls::InterlockMode::atomics); + case gpu::DrawType::gpuAtomicResolve: + assert(interlockMode == gpu::InterlockMode::atomics); defines.push_back(GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS); defines.push_back(GLSL_RESOLVE_PLS); - if (shaderMiscFlags & pls::ShaderMiscFlags::coalescedResolveAndTransfer) + if (shaderMiscFlags & gpu::ShaderMiscFlags::coalescedResolveAndTransfer) { assert(shaderType == GL_FRAGMENT_SHADER); defines.push_back(GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER); } - sources.push_back(pls::glsl::atomic_draw); + sources.push_back(gpu::glsl::atomic_draw); break; - case pls::DrawType::plsAtomicInitialize: - assert(interlockMode == pls::InterlockMode::atomics); + case gpu::DrawType::gpuAtomicInitialize: + assert(interlockMode == gpu::InterlockMode::atomics); RIVE_UNREACHABLE(); } if (plsContextImpl->m_capabilities.ARB_bindless_texture) @@ -750,10 +750,10 @@ PLSRenderContextGLImpl::DrawShader::DrawShader(PLSRenderContextGLImpl* plsContex } PLSRenderContextGLImpl::DrawProgram::DrawProgram(PLSRenderContextGLImpl* plsContextImpl, - pls::DrawType drawType, - pls::ShaderFeatures shaderFeatures, - pls::InterlockMode interlockMode, - pls::ShaderMiscFlags fragmentShaderMiscFlags) : + gpu::DrawType drawType, + gpu::ShaderFeatures shaderFeatures, + gpu::InterlockMode interlockMode, + gpu::ShaderMiscFlags fragmentShaderMiscFlags) : m_fragmentShader(plsContextImpl, GL_FRAGMENT_SHADER, drawType, @@ -765,10 +765,10 @@ PLSRenderContextGLImpl::DrawProgram::DrawProgram(PLSRenderContextGLImpl* plsCont // Not every vertex shader is unique. Cache them by just the vertex features and reuse when // possible. 
ShaderFeatures vertexShaderFeatures = shaderFeatures & kVertexShaderFeaturesMask; - uint32_t vertexShaderKey = pls::ShaderUniqueKey(drawType, + uint32_t vertexShaderKey = gpu::ShaderUniqueKey(drawType, vertexShaderFeatures, interlockMode, - pls::ShaderMiscFlags::none); + gpu::ShaderMiscFlags::none); const DrawShader& vertexShader = plsContextImpl->m_vertexShaders .try_emplace(vertexShaderKey, plsContextImpl, @@ -776,7 +776,7 @@ PLSRenderContextGLImpl::DrawProgram::DrawProgram(PLSRenderContextGLImpl* plsCont drawType, vertexShaderFeatures, interlockMode, - pls::ShaderMiscFlags::none) + gpu::ShaderMiscFlags::none) .first->second; m_id = glCreateProgram(); @@ -811,8 +811,8 @@ PLSRenderContextGLImpl::DrawProgram::DrawProgram(PLSRenderContextGLImpl* plsCont glUniform1i(glGetUniformLocation(m_id, GLSL_gradTexture), kPLSTexIdxOffset + GRAD_TEXTURE_IDX); glUniform1i(glGetUniformLocation(m_id, GLSL_imageTexture), kPLSTexIdxOffset + IMAGE_TEXTURE_IDX); - if (interlockMode == pls::InterlockMode::depthStencil && - (shaderFeatures & pls::ShaderFeatures::ENABLE_ADVANCED_BLEND) && + if (interlockMode == gpu::InterlockMode::depthStencil && + (shaderFeatures & gpu::ShaderFeatures::ENABLE_ADVANCED_BLEND) && !plsContextImpl->m_capabilities.KHR_blend_equation_advanced_coherent) { glUniform1i(glGetUniformLocation(m_id, GLSL_dstColorTexture), @@ -855,18 +855,18 @@ static void bind_storage_buffer(const GLCapabilities& capabilities, void PLSRenderContextGLImpl::PLSImpl::ensureRasterOrderingEnabled( PLSRenderContextGLImpl* plsContextImpl, - const pls::FlushDescriptor& desc, + const gpu::FlushDescriptor& desc, bool enabled) { assert(!enabled || supportsRasterOrdering(plsContextImpl->m_capabilities)); - auto rasterOrderState = enabled ? pls::TriState::yes : pls::TriState::no; + auto rasterOrderState = enabled ? gpu::TriState::yes : gpu::TriState::no; if (m_rasterOrderingEnabled != rasterOrderState) { onEnableRasterOrdering(enabled); m_rasterOrderingEnabled = rasterOrderState; // We only need a barrier when turning raster ordering OFF, because PLS already inserts the // necessary barriers after draws when it's disabled. - if (m_rasterOrderingEnabled == pls::TriState::no) + if (m_rasterOrderingEnabled == gpu::TriState::no) { onBarrier(desc); } @@ -956,7 +956,7 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) FLUSH_UNIFORM_BUFFER_IDX, gl_buffer_id(flushUniformBufferRing()), desc.flushUniformDataOffsetInBytes, - sizeof(pls::FlushUniforms)); + sizeof(gpu::FlushUniforms)); // All programs use the same storage buffers. 
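The storage-buffer bindings just below expose only this flush's slice of each ring buffer, computed as an element offset and element count times the record size. A minimal sketch of the underlying range-bind, assuming GL 4.3 / ES 3.1 style SSBOs (the helper and its names are hypothetical, not this patch's API):

static void bindRingSlice(GLuint bufferID, GLuint bindingIdx, size_t firstElem, size_t elemCount, size_t elemSizeInBytes)
{
    // Bind just the [firstElem, firstElem + elemCount) range of the ring buffer.
    glBindBufferRange(GL_SHADER_STORAGE_BUFFER,
                      bindingIdx,
                      bufferID,
                      firstElem * elemSizeInBytes,
                      elemCount * elemSizeInBytes);
}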
if (desc.pathCount > 0) @@ -964,20 +964,20 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) bind_storage_buffer(m_capabilities, pathBufferRing(), PATH_BUFFER_IDX, - desc.pathCount * sizeof(pls::PathData), - desc.firstPath * sizeof(pls::PathData)); + desc.pathCount * sizeof(gpu::PathData), + desc.firstPath * sizeof(gpu::PathData)); bind_storage_buffer(m_capabilities, paintBufferRing(), PAINT_BUFFER_IDX, - desc.pathCount * sizeof(pls::PaintData), - desc.firstPaint * sizeof(pls::PaintData)); + desc.pathCount * sizeof(gpu::PaintData), + desc.firstPaint * sizeof(gpu::PaintData)); bind_storage_buffer(m_capabilities, paintAuxBufferRing(), PAINT_AUX_BUFFER_IDX, - desc.pathCount * sizeof(pls::PaintAuxData), - desc.firstPaintAux * sizeof(pls::PaintAuxData)); + desc.pathCount * sizeof(gpu::PaintAuxData), + desc.firstPaintAux * sizeof(gpu::PaintAuxData)); } if (desc.contourCount > 0) @@ -985,8 +985,8 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) bind_storage_buffer(m_capabilities, contourBufferRing(), CONTOUR_BUFFER_IDX, - desc.contourCount * sizeof(pls::ContourData), - desc.firstContour * sizeof(pls::ContourData)); + desc.contourCount * sizeof(gpu::ContourData), + desc.firstContour * sizeof(gpu::ContourData)); } // Render the complex color ramps into the gradient texture. @@ -1000,7 +1000,7 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) 4, GL_UNSIGNED_INT, 0, - reinterpret_cast(desc.firstComplexGradSpan * sizeof(pls::GradientSpan))); + reinterpret_cast(desc.firstComplexGradSpan * sizeof(gpu::GradientSpan))); glViewport(0, desc.complexGradRowsTop, kGradTextureWidth, desc.complexGradRowsHeight); glBindFramebuffer(GL_FRAMEBUFFER, m_colorRampFBO); m_state->bindProgram(m_colorRampProgram); @@ -1046,7 +1046,7 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) m_state->bindBuffer(GL_ARRAY_BUFFER, gl_buffer_id(tessSpanBufferRing())); m_state->bindVAO(m_tessellateVAO); m_state->setCullFace(GL_BACK); - size_t tessSpanOffsetInBytes = desc.firstTessVertexSpan * sizeof(pls::TessVertexSpan); + size_t tessSpanOffsetInBytes = desc.firstTessVertexSpan * sizeof(gpu::TessVertexSpan); for (GLuint i = 0; i < 3; ++i) { glVertexAttribPointer(i, @@ -1062,13 +1062,13 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) GL_UNSIGNED_INT, sizeof(TessVertexSpan), reinterpret_cast(tessSpanOffsetInBytes + offsetof(TessVertexSpan, x0x1))); - glViewport(0, 0, pls::kTessTextureWidth, desc.tessDataHeight); + glViewport(0, 0, gpu::kTessTextureWidth, desc.tessDataHeight); glBindFramebuffer(GL_FRAMEBUFFER, m_tessellateFBO); m_state->bindProgram(m_tessellateProgram); GLenum colorAttachment0 = GL_COLOR_ATTACHMENT0; glInvalidateFramebuffer(GL_FRAMEBUFFER, 1, &colorAttachment0); glDrawElementsInstanced(GL_TRIANGLES, - std::size(pls::kTessSpanIndices), + std::size(gpu::kTessSpanIndices), GL_UNSIGNED_SHORT, 0, desc.tessVertexSpanCount); @@ -1079,13 +1079,13 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) // (ANGLE_shader_pixel_local_storage doesn't allow shader compilation while active.) for (const DrawBatch& batch : *desc.drawList) { - auto shaderFeatures = desc.interlockMode == pls::InterlockMode::atomics + auto shaderFeatures = desc.interlockMode == gpu::InterlockMode::atomics ? desc.combinedShaderFeatures : batch.shaderFeatures; auto fragmentShaderMiscFlags = m_plsImpl != nullptr ? 
m_plsImpl->shaderMiscFlags(desc, batch.drawType) - : pls::ShaderMiscFlags::none; - uint32_t fragmentShaderKey = pls::ShaderUniqueKey(batch.drawType, + : gpu::ShaderMiscFlags::none; + uint32_t fragmentShaderKey = gpu::ShaderUniqueKey(batch.drawType, shaderFeatures, desc.interlockMode, fragmentShaderMiscFlags); @@ -1117,7 +1117,7 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) auto msaaResolveAction = PLSRenderTargetGL::MSAAResolveAction::automatic; std::array msaaDepthStencilColor; - if (desc.interlockMode != pls::InterlockMode::depthStencil) + if (desc.interlockMode != gpu::InterlockMode::depthStencil) { assert(desc.msaaSampleCount == 0); m_plsImpl->activatePixelLocalStorage(this, desc); @@ -1126,7 +1126,7 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) { // Render with MSAA in depthStencil mode. assert(desc.msaaSampleCount > 0); - bool preserveRenderTarget = desc.colorLoadAction == pls::LoadAction::preserveRenderTarget; + bool preserveRenderTarget = desc.colorLoadAction == gpu::LoadAction::preserveRenderTarget; bool isFBO0; msaaResolveAction = renderTarget->bindMSAAFramebuffer( this, @@ -1150,7 +1150,7 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) msaaDepthStencilColor.data()); GLbitfield buffersToClear = GL_STENCIL_BUFFER_BIT | GL_DEPTH_BUFFER_BIT; - if (desc.colorLoadAction == pls::LoadAction::clear) + if (desc.colorLoadAction == gpu::LoadAction::clear) { float cc[4]; UnpackColorToRGBA32F(desc.clearColor, cc); @@ -1162,7 +1162,7 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) glEnable(GL_STENCIL_TEST); glEnable(GL_DEPTH_TEST); - if (desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_ADVANCED_BLEND) + if (desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_ADVANCED_BLEND) { if (m_capabilities.KHR_blend_equation_advanced_coherent) { @@ -1187,13 +1187,13 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) continue; } - auto shaderFeatures = desc.interlockMode == pls::InterlockMode::atomics + auto shaderFeatures = desc.interlockMode == gpu::InterlockMode::atomics ? desc.combinedShaderFeatures : batch.shaderFeatures; auto fragmentShaderMiscFlags = m_plsImpl != nullptr ? m_plsImpl->shaderMiscFlags(desc, batch.drawType) - : pls::ShaderMiscFlags::none; - uint32_t fragmentShaderKey = pls::ShaderUniqueKey(batch.drawType, + : gpu::ShaderMiscFlags::none; + uint32_t fragmentShaderKey = gpu::ShaderUniqueKey(batch.drawType, shaderFeatures, desc.interlockMode, fragmentShaderMiscFlags); @@ -1211,14 +1211,14 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) glBindTexture(GL_TEXTURE_2D, imageTextureGL->textureID()); } - if (desc.interlockMode == pls::InterlockMode::depthStencil) + if (desc.interlockMode == gpu::InterlockMode::depthStencil) { // Set up the next blend. - if (batch.drawContents & pls::DrawContents::opaquePaint) + if (batch.drawContents & gpu::DrawContents::opaquePaint) { m_state->disableBlending(); } - else if (!(batch.drawContents & pls::DrawContents::advancedBlend)) + else if (!(batch.drawContents & gpu::DrawContents::advancedBlend)) { assert(batch.internalDrawList->blendMode() == BlendMode::srcOver); m_state->setBlendEquation(BlendMode::srcOver); @@ -1245,7 +1245,7 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) } // Set up the next clipRect. 
- bool needsClipPlanes = (shaderFeatures & pls::ShaderFeatures::ENABLE_CLIP_RECT); + bool needsClipPlanes = (shaderFeatures & gpu::ShaderFeatures::ENABLE_CLIP_RECT); if (needsClipPlanes != clipPlanesEnabled) { auto toggleEnableOrDisable = needsClipPlanes ? glEnable : glDisable; @@ -1257,7 +1257,7 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) } } - switch (pls::DrawType drawType = batch.drawType) + switch (gpu::DrawType drawType = batch.drawType) { case DrawType::midpointFanPatches: case DrawType::outerCurvePatches: @@ -1270,14 +1270,14 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) batch.elementCount, batch.baseElement); - if (desc.interlockMode != pls::InterlockMode::depthStencil) + if (desc.interlockMode != gpu::InterlockMode::depthStencil) { m_plsImpl->ensureRasterOrderingEnabled(this, desc, desc.interlockMode == - pls::InterlockMode::rasterOrdering); - drawHelper.setIndexRange(pls::PatchIndexCount(drawType), - pls::PatchBaseIndex(drawType)); + gpu::InterlockMode::rasterOrdering); + drawHelper.setIndexRange(gpu::PatchIndexCount(drawType), + gpu::PatchBaseIndex(drawType)); m_state->setCullFace(GL_BACK); drawHelper.draw(); break; @@ -1285,19 +1285,19 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) // MSAA path draws require different stencil settings, depending on their // drawContents. - bool hasActiveClip = ((batch.drawContents & pls::DrawContents::activeClip)); - bool isClipUpdate = ((batch.drawContents & pls::DrawContents::clipUpdate)); + bool hasActiveClip = ((batch.drawContents & gpu::DrawContents::activeClip)); + bool isClipUpdate = ((batch.drawContents & gpu::DrawContents::clipUpdate)); bool isNestedClipUpdate = - (batch.drawContents & pls::kNestedClipUpdateMask) == pls::kNestedClipUpdateMask; - bool isEvenOddFill = (batch.drawContents & pls::DrawContents::evenOddFill); - bool isStroke = (batch.drawContents & pls::DrawContents::stroke); + (batch.drawContents & gpu::kNestedClipUpdateMask) == gpu::kNestedClipUpdateMask; + bool isEvenOddFill = (batch.drawContents & gpu::DrawContents::evenOddFill); + bool isStroke = (batch.drawContents & gpu::DrawContents::stroke); if (isStroke) { // MSAA strokes only use the "border" section of the patch. // (The depth test prevents double hits.) - assert(drawType == pls::DrawType::midpointFanPatches); - drawHelper.setIndexRange(pls::kMidpointFanPatchBorderIndexCount, - pls::kMidpointFanPatchBaseIndex); + assert(drawType == gpu::DrawType::midpointFanPatches); + drawHelper.setIndexRange(gpu::kMidpointFanPatchBorderIndexCount, + gpu::kMidpointFanPatchBaseIndex); m_state->setWriteMasks(true, true, 0xff); m_state->setCullFace(GL_BACK); drawHelper.drawWithStencilSettings(hasActiveClip ? GL_EQUAL : GL_ALWAYS, @@ -1310,8 +1310,8 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) } // MSAA fills only use the "fan" section of the patch (they don't need AA borders). - drawHelper.setIndexRange(pls::PatchFanIndexCount(drawType), - pls::PatchFanBaseIndex(drawType)); + drawHelper.setIndexRange(gpu::PatchFanIndexCount(drawType), + gpu::PatchFanBaseIndex(drawType)); // "nonZero" fill rules (that aren't nested clip updates) can be optimized to render // directly instead of using a "stencil then cover" approach.
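For contrast with the direct rendering path described above, the classic two-pass "stencil, then cover" sequence looks roughly like this in raw GL (a sketch with hypothetical vertex counts, not this patch's draw code):

static void stencilThenCover(GLsizei fanVertexCount, GLsizei coverVertexCount)
{
    // Pass 1: wind the fan into the stencil buffer with color writes off.
    // Front faces increment, back faces decrement, so each pixel's stencil
    // value ends up holding its nonZero winding number.
    glEnable(GL_STENCIL_TEST);
    glDisable(GL_CULL_FACE); // both windings must rasterize
    glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
    glStencilFunc(GL_ALWAYS, 0, 0xff);
    glStencilOpSeparate(GL_FRONT, GL_KEEP, GL_KEEP, GL_INCR_WRAP);
    glStencilOpSeparate(GL_BACK, GL_KEEP, GL_KEEP, GL_DECR_WRAP);
    glDrawArrays(GL_TRIANGLES, 0, fanVertexCount);

    // Pass 2: cover the path's bounds, shading wherever the winding is
    // nonzero and clearing the stencil back to 0 along the way.
    glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
    glStencilFunc(GL_NOTEQUAL, 0, 0xff);
    glStencilOp(GL_ZERO, GL_ZERO, GL_ZERO);
    glDrawArrays(GL_TRIANGLES, 0, coverVertexCount);
}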
@@ -1378,12 +1378,12 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) } break; } - case pls::DrawType::stencilClipReset: + case gpu::DrawType::stencilClipReset: { - assert(desc.interlockMode == pls::InterlockMode::depthStencil); + assert(desc.interlockMode == gpu::InterlockMode::depthStencil); m_state->bindVAO(m_trianglesVAO); bool isNestedClipUpdate = - (batch.drawContents & pls::kNestedClipUpdateMask) == pls::kNestedClipUpdateMask; + (batch.drawContents & gpu::kNestedClipUpdateMask) == gpu::kNestedClipUpdateMask; if (isNestedClipUpdate) { // The nested clip just got stencilled and left in the stencil buffer. Intersect @@ -1403,18 +1403,18 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) glDrawArrays(GL_TRIANGLES, batch.baseElement, batch.elementCount); break; } - case pls::DrawType::interiorTriangulation: + case gpu::DrawType::interiorTriangulation: { - assert(desc.interlockMode != pls::InterlockMode::depthStencil); // TODO! + assert(desc.interlockMode != gpu::InterlockMode::depthStencil); // TODO! m_plsImpl->ensureRasterOrderingEnabled(this, desc, false); m_state->bindVAO(m_trianglesVAO); m_state->setCullFace(GL_BACK); glDrawArrays(GL_TRIANGLES, batch.baseElement, batch.elementCount); break; } - case pls::DrawType::imageRect: + case gpu::DrawType::imageRect: { - assert(desc.interlockMode == pls::InterlockMode::atomics); + assert(desc.interlockMode == gpu::InterlockMode::atomics); assert(!m_capabilities.ARB_bindless_texture); assert(m_imageRectVAO != 0); // Should have gotten lazily allocated by now. m_plsImpl->ensureRasterOrderingEnabled(this, desc, false); @@ -1423,15 +1423,15 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) IMAGE_DRAW_UNIFORM_BUFFER_IDX, gl_buffer_id(imageDrawUniformBufferRing()), batch.imageDrawDataOffset, - sizeof(pls::ImageDrawUniforms)); + sizeof(gpu::ImageDrawUniforms)); m_state->setCullFace(GL_NONE); glDrawElements(GL_TRIANGLES, - std::size(pls::kImageRectIndices), + std::size(gpu::kImageRectIndices), GL_UNSIGNED_SHORT, nullptr); break; } - case pls::DrawType::imageMesh: + case gpu::DrawType::imageMesh: { LITE_RTTI_CAST_OR_BREAK(vertexBuffer, const PLSRenderBufferGLImpl*, @@ -1450,8 +1450,8 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) IMAGE_DRAW_UNIFORM_BUFFER_IDX, gl_buffer_id(imageDrawUniformBufferRing()), batch.imageDrawDataOffset, - sizeof(pls::ImageDrawUniforms)); - if (desc.interlockMode != pls::InterlockMode::depthStencil) + sizeof(gpu::ImageDrawUniforms)); + if (desc.interlockMode != gpu::InterlockMode::depthStencil) { // Try to enable raster ordering for image meshes in rasterOrdering and atomic // mode both; we have no control over whether the internal geometry has self @@ -1463,7 +1463,7 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) } else { - bool hasActiveClip = ((batch.drawContents & pls::DrawContents::activeClip)); + bool hasActiveClip = ((batch.drawContents & gpu::DrawContents::activeClip)); glStencilFunc(hasActiveClip ? 
GL_EQUAL : GL_ALWAYS, 0x80, 0xff); glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP); m_state->setWriteMasks(true, true, 0xff); @@ -1475,29 +1475,29 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) reinterpret_cast(batch.baseElement * sizeof(uint16_t))); break; } - case pls::DrawType::plsAtomicResolve: + case gpu::DrawType::gpuAtomicResolve: { - assert(desc.interlockMode == pls::InterlockMode::atomics); + assert(desc.interlockMode == gpu::InterlockMode::atomics); m_plsImpl->ensureRasterOrderingEnabled(this, desc, false); m_state->bindVAO(m_emptyVAO); m_plsImpl->setupAtomicResolve(this, desc); glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); break; } - case pls::DrawType::plsAtomicInitialize: + case gpu::DrawType::gpuAtomicInitialize: { - assert(desc.interlockMode == pls::InterlockMode::atomics); + assert(desc.interlockMode == gpu::InterlockMode::atomics); RIVE_UNREACHABLE(); } } - if (desc.interlockMode != pls::InterlockMode::depthStencil && batch.needsBarrier && - batch.drawType != pls::DrawType::imageMesh /*EW!*/) + if (desc.interlockMode != gpu::InterlockMode::depthStencil && batch.needsBarrier && + batch.drawType != gpu::DrawType::imageMesh /*EW!*/) { m_plsImpl->barrier(desc); } } - if (desc.interlockMode != pls::InterlockMode::depthStencil) + if (desc.interlockMode != gpu::InterlockMode::depthStencil) { m_plsImpl->deactivatePixelLocalStorage(this, desc); } @@ -1505,7 +1505,7 @@ void PLSRenderContextGLImpl::flush(const FlushDescriptor& desc) { // Depth/stencil don't need to be written out. glInvalidateFramebuffer(GL_DRAW_FRAMEBUFFER, 2, msaaDepthStencilColor.data()); - if ((desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_ADVANCED_BLEND) && + if ((desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_ADVANCED_BLEND) && m_capabilities.KHR_blend_equation_advanced_coherent) { glDisable(GL_BLEND_ADVANCED_COHERENT_KHR); @@ -1792,7 +1792,7 @@ std::unique_ptr PLSRenderContextGLImpl::MakeContext( { int maxVertexShaderStorageBlocks; glGetIntegerv(GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS, &maxVertexShaderStorageBlocks); - if (maxVertexShaderStorageBlocks < pls::kMaxStorageBuffers) + if (maxVertexShaderStorageBlocks < gpu::kMaxStorageBuffers) { capabilities.ARB_shader_storage_buffer_object = false; } @@ -1880,4 +1880,4 @@ std::unique_ptr PLSRenderContextGLImpl::MakeContext( new PLSRenderContextGLImpl(rendererString, capabilities, std::move(plsImpl))); return std::make_unique(std::move(plsContextImpl)); } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/gl/pls_render_target_gl.cpp b/renderer/src/gl/render_target_gl.cpp similarity index 97% rename from pls/renderer/gl/pls_render_target_gl.cpp rename to renderer/src/gl/render_target_gl.cpp index f9d956f0..4d0a4e85 100644 --- a/pls/renderer/gl/pls_render_target_gl.cpp +++ b/renderer/src/gl/render_target_gl.cpp @@ -2,13 +2,13 @@ * Copyright 2023 Rive */ -#include "rive/pls/gl/pls_render_target_gl.hpp" +#include "rive/renderer/gl/render_target_gl.hpp" -#include "rive/pls/pls.hpp" -#include "rive/pls/gl/pls_render_context_gl_impl.hpp" +#include "rive/renderer/gpu.hpp" +#include "rive/renderer/gl/render_context_gl_impl.hpp" #include "shaders/constants.glsl" -namespace rive::pls +namespace rive::gpu { TextureRenderTargetGL::~TextureRenderTargetGL() {} @@ -21,7 +21,7 @@ static glutils::Texture make_backing_texture(GLenum internalformat, uint32_t wid return texture; } -void TextureRenderTargetGL::allocateInternalPLSTextures(pls::InterlockMode interlockMode) +void 
TextureRenderTargetGL::allocateInternalPLSTextures(gpu::InterlockMode interlockMode) { if (m_coverageTexture == 0) { @@ -300,7 +300,7 @@ void FramebufferRenderTargetGL::allocateOffscreenTargetTexture() } } -void FramebufferRenderTargetGL::allocateInternalPLSTextures(pls::InterlockMode interlockMode) +void FramebufferRenderTargetGL::allocateInternalPLSTextures(gpu::InterlockMode interlockMode) { m_textureRenderTarget.allocateInternalPLSTextures(interlockMode); } @@ -378,4 +378,4 @@ void FramebufferRenderTargetGL::bindInternalDstTexture(GLenum activeTexture) allocateOffscreenTargetTexture(); m_textureRenderTarget.bindInternalDstTexture(activeTexture); } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/gr_inner_fan_triangulator.hpp b/renderer/src/gr_inner_fan_triangulator.hpp similarity index 97% rename from pls/renderer/gr_inner_fan_triangulator.hpp rename to renderer/src/gr_inner_fan_triangulator.hpp index bb5a1669..378853eb 100644 --- a/pls/renderer/gr_inner_fan_triangulator.hpp +++ b/renderer/src/gr_inner_fan_triangulator.hpp @@ -45,7 +45,7 @@ class GrInnerFanTriangulator : private GrTriangulator size_t maxVertexCount() const { return m_maxVertexCount; } - size_t polysToTriangles(pls::WriteOnlyMappedMemory* bufferRing, + size_t polysToTriangles(gpu::WriteOnlyMappedMemory* bufferRing, uint16_t pathID) const { diff --git a/pls/renderer/gr_triangulator.cpp b/renderer/src/gr_triangulator.cpp similarity index 99% rename from pls/renderer/gr_triangulator.cpp rename to renderer/src/gr_triangulator.cpp index 2ae1126f..36e22d8f 100644 --- a/pls/renderer/gr_triangulator.cpp +++ b/renderer/src/gr_triangulator.cpp @@ -114,7 +114,7 @@ bool GrTriangulator::Comparator::sweep_lt(const Vec2D& a, const Vec2D& b) const static inline void emit_vertex(Vertex* v, int winding, uint16_t pathID, - pls::WriteOnlyMappedMemory* mappedMemory) + gpu::WriteOnlyMappedMemory* mappedMemory) { // GrTriangulator and pls unfortunately have opposite winding senses. 
int16_t plsWeight = -winding; @@ -126,7 +126,7 @@ static void emit_triangle(Vertex* v0, Vertex* v2, int winding, uint16_t pathID, - pls::WriteOnlyMappedMemory* mappedMemory) + gpu::WriteOnlyMappedMemory* mappedMemory) { TESS_LOG("emit_triangle %g (%g, %g) %d\n", v0->fID, v0->fPoint.x, v0->fPoint.y, v0->fAlpha); TESS_LOG(" %g (%g, %g) %d\n", v1->fID, v1->fPoint.x, v1->fPoint.y, v1->fAlpha); @@ -416,7 +416,7 @@ void GrTriangulator::emitMonotonePoly( const MonotonePoly* monotonePoly, uint16_t pathID, bool reverseTriangles, - pls::WriteOnlyMappedMemory* mappedMemory) const + gpu::WriteOnlyMappedMemory* mappedMemory) const { assert(monotonePoly->fWinding != 0); Edge* e = monotonePoly->fFirstEdge; @@ -494,7 +494,7 @@ void GrTriangulator::emitTriangle( int winding, uint16_t pathID, bool reverseTriangles, - pls::WriteOnlyMappedMemory* mappedMemory) const + gpu::WriteOnlyMappedMemory* mappedMemory) const { if (reverseTriangles) { @@ -583,7 +583,7 @@ Poly* GrTriangulator::Poly::addEdge(Edge* e, Side side, GrTriangulator* tri) void GrTriangulator::emitPoly(const Poly* poly, uint16_t pathID, bool reverseTriangles, - pls::WriteOnlyMappedMemory* mappedMemory) const + gpu::WriteOnlyMappedMemory* mappedMemory) const { if (poly->fCount < 3) { @@ -2277,7 +2277,7 @@ void GrTriangulator::polysToTriangles( FillRule overrideFillType, uint16_t pathID, bool reverseTriangles, - pls::WriteOnlyMappedMemory* mappedMemory) const + gpu::WriteOnlyMappedMemory* mappedMemory) const { for (Poly* poly = polys; poly; poly = poly->fNext) { @@ -2372,14 +2372,14 @@ size_t GrTriangulator::polysToTriangles( uint64_t maxVertexCount, uint16_t pathID, bool reverseTriangles, - pls::WriteOnlyMappedMemory* mappedMemory) const + gpu::WriteOnlyMappedMemory* mappedMemory) const { if (0 == maxVertexCount || maxVertexCount > std::numeric_limits::max()) { return 0; } - size_t vertexStride = sizeof(pls::TriangleVertex); + size_t vertexStride = sizeof(gpu::TriangleVertex); #if 0 if (fEmitCoverage) { diff --git a/pls/renderer/gr_triangulator.hpp b/renderer/src/gr_triangulator.hpp similarity index 98% rename from pls/renderer/gr_triangulator.hpp rename to renderer/src/gr_triangulator.hpp index dad30cf3..0a3076fc 100644 --- a/pls/renderer/gr_triangulator.hpp +++ b/renderer/src/gr_triangulator.hpp @@ -18,8 +18,8 @@ #include "rive/math/raw_path.hpp" #include "rive/math/vec2d.hpp" #include "rive/math/aabb.hpp" -#include "rive/pls/pls.hpp" -#include "rive/pls/trivial_block_allocator.hpp" +#include "rive/renderer/gpu.hpp" +#include "rive/renderer/trivial_block_allocator.hpp" namespace rive { @@ -119,7 +119,7 @@ class GrTriangulator FillRule overrideFillRule, uint16_t pathID, bool reverseTriangles, - pls::WriteOnlyMappedMemory*) const; + gpu::WriteOnlyMappedMemory*) const; // The vertex sorting in step (3) is a merge sort, since it plays well with the linked list // of vertices (and the necessity of inserting new vertices on intersection). 
@@ -168,18 +168,18 @@ class GrTriangulator void emitMonotonePoly(const MonotonePoly*, uint16_t pathID, bool reverseTriangles, - pls::WriteOnlyMappedMemory*) const; + gpu::WriteOnlyMappedMemory*) const; void emitTriangle(Vertex* prev, Vertex* curr, Vertex* next, int winding, uint16_t pathID, bool reverseTriangles, - pls::WriteOnlyMappedMemory*) const; + gpu::WriteOnlyMappedMemory*) const; void emitPoly(const Poly*, uint16_t pathID, bool reverseTriangles, - pls::WriteOnlyMappedMemory*) const; + gpu::WriteOnlyMappedMemory*) const; Poly* makePoly(Poly** head, Vertex* v, int winding) const; void appendPointToContour(const Vec2D& p, VertexList* contour) const; @@ -265,7 +265,7 @@ class GrTriangulator uint64_t maxVertexCount, uint16_t pathID, bool reverseTriangles, - pls::WriteOnlyMappedMemory*) const; + gpu::WriteOnlyMappedMemory*) const; Comparator::Direction fDirection; FillRule fFillRule; diff --git a/pls/renderer/pls_image.cpp b/renderer/src/image.cpp similarity index 75% rename from pls/renderer/pls_image.cpp rename to renderer/src/image.cpp index c81b1dfc..59e1e5bb 100644 --- a/pls/renderer/pls_image.cpp +++ b/renderer/src/image.cpp @@ -2,13 +2,13 @@ * Copyright 2022 Rive */ -#include "rive/pls/pls_image.hpp" +#include "rive/renderer/image.hpp" -namespace rive::pls +namespace rive::gpu { PLSTexture::PLSTexture(uint32_t width, uint32_t height) : m_width(width), m_height(height) { static std::atomic_uint32_t textureResourceHashCounter = 0; m_textureResourceHash = ++textureResourceHashCounter; } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/intersection_board.cpp b/renderer/src/intersection_board.cpp similarity index 99% rename from pls/renderer/intersection_board.cpp rename to renderer/src/intersection_board.cpp index a58318be..461eec5b 100644 --- a/pls/renderer/intersection_board.cpp +++ b/renderer/src/intersection_board.cpp @@ -13,7 +13,7 @@ #else #endif -namespace rive::pls +namespace rive::gpu { void IntersectionTile::reset(int left, int top, int16_t baselineGroupIndex) { @@ -252,4 +252,4 @@ int16_t IntersectionBoard::addRectangle(int4 ltrb) return nextGroupIndex; } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/intersection_board.hpp b/renderer/src/intersection_board.hpp similarity index 98% rename from pls/renderer/intersection_board.hpp rename to renderer/src/intersection_board.hpp index 51d0f383..04469afe 100644 --- a/pls/renderer/intersection_board.hpp +++ b/renderer/src/intersection_board.hpp @@ -7,7 +7,7 @@ #include "rive/math/simd.hpp" #include -namespace rive::pls +namespace rive::gpu { // 255 x 255 tile that manages a set of rectangles and their groupIndex. // From a given rectangle, finds the max groupIndex in the set of internal rectangles it intersects. 
@@ -70,4 +70,4 @@ class IntersectionBoard int32_t m_rows; std::vector m_tiles; }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/metal/background_shader_compiler.h b/renderer/src/metal/background_shader_compiler.h similarity index 82% rename from pls/renderer/metal/background_shader_compiler.h rename to renderer/src/metal/background_shader_compiler.h index 3c705309..0d9c8ee4 100644 --- a/pls/renderer/metal/background_shader_compiler.h +++ b/renderer/src/metal/background_shader_compiler.h @@ -4,24 +4,24 @@ #pragma once -#include "rive/pls/pls.hpp" -#include "rive/pls/metal/pls_render_context_metal_impl.h" +#include "rive/renderer/gpu.hpp" +#include "rive/renderer/metal/render_context_metal_impl.h" #include #include #import -namespace rive::pls +namespace rive::gpu { // Defines a job to compile a "draw" shader -- either draw_path.glsl or draw_image_mesh.glsl, with a // specific set of features enabled. struct BackgroundCompileJob { - pls::DrawType drawType; - pls::ShaderFeatures shaderFeatures; - pls::InterlockMode interlockMode; - pls::ShaderMiscFlags shaderMiscFlags; + gpu::DrawType drawType; + gpu::ShaderFeatures shaderFeatures; + gpu::InterlockMode interlockMode; + gpu::ShaderMiscFlags shaderMiscFlags; id compiledLibrary; }; @@ -55,4 +55,4 @@ class BackgroundShaderCompiler bool m_shouldQuit; std::thread m_compilerThread; }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/metal/background_shader_compiler.mm b/renderer/src/metal/background_shader_compiler.mm similarity index 74% rename from pls/renderer/metal/background_shader_compiler.mm rename to renderer/src/metal/background_shader_compiler.mm index 22b5b83b..4f1172b5 100644 --- a/pls/renderer/metal/background_shader_compiler.mm +++ b/renderer/src/metal/background_shader_compiler.mm @@ -19,7 +19,7 @@ #include -namespace rive::pls +namespace rive::gpu { BackgroundShaderCompiler::~BackgroundShaderCompiler() { @@ -81,24 +81,24 @@ lock.unlock(); - pls::DrawType drawType = job.drawType; - pls::ShaderFeatures shaderFeatures = job.shaderFeatures; - pls::InterlockMode interlockMode = job.interlockMode; - pls::ShaderMiscFlags shaderMiscFlags = job.shaderMiscFlags; + gpu::DrawType drawType = job.drawType; + gpu::ShaderFeatures shaderFeatures = job.shaderFeatures; + gpu::InterlockMode interlockMode = job.interlockMode; + gpu::ShaderMiscFlags shaderMiscFlags = job.shaderMiscFlags; auto defines = [[NSMutableDictionary alloc] init]; defines[@GLSL_VERTEX] = @""; defines[@GLSL_FRAGMENT] = @""; - for (size_t i = 0; i < pls::kShaderFeatureCount; ++i) + for (size_t i = 0; i < gpu::kShaderFeatureCount; ++i) { ShaderFeatures feature = static_cast(1 << i); if (shaderFeatures & feature) { - const char* macro = pls::GetShaderFeatureGLSLName(feature); + const char* macro = gpu::GetShaderFeatureGLSLName(feature); defines[[NSString stringWithUTF8String:macro]] = @"1"; } } - if (interlockMode == pls::InterlockMode::atomics) + if (interlockMode == gpu::InterlockMode::atomics) { // Atomic mode uses device buffers instead of framebuffer fetches. 
defines[@GLSL_PLS_IMPL_DEVICE_BUFFER] = @""; @@ -106,18 +106,18 @@ { defines[@GLSL_PLS_IMPL_DEVICE_BUFFER_RASTER_ORDERED] = @""; } - if (!(shaderFeatures & pls::ShaderFeatures::ENABLE_ADVANCED_BLEND)) + if (!(shaderFeatures & gpu::ShaderFeatures::ENABLE_ADVANCED_BLEND)) { defines[@GLSL_FIXED_FUNCTION_COLOR_BLEND] = @""; } } - auto source = [[NSMutableString alloc] initWithCString:pls::glsl::metal + auto source = [[NSMutableString alloc] initWithCString:gpu::glsl::metal encoding:NSUTF8StringEncoding]; - [source appendFormat:@"%s\n%s\n", pls::glsl::constants, pls::glsl::common]; + [source appendFormat:@"%s\n%s\n", gpu::glsl::constants, gpu::glsl::common]; if (shaderFeatures & ShaderFeatures::ENABLE_ADVANCED_BLEND) { - [source appendFormat:@"%s\n", pls::glsl::advanced_blend]; + [source appendFormat:@"%s\n", gpu::glsl::advanced_blend]; } switch (drawType) @@ -127,26 +127,26 @@ // Add baseInstance to the instanceID for path draws. defines[@GLSL_ENABLE_INSTANCE_INDEX] = @""; defines[@GLSL_DRAW_PATH] = @""; - [source appendFormat:@"%s\n", pls::glsl::draw_path_common]; + [source appendFormat:@"%s\n", gpu::glsl::draw_path_common]; #ifdef RIVE_IOS - [source appendFormat:@"%s\n", pls::glsl::draw_path]; + [source appendFormat:@"%s\n", gpu::glsl::draw_path]; #else [source appendFormat:@"%s\n", - interlockMode == pls::InterlockMode::rasterOrdering - ? pls::glsl::draw_path - : pls::glsl::atomic_draw]; + interlockMode == gpu::InterlockMode::rasterOrdering + ? gpu::glsl::draw_path + : gpu::glsl::atomic_draw]; #endif break; case DrawType::interiorTriangulation: defines[@GLSL_DRAW_INTERIOR_TRIANGLES] = @""; - [source appendFormat:@"%s\n", pls::glsl::draw_path_common]; + [source appendFormat:@"%s\n", gpu::glsl::draw_path_common]; #ifdef RIVE_IOS - [source appendFormat:@"%s\n", pls::glsl::draw_path]; + [source appendFormat:@"%s\n", gpu::glsl::draw_path]; #else [source appendFormat:@"%s\n", - interlockMode == pls::InterlockMode::rasterOrdering - ? pls::glsl::draw_path - : pls::glsl::atomic_draw]; + interlockMode == gpu::InterlockMode::rasterOrdering + ? gpu::glsl::draw_path + : gpu::glsl::atomic_draw]; #endif break; case DrawType::imageRect: @@ -156,58 +156,58 @@ assert(interlockMode == InterlockMode::atomics); defines[@GLSL_DRAW_IMAGE] = @""; defines[@GLSL_DRAW_IMAGE_RECT] = @""; - [source appendFormat:@"%s\n", pls::glsl::atomic_draw]; + [source appendFormat:@"%s\n", gpu::glsl::atomic_draw]; #endif break; case DrawType::imageMesh: defines[@GLSL_DRAW_IMAGE] = @""; defines[@GLSL_DRAW_IMAGE_MESH] = @""; #ifdef RIVE_IOS - [source appendFormat:@"%s\n", pls::glsl::draw_image_mesh]; + [source appendFormat:@"%s\n", gpu::glsl::draw_image_mesh]; #else [source appendFormat:@"%s\n", - interlockMode == pls::InterlockMode::rasterOrdering - ? pls::glsl::draw_image_mesh - : pls::glsl::atomic_draw]; + interlockMode == gpu::InterlockMode::rasterOrdering + ? 
gpu::glsl::draw_image_mesh + : gpu::glsl::atomic_draw]; #endif break; - case DrawType::plsAtomicInitialize: + case DrawType::gpuAtomicInitialize: #ifdef RIVE_IOS RIVE_UNREACHABLE(); #else assert(interlockMode == InterlockMode::atomics); defines[@GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS] = @""; defines[@GLSL_INITIALIZE_PLS] = @""; - if (shaderMiscFlags & pls::ShaderMiscFlags::storeColorClear) + if (shaderMiscFlags & gpu::ShaderMiscFlags::storeColorClear) { defines[@GLSL_STORE_COLOR_CLEAR] = @""; } - if (shaderMiscFlags & pls::ShaderMiscFlags::swizzleColorBGRAToRGBA) + if (shaderMiscFlags & gpu::ShaderMiscFlags::swizzleColorBGRAToRGBA) { defines[@GLSL_SWIZZLE_COLOR_BGRA_TO_RGBA] = @""; } - [source appendFormat:@"%s\n", pls::glsl::atomic_draw]; + [source appendFormat:@"%s\n", gpu::glsl::atomic_draw]; #endif break; - case DrawType::plsAtomicResolve: + case DrawType::gpuAtomicResolve: #ifdef RIVE_IOS RIVE_UNREACHABLE(); #else assert(interlockMode == InterlockMode::atomics); defines[@GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS] = @""; defines[@GLSL_RESOLVE_PLS] = @""; - if (shaderMiscFlags & pls::ShaderMiscFlags::coalescedResolveAndTransfer) + if (shaderMiscFlags & gpu::ShaderMiscFlags::coalescedResolveAndTransfer) { defines[@GLSL_COALESCED_PLS_RESOLVE_AND_TRANSFER] = @""; } - [source appendFormat:@"%s\n", pls::glsl::atomic_draw]; + [source appendFormat:@"%s\n", gpu::glsl::atomic_draw]; #endif break; case DrawType::stencilClipReset: RIVE_UNREACHABLE(); } - NSError* err = [NSError errorWithDomain:@"pls_compile" code:200 userInfo:nil]; + NSError* err = [NSError errorWithDomain:@"compile" code:200 userInfo:nil]; MTLCompileOptions* compileOptions = [MTLCompileOptions new]; #if defined(RIVE_IOS) || defined(RIVE_IOS_SIMULATOR) compileOptions.languageVersion = MTLLanguageVersion2_2; // On ios, we need version 2.2+ @@ -241,4 +241,4 @@ m_workFinishedCondition.notify_all(); } } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/metal/pls_render_context_metal_impl.mm b/renderer/src/metal/render_context_metal_impl.mm similarity index 90% rename from pls/renderer/metal/pls_render_context_metal_impl.mm rename to renderer/src/metal/render_context_metal_impl.mm index 714d5ff7..de27d115 100644 --- a/pls/renderer/metal/pls_render_context_metal_impl.mm +++ b/renderer/src/metal/render_context_metal_impl.mm @@ -2,11 +2,11 @@ * Copyright 2023 Rive */ -#include "rive/pls/metal/pls_render_context_metal_impl.h" +#include "rive/renderer/metal/render_context_metal_impl.h" #include "background_shader_compiler.h" -#include "rive/pls/buffer_ring.hpp" -#include "rive/pls/pls_image.hpp" +#include "rive/renderer/buffer_ring.hpp" +#include "rive/renderer/image.hpp" #include "shaders/constants.glsl" #include @@ -17,7 +17,7 @@ #import #endif -namespace rive::pls +namespace rive::gpu { #ifdef RIVE_IOS #include "generated/shaders/rive_pls_ios.metallib.c" @@ -30,7 +30,7 @@ static id make_pipeline_state(id gpu, MTLRenderPipelineDescriptor* desc) { - NSError* err = [NSError errorWithDomain:@"pls_pipeline_create" code:201 userInfo:nil]; + NSError* err = [NSError errorWithDomain:@"pipeline_create" code:201 userInfo:nil]; id state = [gpu newRenderPipelineStateWithDescriptor:desc error:&err]; if (!state) { @@ -96,7 +96,7 @@ { namespaceID[0] = '1'; } - for (size_t i = 0; i < pls::kShaderFeatureCount; ++i) + for (size_t i = 0; i < gpu::kShaderFeatureCount; ++i) { ShaderFeatures feature = static_cast(1 << i); if (shaderFeatures & feature) @@ -124,8 +124,8 @@ case DrawType::imageMesh: namespacePrefix = 'm'; break; - case 
DrawType::plsAtomicInitialize: - case DrawType::plsAtomicResolve: + case DrawType::gpuAtomicInitialize: + case DrawType::gpuAtomicResolve: case DrawType::stencilClipReset: RIVE_UNREACHABLE(); } @@ -138,10 +138,10 @@ id library, NSString* vertexFunctionName, NSString* fragmentFunctionName, - pls::DrawType drawType, - pls::InterlockMode interlockMode, - pls::ShaderFeatures shaderFeatures, - pls::ShaderMiscFlags shaderMiscFlags) + gpu::DrawType drawType, + gpu::InterlockMode interlockMode, + gpu::ShaderFeatures shaderFeatures, + gpu::ShaderMiscFlags shaderMiscFlags) { auto makePipelineState = [=](id vertexMain, id fragmentMain, @@ -155,16 +155,16 @@ switch (interlockMode) { - case pls::InterlockMode::rasterOrdering: + case gpu::InterlockMode::rasterOrdering: // In rasterOrdering mode, the PLS planes are accessed as color attachments. desc.colorAttachments[CLIP_PLANE_IDX].pixelFormat = MTLPixelFormatR32Uint; desc.colorAttachments[SCRATCH_COLOR_PLANE_IDX].pixelFormat = pixelFormat; desc.colorAttachments[COVERAGE_PLANE_IDX].pixelFormat = MTLPixelFormatR32Uint; break; - case pls::InterlockMode::atomics: + case gpu::InterlockMode::atomics: // In atomic mode, the PLS planes are accessed as device buffers. We only use // the "framebuffer" attachment configured above. - if (shaderMiscFlags & pls::ShaderMiscFlags::fixedFunctionColorBlend) + if (shaderMiscFlags & gpu::ShaderMiscFlags::fixedFunctionColorBlend) { // The shader expects a "src-over" blend function in order to implement // antialiasing and opacity. @@ -177,7 +177,7 @@ framebuffer.alphaBlendOperation = MTLBlendOperationAdd; framebuffer.writeMask = MTLColorWriteMaskAll; } - else if (drawType == pls::DrawType::plsAtomicResolve) + else if (drawType == gpu::DrawType::gpuAtomicResolve) { // We're resolving from the offscreen color buffer to the framebuffer // attachment. Write out the final color directly without any blend modes.
@@ -192,7 +192,7 @@ framebuffer.writeMask = MTLColorWriteMaskNone; } break; - case pls::InterlockMode::depthStencil: + case gpu::InterlockMode::depthStencil: RIVE_UNREACHABLE(); } return make_pipeline_state(gpu, desc); @@ -358,7 +358,7 @@ void onUnmapAndSubmitBuffer(int bufferIdx, size_t mapSizeInBytes) override {} #endif nil, nil); - NSError* err = [NSError errorWithDomain:@"pls_metallib_load" code:200 userInfo:nil]; + NSError* err = [NSError errorWithDomain:@"metallib_load" code:200 userInfo:nil]; m_plsPrecompiledLibrary = [m_gpu newLibraryWithData:metallibData error:&err]; if (m_plsPrecompiledLibrary == nil) { @@ -369,8 +369,8 @@ void onUnmapAndSubmitBuffer(int bufferIdx, size_t mapSizeInBytes) override {} m_colorRampPipeline = std::make_unique(m_gpu, m_plsPrecompiledLibrary); m_tessPipeline = std::make_unique(m_gpu, m_plsPrecompiledLibrary); - m_tessSpanIndexBuffer = [m_gpu newBufferWithBytes:pls::kTessSpanIndices - length:sizeof(pls::kTessSpanIndices) + m_tessSpanIndexBuffer = [m_gpu newBufferWithBytes:gpu::kTessSpanIndices + length:sizeof(gpu::kTessSpanIndices) options:MTLResourceStorageModeShared]; // The precompiled static library has a fully-featured shader for each drawType in @@ -381,26 +381,26 @@ void onUnmapAndSubmitBuffer(int bufferIdx, size_t mapSizeInBytes) override {} for (auto drawType : {DrawType::midpointFanPatches, DrawType::interiorTriangulation, DrawType::imageMesh}) { - pls::ShaderFeatures allShaderFeatures = - pls::ShaderFeaturesMaskFor(drawType, pls::InterlockMode::rasterOrdering); + gpu::ShaderFeatures allShaderFeatures = + gpu::ShaderFeaturesMaskFor(drawType, gpu::InterlockMode::rasterOrdering); uint32_t pipelineKey = ShaderUniqueKey(drawType, allShaderFeatures, - pls::InterlockMode::rasterOrdering, - pls::ShaderMiscFlags::none); + gpu::InterlockMode::rasterOrdering, + gpu::ShaderMiscFlags::none); m_drawPipelines[pipelineKey] = std::make_unique( m_gpu, m_plsPrecompiledLibrary, DrawPipeline::GetPrecompiledFunctionName(drawType, allShaderFeatures & - pls::kVertexShaderFeaturesMask, + gpu::kVertexShaderFeaturesMask, m_plsPrecompiledLibrary, GLSL_drawVertexMain), DrawPipeline::GetPrecompiledFunctionName( drawType, allShaderFeatures, m_plsPrecompiledLibrary, GLSL_drawFragmentMain), drawType, - pls::InterlockMode::rasterOrdering, + gpu::InterlockMode::rasterOrdering, allShaderFeatures, - pls::ShaderMiscFlags::none); + gpu::ShaderMiscFlags::none); } } @@ -413,12 +413,12 @@ void onUnmapAndSubmitBuffer(int bufferIdx, size_t mapSizeInBytes) override {} GeneratePatchBufferData(reinterpret_cast(m_pathPatchVertexBuffer.contents), reinterpret_cast(m_pathPatchIndexBuffer.contents)); - // Set up the imageRect rendering buffers. (pls::InterlockMode::atomics only.) - m_imageRectVertexBuffer = [m_gpu newBufferWithBytes:pls::kImageRectVertices - length:sizeof(pls::kImageRectVertices) + // Set up the imageRect rendering buffers. (gpu::InterlockMode::atomics only.) 
+ m_imageRectVertexBuffer = [m_gpu newBufferWithBytes:gpu::kImageRectVertices + length:sizeof(gpu::kImageRectVertices) options:MTLResourceStorageModeShared]; - m_imageRectIndexBuffer = [m_gpu newBufferWithBytes:pls::kImageRectIndices - length:sizeof(pls::kImageRectIndices) + m_imageRectIndexBuffer = [m_gpu newBufferWithBytes:gpu::kImageRectIndices + length:sizeof(gpu::kImageRectIndices) options:MTLResourceStorageModeShared]; } @@ -485,7 +485,7 @@ void onUnmapAndSubmitBuffer(int bufferIdx, size_t mapSizeInBytes) override {} lite_rtti_override(renderBufferType, renderBufferFlags, sizeInBytes), m_gpu(gpu) { int bufferCount = - flags() & RenderBufferFlags::mappedOnceAtInitialization ? 1 : pls::kBufferRingSize; + flags() & RenderBufferFlags::mappedOnceAtInitialization ? 1 : gpu::kBufferRingSize; for (int i = 0; i < bufferCount; ++i) { m_buffers[i] = [gpu newBufferWithLength:sizeInBytes @@ -498,7 +498,7 @@ void onUnmapAndSubmitBuffer(int bufferIdx, size_t mapSizeInBytes) override {} protected: void* onMap() override { - m_submittedBufferIdx = (m_submittedBufferIdx + 1) % pls::kBufferRingSize; + m_submittedBufferIdx = (m_submittedBufferIdx + 1) % gpu::kBufferRingSize; assert(m_buffers[m_submittedBufferIdx] != nil); return m_buffers[m_submittedBufferIdx].contents; } @@ -507,7 +507,7 @@ void onUnmap() override {} private: id m_gpu; - id m_buffers[pls::kBufferRingSize]; + id m_buffers[gpu::kBufferRingSize]; int m_submittedBufferIdx = -1; }; @@ -580,7 +580,7 @@ void ensureMipmaps(id commandBuffer) const } std::unique_ptr PLSRenderContextMetalImpl::makeStorageBufferRing( - size_t capacityInBytes, pls::StorageBufferStructure) + size_t capacityInBytes, gpu::StorageBufferStructure) { return BufferRingMetalImpl::Make(m_gpu, capacityInBytes); } @@ -633,13 +633,13 @@ void ensureMipmaps(id commandBuffer) const } const PLSRenderContextMetalImpl::DrawPipeline* PLSRenderContextMetalImpl:: - findCompatibleDrawPipeline(pls::DrawType drawType, - pls::ShaderFeatures shaderFeatures, - pls::InterlockMode interlockMode, - pls::ShaderMiscFlags shaderMiscFlags) + findCompatibleDrawPipeline(gpu::DrawType drawType, + gpu::ShaderFeatures shaderFeatures, + gpu::InterlockMode interlockMode, + gpu::ShaderMiscFlags shaderMiscFlags) { uint32_t pipelineKey = - pls::ShaderUniqueKey(drawType, shaderFeatures, interlockMode, shaderMiscFlags); + gpu::ShaderUniqueKey(drawType, shaderFeatures, interlockMode, shaderMiscFlags); auto pipelineIter = m_drawPipelines.find(pipelineKey); if (pipelineIter == m_drawPipelines.end()) { @@ -663,8 +663,8 @@ void ensureMipmaps(id commandBuffer) const // The shader for this pipeline hasn't finished compiling yet. Start by finding a fully-featured // superset of features whose pipeline we can fall back on while waiting for it to compile. ShaderFeatures fullyFeaturedPipelineFeatures = - pls::ShaderFeaturesMaskFor(drawType, interlockMode); - if (interlockMode == pls::InterlockMode::atomics) + gpu::ShaderFeaturesMaskFor(drawType, interlockMode); + if (interlockMode == gpu::InterlockMode::atomics) { // Never add ENABLE_ADVANCED_BLEND to an atomic pipeline that doesn't use advanced blend, // since in atomic mode, the shaders behave differently depending on whether advanced blend @@ -680,7 +680,7 @@ void ensureMipmaps(id commandBuffer) const // Fully-featured "rasterOrdering" pipelines should have already been pre-loaded from the static // library. 
assert(shaderFeatures != fullyFeaturedPipelineFeatures || - interlockMode != pls::InterlockMode::rasterOrdering); + interlockMode != gpu::InterlockMode::rasterOrdering); // Poll to see if the shader is actually done compiling, but only wait if it's a fully-featured // pipeline. Otherwise, we can fall back on the fully-featured pipeline while we wait for @@ -690,7 +690,7 @@ void ensureMipmaps(id commandBuffer) const m_contextOptions.synchronousShaderCompilations; while (m_backgroundShaderCompiler->popFinishedJob(&job, shouldWaitForBackgroundCompilation)) { - uint32_t jobKey = pls::ShaderUniqueKey( + uint32_t jobKey = gpu::ShaderUniqueKey( job.drawType, job.shaderFeatures, job.interlockMode, job.shaderMiscFlags); m_drawPipelines[jobKey] = std::make_unique(m_gpu, job.compiledLibrary, @@ -742,10 +742,10 @@ static MTLViewport make_viewport(uint32_t x, uint32_t y, uint32_t width, uint32_ } id PLSRenderContextMetalImpl::makeRenderPassForDraws( - const pls::FlushDescriptor& flushDesc, + const gpu::FlushDescriptor& flushDesc, MTLRenderPassDescriptor* passDesc, id commandBuffer, - pls::ShaderMiscFlags baselineShaderMiscFlags) + gpu::ShaderMiscFlags baselineShaderMiscFlags) { auto* renderTarget = static_cast(flushDesc.renderTarget); @@ -764,45 +764,45 @@ static MTLViewport make_viewport(uint32_t x, uint32_t y, uint32_t width, uint32_ if (flushDesc.pathCount > 0) { [encoder setVertexBuffer:mtl_buffer(pathBufferRing()) - offset:flushDesc.firstPath * sizeof(pls::PathData) + offset:flushDesc.firstPath * sizeof(gpu::PathData) atIndex:PATH_BUFFER_IDX]; - if (flushDesc.interlockMode == pls::InterlockMode::atomics) + if (flushDesc.interlockMode == gpu::InterlockMode::atomics) { [encoder setFragmentBuffer:mtl_buffer(paintBufferRing()) - offset:flushDesc.firstPaint * sizeof(pls::PaintData) + offset:flushDesc.firstPaint * sizeof(gpu::PaintData) atIndex:PAINT_BUFFER_IDX]; [encoder setFragmentBuffer:mtl_buffer(paintAuxBufferRing()) - offset:flushDesc.firstPaintAux * sizeof(pls::PaintAuxData) + offset:flushDesc.firstPaintAux * sizeof(gpu::PaintAuxData) atIndex:PAINT_AUX_BUFFER_IDX]; } else { [encoder setVertexBuffer:mtl_buffer(paintBufferRing()) - offset:flushDesc.firstPaint * sizeof(pls::PaintData) + offset:flushDesc.firstPaint * sizeof(gpu::PaintData) atIndex:PAINT_BUFFER_IDX]; [encoder setVertexBuffer:mtl_buffer(paintAuxBufferRing()) - offset:flushDesc.firstPaintAux * sizeof(pls::PaintAuxData) + offset:flushDesc.firstPaintAux * sizeof(gpu::PaintAuxData) atIndex:PAINT_AUX_BUFFER_IDX]; } } if (flushDesc.contourCount > 0) { [encoder setVertexBuffer:mtl_buffer(contourBufferRing()) - offset:flushDesc.firstContour * sizeof(pls::ContourData) + offset:flushDesc.firstContour * sizeof(gpu::ContourData) atIndex:CONTOUR_BUFFER_IDX]; } - if (flushDesc.interlockMode == pls::InterlockMode::atomics) + if (flushDesc.interlockMode == gpu::InterlockMode::atomics) { // In atomic mode, the PLS planes are buffers that we need to bind separately. // Since the PLS plane indices collide with other buffer bindings, offset the binding // indices of these buffers by DEFAULT_BINDINGS_SET_SIZE.
- if (!(baselineShaderMiscFlags & pls::ShaderMiscFlags::fixedFunctionColorBlend)) + if (!(baselineShaderMiscFlags & gpu::ShaderMiscFlags::fixedFunctionColorBlend)) { [encoder setFragmentBuffer:renderTarget->colorAtomicBuffer() offset:0 atIndex:COLOR_PLANE_IDX + DEFAULT_BINDINGS_SET_SIZE]; } - if (flushDesc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_CLIPPING) + if (flushDesc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_CLIPPING) { [encoder setFragmentBuffer:renderTarget->clipAtomicBuffer() offset:0 @@ -821,7 +821,7 @@ static MTLViewport make_viewport(uint32_t x, uint32_t y, uint32_t width, uint32_ void PLSRenderContextMetalImpl::flush(const FlushDescriptor& desc) { - assert(desc.interlockMode != pls::InterlockMode::depthStencil); // TODO: msaa. + assert(desc.interlockMode != gpu::InterlockMode::depthStencil); // TODO: msaa. auto* renderTarget = static_cast(desc.renderTarget); id commandBuffer = (__bridge id)desc.externalCommandBuffer; @@ -847,7 +847,7 @@ static MTLViewport make_viewport(uint32_t x, uint32_t y, uint32_t width, uint32_ offset:desc.flushUniformDataOffsetInBytes atIndex:FLUSH_UNIFORM_BUFFER_IDX]; [gradEncoder setVertexBuffer:mtl_buffer(gradSpanBufferRing()) - offset:desc.firstComplexGradSpan * sizeof(pls::GradientSpan) + offset:desc.firstComplexGradSpan * sizeof(gpu::GradientSpan) atIndex:0]; [gradEncoder setCullMode:MTLCullModeBack]; [gradEncoder drawPrimitives:MTLPrimitiveTypeTriangleStrip @@ -893,19 +893,19 @@ static MTLViewport make_viewport(uint32_t x, uint32_t y, uint32_t width, uint32_ offset:desc.flushUniformDataOffsetInBytes atIndex:FLUSH_UNIFORM_BUFFER_IDX]; [tessEncoder setVertexBuffer:mtl_buffer(tessSpanBufferRing()) - offset:desc.firstTessVertexSpan * sizeof(pls::TessVertexSpan) + offset:desc.firstTessVertexSpan * sizeof(gpu::TessVertexSpan) atIndex:0]; assert(desc.pathCount > 0); [tessEncoder setVertexBuffer:mtl_buffer(pathBufferRing()) - offset:desc.firstPath * sizeof(pls::PathData) + offset:desc.firstPath * sizeof(gpu::PathData) atIndex:PATH_BUFFER_IDX]; assert(desc.contourCount > 0); [tessEncoder setVertexBuffer:mtl_buffer(contourBufferRing()) - offset:desc.firstContour * sizeof(pls::ContourData) + offset:desc.firstContour * sizeof(gpu::ContourData) atIndex:CONTOUR_BUFFER_IDX]; [tessEncoder setCullMode:MTLCullModeBack]; [tessEncoder drawIndexedPrimitives:MTLPrimitiveTypeTriangle - indexCount:std::size(pls::kTessSpanIndices) + indexCount:std::size(gpu::kTessSpanIndices) indexType:MTLIndexTypeUInt16 indexBuffer:m_tessSpanIndexBuffer indexBufferOffset:0 @@ -929,7 +929,7 @@ static MTLViewport make_viewport(uint32_t x, uint32_t y, uint32_t width, uint32_ pass.colorAttachments[COLOR_PLANE_IDX].texture = renderTarget->targetTexture(); switch (desc.colorLoadAction) { - case pls::LoadAction::clear: + case gpu::LoadAction::clear: { float cc[4]; UnpackColorToRGBA32F(desc.clearColor, cc); @@ -938,25 +938,25 @@ static MTLViewport make_viewport(uint32_t x, uint32_t y, uint32_t width, uint32_ MTLClearColorMake(cc[0], cc[1], cc[2], cc[3]); break; } - case pls::LoadAction::preserveRenderTarget: + case gpu::LoadAction::preserveRenderTarget: pass.colorAttachments[COLOR_PLANE_IDX].loadAction = MTLLoadActionLoad; break; - case pls::LoadAction::dontCare: + case gpu::LoadAction::dontCare: pass.colorAttachments[COLOR_PLANE_IDX].loadAction = MTLLoadActionDontCare; break; } pass.colorAttachments[COLOR_PLANE_IDX].storeAction = MTLStoreActionStore; - auto baselineShaderMiscFlags = pls::ShaderMiscFlags::none; + auto baselineShaderMiscFlags = gpu::ShaderMiscFlags::none; - 
if (desc.interlockMode == pls::InterlockMode::rasterOrdering) + if (desc.interlockMode == gpu::InterlockMode::rasterOrdering) { // In rasterOrdering mode, the PLS planes are accessed as color attachments. pass.colorAttachments[CLIP_PLANE_IDX].texture = renderTarget->m_clipMemorylessTexture; pass.colorAttachments[CLIP_PLANE_IDX].loadAction = MTLLoadActionClear; pass.colorAttachments[CLIP_PLANE_IDX].clearColor = MTLClearColorMake(0, 0, 0, 0); pass.colorAttachments[CLIP_PLANE_IDX].storeAction = - desc.interlockMode == pls::InterlockMode::atomics ? MTLStoreActionStore + desc.interlockMode == gpu::InterlockMode::atomics ? MTLStoreActionStore : MTLStoreActionDontCare; pass.colorAttachments[SCRATCH_COLOR_PLANE_IDX].texture = @@ -970,19 +970,19 @@ static MTLViewport make_viewport(uint32_t x, uint32_t y, uint32_t width, uint32_ pass.colorAttachments[COVERAGE_PLANE_IDX].clearColor = MTLClearColorMake(desc.coverageClearValue, 0, 0, 0); pass.colorAttachments[COVERAGE_PLANE_IDX].storeAction = - desc.interlockMode == pls::InterlockMode::atomics ? MTLStoreActionStore + desc.interlockMode == gpu::InterlockMode::atomics ? MTLStoreActionStore : MTLStoreActionDontCare; } - else if (!(desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_ADVANCED_BLEND)) + else if (!(desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_ADVANCED_BLEND)) { - assert(desc.interlockMode == pls::InterlockMode::atomics); - baselineShaderMiscFlags |= pls::ShaderMiscFlags::fixedFunctionColorBlend; + assert(desc.interlockMode == gpu::InterlockMode::atomics); + baselineShaderMiscFlags |= gpu::ShaderMiscFlags::fixedFunctionColorBlend; } - else if (desc.colorLoadAction == pls::LoadAction::preserveRenderTarget) + else if (desc.colorLoadAction == gpu::LoadAction::preserveRenderTarget) { // Since we need to preserve the renderTarget during load, and since we're rendering to an // offscreen color buffer, we have to literally copy the renderTarget into the color buffer. - assert(desc.interlockMode == pls::InterlockMode::atomics); + assert(desc.interlockMode == gpu::InterlockMode::atomics); id copyEncoder = [commandBuffer blitCommandEncoder]; auto updateOrigin = MTLOriginMake(desc.renderTargetUpdateBounds.left, desc.renderTargetUpdateBounds.top, 0); @@ -1013,30 +1013,30 @@ static MTLViewport make_viewport(uint32_t x, uint32_t y, uint32_t width, uint32_ } // Setup the pipeline for this specific drawType and shaderFeatures. - pls::ShaderFeatures shaderFeatures = desc.interlockMode == pls::InterlockMode::atomics + gpu::ShaderFeatures shaderFeatures = desc.interlockMode == gpu::InterlockMode::atomics ? desc.combinedShaderFeatures : batch.shaderFeatures; - pls::ShaderMiscFlags batchMiscFlags = baselineShaderMiscFlags; - if (!(batchMiscFlags & pls::ShaderMiscFlags::fixedFunctionColorBlend)) + gpu::ShaderMiscFlags batchMiscFlags = baselineShaderMiscFlags; + if (!(batchMiscFlags & gpu::ShaderMiscFlags::fixedFunctionColorBlend)) { - if (batch.drawType == pls::DrawType::plsAtomicResolve) + if (batch.drawType == gpu::DrawType::gpuAtomicResolve) { // Atomic mode can always do a coalesced resolve when rendering to an offscreen // color buffer. 
- batchMiscFlags |= pls::ShaderMiscFlags::coalescedResolveAndTransfer; + batchMiscFlags |= gpu::ShaderMiscFlags::coalescedResolveAndTransfer; } - else if (batch.drawType == pls::DrawType::plsAtomicInitialize) + else if (batch.drawType == gpu::DrawType::gpuAtomicInitialize) { - if (desc.colorLoadAction == pls::LoadAction::clear) + if (desc.colorLoadAction == gpu::LoadAction::clear) { - batchMiscFlags |= pls::ShaderMiscFlags::storeColorClear; + batchMiscFlags |= gpu::ShaderMiscFlags::storeColorClear; } - else if (desc.colorLoadAction == pls::LoadAction::preserveRenderTarget && + else if (desc.colorLoadAction == gpu::LoadAction::preserveRenderTarget && renderTarget->pixelFormat() == MTLPixelFormatBGRA8Unorm) { // We already copied the renderTarget to our color buffer, but since the target // is BGRA, we also need to swizzle it to RGBA before it's ready for PLS. - batchMiscFlags |= pls::ShaderMiscFlags::swizzleColorBGRAToRGBA; + batchMiscFlags |= gpu::ShaderMiscFlags::swizzleColorBGRAToRGBA; } } } @@ -1097,10 +1097,10 @@ static MTLViewport make_viewport(uint32_t x, uint32_t y, uint32_t width, uint32_ [encoder setCullMode:MTLCullModeNone]; if (drawType == DrawType::imageRect) { - assert(desc.interlockMode == pls::InterlockMode::atomics); + assert(desc.interlockMode == gpu::InterlockMode::atomics); [encoder setVertexBuffer:m_imageRectVertexBuffer offset:0 atIndex:0]; [encoder drawIndexedPrimitives:MTLPrimitiveTypeTriangle - indexCount:std::size(pls::kImageRectIndices) + indexCount:std::size(gpu::kImageRectIndices) indexType:MTLIndexTypeUInt16 indexBuffer:m_imageRectIndexBuffer indexBufferOffset:0]; @@ -1122,10 +1122,10 @@ static MTLViewport make_viewport(uint32_t x, uint32_t y, uint32_t width, uint32_ } break; } - case DrawType::plsAtomicInitialize: - case DrawType::plsAtomicResolve: + case DrawType::gpuAtomicInitialize: + case DrawType::gpuAtomicResolve: { - assert(desc.interlockMode == pls::InterlockMode::atomics); + assert(desc.interlockMode == gpu::InterlockMode::atomics); [encoder setRenderPipelineState:drawPipelineState]; [encoder drawPrimitives:MTLPrimitiveTypeTriangleStrip vertexStart:0 vertexCount:4]; break; @@ -1135,7 +1135,7 @@ static MTLViewport make_viewport(uint32_t x, uint32_t y, uint32_t width, uint32_ RIVE_UNREACHABLE(); } } - if (desc.interlockMode == pls::InterlockMode::atomics && batch.needsBarrier) + if (desc.interlockMode == gpu::InterlockMode::atomics && batch.needsBarrier) { switch (m_metalFeatures.atomicBarrierType) { @@ -1181,4 +1181,4 @@ static MTLViewport make_viewport(uint32_t x, uint32_t y, uint32_t width, uint32_ }]; } } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/path_utils.cpp b/renderer/src/path_utils.cpp similarity index 100% rename from pls/renderer/path_utils.cpp rename to renderer/src/path_utils.cpp diff --git a/pls/renderer/path_utils.hpp b/renderer/src/path_utils.hpp similarity index 100% rename from pls/renderer/path_utils.hpp rename to renderer/src/path_utils.hpp diff --git a/pls/renderer/pls.cpp b/renderer/src/pls.cpp similarity index 96% rename from pls/renderer/pls.cpp rename to renderer/src/pls.cpp index 7d589da4..6ca83a44 100644 --- a/pls/renderer/pls.cpp +++ b/renderer/src/pls.cpp @@ -2,16 +2,16 @@ * Copyright 2022 Rive */ -#include "rive/pls/pls.hpp" +#include "rive/renderer/gpu.hpp" -#include "rive/pls/pls_render_target.hpp" +#include "rive/renderer/render_target.hpp" #include "shaders/constants.glsl" -#include "rive/pls/pls_image.hpp" -#include "pls_paint.hpp" +#include "rive/renderer/image.hpp" +#include 
"rive_render_paint.hpp" #include "generated/shaders/draw_path.exports.h" -namespace rive::pls +namespace rive::gpu { static_assert(kGradTextureWidth == GRAD_TEXTURE_WIDTH); static_assert(kTessTextureWidth == TESS_TEXTURE_WIDTH); @@ -24,13 +24,13 @@ uint32_t ShaderUniqueKey(DrawType drawType, { if (miscFlags & ShaderMiscFlags::coalescedResolveAndTransfer) { - assert(drawType == DrawType::plsAtomicResolve); + assert(drawType == DrawType::gpuAtomicResolve); assert(shaderFeatures & ShaderFeatures::ENABLE_ADVANCED_BLEND); assert(interlockMode == InterlockMode::atomics); } if (miscFlags & (ShaderMiscFlags::storeColorClear | ShaderMiscFlags::swizzleColorBGRAToRGBA)) { - assert(drawType == DrawType::plsAtomicInitialize); + assert(drawType == DrawType::gpuAtomicInitialize); } uint32_t drawTypeKey; switch (drawType) @@ -48,16 +48,16 @@ uint32_t ShaderUniqueKey(DrawType drawType, case DrawType::imageMesh: drawTypeKey = 3; break; - case DrawType::plsAtomicInitialize: - assert(interlockMode == pls::InterlockMode::atomics); + case DrawType::gpuAtomicInitialize: + assert(interlockMode == gpu::InterlockMode::atomics); drawTypeKey = 4; break; - case DrawType::plsAtomicResolve: - assert(interlockMode == pls::InterlockMode::atomics); + case DrawType::gpuAtomicResolve: + assert(interlockMode == gpu::InterlockMode::atomics); drawTypeKey = 5; break; case DrawType::stencilClipReset: - assert(interlockMode == pls::InterlockMode::depthStencil); + assert(interlockMode == gpu::InterlockMode::depthStencil); drawTypeKey = 6; break; } @@ -456,7 +456,7 @@ void PaintAuxData::set(const Mat2D& viewMatrix, const PLSTexture* imageTexture, const ClipRectInverseMatrix* clipRectInverseMatrix, const PLSRenderTarget* renderTarget, - const pls::PlatformFeatures& platformFeatures) + const gpu::PlatformFeatures& platformFeatures) { switch (paintType) { @@ -561,9 +561,9 @@ ImageDrawUniforms::ImageDrawUniforms(const Mat2D& matrix, std::tuple StorageTextureSize(size_t bufferSizeInBytes, StorageBufferStructure bufferStructure) { - assert(bufferSizeInBytes % pls::StorageBufferElementSizeInBytes(bufferStructure) == 0); + assert(bufferSizeInBytes % gpu::StorageBufferElementSizeInBytes(bufferStructure) == 0); uint32_t elementCount = math::lossless_numeric_cast(bufferSizeInBytes) / - pls::StorageBufferElementSizeInBytes(bufferStructure); + gpu::StorageBufferElementSizeInBytes(bufferStructure); uint32_t height = (elementCount + STORAGE_TEXTURE_WIDTH - 1) / STORAGE_TEXTURE_WIDTH; // PLSRenderContext is responsible for breaking up a flush before any storage buffer grows // larger than can be supported by a GL texture of width "STORAGE_TEXTURE_WIDTH". @@ -579,7 +579,7 @@ size_t StorageTextureBufferSize(size_t bufferSizeInBytes, StorageBufferStructure // The polyfill texture needs to be updated in entire rows at a time. Extend the buffer's length // to be able to service a worst-case scenario. 
return bufferSizeInBytes + - (STORAGE_TEXTURE_WIDTH - 1) * pls::StorageBufferElementSizeInBytes(bufferStructure); + (STORAGE_TEXTURE_WIDTH - 1) * gpu::StorageBufferElementSizeInBytes(bufferStructure); } float FindTransformedArea(const AABB& bounds, const Mat2D& matrix) @@ -595,4 +595,4 @@ float FindTransformedArea(const AABB& bounds, const Mat2D& matrix) screenSpacePts[3] - screenSpacePts[0]}; return (fabsf(Vec2D::cross(v[0], v[1])) + fabsf(Vec2D::cross(v[1], v[2]))) * .5f; } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/pls_render_context.cpp b/renderer/src/render_context.cpp similarity index 92% rename from pls/renderer/pls_render_context.cpp rename to renderer/src/render_context.cpp index d54cb47a..26775332 100644 --- a/pls/renderer/pls_render_context.cpp +++ b/renderer/src/render_context.cpp @@ -2,19 +2,19 @@ * Copyright 2022 Rive */ -#include "rive/pls/pls_render_context.hpp" +#include "rive/renderer/render_context.hpp" #include "gr_inner_fan_triangulator.hpp" #include "intersection_board.hpp" -#include "pls_paint.hpp" -#include "rive/pls/pls_draw.hpp" -#include "rive/pls/pls_image.hpp" -#include "rive/pls/pls_render_context_impl.hpp" +#include "rive_render_paint.hpp" +#include "rive/renderer/draw.hpp" +#include "rive/renderer/image.hpp" +#include "rive/renderer/render_context_impl.hpp" #include "shaders/constants.glsl" #include -namespace rive::pls +namespace rive::gpu { constexpr size_t kDefaultSimpleGradientCapacity = 512; constexpr size_t kDefaultComplexGradientCapacity = 1024; @@ -23,14 +23,14 @@ constexpr size_t kDefaultDrawCapacity = 2048; constexpr uint32_t kMaxTextureHeight = 2048; // TODO: Move this variable to PlatformFeatures. constexpr size_t kMaxTessellationVertexCount = kMaxTextureHeight * kTessTextureWidth; constexpr size_t kMaxTessellationPaddingVertexCount = - pls::kMidpointFanPatchSegmentSpan + // Padding at the beginning of the tess texture - (pls::kOuterCurvePatchSegmentSpan - 1) + // Max padding between patch types in the tess texture + gpu::kMidpointFanPatchSegmentSpan + // Padding at the beginning of the tess texture + (gpu::kOuterCurvePatchSegmentSpan - 1) + // Max padding between patch types in the tess texture 1; // Padding at the end of the tessellation texture constexpr size_t kMaxTessellationVertexCountBeforePadding = kMaxTessellationVertexCount - kMaxTessellationPaddingVertexCount; // Metal requires vertex buffers to be 256-byte aligned. -constexpr size_t kMaxTessellationAlignmentVertices = pls::kTessVertexBufferAlignmentInElements - 1; +constexpr size_t kMaxTessellationAlignmentVertices = gpu::kTessVertexBufferAlignmentInElements - 1; // We can only reorder 32767 draws at a time since the one-based groupIndex returned by // IntersectionBoard is a signed 16-bit integer. 
@@ -44,7 +44,7 @@ template constexpr static size_t resource_texture_height(s constexpr static size_t gradient_data_height(size_t simpleRampCount, size_t complexRampCount) { - return resource_texture_height(simpleRampCount) + + return resource_texture_height(simpleRampCount) + complexRampCount; } @@ -103,7 +103,7 @@ PLSRenderContext::~PLSRenderContext() m_logicalFlushes.clear(); } -const pls::PlatformFeatures& PLSRenderContext::platformFeatures() const +const gpu::PlatformFeatures& PLSRenderContext::platformFeatures() const { return m_impl->platformFeatures(); } @@ -176,10 +176,10 @@ void PLSRenderContext::LogicalFlush::rewind() m_flushDesc = FlushDescriptor(); m_drawList.reset(); - m_combinedShaderFeatures = pls::ShaderFeatures::NONE; + m_combinedShaderFeatures = gpu::ShaderFeatures::NONE; m_currentPathIsStroked = false; - m_currentPathContourDirections = pls::ContourDirections::none; + m_currentPathContourDirections = gpu::ContourDirections::none; m_currentPathID = 0; m_currentContourID = 0; m_currentContourPaddingVertexCount = 0; @@ -231,17 +231,17 @@ void PLSRenderContext::beginFrame(const FrameDescriptor& frameDescriptor) } if (m_frameDescriptor.msaaSampleCount > 0) { - m_frameInterlockMode = pls::InterlockMode::depthStencil; + m_frameInterlockMode = gpu::InterlockMode::depthStencil; } else if (m_frameDescriptor.disableRasterOrdering || !platformFeatures().supportsRasterOrdering) { - m_frameInterlockMode = pls::InterlockMode::atomics; + m_frameInterlockMode = gpu::InterlockMode::atomics; } else { - m_frameInterlockMode = pls::InterlockMode::rasterOrdering; + m_frameInterlockMode = gpu::InterlockMode::rasterOrdering; } - m_frameShaderFeaturesMask = pls::ShaderFeaturesMaskFor(m_frameInterlockMode); + m_frameShaderFeaturesMask = gpu::ShaderFeaturesMaskFor(m_frameInterlockMode); if (m_logicalFlushes.empty()) { m_logicalFlushes.emplace_back(new LogicalFlush(this)); @@ -261,14 +261,14 @@ bool PLSRenderContext::isOutsideCurrentFrame(const IAABB& pixelBounds) bool PLSRenderContext::frameSupportsClipRects() const { assert(m_didBeginFrame); - return m_frameInterlockMode != pls::InterlockMode::depthStencil || + return m_frameInterlockMode != gpu::InterlockMode::depthStencil || platformFeatures().supportsClipPlanes; } bool PLSRenderContext::frameSupportsImagePaintForPaths() const { assert(m_didBeginFrame); - return m_frameInterlockMode != pls::InterlockMode::atomics || + return m_frameInterlockMode != gpu::InterlockMode::atomics || platformFeatures().supportsBindlessTextures; } @@ -317,7 +317,7 @@ bool PLSRenderContext::LogicalFlush::pushDrawBatch(PLSDrawUniquePtr draws[], siz { assert(!m_hasDoneLayout); - if (m_flushDesc.interlockMode == pls::InterlockMode::atomics && + if (m_flushDesc.interlockMode == gpu::InterlockMode::atomics && m_drawList.count() + drawCount > kMaxReorderedDrawCount) { // We can only reorder 64k draws at a time since the sort key addresses them with a 16-bit @@ -367,7 +367,7 @@ bool PLSRenderContext::LogicalFlush::pushDrawBatch(PLSDrawUniquePtr draws[], siz bool PLSRenderContext::LogicalFlush::allocateGradient(const PLSGradient* gradient, PLSDraw::ResourceCounters* counters, - pls::ColorRampLocation* colorRampLocation) + gpu::ColorRampLocation* colorRampLocation) { assert(!m_hasDoneLayout); @@ -481,7 +481,7 @@ void PLSRenderContext::flush(const FlushResources& flushResources) // The gradient texture needs to be updated in entire rows at a time. Extend its // texture-transfer buffer's length in order to be able to serve a worst-case scenario. 
allocs.simpleGradientBufferCount = - layoutCounts.simpleGradCount + pls::kGradTextureWidthInSimpleRamps - 1; + layoutCounts.simpleGradCount + gpu::kGradTextureWidthInSimpleRamps - 1; allocs.complexGradSpanBufferCount = totalFrameResourceCounts.complexGradientSpanCount + layoutCounts.gradSpanPaddingCount; allocs.tessSpanBufferCount = totalFrameResourceCounts.maxTessellatedSegmentCount; @@ -594,16 +594,16 @@ void PLSRenderContext::LogicalFlush::layoutResources(const FlushResources& flush // Storage buffer offsets are required to be aligned on multiples of 256. m_pathPaddingCount = - pls::PaddingToAlignUp(m_resourceCounts.pathCount); + gpu::PaddingToAlignUp(m_resourceCounts.pathCount); m_paintPaddingCount = - pls::PaddingToAlignUp(m_resourceCounts.pathCount); + gpu::PaddingToAlignUp(m_resourceCounts.pathCount); m_paintAuxPaddingCount = - pls::PaddingToAlignUp(m_resourceCounts.pathCount); - m_contourPaddingCount = pls::PaddingToAlignUp( + gpu::PaddingToAlignUp(m_resourceCounts.pathCount); + m_contourPaddingCount = gpu::PaddingToAlignUp( m_resourceCounts.contourCount); // Metal requires vertex buffers to be 256-byte aligned. - m_gradSpanPaddingCount = pls::PaddingToAlignUp( + m_gradSpanPaddingCount = gpu::PaddingToAlignUp( m_resourceCounts.complexGradientSpanCount); size_t totalTessVertexCountWithPadding = 0; @@ -612,7 +612,7 @@ void PLSRenderContext::LogicalFlush::layoutResources(const FlushResources& flush { // midpointFan tessellation vertices reside at the beginning of the tessellation texture, // after 1 patch of padding vertices. - constexpr uint32_t kPrePadding = pls::kMidpointFanPatchSegmentSpan; + constexpr uint32_t kPrePadding = gpu::kMidpointFanPatchSegmentSpan; m_midpointFanTessVertexIdx = kPrePadding; m_midpointFanTessEndLocation = m_midpointFanTessVertexIdx + @@ -621,7 +621,7 @@ void PLSRenderContext::LogicalFlush::layoutResources(const FlushResources& flush // outerCubic tessellation vertices reside after the midpointFan vertices, aligned on a // multiple of the outerCubic patch size. uint32_t interiorPadding = - PaddingToAlignUp(m_midpointFanTessEndLocation); + PaddingToAlignUp(m_midpointFanTessEndLocation); m_outerCubicTessVertexIdx = m_midpointFanTessEndLocation + interiorPadding; m_outerCubicTessEndLocation = m_outerCubicTessVertexIdx + @@ -662,16 +662,16 @@ void PLSRenderContext::LogicalFlush::layoutResources(const FlushResources& flush if (logicalFlushIdx != 0) { // We always have to preserve the renderTarget between logical flushes. - m_flushDesc.colorLoadAction = pls::LoadAction::preserveRenderTarget; + m_flushDesc.colorLoadAction = gpu::LoadAction::preserveRenderTarget; } - else if (frameDescriptor.loadAction == pls::LoadAction::clear) + else if (frameDescriptor.loadAction == gpu::LoadAction::clear) { // In atomic mode, we can clear during the resolve operation if the clearColor is opaque // (because we don't want or have a "source only" blend mode). - doClearDuringAtomicResolve = m_ctx->frameInterlockMode() == pls::InterlockMode::atomics && + doClearDuringAtomicResolve = m_ctx->frameInterlockMode() == gpu::InterlockMode::atomics && colorAlpha(frameDescriptor.clearColor) == 255; m_flushDesc.colorLoadAction = - doClearDuringAtomicResolve ? pls::LoadAction::dontCare : pls::LoadAction::clear; + doClearDuringAtomicResolve ? 
gpu::LoadAction::dontCare : gpu::LoadAction::clear; } else { @@ -686,10 +686,10 @@ void PLSRenderContext::LogicalFlush::layoutResources(const FlushResources& flush // pathID=0 to be a solid fill matching the clearColor, so if we just initialize coverage // buffer to solid coverage with pathID=0, the resolve step will write out the correct clear // color. - assert(m_flushDesc.interlockMode == pls::InterlockMode::atomics); + assert(m_flushDesc.interlockMode == gpu::InterlockMode::atomics); m_flushDesc.coverageClearValue = static_cast(FIXED_COVERAGE_ONE); } - else if (m_flushDesc.interlockMode == pls::InterlockMode::atomics) + else if (m_flushDesc.interlockMode == gpu::InterlockMode::atomics) { // When we don't skip the initial clear in atomic mode, clear the coverage buffer to // pathID=0 and a transparent coverage value. @@ -704,7 +704,7 @@ void PLSRenderContext::LogicalFlush::layoutResources(const FlushResources& flush m_flushDesc.coverageClearValue = 0; } - if (doClearDuringAtomicResolve || m_flushDesc.colorLoadAction == pls::LoadAction::clear) + if (doClearDuringAtomicResolve || m_flushDesc.colorLoadAction == gpu::LoadAction::clear) { // If we're clearing then we always update the entire render target. m_flushDesc.renderTargetUpdateBounds = m_flushDesc.renderTarget->bounds(); @@ -721,7 +721,7 @@ void PLSRenderContext::LogicalFlush::layoutResources(const FlushResources& flush m_flushDesc.renderTargetUpdateBounds = {0, 0, 0, 0}; } - m_flushDesc.flushUniformDataOffsetInBytes = logicalFlushIdx * sizeof(pls::FlushUniforms); + m_flushDesc.flushUniformDataOffsetInBytes = logicalFlushIdx * sizeof(gpu::FlushUniforms); m_flushDesc.pathCount = math::lossless_numeric_cast(m_resourceCounts.pathCount); m_flushDesc.firstPath = runningFrameResourceCounts->pathCount + runningFrameLayoutCounts->pathPaddingCount; @@ -738,12 +738,12 @@ void PLSRenderContext::LogicalFlush::layoutResources(const FlushResources& flush runningFrameLayoutCounts->gradSpanPaddingCount; m_flushDesc.simpleGradTexelsWidth = std::min(math::lossless_numeric_cast(m_simpleGradients.size()), - pls::kGradTextureWidthInSimpleRamps) * + gpu::kGradTextureWidthInSimpleRamps) * 2; m_flushDesc.simpleGradTexelsHeight = static_cast( - resource_texture_height(m_simpleGradients.size())); + resource_texture_height(m_simpleGradients.size())); m_flushDesc.simpleGradDataOffsetInBytes = - runningFrameLayoutCounts->simpleGradCount * sizeof(pls::TwoTexelRamp); + runningFrameLayoutCounts->simpleGradCount * sizeof(gpu::TwoTexelRamp); m_flushDesc.complexGradRowsTop = m_flushDesc.simpleGradTexelsHeight; m_flushDesc.complexGradRowsHeight = math::lossless_numeric_cast(m_complexGradients.size()); @@ -771,17 +771,17 @@ void PLSRenderContext::LogicalFlush::layoutResources(const FlushResources& flush runningFrameLayoutCounts->maxTessTextureHeight = std::max(m_flushDesc.tessDataHeight, runningFrameLayoutCounts->maxTessTextureHeight); - assert(m_flushDesc.firstPath % pls::kPathBufferAlignmentInElements == 0); - assert(m_flushDesc.firstPaint % pls::kPaintBufferAlignmentInElements == 0); - assert(m_flushDesc.firstPaintAux % pls::kPaintAuxBufferAlignmentInElements == 0); - assert(m_flushDesc.firstContour % pls::kContourBufferAlignmentInElements == 0); - assert(m_flushDesc.firstComplexGradSpan % pls::kGradSpanBufferAlignmentInElements == 0); + assert(m_flushDesc.firstPath % gpu::kPathBufferAlignmentInElements == 0); + assert(m_flushDesc.firstPaint % gpu::kPaintBufferAlignmentInElements == 0); + assert(m_flushDesc.firstPaintAux % gpu::kPaintAuxBufferAlignmentInElements == 
0); + assert(m_flushDesc.firstContour % gpu::kContourBufferAlignmentInElements == 0); + assert(m_flushDesc.firstComplexGradSpan % gpu::kGradSpanBufferAlignmentInElements == 0); RIVE_DEBUG_CODE(m_hasDoneLayout = true;) } void PLSRenderContext::LogicalFlush::writeResources() { - const pls::PlatformFeatures& platformFeatures = m_ctx->platformFeatures(); + const gpu::PlatformFeatures& platformFeatures = m_ctx->platformFeatures(); assert(m_hasDoneLayout); assert(m_flushDesc.firstPath == m_ctx->m_pathData.elementsWritten()); assert(m_flushDesc.firstPaint == m_ctx->m_paintData.elementsWritten()); @@ -798,7 +798,7 @@ void PLSRenderContext::LogicalFlush::writeResources() // Metal requires vertex buffers to be 256-byte aligned. size_t tessAlignmentPadding = - pls::PaddingToAlignUp(firstTessVertexSpan); + gpu::PaddingToAlignUp(firstTessVertexSpan); assert(tessAlignmentPadding <= kMaxTessellationAlignmentVertices); m_ctx->m_tessSpanData.push_back_n(nullptr, tessAlignmentPadding); m_flushDesc.firstTessVertexSpan = firstTessVertexSpan + tessAlignmentPadding; @@ -850,7 +850,7 @@ void PLSRenderContext::LogicalFlush::writeResources() // Write a path record for the clearColor paint (used by atomic mode). // This also allows us to index the storage buffers directly by pathID. - pls::SimplePaintValue clearColorValue; + gpu::SimplePaintValue clearColorValue; clearColorValue.color = m_ctx->frameDescriptor().clearColor; m_ctx->m_pathData.skip_back(); m_ctx->m_paintData.set_back(FillRule::nonZero, @@ -866,7 +866,7 @@ void PLSRenderContext::LogicalFlush::writeResources() if (m_flushDesc.tessDataHeight > 0) { // Padding at the beginning of the tessellation texture. - pushPaddingVertices(0, pls::kMidpointFanPatchSegmentSpan); + pushPaddingVertices(0, gpu::kMidpointFanPatchSegmentSpan); // Padding between patch types in the tessellation texture. if (m_outerCubicTessVertexIdx > m_midpointFanTessEndLocation) { @@ -881,7 +881,7 @@ void PLSRenderContext::LogicalFlush::writeResources() } // Write out all the data for our high level draws, and build up a low-level draw list. - if (m_ctx->frameInterlockMode() == pls::InterlockMode::rasterOrdering) + if (m_ctx->frameInterlockMode() == gpu::InterlockMode::rasterOrdering) { for (const PLSDrawUniquePtr& draw : m_plsDraws) { @@ -934,7 +934,7 @@ void PLSRenderContext::LogicalFlush::writeResources() // to maximize batching while preserving correctness. int64_t drawGroupIdx = intersectionBoard->addRectangle(drawBounds); assert(drawGroupIdx > 0); - if (m_flushDesc.interlockMode == pls::InterlockMode::depthStencil && draw->isOpaque()) + if (m_flushDesc.interlockMode == gpu::InterlockMode::depthStencil && draw->isOpaque()) { // In depthStencil mode we can reverse-sort opaque paths front to back, draw them // first, and take advantage of early Z culling. @@ -943,7 +943,7 @@ void PLSRenderContext::LogicalFlush::writeResources() // (Otherwise if a clip affects both opaque and transparent content, we would have // to apply it twice.) bool usesClipping = draw->drawContents() & - (pls::DrawContents::activeClip | pls::DrawContents::clipUpdate); + (gpu::DrawContents::activeClip | gpu::DrawContents::clipUpdate); if (!usesClipping) { drawGroupIdx = -drawGroupIdx; @@ -967,7 +967,7 @@ void PLSRenderContext::LogicalFlush::writeResources() // different blend modes. // If not using KHR_blend_equation_advanced, sorting by blend mode may still give us // better branching on the GPU. 
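The sort-key logic around here is easier to follow with a sketch. Assuming, purely for illustration, a 64-bit key laid out as [draw group | blend mode | 16-bit draw index] (the renderer's actual kDrawGroupMask/kBlendModeShift constants may use different widths), sorting the keys clusters compatible draws into batches while the low bits recover which draw each key refers to; the 16-bit index field is exactly why only 64k draws can be reordered per flush:

    #include <cstdint>

    // Illustrative sort-key packing; field widths are assumptions.
    constexpr int kDrawIndexBits  = 16; // limits reordering to 64k draws
    constexpr int kBlendModeShift = kDrawIndexBits;
    constexpr int kBlendModeBits  = 4;
    constexpr int kDrawGroupShift = kBlendModeShift + kBlendModeBits;

    constexpr int64_t makeSortKey(int64_t drawGroupIdx, int64_t blendMode, uint16_t drawIdx)
    {
        // Higher fields dominate the sort order; draws in the same group and
        // blend mode end up adjacent and can be batched.
        return (drawGroupIdx << kDrawGroupShift) | (blendMode << kBlendModeShift) | drawIdx;
    }

    static_assert(makeSortKey(1, 0, 0) > makeSortKey(0, 15, 0xffff),
                  "draw group dominates the sort order");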
- int64_t blendMode = pls::ConvertBlendModeToPLSBlendMode(draw->blendMode()); + int64_t blendMode = gpu::ConvertBlendModeToPLSBlendMode(draw->blendMode()); assert(blendMode <= kBlendModeMask >> kBlendModeShift); key |= blendMode << kBlendModeShift; @@ -995,11 +995,11 @@ void PLSRenderContext::LogicalFlush::writeResources() // Atomic mode sometimes needs to initialize PLS with a draw when the backend can't do it // with typical clear/load APIs. - if (m_ctx->frameInterlockMode() == pls::InterlockMode::atomics && + if (m_ctx->frameInterlockMode() == gpu::InterlockMode::atomics && platformFeatures.atomicPLSMustBeInitializedAsDraw) { m_drawList.emplace_back(m_ctx->perFrameAllocator(), - DrawType::plsAtomicInitialize, + DrawType::gpuAtomicInitialize, nullptr, 1, 0); @@ -1009,7 +1009,7 @@ void PLSRenderContext::LogicalFlush::writeResources() // Draws with the same drawGroupIdx don't overlap, but once we cross into a new draw group, // we need to insert a barrier between the overlaps. int64_t needsBarrierMask = kDrawGroupMask; - if (m_flushDesc.interlockMode == pls::InterlockMode::depthStencil) + if (m_flushDesc.interlockMode == gpu::InterlockMode::depthStencil) { // depthStencil mode also draws clips, strokes, fills, and even/odd with different // stencil settings, so these also need a barrier. @@ -1040,11 +1040,11 @@ void PLSRenderContext::LogicalFlush::writeResources() } // Atomic mode needs one more draw to resolve all the pixels. - if (m_ctx->frameInterlockMode() == pls::InterlockMode::atomics) + if (m_ctx->frameInterlockMode() == gpu::InterlockMode::atomics) { pushBarrier(); m_drawList.emplace_back(m_ctx->perFrameAllocator(), - DrawType::plsAtomicResolve, + DrawType::gpuAtomicResolve, nullptr, 1, 0); @@ -1128,7 +1128,7 @@ void PLSRenderContext::setResourceSizes(ResourceAllocationCounts allocs, bool fo logger.logSize(#NAME, \ m_currentResourceAllocations.NAME, \ allocs.NAME, \ - allocs.NAME* ITEM_SIZE_IN_BYTES* pls::kBufferRingSize) + allocs.NAME* ITEM_SIZE_IN_BYTES* gpu::kBufferRingSize) #define LOG_TEXTURE_HEIGHT(NAME, BYTES_PER_ROW) \ logger.logSize(#NAME, \ m_currentResourceAllocations.NAME, \ @@ -1139,102 +1139,102 @@ void PLSRenderContext::setResourceSizes(ResourceAllocationCounts allocs, bool fo #define LOG_TEXTURE_HEIGHT(NAME, BYTES_PER_ROW) #endif - LOG_BUFFER_RING_SIZE(flushUniformBufferCount, sizeof(pls::FlushUniforms)); + LOG_BUFFER_RING_SIZE(flushUniformBufferCount, sizeof(gpu::FlushUniforms)); if (allocs.flushUniformBufferCount != m_currentResourceAllocations.flushUniformBufferCount || forceRealloc) { m_impl->resizeFlushUniformBuffer(allocs.flushUniformBufferCount * - sizeof(pls::FlushUniforms)); + sizeof(gpu::FlushUniforms)); } - LOG_BUFFER_RING_SIZE(imageDrawUniformBufferCount, sizeof(pls::ImageDrawUniforms)); + LOG_BUFFER_RING_SIZE(imageDrawUniformBufferCount, sizeof(gpu::ImageDrawUniforms)); if (allocs.imageDrawUniformBufferCount != m_currentResourceAllocations.imageDrawUniformBufferCount || forceRealloc) { m_impl->resizeImageDrawUniformBuffer(allocs.imageDrawUniformBufferCount * - sizeof(pls::ImageDrawUniforms)); + sizeof(gpu::ImageDrawUniforms)); } - LOG_BUFFER_RING_SIZE(pathBufferCount, sizeof(pls::PathData)); + LOG_BUFFER_RING_SIZE(pathBufferCount, sizeof(gpu::PathData)); if (allocs.pathBufferCount != m_currentResourceAllocations.pathBufferCount || forceRealloc) { - m_impl->resizePathBuffer(allocs.pathBufferCount * sizeof(pls::PathData), - pls::PathData::kBufferStructure); + m_impl->resizePathBuffer(allocs.pathBufferCount * sizeof(gpu::PathData), + 
gpu::PathData::kBufferStructure); } - LOG_BUFFER_RING_SIZE(paintBufferCount, sizeof(pls::PaintData)); + LOG_BUFFER_RING_SIZE(paintBufferCount, sizeof(gpu::PaintData)); if (allocs.paintBufferCount != m_currentResourceAllocations.paintBufferCount || forceRealloc) { - m_impl->resizePaintBuffer(allocs.paintBufferCount * sizeof(pls::PaintData), - pls::PaintData::kBufferStructure); + m_impl->resizePaintBuffer(allocs.paintBufferCount * sizeof(gpu::PaintData), + gpu::PaintData::kBufferStructure); } - LOG_BUFFER_RING_SIZE(paintAuxBufferCount, sizeof(pls::PaintAuxData)); + LOG_BUFFER_RING_SIZE(paintAuxBufferCount, sizeof(gpu::PaintAuxData)); if (allocs.paintAuxBufferCount != m_currentResourceAllocations.paintAuxBufferCount || forceRealloc) { - m_impl->resizePaintAuxBuffer(allocs.paintAuxBufferCount * sizeof(pls::PaintAuxData), - pls::PaintAuxData::kBufferStructure); + m_impl->resizePaintAuxBuffer(allocs.paintAuxBufferCount * sizeof(gpu::PaintAuxData), + gpu::PaintAuxData::kBufferStructure); } - LOG_BUFFER_RING_SIZE(contourBufferCount, sizeof(pls::ContourData)); + LOG_BUFFER_RING_SIZE(contourBufferCount, sizeof(gpu::ContourData)); if (allocs.contourBufferCount != m_currentResourceAllocations.contourBufferCount || forceRealloc) { - m_impl->resizeContourBuffer(allocs.contourBufferCount * sizeof(pls::ContourData), - pls::ContourData::kBufferStructure); + m_impl->resizeContourBuffer(allocs.contourBufferCount * sizeof(gpu::ContourData), + gpu::ContourData::kBufferStructure); } - LOG_BUFFER_RING_SIZE(simpleGradientBufferCount, sizeof(pls::TwoTexelRamp)); + LOG_BUFFER_RING_SIZE(simpleGradientBufferCount, sizeof(gpu::TwoTexelRamp)); if (allocs.simpleGradientBufferCount != m_currentResourceAllocations.simpleGradientBufferCount || forceRealloc) { m_impl->resizeSimpleColorRampsBuffer(allocs.simpleGradientBufferCount * - sizeof(pls::TwoTexelRamp)); + sizeof(gpu::TwoTexelRamp)); } - LOG_BUFFER_RING_SIZE(complexGradSpanBufferCount, sizeof(pls::GradientSpan)); + LOG_BUFFER_RING_SIZE(complexGradSpanBufferCount, sizeof(gpu::GradientSpan)); if (allocs.complexGradSpanBufferCount != m_currentResourceAllocations.complexGradSpanBufferCount || forceRealloc) { - m_impl->resizeGradSpanBuffer(allocs.complexGradSpanBufferCount * sizeof(pls::GradientSpan)); + m_impl->resizeGradSpanBuffer(allocs.complexGradSpanBufferCount * sizeof(gpu::GradientSpan)); } - LOG_BUFFER_RING_SIZE(tessSpanBufferCount, sizeof(pls::TessVertexSpan)); + LOG_BUFFER_RING_SIZE(tessSpanBufferCount, sizeof(gpu::TessVertexSpan)); if (allocs.tessSpanBufferCount != m_currentResourceAllocations.tessSpanBufferCount || forceRealloc) { m_impl->resizeTessVertexSpanBuffer(allocs.tessSpanBufferCount * - sizeof(pls::TessVertexSpan)); + sizeof(gpu::TessVertexSpan)); } - LOG_BUFFER_RING_SIZE(triangleVertexBufferCount, sizeof(pls::TriangleVertex)); + LOG_BUFFER_RING_SIZE(triangleVertexBufferCount, sizeof(gpu::TriangleVertex)); if (allocs.triangleVertexBufferCount != m_currentResourceAllocations.triangleVertexBufferCount || forceRealloc) { m_impl->resizeTriangleVertexBuffer(allocs.triangleVertexBufferCount * - sizeof(pls::TriangleVertex)); + sizeof(gpu::TriangleVertex)); } allocs.gradTextureHeight = std::min(allocs.gradTextureHeight, kMaxTextureHeight); - LOG_TEXTURE_HEIGHT(gradTextureHeight, pls::kGradTextureWidth * 4); + LOG_TEXTURE_HEIGHT(gradTextureHeight, gpu::kGradTextureWidth * 4); if (allocs.gradTextureHeight != m_currentResourceAllocations.gradTextureHeight || forceRealloc) { m_impl->resizeGradientTexture( - pls::kGradTextureWidth, + gpu::kGradTextureWidth, 
             math::lossless_numeric_cast<uint32_t>(allocs.gradTextureHeight));
     }
 
     allocs.tessTextureHeight = std::min(allocs.tessTextureHeight, kMaxTextureHeight);
-    LOG_TEXTURE_HEIGHT(tessTextureHeight, pls::kTessTextureWidth * 4 * 4);
+    LOG_TEXTURE_HEIGHT(tessTextureHeight, gpu::kTessTextureWidth * 4 * 4);
     if (allocs.tessTextureHeight != m_currentResourceAllocations.tessTextureHeight || forceRealloc)
     {
         m_impl->resizeTessellationTexture(
-            pls::kTessTextureWidth,
+            gpu::kTessTextureWidth,
             math::lossless_numeric_cast<uint32_t>(allocs.tessTextureHeight));
     }
@@ -1397,8 +1397,8 @@ void PLSRenderContext::LogicalFlush::pushPaddingVertices(uint32_t tessLocation,
     assert(m_pathTessLocation == m_expectedPathTessLocationAtEndOfPath);
 }
 
-void PLSRenderContext::LogicalFlush::pushPath(PLSPathDraw* draw,
-                                              pls::PatchType patchType,
+void PLSRenderContext::LogicalFlush::pushPath(RiveRenderPathDraw* draw,
+                                              gpu::PatchType patchType,
                                               uint32_t tessVertexCount)
 {
     assert(m_hasDoneLayout);
@@ -1432,7 +1432,7 @@ void PLSRenderContext::LogicalFlush::pushPath(PLSPathDraw* draw,
     assert(m_flushDesc.firstPaintAux + m_currentPathID + 1 ==
            m_ctx->m_paintAuxData.elementsWritten());
 
-    pls::DrawType drawType;
+    gpu::DrawType drawType;
     uint32_t tessLocation;
     if (patchType == PatchType::midpointFan)
     {
@@ -1455,18 +1455,18 @@
     uint32_t baseInstance = math::lossless_numeric_cast<uint32_t>(tessLocation / patchSize);
     assert(baseInstance * patchSize == tessLocation); // flush() is responsible for alignment.
 
-    if (m_currentPathContourDirections == pls::ContourDirections::reverseAndForward)
+    if (m_currentPathContourDirections == gpu::ContourDirections::reverseAndForward)
     {
         assert(tessVertexCount % 2 == 0);
         m_pathTessLocation = m_pathMirroredTessLocation = tessLocation + tessVertexCount / 2;
     }
-    else if (m_currentPathContourDirections == pls::ContourDirections::forward)
+    else if (m_currentPathContourDirections == gpu::ContourDirections::forward)
    {
         m_pathTessLocation = m_pathMirroredTessLocation = tessLocation;
     }
     else
     {
-        assert(m_currentPathContourDirections == pls::ContourDirections::reverse);
+        assert(m_currentPathContourDirections == gpu::ContourDirections::reverse);
         m_pathTessLocation = m_pathMirroredTessLocation = tessLocation + tessVertexCount;
     }
@@ -1489,17 +1489,17 @@ void PLSRenderContext::LogicalFlush::pushContour(Vec2D midpoint,
         midpoint.x = closed ? 1 : 0;
     }
     // If the contour is closed, the shader needs a vertex to wrap back around to at the end of it.
-    uint32_t vertexIndex0 = m_currentPathContourDirections & pls::ContourDirections::forward
+    uint32_t vertexIndex0 = m_currentPathContourDirections & gpu::ContourDirections::forward
                                 ? m_pathTessLocation
                                 : m_pathMirroredTessLocation - 1;
     m_ctx->m_contourData.emplace_back(midpoint, m_currentPathID, vertexIndex0);
     ++m_currentContourID;
-    assert(0 < m_currentContourID && m_currentContourID <= pls::kMaxContourID);
+    assert(0 < m_currentContourID && m_currentContourID <= gpu::kMaxContourID);
     assert(m_flushDesc.firstContour + m_currentContourID == m_ctx->m_contourData.elementsWritten());
 
     // The first curve of the contour will be pre-padded with 'paddingVertexCount' tessellation
     // vertices, colocated at T=0. The caller must use this argument to align the end of the contour on
-    // a boundary of the patch size. (See pls::PaddingToAlignUp().)
+    // a boundary of the patch size. (See gpu::PaddingToAlignUp().)
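Since the comment above points at gpu::PaddingToAlignUp(), here is a minimal sketch of what such an align-up helper computes; the real function in the renderer is not shown in this patch, so this standalone version is for illustration only:

    #include <cstddef>

    // Number of padding elements needed so that (count + padding) is a
    // multiple of Alignment. The surrounding code uses this both for
    // 256-byte-aligned storage-buffer offsets and for patch-size-aligned
    // tessellation vertex counts.
    template <size_t Alignment> constexpr size_t paddingToAlignUp(size_t count)
    {
        return (Alignment - count % Alignment) % Alignment;
    }

    static_assert(paddingToAlignUp<256>(300) == 212); // 300 + 212 == 512
    static_assert(paddingToAlignUp<256>(512) == 0);   // already aligned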
m_currentContourPaddingVertexCount = paddingVertexCount; } @@ -1526,7 +1526,7 @@ void PLSRenderContext::LogicalFlush::pushCubic(const Vec2D pts[4], // Only the first curve of a contour gets padding vertices. m_currentContourPaddingVertexCount = 0; - if (m_currentPathContourDirections == pls::ContourDirections::reverseAndForward) + if (m_currentPathContourDirections == gpu::ContourDirections::reverseAndForward) { pushMirroredAndForwardTessellationSpans(pts, joinTangent, @@ -1536,7 +1536,7 @@ void PLSRenderContext::LogicalFlush::pushCubic(const Vec2D pts[4], joinSegmentCount, m_currentContourID | additionalContourFlags); } - else if (m_currentPathContourDirections == pls::ContourDirections::forward) + else if (m_currentPathContourDirections == gpu::ContourDirections::forward) { pushTessellationSpans(pts, joinTangent, @@ -1548,7 +1548,7 @@ void PLSRenderContext::LogicalFlush::pushCubic(const Vec2D pts[4], } else { - assert(m_currentPathContourDirections == pls::ContourDirections::reverse); + assert(m_currentPathContourDirections == gpu::ContourDirections::reverse); pushMirroredTessellationSpans(pts, joinTangent, totalVertexCount, @@ -1786,7 +1786,7 @@ void PLSRenderContext::LogicalFlush::pushStencilClipReset(StencilClipReset* draw void PLSRenderContext::LogicalFlush::pushBarrier() { assert(m_hasDoneLayout); - assert(m_flushDesc.interlockMode != pls::InterlockMode::rasterOrdering); + assert(m_flushDesc.interlockMode != gpu::InterlockMode::rasterOrdering); if (!m_drawList.empty()) { @@ -1794,7 +1794,7 @@ void PLSRenderContext::LogicalFlush::pushBarrier() } } -pls::DrawBatch& PLSRenderContext::LogicalFlush::pushPathDraw(PLSPathDraw* draw, +gpu::DrawBatch& PLSRenderContext::LogicalFlush::pushPathDraw(RiveRenderPathDraw* draw, DrawType drawType, uint32_t vertexCount, uint32_t baseVertex) @@ -1802,7 +1802,7 @@ pls::DrawBatch& PLSRenderContext::LogicalFlush::pushPathDraw(PLSPathDraw* draw, assert(m_hasDoneLayout); DrawBatch& batch = pushDraw(draw, drawType, draw->paintType(), vertexCount, baseVertex); - auto pathShaderFeatures = pls::ShaderFeatures::NONE; + auto pathShaderFeatures = gpu::ShaderFeatures::NONE; if (draw->fillRule() == FillRule::evenOdd) { pathShaderFeatures |= ShaderFeatures::ENABLE_EVEN_ODD; @@ -1814,7 +1814,7 @@ pls::DrawBatch& PLSRenderContext::LogicalFlush::pushPathDraw(PLSPathDraw* draw, batch.shaderFeatures |= pathShaderFeatures & m_ctx->m_frameShaderFeaturesMask; m_combinedShaderFeatures |= batch.shaderFeatures; assert((batch.shaderFeatures & - pls::ShaderFeaturesMaskFor(drawType, m_ctx->frameInterlockMode())) == + gpu::ShaderFeaturesMaskFor(drawType, m_ctx->frameInterlockMode())) == batch.shaderFeatures); return batch; } @@ -1832,9 +1832,9 @@ RIVE_ALWAYS_INLINE static bool can_combine_draw_images(const PLSTexture* current return currentDrawTexture == nextDrawTexture; } -pls::DrawBatch& PLSRenderContext::LogicalFlush::pushDraw(PLSDraw* draw, +gpu::DrawBatch& PLSRenderContext::LogicalFlush::pushDraw(PLSDraw* draw, DrawType drawType, - pls::PaintType paintType, + gpu::PaintType paintType, uint32_t elementCount, uint32_t baseElement) { @@ -1845,8 +1845,8 @@ pls::DrawBatch& PLSRenderContext::LogicalFlush::pushDraw(PLSDraw* draw, { case DrawType::midpointFanPatches: case DrawType::outerCurvePatches: - case DrawType::plsAtomicInitialize: - case DrawType::plsAtomicResolve: + case DrawType::gpuAtomicInitialize: + case DrawType::gpuAtomicResolve: case DrawType::stencilClipReset: needsNewBatch = m_drawList.empty() || m_drawList.tail().drawType != drawType || @@ -1872,11 +1872,11 @@ 
pls::DrawBatch& PLSRenderContext::LogicalFlush::pushDraw(PLSDraw* draw, assert(batch.drawType == drawType); assert(can_combine_draw_images(batch.imageTexture, draw->imageTexture())); assert(!batch.needsBarrier); - if (m_flushDesc.interlockMode == pls::InterlockMode::depthStencil) + if (m_flushDesc.interlockMode == gpu::InterlockMode::depthStencil) { // depthStencil can't mix drawContents in a batch. assert(batch.drawContents == draw->drawContents()); - assert((batch.shaderFeatures & pls::ShaderFeatures::ENABLE_ADVANCED_BLEND) == + assert((batch.shaderFeatures & gpu::ShaderFeatures::ENABLE_ADVANCED_BLEND) == (draw->blendMode() != BlendMode::srcOver)); // If using KHR_blend_equation_advanced, we can't mix blend modes in a batch. assert(!m_ctx->platformFeatures().supportsKHRBlendEquations || @@ -1938,8 +1938,8 @@ pls::DrawBatch& PLSRenderContext::LogicalFlush::pushDraw(PLSDraw* draw, m_combinedShaderFeatures |= batch.shaderFeatures; batch.drawContents |= draw->drawContents(); assert((batch.shaderFeatures & - pls::ShaderFeaturesMaskFor(drawType, m_ctx->frameInterlockMode())) == + gpu::ShaderFeaturesMaskFor(drawType, m_ctx->frameInterlockMode())) == batch.shaderFeatures); return batch; } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/pls_render_context_helper_impl.cpp b/renderer/src/render_context_helper_impl.cpp similarity index 92% rename from pls/renderer/pls_render_context_helper_impl.cpp rename to renderer/src/render_context_helper_impl.cpp index 458d15ca..ae9df0ec 100644 --- a/pls/renderer/pls_render_context_helper_impl.cpp +++ b/renderer/src/render_context_helper_impl.cpp @@ -2,16 +2,16 @@ * Copyright 2022 Rive */ -#include "rive/pls/pls_render_context_helper_impl.hpp" +#include "rive/renderer/render_context_helper_impl.hpp" -#include "rive/pls/pls_image.hpp" +#include "rive/renderer/image.hpp" #include "shaders/constants.glsl" #ifdef RIVE_DECODERS #include "rive/decoders/bitmap_decoder.hpp" #endif -namespace rive::pls +namespace rive::gpu { rcp PLSRenderContextHelperImpl::decodeImageTexture(Span encodedBytes) { @@ -44,25 +44,25 @@ void PLSRenderContextHelperImpl::resizeImageDrawUniformBuffer(size_t sizeInBytes } void PLSRenderContextHelperImpl::resizePathBuffer(size_t sizeInBytes, - pls::StorageBufferStructure bufferStructure) + gpu::StorageBufferStructure bufferStructure) { m_pathBuffer = makeStorageBufferRing(sizeInBytes, bufferStructure); } void PLSRenderContextHelperImpl::resizePaintBuffer(size_t sizeInBytes, - pls::StorageBufferStructure bufferStructure) + gpu::StorageBufferStructure bufferStructure) { m_paintBuffer = makeStorageBufferRing(sizeInBytes, bufferStructure); } void PLSRenderContextHelperImpl::resizePaintAuxBuffer(size_t sizeInBytes, - pls::StorageBufferStructure bufferStructure) + gpu::StorageBufferStructure bufferStructure) { m_paintAuxBuffer = makeStorageBufferRing(sizeInBytes, bufferStructure); } void PLSRenderContextHelperImpl::resizeContourBuffer(size_t sizeInBytes, - pls::StorageBufferStructure bufferStructure) + gpu::StorageBufferStructure bufferStructure) { m_contourBuffer = makeStorageBufferRing(sizeInBytes, bufferStructure); } @@ -171,4 +171,4 @@ void PLSRenderContextHelperImpl::unmapTriangleVertexBuffer() { m_triangleBuffer->unmapAndSubmitBuffer(); } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/renderer/src/rive_render_factory.cpp b/renderer/src/rive_render_factory.cpp new file mode 100644 index 00000000..f253f8cc --- /dev/null +++ b/renderer/src/rive_render_factory.cpp @@ -0,0 +1,44 @@ +/* + * Copyright 2022 
Rive + */ + +#include "rive/renderer/rive_render_factory.hpp" + +#include "rive_render_paint.hpp" +#include "rive_render_path.hpp" +#include "rive/renderer/rive_renderer.hpp" + +namespace rive::gpu +{ +rcp RiveRenderFactory::makeLinearGradient(float sx, + float sy, + float ex, + float ey, + const ColorInt colors[], // [count] + const float stops[], // [count] + size_t count) +{ + + return PLSGradient::MakeLinear(sx, sy, ex, ey, colors, stops, count); +} + +rcp RiveRenderFactory::makeRadialGradient(float cx, + float cy, + float radius, + const ColorInt colors[], // [count] + const float stops[], // [count] + size_t count) +{ + + return PLSGradient::MakeRadial(cx, cy, radius, colors, stops, count); +} + +rcp RiveRenderFactory::makeRenderPath(RawPath& rawPath, FillRule fillRule) +{ + return make_rcp(fillRule, rawPath); +} + +rcp RiveRenderFactory::makeEmptyRenderPath() { return make_rcp(); } + +rcp RiveRenderFactory::makeRenderPaint() { return make_rcp(); } +} // namespace rive::gpu diff --git a/pls/renderer/pls_paint.cpp b/renderer/src/rive_render_paint.cpp similarity index 89% rename from pls/renderer/pls_paint.cpp rename to renderer/src/rive_render_paint.cpp index 3fab1d63..9590ea96 100644 --- a/pls/renderer/pls_paint.cpp +++ b/renderer/src/rive_render_paint.cpp @@ -2,15 +2,15 @@ * Copyright 2022 Rive */ -#include "pls_paint.hpp" +#include "rive_render_paint.hpp" -#include "rive/pls/pls_image.hpp" +#include "rive/renderer/image.hpp" -namespace rive::pls +namespace rive::gpu { -PLSPaint::PLSPaint() {} +RiveRenderPaint::RiveRenderPaint() {} -PLSPaint::~PLSPaint() {} +RiveRenderPaint::~RiveRenderPaint() {} // Ensure the given gradient stops are in a format expected by PLS. static bool validate_gradient_stops(const ColorInt colors[], // [count] @@ -173,19 +173,19 @@ rcp PLSGradient::MakeRadial(float cx, bool PLSGradient::isOpaque() const { - if (m_isOpaque == pls::TriState::unknown) + if (m_isOpaque == gpu::TriState::unknown) { ColorInt allColors = ~0; for (int i = 0; i < m_count; ++i) { allColors &= m_colors[i]; } - m_isOpaque = colorAlpha(allColors) == 0xff ? pls::TriState::yes : pls::TriState::no; + m_isOpaque = colorAlpha(allColors) == 0xff ? gpu::TriState::yes : gpu::TriState::no; } - return m_isOpaque == pls::TriState::yes; + return m_isOpaque == gpu::TriState::yes; } -void PLSPaint::color(ColorInt color) +void RiveRenderPaint::color(ColorInt color) { m_paintType = PaintType::solidColor; m_simpleValue.color = color; @@ -193,7 +193,7 @@ void PLSPaint::color(ColorInt color) m_imageTexture.reset(); } -void PLSPaint::shader(rcp shader) +void RiveRenderPaint::shader(rcp shader) { m_gradient = static_rcp_cast(std::move(shader)); m_paintType = m_gradient ? 
m_gradient->paintType() : PaintType::solidColor; @@ -203,7 +203,7 @@ void PLSPaint::shader(rcp shader) m_imageTexture.reset(); } -void PLSPaint::image(rcp imageTexture, float opacity) +void RiveRenderPaint::image(rcp imageTexture, float opacity) { m_paintType = PaintType::image; m_simpleValue.imageOpacity = opacity; @@ -211,7 +211,7 @@ void PLSPaint::image(rcp imageTexture, float opacity) m_imageTexture = std::move(imageTexture); } -void PLSPaint::clipUpdate(uint32_t outerClipID) +void RiveRenderPaint::clipUpdate(uint32_t outerClipID) { m_paintType = PaintType::clipUpdate; m_simpleValue.outerClipID = outerClipID; @@ -219,19 +219,19 @@ void PLSPaint::clipUpdate(uint32_t outerClipID) m_imageTexture.reset(); } -bool PLSPaint::getIsOpaque() const +bool RiveRenderPaint::getIsOpaque() const { switch (m_paintType) { - case pls::PaintType::solidColor: + case gpu::PaintType::solidColor: return colorAlpha(m_simpleValue.color) == 0xff; - case pls::PaintType::linearGradient: - case pls::PaintType::radialGradient: + case gpu::PaintType::linearGradient: + case gpu::PaintType::radialGradient: return m_gradient->isOpaque(); - case pls::PaintType::image: - case pls::PaintType::clipUpdate: + case gpu::PaintType::image: + case gpu::PaintType::clipUpdate: return false; } RIVE_UNREACHABLE(); } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/pls_paint.hpp b/renderer/src/rive_render_paint.hpp similarity index 93% rename from pls/renderer/pls_paint.hpp rename to renderer/src/rive_render_paint.hpp index 6edd8d3b..e08321ae 100644 --- a/pls/renderer/pls_paint.hpp +++ b/renderer/src/rive_render_paint.hpp @@ -4,11 +4,11 @@ #pragma once -#include "rive/pls/pls.hpp" +#include "rive/renderer/gpu.hpp" #include "rive/renderer.hpp" #include -namespace rive::pls +namespace rive::gpu { // Copies an array of colors or stops for a gradient. // Stores the data locally if there are 4 values or fewer. @@ -103,15 +103,15 @@ class PLSGradient : public lite_rtti_override PLSGradDataArray m_stops; size_t m_count; std::array m_coeffs; - mutable pls::TriState m_isOpaque = pls::TriState::unknown; + mutable gpu::TriState m_isOpaque = gpu::TriState::unknown; }; // RenderPaint implementation for Rive's pixel local storage renderer. 
-class PLSPaint : public lite_rtti_override +class RiveRenderPaint : public lite_rtti_override { public: - PLSPaint(); - ~PLSPaint(); + RiveRenderPaint(); + ~RiveRenderPaint(); void style(RenderPaintStyle style) override { m_stroked = style == RenderPaintStyle::stroke; } void color(ColorInt color) override; @@ -135,12 +135,12 @@ class PLSPaint : public lite_rtti_override StrokeJoin getJoin() const { return m_join; } StrokeCap getCap() const { return m_cap; } BlendMode getBlendMode() const { return m_blendMode; } - pls::SimplePaintValue getSimpleValue() const { return m_simpleValue; } + gpu::SimplePaintValue getSimpleValue() const { return m_simpleValue; } bool getIsOpaque() const; private: PaintType m_paintType = PaintType::solidColor; - pls::SimplePaintValue m_simpleValue; + gpu::SimplePaintValue m_simpleValue; rcp m_gradient; rcp m_imageTexture; float m_thickness = 1; @@ -149,4 +149,4 @@ class PLSPaint : public lite_rtti_override BlendMode m_blendMode = BlendMode::srcOver; bool m_stroked = false; }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/pls_path.cpp b/renderer/src/rive_render_path.cpp similarity index 85% rename from pls/renderer/pls_path.cpp rename to renderer/src/rive_render_path.cpp index a5ae9a02..373d482d 100644 --- a/pls/renderer/pls_path.cpp +++ b/renderer/src/rive_render_path.cpp @@ -2,35 +2,35 @@ * Copyright 2022 Rive */ -#include "pls_path.hpp" +#include "rive_render_path.hpp" #include "eval_cubic.hpp" #include "rive/math/simd.hpp" #include "rive/math/wangs_formula.hpp" -namespace rive::pls +namespace rive::gpu { -PLSPath::PLSPath(FillRule fillRule, RawPath& rawPath) +RiveRenderPath::RiveRenderPath(FillRule fillRule, RawPath& rawPath) { m_rawPath.swap(rawPath); m_rawPath.pruneEmptySegments(); } -void PLSPath::rewind() +void RiveRenderPath::rewind() { assert(m_rawPathMutationLockCount == 0); m_rawPath.rewind(); m_dirt = kAllDirt; } -void PLSPath::moveTo(float x, float y) +void RiveRenderPath::moveTo(float x, float y) { assert(m_rawPathMutationLockCount == 0); m_rawPath.moveTo(x, y); m_dirt = kAllDirt; } -void PLSPath::lineTo(float x, float y) +void RiveRenderPath::lineTo(float x, float y) { assert(m_rawPathMutationLockCount == 0); @@ -46,7 +46,7 @@ void PLSPath::lineTo(float x, float y) m_dirt = kAllDirt; } -void PLSPath::cubicTo(float ox, float oy, float ix, float iy, float x, float y) +void RiveRenderPath::cubicTo(float ox, float oy, float ix, float iy, float x, float y) { assert(m_rawPathMutationLockCount == 0); @@ -64,17 +64,17 @@ void PLSPath::cubicTo(float ox, float oy, float ix, float iy, float x, float y) m_dirt = kAllDirt; } -void PLSPath::close() +void RiveRenderPath::close() { assert(m_rawPathMutationLockCount == 0); m_rawPath.close(); m_dirt = kAllDirt; } -void PLSPath::addRenderPath(RenderPath* path, const Mat2D& matrix) +void RiveRenderPath::addRenderPath(RenderPath* path, const Mat2D& matrix) { assert(m_rawPathMutationLockCount == 0); - PLSPath* plsPath = static_cast(path); + RiveRenderPath* plsPath = static_cast(path); RawPath::Iter transformedPathIter = m_rawPath.addPath(plsPath->m_rawPath, &matrix); if (matrix != Mat2D()) { @@ -84,7 +84,7 @@ void PLSPath::addRenderPath(RenderPath* path, const Mat2D& matrix) m_dirt = kAllDirt; } -const AABB& PLSPath::getBounds() const +const AABB& RiveRenderPath::getBounds() const { if (m_dirt & kPathBoundsDirt) { @@ -94,7 +94,7 @@ const AABB& PLSPath::getBounds() const return m_bounds; } -float PLSPath::getCoarseArea() const +float RiveRenderPath::getCoarseArea() const { if (m_dirt & 
kPathCoarseAreaDirt) { @@ -153,7 +153,7 @@ float PLSPath::getCoarseArea() const return m_coarseArea; } -uint64_t PLSPath::getRawPathMutationID() const +uint64_t RiveRenderPath::getRawPathMutationID() const { static std::atomic uniqueIDCounter = 0; if (m_dirt & kRawPathMutationIDDirt) @@ -163,4 +163,4 @@ uint64_t PLSPath::getRawPathMutationID() const } return m_rawPathMutationID; } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/pls_path.hpp b/renderer/src/rive_render_path.hpp similarity index 89% rename from pls/renderer/pls_path.hpp rename to renderer/src/rive_render_path.hpp index 700fa82e..5bcd8edf 100644 --- a/pls/renderer/pls_path.hpp +++ b/renderer/src/rive_render_path.hpp @@ -7,14 +7,14 @@ #include "rive/math/raw_path.hpp" #include "rive/renderer.hpp" -namespace rive::pls +namespace rive::gpu { // RenderPath implementation for Rive's pixel local storage renderer. -class PLSPath : public lite_rtti_override +class RiveRenderPath : public lite_rtti_override { public: - PLSPath() = default; - PLSPath(FillRule fillRule, RawPath& rawPath); + RiveRenderPath() = default; + RiveRenderPath(FillRule fillRule, RawPath& rawPath); void rewind() override; void fillRule(FillRule rule) override { m_fillRule = rule; } @@ -65,4 +65,4 @@ class PLSPath : public lite_rtti_override mutable uint32_t m_dirt = kAllDirt; RIVE_DEBUG_CODE(mutable int m_rawPathMutationLockCount = 0;) }; -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/pls_renderer.cpp b/renderer/src/rive_renderer.cpp similarity index 81% rename from pls/renderer/pls_renderer.cpp rename to renderer/src/rive_renderer.cpp index 7d552b03..ce87e6af 100644 --- a/pls/renderer/pls_renderer.cpp +++ b/renderer/src/rive_renderer.cpp @@ -2,18 +2,18 @@ * Copyright 2022 Rive */ -#include "rive/pls/pls_renderer.hpp" +#include "rive/renderer/rive_renderer.hpp" -#include "pls_paint.hpp" -#include "pls_path.hpp" +#include "rive_render_paint.hpp" +#include "rive_render_path.hpp" #include "rive/math/math_types.hpp" #include "rive/math/simd.hpp" -#include "rive/pls/pls_image.hpp" +#include "rive/renderer/image.hpp" #include "shaders/constants.glsl" -namespace rive::pls +namespace rive::gpu { -bool PLSRenderer::IsAABB(const RawPath& path, AABB* result) +bool RiveRenderer::IsAABB(const RawPath& path, AABB* result) { // Any quadrilateral begins with a move plus 3 lines. constexpr static size_t kAABBVerbCount = 4; @@ -49,16 +49,18 @@ bool PLSRenderer::IsAABB(const RawPath& path, AABB* result) return false; } -PLSRenderer::ClipElement::ClipElement(const Mat2D& matrix_, - const PLSPath* path_, - FillRule fillRule_) +RiveRenderer::ClipElement::ClipElement(const Mat2D& matrix_, + const RiveRenderPath* path_, + FillRule fillRule_) { reset(matrix_, path_, fillRule_); } -PLSRenderer::ClipElement::~ClipElement() {} +RiveRenderer::ClipElement::~ClipElement() {} -void PLSRenderer::ClipElement::reset(const Mat2D& matrix_, const PLSPath* path_, FillRule fillRule_) +void RiveRenderer::ClipElement::reset(const Mat2D& matrix_, + const RiveRenderPath* path_, + FillRule fillRule_) { matrix = matrix_; rawPathMutationID = path_->getRawPathMutationID(); @@ -68,17 +70,18 @@ void PLSRenderer::ClipElement::reset(const Mat2D& matrix_, const PLSPath* path_, clipID = 0; // This gets initialized lazily. 
} -bool PLSRenderer::ClipElement::isEquivalent(const Mat2D& matrix_, const PLSPath* path_) const +bool RiveRenderer::ClipElement::isEquivalent(const Mat2D& matrix_, + const RiveRenderPath* path_) const { return matrix_ == matrix && path_->getRawPathMutationID() == rawPathMutationID && path_->getFillRule() == fillRule; } -PLSRenderer::PLSRenderer(PLSRenderContext* context) : m_context(context) {} +RiveRenderer::RiveRenderer(PLSRenderContext* context) : m_context(context) {} -PLSRenderer::~PLSRenderer() {} +RiveRenderer::~RiveRenderer() {} -void PLSRenderer::save() +void RiveRenderer::save() { // Copy the back of the stack before pushing, in case the vector grows and invalidates the // reference. @@ -86,22 +89,22 @@ void PLSRenderer::save() m_stack.push_back(copy); } -void PLSRenderer::restore() +void RiveRenderer::restore() { assert(m_stack.size() > 1); assert(m_stack.back().clipStackHeight >= m_stack[m_stack.size() - 2].clipStackHeight); m_stack.pop_back(); } -void PLSRenderer::transform(const Mat2D& matrix) +void RiveRenderer::transform(const Mat2D& matrix) { m_stack.back().matrix = m_stack.back().matrix * matrix; } -void PLSRenderer::drawPath(RenderPath* renderPath, RenderPaint* renderPaint) +void RiveRenderer::drawPath(RenderPath* renderPath, RenderPaint* renderPaint) { - LITE_RTTI_CAST_OR_RETURN(path, PLSPath*, renderPath); - LITE_RTTI_CAST_OR_RETURN(paint, PLSPaint*, renderPaint); + LITE_RTTI_CAST_OR_RETURN(path, RiveRenderPath*, renderPath); + LITE_RTTI_CAST_OR_RETURN(paint, RiveRenderPaint*, renderPaint); if (path->getRawPath().empty()) { @@ -122,17 +125,17 @@ void PLSRenderer::drawPath(RenderPath* renderPath, RenderPaint* renderPaint) return; } - clipAndPushDraw(PLSPathDraw::Make(m_context, - m_stack.back().matrix, - ref_rcp(path), - path->getFillRule(), - paint, - &m_scratchPath)); + clipAndPushDraw(RiveRenderPathDraw::Make(m_context, + m_stack.back().matrix, + ref_rcp(path), + path->getFillRule(), + paint, + &m_scratchPath)); } -void PLSRenderer::clipPath(RenderPath* renderPath) +void RiveRenderer::clipPath(RenderPath* renderPath) { - LITE_RTTI_CAST_OR_RETURN(path, PLSPath*, renderPath); + LITE_RTTI_CAST_OR_RETURN(path, RiveRenderPath*, renderPath); if (path->getRawPath().empty()) { @@ -190,7 +193,7 @@ static bool transform_rect_to_new_space(AABB* rect, return true; } -void PLSRenderer::clipRectImpl(AABB rect, const PLSPath* originalPath) +void RiveRenderer::clipRectImpl(AABB rect, const RiveRenderPath* originalPath) { bool hasClipRect = m_stack.back().clipRectInverseMatrix != nullptr; if (rect.isEmptyOrNaN()) @@ -225,11 +228,11 @@ void PLSRenderer::clipRectImpl(AABB rect, const PLSPath* originalPath) } m_stack.back().clipRectInverseMatrix = - m_context->make(m_stack.back().clipRectMatrix, + m_context->make(m_stack.back().clipRectMatrix, m_stack.back().clipRect); } -void PLSRenderer::clipPathImpl(const PLSPath* path) +void RiveRenderer::clipPathImpl(const RiveRenderPath* path) { if (path->getBounds().isEmptyOrNaN()) { @@ -254,7 +257,7 @@ void PLSRenderer::clipPathImpl(const PLSPath* path) m_stack.back().clipStackHeight = clipStackHeight + 1; } -void PLSRenderer::drawImage(const RenderImage* renderImage, BlendMode blendMode, float opacity) +void RiveRenderer::drawImage(const RenderImage* renderImage, BlendMode blendMode, float opacity) { LITE_RTTI_CAST_OR_RETURN(image, const PLSImage*, renderImage); @@ -281,13 +284,13 @@ void PLSRenderer::drawImage(const RenderImage* renderImage, BlendMode blendMode, // Implement drawImage() as drawPath() with a rectangular path and an image 
paint. if (m_unitRectPath == nullptr) { - m_unitRectPath = make_rcp(); + m_unitRectPath = make_rcp(); m_unitRectPath->line({1, 0}); m_unitRectPath->line({1, 1}); m_unitRectPath->line({0, 1}); } - PLSPaint paint; + RiveRenderPaint paint; paint.image(image->refTexture(), opacity); paint.blendMode(blendMode); drawPath(m_unitRectPath.get(), &paint); @@ -296,14 +299,14 @@ void PLSRenderer::drawImage(const RenderImage* renderImage, BlendMode blendMode, restore(); } -void PLSRenderer::drawImageMesh(const RenderImage* renderImage, - rcp vertices_f32, - rcp uvCoords_f32, - rcp indices_u16, - uint32_t vertexCount, - uint32_t indexCount, - BlendMode blendMode, - float opacity) +void RiveRenderer::drawImageMesh(const RenderImage* renderImage, + rcp vertices_f32, + rcp uvCoords_f32, + rcp indices_u16, + uint32_t vertexCount, + uint32_t indexCount, + BlendMode blendMode, + float opacity) { LITE_RTTI_CAST_OR_RETURN(image, const PLSImage*, renderImage); const PLSTexture* plsTexture = image->getTexture(); @@ -323,7 +326,7 @@ void PLSRenderer::drawImageMesh(const RenderImage* renderImage, opacity))); } -void PLSRenderer::clipAndPushDraw(PLSDrawUniquePtr draw) +void RiveRenderer::clipAndPushDraw(PLSDrawUniquePtr draw) { if (m_stack.back().clipIsEmpty) { @@ -342,14 +345,14 @@ void PLSRenderer::clipAndPushDraw(PLSDrawUniquePtr draw) struct AutoResetInternalDrawBatch { public: - AutoResetInternalDrawBatch(PLSRenderer* renderer) : m_renderer(renderer) + AutoResetInternalDrawBatch(RiveRenderer* renderer) : m_renderer(renderer) { assert(m_renderer->m_internalDrawBatch.empty()); } ~AutoResetInternalDrawBatch() { m_renderer->m_internalDrawBatch.clear(); } private: - PLSRenderer* m_renderer; + RiveRenderer* m_renderer; }; AutoResetInternalDrawBatch aridb(this); @@ -379,10 +382,10 @@ void PLSRenderer::clipAndPushDraw(PLSDrawUniquePtr draw) // We failed to process the draw. Release its refs. fprintf(stderr, - "PLSRenderer::clipAndPushDraw failed. The draw and/or clip stack are too complex.\n"); + "RiveRenderer::clipAndPushDraw failed. The draw and/or clip stack are too complex.\n"); } -bool PLSRenderer::applyClip(PLSDraw* draw) +bool RiveRenderer::applyClip(PLSDraw* draw) { draw->setClipRect(m_stack.back().clipRectInverseMatrix); @@ -412,7 +415,7 @@ bool PLSRenderer::applyClip(PLSDraw* draw) uint32_t lastClipID = clipIdxCurrentlyInClipBuffer == -1 ? 0 // The next clip to be drawn is not nested. : m_clipStack[clipIdxCurrentlyInClipBuffer].clipID; - if (m_context->frameInterlockMode() == pls::InterlockMode::depthStencil) + if (m_context->frameInterlockMode() == gpu::InterlockMode::depthStencil) { if (lastClipID == 0 && m_context->getClipContentID() != 0) { @@ -436,14 +439,14 @@ bool PLSRenderer::applyClip(PLSDraw* draw) IAABB clipDrawBounds; { - PLSPaint clipUpdatePaint; + RiveRenderPaint clipUpdatePaint; clipUpdatePaint.clipUpdate(/*clip THIS clipDraw against:*/ lastClipID); - auto clipDraw = PLSPathDraw::Make(m_context, - clip.matrix, - clip.path, - clip.fillRule, - &clipUpdatePaint, - &m_scratchPath); + auto clipDraw = RiveRenderPathDraw::Make(m_context, + clip.matrix, + clip.path, + clip.fillRule, + &clipUpdatePaint, + &m_scratchPath); clipDrawBounds = clipDraw->pixelBounds(); // Generate a new clipID every time we (re-)render an element to the clip buffer. // (Each embodiment of the element needs its own separate readBounds.) 
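The clipID-regeneration comment above is the crux of the clip bookkeeping: every re-render of a clip element gets a fresh ID, so the region that later draws read from that particular embodiment is tracked independently. A self-contained sketch of the idea; all names here are hypothetical, not the renderer's API:

    #include <algorithm>
    #include <cstdint>
    #include <unordered_map>

    struct Bounds { int left, top, right, bottom; };

    // Hypothetical tracker: one fresh ID per clip embodiment, accumulating
    // the union of all regions that draws read from it.
    class ClipIDTracker
    {
    public:
        uint32_t generateClipID() { return ++m_lastID; }

        void addClipReadBounds(uint32_t clipID, Bounds b)
        {
            auto [it, inserted] = m_readBounds.try_emplace(clipID, b);
            if (!inserted)
            {
                Bounds& r = it->second; // union the read regions
                r.left = std::min(r.left, b.left);
                r.top = std::min(r.top, b.top);
                r.right = std::max(r.right, b.right);
                r.bottom = std::max(r.bottom, b.bottom);
            }
        }

    private:
        uint32_t m_lastID = 0;
        std::unordered_map<uint32_t, Bounds> m_readBounds;
    };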
@@ -463,7 +466,7 @@ bool PLSRenderer::applyClip(PLSDraw* draw) if (lastClipID != 0) { m_context->addClipReadBounds(lastClipID, clipDrawBounds); - if (m_context->frameInterlockMode() == pls::InterlockMode::depthStencil) + if (m_context->frameInterlockMode() == gpu::InterlockMode::depthStencil) { // When drawing nested stencil clips, we need to intersect them, which involves // erasing the region of the current clip in the stencil buffer that is outside the @@ -487,4 +490,4 @@ bool PLSRenderer::applyClip(PLSDraw* draw) m_context->setClipContentID(lastClipID); return true; } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/shaders/Makefile b/renderer/src/shaders/Makefile similarity index 100% rename from pls/renderer/shaders/Makefile rename to renderer/src/shaders/Makefile diff --git a/pls/renderer/shaders/advanced_blend.glsl b/renderer/src/shaders/advanced_blend.glsl similarity index 100% rename from pls/renderer/shaders/advanced_blend.glsl rename to renderer/src/shaders/advanced_blend.glsl diff --git a/pls/renderer/shaders/atomic_draw.glsl b/renderer/src/shaders/atomic_draw.glsl similarity index 100% rename from pls/renderer/shaders/atomic_draw.glsl rename to renderer/src/shaders/atomic_draw.glsl diff --git a/pls/renderer/shaders/blit_texture_as_draw.glsl b/renderer/src/shaders/blit_texture_as_draw.glsl similarity index 100% rename from pls/renderer/shaders/blit_texture_as_draw.glsl rename to renderer/src/shaders/blit_texture_as_draw.glsl diff --git a/pls/renderer/shaders/color_ramp.glsl b/renderer/src/shaders/color_ramp.glsl similarity index 100% rename from pls/renderer/shaders/color_ramp.glsl rename to renderer/src/shaders/color_ramp.glsl diff --git a/pls/renderer/shaders/common.glsl b/renderer/src/shaders/common.glsl similarity index 100% rename from pls/renderer/shaders/common.glsl rename to renderer/src/shaders/common.glsl diff --git a/pls/renderer/shaders/constants.glsl b/renderer/src/shaders/constants.glsl similarity index 100% rename from pls/renderer/shaders/constants.glsl rename to renderer/src/shaders/constants.glsl diff --git a/pls/renderer/shaders/draw_image_mesh.glsl b/renderer/src/shaders/draw_image_mesh.glsl similarity index 100% rename from pls/renderer/shaders/draw_image_mesh.glsl rename to renderer/src/shaders/draw_image_mesh.glsl diff --git a/pls/renderer/shaders/draw_path.glsl b/renderer/src/shaders/draw_path.glsl similarity index 100% rename from pls/renderer/shaders/draw_path.glsl rename to renderer/src/shaders/draw_path.glsl diff --git a/pls/renderer/shaders/draw_path_common.glsl b/renderer/src/shaders/draw_path_common.glsl similarity index 100% rename from pls/renderer/shaders/draw_path_common.glsl rename to renderer/src/shaders/draw_path_common.glsl diff --git a/pls/renderer/shaders/glsl.glsl b/renderer/src/shaders/glsl.glsl similarity index 100% rename from pls/renderer/shaders/glsl.glsl rename to renderer/src/shaders/glsl.glsl diff --git a/pls/renderer/shaders/hlsl.glsl b/renderer/src/shaders/hlsl.glsl similarity index 100% rename from pls/renderer/shaders/hlsl.glsl rename to renderer/src/shaders/hlsl.glsl diff --git a/pls/renderer/shaders/metal.glsl b/renderer/src/shaders/metal.glsl similarity index 100% rename from pls/renderer/shaders/metal.glsl rename to renderer/src/shaders/metal.glsl diff --git a/pls/renderer/shaders/metal/color_ramp.metal b/renderer/src/shaders/metal/color_ramp.metal similarity index 100% rename from pls/renderer/shaders/metal/color_ramp.metal rename to renderer/src/shaders/metal/color_ramp.metal diff --git 
a/pls/renderer/shaders/metal/draw.metal b/renderer/src/shaders/metal/draw.metal similarity index 100% rename from pls/renderer/shaders/metal/draw.metal rename to renderer/src/shaders/metal/draw.metal diff --git a/pls/renderer/shaders/metal/generate_draw_combinations.py b/renderer/src/shaders/metal/generate_draw_combinations.py similarity index 100% rename from pls/renderer/shaders/metal/generate_draw_combinations.py rename to renderer/src/shaders/metal/generate_draw_combinations.py diff --git a/pls/renderer/shaders/metal/tessellate.metal b/renderer/src/shaders/metal/tessellate.metal similarity index 100% rename from pls/renderer/shaders/metal/tessellate.metal rename to renderer/src/shaders/metal/tessellate.metal diff --git a/pls/renderer/shaders/minify.py b/renderer/src/shaders/minify.py similarity index 99% rename from pls/renderer/shaders/minify.py rename to renderer/src/shaders/minify.py index 526afb19..d61fcf03 100644 --- a/pls/renderer/shaders/minify.py +++ b/renderer/src/shaders/minify.py @@ -552,7 +552,7 @@ def write_embedded_glsl(self, outdir): # emit shader code. out.write("namespace rive {\n") - out.write("namespace pls {\n") + out.write("namespace gpu {\n") out.write("namespace glsl {\n") out.write('const char %s[] = R"===(' % os.path.splitext(self.basename)[0]) @@ -562,7 +562,7 @@ def write_embedded_glsl(self, outdir): out.write(')===";\n') out.write("} // namespace glsl\n") - out.write("} // namespace pls\n") + out.write("} // namespace gpu\n") out.write("} // namespace rive") out.close() diff --git a/pls/renderer/shaders/pls_load_store_ext.glsl b/renderer/src/shaders/pls_load_store_ext.glsl similarity index 100% rename from pls/renderer/shaders/pls_load_store_ext.glsl rename to renderer/src/shaders/pls_load_store_ext.glsl diff --git a/pls/renderer/shaders/specialization.glsl b/renderer/src/shaders/specialization.glsl similarity index 100% rename from pls/renderer/shaders/specialization.glsl rename to renderer/src/shaders/specialization.glsl diff --git a/pls/renderer/shaders/spirv/atomic_base.glsl b/renderer/src/shaders/spirv/atomic_base.glsl similarity index 100% rename from pls/renderer/shaders/spirv/atomic_base.glsl rename to renderer/src/shaders/spirv/atomic_base.glsl diff --git a/pls/renderer/shaders/spirv/atomic_draw_image_mesh.main b/renderer/src/shaders/spirv/atomic_draw_image_mesh.main similarity index 100% rename from pls/renderer/shaders/spirv/atomic_draw_image_mesh.main rename to renderer/src/shaders/spirv/atomic_draw_image_mesh.main diff --git a/pls/renderer/shaders/spirv/atomic_draw_image_rect.main b/renderer/src/shaders/spirv/atomic_draw_image_rect.main similarity index 100% rename from pls/renderer/shaders/spirv/atomic_draw_image_rect.main rename to renderer/src/shaders/spirv/atomic_draw_image_rect.main diff --git a/pls/renderer/shaders/spirv/atomic_draw_interior_triangles.main b/renderer/src/shaders/spirv/atomic_draw_interior_triangles.main similarity index 100% rename from pls/renderer/shaders/spirv/atomic_draw_interior_triangles.main rename to renderer/src/shaders/spirv/atomic_draw_interior_triangles.main diff --git a/pls/renderer/shaders/spirv/atomic_draw_path.main b/renderer/src/shaders/spirv/atomic_draw_path.main similarity index 100% rename from pls/renderer/shaders/spirv/atomic_draw_path.main rename to renderer/src/shaders/spirv/atomic_draw_path.main diff --git a/pls/renderer/shaders/spirv/atomic_resolve_pls.main b/renderer/src/shaders/spirv/atomic_resolve_pls.main similarity index 100% rename from pls/renderer/shaders/spirv/atomic_resolve_pls.main 
rename to renderer/src/shaders/spirv/atomic_resolve_pls.main diff --git a/pls/renderer/shaders/spirv/color_ramp.main b/renderer/src/shaders/spirv/color_ramp.main similarity index 100% rename from pls/renderer/shaders/spirv/color_ramp.main rename to renderer/src/shaders/spirv/color_ramp.main diff --git a/pls/renderer/shaders/spirv/draw_image_mesh.main b/renderer/src/shaders/spirv/draw_image_mesh.main similarity index 100% rename from pls/renderer/shaders/spirv/draw_image_mesh.main rename to renderer/src/shaders/spirv/draw_image_mesh.main diff --git a/pls/renderer/shaders/spirv/draw_interior_triangles.main b/renderer/src/shaders/spirv/draw_interior_triangles.main similarity index 100% rename from pls/renderer/shaders/spirv/draw_interior_triangles.main rename to renderer/src/shaders/spirv/draw_interior_triangles.main diff --git a/pls/renderer/shaders/spirv/draw_path.main b/renderer/src/shaders/spirv/draw_path.main similarity index 100% rename from pls/renderer/shaders/spirv/draw_path.main rename to renderer/src/shaders/spirv/draw_path.main diff --git a/pls/renderer/shaders/spirv/tessellate.main b/renderer/src/shaders/spirv/tessellate.main similarity index 100% rename from pls/renderer/shaders/spirv/tessellate.main rename to renderer/src/shaders/spirv/tessellate.main diff --git a/pls/renderer/shaders/stencil_draw.glsl b/renderer/src/shaders/stencil_draw.glsl similarity index 100% rename from pls/renderer/shaders/stencil_draw.glsl rename to renderer/src/shaders/stencil_draw.glsl diff --git a/pls/renderer/shaders/tessellate.glsl b/renderer/src/shaders/tessellate.glsl similarity index 100% rename from pls/renderer/shaders/tessellate.glsl rename to renderer/src/shaders/tessellate.glsl diff --git a/pls/renderer/vulkan/pls_render_context_vulkan_impl.cpp b/renderer/src/vulkan/render_context_vulkan_impl.cpp similarity index 95% rename from pls/renderer/vulkan/pls_render_context_vulkan_impl.cpp rename to renderer/src/vulkan/render_context_vulkan_impl.cpp index c8a30dac..e0493dea 100644 --- a/pls/renderer/vulkan/pls_render_context_vulkan_impl.cpp +++ b/renderer/src/vulkan/render_context_vulkan_impl.cpp @@ -2,9 +2,9 @@ * Copyright 2023 Rive */ -#include "rive/pls/vulkan/pls_render_context_vulkan_impl.hpp" +#include "rive/renderer/vulkan/render_context_vulkan_impl.hpp" -#include "rive/pls/pls_image.hpp" +#include "rive/renderer/image.hpp" #include "shaders/constants.glsl" namespace spirv @@ -42,7 +42,7 @@ namespace spirv #include "rive/decoders/bitmap_decoder.hpp" #endif -namespace rive::pls +namespace rive::gpu { static VkBufferUsageFlagBits render_buffer_usage_flags(RenderBufferType renderBufferType) { @@ -85,7 +85,7 @@ class RenderBufferVulkanImpl : public RenderBuffer protected: void* onMap() override { - m_bufferRingIdx = (m_bufferRingIdx + 1) % pls::kBufferRingSize; + m_bufferRingIdx = (m_bufferRingIdx + 1) % gpu::kBufferRingSize; m_bufferRing.synchronizeSizeAt(m_bufferRingIdx); return m_bufferRing.contentsAt(m_bufferRingIdx); } @@ -338,7 +338,7 @@ class PLSRenderContextVulkanImpl::ColorRampPipeline VkVertexInputBindingDescription vertexInputBindingDescription = { .binding = 0, - .stride = sizeof(pls::GradientSpan), + .stride = sizeof(gpu::GradientSpan), .inputRate = VK_VERTEX_INPUT_RATE_INSTANCE, }; @@ -563,7 +563,7 @@ class PLSRenderContextVulkanImpl::TessellatePipeline VkVertexInputBindingDescription vertexInputBindingDescription = { .binding = 0, - .stride = sizeof(pls::TessVertexSpan), + .stride = sizeof(gpu::TessVertexSpan), .inputRate = VK_VERTEX_INPUT_RATE_INSTANCE, }; @@ -736,7 +736,7 @@ 
class PLSRenderContextVulkanImpl::DrawPipelineLayout // (framebufferFormat x loadOp). constexpr static int kRenderPassVariantCount = 6; - static int RenderPassVariantIdx(VkFormat framebufferFormat, pls::LoadAction loadAction) + static int RenderPassVariantIdx(VkFormat framebufferFormat, gpu::LoadAction loadAction) { int loadActionIdx = static_cast(loadAction); assert(0 <= loadActionIdx && loadActionIdx < 3); @@ -754,30 +754,30 @@ class PLSRenderContextVulkanImpl::DrawPipelineLayout constexpr static VkAttachmentLoadOp LoadOpFromRenderPassVariant(int idx) { - auto loadAction = static_cast(idx >> 1); + auto loadAction = static_cast(idx >> 1); switch (loadAction) { - case pls::LoadAction::preserveRenderTarget: + case gpu::LoadAction::preserveRenderTarget: return VK_ATTACHMENT_LOAD_OP_LOAD; - case pls::LoadAction::clear: + case gpu::LoadAction::clear: return VK_ATTACHMENT_LOAD_OP_CLEAR; - case pls::LoadAction::dontCare: + case gpu::LoadAction::dontCare: return VK_ATTACHMENT_LOAD_OP_DONT_CARE; } RIVE_UNREACHABLE(); } - constexpr static uint32_t PLSAttachmentCount(pls::InterlockMode interlockMode) + constexpr static uint32_t PLSAttachmentCount(gpu::InterlockMode interlockMode) { - return interlockMode == pls::InterlockMode::atomics ? 2 : 4; + return interlockMode == gpu::InterlockMode::atomics ? 2 : 4; } DrawPipelineLayout(PLSRenderContextVulkanImpl* impl, - pls::InterlockMode interlockMode, + gpu::InterlockMode interlockMode, DrawPipelineLayoutOptions options) : m_vk(ref_rcp(impl->vulkanContext())), m_interlockMode(interlockMode), m_options(options) { - assert(interlockMode != pls::InterlockMode::depthStencil); // TODO: msaa. + assert(interlockMode != gpu::InterlockMode::depthStencil); // TODO: msaa. // Most bindings only need to be set once per flush. VkDescriptorSetLayoutBinding perFlushLayoutBindings[] = { @@ -804,7 +804,7 @@ class PLSRenderContextVulkanImpl::DrawPipelineLayout .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, .descriptorCount = 1, .stageFlags = static_cast( - m_interlockMode == pls::InterlockMode::atomics ? VK_SHADER_STAGE_FRAGMENT_BIT + m_interlockMode == gpu::InterlockMode::atomics ? VK_SHADER_STAGE_FRAGMENT_BIT : VK_SHADER_STAGE_VERTEX_BIT), }, { @@ -812,7 +812,7 @@ class PLSRenderContextVulkanImpl::DrawPipelineLayout .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, .descriptorCount = 1, .stageFlags = static_cast( - m_interlockMode == pls::InterlockMode::atomics ? VK_SHADER_STAGE_FRAGMENT_BIT + m_interlockMode == gpu::InterlockMode::atomics ? VK_SHADER_STAGE_FRAGMENT_BIT : VK_SHADER_STAGE_VERTEX_BIT), }, { @@ -916,7 +916,7 @@ class PLSRenderContextVulkanImpl::DrawPipelineLayout }, { .binding = COVERAGE_PLANE_IDX, - .descriptorType = m_interlockMode == pls::InterlockMode::atomics + .descriptorType = m_interlockMode == gpu::InterlockMode::atomics ? VK_DESCRIPTOR_TYPE_STORAGE_IMAGE : VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, .descriptorCount = 1, @@ -1116,7 +1116,7 @@ class PLSRenderContextVulkanImpl::DrawPipelineLayout .pColorAttachments = attachmentReferences, }; - if (m_interlockMode == pls::InterlockMode::rasterOrdering) + if (m_interlockMode == gpu::InterlockMode::rasterOrdering) { // With EXT_rasterization_order_attachment_access, we just need // this flag and all "subpassLoad" dependencies are implicit. 
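For context on the comment above: with VK_EXT_rasterization_order_attachment_access, marking the subpass with the rasterization-order-access bit makes per-pixel ordering of color-attachment reads and writes implicit, so no explicit self-dependencies are needed; without it, the atomic-mode path below builds the barriers by hand. A minimal sketch of wiring the subpass flag (the helper name is illustrative; the extension's device feature must be enabled):

    #include <vulkan/vulkan.h>

    // Sketch: a subpass that opts into rasterization-ordered color
    // attachment access, so "subpassLoad" sees prior fragments' writes at
    // the same pixel without explicit barriers.
    VkSubpassDescription makeRasterOrderedSubpass(const VkAttachmentReference* colorRefs,
                                                  uint32_t colorCount)
    {
        VkSubpassDescription subpass = {};
        subpass.flags =
            VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_COLOR_ACCESS_BIT_EXT;
        subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
        subpass.colorAttachmentCount = colorCount;
        subpass.pColorAttachments = colorRefs;
        return subpass;
    }

The matching color-blend-state flag (the pipelineColorBlendStateCreateInfo.flags |= seen later in this hunk) must be set on pipelines used inside such a subpass.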
@@ -1133,7 +1133,7 @@ class PLSRenderContextVulkanImpl::DrawPipelineLayout .pSubpasses = &subpassDescription, }; - if (m_interlockMode == pls::InterlockMode::atomics) + if (m_interlockMode == gpu::InterlockMode::atomics) { // Without EXT_rasterization_order_attachment_access (aka atomic mode), // "subpassLoad" calls require explicit dependencies and barriers. @@ -1173,7 +1173,7 @@ class PLSRenderContextVulkanImpl::DrawPipelineLayout } } - pls::InterlockMode interlockMode() const { return m_interlockMode; } + gpu::InterlockMode interlockMode() const { return m_interlockMode; } DrawPipelineLayoutOptions options() const { return m_options; } VkDescriptorSetLayout perFlushLayout() const @@ -1200,7 +1200,7 @@ class PLSRenderContextVulkanImpl::DrawPipelineLayout private: const rcp<VulkanContext> m_vk; - const pls::InterlockMode m_interlockMode; + const gpu::InterlockMode m_interlockMode; const DrawPipelineLayoutOptions m_options; VkDescriptorSetLayout m_descriptorSetLayouts[BINDINGS_SET_COUNT]; @@ -1218,16 +1218,16 @@ class PLSRenderContextVulkanImpl::DrawShader { public: DrawShader(VulkanContext* vk, - pls::DrawType drawType, - pls::InterlockMode interlockMode, - pls::ShaderFeatures shaderFeatures, - pls::ShaderMiscFlags shaderMiscFlags) : + gpu::DrawType drawType, + gpu::InterlockMode interlockMode, + gpu::ShaderFeatures shaderFeatures, + gpu::ShaderMiscFlags shaderMiscFlags) : m_vk(ref_rcp(vk)) { VkShaderModuleCreateInfo vsInfo = {.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO}; VkShaderModuleCreateInfo fsInfo = {.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO}; - if (interlockMode == pls::InterlockMode::rasterOrdering) + if (interlockMode == gpu::InterlockMode::rasterOrdering) { switch (drawType) { @@ -1248,17 +1248,17 @@ class PLSRenderContextVulkanImpl::DrawShader break; case DrawType::imageRect: - case DrawType::plsAtomicResolve: - case DrawType::plsAtomicInitialize: + case DrawType::gpuAtomicResolve: + case DrawType::gpuAtomicInitialize: case DrawType::stencilClipReset: RIVE_UNREACHABLE(); } } else { - assert(interlockMode == pls::InterlockMode::atomics); + assert(interlockMode == gpu::InterlockMode::atomics); bool fixedFunctionColorBlend = - shaderMiscFlags & pls::ShaderMiscFlags::fixedFunctionColorBlend; + shaderMiscFlags & gpu::ShaderMiscFlags::fixedFunctionColorBlend; switch (drawType) { case DrawType::midpointFanPatches: @@ -1297,7 +1297,7 @@ class PLSRenderContextVulkanImpl::DrawShader spirv::atomic_draw_image_mesh_frag); break; - case DrawType::plsAtomicResolve: + case DrawType::gpuAtomicResolve: vkutil::set_shader_code(vsInfo, spirv::atomic_resolve_pls_vert); vkutil::set_shader_code_if_then_else(fsInfo, fixedFunctionColorBlend, @@ -1305,7 +1305,7 @@ class PLSRenderContextVulkanImpl::DrawShader spirv::atomic_resolve_pls_frag); break; - case DrawType::plsAtomicInitialize: + case DrawType::gpuAtomicInitialize: case DrawType::stencilClipReset: RIVE_UNREACHABLE(); } @@ -1343,21 +1343,21 @@ class PLSRenderContextVulkanImpl::DrawPipeline { public: DrawPipeline(PLSRenderContextVulkanImpl* impl, - pls::DrawType drawType, + gpu::DrawType drawType, const DrawPipelineLayout& pipelineLayout, - pls::ShaderFeatures shaderFeatures, + gpu::ShaderFeatures shaderFeatures, DrawPipelineOptions drawPipelineOptions, VkRenderPass vkRenderPass) : m_vk(ref_rcp(impl->vulkanContext())) { - pls::InterlockMode interlockMode = pipelineLayout.interlockMode(); - auto shaderMiscFlags = pls::ShaderMiscFlags::none; + gpu::InterlockMode interlockMode = pipelineLayout.interlockMode(); + auto shaderMiscFlags = 
gpu::ShaderMiscFlags::none; if (pipelineLayout.options() & DrawPipelineLayoutOptions::fixedFunctionColorBlend) { - shaderMiscFlags |= pls::ShaderMiscFlags::fixedFunctionColorBlend; + shaderMiscFlags |= gpu::ShaderMiscFlags::fixedFunctionColorBlend; } uint32_t shaderKey = - pls::ShaderUniqueKey(drawType, shaderFeatures, interlockMode, shaderMiscFlags); + gpu::ShaderUniqueKey(drawType, shaderFeatures, interlockMode, shaderMiscFlags); const DrawShader& drawShader = impl->m_drawShaders .try_emplace(shaderKey, m_vk.get(), @@ -1368,12 +1368,12 @@ class PLSRenderContextVulkanImpl::DrawPipeline .first->second; VkBool32 shaderPermutationFlags[SPECIALIZATION_COUNT] = { - shaderFeatures & pls::ShaderFeatures::ENABLE_CLIPPING, - shaderFeatures & pls::ShaderFeatures::ENABLE_CLIP_RECT, - shaderFeatures & pls::ShaderFeatures::ENABLE_ADVANCED_BLEND, - shaderFeatures & pls::ShaderFeatures::ENABLE_EVEN_ODD, - shaderFeatures & pls::ShaderFeatures::ENABLE_NESTED_CLIPPING, - shaderFeatures & pls::ShaderFeatures::ENABLE_HSL_BLEND_MODES, + shaderFeatures & gpu::ShaderFeatures::ENABLE_CLIPPING, + shaderFeatures & gpu::ShaderFeatures::ENABLE_CLIP_RECT, + shaderFeatures & gpu::ShaderFeatures::ENABLE_ADVANCED_BLEND, + shaderFeatures & gpu::ShaderFeatures::ENABLE_EVEN_ODD, + shaderFeatures & gpu::ShaderFeatures::ENABLE_NESTED_CLIPPING, + shaderFeatures & gpu::ShaderFeatures::ENABLE_HSL_BLEND_MODES, }; static_assert(CLIPPING_SPECIALIZATION_IDX == 0); static_assert(CLIP_RECT_SPECIALIZATION_IDX == 1); @@ -1439,7 +1439,7 @@ class PLSRenderContextVulkanImpl::DrawPipeline { vertexInputBindingDescriptions = {{{ .binding = 0, - .stride = sizeof(pls::PatchVertex), + .stride = sizeof(gpu::PatchVertex), .inputRate = VK_VERTEX_INPUT_RATE_VERTEX, }}}; vertexAttributeDescriptions = {{ @@ -1465,7 +1465,7 @@ class PLSRenderContextVulkanImpl::DrawPipeline { vertexInputBindingDescriptions = {{{ .binding = 0, - .stride = sizeof(pls::TriangleVertex), + .stride = sizeof(gpu::TriangleVertex), .inputRate = VK_VERTEX_INPUT_RATE_VERTEX, }}}; vertexAttributeDescriptions = {{ @@ -1485,7 +1485,7 @@ class PLSRenderContextVulkanImpl::DrawPipeline { vertexInputBindingDescriptions = {{{ .binding = 0, - .stride = sizeof(pls::ImageRectVertex), + .stride = sizeof(gpu::ImageRectVertex), .inputRate = VK_VERTEX_INPUT_RATE_VERTEX, }}}; vertexAttributeDescriptions = {{ @@ -1536,7 +1536,7 @@ class PLSRenderContextVulkanImpl::DrawPipeline break; } - case DrawType::plsAtomicResolve: + case DrawType::gpuAtomicResolve: { pipelineVertexInputStateCreateInfo.vertexBindingDescriptionCount = 0; pipelineVertexInputStateCreateInfo.vertexAttributeDescriptionCount = 0; @@ -1545,7 +1545,7 @@ class PLSRenderContextVulkanImpl::DrawPipeline break; } - case DrawType::plsAtomicInitialize: + case DrawType::gpuAtomicInitialize: case DrawType::stencilClipReset: RIVE_UNREACHABLE(); } @@ -1602,7 +1602,7 @@ class PLSRenderContextVulkanImpl::DrawPipeline .pAttachments = blendColorAttachments, }; - if (interlockMode == pls::InterlockMode::rasterOrdering) + if (interlockMode == gpu::InterlockMode::rasterOrdering) { assert(m_vk->features.rasterizationOrderColorAttachmentAccess); pipelineColorBlendStateCreateInfo.flags |= @@ -1724,17 +1724,17 @@ void PLSRenderContextVulkanImpl::initGPUObjects() m_tessSpanIndexBuffer = m_vk->makeBuffer( { - .size = sizeof(pls::kTessSpanIndices), + .size = sizeof(gpu::kTessSpanIndices), .usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT, }, vkutil::Mappability::writeOnly); memcpy(vkutil::ScopedBufferFlush(*m_tessSpanIndexBuffer), - pls::kTessSpanIndices, - 
sizeof(pls::kTessSpanIndices)); + gpu::kTessSpanIndices, + sizeof(gpu::kTessSpanIndices)); m_pathPatchVertexBuffer = m_vk->makeBuffer( { - .size = kPatchVertexBufferCount * sizeof(pls::PatchVertex), + .size = kPatchVertexBufferCount * sizeof(gpu::PatchVertex), .usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, }, vkutil::Mappability::writeOnly); @@ -1744,34 +1744,34 @@ void PLSRenderContextVulkanImpl::initGPUObjects() .usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT, }, vkutil::Mappability::writeOnly); - pls::GeneratePatchBufferData( + gpu::GeneratePatchBufferData( vkutil::ScopedBufferFlush(*m_pathPatchVertexBuffer).as(), vkutil::ScopedBufferFlush(*m_pathPatchIndexBuffer).as()); m_imageRectVertexBuffer = m_vk->makeBuffer( { - .size = sizeof(pls::kImageRectVertices), + .size = sizeof(gpu::kImageRectVertices), .usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, }, vkutil::Mappability::writeOnly); memcpy(vkutil::ScopedBufferFlush(*m_imageRectVertexBuffer), - pls::kImageRectVertices, - sizeof(pls::kImageRectVertices)); + gpu::kImageRectVertices, + sizeof(gpu::kImageRectVertices)); m_imageRectIndexBuffer = m_vk->makeBuffer( { - .size = sizeof(pls::kImageRectIndices), + .size = sizeof(gpu::kImageRectIndices), .usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT, }, vkutil::Mappability::writeOnly); memcpy(vkutil::ScopedBufferFlush(*m_imageRectIndexBuffer), - pls::kImageRectIndices, - sizeof(pls::kImageRectIndices)); + gpu::kImageRectIndices, + sizeof(gpu::kImageRectIndices)); } PLSRenderContextVulkanImpl::~PLSRenderContextVulkanImpl() { // Wait for all fences before cleaning up. - for (const rcp& fence : m_frameCompletionFences) + for (const rcp& fence : m_frameCompletionFences) { if (fence != nullptr) { @@ -1843,10 +1843,10 @@ void PLSRenderContextVulkanImpl::resizeTessellationTexture(uint32_t width, uint3 void PLSRenderContextVulkanImpl::prepareToMapBuffers() { - m_bufferRingIdx = (m_bufferRingIdx + 1) % pls::kBufferRingSize; + m_bufferRingIdx = (m_bufferRingIdx + 1) % gpu::kBufferRingSize; // Wait for the existing resources to finish before we release/recycle them. - if (rcp fence = + if (rcp fence = std::move(m_frameCompletionFences[m_bufferRingIdx])) { fence->wait(); @@ -2098,7 +2098,7 @@ VkImageView PLSRenderTargetVulkan::ensureCoverageAtomicTextureView(VkCommandBuff void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) { - if (desc.interlockMode == pls::InterlockMode::depthStencil) + if (desc.interlockMode == gpu::InterlockMode::depthStencil) { return; // TODO: support MSAA. 
} @@ -2119,7 +2119,7 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) { VkRect2D renderArea = { .offset = {0, static_cast<int32_t>(desc.complexGradRowsTop)}, - .extent = {pls::kGradTextureWidth, desc.complexGradRowsHeight}, + .extent = {gpu::kGradTextureWidth, desc.complexGradRowsHeight}, }; VkRenderPassBeginInfo renderPassBeginInfo = { @@ -2140,7 +2140,7 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) m_vk->CmdSetScissor(commandBuffer, 0, 1, &renderArea); VkBuffer gradSpanBuffer = m_gradSpanBufferRing.vkBufferAt(m_bufferRingIdx); - VkDeviceSize gradSpanOffset = desc.firstComplexGradSpan * sizeof(pls::GradientSpan); + VkDeviceSize gradSpanOffset = desc.firstComplexGradSpan * sizeof(gpu::GradientSpan); m_vk->CmdBindVertexBuffers(commandBuffer, 0, 1, &gradSpanBuffer, &gradSpanOffset); VkDescriptorSet descriptorSet = @@ -2155,7 +2155,7 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) {{ .buffer = m_flushUniformBufferRing.vkBufferAt(m_bufferRingIdx), .offset = desc.flushUniformDataOffsetInBytes, - .range = sizeof(pls::FlushUniforms), + .range = sizeof(gpu::FlushUniforms), }}); m_vk->CmdBindDescriptorSets(commandBuffer, @@ -2182,7 +2182,7 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) { VkBufferImageCopy bufferImageCopy{ .bufferOffset = desc.simpleGradDataOffsetInBytes, - .bufferRowLength = pls::kGradTextureWidth, + .bufferRowLength = gpu::kGradTextureWidth, .imageSubresource = { .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, @@ -2218,7 +2218,7 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) if (desc.tessVertexSpanCount > 0) { VkRect2D renderArea = { - .extent = {pls::kTessTextureWidth, desc.tessDataHeight}, + .extent = {gpu::kTessTextureWidth, desc.tessDataHeight}, }; VkRenderPassBeginInfo renderPassBeginInfo = { @@ -2239,7 +2239,7 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) m_vk->CmdSetScissor(commandBuffer, 0, 1, &renderArea); VkBuffer tessBuffer = m_tessSpanBufferRing.vkBufferAt(m_bufferRingIdx); - VkDeviceSize tessOffset = desc.firstTessVertexSpan * sizeof(pls::TessVertexSpan); + VkDeviceSize tessOffset = desc.firstTessVertexSpan * sizeof(gpu::TessVertexSpan); m_vk->CmdBindVertexBuffers(commandBuffer, 0, 1, &tessBuffer, &tessOffset); m_vk->CmdBindIndexBuffer(commandBuffer, *m_tessSpanIndexBuffer, 0, VK_INDEX_TYPE_UINT16); @@ -2254,7 +2254,7 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) }, {{ .buffer = m_pathBufferRing.vkBufferAt(m_bufferRingIdx), - .offset = desc.firstPath * sizeof(pls::PathData), + .offset = desc.firstPath * sizeof(gpu::PathData), .range = VK_WHOLE_SIZE, }}); @@ -2266,7 +2266,7 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) }, {{ .buffer = m_contourBufferRing.vkBufferAt(m_bufferRingIdx), - .offset = desc.firstContour * sizeof(pls::ContourData), + .offset = desc.firstContour * sizeof(gpu::ContourData), .range = VK_WHOLE_SIZE, }}); @@ -2292,7 +2292,7 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) nullptr); m_vk->CmdDrawIndexed(commandBuffer, - std::size(pls::kTessSpanIndices), + std::size(gpu::kTessSpanIndices), desc.tessVertexSpanCount, 0, 0, @@ -2323,14 +2323,14 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) } auto pipelineLayoutOptions = DrawPipelineLayoutOptions::none; - if (m_vk->features.independentBlend && desc.interlockMode == pls::InterlockMode::atomics && - !(desc.combinedShaderFeatures & pls::ShaderFeatures::ENABLE_ADVANCED_BLEND)) + if 
(m_vk->features.independentBlend && desc.interlockMode == gpu::InterlockMode::atomics && + !(desc.combinedShaderFeatures & gpu::ShaderFeatures::ENABLE_ADVANCED_BLEND)) { pipelineLayoutOptions |= DrawPipelineLayoutOptions::fixedFunctionColorBlend; } int pipelineLayoutIdx = - ((desc.interlockMode == pls::InterlockMode::atomics) << kDrawPipelineLayoutOptionCount) | + ((desc.interlockMode == gpu::InterlockMode::atomics) << kDrawPipelineLayoutOptionCount) | static_cast<int>(pipelineLayoutOptions); assert(pipelineLayoutIdx < m_drawPipelineLayouts.size()); if (m_drawPipelineLayouts[pipelineLayoutIdx] == nullptr) @@ -2348,14 +2348,14 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) ? renderTarget->targetTextureView() : renderTarget->ensureOffscreenColorTextureView(commandBuffer); auto clipView = renderTarget->ensureClipTextureView(commandBuffer); - auto scratchColorTextureView = desc.interlockMode == pls::InterlockMode::atomics + auto scratchColorTextureView = desc.interlockMode == gpu::InterlockMode::atomics ? VK_NULL_HANDLE : renderTarget->ensureScratchColorTextureView(commandBuffer); - auto coverageTextureView = desc.interlockMode == pls::InterlockMode::atomics + auto coverageTextureView = desc.interlockMode == gpu::InterlockMode::atomics ? renderTarget->ensureCoverageAtomicTextureView(commandBuffer) : renderTarget->ensureCoverageTextureView(commandBuffer); - if (desc.colorLoadAction == pls::LoadAction::preserveRenderTarget && + if (desc.colorLoadAction == gpu::LoadAction::preserveRenderTarget && targetView == renderTarget->offscreenColorTextureView()) { // Copy the target into our offscreen color texture before rendering. @@ -2399,7 +2399,7 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) targetView, clipView, scratchColorTextureView, - desc.interlockMode == pls::InterlockMode::atomics ? VK_NULL_HANDLE : coverageTextureView, + desc.interlockMode == gpu::InterlockMode::atomics ? VK_NULL_HANDLE : coverageTextureView, }; static_assert(COLOR_PLANE_IDX == 0); @@ -2433,7 +2433,7 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) static_assert(COVERAGE_PLANE_IDX == 3); bool needsBarrierBeforeNextDraw = false; - if (desc.interlockMode == pls::InterlockMode::atomics) + if (desc.interlockMode == gpu::InterlockMode::atomics) { // If the color attachment will be cleared, make sure we get a barrier on // it before shaders access it via subpassLoad(). @@ -2442,9 +2442,9 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) // TODO: If we end up using HW blend when not using advanced blend, we // don't need a barrier after the clear. desc.combinedShaderFeatures & - pls::ShaderFeatures::ENABLE_ADVANCED_BLEND && + gpu::ShaderFeatures::ENABLE_ADVANCED_BLEND && #endif - desc.colorLoadAction == pls::LoadAction::clear; + desc.colorLoadAction == gpu::LoadAction::clear; // Clear the coverage texture, which is not an attachment. 
m_vk->insertImageMemoryBarrier(commandBuffer, @@ -2517,7 +2517,7 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) }, {{ .buffer = m_pathBufferRing.vkBufferAt(m_bufferRingIdx), - .offset = desc.firstPath * sizeof(pls::PathData), + .offset = desc.firstPath * sizeof(gpu::PathData), .range = VK_WHOLE_SIZE, }}); @@ -2530,12 +2530,12 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) { { .buffer = m_paintBufferRing.vkBufferAt(m_bufferRingIdx), - .offset = desc.firstPaint * sizeof(pls::PaintData), + .offset = desc.firstPaint * sizeof(gpu::PaintData), .range = VK_WHOLE_SIZE, }, { .buffer = m_paintAuxBufferRing.vkBufferAt(m_bufferRingIdx), - .offset = desc.firstPaintAux * sizeof(pls::PaintAuxData), + .offset = desc.firstPaintAux * sizeof(gpu::PaintAuxData), .range = VK_WHOLE_SIZE, }, }); @@ -2548,7 +2548,7 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) }, {{ .buffer = m_contourBufferRing.vkBufferAt(m_bufferRingIdx), - .offset = desc.firstContour * sizeof(pls::ContourData), + .offset = desc.firstContour * sizeof(gpu::ContourData), .range = VK_WHOLE_SIZE, }}); @@ -2561,7 +2561,7 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) {{ .buffer = m_flushUniformBufferRing.vkBufferAt(m_bufferRingIdx), .offset = desc.flushUniformDataOffsetInBytes, - .range = sizeof(pls::FlushUniforms), + .range = sizeof(gpu::FlushUniforms), }}); m_vk->updateBufferDescriptorSets( @@ -2573,7 +2573,7 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) {{ .buffer = m_imageDrawUniformBufferRing.vkBufferAt(m_bufferRingIdx), .offset = 0, - .range = sizeof(pls::ImageDrawUniforms), + .range = sizeof(gpu::ImageDrawUniforms), }}); // Update the PLS input attachment descriptor sets. @@ -2603,7 +2603,7 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) .imageLayout = VK_IMAGE_LAYOUT_GENERAL, }}); - if (desc.interlockMode == pls::InterlockMode::rasterOrdering) + if (desc.interlockMode == gpu::InterlockMode::rasterOrdering) { m_vk->updateImageDescriptorSets(inputAttachmentDescriptorSet, { @@ -2620,7 +2620,7 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) inputAttachmentDescriptorSet, { .dstBinding = COVERAGE_PLANE_IDX, - .descriptorType = desc.interlockMode == pls::InterlockMode::atomics + .descriptorType = desc.interlockMode == gpu::InterlockMode::atomics ? VK_DESCRIPTOR_TYPE_STORAGE_IMAGE : VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, }, @@ -2716,13 +2716,13 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) } // Setup the pipeline for this specific drawType and shaderFeatures. - pls::ShaderFeatures shaderFeatures = desc.interlockMode == pls::InterlockMode::atomics + gpu::ShaderFeatures shaderFeatures = desc.interlockMode == gpu::InterlockMode::atomics ? 
desc.combinedShaderFeatures : batch.shaderFeatures; - uint32_t pipelineKey = pls::ShaderUniqueKey(drawType, + uint32_t pipelineKey = gpu::ShaderUniqueKey(drawType, shaderFeatures, desc.interlockMode, - pls::ShaderMiscFlags::none); + gpu::ShaderMiscFlags::none); auto drawPipelineOptions = DrawPipelineOptions::none; if (desc.wireframe && m_vk->features.fillModeNonSolid) { @@ -2751,7 +2751,7 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) if (needsBarrierBeforeNextDraw) { - assert(desc.interlockMode == pls::InterlockMode::atomics); + assert(desc.interlockMode == gpu::InterlockMode::atomics); VkMemoryBarrier memoryBarrier = { .sType = VkStructureType::VK_STRUCTURE_TYPE_MEMORY_BARRIER, @@ -2787,9 +2787,9 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) 0, VK_INDEX_TYPE_UINT16); m_vk->CmdDrawIndexed(commandBuffer, - pls::PatchIndexCount(drawType), + gpu::PatchIndexCount(drawType), batch.elementCount, - pls::PatchBaseIndex(drawType), + gpu::PatchBaseIndex(drawType), 0, batch.baseElement); break; @@ -2805,7 +2805,7 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) case DrawType::imageRect: { - assert(desc.interlockMode == pls::InterlockMode::atomics); + assert(desc.interlockMode == gpu::InterlockMode::atomics); m_vk->CmdBindVertexBuffers(commandBuffer, 0, 1, @@ -2816,7 +2816,7 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) 0, VK_INDEX_TYPE_UINT16); m_vk->CmdDrawIndexed(commandBuffer, - std::size(pls::kImageRectIndices), + std::size(gpu::kImageRectIndices), 1, batch.baseElement, 0, @@ -2847,20 +2847,20 @@ void PLSRenderContextVulkanImpl::flush(const FlushDescriptor& desc) break; } - case DrawType::plsAtomicResolve: + case DrawType::gpuAtomicResolve: { - assert(desc.interlockMode == pls::InterlockMode::atomics); + assert(desc.interlockMode == gpu::InterlockMode::atomics); m_vk->CmdDraw(commandBuffer, 4, 1, 0, 0); break; } - case DrawType::plsAtomicInitialize: + case DrawType::gpuAtomicInitialize: case DrawType::stencilClipReset: RIVE_UNREACHABLE(); } needsBarrierBeforeNextDraw = - desc.interlockMode == pls::InterlockMode::atomics && batch.needsBarrier; + desc.interlockMode == gpu::InterlockMode::atomics && batch.needsBarrier; } m_vk->CmdEndRenderPass(commandBuffer); @@ -2924,4 +2924,4 @@ std::unique_ptr<PLSRenderContext> PLSRenderContextVulkanImpl::MakeContext( impl->initGPUObjects(); return std::make_unique<PLSRenderContext>(std::move(impl)); } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/vulkan/vkutil.cpp b/renderer/src/vulkan/vkutil.cpp similarity index 97% rename from pls/renderer/vulkan/vkutil.cpp rename to renderer/src/vulkan/vkutil.cpp index eb66d6b8..ae826c15 100644 --- a/pls/renderer/vulkan/vkutil.cpp +++ b/renderer/src/vulkan/vkutil.cpp @@ -2,12 +2,12 @@ * Copyright 2023 Rive */ -#include "rive/pls/vulkan/vkutil.hpp" +#include "rive/renderer/vulkan/vkutil.hpp" #include "rive/rive_types.hpp" -#include "rive/pls/vulkan/vulkan_context.hpp" +#include "rive/renderer/vulkan/vulkan_context.hpp" -namespace rive::pls::vkutil +namespace rive::gpu::vkutil { void vkutil::RenderingResource::onRefCntReachedZero() const { @@ -99,7 +99,7 @@ BufferRing::BufferRing(rcp<VulkanContext> vk, .size = size, .usage = usage, }; - for (int i = 0; i < pls::kBufferRingSize; ++i) + for (int i = 0; i < gpu::kBufferRingSize; ++i) { m_buffers[i] = vk->makeBuffer(bufferCreateInfo, mappability); } @@ -237,4 +237,4 @@ Framebuffer::Framebuffer(rcp<VulkanContext> vk, const VkFramebufferCreateInfo& i } Framebuffer::~Framebuffer() { m_vk->DestroyFramebuffer(m_vk->device, 
m_vkFramebuffer, nullptr); } -} // namespace rive::pls::vkutil +} // namespace rive::gpu::vkutil diff --git a/pls/renderer/vulkan/vulkan_context.cpp b/renderer/src/vulkan/vulkan_context.cpp similarity index 98% rename from pls/renderer/vulkan/vulkan_context.cpp rename to renderer/src/vulkan/vulkan_context.cpp index 9c972fdd..71bf42cc 100644 --- a/pls/renderer/vulkan/vulkan_context.cpp +++ b/renderer/src/vulkan/vulkan_context.cpp @@ -2,11 +2,11 @@ * Copyright 2023 Rive */ -#include "rive/pls/vulkan/vulkan_context.hpp" +#include "rive/renderer/vulkan/vulkan_context.hpp" #include "rive/rive_types.hpp" -namespace rive::pls +namespace rive::gpu { static VmaAllocator make_vma_allocator(VmaAllocatorCreateInfo vmaCreateInfo) { @@ -87,8 +87,8 @@ void VulkanContext::shutdown() m_shutdown = true; // Validate m_resourcePurgatory: We shouldn't have any resources queued up with - // larger expirations than "pls::kBufferRingSize" frames. - for (size_t i = 0; i < pls::kBufferRingSize; ++i) + // larger expirations than "gpu::kBufferRingSize" frames. + for (size_t i = 0; i < gpu::kBufferRingSize; ++i) { onNewFrameBegun(); } @@ -355,4 +355,4 @@ void VulkanContext::blitSubRect(VkCommandBuffer commandBuffer, &imageBlit, VK_FILTER_NEAREST); } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/vulkan/vulkan_memory_allocator.cpp b/renderer/src/vulkan/vulkan_memory_allocator.cpp similarity index 100% rename from pls/renderer/vulkan/vulkan_memory_allocator.cpp rename to renderer/src/vulkan/vulkan_memory_allocator.cpp diff --git a/pls/renderer/webgpu/em_js_handle.cpp b/renderer/src/webgpu/em_js_handle.cpp similarity index 98% rename from pls/renderer/webgpu/em_js_handle.cpp rename to renderer/src/webgpu/em_js_handle.cpp index ed6559fe..b261b177 100644 --- a/pls/renderer/webgpu/em_js_handle.cpp +++ b/renderer/src/webgpu/em_js_handle.cpp @@ -2,7 +2,7 @@ * Copyright 2023 Rive */ -#include "rive/pls/webgpu/em_js_handle.hpp" +#include "rive/renderer/webgpu/em_js_handle.hpp" EmJsHandle& EmJsHandle::operator=(EmJsHandle&& rhs) { diff --git a/pls/renderer/webgpu/pls_render_context_webgpu_impl.cpp b/renderer/src/webgpu/render_context_webgpu_impl.cpp similarity index 95% rename from pls/renderer/webgpu/pls_render_context_webgpu_impl.cpp rename to renderer/src/webgpu/render_context_webgpu_impl.cpp index edff0185..a0524f5c 100644 --- a/pls/renderer/webgpu/pls_render_context_webgpu_impl.cpp +++ b/renderer/src/webgpu/render_context_webgpu_impl.cpp @@ -2,9 +2,9 @@ * Copyright 2023 Rive */ -#include "rive/pls/webgpu/pls_render_context_webgpu_impl.hpp" +#include "rive/renderer/webgpu/render_context_webgpu_impl.hpp" -#include "rive/pls/pls_image.hpp" +#include "rive/renderer/image.hpp" #include "shaders/constants.glsl" #include "generated/shaders/spirv/color_ramp.vert.h" @@ -67,7 +67,7 @@ static void write_buffer(wgpu::Queue queue, wgpu::Buffer buffer, const void* dat #endif #ifdef RIVE_WEBGPU -#include "pls_render_context_webgpu_vulkan.hpp" +#include "render_context_webgpu_vulkan.hpp" #include #include #include @@ -140,7 +140,7 @@ static void write_buffer(wgpu::Queue queue, wgpu::Buffer buffer, const void* dat } #endif -namespace rive::pls +namespace rive::gpu { // Draws emulated render-pass load/store actions for EXT_shader_pixel_local_storage. 
class PLSRenderContextWebGPUImpl::LoadStoreEXTPipeline @@ -296,7 +296,7 @@ class PLSRenderContextWebGPUImpl::ColorRampPipeline }; wgpu::VertexBufferLayout vertexBufferLayout = { - .arrayStride = sizeof(pls::GradientSpan), + .arrayStride = sizeof(gpu::GradientSpan), .stepMode = wgpu::VertexStepMode::Instance, .attributeCount = std::size(attrs), .attributes = attrs, @@ -467,7 +467,7 @@ class PLSRenderContextWebGPUImpl::TessellatePipeline }; wgpu::VertexBufferLayout vertexBufferLayout = { - .arrayStride = sizeof(pls::TessVertexSpan), + .arrayStride = sizeof(gpu::TessVertexSpan), .stepMode = wgpu::VertexStepMode::Instance, .attributeCount = std::size(attrs), .attributes = attrs, @@ -526,7 +526,7 @@ class PLSRenderContextWebGPUImpl::DrawPipeline public: DrawPipeline(PLSRenderContextWebGPUImpl* context, DrawType drawType, - pls::ShaderFeatures shaderFeatures, + gpu::ShaderFeatures shaderFeatures, const ContextOptions& contextOptions) { PixelLocalStorageType plsType = context->m_contextOptions.plsType; @@ -592,12 +592,12 @@ class PLSRenderContextWebGPUImpl::DrawPipeline RIVE_UNREACHABLE(); case DrawType::imageMesh: break; - case DrawType::plsAtomicInitialize: - case DrawType::plsAtomicResolve: + case DrawType::gpuAtomicInitialize: + case DrawType::gpuAtomicResolve: case DrawType::stencilClipReset: RIVE_UNREACHABLE(); } - for (size_t i = 0; i < pls::kShaderFeatureCount; ++i) + for (size_t i = 0; i < gpu::kShaderFeatureCount; ++i) { ShaderFeatures feature = static_cast<ShaderFeatures>(1 << i); if (shaderFeatures & feature) @@ -605,12 +605,12 @@ class PLSRenderContextWebGPUImpl::DrawPipeline addDefine(GetShaderFeatureGLSLName(feature)); } } - glsl << pls::glsl::glsl << '\n'; - glsl << pls::glsl::constants << '\n'; - glsl << pls::glsl::common << '\n'; + glsl << gpu::glsl::glsl << '\n'; + glsl << gpu::glsl::constants << '\n'; + glsl << gpu::glsl::common << '\n'; if (shaderFeatures & ShaderFeatures::ENABLE_ADVANCED_BLEND) { - glsl << pls::glsl::advanced_blend << '\n'; + glsl << gpu::glsl::advanced_blend << '\n'; } if (context->platformFeatures().avoidFlatVaryings) { @@ -625,13 +625,13 @@ class PLSRenderContextWebGPUImpl::DrawPipeline case DrawType::midpointFanPatches: case DrawType::outerCurvePatches: addDefine(GLSL_DRAW_PATH); - glsl << pls::glsl::draw_path_common << '\n'; - glsl << pls::glsl::draw_path << '\n'; + glsl << gpu::glsl::draw_path_common << '\n'; + glsl << gpu::glsl::draw_path << '\n'; break; case DrawType::interiorTriangulation: addDefine(GLSL_DRAW_INTERIOR_TRIANGLES); - glsl << pls::glsl::draw_path_common << '\n'; - glsl << pls::glsl::draw_path << '\n'; + glsl << gpu::glsl::draw_path_common << '\n'; + glsl << gpu::glsl::draw_path << '\n'; break; case DrawType::imageRect: addDefine(GLSL_DRAW_IMAGE); @@ -640,13 +640,13 @@ class PLSRenderContextWebGPUImpl::DrawPipeline case DrawType::imageMesh: addDefine(GLSL_DRAW_IMAGE); addDefine(GLSL_DRAW_IMAGE_MESH); - glsl << pls::glsl::draw_image_mesh << '\n'; + glsl << gpu::glsl::draw_image_mesh << '\n'; break; - case DrawType::plsAtomicInitialize: + case DrawType::gpuAtomicInitialize: addDefine(GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS); addDefine(GLSL_INITIALIZE_PLS); RIVE_UNREACHABLE(); - case DrawType::plsAtomicResolve: + case DrawType::gpuAtomicResolve: addDefine(GLSL_DRAW_RENDER_TARGET_UPDATE_BOUNDS); addDefine(GLSL_RESOLVE_PLS); RIVE_UNREACHABLE(); @@ -709,8 +709,8 @@ class PLSRenderContextWebGPUImpl::DrawPipeline draw_image_mesh_frag, std::size(draw_image_mesh_frag)); break; - case 
DrawType::gpuAtomicInitialize: + case DrawType::gpuAtomicResolve: case DrawType::stencilClipReset: RIVE_UNREACHABLE(); } @@ -887,7 +887,7 @@ void PLSRenderContextWebGPUImpl::initGPUObjects() { .type = wgpu::BufferBindingType::Uniform, .hasDynamicOffset = true, - .minBindingSize = sizeof(pls::ImageDrawUniforms), + .minBindingSize = sizeof(gpu::ImageDrawUniforms), }, }, }; @@ -1024,13 +1024,13 @@ void PLSRenderContextWebGPUImpl::initGPUObjects() wgpu::BufferDescriptor tessSpanIndexBufferDesc = { .usage = wgpu::BufferUsage::Index, - .size = sizeof(pls::kTessSpanIndices), + .size = sizeof(gpu::kTessSpanIndices), .mappedAtCreation = true, }; m_tessSpanIndexBuffer = m_device.CreateBuffer(&tessSpanIndexBufferDesc); memcpy(m_tessSpanIndexBuffer.GetMappedRange(), - pls::kTessSpanIndices, - sizeof(pls::kTessSpanIndices)); + gpu::kTessSpanIndices, + sizeof(gpu::kTessSpanIndices)); m_tessSpanIndexBuffer.Unmap(); wgpu::BufferDescriptor patchBufferDesc = { @@ -1121,7 +1121,7 @@ class RenderBufferWebGPUImpl : public RenderBuffer m_queue(queue) { bool mappedOnceAtInitialization = flags() & RenderBufferFlags::mappedOnceAtInitialization; - int bufferCount = mappedOnceAtInitialization ? 1 : pls::kBufferRingSize; + int bufferCount = mappedOnceAtInitialization ? 1 : gpu::kBufferRingSize; wgpu::BufferDescriptor desc = { .usage = type() == RenderBufferType::index ? wgpu::BufferUsage::Index : wgpu::BufferUsage::Vertex, @@ -1144,7 +1144,7 @@ class RenderBufferWebGPUImpl : public RenderBuffer protected: void* onMap() override { - m_submittedBufferIdx = (m_submittedBufferIdx + 1) % pls::kBufferRingSize; + m_submittedBufferIdx = (m_submittedBufferIdx + 1) % gpu::kBufferRingSize; assert(m_buffers[m_submittedBufferIdx] != nullptr); if (flags() & RenderBufferFlags::mappedOnceAtInitialization) { @@ -1178,7 +1178,7 @@ class RenderBufferWebGPUImpl : public RenderBuffer private: const wgpu::Device m_device; const wgpu::Queue m_queue; - wgpu::Buffer m_buffers[pls::kBufferRingSize]; + wgpu::Buffer m_buffers[gpu::kBufferRingSize]; int m_submittedBufferIdx = -1; std::unique_ptr m_stagingBuffer; }; @@ -1265,7 +1265,7 @@ class BufferWebGPU : public BufferRing .usage = wgpu::BufferUsage::CopyDst | usage, .size = capacityInBytes, }; - for (int i = 0; i < pls::kBufferRingSize; ++i) + for (int i = 0; i < gpu::kBufferRingSize; ++i) { m_buffers[i] = device.CreateBuffer(&desc); } @@ -1286,15 +1286,15 @@ class BufferWebGPU : public BufferRing }; // GL TextureFormat to use for a texture that polyfills a storage buffer. 
-static wgpu::TextureFormat storage_texture_format(pls::StorageBufferStructure bufferStructure) +static wgpu::TextureFormat storage_texture_format(gpu::StorageBufferStructure bufferStructure) { switch (bufferStructure) { - case pls::StorageBufferStructure::uint32x4: + case gpu::StorageBufferStructure::uint32x4: return wgpu::TextureFormat::RGBA32Uint; - case pls::StorageBufferStructure::uint32x2: + case gpu::StorageBufferStructure::uint32x2: return wgpu::TextureFormat::RG32Uint; - case pls::StorageBufferStructure::float32x4: + case gpu::StorageBufferStructure::float32x4: return wgpu::TextureFormat::RGBA32Float; } RIVE_UNREACHABLE(); @@ -1307,16 +1307,16 @@ class StorageTextureBufferWebGPU : public BufferWebGPU StorageTextureBufferWebGPU(wgpu::Device device, wgpu::Queue queue, size_t capacityInBytes, - pls::StorageBufferStructure bufferStructure) : + gpu::StorageBufferStructure bufferStructure) : BufferWebGPU(device, queue, - pls::StorageTextureBufferSize(capacityInBytes, bufferStructure), + gpu::StorageTextureBufferSize(capacityInBytes, bufferStructure), wgpu::BufferUsage::CopySrc), m_bufferStructure(bufferStructure) { // Create a texture to mirror the buffer contents. auto [textureWidth, textureHeight] = - pls::StorageTextureSize(this->capacityInBytes(), bufferStructure); + gpu::StorageTextureSize(this->capacityInBytes(), bufferStructure); wgpu::TextureDescriptor desc{ .usage = wgpu::TextureUsage::TextureBinding | wgpu::TextureUsage::CopyDst, @@ -1333,13 +1333,13 @@ class StorageTextureBufferWebGPU : public BufferWebGPU wgpu::CommandEncoder encoder) const { auto [updateWidth, updateHeight] = - pls::StorageTextureSize(bindingSizeInBytes, m_bufferStructure); + gpu::StorageTextureSize(bindingSizeInBytes, m_bufferStructure); wgpu::ImageCopyBuffer srcBuffer = { .layout = { .offset = offsetSizeInBytes, .bytesPerRow = (STORAGE_TEXTURE_WIDTH * - pls::StorageBufferElementSizeInBytes(m_bufferStructure)), + gpu::StorageBufferElementSizeInBytes(m_bufferStructure)), }, .buffer = submittedBuffer(), }; @@ -1376,7 +1376,7 @@ std::unique_ptr<BufferRing> PLSRenderContextWebGPUImpl::makeUniformBufferRing( std::unique_ptr<BufferRing> PLSRenderContextWebGPUImpl::makeStorageBufferRing( size_t capacityInBytes, - pls::StorageBufferStructure bufferStructure) + gpu::StorageBufferStructure bufferStructure) { if (m_contextOptions.disableStorageBuffers) { @@ -1443,7 +1443,7 @@ void PLSRenderContextWebGPUImpl::resizeTessellationTexture(uint32_t width, uint3 } wgpu::RenderPipeline PLSRenderContextWebGPUImpl::makePLSDrawPipeline( - rive::pls::DrawType drawType, + rive::gpu::DrawType drawType, wgpu::TextureFormat framebufferFormat, wgpu::ShaderModule vertexShader, wgpu::ShaderModule fragmentShader, @@ -1471,7 +1471,7 @@ wgpu::RenderPipeline PLSRenderContextWebGPUImpl::makePLSDrawPipeline( vertexBufferLayouts = { { - .arrayStride = sizeof(pls::PatchVertex), + .arrayStride = sizeof(gpu::PatchVertex), .stepMode = wgpu::VertexStepMode::Vertex, .attributeCount = std::size(attrs), .attributes = attrs.data(), @@ -1491,7 +1491,7 @@ wgpu::RenderPipeline PLSRenderContextWebGPUImpl::makePLSDrawPipeline( vertexBufferLayouts = { { - .arrayStride = sizeof(pls::TriangleVertex), + .arrayStride = sizeof(gpu::TriangleVertex), .stepMode = wgpu::VertexStepMode::Vertex, .attributeCount = std::size(attrs), .attributes = attrs.data(), @@ -1530,8 +1530,8 @@ wgpu::RenderPipeline PLSRenderContextWebGPUImpl::makePLSDrawPipeline( }, }; break; - case DrawType::plsAtomicInitialize: - case DrawType::plsAtomicResolve: + case DrawType::gpuAtomicInitialize: + case 
DrawType::gpuAtomicResolve: case DrawType::stencilClipReset: RIVE_UNREACHABLE(); } @@ -1665,22 +1665,22 @@ void PLSRenderContextWebGPUImpl::flush(const FlushDescriptor& desc) { if (desc.pathCount > 0) { - update_webgpu_storage_texture(pathBufferRing(), + update_webgpu_storage_texture(pathBufferRing(), desc.pathCount, desc.firstPath, encoder); - update_webgpu_storage_texture(paintBufferRing(), + update_webgpu_storage_texture(paintBufferRing(), desc.pathCount, desc.firstPaint, encoder); - update_webgpu_storage_texture(paintAuxBufferRing(), + update_webgpu_storage_texture(paintAuxBufferRing(), desc.pathCount, desc.firstPaintAux, encoder); } if (desc.contourCount > 0) { - update_webgpu_storage_texture(contourBufferRing(), + update_webgpu_storage_texture(contourBufferRing(), desc.contourCount, desc.firstContour, encoder); @@ -1721,14 +1721,14 @@ void PLSRenderContextWebGPUImpl::flush(const FlushDescriptor& desc) wgpu::RenderPassEncoder gradPass = encoder.BeginRenderPass(&gradPassDesc); gradPass.SetViewport(0.f, static_cast(desc.complexGradRowsTop), - pls::kGradTextureWidth, + gpu::kGradTextureWidth, static_cast(desc.complexGradRowsHeight), 0.0, 1.0); gradPass.SetPipeline(m_colorRampPipeline->renderPipeline()); gradPass.SetVertexBuffer(0, webgpu_buffer(gradSpanBufferRing()), - desc.firstComplexGradSpan * sizeof(pls::GradientSpan)); + desc.firstComplexGradSpan * sizeof(gpu::GradientSpan)); gradPass.SetBindGroup(0, bindings); gradPass.Draw(4, desc.complexGradSpanCount, 0, 0); gradPass.End(); @@ -1741,7 +1741,7 @@ void PLSRenderContextWebGPUImpl::flush(const FlushDescriptor& desc) .layout = { .offset = desc.simpleGradDataOffsetInBytes, - .bytesPerRow = pls::kGradTextureWidth * 4, + .bytesPerRow = gpu::kGradTextureWidth * 4, }, .buffer = webgpu_buffer(simpleColorRampsBufferRing()), }; @@ -1768,7 +1768,7 @@ void PLSRenderContextWebGPUImpl::flush(const FlushDescriptor& desc) wgpu::BindGroupEntry{ .binding = PATH_BUFFER_IDX, .buffer = webgpu_buffer(pathBufferRing()), - .offset = desc.firstPath * sizeof(pls::PathData), + .offset = desc.firstPath * sizeof(gpu::PathData), }, m_contextOptions.disableStorageBuffers ? wgpu::BindGroupEntry{ @@ -1778,7 +1778,7 @@ void PLSRenderContextWebGPUImpl::flush(const FlushDescriptor& desc) wgpu::BindGroupEntry{ .binding = CONTOUR_BUFFER_IDX, .buffer = webgpu_buffer(contourBufferRing()), - .offset = desc.firstContour * sizeof(pls::ContourData), + .offset = desc.firstContour * sizeof(gpu::ContourData), }, { .binding = FLUSH_UNIFORM_BUFFER_IDX, @@ -1810,17 +1810,17 @@ void PLSRenderContextWebGPUImpl::flush(const FlushDescriptor& desc) wgpu::RenderPassEncoder tessPass = encoder.BeginRenderPass(&tessPassDesc); tessPass.SetViewport(0.f, 0.f, - pls::kTessTextureWidth, + gpu::kTessTextureWidth, static_cast(desc.tessDataHeight), 0.0, 1.0); tessPass.SetPipeline(m_tessellatePipeline->renderPipeline()); tessPass.SetVertexBuffer(0, webgpu_buffer(tessSpanBufferRing()), - desc.firstTessVertexSpan * sizeof(pls::TessVertexSpan)); + desc.firstTessVertexSpan * sizeof(gpu::TessVertexSpan)); tessPass.SetIndexBuffer(m_tessSpanIndexBuffer, wgpu::IndexFormat::Uint16); tessPass.SetBindGroup(0, bindings); - tessPass.DrawIndexed(std::size(pls::kTessSpanIndices), desc.tessVertexSpanCount, 0, 0, 0); + tessPass.DrawIndexed(std::size(gpu::kTessSpanIndices), desc.tessVertexSpanCount, 0, 0, 0); tessPass.End(); } @@ -1881,7 +1881,7 @@ void PLSRenderContextWebGPUImpl::flush(const FlushDescriptor& desc) // Draw the load action for EXT_shader_pixel_local_storage. 
std::array<float, 4> clearColor; - LoadStoreActionsEXT loadActions = pls::BuildLoadActionsEXT(desc, &clearColor); + LoadStoreActionsEXT loadActions = gpu::BuildLoadActionsEXT(desc, &clearColor); const LoadStoreEXTPipeline& loadPipeline = m_loadStoreEXTPipelines .try_emplace(loadActions, this, loadActions, renderTarget->framebufferFormat()) @@ -1933,7 +1933,7 @@ void PLSRenderContextWebGPUImpl::flush(const FlushDescriptor& desc) wgpu::BindGroupEntry{ .binding = PATH_BUFFER_IDX, .buffer = webgpu_buffer(pathBufferRing()), - .offset = desc.firstPath * sizeof(pls::PathData), + .offset = desc.firstPath * sizeof(gpu::PathData), }, m_contextOptions.disableStorageBuffers ? wgpu::BindGroupEntry{ @@ -1943,7 +1943,7 @@ void PLSRenderContextWebGPUImpl::flush(const FlushDescriptor& desc) wgpu::BindGroupEntry{ .binding = PAINT_BUFFER_IDX, .buffer = webgpu_buffer(paintBufferRing()), - .offset = desc.firstPaint * sizeof(pls::PaintData), + .offset = desc.firstPaint * sizeof(gpu::PaintData), }, m_contextOptions.disableStorageBuffers ? wgpu::BindGroupEntry{ @@ -1953,7 +1953,7 @@ void PLSRenderContextWebGPUImpl::flush(const FlushDescriptor& desc) wgpu::BindGroupEntry{ .binding = PAINT_AUX_BUFFER_IDX, .buffer = webgpu_buffer(paintAuxBufferRing()), - .offset = desc.firstPaintAux * sizeof(pls::PaintAuxData), + .offset = desc.firstPaintAux * sizeof(gpu::PaintAuxData), }, m_contextOptions.disableStorageBuffers ? wgpu::BindGroupEntry{ @@ -1963,7 +1963,7 @@ void PLSRenderContextWebGPUImpl::flush(const FlushDescriptor& desc) wgpu::BindGroupEntry{ .binding = CONTOUR_BUFFER_IDX, .buffer = webgpu_buffer(contourBufferRing()), - .offset = desc.firstContour * sizeof(pls::ContourData), + .offset = desc.firstContour * sizeof(gpu::ContourData), }, { .binding = FLUSH_UNIFORM_BUFFER_IDX, @@ -1973,7 +1973,7 @@ void PLSRenderContextWebGPUImpl::flush(const FlushDescriptor& desc) { .binding = IMAGE_DRAW_UNIFORM_BUFFER_IDX, .buffer = webgpu_buffer(imageDrawUniformBufferRing()), - .size = sizeof(pls::ImageDrawUniforms), + .size = sizeof(gpu::ImageDrawUniforms), }, }; @@ -2033,10 +2033,10 @@ void PLSRenderContextWebGPUImpl::flush(const FlushDescriptor& desc) // Setup the pipeline for this specific drawType and shaderFeatures. const DrawPipeline& drawPipeline = m_drawPipelines - .try_emplace(pls::ShaderUniqueKey(drawType, + .try_emplace(gpu::ShaderUniqueKey(drawType, batch.shaderFeatures, - pls::InterlockMode::rasterOrdering, - pls::ShaderMiscFlags::none), + gpu::InterlockMode::rasterOrdering, + gpu::ShaderMiscFlags::none), this, drawType, batch.shaderFeatures, @@ -2052,9 +2052,9 @@ void PLSRenderContextWebGPUImpl::flush(const FlushDescriptor& desc) // Draw PLS patches that connect the tessellation vertices. 
drawPass.SetVertexBuffer(0, m_pathPatchVertexBuffer); drawPass.SetIndexBuffer(m_pathPatchIndexBuffer, wgpu::IndexFormat::Uint16); - drawPass.DrawIndexed(pls::PatchIndexCount(drawType), + drawPass.DrawIndexed(gpu::PatchIndexCount(drawType), batch.elementCount, - pls::PatchBaseIndex(drawType), + gpu::PatchBaseIndex(drawType), 0, batch.baseElement); break; @@ -2078,8 +2078,8 @@ void PLSRenderContextWebGPUImpl::flush(const FlushDescriptor& desc) drawPass.DrawIndexed(batch.elementCount, 1, batch.baseElement); break; } - case DrawType::plsAtomicInitialize: - case DrawType::plsAtomicResolve: + case DrawType::gpuAtomicInitialize: + case DrawType::gpuAtomicResolve: case DrawType::stencilClipReset: RIVE_UNREACHABLE(); } @@ -2110,7 +2110,7 @@ std::unique_ptr<PLSRenderContext> PLSRenderContextWebGPUImpl::MakeContext( wgpu::Device device, wgpu::Queue queue, const ContextOptions& contextOptions, - const pls::PlatformFeatures& baselinePlatformFeatures) + const gpu::PlatformFeatures& baselinePlatformFeatures) { std::unique_ptr<PLSRenderContextWebGPUImpl> impl; switch (contextOptions.plsType) @@ -2136,4 +2136,4 @@ std::unique_ptr<PLSRenderContext> PLSRenderContextWebGPUImpl::MakeContext( impl->initGPUObjects(); return std::make_unique<PLSRenderContext>(std::move(impl)); } -} // namespace rive::pls +} // namespace rive::gpu diff --git a/pls/renderer/webgpu/pls_render_context_webgpu_vulkan.cpp b/renderer/src/webgpu/render_context_webgpu_vulkan.cpp similarity index 98% rename from pls/renderer/webgpu/pls_render_context_webgpu_vulkan.cpp rename to renderer/src/webgpu/render_context_webgpu_vulkan.cpp index 978f7d59..86843764 100644 --- a/pls/renderer/webgpu/pls_render_context_webgpu_vulkan.cpp +++ b/renderer/src/webgpu/render_context_webgpu_vulkan.cpp @@ -4,7 +4,7 @@ #ifdef RIVE_WEBGPU -#include "pls_render_context_webgpu_vulkan.hpp" +#include "render_context_webgpu_vulkan.hpp" #include "shaders/constants.glsl" @@ -12,7 +12,7 @@ #include #include -namespace rive::pls +namespace rive::gpu { // Create a group for binding PLS textures as Vulkan input attachments. The "inputTexture" property // is nonstandard WebGPU. 
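Both backends in this patch build draw pipelines lazily through the same idiom: pack (drawType, shaderFeatures, interlockMode, shaderMiscFlags) into a gpu::ShaderUniqueKey, then let map::try_emplace construct the pipeline only on a cache miss, so each shader permutation compiles at most once. Below is a rough sketch of the idiom with stub types; the key layout is an assumption for illustration, not the renderer's actual bit assignment:

    #include <cstdint>
    #include <map>

    // Stub pipeline; the real DrawPipeline compiles shader modules in its constructor.
    struct DrawPipelineStub
    {
        DrawPipelineStub(uint32_t drawType, uint32_t featureBits) {}
    };

    // Illustrative key packing: give each field its own bit range so distinct
    // permutations map to distinct keys.
    constexpr uint32_t shaderUniqueKey(uint32_t drawType,
                                       uint32_t featureBits,
                                       uint32_t interlockMode,
                                       uint32_t miscFlags)
    {
        return (drawType << 24) | (featureBits << 8) | (interlockMode << 4) | miscFlags;
    }

    std::map<uint32_t, DrawPipelineStub> drawPipelines;

    // try_emplace only runs the constructor when the key is absent; on a hit it
    // returns the cached pipeline untouched.
    DrawPipelineStub& getPipeline(uint32_t drawType,
                                  uint32_t featureBits,
                                  uint32_t interlockMode,
                                  uint32_t miscFlags)
    {
        uint32_t key = shaderUniqueKey(drawType, featureBits, interlockMode, miscFlags);
        return drawPipelines.try_emplace(key, drawType, featureBits).first->second;
    }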
@@ -78,7 +78,7 @@ rcp PLSRenderContextWebGPUVulkan::makeRenderTarget( EM_JS(int, make_pls_draw_pipeline, (int device, - rive::pls::DrawType drawType, + rive::gpu::DrawType drawType, wgpu::TextureFormat framebufferFormat, int vertexShader, int fragmentShader, @@ -211,7 +211,7 @@ EM_JS(int, }); wgpu::RenderPipeline PLSRenderContextWebGPUVulkan::makePLSDrawPipeline( - rive::pls::DrawType drawType, + rive::gpu::DrawType drawType, wgpu::TextureFormat framebufferFormat, wgpu::ShaderModule vertexShader, wgpu::ShaderModule fragmentShader, @@ -321,6 +321,6 @@ wgpu::RenderPassEncoder PLSRenderContextWebGPUVulkan::makePLSRenderPass( return wgpu::RenderPassEncoder::Acquire( emscripten_webgpu_import_render_pass_encoder(renderPassJSHandleIfNeeded->get())); } -} // namespace rive::pls +} // namespace rive::gpu #endif diff --git a/pls/renderer/webgpu/pls_render_context_webgpu_vulkan.hpp b/renderer/src/webgpu/render_context_webgpu_vulkan.hpp similarity index 88% rename from pls/renderer/webgpu/pls_render_context_webgpu_vulkan.hpp rename to renderer/src/webgpu/render_context_webgpu_vulkan.hpp index 70e37a49..37b72ee1 100644 --- a/pls/renderer/webgpu/pls_render_context_webgpu_vulkan.hpp +++ b/renderer/src/webgpu/render_context_webgpu_vulkan.hpp @@ -6,9 +6,9 @@ #ifdef RIVE_WEBGPU -#include "rive/pls/webgpu/pls_render_context_webgpu_impl.hpp" +#include "rive/renderer/webgpu/render_context_webgpu_impl.hpp" -namespace rive::pls +namespace rive::gpu { // WebGPU implementation that uses Vulkan input attachments, // VK_EXT_rasterization_order_attachment_access, and subpassLoad() for pixel local storage. These @@ -23,7 +23,7 @@ class PLSRenderContextWebGPUVulkan : public PLSRenderContextWebGPUImpl protected: wgpu::BindGroupLayout initPLSTextureBindGroup() override; - wgpu::RenderPipeline makePLSDrawPipeline(rive::pls::DrawType drawType, + wgpu::RenderPipeline makePLSDrawPipeline(rive::gpu::DrawType drawType, wgpu::TextureFormat framebufferFormat, wgpu::ShaderModule vertexShader, wgpu::ShaderModule fragmentShader, @@ -41,7 +41,7 @@ class PLSRenderContextWebGPUVulkan : public PLSRenderContextWebGPUImpl PLSRenderContextWebGPUVulkan(wgpu::Device device, wgpu::Queue queue, const ContextOptions& contextOptions, - const pls::PlatformFeatures& baselinePlatformFeatures) : + const gpu::PlatformFeatures& baselinePlatformFeatures) : PLSRenderContextWebGPUImpl(device, queue, contextOptions, baselinePlatformFeatures) { assert(contextOptions.plsType == PixelLocalStorageType::subpassLoad); @@ -49,6 +49,6 @@ class PLSRenderContextWebGPUVulkan : public PLSRenderContextWebGPUImpl EmJsHandle m_plsTextureBindGroupJSHandle; }; -} // namespace rive::pls +} // namespace rive::gpu #endif diff --git a/pls/webgpu_player/icons.html b/renderer/webgpu_player/icons.html similarity index 100% rename from pls/webgpu_player/icons.html rename to renderer/webgpu_player/icons.html diff --git a/pls/webgpu_player/index.html b/renderer/webgpu_player/index.html similarity index 100% rename from pls/webgpu_player/index.html rename to renderer/webgpu_player/index.html diff --git a/pls/webgpu_player/rive.js b/renderer/webgpu_player/rive.js similarity index 100% rename from pls/webgpu_player/rive.js rename to renderer/webgpu_player/rive.js diff --git a/pls/webgpu_player/webgpu_player.cpp b/renderer/webgpu_player/webgpu_player.cpp similarity index 95% rename from pls/webgpu_player/webgpu_player.cpp rename to renderer/webgpu_player/webgpu_player.cpp index c098f228..401181c5 100644 --- a/pls/webgpu_player/webgpu_player.cpp +++ 
b/renderer/webgpu_player/webgpu_player.cpp @@ -9,10 +9,10 @@ #include "rive/animation/linear_animation_instance.hpp" #include "rive/animation/state_machine_input_instance.hpp" #include "rive/animation/state_machine_instance.hpp" -#include "rive/pls/pls_render_context.hpp" -#include "rive/pls/pls_renderer.hpp" -#include "rive/pls/webgpu/pls_render_context_webgpu_impl.hpp" -#include "rive/pls/webgpu/em_js_handle.hpp" +#include "rive/renderer/render_context.hpp" +#include "rive/renderer/rive_renderer.hpp" +#include "rive/renderer/webgpu/render_context_webgpu_impl.hpp" +#include "rive/renderer/webgpu/em_js_handle.hpp" #include #include @@ -31,7 +31,7 @@ #endif using namespace rive; -using namespace rive::pls; +using namespace rive::gpu; using PixelLocalStorageType = PLSRenderContextWebGPUImpl::PixelLocalStorageType; static std::unique_ptr<PLSRenderContext> s_plsContext; @@ -42,13 +42,13 @@ void riveInitPlayer(int w, int h, wgpu::Device gpu, wgpu::Queue queue, - const pls::PlatformFeatures& platformFeatures, + const gpu::PlatformFeatures& platformFeatures, PixelLocalStorageType plsType, int maxVertexStorageBlocks) { PLSRenderContextWebGPUImpl::ContextOptions contextOptions = { .plsType = plsType, - .disableStorageBuffers = maxVertexStorageBlocks < pls::kMaxStorageBuffers, + .disableStorageBuffers = maxVertexStorageBlocks < gpu::kMaxStorageBuffers, }; s_plsContext = PLSRenderContextWebGPUImpl::MakeContext(gpu, queue, contextOptions, platformFeatures); @@ -56,7 +56,7 @@ void riveInitPlayer(int w, wgpu::TextureFormat::BGRA8Unorm, w, h); - s_renderer = std::make_unique<PLSRenderer>(s_plsContext.get()); + s_renderer = std::make_unique<RiveRenderer>(s_plsContext.get()); } #ifdef RIVE_WEBGPU @@ -77,7 +77,7 @@ extern "C" { s_deviceHandle = EmJsHandle(deviceID); s_queueHandle = EmJsHandle(queueID); - pls::PlatformFeatures platformFeatures; + gpu::PlatformFeatures platformFeatures; if (invertedY) { platformFeatures.uninvertOnScreenY = true; @@ -103,7 +103,7 @@ extern "C" s_plsContext->beginFrame({ .renderTargetWidth = s_renderTarget->width(), .renderTargetHeight = s_renderTarget->height(), - .loadAction = static_cast<pls::LoadAction>(loadAction), + .loadAction = static_cast<gpu::LoadAction>(loadAction), .clearColor = clearColor, }); @@ -191,7 +191,7 @@ extern "C" float alignmentY) { auto artboard = reinterpret_cast(nativePtr); - auto renderer = reinterpret_cast<PLSRenderer*>(rendererNativePtr); + auto renderer = reinterpret_cast<RiveRenderer*>(rendererNativePtr); auto fit = static_cast<Fit>(fitValue); Alignment alignment = {alignmentX, alignmentY}; AABB frame = {frameLeft, frameTop, frameRight, frameBot}; @@ -265,7 +265,7 @@ extern "C" intptr_t rendererNativePtr) { auto stateMachine = reinterpret_cast(nativePtr); - auto renderer = reinterpret_cast<PLSRenderer*>(rendererNativePtr); + auto renderer = reinterpret_cast<RiveRenderer*>(rendererNativePtr); stateMachine->draw(renderer); } @@ -286,7 +286,7 @@ extern "C" intptr_t rendererNativePtr) { auto animation = reinterpret_cast(nativePtr); - auto renderer = reinterpret_cast<PLSRenderer*>(rendererNativePtr); + auto renderer = reinterpret_cast<RiveRenderer*>(rendererNativePtr); animation->draw(renderer); } @@ -298,19 +298,19 @@ extern "C" void EMSCRIPTEN_KEEPALIVE Renderer_save(intptr_t nativePtr) { - auto renderer = reinterpret_cast<PLSRenderer*>(nativePtr); + auto renderer = reinterpret_cast<RiveRenderer*>(nativePtr); renderer->save(); } void EMSCRIPTEN_KEEPALIVE Renderer_restore(intptr_t nativePtr) { - auto renderer = reinterpret_cast<PLSRenderer*>(nativePtr); + auto renderer = reinterpret_cast<RiveRenderer*>(nativePtr); renderer->restore(); } void EMSCRIPTEN_KEEPALIVE Renderer_translate(intptr_t nativePtr, float x, float y) { - auto renderer = reinterpret_cast<PLSRenderer*>(nativePtr); + auto 
renderer = reinterpret_cast<RiveRenderer*>(nativePtr); renderer->translate(x, y); } @@ -322,7 +322,7 @@ extern "C" float tx, float ty) { - auto renderer = reinterpret_cast<PLSRenderer*>(nativePtr); + auto renderer = reinterpret_cast<RiveRenderer*>(nativePtr); Mat2D matrix(xx, xy, yx, yy, tx, ty); renderer->transform(matrix); }
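One pattern worth calling out in webgpu_player.cpp, since every exported function above repeats it: JavaScript holds C++ objects as opaque intptr_t handles, and each extern "C" entry point reinterpret_casts the handle back to the concrete type before calling into it. A self-contained sketch of the bridge follows; Renderer is a placeholder type here, and Renderer_create/Renderer_destroy are hypothetical helpers rather than exports taken from the patch:

    #include <cstdint>

    struct Renderer // placeholder for the real renderer class
    {
        void translate(float x, float y) { /* ... */ }
    };

    extern "C"
    {
        // The JS side stores the returned integer and passes it back on every call.
        intptr_t Renderer_create() { return reinterpret_cast<intptr_t>(new Renderer); }

        void Renderer_translate(intptr_t nativePtr, float x, float y)
        {
            reinterpret_cast<Renderer*>(nativePtr)->translate(x, y);
        }

        // Ownership stays on the C++ heap, so JS must free the handle explicitly.
        void Renderer_destroy(intptr_t nativePtr)
        {
            delete reinterpret_cast<Renderer*>(nativePtr);
        }
    }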