Merge pull request #79606 from clayjohn/ShaderRD-compilation-groups
Shader rd compilation groups
Commit 1c40263665
13 changed files with 355 additions and 140 deletions
@@ -619,6 +619,7 @@
<method name="shader_create_from_bytecode">
<return type="RID" />
<param index="0" name="binary_data" type="PackedByteArray" />
<param index="1" name="placeholder_rid" type="RID" default="RID()" />
<description>
Creates a new shader instance from a binary compiled shader. It can be accessed with the RID that is returned.
Once finished with your RID, you will want to free the RID using the RenderingDevice's [method free_rid] method. See also [method shader_compile_binary_from_spirv] and [method shader_create_from_spirv].

@@ -633,6 +634,12 @@
Once finished with your RID, you will want to free the RID using the RenderingDevice's [method free_rid] method. See also [method shader_compile_spirv_from_source] and [method shader_create_from_bytecode].
</description>
</method>
<method name="shader_create_placeholder">
<return type="RID" />
<description>
Create a placeholder RID by allocating an RID without initializing it for use in [method shader_create_from_bytecode]. This allows you to create an RID for a shader and pass it around, but defer compiling the shader to a later time.
</description>
</method>
<method name="shader_get_vertex_input_attribute_mask">
<return type="int" />
<param index="0" name="shader" type="RID" />
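For illustration only (not part of this patch), the deferred-compilation flow these methods describe could look roughly like this in engine code, assuming `stages` already holds compiled SPIR-V stage data and the shader name is arbitrary:

// Sketch: reserve an RID now, compile the shader later.
RenderingDevice *rd = RenderingDevice::get_singleton();
RID shader = rd->shader_create_placeholder(); // this RID can be handed around immediately
// ... later, when the shader is actually needed ...
Vector<uint8_t> bytecode = rd->shader_compile_binary_from_spirv(stages, "example_shader");
rd->shader_create_from_bytecode(bytecode, shader); // initializes the placeholder RID in place
// ... and once done with it (free_rid() from scripts, free() in C++):
rd->free(shader);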
@@ -4858,7 +4858,7 @@ Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Ve
return ret;
}

RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_shader_binary) {
RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_shader_binary, RID p_placeholder) {
const uint8_t *binptr = p_shader_binary.ptr();
uint32_t binsize = p_shader_binary.size();

@@ -5184,14 +5184,23 @@ RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_

ERR_FAIL_V_MSG(RID(), error_text);
}

RID id = shader_owner.make_rid(shader);
RID id;
if (p_placeholder.is_null()) {
id = shader_owner.make_rid(shader);
} else {
shader_owner.initialize_rid(p_placeholder, shader);
id = p_placeholder;
}
#ifdef DEV_ENABLED
set_resource_name(id, "RID:" + itos(id.get_id()));
#endif
return id;
}

RID RenderingDeviceVulkan::shader_create_placeholder() {
return shader_owner.allocate_rid();
}

uint32_t RenderingDeviceVulkan::shader_get_vertex_input_attribute_mask(RID p_shader) {
_THREAD_SAFE_METHOD_
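The implementation above leans on RID_Owner's two-step allocation. A minimal sketch of that pattern, with a hypothetical Thing type (illustrative, not from the patch):

struct Thing {
	int value = 0;
};
RID_Owner<Thing> thing_owner;

RID placeholder = thing_owner.allocate_rid(); // step 1: reserve the RID, no data behind it yet
Thing thing;
thing.value = 42;
thing_owner.initialize_rid(placeholder, thing); // step 2: fill in the data later
RID direct = thing_owner.make_rid(thing); // the usual one-step path, still available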
@@ -1135,7 +1135,8 @@ public:
virtual String shader_get_binary_cache_key() const;
virtual Vector<uint8_t> shader_compile_binary_from_spirv(const Vector<ShaderStageSPIRVData> &p_spirv, const String &p_shader_name = "");

virtual RID shader_create_from_bytecode(const Vector<uint8_t> &p_shader_binary);
virtual RID shader_create_from_bytecode(const Vector<uint8_t> &p_shader_binary, RID p_placeholder = RID());
virtual RID shader_create_placeholder();

virtual uint32_t shader_get_vertex_input_attribute_mask(RID p_shader);
@@ -779,6 +779,7 @@ void RenderForwardClustered::_fill_render_list(RenderListType p_render_list, con
scene_state.used_screen_texture = false;
scene_state.used_normal_texture = false;
scene_state.used_depth_texture = false;
scene_state.used_lightmap = false;
}
uint32_t lightmap_captures_used = 0;

@@ -994,6 +995,7 @@ void RenderForwardClustered::_fill_render_list(RenderListType p_render_list, con

if (uses_lightmap) {
surf->sort.uses_lightmap = 1;
scene_state.used_lightmap = true;
}

if (surf->flags & GeometryInstanceSurfaceDataCache::FLAG_USES_SUBSURFACE_SCATTERING) {

@@ -1628,6 +1630,7 @@ void RenderForwardClustered::_render_scene(RenderDataRD *p_render_data, const Co

if (rb->get_use_taa() || get_debug_draw_mode() == RS::VIEWPORT_DEBUG_DRAW_MOTION_VECTORS) {
color_pass_flags |= COLOR_PASS_FLAG_MOTION_VECTORS;
scene_shader.enable_advanced_shader_group();
}

if (p_render_data->voxel_gi_instances->size() > 0) {

@@ -1647,6 +1650,8 @@ void RenderForwardClustered::_render_scene(RenderDataRD *p_render_data, const Co

if (p_render_data->scene_data->view_count > 1) {
color_pass_flags |= COLOR_PASS_FLAG_MULTIVIEW;
// Try enabling here in case is_xr_enabled() returns false.
scene_shader.shader.enable_group(SceneShaderForwardClustered::SHADER_GROUP_MULTIVIEW);
}

color_framebuffer = rb_data->get_color_pass_fb(color_pass_flags);

@@ -1712,6 +1717,11 @@ void RenderForwardClustered::_render_scene(RenderDataRD *p_render_data, const Co
color_pass_flags |= COLOR_PASS_FLAG_SEPARATE_SPECULAR;
color_framebuffer = rb_data->get_color_pass_fb(color_pass_flags);
}

if (using_sss || using_separate_specular || scene_state.used_lightmap || using_voxelgi) {
scene_shader.enable_advanced_shader_group(p_render_data->scene_data->view_count > 1);
}

RID radiance_texture;
bool draw_sky = false;
bool draw_sky_fog_only = false;

@@ -2484,6 +2494,8 @@ void RenderForwardClustered::_render_material(const Transform3D &p_cam_transform
render_data.cluster_max_elements = 32;
render_data.instances = &p_instances;

scene_shader.enable_advanced_shader_group();

_update_render_base_uniform_set();

_setup_environment(&render_data, true, Vector2(1, 1), false, Color());

@@ -2533,6 +2545,8 @@ void RenderForwardClustered::_render_uv2(const PagedArray<RenderGeometryInstance
render_data.cluster_max_elements = 32;
render_data.instances = &p_instances;

scene_shader.enable_advanced_shader_group();

_update_render_base_uniform_set();

_setup_environment(&render_data, true, Vector2(1, 1), false, Color());

@@ -3321,6 +3335,10 @@ void RenderForwardClustered::sdfgi_update(const Ref<RenderSceneBuffers> &p_rende
return;
}

// Ensure advanced shaders are available if SDFGI is used.
// Call here as this is the first entry point for SDFGI.
scene_shader.enable_advanced_shader_group();

static const uint32_t history_frames_to_converge[RS::ENV_SDFGI_CONVERGE_MAX] = { 5, 10, 15, 20, 25, 30 };
uint32_t requested_history_size = history_frames_to_converge[gi.sdfgi_frames_to_converge];
@@ -321,6 +321,7 @@ class RenderForwardClustered : public RendererSceneRenderRD {
bool used_normal_texture = false;
bool used_depth_texture = false;
bool used_sss = false;
bool used_lightmap = false;

struct ShadowPass {
uint32_t element_from;
@@ -301,7 +301,7 @@ void SceneShaderForwardClustered::ShaderData::set_code(const String &p_code) {

if (k == PIPELINE_VERSION_COLOR_PASS) {
for (int l = 0; l < PIPELINE_COLOR_PASS_FLAG_COUNT; l++) {
if (!shader_singleton->valid_color_pass_pipelines.has(l)) {
if (!shader_singleton->valid_color_pass_pipelines[l]) {
continue;
}

@@ -476,16 +476,16 @@ void SceneShaderForwardClustered::init(const String p_defines) {
RendererRD::MaterialStorage *material_storage = RendererRD::MaterialStorage::get_singleton();

{
Vector<String> shader_versions;
shader_versions.push_back("\n#define MODE_RENDER_DEPTH\n"); // SHADER_VERSION_DEPTH_PASS
shader_versions.push_back("\n#define MODE_RENDER_DEPTH\n#define MODE_DUAL_PARABOLOID\n"); // SHADER_VERSION_DEPTH_PASS_DP
shader_versions.push_back("\n#define MODE_RENDER_DEPTH\n#define MODE_RENDER_NORMAL_ROUGHNESS\n"); // SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS
shader_versions.push_back("\n#define MODE_RENDER_DEPTH\n#define MODE_RENDER_NORMAL_ROUGHNESS\n#define MODE_RENDER_VOXEL_GI\n"); // SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_AND_VOXEL_GI
shader_versions.push_back("\n#define MODE_RENDER_DEPTH\n#define MODE_RENDER_MATERIAL\n"); // SHADER_VERSION_DEPTH_PASS_WITH_MATERIAL
shader_versions.push_back("\n#define MODE_RENDER_DEPTH\n#define MODE_RENDER_SDF\n"); // SHADER_VERSION_DEPTH_PASS_WITH_SDF
shader_versions.push_back("\n#define USE_MULTIVIEW\n#define MODE_RENDER_DEPTH\n"); // SHADER_VERSION_DEPTH_PASS_MULTIVIEW
shader_versions.push_back("\n#define USE_MULTIVIEW\n#define MODE_RENDER_DEPTH\n#define MODE_RENDER_NORMAL_ROUGHNESS\n"); // SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_MULTIVIEW
shader_versions.push_back("\n#define USE_MULTIVIEW\n#define MODE_RENDER_DEPTH\n#define MODE_RENDER_NORMAL_ROUGHNESS\n#define MODE_RENDER_VOXEL_GI\n"); // SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_AND_VOXEL_GI_MULTIVIEW
Vector<ShaderRD::VariantDefine> shader_versions;
shader_versions.push_back(ShaderRD::VariantDefine(SHADER_GROUP_BASE, "\n#define MODE_RENDER_DEPTH\n", true)); // SHADER_VERSION_DEPTH_PASS
shader_versions.push_back(ShaderRD::VariantDefine(SHADER_GROUP_BASE, "\n#define MODE_RENDER_DEPTH\n#define MODE_DUAL_PARABOLOID\n", true)); // SHADER_VERSION_DEPTH_PASS_DP
shader_versions.push_back(ShaderRD::VariantDefine(SHADER_GROUP_BASE, "\n#define MODE_RENDER_DEPTH\n#define MODE_RENDER_NORMAL_ROUGHNESS\n", true)); // SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS
shader_versions.push_back(ShaderRD::VariantDefine(SHADER_GROUP_ADVANCED, "\n#define MODE_RENDER_DEPTH\n#define MODE_RENDER_NORMAL_ROUGHNESS\n#define MODE_RENDER_VOXEL_GI\n", false)); // SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_AND_VOXEL_GI
shader_versions.push_back(ShaderRD::VariantDefine(SHADER_GROUP_ADVANCED, "\n#define MODE_RENDER_DEPTH\n#define MODE_RENDER_MATERIAL\n", false)); // SHADER_VERSION_DEPTH_PASS_WITH_MATERIAL
shader_versions.push_back(ShaderRD::VariantDefine(SHADER_GROUP_ADVANCED, "\n#define MODE_RENDER_DEPTH\n#define MODE_RENDER_SDF\n", false)); // SHADER_VERSION_DEPTH_PASS_WITH_SDF
shader_versions.push_back(ShaderRD::VariantDefine(SHADER_GROUP_MULTIVIEW, "\n#define USE_MULTIVIEW\n#define MODE_RENDER_DEPTH\n", false)); // SHADER_VERSION_DEPTH_PASS_MULTIVIEW
shader_versions.push_back(ShaderRD::VariantDefine(SHADER_GROUP_MULTIVIEW, "\n#define USE_MULTIVIEW\n#define MODE_RENDER_DEPTH\n#define MODE_RENDER_NORMAL_ROUGHNESS\n", false)); // SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_MULTIVIEW
shader_versions.push_back(ShaderRD::VariantDefine(SHADER_GROUP_MULTIVIEW, "\n#define USE_MULTIVIEW\n#define MODE_RENDER_DEPTH\n#define MODE_RENDER_NORMAL_ROUGHNESS\n#define MODE_RENDER_VOXEL_GI\n", false)); // SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_AND_VOXEL_GI_MULTIVIEW

Vector<String> color_pass_flags = {
"\n#define MODE_SEPARATE_SPECULAR\n", // SHADER_COLOR_PASS_FLAG_SEPARATE_SPECULAR

@@ -501,54 +501,38 @@ void SceneShaderForwardClustered::init(const String p_defines) {
version += color_pass_flags[j];
}
}
shader_versions.push_back(version);

// Assign a group based on what features this pass contains.
ShaderGroup group = SHADER_GROUP_BASE;
bool advanced_group = (i & SHADER_COLOR_PASS_FLAG_SEPARATE_SPECULAR) || (i & SHADER_COLOR_PASS_FLAG_LIGHTMAP) || (i & SHADER_COLOR_PASS_FLAG_MOTION_VECTORS);
bool multiview_group = i & SHADER_COLOR_PASS_FLAG_MULTIVIEW;
if (advanced_group && multiview_group) {
group = SHADER_GROUP_ADVANCED_MULTIVIEW;
} else if (advanced_group) {
group = SHADER_GROUP_ADVANCED;
} else if (multiview_group) {
group = SHADER_GROUP_MULTIVIEW;
}

shader_versions.push_back(ShaderRD::VariantDefine(group, version, false));
}

shader.initialize(shader_versions, p_defines);

if (!RendererCompositorRD::get_singleton()->is_xr_enabled()) {
shader.set_variant_enabled(SHADER_VERSION_DEPTH_PASS_MULTIVIEW, false);
shader.set_variant_enabled(SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_MULTIVIEW, false);
shader.set_variant_enabled(SHADER_VERSION_DEPTH_PASS_WITH_NORMAL_AND_ROUGHNESS_AND_VOXEL_GI_MULTIVIEW, false);

// Disable Color Passes
for (int i = 0; i < SHADER_COLOR_PASS_FLAG_COUNT; i++) {
// Selectively disable any shader pass that includes Multiview.
if ((i & SHADER_COLOR_PASS_FLAG_MULTIVIEW)) {
shader.set_variant_enabled(i + SHADER_VERSION_COLOR_PASS, false);
}
}
if (RendererCompositorRD::get_singleton()->is_xr_enabled()) {
shader.enable_group(SHADER_GROUP_MULTIVIEW);
}
}

valid_color_pass_pipelines.insert(0);

valid_color_pass_pipelines.insert(PIPELINE_COLOR_PASS_FLAG_TRANSPARENT);
valid_color_pass_pipelines.insert(PIPELINE_COLOR_PASS_FLAG_TRANSPARENT | PIPELINE_COLOR_PASS_FLAG_LIGHTMAP);
valid_color_pass_pipelines.insert(PIPELINE_COLOR_PASS_FLAG_TRANSPARENT | PIPELINE_COLOR_PASS_FLAG_MULTIVIEW);
valid_color_pass_pipelines.insert(PIPELINE_COLOR_PASS_FLAG_TRANSPARENT | PIPELINE_COLOR_PASS_FLAG_MOTION_VECTORS);
valid_color_pass_pipelines.insert(PIPELINE_COLOR_PASS_FLAG_TRANSPARENT | PIPELINE_COLOR_PASS_FLAG_LIGHTMAP | PIPELINE_COLOR_PASS_FLAG_MULTIVIEW);
valid_color_pass_pipelines.insert(PIPELINE_COLOR_PASS_FLAG_TRANSPARENT | PIPELINE_COLOR_PASS_FLAG_LIGHTMAP | PIPELINE_COLOR_PASS_FLAG_MOTION_VECTORS);
valid_color_pass_pipelines.insert(PIPELINE_COLOR_PASS_FLAG_TRANSPARENT | PIPELINE_COLOR_PASS_FLAG_MULTIVIEW | PIPELINE_COLOR_PASS_FLAG_MOTION_VECTORS);
valid_color_pass_pipelines.insert(PIPELINE_COLOR_PASS_FLAG_TRANSPARENT | PIPELINE_COLOR_PASS_FLAG_LIGHTMAP | PIPELINE_COLOR_PASS_FLAG_MULTIVIEW | PIPELINE_COLOR_PASS_FLAG_MOTION_VECTORS);

valid_color_pass_pipelines.insert(PIPELINE_COLOR_PASS_FLAG_SEPARATE_SPECULAR);
valid_color_pass_pipelines.insert(PIPELINE_COLOR_PASS_FLAG_SEPARATE_SPECULAR | PIPELINE_COLOR_PASS_FLAG_LIGHTMAP);
valid_color_pass_pipelines.insert(PIPELINE_COLOR_PASS_FLAG_SEPARATE_SPECULAR | PIPELINE_COLOR_PASS_FLAG_MULTIVIEW);
valid_color_pass_pipelines.insert(PIPELINE_COLOR_PASS_FLAG_SEPARATE_SPECULAR | PIPELINE_COLOR_PASS_FLAG_MOTION_VECTORS);
valid_color_pass_pipelines.insert(PIPELINE_COLOR_PASS_FLAG_SEPARATE_SPECULAR | PIPELINE_COLOR_PASS_FLAG_LIGHTMAP | PIPELINE_COLOR_PASS_FLAG_MULTIVIEW);
valid_color_pass_pipelines.insert(PIPELINE_COLOR_PASS_FLAG_SEPARATE_SPECULAR | PIPELINE_COLOR_PASS_FLAG_LIGHTMAP | PIPELINE_COLOR_PASS_FLAG_MOTION_VECTORS);
valid_color_pass_pipelines.insert(PIPELINE_COLOR_PASS_FLAG_SEPARATE_SPECULAR | PIPELINE_COLOR_PASS_FLAG_LIGHTMAP | PIPELINE_COLOR_PASS_FLAG_MULTIVIEW | PIPELINE_COLOR_PASS_FLAG_MOTION_VECTORS);

valid_color_pass_pipelines.insert(PIPELINE_COLOR_PASS_FLAG_LIGHTMAP);
valid_color_pass_pipelines.insert(PIPELINE_COLOR_PASS_FLAG_LIGHTMAP | PIPELINE_COLOR_PASS_FLAG_MULTIVIEW);
valid_color_pass_pipelines.insert(PIPELINE_COLOR_PASS_FLAG_LIGHTMAP | PIPELINE_COLOR_PASS_FLAG_MOTION_VECTORS);
valid_color_pass_pipelines.insert(PIPELINE_COLOR_PASS_FLAG_LIGHTMAP | PIPELINE_COLOR_PASS_FLAG_MULTIVIEW | PIPELINE_COLOR_PASS_FLAG_MOTION_VECTORS);

valid_color_pass_pipelines.insert(PIPELINE_COLOR_PASS_FLAG_MULTIVIEW);
valid_color_pass_pipelines.insert(PIPELINE_COLOR_PASS_FLAG_MULTIVIEW | PIPELINE_COLOR_PASS_FLAG_MOTION_VECTORS);

valid_color_pass_pipelines.insert(PIPELINE_COLOR_PASS_FLAG_MOTION_VECTORS);
// Set flag to true if a combination is valid.
// The only invalid combinations are those that include both TRANSPARENT and SEPARATE_SPECULAR.
for (int i = 0; i < PIPELINE_COLOR_PASS_FLAG_COUNT; i++) {
if ((i & PIPELINE_COLOR_PASS_FLAG_TRANSPARENT) && (i & PIPELINE_COLOR_PASS_FLAG_SEPARATE_SPECULAR)) {
valid_color_pass_pipelines[i] = false;
} else {
valid_color_pass_pipelines[i] = true;
}
}

material_storage->shader_set_data_request_function(RendererRD::MaterialStorage::SHADER_TYPE_3D, _create_shader_funcs);
material_storage->material_set_data_request_function(RendererRD::MaterialStorage::SHADER_TYPE_3D, _create_material_funcs);

@@ -854,3 +838,11 @@ void SceneShaderForwardClustered::set_default_specialization_constants(const Vec
}
}
}

void SceneShaderForwardClustered::enable_advanced_shader_group(bool p_needs_multiview) {
if (p_needs_multiview || RendererCompositorRD::get_singleton()->is_xr_enabled()) {
shader.enable_group(SHADER_GROUP_ADVANCED_MULTIVIEW);
} else {
shader.enable_group(SHADER_GROUP_ADVANCED);
}
}
@@ -41,6 +41,13 @@ private:
static SceneShaderForwardClustered *singleton;

public:
enum ShaderGroup {
SHADER_GROUP_BASE, // Always compiled at the beginning.
SHADER_GROUP_ADVANCED,
SHADER_GROUP_MULTIVIEW,
SHADER_GROUP_ADVANCED_MULTIVIEW,
};

enum ShaderVersion {
SHADER_VERSION_DEPTH_PASS,
SHADER_VERSION_DEPTH_PASS_DP,

@@ -78,8 +85,8 @@ public:
};

enum PipelineColorPassFlags {
PIPELINE_COLOR_PASS_FLAG_TRANSPARENT = 1 << 0,
PIPELINE_COLOR_PASS_FLAG_SEPARATE_SPECULAR = 1 << 1,
PIPELINE_COLOR_PASS_FLAG_TRANSPARENT = 1 << 0, // Can't combine with SEPARATE_SPECULAR.
PIPELINE_COLOR_PASS_FLAG_SEPARATE_SPECULAR = 1 << 1, // Can't combine with TRANSPARENT.
PIPELINE_COLOR_PASS_FLAG_LIGHTMAP = 1 << 2,
PIPELINE_COLOR_PASS_FLAG_MULTIVIEW = 1 << 3,
PIPELINE_COLOR_PASS_FLAG_MOTION_VECTORS = 1 << 4,

@@ -242,12 +249,13 @@ public:
ShaderData *debug_shadow_splits_material_shader_ptr = nullptr;

Vector<RD::PipelineSpecializationConstant> default_specialization_constants;
HashSet<uint32_t> valid_color_pass_pipelines;
bool valid_color_pass_pipelines[PIPELINE_COLOR_PASS_FLAG_COUNT];
SceneShaderForwardClustered();
~SceneShaderForwardClustered();

void init(const String p_defines);
void set_default_specialization_constants(const Vector<RD::PipelineSpecializationConstant> &p_constants);
void enable_advanced_shader_group(bool p_needs_multiview = false);
};

} // namespace RendererSceneRenderImplementation
@@ -89,7 +89,7 @@ void PipelineCacheRD::setup(RID p_shader, RD::RenderPrimitive p_primitive, const
ERR_FAIL_COND(p_shader.is_null());
_clear();
shader = p_shader;
input_mask = RD::get_singleton()->shader_get_vertex_input_attribute_mask(p_shader);
input_mask = 0;
render_primitive = p_primitive;
rasterization_state = p_rasterization_state;
multisample_state = p_multisample;
@@ -91,7 +91,11 @@ public:
return result;
}

_FORCE_INLINE_ uint32_t get_vertex_input_mask() const {
_FORCE_INLINE_ uint32_t get_vertex_input_mask() {
if (input_mask == 0) {
ERR_FAIL_COND_V(shader.is_null(), 0);
input_mask = RD::get_singleton()->shader_get_vertex_input_attribute_mask(shader);
}
return input_mask;
}
void clear();
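A plausible reading of this change (my note, not stated in the diff): with placeholder shaders the RID handed to setup() may not be compiled yet, so the vertex input mask is now resolved lazily on first use rather than up front, e.g.:

uint32_t mask = pipeline_cache.get_vertex_input_mask(); // first call queries the shader and caches the result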
@@ -138,7 +138,7 @@ void ShaderRD::setup(const char *p_vertex_code, const char *p_fragment_code, con

RID ShaderRD::version_create() {
//initialize() was never called
ERR_FAIL_COND_V(variant_defines.size() == 0, RID());
ERR_FAIL_COND_V(group_to_variant_map.size() == 0, RID());

Version version;
version.dirty = true;

@@ -148,11 +148,20 @@ RID ShaderRD::version_create() {
return version_owner.make_rid(version);
}

void ShaderRD::_initialize_version(Version *p_version) {
_clear_version(p_version);

p_version->valid = false;
p_version->dirty = false;

p_version->variants = memnew_arr(RID, variant_defines.size());
}

void ShaderRD::_clear_version(Version *p_version) {
//clear versions if they exist
// Clear versions if they exist.
if (p_version->variants) {
for (int i = 0; i < variant_defines.size(); i++) {
if (variants_enabled[i]) {
if (variants_enabled[i] && group_enabled[variant_defines[i].group]) {
RD::get_singleton()->free(p_version->variants[i]);
}
}

@@ -171,7 +180,7 @@ void ShaderRD::_build_variant_code(StringBuilder &builder, uint32_t p_variant, c
case StageTemplate::Chunk::TYPE_VERSION_DEFINES: {
builder.append("\n"); //make sure defines begin at newline
builder.append(general_defines.get_data());
builder.append(variant_defines[p_variant].get_data());
builder.append(variant_defines[p_variant].text.get_data());
for (int j = 0; j < p_version->custom_defines.size(); j++) {
builder.append(p_version->custom_defines[j].get_data());
}

@@ -211,9 +220,11 @@ void ShaderRD::_build_variant_code(StringBuilder &builder, uint32_t p_variant, c
}
}

void ShaderRD::_compile_variant(uint32_t p_variant, Version *p_version) {
if (!variants_enabled[p_variant]) {
return; //variant is disabled, return
void ShaderRD::_compile_variant(uint32_t p_variant, const CompileData *p_data) {
uint32_t variant = group_to_variant_map[p_data->group][p_variant];

if (!variants_enabled[variant]) {
return; // Variant is disabled, return.
}

Vector<RD::ShaderStageSPIRVData> stages;

@@ -227,7 +238,7 @@ void ShaderRD::_compile_variant(uint32_t p_variant, Version *p_version) {
//vertex stage

StringBuilder builder;
_build_variant_code(builder, p_variant, p_version, stage_templates[STAGE_TYPE_VERTEX]);
_build_variant_code(builder, variant, p_data->version, stage_templates[STAGE_TYPE_VERTEX]);

current_source = builder.as_string();
RD::ShaderStageSPIRVData stage;

@@ -245,7 +256,7 @@ void ShaderRD::_compile_variant(uint32_t p_variant, Version *p_version) {
current_stage = RD::SHADER_STAGE_FRAGMENT;

StringBuilder builder;
_build_variant_code(builder, p_variant, p_version, stage_templates[STAGE_TYPE_FRAGMENT]);
_build_variant_code(builder, variant, p_data->version, stage_templates[STAGE_TYPE_FRAGMENT]);

current_source = builder.as_string();
RD::ShaderStageSPIRVData stage;

@@ -263,7 +274,7 @@ void ShaderRD::_compile_variant(uint32_t p_variant, Version *p_version) {
current_stage = RD::SHADER_STAGE_COMPUTE;

StringBuilder builder;
_build_variant_code(builder, p_variant, p_version, stage_templates[STAGE_TYPE_COMPUTE]);
_build_variant_code(builder, variant, p_data->version, stage_templates[STAGE_TYPE_COMPUTE]);

current_source = builder.as_string();

@@ -279,7 +290,7 @@ void ShaderRD::_compile_variant(uint32_t p_variant, Version *p_version) {

if (!build_ok) {
MutexLock lock(variant_set_mutex); //properly print the errors
ERR_PRINT("Error compiling " + String(current_stage == RD::SHADER_STAGE_COMPUTE ? "Compute " : (current_stage == RD::SHADER_STAGE_VERTEX ? "Vertex" : "Fragment")) + " shader, variant #" + itos(p_variant) + " (" + variant_defines[p_variant].get_data() + ").");
ERR_PRINT("Error compiling " + String(current_stage == RD::SHADER_STAGE_COMPUTE ? "Compute " : (current_stage == RD::SHADER_STAGE_VERTEX ? "Vertex" : "Fragment")) + " shader, variant #" + itos(variant) + " (" + variant_defines[variant].text.get_data() + ").");
ERR_PRINT(error);

#ifdef DEBUG_ENABLED

@@ -288,15 +299,15 @@ void ShaderRD::_compile_variant(uint32_t p_variant, Version *p_version) {
return;
}

Vector<uint8_t> shader_data = RD::get_singleton()->shader_compile_binary_from_spirv(stages, name + ":" + itos(p_variant));
Vector<uint8_t> shader_data = RD::get_singleton()->shader_compile_binary_from_spirv(stages, name + ":" + itos(variant));

ERR_FAIL_COND(shader_data.size() == 0);

RID shader = RD::get_singleton()->shader_create_from_bytecode(shader_data);
{
MutexLock lock(variant_set_mutex);
p_version->variants[p_variant] = shader;
p_version->variant_data[p_variant] = shader_data;

p_data->version->variants[variant] = RD::get_singleton()->shader_create_from_bytecode(shader_data, p_data->version->variants[variant]);
p_data->version->variant_data[variant] = shader_data;
}
}

@@ -384,9 +395,9 @@ String ShaderRD::_version_get_sha1(Version *p_version) const {
static const char *shader_file_header = "GDSC";
static const uint32_t cache_file_version = 3;

bool ShaderRD::_load_from_cache(Version *p_version) {
bool ShaderRD::_load_from_cache(Version *p_version, int p_group) {
String sha1 = _version_get_sha1(p_version);
String path = shader_cache_dir.path_join(name).path_join(base_sha256).path_join(sha1) + ".cache";
String path = shader_cache_dir.path_join(name).path_join(group_sha256[p_group]).path_join(sha1) + ".cache";

Ref<FileAccess> f = FileAccess::open(path, FileAccess::READ);
if (f.is_null()) {

@@ -404,12 +415,13 @@ bool ShaderRD::_load_from_cache(Version *p_version) {

uint32_t variant_count = f->get_32();

ERR_FAIL_COND_V(variant_count != (uint32_t)variant_defines.size(), false); //should not happen but check
ERR_FAIL_COND_V(variant_count != (uint32_t)group_to_variant_map[p_group].size(), false); //should not happen but check

for (uint32_t i = 0; i < variant_count; i++) {
int variant_id = group_to_variant_map[p_group][i];
uint32_t variant_size = f->get_32();
ERR_FAIL_COND_V(variant_size == 0 && variants_enabled[i], false);
if (!variants_enabled[i]) {
ERR_FAIL_COND_V(variant_size == 0 && variants_enabled[variant_id], false);
if (!variants_enabled[variant_id]) {
continue;
}
Vector<uint8_t> variant_bytes;

@@ -419,25 +431,28 @@ bool ShaderRD::_load_from_cache(Version *p_version) {

ERR_FAIL_COND_V(br != variant_size, false);

p_version->variant_data[i] = variant_bytes;
p_version->variant_data[variant_id] = variant_bytes;
}

for (uint32_t i = 0; i < variant_count; i++) {
if (!variants_enabled[i]) {
int variant_id = group_to_variant_map[p_group][i];
if (!variants_enabled[variant_id]) {
MutexLock lock(variant_set_mutex);
p_version->variants[i] = RID();
p_version->variants[variant_id] = RID();
continue;
}
RID shader = RD::get_singleton()->shader_create_from_bytecode(p_version->variant_data[i]);
if (shader.is_null()) {
for (uint32_t j = 0; j < i; j++) {
RD::get_singleton()->free(p_version->variants[i]);
}
ERR_FAIL_COND_V(shader.is_null(), false);
}
{
MutexLock lock(variant_set_mutex);
p_version->variants[i] = shader;
RID shader = RD::get_singleton()->shader_create_from_bytecode(p_version->variant_data[variant_id], p_version->variants[variant_id]);
if (shader.is_null()) {
for (uint32_t j = 0; j < i; j++) {
int variant_free_id = group_to_variant_map[p_group][j];
RD::get_singleton()->free(p_version->variants[variant_free_id]);
}
ERR_FAIL_COND_V(shader.is_null(), false);
}

p_version->variants[variant_id] = shader;
}
}
@@ -447,66 +462,85 @@ bool ShaderRD::_load_from_cache(Version *p_version) {
return true;
}

void ShaderRD::_save_to_cache(Version *p_version) {
void ShaderRD::_save_to_cache(Version *p_version, int p_group) {
String sha1 = _version_get_sha1(p_version);
String path = shader_cache_dir.path_join(name).path_join(base_sha256).path_join(sha1) + ".cache";
String path = shader_cache_dir.path_join(name).path_join(group_sha256[p_group]).path_join(sha1) + ".cache";

Ref<FileAccess> f = FileAccess::open(path, FileAccess::WRITE);
ERR_FAIL_COND(f.is_null());
f->store_buffer((const uint8_t *)shader_file_header, 4);
f->store_32(cache_file_version); //file version
uint32_t variant_count = variant_defines.size();
f->store_32(variant_count); //variant count

f->store_32(cache_file_version); // File version.
uint32_t variant_count = group_to_variant_map[p_group].size();
f->store_32(variant_count); // Variant count.
for (uint32_t i = 0; i < variant_count; i++) {
f->store_32(p_version->variant_data[i].size()); //stage count
f->store_buffer(p_version->variant_data[i].ptr(), p_version->variant_data[i].size());
int variant_id = group_to_variant_map[p_group][i];
f->store_32(p_version->variant_data[variant_id].size()); // Stage count.
f->store_buffer(p_version->variant_data[variant_id].ptr(), p_version->variant_data[variant_id].size());
}
}

void ShaderRD::_compile_version(Version *p_version) {
_clear_version(p_version);
void ShaderRD::_allocate_placeholders(Version *p_version, int p_group) {
for (uint32_t i = 0; i < group_to_variant_map[p_group].size(); i++) {
int variant_id = group_to_variant_map[p_group][i];
RID shader = RD::get_singleton()->shader_create_placeholder();
{
MutexLock lock(variant_set_mutex);
p_version->variants[variant_id] = shader;
}
}
}

p_version->valid = false;
p_version->dirty = false;
// Try to compile all variants for a given group.
// Will skip variants that are disabled.
void ShaderRD::_compile_version(Version *p_version, int p_group) {
if (!group_enabled[p_group]) {
return;
}

p_version->variants = memnew_arr(RID, variant_defines.size());
typedef Vector<uint8_t> ShaderStageData;
p_version->variant_data = memnew_arr(ShaderStageData, variant_defines.size());

p_version->dirty = false;

if (shader_cache_dir_valid) {
if (_load_from_cache(p_version)) {
if (_load_from_cache(p_version, p_group)) {
print_line("loaded from cache!");
return;
}
}

CompileData compile_data;
compile_data.version = p_version;
compile_data.group = p_group;

#if 1
WorkerThreadPool::GroupID group_task = WorkerThreadPool::get_singleton()->add_template_group_task(this, &ShaderRD::_compile_variant, p_version, variant_defines.size(), -1, true, SNAME("ShaderCompilation"));
WorkerThreadPool::GroupID group_task = WorkerThreadPool::get_singleton()->add_template_group_task(this, &ShaderRD::_compile_variant, &compile_data, group_to_variant_map[p_group].size(), -1, true, SNAME("ShaderCompilation"));
WorkerThreadPool::get_singleton()->wait_for_group_task_completion(group_task);

#else
for (int i = 0; i < variant_defines.size(); i++) {
_compile_variant(i, p_version);
for (uint32_t i = 0; i < group_to_variant_map[p_group].size(); i++) {
_compile_variant(i, &compile_data);
}
#endif

bool all_valid = true;
for (int i = 0; i < variant_defines.size(); i++) {
if (!variants_enabled[i]) {
continue; //disabled

for (uint32_t i = 0; i < group_to_variant_map[p_group].size(); i++) {
int variant_id = group_to_variant_map[p_group][i];
if (!variants_enabled[variant_id]) {
continue; // Disabled.
}
if (p_version->variants[i].is_null()) {
if (p_version->variants[variant_id].is_null()) {
all_valid = false;
break;
}
}

if (!all_valid) {
//clear versions if they exist
// Clear versions if they exist.
for (int i = 0; i < variant_defines.size(); i++) {
if (!variants_enabled[i]) {
continue; //disabled
if (!variants_enabled[i] || !group_enabled[variant_defines[i].group]) {
continue; // Disabled.
}
if (!p_version->variants[i].is_null()) {
RD::get_singleton()->free(p_version->variants[i]);

@@ -520,8 +554,8 @@ void ShaderRD::_compile_version(Version *p_version) {
p_version->variant_data = nullptr;
return;
} else if (shader_cache_dir_valid) {
//save shader cache
_save_to_cache(p_version);
// Save shader cache.
_save_to_cache(p_version, p_group);
}

memdelete_arr(p_version->variant_data); //clear stages
@@ -550,7 +584,14 @@ void ShaderRD::version_set_code(RID p_version, const HashMap<String, String> &p_

version->dirty = true;
if (version->initialize_needed) {
_compile_version(version);
_initialize_version(version);
for (int i = 0; i < group_enabled.size(); i++) {
if (!group_enabled[i]) {
_allocate_placeholders(version, i);
continue;
}
_compile_version(version, i);
}
version->initialize_needed = false;
}
}

@@ -576,7 +617,14 @@ void ShaderRD::version_set_compute_code(RID p_version, const HashMap<String, Str

version->dirty = true;
if (version->initialize_needed) {
_compile_version(version);
_initialize_version(version);
for (int i = 0; i < group_enabled.size(); i++) {
if (!group_enabled[i]) {
_allocate_placeholders(version, i);
continue;
}
_compile_version(version, i);
}
version->initialize_needed = false;
}
}

@@ -586,7 +634,14 @@ bool ShaderRD::version_is_valid(RID p_version) {
ERR_FAIL_COND_V(!version, false);

if (version->dirty) {
_compile_version(version);
_initialize_version(version);
for (int i = 0; i < group_enabled.size(); i++) {
if (!group_enabled[i]) {
_allocate_placeholders(version, i);
continue;
}
_compile_version(version, i);
}
}

return version->valid;

@@ -615,6 +670,29 @@ bool ShaderRD::is_variant_enabled(int p_variant) const {
return variants_enabled[p_variant];
}

void ShaderRD::enable_group(int p_group) {
ERR_FAIL_INDEX(p_group, group_enabled.size());

if (group_enabled[p_group]) {
// Group already enabled, do nothing.
return;
}

group_enabled.write[p_group] = true;

// Compile all versions again to include the new group.
List<RID> all_versions;
version_owner.get_owned_list(&all_versions);
for (int i = 0; i < all_versions.size(); i++) {
Version *version = version_owner.get_or_null(all_versions[i]);
_compile_version(version, p_group);
}
}

bool ShaderRD::is_group_enabled(int p_group) const {
return group_enabled[p_group];
}

bool ShaderRD::shader_cache_cleanup_on_start = false;

ShaderRD::ShaderRD() {
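Usage sketch for the new group API (MY_GROUP_ADVANCED is an illustrative group id that would be defined by a ShaderRD subclass, not a name from this patch):

if (!shader.is_group_enabled(MY_GROUP_ADVANCED)) {
	shader.enable_group(MY_GROUP_ADVANCED); // compiles that group's variants for every live version
}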
@@ -639,24 +717,38 @@ void ShaderRD::initialize(const Vector<String> &p_variant_defines, const String

general_defines = p_general_defines.utf8();

// When initialized this way, there is just one group and its always enabled.
group_to_variant_map.insert(0, LocalVector<int>{});
group_enabled.push_back(true);

for (int i = 0; i < p_variant_defines.size(); i++) {
variant_defines.push_back(p_variant_defines[i].utf8());
variant_defines.push_back(VariantDefine(0, p_variant_defines[i], true));
variants_enabled.push_back(true);
group_to_variant_map[0].push_back(i);
}

if (!shader_cache_dir.is_empty()) {
group_sha256.resize(1);
_initialize_cache();
}
}

void ShaderRD::_initialize_cache() {
for (const KeyValue<int, LocalVector<int>> &E : group_to_variant_map) {
StringBuilder hash_build;

hash_build.append("[base_hash]");
hash_build.append(base_sha256);
hash_build.append("[general_defines]");
hash_build.append(general_defines.get_data());
for (int i = 0; i < variant_defines.size(); i++) {
hash_build.append("[variant_defines:" + itos(i) + "]");
hash_build.append(variant_defines[i].get_data());
hash_build.append("[group_id]");
hash_build.append(itos(E.key));
for (uint32_t i = 0; i < E.value.size(); i++) {
hash_build.append("[variant_defines:" + itos(E.value[i]) + "]");
hash_build.append(variant_defines[E.value[i]].text.get_data());
}

base_sha256 = hash_build.as_string().sha256_text();
group_sha256[E.key] = hash_build.as_string().sha256_text();

Ref<DirAccess> d = DirAccess::open(shader_cache_dir);
ERR_FAIL_COND(d.is_null());

@@ -666,17 +758,58 @@ void ShaderRD::initialize(const Vector<String> &p_variant_defines, const String
d->change_dir(name);
}

//erase other versions?
// Erase other versions?
if (shader_cache_cleanup_on_start) {
}
//
if (d->change_dir(base_sha256) != OK) {
Error err = d->make_dir(base_sha256);
if (d->change_dir(group_sha256[E.key]) != OK) {
Error err = d->make_dir(group_sha256[E.key]);
ERR_FAIL_COND(err != OK);
}
shader_cache_dir_valid = true;

print_verbose("Shader '" + name + "' SHA256: " + base_sha256);
print_verbose("Shader '" + name + "' (group " + itos(E.key) + ") SHA256: " + group_sha256[E.key]);
}
}

// Same as above, but allows specifying shader compilation groups.
void ShaderRD::initialize(const Vector<VariantDefine> &p_variant_defines, const String &p_general_defines) {
ERR_FAIL_COND(variant_defines.size());
ERR_FAIL_COND(p_variant_defines.size() == 0);

general_defines = p_general_defines.utf8();

int max_group_id = 0;

for (int i = 0; i < p_variant_defines.size(); i++) {
// Fill variant array.
variant_defines.push_back(p_variant_defines[i]);
variants_enabled.push_back(true);

// Map variant array index to group id, so we can iterate over groups later.
if (!group_to_variant_map.has(p_variant_defines[i].group)) {
group_to_variant_map.insert(p_variant_defines[i].group, LocalVector<int>{});
}
group_to_variant_map[p_variant_defines[i].group].push_back(i);

// Track max size.
if (p_variant_defines[i].group > max_group_id) {
max_group_id = p_variant_defines[i].group;
}
}

// Set all to groups to false, then enable those that should be default.
group_enabled.resize_zeroed(max_group_id + 1);
bool *enabled_ptr = group_enabled.ptrw();
for (int i = 0; i < p_variant_defines.size(); i++) {
if (p_variant_defines[i].default_enabled) {
enabled_ptr[p_variant_defines[i].group] = true;
}
}

if (!shader_cache_dir.is_empty()) {
group_sha256.resize(max_group_id + 1);
_initialize_cache();
}
}
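Tying the pieces together, a hypothetical ShaderRD subclass could declare its variants in groups like this (enum values and defines are illustrative, not from the patch):

enum {
	GROUP_BASE,
	GROUP_ADVANCED,
};

Vector<ShaderRD::VariantDefine> variants;
variants.push_back(ShaderRD::VariantDefine(GROUP_BASE, "\n#define MODE_SIMPLE\n", true)); // compiled up front
variants.push_back(ShaderRD::VariantDefine(GROUP_ADVANCED, "\n#define MODE_FANCY\n", false)); // deferred until enabled
shader.initialize(variants, "");

// Later, the first time the advanced path is needed:
shader.enable_group(GROUP_ADVANCED);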
@@ -41,10 +41,26 @@
#include "servers/rendering_server.h"

class ShaderRD {
public:
struct VariantDefine {
int group = 0;
CharString text;
bool default_enabled = true;
VariantDefine(){};
VariantDefine(int p_group, const String &p_text, bool p_default_enabled) {
group = p_group;
default_enabled = p_default_enabled;
text = p_text.utf8();
}
};

private:
//versions
CharString general_defines;
Vector<CharString> variant_defines;
Vector<VariantDefine> variant_defines;
Vector<bool> variants_enabled;
HashMap<int, LocalVector<int>> group_to_variant_map;
Vector<bool> group_enabled;

struct Version {
CharString uniforms;

@@ -55,7 +71,7 @@ class ShaderRD {
Vector<CharString> custom_defines;

Vector<uint8_t> *variant_data = nullptr;
RID *variants = nullptr; //same size as version defines
RID *variants = nullptr; // Same size as variant defines.

bool valid;
bool dirty;

@@ -64,10 +80,17 @@ class ShaderRD {

Mutex variant_set_mutex;

void _compile_variant(uint32_t p_variant, Version *p_version);
struct CompileData {
Version *version;
int group = 0;
};

void _compile_variant(uint32_t p_variant, const CompileData *p_data);

void _initialize_version(Version *p_version);
void _clear_version(Version *p_version);
void _compile_version(Version *p_version);
void _compile_version(Version *p_version, int p_group);
void _allocate_placeholders(Version *p_version, int p_group);

RID_Owner<Version> version_owner;

@@ -97,6 +120,7 @@ class ShaderRD {
CharString base_compute_defines;

String base_sha256;
LocalVector<String> group_sha256;

static String shader_cache_dir;
static bool shader_cache_cleanup_on_start;

@@ -119,8 +143,9 @@ class ShaderRD {
void _add_stage(const char *p_code, StageType p_stage_type);

String _version_get_sha1(Version *p_version) const;
bool _load_from_cache(Version *p_version);
void _save_to_cache(Version *p_version);
bool _load_from_cache(Version *p_version, int p_group);
void _save_to_cache(Version *p_version, int p_group);
void _initialize_cache();

protected:
ShaderRD();

@@ -140,7 +165,14 @@ public:
ERR_FAIL_COND_V(!version, RID());

if (version->dirty) {
_compile_version(version);
_initialize_version(version);
for (int i = 0; i < group_enabled.size(); i++) {
if (!group_enabled[i]) {
_allocate_placeholders(version, i);
continue;
}
_compile_version(version, i);
}
}

if (!version->valid) {

@@ -154,9 +186,14 @@ public:

bool version_free(RID p_version);

// Enable/disable variants for things that you know won't be used at engine initialization time .
void set_variant_enabled(int p_variant, bool p_enabled);
bool is_variant_enabled(int p_variant) const;

// Enable/disable groups for things that might be enabled at run time.
void enable_group(int p_group);
bool is_group_enabled(int p_group) const;

static void set_shader_cache_dir(const String &p_dir);
static void set_shader_cache_save_compressed(bool p_enable);
static void set_shader_cache_save_compressed_zstd(bool p_enable);

@@ -165,6 +202,8 @@ public:
RS::ShaderNativeSourceCode version_get_native_source_code(RID p_version);

void initialize(const Vector<String> &p_variant_defines, const String &p_general_defines = "");
void initialize(const Vector<VariantDefine> &p_variant_defines, const String &p_general_defines = "");

virtual ~ShaderRD();
};
|
@ -754,7 +754,9 @@ void RenderingDevice::_bind_methods() {
|
|||
ClassDB::bind_method(D_METHOD("shader_compile_spirv_from_source", "shader_source", "allow_cache"), &RenderingDevice::_shader_compile_spirv_from_source, DEFVAL(true));
|
||||
ClassDB::bind_method(D_METHOD("shader_compile_binary_from_spirv", "spirv_data", "name"), &RenderingDevice::_shader_compile_binary_from_spirv, DEFVAL(""));
|
||||
ClassDB::bind_method(D_METHOD("shader_create_from_spirv", "spirv_data", "name"), &RenderingDevice::_shader_create_from_spirv, DEFVAL(""));
|
||||
ClassDB::bind_method(D_METHOD("shader_create_from_bytecode", "binary_data"), &RenderingDevice::shader_create_from_bytecode);
|
||||
ClassDB::bind_method(D_METHOD("shader_create_from_bytecode", "binary_data", "placeholder_rid"), &RenderingDevice::shader_create_from_bytecode, DEFVAL(RID()));
|
||||
ClassDB::bind_method(D_METHOD("shader_create_placeholder"), &RenderingDevice::shader_create_placeholder);
|
||||
|
||||
ClassDB::bind_method(D_METHOD("shader_get_vertex_input_attribute_mask", "shader"), &RenderingDevice::shader_get_vertex_input_attribute_mask);
|
||||
|
||||
ClassDB::bind_method(D_METHOD("uniform_buffer_create", "size_bytes", "data"), &RenderingDevice::uniform_buffer_create, DEFVAL(Vector<uint8_t>()));
|
||||
|
|
|
@ -734,7 +734,8 @@ public:
|
|||
virtual Vector<uint8_t> shader_compile_binary_from_spirv(const Vector<ShaderStageSPIRVData> &p_spirv, const String &p_shader_name = "") = 0;
|
||||
|
||||
virtual RID shader_create_from_spirv(const Vector<ShaderStageSPIRVData> &p_spirv, const String &p_shader_name = "");
|
||||
virtual RID shader_create_from_bytecode(const Vector<uint8_t> &p_shader_binary) = 0;
|
||||
virtual RID shader_create_from_bytecode(const Vector<uint8_t> &p_shader_binary, RID p_placeholder = RID()) = 0;
|
||||
virtual RID shader_create_placeholder() = 0;
|
||||
|
||||
virtual uint32_t shader_get_vertex_input_attribute_mask(RID p_shader) = 0;
|
||||
|
||||
|
|