/**************************************************************************/
/*  shader_rd.cpp                                                         */
/**************************************************************************/
/* This file is part of:                                                  */
/*                             GODOT ENGINE                               */
/*                        https://godotengine.org                         */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur.                  */
/*                                                                        */
/* Permission is hereby granted, free of charge, to any person obtaining  */
/* a copy of this software and associated documentation files (the        */
/* "Software"), to deal in the Software without restriction, including    */
/* without limitation the rights to use, copy, modify, merge, publish,    */
/* distribute, sublicense, and/or sell copies of the Software, and to     */
/* permit persons to whom the Software is furnished to do so, subject to  */
/* the following conditions:                                              */
/*                                                                        */
/* The above copyright notice and this permission notice shall be         */
/* included in all copies or substantial portions of the Software.        */
/*                                                                        */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,        */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF     */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY   */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,   */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE      */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                 */
/**************************************************************************/

#include "shader_rd.h"

#include "core/io/compression.h"
#include "core/io/dir_access.h"
#include "core/io/file_access.h"
#include "core/object/worker_thread_pool.h"
#include "core/version.h"
#include "renderer_compositor_rd.h"
#include "servers/rendering/rendering_device.h"
#include "thirdparty/misc/smolv.h"

void ShaderRD::_add_stage(const char *p_code, StageType p_stage_type) {
	Vector<String> lines = String(p_code).split("\n");

	String text;

	for (int i = 0; i < lines.size(); i++) {
		const String &l = lines[i];
		bool push_chunk = false;

		StageTemplate::Chunk chunk;

		if (l.begins_with("#VERSION_DEFINES")) {
			chunk.type = StageTemplate::Chunk::TYPE_VERSION_DEFINES;
			push_chunk = true;
		} else if (l.begins_with("#GLOBALS")) {
			switch (p_stage_type) {
				case STAGE_TYPE_VERTEX:
					chunk.type = StageTemplate::Chunk::TYPE_VERTEX_GLOBALS;
					break;
				case STAGE_TYPE_FRAGMENT:
					chunk.type = StageTemplate::Chunk::TYPE_FRAGMENT_GLOBALS;
					break;
				case STAGE_TYPE_COMPUTE:
					chunk.type = StageTemplate::Chunk::TYPE_COMPUTE_GLOBALS;
					break;
				default: {
				}
			}

			push_chunk = true;
		} else if (l.begins_with("#MATERIAL_UNIFORMS")) {
			chunk.type = StageTemplate::Chunk::TYPE_MATERIAL_UNIFORMS;
			push_chunk = true;
		} else if (l.begins_with("#CODE")) {
			chunk.type = StageTemplate::Chunk::TYPE_CODE;
			push_chunk = true;
			chunk.code = l.replace_first("#CODE", String()).replace(":", "").strip_edges().to_upper();
		} else {
			text += l + "\n";
		}

		if (push_chunk) {
			if (!text.is_empty()) {
				StageTemplate::Chunk text_chunk;
				text_chunk.type = StageTemplate::Chunk::TYPE_TEXT;
				text_chunk.text = text.utf8();
				stage_templates[p_stage_type].chunks.push_back(text_chunk);
				text = String();
			}
			stage_templates[p_stage_type].chunks.push_back(chunk);
		}
	}

	if (!text.is_empty()) {
		StageTemplate::Chunk text_chunk;
		text_chunk.type = StageTemplate::Chunk::TYPE_TEXT;
		text_chunk.text = text.utf8();
		stage_templates[p_stage_type].chunks.push_back(text_chunk);
		text = String();
	}
}
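
// Illustrative sketch only (hypothetical template, not shipped with the
// engine): a stage source that _add_stage() would split into chunks.
// Everything outside the recognized directives becomes TYPE_TEXT chunks.
//
//     #version 450
//     #VERSION_DEFINES
//     #MATERIAL_UNIFORMS
//     #GLOBALS
//     void main() {
//     #CODE : VERTEX
//     }
//
// The "#CODE : VERTEX" line becomes a TYPE_CODE chunk with chunk.code == "VERTEX",
// later substituted from version->code_sections["VERTEX"] in _build_variant_code().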

void ShaderRD::setup(const char *p_vertex_code, const char *p_fragment_code, const char *p_compute_code, const char *p_name) {
	name = p_name;

	if (p_compute_code) {
		_add_stage(p_compute_code, STAGE_TYPE_COMPUTE);
		is_compute = true;
	} else {
		is_compute = false;
		if (p_vertex_code) {
			_add_stage(p_vertex_code, STAGE_TYPE_VERTEX);
		}
		if (p_fragment_code) {
			_add_stage(p_fragment_code, STAGE_TYPE_FRAGMENT);
		}
	}

	StringBuilder tohash;
	tohash.append("[GodotVersionNumber]");
	tohash.append(VERSION_NUMBER);
	tohash.append("[GodotVersionHash]");
	tohash.append(VERSION_HASH);
	tohash.append("[SpirvCacheKey]");
	tohash.append(RenderingDevice::get_singleton()->shader_get_spirv_cache_key());
	tohash.append("[BinaryCacheKey]");
	tohash.append(RenderingDevice::get_singleton()->shader_get_binary_cache_key());
	tohash.append("[Vertex]");
	tohash.append(p_vertex_code ? p_vertex_code : "");
	tohash.append("[Fragment]");
	tohash.append(p_fragment_code ? p_fragment_code : "");
	tohash.append("[Compute]");
	tohash.append(p_compute_code ? p_compute_code : "");

	base_sha256 = tohash.as_string().sha256_text();
}
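
// Minimal usage sketch (hypothetical subclass and variable names, for
// orientation only):
//
//     MyShaderRD shader;
//     shader.setup(vertex_template, fragment_template, nullptr, "my_shader");
//     shader.initialize(my_variant_defines, my_general_defines);
//     RID version = shader.version_create();
//
// Note that base_sha256 folds in the engine version, both RenderingDevice
// cache keys, and all stage sources, so changing any of them invalidates
// every cached binary built from this shader.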

RID ShaderRD::version_create() {
	//initialize() was never called
	ERR_FAIL_COND_V(group_to_variant_map.is_empty(), RID());

	Version version;
	version.dirty = true;
	version.valid = false;
	version.initialize_needed = true;
	version.variants = nullptr;
	return version_owner.make_rid(version);
}

void ShaderRD::_initialize_version(Version *p_version) {
	_clear_version(p_version);

	p_version->valid = false;
	p_version->dirty = false;

	p_version->variants = memnew_arr(RID, variant_defines.size());
}

void ShaderRD::_clear_version(Version *p_version) {
	// Clear versions if they exist.
	if (p_version->variants) {
		for (int i = 0; i < variant_defines.size(); i++) {
			if (p_version->variants[i].is_valid()) {
				RD::get_singleton()->free(p_version->variants[i]);
			}
		}

		memdelete_arr(p_version->variants);
		if (p_version->variant_data) {
			memdelete_arr(p_version->variant_data);
		}
		p_version->variants = nullptr;
	}
}

void ShaderRD::_build_variant_code(StringBuilder &builder, uint32_t p_variant, const Version *p_version, const StageTemplate &p_template) {
	for (const StageTemplate::Chunk &chunk : p_template.chunks) {
		switch (chunk.type) {
			case StageTemplate::Chunk::TYPE_VERSION_DEFINES: {
				builder.append("\n"); // Make sure defines begin at a newline.
				builder.append(general_defines.get_data());
				builder.append(variant_defines[p_variant].text.get_data());
				for (int j = 0; j < p_version->custom_defines.size(); j++) {
					builder.append(p_version->custom_defines[j].get_data());
				}
				builder.append("\n"); // Make sure defines end at a newline.
				if (p_version->uniforms.size()) {
					builder.append("#define MATERIAL_UNIFORMS_USED\n");
				}
				for (const KeyValue<StringName, CharString> &E : p_version->code_sections) {
					builder.append(String("#define ") + String(E.key) + "_CODE_USED\n");
				}
#if defined(MACOS_ENABLED) || defined(IOS_ENABLED)
				builder.append("#define MOLTENVK_USED\n");
#endif
				builder.append(String("#define RENDER_DRIVER_") + OS::get_singleton()->get_current_rendering_driver_name().to_upper() + "\n");
			} break;
			case StageTemplate::Chunk::TYPE_MATERIAL_UNIFORMS: {
				builder.append(p_version->uniforms.get_data()); // Uniforms (same for vertex and fragment).
			} break;
			case StageTemplate::Chunk::TYPE_VERTEX_GLOBALS: {
				builder.append(p_version->vertex_globals.get_data()); // Vertex globals.
			} break;
			case StageTemplate::Chunk::TYPE_FRAGMENT_GLOBALS: {
				builder.append(p_version->fragment_globals.get_data()); // Fragment globals.
			} break;
			case StageTemplate::Chunk::TYPE_COMPUTE_GLOBALS: {
				builder.append(p_version->compute_globals.get_data()); // Compute globals.
			} break;
			case StageTemplate::Chunk::TYPE_CODE: {
				if (p_version->code_sections.has(chunk.code)) {
					builder.append(p_version->code_sections[chunk.code].get_data());
				}
			} break;
			case StageTemplate::Chunk::TYPE_TEXT: {
				builder.append(chunk.text.get_data());
			} break;
		}
	}
}
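
// For reference, a VERSION_DEFINES chunk expands to roughly the following
// preamble (illustrative, assuming a Vulkan driver and one populated VERTEX
// code section):
//
//     <general_defines><variant define><custom defines>
//     #define MATERIAL_UNIFORMS_USED
//     #define VERTEX_CODE_USED
//     #define RENDER_DRIVER_VULKAN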

void ShaderRD::_compile_variant(uint32_t p_variant, const CompileData *p_data) {
	uint32_t variant = group_to_variant_map[p_data->group][p_variant];

	if (!variants_enabled[variant]) {
		return; // Variant is disabled, return.
	}

	Vector<RD::ShaderStageSPIRVData> stages;

	String error;
	String current_source;
	RD::ShaderStage current_stage = RD::SHADER_STAGE_VERTEX;
	bool build_ok = true;

	if (!is_compute) {
		// Vertex stage.

		StringBuilder builder;
		_build_variant_code(builder, variant, p_data->version, stage_templates[STAGE_TYPE_VERTEX]);

		current_source = builder.as_string();
		RD::ShaderStageSPIRVData stage;
		stage.spirv = RD::get_singleton()->shader_compile_spirv_from_source(RD::SHADER_STAGE_VERTEX, current_source, RD::SHADER_LANGUAGE_GLSL, &error);
		if (stage.spirv.size() == 0) {
			build_ok = false;
		} else {
			stage.shader_stage = RD::SHADER_STAGE_VERTEX;
			stages.push_back(stage);
		}
	}

	if (!is_compute && build_ok) {
		// Fragment stage.
		current_stage = RD::SHADER_STAGE_FRAGMENT;

		StringBuilder builder;
		_build_variant_code(builder, variant, p_data->version, stage_templates[STAGE_TYPE_FRAGMENT]);

		current_source = builder.as_string();
		RD::ShaderStageSPIRVData stage;
		stage.spirv = RD::get_singleton()->shader_compile_spirv_from_source(RD::SHADER_STAGE_FRAGMENT, current_source, RD::SHADER_LANGUAGE_GLSL, &error);
		if (stage.spirv.size() == 0) {
			build_ok = false;
		} else {
			stage.shader_stage = RD::SHADER_STAGE_FRAGMENT;
			stages.push_back(stage);
		}
	}

	if (is_compute) {
		// Compute stage.
		current_stage = RD::SHADER_STAGE_COMPUTE;

		StringBuilder builder;
		_build_variant_code(builder, variant, p_data->version, stage_templates[STAGE_TYPE_COMPUTE]);

		current_source = builder.as_string();

		RD::ShaderStageSPIRVData stage;
		stage.spirv = RD::get_singleton()->shader_compile_spirv_from_source(RD::SHADER_STAGE_COMPUTE, current_source, RD::SHADER_LANGUAGE_GLSL, &error);
		if (stage.spirv.size() == 0) {
			build_ok = false;
		} else {
			stage.shader_stage = RD::SHADER_STAGE_COMPUTE;
			stages.push_back(stage);
		}
	}

	if (!build_ok) {
		MutexLock lock(variant_set_mutex); // Properly print the errors.
		ERR_PRINT("Error compiling " + String(current_stage == RD::SHADER_STAGE_COMPUTE ? "Compute" : (current_stage == RD::SHADER_STAGE_VERTEX ? "Vertex" : "Fragment")) + " shader, variant #" + itos(variant) + " (" + variant_defines[variant].text.get_data() + ").");
		ERR_PRINT(error);

#ifdef DEBUG_ENABLED
		ERR_PRINT("code:\n" + current_source.get_with_code_lines());
#endif
		return;
	}

	Vector<uint8_t> shader_data = RD::get_singleton()->shader_compile_binary_from_spirv(stages, name + ":" + itos(variant));

	ERR_FAIL_COND(shader_data.is_empty());

	{
		MutexLock lock(variant_set_mutex);

		p_data->version->variants[variant] = RD::get_singleton()->shader_create_from_bytecode(shader_data, p_data->version->variants[variant]);
		p_data->version->variant_data[variant] = shader_data;
	}
}
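
// Compilation is deliberately two-step: GLSL -> SPIR-V via
// shader_compile_spirv_from_source(), then SPIR-V -> a device binary blob via
// shader_compile_binary_from_spirv(). The binary blob also carries the parsed
// SPIR-V reflection information, which is expensive to produce; that is what
// makes caching the blob (rather than the SPIR-V) worthwhile, since loading a
// cached blob skips both GLSL compilation and reflection parsing.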

RS::ShaderNativeSourceCode ShaderRD::version_get_native_source_code(RID p_version) {
	Version *version = version_owner.get_or_null(p_version);
	RS::ShaderNativeSourceCode source_code;
	ERR_FAIL_NULL_V(version, source_code);

	source_code.versions.resize(variant_defines.size());

	for (int i = 0; i < source_code.versions.size(); i++) {
		if (!is_compute) {
			// Vertex stage.

			StringBuilder builder;
			_build_variant_code(builder, i, version, stage_templates[STAGE_TYPE_VERTEX]);

			RS::ShaderNativeSourceCode::Version::Stage stage;
			stage.name = "vertex";
			stage.code = builder.as_string();

			source_code.versions.write[i].stages.push_back(stage);
		}

		if (!is_compute) {
			// Fragment stage.

			StringBuilder builder;
			_build_variant_code(builder, i, version, stage_templates[STAGE_TYPE_FRAGMENT]);

			RS::ShaderNativeSourceCode::Version::Stage stage;
			stage.name = "fragment";
			stage.code = builder.as_string();

			source_code.versions.write[i].stages.push_back(stage);
		}

		if (is_compute) {
			// Compute stage.

			StringBuilder builder;
			_build_variant_code(builder, i, version, stage_templates[STAGE_TYPE_COMPUTE]);

			RS::ShaderNativeSourceCode::Version::Stage stage;
			stage.name = "compute";
			stage.code = builder.as_string();

			source_code.versions.write[i].stages.push_back(stage);
		}
	}

	return source_code;
}

String ShaderRD::_version_get_sha1(Version *p_version) const {
	StringBuilder hash_build;

	hash_build.append("[uniforms]");
	hash_build.append(p_version->uniforms.get_data());
	hash_build.append("[vertex_globals]");
	hash_build.append(p_version->vertex_globals.get_data());
	hash_build.append("[fragment_globals]");
	hash_build.append(p_version->fragment_globals.get_data());
	hash_build.append("[compute_globals]");
	hash_build.append(p_version->compute_globals.get_data());

	Vector<StringName> code_sections;
	for (const KeyValue<StringName, CharString> &E : p_version->code_sections) {
		code_sections.push_back(E.key);
	}
	code_sections.sort_custom<StringName::AlphCompare>();

	for (int i = 0; i < code_sections.size(); i++) {
		hash_build.append(String("[code:") + String(code_sections[i]) + "]");
		hash_build.append(p_version->code_sections[code_sections[i]].get_data());
	}
	for (int i = 0; i < p_version->custom_defines.size(); i++) {
		hash_build.append("[custom_defines:" + itos(i) + "]");
		hash_build.append(p_version->custom_defines[i].get_data());
	}

	return hash_build.as_string().sha1_text();
}

static const char *shader_file_header = "GDSC";
static const uint32_t cache_file_version = 3;

String ShaderRD::_get_cache_file_path(Version *p_version, int p_group) {
	const String &sha1 = _version_get_sha1(p_version);
	const String &api_safe_name = String(RD::get_singleton()->get_device_api_name()).validate_filename().to_lower();
	const String &path = shader_cache_dir.path_join(name).path_join(group_sha256[p_group]).path_join(sha1) + "." + api_safe_name + ".cache";
	return path;
}
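
// The resulting path has this shape (hypothetical name, hashes shortened for
// illustration):
//
//     <shader_cache_dir>/<name>/<group_sha256>/<version_sha1>.<api>.cache
//     e.g. shader_cache/MyShader/cf23df.../9b1c42....vulkan.cache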

bool ShaderRD::_load_from_cache(Version *p_version, int p_group) {
	const String &path = _get_cache_file_path(p_version, p_group);
	Ref<FileAccess> f = FileAccess::open(path, FileAccess::READ);
	if (f.is_null()) {
		return false;
	}

	char header[5] = { 0, 0, 0, 0, 0 };
	f->get_buffer((uint8_t *)header, 4);
	ERR_FAIL_COND_V(header != String(shader_file_header), false);

	uint32_t file_version = f->get_32();
	if (file_version != cache_file_version) {
		return false; // Wrong version.
	}

	uint32_t variant_count = f->get_32();

	ERR_FAIL_COND_V(variant_count != (uint32_t)group_to_variant_map[p_group].size(), false); // Should not happen, but check.

	for (uint32_t i = 0; i < variant_count; i++) {
		int variant_id = group_to_variant_map[p_group][i];
		uint32_t variant_size = f->get_32();
		ERR_FAIL_COND_V(variant_size == 0 && variants_enabled[variant_id], false);
		if (!variants_enabled[variant_id]) {
			continue;
		}
		Vector<uint8_t> variant_bytes;
		variant_bytes.resize(variant_size);

		uint32_t br = f->get_buffer(variant_bytes.ptrw(), variant_size);

		ERR_FAIL_COND_V(br != variant_size, false);

		p_version->variant_data[variant_id] = variant_bytes;
	}

	for (uint32_t i = 0; i < variant_count; i++) {
		int variant_id = group_to_variant_map[p_group][i];
		if (!variants_enabled[variant_id]) {
			MutexLock lock(variant_set_mutex);
			p_version->variants[variant_id] = RID();
			continue;
		}
		{
			MutexLock lock(variant_set_mutex);
			RID shader = RD::get_singleton()->shader_create_from_bytecode(p_version->variant_data[variant_id], p_version->variants[variant_id]);
			if (shader.is_null()) {
				for (uint32_t j = 0; j < i; j++) {
					int variant_free_id = group_to_variant_map[p_group][j];
					RD::get_singleton()->free(p_version->variants[variant_free_id]);
				}
				ERR_FAIL_COND_V(shader.is_null(), false);
			}

			p_version->variants[variant_id] = shader;
		}
	}

	memdelete_arr(p_version->variant_data); // Clear stages.
	p_version->variant_data = nullptr;
	p_version->valid = true;
	return true;
}

void ShaderRD::_save_to_cache(Version *p_version, int p_group) {
	ERR_FAIL_COND(!shader_cache_dir_valid);
	const String &path = _get_cache_file_path(p_version, p_group);
	Ref<FileAccess> f = FileAccess::open(path, FileAccess::WRITE);
	ERR_FAIL_COND(f.is_null());
	f->store_buffer((const uint8_t *)shader_file_header, 4);
	f->store_32(cache_file_version); // File version.
	uint32_t variant_count = group_to_variant_map[p_group].size();
	f->store_32(variant_count); // Variant count.
	for (uint32_t i = 0; i < variant_count; i++) {
		int variant_id = group_to_variant_map[p_group][i];
		f->store_32(p_version->variant_data[variant_id].size()); // Size of the variant's binary blob (0 if it was never compiled).
		f->store_buffer(p_version->variant_data[variant_id].ptr(), p_version->variant_data[variant_id].size());
	}
}
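
// On-disk layout produced above and consumed by _load_from_cache():
//
//     bytes 0..3  : "GDSC" magic (shader_file_header)
//     uint32      : cache_file_version
//     uint32      : variant count for the group
//     per variant : uint32 blob size, then that many bytes of shader binary
//                   (a size of 0 marks a variant that was disabled when saved)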

void ShaderRD::_allocate_placeholders(Version *p_version, int p_group) {
	ERR_FAIL_NULL(p_version->variants);
	for (uint32_t i = 0; i < group_to_variant_map[p_group].size(); i++) {
		int variant_id = group_to_variant_map[p_group][i];
		RID shader = RD::get_singleton()->shader_create_placeholder();
		{
			MutexLock lock(variant_set_mutex);
			p_version->variants[variant_id] = shader;
		}
	}
}
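
// Placeholders keep each variant's RID stable while its group is still
// disabled: once the group is enabled and compiled, shader_create_from_bytecode()
// is passed that same RID (see _compile_variant() and _load_from_cache()) and
// the real shader replaces the placeholder in place.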

// Try to compile all variants for a given group.
// Will skip variants that are disabled.
void ShaderRD::_compile_version(Version *p_version, int p_group) {
	if (!group_enabled[p_group]) {
		return;
	}

	typedef Vector<uint8_t> ShaderStageData;
	p_version->variant_data = memnew_arr(ShaderStageData, variant_defines.size());

	p_version->dirty = false;

	if (shader_cache_dir_valid) {
		if (_load_from_cache(p_version, p_group)) {
			return;
		}
	}

	CompileData compile_data;
	compile_data.version = p_version;
	compile_data.group = p_group;

#if 1
	WorkerThreadPool::GroupID group_task = WorkerThreadPool::get_singleton()->add_template_group_task(this, &ShaderRD::_compile_variant, &compile_data, group_to_variant_map[p_group].size(), -1, true, SNAME("ShaderCompilation"));
	WorkerThreadPool::get_singleton()->wait_for_group_task_completion(group_task);

#else
	for (uint32_t i = 0; i < group_to_variant_map[p_group].size(); i++) {
		_compile_variant(i, &compile_data);
	}
#endif

	bool all_valid = true;

	for (uint32_t i = 0; i < group_to_variant_map[p_group].size(); i++) {
		int variant_id = group_to_variant_map[p_group][i];
		if (!variants_enabled[variant_id]) {
			continue; // Disabled.
		}
		if (p_version->variants[variant_id].is_null()) {
			all_valid = false;
			break;
		}
	}

	if (!all_valid) {
		// Clear versions if they exist.
		for (int i = 0; i < variant_defines.size(); i++) {
			if (!variants_enabled[i] || !group_enabled[variant_defines[i].group]) {
				continue; // Disabled.
			}
			if (!p_version->variants[i].is_null()) {
				RD::get_singleton()->free(p_version->variants[i]);
			}
		}
		memdelete_arr(p_version->variants);
		if (p_version->variant_data) {
			memdelete_arr(p_version->variant_data);
		}
		p_version->variants = nullptr;
		p_version->variant_data = nullptr;
		return;
	} else if (shader_cache_dir_valid) {
		// Save shader cache.
		_save_to_cache(p_version, p_group);
	}

	memdelete_arr(p_version->variant_data); // Clear stages.
	p_version->variant_data = nullptr;

	p_version->valid = true;
}
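
// The #if 1 block above fans each variant of the group out to the
// WorkerThreadPool (one task per variant, flagged high-priority) and blocks
// until the whole group finishes; flipping it to #if 0 compiles serially on
// the calling thread, which can help when isolating a crash inside a driver's
// shader compiler.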

void ShaderRD::version_set_code(RID p_version, const HashMap<String, String> &p_code, const String &p_uniforms, const String &p_vertex_globals, const String &p_fragment_globals, const Vector<String> &p_custom_defines) {
	ERR_FAIL_COND(is_compute);

	Version *version = version_owner.get_or_null(p_version);
	ERR_FAIL_NULL(version);
	version->vertex_globals = p_vertex_globals.utf8();
	version->fragment_globals = p_fragment_globals.utf8();
	version->uniforms = p_uniforms.utf8();
	version->code_sections.clear();
	for (const KeyValue<String, String> &E : p_code) {
		version->code_sections[StringName(E.key.to_upper())] = E.value.utf8();
	}

	version->custom_defines.clear();
	for (int i = 0; i < p_custom_defines.size(); i++) {
		version->custom_defines.push_back(p_custom_defines[i].utf8());
	}

	version->dirty = true;
	if (version->initialize_needed) {
		_initialize_version(version);
		for (int i = 0; i < group_enabled.size(); i++) {
			if (!group_enabled[i]) {
				_allocate_placeholders(version, i);
				continue;
			}
			_compile_version(version, i);
		}
		version->initialize_needed = false;
	}
}

void ShaderRD::version_set_compute_code(RID p_version, const HashMap<String, String> &p_code, const String &p_uniforms, const String &p_compute_globals, const Vector<String> &p_custom_defines) {
	ERR_FAIL_COND(!is_compute);

	Version *version = version_owner.get_or_null(p_version);
	ERR_FAIL_NULL(version);

	version->compute_globals = p_compute_globals.utf8();
	version->uniforms = p_uniforms.utf8();

	version->code_sections.clear();
	for (const KeyValue<String, String> &E : p_code) {
		version->code_sections[StringName(E.key.to_upper())] = E.value.utf8();
	}

	version->custom_defines.clear();
	for (int i = 0; i < p_custom_defines.size(); i++) {
		version->custom_defines.push_back(p_custom_defines[i].utf8());
	}

	version->dirty = true;
	if (version->initialize_needed) {
		_initialize_version(version);
		for (int i = 0; i < group_enabled.size(); i++) {
			if (!group_enabled[i]) {
				_allocate_placeholders(version, i);
				continue;
			}
			_compile_version(version, i);
		}
		version->initialize_needed = false;
	}
}

bool ShaderRD::version_is_valid(RID p_version) {
	Version *version = version_owner.get_or_null(p_version);
	ERR_FAIL_NULL_V(version, false);

	if (version->dirty) {
		_initialize_version(version);
		for (int i = 0; i < group_enabled.size(); i++) {
			if (!group_enabled[i]) {
				_allocate_placeholders(version, i);
				continue;
			}
			_compile_version(version, i);
		}
	}

	return version->valid;
}

bool ShaderRD::version_free(RID p_version) {
	if (version_owner.owns(p_version)) {
		Version *version = version_owner.get_or_null(p_version);
		_clear_version(version);
		version_owner.free(p_version);
	} else {
		return false;
	}

	return true;
}

void ShaderRD::set_variant_enabled(int p_variant, bool p_enabled) {
	ERR_FAIL_COND(version_owner.get_rid_count() > 0); // Versions exist.
	ERR_FAIL_INDEX(p_variant, variants_enabled.size());
	variants_enabled.write[p_variant] = p_enabled;
}

bool ShaderRD::is_variant_enabled(int p_variant) const {
	ERR_FAIL_INDEX_V(p_variant, variants_enabled.size(), false);
	return variants_enabled[p_variant];
}

void ShaderRD::enable_group(int p_group) {
	ERR_FAIL_INDEX(p_group, group_enabled.size());

	if (group_enabled[p_group]) {
		// Group already enabled, do nothing.
		return;
	}

	group_enabled.write[p_group] = true;

	// Compile all versions again to include the new group.
	List<RID> all_versions;
	version_owner.get_owned_list(&all_versions);
	for (const RID &E : all_versions) {
		Version *version = version_owner.get_or_null(E);
		_compile_version(version, p_group);
	}
}

bool ShaderRD::is_group_enabled(int p_group) const {
	return group_enabled[p_group];
}

bool ShaderRD::shader_cache_cleanup_on_start = false;

ShaderRD::ShaderRD() {
	// Do not feel forced to use this, in most cases it makes little to no difference.
	bool use_32_threads = false;
	if (RD::get_singleton()->get_device_vendor_name() == "NVIDIA") {
		use_32_threads = true;
	}
	String base_compute_define_text;
	if (use_32_threads) {
		base_compute_define_text = "\n#define NATIVE_LOCAL_GROUP_SIZE 32\n#define NATIVE_LOCAL_SIZE_2D_X 8\n#define NATIVE_LOCAL_SIZE_2D_Y 4\n";
	} else {
		base_compute_define_text = "\n#define NATIVE_LOCAL_GROUP_SIZE 64\n#define NATIVE_LOCAL_SIZE_2D_X 8\n#define NATIVE_LOCAL_SIZE_2D_Y 8\n";
	}

	base_compute_defines = base_compute_define_text.ascii();
}
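
// A compute template can then size its workgroups from these defines, e.g.
// (illustrative GLSL, not taken from an actual engine template):
//
//     layout(local_size_x = NATIVE_LOCAL_SIZE_2D_X,
//            local_size_y = NATIVE_LOCAL_SIZE_2D_Y,
//            local_size_z = 1) in;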

void ShaderRD::initialize(const Vector<String> &p_variant_defines, const String &p_general_defines) {
	ERR_FAIL_COND(variant_defines.size());
	ERR_FAIL_COND(p_variant_defines.is_empty());

	general_defines = p_general_defines.utf8();

	// When initialized this way, there is just one group and it's always enabled.
	group_to_variant_map.insert(0, LocalVector<int>{});
	group_enabled.push_back(true);

	for (int i = 0; i < p_variant_defines.size(); i++) {
		variant_defines.push_back(VariantDefine(0, p_variant_defines[i], true));
		variants_enabled.push_back(true);
		group_to_variant_map[0].push_back(i);
	}

	if (!shader_cache_dir.is_empty()) {
		group_sha256.resize(1);
		_initialize_cache();
	}
}

void ShaderRD::_initialize_cache() {
	for (const KeyValue<int, LocalVector<int>> &E : group_to_variant_map) {
		StringBuilder hash_build;

		hash_build.append("[base_hash]");
		hash_build.append(base_sha256);
		hash_build.append("[general_defines]");
		hash_build.append(general_defines.get_data());
		hash_build.append("[group_id]");
		hash_build.append(itos(E.key));
		for (uint32_t i = 0; i < E.value.size(); i++) {
			hash_build.append("[variant_defines:" + itos(E.value[i]) + "]");
			hash_build.append(variant_defines[E.value[i]].text.get_data());
		}

		group_sha256[E.key] = hash_build.as_string().sha256_text();

		Ref<DirAccess> d = DirAccess::open(shader_cache_dir);
		ERR_FAIL_COND(d.is_null());
		if (d->change_dir(name) != OK) {
			Error err = d->make_dir(name);
			ERR_FAIL_COND(err != OK);
			d->change_dir(name);
		}

		// Erase other versions?
		if (shader_cache_cleanup_on_start) {
		}
		//
		if (d->change_dir(group_sha256[E.key]) != OK) {
			Error err = d->make_dir(group_sha256[E.key]);
			ERR_FAIL_COND(err != OK);
		}
		shader_cache_dir_valid = true;

		print_verbose("Shader '" + name + "' (group " + itos(E.key) + ") SHA256: " + group_sha256[E.key]);
	}
}

// Same as above, but allows specifying shader compilation groups.
void ShaderRD::initialize(const Vector<VariantDefine> &p_variant_defines, const String &p_general_defines) {
	ERR_FAIL_COND(variant_defines.size());
	ERR_FAIL_COND(p_variant_defines.is_empty());

	general_defines = p_general_defines.utf8();

	int max_group_id = 0;

	for (int i = 0; i < p_variant_defines.size(); i++) {
		// Fill variant array.
		variant_defines.push_back(p_variant_defines[i]);
		variants_enabled.push_back(true);

		// Map variant array index to group id, so we can iterate over groups later.
		if (!group_to_variant_map.has(p_variant_defines[i].group)) {
			group_to_variant_map.insert(p_variant_defines[i].group, LocalVector<int>{});
		}
		group_to_variant_map[p_variant_defines[i].group].push_back(i);

		// Track max size.
		if (p_variant_defines[i].group > max_group_id) {
			max_group_id = p_variant_defines[i].group;
		}
	}

	// Set all groups to false, then enable those that should be default.
	group_enabled.resize_zeroed(max_group_id + 1);
	bool *enabled_ptr = group_enabled.ptrw();
	for (int i = 0; i < p_variant_defines.size(); i++) {
		if (p_variant_defines[i].default_enabled) {
			enabled_ptr[p_variant_defines[i].group] = true;
		}
	}

	if (!shader_cache_dir.is_empty()) {
		group_sha256.resize(max_group_id + 1);
		_initialize_cache();
	}
}
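
// Illustrative call with hypothetical defines: group 0 compiles as soon as a
// version's code is set, while group 1 waits for an explicit enable_group(1).
//
//     Vector<ShaderRD::VariantDefine> defines;
//     defines.push_back(VariantDefine(0, "#define MODE_COLOR\n", true));
//     defines.push_back(VariantDefine(1, "#define MODE_SHADOW\n", false));
//     shader.initialize(defines, general_defines);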

void ShaderRD::set_shader_cache_dir(const String &p_dir) {
	shader_cache_dir = p_dir;
}

void ShaderRD::set_shader_cache_save_compressed(bool p_enable) {
	shader_cache_save_compressed = p_enable;
}

void ShaderRD::set_shader_cache_save_compressed_zstd(bool p_enable) {
	shader_cache_save_compressed_zstd = p_enable;
}

void ShaderRD::set_shader_cache_save_debug(bool p_enable) {
	shader_cache_save_debug = p_enable;
}

String ShaderRD::shader_cache_dir;
bool ShaderRD::shader_cache_save_compressed = true;
bool ShaderRD::shader_cache_save_compressed_zstd = true;
bool ShaderRD::shader_cache_save_debug = true;

ShaderRD::~ShaderRD() {
	List<RID> remaining;
	version_owner.get_owned_list(&remaining);
	if (remaining.size()) {
		ERR_PRINT(itos(remaining.size()) + " shaders of type " + name + " were never freed");
		while (remaining.size()) {
			version_free(remaining.front()->get());
			remaining.pop_front();
		}
	}
}