Merge pull request #50847 from reduz/implement-binary-shader-compilation

Implement Binary Shader Compilation

commit 8f6c16e4a4
15 changed files with 609 additions and 312 deletions

@@ -7,8 +7,8 @@
<tutorials>
</tutorials>
<methods>
<method name="get_bytecode" qualifiers="const">
<return type="RDShaderBytecode">
<method name="get_spirv" qualifiers="const">
<return type="RDShaderSPIRV">
</return>
<argument index="0" name="version" type="StringName" default="&quot;&quot;">
</argument>

@@ -24,7 +24,7 @@
<method name="set_bytecode">
<return type="void">
</return>
<argument index="0" name="bytecode" type="RDShaderBytecode">
<argument index="0" name="bytecode" type="RDShaderSPIRV">
</argument>
<argument index="1" name="version" type="StringName" default="&quot;&quot;">
</argument>
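
For context, the renamed accessor is used like this from engine code (a minimal sketch; the resource path and version name are placeholders, only get_spirv() and set_bytecode() come from the documented API above):

    // Minimal sketch of the renamed RDShaderFile accessors (path and version name are illustrative).
    Ref<RDShaderFile> shader_file = ResourceLoader::load("res://my_shader.glsl"); // hypothetical resource
    Ref<RDShaderSPIRV> spirv = shader_file->get_spirv("variant_name");            // was get_bytecode()
    ERR_FAIL_COND(spirv.is_null());
    shader_file->set_bytecode(spirv, "variant_name"); // argument is now typed RDShaderSPIRV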

@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" ?>
<class name="RDShaderBytecode" inherits="Resource" version="4.0">
<class name="RDShaderSPIRV" inherits="Resource" version="4.0">
<brief_description>
</brief_description>
<description>

@@ -635,8 +635,16 @@
<description>
</description>
</method>
<method name="shader_compile_from_source">
<return type="RDShaderBytecode">
<method name="shader_compile_binary_from_spirv">
<return type="PackedByteArray">
</return>
<argument index="0" name="spirv_data" type="RDShaderSPIRV">
</argument>
<description>
</description>
</method>
<method name="shader_compile_spirv_from_source">
<return type="RDShaderSPIRV">
</return>
<argument index="0" name="shader_source" type="RDShaderSource">
</argument>

@@ -645,10 +653,18 @@
<description>
</description>
</method>
<method name="shader_create">
<method name="shader_create_from_bytecode">
<return type="RID">
</return>
<argument index="0" name="shader_data" type="RDShaderBytecode">
<argument index="0" name="binary_data" type="PackedByteArray">
</argument>
<description>
</description>
</method>
<method name="shader_create_from_spirv">
<return type="PackedByteArray">
</return>
<argument index="0" name="spirv_data" type="RDShaderSPIRV">
</argument>
<description>
</description>
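
Taken together, these documented methods split shader creation into explicit compile and create steps. A minimal engine-side sketch of the flow (mirroring what ShaderRD does further down in this patch, using the RD alias for RenderingDevice; the GLSL source string and surrounding function are placeholders):

    // Sketch: GLSL source -> SPIR-V -> driver-specific binary -> shader RID.
    String error;
    RD::ShaderStageSPIRVData stage;
    stage.shader_stage = RD::SHADER_STAGE_COMPUTE;
    stage.spir_v = RD::get_singleton()->shader_compile_spirv_from_source(
            RD::SHADER_STAGE_COMPUTE, compute_source, RD::SHADER_LANGUAGE_GLSL, &error);
    ERR_FAIL_COND_MSG(stage.spir_v.size() == 0, error);

    Vector<RD::ShaderStageSPIRVData> stages;
    stages.push_back(stage);

    // The binary is device/driver specific; this is what gets cached on disk,
    // keyed by RD::get_singleton()->shader_get_binary_cache_key().
    Vector<uint8_t> binary = RD::get_singleton()->shader_compile_binary_from_spirv(stages);
    ERR_FAIL_COND(binary.size() == 0);

    RID shader = RD::get_singleton()->shader_create_from_bytecode(binary);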

@@ -31,11 +31,14 @@
#include "rendering_device_vulkan.h"

#include "core/config/project_settings.h"
#include "core/io/compression.h"
#include "core/io/file_access.h"
#include "core/io/marshalls.h"
#include "core/os/os.h"
#include "core/templates/hashfuncs.h"
#include "drivers/vulkan/vulkan_context.h"

#include "thirdparty/misc/smolv.h"
#include "thirdparty/spirv-reflect/spirv_reflect.h"

//#define FORCE_FULL_BARRIER
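
The new core/io/compression.h and thirdparty/misc/smolv.h includes feed the compression path used further down: each SPIR-V stage is re-encoded with SmolV and then zstd-compressed on top if that actually shrinks it. A standalone sketch of that pair of calls (buffer names here are illustrative):

    // Illustrative: SmolV-encode a SPIR-V blob, then try zstd on top of it.
    smolv::ByteArray smolv_bytes;
    if (smolv::Encode(spirv.ptr(), spirv.size(), smolv_bytes, 0 /* keep debug info */)) {
        Vector<uint8_t> zstd;
        zstd.resize(Compression::get_max_compressed_buffer_size(smolv_bytes.size(), Compression::MODE_ZSTD));
        int dst_size = Compression::compress(zstd.ptrw(), &smolv_bytes[0], smolv_bytes.size(), Compression::MODE_ZSTD);
        // Keep the zstd result only if it is smaller; otherwise store the raw SmolV stream.
    }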

@@ -4360,53 +4363,87 @@ bool RenderingDeviceVulkan::_uniform_add_binding(Vector<Vector<VkDescriptorSetLa
}
#endif

RID RenderingDeviceVulkan::shader_create(const Vector<ShaderStageData> &p_stages) {
//descriptor layouts
Vector<Vector<VkDescriptorSetLayoutBinding>> set_bindings;
Vector<Vector<UniformInfo>> uniform_info;
Shader::PushConstant push_constant;
push_constant.push_constant_size = 0;
push_constant.push_constants_vk_stage = 0;
#define SHADER_BINARY_VERSION 1

uint32_t vertex_input_mask = 0;
String RenderingDeviceVulkan::shader_get_binary_cache_key() const {
return "Vulkan-SV" + itos(SHADER_BINARY_VERSION);
}

uint32_t fragment_outputs = 0;
struct RenderingDeviceVulkanShaderBinaryDataBinding {
uint32_t type;
uint32_t binding;
uint32_t stages;
uint32_t length; //size of arrays (in total elements), or ubos (in bytes * total elements)
};

struct RenderingDeviceVulkanShaderBinarySpecializationConstant {
uint32_t type;
uint32_t constant_id;
union {
uint32_t int_value;
float float_value;
bool bool_value;
};
uint32_t stage_flags;
};

struct RenderingDeviceVulkanShaderBinaryData {
uint32_t vertex_input_mask;
uint32_t fragment_outputs;
uint32_t specialization_constant_count;
uint32_t is_compute;
uint32_t compute_local_size[3];
uint32_t set_count;
uint32_t push_constant_size;
uint32_t push_constants_vk_stage;
uint32_t stage_count;
};

Vector<uint8_t> RenderingDeviceVulkan::shader_compile_binary_from_spirv(const Vector<ShaderStageSPIRVData> &p_spirv) {
RenderingDeviceVulkanShaderBinaryData binary_data;
binary_data.vertex_input_mask = 0;
binary_data.fragment_outputs = 0;
binary_data.specialization_constant_count = 0;
binary_data.is_compute = 0;
binary_data.compute_local_size[0] = 0;
binary_data.compute_local_size[1] = 0;
binary_data.compute_local_size[2] = 0;
binary_data.set_count = 0;
binary_data.push_constant_size = 0;
binary_data.push_constants_vk_stage = 0;

Vector<Vector<RenderingDeviceVulkanShaderBinaryDataBinding>> uniform_info; //set bindings
Vector<RenderingDeviceVulkanShaderBinarySpecializationConstant> specialization_constants;

uint32_t stages_processed = 0;

Vector<Shader::SpecializationConstant> specialization_constants;

bool is_compute = false;

uint32_t compute_local_size[3] = { 0, 0, 0 };

for (int i = 0; i < p_stages.size(); i++) {
if (p_stages[i].shader_stage == SHADER_STAGE_COMPUTE) {
is_compute = true;
ERR_FAIL_COND_V_MSG(p_stages.size() != 1, RID(),
for (int i = 0; i < p_spirv.size(); i++) {
if (p_spirv[i].shader_stage == SHADER_STAGE_COMPUTE) {
binary_data.is_compute = true;
ERR_FAIL_COND_V_MSG(p_spirv.size() != 1, Vector<uint8_t>(),
"Compute shaders can only receive one stage, dedicated to compute.");
}
ERR_FAIL_COND_V_MSG(stages_processed & (1 << p_stages[i].shader_stage), RID(),
"Stage " + String(shader_stage_names[p_stages[i].shader_stage]) + " submitted more than once.");
ERR_FAIL_COND_V_MSG(stages_processed & (1 << p_spirv[i].shader_stage), Vector<uint8_t>(),
"Stage " + String(shader_stage_names[p_spirv[i].shader_stage]) + " submitted more than once.");

{
SpvReflectShaderModule module;
const uint8_t *spirv = p_stages[i].spir_v.ptr();
SpvReflectResult result = spvReflectCreateShaderModule(p_stages[i].spir_v.size(), spirv, &module);
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, RID(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "' failed parsing shader.");
const uint8_t *spirv = p_spirv[i].spir_v.ptr();
SpvReflectResult result = spvReflectCreateShaderModule(p_spirv[i].spir_v.size(), spirv, &module);
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, Vector<uint8_t>(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed parsing shader.");

if (is_compute) {
compute_local_size[0] = module.entry_points->local_size.x;
compute_local_size[1] = module.entry_points->local_size.y;
compute_local_size[2] = module.entry_points->local_size.z;
if (binary_data.is_compute) {
binary_data.compute_local_size[0] = module.entry_points->local_size.x;
binary_data.compute_local_size[1] = module.entry_points->local_size.y;
binary_data.compute_local_size[2] = module.entry_points->local_size.z;
}
uint32_t binding_count = 0;
result = spvReflectEnumerateDescriptorBindings(&module, &binding_count, nullptr);
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, RID(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "' failed enumerating descriptor bindings.");
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, Vector<uint8_t>(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed enumerating descriptor bindings.");

uint32_t stage = p_stages[i].shader_stage;
uint32_t stage = p_spirv[i].shader_stage;

if (binding_count > 0) {
//Parse bindings
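
To illustrate what ends up in a RenderingDeviceVulkanShaderBinaryDataBinding record, a hypothetical declaration such as layout(set = 1, binding = 3) uniform sampler2D textures[4]; in a fragment stage would be reflected roughly as follows (the set index itself is implied by which per-set array the record is stored in):

    // Illustrative reflected record for the hypothetical binding above.
    RenderingDeviceVulkanShaderBinaryDataBinding info;
    info.type = UNIFORM_TYPE_SAMPLER_WITH_TEXTURE; // combined image sampler
    info.binding = 3;
    info.length = 4;                               // array size; block size in bytes for UBO/SSBO
    info.stages = 1 << SHADER_STAGE_FRAGMENT;      // bitmask of stages that use the binding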

@@ -4415,56 +4452,47 @@ RID RenderingDeviceVulkan::shader_create(const Vector<ShaderStageData> &p_stages
bindings.resize(binding_count);
result = spvReflectEnumerateDescriptorBindings(&module, &binding_count, bindings.ptrw());

ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, RID(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "' failed getting descriptor bindings.");
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, Vector<uint8_t>(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed getting descriptor bindings.");

for (uint32_t j = 0; j < binding_count; j++) {
const SpvReflectDescriptorBinding &binding = *bindings[j];

VkDescriptorSetLayoutBinding layout_binding;
UniformInfo info;
RenderingDeviceVulkanShaderBinaryDataBinding info;

bool need_array_dimensions = false;
bool need_block_size = false;

switch (binding.descriptor_type) {
case SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLER: {
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
info.type = UNIFORM_TYPE_SAMPLER;
need_array_dimensions = true;
} break;
case SPV_REFLECT_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
info.type = UNIFORM_TYPE_SAMPLER_WITH_TEXTURE;
need_array_dimensions = true;
} break;
case SPV_REFLECT_DESCRIPTOR_TYPE_SAMPLED_IMAGE: {
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
info.type = UNIFORM_TYPE_TEXTURE;
need_array_dimensions = true;
} break;
case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
info.type = UNIFORM_TYPE_IMAGE;
need_array_dimensions = true;
} break;
case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: {
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
info.type = UNIFORM_TYPE_TEXTURE_BUFFER;
need_array_dimensions = true;
} break;
case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
info.type = UNIFORM_TYPE_IMAGE_BUFFER;
need_array_dimensions = true;
} break;
case SPV_REFLECT_DESCRIPTOR_TYPE_UNIFORM_BUFFER: {
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
info.type = UNIFORM_TYPE_UNIFORM_BUFFER;
need_block_size = true;
} break;
case SPV_REFLECT_DESCRIPTOR_TYPE_STORAGE_BUFFER: {
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
info.type = UNIFORM_TYPE_STORAGE_BUFFER;
need_block_size = true;
} break;

@@ -4477,7 +4505,6 @@ RID RenderingDeviceVulkan::shader_create(const Vector<ShaderStageData> &p_stages
continue;
} break;
case SPV_REFLECT_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: {
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
info.type = UNIFORM_TYPE_INPUT_ATTACHMENT;
} break;
case SPV_REFLECT_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR: {

@@ -4499,42 +4526,35 @@ RID RenderingDeviceVulkan::shader_create(const Vector<ShaderStageData> &p_stages
}
}

layout_binding.descriptorCount = info.length;

} else if (need_block_size) {
info.length = binding.block.size;
layout_binding.descriptorCount = 1;
} else {
info.length = 0;
layout_binding.descriptorCount = 1;
}

info.binding = binding.binding;
uint32_t set = binding.set;

//print_line("Stage: " + String(shader_stage_names[stage]) + " set=" + itos(set) + " binding=" + itos(info.binding) + " type=" + shader_uniform_names[info.type] + " length=" + itos(info.length));

ERR_FAIL_COND_V_MSG(set >= MAX_UNIFORM_SETS, RID(),
ERR_FAIL_COND_V_MSG(set >= MAX_UNIFORM_SETS, Vector<uint8_t>(),
"On shader stage '" + String(shader_stage_names[stage]) + "', uniform '" + binding.name + "' uses a set (" + itos(set) + ") index larger than what is supported (" + itos(MAX_UNIFORM_SETS) + ").");

ERR_FAIL_COND_V_MSG(set >= limits.maxBoundDescriptorSets, RID(),
ERR_FAIL_COND_V_MSG(set >= limits.maxBoundDescriptorSets, Vector<uint8_t>(),
"On shader stage '" + String(shader_stage_names[stage]) + "', uniform '" + binding.name + "' uses a set (" + itos(set) + ") index larger than what is supported by the hardware (" + itos(limits.maxBoundDescriptorSets) + ").");

if (set < (uint32_t)set_bindings.size()) {
if (set < (uint32_t)uniform_info.size()) {
//check if this already exists
bool exists = false;
for (int k = 0; k < set_bindings[set].size(); k++) {
if (set_bindings[set][k].binding == (uint32_t)info.binding) {
for (int k = 0; k < uniform_info[set].size(); k++) {
if (uniform_info[set][k].binding == (uint32_t)info.binding) {
//already exists, verify that it's the same type
ERR_FAIL_COND_V_MSG(set_bindings[set][k].descriptorType != layout_binding.descriptorType, RID(),
ERR_FAIL_COND_V_MSG(uniform_info[set][k].type != info.type, Vector<uint8_t>(),
"On shader stage '" + String(shader_stage_names[stage]) + "', uniform '" + binding.name + "' trying to re-use location for set=" + itos(set) + ", binding=" + itos(info.binding) + " with different uniform type.");

//also, verify that it's the same size
ERR_FAIL_COND_V_MSG(set_bindings[set][k].descriptorCount != layout_binding.descriptorCount || uniform_info[set][k].length != info.length, RID(),
ERR_FAIL_COND_V_MSG(uniform_info[set][k].length != info.length, Vector<uint8_t>(),
"On shader stage '" + String(shader_stage_names[stage]) + "', uniform '" + binding.name + "' trying to re-use location for set=" + itos(set) + ", binding=" + itos(info.binding) + " with different uniform size.");

//just append stage mask and return
set_bindings.write[set].write[k].stageFlags |= shader_stage_masks[stage];
uniform_info.write[set].write[k].stages |= 1 << stage;
exists = true;
}

@@ -4545,19 +4565,12 @@ RID RenderingDeviceVulkan::shader_create(const Vector<ShaderStageData> &p_stages
}
}

layout_binding.binding = info.binding;
layout_binding.stageFlags = shader_stage_masks[stage];
layout_binding.pImmutableSamplers = nullptr; //no support for this yet

info.stages = 1 << stage;
info.binding = info.binding;

if (set >= (uint32_t)set_bindings.size()) {
set_bindings.resize(set + 1);
if (set >= (uint32_t)uniform_info.size()) {
uniform_info.resize(set + 1);
}

set_bindings.write[set].push_back(layout_binding);
uniform_info.write[set].push_back(info);
}
}

@@ -4567,41 +4580,41 @@ RID RenderingDeviceVulkan::shader_create(const Vector<ShaderStageData> &p_stages

uint32_t sc_count = 0;
result = spvReflectEnumerateSpecializationConstants(&module, &sc_count, nullptr);
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, RID(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "' failed enumerating specialization constants.");
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, Vector<uint8_t>(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed enumerating specialization constants.");

if (sc_count) {
Vector<SpvReflectSpecializationConstant *> spec_constants;
spec_constants.resize(sc_count);

result = spvReflectEnumerateSpecializationConstants(&module, &sc_count, spec_constants.ptrw());
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, RID(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "' failed obtaining specialization constants.");
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, Vector<uint8_t>(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed obtaining specialization constants.");

for (uint32_t j = 0; j < sc_count; j++) {
int32_t existing = -1;
Shader::SpecializationConstant sconst;
sconst.constant.constant_id = spec_constants[j]->constant_id;
RenderingDeviceVulkanShaderBinarySpecializationConstant sconst;
sconst.constant_id = spec_constants[j]->constant_id;
switch (spec_constants[j]->constant_type) {
case SPV_REFLECT_SPECIALIZATION_CONSTANT_BOOL: {
sconst.constant.type = PIPELINE_SPECIALIZATION_CONSTANT_TYPE_BOOL;
sconst.constant.bool_value = spec_constants[j]->default_value.int_bool_value != 0;
sconst.type = PIPELINE_SPECIALIZATION_CONSTANT_TYPE_BOOL;
sconst.bool_value = spec_constants[j]->default_value.int_bool_value != 0;
} break;
case SPV_REFLECT_SPECIALIZATION_CONSTANT_INT: {
sconst.constant.type = PIPELINE_SPECIALIZATION_CONSTANT_TYPE_INT;
sconst.constant.int_value = spec_constants[j]->default_value.int_bool_value;
sconst.type = PIPELINE_SPECIALIZATION_CONSTANT_TYPE_INT;
sconst.int_value = spec_constants[j]->default_value.int_bool_value;
} break;
case SPV_REFLECT_SPECIALIZATION_CONSTANT_FLOAT: {
sconst.constant.type = PIPELINE_SPECIALIZATION_CONSTANT_TYPE_FLOAT;
sconst.constant.float_value = spec_constants[j]->default_value.float_value;
sconst.type = PIPELINE_SPECIALIZATION_CONSTANT_TYPE_FLOAT;
sconst.float_value = spec_constants[j]->default_value.float_value;
} break;
}
sconst.stage_flags = 1 << p_stages[i].shader_stage;
sconst.stage_flags = 1 << p_spirv[i].shader_stage;

for (int k = 0; k < specialization_constants.size(); k++) {
if (specialization_constants[k].constant.constant_id == sconst.constant.constant_id) {
ERR_FAIL_COND_V_MSG(specialization_constants[k].constant.type != sconst.constant.type, RID(), "More than one specialization constant used for id (" + itos(sconst.constant.constant_id) + "), but their types differ.");
ERR_FAIL_COND_V_MSG(specialization_constants[k].constant.int_value != sconst.constant.int_value, RID(), "More than one specialization constant used for id (" + itos(sconst.constant.constant_id) + "), but their default values differ.");
if (specialization_constants[k].constant_id == sconst.constant_id) {
ERR_FAIL_COND_V_MSG(specialization_constants[k].type != sconst.type, Vector<uint8_t>(), "More than one specialization constant used for id (" + itos(sconst.constant_id) + "), but their types differ.");
ERR_FAIL_COND_V_MSG(specialization_constants[k].int_value != sconst.int_value, Vector<uint8_t>(), "More than one specialization constant used for id (" + itos(sconst.constant_id) + "), but their default values differ.");
existing = k;
break;
}
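
Similarly, a specialization constant declared as layout(constant_id = 7) const int SHADOW_SAMPLES = 4; (hypothetical) would yield one RenderingDeviceVulkanShaderBinarySpecializationConstant entry carrying its default value:

    // Illustrative reflected specialization constant for the hypothetical declaration above.
    RenderingDeviceVulkanShaderBinarySpecializationConstant sconst;
    sconst.constant_id = 7;
    sconst.type = PIPELINE_SPECIALIZATION_CONSTANT_TYPE_INT;
    sconst.int_value = 4;                            // default value taken from the SPIR-V
    sconst.stage_flags = 1 << SHADER_STAGE_FRAGMENT; // ORed together across stages that declare it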

@@ -4619,20 +4632,20 @@ RID RenderingDeviceVulkan::shader_create(const Vector<ShaderStageData> &p_stages
if (stage == SHADER_STAGE_VERTEX) {
uint32_t iv_count = 0;
result = spvReflectEnumerateInputVariables(&module, &iv_count, nullptr);
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, RID(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "' failed enumerating input variables.");
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, Vector<uint8_t>(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed enumerating input variables.");

if (iv_count) {
Vector<SpvReflectInterfaceVariable *> input_vars;
input_vars.resize(iv_count);

result = spvReflectEnumerateInputVariables(&module, &iv_count, input_vars.ptrw());
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, RID(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "' failed obtaining input variables.");
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, Vector<uint8_t>(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed obtaining input variables.");

for (uint32_t j = 0; j < iv_count; j++) {
if (input_vars[j] && input_vars[j]->decoration_flags == 0) { //regular input
vertex_input_mask |= (1 << uint32_t(input_vars[j]->location));
binary_data.vertex_input_mask |= (1 << uint32_t(input_vars[j]->location));
}
}
}

@@ -4641,21 +4654,21 @@ RID RenderingDeviceVulkan::shader_create(const Vector<ShaderStageData> &p_stages
if (stage == SHADER_STAGE_FRAGMENT) {
uint32_t ov_count = 0;
result = spvReflectEnumerateOutputVariables(&module, &ov_count, nullptr);
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, RID(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "' failed enumerating output variables.");
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, Vector<uint8_t>(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed enumerating output variables.");

if (ov_count) {
Vector<SpvReflectInterfaceVariable *> output_vars;
output_vars.resize(ov_count);

result = spvReflectEnumerateOutputVariables(&module, &ov_count, output_vars.ptrw());
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, RID(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "' failed obtaining output variables.");
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, Vector<uint8_t>(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed obtaining output variables.");

for (uint32_t j = 0; j < ov_count; j++) {
const SpvReflectInterfaceVariable *refvar = output_vars[j];
if (refvar != nullptr && refvar->built_in != SpvBuiltInFragDepth) {
fragment_outputs |= 1 << refvar->location;
binary_data.fragment_outputs |= 1 << refvar->location;
}
}
}

@@ -4663,18 +4676,18 @@ RID RenderingDeviceVulkan::shader_create(const Vector<ShaderStageData> &p_stages

uint32_t pc_count = 0;
result = spvReflectEnumeratePushConstantBlocks(&module, &pc_count, nullptr);
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, RID(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "' failed enumerating push constants.");
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, Vector<uint8_t>(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed enumerating push constants.");

if (pc_count) {
ERR_FAIL_COND_V_MSG(pc_count > 1, RID(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "': Only one push constant is supported, which should be the same across shader stages.");
ERR_FAIL_COND_V_MSG(pc_count > 1, Vector<uint8_t>(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "': Only one push constant is supported, which should be the same across shader stages.");

Vector<SpvReflectBlockVariable *> pconstants;
pconstants.resize(pc_count);
result = spvReflectEnumeratePushConstantBlocks(&module, &pc_count, pconstants.ptrw());
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, RID(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "' failed obtaining push constants.");
ERR_FAIL_COND_V_MSG(result != SPV_REFLECT_RESULT_SUCCESS, Vector<uint8_t>(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "' failed obtaining push constants.");
#if 0
if (pconstants[0] == nullptr) {
FileAccess *f = FileAccess::open("res://popo.spv", FileAccess::WRITE);

@@ -4683,11 +4696,11 @@ RID RenderingDeviceVulkan::shader_create(const Vector<ShaderStageData> &p_stages
}
#endif

ERR_FAIL_COND_V_MSG(push_constant.push_constant_size && push_constant.push_constant_size != pconstants[0]->size, RID(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_stages[i].shader_stage]) + "': Push constant block must be the same across shader stages.");
ERR_FAIL_COND_V_MSG(binary_data.push_constant_size && binary_data.push_constant_size != pconstants[0]->size, Vector<uint8_t>(),
"Reflection of SPIR-V shader stage '" + String(shader_stage_names[p_spirv[i].shader_stage]) + "': Push constant block must be the same across shader stages.");

push_constant.push_constant_size = pconstants[0]->size;
push_constant.push_constants_vk_stage |= shader_stage_masks[stage];
binary_data.push_constant_size = pconstants[0]->size;
binary_data.push_constants_vk_stage |= shader_stage_masks[stage];

//print_line("Stage: " + String(shader_stage_names[stage]) + " push constant of size=" + itos(push_constant.push_constant_size));
}

@@ -4696,9 +4709,291 @@ RID RenderingDeviceVulkan::shader_create(const Vector<ShaderStageData> &p_stages
spvReflectDestroyShaderModule(&module);
}

stages_processed |= (1 << p_stages[i].shader_stage);
stages_processed |= (1 << p_spirv[i].shader_stage);
}

Vector<Vector<uint8_t>> compressed_stages;
Vector<uint32_t> smolv_size;
Vector<uint32_t> zstd_size; //if 0, zstd not used

uint32_t stages_binary_size = 0;

bool strip_debug = false;

for (int i = 0; i < p_spirv.size(); i++) {
smolv::ByteArray smolv;
if (!smolv::Encode(p_spirv[i].spir_v.ptr(), p_spirv[i].spir_v.size(), smolv, strip_debug ? smolv::kEncodeFlagStripDebugInfo : 0)) {
ERR_FAIL_V_MSG(Vector<uint8_t>(), "Error compressing shader stage :" + String(shader_stage_names[p_spirv[i].shader_stage]));
} else {
smolv_size.push_back(smolv.size());
{ //zstd
Vector<uint8_t> zstd;
zstd.resize(Compression::get_max_compressed_buffer_size(smolv.size(), Compression::MODE_ZSTD));
int dst_size = Compression::compress(zstd.ptrw(), &smolv[0], smolv.size(), Compression::MODE_ZSTD);

if (dst_size > 0 && (uint32_t)dst_size < smolv.size()) {
zstd_size.push_back(dst_size);
zstd.resize(dst_size);
compressed_stages.push_back(zstd);
} else {
Vector<uint8_t> smv;
smv.resize(smolv.size());
memcpy(smv.ptrw(), &smolv[0], smolv.size());
zstd_size.push_back(0); //not using zstd
compressed_stages.push_back(smv);
}
}
}
uint32_t s = compressed_stages[i].size();
if (s % 4 != 0) {
s += 4 - (s % 4);
}
stages_binary_size += s;
}

binary_data.specialization_constant_count = specialization_constants.size();
binary_data.set_count = uniform_info.size();
binary_data.stage_count = p_spirv.size();

uint32_t total_size = sizeof(uint32_t) * 3; //header + version + main datasize;
total_size += sizeof(RenderingDeviceVulkanShaderBinaryData);

for (int i = 0; i < uniform_info.size(); i++) {
total_size += sizeof(uint32_t);
total_size += uniform_info[i].size() * sizeof(RenderingDeviceVulkanShaderBinaryDataBinding);
}

total_size += sizeof(RenderingDeviceVulkanShaderBinarySpecializationConstant) * specialization_constants.size();

total_size += compressed_stages.size() * sizeof(uint32_t) * 3; //sizes
total_size += stages_binary_size;

Vector<uint8_t> ret;
ret.resize(total_size);
uint32_t offset = 0;
{
uint8_t *binptr = ret.ptrw();
binptr[0] = 'G';
binptr[1] = 'V';
binptr[2] = 'B';
binptr[3] = 'D'; //godot vulkan binary data
offset += 4;
encode_uint32(SHADER_BINARY_VERSION, binptr + offset);
offset += sizeof(uint32_t);
encode_uint32(sizeof(RenderingDeviceVulkanShaderBinaryData), binptr + offset);
offset += sizeof(uint32_t);
memcpy(binptr + offset, &binary_data, sizeof(RenderingDeviceVulkanShaderBinaryData));
offset += sizeof(RenderingDeviceVulkanShaderBinaryData);

for (int i = 0; i < uniform_info.size(); i++) {
int count = uniform_info[i].size();
encode_uint32(count, binptr + offset);
offset += sizeof(uint32_t);
if (count > 0) {
memcpy(binptr + offset, uniform_info[i].ptr(), sizeof(RenderingDeviceVulkanShaderBinaryDataBinding) * count);
offset += sizeof(RenderingDeviceVulkanShaderBinaryDataBinding) * count;
}
}

if (specialization_constants.size()) {
memcpy(binptr + offset, specialization_constants.ptr(), sizeof(RenderingDeviceVulkanShaderBinarySpecializationConstant) * specialization_constants.size());
offset += sizeof(RenderingDeviceVulkanShaderBinarySpecializationConstant) * specialization_constants.size();
}

for (int i = 0; i < compressed_stages.size(); i++) {
encode_uint32(p_spirv[i].shader_stage, binptr + offset);
offset += sizeof(uint32_t);
encode_uint32(smolv_size[i], binptr + offset);
offset += sizeof(uint32_t);
encode_uint32(zstd_size[i], binptr + offset);
offset += sizeof(uint32_t);
memcpy(binptr + offset, compressed_stages[i].ptr(), compressed_stages[i].size());

uint32_t s = compressed_stages[i].size();

if (s % 4 != 0) {
s += 4 - (s % 4);
}

offset += s;
}

ERR_FAIL_COND_V(offset != (uint32_t)ret.size(), Vector<uint8_t>());
}

return ret;
}
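
Reading the serialization code above, the blob returned by shader_compile_binary_from_spirv() is laid out as follows (a summary of the code, not a separately specified format):

    // Layout of the returned Vector<uint8_t>, all integers as little-endian uint32_t:
    //   'G' 'V' 'B' 'D'                                   magic
    //   SHADER_BINARY_VERSION                             currently 1
    //   sizeof(RenderingDeviceVulkanShaderBinaryData)     size of the fixed header that follows
    //   RenderingDeviceVulkanShaderBinaryData             masks, compute local size, counts, ...
    //   per set:       uint32_t binding_count, then binding_count binding records
    //   per constant:  one RenderingDeviceVulkanShaderBinarySpecializationConstant
    //   per stage:     uint32_t stage, uint32_t smolv_size, uint32_t zstd_size (0 = no zstd),
    //                  compressed payload, padded up to a 4-byte boundary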

RID RenderingDeviceVulkan::shader_create_from_bytecode(const Vector<uint8_t> &p_shader_binary) {
const uint8_t *binptr = p_shader_binary.ptr();
uint32_t binsize = p_shader_binary.size();

uint32_t read_offset = 0;
//consistency check
ERR_FAIL_COND_V(binsize < sizeof(uint32_t) * 3 + sizeof(RenderingDeviceVulkanShaderBinaryData), RID());
ERR_FAIL_COND_V(binptr[0] != 'G' || binptr[1] != 'V' || binptr[2] != 'B' || binptr[3] != 'D', RID());

uint32_t bin_version = decode_uint32(binptr + 4);
ERR_FAIL_COND_V(bin_version > SHADER_BINARY_VERSION, RID());

uint32_t bin_data_size = decode_uint32(binptr + 8);

const RenderingDeviceVulkanShaderBinaryData &binary_data = *(const RenderingDeviceVulkanShaderBinaryData *)(binptr + 12);

Shader::PushConstant push_constant;
push_constant.push_constant_size = binary_data.push_constant_size;
push_constant.push_constants_vk_stage = binary_data.push_constants_vk_stage;

uint32_t vertex_input_mask = binary_data.vertex_input_mask;

uint32_t fragment_outputs = binary_data.fragment_outputs;

bool is_compute = binary_data.is_compute;

uint32_t compute_local_size[3] = { binary_data.compute_local_size[0], binary_data.compute_local_size[1], binary_data.compute_local_size[2] };

read_offset += sizeof(uint32_t) * 3 + bin_data_size;

Vector<Vector<VkDescriptorSetLayoutBinding>> set_bindings;
Vector<Vector<UniformInfo>> uniform_info;

set_bindings.resize(binary_data.set_count);
uniform_info.resize(binary_data.set_count);

for (uint32_t i = 0; i < binary_data.set_count; i++) {
ERR_FAIL_COND_V(read_offset + sizeof(uint32_t) >= binsize, RID());
uint32_t set_count = decode_uint32(binptr + read_offset);
read_offset += sizeof(uint32_t);
const RenderingDeviceVulkanShaderBinaryDataBinding *set_ptr = (const RenderingDeviceVulkanShaderBinaryDataBinding *)(binptr + read_offset);
uint32_t set_size = set_count * sizeof(RenderingDeviceVulkanShaderBinaryDataBinding);
ERR_FAIL_COND_V(read_offset + set_size >= binsize, RID());

for (uint32_t j = 0; j < set_count; j++) {
UniformInfo info;
info.type = UniformType(set_ptr[j].type);
info.length = set_ptr[j].length;
info.binding = set_ptr[j].binding;
info.stages = set_ptr[j].stages;

VkDescriptorSetLayoutBinding layout_binding;
layout_binding.pImmutableSamplers = nullptr;
layout_binding.binding = set_ptr[j].binding;
layout_binding.descriptorCount = 1;
layout_binding.stageFlags = 0;
for (uint32_t k = 0; k < SHADER_STAGE_MAX; k++) {
if (set_ptr[j].stages & (1 << k)) {
layout_binding.stageFlags |= shader_stage_masks[k];
}
}

switch (info.type) {
case UNIFORM_TYPE_SAMPLER: {
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
layout_binding.descriptorCount = set_ptr[j].length;
} break;
case UNIFORM_TYPE_SAMPLER_WITH_TEXTURE: {
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
layout_binding.descriptorCount = set_ptr[j].length;
} break;
case UNIFORM_TYPE_TEXTURE: {
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
layout_binding.descriptorCount = set_ptr[j].length;
} break;
case UNIFORM_TYPE_IMAGE: {
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
layout_binding.descriptorCount = set_ptr[j].length;
} break;
case UNIFORM_TYPE_TEXTURE_BUFFER: {
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
layout_binding.descriptorCount = set_ptr[j].length;
} break;
case UNIFORM_TYPE_IMAGE_BUFFER: {
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
} break;
case UNIFORM_TYPE_UNIFORM_BUFFER: {
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
} break;
case UNIFORM_TYPE_STORAGE_BUFFER: {
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
} break;
case UNIFORM_TYPE_INPUT_ATTACHMENT: {
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
} break;
default: {
ERR_FAIL_V(RID());
}
}

set_bindings.write[i].push_back(layout_binding);
uniform_info.write[i].push_back(info);
}

read_offset += set_size;
}

ERR_FAIL_COND_V(read_offset + binary_data.specialization_constant_count * sizeof(RenderingDeviceVulkanShaderBinarySpecializationConstant) >= binsize, RID());

Vector<Shader::SpecializationConstant> specialization_constants;

for (uint32_t i = 0; i < binary_data.specialization_constant_count; i++) {
const RenderingDeviceVulkanShaderBinarySpecializationConstant &src_sc = *(const RenderingDeviceVulkanShaderBinarySpecializationConstant *)(binptr + read_offset);
Shader::SpecializationConstant sc;
sc.constant.int_value = src_sc.int_value;
sc.constant.type = PipelineSpecializationConstantType(src_sc.type);
sc.constant.constant_id = src_sc.constant_id;
sc.stage_flags = src_sc.stage_flags;
specialization_constants.push_back(sc);

read_offset += sizeof(RenderingDeviceVulkanShaderBinarySpecializationConstant);
}

Vector<Vector<uint8_t>> stage_spirv_data;
Vector<ShaderStage> stage_type;

for (uint32_t i = 0; i < binary_data.stage_count; i++) {
ERR_FAIL_COND_V(read_offset + sizeof(uint32_t) * 3 >= binsize, RID());
uint32_t stage = decode_uint32(binptr + read_offset);
read_offset += sizeof(uint32_t);
uint32_t smolv_size = decode_uint32(binptr + read_offset);
read_offset += sizeof(uint32_t);
uint32_t zstd_size = decode_uint32(binptr + read_offset);
read_offset += sizeof(uint32_t);

uint32_t buf_size = (zstd_size > 0) ? zstd_size : smolv_size;

Vector<uint8_t> smolv;
const uint8_t *src_smolv = nullptr;

if (zstd_size > 0) {
//decompress to smolv
smolv.resize(smolv_size);
int dec_smolv_size = Compression::decompress(smolv.ptrw(), smolv.size(), binptr + read_offset, zstd_size, Compression::MODE_ZSTD);
ERR_FAIL_COND_V(dec_smolv_size != (int32_t)smolv_size, RID());
src_smolv = smolv.ptr();
} else {
src_smolv = binptr + read_offset;
}

Vector<uint8_t> spirv;
uint32_t spirv_size = smolv::GetDecodedBufferSize(src_smolv, smolv_size);
spirv.resize(spirv_size);
if (!smolv::Decode(src_smolv, smolv_size, spirv.ptrw(), spirv_size)) {
ERR_FAIL_V_MSG(RID(), "Malformed smolv input uncompressing shader stage:" + String(shader_stage_names[stage]));
}
stage_spirv_data.push_back(spirv);
stage_type.push_back(ShaderStage(stage));

if (buf_size % 4 != 0) {
buf_size += 4 - (buf_size % 4);
}

ERR_FAIL_COND_V(read_offset + buf_size > binsize, RID());

read_offset += buf_size;
}

ERR_FAIL_COND_V(read_offset != binsize, RID());

//all good, let's create modules

_THREAD_SAFE_METHOD_

@@ -4717,13 +5012,13 @@ RID RenderingDeviceVulkan::shader_create(const Vector<ShaderStageData> &p_stages
String error_text;

bool success = true;
for (int i = 0; i < p_stages.size(); i++) {
for (int i = 0; i < stage_spirv_data.size(); i++) {
VkShaderModuleCreateInfo shader_module_create_info;
shader_module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
shader_module_create_info.pNext = nullptr;
shader_module_create_info.flags = 0;
shader_module_create_info.codeSize = p_stages[i].spir_v.size();
const uint8_t *r = p_stages[i].spir_v.ptr();
shader_module_create_info.codeSize = stage_spirv_data[i].size();
const uint8_t *r = stage_spirv_data[i].ptr();

shader_module_create_info.pCode = (const uint32_t *)r;

@@ -4731,7 +5026,7 @@ RID RenderingDeviceVulkan::shader_create(const Vector<ShaderStageData> &p_stages
VkResult res = vkCreateShaderModule(device, &shader_module_create_info, nullptr, &module);
if (res) {
success = false;
error_text = "Error (" + itos(res) + ") creating shader module for stage: " + String(shader_stage_names[p_stages[i].shader_stage]);
error_text = "Error (" + itos(res) + ") creating shader module for stage: " + String(shader_stage_names[stage_type[i]]);
break;
}

@@ -4747,7 +5042,7 @@ RID RenderingDeviceVulkan::shader_create(const Vector<ShaderStageData> &p_stages
shader_stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shader_stage.pNext = nullptr;
shader_stage.flags = 0;
shader_stage.stage = shader_stage_bits[p_stages[i].shader_stage];
shader_stage.stage = shader_stage_bits[stage_type[i]];
shader_stage.module = module;
shader_stage.pName = "main";
shader_stage.pSpecializationInfo = nullptr;

@@ -1083,7 +1083,11 @@ public:
/**** SHADER ****/
/****************/

virtual RID shader_create(const Vector<ShaderStageData> &p_stages);
virtual String shader_get_binary_cache_key() const;
virtual Vector<uint8_t> shader_compile_binary_from_spirv(const Vector<ShaderStageSPIRVData> &p_spirv);

virtual RID shader_create_from_bytecode(const Vector<uint8_t> &p_shader_binary);

virtual uint32_t shader_get_vertex_input_attribute_mask(RID p_shader);

/*****************/

@@ -55,7 +55,7 @@ void ShaderFileEditor::_version_selected(int p_option) {
RD::ShaderStage stage = RD::SHADER_STAGE_MAX;
int first_found = -1;

Ref<RDShaderBytecode> bytecode = shader_file->get_bytecode(version_txt);
Ref<RDShaderSPIRV> bytecode = shader_file->get_spirv(version_txt);
ERR_FAIL_COND(bytecode.is_null());

for (int i = 0; i < RD::SHADER_STAGE_MAX; i++) {

@@ -142,7 +142,7 @@ void ShaderFileEditor::_update_options() {

Ref<Texture2D> icon;

Ref<RDShaderBytecode> bytecode = shader_file->get_bytecode(version_list[i]);
Ref<RDShaderSPIRV> bytecode = shader_file->get_spirv(version_list[i]);
ERR_FAIL_COND(bytecode.is_null());

bool failed = false;

@@ -175,7 +175,7 @@ void ShaderFileEditor::_update_options() {
return;
}

Ref<RDShaderBytecode> bytecode = shader_file->get_bytecode(current_version);
Ref<RDShaderSPIRV> bytecode = shader_file->get_spirv(current_version);
ERR_FAIL_COND(bytecode.is_null());
int first_valid = -1;
int current = -1;

@@ -193,7 +193,7 @@ void preregister_glslang_types() {
// initialize in case it's not initialized. This is done once per thread
// and it's safe to call multiple times
glslang::InitializeProcess();
RenderingDevice::shader_set_compile_function(_compile_shader_glsl);
RenderingDevice::shader_set_compile_to_spirv_function(_compile_shader_glsl);
RenderingDevice::shader_set_get_cache_key_function(_get_cache_key_function_glsl);
}

@@ -794,7 +794,7 @@ LightmapperRD::BakeError LightmapperRD::bake(BakeQuality p_quality, bool p_use_d
}
ERR_FAIL_COND_V(err != OK, BAKE_ERROR_LIGHTMAP_CANT_PRE_BAKE_MESHES);

RID rasterize_shader = rd->shader_create_from_bytecode(raster_shader->get_bytecode());
RID rasterize_shader = rd->shader_create_from_spirv(raster_shader->get_spirv_stages());

ERR_FAIL_COND_V(rasterize_shader.is_null(), BAKE_ERROR_LIGHTMAP_CANT_PRE_BAKE_MESHES); //this is a bug check, though, should not happen

@@ -945,27 +945,27 @@ LightmapperRD::BakeError LightmapperRD::bake(BakeQuality p_quality, bool p_use_d
ERR_FAIL_COND_V(err != OK, BAKE_ERROR_LIGHTMAP_CANT_PRE_BAKE_MESHES);

// Unoccluder
RID compute_shader_unocclude = rd->shader_create_from_bytecode(compute_shader->get_bytecode("unocclude"));
RID compute_shader_unocclude = rd->shader_create_from_spirv(compute_shader->get_spirv_stages("unocclude"));
ERR_FAIL_COND_V(compute_shader_unocclude.is_null(), BAKE_ERROR_LIGHTMAP_CANT_PRE_BAKE_MESHES); // internal check, should not happen
RID compute_shader_unocclude_pipeline = rd->compute_pipeline_create(compute_shader_unocclude);

// Direct light
RID compute_shader_primary = rd->shader_create_from_bytecode(compute_shader->get_bytecode("primary"));
RID compute_shader_primary = rd->shader_create_from_spirv(compute_shader->get_spirv_stages("primary"));
ERR_FAIL_COND_V(compute_shader_primary.is_null(), BAKE_ERROR_LIGHTMAP_CANT_PRE_BAKE_MESHES); // internal check, should not happen
RID compute_shader_primary_pipeline = rd->compute_pipeline_create(compute_shader_primary);

// Indirect light
RID compute_shader_secondary = rd->shader_create_from_bytecode(compute_shader->get_bytecode("secondary"));
RID compute_shader_secondary = rd->shader_create_from_spirv(compute_shader->get_spirv_stages("secondary"));
ERR_FAIL_COND_V(compute_shader_secondary.is_null(), BAKE_ERROR_LIGHTMAP_CANT_PRE_BAKE_MESHES); //internal check, should not happen
RID compute_shader_secondary_pipeline = rd->compute_pipeline_create(compute_shader_secondary);

// Dilate
RID compute_shader_dilate = rd->shader_create_from_bytecode(compute_shader->get_bytecode("dilate"));
RID compute_shader_dilate = rd->shader_create_from_spirv(compute_shader->get_spirv_stages("dilate"));
ERR_FAIL_COND_V(compute_shader_dilate.is_null(), BAKE_ERROR_LIGHTMAP_CANT_PRE_BAKE_MESHES); //internal check, should not happen
RID compute_shader_dilate_pipeline = rd->compute_pipeline_create(compute_shader_dilate);

// Light probes
RID compute_shader_light_probes = rd->shader_create_from_bytecode(compute_shader->get_bytecode("light_probes"));
RID compute_shader_light_probes = rd->shader_create_from_spirv(compute_shader->get_spirv_stages("light_probes"));
ERR_FAIL_COND_V(compute_shader_light_probes.is_null(), BAKE_ERROR_LIGHTMAP_CANT_PRE_BAKE_MESHES); //internal check, should not happen
RID compute_shader_light_probes_pipeline = rd->compute_pipeline_create(compute_shader_light_probes);

@@ -1506,11 +1506,11 @@ LightmapperRD::BakeError LightmapperRD::bake(BakeQuality p_quality, bool p_use_d
}
ERR_FAIL_COND_V(err != OK, BAKE_ERROR_LIGHTMAP_CANT_PRE_BAKE_MESHES);

RID blendseams_line_raster_shader = rd->shader_create_from_bytecode(blendseams_shader->get_bytecode("lines"));
RID blendseams_line_raster_shader = rd->shader_create_from_spirv(blendseams_shader->get_spirv_stages("lines"));

ERR_FAIL_COND_V(blendseams_line_raster_shader.is_null(), BAKE_ERROR_LIGHTMAP_CANT_PRE_BAKE_MESHES);

RID blendseams_triangle_raster_shader = rd->shader_create_from_bytecode(blendseams_shader->get_bytecode("triangles"));
RID blendseams_triangle_raster_shader = rd->shader_create_from_spirv(blendseams_shader->get_spirv_stages("triangles"));

ERR_FAIL_COND_V(blendseams_triangle_raster_shader.is_null(), BAKE_ERROR_LIGHTMAP_CANT_PRE_BAKE_MESHES);
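
The lightmapper now asks the RDShaderFile for its SPIR-V stages and lets the device compile the binary internally. Condensed, the pattern for each kernel version is (version name is a placeholder, assuming an already-acquired RenderingDevice *rd as in bake() above):

    // Sketch: build a compute pipeline from an RDShaderFile's SPIR-V stages.
    RID shader = rd->shader_create_from_spirv(compute_shader->get_spirv_stages("primary"));
    ERR_FAIL_COND(shader.is_null());
    RID pipeline = rd->compute_pipeline_create(shader);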

@@ -205,7 +205,7 @@ void register_server_types() {
GDREGISTER_CLASS(RDPipelineColorBlendStateAttachment);
GDREGISTER_CLASS(RDPipelineColorBlendState);
GDREGISTER_CLASS(RDShaderSource);
GDREGISTER_CLASS(RDShaderBytecode);
GDREGISTER_CLASS(RDShaderSPIRV);
GDREGISTER_CLASS(RDShaderFile);
GDREGISTER_CLASS(RDPipelineSpecializationConstant);

@@ -116,8 +116,10 @@ void ShaderRD::setup(const char *p_vertex_code, const char *p_fragment_code, con
}

StringBuilder tohash;
tohash.append("[VersionKey]");
tohash.append(RenderingDevice::get_singleton()->shader_get_cache_key());
tohash.append("[SpirvCacheKey]");
tohash.append(RenderingDevice::get_singleton()->shader_get_spirv_cache_key());
tohash.append("[BinaryCacheKey]");
tohash.append(RenderingDevice::get_singleton()->shader_get_binary_cache_key());
tohash.append("[Vertex]");
tohash.append(p_vertex_code ? p_vertex_code : "");
tohash.append("[Fragment]");
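
The version hash now mixes in two driver-provided keys instead of one, so SPIR-V and binary caches invalidate independently; for Vulkan the binary key is "Vulkan-SV" plus SHADER_BINARY_VERSION (see shader_get_binary_cache_key() earlier in this patch). Roughly, the hashed string looks like this (a sketch, values illustrative):

    // Sketch of the hash input assembled above:
    //   "[SpirvCacheKey]"  + <key reported by the GLSL-to-SPIR-V compiler>
    //   "[BinaryCacheKey]" + "Vulkan-SV1"
    //   "[Vertex]" + vertex_code + "[Fragment]" + fragment_code + ...
    // Bumping SHADER_BINARY_VERSION changes the hash, so stale cached binaries are
    // recompiled instead of being parsed with the wrong layout.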

@@ -148,8 +150,8 @@ void ShaderRD::_clear_version(Version *p_version) {
}

memdelete_arr(p_version->variants);
if (p_version->variant_stages) {
memdelete_arr(p_version->variant_stages);
if (p_version->variant_data) {
memdelete_arr(p_version->variant_data);
}
p_version->variants = nullptr;
}

@@ -203,7 +205,7 @@ void ShaderRD::_compile_variant(uint32_t p_variant, Version *p_version) {
return; //variant is disabled, return
}

Vector<RD::ShaderStageData> &stages = p_version->variant_stages[p_variant];
Vector<RD::ShaderStageSPIRVData> stages;

String error;
String current_source;

@@ -217,8 +219,8 @@ void ShaderRD::_compile_variant(uint32_t p_variant, Version *p_version) {
_build_variant_code(builder, p_variant, p_version, stage_templates[STAGE_TYPE_VERTEX]);

current_source = builder.as_string();
RD::ShaderStageData stage;
stage.spir_v = RD::get_singleton()->shader_compile_from_source(RD::SHADER_STAGE_VERTEX, current_source, RD::SHADER_LANGUAGE_GLSL, &error);
RD::ShaderStageSPIRVData stage;
stage.spir_v = RD::get_singleton()->shader_compile_spirv_from_source(RD::SHADER_STAGE_VERTEX, current_source, RD::SHADER_LANGUAGE_GLSL, &error);
if (stage.spir_v.size() == 0) {
build_ok = false;
} else {

@@ -235,8 +237,8 @@ void ShaderRD::_compile_variant(uint32_t p_variant, Version *p_version) {
_build_variant_code(builder, p_variant, p_version, stage_templates[STAGE_TYPE_FRAGMENT]);

current_source = builder.as_string();
RD::ShaderStageData stage;
stage.spir_v = RD::get_singleton()->shader_compile_from_source(RD::SHADER_STAGE_FRAGMENT, current_source, RD::SHADER_LANGUAGE_GLSL, &error);
RD::ShaderStageSPIRVData stage;
stage.spir_v = RD::get_singleton()->shader_compile_spirv_from_source(RD::SHADER_STAGE_FRAGMENT, current_source, RD::SHADER_LANGUAGE_GLSL, &error);
if (stage.spir_v.size() == 0) {
build_ok = false;
} else {

@@ -254,8 +256,8 @@ void ShaderRD::_compile_variant(uint32_t p_variant, Version *p_version) {

current_source = builder.as_string();

RD::ShaderStageData stage;
stage.spir_v = RD::get_singleton()->shader_compile_from_source(RD::SHADER_STAGE_COMPUTE, current_source, RD::SHADER_LANGUAGE_GLSL, &error);
RD::ShaderStageSPIRVData stage;
stage.spir_v = RD::get_singleton()->shader_compile_spirv_from_source(RD::SHADER_STAGE_COMPUTE, current_source, RD::SHADER_LANGUAGE_GLSL, &error);
if (stage.spir_v.size() == 0) {
build_ok = false;
} else {

@@ -275,10 +277,15 @@ void ShaderRD::_compile_variant(uint32_t p_variant, Version *p_version) {
return;
}

RID shader = RD::get_singleton()->shader_create(stages);
Vector<uint8_t> shader_data = RD::get_singleton()->shader_compile_binary_from_spirv(stages);

ERR_FAIL_COND(shader_data.size() == 0);

RID shader = RD::get_singleton()->shader_create_from_bytecode(shader_data);
{
MutexLock lock(variant_set_mutex);
p_version->variants[p_variant] = shader;
p_version->variant_data[p_variant] = shader_data;
}
}

@@ -364,14 +371,12 @@ String ShaderRD::_version_get_sha1(Version *p_version) const {
}

static const char *shader_file_header = "GDSC";
static const uint32_t cache_file_version = 1;
static const uint32_t cache_file_version = 2;

bool ShaderRD::_load_from_cache(Version *p_version) {
String sha1 = _version_get_sha1(p_version);
String path = shader_cache_dir.plus_file(name).plus_file(base_sha256).plus_file(sha1) + ".cache";

uint64_t time_from = OS::get_singleton()->get_ticks_usec();

FileAccessRef f = FileAccess::open(path, FileAccess::READ);
if (!f) {
return false;

@@ -390,76 +395,43 @@ bool ShaderRD::_load_from_cache(Version *p_version) {

ERR_FAIL_COND_V(variant_count != (uint32_t)variant_defines.size(), false); //should not happen but check

bool success = true;
for (uint32_t i = 0; i < variant_count; i++) {
uint32_t stage_count = f->get_32();
p_version->variant_stages[i].resize(stage_count);
for (uint32_t j = 0; j < stage_count; j++) {
p_version->variant_stages[i].write[j].shader_stage = RD::ShaderStage(f->get_32());
uint32_t variant_size = f->get_32();
ERR_FAIL_COND_V(variant_size == 0 && variants_enabled[i], false);
if (!variants_enabled[i]) {
continue;
}
Vector<uint8_t> variant_bytes;
variant_bytes.resize(variant_size);

int compression = f->get_32();
uint32_t length = f->get_32();
uint32_t br = f->get_buffer(variant_bytes.ptrw(), variant_size);

if (compression == 0) {
Vector<uint8_t> data;
data.resize(length);
ERR_FAIL_COND_V(br != variant_size, false);

f->get_buffer(data.ptrw(), length);
p_version->variant_data[i] = variant_bytes;
}

p_version->variant_stages[i].write[j].spir_v = data;
} else {
Vector<uint8_t> data;

if (compression == 2) {
//zstd
int smol_length = f->get_32();
Vector<uint8_t> zstd_data;

zstd_data.resize(smol_length);
f->get_buffer(zstd_data.ptrw(), smol_length);

data.resize(length);
Compression::decompress(data.ptrw(), data.size(), zstd_data.ptr(), zstd_data.size(), Compression::MODE_ZSTD);

} else {
data.resize(length);
f->get_buffer(data.ptrw(), length);
}

Vector<uint8_t> spirv;
uint32_t spirv_size = smolv::GetDecodedBufferSize(data.ptr(), data.size());
spirv.resize(spirv_size);
if (!smolv::Decode(data.ptr(), data.size(), spirv.ptrw(), spirv_size)) {
ERR_PRINT("Malformed smolv input uncompressing shader " + name + ", variant #" + itos(i) + " stage :" + itos(j));
success = false;
break;
}
p_version->variant_stages[i].write[j].spir_v = spirv;
for (uint32_t i = 0; i < variant_count; i++) {
if (!variants_enabled[i]) {
MutexLock lock(variant_set_mutex);
p_version->variants[i] = RID();
continue;
}
RID shader = RD::get_singleton()->shader_create_from_bytecode(p_version->variant_data[i]);
if (shader.is_null()) {
for (uint32_t j = 0; j < i; j++) {
RD::get_singleton()->free(p_version->variants[i]);
}
ERR_FAIL_COND_V(shader.is_null(), false);
}
}

if (!success) {
for (uint32_t i = 0; i < variant_count; i++) {
p_version->variant_stages[i].resize(0);
}
return false;
}

float time_ms = double(OS::get_singleton()->get_ticks_usec() - time_from) / 1000.0;

print_verbose("Shader cache load success '" + path + "' " + rtos(time_ms) + "ms.");

for (uint32_t i = 0; i < variant_count; i++) {
RID shader = RD::get_singleton()->shader_create(p_version->variant_stages[i]);
{
MutexLock lock(variant_set_mutex);
p_version->variants[i] = shader;
}
}

memdelete_arr(p_version->variant_stages); //clear stages
p_version->variant_stages = nullptr;
memdelete_arr(p_version->variant_data); //clear stages
p_version->variant_data = nullptr;
p_version->valid = true;
return true;
}
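
With cache_file_version bumped to 2, the .cache file no longer stores per-stage SPIR-V with its own compression flags; it stores one opaque driver binary per variant, produced by shader_compile_binary_from_spirv() and fed straight back into shader_create_from_bytecode() on load. A sketch of the resulting layout (as read above and written in _save_to_cache() below):

    // Shader cache file, version 2 (sketch):
    //   "GDSC"                       shader_file_header
    //   uint32_t cache_file_version  now 2
    //   uint32_t variant_count
    //   per variant:
    //     uint32_t size              0 when the variant is disabled
    //     uint8_t  data[size]        opaque blob from shader_compile_binary_from_spirv()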
@ -476,49 +448,8 @@ void ShaderRD::_save_to_cache(Version *p_version) {
f->store_32(variant_count); //variant count

for (uint32_t i = 0; i < variant_count; i++) {
f->store_32(p_version->variant_stages[i].size()); //stage count
for (int j = 0; j < p_version->variant_stages[i].size(); j++) {
f->store_32(p_version->variant_stages[i][j].shader_stage); //shader stage
Vector<uint8_t> spirv = p_version->variant_stages[i][j].spir_v;

bool save_uncompressed = true;
if (shader_cache_save_compressed) {
smolv::ByteArray smolv;
bool strip_debug = !shader_cache_save_debug;
if (!smolv::Encode(spirv.ptr(), spirv.size(), smolv, strip_debug ? smolv::kEncodeFlagStripDebugInfo : 0)) {
ERR_PRINT("Error compressing shader " + name + ", variant #" + itos(i) + " stage :" + itos(j));
} else {
bool compress_zstd = shader_cache_save_compressed_zstd;

if (compress_zstd) {
Vector<uint8_t> zstd;
zstd.resize(Compression::get_max_compressed_buffer_size(smolv.size(), Compression::MODE_ZSTD));
int dst_size = Compression::compress(zstd.ptrw(), &smolv[0], smolv.size(), Compression::MODE_ZSTD);
if (dst_size >= 0 && (uint32_t)dst_size < smolv.size()) {
f->store_32(2); //compressed zstd
f->store_32(smolv.size()); //size of smolv buffer
f->store_32(dst_size); //size of the zstd-compressed buffer
f->store_buffer(zstd.ptr(), dst_size); //zstd-compressed buffer
} else {
compress_zstd = false;
}
}

if (!compress_zstd) {
f->store_32(1); //compressed
f->store_32(smolv.size()); //size of smolv buffer
f->store_buffer(&smolv[0], smolv.size()); //smolv buffer
}
save_uncompressed = false;
}
}

if (save_uncompressed) {
f->store_32(0); //uncompressed
f->store_32(spirv.size()); //SPIR-V size
f->store_buffer(spirv.ptr(), spirv.size()); //SPIR-V buffer
}
}
f->store_32(p_version->variant_data[i].size()); //variant binary size
f->store_buffer(p_version->variant_data[i].ptr(), p_version->variant_data[i].size());
}

f->close();
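With the per-stage SmolV/Zstandard branches gone, the cache file written above reduces to one opaque blob per variant. A sketch of the resulting layout and of the matching read step; the helper name is illustrative, and the layout simply follows the store_32/store_buffer calls in this hunk:

// Per-Version cache layout after this change (header fields written earlier in the function omitted):
//   u32 variant_count
//   repeated variant_count times:
//     u32  binary_size   // size of the RenderingDevice-specific blob
//     u8[] binary        // output of shader_compile_binary_from_spirv()

#include "core/io/file_access.h"

// Matching read step, as _load_from_cache() now performs per variant (illustrative helper, not engine API).
Vector<uint8_t> read_variant_binary(FileAccess *f) {
	Vector<uint8_t> binary;
	binary.resize(f->get_32());
	f->get_buffer(binary.ptrw(), binary.size());
	return binary;
}
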
@ -531,8 +462,8 @@ void ShaderRD::_compile_version(Version *p_version) {
p_version->dirty = false;

p_version->variants = memnew_arr(RID, variant_defines.size());
typedef Vector<RD::ShaderStageData> ShaderStageArray;
p_version->variant_stages = memnew_arr(ShaderStageArray, variant_defines.size());
typedef Vector<uint8_t> ShaderStageData;
p_version->variant_data = memnew_arr(ShaderStageData, variant_defines.size());

if (shader_cache_dir_valid) {
if (_load_from_cache(p_version)) {
@ -571,19 +502,19 @@ void ShaderRD::_compile_version(Version *p_version) {
}
}
memdelete_arr(p_version->variants);
if (p_version->variant_stages) {
memdelete_arr(p_version->variant_stages);
if (p_version->variant_data) {
memdelete_arr(p_version->variant_data);
}
p_version->variants = nullptr;
p_version->variant_stages = nullptr;
p_version->variant_data = nullptr;
return;
} else if (shader_cache_dir_valid) {
//save shader cache
_save_to_cache(p_version);
}

memdelete_arr(p_version->variant_stages); //clear stages
p_version->variant_stages = nullptr;
memdelete_arr(p_version->variant_data); //clear stages
p_version->variant_data = nullptr;

p_version->valid = true;
}
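The variant_data array allocated above is filled per variant by the compile worker before _save_to_cache() runs. A rough sketch of that step, consistent with the declarations in this hunk but not a verbatim quote of the engine code (names here are illustrative):

#include "servers/rendering/rendering_device.h"

// Sketch of the per-variant flow _compile_version now follows: bake a
// driver-specific binary from SPIR-V once, keep the bytes for the cache,
// and create the runtime shader from those same bytes.
struct CompiledVariant {
	Vector<uint8_t> binary; // What _save_to_cache() writes to disk.
	RID shader;             // What the renderer actually binds.
};

CompiledVariant compile_variant(RenderingDevice *rd, const Vector<RenderingDevice::ShaderStageSPIRVData> &stages) {
	CompiledVariant out;
	out.binary = rd->shader_compile_binary_from_spirv(stages);
	if (!out.binary.is_empty()) {
		out.shader = rd->shader_create_from_bytecode(out.binary);
	}
	return out;
}
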
@ -59,7 +59,7 @@ class ShaderRD {
Map<StringName, CharString> code_sections;
Vector<CharString> custom_defines;

Vector<RD::ShaderStageData> *variant_stages = nullptr;
Vector<uint8_t> *variant_data = nullptr;
RID *variants = nullptr; //same size as version defines

bool valid;
@ -38,23 +38,23 @@ RenderingDevice *RenderingDevice::get_singleton() {
return singleton;
}

RenderingDevice::ShaderCompileFunction RenderingDevice::compile_function = nullptr;
RenderingDevice::ShaderCompileToSPIRVFunction RenderingDevice::compile_to_spirv_function = nullptr;
RenderingDevice::ShaderCacheFunction RenderingDevice::cache_function = nullptr;
RenderingDevice::ShaderGetCacheKeyFunction RenderingDevice::get_cache_key_function = nullptr;
RenderingDevice::ShaderSPIRVGetCacheKeyFunction RenderingDevice::get_spirv_cache_key_function = nullptr;

void RenderingDevice::shader_set_compile_function(ShaderCompileFunction p_function) {
compile_function = p_function;
void RenderingDevice::shader_set_compile_to_spirv_function(ShaderCompileToSPIRVFunction p_function) {
compile_to_spirv_function = p_function;
}

void RenderingDevice::shader_set_cache_function(ShaderCacheFunction p_function) {
void RenderingDevice::shader_set_spirv_cache_function(ShaderCacheFunction p_function) {
cache_function = p_function;
}

void RenderingDevice::shader_set_get_cache_key_function(ShaderGetCacheKeyFunction p_function) {
get_cache_key_function = p_function;
void RenderingDevice::shader_set_get_cache_key_function(ShaderSPIRVGetCacheKeyFunction p_function) {
get_spirv_cache_key_function = p_function;
}

Vector<uint8_t> RenderingDevice::shader_compile_from_source(ShaderStage p_stage, const String &p_source_code, ShaderLanguage p_language, String *r_error, bool p_allow_cache) {
Vector<uint8_t> RenderingDevice::shader_compile_spirv_from_source(ShaderStage p_stage, const String &p_source_code, ShaderLanguage p_language, String *r_error, bool p_allow_cache) {
if (p_allow_cache && cache_function) {
Vector<uint8_t> cache = cache_function(p_stage, p_source_code, p_language);
if (cache.size()) {
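These static hooks are installed at startup by the GLSL/SPIR-V compilation module. A hypothetical registration mirroring the renamed setters above; the hook implementations below are placeholders, not the engine's actual symbols:

#include "servers/rendering/rendering_device.h"

// Placeholder hook implementations; the real ones live in the GLSL module's
// registration code and carry different names.
static Vector<uint8_t> my_glsl_to_spirv(RenderingDevice::ShaderStage p_stage, const String &p_source, RenderingDevice::ShaderLanguage p_language, String *r_error, const RenderingDevice::Capabilities *p_caps) {
	return Vector<uint8_t>(); // A real implementation would invoke the GLSL compiler here.
}

static String my_spirv_cache_key(const RenderingDevice::Capabilities *p_caps) {
	return String("placeholder-key"); // Used to invalidate cached SPIR-V per device/driver.
}

void register_spirv_hooks() {
	RenderingDevice::shader_set_compile_to_spirv_function(my_glsl_to_spirv);
	RenderingDevice::shader_set_get_cache_key_function(my_spirv_cache_key);
}
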
@ -62,18 +62,24 @@ Vector<uint8_t> RenderingDevice::shader_compile_from_source(ShaderStage p_stage,
}
}

ERR_FAIL_COND_V(!compile_function, Vector<uint8_t>());
ERR_FAIL_COND_V(!compile_to_spirv_function, Vector<uint8_t>());

return compile_function(p_stage, p_source_code, p_language, r_error, &device_capabilities);
return compile_to_spirv_function(p_stage, p_source_code, p_language, r_error, &device_capabilities);
}

String RenderingDevice::shader_get_cache_key() const {
if (get_cache_key_function) {
return get_cache_key_function(&device_capabilities);
String RenderingDevice::shader_get_spirv_cache_key() const {
if (get_spirv_cache_key_function) {
return get_spirv_cache_key_function(&device_capabilities);
}
return String();
}

RID RenderingDevice::shader_create_from_spirv(const Vector<ShaderStageSPIRVData> &p_spirv) {
Vector<uint8_t> bytecode = shader_compile_binary_from_spirv(p_spirv);
ERR_FAIL_COND_V(bytecode.size() == 0, RID());
return shader_create_from_bytecode(bytecode);
}

RID RenderingDevice::_texture_create(const Ref<RDTextureFormat> &p_format, const Ref<RDTextureView> &p_view, const TypedArray<PackedByteArray> &p_data) {
ERR_FAIL_COND_V(p_format.is_null(), RID());
ERR_FAIL_COND_V(p_view.is_null(), RID());
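Taken together, the renamed entry points give a two-step path from GLSL source to a usable shader. A minimal sketch; the GLSL strings and the helper name are placeholders:

#include "servers/rendering/rendering_device.h"

// Sketch only: vertex_src / fragment_src are placeholder GLSL strings.
RID create_shader_from_glsl(RenderingDevice *rd, const String &vertex_src, const String &fragment_src) {
	Vector<RenderingDevice::ShaderStageSPIRVData> stages;
	String error;

	RenderingDevice::ShaderStageSPIRVData vs;
	vs.shader_stage = RenderingDevice::SHADER_STAGE_VERTEX;
	vs.spir_v = rd->shader_compile_spirv_from_source(RenderingDevice::SHADER_STAGE_VERTEX, vertex_src, RenderingDevice::SHADER_LANGUAGE_GLSL, &error);
	ERR_FAIL_COND_V_MSG(!error.is_empty(), RID(), error);
	stages.push_back(vs);

	RenderingDevice::ShaderStageSPIRVData fs;
	fs.shader_stage = RenderingDevice::SHADER_STAGE_FRAGMENT;
	fs.spir_v = rd->shader_compile_spirv_from_source(RenderingDevice::SHADER_STAGE_FRAGMENT, fragment_src, RenderingDevice::SHADER_LANGUAGE_GLSL, &error);
	ERR_FAIL_COND_V_MSG(!error.is_empty(), RID(), error);
	stages.push_back(fs);

	// shader_create_from_spirv() bakes the binary and creates the shader in one call;
	// shader_compile_binary_from_spirv() + shader_create_from_bytecode() split the
	// same work when the binary should be cached.
	return rd->shader_create_from_spirv(stages);
}
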
@ -170,40 +176,59 @@ RID RenderingDevice::_vertex_array_create(uint32_t p_vertex_count, VertexFormatI
return vertex_array_create(p_vertex_count, p_vertex_format, buffers);
}

Ref<RDShaderBytecode> RenderingDevice::_shader_compile_from_source(const Ref<RDShaderSource> &p_source, bool p_allow_cache) {
ERR_FAIL_COND_V(p_source.is_null(), Ref<RDShaderBytecode>());
Ref<RDShaderSPIRV> RenderingDevice::_shader_compile_spirv_from_source(const Ref<RDShaderSource> &p_source, bool p_allow_cache) {
ERR_FAIL_COND_V(p_source.is_null(), Ref<RDShaderSPIRV>());

Ref<RDShaderBytecode> bytecode;
Ref<RDShaderSPIRV> bytecode;
bytecode.instantiate();
for (int i = 0; i < RD::SHADER_STAGE_MAX; i++) {
String error;

ShaderStage stage = ShaderStage(i);
Vector<uint8_t> spirv = shader_compile_from_source(stage, p_source->get_stage_source(stage), p_source->get_language(), &error, p_allow_cache);
Vector<uint8_t> spirv = shader_compile_spirv_from_source(stage, p_source->get_stage_source(stage), p_source->get_language(), &error, p_allow_cache);
bytecode->set_stage_bytecode(stage, spirv);
bytecode->set_stage_compile_error(stage, error);
}
return bytecode;
}

RID RenderingDevice::shader_create_from_bytecode(const Ref<RDShaderBytecode> &p_bytecode) {
ERR_FAIL_COND_V(p_bytecode.is_null(), RID());
Vector<uint8_t> RenderingDevice::_shader_compile_binary_from_spirv(const Ref<RDShaderSPIRV> &p_spirv) {
ERR_FAIL_COND_V(p_spirv.is_null(), Vector<uint8_t>());

Vector<ShaderStageData> stage_data;
Vector<ShaderStageSPIRVData> stage_data;
for (int i = 0; i < RD::SHADER_STAGE_MAX; i++) {
ShaderStage stage = ShaderStage(i);
ShaderStageData sd;
ShaderStageSPIRVData sd;
sd.shader_stage = stage;
String error = p_bytecode->get_stage_compile_error(stage);
ERR_FAIL_COND_V_MSG(error != String(), RID(), "Can't create a shader from an errored bytecode. Check errors in source bytecode.");
sd.spir_v = p_bytecode->get_stage_bytecode(stage);
String error = p_spirv->get_stage_compile_error(stage);
ERR_FAIL_COND_V_MSG(error != String(), Vector<uint8_t>(), "Can't create a shader from an errored bytecode. Check errors in source bytecode.");
sd.spir_v = p_spirv->get_stage_bytecode(stage);
if (sd.spir_v.is_empty()) {
continue;
}
stage_data.push_back(sd);
}

return shader_create(stage_data);
return shader_compile_binary_from_spirv(stage_data);
}

RID RenderingDevice::_shader_create_from_spirv(const Ref<RDShaderSPIRV> &p_spirv) {
ERR_FAIL_COND_V(p_spirv.is_null(), RID());

Vector<ShaderStageSPIRVData> stage_data;
for (int i = 0; i < RD::SHADER_STAGE_MAX; i++) {
ShaderStage stage = ShaderStage(i);
ShaderStageSPIRVData sd;
sd.shader_stage = stage;
String error = p_spirv->get_stage_compile_error(stage);
ERR_FAIL_COND_V_MSG(error != String(), RID(), "Can't create a shader from an errored bytecode. Check errors in source bytecode.");
sd.spir_v = p_spirv->get_stage_bytecode(stage);
if (sd.spir_v.is_empty()) {
continue;
}
stage_data.push_back(sd);
}
return shader_create_from_spirv(stage_data);
}

RID RenderingDevice::_uniform_set_create(const Array &p_uniforms, RID p_shader, uint32_t p_shader_set) {
@ -366,8 +391,10 @@ void RenderingDevice::_bind_methods() {
ClassDB::bind_method(D_METHOD("index_buffer_create", "size_indices", "format", "data", "use_restart_indices"), &RenderingDevice::index_buffer_create, DEFVAL(Vector<uint8_t>()), DEFVAL(false));
ClassDB::bind_method(D_METHOD("index_array_create", "index_buffer", "index_offset", "index_count"), &RenderingDevice::index_array_create);

ClassDB::bind_method(D_METHOD("shader_compile_from_source", "shader_source", "allow_cache"), &RenderingDevice::_shader_compile_from_source, DEFVAL(true));
ClassDB::bind_method(D_METHOD("shader_create", "shader_data"), &RenderingDevice::shader_create_from_bytecode);
ClassDB::bind_method(D_METHOD("shader_compile_spirv_from_source", "shader_source", "allow_cache"), &RenderingDevice::_shader_compile_spirv_from_source, DEFVAL(true));
ClassDB::bind_method(D_METHOD("shader_compile_binary_from_spirv", "spirv_data"), &RenderingDevice::_shader_compile_binary_from_spirv);
ClassDB::bind_method(D_METHOD("shader_create_from_spirv", "spirv_data"), &RenderingDevice::_shader_create_from_spirv);
ClassDB::bind_method(D_METHOD("shader_create_from_bytecode", "binary_data"), &RenderingDevice::shader_create_from_bytecode);
ClassDB::bind_method(D_METHOD("shader_get_vertex_input_attribute_mask", "shader"), &RenderingDevice::shader_get_vertex_input_attribute_mask);

ClassDB::bind_method(D_METHOD("uniform_buffer_create", "size_bytes", "data"), &RenderingDevice::uniform_buffer_create, DEFVAL(Vector<uint8_t>()));
@ -41,7 +41,7 @@ class RDAttachmentFormat;
class RDSamplerState;
class RDVertexAttribute;
class RDShaderSource;
class RDShaderBytecode;
class RDShaderSPIRV;
class RDUniforms;
class RDPipelineRasterizationState;
class RDPipelineMultisampleState;
@ -105,14 +105,14 @@ public:
bool supports_multiview = false; // If true this device supports multiview options
};

typedef String (*ShaderGetCacheKeyFunction)(const Capabilities *p_capabilities);
typedef Vector<uint8_t> (*ShaderCompileFunction)(ShaderStage p_stage, const String &p_source_code, ShaderLanguage p_language, String *r_error, const Capabilities *p_capabilities);
typedef String (*ShaderSPIRVGetCacheKeyFunction)(const Capabilities *p_capabilities);
typedef Vector<uint8_t> (*ShaderCompileToSPIRVFunction)(ShaderStage p_stage, const String &p_source_code, ShaderLanguage p_language, String *r_error, const Capabilities *p_capabilities);
typedef Vector<uint8_t> (*ShaderCacheFunction)(ShaderStage p_stage, const String &p_source_code, ShaderLanguage p_language);

private:
static ShaderCompileFunction compile_function;
static ShaderCompileToSPIRVFunction compile_to_spirv_function;
static ShaderCacheFunction cache_function;
static ShaderGetCacheKeyFunction get_cache_key_function;
static ShaderSPIRVGetCacheKeyFunction get_spirv_cache_key_function;

static RenderingDevice *singleton;
@ -651,24 +651,28 @@ public:

const Capabilities *get_device_capabilities() const { return &device_capabilities; };

virtual Vector<uint8_t> shader_compile_from_source(ShaderStage p_stage, const String &p_source_code, ShaderLanguage p_language = SHADER_LANGUAGE_GLSL, String *r_error = nullptr, bool p_allow_cache = true);
virtual String shader_get_cache_key() const;
virtual Vector<uint8_t> shader_compile_spirv_from_source(ShaderStage p_stage, const String &p_source_code, ShaderLanguage p_language = SHADER_LANGUAGE_GLSL, String *r_error = nullptr, bool p_allow_cache = true);
virtual String shader_get_spirv_cache_key() const;

static void shader_set_compile_function(ShaderCompileFunction p_function);
static void shader_set_cache_function(ShaderCacheFunction p_function);
static void shader_set_get_cache_key_function(ShaderGetCacheKeyFunction p_function);
static void shader_set_compile_to_spirv_function(ShaderCompileToSPIRVFunction p_function);
static void shader_set_spirv_cache_function(ShaderCacheFunction p_function);
static void shader_set_get_cache_key_function(ShaderSPIRVGetCacheKeyFunction p_function);

struct ShaderStageData {
struct ShaderStageSPIRVData {
ShaderStage shader_stage;
Vector<uint8_t> spir_v;

ShaderStageData() {
ShaderStageSPIRVData() {
shader_stage = SHADER_STAGE_VERTEX;
}
};

RID shader_create_from_bytecode(const Ref<RDShaderBytecode> &p_bytecode);
virtual RID shader_create(const Vector<ShaderStageData> &p_stages) = 0;
virtual String shader_get_binary_cache_key() const = 0;
virtual Vector<uint8_t> shader_compile_binary_from_spirv(const Vector<ShaderStageSPIRVData> &p_spirv) = 0;

virtual RID shader_create_from_spirv(const Vector<ShaderStageSPIRVData> &p_spirv);
virtual RID shader_create_from_bytecode(const Vector<uint8_t> &p_shader_binary) = 0;

virtual uint32_t shader_get_vertex_input_attribute_mask(RID p_shader) = 0;

/******************/
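The split between the generic front end and a concrete device is visible in the pure virtuals above. A sketch of the override surface a backend such as RenderingDeviceVulkan now has to provide; the class name is illustrative and the signatures are taken from the declarations in this hunk:

#include "servers/rendering/rendering_device.h"

// Sketch of a backend honoring the new interface; not the engine's actual
// Vulkan implementation, just the required overrides.
class SketchRenderingDevice : public RenderingDevice {
public:
	// Reflect the SPIR-V stages and serialize everything the driver needs into
	// one opaque blob (this is what ends up in the shader cache).
	virtual Vector<uint8_t> shader_compile_binary_from_spirv(const Vector<ShaderStageSPIRVData> &p_spirv) override;

	// Rebuild the shader (e.g. shader modules plus reflected metadata) from a
	// previously produced blob, failing if it targets an incompatible device.
	virtual RID shader_create_from_bytecode(const Vector<uint8_t> &p_shader_binary) override;

	// Key used to invalidate cached binaries when the device or driver changes.
	virtual String shader_get_binary_cache_key() const override;
};
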
@ -1194,7 +1198,9 @@ protected:
VertexFormatID _vertex_format_create(const TypedArray<RDVertexAttribute> &p_vertex_formats);
RID _vertex_array_create(uint32_t p_vertex_count, VertexFormatID p_vertex_format, const TypedArray<RID> &p_src_buffers);

Ref<RDShaderBytecode> _shader_compile_from_source(const Ref<RDShaderSource> &p_source, bool p_allow_cache = true);
Ref<RDShaderSPIRV> _shader_compile_spirv_from_source(const Ref<RDShaderSource> &p_source, bool p_allow_cache = true);
Vector<uint8_t> _shader_compile_binary_from_spirv(const Ref<RDShaderSPIRV> &p_bytecode);
RID _shader_create_from_spirv(const Ref<RDShaderSPIRV> &p_spirv);

RID _uniform_set_create(const Array &p_uniforms, RID p_shader, uint32_t p_shader_set);
@ -172,7 +172,7 @@ Error RDShaderFile::parse_versions_from_text(const String &p_text, const String
/* STEP 2, Compile the versions, add to shader file */

for (Map<StringName, String>::Element *E = version_texts.front(); E; E = E->next()) {
Ref<RDShaderBytecode> bytecode;
Ref<RDShaderSPIRV> bytecode;
bytecode.instantiate();

for (int i = 0; i < RD::SHADER_STAGE_MAX; i++) {
@ -182,7 +182,7 @@ Error RDShaderFile::parse_versions_from_text(const String &p_text, const String
}
code = code.replace("VERSION_DEFINES", E->get());
String error;
Vector<uint8_t> spirv = RenderingDevice::get_singleton()->shader_compile_from_source(RD::ShaderStage(i), code, RD::SHADER_LANGUAGE_GLSL, &error, false);
Vector<uint8_t> spirv = RenderingDevice::get_singleton()->shader_compile_spirv_from_source(RD::ShaderStage(i), code, RD::SHADER_LANGUAGE_GLSL, &error, false);
bytecode->set_stage_bytecode(RD::ShaderStage(i), spirv);
if (error != "") {
error += String() + "\n\nStage '" + stage_str[i] + "' source code: \n\n";
@ -263,8 +263,8 @@ protected:
}
};

class RDShaderBytecode : public Resource {
GDCLASS(RDShaderBytecode, Resource)
class RDShaderSPIRV : public Resource {
GDCLASS(RDShaderSPIRV, Resource)

Vector<uint8_t> bytecode[RD::SHADER_STAGE_MAX];
String compile_error[RD::SHADER_STAGE_MAX];
@ -280,6 +280,19 @@ public:
return bytecode[p_stage];
}

Vector<RD::ShaderStageSPIRVData> get_stages() const {
Vector<RD::ShaderStageSPIRVData> stages;
for (int i = 0; i < RD::SHADER_STAGE_MAX; i++) {
if (bytecode[i].size()) {
RD::ShaderStageSPIRVData stage;
stage.shader_stage = RD::ShaderStage(i);
stage.spir_v = bytecode[i];
stages.push_back(stage);
}
}
return stages;
}

void set_stage_compile_error(RD::ShaderStage p_stage, const String &p_compile_error) {
ERR_FAIL_INDEX(p_stage, RD::SHADER_STAGE_MAX);
compile_error[p_stage] = p_compile_error;
@ -292,11 +305,11 @@ public:

protected:
static void _bind_methods() {
ClassDB::bind_method(D_METHOD("set_stage_bytecode", "stage", "bytecode"), &RDShaderBytecode::set_stage_bytecode);
ClassDB::bind_method(D_METHOD("get_stage_bytecode", "stage"), &RDShaderBytecode::get_stage_bytecode);
ClassDB::bind_method(D_METHOD("set_stage_bytecode", "stage", "bytecode"), &RDShaderSPIRV::set_stage_bytecode);
ClassDB::bind_method(D_METHOD("get_stage_bytecode", "stage"), &RDShaderSPIRV::get_stage_bytecode);

ClassDB::bind_method(D_METHOD("set_stage_compile_error", "stage", "compile_error"), &RDShaderBytecode::set_stage_compile_error);
ClassDB::bind_method(D_METHOD("get_stage_compile_error", "stage"), &RDShaderBytecode::get_stage_compile_error);
ClassDB::bind_method(D_METHOD("set_stage_compile_error", "stage", "compile_error"), &RDShaderSPIRV::set_stage_compile_error);
ClassDB::bind_method(D_METHOD("get_stage_compile_error", "stage"), &RDShaderSPIRV::get_stage_compile_error);

ADD_GROUP("Bytecode", "bytecode_");
ADD_PROPERTYI(PropertyInfo(Variant::PACKED_BYTE_ARRAY, "bytecode_vertex"), "set_stage_bytecode", "get_stage_bytecode", RD::SHADER_STAGE_VERTEX);
@ -316,24 +329,29 @@ protected:
class RDShaderFile : public Resource {
GDCLASS(RDShaderFile, Resource)

Map<StringName, Ref<RDShaderBytecode>> versions;
Map<StringName, Ref<RDShaderSPIRV>> versions;
String base_error;

public:
void set_bytecode(const Ref<RDShaderBytecode> &p_bytecode, const StringName &p_version = StringName()) {
void set_bytecode(const Ref<RDShaderSPIRV> &p_bytecode, const StringName &p_version = StringName()) {
ERR_FAIL_COND(p_bytecode.is_null());
versions[p_version] = p_bytecode;
emit_changed();
}

Ref<RDShaderBytecode> get_bytecode(const StringName &p_version = StringName()) const {
ERR_FAIL_COND_V(!versions.has(p_version), Ref<RDShaderBytecode>());
Ref<RDShaderSPIRV> get_spirv(const StringName &p_version = StringName()) const {
ERR_FAIL_COND_V(!versions.has(p_version), Ref<RDShaderSPIRV>());
return versions[p_version];
}

Vector<RD::ShaderStageSPIRVData> get_spirv_stages(const StringName &p_version = StringName()) const {
ERR_FAIL_COND_V(!versions.has(p_version), Vector<RD::ShaderStageSPIRVData>());
return versions[p_version]->get_stages();
}

Vector<StringName> get_version_list() const {
Vector<StringName> vnames;
for (Map<StringName, Ref<RDShaderBytecode>>::Element *E = versions.front(); E; E = E->next()) {
for (Map<StringName, Ref<RDShaderSPIRV>>::Element *E = versions.front(); E; E = E->next()) {
vnames.push_back(E->key());
}
vnames.sort_custom<StringName::AlphCompare>();
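The new get_spirv_stages() helper makes it a one-liner to go from an imported shader file to a device shader. A sketch; the resource path is a placeholder, and the RenderingDevice is obtained here through the RenderingServer singleton, which may differ depending on how the device is created:

#include "core/io/resource_loader.h"
#include "servers/rendering/rendering_device_binds.h"
#include "servers/rendering_server.h"

// Sketch: load an imported .glsl RDShaderFile and turn its default version
// into a runnable shader. Error handling kept minimal.
RID shader_from_file() {
	Ref<RDShaderFile> shader_file = ResourceLoader::load("res://compute.glsl");
	ERR_FAIL_COND_V(shader_file.is_null(), RID());

	RenderingDevice *rd = RenderingServer::get_singleton()->get_rendering_device();
	return rd->shader_create_from_spirv(shader_file->get_spirv_stages());
}
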
@ -353,7 +371,7 @@ public:
if (base_error != "") {
ERR_PRINT("Error parsing shader '" + p_file + "':\n\n" + base_error);
} else {
for (Map<StringName, Ref<RDShaderBytecode>>::Element *E = versions.front(); E; E = E->next()) {
for (Map<StringName, Ref<RDShaderSPIRV>>::Element *E = versions.front(); E; E = E->next()) {
for (int i = 0; i < RD::SHADER_STAGE_MAX; i++) {
String error = E->get()->get_stage_compile_error(RD::ShaderStage(i));
if (error != String()) {
@ -390,7 +408,7 @@ protected:
p_versions.get_key_list(&keys);
for (const Variant &E : keys) {
StringName name = E;
Ref<RDShaderBytecode> bc = p_versions[E];
Ref<RDShaderSPIRV> bc = p_versions[E];
ERR_CONTINUE(bc.is_null());
versions[name] = bc;
}
@ -400,7 +418,7 @@ protected:

static void _bind_methods() {
ClassDB::bind_method(D_METHOD("set_bytecode", "bytecode", "version"), &RDShaderFile::set_bytecode, DEFVAL(StringName()));
ClassDB::bind_method(D_METHOD("get_bytecode", "version"), &RDShaderFile::get_bytecode, DEFVAL(StringName()));
ClassDB::bind_method(D_METHOD("get_spirv", "version"), &RDShaderFile::get_spirv, DEFVAL(StringName()));
ClassDB::bind_method(D_METHOD("get_version_list"), &RDShaderFile::get_version_list);

ClassDB::bind_method(D_METHOD("set_base_error", "error"), &RDShaderFile::set_base_error);