Mirror of https://github.com/yuzu-emu/yuzu-android.git (synced 2024-11-22 15:25:44 +01:00)
Merge pull request #4034 from ReinUsesLisp/storage-texels
vk_rasterizer: Implement storage texels and atomic image operations
Commit 2293e8a11a
Changed: externals/sirit (vendored submodule)
@@ -1 +1 @@
-Subproject commit a62c5bbc100a5e5a31ea0ccc4a78d8fa6a4167ce
+Subproject commit eefca56afd49379bdebc97ded8b480839f930881
@@ -53,8 +53,9 @@ vk::DescriptorSetLayout VKComputePipeline::CreateDescriptorSetLayout() const {
     };
     add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, entries.const_buffers.size());
     add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, entries.global_buffers.size());
-    add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, entries.texel_buffers.size());
+    add_bindings(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, entries.uniform_texels.size());
     add_bindings(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, entries.samplers.size());
+    add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, entries.storage_texels.size());
     add_bindings(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, entries.images.size());

     VkDescriptorSetLayoutCreateInfo ci;
@@ -42,6 +42,7 @@ vk::DescriptorPool* VKDescriptorPool::AllocateNewPool() {
         {VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, num_sets * 60},
         {VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, num_sets * 64},
         {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, num_sets * 64},
+        {VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, num_sets * 64},
         {VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, num_sets * 40}};

     VkDescriptorPoolCreateInfo ci;
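
Side note (generic Vulkan, not from the diff): a set layout containing storage texel buffer bindings can only be allocated from a pool that reserved VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER descriptors, otherwise allocation may fail with VK_ERROR_OUT_OF_POOL_MEMORY, which is why the pool sizes grow together with the layouts. A minimal sketch using just the sizes visible in this hunk (num_sets is assumed):

    #include <array>
    #include <vulkan/vulkan.h>

    // Hedged sketch: create a descriptor pool able to back the descriptor types listed above.
    VkDescriptorPool CreateExamplePool(VkDevice device, uint32_t num_sets) {
        const std::array<VkDescriptorPoolSize, 5> sizes{{
            {VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, num_sets * 60},
            {VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, num_sets * 64},
            {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, num_sets * 64},
            {VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, num_sets * 64},
            {VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, num_sets * 40},
        }};
        VkDescriptorPoolCreateInfo ci{};
        ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
        ci.maxSets = num_sets;
        ci.poolSizeCount = static_cast<uint32_t>(sizes.size());
        ci.pPoolSizes = sizes.data();
        VkDescriptorPool pool = VK_NULL_HANDLE;
        vkCreateDescriptorPool(device, &ci, nullptr, &pool);
        return pool;
    }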
@@ -45,6 +45,7 @@ constexpr VkDescriptorType UNIFORM_BUFFER = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
 constexpr VkDescriptorType STORAGE_BUFFER = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
 constexpr VkDescriptorType UNIFORM_TEXEL_BUFFER = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
 constexpr VkDescriptorType COMBINED_IMAGE_SAMPLER = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
+constexpr VkDescriptorType STORAGE_TEXEL_BUFFER = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
 constexpr VkDescriptorType STORAGE_IMAGE = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;

 constexpr VideoCommon::Shader::CompilerSettings compiler_settings{
@@ -104,8 +105,9 @@ u32 FillDescriptorLayout(const ShaderEntries& entries,
     u32 binding = base_binding;
     AddBindings<UNIFORM_BUFFER>(bindings, binding, flags, entries.const_buffers);
     AddBindings<STORAGE_BUFFER>(bindings, binding, flags, entries.global_buffers);
-    AddBindings<UNIFORM_TEXEL_BUFFER>(bindings, binding, flags, entries.texel_buffers);
+    AddBindings<UNIFORM_TEXEL_BUFFER>(bindings, binding, flags, entries.uniform_texels);
     AddBindings<COMBINED_IMAGE_SAMPLER>(bindings, binding, flags, entries.samplers);
+    AddBindings<STORAGE_TEXEL_BUFFER>(bindings, binding, flags, entries.storage_texels);
     AddBindings<STORAGE_IMAGE>(bindings, binding, flags, entries.images);
     return binding;
 }
@@ -377,16 +379,17 @@ void AddEntry(std::vector<VkDescriptorUpdateTemplateEntry>& template_entries, u32
         return;
     }

-    if constexpr (descriptor_type == UNIFORM_TEXEL_BUFFER) {
-        // Nvidia has a bug where updating multiple uniform texels at once causes the driver to
-        // crash.
+    if constexpr (descriptor_type == UNIFORM_TEXEL_BUFFER ||
+                  descriptor_type == STORAGE_TEXEL_BUFFER) {
+        // Nvidia has a bug where updating multiple texels at once causes the driver to crash.
+        // Note: Fixed in driver Windows 443.24, Linux 440.66.15
         for (u32 i = 0; i < count; ++i) {
             VkDescriptorUpdateTemplateEntry& entry = template_entries.emplace_back();
             entry.dstBinding = binding + i;
             entry.dstArrayElement = 0;
             entry.descriptorCount = 1;
             entry.descriptorType = descriptor_type;
-            entry.offset = offset + i * entry_size;
+            entry.offset = static_cast<std::size_t>(offset + i * entry_size);
             entry.stride = entry_size;
         }
     } else if (count > 0) {
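
Why the split matters (background, not part of the diff): vkUpdateDescriptorSetWithTemplate walks each VkDescriptorUpdateTemplateEntry and reads descriptorCount descriptors from host memory at the given offset and stride, so one entry covering N consecutive texel-buffer bindings and N single-descriptor entries describe the same update; emitting the per-binding form only works around the driver crash noted above. A generic sketch of the two equivalent layouts (binding, count, offset and entry_size are placeholders):

    #include <cstddef>
    #include <vector>
    #include <vulkan/vulkan.h>

    // Hedged illustration: one batched entry vs. the per-binding split used above for texel buffers.
    void AppendTexelBufferEntries(std::vector<VkDescriptorUpdateTemplateEntry>& entries,
                                  uint32_t binding, uint32_t count, std::size_t offset,
                                  std::size_t entry_size, bool split_per_binding) {
        if (!split_per_binding) {
            // Batched form: a single entry spanning `count` consecutive single-descriptor bindings.
            entries.push_back({binding, 0, count, VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
                               offset, entry_size});
            return;
        }
        // Split form: one entry per binding, reading the same host data at the same offsets.
        for (uint32_t i = 0; i < count; ++i) {
            entries.push_back({binding + i, 0, 1, VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,
                               offset + i * entry_size, entry_size});
        }
    }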
@@ -407,8 +410,9 @@ void FillDescriptorUpdateTemplateEntries(
     std::vector<VkDescriptorUpdateTemplateEntryKHR>& template_entries) {
     AddEntry<UNIFORM_BUFFER>(template_entries, offset, binding, entries.const_buffers);
     AddEntry<STORAGE_BUFFER>(template_entries, offset, binding, entries.global_buffers);
-    AddEntry<UNIFORM_TEXEL_BUFFER>(template_entries, offset, binding, entries.texel_buffers);
+    AddEntry<UNIFORM_TEXEL_BUFFER>(template_entries, offset, binding, entries.uniform_texels);
     AddEntry<COMBINED_IMAGE_SAMPLER>(template_entries, offset, binding, entries.samplers);
+    AddEntry<STORAGE_TEXEL_BUFFER>(template_entries, offset, binding, entries.storage_texels);
     AddEntry<STORAGE_IMAGE>(template_entries, offset, binding, entries.images);
 }

@@ -468,8 +468,9 @@ void RasterizerVulkan::DispatchCompute(GPUVAddr code_addr) {
     const auto& entries = pipeline.GetEntries();
     SetupComputeConstBuffers(entries);
     SetupComputeGlobalBuffers(entries);
-    SetupComputeTexelBuffers(entries);
+    SetupComputeUniformTexels(entries);
     SetupComputeTextures(entries);
+    SetupComputeStorageTexels(entries);
     SetupComputeImages(entries);

     buffer_cache.Unmap();
@@ -787,8 +788,9 @@ void RasterizerVulkan::SetupShaderDescriptors(
         const auto& entries = shader->GetEntries();
         SetupGraphicsConstBuffers(entries, stage);
         SetupGraphicsGlobalBuffers(entries, stage);
-        SetupGraphicsTexelBuffers(entries, stage);
+        SetupGraphicsUniformTexels(entries, stage);
         SetupGraphicsTextures(entries, stage);
+        SetupGraphicsStorageTexels(entries, stage);
         SetupGraphicsImages(entries, stage);
     }
     texture_cache.GuardSamplers(false);
@@ -983,12 +985,12 @@ void RasterizerVulkan::SetupGraphicsGlobalBuffers(const ShaderEntries& entries, std::size_t stage) {
     }
 }

-void RasterizerVulkan::SetupGraphicsTexelBuffers(const ShaderEntries& entries, std::size_t stage) {
+void RasterizerVulkan::SetupGraphicsUniformTexels(const ShaderEntries& entries, std::size_t stage) {
     MICROPROFILE_SCOPE(Vulkan_Textures);
     const auto& gpu = system.GPU().Maxwell3D();
-    for (const auto& entry : entries.texel_buffers) {
+    for (const auto& entry : entries.uniform_texels) {
         const auto image = GetTextureInfo(gpu, entry, stage).tic;
-        SetupTexelBuffer(image, entry);
+        SetupUniformTexels(image, entry);
     }
 }

@@ -1003,6 +1005,15 @@ void RasterizerVulkan::SetupGraphicsTextures(const ShaderEntries& entries, std::size_t stage) {
     }
 }

+void RasterizerVulkan::SetupGraphicsStorageTexels(const ShaderEntries& entries, std::size_t stage) {
+    MICROPROFILE_SCOPE(Vulkan_Textures);
+    const auto& gpu = system.GPU().Maxwell3D();
+    for (const auto& entry : entries.storage_texels) {
+        const auto image = GetTextureInfo(gpu, entry, stage).tic;
+        SetupStorageTexel(image, entry);
+    }
+}
+
 void RasterizerVulkan::SetupGraphicsImages(const ShaderEntries& entries, std::size_t stage) {
     MICROPROFILE_SCOPE(Vulkan_Images);
     const auto& gpu = system.GPU().Maxwell3D();
@@ -1035,12 +1046,12 @@ void RasterizerVulkan::SetupComputeGlobalBuffers(const ShaderEntries& entries) {
     }
 }

-void RasterizerVulkan::SetupComputeTexelBuffers(const ShaderEntries& entries) {
+void RasterizerVulkan::SetupComputeUniformTexels(const ShaderEntries& entries) {
     MICROPROFILE_SCOPE(Vulkan_Textures);
     const auto& gpu = system.GPU().KeplerCompute();
-    for (const auto& entry : entries.texel_buffers) {
+    for (const auto& entry : entries.uniform_texels) {
         const auto image = GetTextureInfo(gpu, entry, ComputeShaderIndex).tic;
-        SetupTexelBuffer(image, entry);
+        SetupUniformTexels(image, entry);
     }
 }

@@ -1055,6 +1066,15 @@ void RasterizerVulkan::SetupComputeTextures(const ShaderEntries& entries) {
     }
 }

+void RasterizerVulkan::SetupComputeStorageTexels(const ShaderEntries& entries) {
+    MICROPROFILE_SCOPE(Vulkan_Textures);
+    const auto& gpu = system.GPU().KeplerCompute();
+    for (const auto& entry : entries.storage_texels) {
+        const auto image = GetTextureInfo(gpu, entry, ComputeShaderIndex).tic;
+        SetupStorageTexel(image, entry);
+    }
+}
+
 void RasterizerVulkan::SetupComputeImages(const ShaderEntries& entries) {
     MICROPROFILE_SCOPE(Vulkan_Images);
     const auto& gpu = system.GPU().KeplerCompute();
@@ -1104,8 +1124,8 @@ void RasterizerVulkan::SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAddr address) {
     update_descriptor_queue.AddBuffer(buffer, offset, size);
 }

-void RasterizerVulkan::SetupTexelBuffer(const Tegra::Texture::TICEntry& tic,
-                                        const TexelBufferEntry& entry) {
+void RasterizerVulkan::SetupUniformTexels(const Tegra::Texture::TICEntry& tic,
+                                          const UniformTexelEntry& entry) {
     const auto view = texture_cache.GetTextureSurface(tic, entry);
     ASSERT(view->IsBufferView());

@@ -1127,6 +1147,14 @@ void RasterizerVulkan::SetupTexture(const Tegra::Texture::FullTextureInfo& texture,
     sampled_views.push_back(ImageView{std::move(view), image_layout});
 }

+void RasterizerVulkan::SetupStorageTexel(const Tegra::Texture::TICEntry& tic,
+                                         const StorageTexelEntry& entry) {
+    const auto view = texture_cache.GetImageSurface(tic, entry);
+    ASSERT(view->IsBufferView());
+
+    update_descriptor_queue.AddTexelBuffer(view->GetBufferView());
+}
+
 void RasterizerVulkan::SetupImage(const Tegra::Texture::TICEntry& tic, const ImageEntry& entry) {
     auto view = texture_cache.GetImageSurface(tic, entry);

@@ -193,12 +193,15 @@ private:
     /// Setup global buffers in the graphics pipeline.
     void SetupGraphicsGlobalBuffers(const ShaderEntries& entries, std::size_t stage);

-    /// Setup texel buffers in the graphics pipeline.
-    void SetupGraphicsTexelBuffers(const ShaderEntries& entries, std::size_t stage);
+    /// Setup uniform texels in the graphics pipeline.
+    void SetupGraphicsUniformTexels(const ShaderEntries& entries, std::size_t stage);

     /// Setup textures in the graphics pipeline.
     void SetupGraphicsTextures(const ShaderEntries& entries, std::size_t stage);

+    /// Setup storage texels in the graphics pipeline.
+    void SetupGraphicsStorageTexels(const ShaderEntries& entries, std::size_t stage);
+
     /// Setup images in the graphics pipeline.
     void SetupGraphicsImages(const ShaderEntries& entries, std::size_t stage);

@@ -209,11 +212,14 @@ private:
     void SetupComputeGlobalBuffers(const ShaderEntries& entries);

     /// Setup texel buffers in the compute pipeline.
-    void SetupComputeTexelBuffers(const ShaderEntries& entries);
+    void SetupComputeUniformTexels(const ShaderEntries& entries);

     /// Setup textures in the compute pipeline.
     void SetupComputeTextures(const ShaderEntries& entries);

+    /// Setup storage texels in the compute pipeline.
+    void SetupComputeStorageTexels(const ShaderEntries& entries);
+
     /// Setup images in the compute pipeline.
     void SetupComputeImages(const ShaderEntries& entries);

@@ -222,10 +228,12 @@ private:

     void SetupGlobalBuffer(const GlobalBufferEntry& entry, GPUVAddr address);

-    void SetupTexelBuffer(const Tegra::Texture::TICEntry& image, const TexelBufferEntry& entry);
+    void SetupUniformTexels(const Tegra::Texture::TICEntry& image, const UniformTexelEntry& entry);

     void SetupTexture(const Tegra::Texture::FullTextureInfo& texture, const SamplerEntry& entry);

+    void SetupStorageTexel(const Tegra::Texture::TICEntry& tic, const StorageTexelEntry& entry);
+
     void SetupImage(const Tegra::Texture::TICEntry& tic, const ImageEntry& entry);

     void UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& regs);
@@ -400,8 +400,9 @@ private:
         u32 binding = specialization.base_binding;
         binding = DeclareConstantBuffers(binding);
         binding = DeclareGlobalBuffers(binding);
-        binding = DeclareTexelBuffers(binding);
+        binding = DeclareUniformTexels(binding);
         binding = DeclareSamplers(binding);
+        binding = DeclareStorageTexels(binding);
         binding = DeclareImages(binding);

         const Id main = OpFunction(t_void, {}, TypeFunction(t_void));
@@ -889,7 +890,7 @@ private:
         return binding;
     }

-    u32 DeclareTexelBuffers(u32 binding) {
+    u32 DeclareUniformTexels(u32 binding) {
         for (const auto& sampler : ir.GetSamplers()) {
             if (!sampler.is_buffer) {
                 continue;
@@ -910,7 +911,7 @@ private:
             Decorate(id, spv::Decoration::Binding, binding++);
             Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);

-            texel_buffers.emplace(sampler.index, TexelBuffer{image_type, id});
+            uniform_texels.emplace(sampler.index, TexelBuffer{image_type, id});
         }
         return binding;
     }
@@ -945,31 +946,48 @@ private:
         return binding;
     }

-    u32 DeclareImages(u32 binding) {
-        for (const auto& image : ir.GetImages()) {
-            const auto [dim, arrayed] = GetImageDim(image);
-            constexpr int depth = 0;
-            constexpr bool ms = false;
-            constexpr int sampled = 2; // This won't be accessed with a sampler
-            constexpr auto format = spv::ImageFormat::Unknown;
-            const Id image_type = TypeImage(t_uint, dim, depth, arrayed, ms, sampled, format, {});
-            const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type);
-            const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
-            AddGlobalVariable(Name(id, fmt::format("image_{}", image.index)));
-
-            Decorate(id, spv::Decoration::Binding, binding++);
-            Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
-            if (image.is_read && !image.is_written) {
-                Decorate(id, spv::Decoration::NonWritable);
-            } else if (image.is_written && !image.is_read) {
-                Decorate(id, spv::Decoration::NonReadable);
-            }
-
-            images.emplace(image.index, StorageImage{image_type, id});
-        }
-        return binding;
-    }
+    u32 DeclareStorageTexels(u32 binding) {
+        for (const auto& image : ir.GetImages()) {
+            if (image.type != Tegra::Shader::ImageType::TextureBuffer) {
+                continue;
+            }
+            DeclareImage(image, binding);
+        }
+        return binding;
+    }
+
+    u32 DeclareImages(u32 binding) {
+        for (const auto& image : ir.GetImages()) {
+            if (image.type == Tegra::Shader::ImageType::TextureBuffer) {
+                continue;
+            }
+            DeclareImage(image, binding);
+        }
+        return binding;
+    }
+
+    void DeclareImage(const Image& image, u32& binding) {
+        const auto [dim, arrayed] = GetImageDim(image);
+        constexpr int depth = 0;
+        constexpr bool ms = false;
+        constexpr int sampled = 2; // This won't be accessed with a sampler
+        const auto format = image.is_atomic ? spv::ImageFormat::R32ui : spv::ImageFormat::Unknown;
+        const Id image_type = TypeImage(t_uint, dim, depth, arrayed, ms, sampled, format, {});
+        const Id pointer_type = TypePointer(spv::StorageClass::UniformConstant, image_type);
+        const Id id = OpVariable(pointer_type, spv::StorageClass::UniformConstant);
+        AddGlobalVariable(Name(id, fmt::format("image_{}", image.index)));
+
+        Decorate(id, spv::Decoration::Binding, binding++);
+        Decorate(id, spv::Decoration::DescriptorSet, DESCRIPTOR_SET);
+        if (image.is_read && !image.is_written) {
+            Decorate(id, spv::Decoration::NonWritable);
+        } else if (image.is_written && !image.is_read) {
+            Decorate(id, spv::Decoration::NonReadable);
+        }
+
+        images.emplace(image.index, StorageImage{image_type, id});
+    }

     bool IsRenderTargetEnabled(u32 rt) const {
         for (u32 component = 0; component < 4; ++component) {
             if (header.ps.IsColorComponentOutputEnabled(rt, component)) {
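
A note on the R32ui switch above (background from the Vulkan/SPIR-V specifications, not extra code in this commit): SPIR-V image atomics go through OpImageTexelPointer on an image whose texel format is concrete, so images the IR marks as is_atomic are declared with spv::ImageFormat::R32ui instead of Unknown. On the API side, the matching VkFormat must advertise VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT, and VK_FORMAT_R32_UINT is required by the spec to support it. A hedged, generic capability check:

    #include <vulkan/vulkan.h>

    // Hedged sketch: atomics on a storage image require the format to advertise
    // VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT (VK_FORMAT_R32_UINT must support it).
    bool SupportsStorageImageAtomics(VkPhysicalDevice physical_device, VkFormat format) {
        VkFormatProperties properties{};
        vkGetPhysicalDeviceFormatProperties(physical_device, format, &properties);
        return (properties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT) != 0;
    }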
@@ -1256,7 +1274,7 @@ private:
         } else {
             UNREACHABLE_MSG("Unmanaged offset node type");
         }
-        pointer = OpAccessChain(t_cbuf_float, buffer_id, Constant(t_uint, 0), buffer_index,
+        pointer = OpAccessChain(t_cbuf_float, buffer_id, v_uint_zero, buffer_index,
                                 buffer_element);
     }
     return {OpLoad(t_float, pointer), Type::Float};
@@ -1611,7 +1629,7 @@ private:

         const Id result = OpIAddCarry(TypeStruct({t_uint, t_uint}), op_a, op_b);
         const Id carry = OpCompositeExtract(t_uint, result, 1);
-        return {OpINotEqual(t_bool, carry, Constant(t_uint, 0)), Type::Bool};
+        return {OpINotEqual(t_bool, carry, v_uint_zero), Type::Bool};
     }

     Expression LogicalAssign(Operation operation) {
@@ -1674,7 +1692,7 @@ private:
         const auto& meta = std::get<MetaTexture>(operation.GetMeta());
         const u32 index = meta.sampler.index;
         if (meta.sampler.is_buffer) {
-            const auto& entry = texel_buffers.at(index);
+            const auto& entry = uniform_texels.at(index);
             return OpLoad(entry.image_type, entry.image);
         } else {
             const auto& entry = sampled_images.at(index);
@@ -1951,39 +1969,20 @@ private:
         return {};
     }

-    Expression AtomicImageAdd(Operation operation) {
-        UNIMPLEMENTED();
-        return {};
-    }
-
-    Expression AtomicImageMin(Operation operation) {
-        UNIMPLEMENTED();
-        return {};
-    }
-
-    Expression AtomicImageMax(Operation operation) {
-        UNIMPLEMENTED();
-        return {};
-    }
-
-    Expression AtomicImageAnd(Operation operation) {
-        UNIMPLEMENTED();
-        return {};
-    }
-
-    Expression AtomicImageOr(Operation operation) {
-        UNIMPLEMENTED();
-        return {};
-    }
-
-    Expression AtomicImageXor(Operation operation) {
-        UNIMPLEMENTED();
-        return {};
-    }
-
-    Expression AtomicImageExchange(Operation operation) {
-        UNIMPLEMENTED();
-        return {};
+    template <Id (Module::*func)(Id, Id, Id, Id, Id)>
+    Expression AtomicImage(Operation operation) {
+        const auto& meta{std::get<MetaImage>(operation.GetMeta())};
+        ASSERT(meta.values.size() == 1);
+
+        const Id coordinate = GetCoordinates(operation, Type::Int);
+        const Id image = images.at(meta.image.index).image;
+        const Id sample = v_uint_zero;
+        const Id pointer = OpImageTexelPointer(t_image_uint, image, coordinate, sample);
+
+        const Id scope = Constant(t_uint, static_cast<u32>(spv::Scope::Device));
+        const Id semantics = v_uint_zero;
+        const Id value = AsUint(Visit(meta.values[0]));
+        return {(this->*func)(t_uint, pointer, scope, semantics, value), Type::Uint};
     }

     template <Id (Module::*func)(Id, Id, Id, Id, Id)>
@@ -1998,7 +1997,7 @@ private:
             return {v_float_zero, Type::Float};
         }
         const Id scope = Constant(t_uint, static_cast<u32>(spv::Scope::Device));
-        const Id semantics = Constant(t_uint, 0);
+        const Id semantics = v_uint_zero;
         const Id value = AsUint(Visit(operation[1]));

         return {(this->*func)(t_uint, pointer, scope, semantics, value), Type::Uint};
@@ -2622,11 +2621,11 @@ private:

         &SPIRVDecompiler::ImageLoad,
         &SPIRVDecompiler::ImageStore,
-        &SPIRVDecompiler::AtomicImageAdd,
-        &SPIRVDecompiler::AtomicImageAnd,
-        &SPIRVDecompiler::AtomicImageOr,
-        &SPIRVDecompiler::AtomicImageXor,
-        &SPIRVDecompiler::AtomicImageExchange,
+        &SPIRVDecompiler::AtomicImage<&Module::OpAtomicIAdd>,
+        &SPIRVDecompiler::AtomicImage<&Module::OpAtomicAnd>,
+        &SPIRVDecompiler::AtomicImage<&Module::OpAtomicOr>,
+        &SPIRVDecompiler::AtomicImage<&Module::OpAtomicXor>,
+        &SPIRVDecompiler::AtomicImage<&Module::OpAtomicExchange>,

         &SPIRVDecompiler::Atomic<&Module::OpAtomicExchange>,
         &SPIRVDecompiler::Atomic<&Module::OpAtomicIAdd>,
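
The table change above replaces five hand-written stubs with instantiations of the new AtomicImage template, selecting the SPIR-V emitter through a pointer-to-member-function template parameter. A self-contained toy analogue of that dispatch pattern (all names here are invented for illustration and are not part of yuzu):

    #include <iostream>

    // Hedged analogue: one function template parameterized on a pointer-to-member-function
    // replaces a family of near-identical handlers, as the decompiler's table does above.
    struct Emitter {
        int AtomicAdd(int a, int b) { return a + b; }
        int AtomicAnd(int a, int b) { return a & b; }
    };

    struct Decompiler {
        Emitter module;

        template <int (Emitter::*func)(int, int)>
        int Atomic(int a, int b) {
            return (module.*func)(a, b); // forwards to the selected emitter member
        }
    };

    int main() {
        Decompiler decompiler;
        // The operation table stores such instantiations as member-function pointers.
        std::cout << decompiler.Atomic<&Emitter::AtomicAdd>(2, 3) << '\n'; // 5
        std::cout << decompiler.Atomic<&Emitter::AtomicAnd>(6, 3) << '\n'; // 2
    }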
@@ -2768,8 +2767,11 @@ private:
         Decorate(TypeStruct(t_gmem_array), spv::Decoration::Block), 0, spv::Decoration::Offset, 0);
     const Id t_gmem_ssbo = TypePointer(spv::StorageClass::StorageBuffer, t_gmem_struct);

+    const Id t_image_uint = TypePointer(spv::StorageClass::Image, t_uint);
+
     const Id v_float_zero = Constant(t_float, 0.0f);
     const Id v_float_one = Constant(t_float, 1.0f);
+    const Id v_uint_zero = Constant(t_uint, 0);

     // Nvidia uses these defaults for varyings (e.g. position and generic attributes)
     const Id v_varying_default =
@@ -2794,15 +2796,16 @@ private:
     std::unordered_map<u8, GenericVaryingDescription> output_attributes;
     std::map<u32, Id> constant_buffers;
     std::map<GlobalMemoryBase, Id> global_buffers;
-    std::map<u32, TexelBuffer> texel_buffers;
+    std::map<u32, TexelBuffer> uniform_texels;
     std::map<u32, SampledImage> sampled_images;
+    std::map<u32, TexelBuffer> storage_texels;
     std::map<u32, StorageImage> images;

-    std::array<Id, Maxwell::NumRenderTargets> frag_colors{};
     Id instance_index{};
     Id vertex_index{};
     Id base_instance{};
     Id base_vertex{};
+    std::array<Id, Maxwell::NumRenderTargets> frag_colors{};
     Id frag_depth{};
     Id frag_coord{};
     Id front_facing{};
@@ -3058,13 +3061,17 @@ ShaderEntries GenerateShaderEntries(const VideoCommon::Shader::ShaderIR& ir) {
     }
     for (const auto& sampler : ir.GetSamplers()) {
         if (sampler.is_buffer) {
-            entries.texel_buffers.emplace_back(sampler);
+            entries.uniform_texels.emplace_back(sampler);
         } else {
             entries.samplers.emplace_back(sampler);
         }
     }
     for (const auto& image : ir.GetImages()) {
-        entries.images.emplace_back(image);
+        if (image.type == Tegra::Shader::ImageType::TextureBuffer) {
+            entries.storage_texels.emplace_back(image);
+        } else {
+            entries.images.emplace_back(image);
+        }
     }
     for (const auto& attribute : ir.GetInputAttributes()) {
         if (IsGenericAttribute(attribute)) {
@@ -21,8 +21,9 @@ class VKDevice;
 namespace Vulkan {

 using Maxwell = Tegra::Engines::Maxwell3D::Regs;
-using TexelBufferEntry = VideoCommon::Shader::Sampler;
+using UniformTexelEntry = VideoCommon::Shader::Sampler;
 using SamplerEntry = VideoCommon::Shader::Sampler;
+using StorageTexelEntry = VideoCommon::Shader::Image;
 using ImageEntry = VideoCommon::Shader::Image;

 constexpr u32 DESCRIPTOR_SET = 0;
@@ -66,13 +67,15 @@ private:
 struct ShaderEntries {
     u32 NumBindings() const {
         return static_cast<u32>(const_buffers.size() + global_buffers.size() +
-                                texel_buffers.size() + samplers.size() + images.size());
+                                uniform_texels.size() + samplers.size() + storage_texels.size() +
+                                images.size());
     }

     std::vector<ConstBufferEntry> const_buffers;
     std::vector<GlobalBufferEntry> global_buffers;
-    std::vector<TexelBufferEntry> texel_buffers;
+    std::vector<UniformTexelEntry> uniform_texels;
     std::vector<SamplerEntry> samplers;
+    std::vector<StorageTexelEntry> storage_texels;
     std::vector<ImageEntry> images;
     std::set<u32> attributes;
     std::array<bool, Maxwell::NumClipDistances> clip_distances{};
@@ -100,8 +100,8 @@ vk::Buffer CreateBuffer(const VKDevice& device, const SurfaceParams& params,
     ci.pNext = nullptr;
     ci.flags = 0;
     ci.size = static_cast<VkDeviceSize>(host_memory_size);
-    ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
-               VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+    ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT |
+               VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
     ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
     ci.queueFamilyIndexCount = 0;
     ci.pQueueFamilyIndices = nullptr;
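
Context for the usage-flag change (generic Vulkan, not from the diff): texel buffers are accessed through a VkBufferView, and binding a view as a storage texel buffer is only valid if the underlying buffer was created with VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, hence the extra bit above. A minimal sketch of creating such a view (format, offset and range are placeholders):

    #include <vulkan/vulkan.h>

    // Hedged sketch: create a buffer view suitable for both uniform and storage texel access,
    // assuming `buffer` was created with the usage bits shown in the hunk above.
    VkBufferView CreateTexelBufferView(VkDevice device, VkBuffer buffer, VkFormat format,
                                       VkDeviceSize offset, VkDeviceSize range) {
        VkBufferViewCreateInfo ci{};
        ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
        ci.buffer = buffer;
        ci.format = format; // the format must support the texel buffer features being used
        ci.offset = offset;
        ci.range = range;
        VkBufferView view = VK_NULL_HANDLE;
        vkCreateBufferView(device, &ci, nullptr, &view);
        return view;
    }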