Mirror of https://github.com/yuzu-emu/yuzu-android.git (synced 2024-11-26 05:05:48 +01:00)

Merge pull request #8024 from liamwhite/const-indexing

Add shader support for const buffer indirect addressing

This commit is contained in commit cb86e7941b.
@@ -123,34 +123,36 @@ std::optional<OutAttr> OutputAttrPointer(EmitContext& ctx, IR::Attribute attr) {
 }
 
 Id GetCbuf(EmitContext& ctx, Id result_type, Id UniformDefinitions::*member_ptr, u32 element_size,
-           const IR::Value& binding, const IR::Value& offset) {
+           const IR::Value& binding, const IR::Value& offset, const Id indirect_func) {
+    Id buffer_offset;
+    const Id uniform_type{ctx.uniform_types.*member_ptr};
+    if (offset.IsImmediate()) {
+        // Hardware been proved to read the aligned offset (e.g. LDC.U32 at 6 will read offset 4)
+        const Id imm_offset{ctx.Const(offset.U32() / element_size)};
+        buffer_offset = imm_offset;
+    } else if (element_size > 1) {
+        const u32 log2_element_size{static_cast<u32>(std::countr_zero(element_size))};
+        const Id shift{ctx.Const(log2_element_size)};
+        buffer_offset = ctx.OpShiftRightArithmetic(ctx.U32[1], ctx.Def(offset), shift);
+    } else {
+        buffer_offset = ctx.Def(offset);
+    }
     if (!binding.IsImmediate()) {
-        throw NotImplementedException("Constant buffer indexing");
+        return ctx.OpFunctionCall(result_type, indirect_func, ctx.Def(binding), buffer_offset);
     }
     const Id cbuf{ctx.cbufs[binding.U32()].*member_ptr};
-    const Id uniform_type{ctx.uniform_types.*member_ptr};
-    if (!offset.IsImmediate()) {
-        Id index{ctx.Def(offset)};
-        if (element_size > 1) {
-            const u32 log2_element_size{static_cast<u32>(std::countr_zero(element_size))};
-            const Id shift{ctx.Const(log2_element_size)};
-            index = ctx.OpShiftRightArithmetic(ctx.U32[1], ctx.Def(offset), shift);
-        }
-        const Id access_chain{ctx.OpAccessChain(uniform_type, cbuf, ctx.u32_zero_value, index)};
-        return ctx.OpLoad(result_type, access_chain);
-    }
-    // Hardware been proved to read the aligned offset (e.g. LDC.U32 at 6 will read offset 4)
-    const Id imm_offset{ctx.Const(offset.U32() / element_size)};
-    const Id access_chain{ctx.OpAccessChain(uniform_type, cbuf, ctx.u32_zero_value, imm_offset)};
+    const Id access_chain{ctx.OpAccessChain(uniform_type, cbuf, ctx.u32_zero_value, buffer_offset)};
     return ctx.OpLoad(result_type, access_chain);
 }
 
 Id GetCbufU32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
-    return GetCbuf(ctx, ctx.U32[1], &UniformDefinitions::U32, sizeof(u32), binding, offset);
+    return GetCbuf(ctx, ctx.U32[1], &UniformDefinitions::U32, sizeof(u32), binding, offset,
+                   ctx.load_const_func_u32);
 }
 
 Id GetCbufU32x4(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
-    return GetCbuf(ctx, ctx.U32[4], &UniformDefinitions::U32x4, sizeof(u32[4]), binding, offset);
+    return GetCbuf(ctx, ctx.U32[4], &UniformDefinitions::U32x4, sizeof(u32[4]), binding, offset,
+                   ctx.load_const_func_u32x4);
 }
 
 Id GetCbufElement(EmitContext& ctx, Id vector, const IR::Value& offset, u32 index_offset) {
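Note on the GetCbuf rewrite above: the byte offset is now normalised to an element index once, up front. Immediate offsets are divided by the element size (matching the hardware's aligned-read behaviour, e.g. LDC.U32 at byte 6 reads the word at byte 4), while dynamic offsets are shifted right by log2 of the element size. A minimal standalone sketch of that normalisation, using illustrative names that are not part of the codebase:

#include <bit>
#include <cstdint>

// Illustrative only: mirrors how GetCbuf turns a byte offset into an element
// index. Immediate offsets truncate to the aligned element; dynamic offsets
// are shifted by log2(element_size) instead of divided.
constexpr std::uint32_t ElementIndexFromImmediate(std::uint32_t byte_offset,
                                                  std::uint32_t element_size) {
    return byte_offset / element_size;
}

constexpr std::uint32_t ElementIndexFromDynamic(std::uint32_t byte_offset,
                                                std::uint32_t element_size) {
    const auto shift = static_cast<std::uint32_t>(std::countr_zero(element_size));
    return byte_offset >> shift;
}

static_assert(ElementIndexFromImmediate(6, 4) == 1); // byte 6 -> word 1 (byte 4)
static_assert(ElementIndexFromDynamic(6, 4) == 1);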
@@ -201,7 +203,8 @@ void EmitGetIndirectBranchVariable(EmitContext&) {
 
 Id EmitGetCbufU8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
     if (ctx.profile.support_descriptor_aliasing && ctx.profile.support_int8) {
-        const Id load{GetCbuf(ctx, ctx.U8, &UniformDefinitions::U8, sizeof(u8), binding, offset)};
+        const Id load{GetCbuf(ctx, ctx.U8, &UniformDefinitions::U8, sizeof(u8), binding, offset,
+                              ctx.load_const_func_u8)};
         return ctx.OpUConvert(ctx.U32[1], load);
     }
     Id element{};
@@ -217,7 +220,8 @@ Id EmitGetCbufU8(EmitContext& ctx, const IR::Value& binding, const IR::Value& of
 
 Id EmitGetCbufS8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
     if (ctx.profile.support_descriptor_aliasing && ctx.profile.support_int8) {
-        const Id load{GetCbuf(ctx, ctx.S8, &UniformDefinitions::S8, sizeof(s8), binding, offset)};
+        const Id load{GetCbuf(ctx, ctx.S8, &UniformDefinitions::S8, sizeof(s8), binding, offset,
+                              ctx.load_const_func_u8)};
         return ctx.OpSConvert(ctx.U32[1], load);
     }
     Id element{};
@@ -233,8 +237,8 @@ Id EmitGetCbufS8(EmitContext& ctx, const IR::Value& binding, const IR::Value& of
 
 Id EmitGetCbufU16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
     if (ctx.profile.support_descriptor_aliasing && ctx.profile.support_int16) {
-        const Id load{
-            GetCbuf(ctx, ctx.U16, &UniformDefinitions::U16, sizeof(u16), binding, offset)};
+        const Id load{GetCbuf(ctx, ctx.U16, &UniformDefinitions::U16, sizeof(u16), binding, offset,
+                              ctx.load_const_func_u16)};
         return ctx.OpUConvert(ctx.U32[1], load);
     }
     Id element{};
@@ -250,8 +254,8 @@ Id EmitGetCbufU16(EmitContext& ctx, const IR::Value& binding, const IR::Value& o
 
 Id EmitGetCbufS16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
     if (ctx.profile.support_descriptor_aliasing && ctx.profile.support_int16) {
-        const Id load{
-            GetCbuf(ctx, ctx.S16, &UniformDefinitions::S16, sizeof(s16), binding, offset)};
+        const Id load{GetCbuf(ctx, ctx.S16, &UniformDefinitions::S16, sizeof(s16), binding, offset,
+                              ctx.load_const_func_u16)};
         return ctx.OpSConvert(ctx.U32[1], load);
     }
     Id element{};
@@ -276,7 +280,8 @@ Id EmitGetCbufU32(EmitContext& ctx, const IR::Value& binding, const IR::Value& o
 
 Id EmitGetCbufF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
     if (ctx.profile.support_descriptor_aliasing) {
-        return GetCbuf(ctx, ctx.F32[1], &UniformDefinitions::F32, sizeof(f32), binding, offset);
+        return GetCbuf(ctx, ctx.F32[1], &UniformDefinitions::F32, sizeof(f32), binding, offset,
+                       ctx.load_const_func_f32);
     } else {
         const Id vector{GetCbufU32x4(ctx, binding, offset)};
         return ctx.OpBitcast(ctx.F32[1], GetCbufElement(ctx, vector, offset, 0u));
@@ -285,8 +290,8 @@ Id EmitGetCbufF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& o
 
 Id EmitGetCbufU32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
     if (ctx.profile.support_descriptor_aliasing) {
-        return GetCbuf(ctx, ctx.U32[2], &UniformDefinitions::U32x2, sizeof(u32[2]), binding,
-                       offset);
+        return GetCbuf(ctx, ctx.U32[2], &UniformDefinitions::U32x2, sizeof(u32[2]), binding, offset,
+                       ctx.load_const_func_u32x2);
     } else {
         const Id vector{GetCbufU32x4(ctx, binding, offset)};
         return ctx.OpCompositeConstruct(ctx.U32[2], GetCbufElement(ctx, vector, offset, 0u),
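Each typed wrapper above now threads the matching load_const_func_* through to GetCbuf; sub-word results are still widened to 32 bits afterwards (OpUConvert for the unsigned variants, OpSConvert for the signed ones). A small sketch of that widening, with illustrative names that are not from the codebase:

#include <cstdint>

// Illustrative only: the U8 and S8 paths load through the same 8-bit
// accessor; the unsigned variant zero-extends into a 32-bit value while the
// signed variant sign-extends, matching OpUConvert vs. OpSConvert.
std::uint32_t WidenU8(std::uint8_t value) {
    return static_cast<std::uint32_t>(value); // zero-extend
}

std::uint32_t WidenS8(std::uint8_t value) {
    return static_cast<std::uint32_t>(
        static_cast<std::int32_t>(static_cast<std::int8_t>(value))); // sign-extend
}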
@@ -464,6 +464,7 @@ EmitContext::EmitContext(const Profile& profile_, const RuntimeInfo& runtime_inf
     DefineSharedMemory(program);
     DefineSharedMemoryFunctions(program);
     DefineConstantBuffers(program.info, uniform_binding);
+    DefineConstantBufferIndirectFunctions(program.info);
     DefineStorageBuffers(program.info, storage_binding);
     DefineTextureBuffers(program.info, texture_binding);
     DefineImageBuffers(program.info, image_binding);
@@ -993,7 +994,7 @@ void EmitContext::DefineConstantBuffers(const Info& info, u32& binding) {
         }
         return;
     }
-    IR::Type types{info.used_constant_buffer_types};
+    IR::Type types{info.used_constant_buffer_types | info.used_indirect_cbuf_types};
     if (True(types & IR::Type::U8)) {
         if (profile.support_int8) {
             DefineConstBuffers(*this, info, &UniformDefinitions::U8, binding, U8, 'u', sizeof(u8));
@@ -1027,6 +1028,62 @@ void EmitContext::DefineConstantBuffers(const Info& info, u32& binding) {
     binding += static_cast<u32>(info.constant_buffer_descriptors.size());
 }
 
+void EmitContext::DefineConstantBufferIndirectFunctions(const Info& info) {
+    if (!info.uses_cbuf_indirect) {
+        return;
+    }
+    const auto make_accessor{[&](Id buffer_type, Id UniformDefinitions::*member_ptr) {
+        const Id func_type{TypeFunction(buffer_type, U32[1], U32[1])};
+        const Id func{OpFunction(buffer_type, spv::FunctionControlMask::MaskNone, func_type)};
+        const Id binding{OpFunctionParameter(U32[1])};
+        const Id offset{OpFunctionParameter(U32[1])};
+
+        AddLabel();
+
+        const Id merge_label{OpLabel()};
+        const Id uniform_type{uniform_types.*member_ptr};
+
+        std::array<Id, Info::MAX_CBUFS> buf_labels;
+        std::array<Sirit::Literal, Info::MAX_CBUFS> buf_literals;
+        for (u32 i = 0; i < Info::MAX_CBUFS; i++) {
+            buf_labels[i] = OpLabel();
+            buf_literals[i] = Sirit::Literal{i};
+        }
+        OpSelectionMerge(merge_label, spv::SelectionControlMask::MaskNone);
+        OpSwitch(binding, buf_labels[0], buf_literals, buf_labels);
+        for (u32 i = 0; i < Info::MAX_CBUFS; i++) {
+            AddLabel(buf_labels[i]);
+            const Id cbuf{cbufs[i].*member_ptr};
+            const Id access_chain{OpAccessChain(uniform_type, cbuf, u32_zero_value, offset)};
+            const Id result{OpLoad(buffer_type, access_chain)};
+            OpReturnValue(result);
+        }
+        AddLabel(merge_label);
+        OpUnreachable();
+        OpFunctionEnd();
+        return func;
+    }};
+    IR::Type types{info.used_indirect_cbuf_types};
+    if (True(types & IR::Type::U8)) {
+        load_const_func_u8 = make_accessor(U8, &UniformDefinitions::U8);
+    }
+    if (True(types & IR::Type::U16)) {
+        load_const_func_u16 = make_accessor(U16, &UniformDefinitions::U16);
+    }
+    if (True(types & IR::Type::F32)) {
+        load_const_func_f32 = make_accessor(F32[1], &UniformDefinitions::F32);
+    }
+    if (True(types & IR::Type::U32)) {
+        load_const_func_u32 = make_accessor(U32[1], &UniformDefinitions::U32);
+    }
+    if (True(types & IR::Type::U32x2)) {
+        load_const_func_u32x2 = make_accessor(U32[2], &UniformDefinitions::U32x2);
+    }
+    if (True(types & IR::Type::U32x4)) {
+        load_const_func_u32x4 = make_accessor(U32[4], &UniformDefinitions::U32x4);
+    }
+}
+
 void EmitContext::DefineStorageBuffers(const Info& info, u32& binding) {
     if (info.storage_buffers_descriptors.empty()) {
         return;
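make_accessor above emits one SPIR-V helper per used element type; at runtime the helper behaves like a switch over the binding index that loads from the matching constant buffer. A rough C++ analogue of the control flow the generated function encodes, with hypothetical names and an assumed buffer count of 18 (Info::MAX_CBUFS):

#include <array>
#include <cstdint>
#include <vector>

// Illustrative analogue of the emitted SPIR-V accessor: OpSwitch over
// `binding` picks a constant buffer, the selected case performs the
// OpAccessChain + OpLoad, and binding 0 doubles as the default label.
constexpr std::uint32_t kMaxCbufs = 18; // assumed value of Info::MAX_CBUFS

using CbufArray = std::array<std::vector<std::uint32_t>, kMaxCbufs>;

std::uint32_t LoadConstU32(const CbufArray& cbufs, std::uint32_t binding,
                           std::uint32_t element_offset) {
    switch (binding) {
    case 0:
        return cbufs[0][element_offset];
    case 1:
        return cbufs[1][element_offset];
    // The generated function spells out one case per buffer up to kMaxCbufs - 1;
    // they all have the same shape, so the remaining cases are summarised here.
    default:
        return cbufs[binding < kMaxCbufs ? binding : 0][element_offset];
    }
}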
@@ -294,6 +294,13 @@ public:
 
     std::vector<Id> interfaces;
 
+    Id load_const_func_u8{};
+    Id load_const_func_u16{};
+    Id load_const_func_u32{};
+    Id load_const_func_f32{};
+    Id load_const_func_u32x2{};
+    Id load_const_func_u32x4{};
+
 private:
     void DefineCommonTypes(const Info& info);
     void DefineCommonConstants();
@@ -302,6 +309,7 @@ private:
     void DefineSharedMemory(const IR::Program& program);
     void DefineSharedMemoryFunctions(const IR::Program& program);
     void DefineConstantBuffers(const Info& info, u32& binding);
+    void DefineConstantBufferIndirectFunctions(const Info& info);
     void DefineStorageBuffers(const Info& info, u32& binding);
     void DefineTextureBuffers(const Info& info, u32& binding);
    void DefineImageBuffers(const Info& info, u32& binding);
@@ -11,10 +11,20 @@ namespace Shader::Maxwell {
 using namespace LDC;
 namespace {
 std::pair<IR::U32, IR::U32> Slot(IR::IREmitter& ir, Mode mode, const IR::U32& imm_index,
-                                 const IR::U32& reg, const IR::U32& imm) {
+                                 const IR::U32& reg, const IR::U32& imm_offset) {
     switch (mode) {
     case Mode::Default:
-        return {imm_index, ir.IAdd(reg, imm)};
+        return {imm_index, ir.IAdd(reg, imm_offset)};
+    case Mode::IS: {
+        // Segmented addressing mode
+        // Ra+imm_offset points into a flat mapping of const buffer
+        // address space
+        const IR::U32 address{ir.IAdd(reg, imm_offset)};
+        const IR::U32 index{ir.BitFieldExtract(address, ir.Imm32(16), ir.Imm32(16))};
+        const IR::U32 offset{ir.BitFieldExtract(address, ir.Imm32(0), ir.Imm32(16))};
+
+        return {ir.IAdd(index, imm_index), offset};
+    }
     default:
         break;
     }
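In the new Mode::IS path, Ra plus the immediate offset forms a flat 32-bit address into const-buffer space: the upper 16 bits select the buffer slot (added to the immediate index) and the lower 16 bits are the byte offset within that buffer. A minimal sketch of that decomposition with plain integers standing in for IR values (hypothetical helper, not from the codebase):

#include <cstdint>
#include <utility>

// Illustrative only: mirrors the two BitFieldExtract calls in Slot() for
// Mode::IS. The flat address is reg + imm_offset; bits [31:16] index the
// constant buffer (added to imm_index) and bits [15:0] are the byte offset.
constexpr std::pair<std::uint32_t, std::uint32_t>
SegmentedSlot(std::uint32_t imm_index, std::uint32_t reg, std::uint32_t imm_offset) {
    const std::uint32_t address = reg + imm_offset;
    const std::uint32_t index = (address >> 16) & 0xFFFFu;
    const std::uint32_t offset = address & 0xFFFFu;
    return {imm_index + index, offset};
}

// Example: reg = 0x0003'0010, imm_offset = 0 -> buffer index imm_index + 3, byte offset 0x10.
static_assert(SegmentedSlot(0, 0x0003'0010u, 0) ==
              std::pair<std::uint32_t, std::uint32_t>{3u, 0x10u});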
@@ -29,6 +29,46 @@ void AddConstantBufferDescriptor(Info& info, u32 index, u32 count) {
     });
 }
 
+void AddRegisterIndexedLdc(Info& info) {
+    info.uses_cbuf_indirect = true;
+
+    // The shader can use any possible constant buffer
+    info.constant_buffer_mask = (1 << Info::MAX_CBUFS) - 1;
+
+    auto& cbufs{info.constant_buffer_descriptors};
+    cbufs.clear();
+    for (u32 i = 0; i < Info::MAX_CBUFS; i++) {
+        cbufs.push_back(ConstantBufferDescriptor{.index = i, .count = 1});
+
+        // The shader can use any possible access size
+        info.constant_buffer_used_sizes[i] = 0x10'000;
+    }
+}
+
+u32 GetElementSize(IR::Type& used_type, Shader::IR::Opcode opcode) {
+    switch (opcode) {
+    case IR::Opcode::GetCbufU8:
+    case IR::Opcode::GetCbufS8:
+        used_type |= IR::Type::U8;
+        return 1;
+    case IR::Opcode::GetCbufU16:
+    case IR::Opcode::GetCbufS16:
+        used_type |= IR::Type::U16;
+        return 2;
+    case IR::Opcode::GetCbufU32:
+        used_type |= IR::Type::U32;
+        return 4;
+    case IR::Opcode::GetCbufF32:
+        used_type |= IR::Type::F32;
+        return 4;
+    case IR::Opcode::GetCbufU32x2:
+        used_type |= IR::Type::U32x2;
+        return 8;
+    default:
+        throw InvalidArgument("Invalid opcode {}", opcode);
+    }
+}
+
 void GetPatch(Info& info, IR::Patch patch) {
     if (!IR::IsGeneric(patch)) {
         throw NotImplementedException("Reading non-generic patch {}", patch);
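When the buffer index comes from a register, the pass can no longer tell which buffers the shader touches, so AddRegisterIndexedLdc conservatively marks every slot as used at the maximum 64 KiB size. A tiny sketch of that bookkeeping, assuming MAX_CBUFS is 18 (hypothetical constants, not from the codebase):

#include <array>
#include <cstdint>

// Illustrative only: the conservative state set up by AddRegisterIndexedLdc.
// With an assumed MAX_CBUFS of 18, every buffer bit is set in the mask and
// every buffer is treated as fully used (0x10000 bytes = 64 KiB).
constexpr std::uint32_t kMaxCbufs = 18;
constexpr std::uint32_t kCbufMaskAllUsed = (1u << kMaxCbufs) - 1;

constexpr std::array<std::uint32_t, kMaxCbufs> AllBuffersFullyUsed() {
    std::array<std::uint32_t, kMaxCbufs> sizes{};
    for (std::uint32_t i = 0; i < kMaxCbufs; ++i) {
        sizes[i] = 0x10'000; // any access size/offset must be assumed possible
    }
    return sizes;
}

static_assert(kCbufMaskAllUsed == 0x3FFFFu);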
@@ -463,42 +503,18 @@ void VisitUsages(Info& info, IR::Inst& inst) {
     case IR::Opcode::GetCbufU32x2: {
         const IR::Value index{inst.Arg(0)};
         const IR::Value offset{inst.Arg(1)};
-        if (!index.IsImmediate()) {
-            throw NotImplementedException("Constant buffer with non-immediate index");
-        }
-        AddConstantBufferDescriptor(info, index.U32(), 1);
-        u32 element_size{};
-        switch (inst.GetOpcode()) {
-        case IR::Opcode::GetCbufU8:
-        case IR::Opcode::GetCbufS8:
-            info.used_constant_buffer_types |= IR::Type::U8;
-            element_size = 1;
-            break;
-        case IR::Opcode::GetCbufU16:
-        case IR::Opcode::GetCbufS16:
-            info.used_constant_buffer_types |= IR::Type::U16;
-            element_size = 2;
-            break;
-        case IR::Opcode::GetCbufU32:
-            info.used_constant_buffer_types |= IR::Type::U32;
-            element_size = 4;
-            break;
-        case IR::Opcode::GetCbufF32:
-            info.used_constant_buffer_types |= IR::Type::F32;
-            element_size = 4;
-            break;
-        case IR::Opcode::GetCbufU32x2:
-            info.used_constant_buffer_types |= IR::Type::U32x2;
-            element_size = 8;
-            break;
-        default:
-            break;
-        }
-        u32& size{info.constant_buffer_used_sizes[index.U32()]};
-        if (offset.IsImmediate()) {
-            size = Common::AlignUp(std::max(size, offset.U32() + element_size), 16u);
+        if (index.IsImmediate()) {
+            AddConstantBufferDescriptor(info, index.U32(), 1);
+            u32 element_size = GetElementSize(info.used_constant_buffer_types, inst.GetOpcode());
+            u32& size{info.constant_buffer_used_sizes[index.U32()]};
+            if (offset.IsImmediate()) {
+                size = Common::AlignUp(std::max(size, offset.U32() + element_size), 16u);
+            } else {
+                size = 0x10'000;
+            }
         } else {
-            size = 0x10'000;
+            AddRegisterIndexedLdc(info);
+            GetElementSize(info.used_indirect_cbuf_types, inst.GetOpcode());
         }
         break;
     }
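For an immediate binding, the pass still grows the tracked buffer size to cover the accessed element, rounded up to 16 bytes; a non-immediate binding now routes to AddRegisterIndexedLdc instead of throwing. A minimal sketch of the size update for the immediate case, where the rounding behaves like Common::AlignUp (hypothetical free function, not from the codebase):

#include <algorithm>
#include <cstdint>

// Illustrative only: how the tracked used size of a constant buffer grows when
// an immediate offset is visited. The end of the accessed element is compared
// with the current size and rounded up to a 16-byte boundary.
constexpr std::uint32_t GrowUsedSize(std::uint32_t current_size, std::uint32_t offset,
                                     std::uint32_t element_size) {
    const std::uint32_t end = offset + element_size;
    const std::uint32_t required = std::max(current_size, end);
    return (required + 15u) & ~15u; // AlignUp(required, 16)
}

static_assert(GrowUsedSize(0, 6, 4) == 16);   // a u32 at byte 6 needs 16 bytes
static_assert(GrowUsedSize(32, 40, 8) == 48); // a u32x2 at byte 40 grows to 48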
@@ -173,9 +173,11 @@ struct Info {
     bool uses_atomic_image_u32{};
     bool uses_shadow_lod{};
     bool uses_rescaling_uniform{};
+    bool uses_cbuf_indirect{};
 
     IR::Type used_constant_buffer_types{};
     IR::Type used_storage_buffer_types{};
+    IR::Type used_indirect_cbuf_types{};
 
     u32 constant_buffer_mask{};
     std::array<u32, MAX_CBUFS> constant_buffer_used_sizes{};