Mirror of https://github.com/yuzu-emu/yuzu-android.git
Merge pull request #4973 from ameerj/nvdec-opt
nvdec: Reuse allocated buffers and general cleanup
commit 9cae3e6e90
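The recurring change in the diff below is to hoist per-frame scratch allocations into long-lived members that are reused across frames, and to pass a single u32 argument where a std::vector<u32> wrapper used to be passed. A minimal sketch of the reuse pattern, with illustrative names rather than the PR's actual classes:

#include <cstddef>
#include <vector>

// Hypothetical frame processor showing the reuse pattern from the PR title:
// the scratch buffer is a member that is resized only when the frame geometry
// changes, instead of being reallocated on every call.
class FrameProcessor {
public:
    void ProcessFrame(const unsigned char* src, std::size_t width, std::size_t height) {
        const std::size_t required = width * height * 4; // e.g. RGBA output
        if (scratch.size() != required) {
            scratch.resize(required); // allocates only when the size actually changes
        }
        // ... convert src into scratch, then hand the buffer off ...
        (void)src;
    }

private:
    std::vector<unsigned char> scratch; // reused across frames
};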
@@ -34,8 +34,7 @@ NvResult nvhost_nvdec::Ioctl1(Ioctl command, const std::vector<u8>& input,
     case 0xa: {
         if (command.length == 0x1c) {
             LOG_INFO(Service_NVDRV, "NVDEC video stream ended");
-            Tegra::ChCommandHeaderList cmdlist(1);
-            cmdlist[0] = Tegra::ChCommandHeader{0xDEADB33F};
+            Tegra::ChCommandHeaderList cmdlist{{0xDEADB33F}};
             system.GPU().PushCommandBuffer(cmdlist);
         }
         return UnmapBuffer(input, output);
@@ -28,8 +28,13 @@ NvResult nvhost_vic::Ioctl1(Ioctl command, const std::vector<u8>& input, std::ve
         return GetWaitbase(input, output);
     case 0x9:
         return MapBuffer(input, output);
-    case 0xa:
+    case 0xa: {
+        if (command.length == 0x1c) {
+            Tegra::ChCommandHeaderList cmdlist{{0xDEADB33F}};
+            system.GPU().PushCommandBuffer(cmdlist);
+        }
         return UnmapBuffer(input, output);
+    }
     default:
         break;
     }
@@ -37,59 +37,43 @@ CDmaPusher::CDmaPusher(GPU& gpu_)
 
 CDmaPusher::~CDmaPusher() = default;
 
-void CDmaPusher::Push(ChCommandHeaderList&& entries) {
-    cdma_queue.push(std::move(entries));
-}
-
-void CDmaPusher::DispatchCalls() {
-    while (!cdma_queue.empty()) {
-        Step();
-    }
-}
-
-void CDmaPusher::Step() {
-    const auto entries{cdma_queue.front()};
-    cdma_queue.pop();
-
-    std::vector<u32> values(entries.size());
-    std::memcpy(values.data(), entries.data(), entries.size() * sizeof(u32));
-
-    for (const u32 value : values) {
+void CDmaPusher::ProcessEntries(ChCommandHeaderList&& entries) {
+    for (const auto& value : entries) {
         if (mask != 0) {
             const auto lbs = static_cast<u32>(std::countr_zero(mask));
             mask &= ~(1U << lbs);
-            ExecuteCommand(static_cast<u32>(offset + lbs), value);
+            ExecuteCommand(offset + lbs, value.raw);
             continue;
         } else if (count != 0) {
             --count;
-            ExecuteCommand(static_cast<u32>(offset), value);
+            ExecuteCommand(offset, value.raw);
             if (incrementing) {
                 ++offset;
             }
             continue;
         }
-        const auto mode = static_cast<ChSubmissionMode>((value >> 28) & 0xf);
+        const auto mode = value.submission_mode.Value();
         switch (mode) {
         case ChSubmissionMode::SetClass: {
-            mask = value & 0x3f;
-            offset = (value >> 16) & 0xfff;
-            current_class = static_cast<ChClassId>((value >> 6) & 0x3ff);
+            mask = value.value & 0x3f;
+            offset = value.method_offset;
+            current_class = static_cast<ChClassId>((value.value >> 6) & 0x3ff);
             break;
         }
         case ChSubmissionMode::Incrementing:
         case ChSubmissionMode::NonIncrementing:
-            count = value & 0xffff;
-            offset = (value >> 16) & 0xfff;
+            count = value.value;
+            offset = value.method_offset;
             incrementing = mode == ChSubmissionMode::Incrementing;
             break;
         case ChSubmissionMode::Mask:
-            mask = value & 0xffff;
-            offset = (value >> 16) & 0xfff;
+            mask = value.value;
+            offset = value.method_offset;
             break;
         case ChSubmissionMode::Immediate: {
-            const u32 data = value & 0xfff;
-            offset = (value >> 16) & 0xfff;
-            ExecuteCommand(static_cast<u32>(offset), data);
+            const u32 data = value.value & 0xfff;
+            offset = value.method_offset;
+            ExecuteCommand(offset, data);
             break;
         }
         default:
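The rewritten ProcessEntries above reads each command word through the ChCommandHeader BitField union instead of open-coding shifts and masks. A rough self-contained sketch of the same decoding, using plain accessor functions in place of yuzu's BitField template (field layout taken from the ChCommandHeader union shown further down in cdma_pusher.h):

#include <cstdint>

// Stand-in for the BitField-based decode: each accessor extracts one named
// field of the 32-bit Host1x command word.
struct CommandWord {
    std::uint32_t raw;

    std::uint32_t value() const { return raw & 0xffffU; }                // bits 0-15
    std::uint32_t method_offset() const { return (raw >> 16) & 0xfffU; } // bits 16-27
    std::uint32_t submission_mode() const { return (raw >> 28) & 0xfU; } // bits 28-31
};

// Usage, mirroring the cleaned-up loop above.
inline void DecodeExample(std::uint32_t word) {
    const CommandWord header{word};
    const auto mode = header.submission_mode();  // was (value >> 28) & 0xf
    const auto offset = header.method_offset();  // was (value >> 16) & 0xfff
    const auto count_or_mask = header.value();   // was value & 0xffff
    (void)mode;
    (void)offset;
    (void)count_or_mask;
}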
@@ -102,8 +86,8 @@ void CDmaPusher::Step() {
 void CDmaPusher::ExecuteCommand(u32 state_offset, u32 data) {
     switch (current_class) {
     case ChClassId::NvDec:
-        ThiStateWrite(nvdec_thi_state, state_offset, {data});
-        switch (static_cast<ThiMethod>(state_offset)) {
+        ThiStateWrite(nvdec_thi_state, offset, data);
+        switch (static_cast<ThiMethod>(offset)) {
         case ThiMethod::IncSyncpt: {
             LOG_DEBUG(Service_NVDRV, "NVDEC Class IncSyncpt Method");
             const auto syncpoint_id = static_cast<u32>(data & 0xFF);
@@ -120,7 +104,7 @@ void CDmaPusher::ExecuteCommand(u32 state_offset, u32 data) {
             LOG_DEBUG(Service_NVDRV, "NVDEC method 0x{:X}",
                       static_cast<u32>(nvdec_thi_state.method_0));
             nvdec_processor->ProcessMethod(static_cast<Nvdec::Method>(nvdec_thi_state.method_0),
-                                           {data});
+                                           data);
             break;
         default:
             break;
@@ -144,7 +128,7 @@ void CDmaPusher::ExecuteCommand(u32 state_offset, u32 data) {
         case ThiMethod::SetMethod1:
             LOG_DEBUG(Service_NVDRV, "VIC method 0x{:X}, Args=({})",
                       static_cast<u32>(vic_thi_state.method_0), data);
-            vic_processor->ProcessMethod(static_cast<Vic::Method>(vic_thi_state.method_0), {data});
+            vic_processor->ProcessMethod(static_cast<Vic::Method>(vic_thi_state.method_0), data);
             break;
         default:
             break;
@@ -153,7 +137,7 @@ void CDmaPusher::ExecuteCommand(u32 state_offset, u32 data) {
     case ChClassId::Host1x:
         // This device is mainly for syncpoint synchronization
         LOG_DEBUG(Service_NVDRV, "Host1X Class Method");
-        host1x_processor->ProcessMethod(static_cast<Host1x::Method>(state_offset), {data});
+        host1x_processor->ProcessMethod(static_cast<Host1x::Method>(offset), data);
         break;
     default:
         UNIMPLEMENTED_MSG("Current class not implemented {:X}", static_cast<u32>(current_class));
@@ -161,10 +145,9 @@ void CDmaPusher::ExecuteCommand(u32 state_offset, u32 data) {
     }
 }
 
-void CDmaPusher::ThiStateWrite(ThiRegisters& state, u32 state_offset,
-                               const std::vector<u32>& arguments) {
-    u8* const state_offset_ptr = reinterpret_cast<u8*>(&state) + sizeof(u32) * state_offset;
-    std::memcpy(state_offset_ptr, arguments.data(), sizeof(u32) * arguments.size());
+void CDmaPusher::ThiStateWrite(ThiRegisters& state, u32 state_offset, u32 argument) {
+    u8* const offset_ptr = reinterpret_cast<u8*>(&state) + sizeof(u32) * state_offset;
+    std::memcpy(offset_ptr, &argument, sizeof(u32));
 }
 
 } // namespace Tegra
@@ -5,9 +5,7 @@
 #pragma once
 
 #include <memory>
-#include <unordered_map>
 #include <vector>
-#include <queue>
 
 #include "common/bit_field.h"
 #include "common/common_types.h"
@@ -16,9 +14,9 @@
 namespace Tegra {
 
 class GPU;
+class Host1x;
 class Nvdec;
 class Vic;
-class Host1x;
 
 enum class ChSubmissionMode : u32 {
     SetClass = 0,
@@ -48,16 +46,10 @@ enum class ChClassId : u32 {
     NvDec = 0xf0
 };
 
-enum class ChMethod : u32 {
-    Empty = 0,
-    SetMethod = 0x10,
-    SetData = 0x11,
-};
-
 union ChCommandHeader {
     u32 raw;
     BitField<0, 16, u32> value;
-    BitField<16, 12, ChMethod> method_offset;
+    BitField<16, 12, u32> method_offset;
     BitField<28, 4, ChSubmissionMode> submission_mode;
 };
 static_assert(sizeof(ChCommandHeader) == sizeof(u32), "ChCommand header is an invalid size");
@@ -99,21 +91,15 @@ public:
     explicit CDmaPusher(GPU& gpu_);
     ~CDmaPusher();
 
-    /// Push NVDEC command buffer entries into queue
-    void Push(ChCommandHeaderList&& entries);
-
-    /// Process queued command buffer entries
-    void DispatchCalls();
-
-    /// Process one queue element
-    void Step();
+    /// Process the command entry
+    void ProcessEntries(ChCommandHeaderList&& entries);
 
+private:
     /// Invoke command class devices to execute the command based on the current state
     void ExecuteCommand(u32 state_offset, u32 data);
 
-private:
     /// Write arguments value to the ThiRegisters member at the specified offset
-    void ThiStateWrite(ThiRegisters& state, u32 state_offset, const std::vector<u32>& arguments);
+    void ThiStateWrite(ThiRegisters& state, u32 offset, u32 argument);
 
     GPU& gpu;
     std::shared_ptr<Tegra::Nvdec> nvdec_processor;
@@ -124,13 +110,10 @@ private:
     ThiRegisters vic_thi_state{};
     ThiRegisters nvdec_thi_state{};
 
-    s32 count{};
-    s32 offset{};
+    u32 count{};
+    u32 offset{};
     u32 mask{};
     bool incrementing{};
-
-    // Queue of command lists to be processed
-    std::queue<ChCommandHeaderList> cdma_queue;
 };
 
 } // namespace Tegra
@@ -44,8 +44,10 @@ Codec::~Codec() {
 }
 
 void Codec::SetTargetCodec(NvdecCommon::VideoCodec codec) {
-    LOG_INFO(Service_NVDRV, "NVDEC video codec initialized to {}", codec);
-    current_codec = codec;
+    if (current_codec != codec) {
+        LOG_INFO(Service_NVDRV, "NVDEC video codec initialized to {}", static_cast<u32>(codec));
+        current_codec = codec;
+    }
 }
 
 void Codec::StateWrite(u32 offset, u64 arguments) {
@@ -55,7 +57,6 @@ void Codec::StateWrite(u32 offset, u64 arguments) {
 
 void Codec::Decode() {
-    bool is_first_frame = false;
 
     if (!initialized) {
         if (current_codec == NvdecCommon::VideoCodec::H264) {
             av_codec = avcodec_find_decoder(AV_CODEC_ID_H264);
@@ -12,16 +12,16 @@ Nvdec::Nvdec(GPU& gpu_) : gpu(gpu_), codec(std::make_unique<Codec>(gpu)) {}
 
 Nvdec::~Nvdec() = default;
 
-void Nvdec::ProcessMethod(Method method, const std::vector<u32>& arguments) {
+void Nvdec::ProcessMethod(Method method, u32 argument) {
     if (method == Method::SetVideoCodec) {
-        codec->StateWrite(static_cast<u32>(method), arguments[0]);
+        codec->StateWrite(static_cast<u32>(method), argument);
     } else {
-        codec->StateWrite(static_cast<u32>(method), static_cast<u64>(arguments[0]) << 8);
+        codec->StateWrite(static_cast<u32>(method), static_cast<u64>(argument) << 8);
     }
 
     switch (method) {
     case Method::SetVideoCodec:
-        codec->SetTargetCodec(static_cast<NvdecCommon::VideoCodec>(arguments[0]));
+        codec->SetTargetCodec(static_cast<NvdecCommon::VideoCodec>(argument));
         break;
     case Method::Execute:
         Execute();
@@ -23,7 +23,7 @@ public:
     ~Nvdec();
 
     /// Writes the method into the state, Invoke Execute() if encountered
-    void ProcessMethod(Method method, const std::vector<u32>& arguments);
+    void ProcessMethod(Method method, u32 argument);
 
     /// Return most recently decoded frame
     [[nodiscard]] AVFramePtr GetFrame();
@@ -18,18 +18,14 @@ extern "C" {
 namespace Tegra {
 
 Vic::Vic(GPU& gpu_, std::shared_ptr<Nvdec> nvdec_processor_)
-    : gpu(gpu_), nvdec_processor(std::move(nvdec_processor_)) {}
+    : gpu(gpu_),
+      nvdec_processor(std::move(nvdec_processor_)), converted_frame_buffer{nullptr, av_free} {}
 
 Vic::~Vic() = default;
 
-void Vic::VicStateWrite(u32 offset, u32 arguments) {
-    u8* const state_offset = reinterpret_cast<u8*>(&vic_state) + offset * sizeof(u32);
-    std::memcpy(state_offset, &arguments, sizeof(u32));
-}
-void Vic::ProcessMethod(Method method, const std::vector<u32>& arguments) {
-    LOG_DEBUG(HW_GPU, "Vic method 0x{:X}", method);
-    VicStateWrite(static_cast<u32>(method), arguments[0]);
-    const u64 arg = static_cast<u64>(arguments[0]) << 8;
+void Vic::ProcessMethod(Method method, u32 argument) {
+    LOG_DEBUG(HW_GPU, "Vic method 0x{:X}", static_cast<u32>(method));
+    const u64 arg = static_cast<u64>(argument) << 8;
     switch (method) {
     case Method::Execute:
         Execute();
|
||||
|
||||
void Vic::Execute() {
|
||||
if (output_surface_luma_address == 0) {
|
||||
LOG_ERROR(Service_NVDRV, "VIC Luma address not set. Received 0x{:X}",
|
||||
vic_state.output_surface.luma_offset);
|
||||
LOG_ERROR(Service_NVDRV, "VIC Luma address not set.");
|
||||
return;
|
||||
}
|
||||
const VicConfig config{gpu.MemoryManager().Read<u64>(config_struct_address + 0x20)};
|
||||
@@ -89,8 +84,10 @@ void Vic::Execute() {
         // Get Converted frame
         const std::size_t linear_size = frame->width * frame->height * 4;
 
-        using AVMallocPtr = std::unique_ptr<u8, decltype(&av_free)>;
-        AVMallocPtr converted_frame_buffer{static_cast<u8*>(av_malloc(linear_size)), av_free};
+        // Only allocate frame_buffer once per stream, as the size is not expected to change
+        if (!converted_frame_buffer) {
+            converted_frame_buffer = AVMallocPtr{static_cast<u8*>(av_malloc(linear_size)), av_free};
+        }
 
         const int converted_stride{frame->width * 4};
         u8* const converted_frame_buf_addr{converted_frame_buffer.get()};
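The guard above allocates the av_malloc'd conversion buffer once per stream and keeps it in a member. A hedged sketch of the same lazy-allocation pattern with av_free as the unique_ptr deleter; the surrounding class and names here are illustrative, not yuzu's:

#include <cstddef>
#include <cstdint>
#include <memory>

extern "C" {
#include <libavutil/mem.h> // av_malloc / av_free
}

// Keeps one av_malloc'd buffer alive for the whole stream; the unique_ptr
// releases it through av_free exactly once.
class FrameSink {
    using AVMallocPtr = std::unique_ptr<std::uint8_t, decltype(&av_free)>;

public:
    FrameSink() : converted_frame_buffer{nullptr, av_free} {}

    void Convert(std::size_t linear_size) {
        // Allocate on first use; the size is assumed not to change between frames.
        if (!converted_frame_buffer) {
            converted_frame_buffer =
                AVMallocPtr{static_cast<std::uint8_t*>(av_malloc(linear_size)), av_free};
        }
        // ... write the converted frame into converted_frame_buffer.get() ...
    }

private:
    AVMallocPtr converted_frame_buffer; // reused for every frame of the stream
};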
@@ -104,12 +101,12 @@ void Vic::Execute() {
             const u32 block_height = static_cast<u32>(config.block_linear_height_log2);
             const auto size = Tegra::Texture::CalculateSize(true, 4, frame->width, frame->height, 1,
                                                             block_height, 0);
-            std::vector<u8> swizzled_data(size);
+            luma_buffer.resize(size);
             Tegra::Texture::SwizzleSubrect(frame->width, frame->height, frame->width * 4,
-                                           frame->width, 4, swizzled_data.data(),
+                                           frame->width, 4, luma_buffer.data(),
                                            converted_frame_buffer.get(), block_height, 0, 0);
 
-            gpu.MemoryManager().WriteBlock(output_surface_luma_address, swizzled_data.data(), size);
+            gpu.MemoryManager().WriteBlock(output_surface_luma_address, luma_buffer.data(), size);
         } else {
             // send pitch linear frame
             gpu.MemoryManager().WriteBlock(output_surface_luma_address, converted_frame_buf_addr,
|
||||
const auto stride = frame->linesize[0];
|
||||
const auto half_stride = frame->linesize[1];
|
||||
|
||||
std::vector<u8> luma_buffer(aligned_width * surface_height);
|
||||
std::vector<u8> chroma_buffer(aligned_width * half_height);
|
||||
luma_buffer.resize(aligned_width * surface_height);
|
||||
chroma_buffer.resize(aligned_width * half_height);
|
||||
|
||||
// Populate luma buffer
|
||||
for (std::size_t y = 0; y < surface_height - 1; ++y) {
|
||||
std::size_t src = y * stride;
|
||||
std::size_t dst = y * aligned_width;
|
||||
const std::size_t src = y * stride;
|
||||
const std::size_t dst = y * aligned_width;
|
||||
|
||||
std::size_t size = surface_width;
|
||||
const std::size_t size = surface_width;
|
||||
|
||||
for (std::size_t offset = 0; offset < size; ++offset) {
|
||||
luma_buffer[dst + offset] = luma_ptr[src + offset];
|
||||
@@ -151,8 +148,8 @@ void Vic::Execute() {
 
         // Populate chroma buffer from both channels with interleaving.
         for (std::size_t y = 0; y < half_height; ++y) {
-            std::size_t src = y * half_stride;
-            std::size_t dst = y * aligned_width;
+            const std::size_t src = y * half_stride;
+            const std::size_t dst = y * aligned_width;
 
             for (std::size_t x = 0; x < half_width; ++x) {
                 chroma_buffer[dst + x * 2] = chroma_b_ptr[src + x];
@@ -15,43 +15,6 @@ namespace Tegra {
 class GPU;
 class Nvdec;
 
-struct PlaneOffsets {
-    u32 luma_offset{};
-    u32 chroma_u_offset{};
-    u32 chroma_v_offset{};
-};
-
-struct VicRegisters {
-    INSERT_PADDING_WORDS(64);
-    u32 nop{};
-    INSERT_PADDING_WORDS(15);
-    u32 pm_trigger{};
-    INSERT_PADDING_WORDS(47);
-    u32 set_application_id{};
-    u32 set_watchdog_timer{};
-    INSERT_PADDING_WORDS(17);
-    u32 context_save_area{};
-    u32 context_switch{};
-    INSERT_PADDING_WORDS(43);
-    u32 execute{};
-    INSERT_PADDING_WORDS(63);
-    std::array<std::array<PlaneOffsets, 8>, 8> surfacex_slots{};
-    u32 picture_index{};
-    u32 control_params{};
-    u32 config_struct_offset{};
-    u32 filter_struct_offset{};
-    u32 palette_offset{};
-    u32 hist_offset{};
-    u32 context_id{};
-    u32 fce_ucode_size{};
-    PlaneOffsets output_surface{};
-    u32 fce_ucode_offset{};
-    INSERT_PADDING_WORDS(4);
-    std::array<u32, 8> slot_context_id{};
-    INSERT_PADDING_WORDS(16);
-};
-static_assert(sizeof(VicRegisters) == 0x7A0, "VicRegisters is an invalid size");
-
 class Vic {
 public:
     enum class Method : u32 {
@@ -67,14 +30,11 @@ public:
     ~Vic();
 
     /// Write to the device state.
-    void ProcessMethod(Method method, const std::vector<u32>& arguments);
+    void ProcessMethod(Method method, u32 argument);
 
 private:
     void Execute();
 
-    void VicStateWrite(u32 offset, u32 arguments);
-    VicRegisters vic_state{};
-
     enum class VideoPixelFormat : u64_le {
         RGBA8 = 0x1f,
         BGRA8 = 0x20,
@@ -88,8 +48,6 @@ private:
         BitField<9, 2, u64_le> chroma_loc_vert;
         BitField<11, 4, u64_le> block_linear_kind;
         BitField<15, 4, u64_le> block_linear_height_log2;
-        BitField<19, 3, u64_le> reserved0;
-        BitField<22, 10, u64_le> reserved1;
         BitField<32, 14, u64_le> surface_width_minus1;
         BitField<46, 14, u64_le> surface_height_minus1;
     };
@@ -97,6 +55,13 @@ private:
     GPU& gpu;
     std::shared_ptr<Tegra::Nvdec> nvdec_processor;
 
+    /// Avoid reallocation of the following buffers every frame, as their
+    /// size does not change during a stream
+    using AVMallocPtr = std::unique_ptr<u8, decltype(&av_free)>;
+    AVMallocPtr converted_frame_buffer;
+    std::vector<u8> luma_buffer;
+    std::vector<u8> chroma_buffer;
+
     GPUVAddr config_struct_address{};
     GPUVAddr output_surface_luma_address{};
     GPUVAddr output_surface_chroma_u_address{};
@@ -30,8 +30,7 @@ MICROPROFILE_DEFINE(GPU_wait, "GPU", "Wait for the GPU", MP_RGB(128, 128, 192));
 
 GPU::GPU(Core::System& system_, bool is_async_, bool use_nvdec_)
     : system{system_}, memory_manager{std::make_unique<Tegra::MemoryManager>(system)},
-      dma_pusher{std::make_unique<Tegra::DmaPusher>(system, *this)},
-      cdma_pusher{std::make_unique<Tegra::CDmaPusher>(*this)}, use_nvdec{use_nvdec_},
+      dma_pusher{std::make_unique<Tegra::DmaPusher>(system, *this)}, use_nvdec{use_nvdec_},
       maxwell_3d{std::make_unique<Engines::Maxwell3D>(system, *memory_manager)},
       fermi_2d{std::make_unique<Engines::Fermi2D>()},
       kepler_compute{std::make_unique<Engines::KeplerCompute>(system, *memory_manager)},
@@ -494,8 +493,7 @@ void GPU::PushCommandBuffer(Tegra::ChCommandHeaderList& entries) {
     // TODO(ameerj): RE proper async nvdec operation
     // gpu_thread.SubmitCommandBuffer(std::move(entries));
 
-    cdma_pusher->Push(std::move(entries));
-    cdma_pusher->DispatchCalls();
+    cdma_pusher->ProcessEntries(std::move(entries));
 }
 
 void GPU::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
@@ -48,8 +48,7 @@ static void RunThread(Core::System& system, VideoCore::RendererBase& renderer,
             dma_pusher.DispatchCalls();
         } else if (auto* command_list = std::get_if<SubmitChCommandEntries>(&next.data)) {
             // NVDEC
-            cdma_pusher.Push(std::move(command_list->entries));
-            cdma_pusher.DispatchCalls();
+            cdma_pusher.ProcessEntries(std::move(command_list->entries));
         } else if (const auto* data = std::get_if<SwapBuffersCommand>(&next.data)) {
             renderer.SwapBuffers(data->framebuffer ? &*data->framebuffer : nullptr);
         } else if (std::holds_alternative<OnCommandListEndCommand>(next.data)) {