texture_cache: Address Feedback

parent 30b176f92b
commit 3b9d89839d
@@ -75,6 +75,7 @@ add_library(common STATIC
     assert.h
     detached_tasks.cpp
     detached_tasks.h
+    binary_find.h
     bit_field.h
     bit_util.h
     cityhash.cpp
src/common/binary_find.h (new file, 21 lines)

@@ -0,0 +1,21 @@
+// Copyright 2019 yuzu emulator team
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <algorithm>
+
+namespace Common {
+
+template <class ForwardIt, class T, class Compare = std::less<>>
+ForwardIt BinaryFind(ForwardIt first, ForwardIt last, const T& value, Compare comp = {}) {
+    // Note: BOTH type T and the type after ForwardIt is dereferenced
+    // must be implicitly convertible to BOTH Type1 and Type2, used in Compare.
+    // This is stricter than lower_bound requirement (see above)
+
+    first = std::lower_bound(first, last, value, comp);
+    return first != last && !comp(value, *first) ? first : last;
+}
+
+} // namespace Common
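For context, here is a minimal usage sketch of Common::BinaryFind on a sorted range; the container, values, and function name below are illustrative, not part of the commit:

#include <vector>

#include "common/binary_find.h"

bool Example() {
    // Hypothetical sorted data; BinaryFind assumes the range is already sorted.
    const std::vector<int> sorted{1, 3, 5, 7};

    // Unlike std::lower_bound, BinaryFind returns `last` when the value is
    // absent, so comparing against end() answers "was it found?" directly.
    const auto it = Common::BinaryFind(sorted.begin(), sorted.end(), 5);
    return it != sorted.end(); // true: 5 is present
}

Returning last on a miss mirrors std::find, so call sites can binary-search a sorted container while keeping the familiar iterator != end() check.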
@@ -61,14 +61,4 @@ constexpr u32 MakeMagic(char a, char b, char c, char d) {
     return a | b << 8 | c << 16 | d << 24;
 }
 
-template <class ForwardIt, class T, class Compare = std::less<>>
-ForwardIt BinaryFind(ForwardIt first, ForwardIt last, const T& value, Compare comp = {}) {
-    // Note: BOTH type T and the type after ForwardIt is dereferenced
-    // must be implicitly convertible to BOTH Type1 and Type2, used in Compare.
-    // This is stricter than lower_bound requirement (see above)
-
-    first = std::lower_bound(first, last, value, comp);
-    return first != last && !comp(value, *first) ? first : last;
-}
-
 } // namespace Common
@@ -103,14 +103,16 @@ constexpr std::tuple<const char*, const char*, u32> GetPrimitiveDescription(GLen
 /// Calculates the size of a program stream
 std::size_t CalculateProgramSize(const GLShader::ProgramCode& program) {
     constexpr std::size_t start_offset = 10;
-    constexpr u64 key = 0xE2400FFFFF07000FULL;
+    // This is the encoded version of BRA that jumps to itself. All Nvidia
+    // shaders end with one.
+    constexpr u64 self_jumping_branch = 0xE2400FFFFF07000FULL;
     constexpr u64 mask = 0xFFFFFFFFFF7FFFFFULL;
     std::size_t offset = start_offset;
     std::size_t size = start_offset * sizeof(u64);
     while (offset < program.size()) {
         const u64 instruction = program[offset];
         if (!IsSchedInstruction(offset, start_offset)) {
-            if ((instruction & mask) == key) {
+            if ((instruction & mask) == self_jumping_branch) {
                 // End on Maxwell's "nop" instruction
                 break;
             }
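As a standalone illustration of the renamed constant's role, here is a trimmed-down sketch of the scan; it omits the sched-instruction handling and the start offset, and the helper name is hypothetical:

#include <cstddef>
#include <cstdint>
#include <vector>

// Per the comment in the diff, every NVIDIA shader stream ends with a BRA
// that branches to itself; the size scan stops once it sees that instruction.
std::size_t CountInstructionsUntilEnd(const std::vector<std::uint64_t>& program) {
    constexpr std::uint64_t self_jumping_branch = 0xE2400FFFFF07000FULL;
    constexpr std::uint64_t mask = 0xFFFFFFFFFF7FFFFFULL;

    std::size_t count = 0;
    for (const std::uint64_t instruction : program) {
        if ((instruction & mask) == self_jumping_branch) {
            break; // reached the trailing self-jumping BRA
        }
        ++count;
    }
    return count;
}

Renaming key to self_jumping_branch makes the intent of the comparison readable at the call site without chasing the constant's definition.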
@@ -267,7 +267,7 @@ void CachedSurface::DownloadTexture(std::vector<u8>& staging_buffer) {
     }
 }
 
-void CachedSurface::UploadTexture(std::vector<u8>& staging_buffer) {
+void CachedSurface::UploadTexture(const std::vector<u8>& staging_buffer) {
     MICROPROFILE_SCOPE(OpenGL_Texture_Upload);
     SCOPE_EXIT({ glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); });
     for (u32 level = 0; level < params.emulated_levels; ++level) {
@@ -275,7 +275,7 @@ void CachedSurface::UploadTexture(std::vector<u8>& staging_buffer) {
     }
 }
 
-void CachedSurface::UploadTextureMipmap(u32 level, std::vector<u8>& staging_buffer) {
+void CachedSurface::UploadTextureMipmap(u32 level, const std::vector<u8>& staging_buffer) {
     glPixelStorei(GL_UNPACK_ALIGNMENT, std::min(8U, params.GetRowAlignment(level)));
     glPixelStorei(GL_UNPACK_ROW_LENGTH, static_cast<GLint>(params.GetMipWidth(level)));
 
@@ -284,7 +284,7 @@ void CachedSurface::UploadTextureMipmap(u32 level, std::vector<u8>& staging_buff
     const std::size_t mip_offset = compression_type == SurfaceCompression::Converted
                                        ? params.GetConvertedMipmapOffset(level)
                                        : params.GetHostMipmapLevelOffset(level);
-    u8* buffer{staging_buffer.data() + mip_offset};
+    const u8* buffer{staging_buffer.data() + mip_offset};
     if (is_compressed) {
         const auto image_size{static_cast<GLsizei>(params.GetHostMipmapSize(level))};
         switch (params.target) {
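The pointer change follows directly from the new parameter type: data() on a const std::vector<u8> yields a pointer to const, so the local must become const u8*. A minimal illustration, not taken from the commit:

#include <cstddef>
#include <cstdint>
#include <vector>

void Illustrate(const std::vector<std::uint8_t>& staging_buffer, std::size_t mip_offset) {
    // A const vector only hands out const element access, so the pointer into
    // the staging data must also point to const bytes.
    const std::uint8_t* buffer = staging_buffer.data() + mip_offset;
    (void)buffer; // uploads only read through this pointer
}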
@@ -39,7 +39,7 @@ public:
     explicit CachedSurface(GPUVAddr gpu_addr, const SurfaceParams& params);
     ~CachedSurface();
 
-    void UploadTexture(std::vector<u8>& staging_buffer) override;
+    void UploadTexture(const std::vector<u8>& staging_buffer) override;
     void DownloadTexture(std::vector<u8>& staging_buffer) override;
 
     GLenum GetTarget() const {
@@ -57,7 +57,7 @@ protected:
     View CreateViewInner(const ViewParams& view_key, bool is_proxy);
 
 private:
-    void UploadTextureMipmap(u32 level, std::vector<u8>& staging_buffer);
+    void UploadTextureMipmap(u32 level, const std::vector<u8>& staging_buffer);
 
     GLenum internal_format{};
     GLenum format{};
@@ -72,14 +72,13 @@ private:
 
 class CachedSurfaceView final : public VideoCommon::ViewBase {
 public:
-    explicit CachedSurfaceView(CachedSurface& surface, const ViewParams& params,
-                               const bool is_proxy);
+    explicit CachedSurfaceView(CachedSurface& surface, const ViewParams& params, bool is_proxy);
     ~CachedSurfaceView();
 
     /// Attaches this texture view to the current bound GL_DRAW_FRAMEBUFFER
     void Attach(GLenum attachment, GLenum target) const;
 
-    GLuint GetTexture() {
+    GLuint GetTexture() const {
         if (is_proxy) {
             return surface.GetTexture();
         }
@@ -9,7 +9,7 @@
 #include <vector>
 
 #include "common/assert.h"
-#include "common/common_funcs.h"
+#include "common/binary_find.h"
 #include "common/common_types.h"
 #include "video_core/gpu.h"
 #include "video_core/morton.h"
@@ -191,7 +191,7 @@ private:
 template <typename TView>
 class SurfaceBase : public SurfaceBaseImpl {
 public:
-    virtual void UploadTexture(std::vector<u8>& staging_buffer) = 0;
+    virtual void UploadTexture(const std::vector<u8>& staging_buffer) = 0;
 
     virtual void DownloadTexture(std::vector<u8>& staging_buffer) = 0;
 
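This interface change has to land together with the CachedSurface overrides shown earlier. A minimal sketch of why, using placeholder class names rather than the real ones:

#include <cstdint>
#include <vector>

struct SurfaceInterfaceSketch {
    virtual ~SurfaceInterfaceSketch() = default;
    // The staging buffer is read-only during uploads, hence the const reference.
    virtual void UploadTexture(const std::vector<std::uint8_t>& staging_buffer) = 0;
};

struct BackendSurfaceSketch final : SurfaceInterfaceSketch {
    // `override` makes the compiler reject a stale non-const signature, so the
    // base class and every backend implementation must switch in the same commit.
    void UploadTexture(const std::vector<std::uint8_t>& staging_buffer) override {}
};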