gl_rasterizer_cache: Workaround for Texture2D -> Texture2DArray scenario.

This commit is contained in:
parent ce452049d3 · commit fefb003b23
@@ -738,7 +738,7 @@ u32 RasterizerOpenGL::SetupTextures(Maxwell::ShaderStage stage, Shader& shader,
         }
 
         texture_samplers[current_bindpoint].SyncWithConfig(texture.tsc);
-        Surface surface = res_cache.GetTextureSurface(texture);
+        Surface surface = res_cache.GetTextureSurface(texture, entry);
         if (surface != nullptr) {
             state.texture_units[current_bindpoint].texture = surface->Texture().handle;
             state.texture_units[current_bindpoint].target = surface->Target();
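For orientation, `entry` is the sampler as the shader decompiler recorded it, while `texture` is the TIC-backed configuration the guest bound; SetupTextures pairs the two by bindpoint. A rough sketch of that pairing follows, where every name not visible in the diff (the `entries` container and the `GetStageTexture`/`GetOffset` accessors) is an assumption about the surrounding code rather than verbatim source:

// Sketch of the loop around the changed line; names outside the diff are
// assumptions, not verbatim yuzu code.
for (u32 bindpoint = 0; bindpoint < entries.size(); ++bindpoint) {
    const auto& entry = entries[bindpoint];  // shader-side sampler metadata
    const u32 current_bindpoint = current_unit + bindpoint;
    const auto texture = maxwell3d.GetStageTexture(stage, entry.GetOffset());

    texture_samplers[current_bindpoint].SyncWithConfig(texture.tsc);
    // Both halves of the pair now reach the cache, which can reconcile the
    // TIC's declared target with how the shader actually samples it.
    Surface surface = res_cache.GetTextureSurface(texture, entry);
}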
@@ -41,7 +41,7 @@ static VAddr TryGetCpuAddr(Tegra::GPUVAddr gpu_addr) {
 }
 
 /*static*/ SurfaceParams SurfaceParams::CreateForTexture(
-    const Tegra::Texture::FullTextureInfo& config) {
+    const Tegra::Texture::FullTextureInfo& config, const GLShader::SamplerEntry& entry) {
     SurfaceParams params{};
     params.addr = TryGetCpuAddr(config.tic.Address());
     params.is_tiled = config.tic.IsTiled();
@@ -61,8 +61,19 @@ static VAddr TryGetCpuAddr(Tegra::GPUVAddr gpu_addr) {
         params.depth = 1;
         break;
     case SurfaceTarget::Texture3D:
         params.depth = config.tic.Depth();
         break;
+    case SurfaceTarget::Texture2DArray:
+        params.depth = config.tic.Depth();
+        if (!entry.IsArray()) {
+            // TODO(bunnei): We have seen games re-use a Texture2D as Texture2DArray with depth of
+            // one, but sample the texture in the shader as if it were not an array texture. This
+            // probably is valid on hardware, but we still need to write a test to confirm this. In
+            // emulation, the workaround here is to continue to treat this as a Texture2D. An
+            // example game that does this is Super Mario Odyssey (in Cloud Kingdom).
+            ASSERT(params.depth == 1);
+            params.target = SurfaceTarget::Texture2D;
+        }
+        break;
     default:
         LOG_CRITICAL(HW_GPU, "Unknown depth for target={}", static_cast<u32>(params.target));
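To make the scenario the TODO describes concrete, here is a self-contained sketch of the demotion rule in isolation (stand-in types throughout; the real logic lives in SurfaceParams::CreateForTexture above). It models a game that binds a depth-1 Texture2DArray but samples it through a non-array sampler:

// Minimal, compilable sketch of the workaround; all types are local
// stand-ins for yuzu's real SamplerEntry/SurfaceParams machinery.
#include <cassert>

enum class SurfaceTarget { Texture2D, Texture2DArray };

struct SamplerEntry {
    bool is_array;
    bool IsArray() const { return is_array; }
};

SurfaceTarget ResolveTarget(SurfaceTarget tic_target, unsigned depth,
                            const SamplerEntry& entry) {
    if (tic_target == SurfaceTarget::Texture2DArray && !entry.IsArray()) {
        // Depth-1 "array" sampled as plain 2D: demote so the GL texture
        // object matches the shader's non-array sampler.
        assert(depth == 1);
        return SurfaceTarget::Texture2D;
    }
    return tic_target;
}

int main() {
    // Cloud Kingdom-style case: the TIC declares Texture2DArray with depth 1,
    // but the shader's sampler is not an array sampler.
    const SamplerEntry non_array{false};
    assert(ResolveTarget(SurfaceTarget::Texture2DArray, 1, non_array) ==
           SurfaceTarget::Texture2D);
    // A genuine array sampler keeps the array target untouched.
    const SamplerEntry array_sampler{true};
    assert(ResolveTarget(SurfaceTarget::Texture2DArray, 6, array_sampler) ==
           SurfaceTarget::Texture2DArray);
}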
@@ -726,8 +737,9 @@ RasterizerCacheOpenGL::RasterizerCacheOpenGL() {
     copy_pbo.Create();
 }
 
-Surface RasterizerCacheOpenGL::GetTextureSurface(const Tegra::Texture::FullTextureInfo& config) {
-    return GetSurface(SurfaceParams::CreateForTexture(config));
+Surface RasterizerCacheOpenGL::GetTextureSurface(const Tegra::Texture::FullTextureInfo& config,
+                                                 const GLShader::SamplerEntry& entry) {
+    return GetSurface(SurfaceParams::CreateForTexture(config, entry));
 }
 
 Surface RasterizerCacheOpenGL::GetDepthBufferSurface(bool preserve_contents) {
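The reason the cache demotes the surface, rather than the shader being patched, is an OpenGL constraint: a texture object's target is fixed by its first bind, and reading a texture unit through a sampler type that does not match the bound target is undefined. A minimal illustration of the conflicting combination (illustrative only; assumes an active GL context and a loaded function-pointer set):

// Illustrative only; requires an active OpenGL context.
GLuint handle = 0;
glGenTextures(1, &handle);
// Binding as GL_TEXTURE_2D_ARRAY fixes this object's dimensionality; a
// 'uniform sampler2D' reading from its unit would then be undefined, and
// rebinding the same name as GL_TEXTURE_2D is a GL_INVALID_OPERATION error.
// Creating the cached surface as GL_TEXTURE_2D from the start keeps the
// texture object compatible with the shader's non-array sampler.
glBindTexture(GL_TEXTURE_2D_ARRAY, handle);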
@@ -15,6 +15,7 @@
 #include "video_core/engines/maxwell_3d.h"
 #include "video_core/rasterizer_cache.h"
 #include "video_core/renderer_opengl/gl_resource_manager.h"
+#include "video_core/renderer_opengl/gl_shader_gen.h"
 #include "video_core/textures/texture.h"
 
 namespace OpenGL {
@@ -704,7 +705,8 @@ struct SurfaceParams {
     }
 
     /// Creates SurfaceParams from a texture configuration
-    static SurfaceParams CreateForTexture(const Tegra::Texture::FullTextureInfo& config);
+    static SurfaceParams CreateForTexture(const Tegra::Texture::FullTextureInfo& config,
+                                          const GLShader::SamplerEntry& entry);
 
     /// Creates SurfaceParams from a framebuffer configuration
     static SurfaceParams CreateForFramebuffer(std::size_t index);
@@ -806,7 +808,8 @@ public:
     RasterizerCacheOpenGL();
 
     /// Get a surface based on the texture configuration
-    Surface GetTextureSurface(const Tegra::Texture::FullTextureInfo& config);
+    Surface GetTextureSurface(const Tegra::Texture::FullTextureInfo& config,
+                              const GLShader::SamplerEntry& entry);
 
     /// Get the depth surface based on the framebuffer configuration
     Surface GetDepthBufferSurface(bool preserve_contents);