surface_base: Silence truncation warnings and minor renames and reordering
commit 2b30000a1e
parent 03d10ea3b4
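The warnings being silenced come from implicit narrowing: offsets and sizes computed as std::size_t were stored in u32 locals and members, which truncates on 64-bit targets. The changes below keep those values in std::size_t (the host_offset locals and the mipmap_sizes/mipmap_offsets members) and use an explicit static_cast where narrowing is actually intended, as in GetLayerMipmap. A minimal sketch of the pattern, assuming yuzu's u32 alias and a hypothetical GetHostMipmapLevelOffset stand-in:

#include <cstddef>
#include <cstdint>

using u32 = std::uint32_t; // fixed-width alias used throughout yuzu

// Hypothetical stand-in for SurfaceParams::GetHostMipmapLevelOffset(),
// which returns std::size_t in the real code.
std::size_t GetHostMipmapLevelOffset(u32 level) {
    return std::size_t{0x10000} * level;
}

int main() {
    // Before: std::size_t -> u32 narrows on 64-bit targets and raises
    // truncation warnings (e.g. MSVC C4267); a brace-initialized u32
    // would not even compile without the explicit cast.
    const u32 narrowed = static_cast<u32>(GetHostMipmapLevelOffset(3));

    // After: keep the offset in its natural width.
    const std::size_t host_offset{GetHostMipmapLevelOffset(3)};

    return narrowed == host_offset ? 0 : 1;
}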
@@ -18,17 +18,19 @@ MICROPROFILE_DEFINE(GPU_Flush_Texture, "GPU", "Texture Flush", MP_RGB(128, 192,
 using Tegra::Texture::ConvertFromGuestToHost;
 using VideoCore::MortonSwizzleMode;
 
-SurfaceBaseImpl::SurfaceBaseImpl(const GPUVAddr gpu_vaddr, const SurfaceParams& params)
-    : gpu_addr{gpu_vaddr}, params{params}, mipmap_sizes{params.num_levels},
-      mipmap_offsets{params.num_levels}, layer_size{params.GetGuestLayerSize()},
-      memory_size{params.GetGuestSizeInBytes()}, host_memory_size{params.GetHostSizeInBytes()} {
-    u32 offset = 0;
-    mipmap_offsets.resize(params.num_levels);
-    mipmap_sizes.resize(params.num_levels);
-    for (u32 i = 0; i < params.num_levels; i++) {
-        mipmap_offsets[i] = offset;
-        mipmap_sizes[i] = params.GetGuestMipmapSize(i);
-        offset += mipmap_sizes[i];
+SurfaceBaseImpl::SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params)
+    : params{params}, gpu_addr{gpu_addr}, layer_size{params.GetGuestLayerSize()},
+      guest_memory_size{params.GetGuestSizeInBytes()}, host_memory_size{
+          params.GetHostSizeInBytes()} {
+    mipmap_offsets.reserve(params.num_levels);
+    mipmap_sizes.reserve(params.num_levels);
+
+    std::size_t offset = 0;
+    for (u32 level = 0; level < params.num_levels; ++level) {
+        const std::size_t mipmap_size{params.GetGuestMipmapSize(level)};
+        mipmap_sizes.push_back(mipmap_size);
+        mipmap_offsets.push_back(offset);
+        offset += mipmap_size;
     }
 }
 
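The constructor rework also swaps the resize-then-index pattern for reserve/push_back and accumulates the running offset in std::size_t end to end. A self-contained sketch of the same table build, with GetGuestMipmapSize as a toy stand-in:

#include <cstddef>
#include <cstdint>
#include <vector>

using u32 = std::uint32_t;

// Toy stand-in for SurfaceParams::GetGuestMipmapSize(): each mip level
// is a quarter of the previous one.
std::size_t GetGuestMipmapSize(u32 level) {
    return std::size_t{0x40000} >> (2 * level);
}

int main() {
    const u32 num_levels = 5;
    std::vector<std::size_t> mipmap_sizes;
    std::vector<std::size_t> mipmap_offsets;
    mipmap_sizes.reserve(num_levels);
    mipmap_offsets.reserve(num_levels);

    // The running offset never leaves std::size_t, so nothing narrows.
    std::size_t offset = 0;
    for (u32 level = 0; level < num_levels; ++level) {
        const std::size_t mipmap_size{GetGuestMipmapSize(level)};
        mipmap_sizes.push_back(mipmap_size);
        mipmap_offsets.push_back(offset);
        offset += mipmap_size;
    }
    return offset != 0 ? 0 : 1; // offset now equals the total guest size
}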
@@ -44,7 +46,7 @@ void SurfaceBaseImpl::SwizzleFunc(MortonSwizzleMode mode, u8* memory, const Surf
     std::size_t host_offset{0};
     const std::size_t guest_stride = layer_size;
     const std::size_t host_stride = params.GetHostLayerSize(level);
-    for (u32 layer = 0; layer < params.depth; layer++) {
+    for (u32 layer = 0; layer < params.depth; ++layer) {
         MortonSwizzle(mode, params.pixel_format, width, block_height, height, block_depth, 1,
                       params.tile_width_spacing, buffer + host_offset, memory + guest_offset);
         guest_offset += guest_stride;
@@ -60,12 +62,12 @@ void SurfaceBaseImpl::SwizzleFunc(MortonSwizzleMode mode, u8* memory, const Surf
 void SurfaceBaseImpl::LoadBuffer(Tegra::MemoryManager& memory_manager,
                                  std::vector<u8>& staging_buffer) {
     MICROPROFILE_SCOPE(GPU_Load_Texture);
-    auto host_ptr = memory_manager.GetPointer(gpu_addr);
+    const auto host_ptr{memory_manager.GetPointer(gpu_addr)};
     if (params.is_tiled) {
         ASSERT_MSG(params.block_width == 1, "Block width is defined as {} on texture target {}",
                    params.block_width, static_cast<u32>(params.target));
         for (u32 level = 0; level < params.num_levels; ++level) {
-            const u32 host_offset = params.GetHostMipmapLevelOffset(level);
+            const std::size_t host_offset{params.GetHostMipmapLevelOffset(level)};
             SwizzleFunc(MortonSwizzleMode::MortonToLinear, host_ptr, params,
                         staging_buffer.data() + host_offset, level);
         }
@@ -91,7 +93,7 @@ void SurfaceBaseImpl::LoadBuffer(Tegra::MemoryManager& memory_manager,
         }
 
         for (u32 level = 0; level < params.num_levels; ++level) {
-            const u32 host_offset = params.GetHostMipmapLevelOffset(level);
+            const std::size_t host_offset{params.GetHostMipmapLevelOffset(level)};
             ConvertFromGuestToHost(staging_buffer.data() + host_offset, params.pixel_format,
                                    params.GetMipWidth(level), params.GetMipHeight(level),
                                    params.GetMipDepth(level), true, true);
@@ -105,7 +107,7 @@ void SurfaceBaseImpl::FlushBuffer(Tegra::MemoryManager& memory_manager,
     if (params.is_tiled) {
         ASSERT_MSG(params.block_width == 1, "Block width is defined as {}", params.block_width);
         for (u32 level = 0; level < params.num_levels; ++level) {
-            const u32 host_offset = params.GetHostMipmapLevelOffset(level);
+            const std::size_t host_offset{params.GetHostMipmapLevelOffset(level)};
             SwizzleFunc(MortonSwizzleMode::LinearToMorton, host_ptr, params,
                         staging_buffer.data() + host_offset, level);
         }
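The remaining hunks are the matching changes on the surface_base header: the memory_size member becomes guest_memory_size, the mipmap tables widen to std::size_t, and GetLayerMipmap gains explicit casts and early returns.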
@@ -78,7 +78,7 @@ public:
 
     void SetCacheAddr(const CacheAddr new_addr) {
         cache_addr = new_addr;
-        cache_addr_end = new_addr + memory_size;
+        cache_addr_end = new_addr + guest_memory_size;
     }
 
     const SurfaceParams& GetSurfaceParams() const {
@@ -86,7 +86,7 @@ public:
     }
 
     std::size_t GetSizeInBytes() const {
-        return memory_size;
+        return guest_memory_size;
    }
 
     std::size_t GetHostSizeInBytes() const {
@@ -135,17 +135,19 @@ public:
     }
 
     std::optional<std::pair<u32, u32>> GetLayerMipmap(const GPUVAddr candidate_gpu_addr) const {
-        if (candidate_gpu_addr < gpu_addr)
+        if (candidate_gpu_addr < gpu_addr) {
             return {};
-        const GPUVAddr relative_address = candidate_gpu_addr - gpu_addr;
-        const u32 layer = relative_address / layer_size;
+        }
+        const auto relative_address{static_cast<GPUVAddr>(candidate_gpu_addr - gpu_addr)};
+        const auto layer{static_cast<u32>(relative_address / layer_size)};
         const GPUVAddr mipmap_address = relative_address - layer_size * layer;
         const auto mipmap_it =
             binary_find(mipmap_offsets.begin(), mipmap_offsets.end(), mipmap_address);
-        if (mipmap_it != mipmap_offsets.end()) {
-            return {{layer, std::distance(mipmap_offsets.begin(), mipmap_it)}};
+        if (mipmap_it == mipmap_offsets.end()) {
+            return {};
         }
-        return {};
+        const auto level{static_cast<u32>(std::distance(mipmap_offsets.begin(), mipmap_it))};
+        return std::make_pair(layer, level);
     }
 
     std::vector<CopyParams> BreakDown(const SurfaceParams& in_params) const {
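GetLayerMipmap is the interesting hunk: the layer comes from dividing the relative address by the layer stride, and the level from a binary search over the offset table built in the constructor. Below is a simplified, self-contained model of the new logic; binary_find is assumed to behave like std::lower_bound plus an equality check, and the type aliases are assumptions, not taken from the commit:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
#include <utility>
#include <vector>

using u32 = std::uint32_t;
using GPUVAddr = std::uint64_t; // assumption: 64-bit GPU virtual addresses

// Assumed shape of yuzu's binary_find helper: binary search over a sorted
// range, yielding an iterator to an exact match or last when absent.
template <typename It, typename T>
It binary_find(It first, It last, const T& value) {
    const It it = std::lower_bound(first, last, value);
    return (it != last && *it == value) ? it : last;
}

// Simplified model of the reworked GetLayerMipmap(): map a candidate
// address to a (layer, level) pair via the precomputed offset table.
std::optional<std::pair<u32, u32>> GetLayerMipmap(GPUVAddr gpu_addr, std::size_t layer_size,
                                                  const std::vector<std::size_t>& mipmap_offsets,
                                                  GPUVAddr candidate_gpu_addr) {
    if (candidate_gpu_addr < gpu_addr) {
        return {};
    }
    const auto relative_address{static_cast<GPUVAddr>(candidate_gpu_addr - gpu_addr)};
    // The static_cast documents the narrowing the old code did implicitly,
    // which is exactly the truncation warning this commit silences.
    const auto layer{static_cast<u32>(relative_address / layer_size)};
    const auto mipmap_address{static_cast<std::size_t>(relative_address - layer_size * layer)};
    const auto mipmap_it =
        binary_find(mipmap_offsets.begin(), mipmap_offsets.end(), mipmap_address);
    if (mipmap_it == mipmap_offsets.end()) {
        return {};
    }
    const auto level{static_cast<u32>(std::distance(mipmap_offsets.begin(), mipmap_it))};
    return std::make_pair(layer, level);
}

int main() {
    // Two layers of 0x1000 bytes; three mip levels at offsets 0/0x800/0xc00.
    const std::vector<std::size_t> offsets{0x0, 0x800, 0xc00};
    const auto result = GetLayerMipmap(0x10000, 0x1000, offsets, 0x11800);
    return (result && result->first == 1 && result->second == 1) ? 0 : 1;
}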
@@ -169,7 +171,7 @@ public:
 
         } else {
             result.reserve(mipmaps);
-            for (std::size_t level = 0; level < mipmaps; level++) {
+            for (u32 level = 0; level < mipmaps; level++) {
                 const u32 width{std::min(params.GetMipWidth(level), in_params.GetMipWidth(level))};
                 const u32 height{
                     std::min(params.GetMipHeight(level), in_params.GetMipHeight(level))};
@@ -181,21 +183,22 @@ public:
     }
 
 protected:
-    explicit SurfaceBaseImpl(const GPUVAddr gpu_vaddr, const SurfaceParams& params);
+    explicit SurfaceBaseImpl(GPUVAddr gpu_addr, const SurfaceParams& params);
     ~SurfaceBaseImpl() = default;
 
     virtual void DecorateSurfaceName() = 0;
 
     const SurfaceParams params;
-    GPUVAddr gpu_addr{};
-    std::vector<u32> mipmap_sizes;
-    std::vector<u32> mipmap_offsets;
     const std::size_t layer_size;
-    const std::size_t memory_size;
+    const std::size_t guest_memory_size;
     const std::size_t host_memory_size;
-    CacheAddr cache_addr;
+    GPUVAddr gpu_addr{};
+    CacheAddr cache_addr{};
     CacheAddr cache_addr_end{};
-    VAddr cpu_addr;
+    VAddr cpu_addr{};
 
+    std::vector<std::size_t> mipmap_sizes;
+    std::vector<std::size_t> mipmap_offsets;
+
 private:
     void SwizzleFunc(MortonSwizzleMode mode, u8* memory, const SurfaceParams& params, u8* buffer,