mirror of https://github.com/yuzu-mirror/yuzu

shader: Add partial rasterizer integration

parent 72990df7ba
commit 260743f371
@@ -1,15 +0,0 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/common_types.h"
#include "shader_recompiler/exception.h"
#include "shader_recompiler/frontend/maxwell/translate/impl/impl.h"

namespace Shader::Maxwell {

void TranslatorVisitor::EXIT(u64) {
    ir.Exit();
}

} // namespace Shader::Maxwell
@@ -0,0 +1,43 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/common_types.h"
#include "shader_recompiler/exception.h"
#include "shader_recompiler/frontend/maxwell/translate/impl/impl.h"

namespace Shader::Maxwell {
namespace {
void ExitFragment(TranslatorVisitor& v) {
    const ProgramHeader sph{v.env.SPH()};
    IR::Reg src_reg{IR::Reg::R0};
    for (u32 render_target = 0; render_target < 8; ++render_target) {
        const std::array<bool, 4> mask{sph.ps.EnabledOutputComponents(render_target)};
        for (u32 component = 0; component < 4; ++component) {
            if (!mask[component]) {
                continue;
            }
            v.ir.SetFragColor(render_target, component, v.F(src_reg));
            ++src_reg;
        }
    }
    if (sph.ps.omap.sample_mask != 0) {
        throw NotImplementedException("Sample mask");
    }
    if (sph.ps.omap.depth != 0) {
        throw NotImplementedException("Fragment depth");
    }
}
} // Anonymous namespace

void TranslatorVisitor::EXIT() {
    switch (env.ShaderStage()) {
    case Stage::Fragment:
        ExitFragment(*this);
        break;
    default:
        break;
    }
}

} // namespace Shader::Maxwell
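Note, not part of the diff: a minimal standalone sketch of the register assignment rule ExitFragment implies. The source register only advances for components that are enabled in the SPH output map, so fragment outputs are packed tightly starting at R0. SourceRegisterFor is a hypothetical helper name, not something in the commit.

#include <array>
#include <cstdint>
#include <optional>

// Returns the offset from R0 of the register that feeds render target `rt`,
// component `comp`, or std::nullopt if that component is never written.
std::optional<std::uint32_t> SourceRegisterFor(const std::array<std::array<bool, 4>, 8>& enabled,
                                               std::uint32_t rt, std::uint32_t comp) {
    std::uint32_t reg = 0;
    for (std::uint32_t r = 0; r < 8; ++r) {
        for (std::uint32_t c = 0; c < 4; ++c) {
            if (!enabled[r][c]) {
                continue; // disabled components do not consume a register
            }
            if (r == rt && c == comp) {
                return reg;
            }
            ++reg;
        }
    }
    return std::nullopt;
}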
@@ -0,0 +1,143 @@
// Copyright 2018 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <optional>

#include "common/bit_field.h"
#include "common/common_funcs.h"
#include "common/common_types.h"

namespace Shader {

enum class OutputTopology : u32 {
    PointList = 1,
    LineStrip = 6,
    TriangleStrip = 7,
};

enum class PixelImap : u8 {
    Unused = 0,
    Constant = 1,
    Perspective = 2,
    ScreenLinear = 3,
};

// Documentation in:
// http://download.nvidia.com/open-gpu-doc/Shader-Program-Header/1/Shader-Program-Header.html
struct ProgramHeader {
    union {
        BitField<0, 5, u32> sph_type;
        BitField<5, 5, u32> version;
        BitField<10, 4, u32> shader_type;
        BitField<14, 1, u32> mrt_enable;
        BitField<15, 1, u32> kills_pixels;
        BitField<16, 1, u32> does_global_store;
        BitField<17, 4, u32> sass_version;
        BitField<21, 5, u32> reserved;
        BitField<26, 1, u32> does_load_or_store;
        BitField<27, 1, u32> does_fp64;
        BitField<28, 4, u32> stream_out_mask;
    } common0;

    union {
        BitField<0, 24, u32> shader_local_memory_low_size;
        BitField<24, 8, u32> per_patch_attribute_count;
    } common1;

    union {
        BitField<0, 24, u32> shader_local_memory_high_size;
        BitField<24, 8, u32> threads_per_input_primitive;
    } common2;

    union {
        BitField<0, 24, u32> shader_local_memory_crs_size;
        BitField<24, 4, OutputTopology> output_topology;
        BitField<28, 4, u32> reserved;
    } common3;

    union {
        BitField<0, 12, u32> max_output_vertices;
        BitField<12, 8, u32> store_req_start; // NOTE: not used by geometry shaders.
        BitField<20, 4, u32> reserved;
        BitField<24, 8, u32> store_req_end; // NOTE: not used by geometry shaders.
    } common4;

    union {
        struct {
            INSERT_PADDING_BYTES_NOINIT(3);  // ImapSystemValuesA
            INSERT_PADDING_BYTES_NOINIT(1);  // ImapSystemValuesB
            INSERT_PADDING_BYTES_NOINIT(16); // ImapGenericVector[32]
            INSERT_PADDING_BYTES_NOINIT(2);  // ImapColor
            union {
                BitField<0, 8, u16> clip_distances;
                BitField<8, 1, u16> point_sprite_s;
                BitField<9, 1, u16> point_sprite_t;
                BitField<10, 1, u16> fog_coordinate;
                BitField<12, 1, u16> tessellation_eval_point_u;
                BitField<13, 1, u16> tessellation_eval_point_v;
                BitField<14, 1, u16> instance_id;
                BitField<15, 1, u16> vertex_id;
            };
            INSERT_PADDING_BYTES_NOINIT(5);  // ImapFixedFncTexture[10]
            INSERT_PADDING_BYTES_NOINIT(1);  // ImapReserved
            INSERT_PADDING_BYTES_NOINIT(3);  // OmapSystemValuesA
            INSERT_PADDING_BYTES_NOINIT(1);  // OmapSystemValuesB
            INSERT_PADDING_BYTES_NOINIT(16); // OmapGenericVector[32]
            INSERT_PADDING_BYTES_NOINIT(2);  // OmapColor
            INSERT_PADDING_BYTES_NOINIT(2);  // OmapSystemValuesC
            INSERT_PADDING_BYTES_NOINIT(5);  // OmapFixedFncTexture[10]
            INSERT_PADDING_BYTES_NOINIT(1);  // OmapReserved
        } vtg;

        struct {
            INSERT_PADDING_BYTES_NOINIT(3); // ImapSystemValuesA
            INSERT_PADDING_BYTES_NOINIT(1); // ImapSystemValuesB

            union {
                BitField<0, 2, PixelImap> x;
                BitField<2, 2, PixelImap> y;
                BitField<4, 2, PixelImap> z;
                BitField<6, 2, PixelImap> w;
                u8 raw;
            } imap_generic_vector[32];

            INSERT_PADDING_BYTES_NOINIT(2);  // ImapColor
            INSERT_PADDING_BYTES_NOINIT(2);  // ImapSystemValuesC
            INSERT_PADDING_BYTES_NOINIT(10); // ImapFixedFncTexture[10]
            INSERT_PADDING_BYTES_NOINIT(2);  // ImapReserved

            struct {
                u32 target;
                union {
                    BitField<0, 1, u32> sample_mask;
                    BitField<1, 1, u32> depth;
                    BitField<2, 30, u32> reserved;
                };
            } omap;

            [[nodiscard]] std::array<bool, 4> EnabledOutputComponents(u32 rt) const noexcept {
                const u32 bits{omap.target >> (rt * 4)};
                return {(bits & 1) != 0, (bits & 2) != 0, (bits & 4) != 0, (bits & 8) != 0};
            }

            [[nodiscard]] std::array<PixelImap, 4> GenericInputMap(u32 attribute) const {
                const auto& vector{imap_generic_vector[attribute]};
                return {vector.x, vector.y, vector.z, vector.w};
            }
        } ps;

        std::array<u32, 0xf> raw;
    };

    [[nodiscard]] u64 LocalMemorySize() const noexcept {
        return (common1.shader_local_memory_low_size |
                (common2.shader_local_memory_high_size << 24));
    }
};
static_assert(sizeof(ProgramHeader) == 0x50, "Incorrect structure size");

} // namespace Shader
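Note, not part of the diff: a minimal sketch of the bit layout EnabledOutputComponents reads. Each render target owns four consecutive bits of ps.omap.target, one write enable per component. DecodeOutputComponents is a hypothetical name used only for illustration.

#include <array>
#include <cstdint>

// Same decode as ProgramHeader::ps::EnabledOutputComponents, outside the struct.
std::array<bool, 4> DecodeOutputComponents(std::uint32_t omap_target, std::uint32_t rt) {
    const std::uint32_t bits = omap_target >> (rt * 4);
    return {(bits & 1) != 0, (bits & 2) != 0, (bits & 4) != 0, (bits & 8) != 0};
}
// Example: omap_target == 0x0000000Fu enables all four components of RT0 and nothing else.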
@@ -1,28 +0,0 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <vector>

#include "common/common_types.h"
#include "shader_recompiler/backend/spirv/emit_spirv.h"
#include "shader_recompiler/environment.h"
#include "shader_recompiler/frontend/maxwell/control_flow.h"
#include "shader_recompiler/frontend/maxwell/program.h"
#include "shader_recompiler/object_pool.h"
#include "shader_recompiler/recompiler.h"

namespace Shader {

std::pair<Info, std::vector<u32>> RecompileSPIRV(const Profile& profile, Environment& env,
                                                 u32 start_address) {
    ObjectPool<Maxwell::Flow::Block> flow_block_pool;
    ObjectPool<IR::Inst> inst_pool;
    ObjectPool<IR::Block> block_pool;

    Maxwell::Flow::CFG cfg{env, flow_block_pool, start_address};
    IR::Program program{Maxwell::TranslateProgram(inst_pool, block_pool, env, cfg)};
    return {std::move(program.info), Backend::SPIRV::EmitSPIRV(profile, env, program)};
}

} // namespace Shader
@@ -1,20 +0,0 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <utility>
#include <vector>

#include "common/common_types.h"
#include "shader_recompiler/environment.h"
#include "shader_recompiler/profile.h"
#include "shader_recompiler/shader_info.h"

namespace Shader {

[[nodiscard]] std::pair<Info, std::vector<u32>> RecompileSPIRV(const Profile& profile,
                                                               Environment& env, u32 start_address);

} // namespace Shader
@@ -0,0 +1,19 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

namespace Shader {

enum class Stage {
    Compute,
    VertexA,
    VertexB,
    TessellationControl,
    TessellationEval,
    Geometry,
    Fragment,
};

} // namespace Shader
@@ -0,0 +1,162 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <cstddef>

#include <boost/container/small_vector.hpp>

#include "common/assert.h"
#include "common/common_types.h"
#include "shader_recompiler/shader_info.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/texture_cache/texture_cache.h"
#include "video_core/texture_cache/types.h"
#include "video_core/textures/texture.h"

namespace Vulkan {

struct TextureHandle {
    explicit TextureHandle(u32 data, bool via_header_index) {
        [[likely]] if (via_header_index) {
            image = data;
            sampler = data;
        } else {
            const Tegra::Texture::TextureHandle handle{data};
            image = handle.tic_id;
            sampler = via_header_index ? image : handle.tsc_id.Value();
        }
    }

    u32 image;
    u32 sampler;
};

struct DescriptorLayoutTuple {
    vk::DescriptorSetLayout descriptor_set_layout;
    vk::PipelineLayout pipeline_layout;
    vk::DescriptorUpdateTemplateKHR descriptor_update_template;
};

class DescriptorLayoutBuilder {
public:
    DescriptorLayoutTuple Create(const vk::Device& device) {
        DescriptorLayoutTuple result;
        if (!bindings.empty()) {
            result.descriptor_set_layout = device.CreateDescriptorSetLayout({
                .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
                .pNext = nullptr,
                .flags = 0,
                .bindingCount = static_cast<u32>(bindings.size()),
                .pBindings = bindings.data(),
            });
        }
        result.pipeline_layout = device.CreatePipelineLayout({
            .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
            .pNext = nullptr,
            .flags = 0,
            .setLayoutCount = result.descriptor_set_layout ? 1U : 0U,
            .pSetLayouts = bindings.empty() ? nullptr : result.descriptor_set_layout.address(),
            .pushConstantRangeCount = 0,
            .pPushConstantRanges = nullptr,
        });
        if (!entries.empty()) {
            result.descriptor_update_template = device.CreateDescriptorUpdateTemplateKHR({
                .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR,
                .pNext = nullptr,
                .flags = 0,
                .descriptorUpdateEntryCount = static_cast<u32>(entries.size()),
                .pDescriptorUpdateEntries = entries.data(),
                .templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR,
                .descriptorSetLayout = *result.descriptor_set_layout,
                .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
                .pipelineLayout = *result.pipeline_layout,
                .set = 0,
            });
        }
        return result;
    }

    void Add(const Shader::Info& info, VkShaderStageFlags stage) {
        for ([[maybe_unused]] const auto& desc : info.constant_buffer_descriptors) {
            Add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, stage);
        }
        for ([[maybe_unused]] const auto& desc : info.storage_buffers_descriptors) {
            Add(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, stage);
        }
        for ([[maybe_unused]] const auto& desc : info.texture_descriptors) {
            Add(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, stage);
        }
    }

private:
    void Add(VkDescriptorType type, VkShaderStageFlags stage) {
        bindings.push_back({
            .binding = binding,
            .descriptorType = type,
            .descriptorCount = 1,
            .stageFlags = stage,
            .pImmutableSamplers = nullptr,
        });
        entries.push_back(VkDescriptorUpdateTemplateEntryKHR{
            .dstBinding = binding,
            .dstArrayElement = 0,
            .descriptorCount = 1,
            .descriptorType = type,
            .offset = offset,
            .stride = sizeof(DescriptorUpdateEntry),
        });
        ++binding;
        offset += sizeof(DescriptorUpdateEntry);
    }

    boost::container::small_vector<VkDescriptorSetLayoutBinding, 32> bindings;
    boost::container::small_vector<VkDescriptorUpdateTemplateEntryKHR, 32> entries;
    u32 binding{};
    size_t offset{};
};

inline VideoCommon::ImageViewType CastType(Shader::TextureType type) {
    switch (type) {
    case Shader::TextureType::Color1D:
    case Shader::TextureType::Shadow1D:
        return VideoCommon::ImageViewType::e1D;
    case Shader::TextureType::ColorArray1D:
    case Shader::TextureType::ShadowArray1D:
        return VideoCommon::ImageViewType::e1DArray;
    case Shader::TextureType::Color2D:
    case Shader::TextureType::Shadow2D:
        return VideoCommon::ImageViewType::e2D;
    case Shader::TextureType::ColorArray2D:
    case Shader::TextureType::ShadowArray2D:
        return VideoCommon::ImageViewType::e2DArray;
    case Shader::TextureType::Color3D:
    case Shader::TextureType::Shadow3D:
        return VideoCommon::ImageViewType::e3D;
    case Shader::TextureType::ColorCube:
    case Shader::TextureType::ShadowCube:
        return VideoCommon::ImageViewType::Cube;
    case Shader::TextureType::ColorArrayCube:
    case Shader::TextureType::ShadowArrayCube:
        return VideoCommon::ImageViewType::CubeArray;
    }
    UNREACHABLE_MSG("Invalid texture type {}", type);
    return {};
}

inline void PushImageDescriptors(const Shader::Info& info, const VkSampler* samplers,
                                 const ImageId* image_view_ids, TextureCache& texture_cache,
                                 VKUpdateDescriptorQueue& update_descriptor_queue, size_t& index) {
    for (const auto& desc : info.texture_descriptors) {
        const VkSampler sampler{samplers[index]};
        ImageView& image_view{texture_cache.GetImageView(image_view_ids[index])};
        const VkImageView vk_image_view{image_view.Handle(CastType(desc.type))};
        update_descriptor_queue.AddSampledImage(vk_image_view, sampler);
        ++index;
    }
}

} // namespace Vulkan
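Note, not part of the diff: DescriptorLayoutBuilder hands out one consecutive binding index per descriptor, walking uniform buffers, then storage buffers, then textures for each stage it is given. A compilable sketch of the binding layout it produces, using plain Vulkan types; MakeBindings is a hypothetical helper and only <vulkan/vulkan_core.h> is assumed.

#include <cstdint>
#include <vector>
#include <vulkan/vulkan_core.h>

std::vector<VkDescriptorSetLayoutBinding> MakeBindings(std::uint32_t num_uniform,
                                                       std::uint32_t num_storage,
                                                       std::uint32_t num_textures,
                                                       VkShaderStageFlags stage) {
    std::vector<VkDescriptorSetLayoutBinding> bindings;
    std::uint32_t binding = 0;
    const auto add = [&](VkDescriptorType type, std::uint32_t count) {
        for (std::uint32_t i = 0; i < count; ++i) {
            bindings.push_back({
                .binding = binding++, // consecutive indices across all descriptor kinds
                .descriptorType = type,
                .descriptorCount = 1,
                .stageFlags = stage,
                .pImmutableSamplers = nullptr,
            });
        }
    };
    add(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, num_uniform);
    add(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, num_storage);
    add(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, num_textures);
    return bindings;
}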
@@ -0,0 +1,445 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <span>

#include <boost/container/small_vector.hpp>
#include <boost/container/static_vector.hpp>

#include "common/bit_field.h"
#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/pipeline_helper.h"
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/renderer_vulkan/vk_graphics_pipeline.h"
#include "video_core/renderer_vulkan/vk_render_pass_cache.h"
#include "video_core/renderer_vulkan/vk_scheduler.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/renderer_vulkan/vk_update_descriptor.h"
#include "video_core/vulkan_common/vulkan_device.h"

namespace Vulkan {
namespace {
using boost::container::small_vector;
using boost::container::static_vector;
using VideoCore::Surface::PixelFormat;
using VideoCore::Surface::PixelFormatFromDepthFormat;
using VideoCore::Surface::PixelFormatFromRenderTargetFormat;

DescriptorLayoutTuple CreateLayout(const Device& device, std::span<const Shader::Info> infos) {
    DescriptorLayoutBuilder builder;
    for (size_t index = 0; index < infos.size(); ++index) {
        static constexpr std::array stages{
            VK_SHADER_STAGE_VERTEX_BIT,
            VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
            VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
            VK_SHADER_STAGE_GEOMETRY_BIT,
            VK_SHADER_STAGE_FRAGMENT_BIT,
        };
        builder.Add(infos[index], stages.at(index));
    }
    return builder.Create(device.GetLogical());
}

template <class StencilFace>
VkStencilOpState GetStencilFaceState(const StencilFace& face) {
    return {
        .failOp = MaxwellToVK::StencilOp(face.ActionStencilFail()),
        .passOp = MaxwellToVK::StencilOp(face.ActionDepthPass()),
        .depthFailOp = MaxwellToVK::StencilOp(face.ActionDepthFail()),
        .compareOp = MaxwellToVK::ComparisonOp(face.TestFunc()),
        .compareMask = 0,
        .writeMask = 0,
        .reference = 0,
    };
}

bool SupportsPrimitiveRestart(VkPrimitiveTopology topology) {
    static constexpr std::array unsupported_topologies{
        VK_PRIMITIVE_TOPOLOGY_POINT_LIST,
        VK_PRIMITIVE_TOPOLOGY_LINE_LIST,
        VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
        VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY,
        VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY,
        VK_PRIMITIVE_TOPOLOGY_PATCH_LIST,
        // VK_PRIMITIVE_TOPOLOGY_QUAD_LIST_EXT,
    };
    return std::ranges::find(unsupported_topologies, topology) == unsupported_topologies.end();
}

VkViewportSwizzleNV UnpackViewportSwizzle(u16 swizzle) {
    union Swizzle {
        u32 raw;
        BitField<0, 3, Maxwell::ViewportSwizzle> x;
        BitField<4, 3, Maxwell::ViewportSwizzle> y;
        BitField<8, 3, Maxwell::ViewportSwizzle> z;
        BitField<12, 3, Maxwell::ViewportSwizzle> w;
    };
    const Swizzle unpacked{swizzle};
    return VkViewportSwizzleNV{
        .x = MaxwellToVK::ViewportSwizzle(unpacked.x),
        .y = MaxwellToVK::ViewportSwizzle(unpacked.y),
        .z = MaxwellToVK::ViewportSwizzle(unpacked.z),
        .w = MaxwellToVK::ViewportSwizzle(unpacked.w),
    };
}

PixelFormat DecodeFormat(u8 encoded_format) {
    const auto format{static_cast<Tegra::RenderTargetFormat>(encoded_format)};
    if (format == Tegra::RenderTargetFormat::NONE) {
        return PixelFormat::Invalid;
    }
    return PixelFormatFromRenderTargetFormat(format);
}

RenderPassKey MakeRenderPassKey(const FixedPipelineState& state) {
    RenderPassKey key;
    std::ranges::transform(state.color_formats, key.color_formats.begin(), DecodeFormat);
    if (state.depth_enabled != 0) {
        const auto depth_format{static_cast<Tegra::DepthFormat>(state.depth_format.Value())};
        key.depth_format = PixelFormatFromDepthFormat(depth_format);
    } else {
        key.depth_format = PixelFormat::Invalid;
    }
    key.samples = MaxwellToVK::MsaaMode(state.msaa_mode);
    return key;
}
} // Anonymous namespace

GraphicsPipeline::GraphicsPipeline(Tegra::Engines::Maxwell3D& maxwell3d_,
                                   Tegra::MemoryManager& gpu_memory_, VKScheduler& scheduler_,
                                   BufferCache& buffer_cache_, TextureCache& texture_cache_,
                                   const Device& device, VKDescriptorPool& descriptor_pool,
                                   VKUpdateDescriptorQueue& update_descriptor_queue_,
                                   RenderPassCache& render_pass_cache,
                                   const FixedPipelineState& state,
                                   std::array<vk::ShaderModule, NUM_STAGES> stages,
                                   const std::array<const Shader::Info*, NUM_STAGES>& infos)
    : maxwell3d{&maxwell3d_}, gpu_memory{&gpu_memory_}, texture_cache{&texture_cache_},
      buffer_cache{&buffer_cache_}, scheduler{&scheduler_},
      update_descriptor_queue{&update_descriptor_queue_}, spv_modules{std::move(stages)} {
    std::ranges::transform(infos, stage_infos.begin(),
                           [](const Shader::Info* info) { return info ? *info : Shader::Info{}; });

    DescriptorLayoutTuple tuple{CreateLayout(device, stage_infos)};
    descriptor_set_layout = std::move(tuple.descriptor_set_layout);
    pipeline_layout = std::move(tuple.pipeline_layout);
    descriptor_update_template = std::move(tuple.descriptor_update_template);
    descriptor_allocator = DescriptorAllocator(descriptor_pool, *descriptor_set_layout);

    const VkRenderPass render_pass{render_pass_cache.Get(MakeRenderPassKey(state))};
    MakePipeline(device, state, render_pass);
}

void GraphicsPipeline::Configure(bool is_indexed) {
    static constexpr size_t max_images_elements = 64;
    std::array<ImageId, max_images_elements> image_view_ids;
    static_vector<u32, max_images_elements> image_view_indices;
    static_vector<VkSampler, max_images_elements> samplers;

    texture_cache->SynchronizeGraphicsDescriptors();
    texture_cache->UpdateRenderTargets(false);

    const auto& regs{maxwell3d->regs};
    const bool via_header_index{regs.sampler_index == Maxwell::SamplerIndex::ViaHeaderIndex};
    for (size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) {
        const Shader::Info& info{stage_infos[stage]};
        buffer_cache->SetEnabledUniformBuffers(stage, info.constant_buffer_mask);
        buffer_cache->UnbindGraphicsStorageBuffers(stage);
        size_t index{};
        for (const auto& desc : info.storage_buffers_descriptors) {
            ASSERT(desc.count == 1);
            buffer_cache->BindGraphicsStorageBuffer(stage, index, desc.cbuf_index, desc.cbuf_offset,
                                                    true);
            ++index;
        }
        const auto& cbufs{maxwell3d->state.shader_stages[stage].const_buffers};
        for (const auto& desc : info.texture_descriptors) {
            const u32 cbuf_index{desc.cbuf_index};
            const u32 cbuf_offset{desc.cbuf_offset};
            ASSERT(cbufs[cbuf_index].enabled);
            const GPUVAddr addr{cbufs[cbuf_index].address + cbuf_offset};
            const u32 raw_handle{gpu_memory->Read<u32>(addr)};

            const TextureHandle handle(raw_handle, via_header_index);
            image_view_indices.push_back(handle.image);

            Sampler* const sampler{texture_cache->GetGraphicsSampler(handle.sampler)};
            samplers.push_back(sampler->Handle());
        }
    }
    const std::span indices_span(image_view_indices.data(), image_view_indices.size());
    buffer_cache->UpdateGraphicsBuffers(is_indexed);
    texture_cache->FillGraphicsImageViews(indices_span, image_view_ids);

    buffer_cache->BindHostGeometryBuffers(is_indexed);

    size_t index{};
    for (size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) {
        buffer_cache->BindHostStageBuffers(stage);
        PushImageDescriptors(stage_infos[stage], samplers.data(), image_view_ids.data(),
                             *texture_cache, *update_descriptor_queue, index);
    }
    const VkDescriptorSet descriptor_set{descriptor_allocator.Commit()};
    update_descriptor_queue->Send(*descriptor_update_template, descriptor_set);

    scheduler->BindGraphicsPipeline(*pipeline);
    scheduler->Record([descriptor_set, layout = *pipeline_layout](vk::CommandBuffer cmdbuf) {
        cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, descriptor_set,
                                  nullptr);
    });
}

void GraphicsPipeline::MakePipeline(const Device& device, const FixedPipelineState& state,
                                    VkRenderPass render_pass) {
    FixedPipelineState::DynamicState dynamic{};
    if (!device.IsExtExtendedDynamicStateSupported()) {
        dynamic = state.dynamic_state;
    }
    static_vector<VkVertexInputBindingDescription, 32> vertex_bindings;
    static_vector<VkVertexInputBindingDivisorDescriptionEXT, 32> vertex_binding_divisors;
    for (size_t index = 0; index < Maxwell::NumVertexArrays; ++index) {
        const bool instanced = state.binding_divisors[index] != 0;
        const auto rate = instanced ? VK_VERTEX_INPUT_RATE_INSTANCE : VK_VERTEX_INPUT_RATE_VERTEX;
        vertex_bindings.push_back({
            .binding = static_cast<u32>(index),
            .stride = dynamic.vertex_strides[index],
            .inputRate = rate,
        });
        if (instanced) {
            vertex_binding_divisors.push_back({
                .binding = static_cast<u32>(index),
                .divisor = state.binding_divisors[index],
            });
        }
    }
    static_vector<VkVertexInputAttributeDescription, 32> vertex_attributes;
    const auto& input_attributes = stage_infos[0].loads_generics;
    for (size_t index = 0; index < state.attributes.size(); ++index) {
        const auto& attribute = state.attributes[index];
        if (!attribute.enabled || !input_attributes[index]) {
            continue;
        }
        vertex_attributes.push_back({
            .location = static_cast<u32>(index),
            .binding = attribute.buffer,
            .format = MaxwellToVK::VertexFormat(attribute.Type(), attribute.Size()),
            .offset = attribute.offset,
        });
    }
    VkPipelineVertexInputStateCreateInfo vertex_input_ci{
        .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .vertexBindingDescriptionCount = static_cast<u32>(vertex_bindings.size()),
        .pVertexBindingDescriptions = vertex_bindings.data(),
        .vertexAttributeDescriptionCount = static_cast<u32>(vertex_attributes.size()),
        .pVertexAttributeDescriptions = vertex_attributes.data(),
    };
    const VkPipelineVertexInputDivisorStateCreateInfoEXT input_divisor_ci{
        .sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT,
        .pNext = nullptr,
        .vertexBindingDivisorCount = static_cast<u32>(vertex_binding_divisors.size()),
        .pVertexBindingDivisors = vertex_binding_divisors.data(),
    };
    if (!vertex_binding_divisors.empty()) {
        vertex_input_ci.pNext = &input_divisor_ci;
    }
    const auto input_assembly_topology = MaxwellToVK::PrimitiveTopology(device, state.topology);
    const VkPipelineInputAssemblyStateCreateInfo input_assembly_ci{
        .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .topology = MaxwellToVK::PrimitiveTopology(device, state.topology),
        .primitiveRestartEnable = state.primitive_restart_enable != 0 &&
                                  SupportsPrimitiveRestart(input_assembly_topology),
    };
    const VkPipelineTessellationStateCreateInfo tessellation_ci{
        .sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .patchControlPoints = state.patch_control_points_minus_one.Value() + 1,
    };
    VkPipelineViewportStateCreateInfo viewport_ci{
        .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .viewportCount = Maxwell::NumViewports,
        .pViewports = nullptr,
        .scissorCount = Maxwell::NumViewports,
        .pScissors = nullptr,
    };
    std::array<VkViewportSwizzleNV, Maxwell::NumViewports> swizzles;
    std::ranges::transform(state.viewport_swizzles, swizzles.begin(), UnpackViewportSwizzle);
    VkPipelineViewportSwizzleStateCreateInfoNV swizzle_ci{
        .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV,
        .pNext = nullptr,
        .flags = 0,
        .viewportCount = Maxwell::NumViewports,
        .pViewportSwizzles = swizzles.data(),
    };
    if (device.IsNvViewportSwizzleSupported()) {
        viewport_ci.pNext = &swizzle_ci;
    }

    const VkPipelineRasterizationStateCreateInfo rasterization_ci{
        .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .depthClampEnable =
            static_cast<VkBool32>(state.depth_clamp_disabled == 0 ? VK_TRUE : VK_FALSE),
        .rasterizerDiscardEnable =
            static_cast<VkBool32>(state.rasterize_enable == 0 ? VK_TRUE : VK_FALSE),
        .polygonMode = VK_POLYGON_MODE_FILL,
        .cullMode = static_cast<VkCullModeFlags>(
            dynamic.cull_enable ? MaxwellToVK::CullFace(dynamic.CullFace()) : VK_CULL_MODE_NONE),
        .frontFace = MaxwellToVK::FrontFace(dynamic.FrontFace()),
        .depthBiasEnable = state.depth_bias_enable,
        .depthBiasConstantFactor = 0.0f,
        .depthBiasClamp = 0.0f,
        .depthBiasSlopeFactor = 0.0f,
        .lineWidth = 1.0f,
    };
    const VkPipelineMultisampleStateCreateInfo multisample_ci{
        .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .rasterizationSamples = MaxwellToVK::MsaaMode(state.msaa_mode),
        .sampleShadingEnable = VK_FALSE,
        .minSampleShading = 0.0f,
        .pSampleMask = nullptr,
        .alphaToCoverageEnable = VK_FALSE,
        .alphaToOneEnable = VK_FALSE,
    };
    const VkPipelineDepthStencilStateCreateInfo depth_stencil_ci{
        .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .depthTestEnable = dynamic.depth_test_enable,
        .depthWriteEnable = dynamic.depth_write_enable,
        .depthCompareOp = dynamic.depth_test_enable
                              ? MaxwellToVK::ComparisonOp(dynamic.DepthTestFunc())
                              : VK_COMPARE_OP_ALWAYS,
        .depthBoundsTestEnable = dynamic.depth_bounds_enable,
        .stencilTestEnable = dynamic.stencil_enable,
        .front = GetStencilFaceState(dynamic.front),
        .back = GetStencilFaceState(dynamic.back),
        .minDepthBounds = 0.0f,
        .maxDepthBounds = 0.0f,
    };
    static_vector<VkPipelineColorBlendAttachmentState, Maxwell::NumRenderTargets> cb_attachments;
    for (size_t index = 0; index < Maxwell::NumRenderTargets; ++index) {
        static constexpr std::array mask_table{
            VK_COLOR_COMPONENT_R_BIT,
            VK_COLOR_COMPONENT_G_BIT,
            VK_COLOR_COMPONENT_B_BIT,
            VK_COLOR_COMPONENT_A_BIT,
        };
        const auto format{static_cast<Tegra::RenderTargetFormat>(state.color_formats[index])};
        if (format == Tegra::RenderTargetFormat::NONE) {
            continue;
        }
        const auto& blend{state.attachments[index]};
        const std::array mask{blend.Mask()};
        VkColorComponentFlags write_mask{};
        for (size_t i = 0; i < mask_table.size(); ++i) {
            write_mask |= mask[i] ? mask_table[i] : 0;
        }
        cb_attachments.push_back({
            .blendEnable = blend.enable != 0,
            .srcColorBlendFactor = MaxwellToVK::BlendFactor(blend.SourceRGBFactor()),
            .dstColorBlendFactor = MaxwellToVK::BlendFactor(blend.DestRGBFactor()),
            .colorBlendOp = MaxwellToVK::BlendEquation(blend.EquationRGB()),
            .srcAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.SourceAlphaFactor()),
            .dstAlphaBlendFactor = MaxwellToVK::BlendFactor(blend.DestAlphaFactor()),
            .alphaBlendOp = MaxwellToVK::BlendEquation(blend.EquationAlpha()),
            .colorWriteMask = write_mask,
        });
    }
    const VkPipelineColorBlendStateCreateInfo color_blend_ci{
        .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .logicOpEnable = VK_FALSE,
        .logicOp = VK_LOGIC_OP_COPY,
        .attachmentCount = static_cast<u32>(cb_attachments.size()),
        .pAttachments = cb_attachments.data(),
        .blendConstants = {},
    };
    static_vector<VkDynamicState, 17> dynamic_states{
        VK_DYNAMIC_STATE_VIEWPORT,           VK_DYNAMIC_STATE_SCISSOR,
        VK_DYNAMIC_STATE_DEPTH_BIAS,         VK_DYNAMIC_STATE_BLEND_CONSTANTS,
        VK_DYNAMIC_STATE_DEPTH_BOUNDS,       VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK,
        VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, VK_DYNAMIC_STATE_STENCIL_REFERENCE,
    };
    if (device.IsExtExtendedDynamicStateSupported()) {
        static constexpr std::array extended{
            VK_DYNAMIC_STATE_CULL_MODE_EXT,
            VK_DYNAMIC_STATE_FRONT_FACE_EXT,
            VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT,
            VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT,
            VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT,
            VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT,
            VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT,
            VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT,
            VK_DYNAMIC_STATE_STENCIL_OP_EXT,
        };
        dynamic_states.insert(dynamic_states.end(), extended.begin(), extended.end());
    }
    const VkPipelineDynamicStateCreateInfo dynamic_state_ci{
        .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .dynamicStateCount = static_cast<u32>(dynamic_states.size()),
        .pDynamicStates = dynamic_states.data(),
    };
    const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroup_size_ci{
        .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
        .pNext = nullptr,
        .requiredSubgroupSize = GuestWarpSize,
    };
    static_vector<VkPipelineShaderStageCreateInfo, 5> shader_stages;
    for (size_t stage = 0; stage < Maxwell::MaxShaderStage; ++stage) {
        if (!spv_modules[stage]) {
            continue;
        }
        [[maybe_unused]] auto& stage_ci =
            shader_stages.emplace_back(VkPipelineShaderStageCreateInfo{
                .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
                .pNext = nullptr,
                .flags = 0,
                .stage = MaxwellToVK::ShaderStage(static_cast<Tegra::Engines::ShaderType>(stage)),
                .module = *spv_modules[stage],
                .pName = "main",
                .pSpecializationInfo = nullptr,
            });
        /*
        if (program[stage]->entries.uses_warps && device.IsGuestWarpSizeSupported(stage_ci.stage)) {
            stage_ci.pNext = &subgroup_size_ci;
        }
        */
    }
    pipeline = device.GetLogical().CreateGraphicsPipeline({
        .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .stageCount = static_cast<u32>(shader_stages.size()),
        .pStages = shader_stages.data(),
        .pVertexInputState = &vertex_input_ci,
        .pInputAssemblyState = &input_assembly_ci,
        .pTessellationState = &tessellation_ci,
        .pViewportState = &viewport_ci,
        .pRasterizationState = &rasterization_ci,
        .pMultisampleState = &multisample_ci,
        .pDepthStencilState = &depth_stencil_ci,
        .pColorBlendState = &color_blend_ci,
        .pDynamicState = &dynamic_state_ci,
        .layout = *pipeline_layout,
        .renderPass = render_pass,
        .subpass = 0,
        .basePipelineHandle = nullptr,
        .basePipelineIndex = 0,
    });
}

} // namespace Vulkan
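Note, not part of the diff: the colorWriteMask loop in MakePipeline maps the guest's four per-channel enables onto VK_COLOR_COMPONENT_*_BIT flags. The same logic as a small standalone function for readers who want to test it in isolation; MakeWriteMask is a hypothetical name and only <vulkan/vulkan_core.h> is assumed.

#include <array>
#include <cstddef>
#include <vulkan/vulkan_core.h>

VkColorComponentFlags MakeWriteMask(const std::array<bool, 4>& mask) {
    static constexpr std::array mask_table{
        VK_COLOR_COMPONENT_R_BIT,
        VK_COLOR_COMPONENT_G_BIT,
        VK_COLOR_COMPONENT_B_BIT,
        VK_COLOR_COMPONENT_A_BIT,
    };
    VkColorComponentFlags write_mask{};
    for (std::size_t i = 0; i < mask_table.size(); ++i) {
        write_mask |= mask[i] ? mask_table[i] : 0; // only enabled channels are writable
    }
    return write_mask;
}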
@@ -0,0 +1,66 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>

#include "shader_recompiler/shader_info.h"
#include "video_core/engines/maxwell_3d.h"
#include "video_core/renderer_vulkan/fixed_pipeline_state.h"
#include "video_core/renderer_vulkan/vk_descriptor_pool.h"
#include "video_core/renderer_vulkan/vk_texture_cache.h"
#include "video_core/renderer_vulkan/vk_buffer_cache.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"

namespace Vulkan {

class Device;
class RenderPassCache;
class VKScheduler;
class VKUpdateDescriptorQueue;

class GraphicsPipeline {
    static constexpr size_t NUM_STAGES = Tegra::Engines::Maxwell3D::Regs::MaxShaderStage;

public:
    explicit GraphicsPipeline() = default;
    explicit GraphicsPipeline(Tegra::Engines::Maxwell3D& maxwell3d,
                              Tegra::MemoryManager& gpu_memory, VKScheduler& scheduler,
                              BufferCache& buffer_cache, TextureCache& texture_cache,
                              const Device& device, VKDescriptorPool& descriptor_pool,
                              VKUpdateDescriptorQueue& update_descriptor_queue,
                              RenderPassCache& render_pass_cache, const FixedPipelineState& state,
                              std::array<vk::ShaderModule, NUM_STAGES> stages,
                              const std::array<const Shader::Info*, NUM_STAGES>& infos);

    void Configure(bool is_indexed);

    GraphicsPipeline& operator=(GraphicsPipeline&&) noexcept = default;
    GraphicsPipeline(GraphicsPipeline&&) noexcept = default;

    GraphicsPipeline& operator=(const GraphicsPipeline&) = delete;
    GraphicsPipeline(const GraphicsPipeline&) = delete;

private:
    void MakePipeline(const Device& device, const FixedPipelineState& state,
                      VkRenderPass render_pass);

    Tegra::Engines::Maxwell3D* maxwell3d{};
    Tegra::MemoryManager* gpu_memory{};
    TextureCache* texture_cache{};
    BufferCache* buffer_cache{};
    VKScheduler* scheduler{};
    VKUpdateDescriptorQueue* update_descriptor_queue{};

    std::array<vk::ShaderModule, NUM_STAGES> spv_modules;
    std::array<Shader::Info, NUM_STAGES> stage_infos;
    vk::DescriptorSetLayout descriptor_set_layout;
    DescriptorAllocator descriptor_allocator;
    vk::PipelineLayout pipeline_layout;
    vk::DescriptorUpdateTemplateKHR descriptor_update_template;
    vk::Pipeline pipeline;
};

} // namespace Vulkan
@@ -1,36 +0,0 @@
// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <cstddef>

#include "video_core/vulkan_common/vulkan_wrapper.h"

namespace Vulkan {

class Pipeline {
public:
    /// Add a reference count to the pipeline
    void AddRef() noexcept {
        ++ref_count;
    }

    [[nodiscard]] bool RemoveRef() noexcept {
        --ref_count;
        return ref_count == 0;
    }

    [[nodiscard]] u64 UsageTick() const noexcept {
        return usage_tick;
    }

protected:
    u64 usage_tick{};

private:
    size_t ref_count{};
};

} // namespace Vulkan
@@ -0,0 +1,100 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <unordered_map>

#include <boost/container/static_vector.hpp>

#include "video_core/renderer_vulkan/maxwell_to_vk.h"
#include "video_core/renderer_vulkan/vk_render_pass_cache.h"
#include "video_core/surface.h"
#include "video_core/vulkan_common/vulkan_device.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"

namespace Vulkan {
namespace {
using VideoCore::Surface::PixelFormat;

constexpr std::array ATTACHMENT_REFERENCES{
    VkAttachmentReference{0, VK_IMAGE_LAYOUT_GENERAL},
    VkAttachmentReference{1, VK_IMAGE_LAYOUT_GENERAL},
    VkAttachmentReference{2, VK_IMAGE_LAYOUT_GENERAL},
    VkAttachmentReference{3, VK_IMAGE_LAYOUT_GENERAL},
    VkAttachmentReference{4, VK_IMAGE_LAYOUT_GENERAL},
    VkAttachmentReference{5, VK_IMAGE_LAYOUT_GENERAL},
    VkAttachmentReference{6, VK_IMAGE_LAYOUT_GENERAL},
    VkAttachmentReference{7, VK_IMAGE_LAYOUT_GENERAL},
    VkAttachmentReference{8, VK_IMAGE_LAYOUT_GENERAL},
};

VkAttachmentDescription AttachmentDescription(const Device& device, PixelFormat format,
                                              VkSampleCountFlagBits samples) {
    using MaxwellToVK::SurfaceFormat;
    return {
        .flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT,
        .format = SurfaceFormat(device, FormatType::Optimal, true, format).format,
        .samples = samples,
        .loadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
        .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
        .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD,
        .stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE,
        .initialLayout = VK_IMAGE_LAYOUT_GENERAL,
        .finalLayout = VK_IMAGE_LAYOUT_GENERAL,
    };
}
} // Anonymous namespace

RenderPassCache::RenderPassCache(const Device& device_) : device{&device_} {}

VkRenderPass RenderPassCache::Get(const RenderPassKey& key) {
    const auto [pair, is_new] = cache.try_emplace(key);
    if (!is_new) {
        return *pair->second;
    }
    boost::container::static_vector<VkAttachmentDescription, 9> descriptions;
    u32 num_images{0};

    for (size_t index = 0; index < key.color_formats.size(); ++index) {
        const PixelFormat format{key.color_formats[index]};
        if (format == PixelFormat::Invalid) {
            continue;
        }
        descriptions.push_back(AttachmentDescription(*device, format, key.samples));
        ++num_images;
    }
    const size_t num_colors{descriptions.size()};
    const VkAttachmentReference* depth_attachment{};
    if (key.depth_format != PixelFormat::Invalid) {
        depth_attachment = &ATTACHMENT_REFERENCES[num_colors];
        descriptions.push_back(AttachmentDescription(*device, key.depth_format, key.samples));
    }
    const VkSubpassDescription subpass{
        .flags = 0,
        .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
        .inputAttachmentCount = 0,
        .pInputAttachments = nullptr,
        .colorAttachmentCount = static_cast<u32>(num_colors),
        .pColorAttachments = num_colors != 0 ? ATTACHMENT_REFERENCES.data() : nullptr,
        .pResolveAttachments = nullptr,
        .pDepthStencilAttachment = depth_attachment,
        .preserveAttachmentCount = 0,
        .pPreserveAttachments = nullptr,
    };
    pair->second = device->GetLogical().CreateRenderPass({
        .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
        .pNext = nullptr,
        .flags = 0,
        .attachmentCount = static_cast<u32>(descriptions.size()),
        .pAttachments = descriptions.data(),
        .subpassCount = 1,
        .pSubpasses = &subpass,
        .dependencyCount = 0,
        .pDependencies = nullptr,
    });
    return *pair->second;
}

} // namespace Vulkan
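Note, not part of the diff: RenderPassCache::Get is a plain memoization over try_emplace. An empty slot is inserted first, the expensive object is only built when the key was new, and every later lookup returns the cached handle. A minimal standalone sketch of the same shape, with a string standing in for the vk::RenderPass; ExpensiveCache is a hypothetical class.

#include <string>
#include <unordered_map>

class ExpensiveCache {
public:
    const std::string& Get(int key) {
        const auto [it, is_new] = cache.try_emplace(key);
        if (!is_new) {
            return it->second; // cache hit: reuse the previously built object
        }
        it->second = std::to_string(key) + "-built-once"; // stands in for CreateRenderPass
        return it->second;
    }

private:
    std::unordered_map<int, std::string> cache;
};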
@@ -0,0 +1,53 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <unordered_map>

#include "video_core/surface.h"
#include "video_core/vulkan_common/vulkan_wrapper.h"

namespace Vulkan {

struct RenderPassKey {
    auto operator<=>(const RenderPassKey&) const noexcept = default;

    std::array<VideoCore::Surface::PixelFormat, 8> color_formats;
    VideoCore::Surface::PixelFormat depth_format;
    VkSampleCountFlagBits samples;
};

} // namespace Vulkan

namespace std {
template <>
struct hash<Vulkan::RenderPassKey> {
    [[nodiscard]] size_t operator()(const Vulkan::RenderPassKey& key) const noexcept {
        size_t value = static_cast<size_t>(key.depth_format) << 48;
        value ^= static_cast<size_t>(key.samples) << 52;
        for (size_t i = 0; i < key.color_formats.size(); ++i) {
            value ^= static_cast<size_t>(key.color_formats[i]) << (i * 6);
        }
        return value;
    }
};
} // namespace std

namespace Vulkan {

class Device;

class RenderPassCache {
public:
    explicit RenderPassCache(const Device& device_);

    VkRenderPass Get(const RenderPassKey& key);

private:
    const Device* device{};
    std::unordered_map<RenderPassKey, vk::RenderPass> cache;
};

} // namespace Vulkan