From f1a2e367113518b277f34ffbb04499882c3b6051 Mon Sep 17 00:00:00 2001
From: Fernando Sahmkow <fsahmkow27@gmail.com>
Date: Fri, 4 Aug 2023 03:32:30 +0200
Subject: [PATCH] Query Cache: Fully rework Vulkan's query cache

---
 src/common/settings.cpp                       |   10 +-
 src/common/settings.h                         |    2 +
 src/video_core/buffer_cache/buffer_cache.h    |   19 +-
 .../buffer_cache/buffer_cache_base.h          |   12 +
 src/video_core/control/channel_state_cache.h  |    2 +-
 src/video_core/engines/maxwell_3d.cpp         |   74 +-
 src/video_core/engines/maxwell_3d.h           |    3 -
 src/video_core/engines/maxwell_dma.cpp        |   12 +-
 src/video_core/engines/puller.cpp             |   11 +-
 src/video_core/fence_manager.h                |   23 +-
 src/video_core/gpu.cpp                        |    4 +-
 src/video_core/host_shaders/CMakeLists.txt    |    1 +
 .../resolve_conditional_render.comp           |   20 +
 src/video_core/macro/macro_hle.cpp            |   24 +
 src/video_core/query_cache.h                  |   13 +-
 src/video_core/rasterizer_interface.h         |   12 +-
 .../renderer_null/null_rasterizer.cpp         |   18 +-
 .../renderer_null/null_rasterizer.h           |    6 +-
 .../renderer_opengl/gl_query_cache.cpp        |    2 +-
 .../renderer_opengl/gl_query_cache.h          |    2 +-
 .../renderer_opengl/gl_rasterizer.cpp         |   32 +-
 .../renderer_opengl/gl_rasterizer.h           |    6 +-
 .../renderer_vulkan/vk_buffer_cache.cpp       |    3 +
 .../renderer_vulkan/vk_compute_pass.cpp       |   47 +
 .../renderer_vulkan/vk_compute_pass.h         |   13 +
 .../renderer_vulkan/vk_fence_manager.h        |    2 +-
 .../renderer_vulkan/vk_query_cache.cpp        | 1286 +++++++++++++++--
 .../renderer_vulkan/vk_query_cache.h          |  119 +-
 .../renderer_vulkan/vk_rasterizer.cpp         |   98 +-
 .../renderer_vulkan/vk_rasterizer.h           |   13 +-
 .../renderer_vulkan/vk_scheduler.cpp          |    9 +-
 src/video_core/renderer_vulkan/vk_scheduler.h |    2 +-
 src/video_core/vulkan_common/vulkan_device.h  |    6 +
 .../vulkan_common/vulkan_wrapper.cpp          |    3 +
 src/video_core/vulkan_common/vulkan_wrapper.h |   19 +
 35 files changed, 1573 insertions(+), 355 deletions(-)
 create mode 100644 src/video_core/host_shaders/resolve_conditional_render.comp

diff --git a/src/common/settings.cpp b/src/common/settings.cpp
index 4ecaf550b1..3fde3cae63 100644
--- a/src/common/settings.cpp
+++ b/src/common/settings.cpp
@@ -130,13 +130,17 @@ void LogSettings() {
     log_path("DataStorage_SDMCDir", Common::FS::GetYuzuPath(Common::FS::YuzuPath::SDMCDir));
 }
 
+void UpdateGPUAccuracy() {
+    values.current_gpu_accuracy = values.gpu_accuracy.GetValue();
+}
+
 bool IsGPULevelExtreme() {
-    return values.gpu_accuracy.GetValue() == GpuAccuracy::Extreme;
+    return values.current_gpu_accuracy == GpuAccuracy::Extreme;
 }
 
 bool IsGPULevelHigh() {
-    return values.gpu_accuracy.GetValue() == GpuAccuracy::Extreme ||
-           values.gpu_accuracy.GetValue() == GpuAccuracy::High;
+    return values.current_gpu_accuracy == GpuAccuracy::Extreme ||
+           values.current_gpu_accuracy == GpuAccuracy::High;
 }
 
 bool IsFastmemEnabled() {
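`UpdateGPUAccuracy()` snapshots the switchable setting into the plain `current_gpu_accuracy` field because `IsGPULevelHigh()` now sits on the per-report hot path of the new query cache (see `SamplesStreamer::StartCounter` below); the GPU thread refreshes the snapshot only at safe points (thread start and command-list end, in gpu.cpp). A minimal sketch of the pattern, using hypothetical names (`ui_value`, `snapshot`):

    #include <atomic>

    enum class GpuAccuracy { Normal, High, Extreme };

    // Written by the settings/UI side at any time.
    std::atomic<GpuAccuracy> ui_value{GpuAccuracy::High};
    // Read-mostly copy owned by the GPU thread.
    GpuAccuracy snapshot{GpuAccuracy::High};

    void UpdateSnapshot() { // called only at safe points
        snapshot = ui_value.load(std::memory_order_relaxed);
    }

    bool IsHigh() { // hot path: no atomic access
        return snapshot == GpuAccuracy::High || snapshot == GpuAccuracy::Extreme;
    }
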
diff --git a/src/common/settings.h b/src/common/settings.h
index 82ec9077ea..ae5e5d2b8c 100644
--- a/src/common/settings.h
+++ b/src/common/settings.h
@@ -307,6 +307,7 @@ struct Values {
                                                       Specialization::Default,
                                                       true,
                                                       true};
+    GpuAccuracy current_gpu_accuracy{GpuAccuracy::High};
     SwitchableSetting<AnisotropyMode, true> max_anisotropy{
         linkage,          AnisotropyMode::Automatic, AnisotropyMode::Automatic, AnisotropyMode::X16,
         "max_anisotropy", Category::RendererAdvanced};
@@ -522,6 +523,7 @@ struct Values {
 
 extern Values values;
 
+void UpdateGPUAccuracy();
 bool IsGPULevelExtreme();
 bool IsGPULevelHigh();
 
diff --git a/src/video_core/buffer_cache/buffer_cache.h b/src/video_core/buffer_cache/buffer_cache.h
index 8be7bd5947..f91b7d1e4f 100644
--- a/src/video_core/buffer_cache/buffer_cache.h
+++ b/src/video_core/buffer_cache/buffer_cache.h
@@ -272,13 +272,20 @@ std::pair<typename P::Buffer*, u32> BufferCache<P>::ObtainBuffer(GPUVAddr gpu_ad
     if (!cpu_addr) {
         return {&slot_buffers[NULL_BUFFER_ID], 0};
     }
-    const BufferId buffer_id = FindBuffer(*cpu_addr, size);
+    return ObtainCPUBuffer(*cpu_addr, size, sync_info, post_op);
+}
+
+template <class P>
+std::pair<typename P::Buffer*, u32> BufferCache<P>::ObtainCPUBuffer(
+    VAddr cpu_addr, u32 size, ObtainBufferSynchronize sync_info, ObtainBufferOperation post_op) {
+    const BufferId buffer_id = FindBuffer(cpu_addr, size);
     Buffer& buffer = slot_buffers[buffer_id];
 
     // synchronize op
     switch (sync_info) {
     case ObtainBufferSynchronize::FullSynchronize:
-        SynchronizeBuffer(buffer, *cpu_addr, size);
+        SynchronizeBuffer(buffer, cpu_addr, size);
         break;
     default:
         break;
@@ -286,11 +293,11 @@ std::pair<typename P::Buffer*, u32> BufferCache<P>::ObtainBuffer(GPUVAddr gpu_ad
 
     switch (post_op) {
     case ObtainBufferOperation::MarkAsWritten:
-        MarkWrittenBuffer(buffer_id, *cpu_addr, size);
+        MarkWrittenBuffer(buffer_id, cpu_addr, size);
         break;
     case ObtainBufferOperation::DiscardWrite: {
-        VAddr cpu_addr_start = Common::AlignDown(*cpu_addr, 64);
-        VAddr cpu_addr_end = Common::AlignUp(*cpu_addr + size, 64);
+        VAddr cpu_addr_start = Common::AlignDown(cpu_addr, 64);
+        VAddr cpu_addr_end = Common::AlignUp(cpu_addr + size, 64);
         IntervalType interval{cpu_addr_start, cpu_addr_end};
         ClearDownload(interval);
         common_ranges.subtract(interval);
@@ -300,7 +307,7 @@ std::pair<typename P::Buffer*, u32> BufferCache<P>::ObtainBuffer(GPUVAddr gpu_ad
         break;
     }
 
-    return {&buffer, buffer.Offset(*cpu_addr)};
+    return {&buffer, buffer.Offset(cpu_addr)};
 }
 
 template <class P>
diff --git a/src/video_core/buffer_cache/buffer_cache_base.h b/src/video_core/buffer_cache/buffer_cache_base.h
index 0b7135d493..9507071e55 100644
--- a/src/video_core/buffer_cache/buffer_cache_base.h
+++ b/src/video_core/buffer_cache/buffer_cache_base.h
@@ -295,6 +295,10 @@ public:
     [[nodiscard]] std::pair<Buffer*, u32> ObtainBuffer(GPUVAddr gpu_addr, u32 size,
                                                        ObtainBufferSynchronize sync_info,
                                                        ObtainBufferOperation post_op);
+
+    [[nodiscard]] std::pair<Buffer*, u32> ObtainCPUBuffer(VAddr cpu_addr, u32 size,
+                                                          ObtainBufferSynchronize sync_info,
+                                                          ObtainBufferOperation post_op);
     void FlushCachedWrites();
 
     /// Return true when there are uncommitted buffers to be downloaded
@@ -335,6 +339,14 @@ public:
 
     [[nodiscard]] std::pair<Buffer*, u32> GetDrawIndirectBuffer();
 
+    template <typename Func>
+    void BufferOperations(Func&& func) {
+        do {
+            channel_state->has_deleted_buffers = false;
+            func();
+        } while (channel_state->has_deleted_buffers);
+    }
+
     std::recursive_mutex mutex;
     Runtime& runtime;
 
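`BufferOperations` exists because work done inside `func` (for example an `ObtainCPUBuffer` call that grows or merges buffers) can delete buffers and invalidate references taken earlier in the same operation; the loop simply replays the operation until it completes without a deletion. A hypothetical caller sketch (`cpu_addr`, `size` and the enum values are assumptions for illustration):

    buffer_cache.BufferOperations([&] {
        const auto [buffer, offset] = buffer_cache.ObtainCPUBuffer(
            cpu_addr, size, ObtainBufferSynchronize::FullSynchronize,
            ObtainBufferOperation::DoNothing);
        // ... use *buffer at `offset`; if this pass deleted buffers,
        // has_deleted_buffers is set and the lambda runs again ...
    });
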
diff --git a/src/video_core/control/channel_state_cache.h b/src/video_core/control/channel_state_cache.h
index 46bc9e3227..5574e1fbaa 100644
--- a/src/video_core/control/channel_state_cache.h
+++ b/src/video_core/control/channel_state_cache.h
@@ -51,7 +51,7 @@ public:
     virtual void CreateChannel(Tegra::Control::ChannelState& channel);
 
     /// Bind a channel for execution.
-    void BindToChannel(s32 id);
+    virtual void BindToChannel(s32 id);
 
     /// Erase channel's state.
     void EraseChannel(s32 id);
diff --git a/src/video_core/engines/maxwell_3d.cpp b/src/video_core/engines/maxwell_3d.cpp
index 06e349e434..922c399e6a 100644
--- a/src/video_core/engines/maxwell_3d.cpp
+++ b/src/video_core/engines/maxwell_3d.cpp
@@ -20,8 +20,6 @@
 
 namespace Tegra::Engines {
 
-using VideoCore::QueryType;
-
 /// First register id that is actually a Macro call.
 constexpr u32 MacroRegistersStart = 0xE00;
 
@@ -500,27 +498,21 @@ void Maxwell3D::StampQueryResult(u64 payload, bool long_query) {
 }
 
 void Maxwell3D::ProcessQueryGet() {
+    VideoCommon::QueryPropertiesFlags flags{};
+    if (regs.report_semaphore.query.short_query == 0) {
+        flags |= VideoCommon::QueryPropertiesFlags::HasTimeout;
+    }
+    const GPUVAddr sequence_address{regs.report_semaphore.Address()};
+    const VideoCommon::QueryType query_type =
+        static_cast<VideoCommon::QueryType>(regs.report_semaphore.query.report.Value());
+    const u32 payload = regs.report_semaphore.payload;
+    const u32 subreport = regs.report_semaphore.query.sub_report;
     switch (regs.report_semaphore.query.operation) {
     case Regs::ReportSemaphore::Operation::Release:
         if (regs.report_semaphore.query.short_query != 0) {
-            const GPUVAddr sequence_address{regs.report_semaphore.Address()};
-            const u32 payload = regs.report_semaphore.payload;
-            std::function<void()> operation([this, sequence_address, payload] {
-                memory_manager.Write<u32>(sequence_address, payload);
-            });
-            rasterizer->SignalFence(std::move(operation));
-        } else {
-            struct LongQueryResult {
-                u64_le value;
-                u64_le timestamp;
-            };
-            const GPUVAddr sequence_address{regs.report_semaphore.Address()};
-            const u32 payload = regs.report_semaphore.payload;
-            [this, sequence_address, payload] {
-                memory_manager.Write<u64>(sequence_address + sizeof(u64), system.GPU().GetTicks());
-                memory_manager.Write<u64>(sequence_address, payload);
-            }();
+            flags |= VideoCommon::QueryPropertiesFlags::IsAFence;
         }
+        rasterizer->Query(sequence_address, query_type, flags, payload, subreport);
         break;
     case Regs::ReportSemaphore::Operation::Acquire:
         // TODO(Blinkhawk): Under this operation, the GPU waits for the CPU to write a value that
@@ -528,11 +520,7 @@ void Maxwell3D::ProcessQueryGet() {
         UNIMPLEMENTED_MSG("Unimplemented query operation ACQUIRE");
         break;
     case Regs::ReportSemaphore::Operation::ReportOnly:
-        if (const std::optional<u64> result = GetQueryResult()) {
-            // If the query returns an empty optional it means it's cached and deferred.
-            // In this case we have a non-empty result, so we stamp it immediately.
-            StampQueryResult(*result, regs.report_semaphore.query.short_query == 0);
-        }
+        rasterizer->Query(sequence_address, query_type, flags, payload, subreport);
         break;
     case Regs::ReportSemaphore::Operation::Trap:
         UNIMPLEMENTED_MSG("Unimplemented query operation TRAP");
@@ -544,6 +532,10 @@ void Maxwell3D::ProcessQueryGet() {
 }
 
 void Maxwell3D::ProcessQueryCondition() {
+    if (rasterizer->AccelerateConditionalRendering()) {
+        execute_on = true;
+        return;
+    }
     const GPUVAddr condition_address{regs.render_enable.Address()};
     switch (regs.render_enable_override) {
     case Regs::RenderEnable::Override::AlwaysRender:
@@ -553,10 +545,6 @@ void Maxwell3D::ProcessQueryCondition() {
         execute_on = false;
         break;
     case Regs::RenderEnable::Override::UseRenderEnable: {
-        if (rasterizer->AccelerateConditionalRendering()) {
-            execute_on = true;
-            return;
-        }
         switch (regs.render_enable.mode) {
         case Regs::RenderEnable::Mode::True: {
             execute_on = true;
@@ -606,7 +594,13 @@ void Maxwell3D::ProcessCounterReset() {
 #endif
     switch (regs.clear_report_value) {
     case Regs::ClearReport::ZPassPixelCount:
-        rasterizer->ResetCounter(QueryType::SamplesPassed);
+        rasterizer->ResetCounter(VideoCommon::QueryType::ZPassPixelCount64);
+        break;
+    case Regs::ClearReport::PrimitivesGenerated:
+        rasterizer->ResetCounter(VideoCommon::QueryType::StreamingByteCount);
+        break;
+    case Regs::ClearReport::VtgPrimitivesOut:
+        rasterizer->ResetCounter(VideoCommon::QueryType::StreamingByteCount);
         break;
     default:
         LOG_DEBUG(Render_OpenGL, "Unimplemented counter reset={}", regs.clear_report_value);
@@ -620,28 +614,6 @@ void Maxwell3D::ProcessSyncPoint() {
     rasterizer->SignalSyncPoint(sync_point);
 }
 
-std::optional<u64> Maxwell3D::GetQueryResult() {
-    switch (regs.report_semaphore.query.report) {
-    case Regs::ReportSemaphore::Report::Payload:
-        return regs.report_semaphore.payload;
-    case Regs::ReportSemaphore::Report::ZPassPixelCount64:
-#if ANDROID
-        if (!Settings::IsGPULevelHigh()) {
-            // This is problematic on Android, disable on GPU Normal.
-            return 120;
-        }
-#endif
-        // Deferred.
-        rasterizer->Query(regs.report_semaphore.Address(), QueryType::SamplesPassed,
-                          system.GPU().GetTicks());
-        return std::nullopt;
-    default:
-        LOG_DEBUG(HW_GPU, "Unimplemented query report type {}",
-                  regs.report_semaphore.query.report.Value());
-        return 1;
-    }
-}
-
 void Maxwell3D::ProcessCBBind(size_t stage_index) {
     // Bind the buffer currently in CB_ADDRESS to the specified index in the desired shader
     // stage.
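The `VideoCommon::QueryType` and `QueryPropertiesFlags` values used above come from `video_core/query_cache/types.h`, which this patch includes but does not add. A sketch of the subset these call sites rely on; the member names match the usages in this patch, but the exact numeric values and the full member list are assumptions:

    namespace VideoCommon {

    enum class QueryType : u32 {
        Payload = 0,        // plain semaphore payload write, no counter
        ZPassPixelCount64,  // occlusion counter (samples passed)
        StreamingByteCount, // transform feedback counter
        // ... further report types ...
    };

    enum class QueryPropertiesFlags : u32 {
        HasTimeout = 1 << 0, // long report: also write a 64-bit GPU timestamp
        IsAFence = 1 << 1,   // release semantics: signal in submission order
    };
    // plus the usual flag operators (operator|, operator|=, True(...))

    } // namespace VideoCommon
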
diff --git a/src/video_core/engines/maxwell_3d.h b/src/video_core/engines/maxwell_3d.h
index 6c19354e1d..17faacc37c 100644
--- a/src/video_core/engines/maxwell_3d.h
+++ b/src/video_core/engines/maxwell_3d.h
@@ -3182,9 +3182,6 @@ private:
     /// Handles writes to syncing register.
     void ProcessSyncPoint();
 
-    /// Returns a query's value or an empty object if the value will be deferred through a cache.
-    std::optional<u64> GetQueryResult();
-
     void RefreshParametersImpl();
 
     bool IsMethodExecutable(u32 method);
diff --git a/src/video_core/engines/maxwell_dma.cpp b/src/video_core/engines/maxwell_dma.cpp
index 279f0daa13..422d4d859a 100644
--- a/src/video_core/engines/maxwell_dma.cpp
+++ b/src/video_core/engines/maxwell_dma.cpp
@@ -362,21 +362,17 @@ void MaxwellDMA::ReleaseSemaphore() {
     const auto type = regs.launch_dma.semaphore_type;
     const GPUVAddr address = regs.semaphore.address;
     const u32 payload = regs.semaphore.payload;
+    VideoCommon::QueryPropertiesFlags flags{VideoCommon::QueryPropertiesFlags::IsAFence};
     switch (type) {
     case LaunchDMA::SemaphoreType::NONE:
         break;
     case LaunchDMA::SemaphoreType::RELEASE_ONE_WORD_SEMAPHORE: {
-        std::function<void()> operation(
-            [this, address, payload] { memory_manager.Write<u32>(address, payload); });
-        rasterizer->SignalFence(std::move(operation));
+        rasterizer->Query(address, VideoCommon::QueryType::Payload, flags, payload, 0);
         break;
     }
     case LaunchDMA::SemaphoreType::RELEASE_FOUR_WORD_SEMAPHORE: {
-        std::function<void()> operation([this, address, payload] {
-            memory_manager.Write<u64>(address + sizeof(u64), system.GPU().GetTicks());
-            memory_manager.Write<u64>(address, payload);
-        });
-        rasterizer->SignalFence(std::move(operation));
+        rasterizer->Query(address, VideoCommon::QueryType::Payload,
+                          flags | VideoCommon::QueryPropertiesFlags::HasTimeout, payload, 0);
         break;
     }
     default:
diff --git a/src/video_core/engines/puller.cpp b/src/video_core/engines/puller.cpp
index 6de2543b7e..5827382342 100644
--- a/src/video_core/engines/puller.cpp
+++ b/src/video_core/engines/puller.cpp
@@ -82,10 +82,7 @@ void Puller::ProcessSemaphoreTriggerMethod() {
     if (op == GpuSemaphoreOperation::WriteLong) {
         const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()};
         const u32 payload = regs.semaphore_sequence;
-        [this, sequence_address, payload] {
-            memory_manager.Write<u64>(sequence_address + sizeof(u64), gpu.GetTicks());
-            memory_manager.Write<u64>(sequence_address, payload);
-        }();
+        rasterizer->Query(sequence_address, VideoCommon::QueryType::Payload,
+                          VideoCommon::QueryPropertiesFlags::HasTimeout, payload, 0);
     } else {
         do {
             const u32 word{memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress())};
@@ -120,10 +117,7 @@ void Puller::ProcessSemaphoreTriggerMethod() {
 void Puller::ProcessSemaphoreRelease() {
     const GPUVAddr sequence_address{regs.semaphore_address.SemaphoreAddress()};
     const u32 payload = regs.semaphore_release;
-    std::function<void()> operation([this, sequence_address, payload] {
-        memory_manager.Write<u32>(sequence_address, payload);
-    });
-    rasterizer->SignalFence(std::move(operation));
+    rasterizer->Query(sequence_address, VideoCommon::QueryType::Payload,
+                      VideoCommon::QueryPropertiesFlags::IsAFence, payload, 0);
 }
 
 void Puller::ProcessSemaphoreAcquire() {
@@ -132,7 +126,6 @@ void Puller::ProcessSemaphoreAcquire() {
     while (word != value) {
         regs.acquire_active = true;
         regs.acquire_value = value;
-        std::this_thread::sleep_for(std::chrono::milliseconds(1));
         rasterizer->ReleaseFences();
         word = memory_manager.Read<u32>(regs.semaphore_address.SemaphoreAddress());
         // TODO(kemathe73) figure out how to do the acquire_timeout
diff --git a/src/video_core/fence_manager.h b/src/video_core/fence_manager.h
index ab20ff30fd..8459a3092b 100644
--- a/src/video_core/fence_manager.h
+++ b/src/video_core/fence_manager.h
@@ -104,9 +104,28 @@ public:
         SignalFence(std::move(func));
     }
 
-    void WaitPendingFences() {
+    void WaitPendingFences(bool force) {
         if constexpr (!can_async_check) {
-            TryReleasePendingFences<true>();
+            if (force) {
+                TryReleasePendingFences<true>();
+            } else {
+                TryReleasePendingFences<false>();
+            }
+        } else {
+            if (!force) {
+                return;
+            }
+            std::mutex wait_mutex;
+            std::condition_variable wait_cv;
+            std::atomic<bool> wait_finished{};
+            std::function<void()> func([&] {
+                std::scoped_lock lk(wait_mutex);
+                wait_finished.store(true, std::memory_order_relaxed);
+                wait_cv.notify_all();
+            });
+            SignalFence(std::move(func));
+            std::unique_lock lk(wait_mutex);
+            wait_cv.wait(
+                lk, [&wait_finished] { return wait_finished.load(std::memory_order_relaxed); });
         }
     }
 
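For `can_async_check` backends the forced wait cannot poll fences directly, so it enqueues a do-nothing fence whose callback wakes the caller once the fence release thread reaches it, guaranteeing every earlier fence has been released. A self-contained sketch of that handshake (the worker thread stands in for the fence release thread):

    #include <condition_variable>
    #include <functional>
    #include <mutex>
    #include <thread>

    int main() {
        std::mutex wait_mutex;
        std::condition_variable wait_cv;
        bool wait_finished = false;

        // The "fence": runs on the release thread after all earlier fences.
        std::function<void()> marker([&] {
            std::scoped_lock lk(wait_mutex);
            wait_finished = true;
            wait_cv.notify_all();
        });
        std::thread release_thread([&] { marker(); });

        std::unique_lock lk(wait_mutex);
        wait_cv.wait(lk, [&] { return wait_finished; });
        release_thread.join();
    }
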
diff --git a/src/video_core/gpu.cpp b/src/video_core/gpu.cpp
index c192e33b28..11549d448f 100644
--- a/src/video_core/gpu.cpp
+++ b/src/video_core/gpu.cpp
@@ -102,7 +102,8 @@ struct GPU::Impl {
 
     /// Signal the ending of command list.
     void OnCommandListEnd() {
-        rasterizer->ReleaseFences();
+        rasterizer->ReleaseFences(false);
+        Settings::UpdateGPUAccuracy();
     }
 
     /// Request a host GPU memory flush from the CPU.
@@ -220,6 +221,7 @@ struct GPU::Impl {
     /// This can be used to launch any necessary threads and register any necessary
     /// core timing events.
     void Start() {
+        Settings::UpdateGPUAccuracy();
         gpu_thread.StartThread(*renderer, renderer->Context(), *scheduler);
     }
 
diff --git a/src/video_core/host_shaders/CMakeLists.txt b/src/video_core/host_shaders/CMakeLists.txt
index c4d4590772..fb24b65329 100644
--- a/src/video_core/host_shaders/CMakeLists.txt
+++ b/src/video_core/host_shaders/CMakeLists.txt
@@ -41,6 +41,7 @@ set(SHADER_FILES
     pitch_unswizzle.comp
     present_bicubic.frag
     present_gaussian.frag
+    resolve_conditional_render.comp
     smaa_edge_detection.vert
     smaa_edge_detection.frag
     smaa_blending_weight_calculation.vert
diff --git a/src/video_core/host_shaders/resolve_conditional_render.comp b/src/video_core/host_shaders/resolve_conditional_render.comp
new file mode 100644
index 0000000000..307e77d1ad
--- /dev/null
+++ b/src/video_core/host_shaders/resolve_conditional_render.comp
@@ -0,0 +1,20 @@
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#version 450
+
+layout(local_size_x = 1) in;
+
+layout(std430, binding = 0) buffer Query {
+    uvec2 initial;
+    uvec2 unknown;
+    uvec2 current;
+};
+
+layout(std430, binding = 1) buffer Result {
+    uint result;
+};
+
+void main() {
+    result = all(equal(initial, current)) ? 1 : 0;
+}
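The shader compares the first and third 64-bit report slots as `uvec2` pairs (std430 GLSL without 64-bit integer extensions). On the host side the comparison region would look roughly like this; the field meanings are inferred from the shader and the `compare_size` logic in vk_compute_pass.cpp below:

    struct ConditionalRenderQuery { // binding = 0, the 24 compared bytes
        u64 initial; // payload captured when the condition was armed
        u64 unknown; // middle slot of the report, never compared
        u64 current; // most recent payload at the same address
    };

    struct ConditionalRenderResult { // binding = 1
        u32 result; // 1 when initial == current, else 0
    };
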
diff --git a/src/video_core/macro/macro_hle.cpp b/src/video_core/macro/macro_hle.cpp
index 6272a4652e..e980af1718 100644
--- a/src/video_core/macro/macro_hle.cpp
+++ b/src/video_core/macro/macro_hle.cpp
@@ -319,6 +319,25 @@ private:
     }
 };
 
+class HLE_DrawIndirectByteCount final : public HLEMacroImpl {
+public:
+    explicit HLE_DrawIndirectByteCount(Maxwell3D& maxwell3d_) : HLEMacroImpl(maxwell3d_) {}
+
+    void Execute(const std::vector<u32>& parameters, [[maybe_unused]] u32 method) override {
+        maxwell3d.RefreshParameters();
+
+        maxwell3d.regs.draw.begin = parameters[0];
+        maxwell3d.regs.draw_auto_stride = parameters[1];
+        maxwell3d.regs.draw_auto_byte_count = parameters[2];
+
+        if (maxwell3d.ShouldExecute()) {
+            maxwell3d.draw_manager->DrawArray(
+                maxwell3d.regs.draw.topology, 0,
+                maxwell3d.regs.draw_auto_byte_count / maxwell3d.regs.draw_auto_stride, 0, 1);
+        }
+    }
+};
+
 class HLE_C713C83D8F63CCF3 final : public HLEMacroImpl {
 public:
     explicit HLE_C713C83D8F63CCF3(Maxwell3D& maxwell3d_) : HLEMacroImpl(maxwell3d_) {}
@@ -536,6 +555,11 @@ HLEMacro::HLEMacro(Maxwell3D& maxwell3d_) : maxwell3d{maxwell3d_} {
                          [](Maxwell3D& maxwell3d__) -> std::unique_ptr<CachedMacro> {
                              return std::make_unique<HLE_TransformFeedbackSetup>(maxwell3d__);
                          }));
+    builders.emplace(0xB5F74EDB717278ECULL,
+                     std::function<std::unique_ptr<CachedMacro>(Maxwell3D&)>(
+                         [](Maxwell3D& maxwell3d__) -> std::unique_ptr<CachedMacro> {
+                             return std::make_unique<HLE_DrawIndirectByteCount>(maxwell3d__);
+                         }));
 }
 
 HLEMacro::~HLEMacro() = default;
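`HLE_DrawIndirectByteCount` converts a transform-feedback byte count into a vertex count for `DrawArray`, avoiding a macro round trip through guest memory. A worked example with hypothetical values:

    // parameters[1]: stride of one captured vertex, parameters[2]: bytes written
    const u32 draw_auto_stride = 16;       // e.g. one vec4 per vertex
    const u32 draw_auto_byte_count = 4096; // from the transform feedback pass
    const u32 vertex_count = draw_auto_byte_count / draw_auto_stride; // 256
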
diff --git a/src/video_core/query_cache.h b/src/video_core/query_cache.h
index 7047e2e631..9fcaeeac7f 100644
--- a/src/video_core/query_cache.h
+++ b/src/video_core/query_cache.h
@@ -25,6 +25,13 @@
 #include "video_core/rasterizer_interface.h"
 #include "video_core/texture_cache/slot_vector.h"
 
+namespace VideoCore {
+enum class QueryType {
+    SamplesPassed,
+};
+constexpr std::size_t NumQueryTypes = 1;
+} // namespace VideoCore
+
 namespace VideoCommon {
 
 using AsyncJobId = SlotId;
@@ -98,10 +105,10 @@ private:
 };
 
 template <class QueryCache, class CachedQuery, class CounterStream, class HostCounter>
-class QueryCacheBase : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
+class QueryCacheLegacy : public VideoCommon::ChannelSetupCaches<VideoCommon::ChannelInfo> {
 public:
-    explicit QueryCacheBase(VideoCore::RasterizerInterface& rasterizer_,
-                            Core::Memory::Memory& cpu_memory_)
+    explicit QueryCacheLegacy(VideoCore::RasterizerInterface& rasterizer_,
+                              Core::Memory::Memory& cpu_memory_)
         : rasterizer{rasterizer_},
           // Use reinterpret_cast instead of static_cast as workaround for
           // UBSan bug (https://github.com/llvm/llvm-project/issues/59060)
diff --git a/src/video_core/rasterizer_interface.h b/src/video_core/rasterizer_interface.h
index cb8029a4fc..2ba7cbb0dd 100644
--- a/src/video_core/rasterizer_interface.h
+++ b/src/video_core/rasterizer_interface.h
@@ -9,6 +9,7 @@
 #include <utility>
 #include "common/common_types.h"
 #include "common/polyfill_thread.h"
+#include "video_core/query_cache/types.h"
 #include "video_core/cache_types.h"
 #include "video_core/engines/fermi_2d.h"
 #include "video_core/gpu.h"
@@ -26,11 +27,6 @@ struct ChannelState;
 
 namespace VideoCore {
 
-enum class QueryType {
-    SamplesPassed,
-};
-constexpr std::size_t NumQueryTypes = 1;
-
 enum class LoadCallbackStage {
     Prepare,
     Build,
@@ -58,10 +54,10 @@ public:
     virtual void DispatchCompute() = 0;
 
     /// Resets the counter of a query
-    virtual void ResetCounter(QueryType type) = 0;
+    virtual void ResetCounter(VideoCommon::QueryType type) = 0;
 
     /// Records a GPU query and caches it
-    virtual void Query(GPUVAddr gpu_addr, QueryType type, std::optional<u64> timestamp) = 0;
+    virtual void Query(GPUVAddr gpu_addr, VideoCommon::QueryType type,
+                       VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) = 0;
 
     /// Signal an uniform buffer binding
     virtual void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr,
@@ -83,7 +79,7 @@ public:
     virtual void SignalReference() = 0;
 
     /// Release all pending fences.
-    virtual void ReleaseFences() = 0;
+    virtual void ReleaseFences(bool force = true) = 0;
 
     /// Notify rasterizer that all caches should be flushed to Switch memory
     virtual void FlushAll() = 0;
diff --git a/src/video_core/renderer_null/null_rasterizer.cpp b/src/video_core/renderer_null/null_rasterizer.cpp
index 92ecf6682f..65cd5aa067 100644
--- a/src/video_core/renderer_null/null_rasterizer.cpp
+++ b/src/video_core/renderer_null/null_rasterizer.cpp
@@ -26,16 +26,18 @@ void RasterizerNull::Draw(bool is_indexed, u32 instance_count) {}
 void RasterizerNull::DrawTexture() {}
 void RasterizerNull::Clear(u32 layer_count) {}
 void RasterizerNull::DispatchCompute() {}
-void RasterizerNull::ResetCounter(VideoCore::QueryType type) {}
-void RasterizerNull::Query(GPUVAddr gpu_addr, VideoCore::QueryType type,
-                           std::optional<u64> timestamp) {
+void RasterizerNull::ResetCounter(VideoCommon::QueryType type) {}
+void RasterizerNull::Query(GPUVAddr gpu_addr, VideoCommon::QueryType type,
+                           VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) {
     if (!gpu_memory) {
         return;
     }
-
-    gpu_memory->Write(gpu_addr, u64{0});
-    if (timestamp) {
-        gpu_memory->Write(gpu_addr + 8, *timestamp);
+    if (True(flags & VideoCommon::QueryPropertiesFlags::HasTimeout)) {
+        u64 ticks = m_gpu.GetTicks();
+        gpu_memory->Write<u64>(gpu_addr + 8, ticks);
+        gpu_memory->Write<u64>(gpu_addr, static_cast<u64>(payload));
+    } else {
+        gpu_memory->Write<u32>(gpu_addr, payload);
     }
 }
 void RasterizerNull::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr,
@@ -74,7 +76,7 @@ void RasterizerNull::SignalSyncPoint(u32 value) {
     syncpoint_manager.IncrementHost(value);
 }
 void RasterizerNull::SignalReference() {}
-void RasterizerNull::ReleaseFences() {}
+void RasterizerNull::ReleaseFences(bool) {}
 void RasterizerNull::FlushAndInvalidateRegion(VAddr addr, u64 size, VideoCommon::CacheType) {}
 void RasterizerNull::WaitForIdle() {}
 void RasterizerNull::FragmentBarrier() {}
diff --git a/src/video_core/renderer_null/null_rasterizer.h b/src/video_core/renderer_null/null_rasterizer.h
index 93b9a69714..57a8c4c85b 100644
--- a/src/video_core/renderer_null/null_rasterizer.h
+++ b/src/video_core/renderer_null/null_rasterizer.h
@@ -42,8 +42,8 @@ public:
     void DrawTexture() override;
     void Clear(u32 layer_count) override;
     void DispatchCompute() override;
-    void ResetCounter(VideoCore::QueryType type) override;
-    void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override;
+    void ResetCounter(VideoCommon::QueryType type) override;
+    void Query(GPUVAddr gpu_addr, VideoCommon::QueryType type,
+               VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) override;
     void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) override;
     void DisableGraphicsUniformBuffer(size_t stage, u32 index) override;
     void FlushAll() override;
@@ -63,7 +63,7 @@ public:
     void SyncOperation(std::function<void()>&& func) override;
     void SignalSyncPoint(u32 value) override;
     void SignalReference() override;
-    void ReleaseFences() override;
+    void ReleaseFences(bool force) override;
     void FlushAndInvalidateRegion(
         VAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
     void WaitForIdle() override;
diff --git a/src/video_core/renderer_opengl/gl_query_cache.cpp b/src/video_core/renderer_opengl/gl_query_cache.cpp
index 99d7347f5c..ec142d48ea 100644
--- a/src/video_core/renderer_opengl/gl_query_cache.cpp
+++ b/src/video_core/renderer_opengl/gl_query_cache.cpp
@@ -27,7 +27,7 @@ constexpr GLenum GetTarget(VideoCore::QueryType type) {
 } // Anonymous namespace
 
 QueryCache::QueryCache(RasterizerOpenGL& rasterizer_, Core::Memory::Memory& cpu_memory_)
-    : QueryCacheBase(rasterizer_, cpu_memory_), gl_rasterizer{rasterizer_} {}
+    : QueryCacheLegacy(rasterizer_, cpu_memory_), gl_rasterizer{rasterizer_} {}
 
 QueryCache::~QueryCache() = default;
 
diff --git a/src/video_core/renderer_opengl/gl_query_cache.h b/src/video_core/renderer_opengl/gl_query_cache.h
index 872513f226..0721e0b3df 100644
--- a/src/video_core/renderer_opengl/gl_query_cache.h
+++ b/src/video_core/renderer_opengl/gl_query_cache.h
@@ -26,7 +26,7 @@ class RasterizerOpenGL;
 using CounterStream = VideoCommon::CounterStreamBase<QueryCache, HostCounter>;
 
 class QueryCache final
-    : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> {
+    : public VideoCommon::QueryCacheLegacy<QueryCache, CachedQuery, CounterStream, HostCounter> {
 public:
     explicit QueryCache(RasterizerOpenGL& rasterizer_, Core::Memory::Memory& cpu_memory_);
     ~QueryCache();
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.cpp b/src/video_core/renderer_opengl/gl_rasterizer.cpp
index dd03efecd4..a975bbe750 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.cpp
+++ b/src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -396,13 +396,31 @@ void RasterizerOpenGL::DispatchCompute() {
     has_written_global_memory |= pipeline->WritesGlobalMemory();
 }
 
-void RasterizerOpenGL::ResetCounter(VideoCore::QueryType type) {
-    query_cache.ResetCounter(type);
+void RasterizerOpenGL::ResetCounter(VideoCommon::QueryType type) {
+    if (type == VideoCommon::QueryType::ZPassPixelCount64) {
+        query_cache.ResetCounter(VideoCore::QueryType::SamplesPassed);
+    }
 }
 
-void RasterizerOpenGL::Query(GPUVAddr gpu_addr, VideoCore::QueryType type,
-                             std::optional<u64> timestamp) {
-    query_cache.Query(gpu_addr, type, timestamp);
+void RasterizerOpenGL::Query(GPUVAddr gpu_addr, VideoCommon::QueryType type,
+                             VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) {
+    if (type == VideoCommon::QueryType::ZPassPixelCount64) {
+        if (True(flags & VideoCommon::QueryPropertiesFlags::HasTimeout)) {
+            query_cache.Query(gpu_addr, VideoCore::QueryType::SamplesPassed, {gpu.GetTicks()});
+        } else {
+            query_cache.Query(gpu_addr, VideoCore::QueryType::SamplesPassed, std::nullopt);
+        }
+        return;
+    }
+    if (True(flags & VideoCommon::QueryPropertiesFlags::HasTimeout)) {
+        u64 ticks = gpu.GetTicks();
+        gpu_memory->Write<u64>(gpu_addr + 8, ticks);
+        gpu_memory->Write<u64>(gpu_addr, static_cast<u64>(payload));
+    } else {
+        gpu_memory->Write<u32>(gpu_addr, payload);
+    }
 }
 
 void RasterizerOpenGL::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr,
@@ -573,8 +591,8 @@ void RasterizerOpenGL::SignalReference() {
     fence_manager.SignalOrdering();
 }
 
-void RasterizerOpenGL::ReleaseFences() {
-    fence_manager.WaitPendingFences();
+void RasterizerOpenGL::ReleaseFences(bool force) {
+    fence_manager.WaitPendingFences(force);
 }
 
 void RasterizerOpenGL::FlushAndInvalidateRegion(VAddr addr, u64 size,
diff --git a/src/video_core/renderer_opengl/gl_rasterizer.h b/src/video_core/renderer_opengl/gl_rasterizer.h
index 8eda2ddba6..05e048e155 100644
--- a/src/video_core/renderer_opengl/gl_rasterizer.h
+++ b/src/video_core/renderer_opengl/gl_rasterizer.h
@@ -86,8 +86,8 @@ public:
     void DrawTexture() override;
     void Clear(u32 layer_count) override;
     void DispatchCompute() override;
-    void ResetCounter(VideoCore::QueryType type) override;
-    void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override;
+    void ResetCounter(VideoCommon::QueryType type) override;
+    void Query(GPUVAddr gpu_addr, VideoCommon::QueryType type,
+               VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) override;
     void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) override;
     void DisableGraphicsUniformBuffer(size_t stage, u32 index) override;
     void FlushAll() override;
@@ -107,7 +107,7 @@ public:
     void SyncOperation(std::function<void()>&& func) override;
     void SignalSyncPoint(u32 value) override;
     void SignalReference() override;
-    void ReleaseFences() override;
+    void ReleaseFences(bool force = true) override;
     void FlushAndInvalidateRegion(
         VAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
     void WaitForIdle() override;
diff --git a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
index e15865d16a..d8148e89a6 100644
--- a/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_buffer_cache.cpp
@@ -61,6 +61,9 @@ vk::Buffer CreateBuffer(const Device& device, const MemoryAllocator& memory_allo
     if (device.IsExtTransformFeedbackSupported()) {
         flags |= VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT;
     }
+    if (device.IsExtConditionalRendering()) {
+        flags |= VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT;
+    }
     const VkBufferCreateInfo buffer_ci = {
         .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
         .pNext = nullptr,
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.cpp b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
index 54ee030ce4..97cd4521df 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.cpp
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.cpp
@@ -12,6 +12,7 @@
 #include "common/common_types.h"
 #include "common/div_ceil.h"
 #include "video_core/host_shaders/astc_decoder_comp_spv.h"
+#include "video_core/host_shaders/resolve_conditional_render_comp_spv.h"
 #include "video_core/host_shaders/vulkan_quad_indexed_comp_spv.h"
 #include "video_core/host_shaders/vulkan_uint8_comp_spv.h"
 #include "video_core/renderer_vulkan/vk_compute_pass.h"
@@ -302,6 +303,52 @@ std::pair<VkBuffer, VkDeviceSize> QuadIndexedPass::Assemble(
     return {staging.buffer, staging.offset};
 }
 
+ConditionalRenderingResolvePass::ConditionalRenderingResolvePass(
+    const Device& device_, Scheduler& scheduler_, DescriptorPool& descriptor_pool_,
+    ComputePassDescriptorQueue& compute_pass_descriptor_queue_)
+    : ComputePass(device_, descriptor_pool_, INPUT_OUTPUT_DESCRIPTOR_SET_BINDINGS,
+                  INPUT_OUTPUT_DESCRIPTOR_UPDATE_TEMPLATE, INPUT_OUTPUT_BANK_INFO, nullptr,
+                  RESOLVE_CONDITIONAL_RENDER_COMP_SPV),
+      scheduler{scheduler_}, compute_pass_descriptor_queue{compute_pass_descriptor_queue_} {}
+
+void ConditionalRenderingResolvePass::Resolve(VkBuffer dst_buffer, VkBuffer src_buffer,
+                                              u32 src_offset, bool compare_to_zero) {
+    const size_t compare_size = compare_to_zero ? 8 : 24;
+
+    compute_pass_descriptor_queue.Acquire();
+    compute_pass_descriptor_queue.AddBuffer(src_buffer, src_offset, compare_size);
+    compute_pass_descriptor_queue.AddBuffer(dst_buffer, 0, sizeof(u32));
+    const void* const descriptor_data{compute_pass_descriptor_queue.UpdateData()};
+
+    scheduler.RequestOutsideRenderPassOperationContext();
+    scheduler.Record([this, descriptor_data](vk::CommandBuffer cmdbuf) {
+        static constexpr VkMemoryBarrier read_barrier{
+            .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
+            .pNext = nullptr,
+            .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
+            .dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
+        };
+        static constexpr VkMemoryBarrier write_barrier{
+            .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
+            .pNext = nullptr,
+            .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
+            .dstAccessMask = VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT,
+        };
+        const VkDescriptorSet set = descriptor_allocator.Commit();
+        device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data);
+
+        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
+                               VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, read_barrier);
+        cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
+        cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {});
+        cmdbuf.Dispatch(1, 1, 1);
+        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
+                               VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT, 0, write_barrier);
+    });
+}
+
 ASTCDecoderPass::ASTCDecoderPass(const Device& device_, Scheduler& scheduler_,
                                  DescriptorPool& descriptor_pool_,
                                  StagingBufferPool& staging_buffer_pool_,
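`Resolve` collapses the compared report bytes into a single `u32` that `VK_EXT_conditional_rendering` can consume directly. A sketch of the intended consumption, assuming the `vk::CommandBuffer` wrapper exposes the EXT entry points (the `vulkan_wrapper.*` additions in this patch) and that `resolve_pass`, `result_buffer`, `report_buffer` and `report_offset` exist:

    resolve_pass.Resolve(result_buffer, report_buffer, report_offset,
                         /*compare_to_zero=*/false);
    const VkConditionalRenderingBeginInfoEXT begin_info{
        .sType = VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT,
        .pNext = nullptr,
        .buffer = result_buffer, // u32 written by the resolve pass
        .offset = 0,
        .flags = 0, // predicate: draw while the value is non-zero
    };
    scheduler.Record([begin_info](vk::CommandBuffer cmdbuf) {
        cmdbuf.BeginConditionalRenderingEXT(begin_info);
        // ... conditionally executed draws ...
        cmdbuf.EndConditionalRenderingEXT();
    });
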
diff --git a/src/video_core/renderer_vulkan/vk_compute_pass.h b/src/video_core/renderer_vulkan/vk_compute_pass.h
index dd39273763..c62f30d300 100644
--- a/src/video_core/renderer_vulkan/vk_compute_pass.h
+++ b/src/video_core/renderer_vulkan/vk_compute_pass.h
@@ -82,6 +82,19 @@ private:
     ComputePassDescriptorQueue& compute_pass_descriptor_queue;
 };
 
+class ConditionalRenderingResolvePass final : public ComputePass {
+public:
+    explicit ConditionalRenderingResolvePass(
+        const Device& device_, Scheduler& scheduler_, DescriptorPool& descriptor_pool_,
+        ComputePassDescriptorQueue& compute_pass_descriptor_queue_);
+
+    void Resolve(VkBuffer dst_buffer, VkBuffer src_buffer, u32 src_offset, bool compare_to_zero);
+
+private:
+    Scheduler& scheduler;
+    ComputePassDescriptorQueue& compute_pass_descriptor_queue;
+};
+
 class ASTCDecoderPass final : public ComputePass {
 public:
     explicit ASTCDecoderPass(const Device& device_, Scheduler& scheduler_,
diff --git a/src/video_core/renderer_vulkan/vk_fence_manager.h b/src/video_core/renderer_vulkan/vk_fence_manager.h
index 145359d4e7..14fc5ad714 100644
--- a/src/video_core/renderer_vulkan/vk_fence_manager.h
+++ b/src/video_core/renderer_vulkan/vk_fence_manager.h
@@ -8,6 +8,7 @@
 #include "video_core/fence_manager.h"
 #include "video_core/renderer_vulkan/vk_buffer_cache.h"
 #include "video_core/renderer_vulkan/vk_texture_cache.h"
+#include "video_core/renderer_vulkan/vk_query_cache.h"
 
 namespace Core {
 class System;
@@ -20,7 +21,6 @@ class RasterizerInterface;
 namespace Vulkan {
 
 class Device;
-class QueryCache;
 class Scheduler;
 
 class InnerFence : public VideoCommon::FenceBase {
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.cpp b/src/video_core/renderer_vulkan/vk_query_cache.cpp
index 29e0b797bd..42f5710072 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.cpp
+++ b/src/video_core/renderer_vulkan/vk_query_cache.cpp
@@ -1,139 +1,1223 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
 
-#include <algorithm>
 #include <cstddef>
+#include <limits>
+#include <map>
+#include <memory>
+#include <span>
+#include <type_traits>
+#include <unordered_map>
 #include <utility>
 #include <vector>
 
+#include <boost/container/small_vector.hpp>
+#include <boost/icl/interval_set.hpp>
+
+#include "common/common_types.h"
+#include "core/memory.h"
+#include "video_core/query_cache/query_cache.h"
+#include "video_core/renderer_vulkan/vk_buffer_cache.h"
+#include "video_core/renderer_vulkan/vk_compute_pass.h"
 #include "video_core/renderer_vulkan/vk_query_cache.h"
 #include "video_core/renderer_vulkan/vk_resource_pool.h"
 #include "video_core/renderer_vulkan/vk_scheduler.h"
+#include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
+#include "video_core/renderer_vulkan/vk_update_descriptor.h"
 #include "video_core/vulkan_common/vulkan_device.h"
+#include "video_core/vulkan_common/vulkan_memory_allocator.h"
 #include "video_core/vulkan_common/vulkan_wrapper.h"
 
 namespace Vulkan {
 
-using VideoCore::QueryType;
+using VideoCommon::QueryType;
 
 namespace {
+class SamplesQueryBank : public VideoCommon::BankBase {
+public:
+    static constexpr size_t BANK_SIZE = 256;
+    static constexpr size_t QUERY_SIZE = 8;
+    SamplesQueryBank(const Device& device_, size_t index_)
+        : BankBase(BANK_SIZE), device{device_}, index{index_} {
+        const auto& dev = device.GetLogical();
+        query_pool = dev.CreateQueryPool({
+            .sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
+            .pNext = nullptr,
+            .flags = 0,
+            .queryType = VK_QUERY_TYPE_OCCLUSION,
+            .queryCount = BANK_SIZE,
+            .pipelineStatistics = 0,
+        });
+        Reset();
+    }
 
-constexpr std::array QUERY_TARGETS = {VK_QUERY_TYPE_OCCLUSION};
+    ~SamplesQueryBank() = default;
 
-constexpr VkQueryType GetTarget(QueryType type) {
-    return QUERY_TARGETS[static_cast<std::size_t>(type)];
-}
+    void Reset() override {
+        ASSERT(references == 0);
+        VideoCommon::BankBase::Reset();
+        const auto& dev = device.GetLogical();
+        dev.ResetQueryPool(*query_pool, 0, BANK_SIZE);
+        host_results.fill(0ULL);
+        next_bank = 0;
+    }
 
-} // Anonymous namespace
+    void Sync(size_t start, size_t size) {
+        const auto& dev = device.GetLogical();
+        const VkResult query_result = dev.GetQueryResults(
+            *query_pool, static_cast<u32>(start), static_cast<u32>(size), sizeof(u64) * size,
+            &host_results[start], sizeof(u64), VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
+        switch (query_result) {
+        case VK_SUCCESS:
+            return;
+        case VK_ERROR_DEVICE_LOST:
+            device.ReportLoss();
+            [[fallthrough]];
+        default:
+            throw vk::Exception(query_result);
+        }
+    }
 
-QueryPool::QueryPool(const Device& device_, Scheduler& scheduler, QueryType type_)
-    : ResourcePool{scheduler.GetMasterSemaphore(), GROW_STEP}, device{device_}, type{type_} {}
+    VkQueryPool GetInnerPool() {
+        return *query_pool;
+    }
 
-QueryPool::~QueryPool() = default;
+    size_t GetIndex() const {
+        return index;
+    }
 
-std::pair<VkQueryPool, u32> QueryPool::Commit() {
-    std::size_t index;
-    do {
-        index = CommitResource();
-    } while (usage[index]);
-    usage[index] = true;
+    const std::array<u64, BANK_SIZE>& GetResults() const {
+        return host_results;
+    }
 
-    return {*pools[index / GROW_STEP], static_cast<u32>(index % GROW_STEP)};
-}
+    size_t next_bank;
 
-void QueryPool::Allocate(std::size_t begin, std::size_t end) {
-    usage.resize(end);
+private:
+    const Device& device;
+    const size_t index;
+    vk::QueryPool query_pool;
+    std::array<u64, BANK_SIZE> host_results;
+};
 
-    pools.push_back(device.GetLogical().CreateQueryPool({
-        .sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
-        .pNext = nullptr,
-        .flags = 0,
-        .queryType = GetTarget(type),
-        .queryCount = static_cast<u32>(end - begin),
-        .pipelineStatistics = 0,
-    }));
-}
+using BaseStreamer = VideoCommon::SimpleStreamer<VideoCommon::HostQueryBase>;
 
-void QueryPool::Reserve(std::pair<VkQueryPool, u32> query) {
-    const auto it =
-        std::find_if(pools.begin(), pools.end(), [query_pool = query.first](vk::QueryPool& pool) {
-            return query_pool == *pool;
+struct HostSyncValues {
+    VAddr address;
+    size_t size;
+    size_t offset;
+
+    static constexpr bool GeneratesBaseBuffer = false;
+};
+
+template <typename Traits>
+class SamplesStreamer : public BaseStreamer {
+public:
+    SamplesStreamer(size_t id, QueryCacheRuntime& runtime_, const Device& device_,
+                    Scheduler& scheduler_, const MemoryAllocator& memory_allocator_)
+        : BaseStreamer(id), runtime{runtime_}, device{device_}, scheduler{scheduler_},
+          memory_allocator{memory_allocator_} {
+        BuildResolveBuffer();
+        current_bank = nullptr;
+        current_query = nullptr;
+    }
+
+    void StartCounter() override {
+        if (has_started) {
+            return;
+        }
+        ReserveHostQuery();
+        scheduler.Record([query_pool = current_query_pool,
+                          query_index = current_bank_slot](vk::CommandBuffer cmdbuf) {
+            const bool use_precise = Settings::IsGPULevelHigh();
+            cmdbuf.BeginQuery(query_pool, static_cast<u32>(query_index),
+                              use_precise ? VK_QUERY_CONTROL_PRECISE_BIT : 0);
+        });
+        has_started = true;
+    }
+
+    void PauseCounter() override {
+        if (!has_started) {
+            return;
+        }
+        scheduler.Record([query_pool = current_query_pool,
+                          query_index = current_bank_slot](vk::CommandBuffer cmdbuf) {
+            cmdbuf.EndQuery(query_pool, static_cast<u32>(query_index));
+        });
+        has_started = false;
+    }
+
+    void ResetCounter() override {
+        if (has_started) {
+            PauseCounter();
+        }
+        AbandonCurrentQuery();
+    }
+
+    void CloseCounter() override {
+        PauseCounter();
+    }
+
+    bool HasPendingSync() override {
+        return !pending_sync.empty();
+    }
+
+    void SyncWrites() override {
+        if (sync_values_stash.empty()) {
+            return;
+        }
+
+        for (size_t i = 0; i < sync_values_stash.size(); i++) {
+            runtime.template SyncValues<HostSyncValues>(sync_values_stash[i], *resolve_buffers[i]);
+        }
+
+        sync_values_stash.clear();
+    }
+
+    void PresyncWrites() override {
+        if (pending_sync.empty()) {
+            return;
+        }
+        PauseCounter();
+        sync_values_stash.clear();
+        sync_values_stash.emplace_back();
+        std::vector<HostSyncValues>* sync_values = &sync_values_stash.back();
+        sync_values->reserve(resolve_slots * SamplesQueryBank::BANK_SIZE);
+        std::unordered_map<size_t, std::pair<size_t, size_t>> offsets;
+        size_t this_bank_id = std::numeric_limits<size_t>::max();
+        size_t resolve_slots_remaining = resolve_slots;
+        size_t resolve_buffer_index = 0;
+        ApplyBanksWideOp<true>(pending_sync, [&](SamplesQueryBank* bank, size_t start,
+                                                 size_t amount) {
+            size_t bank_id = bank->GetIndex();
+            if (this_bank_id != bank_id) {
+                this_bank_id = bank_id;
+                if (resolve_slots_remaining == 0) {
+                    resolve_buffer_index++;
+                    if (resolve_buffer_index >= resolve_buffers.size()) {
+                        BuildResolveBuffer();
+                    }
+                    resolve_slots_remaining = resolve_slots;
+                    sync_values_stash.emplace_back();
+                    sync_values = &sync_values_stash.back();
+                    sync_values->reserve(resolve_slots * SamplesQueryBank::BANK_SIZE);
+                }
+                resolve_slots_remaining--;
+            }
+            auto& resolve_buffer = resolve_buffers[resolve_buffer_index];
+            const size_t base_offset = SamplesQueryBank::QUERY_SIZE * SamplesQueryBank::BANK_SIZE *
+                                       (resolve_slots - resolve_slots_remaining - 1);
+            VkQueryPool query_pool = bank->GetInnerPool();
+            scheduler.Record([start, amount, base_offset, query_pool,
+                              buffer = *resolve_buffer](vk::CommandBuffer cmdbuf) {
+                size_t final_offset = base_offset + start * SamplesQueryBank::QUERY_SIZE;
+                const VkBufferMemoryBarrier copy_query_pool_barrier{
+                    .sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
+                    .pNext = nullptr,
+                    .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
+                    .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
+                    .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                    .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+                    .buffer = buffer,
+                    .offset = final_offset,
+                    .size = amount * SamplesQueryBank::QUERY_SIZE,
+                };
+
+                cmdbuf.CopyQueryPoolResults(
+                    query_pool, static_cast<u32>(start), static_cast<u32>(amount), buffer,
+                    static_cast<u32>(final_offset), SamplesQueryBank::QUERY_SIZE,
+                    VK_QUERY_RESULT_WAIT_BIT | VK_QUERY_RESULT_64_BIT);
+                cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
+                                       VK_PIPELINE_STAGE_TRANSFER_BIT, 0, copy_query_pool_barrier);
+            });
+            offsets[bank_id] = {sync_values_stash.size() - 1, base_offset};
         });
 
-    if (it != std::end(pools)) {
-        const std::ptrdiff_t pool_index = std::distance(std::begin(pools), it);
-        usage[pool_index * GROW_STEP + static_cast<std::ptrdiff_t>(query.second)] = false;
+        // Convert queries
+        for (auto q : pending_sync) {
+            auto* query = GetQuery(q);
+            if (True(query->flags & VideoCommon::QueryFlagBits::IsRewritten)) {
+                continue;
+            }
+            if (True(query->flags & VideoCommon::QueryFlagBits::IsInvalidated)) {
+                continue;
+            }
+            if (query->size_slots > 1) {
+                // This is problematic.
+                UNIMPLEMENTED();
+            }
+            query->flags |= VideoCommon::QueryFlagBits::IsHostSynced;
+            auto loc_data = offsets[query->start_bank_id];
+            sync_values_stash[loc_data.first].emplace_back(HostSyncValues{
+                .address = query->guest_address,
+                .size = SamplesQueryBank::QUERY_SIZE,
+                .offset = loc_data.second + query->start_slot * SamplesQueryBank::QUERY_SIZE,
+            });
+        }
+
+        AbandonCurrentQuery();
+        pending_sync.clear();
     }
-}
 
-QueryCache::QueryCache(VideoCore::RasterizerInterface& rasterizer_,
-                       Core::Memory::Memory& cpu_memory_, const Device& device_,
-                       Scheduler& scheduler_)
-    : QueryCacheBase{rasterizer_, cpu_memory_}, device{device_}, scheduler{scheduler_},
-      query_pools{
-          QueryPool{device_, scheduler_, QueryType::SamplesPassed},
-      } {}
-
-QueryCache::~QueryCache() {
-    // TODO(Rodrigo): This is a hack to destroy all HostCounter instances before the base class
-    // destructor is called. The query cache should be redesigned to have a proper ownership model
-    // instead of using shared pointers.
-    for (size_t query_type = 0; query_type < VideoCore::NumQueryTypes; ++query_type) {
-        auto& stream = Stream(static_cast<QueryType>(query_type));
-        stream.Update(false);
-        stream.Reset();
+    size_t WriteCounter(VAddr address, bool has_timestamp, u32 value,
+                        [[maybe_unused]] std::optional<u32> subreport) override {
+        auto index = BuildQuery();
+        auto* new_query = GetQuery(index);
+        new_query->guest_address = address;
+        new_query->value = 100;
+        new_query->flags &= ~VideoCommon::QueryFlagBits::IsOrphan;
+        if (has_timestamp) {
+            new_query->flags |= VideoCommon::QueryFlagBits::HasTimestamp;
+        }
+        if (!current_query) {
+            new_query->flags |= VideoCommon::QueryFlagBits::IsFinalValueSynced;
+            return index;
+        }
+        new_query->start_bank_id = current_query->start_bank_id;
+        new_query->size_banks = current_query->size_banks;
+        new_query->start_slot = current_query->start_slot;
+        new_query->size_slots = current_query->size_slots;
+        ApplyBankOp(new_query, [](SamplesQueryBank* bank, size_t start, size_t amount) {
+            bank->AddReference(amount);
+        });
+        pending_sync.push_back(index);
+        pending_flush_queries.push_back(index);
+        return index;
     }
-}
 
-std::pair<VkQueryPool, u32> QueryCache::AllocateQuery(QueryType type) {
-    return query_pools[static_cast<std::size_t>(type)].Commit();
-}
-
-void QueryCache::Reserve(QueryType type, std::pair<VkQueryPool, u32> query) {
-    query_pools[static_cast<std::size_t>(type)].Reserve(query);
-}
-
-HostCounter::HostCounter(QueryCache& cache_, std::shared_ptr<HostCounter> dependency_,
-                         QueryType type_)
-    : HostCounterBase{std::move(dependency_)}, cache{cache_}, type{type_},
-      query{cache_.AllocateQuery(type_)}, tick{cache_.GetScheduler().CurrentTick()} {
-    const vk::Device* logical = &cache.GetDevice().GetLogical();
-    cache.GetScheduler().Record([logical, query_ = query](vk::CommandBuffer cmdbuf) {
-        const bool use_precise = Settings::IsGPULevelHigh();
-        logical->ResetQueryPool(query_.first, query_.second, 1);
-        cmdbuf.BeginQuery(query_.first, query_.second,
-                          use_precise ? VK_QUERY_CONTROL_PRECISE_BIT : 0);
-    });
-}
-
-HostCounter::~HostCounter() {
-    cache.Reserve(type, query);
-}
-
-void HostCounter::EndQuery() {
-    cache.GetScheduler().Record([query_ = query](vk::CommandBuffer cmdbuf) {
-        cmdbuf.EndQuery(query_.first, query_.second);
-    });
-}
-
-u64 HostCounter::BlockingQuery(bool async) const {
-    if (!async) {
-        cache.GetScheduler().Wait(tick);
+    bool HasUnsyncedQueries() override {
+        return !pending_flush_queries.empty();
     }
-    u64 data;
-    const VkResult query_result = cache.GetDevice().GetLogical().GetQueryResults(
-        query.first, query.second, 1, sizeof(data), &data, sizeof(data),
-        VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
 
-    switch (query_result) {
-    case VK_SUCCESS:
-        return data;
-    case VK_ERROR_DEVICE_LOST:
-        cache.GetDevice().ReportLoss();
-        [[fallthrough]];
+    void PushUnsyncedQueries() override {
+        PauseCounter();
+        {
+            std::scoped_lock lk(flush_guard);
+            pending_flush_sets.emplace_back(std::move(pending_flush_queries));
+        }
+    }
+
+    void PopUnsyncedQueries() override {
+        std::vector<size_t> current_flush_queries;
+        {
+            std::scoped_lock lk(flush_guard);
+            current_flush_queries = std::move(pending_flush_sets.front());
+            pending_flush_sets.pop_front();
+        }
+        ApplyBanksWideOp<false>(
+            current_flush_queries,
+            [](SamplesQueryBank* bank, size_t start, size_t amount) { bank->Sync(start, amount); });
+        for (auto q : current_flush_queries) {
+            auto* query = GetQuery(q);
+            u64 total = 0;
+            ApplyBankOp(query, [&total](SamplesQueryBank* bank, size_t start, size_t amount) {
+                const auto& results = bank->GetResults();
+                for (size_t i = 0; i < amount; i++) {
+                    total += results[start + i];
+                }
+            });
+            query->value = total;
+            query->flags |= VideoCommon::QueryFlagBits::IsFinalValueSynced;
+        }
+    }
+
+private:
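+    // A query may span several banks. Walk the chain starting at start_bank_id,
+    // following each bank's next_bank link (stored off by one: next_bank - 1 is
+    // the next index), applying `func` to the slot range occupied in each bank.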
+    template <typename Func>
+    void ApplyBankOp(VideoCommon::HostQueryBase* query, Func&& func) {
+        size_t size_slots = query->size_slots;
+        if (size_slots == 0) {
+            return;
+        }
+        size_t bank_id = query->start_bank_id;
+        size_t banks_set = query->size_banks;
+        size_t start_slot = query->start_slot;
+        for (size_t i = 0; i < banks_set; i++) {
+            auto& the_bank = bank_pool.GetBank(bank_id);
+            size_t amount = std::min(the_bank.Size() - start_slot, size_slots);
+            func(&the_bank, start_slot, amount);
+            bank_id = the_bank.next_bank - 1;
+            start_slot = 0;
+            size_slots -= amount;
+        }
+    }
+
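+    // Collapses the slot ranges of many queries into a single [min, max) range
+    // per bank so that every bank is visited at most once; an ordered map is
+    // used when the banks must be processed in index order.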
+    template <bool is_ordered, typename Func>
+    void ApplyBanksWideOp(std::vector<size_t>& queries, Func&& func) {
+        std::conditional_t<is_ordered, std::map<size_t, std::pair<size_t, size_t>>,
+                           std::unordered_map<size_t, std::pair<size_t, size_t>>>
+            indexer;
+        for (auto q : queries) {
+            auto* query = GetQuery(q);
+            ApplyBankOp(query, [&indexer](SamplesQueryBank* bank, size_t start, size_t amount) {
+                auto id = bank->GetIndex();
+                auto pair = indexer.try_emplace(id, std::numeric_limits<size_t>::max(),
+                                                std::numeric_limits<size_t>::min());
+                auto& current_pair = pair.first->second;
+                current_pair.first = std::min(current_pair.first, start);
+                current_pair.second = std::max(current_pair.second, amount + start);
+            });
+        }
+        for (auto& cont : indexer) {
+            func(&bank_pool.GetBank(cont.first), cont.second.first,
+                 cont.second.second - cont.second.first);
+        }
+    }
+
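+    // Reserves a fresh bank from the pool and links the previous bank to it so
+    // that ApplyBankOp can follow the chain later.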
+    void ReserveBank() {
+        current_bank_id =
+            bank_pool.ReserveBank([this](std::deque<SamplesQueryBank>& queue, size_t index) {
+                queue.emplace_back(device, index);
+            });
+        if (current_bank) {
+            current_bank->next_bank = current_bank_id + 1;
+        }
+        current_bank = &bank_pool.GetBank(current_bank_id);
+        current_query_pool = current_bank->GetInnerPool();
+    }
+
+    size_t ReserveBankSlot() {
+        if (!current_bank || current_bank->IsClosed()) {
+            ReserveBank();
+        }
+        auto [built, index] = current_bank->Reserve();
+        current_bank_slot = index;
+        return index;
+    }
+
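+    // Grows the current query by one slot, first walking its bank chain so a
+    // bank is not counted twice when the new slot lands in an already linked
+    // bank. If no query is in flight, a new one is started on the current bank.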
+    void ReserveHostQuery() {
+        size_t new_slot = ReserveBankSlot();
+        current_bank->AddReference(1);
+        if (current_query) {
+            size_t bank_id = current_query->start_bank_id;
+            size_t banks_set = current_query->size_banks - 1;
+            bool found = bank_id == current_bank_id;
+            while (!found && banks_set > 0) {
+                SamplesQueryBank& some_bank = bank_pool.GetBank(bank_id);
+                bank_id = some_bank.next_bank - 1;
+                found = bank_id == current_bank_id;
+                banks_set--;
+            }
+            if (!found) {
+                current_query->size_banks++;
+            }
+            current_query->size_slots++;
+        } else {
+            current_query_id = BuildQuery();
+            current_query = GetQuery(current_query_id);
+            current_query->start_bank_id = static_cast<u32>(current_bank_id);
+            current_query->size_banks = 1;
+            current_query->start_slot = new_slot;
+            current_query->size_slots = 1;
+        }
+    }
+
+    void Free(size_t query_id) override {
+        std::scoped_lock lk(guard);
+        auto* query = GetQuery(query_id);
+        ApplyBankOp(query, [](SamplesQueryBank* bank, size_t start, size_t amount) {
+            bank->CloseReference(amount);
+        });
+        ReleaseQuery(query_id);
+    }
+
+    void AbandonCurrentQuery() {
+        if (!current_query) {
+            return;
+        }
+        Free(current_query_id);
+        current_query = nullptr;
+        current_query_id = 0;
+    }
+
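+    // Allocates a device-local buffer sized to hold `resolve_slots` banks'
+    // worth of query results for GPU-side resolves.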
+    void BuildResolveBuffer() {
+        const VkBufferCreateInfo buffer_ci = {
+            .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+            .pNext = nullptr,
+            .flags = 0,
+            .size = SamplesQueryBank::QUERY_SIZE * SamplesQueryBank::BANK_SIZE * resolve_slots,
+            .usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
+            .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+            .queueFamilyIndexCount = 0,
+            .pQueueFamilyIndices = nullptr,
+        };
+        resolve_buffers.emplace_back(
+            memory_allocator.CreateBuffer(buffer_ci, MemoryUsage::DeviceLocal));
+    }
+
+    static constexpr size_t resolve_slots = 8;
+
+    QueryCacheRuntime& runtime;
+    const Device& device;
+    Scheduler& scheduler;
+    const MemoryAllocator& memory_allocator;
+    VideoCommon::BankPool<SamplesQueryBank> bank_pool;
+    std::deque<vk::Buffer> resolve_buffers;
+    std::deque<std::vector<HostSyncValues>> sync_values_stash;
+
+    // syncing queue
+    std::vector<size_t> pending_sync;
+
+    // flush levels
+    std::vector<size_t> pending_flush_queries;
+    std::deque<std::vector<size_t>> pending_flush_sets;
+
+    // state machine
+    size_t current_bank_slot{};
+    size_t current_bank_id{};
+    SamplesQueryBank* current_bank{};
+    VkQueryPool current_query_pool{};
+    size_t current_query_id{};
+    VideoCommon::HostQueryBase* current_query{};
+    bool has_started{};
+    std::mutex flush_guard;
+};
+
+// Transform feedback queries
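+// Each bank owns a device-local buffer of BANK_SIZE 32-bit counters; transform
+// feedback counter values are copied into its slots and later read back through
+// a staging buffer.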
+class TFBQueryBank : public VideoCommon::BankBase {
+public:
+    static constexpr size_t BANK_SIZE = 1024;
+    static constexpr size_t QUERY_SIZE = 4;
+    TFBQueryBank(Scheduler& scheduler_, const MemoryAllocator& memory_allocator, size_t index_)
+        : BankBase(BANK_SIZE), scheduler{scheduler_}, index{index_} {
+        const VkBufferCreateInfo buffer_ci = {
+            .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+            .pNext = nullptr,
+            .flags = 0,
+            .size = QUERY_SIZE * BANK_SIZE,
+            .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,
+            .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+            .queueFamilyIndexCount = 0,
+            .pQueueFamilyIndices = nullptr,
+        };
+        buffer = memory_allocator.CreateBuffer(buffer_ci, MemoryUsage::DeviceLocal);
+    }
+
+    ~TFBQueryBank() = default;
+
+    void Reset() override {
+        ASSERT(references == 0);
+        VideoCommon::BankBase::Reset();
+    }
+
+    void Sync(StagingBufferRef& staging_buffer, size_t extra_offset, size_t start, size_t size) {
+        scheduler.RequestOutsideRenderPassOperationContext();
+        scheduler.Record([this, dst_buffer = staging_buffer.buffer, extra_offset, start,
+                          size](vk::CommandBuffer cmdbuf) {
+            std::array<VkBufferCopy, 1> copy{VkBufferCopy{
+                .srcOffset = start * QUERY_SIZE,
+                .dstOffset = extra_offset,
+                .size = size * QUERY_SIZE,
+            }};
+            cmdbuf.CopyBuffer(*buffer, dst_buffer, copy);
+        });
+    }
+
+    size_t GetIndex() const {
+        return index;
+    }
+
+    VkBuffer GetBuffer() const {
+        return *buffer;
+    }
+
+private:
+    Scheduler& scheduler;
+    const size_t index;
+    vk::Buffer buffer;
+};
+
+template <typename Traits>
+class TFBCounterStreamer : public BaseStreamer {
+public:
+    TFBCounterStreamer(size_t id, QueryCacheRuntime& runtime_, const Device& device_,
+                       Scheduler& scheduler_, const MemoryAllocator& memory_allocator_,
+                       StagingBufferPool& staging_pool_)
+        : BaseStreamer(id), runtime{runtime_}, device{device_}, scheduler{scheduler_},
+          memory_allocator{memory_allocator_}, staging_pool{staging_pool_} {
+        buffers_count = 0;
+        current_bank = nullptr;
+        counter_buffers.fill(VK_NULL_HANDLE);
+        offsets.fill(0);
+        const VkBufferCreateInfo buffer_ci = {
+            .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+            .pNext = nullptr,
+            .flags = 0,
+            .size = TFBQueryBank::QUERY_SIZE * NUM_STREAMS,
+            .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
+                     VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT,
+            .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+            .queueFamilyIndexCount = 0,
+            .pQueueFamilyIndices = nullptr,
+        };
+
+        counters_buffer = memory_allocator.CreateBuffer(buffer_ci, MemoryUsage::DeviceLocal);
+        for (auto& c : counter_buffers) {
+            c = *counters_buffer;
+        }
+        size_t base_offset = 0;
+        for (auto& o : offsets) {
+            o = base_offset;
+            base_offset += TFBQueryBank::QUERY_SIZE;
+        }
+    }
+
+    void StartCounter() override {
+        FlushBeginTFB();
+        has_started = true;
+    }
+
+    void PauseCounter() override {
+        CloseCounter();
+    }
+
+    void ResetCounter() override {
+        CloseCounter();
+    }
+
+    void CloseCounter() override {
+        if (has_flushed_end_pending) {
+            FlushEndTFB();
+        }
+        runtime.View3DRegs([this](Tegra::Engines::Maxwell3D::Regs& regs) {
+            if (regs.transform_feedback_enabled == 0) {
+                streams_mask = 0;
+                has_started = false;
+            }
+        });
+    }
+
+    bool HasPendingSync() override {
+        return !pending_sync.empty();
+    }
+
+    void SyncWrites() override {
+        CloseCounter();
+        std::unordered_map<size_t, std::vector<HostSyncValues>> sync_values_stash;
+        for (auto q : pending_sync) {
+            auto* query = GetQuery(q);
+            if (True(query->flags & VideoCommon::QueryFlagBits::IsRewritten)) {
+                continue;
+            }
+            if (True(query->flags & VideoCommon::QueryFlagBits::IsInvalidated)) {
+                continue;
+            }
+            query->flags |= VideoCommon::QueryFlagBits::IsHostSynced;
+            sync_values_stash.try_emplace(query->start_bank_id);
+            sync_values_stash[query->start_bank_id].emplace_back(HostSyncValues{
+                .address = query->guest_address,
+                .size = TFBQueryBank::QUERY_SIZE,
+                .offset = query->start_slot * TFBQueryBank::QUERY_SIZE,
+            });
+        }
+        for (auto& p : sync_values_stash) {
+            auto& bank = bank_pool.GetBank(p.first);
+            runtime.template SyncValues<HostSyncValues>(p.second, bank.GetBuffer());
+        }
+        pending_sync.clear();
+    }
+
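+    // Reports a counter at `address`. Without a subreport, or when the stream
+    // never produced data, the value is known to be zero and is synced
+    // immediately; otherwise the live counter is snapshotted into a bank slot.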
+    size_t WriteCounter(VAddr address, bool has_timestamp, u32 value,
+                        std::optional<u32> subreport_) override {
+        auto index = BuildQuery();
+        auto* new_query = GetQuery(index);
+        new_query->guest_address = address;
+        new_query->value = 0;
+        new_query->flags &= ~VideoCommon::QueryFlagBits::IsOrphan;
+        if (has_timestamp) {
+            new_query->flags |= VideoCommon::QueryFlagBits::HasTimestamp;
+        }
+        if (!subreport_) {
+            new_query->flags |= VideoCommon::QueryFlagBits::IsFinalValueSynced;
+            return index;
+        }
+        const size_t subreport = static_cast<size_t>(*subreport_);
+        UpdateBuffers();
+        if ((streams_mask & (1ULL << subreport)) == 0) {
+            new_query->flags |= VideoCommon::QueryFlagBits::IsFinalValueSynced;
+            return index;
+        }
+        CloseCounter();
+        auto [bank_slot, data_slot] = ProduceCounterBuffer(subreport);
+        new_query->start_bank_id = static_cast<u32>(bank_slot);
+        new_query->size_banks = 1;
+        new_query->start_slot = static_cast<u32>(data_slot);
+        new_query->size_slots = 1;
+        pending_sync.push_back(index);
+        pending_flush_queries.push_back(index);
+        return index;
+    }
+
+    bool HasUnsyncedQueries() override {
+        return !pending_flush_queries.empty();
+    }
+
+    void PushUnsyncedQueries() override {
+        CloseCounter();
+        auto staging_ref = staging_pool.Request(
+            pending_flush_queries.size() * TFBQueryBank::QUERY_SIZE, MemoryUsage::Download, true);
+        size_t offset_base = staging_ref.offset;
+        for (auto q : pending_flush_queries) {
+            auto* query = GetQuery(q);
+            auto& bank = bank_pool.GetBank(query->start_bank_id);
+            bank.Sync(staging_ref, offset_base, query->start_slot, 1);
+            offset_base += TFBQueryBank::QUERY_SIZE;
+            bank.CloseReference();
+        }
+        static constexpr VkMemoryBarrier WRITE_BARRIER{
+            .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
+            .pNext = nullptr,
+            .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
+            .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT,
+        };
+        scheduler.RequestOutsideRenderPassOperationContext();
+        scheduler.Record([](vk::CommandBuffer cmdbuf) {
+            cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
+                                   VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, WRITE_BARRIER);
+        });
+
+        std::scoped_lock lk(flush_guard);
+        for (auto& str : free_queue) {
+            staging_pool.FreeDeferred(str);
+        }
+        free_queue.clear();
+        download_buffers.emplace_back(staging_ref);
+        pending_flush_sets.emplace_back(std::move(pending_flush_queries));
+    }
+
+    void PopUnsyncedQueries() override {
+        StagingBufferRef staging_ref;
+        std::vector<size_t> flushed_queries;
+        {
+            std::scoped_lock lk(flush_guard);
+            staging_ref = download_buffers.front();
+            flushed_queries = std::move(pending_flush_sets.front());
+            download_buffers.pop_front();
+            pending_flush_sets.pop_front();
+        }
+
+        size_t offset_base = staging_ref.offset;
+        for (auto q : flushed_queries) {
+            auto* query = GetQuery(q);
+            u32 result = 0;
+            std::memcpy(&result, staging_ref.mapped_span.data() + offset_base, sizeof(u32));
+            query->value = static_cast<u64>(result);
+            query->flags |= VideoCommon::QueryFlagBits::IsFinalValueSynced;
+            offset_base += TFBQueryBank::QUERY_SIZE;
+        }
+
+        {
+            std::scoped_lock lk(flush_guard);
+            free_queue.emplace_back(staging_ref);
+        }
+    }
+
+private:
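+    // The begin/end of transform feedback is flushed lazily so that pausing and
+    // rebinding counters does not emit redundant begin/end pairs.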
+    void FlushBeginTFB() {
+        if (has_flushed_end_pending) [[unlikely]] {
+            return;
+        }
+        has_flushed_end_pending = true;
+        if (!has_started || buffers_count == 0) {
+            scheduler.Record([](vk::CommandBuffer cmdbuf) {
+                cmdbuf.BeginTransformFeedbackEXT(0, 0, nullptr, nullptr);
+            });
+            UpdateBuffers();
+            return;
+        }
+        scheduler.Record([this, total = static_cast<u32>(buffers_count)](vk::CommandBuffer cmdbuf) {
+            cmdbuf.BeginTransformFeedbackEXT(0, total, counter_buffers.data(), offsets.data());
+        });
+        UpdateBuffers();
+    }
+
+    void FlushEndTFB() {
+        if (!has_flushed_end_pending) [[unlikely]] {
+            UNREACHABLE();
+            return;
+        }
+        has_flushed_end_pending = false;
+
+        if (buffers_count == 0) {
+            scheduler.Record([](vk::CommandBuffer cmdbuf) {
+                cmdbuf.EndTransformFeedbackEXT(0, 0, nullptr, nullptr);
+            });
+        } else {
+            scheduler.Record(
+                [this, total = static_cast<u32>(buffers_count)](vk::CommandBuffer cmdbuf) {
+                    cmdbuf.EndTransformFeedbackEXT(0, total, counter_buffers.data(),
+                                                   offsets.data());
+                });
+        }
+    }
+
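+    // Rebuilds the active stream mask and counter buffer count from the guest's
+    // transform feedback registers.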
+    void UpdateBuffers() {
+        runtime.View3DRegs([this](Tegra::Engines::Maxwell3D::Regs& regs) {
+            buffers_count = 0;
+            for (size_t i = 0; i < Tegra::Engines::Maxwell3D::Regs::NumTransformFeedbackBuffers;
+                 i++) {
+                const auto& tf = regs.transform_feedback;
+                if (tf.buffers[i].enable == 0) {
+                    continue;
+                }
+                const size_t stream = tf.controls[i].stream;
+                streams_mask |= 1ULL << stream;
+                buffers_count = std::max<size_t>(buffers_count, stream + 1);
+            }
+        });
+    }
+
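+    // Copies the live counter of `stream` into a freshly reserved bank slot,
+    // with barriers ordering transform feedback writes before the transfer and
+    // the transfer before any later read. Returns {bank index, slot}.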
+    std::pair<size_t, size_t> ProduceCounterBuffer(size_t stream) {
+        if (current_bank == nullptr || current_bank->IsClosed()) {
+            current_bank_id =
+                bank_pool.ReserveBank([this](std::deque<TFBQueryBank>& queue, size_t index) {
+                    queue.emplace_back(scheduler, memory_allocator, index);
+                });
+            current_bank = &bank_pool.GetBank(current_bank_id);
+        }
+        auto [dont_care, slot] = current_bank->Reserve();
+        current_bank->AddReference();
+
+        static constexpr VkMemoryBarrier READ_BARRIER{
+            .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
+            .pNext = nullptr,
+            .srcAccessMask = VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT,
+            .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT,
+        };
+        static constexpr VkMemoryBarrier WRITE_BARRIER{
+            .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
+            .pNext = nullptr,
+            .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
+            .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT,
+        };
+        scheduler.RequestOutsideRenderPassOperationContext();
+        scheduler.Record([dst_buffer = current_bank->GetBuffer(),
+                          src_buffer = counter_buffers[stream], src_offset = offsets[stream],
+                          slot](vk::CommandBuffer cmdbuf) {
+            cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT,
+                                   VK_PIPELINE_STAGE_TRANSFER_BIT, 0, READ_BARRIER);
+            std::array<VkBufferCopy, 1> copy{VkBufferCopy{
+                .srcOffset = src_offset,
+                .dstOffset = slot * TFBQueryBank::QUERY_SIZE,
+                .size = TFBQueryBank::QUERY_SIZE,
+            }};
+            cmdbuf.CopyBuffer(src_buffer, dst_buffer, copy);
+            cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
+                                   0, WRITE_BARRIER);
+        });
+        return {current_bank_id, slot};
+    }
+
+    static constexpr size_t NUM_STREAMS = 4;
+    static constexpr size_t STREAMS_MASK = (1ULL << NUM_STREAMS) - 1ULL;
+
+    QueryCacheRuntime& runtime;
+    const Device& device;
+    Scheduler& scheduler;
+    const MemoryAllocator& memory_allocator;
+    StagingBufferPool& staging_pool;
+    VideoCommon::BankPool<TFBQueryBank> bank_pool;
+    size_t current_bank_id;
+    TFBQueryBank* current_bank;
+    vk::Buffer counters_buffer;
+
+    // syncing queue
+    std::vector<size_t> pending_sync;
+
+    // flush levels
+    std::vector<size_t> pending_flush_queries;
+    std::deque<StagingBufferRef> download_buffers;
+    std::deque<std::vector<size_t>> pending_flush_sets;
+    std::vector<StagingBufferRef> free_queue;
+    std::mutex flush_guard;
+
+    // state machine
+    bool has_started{};
+    bool has_flushed_end_pending{};
+    size_t buffers_count{};
+    std::array<VkBuffer, NUM_STREAMS> counter_buffers{};
+    std::array<VkDeviceSize, NUM_STREAMS> offsets{};
+    u64 streams_mask{};
+};
+
+} // namespace
+
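+// Private state of QueryCacheRuntime (pimpl): the three streamers plus the
+// buffers and bookkeeping used for host conditional rendering.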
+struct QueryCacheRuntimeImpl {
+    QueryCacheRuntimeImpl(QueryCacheRuntime& runtime, VideoCore::RasterizerInterface* rasterizer_,
+                          Core::Memory::Memory& cpu_memory_, Vulkan::BufferCache& buffer_cache_,
+                          const Device& device_, const MemoryAllocator& memory_allocator_,
+                          Scheduler& scheduler_, StagingBufferPool& staging_pool_,
+                          ComputePassDescriptorQueue& compute_pass_descriptor_queue,
+                          DescriptorPool& descriptor_pool)
+        : rasterizer{rasterizer_}, cpu_memory{cpu_memory_},
+          buffer_cache{buffer_cache_}, device{device_},
+          memory_allocator{memory_allocator_}, scheduler{scheduler_}, staging_pool{staging_pool_},
+          guest_streamer(0, runtime),
+          sample_streamer(static_cast<size_t>(QueryType::ZPassPixelCount64), runtime, device,
+                          scheduler, memory_allocator),
+          tfb_streamer(static_cast<size_t>(QueryType::StreamingByteCount), runtime, device,
+                       scheduler, memory_allocator, staging_pool),
+          hcr_setup{}, hcr_is_set{}, is_hcr_running{} {
+        hcr_setup.sType = VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT;
+        hcr_setup.pNext = nullptr;
+        hcr_setup.flags = 0;
+
+        conditional_resolve_pass = std::make_unique<ConditionalRenderingResolvePass>(
+            device, scheduler, descriptor_pool, compute_pass_descriptor_queue);
+
+        const VkBufferCreateInfo buffer_ci = {
+            .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+            .pNext = nullptr,
+            .flags = 0,
+            .size = sizeof(u32),
+            .usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
+                     VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT,
+            .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+            .queueFamilyIndexCount = 0,
+            .pQueueFamilyIndices = nullptr,
+        };
+        hcr_resolve_buffer = memory_allocator.CreateBuffer(buffer_ci, MemoryUsage::DeviceLocal);
+    }
+
+    VideoCore::RasterizerInterface* rasterizer;
+    Core::Memory::Memory& cpu_memory;
+    Vulkan::BufferCache& buffer_cache;
+
+    const Device& device;
+    const MemoryAllocator& memory_allocator;
+    Scheduler& scheduler;
+    StagingBufferPool& staging_pool;
+
+    // Streamers
+    VideoCommon::GuestStreamer<QueryCacheParams> guest_streamer;
+    SamplesStreamer<QueryCacheParams> sample_streamer;
+    TFBCounterStreamer<QueryCacheParams> tfb_streamer;
+
+    std::vector<std::pair<VAddr, VAddr>> little_cache;
+    std::vector<std::pair<VkBuffer, VkDeviceSize>> buffers_to_upload_to;
+    std::vector<size_t> redirect_cache;
+    std::vector<std::vector<VkBufferCopy>> copies_setup;
+
+    // Host conditional rendering data
+    std::unique_ptr<ConditionalRenderingResolvePass> conditional_resolve_pass;
+    vk::Buffer hcr_resolve_buffer;
+    VkConditionalRenderingBeginInfoEXT hcr_setup;
+    VkBuffer hcr_buffer{};
+    size_t hcr_offset{};
+    bool hcr_is_set;
+    bool is_hcr_running;
+
+    // maxwell3d
+    Tegra::Engines::Maxwell3D* maxwell3d{};
+};
+
+QueryCacheRuntime::QueryCacheRuntime(VideoCore::RasterizerInterface* rasterizer,
+                                     Core::Memory::Memory& cpu_memory_,
+                                     Vulkan::BufferCache& buffer_cache_, const Device& device_,
+                                     const MemoryAllocator& memory_allocator_,
+                                     Scheduler& scheduler_, StagingBufferPool& staging_pool_,
+                                     ComputePassDescriptorQueue& compute_pass_descriptor_queue,
+                                     DescriptorPool& descriptor_pool) {
+    impl = std::make_unique<QueryCacheRuntimeImpl>(
+        *this, rasterizer, cpu_memory_, buffer_cache_, device_, memory_allocator_, scheduler_,
+        staging_pool_, compute_pass_descriptor_queue, descriptor_pool);
+}
+
+void QueryCacheRuntime::Bind3DEngine(Tegra::Engines::Maxwell3D* maxwell3d) {
+    impl->maxwell3d = maxwell3d;
+}
+
+template <typename Func>
+void QueryCacheRuntime::View3DRegs(Func&& func) {
+    func(impl->maxwell3d->regs);
+}
+
+void QueryCacheRuntime::EndHostConditionalRendering() {
+    PauseHostConditionalRendering();
+    impl->hcr_is_set = false;
+    impl->is_hcr_running = false;
+    impl->hcr_buffer = VK_NULL_HANDLE;
+    impl->hcr_offset = 0;
+}
+
+void QueryCacheRuntime::PauseHostConditionalRendering() {
+    if (!impl->hcr_is_set) {
+        return;
+    }
+    if (impl->is_hcr_running) {
+        impl->scheduler.Record(
+            [](vk::CommandBuffer cmdbuf) { cmdbuf.EndConditionalRenderingEXT(); });
+    }
+    impl->is_hcr_running = false;
+}
+
+void QueryCacheRuntime::ResumeHostConditionalRendering() {
+    if (!impl->hcr_is_set) {
+        return;
+    }
+    if (!impl->is_hcr_running) {
+        impl->scheduler.Record([hcr_setup = impl->hcr_setup](vk::CommandBuffer cmdbuf) {
+            cmdbuf.BeginConditionalRenderingEXT(hcr_setup);
+        });
+    }
+    impl->is_hcr_running = true;
+}
+
+void QueryCacheRuntime::HostConditionalRenderingCompareValueImpl(VideoCommon::LookupData object,
+                                                                 bool is_equal) {
+    {
+        std::scoped_lock lk(impl->buffer_cache.mutex);
+        static constexpr auto sync_info = VideoCommon::ObtainBufferSynchronize::FullSynchronize;
+        const auto post_op = VideoCommon::ObtainBufferOperation::DoNothing;
+        const auto [buffer, offset] =
+            impl->buffer_cache.ObtainCPUBuffer(object.address, 8, sync_info, post_op);
+        impl->hcr_buffer = buffer->Handle();
+        impl->hcr_offset = offset;
+    }
+    if (impl->hcr_is_set) {
+        if (impl->hcr_setup.buffer == impl->hcr_buffer &&
+            impl->hcr_setup.offset == impl->hcr_offset) {
+            ResumeHostConditionalRendering();
+            return;
+        }
+        PauseHostConditionalRendering();
+    }
+    impl->hcr_setup.buffer = impl->hcr_buffer;
+    impl->hcr_setup.offset = impl->hcr_offset;
+    impl->hcr_setup.flags = is_equal ? VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT : 0;
+    impl->hcr_is_set = true;
+    impl->is_hcr_running = false;
+    ResumeHostConditionalRendering();
+}
+
+void QueryCacheRuntime::HostConditionalRenderingCompareBCImpl(VAddr address, bool is_equal) {
+    VkBuffer to_resolve;
+    u32 to_resolve_offset;
+    {
+        std::scoped_lock lk(impl->buffer_cache.mutex);
+        static constexpr auto sync_info = VideoCommon::ObtainBufferSynchronize::NoSynchronize;
+        const auto post_op = VideoCommon::ObtainBufferOperation::DoNothing;
+        const auto [buffer, offset] =
+            impl->buffer_cache.ObtainCPUBuffer(address, 24, sync_info, post_op);
+        to_resolve = buffer->Handle();
+        to_resolve_offset = static_cast<u32>(offset);
+    }
+    if (impl->is_hcr_running) {
+        PauseHostConditionalRendering();
+    }
+    impl->conditional_resolve_pass->Resolve(*impl->hcr_resolve_buffer, to_resolve,
+                                            to_resolve_offset, false);
+    impl->hcr_setup.buffer = *impl->hcr_resolve_buffer;
+    impl->hcr_setup.offset = 0;
+    impl->hcr_setup.flags = is_equal ? 0 : VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT;
+    impl->hcr_is_set = true;
+    impl->is_hcr_running = false;
+    ResumeHostConditionalRendering();
+}
+
+bool QueryCacheRuntime::HostConditionalRenderingCompareValue(VideoCommon::LookupData object_1,
+                                                             [[maybe_unused]] bool qc_dirty) {
+    if (!impl->device.IsExtConditionalRendering()) {
+        return false;
+    }
+    HostConditionalRenderingCompareValueImpl(object_1, false);
+    return true;
+}
+
+bool QueryCacheRuntime::HostConditionalRenderingCompareValues(VideoCommon::LookupData object_1,
+                                                              VideoCommon::LookupData object_2,
+                                                              bool qc_dirty, bool equal_check) {
+    if (!impl->device.IsExtConditionalRendering()) {
+        return false;
+    }
+
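+    // Classify each comparand: resident in the query cache (qc), GPU-modified
+    // in the buffer cache (bc), or plain guest memory. Host conditional
+    // rendering only pays off when at least one side lives in an accelerated
+    // cache.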
+    const auto check_in_bc = [&](VAddr address) {
+        return impl->buffer_cache.IsRegionGpuModified(address, 8);
+    };
+    const auto check_value = [&](VAddr address) {
+        u8* ptr = impl->cpu_memory.GetPointer(address);
+        u64 value{};
+        std::memcpy(&value, ptr, sizeof(value));
+        return value == 0;
+    };
+    std::array<VideoCommon::LookupData*, 2> objects{&object_1, &object_2};
+    std::array<bool, 2> is_in_bc{};
+    std::array<bool, 2> is_in_qc{};
+    std::array<bool, 2> is_in_ac{};
+    std::array<bool, 2> is_null{};
+    {
+        std::scoped_lock lk(impl->buffer_cache.mutex);
+        for (size_t i = 0; i < 2; i++) {
+            is_in_qc[i] = objects[i]->found_query != nullptr;
+            is_in_bc[i] = !is_in_qc[i] && check_in_bc(objects[i]->address);
+            is_in_ac[i] = is_in_qc[i] || is_in_bc[i];
+        }
+    }
+
+    if (!is_in_ac[0] && !is_in_ac[1]) {
+        EndHostConditionalRendering();
+        return false;
+    }
+
+    if (!qc_dirty && !is_in_bc[0] && !is_in_bc[1]) {
+        EndHostConditionalRendering();
+        return false;
+    }
+
+    for (size_t i = 0; i < 2; i++) {
+        is_null[i] = !is_in_ac[i] && check_value(objects[i]->address);
+    }
+
+    for (size_t i = 0; i < 2; i++) {
+        if (is_null[i]) {
+            size_t j = (i + 1) % 2;
+            HostConditionalRenderingCompareValueImpl(*objects[j], equal_check);
+            return true;
+        }
+    }
+    HostConditionalRenderingCompareBCImpl(object_1.address, equal_check);
+    return true;
+}
+
+QueryCacheRuntime::~QueryCacheRuntime() = default;
+
+VideoCommon::StreamerInterface* QueryCacheRuntime::GetStreamerInterface(QueryType query_type) {
+    switch (query_type) {
+    case QueryType::Payload:
+        return &impl->guest_streamer;
+    case QueryType::ZPassPixelCount64:
+        return &impl->sample_streamer;
+    case QueryType::StreamingByteCount:
+        return &impl->tfb_streamer;
     default:
-        throw vk::Exception(query_result);
+        return nullptr;
     }
 }
 
+void QueryCacheRuntime::Barriers(bool is_prebarrier) {
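+    // Pre-barrier: make earlier GPU writes visible to the transfer stage before
+    // query results are copied. Post-barrier: publish the transfer writes back
+    // to all stages.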
+    static constexpr VkMemoryBarrier READ_BARRIER{
+        .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
+        .pNext = nullptr,
+        .srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT,
+        .dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT,
+    };
+    static constexpr VkMemoryBarrier WRITE_BARRIER{
+        .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
+        .pNext = nullptr,
+        .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
+        .dstAccessMask = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT,
+    };
+    if (is_prebarrier) {
+        impl->scheduler.Record([](vk::CommandBuffer cmdbuf) {
+            cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
+                                   VK_PIPELINE_STAGE_TRANSFER_BIT, 0, READ_BARRIER);
+        });
+    } else {
+        impl->scheduler.Record([](vk::CommandBuffer cmdbuf) {
+            cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
+                                   VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, WRITE_BARRIER);
+        });
+    }
+}
+
+template <typename SyncValuesType>
+void QueryCacheRuntime::SyncValues(std::span<SyncValuesType> values, VkBuffer base_src_buffer) {
+    if (values.empty()) {
+        return;
+    }
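+    // Coalesce the destination guest addresses into page-aligned ranges
+    // (little_cache) and record which range each value maps to (redirect_cache),
+    // so that a single buffer-cache lookup and copy batch is issued per range.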
+    impl->redirect_cache.clear();
+    impl->little_cache.clear();
+    size_t total_size = 0;
+    for (auto& sync_val : values) {
+        total_size += sync_val.size;
+        bool found = false;
+        VAddr base = Common::AlignDown(sync_val.address, Core::Memory::YUZU_PAGESIZE);
+        VAddr base_end = base + Core::Memory::YUZU_PAGESIZE;
+        for (size_t i = 0; i < impl->little_cache.size(); i++) {
+            const auto set_found = [&] {
+                impl->redirect_cache.push_back(i);
+                found = true;
+            };
+            auto& loc = impl->little_cache[i];
+            if (base < loc.second && loc.first < base_end) {
+                set_found();
+                break;
+            }
+            if (loc.first == base_end) {
+                loc.first = base;
+                set_found();
+                break;
+            }
+            if (loc.second == base) {
+                loc.second = base_end;
+                set_found();
+                break;
+            }
+        }
+        if (!found) {
+            impl->redirect_cache.push_back(impl->little_cache.size());
+            impl->little_cache.emplace_back(base, base_end);
+        }
+    }
+
+    // Vulkan part.
+    std::scoped_lock lk(impl->buffer_cache.mutex);
+    impl->buffer_cache.BufferOperations([&] {
+        impl->buffers_to_upload_to.clear();
+        for (auto& pair : impl->little_cache) {
+            static constexpr auto sync_info = VideoCommon::ObtainBufferSynchronize::FullSynchronize;
+            const auto post_op = VideoCommon::ObtainBufferOperation::DoNothing;
+            const auto [buffer, offset] = impl->buffer_cache.ObtainCPUBuffer(
+                pair.first, static_cast<u32>(pair.second - pair.first), sync_info, post_op);
+            impl->buffers_to_upload_to.emplace_back(buffer->Handle(), offset);
+        }
+    });
+
+    VkBuffer src_buffer;
+    [[maybe_unused]] StagingBufferRef ref;
+    impl->copies_setup.clear();
+    impl->copies_setup.resize(impl->little_cache.size());
+    if constexpr (SyncValuesType::GeneratesBaseBuffer) {
+        ref = impl->staging_pool.Request(total_size, MemoryUsage::Upload);
+        size_t current_offset = ref.offset;
+        size_t accumulated_size = 0;
+        for (size_t i = 0; i < values.size(); i++) {
+            size_t which_copy = impl->redirect_cache[i];
+            impl->copies_setup[which_copy].emplace_back(VkBufferCopy{
+                .srcOffset = current_offset + accumulated_size,
+                .dstOffset = impl->buffers_to_upload_to[which_copy].second + values[i].address -
+                             impl->little_cache[which_copy].first,
+                .size = values[i].size,
+            });
+            std::memcpy(ref.mapped_span.data() + accumulated_size, &values[i].value,
+                        values[i].size);
+            accumulated_size += values[i].size;
+        }
+        src_buffer = ref.buffer;
+    } else {
+        for (size_t i = 0; i < values.size(); i++) {
+            size_t which_copy = impl->redirect_cache[i];
+            impl->copies_setup[which_copy].emplace_back(VkBufferCopy{
+                .srcOffset = values[i].offset,
+                .dstOffset = impl->buffers_to_upload_to[which_copy].second + values[i].address -
+                             impl->little_cache[which_copy].first,
+                .size = values[i].size,
+            });
+        }
+        src_buffer = base_src_buffer;
+    }
+
+    impl->scheduler.RequestOutsideRenderPassOperationContext();
+    impl->scheduler.Record([src_buffer, dst_buffers = std::move(impl->buffers_to_upload_to),
+                            vk_copies = std::move(impl->copies_setup)](vk::CommandBuffer cmdbuf) {
+        size_t size = dst_buffers.size();
+        for (size_t i = 0; i < size; i++) {
+            cmdbuf.CopyBuffer(src_buffer, dst_buffers[i].first, vk_copies[i]);
+        }
+    });
+}
+
 } // namespace Vulkan
+
+namespace VideoCommon {
+
+template class QueryCacheBase<Vulkan::QueryCacheParams>;
+
+} // namespace VideoCommon
diff --git a/src/video_core/renderer_vulkan/vk_query_cache.h b/src/video_core/renderer_vulkan/vk_query_cache.h
index c1b9552eb4..9ad2929d7f 100644
--- a/src/video_core/renderer_vulkan/vk_query_cache.h
+++ b/src/video_core/renderer_vulkan/vk_query_cache.h
@@ -1,101 +1,74 @@
-// SPDX-FileCopyrightText: Copyright 2020 yuzu Emulator Project
-// SPDX-License-Identifier: GPL-2.0-or-later
+// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
+// SPDX-License-Identifier: GPL-3.0-or-later
 
 #pragma once
 
-#include <cstddef>
 #include <memory>
-#include <utility>
-#include <vector>
 
-#include "common/common_types.h"
-#include "video_core/query_cache.h"
-#include "video_core/renderer_vulkan/vk_resource_pool.h"
-#include "video_core/vulkan_common/vulkan_wrapper.h"
+#include "video_core/query_cache/query_cache_base.h"
+#include "video_core/renderer_vulkan/vk_buffer_cache.h"
 
 namespace VideoCore {
 class RasterizerInterface;
 }
 
+namespace VideoCommon {
+class StreamerInterface;
+}
+
 namespace Vulkan {
 
-class CachedQuery;
 class Device;
-class HostCounter;
-class QueryCache;
 class Scheduler;
+class StagingBufferPool;
 
-using CounterStream = VideoCommon::CounterStreamBase<QueryCache, HostCounter>;
+struct QueryCacheRuntimeImpl;
 
-class QueryPool final : public ResourcePool {
+class QueryCacheRuntime {
 public:
-    explicit QueryPool(const Device& device, Scheduler& scheduler, VideoCore::QueryType type);
-    ~QueryPool() override;
+    explicit QueryCacheRuntime(VideoCore::RasterizerInterface* rasterizer,
+                               Core::Memory::Memory& cpu_memory_,
+                               Vulkan::BufferCache& buffer_cache_, const Device& device_,
+                               const MemoryAllocator& memory_allocator_, Scheduler& scheduler_,
+                               StagingBufferPool& staging_pool_,
+                               ComputePassDescriptorQueue& compute_pass_descriptor_queue,
+                               DescriptorPool& descriptor_pool);
+    ~QueryCacheRuntime();
 
-    std::pair<VkQueryPool, u32> Commit();
+    template <typename SyncValuesType>
+    void SyncValues(std::span<SyncValuesType> values, VkBuffer base_src_buffer = nullptr);
 
-    void Reserve(std::pair<VkQueryPool, u32> query);
+    void Barriers(bool is_prebarrier);
 
-protected:
-    void Allocate(std::size_t begin, std::size_t end) override;
+    void EndHostConditionalRendering();
+
+    void PauseHostConditionalRendering();
+
+    void ResumeHostConditionalRendering();
+
+    bool HostConditionalRenderingCompareValue(VideoCommon::LookupData object_1, bool qc_dirty);
+
+    bool HostConditionalRenderingCompareValues(VideoCommon::LookupData object_1,
+                                               VideoCommon::LookupData object_2, bool qc_dirty,
+                                               bool equal_check);
+
+    VideoCommon::StreamerInterface* GetStreamerInterface(VideoCommon::QueryType query_type);
+
+    void Bind3DEngine(Tegra::Engines::Maxwell3D* maxwell3d);
+
+    template <typename Func>
+    void View3DRegs(Func&& func);
 
 private:
-    static constexpr std::size_t GROW_STEP = 512;
-
-    const Device& device;
-    const VideoCore::QueryType type;
-
-    std::vector<vk::QueryPool> pools;
-    std::vector<bool> usage;
+    void HostConditionalRenderingCompareValueImpl(VideoCommon::LookupData object, bool is_equal);
+    void HostConditionalRenderingCompareBCImpl(VAddr address, bool is_equal);
+    friend struct QueryCacheRuntimeImpl;
+    std::unique_ptr<QueryCacheRuntimeImpl> impl;
 };
 
-class QueryCache final
-    : public VideoCommon::QueryCacheBase<QueryCache, CachedQuery, CounterStream, HostCounter> {
-public:
-    explicit QueryCache(VideoCore::RasterizerInterface& rasterizer_,
-                        Core::Memory::Memory& cpu_memory_, const Device& device_,
-                        Scheduler& scheduler_);
-    ~QueryCache();
-
-    std::pair<VkQueryPool, u32> AllocateQuery(VideoCore::QueryType type);
-
-    void Reserve(VideoCore::QueryType type, std::pair<VkQueryPool, u32> query);
-
-    const Device& GetDevice() const noexcept {
-        return device;
-    }
-
-    Scheduler& GetScheduler() const noexcept {
-        return scheduler;
-    }
-
-private:
-    const Device& device;
-    Scheduler& scheduler;
-    std::array<QueryPool, VideoCore::NumQueryTypes> query_pools;
+struct QueryCacheParams {
+    using RuntimeType = Vulkan::QueryCacheRuntime;
 };
 
-class HostCounter final : public VideoCommon::HostCounterBase<QueryCache, HostCounter> {
-public:
-    explicit HostCounter(QueryCache& cache_, std::shared_ptr<HostCounter> dependency_,
-                         VideoCore::QueryType type_);
-    ~HostCounter();
-
-    void EndQuery();
-
-private:
-    u64 BlockingQuery(bool async = false) const override;
-
-    QueryCache& cache;
-    const VideoCore::QueryType type;
-    const std::pair<VkQueryPool, u32> query;
-    const u64 tick;
-};
-
-class CachedQuery : public VideoCommon::CachedQueryBase<HostCounter> {
-public:
-    explicit CachedQuery(QueryCache&, VideoCore::QueryType, VAddr cpu_addr_, u8* host_ptr_)
-        : CachedQueryBase{cpu_addr_, host_ptr_} {}
-};
+using QueryCache = VideoCommon::QueryCacheBase<QueryCacheParams>;
 
 } // namespace Vulkan
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.cpp b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
index 01e76a82ca..e8862ba046 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.cpp
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.cpp
@@ -24,6 +24,7 @@
 #include "video_core/renderer_vulkan/vk_compute_pipeline.h"
 #include "video_core/renderer_vulkan/vk_descriptor_pool.h"
 #include "video_core/renderer_vulkan/vk_pipeline_cache.h"
+#include "video_core/renderer_vulkan/vk_query_cache.h"
 #include "video_core/renderer_vulkan/vk_rasterizer.h"
 #include "video_core/renderer_vulkan/vk_scheduler.h"
 #include "video_core/renderer_vulkan/vk_staging_buffer_pool.h"
@@ -170,9 +171,11 @@ RasterizerVulkan::RasterizerVulkan(Core::Frontend::EmuWindow& emu_window_, Tegra
       buffer_cache_runtime(device, memory_allocator, scheduler, staging_pool,
                            guest_descriptor_queue, compute_pass_descriptor_queue, descriptor_pool),
       buffer_cache(*this, cpu_memory_, buffer_cache_runtime),
+      query_cache_runtime(this, cpu_memory_, buffer_cache, device, memory_allocator, scheduler,
+                          staging_pool, compute_pass_descriptor_queue, descriptor_pool),
+      query_cache(gpu, *this, cpu_memory_, query_cache_runtime),
       pipeline_cache(*this, device, scheduler, descriptor_pool, guest_descriptor_queue,
                      render_pass_cache, buffer_cache, texture_cache, gpu.ShaderNotify()),
-      query_cache{*this, cpu_memory_, device, scheduler},
       accelerate_dma(buffer_cache, texture_cache, scheduler),
       fence_manager(*this, gpu, texture_cache, buffer_cache, query_cache, device, scheduler),
       wfi_event(device.GetLogical().CreateEvent()) {
@@ -189,13 +192,15 @@ void RasterizerVulkan::PrepareDraw(bool is_indexed, Func&& draw_func) {
     FlushWork();
     gpu_memory->FlushCaching();
 
+    query_cache.NotifySegment(true);
+
 #if ANDROID
     if (Settings::IsGPULevelHigh()) {
         // This is problematic on Android, disable on GPU Normal.
-        query_cache.UpdateCounters();
+        // query_cache.UpdateCounters();
     }
 #else
-    query_cache.UpdateCounters();
+    // query_cache.UpdateCounters();
 #endif
 
     GraphicsPipeline* const pipeline{pipeline_cache.CurrentGraphicsPipeline()};
@@ -207,13 +212,12 @@ void RasterizerVulkan::PrepareDraw(bool is_indexed, Func&& draw_func) {
     pipeline->SetEngine(maxwell3d, gpu_memory);
     pipeline->Configure(is_indexed);
 
-    BeginTransformFeedback();
-
     UpdateDynamicStates();
 
+    HandleTransformFeedback();
+    query_cache.CounterEnable(VideoCommon::QueryType::ZPassPixelCount64,
+                              maxwell3d->regs.zpass_pixel_count_enable);
     draw_func();
-
-    EndTransformFeedback();
 }
 
 void RasterizerVulkan::Draw(bool is_indexed, u32 instance_count) {
@@ -241,6 +245,14 @@ void RasterizerVulkan::DrawIndirect() {
         const auto indirect_buffer = buffer_cache.GetDrawIndirectBuffer();
         const auto& buffer = indirect_buffer.first;
         const auto& offset = indirect_buffer.second;
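+        // Byte-count draws read their vertex count from the transform feedback
+        // counter buffer via vkCmdDrawIndirectByteCountEXT.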
+        if (params.is_byte_count) {
+            scheduler.Record([buffer_obj = buffer->Handle(), offset,
+                              stride = params.stride](vk::CommandBuffer cmdbuf) {
+                cmdbuf.DrawIndirectByteCountEXT(1, 0, buffer_obj, offset, 0,
+                                                static_cast<u32>(stride));
+            });
+            return;
+        }
         if (params.include_count) {
             const auto count = buffer_cache.GetDrawIndirectCount();
             const auto& draw_buffer = count.first;
@@ -280,13 +292,15 @@ void RasterizerVulkan::DrawTexture() {
     SCOPE_EXIT({ gpu.TickWork(); });
     FlushWork();
 
+    query_cache.NotifySegment(true);
+
 #if ANDROID
     if (Settings::IsGPULevelHigh()) {
         // This is problematic on Android, disable on GPU Normal.
-        query_cache.UpdateCounters();
+        // query_cache.UpdateCounters();
     }
 #else
-    query_cache.UpdateCounters();
+    // query_cache.UpdateCounters();
 #endif
 
     texture_cache.SynchronizeGraphicsDescriptors();
@@ -294,6 +308,8 @@ void RasterizerVulkan::DrawTexture() {
 
     UpdateDynamicStates();
 
+    query_cache.CounterEnable(VideoCommon::QueryType::ZPassPixelCount64,
+                              maxwell3d->regs.zpass_pixel_count_enable);
     const auto& draw_texture_state = maxwell3d->draw_manager->GetDrawTextureState();
     const auto& sampler = texture_cache.GetGraphicsSampler(draw_texture_state.src_sampler);
     const auto& texture = texture_cache.GetImageView(draw_texture_state.src_texture);
@@ -319,12 +335,16 @@ void RasterizerVulkan::Clear(u32 layer_count) {
 #if ANDROID
     if (Settings::IsGPULevelHigh()) {
         // This is problematic on Android, disable on GPU Normal.
-        query_cache.UpdateCounters();
+        // query_cache.UpdateCounters();
     }
 #else
-    query_cache.UpdateCounters();
+    // query_cache.UpdateCounters();
 #endif
 
+    query_cache.NotifySegment(true);
+    query_cache.CounterEnable(VideoCommon::QueryType::ZPassPixelCount64,
+                              maxwell3d->regs.zpass_pixel_count_enable);
+
     auto& regs = maxwell3d->regs;
     const bool use_color = regs.clear_surface.R || regs.clear_surface.G || regs.clear_surface.B ||
                            regs.clear_surface.A;
@@ -482,13 +502,13 @@ void RasterizerVulkan::DispatchCompute() {
     scheduler.Record([dim](vk::CommandBuffer cmdbuf) { cmdbuf.Dispatch(dim[0], dim[1], dim[2]); });
 }
 
-void RasterizerVulkan::ResetCounter(VideoCore::QueryType type) {
-    query_cache.ResetCounter(type);
+void RasterizerVulkan::ResetCounter(VideoCommon::QueryType type) {
+    query_cache.CounterReset(type);
 }
 
-void RasterizerVulkan::Query(GPUVAddr gpu_addr, VideoCore::QueryType type,
-                             std::optional<u64> timestamp) {
-    query_cache.Query(gpu_addr, type, timestamp);
+void RasterizerVulkan::Query(GPUVAddr gpu_addr, VideoCommon::QueryType type,
+                             VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) {
+    query_cache.CounterReport(gpu_addr, type, flags, payload, subreport);
 }
 
 void RasterizerVulkan::BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr,
@@ -669,8 +689,8 @@ void RasterizerVulkan::SignalReference() {
     fence_manager.SignalReference();
 }
 
-void RasterizerVulkan::ReleaseFences() {
-    fence_manager.WaitPendingFences();
+void RasterizerVulkan::ReleaseFences(bool force) {
+    fence_manager.WaitPendingFences(force);
 }
 
 void RasterizerVulkan::FlushAndInvalidateRegion(VAddr addr, u64 size,
@@ -694,6 +714,8 @@ void RasterizerVulkan::WaitForIdle() {
         flags |= VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT;
     }
 
+    query_cache.NotifyWFI();
+
     scheduler.RequestOutsideRenderPassOperationContext();
     scheduler.Record([event = *wfi_event, flags](vk::CommandBuffer cmdbuf) {
         cmdbuf.SetEvent(event, flags);
@@ -737,19 +759,7 @@ void RasterizerVulkan::TickFrame() {
 
 bool RasterizerVulkan::AccelerateConditionalRendering() {
     gpu_memory->FlushCaching();
-    if (Settings::IsGPULevelHigh()) {
-        // TODO(Blinkhawk): Reimplement Host conditional rendering.
-        return false;
-    }
-    // Medium / Low Hack: stub any checks on queries written into the buffer cache.
-    const GPUVAddr condition_address{maxwell3d->regs.render_enable.Address()};
-    Maxwell::ReportSemaphore::Compare cmp;
-    if (gpu_memory->IsMemoryDirty(condition_address, sizeof(cmp),
-                                  VideoCommon::CacheType::BufferCache |
-                                      VideoCommon::CacheType::QueryCache)) {
-        return true;
-    }
-    return false;
+    return query_cache.AccelerateHostConditionalRendering();
 }
 
 bool RasterizerVulkan::AccelerateSurfaceCopy(const Tegra::Engines::Fermi2D::Surface& src,
@@ -795,6 +805,7 @@ bool RasterizerVulkan::AccelerateDisplay(const Tegra::FramebufferConfig& config,
     if (!image_view) {
         return false;
     }
+    query_cache.NotifySegment(false);
     screen_info.image = image_view->ImageHandle();
     screen_info.image_view = image_view->Handle(Shader::TextureType::Color2D);
     screen_info.width = image_view->size.width;
@@ -933,31 +944,18 @@ void RasterizerVulkan::UpdateDynamicStates() {
     }
 }
 
-void RasterizerVulkan::BeginTransformFeedback() {
+void RasterizerVulkan::HandleTransformFeedback() {
     const auto& regs = maxwell3d->regs;
-    if (regs.transform_feedback_enabled == 0) {
-        return;
-    }
     if (!device.IsExtTransformFeedbackSupported()) {
         LOG_ERROR(Render_Vulkan, "Transform feedbacks used but not supported");
         return;
     }
-    UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderType::TessellationInit) ||
-                     regs.IsShaderConfigEnabled(Maxwell::ShaderType::Tessellation));
-    scheduler.Record(
-        [](vk::CommandBuffer cmdbuf) { cmdbuf.BeginTransformFeedbackEXT(0, 0, nullptr, nullptr); });
-}
-
-void RasterizerVulkan::EndTransformFeedback() {
-    const auto& regs = maxwell3d->regs;
-    if (regs.transform_feedback_enabled == 0) {
-        return;
+    query_cache.CounterEnable(VideoCommon::QueryType::StreamingByteCount,
+                              regs.transform_feedback_enabled);
+    if (regs.transform_feedback_enabled != 0) {
+        UNIMPLEMENTED_IF(regs.IsShaderConfigEnabled(Maxwell::ShaderType::TessellationInit) ||
+                         regs.IsShaderConfigEnabled(Maxwell::ShaderType::Tessellation));
     }
-    if (!device.IsExtTransformFeedbackSupported()) {
-        return;
-    }
-    scheduler.Record(
-        [](vk::CommandBuffer cmdbuf) { cmdbuf.EndTransformFeedbackEXT(0, 0, nullptr, nullptr); });
 }
 
 void RasterizerVulkan::UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& regs) {
diff --git a/src/video_core/renderer_vulkan/vk_rasterizer.h b/src/video_core/renderer_vulkan/vk_rasterizer.h
index b319824855..ffd44c68d6 100644
--- a/src/video_core/renderer_vulkan/vk_rasterizer.h
+++ b/src/video_core/renderer_vulkan/vk_rasterizer.h
@@ -84,8 +84,8 @@ public:
     void DrawTexture() override;
     void Clear(u32 layer_count) override;
     void DispatchCompute() override;
-    void ResetCounter(VideoCore::QueryType type) override;
-    void Query(GPUVAddr gpu_addr, VideoCore::QueryType type, std::optional<u64> timestamp) override;
+    void ResetCounter(VideoCommon::QueryType type) override;
+    void Query(GPUVAddr gpu_addr, VideoCommon::QueryType type,
+               VideoCommon::QueryPropertiesFlags flags, u32 payload, u32 subreport) override;
     void BindGraphicsUniformBuffer(size_t stage, u32 index, GPUVAddr gpu_addr, u32 size) override;
     void DisableGraphicsUniformBuffer(size_t stage, u32 index) override;
     void FlushAll() override;
@@ -106,7 +106,7 @@ public:
     void SyncOperation(std::function<void()>&& func) override;
     void SignalSyncPoint(u32 value) override;
     void SignalReference() override;
-    void ReleaseFences() override;
+    void ReleaseFences(bool force = true) override;
     void FlushAndInvalidateRegion(
         VAddr addr, u64 size, VideoCommon::CacheType which = VideoCommon::CacheType::All) override;
     void WaitForIdle() override;
@@ -146,9 +146,7 @@ private:
 
     void UpdateDynamicStates();
 
-    void BeginTransformFeedback();
-
-    void EndTransformFeedback();
+    void HandleTransformFeedback();
 
     void UpdateViewportsState(Tegra::Engines::Maxwell3D::Regs& regs);
     void UpdateScissorsState(Tegra::Engines::Maxwell3D::Regs& regs);
@@ -195,8 +193,9 @@ private:
     TextureCache texture_cache;
     BufferCacheRuntime buffer_cache_runtime;
     BufferCache buffer_cache;
-    PipelineCache pipeline_cache;
+    QueryCacheRuntime query_cache_runtime;
     QueryCache query_cache;
+    PipelineCache pipeline_cache;
     AccelerateDMA accelerate_dma;
     FenceManager fence_manager;
 
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.cpp b/src/video_core/renderer_vulkan/vk_scheduler.cpp
index 89fd31b4f5..3be7837f40 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.cpp
+++ b/src/video_core/renderer_vulkan/vk_scheduler.cpp
@@ -243,10 +243,10 @@ void Scheduler::AllocateNewContext() {
 #if ANDROID
         if (Settings::IsGPULevelHigh()) {
             // This is problematic on Android, disable on GPU Normal.
-            query_cache->UpdateCounters();
+            query_cache->NotifySegment(true);
         }
 #else
-        query_cache->UpdateCounters();
+        query_cache->NotifySegment(true);
 #endif
     }
 }
@@ -261,11 +261,12 @@ void Scheduler::EndPendingOperations() {
 #if ANDROID
     if (Settings::IsGPULevelHigh()) {
         // This is problematic on Android, disable on GPU Normal.
-        query_cache->DisableStreams();
+        // query_cache->DisableStreams();
     }
 #else
-    query_cache->DisableStreams();
+    // query_cache->DisableStreams();
 #endif
+    query_cache->NotifySegment(false);
     EndRenderPass();
 }
 
diff --git a/src/video_core/renderer_vulkan/vk_scheduler.h b/src/video_core/renderer_vulkan/vk_scheduler.h
index 475c682eb2..c87e5fb071 100644
--- a/src/video_core/renderer_vulkan/vk_scheduler.h
+++ b/src/video_core/renderer_vulkan/vk_scheduler.h
@@ -15,6 +15,7 @@
 #include "common/common_types.h"
 #include "common/polyfill_thread.h"
 #include "video_core/renderer_vulkan/vk_master_semaphore.h"
+#include "video_core/renderer_vulkan/vk_query_cache.h"
 #include "video_core/vulkan_common/vulkan_wrapper.h"
 
 namespace Vulkan {
@@ -24,7 +25,6 @@ class Device;
 class Framebuffer;
 class GraphicsPipeline;
 class StateTracker;
-class QueryCache;
 
 /// The scheduler abstracts command buffer and fence management with an interface that's able to do
 /// OpenGL-like operations on Vulkan command buffers.
diff --git a/src/video_core/vulkan_common/vulkan_device.h b/src/video_core/vulkan_common/vulkan_device.h
index 6c7fa34e5a..16f0425beb 100644
--- a/src/video_core/vulkan_common/vulkan_device.h
+++ b/src/video_core/vulkan_common/vulkan_device.h
@@ -61,6 +61,7 @@ VK_DEFINE_HANDLE(VmaAllocator)
 
 // Define miscellaneous extensions which may be used by the implementation here.
 #define FOR_EACH_VK_EXTENSION(EXTENSION)                                                           \
+    EXTENSION(EXT, CONDITIONAL_RENDERING, conditional_rendering)                                   \
     EXTENSION(EXT, CONSERVATIVE_RASTERIZATION, conservative_rasterization)                         \
     EXTENSION(EXT, DEPTH_RANGE_UNRESTRICTED, depth_range_unrestricted)                             \
     EXTENSION(EXT, MEMORY_BUDGET, memory_budget)                                                   \
@@ -93,6 +94,7 @@ VK_DEFINE_HANDLE(VmaAllocator)
 
 // Define extensions where the absence of the extension may result in a degraded experience.
 #define FOR_EACH_VK_RECOMMENDED_EXTENSION(EXTENSION_NAME)                                          \
+    EXTENSION_NAME(VK_EXT_CONDITIONAL_RENDERING_EXTENSION_NAME)                                    \
     EXTENSION_NAME(VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME)                               \
     EXTENSION_NAME(VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME)                                 \
     EXTENSION_NAME(VK_EXT_EXTENDED_DYNAMIC_STATE_EXTENSION_NAME)                                   \
@@ -536,6 +538,10 @@ public:
         return extensions.shader_atomic_int64;
     }
 
+    bool IsExtConditionalRendering() const {
+        return extensions.conditional_rendering;
+    }
+
     bool HasTimelineSemaphore() const;
 
     /// Returns the minimum supported version of SPIR-V.
diff --git a/src/video_core/vulkan_common/vulkan_wrapper.cpp b/src/video_core/vulkan_common/vulkan_wrapper.cpp
index c3f388d895..5a08a92e18 100644
--- a/src/video_core/vulkan_common/vulkan_wrapper.cpp
+++ b/src/video_core/vulkan_common/vulkan_wrapper.cpp
@@ -75,6 +75,7 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
     X(vkBeginCommandBuffer);
     X(vkBindBufferMemory);
     X(vkBindImageMemory);
+    X(vkCmdBeginConditionalRenderingEXT);
     X(vkCmdBeginQuery);
     X(vkCmdBeginRenderPass);
     X(vkCmdBeginTransformFeedbackEXT);
@@ -91,6 +92,7 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
     X(vkCmdCopyBufferToImage);
     X(vkCmdCopyImage);
     X(vkCmdCopyImageToBuffer);
+    X(vkCmdCopyQueryPoolResults);
     X(vkCmdDispatch);
     X(vkCmdDispatchIndirect);
     X(vkCmdDraw);
@@ -99,6 +101,7 @@ void Load(VkDevice device, DeviceDispatch& dld) noexcept {
     X(vkCmdDrawIndexedIndirect);
     X(vkCmdDrawIndirectCount);
     X(vkCmdDrawIndexedIndirectCount);
+    X(vkCmdEndConditionalRenderingEXT);
     X(vkCmdEndQuery);
     X(vkCmdEndRenderPass);
     X(vkCmdEndTransformFeedbackEXT);
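The new X(...) entries resolve the conditional rendering commands and
vkCmdCopyQueryPoolResults at device load time; on a device without
VK_EXT_conditional_rendering the EXT pointers simply stay null, which is why
call sites must check IsExtConditionalRendering() first. Roughly, simplified
from the wrapper's loader:

    // Simplified shape of the loader macro used above (assumed, not verbatim).
    #define X(name) \
        dld.name = reinterpret_cast<PFN_##name>(dld.vkGetDeviceProcAddr(device, #name))
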
diff --git a/src/video_core/vulkan_common/vulkan_wrapper.h b/src/video_core/vulkan_common/vulkan_wrapper.h
index 049fa80387..27d94a7d59 100644
--- a/src/video_core/vulkan_common/vulkan_wrapper.h
+++ b/src/video_core/vulkan_common/vulkan_wrapper.h
@@ -185,6 +185,7 @@ struct DeviceDispatch : InstanceDispatch {
     PFN_vkBeginCommandBuffer vkBeginCommandBuffer{};
     PFN_vkBindBufferMemory vkBindBufferMemory{};
     PFN_vkBindImageMemory vkBindImageMemory{};
+    PFN_vkCmdBeginConditionalRenderingEXT vkCmdBeginConditionalRenderingEXT{};
     PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT{};
     PFN_vkCmdBeginQuery vkCmdBeginQuery{};
     PFN_vkCmdBeginRenderPass vkCmdBeginRenderPass{};
@@ -202,6 +203,7 @@ struct DeviceDispatch : InstanceDispatch {
     PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImage{};
     PFN_vkCmdCopyImage vkCmdCopyImage{};
     PFN_vkCmdCopyImageToBuffer vkCmdCopyImageToBuffer{};
+    PFN_vkCmdCopyQueryPoolResults vkCmdCopyQueryPoolResults{};
     PFN_vkCmdDispatch vkCmdDispatch{};
     PFN_vkCmdDispatchIndirect vkCmdDispatchIndirect{};
     PFN_vkCmdDraw vkCmdDraw{};
@@ -210,6 +212,7 @@ struct DeviceDispatch : InstanceDispatch {
     PFN_vkCmdDrawIndexedIndirect vkCmdDrawIndexedIndirect{};
     PFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount{};
     PFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount{};
+    PFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT{};
     PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT{};
     PFN_vkCmdEndQuery vkCmdEndQuery{};
     PFN_vkCmdEndRenderPass vkCmdEndRenderPass{};
@@ -1270,6 +1273,13 @@ public:
                                     regions.data());
     }
 
+    void CopyQueryPoolResults(VkQueryPool query_pool, u32 first_query, u32 query_count,
+                              VkBuffer dst_buffer, VkDeviceSize dst_offset, VkDeviceSize stride,
+                              VkQueryResultFlags flags) const noexcept {
+        dld->vkCmdCopyQueryPoolResults(handle, query_pool, first_query, query_count, dst_buffer,
+                                       dst_offset, stride, flags);
+    }
+
     void FillBuffer(VkBuffer dst_buffer, VkDeviceSize dst_offset, VkDeviceSize size,
                     u32 data) const noexcept {
         dld->vkCmdFillBuffer(handle, dst_buffer, dst_offset, size, data);
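CopyQueryPoolResults wraps vkCmdCopyQueryPoolResults, letting the cache
resolve query results into a buffer on the GPU timeline instead of reading
them back on the host. A usage sketch; pool, buffer and flags are
illustrative:

    // Copy `count` 64-bit results into dst_buffer, waiting until available.
    cmdbuf.CopyQueryPoolResults(query_pool, 0, count, dst_buffer, 0, sizeof(u64),
                                VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
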
@@ -1448,6 +1458,15 @@ public:
                                           counter_buffers, counter_buffer_offsets);
     }
 
+    void BeginConditionalRenderingEXT(
+        const VkConditionalRenderingBeginInfoEXT& info) const noexcept {
+        dld->vkCmdBeginConditionalRenderingEXT(handle, &info);
+    }
+
+    void EndConditionalRenderingEXT() const noexcept {
+        dld->vkCmdEndConditionalRenderingEXT(handle);
+    }
+
     void BeginDebugUtilsLabelEXT(const char* label, std::span<float, 4> color) const noexcept {
         const VkDebugUtilsLabelEXT label_info{
             .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT,
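The conditional rendering wrappers pair up around the draws they predicate. A
hedged end-to-end sketch; the predicate buffer and offset are illustrative:

    const VkConditionalRenderingBeginInfoEXT begin_info{
        .sType = VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT,
        .pNext = nullptr,
        .buffer = predicate_buffer, // 32-bit value previously written by the GPU
        .offset = 0,
        .flags = 0,
    };
    cmdbuf.BeginConditionalRenderingEXT(begin_info);
    // ... draws recorded here are skipped while the predicate value is zero ...
    cmdbuf.EndConditionalRenderingEXT();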