Query Cache: Fix Prefix Sums

commit 57d8cd6c40
parent bf0d6b8806
src/video_core/host_shaders/queries_prefix_scan_sum.comp

@@ -34,11 +34,16 @@
 #endif
 
 BEGIN_PUSH_CONSTANTS
-UNIFORM(0) uint max_accumulation_base;
-UNIFORM(1) uint accumulation_limit;
+UNIFORM(0) uint min_accumulation_base;
+UNIFORM(1) uint max_accumulation_base;
+UNIFORM(2) uint accumulation_limit;
+UNIFORM(3) uint buffer_offset;
 END_PUSH_CONSTANTS
 
-layout(local_size_x = 32) in;
+#define LOCAL_RESULTS 8
+#define QUERIES_PER_INVOC 2048
+
+layout(local_size_x = QUERIES_PER_INVOC / LOCAL_RESULTS) in;
 
 layout(std430, binding = 0) readonly buffer block1 {
     uvec2 input_data[];
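A note on the new push-constant block, as I read it from how the shaders and the reworked Run() below consume the values; the field comments are interpretation, not upstream documentation:

// Hypothetical annotated mirror of QueriesPrefixScanPushConstants (C++).
struct QueriesPrefixScanPushConstants {
    unsigned min_accumulation_base; // queries below this index still add the previous accumulated_data
    unsigned max_accumulation_base; // one past the query whose running sum seeds the mid-buffer reset
    unsigned accumulation_limit;    // index of the last query in this dispatch (runs_to_do - 1)
    unsigned buffer_offset;         // where the current 2048-query chunk starts inside the buffers
};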
@@ -52,7 +57,7 @@ layout(std430, binding = 2) coherent buffer block3 {
     uvec2 accumulated_data;
 };
 
-shared uvec2 shared_data[2];
+shared uvec2 shared_data[128];
 
 // Simple Uint64 add that uses 2 uint variables for GPUs that don't support uint64
 uvec2 AddUint64(uvec2 value_1, uvec2 value_2) {
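AddUint64 emulates 64-bit addition on a uvec2 holding (low, high) 32-bit words, using uaddCarry for the carry out of the low half. A minimal CPU model of the same arithmetic, with names and test values of my choosing:

#include <cstdint>
#include <cstdio>

struct U64Pair {
    uint32_t lo, hi; // mirrors uvec2: x = low word, y = high word
};

// 64-bit add built from two 32-bit halves, like the shader's AddUint64
U64Pair AddUint64(U64Pair a, U64Pair b) {
    U64Pair r;
    uint64_t sum = static_cast<uint64_t>(a.lo) + b.lo;
    r.lo = static_cast<uint32_t>(sum);                 // uaddCarry's result
    uint32_t carry = static_cast<uint32_t>(sum >> 32); // uaddCarry's carry out
    r.hi = a.hi + b.hi + carry;
    return r;
}

int main() {
    U64Pair a{0xFFFFFFFFu, 0x0u};
    U64Pair b{0x1u, 0x0u};
    U64Pair r = AddUint64(a, b);
    std::printf("%08X%08X\n", r.hi, r.lo); // 0000000100000000
}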
@@ -67,8 +72,8 @@ uvec2 AddUint64(uvec2 value_1, uvec2 value_2) {
 uvec2 subgroupInclusiveAddUint64(uvec2 value) {
     uvec2 result = value;
     for (uint i = 1; i < gl_SubgroupSize; i *= 2) {
-        uvec2 other = subgroupShuffleUp(result, i); // get value from subgroup_inv_id - i;
         if (i <= gl_SubgroupInvocationID) {
+            uvec2 other = subgroupShuffleUp(result, i); // get value from subgroup_inv_id - i;
             result = AddUint64(result, other);
         }
     }
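The loop above is a shuffle-up inclusive scan (Hillis-Steele style): after the step of width i, each lane holds the sum of its own value and the i lanes before it, and the fix moves the shuffle inside the active-lane guard. A CPU sketch of the scan pattern, with a serial loop standing in for the subgroup lanes; data and lane count are arbitrary:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    std::vector<uint64_t> lanes{3, 1, 4, 1, 5, 9, 2, 6}; // one value per "lane"
    const size_t n = lanes.size();
    for (size_t step = 1; step < n; step *= 2) {
        std::vector<uint64_t> before = lanes; // values as they were before this step
        for (size_t lane = 0; lane < n; lane++) {
            if (step <= lane) { // the guard the commit moves the shuffle into
                lanes[lane] = before[lane] + before[lane - step]; // "shuffle up" + add
            }
        }
    }
    for (uint64_t v : lanes) {
        std::printf("%llu ", static_cast<unsigned long long>(v)); // 3 4 8 9 14 23 25 31
    }
    std::printf("\n");
}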
@@ -76,89 +81,93 @@ uvec2 subgroupInclusiveAddUint64(uvec2 value) {
 }
 
 // Writes down the results to the output buffer and to the accumulation buffer
-void WriteResults(uvec2 result) {
-    uint current_global_id = gl_GlobalInvocationID.x;
-    uvec2 base_data = current_global_id < max_accumulation_base ? accumulated_data : uvec2(0);
-    output_data[current_global_id] = result + base_data;
-    if (max_accumulation_base >= accumulation_limit + 1) {
-        if (current_global_id == accumulation_limit) {
-            accumulated_data = result;
+void WriteResults(uvec2 results[LOCAL_RESULTS]) {
+    const uint current_id = gl_LocalInvocationID.x;
+    const uvec2 accum = accumulated_data;
+    for (uint i = 0; i < LOCAL_RESULTS; i++) {
+        uvec2 base_data = current_id * LOCAL_RESULTS + i < min_accumulation_base ? accum : uvec2(0, 0);
+        AddUint64(results[i], base_data);
+    }
+    for (uint i = 0; i < LOCAL_RESULTS; i++) {
+        output_data[buffer_offset + current_id * LOCAL_RESULTS + i] = results[i];
+    }
+    uint index = accumulation_limit % LOCAL_RESULTS;
+    uint base_id = accumulation_limit / LOCAL_RESULTS;
+    if (min_accumulation_base >= accumulation_limit + 1) {
+        if (current_id == base_id) {
+            accumulated_data = results[index];
         }
         return;
     }
     // We have that ugly case in which the accumulation data is reset in the middle somewhere.
     barrier();
     groupMemoryBarrier();
-    if (current_global_id == accumulation_limit) {
-        uvec2 value_1 = output_data[max_accumulation_base];
-        accumulated_data = AddUint64(result, -value_1);
+
+    if (current_id == base_id) {
+        uvec2 reset_value = output_data[max_accumulation_base - 1];
+        // Calculate two complement / negate manually
+        reset_value = AddUint64(uvec2(1, 0), ~reset_value);
+        accumulated_data = AddUint64(results[index], reset_value);
     }
 }
 
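The reset path subtracts reset_value without a subtract instruction: it adds the two's complement, built as ~value + 1 over the (low, high) word pair, which is exactly what AddUint64(uvec2(1, 0), ~reset_value) computes. A quick CPU check of that identity, with made-up values:

#include <cstdint>
#include <cstdio>

int main() {
    uint64_t final_value = 1000;
    uint64_t reset_value = 600;
    uint32_t lo = static_cast<uint32_t>(reset_value);
    uint32_t hi = static_cast<uint32_t>(reset_value >> 32);
    // AddUint64(uvec2(1, 0), ~reset_value): complement both words, then add 1.
    uint64_t carry = static_cast<uint64_t>(~lo) + 1u;
    uint32_t neg_lo = static_cast<uint32_t>(carry);
    uint32_t neg_hi = ~hi + static_cast<uint32_t>(carry >> 32);
    uint64_t negated = (static_cast<uint64_t>(neg_hi) << 32) | neg_lo;
    std::printf("%llu\n", static_cast<unsigned long long>(final_value + negated)); // 400
}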
 void main() {
-    uint subgroup_inv_id = gl_SubgroupInvocationID;
-    uint subgroup_id = gl_SubgroupID;
-    uint last_subgroup_id = subgroupMax(subgroup_inv_id);
-    uint current_global_id = gl_GlobalInvocationID.x;
-    uint total_work = gl_NumWorkGroups.x * gl_WorkGroupSize.x;
-    uvec2 data = input_data[current_global_id];
+    const uint subgroup_inv_id = gl_SubgroupInvocationID;
+    const uint subgroup_id = gl_SubgroupID + gl_WorkGroupID.x * gl_NumSubgroups;
+    const uint last_subgroup_id = subgroupMax(subgroup_inv_id);
+    const uint current_id = gl_LocalInvocationID.x;
+    const uint total_work = accumulation_limit;
+    const uint last_result_id = LOCAL_RESULTS - 1;
+    uvec2 data[LOCAL_RESULTS];
+    for (uint i = 0; i < LOCAL_RESULTS; i++) {
+        data[i] = input_data[buffer_offset + current_id * LOCAL_RESULTS + i];
+    }
+    uvec2 results[LOCAL_RESULTS];
+    results[0] = data[0];
+    for (uint i = 1; i < LOCAL_RESULTS; i++) {
+        results[i] = AddUint64(data[i], results[i - 1]);
+    }
     // make sure all input data has been loaded
     subgroupBarrier();
     subgroupMemoryBarrier();
 
-    uvec2 result = subgroupInclusiveAddUint64(data);
+    // on the last local result, do a subgroup inclusive scan sum
+    results[last_result_id] = subgroupInclusiveAddUint64(results[last_result_id]);
+    // get the last local result from the subgroup behind the current
+    uvec2 result_behind = subgroupShuffleUp(results[last_result_id], 1);
+    if (subgroup_inv_id != 0) {
+        for (uint i = 1; i < LOCAL_RESULTS; i++) {
+            results[i - 1] = AddUint64(results[i - 1], result_behind);
+        }
+    }
 
     // if we had less queries than our subgroup, just write down the results.
-    if (total_work <= gl_SubgroupSize) { // This condition is constant per dispatch.
-        WriteResults(result);
+    if (total_work <= gl_SubgroupSize * LOCAL_RESULTS) { // This condition is constant per dispatch.
+        WriteResults(results);
         return;
     }
 
     // We now have more, so lets write the last result into shared memory.
     // Only pick the last subgroup.
     if (subgroup_inv_id == last_subgroup_id) {
-        shared_data[subgroup_id] = result;
+        shared_data[subgroup_id] = results[last_result_id];
     }
     // wait until everyone loaded their stuffs
     barrier();
     memoryBarrierShared();
 
-    // Case 1: the total work for the grouped results can be calculated in a single subgroup
-    // operation (about 1024 queries).
-    uint total_extra_work = gl_NumSubgroups * gl_NumWorkGroups.x;
-    if (total_extra_work <= gl_SubgroupSize) { // This condition is constant per dispatch.
-        if (subgroup_id != 0) {
-            uvec2 tmp = shared_data[subgroup_inv_id];
-            subgroupBarrier();
-            subgroupMemoryBarrierShared();
-            tmp = subgroupInclusiveAddUint64(tmp);
-            result = AddUint64(result, subgroupShuffle(tmp, subgroup_id - 1));
-        }
-
-        WriteResults(result);
-        return;
-    }
-
-    // Case 2: our work amount is huge, so lets do it in O(log n) steps.
-    const uint extra = (total_extra_work ^ (total_extra_work - 1)) != 0 ? 1 : 0;
-    const uint steps = 1 << (findMSB(total_extra_work) + extra);
-    uint step;
-    // Hillis and Steele's algorithm
-    for (step = 1; step < steps; step *= 2) {
-        if (current_global_id < steps && current_global_id >= step) {
-            uvec2 current = shared_data[current_global_id];
-            uvec2 other = shared_data[current_global_id - step];
-            shared_data[current_global_id] = AddUint64(current, other);
-        }
-        // steps is constant, so this will always execute in ever workgroup's thread.
-        barrier();
-        memoryBarrierShared();
-    }
-    // Only add results for groups higher than 0
+    // only if it's not the first subgroup
     if (subgroup_id != 0) {
-        result = AddUint64(result, shared_data[subgroup_id - 1]);
+        // get the results from some previous invocation
+        uvec2 tmp = shared_data[subgroup_inv_id];
+        subgroupBarrier();
+        subgroupMemoryBarrierShared();
+        tmp = subgroupInclusiveAddUint64(tmp);
+        // obtain the result that would be equivalent to the previous result
+        uvec2 shuffled_result = subgroupShuffle(tmp, subgroup_id - 1);
+        for (uint i = 0; i < LOCAL_RESULTS; i++) {
+            results[i] = AddUint64(results[i], shuffled_result);
+        }
     }
-
-    // Just write the final results. We are done
-    WriteResults(result);
+    WriteResults(results);
 }
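Taken together, main() now scans hierarchically: each invocation serially scans its LOCAL_RESULTS values, a subgroup scan combines the per-invocation totals, and the shared-memory pass folds in the totals of earlier subgroups. A CPU model of the first two levels, with the parallel parts serialized and sizes chosen for illustration:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    constexpr size_t LOCAL_RESULTS = 8;
    std::vector<uint64_t> data(64, 1); // 8 "invocations" x 8 results, all ones
    const size_t invocations = data.size() / LOCAL_RESULTS;
    std::vector<uint64_t> totals(invocations);
    // Level 1: serial inclusive scan inside each invocation's chunk.
    for (size_t inv = 0; inv < invocations; inv++) {
        uint64_t run = 0;
        for (size_t i = 0; i < LOCAL_RESULTS; i++) {
            run += data[inv * LOCAL_RESULTS + i];
            data[inv * LOCAL_RESULTS + i] = run;
        }
        totals[inv] = run;
    }
    // Level 2: add to each chunk the sum of all earlier chunks (the subgroup
    // scan plus the result_behind propagation play this role on the GPU).
    uint64_t base = 0;
    for (size_t inv = 0; inv < invocations; inv++) {
        for (size_t i = 0; i < LOCAL_RESULTS; i++) {
            data[inv * LOCAL_RESULTS + i] += base;
        }
        base += totals[inv];
    }
    std::printf("%llu %llu\n", static_cast<unsigned long long>(data.front()),
                static_cast<unsigned long long>(data.back())); // 1 64
}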
src/video_core/host_shaders/queries_prefix_scan_sum_nosubgroups.comp

@@ -32,25 +32,30 @@
 #endif
 
 BEGIN_PUSH_CONSTANTS
-UNIFORM(0) uint max_accumulation_base;
-UNIFORM(1) uint accumulation_limit;
+UNIFORM(0) uint min_accumulation_base;
+UNIFORM(1) uint max_accumulation_base;
+UNIFORM(2) uint accumulation_limit;
+UNIFORM(3) uint buffer_offset;
 END_PUSH_CONSTANTS
 
-layout(local_size_x = 32) in;
+#define LOCAL_RESULTS 4
+#define QUERIES_PER_INVOC 2048
+
+layout(local_size_x = QUERIES_PER_INVOC / LOCAL_RESULTS) in;
 
 layout(std430, binding = 0) readonly buffer block1 {
-    uvec2 input_data[gl_WorkGroupSize.x];
+    uvec2 input_data[gl_WorkGroupSize.x * LOCAL_RESULTS];
 };
 
 layout(std430, binding = 1) writeonly coherent buffer block2 {
-    uvec2 output_data[gl_WorkGroupSize.x];
+    uvec2 output_data[gl_WorkGroupSize.x * LOCAL_RESULTS];
 };
 
 layout(std430, binding = 2) coherent buffer block3 {
     uvec2 accumulated_data;
 };
 
-shared uvec2 shared_data[gl_WorkGroupSize.x * 2];
+shared uvec2 shared_data[gl_WorkGroupSize.x * LOCAL_RESULTS];
 
 uvec2 AddUint64(uvec2 value_1, uvec2 value_2) {
     uint carry = 0;
@@ -62,23 +67,31 @@ uvec2 AddUint64(uvec2 value_1, uvec2 value_2) {
 
 void main(void) {
     uint id = gl_LocalInvocationID.x;
-    uvec2 base_value_1 = (id * 2) < max_accumulation_base ? accumulated_data : uvec2(0);
-    uvec2 base_value_2 = (id * 2 + 1) < max_accumulation_base ? accumulated_data : uvec2(0);
+    uvec2 base_value[LOCAL_RESULTS];
+    const uvec2 accum = accumulated_data;
+    for (uint i = 0; i < LOCAL_RESULTS; i++) {
+        base_value[i] = (buffer_offset + id * LOCAL_RESULTS + i) < min_accumulation_base
+                            ? accumulated_data
+                            : uvec2(0);
+    }
     uint work_size = gl_WorkGroupSize.x;
     uint rd_id;
     uint wr_id;
     uint mask;
-    uvec2 input_1 = input_data[id * 2];
-    uvec2 input_2 = input_data[id * 2 + 1];
+    uvec2 inputs[LOCAL_RESULTS];
+    for (uint i = 0; i < LOCAL_RESULTS; i++) {
+        inputs[i] = input_data[buffer_offset + id * LOCAL_RESULTS + i];
+    }
     // The number of steps is the log base 2 of the
     // work group size, which should be a power of 2
-    const uint steps = uint(log2(work_size)) + 1;
+    const uint steps = uint(log2(work_size)) + uint(log2(LOCAL_RESULTS));
     uint step = 0;
 
     // Each invocation is responsible for the content of
     // two elements of the output array
-    shared_data[id * 2] = input_1;
-    shared_data[id * 2 + 1] = input_2;
+    for (uint i = 0; i < LOCAL_RESULTS; i++) {
+        shared_data[id * LOCAL_RESULTS + i] = inputs[i];
+    }
     // Synchronize to make sure that everyone has initialized
     // their elements of shared_data[] with data loaded from
     // the input arrays
@@ -100,21 +113,26 @@ void main(void) {
         memoryBarrierShared();
     }
     // Add the accumulation
-    shared_data[id * 2] = AddUint64(shared_data[id * 2], base_value_1);
-    shared_data[id * 2 + 1] = AddUint64(shared_data[id * 2 + 1], base_value_2);
+    for (uint i = 0; i < LOCAL_RESULTS; i++) {
+        shared_data[id * LOCAL_RESULTS + i] =
+            AddUint64(shared_data[id * LOCAL_RESULTS + i], base_value[i]);
+    }
     barrier();
     memoryBarrierShared();
 
     // Finally write our data back to the output buffer
-    output_data[id * 2] = shared_data[id * 2];
-    output_data[id * 2 + 1] = shared_data[id * 2 + 1];
+    for (uint i = 0; i < LOCAL_RESULTS; i++) {
+        output_data[buffer_offset + id * LOCAL_RESULTS + i] = shared_data[id * LOCAL_RESULTS + i];
+    }
     if (id == 0) {
-        if (max_accumulation_base >= accumulation_limit + 1) {
+        if (min_accumulation_base >= accumulation_limit + 1) {
             accumulated_data = shared_data[accumulation_limit];
             return;
         }
-        uvec2 value_1 = shared_data[max_accumulation_base];
-        uvec2 value_2 = shared_data[accumulation_limit];
-        accumulated_data = AddUint64(value_1, -value_2);
+        uvec2 reset_value = shared_data[max_accumulation_base - 1];
+        uvec2 final_value = shared_data[accumulation_limit];
+        // Two complements
+        reset_value = AddUint64(uvec2(1, 0), ~reset_value);
+        accumulated_data = AddUint64(final_value, reset_value);
     }
 }
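A sanity check on the new step count: the shared-memory scan now covers work_size * LOCAL_RESULTS elements, and log2(work_size) + log2(LOCAL_RESULTS) is exactly log2 of that element count. A tiny check mirroring the defines above:

#include <cmath>
#include <cstdio>

int main() {
    const unsigned QUERIES_PER_INVOC = 2048;
    const unsigned LOCAL_RESULTS = 4;
    const unsigned work_size = QUERIES_PER_INVOC / LOCAL_RESULTS; // 512 invocations
    const unsigned steps = static_cast<unsigned>(std::log2(static_cast<double>(work_size))) +
                           static_cast<unsigned>(std::log2(static_cast<double>(LOCAL_RESULTS)));
    std::printf("%u\n", steps); // 11, i.e. log2(512 * 4) = log2(2048)
}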
src/video_core/renderer_vulkan/vk_compute_pass.cpp

@@ -179,8 +179,10 @@ struct AstcPushConstants {
 };
 
 struct QueriesPrefixScanPushConstants {
+    u32 min_accumulation_base;
     u32 max_accumulation_base;
     u32 accumulation_limit;
+    u32 buffer_offset;
 };
 } // Anonymous namespace
 
@@ -416,56 +418,65 @@ QueriesPrefixScanPass::QueriesPrefixScanPass(
           device_.IsSubgroupFeatureSupported(VK_SUBGROUP_FEATURE_SHUFFLE_BIT) &&
                   device_.IsSubgroupFeatureSupported(VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT)
               ? std::span<const u32>(QUERIES_PREFIX_SCAN_SUM_COMP_SPV)
-              : std::span<const u32>(QUERIES_PREFIX_SCAN_SUM_NOSUBGROUPS_COMP_SPV),
-          {32}),
+              : std::span<const u32>(QUERIES_PREFIX_SCAN_SUM_NOSUBGROUPS_COMP_SPV)),
       scheduler{scheduler_}, compute_pass_descriptor_queue{compute_pass_descriptor_queue_} {}
 
 void QueriesPrefixScanPass::Run(VkBuffer accumulation_buffer, VkBuffer dst_buffer,
                                 VkBuffer src_buffer, size_t number_of_sums,
-                                size_t max_accumulation_limit) {
-    size_t aligned_runs = Common::AlignUp(number_of_sums, 32);
+                                size_t min_accumulation_limit, size_t max_accumulation_limit) {
+    size_t current_runs = number_of_sums;
+    size_t offset = 0;
+    while (current_runs != 0) {
+        static constexpr size_t DISPATCH_SIZE = 2048U;
+        size_t runs_to_do = std::min<size_t>(current_runs, DISPATCH_SIZE);
+        current_runs -= runs_to_do;
+        compute_pass_descriptor_queue.Acquire();
+        compute_pass_descriptor_queue.AddBuffer(src_buffer, 0, number_of_sums * sizeof(u64));
+        compute_pass_descriptor_queue.AddBuffer(dst_buffer, 0, number_of_sums * sizeof(u64));
+        compute_pass_descriptor_queue.AddBuffer(accumulation_buffer, 0, sizeof(u64));
+        const void* const descriptor_data{compute_pass_descriptor_queue.UpdateData()};
+        size_t used_offset = offset;
+        offset += runs_to_do;
 
-    compute_pass_descriptor_queue.Acquire();
-    compute_pass_descriptor_queue.AddBuffer(src_buffer, 0, aligned_runs * sizeof(u64));
-    compute_pass_descriptor_queue.AddBuffer(dst_buffer, 0, aligned_runs * sizeof(u64));
-    compute_pass_descriptor_queue.AddBuffer(accumulation_buffer, 0, sizeof(u64));
-    const void* const descriptor_data{compute_pass_descriptor_queue.UpdateData()};
+        scheduler.RequestOutsideRenderPassOperationContext();
+        scheduler.Record([this, descriptor_data, min_accumulation_limit, max_accumulation_limit,
+                          runs_to_do, used_offset](vk::CommandBuffer cmdbuf) {
+            static constexpr VkMemoryBarrier read_barrier{
+                .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
+                .pNext = nullptr,
+                .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
+                .dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
+            };
+            static constexpr VkMemoryBarrier write_barrier{
+                .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
+                .pNext = nullptr,
+                .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
+                .dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT |
+                                 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
+                                 VK_ACCESS_INDIRECT_COMMAND_READ_BIT | VK_ACCESS_INDEX_READ_BIT |
+                                 VK_ACCESS_UNIFORM_READ_BIT |
+                                 VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT,
+            };
+            const QueriesPrefixScanPushConstants uniforms{
+                .min_accumulation_base = static_cast<u32>(min_accumulation_limit),
+                .max_accumulation_base = static_cast<u32>(max_accumulation_limit),
+                .accumulation_limit = static_cast<u32>(runs_to_do - 1),
+                .buffer_offset = static_cast<u32>(used_offset),
+            };
+            const VkDescriptorSet set = descriptor_allocator.Commit();
+            device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data);
 
-    scheduler.RequestOutsideRenderPassOperationContext();
-    scheduler.Record([this, descriptor_data, max_accumulation_limit, number_of_sums,
-                      aligned_runs](vk::CommandBuffer cmdbuf) {
-        static constexpr VkMemoryBarrier read_barrier{
-            .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
-            .pNext = nullptr,
-            .srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT,
-            .dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
-        };
-        static constexpr VkMemoryBarrier write_barrier{
-            .sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER,
-            .pNext = nullptr,
-            .srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT,
-            .dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_TRANSFER_READ_BIT |
-                             VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
-                             VK_ACCESS_INDIRECT_COMMAND_READ_BIT | VK_ACCESS_INDEX_READ_BIT |
-                             VK_ACCESS_UNIFORM_READ_BIT |
-                             VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT,
-        };
-        const QueriesPrefixScanPushConstants uniforms{
-            .max_accumulation_base = static_cast<u32>(max_accumulation_limit),
-            .accumulation_limit = static_cast<u32>(number_of_sums - 1),
-        };
-        const VkDescriptorSet set = descriptor_allocator.Commit();
-        device.GetLogical().UpdateDescriptorSet(set, *descriptor_template, descriptor_data);
-
-        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
-                               VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, read_barrier);
-        cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
-        cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {});
-        cmdbuf.PushConstants(*layout, VK_SHADER_STAGE_COMPUTE_BIT, uniforms);
-        cmdbuf.Dispatch(static_cast<u32>(aligned_runs / 32U), 1, 1);
-        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
-                               VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT, 0, write_barrier);
-    });
+            cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
+                                   VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, read_barrier);
+            cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
+            cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_COMPUTE, *layout, 0, set, {});
+            cmdbuf.PushConstants(*layout, VK_SHADER_STAGE_COMPUTE_BIT, uniforms);
+            cmdbuf.Dispatch(1, 1, 1);
+            cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
+                                   VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT, 0,
+                                   write_barrier);
+        });
+    }
 }
 
 ASTCDecoderPass::ASTCDecoderPass(const Device& device_, Scheduler& scheduler_,
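The rewritten Run() replaces the single aligned dispatch with a loop over fixed chunks: each iteration issues one workgroup (Dispatch(1, 1, 1)) covering up to DISPATCH_SIZE = 2048 queries, and buffer_offset tells the shader where its chunk begins so the accumulated value carries across dispatches. A standalone sketch of just that chunking, with a made-up query count:

#include <cstddef>
#include <cstdio>

int main() {
    constexpr size_t DISPATCH_SIZE = 2048;
    size_t number_of_sums = 5000; // hypothetical total
    size_t current_runs = number_of_sums;
    size_t offset = 0;
    while (current_runs != 0) {
        size_t runs_to_do = current_runs < DISPATCH_SIZE ? current_runs : DISPATCH_SIZE;
        current_runs -= runs_to_do;
        // Per dispatch: buffer_offset = offset, accumulation_limit = runs_to_do - 1.
        std::printf("dispatch: offset=%zu count=%zu\n", offset, runs_to_do);
        offset += runs_to_do;
    }
    // Prints chunks 0/2048, 2048/2048, 4096/904.
}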
src/video_core/renderer_vulkan/vk_compute_pass.h

@@ -104,7 +104,7 @@ public:
                           ComputePassDescriptorQueue& compute_pass_descriptor_queue_);
 
     void Run(VkBuffer accumulation_buffer, VkBuffer dst_buffer, VkBuffer src_buffer,
-             size_t number_of_sums, size_t max_accumulation_limit);
+             size_t number_of_sums, size_t min_accumulation_limit, size_t max_accumulation_limit);
 
 private:
     Scheduler& scheduler;
src/video_core/renderer_vulkan/vk_query_cache.cpp

@@ -181,7 +181,8 @@ public:
         });
         rasterizer->SyncOperation(std::move(func));
         accumulation_since_last_sync = false;
-        last_accumulation_checkpoint = std::min(last_accumulation_checkpoint, num_slots_used);
+        first_accumulation_checkpoint = std::min(first_accumulation_checkpoint, num_slots_used);
+        last_accumulation_checkpoint = std::max(last_accumulation_checkpoint, num_slots_used);
     }
 
     void CloseCounter() override {
@@ -285,7 +286,9 @@ public:
             resolve_buffers.push_back(intermediary_buffer_index);
             queries_prefix_scan_pass->Run(*accumulation_buffer, *buffers[intermediary_buffer_index],
                                           *buffers[resolve_buffer_index], num_slots_used,
-                                          std::min(last_accumulation_checkpoint, num_slots_used));
+                                          std::min(first_accumulation_checkpoint, num_slots_used),
+                                          last_accumulation_checkpoint);
+
         } else {
             scheduler.RequestOutsideRenderPassOperationContext();
             scheduler.Record([buffer = *accumulation_buffer](vk::CommandBuffer cmdbuf) {
@@ -298,7 +301,8 @@ public:
         rasterizer->SyncOperation(std::move(func));
         AbandonCurrentQuery();
         num_slots_used = 0;
-        last_accumulation_checkpoint = std::numeric_limits<size_t>::max();
+        first_accumulation_checkpoint = std::numeric_limits<size_t>::max();
+        last_accumulation_checkpoint = 0;
         accumulation_since_last_sync = has_multi_queries;
         pending_sync.clear();
     }
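The cache now brackets the slots touched since the last sync with two checkpoints: first_accumulation_checkpoint shrinks via std::min, last_accumulation_checkpoint grows via std::max, and the reset above re-arms them to the identity values of those operations. A toy model of the bookkeeping, with made-up slot counts:

#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <limits>

int main() {
    std::size_t first = std::numeric_limits<std::size_t>::max(); // identity for min
    std::size_t last = 0;                                        // identity for max
    const std::size_t num_slots_used_samples[] = {5, 3, 9};      // hypothetical values
    for (std::size_t n : num_slots_used_samples) {
        first = std::min(first, n);
        last = std::max(last, n);
    }
    std::printf("first=%zu last=%zu\n", first, last); // first=3 last=9
}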
@@ -506,7 +510,7 @@ private:
 
     template <bool is_resolve>
     size_t ObtainBuffer(size_t num_needed) {
-        const size_t log_2 = std::max<size_t>(6U, Common::Log2Ceil64(num_needed));
+        const size_t log_2 = std::max<size_t>(11U, Common::Log2Ceil64(num_needed));
        if constexpr (is_resolve) {
             if (resolve_table[log_2] != 0) {
                 return resolve_table[log_2] - 1;
@@ -563,6 +567,7 @@ private:
     VkQueryPool current_query_pool;
     size_t current_query_id;
     size_t num_slots_used{};
+    size_t first_accumulation_checkpoint{};
     size_t last_accumulation_checkpoint{};
     bool accumulation_since_last_sync{};
     VideoCommon::HostQueryBase* current_query;