summaryrefslogtreecommitdiff
path: root/src/virtio
diff options
context:
space:
mode:
authorJuston Li <justonli@google.com>2023-04-20 11:42:35 -0700
committerMarge Bot <emma+marge@anholt.net>2023-05-09 04:35:16 +0000
commita55d26b566f1ba67f770f689e4de9b0f70c0d47b (patch)
tree513eb250b539b7fa0b104b137d1090cad57de0c5 /src/virtio
parentc3e1140c20bdfa630381797b321dbc5d7a315d52 (diff)
downloadmesa-a55d26b566f1ba67f770f689e4de9b0f70c0d47b.tar.gz
venus: add back sparse binding support
Add back support for vkQueueBindSparse that works with fence and timeline semaphore feedback. For each vkQueueBindSparse batch, if it contains feedback then move the signal operations to a subsequent vkQueueSubmit with feedback cmds. This requires queue families that support vkQueueSubmit alongside sparse binding support, so any queue families that exclusively support sparse binding will be filtered out. Signed-off-by: Juston Li <justonli@google.com> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22616>
Diffstat (limited to 'src/virtio')
-rw-r--r--src/virtio/vulkan/vn_device.c3
-rw-r--r--src/virtio/vulkan/vn_queue.c230
-rw-r--r--src/virtio/vulkan/vn_queue.h6
3 files changed, 233 insertions, 6 deletions
diff --git a/src/virtio/vulkan/vn_device.c b/src/virtio/vulkan/vn_device.c
index a1b188b0ef2..2080df78c90 100644
--- a/src/virtio/vulkan/vn_device.c
+++ b/src/virtio/vulkan/vn_device.c
@@ -32,6 +32,9 @@ vn_queue_fini(struct vn_queue *queue)
if (queue->sync_fence != VK_NULL_HANDLE) {
vn_DestroyFence(dev_handle, queue->sync_fence, NULL);
}
+ if (queue->sparse_semaphore != VK_NULL_HANDLE) {
+ vn_DestroySemaphore(dev_handle, queue->sparse_semaphore, NULL);
+ }
vn_object_base_fini(&queue->base);
}
diff --git a/src/virtio/vulkan/vn_queue.c b/src/virtio/vulkan/vn_queue.c
index 57ba3cbca5c..f1aa8d1de6c 100644
--- a/src/virtio/vulkan/vn_queue.c
+++ b/src/virtio/vulkan/vn_queue.c
@@ -279,7 +279,8 @@ vn_queue_submission_prepare(struct vn_queue_submission *submit)
assert(!has_external_fence || !submit->has_feedback_fence);
submit->wsi_mem = NULL;
- if (submit->batch_count == 1) {
+ if (submit->batch_count == 1 &&
+ submit->batch_type != VK_STRUCTURE_TYPE_BIND_SPARSE_INFO) {
const struct wsi_memory_signal_submit_info *info = vk_find_struct_const(
submit->submit_batches[0].pNext, WSI_MEMORY_SIGNAL_SUBMIT_INFO_MESA);
if (info) {
@@ -966,13 +967,230 @@ vn_QueueSubmit2(VkQueue queue,
return vn_queue_submit(&submit);
}
+static VkResult
+vn_queue_bind_sparse_submit(struct vn_queue_submission *submit)
+{
+ struct vn_queue *queue = vn_queue_from_handle(submit->queue_handle);
+ struct vn_device *dev = queue->device;
+ struct vn_instance *instance = dev->instance;
+ VkResult result;
+
+ if (VN_PERF(NO_ASYNC_QUEUE_SUBMIT)) {
+ result = vn_call_vkQueueBindSparse(
+ instance, submit->queue_handle, submit->batch_count,
+ submit->sparse_batches, submit->fence_handle);
+ if (result != VK_SUCCESS)
+ return vn_error(dev->instance, result);
+ } else {
+ struct vn_instance_submit_command instance_submit;
+ vn_submit_vkQueueBindSparse(instance, 0, submit->queue_handle,
+ submit->batch_count, submit->sparse_batches,
+ submit->fence_handle, &instance_submit);
+
+ if (!instance_submit.ring_seqno_valid)
+ return vn_error(dev->instance, VK_ERROR_DEVICE_LOST);
+ }
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+vn_queue_bind_sparse_submit_batch(struct vn_queue_submission *submit,
+ uint32_t batch_index)
+{
+ struct vn_queue *queue = vn_queue_from_handle(submit->queue_handle);
+ VkDevice dev_handle = vn_device_to_handle(queue->device);
+ const VkBindSparseInfo *sparse_info = &submit->sparse_batches[batch_index];
+ const VkSemaphore *signal_sem = sparse_info->pSignalSemaphores;
+ uint32_t signal_sem_count = sparse_info->signalSemaphoreCount;
+ uint32_t sem_feedback_count = 0;
+ VkResult result;
+
+ struct vn_queue_submission sparse_batch = {
+ .batch_type = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,
+ .queue_handle = submit->queue_handle,
+ .batch_count = 1,
+ .fence_handle = VK_NULL_HANDLE,
+ };
+
+ for (uint32_t i = 0; i < signal_sem_count; i++) {
+ struct vn_semaphore *sem =
+ vn_semaphore_from_handle(sparse_info->pSignalSemaphores[i]);
+ if (sem->feedback.slot)
+ sem_feedback_count++;
+ }
+
+ /* lazily create sparse semaphore */
+ if (queue->sparse_semaphore == VK_NULL_HANDLE) {
+ queue->sparse_semaphore_counter = 1;
+ const VkSemaphoreTypeCreateInfo sem_type_create_info = {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,
+ .pNext = NULL,
+ /* This must be timeline type to adhere to mesa's requirement
+ * not to mix binary semaphores with wait-before-signal.
+ */
+ .semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE,
+ .initialValue = 1,
+ };
+ const VkSemaphoreCreateInfo create_info = {
+ .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
+ .pNext = &sem_type_create_info,
+ .flags = 0,
+ };
+
+ result = vn_CreateSemaphore(dev_handle, &create_info, NULL,
+ &queue->sparse_semaphore);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+
+ /* Setup VkTimelineSemaphoreSubmitInfo's for our queue sparse semaphore
+ * so that the vkQueueSubmit waits on the vkQueueBindSparse signal.
+ */
+ queue->sparse_semaphore_counter++;
+ struct VkTimelineSemaphoreSubmitInfo wait_timeline_sem_info = { 0 };
+ wait_timeline_sem_info.sType =
+ VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO;
+ wait_timeline_sem_info.signalSemaphoreValueCount = 1;
+ wait_timeline_sem_info.pSignalSemaphoreValues =
+ &queue->sparse_semaphore_counter;
+
+ struct VkTimelineSemaphoreSubmitInfo signal_timeline_sem_info = { 0 };
+ signal_timeline_sem_info.sType =
+ VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO;
+ signal_timeline_sem_info.waitSemaphoreValueCount = 1;
+ signal_timeline_sem_info.pWaitSemaphoreValues =
+ &queue->sparse_semaphore_counter;
+
+ /* Split up the original wait and signal semaphores into its respective
+ * vkTimelineSemaphoreSubmitInfo
+ */
+ const struct VkTimelineSemaphoreSubmitInfo *timeline_sem_info =
+ vk_find_struct_const(sparse_info->pNext,
+ TIMELINE_SEMAPHORE_SUBMIT_INFO);
+ if (timeline_sem_info) {
+ if (timeline_sem_info->waitSemaphoreValueCount) {
+ wait_timeline_sem_info.waitSemaphoreValueCount =
+ timeline_sem_info->waitSemaphoreValueCount;
+ wait_timeline_sem_info.pWaitSemaphoreValues =
+ timeline_sem_info->pWaitSemaphoreValues;
+ }
+
+ if (timeline_sem_info->signalSemaphoreValueCount) {
+ signal_timeline_sem_info.signalSemaphoreValueCount =
+ timeline_sem_info->signalSemaphoreValueCount;
+ signal_timeline_sem_info.pSignalSemaphoreValues =
+ timeline_sem_info->pSignalSemaphoreValues;
+ }
+ }
+
+ /* Attach the original VkDeviceGroupBindSparseInfo if it exists */
+ struct VkDeviceGroupBindSparseInfo batch_device_group_info;
+ const struct VkDeviceGroupBindSparseInfo *device_group_info =
+ vk_find_struct_const(sparse_info->pNext, DEVICE_GROUP_BIND_SPARSE_INFO);
+ if (device_group_info) {
+ memcpy(&batch_device_group_info, device_group_info,
+ sizeof(*device_group_info));
+ batch_device_group_info.pNext = NULL;
+
+ wait_timeline_sem_info.pNext = &batch_device_group_info;
+ }
+
+ /* Copy the original batch VkBindSparseInfo modified to signal
+ * our sparse semaphore.
+ */
+ VkBindSparseInfo batch_sparse_info;
+ memcpy(&batch_sparse_info, sparse_info, sizeof(*sparse_info));
+
+ batch_sparse_info.pNext = &wait_timeline_sem_info;
+ batch_sparse_info.signalSemaphoreCount = 1;
+ batch_sparse_info.pSignalSemaphores = &queue->sparse_semaphore;
+
+ /* Set up the SubmitInfo to wait on our sparse semaphore before sending
+ * feedback and signaling the original semaphores/fence
+ *
+ * Even if this VkBindSparse batch does not have feedback semaphores,
+ * we still glue all the batches together to ensure the feedback
+ * fence occurs after.
+ */
+ VkPipelineStageFlags stage_masks = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+ VkSubmitInfo batch_submit_info = {
+ .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
+ .pNext = &signal_timeline_sem_info,
+ .waitSemaphoreCount = 1,
+ .pWaitSemaphores = &queue->sparse_semaphore,
+ .pWaitDstStageMask = &stage_masks,
+ .signalSemaphoreCount = signal_sem_count,
+ .pSignalSemaphores = signal_sem,
+ };
+
+ /* Set the possible fence if on the last batch */
+ VkFence fence_handle = VK_NULL_HANDLE;
+ if (submit->has_feedback_fence &&
+ batch_index == (submit->batch_count - 1)) {
+ fence_handle = submit->fence_handle;
+ }
+
+ sparse_batch.sparse_batches = &batch_sparse_info;
+ result = vn_queue_bind_sparse_submit(&sparse_batch);
+ if (result != VK_SUCCESS)
+ return result;
+
+ result = vn_QueueSubmit(submit->queue_handle, 1, &batch_submit_info,
+ fence_handle);
+ if (result != VK_SUCCESS)
+ return result;
+
+ return VK_SUCCESS;
+}
+
VkResult
-vn_QueueBindSparse(UNUSED VkQueue queue,
- UNUSED uint32_t bindInfoCount,
- UNUSED const VkBindSparseInfo *pBindInfo,
- UNUSED VkFence fence)
+vn_QueueBindSparse(VkQueue queue,
+ uint32_t bindInfoCount,
+ const VkBindSparseInfo *pBindInfo,
+ VkFence fence)
{
- return VK_ERROR_DEVICE_LOST;
+ VN_TRACE_FUNC();
+ VkResult result;
+
+ struct vn_queue_submission submit = {
+ .batch_type = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO,
+ .queue_handle = queue,
+ .batch_count = bindInfoCount,
+ .sparse_batches = pBindInfo,
+ .fence_handle = fence,
+ };
+
+ result = vn_queue_submission_prepare(&submit);
+ if (result != VK_SUCCESS)
+ return result;
+
+ if (!submit.batch_count) {
+ /* skip no-op submit */
+ if (submit.fence_handle == VK_NULL_HANDLE)
+ return VK_SUCCESS;
+
+ /* if empty batch, just send a vkQueueSubmit with the fence */
+ result =
+ vn_QueueSubmit(submit.queue_handle, 0, NULL, submit.fence_handle);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+
+ /* if feedback isn't used in the batch, can directly submit */
+ if (!submit.has_feedback_fence && !submit.has_feedback_semaphore) {
+ result = vn_queue_bind_sparse_submit(&submit);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+
+ for (uint32_t i = 0; i < submit.batch_count; i++) {
+ result = vn_queue_bind_sparse_submit_batch(&submit, i);
+ if (result != VK_SUCCESS)
+ return result;
+ }
+
+ return VK_SUCCESS;
}
VkResult
diff --git a/src/virtio/vulkan/vn_queue.h b/src/virtio/vulkan/vn_queue.h
index b3bf2145875..b91ef1bcf67 100644
--- a/src/virtio/vulkan/vn_queue.h
+++ b/src/virtio/vulkan/vn_queue.h
@@ -31,6 +31,12 @@ struct vn_queue {
/* sync fence used for Android wsi */
VkFence sync_fence;
+
+ /* semaphore for gluing vkQueueSubmit feedback commands to
+ * vkQueueBindSparse
+ */
+ VkSemaphore sparse_semaphore;
+ uint64_t sparse_semaphore_counter;
};
VK_DEFINE_HANDLE_CASTS(vn_queue, base.base, VkQueue, VK_OBJECT_TYPE_QUEUE)