summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorEric Anholt <eric@anholt.net>2014-08-11 11:45:10 -0700
committerEric Anholt <eric@anholt.net>2015-06-04 14:15:18 -0700
commitb02eb5ff854b847710f7df4527f90c2d12beb094 (patch)
tree7f4e90ead7718533d6a6030a4e845eb8f0840954
parent318fbbe04d301ee577289c02ca8632a3f3674ca1 (diff)
downloadlinux-b02eb5ff854b847710f7df4527f90c2d12beb094.tar.gz
drm/vc4: Add support for binner overflow memory allocation.
We should probably hang on to some of this memory across renders, since most renders will want some binner overflow. Signed-off-by: Eric Anholt <eric@anholt.net>
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h11
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c47
2 files changed, 52 insertions, 6 deletions
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index e741627211a1..82482a3fb5f6 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -92,6 +92,11 @@ enum vc4_bo_mode {
VC4_MODE_SHADER,
};
+struct vc4_bo_list_entry {
+ struct list_head head;
+ struct drm_gem_cma_object *bo;
+};
+
struct vc4_bo_exec_state {
struct drm_gem_cma_object *bo;
enum vc4_bo_mode mode;
@@ -117,6 +122,12 @@ struct exec_info {
*/
struct drm_gem_cma_object *exec_bo;
+ /* List of struct vc4_bo_list_entry allocated to accommodate
+ * binner overflow. These will be freed when the exec is
+ * done.
+ */
+ struct list_head overflow_list;
+
/**
* This tracks the per-shader-record state (packet 64) that
* determines the length of the shader record and the offset
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 3918d5bc8723..c449420d06c2 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -70,10 +70,35 @@ thread_stopped(struct drm_device *dev, uint32_t thread)
}
static int
-wait_for_bin_thread(struct drm_device *dev)
+try_adding_overflow_memory(struct drm_device *dev, struct exec_info *exec)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- int i;
+ struct vc4_bo_list_entry *entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+
+ if (!entry)
+ return -ENOMEM;
+
+ entry->bo = drm_gem_cma_create(dev, 256 * 1024);
+ if (IS_ERR(entry->bo)) {
+ int ret = PTR_ERR(entry->bo);
+ DRM_ERROR("Couldn't allocate binner overflow mem\n");
+ kfree(entry);
+ return ret;
+ }
+
+ list_add_tail(&entry->head, &exec->overflow_list);
+
+ V3D_WRITE(V3D_BPOA, entry->bo->paddr);
+ V3D_WRITE(V3D_BPOS, entry->bo->base.size);
+
+ return 0;
+}
+
+static int
+wait_for_bin_thread(struct drm_device *dev, struct exec_info *exec)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int i, ret;
for (i = 0; i < 1000000; i++) {
if (thread_stopped(dev, 0)) {
@@ -86,9 +111,9 @@ wait_for_bin_thread(struct drm_device *dev)
}
if (V3D_READ(V3D_PCS) & V3D_BMOOM) {
- /* XXX */
- DRM_ERROR("binner oom\n");
- return -EINVAL;
+ ret = try_adding_overflow_memory(dev, exec);
+ if (ret)
+ return ret;
}
}
@@ -148,7 +173,7 @@ vc4_submit(struct drm_device *dev, struct exec_info *exec)
submit_cl(dev, 0, ct0ca, ct0ea);
- ret = wait_for_bin_thread(dev);
+ ret = wait_for_bin_thread(dev, exec);
if (ret)
return ret;
@@ -366,6 +391,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
memset(&exec, 0, sizeof(exec));
exec.args = data;
+ INIT_LIST_HEAD(&exec.overflow_list);
mutex_lock(&dev->struct_mutex);
@@ -390,6 +416,15 @@ fail:
kfree(exec.bo);
}
+ while (!list_empty(&exec.overflow_list)) {
+ struct vc4_bo_list_entry *entry =
+ list_first_entry(&exec.overflow_list,
+ struct vc4_bo_list_entry, head);
+ drm_gem_object_unreference(&entry->bo->base);
+ list_del(&entry->head);
+ kfree(entry);
+ }
+
drm_gem_object_unreference(&exec.exec_bo->base);
mutex_unlock(&dev->struct_mutex);