author	Eric Anholt <eric@anholt.net>	2015-06-29 16:11:45 -0700
committer	Eric Anholt <eric@anholt.net>	2015-06-29 19:18:59 -0700
commit	ba13f38c76b9cc5f16bd02cf310abe7a6ae91df4 (patch)
tree	49c4360f60d23599f6842113c179825da108a594
parent	3c84e9023b144abfb2626fe5fe37a6e42dc3205a (diff)
download	linux-ba13f38c76b9cc5f16bd02cf310abe7a6ae91df4.tar.gz
drm/vc4: Implement async atomic modesets.
In order to support the pageflip ioctl, we have to be able to defer the
body of the mode set to a workqueue, so that the pageflip ioctl can
return immediately.

Pageflipping gets us ~3x performance on fullscreen GL rendering in X.

Signed-off-by: Eric Anholt <eric@anholt.net>
-rw-r--r--	drivers/gpu/drm/vc4/vc4_drv.c	1
-rw-r--r--	drivers/gpu/drm/vc4/vc4_drv.h	16
-rw-r--r--	drivers/gpu/drm/vc4/vc4_gem.c	36
-rw-r--r--	drivers/gpu/drm/vc4/vc4_kms.c	98
4 files changed, 129 insertions, 22 deletions
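For context, the userspace path this non-blocking commit serves looks roughly like the sketch below: the page flip is queued, the ioctl returns right away, and completion is reported later as a DRM event. This is a minimal libdrm usage sketch, not part of the patch; the fd, crtc_id and fb_id are assumed to come from earlier setup code.

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void page_flip_handler(int fd, unsigned int sequence,
			      unsigned int tv_sec, unsigned int tv_usec,
			      void *user_data)
{
	/* The flip has latched; the previously scanned-out buffer is free again. */
	*(int *)user_data = 1;
}

/* Queue a flip to fb_id and wait for its completion event.  The
 * DRM_MODE_PAGE_FLIP_EVENT flag asks the kernel to send an event when the
 * flip actually happens; the ioctl itself returns immediately, which is
 * what the deferred (workqueue-based) commit below makes possible. */
static int flip_and_wait(int fd, uint32_t crtc_id, uint32_t fb_id)
{
	int flip_done = 0;
	drmEventContext evctx = {
		.version = 2,
		.page_flip_handler = page_flip_handler,
	};
	int ret;

	ret = drmModePageFlip(fd, crtc_id, fb_id,
			      DRM_MODE_PAGE_FLIP_EVENT, &flip_done);
	if (ret)
		return ret;

	/* Rendering of the next frame could be issued here, overlapping
	 * with the outstanding flip. */

	while (!flip_done)
		drmHandleEvent(fd, &evctx);

	return 0;
}

Before this change the driver rejected async commits with -EBUSY (see the vc4_atomic_commit hunk below), so userspace could not use page flips at all on vc4.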
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 64fea2618084..5c023ba4f6ad 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -87,7 +87,6 @@ static int vc4_drm_unload(struct drm_device *dev)
static void vc4_drm_preclose(struct drm_device *dev, struct drm_file *file)
{
- struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_crtc *crtc;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index ed3f48212e00..13b475785ca0 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -40,6 +40,11 @@ struct vc4_dev {
wait_queue_head_t job_wait_queue;
struct work_struct job_done_work;
+ /* List of struct vc4_seqno_cb for callbacks to be made from a
+ * workqueue when the given seqno is passed.
+ */
+ struct list_head seqno_cb_list;
+
/* The binner overflow memory that's currently set up in
* BPOA/BPOS registers. When overflow occurs and a new one is
* allocated, the previous one will be moved to
@@ -74,6 +79,8 @@ struct vc4_dev {
struct timer_list timer;
struct work_struct reset_work;
} hangcheck;
+
+ struct semaphore async_modeset;
};
static inline struct vc4_dev *
@@ -113,6 +120,12 @@ to_vc4_bo(struct drm_gem_object *bo)
return (struct vc4_bo *)bo;
}
+struct vc4_seqno_cb {
+ struct work_struct work;
+ uint64_t seqno;
+ void (*func)(struct vc4_seqno_cb *cb);
+};
+
struct vc4_v3d {
struct platform_device *pdev;
void __iomem *regs;
@@ -385,6 +398,9 @@ void vc4_submit_next_job(struct drm_device *dev);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
+int vc4_queue_seqno_cb(struct drm_device *dev,
+ struct vc4_seqno_cb *cb, uint64_t seqno,
+ void (*func)(struct vc4_seqno_cb *cb));
/* vc4_hdmi.c */
void vc4_hdmi_register(void);
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index c77fbf6336a5..33779de7f2e0 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -458,6 +458,7 @@ void
vc4_job_handle_completed(struct vc4_dev *vc4)
{
unsigned long irqflags;
+ struct vc4_seqno_cb *cb, *cb_temp;
spin_lock_irqsave(&vc4->job_lock, irqflags);
while (!list_empty(&vc4->job_done_list)) {
@@ -472,6 +473,40 @@ vc4_job_handle_completed(struct vc4_dev *vc4)
}
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+ list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
+ if (cb->seqno <= vc4->finished_seqno) {
+ list_del_init(&cb->work.entry);
+ schedule_work(&cb->work);
+ }
+ }
+}
+
+static void vc4_seqno_cb_work(struct work_struct *work)
+{
+ struct vc4_seqno_cb *cb = container_of(work, struct vc4_seqno_cb, work);
+ cb->func(cb);
+}
+
+int vc4_queue_seqno_cb(struct drm_device *dev,
+ struct vc4_seqno_cb *cb, uint64_t seqno,
+ void (*func)(struct vc4_seqno_cb *cb))
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ int ret = 0;
+
+ cb->func = func;
+ INIT_WORK(&cb->work, vc4_seqno_cb_work);
+
+ mutex_lock(&dev->struct_mutex);
+ if (seqno > vc4->finished_seqno) {
+ cb->seqno = seqno;
+ list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
+ } else {
+ schedule_work(&cb->work);
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
}
/* Scheduled when any job has been completed, this walks the list of
@@ -621,6 +656,7 @@ vc4_gem_init(struct drm_device *dev)
INIT_LIST_HEAD(&vc4->job_list);
INIT_LIST_HEAD(&vc4->job_done_list);
+ INIT_LIST_HEAD(&vc4->seqno_cb_list);
spin_lock_init(&vc4->job_lock);
INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index e498696fe21f..acfd1b23ab4a 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -14,6 +14,55 @@
#include "drm_fb_cma_helper.h"
#include "vc4_drv.h"
+struct vc4_commit {
+ struct drm_device *dev;
+ struct drm_atomic_state *state;
+ struct vc4_seqno_cb cb;
+};
+
+static void
+vc4_atomic_complete_commit(struct vc4_commit *c)
+{
+ struct drm_atomic_state *state = c->state;
+ struct drm_device *dev = state->dev;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ drm_atomic_helper_commit_modeset_disables(dev, state);
+
+ drm_atomic_helper_commit_planes(dev, state);
+
+ drm_atomic_helper_commit_modeset_enables(dev, state);
+
+ drm_atomic_helper_wait_for_vblanks(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+
+ drm_atomic_state_free(state);
+
+ up(&vc4->async_modeset);
+
+ kfree(c);
+}
+
+static void
+vc4_atomic_complete_commit_seqno_cb(struct vc4_seqno_cb *cb)
+{
+ struct vc4_commit *c = container_of(cb, struct vc4_commit, cb);
+
+ vc4_atomic_complete_commit(c);
+}
+
+static struct vc4_commit *commit_init(struct drm_atomic_state *state)
+{
+ struct vc4_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
+
+ if (!c)
+ return NULL;
+ c->dev = state->dev;
+ c->state = state;
+
+ return c;
+}
/**
* vc4_atomic_commit - commit validated state object
@@ -32,18 +81,29 @@ static int vc4_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool async)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret;
int i;
uint64_t wait_seqno = 0;
+ struct vc4_commit *c;
- if (async) {
- DRM_ERROR("async\n");
- return -EBUSY;
+ c = commit_init(state);
+ if (!c)
+ return -ENOMEM;
+
+ /* Make sure that any outstanding modesets have finished. */
+ ret = down_interruptible(&vc4->async_modeset);
+ if (ret) {
+ kfree(c);
+ return ret;
}
ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret)
+ if (ret) {
+ kfree(c);
+ up(&vc4->async_modeset);
return ret;
+ }
for (i = 0; i < dev->mode_config.num_total_plane; i++) {
struct drm_plane *plane = state->planes[i];
@@ -53,10 +113,9 @@ static int vc4_atomic_commit(struct drm_device *dev,
continue;
if ((plane->state->fb != new_state->fb) && new_state->fb) {
- struct drm_gem_cma_object *cma_bo;
- struct vc4_bo *bo;
- cma_bo = drm_fb_cma_get_gem_obj(plane->state->fb, 0);
- bo = to_vc4_bo(&cma_bo->base);
+ struct drm_gem_cma_object *cma_bo =
+ drm_fb_cma_get_gem_obj(new_state->fb, 0);
+ struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
wait_seqno = max(bo->seqno, wait_seqno);
}
}
@@ -85,19 +144,13 @@ static int vc4_atomic_commit(struct drm_device *dev,
* current layout.
*/
- vc4_wait_for_seqno(dev, wait_seqno, ~0ull, false);
-
- drm_atomic_helper_commit_modeset_disables(dev, state);
-
- drm_atomic_helper_commit_planes(dev, state);
-
- drm_atomic_helper_commit_modeset_enables(dev, state);
-
- drm_atomic_helper_wait_for_vblanks(dev, state);
-
- drm_atomic_helper_cleanup_planes(dev, state);
-
- drm_atomic_state_free(state);
+ if (async) {
+ vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
+ vc4_atomic_complete_commit_seqno_cb);
+ } else {
+ vc4_wait_for_seqno(dev, wait_seqno, ~0ull, false);
+ vc4_atomic_complete_commit(c);
+ }
return 0;
}
@@ -139,8 +192,11 @@ fail:
int
vc4_kms_load(struct drm_device *dev)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret;
+ sema_init(&vc4->async_modeset, 1);
+
ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
if (ret < 0) {
dev_err(dev->dev, "failed to initialize vblank\n");