diff options
author:    Eric Anholt <eric@anholt.net>  2015-01-02 10:48:08 -0800
committer: Eric Anholt <eric@anholt.net>  2015-06-04 14:15:30 -0700
commit:    6fb045e5d83019fe4f7ece6b0161adb2c9e61cb8 (patch)
tree:      9e85b8925fb42e073f44313f2bc369971bd1c73d
parent:    92640f289261e674398a712e96aa022418c596d6 (diff)
download:  linux-6fb045e5d83019fe4f7ece6b0161adb2c9e61cb8.tar.gz
drm/vc4: Evict user mappings of shaders while they're being executed.
If the user could rewrite shader code while the GPU was executing it,
then all of vc4_validate_shaders.c's safety checks would be
invalidated, notably by using the direct-addressing TMU fetches to
read arbitrary system memory.
This hopefully closes our last remaining root hole.
Signed-off-by: Eric Anholt <eric@anholt.net>
-rw-r--r--  drivers/gpu/drm/vc4/vc4_bo.c               | 107
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.c              |   5
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.h              |   5
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c              |   2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_validate_shaders.c |   2
5 files changed, 118 insertions, 3 deletions
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index ae3ade165888..952b7c0cc325 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -253,3 +253,110 @@ vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags) return drm_gem_prime_export(dev, obj, flags); } + + +/* vc4_gem_fault - fault handler for user mappings of objects. + * + * We don't just use the GEM helpers because we have to make sure that + * the user can't touch shader contents while they're being executed. + */ +static int +vc4_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct drm_gem_object *gem_bo = vma->vm_private_data; + struct vc4_bo *bo = to_vc4_bo(gem_bo); + struct drm_device *dev = gem_bo->dev; + pgoff_t page_offset; + unsigned long pfn; + int ret = 0; + + /* We don't use vmf->pgoff since that has the fake offset */ + page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> + PAGE_SHIFT; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + goto out; + + if (bo->validated_shader) { + ret = vc4_wait_for_seqno(dev, bo->seqno, ~0ull); + if (ret) + goto unlock; + + kfree(bo->validated_shader); + bo->validated_shader = NULL; + } + + pfn = (bo->base.paddr >> PAGE_SHIFT) + page_offset; + + ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); +unlock: + mutex_unlock(&dev->struct_mutex); +out: + switch (ret) { + case 0: + case -ERESTARTSYS: + case -EINTR: + case -EBUSY: + ret = VM_FAULT_NOPAGE; + break; + case -ENOMEM: + ret = VM_FAULT_OOM; + break; + case -ENOSPC: + case -EFAULT: + ret = VM_FAULT_SIGBUS; + break; + default: + WARN_ONCE(ret, "unhandled error in vc4_gem_fault: %i\n", ret); + ret = VM_FAULT_SIGBUS; + break; + } + + return ret; +} + +const struct vm_operations_struct vc4_vm_ops = { + .fault = vc4_gem_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +int +vc4_mmap(struct file *filp, struct vm_area_struct *vma) +{ + int ret; + + ret 
= drm_gem_mmap(filp, vma); + if (ret) + return ret; + + /* Since our objects all come from normal system memory, clear + * PFNMAP that was defaulted by drm_gem_mmap_obj() to indicate + * that they have a "struct page" managing them. + */ + vma->vm_flags &= ~VM_PFNMAP; + + /* Not sure why we need to do this. */ + vma->vm_flags |= VM_MIXEDMAP; + + /* We only do whole-object mappings. */ + vma->vm_pgoff = 0; + + return 0; +} + +/* Removes all user mappings of the object. + * + * This is used to ensure that the user can't modify shaders while the + * GPU is executing them. If the user tries to access these unmapped + * pages, they'll hit a pagefault and end up in vc4_gem_fault(), which + * then can wait for execution to finish. + */ +void +vc4_force_user_unmap(struct drm_gem_object *gem_obj) +{ + struct drm_device *dev = gem_obj->dev; + + drm_vma_node_unmap(&gem_obj->vma_node, dev->anon_inode->i_mapping); +} diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 7408deecc857..0d16e13014a4 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -100,7 +100,7 @@ static const struct file_operations vc4_drm_fops = { .open = drm_open, .release = drm_release, .unlocked_ioctl = drm_ioctl, - .mmap = drm_gem_cma_mmap, + .mmap = vc4_mmap, .poll = drm_poll, .read = drm_read, #ifdef CONFIG_COMPAT @@ -115,6 +115,7 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0), }; + static struct drm_driver vc4_drm_driver = { .driver_features = (DRIVER_MODESET | DRIVER_GEM | @@ -139,7 +140,7 @@ static struct drm_driver vc4_drm_driver = { #endif .gem_free_object = vc4_free_object, - .gem_vm_ops = &drm_gem_cma_vm_ops, + .gem_vm_ops = &vc4_vm_ops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 57ace7cfb322..bce9493d5383 100644 --- 
a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -352,6 +352,7 @@ void vc4_disable_vblank(struct drm_device *dev, int crtc_id); #define wait_for(COND, MS) _wait_for(COND, MS, 1) /* vc4_bo.c */ +extern const struct vm_operations_struct vc4_vm_ops; void vc4_bo_cache_init(struct drm_device *dev); void vc4_free_object(struct drm_gem_object *gem_obj); struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size); @@ -362,6 +363,8 @@ struct drm_gem_object *vc4_prime_import(struct drm_device *dev, struct dma_buf *dma_buf); struct dma_buf *vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags); +int vc4_mmap(struct file *filp, struct vm_area_struct *vma); +void vc4_force_user_unmap(struct drm_gem_object *gem_obj); /* vc4_debugfs.c */ int vc4_debugfs_init(struct drm_minor *minor); @@ -379,6 +382,8 @@ int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data, int vc4_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void vc4_submit_next_job(struct drm_device *dev); +int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, + uint64_t timeout_ns); /* vc4_hdmi.c */ void vc4_hdmi_register(void); diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index e0627f446667..d0cef8627d1b 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -117,7 +117,7 @@ submit_cl(struct drm_device *dev, uint32_t thread, uint32_t start, uint32_t end) barrier(); } -static int +int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns) { struct vc4_dev *vc4 = to_vc4_dev(dev); diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c index 839d5521cfe5..2b4a4ec64f7a 100644 --- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c @@ -382,6 +382,8 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) if (shader_bo->validated_shader) return 
shader_bo->validated_shader; + vc4_force_user_unmap(&shader_obj->base); + /* Our validation relies on nothing modifying the shader * contents after us, so just ban sending us busy BOs. */ |