author     Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>  2007-02-09 00:02:02 +0100
committer  Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>  2007-02-09 00:02:02 +0100
commit     6a49d9a8abd9f168211017c2d585d0d64e89c530 (patch)
tree       f977ac29910f0abee4bbbc632104d17490cdf325
parent     b2bcbf874b0f26ca0c490fb0453bef64ce6d9dd7 (diff)
download   drm-6a49d9a8abd9f168211017c2d585d0d64e89c530.tar.gz
Fix evict_mutex locking range.
Implement unmappable buffers (the fault handler moves them to a mappable
location when needed). Various bugfixes.
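
The key change in drm_bo_move_buffer() below is the locking range: bm->evict_mutex
is now held across both the placement search (drm_bo_mem_space) and the move itself
(drm_bo_handle_move_mem), and released at a single exit point, instead of being
dropped right after the search. The following standalone sketch models just that
pattern with a pthread mutex; it is not the kernel code, and the *_stub names are
invented stand-ins for illustration only.

    /*
     * Minimal sketch of the evict_mutex locking range established by this
     * patch: hold the lock over placement search and move, unlock once.
     */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t evict_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int find_space_stub(void) { return 0; }  /* stands in for drm_bo_mem_space()       */
    static int do_move_stub(void)    { return 0; }  /* stands in for drm_bo_handle_move_mem() */

    static int move_buffer_stub(void)
    {
            int ret;

            pthread_mutex_lock(&evict_mutex);

            ret = find_space_stub();
            if (ret)
                    goto out_unlock;        /* keep the mutex until the common exit */

            ret = do_move_stub();

    out_unlock:
            pthread_mutex_unlock(&evict_mutex);     /* one unlock covers both steps */
            return ret;
    }

    int main(void)
    {
            printf("move_buffer_stub() -> %d\n", move_buffer_stub());
            return 0;
    }

Holding the mutex over the whole sequence keeps eviction decisions and the resulting
move atomic with respect to other evictors, which is what the hunks in drm_bo.c
below implement.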
-rw-r--r--  linux-core/drm_bo.c       | 31
-rw-r--r--  linux-core/drm_bo_move.c  |  5
-rw-r--r--  linux-core/drm_vm.c       | 24
3 files changed, 38 insertions(+), 22 deletions(-)
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 67e7d37f..3e0d05d2 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -1149,7 +1149,7 @@ static void drm_buffer_user_object_unmap(drm_file_t * priv,
  */
 static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
-                              int no_wait, int force_no_move)
+                              int no_wait, int force_no_move, int move_unfenced)
 {
         drm_device_t *dev = bo->dev;
         drm_buffer_manager_t *bm = &dev->bm;
@@ -1162,14 +1162,6 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
         drm_bo_busy(bo);
         /*
-         * Make sure we're not mapped.
-         */
-
-        ret = drm_bo_wait_unmapped(bo, no_wait);
-        if (ret)
-                return ret;
-
-        /*
          * Wait for outstanding fences.
          */
@@ -1195,15 +1187,15 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
          * Determine where to move the buffer.
          */
         ret = drm_bo_mem_space(dev, &mem, no_wait);
-        mutex_unlock(&bm->evict_mutex);
         if (ret)
-                return ret;
+                goto out_unlock;
         ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
-        if (ret) {
-                mutex_lock(&dev->struct_mutex);
+ out_unlock:
+        if (ret || !move_unfenced) {
+                mutex_lock(&dev->struct_mutex);
                 if (mem.mm_node) {
                         drm_mm_put_block(mem.mm_node);
                         mem.mm_node = NULL;
@@ -1214,6 +1206,7 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
                 mutex_unlock(&dev->struct_mutex);
         }
+        mutex_unlock(&bm->evict_mutex);
         return ret;
 }
@@ -1293,6 +1286,10 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo,
                 return ret;
         }
+        ret = drm_bo_wait_unmapped(bo, no_wait);
+        if (ret)
+                return ret;
+
         if (bo->type == drm_bo_type_fake) {
                 ret = drm_bo_check_fake(dev, &bo->mem);
                 if (ret)
@@ -1315,7 +1312,7 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo,
         if (!drm_bo_mem_compat(&bo->mem)) {
                 ret = drm_bo_move_buffer(bo, bo->mem.mask & DRM_BO_MASK_MEMTYPE,
-                                         no_wait, 1);
+                                         no_wait, 1, move_unfenced);
                 if (ret) {
                         if (ret != -EAGAIN)
                                 DRM_ERROR("Failed moving buffer.\n");
@@ -1728,11 +1725,9 @@ static int drm_bo_force_list_clean(drm_device_t * dev,
                                 unsigned long _end = jiffies + 3 * DRM_HZ;
                                 do {
                                         ret = drm_bo_wait(entry, 0, 1, 0);
-                                        if (ret && allow_errors) {
-                                                if (ret == -EINTR)
-                                                        ret = -EAGAIN;
+                                        if (ret && allow_errors)
                                                 goto out_err;
-                                        }
+
                                 } while (ret && !time_after_eq(jiffies, _end));
                                 if (entry->fence) {
diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c
index d2c44501..53f7fea8 100644
--- a/linux-core/drm_bo_move.c
+++ b/linux-core/drm_bo_move.c
@@ -295,10 +295,7 @@ int drm_buffer_object_transfer(drm_buffer_object_t *bo,
         INIT_LIST_HEAD(&fbo->ddestroy);
         INIT_LIST_HEAD(&fbo->lru);
-        bo->mem.mm_node = NULL;
-        bo->ttm = NULL;
         atomic_inc(&bo->fence->usage);
-        bo->mem.flags = 0;
         fbo->mem.mm_node->private = (void *)fbo;
         atomic_set(&fbo->usage, 1);
@@ -355,7 +352,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t *bo,
                 /* This should help pipeline ordinary buffer moves.
                  *
                  * Hang old buffer memory on a new buffer object,
-                 * and leave it to be released when the blit
+                 * and leave it to be released when the GPU
                  * operation has completed.
                  */
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index 5afa9800..4a41e761 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -746,6 +746,30 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
         mutex_lock(&bo->mutex);
+        /*
+         * If buffer happens to be in a non-mappable location,
+         * move it to a mappable.
+         */
+
+        if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
+                uint32_t mask_save = bo->mem.mask;
+                uint32_t new_mask = bo->mem.mask |
+                        DRM_BO_FLAG_MAPPABLE |
+                        DRM_BO_FLAG_FORCE_MAPPABLE;
+
+                err = drm_bo_move_buffer(bo, new_mask, 0, 0);
+                bo->mem.mask = mask_save;
+
+                if (!err)
+                        err = drm_bo_wait(bo, 0, 0, 0);
+
+                if (err) {
+                        data->type = (err == -EAGAIN) ?
+                                VM_FAULT_MINOR : VM_FAULT_SIGBUS;
+                        goto out_unlock;
+                }
+        }
+
         if (address > vma->vm_end) {
                 data->type = VM_FAULT_SIGBUS;
                 goto out_unlock;
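
For reference, the fault-path logic added to drm_bo_vm_fault() above does the
following: if the faulting buffer is not currently in a mappable location, request
a move with DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_FORCE_MAPPABLE, restore the caller's
placement mask, wait for the buffer to go idle, and translate -EAGAIN into a minor
fault (retry) and any other error into SIGBUS. The sketch below is a simplified
user-space model of that flow, not the kernel code; every identifier in it is an
invented stand-in for the corresponding DRM structure or helper.

    /*
     * Simplified model of "fault moves unmappable buffers to a mappable
     * location": check placement, force a mappable move, map errors to
     * fault results.
     */
    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FLAG_MAPPABLE       (1u << 0)   /* models DRM_BO_FLAG_MAPPABLE */
    #define FLAG_FORCE_MAPPABLE (1u << 1)   /* models DRM_BO_FLAG_FORCE_MAPPABLE */

    enum fault_result { FAULT_OK, FAULT_MINOR, FAULT_SIGBUS };

    struct bo_stub {
            uint32_t flags;  /* current placement flags */
            uint32_t mask;   /* requested placement mask */
    };

    /* Stand-ins for drm_bo_move_buffer() and drm_bo_wait(); both succeed here. */
    static int move_buffer_stub(struct bo_stub *bo, uint32_t new_mask)
    {
            bo->flags |= new_mask & FLAG_MAPPABLE;
            return 0;
    }

    static int wait_idle_stub(struct bo_stub *bo)
    {
            (void)bo;
            return 0;
    }

    static enum fault_result fault_stub(struct bo_stub *bo)
    {
            if (!(bo->flags & FLAG_MAPPABLE)) {
                    uint32_t mask_save = bo->mask;
                    uint32_t new_mask = bo->mask | FLAG_MAPPABLE | FLAG_FORCE_MAPPABLE;
                    int err = move_buffer_stub(bo, new_mask);

                    bo->mask = mask_save;   /* the forced flags are transient */
                    if (!err)
                            err = wait_idle_stub(bo);
                    if (err)
                            return (err == -EAGAIN) ? FAULT_MINOR : FAULT_SIGBUS;
            }
            return FAULT_OK;        /* buffer is mappable; the real handler maps the page */
    }

    int main(void)
    {
            struct bo_stub bo = { .flags = 0, .mask = 0 };
            printf("fault_stub() -> %d\n", fault_stub(&bo));
            return 0;
    }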