author	Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>	2007-02-02 19:49:11 +0100
committer	Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>	2007-02-02 19:49:11 +0100
commit	63f2abd721c40f1cddae555c79b4ab4c55aae006 (patch)
tree	bb898c1ae63c044fc506463b7bb078466829a732
parent	c269d560e4d71448cfc9c2ea51eee3d5feafaad4 (diff)
Make later kernels also work with buffer object vm
and clean up some function names.
-rw-r--r--	linux-core/drmP.h	7
-rw-r--r--	linux-core/drm_agpsupport.c	2
-rw-r--r--	linux-core/drm_bo.c	106
-rw-r--r--	linux-core/drm_compat.c	138
-rw-r--r--	linux-core/drm_compat.h	18
-rw-r--r--	linux-core/drm_ttm.c	51
-rw-r--r--	linux-core/drm_ttm.h	13
-rw-r--r--	linux-core/drm_vm.c	16
8 files changed, 166 insertions, 185 deletions
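
In outline, the patch moves the DRM_ODD_MM_COMPAT bookkeeping (the list of vmas that map a buffer and the refcounted list of process mm structs owning those vmas) from drm_ttm_t into drm_buffer_object_t, and renames the compat helpers from drm_ttm_* to drm_bo_*. The fragment below is a minimal, standalone sketch of that per-buffer state and its initialization; the simplified struct and the hand-rolled list_head stand-ins are illustrative only, not the kernel definitions.

/* Sketch only: models the DRM_ODD_MM_COMPAT fields this patch adds to
 * drm_buffer_object_t and their initialization in drm_buffer_object_create().
 * The list types are simplified stand-ins, not the kernel's <linux/list.h>. */
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head;
	head->prev = head;
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

#define DRM_ODD_MM_COMPAT	/* pretend we are building for an "odd MM" kernel */

struct drm_buffer_object {
	unsigned long num_pages;
#ifdef DRM_ODD_MM_COMPAT
	/* dev->struct_mutex only protected. */
	struct list_head vma_list;	/* vmas currently mapping this buffer */
	struct list_head p_mm_list;	/* refcounted mm structs owning those vmas */
#endif
};

static void drm_buffer_object_init(struct drm_buffer_object *bo,
				   unsigned long num_pages)
{
	bo->num_pages = num_pages;
#ifdef DRM_ODD_MM_COMPAT
	INIT_LIST_HEAD(&bo->vma_list);
	INIT_LIST_HEAD(&bo->p_mm_list);
#endif
}

int main(void)
{
	struct drm_buffer_object bo;

	drm_buffer_object_init(&bo, 16);
	/* Like the new BUG_ON()s in drm_bo_destroy_locked(): both lists must
	 * be empty before the object may be torn down. */
	printf("vma_list empty: %d, p_mm_list empty: %d\n",
	       list_empty(&bo.vma_list), list_empty(&bo.p_mm_list));
	return 0;
}
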
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 84a06470..dd07a603 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -1044,6 +1044,13 @@ typedef struct drm_buffer_object{
unsigned long num_pages;
uint32_t vm_flags;
void *iomap;
+
+
+#ifdef DRM_ODD_MM_COMPAT
+ /* dev->struct_mutex only protected. */
+ struct list_head vma_list;
+ struct list_head p_mm_list;
+#endif
} drm_buffer_object_t;
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index 7a692af1..177180f9 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -620,7 +620,7 @@ static int drm_agp_bind_ttm(drm_ttm_backend_t *backend,
static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) {
drm_agp_ttm_priv *agp_priv = (drm_agp_ttm_priv *) backend->private;
-
+
DRM_DEBUG("drm_agp_unbind_ttm\n");
if (agp_priv->mem->is_bound)
return drm_agp_unbind_memory(agp_priv->mem);
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 8fe5e8ef..9a27a4b5 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -107,23 +107,31 @@ static int drm_move_tt_to_local(drm_buffer_object_t * bo, int evict,
int force_no_move)
{
drm_device_t *dev = bo->dev;
- int ret;
+ int ret = 0;
if (bo->mm_node) {
- drm_bo_unmap_virtual(bo);
+#ifdef DRM_ODD_MM_COMPAT
mutex_lock(&dev->struct_mutex);
- if (evict)
- ret = drm_evict_ttm(bo->ttm);
- else
- ret = drm_unbind_ttm(bo->ttm);
-
+ ret = drm_bo_lock_kmm(bo);
if (ret) {
mutex_unlock(&dev->struct_mutex);
if (ret == -EAGAIN)
schedule();
return ret;
}
+ drm_bo_unmap_virtual(bo);
+ drm_bo_finish_unmap(bo);
+ drm_bo_unlock_kmm(bo);
+#else
+ drm_bo_unmap_virtual(bo);
+ mutex_lock(&dev->struct_mutex);
+#endif
+ if (evict)
+ drm_ttm_evict(bo->ttm);
+ else
+ drm_ttm_unbind(bo->ttm);
+ bo->mem_type = DRM_BO_MEM_LOCAL;
if (!(bo->flags & DRM_BO_FLAG_NO_MOVE) || force_no_move) {
drm_mm_put_block(bo->mm_node);
bo->mm_node = NULL;
@@ -262,23 +270,13 @@ static void drm_bo_destroy_locked(drm_buffer_object_t *bo)
if (list_empty(&bo->lru) && bo->mm_node == NULL && atomic_read(&bo->usage) == 0) {
BUG_ON(bo->fence != NULL);
- if (bo->ttm) {
- unsigned long _end = jiffies + DRM_HZ;
- int ret;
-
- do {
- ret = drm_unbind_ttm(bo->ttm);
- if (ret == -EAGAIN) {
- mutex_unlock(&dev->struct_mutex);
- schedule();
- mutex_lock(&dev->struct_mutex);
- }
- } while (ret == -EAGAIN && !time_after_eq(jiffies, _end));
+#ifdef DRM_ODD_MM_COMPAT
+ BUG_ON(!list_empty(&bo->vma_list));
+ BUG_ON(!list_empty(&bo->p_mm_list));
+#endif
- if (ret) {
- DRM_ERROR("Couldn't unbind TTM region while destroying a buffer. "
- "Bad. Continuing anyway\n");
- }
+ if (bo->ttm) {
+ drm_ttm_unbind(bo->ttm);
drm_destroy_ttm(bo->ttm);
bo->ttm = NULL;
}
@@ -597,8 +595,7 @@ int drm_bo_alloc_space(drm_buffer_object_t * bo, unsigned mem_type,
static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
{
drm_device_t *dev = bo->dev;
- drm_ttm_backend_t *be;
- int ret;
+ int ret = 0;
if (!(bo->mm_node && (bo->flags & DRM_BO_FLAG_NO_MOVE))) {
BUG_ON(bo->mm_node);
@@ -608,26 +605,41 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
}
DRM_DEBUG("Flipping in to AGP 0x%08lx\n", bo->mm_node->start);
-
- drm_bo_unmap_virtual(bo);
+
+#ifdef DRM_ODD_MM_COMPAT
mutex_lock(&dev->struct_mutex);
+ ret = drm_bo_lock_kmm(bo);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ goto out_put_unlock;
+ }
+#endif
+ drm_bo_unmap_virtual(bo);
ret = drm_bind_ttm(bo->ttm, bo->flags & DRM_BO_FLAG_BIND_CACHED,
bo->mm_node->start);
+
if (ret) {
- drm_mm_put_block(bo->mm_node);
- bo->mm_node = NULL;
+#ifdef DRM_ODD_MM_COMPAT
+ drm_bo_unlock_kmm(bo);
+ mutex_unlock(&dev->struct_mutex);
+#endif
+ goto out_put_unlock;
}
- mutex_unlock(&dev->struct_mutex);
+
+ if (!(bo->flags & DRM_BO_FLAG_BIND_CACHED))
+		bo->flags &= ~DRM_BO_FLAG_CACHED;
+ bo->flags &= ~DRM_BO_MASK_MEM;
+ bo->flags |= DRM_BO_FLAG_MEM_TT;
+ bo->mem_type = DRM_BO_MEM_TT;
+#ifdef DRM_ODD_MM_COMPAT
+ ret = drm_bo_remap_bound(bo);
if (ret) {
- return ret;
+ /* FIXME */
}
-
- be = bo->ttm->be;
- if (be->needs_ub_cache_adjust(be))
- bo->flags &= ~DRM_BO_FLAG_CACHED;
- bo->flags &= ~DRM_BO_MASK_MEM;
- bo->flags |= DRM_BO_FLAG_MEM_TT;
+ drm_bo_unlock_kmm(bo);
+ mutex_unlock(&dev->struct_mutex);
+#endif
if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
ret = dev->driver->bo_driver->invalidate_caches(dev, bo->flags);
@@ -637,6 +649,13 @@ static int drm_move_local_to_tt(drm_buffer_object_t * bo, int no_wait)
DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_EVICTED);
return 0;
+
+out_put_unlock:
+ mutex_lock(&dev->struct_mutex);
+ drm_mm_put_block(bo->mm_node);
+ bo->mm_node = NULL;
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
}
static int drm_bo_new_flags(drm_device_t * dev,
@@ -1120,7 +1139,6 @@ static int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_flags,
} else {
drm_move_tt_to_local(bo, 0, force_no_move);
}
-
return 0;
}
@@ -1213,13 +1231,12 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo,
list_add_tail(&bo->lru, &bm->unfenced);
mutex_unlock(&dev->struct_mutex);
} else {
-
mutex_lock(&dev->struct_mutex);
list_del_init(&bo->lru);
drm_bo_add_to_lru(bo, bm);
mutex_unlock(&dev->struct_mutex);
}
-
+
bo->flags = new_flags;
return 0;
}
@@ -1427,6 +1444,10 @@ int drm_buffer_object_create(drm_file_t * priv,
DRM_INIT_WAITQUEUE(&bo->event_queue);
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
+#ifdef DRM_ODD_MM_COMPAT
+ INIT_LIST_HEAD(&bo->p_mm_list);
+ INIT_LIST_HEAD(&bo->vma_list);
+#endif
bo->dev = dev;
bo->type = type;
bo->num_pages = num_pages;
@@ -2041,7 +2062,6 @@ int drm_bo_pci_offset(const drm_buffer_object_t *bo,
drm_mem_type_manager_t *man = &bm->man[bo->mem_type];
*bus_size = 0;
-
if (bo->type != drm_bo_type_dc)
return -EINVAL;
@@ -2057,11 +2077,10 @@ int drm_bo_pci_offset(const drm_buffer_object_t *bo,
drm_ttm_fixup_caching(ttm);
- if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED))
+ if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED))
return 0;
if (ttm->be->flags & DRM_BE_FLAG_CMA)
return 0;
-
*bus_base = ttm->be->aperture_base;
} else {
*bus_base = man->io_offset;
@@ -2069,7 +2088,6 @@ int drm_bo_pci_offset(const drm_buffer_object_t *bo,
*bus_offset = bo->mm_node->start << PAGE_SHIFT;
*bus_size = bo->num_pages << PAGE_SHIFT;
-
return 0;
}
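
The drm_bo.c hunks above mostly reorder the move paths: on "odd MM" kernels the mmap_sem of every process mapping the buffer must be write-locked (drm_bo_lock_kmm) before the virtual mappings are torn down and the ttm evicted or unbound, and an -EAGAIN from the trylock means back off, schedule() and retry; on later kernels drm_bo_unmap_virtual alone is enough because the fault handler repopulates the mappings. The following standalone sketch mirrors only that control flow; the stub functions and the simplified buffer object are hypothetical stand-ins for the DRM helpers.

/* Sketch only: the ordering drm_move_tt_to_local() now follows, with the DRM
 * helpers replaced by no-op stubs so the sequencing compiles and runs on its
 * own.  Only the control flow mirrors the patch. */
#include <errno.h>
#include <sched.h>
#include <stdio.h>

#define DRM_ODD_MM_COMPAT	/* drop this to see the simpler modern path */

struct drm_buffer_object { int ttm_bound; };

static int  drm_bo_lock_kmm(struct drm_buffer_object *bo)      { (void)bo; return 0; }
static void drm_bo_unlock_kmm(struct drm_buffer_object *bo)    { (void)bo; }
static void drm_bo_unmap_virtual(struct drm_buffer_object *bo) { (void)bo; }
static void drm_bo_finish_unmap(struct drm_buffer_object *bo)  { (void)bo; }
static void drm_ttm_evict(struct drm_buffer_object *bo)        { bo->ttm_bound = 0; }
static void drm_ttm_unbind(struct drm_buffer_object *bo)       { bo->ttm_bound = 0; }
static void lock_struct_mutex(void)   { }
static void unlock_struct_mutex(void) { }

static int move_tt_to_local(struct drm_buffer_object *bo, int evict)
{
	int ret = 0;

#ifdef DRM_ODD_MM_COMPAT
	/* Old kernels: every mapping process's mmap_sem must be write-locked
	 * before the PTEs are torn down, or back off and retry. */
	lock_struct_mutex();
	ret = drm_bo_lock_kmm(bo);
	if (ret) {
		unlock_struct_mutex();
		if (ret == -EAGAIN)
			sched_yield();	/* stand-in for schedule() */
		return ret;
	}
	drm_bo_unmap_virtual(bo);
	drm_bo_finish_unmap(bo);
	drm_bo_unlock_kmm(bo);
#else
	/* Later kernels: just zap the mappings; the fault handler refills them. */
	drm_bo_unmap_virtual(bo);
	lock_struct_mutex();
#endif
	if (evict)
		drm_ttm_evict(bo);
	else
		drm_ttm_unbind(bo);
	unlock_struct_mutex();
	return ret;
}

int main(void)
{
	struct drm_buffer_object bo = { .ttm_bound = 1 };

	printf("move returned %d, still bound: %d\n",
	       move_tt_to_local(&bo, 1), bo.ttm_bound);
	return 0;
}
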
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index 3639ea4f..48d598e8 100644
--- a/linux-core/drm_compat.c
+++ b/linux-core/drm_compat.c
@@ -212,78 +212,85 @@ typedef struct vma_entry {
} vma_entry_t;
-struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma,
+struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type)
{
- drm_local_map_t *map = (drm_local_map_t *) vma->vm_private_data;
+ drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
unsigned long page_offset;
struct page *page;
drm_ttm_t *ttm;
drm_buffer_manager_t *bm;
drm_device_t *dev;
+ unsigned long bus_base;
+ unsigned long bus_offset;
+ unsigned long bus_size;
+ int err;
- /*
- * FIXME: Check can't map aperture flag.
- */
+ mutex_lock(&bo->mutex);
if (type)
*type = VM_FAULT_MINOR;
- if (!map)
- return NOPAGE_OOM;
-
- if (address > vma->vm_end)
- return NOPAGE_SIGBUS;
+ if (address > vma->vm_end) {
+ page = NOPAGE_SIGBUS;
+ goto out_unlock;
+ }
+
+ dev = bo->dev;
+ err = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size);
+
+ if (err) {
+ page = NOPAGE_SIGBUS;
+ goto out_unlock;
+ }
- ttm = (drm_ttm_t *) map->offset;
- dev = ttm->dev;
- mutex_lock(&dev->struct_mutex);
- drm_fixup_ttm_caching(ttm);
- BUG_ON(ttm->page_flags & DRM_TTM_PAGE_UNCACHED);
+ if (bus_size != 0) {
+ DRM_ERROR("Invalid compat nopage.\n");
+ page = NOPAGE_SIGBUS;
+ goto out_unlock;
+ }
bm = &dev->bm;
+ ttm = bo->ttm;
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
page = ttm->pages[page_offset];
if (!page) {
- if (drm_alloc_memctl(PAGE_SIZE)) {
- page = NOPAGE_OOM;
- goto out;
- }
- page = ttm->pages[page_offset] =
- alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+ page = drm_ttm_alloc_page();
if (!page) {
- drm_free_memctl(PAGE_SIZE);
page = NOPAGE_OOM;
- goto out;
+ goto out_unlock;
}
- ++bm->cur_pages;
- SetPageLocked(page);
+ ttm->pages[page_offset] = page;
+ ++bm->cur_pages;
}
get_page(page);
- out:
- mutex_unlock(&dev->struct_mutex);
+
+out_unlock:
+ mutex_unlock(&bo->mutex);
return page;
}
-int drm_ttm_map_bound(struct vm_area_struct *vma)
+int drm_bo_map_bound(struct vm_area_struct *vma)
{
- drm_local_map_t *map = (drm_local_map_t *)vma->vm_private_data;
- drm_ttm_t *ttm = (drm_ttm_t *) map->offset;
+ drm_buffer_object_t *bo = (drm_buffer_object_t *)vma->vm_private_data;
int ret = 0;
+ unsigned long bus_base;
+ unsigned long bus_offset;
+ unsigned long bus_size;
+
+ ret = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size);
+ BUG_ON(ret);
- if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) &&
- !(ttm->be->flags & DRM_BE_FLAG_CMA)) {
-
- unsigned long pfn = ttm->aper_offset +
- (ttm->be->aperture_base >> PAGE_SHIFT);
- pgprot_t pgprot = drm_io_prot(ttm->be->drm_map_type, vma);
-
+ if (bus_size) {
+ unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
+ pgprot_t pgprot = drm_io_prot(_DRM_AGP, vma);
+
ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
vma->vm_end - vma->vm_start,
pgprot);
@@ -293,31 +300,29 @@ int drm_ttm_map_bound(struct vm_area_struct *vma)
}
-int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
+int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
{
p_mm_entry_t *entry, *n_entry;
vma_entry_t *v_entry;
- drm_local_map_t *map = (drm_local_map_t *)
- vma->vm_private_data;
struct mm_struct *mm = vma->vm_mm;
- v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_TTM);
+ v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ);
if (!v_entry) {
DRM_ERROR("Allocation of vma pointer entry failed\n");
return -ENOMEM;
}
v_entry->vma = vma;
- map->handle = (void *) v_entry;
- list_add_tail(&v_entry->head, &ttm->vma_list);
- list_for_each_entry(entry, &ttm->p_mm_list, head) {
+ list_add_tail(&v_entry->head, &bo->vma_list);
+
+ list_for_each_entry(entry, &bo->p_mm_list, head) {
if (mm == entry->mm) {
atomic_inc(&entry->refcount);
return 0;
} else if ((unsigned long)mm < (unsigned long)entry->mm) ;
}
- n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_TTM);
+ n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ);
if (!n_entry) {
DRM_ERROR("Allocation of process mm pointer entry failed\n");
return -ENOMEM;
@@ -331,29 +336,29 @@ int drm_ttm_add_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
return 0;
}
-void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
+void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
{
p_mm_entry_t *entry, *n;
vma_entry_t *v_entry, *v_n;
int found = 0;
struct mm_struct *mm = vma->vm_mm;
- list_for_each_entry_safe(v_entry, v_n, &ttm->vma_list, head) {
+ list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) {
if (v_entry->vma == vma) {
found = 1;
list_del(&v_entry->head);
- drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_TTM);
+ drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ);
break;
}
}
BUG_ON(!found);
- list_for_each_entry_safe(entry, n, &ttm->p_mm_list, head) {
+ list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) {
if (mm == entry->mm) {
if (atomic_add_negative(-1, &entry->refcount)) {
list_del(&entry->head);
BUG_ON(entry->locked);
- drm_ctl_free(entry, sizeof(*entry), DRM_MEM_TTM);
+ drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ);
}
return;
}
@@ -363,12 +368,12 @@ void drm_ttm_delete_vma(drm_ttm_t * ttm, struct vm_area_struct *vma)
-int drm_ttm_lock_mm(drm_ttm_t * ttm)
+int drm_bo_lock_kmm(drm_buffer_object_t * bo)
{
p_mm_entry_t *entry;
int lock_ok = 1;
- list_for_each_entry(entry, &ttm->p_mm_list, head) {
+ list_for_each_entry(entry, &bo->p_mm_list, head) {
BUG_ON(entry->locked);
if (!down_write_trylock(&entry->mm->mmap_sem)) {
lock_ok = 0;
@@ -380,7 +385,7 @@ int drm_ttm_lock_mm(drm_ttm_t * ttm)
if (lock_ok)
return 0;
- list_for_each_entry(entry, &ttm->p_mm_list, head) {
+ list_for_each_entry(entry, &bo->p_mm_list, head) {
if (!entry->locked)
break;
up_write(&entry->mm->mmap_sem);
@@ -395,47 +400,46 @@ int drm_ttm_lock_mm(drm_ttm_t * ttm)
return -EAGAIN;
}
-void drm_ttm_unlock_mm(drm_ttm_t * ttm)
+void drm_bo_unlock_kmm(drm_buffer_object_t * bo)
{
p_mm_entry_t *entry;
- list_for_each_entry(entry, &ttm->p_mm_list, head) {
+ list_for_each_entry(entry, &bo->p_mm_list, head) {
BUG_ON(!entry->locked);
up_write(&entry->mm->mmap_sem);
entry->locked = 0;
}
}
-int drm_ttm_remap_bound(drm_ttm_t *ttm)
+int drm_bo_remap_bound(drm_buffer_object_t *bo)
{
vma_entry_t *v_entry;
int ret = 0;
+ unsigned long bus_base;
+ unsigned long bus_offset;
+ unsigned long bus_size;
- if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) &&
- !(ttm->be->flags & DRM_BE_FLAG_CMA)) {
+ ret = drm_bo_pci_offset(bo, &bus_base, &bus_offset, &bus_size);
+ BUG_ON(ret);
- list_for_each_entry(v_entry, &ttm->vma_list, head) {
- ret = drm_ttm_map_bound(v_entry->vma);
+ if (bus_size) {
+ list_for_each_entry(v_entry, &bo->vma_list, head) {
+ ret = drm_bo_map_bound(v_entry->vma);
if (ret)
break;
}
}
- drm_ttm_unlock_mm(ttm);
return ret;
}
-void drm_ttm_finish_unmap(drm_ttm_t *ttm)
+void drm_bo_finish_unmap(drm_buffer_object_t *bo)
{
vma_entry_t *v_entry;
-
- if (!(ttm->page_flags & DRM_TTM_PAGE_UNCACHED))
- return;
- list_for_each_entry(v_entry, &ttm->vma_list, head) {
+ list_for_each_entry(v_entry, &bo->vma_list, head) {
v_entry->vma->vm_flags &= ~VM_PFNMAP;
}
- drm_ttm_unlock_mm(ttm);
}
#endif
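
drm_bo_lock_kmm needs to hold several mmap_sems at once with no natural locking order, so it takes them with down_write_trylock and, if any one of them is contended, releases those already taken and returns -EAGAIN so the caller can schedule() and try again. Below is a small userspace model of that all-or-nothing pattern using pthread read-write locks in place of mmap_sem; the structures and names are illustrative, not the kernel code.

/* Sketch only: the all-or-nothing trylock behind drm_bo_lock_kmm() /
 * drm_bo_unlock_kmm(), modelled with pthread rwlocks.  Build with -lpthread. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define NUM_MMS 3

struct p_mm_entry {
	pthread_rwlock_t mmap_sem;	/* stand-in for mm->mmap_sem */
	int locked;
};

static struct p_mm_entry mms[NUM_MMS];

/* Try to take every mmap_sem for writing; on any failure, back out and ask
 * the caller to retry later (-EAGAIN), as the compat code does. */
static int bo_lock_kmm(void)
{
	int i;

	for (i = 0; i < NUM_MMS; ++i) {
		if (pthread_rwlock_trywrlock(&mms[i].mmap_sem) != 0)
			goto backout;
		mms[i].locked = 1;
	}
	return 0;

backout:
	while (--i >= 0) {
		pthread_rwlock_unlock(&mms[i].mmap_sem);
		mms[i].locked = 0;
	}
	return -EAGAIN;
}

static void bo_unlock_kmm(void)
{
	int i;

	for (i = 0; i < NUM_MMS; ++i) {
		if (mms[i].locked) {
			pthread_rwlock_unlock(&mms[i].mmap_sem);
			mms[i].locked = 0;
		}
	}
}

int main(void)
{
	int i, ret;

	for (i = 0; i < NUM_MMS; ++i)
		pthread_rwlock_init(&mms[i].mmap_sem, NULL);

	ret = bo_lock_kmm();			/* all free: succeeds */
	printf("first attempt: %d\n", ret);
	bo_unlock_kmm();

	pthread_rwlock_rdlock(&mms[1].mmap_sem);	/* e.g. a process faulting */
	ret = bo_lock_kmm();			/* must back out cleanly */
	printf("contended attempt: %d (expect %d)\n", ret, -EAGAIN);
	pthread_rwlock_unlock(&mms[1].mmap_sem);
	return 0;
}
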
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index 9048f021..313aab85 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -238,7 +238,7 @@ extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
#ifdef DRM_ODD_MM_COMPAT
-struct drm_ttm;
+struct drm_buffer_object;
/*
@@ -246,14 +246,14 @@ struct drm_ttm;
* process mm pointer to the ttm mm list. Needs the ttm mutex.
*/
-extern int drm_ttm_add_vma(struct drm_ttm * ttm,
+extern int drm_bo_add_vma(struct drm_buffer_object * bo,
struct vm_area_struct *vma);
/*
* Delete a vma and the corresponding mm pointer from the
* ttm lists. Needs the ttm mutex.
*/
-extern void drm_ttm_delete_vma(struct drm_ttm * ttm,
- struct vm_area_struct *vma);
+extern void drm_bo_delete_vma(struct drm_buffer_object * bo,
+ struct vm_area_struct *vma);
/*
* Attempts to lock all relevant mmap_sems for a ttm, while
@@ -262,12 +262,12 @@ extern void drm_ttm_delete_vma(struct drm_ttm * ttm,
* schedule() and try again.
*/
-extern int drm_ttm_lock_mm(struct drm_ttm * ttm);
+extern int drm_bo_lock_kmm(struct drm_buffer_object * bo);
/*
* Unlock all relevant mmap_sems for a ttm.
*/
-extern void drm_ttm_unlock_mm(struct drm_ttm * ttm);
+extern void drm_bo_unlock_kmm(struct drm_buffer_object * bo);
/*
* If the ttm was bound to the aperture, this function shall be called
@@ -277,7 +277,7 @@ extern void drm_ttm_unlock_mm(struct drm_ttm * ttm);
* releases the mmap_sems for this ttm.
*/
-extern void drm_ttm_finish_unmap(struct drm_ttm *ttm);
+extern void drm_bo_finish_unmap(struct drm_buffer_object *bo);
/*
* Remap all vmas of this ttm using io_remap_pfn_range. We cannot
@@ -286,14 +286,14 @@ extern void drm_ttm_finish_unmap(struct drm_ttm *ttm);
* releases the mmap_sems for this ttm.
*/
-extern int drm_ttm_remap_bound(struct drm_ttm *ttm);
+extern int drm_bo_remap_bound(struct drm_buffer_object *bo);
/*
* Remap a vma for a bound ttm. Call with the ttm mutex held and
* the relevant mmap_sem locked.
*/
-extern int drm_ttm_map_bound(struct vm_area_struct *vma);
+extern int drm_bo_map_bound(struct vm_area_struct *vma);
#endif
#endif
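
The comments in drm_compat.h describe the bookkeeping that makes the locking above possible: drm_bo_add_vma records each vma that maps the buffer and keeps one refcounted entry per distinct process mm, and drm_bo_delete_vma drops that reference when a vma goes away. The sketch below models that refcounting with a fixed-size table instead of the kernel's lists; all names and sizes are illustrative.

/* Sketch only: per-mm refcounting as kept by drm_bo_add_vma() /
 * drm_bo_delete_vma(), reduced to a fixed-size table.  "mm" is an opaque
 * pointer standing in for vma->vm_mm. */
#include <stdio.h>
#include <string.h>

#define MAX_MMS 8

struct mm_ref {
	void *mm;	/* stand-in for a process mm_struct */
	int refcount;	/* how many tracked vmas belong to this mm */
};

static struct mm_ref table[MAX_MMS];

/* A vma from process `mm` now maps the buffer: remember its mm once,
 * refcounted, so bo_lock_kmm() knows which mmap_sems to take. */
static int bo_add_vma(void *mm)
{
	int i, free_slot = -1;

	for (i = 0; i < MAX_MMS; ++i) {
		if (table[i].mm == mm) {
			++table[i].refcount;
			return 0;
		}
		if (!table[i].mm && free_slot < 0)
			free_slot = i;
	}
	if (free_slot < 0)
		return -1;	/* table full; the kernel code allocates instead */
	table[free_slot].mm = mm;
	table[free_slot].refcount = 1;
	return 0;
}

/* The vma went away: drop the mm reference, forget the mm entirely when the
 * last vma belonging to it is gone. */
static void bo_delete_vma(void *mm)
{
	int i;

	for (i = 0; i < MAX_MMS; ++i) {
		if (table[i].mm == mm) {
			if (--table[i].refcount == 0)
				memset(&table[i], 0, sizeof(table[i]));
			return;
		}
	}
}

int main(void)
{
	int proc_a, proc_b;	/* their addresses stand in for two mm_structs */

	bo_add_vma(&proc_a);
	bo_add_vma(&proc_a);	/* a second mapping in the same process */
	bo_add_vma(&proc_b);
	bo_delete_vma(&proc_a);
	printf("proc_a refcount: %d, proc_b refcount: %d\n",
	       table[0].refcount, table[1].refcount);
	return 0;
}
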
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index 6699a0dd..9a2ce5cd 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -153,12 +153,6 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
if (!ttm)
return 0;
- DRM_ERROR("Drm destroy ttm\n");
-
-#ifdef DRM_ODD_MM_COMPAT
- BUG_ON(!list_empty(&ttm->vma_list));
- BUG_ON(!list_empty(&ttm->p_mm_list));
-#endif
be = ttm->be;
if (be) {
be->destroy(be);
@@ -241,11 +235,6 @@ drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size)
if (!ttm)
return NULL;
-#ifdef DRM_ODD_MM_COMPAT
- INIT_LIST_HEAD(&ttm->p_mm_list);
- INIT_LIST_HEAD(&ttm->vma_list);
-#endif
-
ttm->dev = dev;
atomic_set(&ttm->vma_count, 0);
@@ -278,15 +267,17 @@ drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size)
* Unbind a ttm region from the aperture.
*/
-int drm_evict_ttm(drm_ttm_t * ttm)
+void drm_ttm_evict(drm_ttm_t * ttm)
{
drm_ttm_backend_t *be = ttm->be;
+ int ret;
- if (ttm->state == ttm_bound)
- be->unbind(be);
+ if (ttm->state == ttm_bound) {
+ ret = be->unbind(be);
+ BUG_ON(ret);
+ }
ttm->state = ttm_evicted;
- return 0;
}
void drm_ttm_fixup_caching(drm_ttm_t * ttm)
@@ -301,18 +292,12 @@ void drm_ttm_fixup_caching(drm_ttm_t * ttm)
}
}
-int drm_unbind_ttm(drm_ttm_t * ttm)
+void drm_ttm_unbind(drm_ttm_t * ttm)
{
- int ret = 0;
-
if (ttm->state == ttm_bound)
- ret = drm_evict_ttm(ttm);
-
- if (ret)
- return ret;
+ drm_ttm_evict(ttm);
drm_ttm_fixup_caching(ttm);
- return 0;
}
int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
@@ -335,19 +320,9 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
if (ttm->state == ttm_unbound && !cached) {
drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
}
-#ifdef DRM_ODD_MM_COMPAT
- else if (ttm->state == ttm_evicted && !cached) {
- ret = drm_ttm_lock_mm(ttm);
- if (ret)
- return ret;
- }
-#endif
+
if ((ret = be->bind(be, aper_offset, cached))) {
ttm->state = ttm_evicted;
-#ifdef DRM_ODD_MM_COMPAT
- if (be->needs_ub_cache_adjust(be))
- drm_ttm_unlock_mm(ttm);
-#endif
DRM_ERROR("Couldn't bind backend.\n");
return ret;
}
@@ -355,13 +330,5 @@ int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
ttm->aper_offset = aper_offset;
ttm->state = ttm_bound;
-#ifdef DRM_ODD_MM_COMPAT
- if (be->needs_ub_cache_adjust(be)) {
- ret = drm_ttm_remap_bound(ttm);
- if (ret)
- return ret;
- }
-#endif
-
return 0;
}
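
The drm_ttm.c hunks also change the calling convention: drm_evict_ttm and drm_unbind_ttm, which returned int, become drm_ttm_evict and drm_ttm_unbind returning void, with a failed backend unbind treated as a bug (BUG_ON) rather than an error the caller retries. A small sketch of the new contract, with assert standing in for BUG_ON and a hypothetical backend stub:

/* Sketch only: the void unbind/evict contract after the rename.  assert()
 * stands in for BUG_ON(); the backend and the final state handling are
 * simplified (the real code calls drm_ttm_fixup_caching()). */
#include <assert.h>
#include <stdio.h>

enum ttm_state { ttm_bound, ttm_evicted, ttm_unbound };

struct ttm {
	enum ttm_state state;
	int (*backend_unbind)(void);	/* stand-in for be->unbind(be) */
};

static int stub_unbind(void) { return 0; }

static void drm_ttm_evict(struct ttm *ttm)
{
	if (ttm->state == ttm_bound) {
		int ret = ttm->backend_unbind();

		assert(ret == 0);	/* was: BUG_ON(ret) */
		(void)ret;
	}
	ttm->state = ttm_evicted;
}

static void drm_ttm_unbind(struct ttm *ttm)
{
	if (ttm->state == ttm_bound)
		drm_ttm_evict(ttm);
	/* The real code runs drm_ttm_fixup_caching() here; this sketch just
	 * marks the ttm unbound. */
	ttm->state = ttm_unbound;
}

int main(void)
{
	struct ttm t = { .state = ttm_bound, .backend_unbind = stub_unbind };

	drm_ttm_unbind(&t);	/* callers no longer check a return value */
	printf("state after unbind: %d (ttm_unbound == %d)\n",
	       (int)t.state, (int)ttm_unbound);
	return 0;
}
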
diff --git a/linux-core/drm_ttm.h b/linux-core/drm_ttm.h
index a6dc30ae..6aa1c5ad 100644
--- a/linux-core/drm_ttm.h
+++ b/linux-core/drm_ttm.h
@@ -79,10 +79,6 @@ typedef struct drm_ttm {
ttm_unbound,
ttm_unpopulated,
} state;
-#ifdef DRM_ODD_MM_COMPAT
- struct list_head vma_list;
- struct list_head p_mm_list;
-#endif
} drm_ttm_t;
@@ -90,13 +86,8 @@ typedef struct drm_ttm {
extern drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size);
extern struct page *drm_ttm_alloc_page(void);
extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset);
-extern int drm_unbind_ttm(drm_ttm_t * ttm);
-
-/*
- * Evict a ttm region. Keeps Aperture caching policy.
- */
-
-extern int drm_evict_ttm(drm_ttm_t * ttm);
+extern void drm_ttm_unbind(drm_ttm_t * ttm);
+extern void drm_ttm_evict(drm_ttm_t * ttm);
extern void drm_ttm_fixup_caching(drm_ttm_t * ttm);
/*
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index 93d1c0b8..a4a9b09d 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -747,12 +747,6 @@ struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
mutex_lock(&bo->mutex);
- map = bo->map_list.map;
-
- if (!map) {
- data->type = VM_FAULT_OOM;
- goto out_unlock;
- }
if (address > vma->vm_end) {
data->type = VM_FAULT_SIGBUS;
@@ -808,8 +802,8 @@ static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
drm_vm_open_locked(vma);
atomic_inc(&bo->usage);
-#ifdef DRM_MM_ODD_COMPAT
- drm_bo_vm_add_vma(bo, vma);
+#ifdef DRM_ODD_MM_COMPAT
+ drm_bo_add_vma(bo, vma);
#endif
}
@@ -843,8 +837,8 @@ static void drm_bo_vm_close(struct vm_area_struct *vma)
drm_vm_close(vma);
if (bo) {
mutex_lock(&dev->struct_mutex);
-#ifdef DRM_MM_ODD_COMPAT
- drm_bo_vm_delete_vma(bo, vma);
+#ifdef DRM_ODD_MM_COMPAT
+ drm_bo_delete_vma(bo, vma);
#endif
drm_bo_usage_deref_locked(bo);
mutex_unlock(&dev->struct_mutex);
@@ -881,7 +875,7 @@ int drm_bo_mmap_locked(struct vm_area_struct *vma,
vma->vm_flags |= VM_RESERVED | VM_IO;
drm_bo_vm_open_locked(vma);
#ifdef DRM_ODD_MM_COMPAT
- drm_ttm_map_bound(vma);
+ drm_bo_map_bound(vma);
#endif
return 0;
}