author     Alexandre Courbot <acourbot@nvidia.com>  2014-10-30 18:21:12 +0900
committer  Alexandre Courbot <acourbot@nvidia.com>  2014-11-11 18:50:37 +0900
commit     d337b253ef0562d4f02a06bfdbd44445e2938b7a (patch)
tree       405afbf8eb27729a00e095c6492a06b70d9e6fe3
parent     2ffbbfcd29b08515d42ca3180715e7517a002db3 (diff)
download   nouveau-d337b253ef0562d4f02a06bfdbd44445e2938b7a.tar.gz
fb/ramgk20a: use mm_nodes
This is how VRAM objects are defined on other GPUs, and this saves us
from caveats when mapping large pages.

Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
-rw-r--r--  nvkm/subdev/fb/ramgk20a.c  24
1 file changed, 13 insertions, 11 deletions
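The patch switches gk20a's DMA-backed "VRAM" objects from a per-page
dma_addr_t array to the region list used by nouveau's other ram backends.
A rough sketch of the data layout this relies on, with simplified struct
definitions assumed purely for illustration (the real nvkm
nouveau_mm_node carries additional list heads and fields):

#include <linux/list.h>
#include <linux/types.h>

/* Simplified stand-in for the region node the patch allocates. */
struct nouveau_mm_node {
	struct list_head rl_entry;	/* links the node into nouveau_mem.regions */
	u8  type;			/* storage type / page kind */
	u32 offset;			/* start, in 4 KiB (1 << 12) units */
	u32 length;			/* length, in 4 KiB units */
};

/* Simplified stand-in for the memory object the regions describe. */
struct nouveau_mem {
	struct list_head regions;	/* list of nouveau_mm_node */
	u64 offset;			/* bus address of the first region */
	u64 size;			/* total size, in pages */
	u32 memtype;
};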
diff --git a/nvkm/subdev/fb/ramgk20a.c b/nvkm/subdev/fb/ramgk20a.c
index 4d77d75e4..ba87b8ce6 100644
--- a/nvkm/subdev/fb/ramgk20a.c
+++ b/nvkm/subdev/fb/ramgk20a.c
@@ -36,16 +36,18 @@ gk20a_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
 {
 	struct device *dev = nv_device_base(nv_device(pfb));
 	struct gk20a_mem *mem = to_gk20a_mem(*pmem);
+	struct nouveau_mm_node *r;
 	*pmem = NULL;
 	if (unlikely(mem == NULL))
 		return;
+	r = list_first_entry(&mem->base.regions, struct nouveau_mm_node, rl_entry);
+	kfree(r);
 	if (likely(mem->cpuaddr))
 		dma_free_coherent(dev, mem->base.size << PAGE_SHIFT,
 				  mem->cpuaddr, mem->handle);
-	kfree(mem->base.pages);
 	kfree(mem);
 }
@@ -55,9 +57,9 @@ gk20a_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
 {
 	struct device *dev = nv_device_base(nv_device(pfb));
 	struct gk20a_mem *mem;
+	struct nouveau_mm_node *r;
 	u32 type = memtype & 0xff;
 	u32 npages, order;
-	int i;
 	nv_debug(pfb, "%s: size: %llx align: %x, ncmin: %x\n", __func__, size,
 		 align, ncmin);
@@ -83,15 +85,10 @@ gk20a_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
 	if (!mem)
 		return -ENOMEM;
+	INIT_LIST_HEAD(&mem->base.regions);
 	mem->base.size = npages;
 	mem->base.memtype = type;
-	mem->base.pages = kzalloc(sizeof(dma_addr_t) * npages, GFP_KERNEL);
-	if (!mem->base.pages) {
-		kfree(mem);
-		return -ENOMEM;
-	}
-
 	*pmem = &mem->base;
 	mem->cpuaddr = dma_alloc_coherent(dev, npages << PAGE_SHIFT,
@@ -112,10 +109,15 @@ gk20a_ram_get(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
 	nv_debug(pfb, "alloc size: 0x%x, align: 0x%x, paddr: %pad, vaddr: %p\n",
 		 npages << PAGE_SHIFT, align, &mem->handle, mem->cpuaddr);
-	for (i = 0; i < npages; i++)
-		mem->base.pages[i] = mem->handle + (PAGE_SIZE * i);
-	mem->base.offset = (u64)mem->base.pages[0];
+	r = kzalloc(sizeof(*r), GFP_KERNEL);
+	r->type = 12;
+	r->offset = mem->handle >> 12;
+	r->length = npages;
+
+	mem->base.offset = (u64)r->offset << 12;
+
+	list_add_tail(&r->rl_entry, &mem->base.regions);
 	return 0;
 }
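Because the gk20a allocation comes from a single dma_alloc_coherent() call
and is therefore physically contiguous, it can be described by exactly one
region node, and tearing it down only has to free that node. A minimal
sketch of that round trip, using the simplified structures above and
hypothetical helper names (unlike the patch, the sketch also checks the
kzalloc result):

#include <linux/slab.h>

/* Describe the contiguous range [paddr, paddr + (npages << 12)) with one node. */
static int gk20a_mem_add_region(struct nouveau_mem *mem, dma_addr_t paddr,
				u32 npages)
{
	struct nouveau_mm_node *r = kzalloc(sizeof(*r), GFP_KERNEL);

	if (!r)
		return -ENOMEM;
	r->type = 12;			/* same type value the patch uses */
	r->offset = paddr >> 12;	/* stored in 4 KiB units */
	r->length = npages;
	mem->offset = (u64)r->offset << 12;
	list_add_tail(&r->rl_entry, &mem->regions);
	return 0;
}

/* Tear down: there is exactly one region, so release the first list entry. */
static void gk20a_mem_del_region(struct nouveau_mem *mem)
{
	struct nouveau_mm_node *r =
		list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);

	list_del(&r->rl_entry);
	kfree(r);
}

Mapping code elsewhere in nouveau walks mem->regions rather than a page
array, which is what lets one large contiguous region sidestep the
large-page mapping caveats the commit message refers to.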