summaryrefslogtreecommitdiff
path: root/drm/nouveau/nvkm/subdev/mmu/nv41.c
diff options
context:
space:
mode:
author: Ben Skeggs <bskeggs@redhat.com> 2015-08-20 14:54:17 +1000
committer: Ben Skeggs <bskeggs@redhat.com> 2015-08-28 12:37:38 +1000
commit: 43e31b5873fec91bf5d8ef7bea54db7b57c4908d (patch)
tree: 6adc05ed276a5bcf01e883769f0710dc85e1fc97 /drm/nouveau/nvkm/subdev/mmu/nv41.c
parent: 850e79f6d76a25226ea1f83647d02c44fad6c7c9 (diff)
download: nouveau-43e31b5873fec91bf5d8ef7bea54db7b57c4908d.tar.gz
mmu: directly use instmem for page tables
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drm/nouveau/nvkm/subdev/mmu/nv41.c')
-rw-r--r-- drm/nouveau/nvkm/subdev/mmu/nv41.c | 17
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/drm/nouveau/nvkm/subdev/mmu/nv41.c b/drm/nouveau/nvkm/subdev/mmu/nv41.c
index 6fd74f1f7..0f91d7aeb 100644
--- a/drm/nouveau/nvkm/subdev/mmu/nv41.c
+++ b/drm/nouveau/nvkm/subdev/mmu/nv41.c
@@ -35,7 +35,7 @@
******************************************************************************/
static void
-nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
pte = pte * 4;
@@ -54,7 +54,7 @@ nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
}
static void
-nv41_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv41_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
{
pte = pte * 4;
nvkm_kmap(pgt);
@@ -68,7 +68,7 @@ nv41_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
static void
nv41_vm_flush(struct nvkm_vm *vm)
{
- struct nv04_mmu *mmu = (void *)vm->mmu;
+ struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
struct nvkm_device *device = mmu->base.subdev.device;
mutex_lock(&nv_subdev(mmu)->mutex);
@@ -121,10 +121,9 @@ nv41_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
- ret = nvkm_gpuobj_new(nv_object(mmu), NULL,
- (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC,
- &mmu->vm->pgt[0].obj[0]);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+ (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16, true,
+ &mmu->vm->pgt[0].mem[0]);
mmu->vm->pgt[0].refcount[0] = 1;
if (ret)
return ret;
@@ -137,14 +136,14 @@ nv41_mmu_init(struct nvkm_object *object)
{
struct nv04_mmu *mmu = (void *)object;
struct nvkm_device *device = mmu->base.subdev.device;
- struct nvkm_gpuobj *dma = mmu->vm->pgt[0].obj[0];
+ struct nvkm_memory *dma = mmu->vm->pgt[0].mem[0];
int ret;
ret = nvkm_mmu_init(&mmu->base);
if (ret)
return ret;
- nvkm_wr32(device, 0x100800, dma->addr | 0x00000002);
+ nvkm_wr32(device, 0x100800, 0x00000002 | nvkm_memory_addr(dma));
nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
nvkm_wr32(device, 0x100820, 0x00000000);
return 0;