author    Ben Skeggs <bskeggs@redhat.com>	2013-05-13 21:13:15 +1000
committer Ben Skeggs <bskeggs@redhat.com>	2013-06-13 10:38:43 +1000
commit    108cbb229b87fda57d8d2f94771b747496fab4b4 (patch)
tree      0d62236102eb2ce897393a4ab45bdba7dc10cd6a
parent    9e63099e355c5a32af745faecb2083227fadd1d2 (diff)
download  nouveau-108cbb229b87fda57d8d2f94771b747496fab4b4.tar.gz
vm/nv50-: take mutex rather than irqsave spinlock
These operations can take quite some time, and we really don't want to
have to hold a spinlock for too long.  Now that the lock ordering for vm
and the gr/nv84 hw bug workaround has been reversed, it's possible to
use a mutex here.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
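[Editor's sketch] A minimal illustration of the locking pattern being replaced and the one being adopted. Everything here (struct example_vmmgr, example_flush_done, the delay values) is a hypothetical stand-in rather than the actual nouveau code:

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

/* Illustrative stand-in for a vmmgr private struct, not nouveau's real one. */
struct example_vmmgr {
	spinlock_t lock;	/* old scheme: private, IRQ-safe lock */
	struct mutex mutex;	/* new scheme: sleepable lock, as on the subdev */
	atomic_t flush_pending;
};

static bool example_flush_done(struct example_vmmgr *vmm)
{
	return atomic_read(&vmm->flush_pending) == 0;
}

/*
 * Before: spin_lock_irqsave() disables local interrupts for the whole
 * poll loop, so a slow flush keeps the CPU busy-waiting with IRQs off.
 */
static void example_flush_spin(struct example_vmmgr *vmm)
{
	unsigned long flags;

	spin_lock_irqsave(&vmm->lock, flags);
	while (!example_flush_done(vmm))
		udelay(1);		/* busy-wait; cannot sleep here */
	spin_unlock_irqrestore(&vmm->lock, flags);
}

/*
 * After: a mutex lets the caller sleep while it waits, so a long flush no
 * longer holds off interrupts.  This is only valid because the flush path
 * is never entered from IRQ context, and because the vm vs. gr/nv84 lock
 * ordering was reversed beforehand, as the commit message notes.
 */
static void example_flush_mutex(struct example_vmmgr *vmm)
{
	mutex_lock(&vmm->mutex);
	while (!example_flush_done(vmm))
		usleep_range(10, 20);	/* may sleep under a mutex */
	mutex_unlock(&vmm->mutex);
}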
-rw-r--r--  nvkm/subdev/vm/nv50.c  |  7
-rw-r--r--  nvkm/subdev/vm/nvc0.c  |  7
2 files changed, 4 insertions(+), 10 deletions(-)
diff --git a/nvkm/subdev/vm/nv50.c b/nvkm/subdev/vm/nv50.c
index 966e61434..50c66122c 100644
--- a/nvkm/subdev/vm/nv50.c
+++ b/nvkm/subdev/vm/nv50.c
@@ -31,7 +31,6 @@
struct nv50_vmmgr_priv {
struct nouveau_vmmgr base;
- spinlock_t lock;
};
static void
@@ -153,10 +152,9 @@ nv50_vm_flush(struct nouveau_vm *vm)
{
struct nv50_vmmgr_priv *priv = (void *)vm->vmm;
struct nouveau_engine *engine;
- unsigned long flags;
int i, vme;
- spin_lock_irqsave(&priv->lock, flags);
+ mutex_lock(&nv_subdev(priv)->mutex);
for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
if (!atomic_read(&vm->engref[i]))
continue;
@@ -182,7 +180,7 @@ nv50_vm_flush(struct nouveau_vm *vm)
if (!nv_wait(priv, 0x100c80, 0x00000001, 0x00000000))
nv_error(priv, "vm flush timeout: engine %d\n", vme);
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ mutex_unlock(&nv_subdev(priv)->mutex);
}
static int
@@ -220,7 +218,6 @@ nv50_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->base.map_sg = nv50_vm_map_sg;
priv->base.unmap = nv50_vm_unmap;
priv->base.flush = nv50_vm_flush;
- spin_lock_init(&priv->lock);
return 0;
}
diff --git a/nvkm/subdev/vm/nvc0.c b/nvkm/subdev/vm/nvc0.c
index 4c3b0a23b..beb09743a 100644
--- a/nvkm/subdev/vm/nvc0.c
+++ b/nvkm/subdev/vm/nvc0.c
@@ -32,7 +32,6 @@
struct nvc0_vmmgr_priv {
struct nouveau_vmmgr base;
- spinlock_t lock;
};
@@ -164,12 +163,11 @@ void
nvc0_vm_flush_engine(struct nouveau_subdev *subdev, u64 addr, int type)
{
struct nvc0_vmmgr_priv *priv = (void *)nouveau_vmmgr(subdev);
- unsigned long flags;
/* looks like maybe a "free flush slots" counter, the
* faster you write to 0x100cbc to more it decreases
*/
- spin_lock_irqsave(&priv->lock, flags);
+ mutex_lock(&nv_subdev(priv)->mutex);
if (!nv_wait_ne(subdev, 0x100c80, 0x00ff0000, 0x00000000)) {
nv_error(subdev, "vm timeout 0: 0x%08x %d\n",
nv_rd32(subdev, 0x100c80), type);
@@ -183,7 +181,7 @@ nvc0_vm_flush_engine(struct nouveau_subdev *subdev, u64 addr, int type)
nv_error(subdev, "vm timeout 1: 0x%08x %d\n",
nv_rd32(subdev, 0x100c80), type);
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ mutex_unlock(&nv_subdev(priv)->mutex);
}
static void
@@ -227,7 +225,6 @@ nvc0_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->base.map_sg = nvc0_vm_map_sg;
priv->base.unmap = nvc0_vm_unmap;
priv->base.flush = nvc0_vm_flush;
- spin_lock_init(&priv->lock);
return 0;
}
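[Editor's sketch] On why spin_lock_init() disappears from both constructors: rather than a lock private to the vmmgr, the flush paths now take the mutex already embedded in the common subdev base object, reached via nv_subdev(priv)->mutex. The layout below only illustrates that relationship and assumes the base object's mutex is initialised by common subdev setup; it is not the real nouveau definition:

#include <linux/mutex.h>

/* Illustrative base object: carries the shared mutex (assumed to be
 * initialised once by the common subdev constructor). */
struct example_subdev {
	struct mutex mutex;
};

/* Illustrative vmmgr private struct: no private spinlock_t needed any more. */
struct example_vmmgr_priv {
	struct example_subdev base;
};

/* Stand-in for nouveau's nv_subdev() accessor used in the diff above. */
static inline struct example_subdev *example_nv_subdev(struct example_vmmgr_priv *priv)
{
	return &priv->base;
}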