From 5f921ae96f1529a55966f25cd5c70fab11d38be7 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Sun, 26 Mar 2006 01:37:17 -0800
Subject: [PATCH] sem2mutex: ipc, id.sem

Semaphore to mutex conversion.

The conversion was generated via scripts, and the result was validated
automatically via a script as well.

Signed-off-by: Ingo Molnar
Cc: Manfred Spraul
Signed-off-by: Lee Schermerhorn
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 ipc/compat.c |  2 +-
 ipc/mqueue.c |  4 +++-
 ipc/msg.c    | 18 ++++++++++--------
 ipc/sem.c    | 34 ++++++++++++++++++----------------
 ipc/shm.c    | 30 ++++++++++++++++--------------
 ipc/util.c   | 29 +++++++++++++++--------------
 ipc/util.h   |  4 ++--
 7 files changed, 65 insertions(+), 56 deletions(-)

diff --git a/ipc/compat.c b/ipc/compat.c
index 1fe95f6659dd..a544dfbb082a 100644
--- a/ipc/compat.c
+++ b/ipc/compat.c
@@ -30,7 +30,7 @@
 #include
 #include
-#include
+#include
 #include
 #include "util.h"
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 85c52fd26bff..a3bb0c8201c7 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -25,6 +25,8 @@
 #include
 #include
 #include
+#include
+
 #include
 #include "util.h"
@@ -760,7 +762,7 @@ out_unlock:
  * The receiver accepts the message and returns without grabbing the queue
  * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
  * are necessary. The same algorithm is used for sysv semaphores, see
- * ipc/sem.c fore more details.
+ * ipc/mutex.c fore more details.
  *
  * The same algorithm is used for senders.
  */
diff --git a/ipc/msg.c b/ipc/msg.c
index 7eec5ed32379..48a7f17a7236 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -28,6 +28,8 @@
 #include
 #include
 #include
+#include
+
 #include
 #include
 #include "util.h"
@@ -179,8 +181,8 @@ static void expunge_all(struct msg_queue* msq, int res)
  * removes the message queue from message queue ID
  * array, and cleans up all the messages associated with this queue.
  *
- * msg_ids.sem and the spinlock for this message queue is hold
- * before freeque() is called. msg_ids.sem remains locked on exit.
+ * msg_ids.mutex and the spinlock for this message queue is hold
+ * before freeque() is called. msg_ids.mutex remains locked on exit.
  */
 static void freeque (struct msg_queue *msq, int id)
 {
@@ -208,7 +210,7 @@ asmlinkage long sys_msgget (key_t key, int msgflg)
 	int id, ret = -EPERM;
 	struct msg_queue *msq;
-	down(&msg_ids.sem);
+	mutex_lock(&msg_ids.mutex);
 	if (key == IPC_PRIVATE)
 		ret = newque(key, msgflg);
 	else if ((id = ipc_findkey(&msg_ids, key)) == -1) { /* key not used */
@@ -231,7 +233,7 @@ asmlinkage long sys_msgget (key_t key, int msgflg)
 		}
 		msg_unlock(msq);
 	}
-	up(&msg_ids.sem);
+	mutex_unlock(&msg_ids.mutex);
 	return ret;
 }
@@ -361,7 +363,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
 		msginfo.msgmnb = msg_ctlmnb;
 		msginfo.msgssz = MSGSSZ;
 		msginfo.msgseg = MSGSEG;
-		down(&msg_ids.sem);
+		mutex_lock(&msg_ids.mutex);
 		if (cmd == MSG_INFO) {
 			msginfo.msgpool = msg_ids.in_use;
 			msginfo.msgmap = atomic_read(&msg_hdrs);
@@ -372,7 +374,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
 			msginfo.msgtql = MSGTQL;
 		}
 		max_id = msg_ids.max_id;
-		up(&msg_ids.sem);
+		mutex_unlock(&msg_ids.mutex);
 		if (copy_to_user (buf, &msginfo, sizeof(struct msginfo)))
 			return -EFAULT;
 		return (max_id < 0) ? 0: max_id;
@@ -435,7 +437,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
 		return -EINVAL;
 	}
-	down(&msg_ids.sem);
+	mutex_lock(&msg_ids.mutex);
 	msq = msg_lock(msqid);
 	err=-EINVAL;
 	if (msq == NULL)
@@ -489,7 +491,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
 	}
 	err = 0;
 out_up:
-	up(&msg_ids.sem);
+	mutex_unlock(&msg_ids.mutex);
 	return err;
 out_unlock_up:
 	msg_unlock(msq);
diff --git a/ipc/sem.c b/ipc/sem.c
index 59696a840be1..48a54f66a246 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -75,6 +75,8 @@
 #include
 #include
 #include
+#include
+
 #include
 #include "util.h"
@@ -139,7 +141,7 @@ void __init sem_init (void)
  * * if it's IN_WAKEUP, then it must wait until the value changes
  * * if it's not -EINTR, then the operation was completed by
  *   update_queue. semtimedop can return queue.status without
- *   performing any operation on the semaphore array.
+ *   performing any operation on the sem array.
  * * otherwise it must acquire the spinlock and check what's up.
  *
  * The two-stage algorithm is necessary to protect against the following
@@ -214,7 +216,7 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg)
 	if (nsems < 0 || nsems > sc_semmsl)
 		return -EINVAL;
-	down(&sem_ids.sem);
+	mutex_lock(&sem_ids.mutex);
 	if (key == IPC_PRIVATE) {
 		err = newary(key, nsems, semflg);
@@ -242,7 +244,7 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg)
 		sem_unlock(sma);
 	}
-	up(&sem_ids.sem);
+	mutex_unlock(&sem_ids.mutex);
 	return err;
 }
@@ -437,8 +439,8 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum)
 	return semzcnt;
 }
-/* Free a semaphore set. freeary() is called with sem_ids.sem down and
- * the spinlock for this semaphore set hold. sem_ids.sem remains locked
+/* Free a semaphore set. freeary() is called with sem_ids.mutex locked and
+ * the spinlock for this semaphore set hold. sem_ids.mutex remains locked
  * on exit.
  */
 static void freeary (struct sem_array *sma, int id)
@@ -525,7 +527,7 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semu
 		seminfo.semmnu = SEMMNU;
 		seminfo.semmap = SEMMAP;
 		seminfo.semume = SEMUME;
-		down(&sem_ids.sem);
+		mutex_lock(&sem_ids.mutex);
 		if (cmd == SEM_INFO) {
 			seminfo.semusz = sem_ids.in_use;
 			seminfo.semaem = used_sems;
@@ -534,7 +536,7 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semu
 			seminfo.semaem = SEMAEM;
 		}
 		max_id = sem_ids.max_id;
-		up(&sem_ids.sem);
+		mutex_unlock(&sem_ids.mutex);
 		if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
 			return -EFAULT;
 		return (max_id < 0) ? 0: max_id;
@@ -885,9 +887,9 @@ asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
 		return err;
 	case IPC_RMID:
 	case IPC_SET:
-		down(&sem_ids.sem);
+		mutex_lock(&sem_ids.mutex);
 		err = semctl_down(semid,semnum,cmd,version,arg);
-		up(&sem_ids.sem);
+		mutex_unlock(&sem_ids.mutex);
 		return err;
 	default:
 		return -EINVAL;
 	}
@@ -1299,9 +1301,9 @@ found:
 		/* perform adjustments registered in u */
 		nsems = sma->sem_nsems;
 		for (i = 0; i < nsems; i++) {
-			struct sem * sem = &sma->sem_base[i];
+			struct sem * semaphore = &sma->sem_base[i];
 			if (u->semadj[i]) {
-				sem->semval += u->semadj[i];
+				semaphore->semval += u->semadj[i];
 				/*
 				 * Range checks of the new semaphore value,
 				 * not defined by sus:
@@ -1315,11 +1317,11 @@ found:
 				 *
 				 * 	Manfred
 				 */
-				if (sem->semval < 0)
-					sem->semval = 0;
-				if (sem->semval > SEMVMX)
-					sem->semval = SEMVMX;
-				sem->sempid = current->tgid;
+				if (semaphore->semval < 0)
+					semaphore->semval = 0;
+				if (semaphore->semval > SEMVMX)
+					semaphore->semval = SEMVMX;
+				semaphore->sempid = current->tgid;
 			}
 		}
 		sma->sem_otime = get_seconds();
diff --git a/ipc/shm.c b/ipc/shm.c
index 6f9615c09fb2..f806a2e314e0 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -30,6 +30,7 @@
 #include
 #include
 #include
+#include
 #include
@@ -109,7 +110,7 @@ static void shm_open (struct vm_area_struct *shmd)
  *
  * @shp: struct to free
  *
- * It has to be called with shp and shm_ids.sem locked,
+ * It has to be called with shp and shm_ids.mutex locked,
  * but returns with shp unlocked and freed.
  */
 static void shm_destroy (struct shmid_kernel *shp)
@@ -139,7 +140,7 @@ static void shm_close (struct vm_area_struct *shmd)
 	int id = file->f_dentry->d_inode->i_ino;
 	struct shmid_kernel *shp;
-	down (&shm_ids.sem);
+	mutex_lock(&shm_ids.mutex);
 	/* remove from the list of attaches of the shm segment */
 	if(!(shp = shm_lock(id)))
 		BUG();
@@ -151,7 +152,7 @@ static void shm_close (struct vm_area_struct *shmd)
 		shm_destroy (shp);
 	else
 		shm_unlock(shp);
-	up (&shm_ids.sem);
+	mutex_unlock(&shm_ids.mutex);
 }
 static int shm_mmap(struct file * file, struct vm_area_struct * vma)
@@ -270,7 +271,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
 	struct shmid_kernel *shp;
 	int err, id = 0;
-	down(&shm_ids.sem);
+	mutex_lock(&shm_ids.mutex);
 	if (key == IPC_PRIVATE) {
 		err = newseg(key, shmflg, size);
 	} else if ((id = ipc_findkey(&shm_ids, key)) == -1) {
@@ -296,7 +297,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
 		}
 		shm_unlock(shp);
 	}
-	up(&shm_ids.sem);
+	mutex_unlock(&shm_ids.mutex);
 	return err;
 }
@@ -467,14 +468,14 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
 			return err;
 		memset(&shm_info,0,sizeof(shm_info));
-		down(&shm_ids.sem);
+		mutex_lock(&shm_ids.mutex);
 		shm_info.used_ids = shm_ids.in_use;
 		shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp);
 		shm_info.shm_tot = shm_tot;
 		shm_info.swap_attempts = 0;
 		shm_info.swap_successes = 0;
 		err = shm_ids.max_id;
-		up(&shm_ids.sem);
+		mutex_unlock(&shm_ids.mutex);
 		if(copy_to_user (buf, &shm_info, sizeof(shm_info))) {
 			err = -EFAULT;
 			goto out;
 		}
@@ -583,7 +584,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
 		 * Instead we set a destroyed flag, and then blow
 		 * the name away when the usage hits zero.
 		 */
-		down(&shm_ids.sem);
+		mutex_lock(&shm_ids.mutex);
 		shp = shm_lock(shmid);
 		err = -EINVAL;
 		if (shp == NULL)
@@ -610,7 +611,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
 			shm_unlock(shp);
 		} else
 			shm_destroy (shp);
-		up(&shm_ids.sem);
+		mutex_unlock(&shm_ids.mutex);
 		goto out;
 	}
@@ -620,12 +621,13 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
 			err = -EFAULT;
 			goto out;
 		}
-		down(&shm_ids.sem);
+		mutex_lock(&shm_ids.mutex);
 		shp = shm_lock(shmid);
 		err=-EINVAL;
 		if(shp==NULL)
 			goto out_up;
-		if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid, setbuf.mode, &(shp->shm_perm))))
+		if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid,
+					setbuf.mode, &(shp->shm_perm))))
 			goto out_unlock_up;
 		err = shm_checkid(shp,shmid);
 		if(err)
@@ -658,7 +660,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
 out_unlock_up:
 	shm_unlock(shp);
 out_up:
-	up(&shm_ids.sem);
+	mutex_unlock(&shm_ids.mutex);
 	goto out;
 out_unlock:
 	shm_unlock(shp);
@@ -771,7 +773,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
 invalid:
 	up_write(&current->mm->mmap_sem);
-	down (&shm_ids.sem);
+	mutex_lock(&shm_ids.mutex);
 	if(!(shp = shm_lock(shmid)))
 		BUG();
 	shp->shm_nattch--;
@@ -780,7 +782,7 @@ invalid:
 		shm_destroy (shp);
 	else
 		shm_unlock(shp);
-	up (&shm_ids.sem);
+	mutex_unlock(&shm_ids.mutex);
 	*raddr = (unsigned long) user_addr;
 	err = 0;
diff --git a/ipc/util.c b/ipc/util.c
index 862621980b01..23151ef32590 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -68,7 +68,8 @@ __initcall(ipc_init);
 void __init ipc_init_ids(struct ipc_ids* ids, int size)
 {
 	int i;
-	sema_init(&ids->sem,1);
+
+	mutex_init(&ids->mutex);
 	if(size > IPCMNI)
 		size = IPCMNI;
@@ -138,7 +139,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
  * @ids: Identifier set
  * @key: The key to find
  *
- * Requires ipc_ids.sem locked.
+ * Requires ipc_ids.mutex locked.
  * Returns the identifier if found or -1 if not.
  */
@@ -150,7 +151,7 @@ int ipc_findkey(struct ipc_ids* ids, key_t key)
 	/*
 	 * rcu_dereference() is not needed here
-	 * since ipc_ids.sem is held
+	 * since ipc_ids.mutex is held
 	 */
 	for (id = 0; id <= max_id; id++) {
 		p = ids->entries->p[id];
@@ -163,7 +164,7 @@ int ipc_findkey(struct ipc_ids* ids, key_t key)
 }
 /*
- * Requires ipc_ids.sem locked
+ * Requires ipc_ids.mutex locked
  */
 static int grow_ary(struct ipc_ids* ids, int newsize)
 {
@@ -210,7 +211,7 @@ static int grow_ary(struct ipc_ids* ids, int newsize)
  * is returned. The list is returned in a locked state on success.
  * On failure the list is not locked and -1 is returned.
  *
- * Called with ipc_ids.sem held.
+ * Called with ipc_ids.mutex held.
  */
 int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
@@ -221,7 +222,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
 	/*
 	 * rcu_dereference()() is not needed here since
-	 * ipc_ids.sem is held
+	 * ipc_ids.mutex is held
 	 */
 	for (id = 0; id < size; id++) {
 		if(ids->entries->p[id] == NULL)
@@ -257,7 +258,7 @@ found:
 * fed an invalid identifier. The entry is removed and internal
 * variables recomputed. The object associated with the identifier
 * is returned.
-* ipc_ids.sem and the spinlock for this ID is hold before this function
+* ipc_ids.mutex and the spinlock for this ID is hold before this function
 * is called, and remain locked on the exit.
 */
@@ -270,7 +271,7 @@ struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
 	/*
 	 * do not need a rcu_dereference()() here to force ordering
-	 * on Alpha, since the ipc_ids.sem is held.
+	 * on Alpha, since the ipc_ids.mutex is held.
 	 */
 	p = ids->entries->p[lid];
 	ids->entries->p[lid] = NULL;
@@ -530,13 +531,13 @@ void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
 /*
  * So far only shm_get_stat() calls ipc_get() via shm_get(), so ipc_get()
- * is called with shm_ids.sem locked. Since grow_ary() is also called with
- * shm_ids.sem down(for Shared Memory), there is no need to add read
+ * is called with shm_ids.mutex locked. Since grow_ary() is also called with
+ * shm_ids.mutex down(for Shared Memory), there is no need to add read
  * barriers here to gurantee the writes in grow_ary() are seen in order
  * here (for Alpha).
  *
- * However ipc_get() itself does not necessary require ipc_ids.sem down. So
- * if in the future ipc_get() is used by other places without ipc_ids.sem
+ * However ipc_get() itself does not necessary require ipc_ids.mutex down. So
+ * if in the future ipc_get() is used by other places without ipc_ids.mutex
  * down, then ipc_get() needs read memery barriers as ipc_lock() does.
  */
 struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id)
@@ -667,7 +668,7 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
 	 * Take the lock - this will be released by the corresponding
 	 * call to stop().
 	 */
-	down(&iface->ids->sem);
+	mutex_lock(&iface->ids->mutex);
 	/* pos < 0 is invalid */
 	if (*pos < 0)
@@ -697,7 +698,7 @@ static void sysvipc_proc_stop(struct seq_file *s, void *it)
 		ipc_unlock(ipc);
 	/* Release the lock we took in start() */
-	up(&iface->ids->sem);
+	mutex_unlock(&iface->ids->mutex);
 }
 static int sysvipc_proc_show(struct seq_file *s, void *it)
diff --git a/ipc/util.h b/ipc/util.h
index efaff3ee7de7..0181553d31d8 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -25,7 +25,7 @@ struct ipc_ids {
 	int max_id;
 	unsigned short seq;
 	unsigned short seq_max;
-	struct semaphore sem;
+	struct mutex mutex;
 	struct ipc_id_ary nullentry;
 	struct ipc_id_ary* entries;
 };
@@ -40,7 +40,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
 #define ipc_init_proc_interface(path, header, ids, show) do {} while (0)
 #endif
-/* must be called with ids->sem acquired.*/
+/* must be called with ids->mutex acquired.*/
 int ipc_findkey(struct ipc_ids* ids, key_t key);
 int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size);
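
For reference, the mechanical pattern this scripted conversion applies is sketched below in kernel-style C. This is an illustrative sketch only, not code from the patch: example_ids, example_init() and example_update() are hypothetical stand-ins for the real msg_ids/sem_ids/shm_ids users shown in the hunks above.

/*
 * Illustrative sketch of the sem2mutex pattern (hypothetical names,
 * not part of the patch).
 */
#include <linux/mutex.h>

struct example_ids {
	int in_use;
	struct mutex mutex;		/* was: struct semaphore sem; */
};

static struct example_ids example_ids;

static void example_init(void)
{
	/* was: sema_init(&example_ids.sem, 1); */
	mutex_init(&example_ids.mutex);
}

static void example_update(void)
{
	/* was: down(&example_ids.sem); */
	mutex_lock(&example_ids.mutex);
	example_ids.in_use++;
	/* was: up(&example_ids.sem); */
	mutex_unlock(&example_ids.mutex);
}

The conversion is straightforward here because each of these semaphores was initialized to 1 and used purely as a sleeping lock; expressing that as a struct mutex documents the locking intent directly and lets the kernel's mutex debugging checks cover the ipc ID locks.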