summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorEthan Jackson <ethan@nicira.com>2013-07-30 15:31:48 -0700
committerBen Pfaff <blp@nicira.com>2013-07-30 21:30:45 -0700
commit97be153858b4cd175cbe7862b8e1624bf22ab98a (patch)
treeab1e518bef4e8bbc97fe8a069d9b91541bb9c4d4
parent2b51596fdeba7fbf4caff323dd6af375e7f84596 (diff)
downloadopenvswitch-97be153858b4cd175cbe7862b8e1624bf22ab98a.tar.gz
clang: Add annotations for thread safety check.
This commit adds annotations for thread safety check. And the check can be conducted by using -Wthread-safety flag in clang. Co-authored-by: Alex Wang <alexw@nicira.com> Signed-off-by: Alex Wang <alexw@nicira.com> Signed-off-by: Ethan Jackson <ethan@nicira.com> Signed-off-by: Ben Pfaff <blp@nicira.com>
-rw-r--r--INSTALL11
-rw-r--r--configure.ac1
-rw-r--r--include/sparse/pthread.h12
-rw-r--r--lib/command-line.c28
-rw-r--r--lib/compiler.h132
-rw-r--r--lib/dpif-linux.c34
-rw-r--r--lib/dpif-netdev.c98
-rw-r--r--lib/dpif.c30
-rw-r--r--lib/fatal-signal.c29
-rw-r--r--lib/jsonrpc.c8
-rw-r--r--lib/lockfile.c32
-rw-r--r--lib/netdev-dummy.c9
-rw-r--r--lib/netlink-socket.c12
-rw-r--r--lib/ofp-msgs.c8
-rw-r--r--lib/ofp-parse.c8
-rw-r--r--lib/ovs-atomic-gcc4+.c14
-rw-r--r--lib/ovs-atomic-pthreads.c8
-rw-r--r--lib/ovs-atomic-pthreads.h2
-rw-r--r--lib/ovs-thread.c128
-rw-r--r--lib/ovs-thread.h142
-rw-r--r--lib/timeval.c22
-rw-r--r--lib/uuid.c6
-rw-r--r--lib/vlog.c82
-rw-r--r--lib/vlog.h4
-rw-r--r--vswitchd/system-stats.c28
25 files changed, 554 insertions, 334 deletions
diff --git a/INSTALL b/INSTALL
index 5589fe7dd..a0eb26689 100644
--- a/INSTALL
+++ b/INSTALL
@@ -28,6 +28,11 @@ you will need the following software:
libssl is installed, then Open vSwitch will automatically build
with support for it.
+ - clang, from LLVM, is optional. It provides a useful static semantic
+ analyzer and thread-safety checks. The clang version must be 3.4 or
+ later. For Ubuntu, there are nightly-built packages available on
+ clang's website.
+
To compile the kernel module on Linux, you must also install the
following. If you cannot build or install the kernel module, you may
use the userspace-only implementation, at a cost in performance. The
@@ -107,6 +112,8 @@ installing the following to obtain better warnings:
- GNU make.
+ - clang, version 3.4 or later
+
Also, you may find the ovs-dev script found in utilities/ovs-dev.py useful.
Installation Requirements
@@ -173,6 +180,10 @@ Prerequisites section, follow the procedure below to build.
% ./configure CC=gcc-4.2
+ To use 'clang' compiler:
+
+ % ./configure CC=clang
+
To build the Linux kernel module, so that you can run the
kernel-based switch, pass the location of the kernel build
directory on --with-linux. For example, to build for a running
diff --git a/configure.ac b/configure.ac
index 9646b6a7e..70fc9df25 100644
--- a/configure.ac
+++ b/configure.ac
@@ -104,6 +104,7 @@ OVS_ENABLE_OPTION([-Wstrict-prototypes])
OVS_ENABLE_OPTION([-Wold-style-definition])
OVS_ENABLE_OPTION([-Wmissing-prototypes])
OVS_ENABLE_OPTION([-Wmissing-field-initializers])
+OVS_ENABLE_OPTION([-Wthread-safety])
OVS_CONDITIONAL_CC_OPTION([-Wno-unused], [HAVE_WNO_UNUSED])
OVS_CONDITIONAL_CC_OPTION([-Wno-unused-parameter], [HAVE_WNO_UNUSED_PARAMETER])
OVS_ENABLE_WERROR
diff --git a/include/sparse/pthread.h b/include/sparse/pthread.h
index 6cdf5c8a7..aa4652efc 100644
--- a/include/sparse/pthread.h
+++ b/include/sparse/pthread.h
@@ -26,12 +26,12 @@
int pthread_mutex_lock(pthread_mutex_t *mutex) OVS_ACQUIRES(mutex);
int pthread_mutex_unlock(pthread_mutex_t *mutex) OVS_RELEASES(mutex);
-int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock) OVS_ACQUIRES(rwlock);
-int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock) OVS_ACQUIRES(rwlock);
+int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock) OVS_ACQ_RDLOCK(rwlock);
+int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock) OVS_ACQ_WRLOCK(rwlock);
int pthread_rwlock_unlock(pthread_rwlock_t *rwlock) OVS_RELEASES(rwlock);
int pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *mutex)
- OVS_MUST_HOLD(mutex);
+ OVS_REQUIRES(mutex);
/* Sparse complains about the proper PTHREAD_*_INITIALIZER definitions.
* Luckily, it's not a real compiler so we can overwrite it with something
@@ -52,7 +52,7 @@ int pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *mutex)
({ \
int retval = pthread_mutex_trylock(mutex); \
if (!retval) { \
- OVS_ACQUIRE(MUTEX); \
+ OVS_MACRO_LOCK(MUTEX); \
} \
retval; \
})
@@ -61,7 +61,7 @@ int pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *mutex)
({ \
int retval = pthread_rwlock_tryrdlock(rwlock); \
if (!retval) { \
- OVS_ACQUIRE(RWLOCK); \
+ OVS_MACRO_LOCK(RWLOCK); \
} \
retval; \
})
@@ -69,7 +69,7 @@ int pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *mutex)
({ \
int retval = pthread_rwlock_trywrlock(rwlock); \
if (!retval) { \
- OVS_ACQUIRE(RWLOCK); \
+ OVS_MACRO_LOCK(RWLOCK); \
} \
retval; \
})
diff --git a/lib/command-line.c b/lib/command-line.c
index 39b26daee..805e51b35 100644
--- a/lib/command-line.c
+++ b/lib/command-line.c
@@ -94,10 +94,16 @@ run_command(int argc, char *argv[], const struct command commands[])
/* Process title. */
#ifdef LINUX_DATAPATH
-static char *argv_start; /* Start of command-line arguments in memory. */
-static size_t argv_size; /* Number of bytes of command-line arguments. */
-static char *saved_proctitle; /* Saved command-line arguments. */
-static pthread_mutex_t proctitle_mutex = PTHREAD_MUTEX_INITIALIZER;
+static struct ovs_mutex proctitle_mutex = OVS_MUTEX_INITIALIZER;
+
+/* Start of command-line arguments in memory. */
+static char *argv_start OVS_GUARDED_BY(proctitle_mutex);
+
+/* Number of bytes of command-line arguments. */
+static size_t argv_size OVS_GUARDED_BY(proctitle_mutex);
+
+/* Saved command-line arguments. */
+static char *saved_proctitle OVS_GUARDED_BY(proctitle_mutex);
/* Prepares the process so that proctitle_set() can later succeed.
*
@@ -118,6 +124,7 @@ proctitle_init(int argc, char **argv)
return;
}
+ ovs_mutex_lock(&proctitle_mutex);
/* Specialized version of first loop iteration below. */
argv_start = argv[0];
argv_size = strlen(argv[0]) + 1;
@@ -141,6 +148,7 @@ proctitle_init(int argc, char **argv)
/* Copy out the old argument so we can reuse the space. */
argv[i] = xstrdup(argv[i]);
}
+ ovs_mutex_unlock(&proctitle_mutex);
}
/* Changes the name of the process, as shown by "ps", to the program name
@@ -151,11 +159,11 @@ proctitle_set(const char *format, ...)
va_list args;
int n;
+ ovs_mutex_lock(&proctitle_mutex);
if (!argv_start || argv_size < 8) {
- return;
+ goto out;
}
- xpthread_mutex_lock(&proctitle_mutex);
if (!saved_proctitle) {
saved_proctitle = xmemdup(argv_start, argv_size);
}
@@ -174,20 +182,22 @@ proctitle_set(const char *format, ...)
memset(&argv_start[n], '\0', argv_size - n);
}
va_end(args);
- xpthread_mutex_unlock(&proctitle_mutex);
+
+out:
+ ovs_mutex_unlock(&proctitle_mutex);
}
/* Restores the process's original command line, as seen by "ps". */
void
proctitle_restore(void)
{
- xpthread_mutex_lock(&proctitle_mutex);
+ ovs_mutex_lock(&proctitle_mutex);
if (saved_proctitle) {
memcpy(argv_start, saved_proctitle, argv_size);
free(saved_proctitle);
saved_proctitle = NULL;
}
- xpthread_mutex_unlock(&proctitle_mutex);
+ ovs_mutex_unlock(&proctitle_mutex);
}
#else /* !LINUX_DATAPATH*/
/* Stubs that don't do anything on non-Linux systems. */
diff --git a/lib/compiler.h b/lib/compiler.h
index 4b1834f0f..1c01fd1d6 100644
--- a/lib/compiler.h
+++ b/lib/compiler.h
@@ -17,6 +17,10 @@
#ifndef COMPILER_H
#define COMPILER_H 1
+#ifndef __has_feature
+ #define __has_feature(x) 0
+#endif
+
#if __GNUC__ && !__CHECKER__
#define NO_RETURN __attribute__((__noreturn__))
#define OVS_UNUSED __attribute__((__unused__))
@@ -41,38 +45,114 @@
#define OVS_UNLIKELY(CONDITION) (!!(CONDITION))
#endif
-#ifdef __CHECKER__
-/* "sparse" annotations for mutexes and mutex-like constructs.
+#if __has_feature(c_thread_safety_attributes)
+/* "clang" annotations for thread safety check.
+ *
+ * OVS_LOCKABLE indicates that the struct contains mutex element
+ * which can be locked by functions like ovs_mutex_lock().
+ *
+ * Below, the word MUTEX stands for the name of an object with an OVS_LOCKABLE
+ * struct type. It can also be a comma-separated list of multiple structs,
+ * e.g. to require a function to hold multiple locks while invoked.
+ *
+ *
+ * On a variable:
+ *
+ * - OVS_GUARDED indicates that the variable may only be accessed while some
+ * mutex is held.
+ *
+ * - OVS_GUARDED_BY(MUTEX) indicates that the variable may only be accessed
+ * while the specific MUTEX is held.
*
- * In a function prototype, OVS_ACQUIRES(MUTEX) indicates that the function
- * must be called without MUTEX acquired and that it returns with MUTEX
- * acquired. OVS_RELEASES(MUTEX) indicates the reverse. OVS_MUST_HOLD
- * indicates that the function must be called with MUTEX acquired by the
- * caller and that the function does not release MUTEX.
*
- * In practice, sparse ignores the MUTEX argument. It need not even be a
- * valid expression. It is meant to indicate to human readers what mutex is
- * being acquired.
+ * On a function, the following attributes apply to mutexes:
+ *
+ * - OVS_ACQUIRES(MUTEX) indicates that the function must be called without
+ * holding MUTEX and that it returns holding MUTEX.
+ *
+ * - OVS_RELEASES(MUTEX) indicates that the function may only be called with
+ * MUTEX held and that it returns with MUTEX released. It can be used for
+ * all types of MUTEX.
+ *
+ * - OVS_TRY_LOCK(RETVAL, MUTEX) indicates that the function will try to
+ * acquire MUTEX. RETVAL is an integer or boolean value specifying the
+ * return value of a successful lock acquisition.
+ *
+ * - OVS_REQUIRES(MUTEX) indicates that the function may only be called with
+ * MUTEX held and that the function does not release MUTEX.
+ *
+ * - OVS_LOCKS_EXCLUDED(MUTEX) indicates that the function may only be
+ * called when MUTEX is not held.
+ *
+ * The following variants, with the same syntax, apply to reader-writer locks:
+ *
+ * mutex rwlock, for reading rwlock, for writing
+ * ------------------- ------------------- -------------------
+ * OVS_ACQUIRES OVS_ACQ_RDLOCK OVS_ACQ_WRLOCK
+ * OVS_RELEASES OVS_RELEASES OVS_RELEASES
+ * OVS_TRY_LOCK OVS_TRY_RDLOCK OVS_TRY_WRLOCK
+ * OVS_REQUIRES OVS_REQ_RDLOCK OVS_REQ_WRLOCK
+ * OVS_LOCKS_EXCLUDED OVS_LOCKS_EXCLUDED OVS_LOCKS_EXCLUDED
+ */
+#define OVS_LOCKABLE __attribute__((lockable))
+#define OVS_REQ_RDLOCK(...) __attribute__((shared_locks_required(__VA_ARGS__)))
+#define OVS_ACQ_RDLOCK(...) __attribute__((shared_lock_function(__VA_ARGS__)))
+#define OVS_REQ_WRLOCK(...) \
+ __attribute__((exclusive_locks_required(__VA_ARGS__)))
+#define OVS_ACQ_WRLOCK(...) \
+ __attribute__((exclusive_lock_function(__VA_ARGS__)))
+#define OVS_REQUIRES(...) \
+ __attribute__((exclusive_locks_required(__VA_ARGS__)))
+#define OVS_ACQUIRES(...) \
+ __attribute__((exclusive_lock_function(__VA_ARGS__)))
+#define OVS_TRY_WRLOCK(RETVAL, ...) \
+ __attribute__((exclusive_trylock_function(RETVAL, __VA_ARGS__)))
+#define OVS_TRY_RDLOCK(RETVAL, ...) \
+ __attribute__((shared_trylock_function(RETVAL, __VA_ARGS__)))
+#define OVS_TRY_LOCK(RETVAL, ...) \
+ __attribute__((exclusive_trylock_function(RETVAL, __VA_ARGS__)))
+#define OVS_GUARDED __attribute__((guarded_var))
+#define OVS_GUARDED_BY(...) __attribute__((guarded_by(__VA_ARGS__)))
+#define OVS_RELEASES(...) __attribute__((unlock_function(__VA_ARGS__)))
+#define OVS_LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__)))
+#elif __CHECKER__
+/* "sparse" annotations for mutexes and mutex-like constructs.
*
- * Since sparse ignores MUTEX, it need not be an actual mutex. It can be
- * any construct on which paired acquire and release semantics make sense:
- * read/write locks, temporary memory allocations, whatever.
+ * Change the thread-safety check annotations to use "context" attribute.
*
- * OVS_ACQUIRE, OVS_RELEASE, and OVS_HOLDS are suitable for use within macros,
+ * OVS_MACRO_LOCK and OVS_MACRO_RELEASE are suitable for use within macros,
* where there is no function prototype to annotate. */
-#define OVS_ACQUIRES(MUTEX) __attribute__((context(MUTEX, 0, 1)))
-#define OVS_RELEASES(MUTEX) __attribute__((context(MUTEX, 1, 0)))
-#define OVS_MUST_HOLD(MUTEX) __attribute__((context(MUTEX, 1, 1)))
-#define OVS_ACQUIRE(MUTEX) __context__(MUTEX, 0, 1)
-#define OVS_RELEASE(MUTEX) __context__(MUTEX, 1, 0)
-#define OVS_HOLDS(MUTEX) __context__(MUTEX, 1, 1)
+#define OVS_LOCKABLE
+#define OVS_REQ_RDLOCK(...) __attribute__((context(MUTEX, 1, 1)))
+#define OVS_ACQ_RDLOCK(...) __attribute__((context(MUTEX, 0, 1)))
+#define OVS_REQ_WRLOCK(...) __attribute__((context(MUTEX, 1, 1)))
+#define OVS_ACQ_WRLOCK(...) __attribute__((context(MUTEX, 0, 1)))
+#define OVS_REQUIRES(...) __attribute__((context(MUTEX, 1, 1)))
+#define OVS_ACQUIRES(...) __attribute__((context(MUTEX, 0, 1)))
+#define OVS_TRY_WRLOCK(RETVAL, ...)
+#define OVS_TRY_RDLOCK(RETVAL, ...)
+#define OVS_TRY_LOCK(RETVAL, ...)
+#define OVS_GUARDED
+#define OVS_GUARDED_BY(...)
+#define OVS_RELEASES(...) __attribute__((context(MUTEX, 1, 0)))
+#define OVS_MACRO_LOCK(...) __context__(MUTEX, 0, 1)
+#define OVS_MACRO_RELEASE(...) __context__(MUTEX, 1, 0)
#else
-#define OVS_ACQUIRES(MUTEX)
-#define OVS_RELEASES(MUTEX)
-#define OVS_MUST_HOLD(MUTEX)
-#define OVS_ACQUIRE(MUTEX)
-#define OVS_RELEASE(MUTEX)
-#define OVS_HOLDS(MUTEX)
+#define OVS_LOCKABLE
+#define OVS_REQ_RDLOCK(...)
+#define OVS_ACQ_RDLOCK(...)
+#define OVS_REQ_WRLOCK(...)
+#define OVS_ACQ_WRLOCK(...)
+#define OVS_REQUIRES(...)
+#define OVS_ACQUIRES(...)
+#define OVS_TRY_WRLOCK(...)
+#define OVS_TRY_RDLOCK(...)
+#define OVS_TRY_LOCK(...)
+#define OVS_GUARDED
+#define OVS_GUARDED_BY(...)
+#define OVS_RELEASES(...)
+#define OVS_MACRO_LOCK(...)
+#define OVS_MACRO_RELEASE(...)
#endif
/* ISO C says that a C implementation may choose any integer type for an enum
diff --git a/lib/dpif-linux.c b/lib/dpif-linux.c
index a6df33977..27c622aa2 100644
--- a/lib/dpif-linux.c
+++ b/lib/dpif-linux.c
@@ -140,7 +140,7 @@ struct dpif_linux {
int dp_ifindex;
/* Upcall messages. */
- pthread_mutex_t upcall_lock;
+ struct ovs_mutex upcall_lock;
int uc_array_size; /* Size of 'channels' and 'epoll_events'. */
struct dpif_channel *channels;
struct epoll_event *epoll_events;
@@ -248,7 +248,7 @@ open_dpif(const struct dpif_linux_dp *dp, struct dpif **dpifp)
dpif = xzalloc(sizeof *dpif);
dpif->port_notifier = NULL;
- xpthread_mutex_init(&dpif->upcall_lock, NULL);
+ ovs_mutex_init(&dpif->upcall_lock, PTHREAD_MUTEX_DEFAULT);
dpif->epoll_fd = -1;
dpif_init(&dpif->dpif, &dpif_linux_class, dp->name,
@@ -383,7 +383,7 @@ dpif_linux_close(struct dpif *dpif_)
if (dpif->epoll_fd >= 0) {
close(dpif->epoll_fd);
}
- xpthread_mutex_destroy(&dpif->upcall_lock);
+ ovs_mutex_destroy(&dpif->upcall_lock);
free(dpif);
}
@@ -572,9 +572,9 @@ dpif_linux_port_add(struct dpif *dpif_, struct netdev *netdev,
struct dpif_linux *dpif = dpif_linux_cast(dpif_);
int error;
- xpthread_mutex_lock(&dpif->upcall_lock);
+ ovs_mutex_lock(&dpif->upcall_lock);
error = dpif_linux_port_add__(dpif_, netdev, port_nop);
- xpthread_mutex_unlock(&dpif->upcall_lock);
+ ovs_mutex_unlock(&dpif->upcall_lock);
return error;
}
@@ -603,9 +603,9 @@ dpif_linux_port_del(struct dpif *dpif_, odp_port_t port_no)
struct dpif_linux *dpif = dpif_linux_cast(dpif_);
int error;
- xpthread_mutex_lock(&dpif->upcall_lock);
+ ovs_mutex_lock(&dpif->upcall_lock);
error = dpif_linux_port_del__(dpif_, port_no);
- xpthread_mutex_unlock(&dpif->upcall_lock);
+ ovs_mutex_unlock(&dpif->upcall_lock);
return error;
}
@@ -668,14 +668,14 @@ dpif_linux_port_get_pid(const struct dpif *dpif_, odp_port_t port_no)
uint32_t port_idx = odp_to_u32(port_no);
uint32_t pid = 0;
- xpthread_mutex_lock(&dpif->upcall_lock);
+ ovs_mutex_lock(&dpif->upcall_lock);
if (dpif->epoll_fd >= 0) {
/* The ODPP_NONE "reserved" port number uses the "ovs-system"'s
* channel, since it is not heavily loaded. */
uint32_t idx = port_idx >= dpif->uc_array_size ? 0 : port_idx;
pid = nl_sock_pid(dpif->channels[idx].sock);
}
- xpthread_mutex_unlock(&dpif->upcall_lock);
+ ovs_mutex_unlock(&dpif->upcall_lock);
return pid;
}
@@ -1310,9 +1310,9 @@ dpif_linux_recv_set(struct dpif *dpif_, bool enable)
struct dpif_linux *dpif = dpif_linux_cast(dpif_);
int error;
- xpthread_mutex_lock(&dpif->upcall_lock);
+ ovs_mutex_lock(&dpif->upcall_lock);
error = dpif_linux_recv_set__(dpif_, enable);
- xpthread_mutex_unlock(&dpif->upcall_lock);
+ ovs_mutex_unlock(&dpif->upcall_lock);
return error;
}
@@ -1463,9 +1463,9 @@ dpif_linux_recv(struct dpif *dpif_, struct dpif_upcall *upcall,
struct dpif_linux *dpif = dpif_linux_cast(dpif_);
int error;
- xpthread_mutex_lock(&dpif->upcall_lock);
+ ovs_mutex_lock(&dpif->upcall_lock);
error = dpif_linux_recv__(dpif_, upcall, buf);
- xpthread_mutex_unlock(&dpif->upcall_lock);
+ ovs_mutex_unlock(&dpif->upcall_lock);
return error;
}
@@ -1475,11 +1475,11 @@ dpif_linux_recv_wait(struct dpif *dpif_)
{
struct dpif_linux *dpif = dpif_linux_cast(dpif_);
- xpthread_mutex_lock(&dpif->upcall_lock);
+ ovs_mutex_lock(&dpif->upcall_lock);
if (dpif->epoll_fd >= 0) {
poll_fd_wait(dpif->epoll_fd, POLLIN);
}
- xpthread_mutex_unlock(&dpif->upcall_lock);
+ ovs_mutex_unlock(&dpif->upcall_lock);
}
static void
@@ -1487,7 +1487,7 @@ dpif_linux_recv_purge(struct dpif *dpif_)
{
struct dpif_linux *dpif = dpif_linux_cast(dpif_);
- xpthread_mutex_lock(&dpif->upcall_lock);
+ ovs_mutex_lock(&dpif->upcall_lock);
if (dpif->epoll_fd >= 0) {
struct dpif_channel *ch;
@@ -1498,7 +1498,7 @@ dpif_linux_recv_purge(struct dpif *dpif_)
}
}
}
- xpthread_mutex_unlock(&dpif->upcall_lock);
+ ovs_mutex_unlock(&dpif->upcall_lock);
}
const struct dpif_class dpif_linux_class = {
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 489039829..5bd2d7b1c 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -141,7 +141,7 @@ struct dpif_netdev {
static struct shash dp_netdevs = SHASH_INITIALIZER(&dp_netdevs);
/* Global lock for all data. */
-static pthread_mutex_t dp_netdev_mutex = PTHREAD_MUTEX_INITIALIZER;
+static struct ovs_mutex dp_netdev_mutex = OVS_MUTEX_INITIALIZER;
static int get_port_by_number(struct dp_netdev *, odp_port_t port_no,
struct dp_netdev_port **portp);
@@ -184,11 +184,11 @@ dpif_netdev_enumerate(struct sset *all_dps)
{
struct shash_node *node;
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
SHASH_FOR_EACH(node, &dp_netdevs) {
sset_add(all_dps, node->name);
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return 0;
}
@@ -302,7 +302,7 @@ dpif_netdev_open(const struct dpif_class *class, const char *name,
struct dp_netdev *dp;
int error;
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
dp = shash_find_data(&dp_netdevs, name);
if (!dp) {
error = create ? create_dp_netdev(name, class, &dp) : ENODEV;
@@ -314,7 +314,7 @@ dpif_netdev_open(const struct dpif_class *class, const char *name,
if (!error) {
*dpifp = create_dpif_netdev(dp);
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return error;
}
@@ -354,7 +354,7 @@ dpif_netdev_close(struct dpif *dpif)
{
struct dp_netdev *dp = get_dp_netdev(dpif);
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
ovs_assert(dp->open_cnt > 0);
if (--dp->open_cnt == 0 && dp->destroyed) {
@@ -363,7 +363,7 @@ dpif_netdev_close(struct dpif *dpif)
}
free(dpif);
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
}
static int
@@ -371,9 +371,9 @@ dpif_netdev_destroy(struct dpif *dpif)
{
struct dp_netdev *dp = get_dp_netdev(dpif);
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
dp->destroyed = true;
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return 0;
}
@@ -383,12 +383,12 @@ dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
{
struct dp_netdev *dp = get_dp_netdev(dpif);
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
stats->n_flows = hmap_count(&dp->flow_table);
stats->n_hit = dp->n_hit;
stats->n_missed = dp->n_missed;
stats->n_lost = dp->n_lost;
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return 0;
}
@@ -461,7 +461,7 @@ dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
odp_port_t port_no;
int error;
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
dpif_port = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
if (*port_nop != ODPP_NONE) {
uint32_t port_idx = odp_to_u32(*port_nop);
@@ -481,7 +481,7 @@ dpif_netdev_port_add(struct dpif *dpif, struct netdev *netdev,
*port_nop = port_no;
error = do_add_port(dp, dpif_port, netdev_get_type(netdev), port_no);
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return error;
}
@@ -492,9 +492,9 @@ dpif_netdev_port_del(struct dpif *dpif, odp_port_t port_no)
struct dp_netdev *dp = get_dp_netdev(dpif);
int error;
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
error = port_no == ODPP_LOCAL ? EINVAL : do_del_port(dp, port_no);
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return error;
}
@@ -574,12 +574,12 @@ dpif_netdev_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
struct dp_netdev_port *port;
int error;
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
error = get_port_by_number(dp, port_no, &port);
if (!error && dpif_port) {
answer_port_query(port, dpif_port);
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return error;
}
@@ -592,12 +592,12 @@ dpif_netdev_port_query_by_name(const struct dpif *dpif, const char *devname,
struct dp_netdev_port *port;
int error;
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
error = get_port_by_name(dp, devname, &port);
if (!error && dpif_port) {
answer_port_query(port, dpif_port);
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return error;
}
@@ -631,9 +631,9 @@ dpif_netdev_flow_flush(struct dpif *dpif)
{
struct dp_netdev *dp = get_dp_netdev(dpif);
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
dp_netdev_flow_flush(dp);
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return 0;
}
@@ -658,7 +658,7 @@ dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
struct dp_netdev *dp = get_dp_netdev(dpif);
uint32_t port_idx;
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
for (port_idx = odp_to_u32(state->port_no);
port_idx < MAX_PORTS; port_idx++) {
struct dp_netdev_port *port = dp->ports[port_idx];
@@ -669,12 +669,12 @@ dpif_netdev_port_dump_next(const struct dpif *dpif, void *state_,
dpif_port->type = port->type;
dpif_port->port_no = port->port_no;
state->port_no = u32_to_odp(port_idx + 1);
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return 0;
}
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return EOF;
}
@@ -694,14 +694,14 @@ dpif_netdev_port_poll(const struct dpif *dpif_, char **devnamep OVS_UNUSED)
struct dpif_netdev *dpif = dpif_netdev_cast(dpif_);
int error;
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
if (dpif->dp_serial != dpif->dp->serial) {
dpif->dp_serial = dpif->dp->serial;
error = ENOBUFS;
} else {
error = EAGAIN;
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return error;
}
@@ -715,11 +715,11 @@ dpif_netdev_port_poll_wait(const struct dpif *dpif_)
* function and the poll_block() in one thread and a change in
* dpif->dp->serial in another thread. */
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
if (dpif->dp_serial != dpif->dp->serial) {
poll_immediate_wake();
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
}
static struct dp_netdev_flow *
@@ -792,7 +792,7 @@ dpif_netdev_flow_get(const struct dpif *dpif,
return error;
}
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
flow = dp_netdev_lookup_flow(dp, &key);
if (flow) {
if (stats) {
@@ -804,7 +804,7 @@ dpif_netdev_flow_get(const struct dpif *dpif,
} else {
error = ENOENT;
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return error;
}
@@ -861,7 +861,7 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
return error;
}
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
flow = dp_netdev_lookup_flow(dp, &key);
if (!flow) {
if (put->flags & DPIF_FP_CREATE) {
@@ -892,7 +892,7 @@ dpif_netdev_flow_put(struct dpif *dpif, const struct dpif_flow_put *put)
error = EEXIST;
}
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return error;
}
@@ -910,7 +910,7 @@ dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
return error;
}
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
flow = dp_netdev_lookup_flow(dp, &key);
if (flow) {
if (del->stats) {
@@ -920,7 +920,7 @@ dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
} else {
error = ENOENT;
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return error;
}
@@ -957,10 +957,10 @@ dpif_netdev_flow_dump_next(const struct dpif *dpif, void *state_,
struct dp_netdev_flow *flow;
struct hmap_node *node;
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
node = hmap_at_position(&dp->flow_table, &state->bucket, &state->offset);
if (!node) {
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return EOF;
}
@@ -994,7 +994,7 @@ dpif_netdev_flow_dump_next(const struct dpif *dpif, void *state_,
*stats = &state->stats;
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return 0;
}
@@ -1030,10 +1030,10 @@ dpif_netdev_execute(struct dpif *dpif, const struct dpif_execute *execute)
error = dpif_netdev_flow_from_nlattrs(execute->key, execute->key_len,
&key);
if (!error) {
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
dp_netdev_execute_actions(dp, &copy, &key,
execute->actions, execute->actions_len);
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
}
ofpbuf_uninit(&copy);
@@ -1076,7 +1076,7 @@ dpif_netdev_recv(struct dpif *dpif, struct dpif_upcall *upcall,
struct dp_netdev_queue *q;
int error;
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
q = find_nonempty_queue(dpif);
if (q) {
struct dp_netdev_upcall *u = &q->upcalls[q->tail++ & QUEUE_MASK];
@@ -1091,7 +1091,7 @@ dpif_netdev_recv(struct dpif *dpif, struct dpif_upcall *upcall,
} else {
error = EAGAIN;
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
return error;
}
@@ -1103,20 +1103,20 @@ dpif_netdev_recv_wait(struct dpif *dpif)
* function and the poll_block() in one thread and a packet being queued in
* another thread. */
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
if (find_nonempty_queue(dpif)) {
poll_immediate_wake();
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
}
static void
dpif_netdev_recv_purge(struct dpif *dpif)
{
struct dpif_netdev *dpif_netdev = dpif_netdev_cast(dpif);
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
dp_netdev_purge_queues(dpif_netdev->dp);
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
}
static void
@@ -1161,7 +1161,7 @@ dpif_netdev_run(struct dpif *dpif)
struct dp_netdev *dp;
struct ofpbuf packet;
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
dp = get_dp_netdev(dpif);
ofpbuf_init(&packet,
DP_NETDEV_HEADROOM + VLAN_ETH_HEADER_LEN + dp->max_mtu);
@@ -1184,7 +1184,7 @@ dpif_netdev_run(struct dpif *dpif)
}
}
ofpbuf_uninit(&packet);
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
}
static void
@@ -1205,13 +1205,13 @@ dpif_netdev_wait(struct dpif *dpif)
* - In the dpif_port_remove() case, A might wake up spuriously, but
* that is harmless. */
- xpthread_mutex_lock(&dp_netdev_mutex);
+ ovs_mutex_lock(&dp_netdev_mutex);
LIST_FOR_EACH (port, node, &get_dp_netdev(dpif)->port_list) {
if (port->rx) {
netdev_rx_wait(port->rx);
}
}
- xpthread_mutex_unlock(&dp_netdev_mutex);
+ ovs_mutex_unlock(&dp_netdev_mutex);
}
static void
diff --git a/lib/dpif.c b/lib/dpif.c
index 0d8dd9d4f..3d6aed71c 100644
--- a/lib/dpif.c
+++ b/lib/dpif.c
@@ -71,7 +71,7 @@ static struct shash dpif_classes = SHASH_INITIALIZER(&dpif_classes);
static struct sset dpif_blacklist = SSET_INITIALIZER(&dpif_blacklist);
/* Protects 'dpif_classes', including the refcount, and 'dpif_blacklist'. */
-static pthread_mutex_t dpif_mutex = PTHREAD_MUTEX_INITIALIZER;
+static struct ovs_mutex dpif_mutex = OVS_MUTEX_INITIALIZER;
/* Rate limit for individual messages going to or from the datapath, output at
* DBG level. This is very high because, if these are enabled, it is because
@@ -145,9 +145,9 @@ dp_register_provider(const struct dpif_class *new_class)
{
int error;
- xpthread_mutex_lock(&dpif_mutex);
+ ovs_mutex_lock(&dpif_mutex);
error = dp_register_provider__(new_class);
- xpthread_mutex_unlock(&dpif_mutex);
+ ovs_mutex_unlock(&dpif_mutex);
return error;
}
@@ -190,9 +190,9 @@ dp_unregister_provider(const char *type)
dp_initialize();
- xpthread_mutex_lock(&dpif_mutex);
+ ovs_mutex_lock(&dpif_mutex);
error = dp_unregister_provider__(type);
- xpthread_mutex_unlock(&dpif_mutex);
+ ovs_mutex_unlock(&dpif_mutex);
return error;
}
@@ -202,9 +202,9 @@ dp_unregister_provider(const char *type)
void
dp_blacklist_provider(const char *type)
{
- xpthread_mutex_lock(&dpif_mutex);
+ ovs_mutex_lock(&dpif_mutex);
sset_add(&dpif_blacklist, type);
- xpthread_mutex_unlock(&dpif_mutex);
+ ovs_mutex_unlock(&dpif_mutex);
}
/* Clears 'types' and enumerates the types of all currently registered datapath
@@ -217,21 +217,21 @@ dp_enumerate_types(struct sset *types)
dp_initialize();
sset_clear(types);
- xpthread_mutex_lock(&dpif_mutex);
+ ovs_mutex_lock(&dpif_mutex);
SHASH_FOR_EACH(node, &dpif_classes) {
const struct registered_dpif_class *registered_class = node->data;
sset_add(types, registered_class->dpif_class->type);
}
- xpthread_mutex_unlock(&dpif_mutex);
+ ovs_mutex_unlock(&dpif_mutex);
}
static void
dp_class_unref(struct registered_dpif_class *rc)
{
- xpthread_mutex_lock(&dpif_mutex);
+ ovs_mutex_lock(&dpif_mutex);
ovs_assert(rc->refcount);
rc->refcount--;
- xpthread_mutex_unlock(&dpif_mutex);
+ ovs_mutex_unlock(&dpif_mutex);
}
static struct registered_dpif_class *
@@ -239,12 +239,12 @@ dp_class_lookup(const char *type)
{
struct registered_dpif_class *rc;
- xpthread_mutex_lock(&dpif_mutex);
+ ovs_mutex_lock(&dpif_mutex);
rc = shash_find_data(&dpif_classes, type);
if (rc) {
rc->refcount++;
}
- xpthread_mutex_unlock(&dpif_mutex);
+ ovs_mutex_unlock(&dpif_mutex);
return rc;
}
@@ -481,12 +481,12 @@ dpif_port_open_type(const char *datapath_type, const char *port_type)
datapath_type = dpif_normalize_type(datapath_type);
- xpthread_mutex_lock(&dpif_mutex);
+ ovs_mutex_lock(&dpif_mutex);
rc = shash_find_data(&dpif_classes, datapath_type);
if (rc && rc->dpif_class->port_open_type) {
port_type = rc->dpif_class->port_open_type(rc->dpif_class, port_type);
}
- xpthread_mutex_unlock(&dpif_mutex);
+ ovs_mutex_unlock(&dpif_mutex);
return port_type;
}
diff --git a/lib/fatal-signal.c b/lib/fatal-signal.c
index 40daf5a9e..b4c5ee6ac 100644
--- a/lib/fatal-signal.c
+++ b/lib/fatal-signal.c
@@ -57,7 +57,7 @@ static size_t n_hooks;
static int signal_fds[2];
static volatile sig_atomic_t stored_sig_nr = SIG_ATOMIC_MAX;
-static pthread_mutex_t mutex;
+static struct ovs_mutex mutex;
static void atexit_handler(void);
static void call_hooks(int sig_nr);
@@ -72,17 +72,12 @@ fatal_signal_init(void)
static bool inited = false;
if (!inited) {
- pthread_mutexattr_t attr;
size_t i;
assert_single_threaded();
inited = true;
- xpthread_mutexattr_init(&attr);
- xpthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
- xpthread_mutex_init(&mutex, &attr);
- xpthread_mutexattr_destroy(&attr);
-
+ ovs_mutex_init(&mutex, PTHREAD_MUTEX_RECURSIVE);
xpipe_nonblocking(signal_fds);
for (i = 0; i < ARRAY_SIZE(fatal_signals); i++) {
@@ -119,14 +114,14 @@ fatal_signal_add_hook(void (*hook_cb)(void *aux), void (*cancel_cb)(void *aux),
{
fatal_signal_init();
- xpthread_mutex_lock(&mutex);
+ ovs_mutex_lock(&mutex);
ovs_assert(n_hooks < MAX_HOOKS);
hooks[n_hooks].hook_cb = hook_cb;
hooks[n_hooks].cancel_cb = cancel_cb;
hooks[n_hooks].aux = aux;
hooks[n_hooks].run_at_exit = run_at_exit;
n_hooks++;
- xpthread_mutex_unlock(&mutex);
+ ovs_mutex_unlock(&mutex);
}
/* Handles fatal signal number 'sig_nr'.
@@ -167,7 +162,7 @@ fatal_signal_run(void)
if (sig_nr != SIG_ATOMIC_MAX) {
char namebuf[SIGNAL_NAME_BUFSIZE];
- xpthread_mutex_lock(&mutex);
+ ovs_mutex_lock(&mutex);
VLOG_WARN("terminating with signal %d (%s)",
(int)sig_nr, signal_name(sig_nr, namebuf, sizeof namebuf));
@@ -178,7 +173,7 @@ fatal_signal_run(void)
signal(sig_nr, SIG_DFL);
raise(sig_nr);
- xpthread_mutex_unlock(&mutex);
+ ovs_mutex_unlock(&mutex);
NOT_REACHED();
}
}
@@ -232,14 +227,14 @@ fatal_signal_add_file_to_unlink(const char *file)
{
fatal_signal_init();
- xpthread_mutex_lock(&mutex);
+ ovs_mutex_lock(&mutex);
if (!added_hook) {
added_hook = true;
fatal_signal_add_hook(unlink_files, cancel_files, NULL, true);
}
sset_add(&files, file);
- xpthread_mutex_unlock(&mutex);
+ ovs_mutex_unlock(&mutex);
}
/* Unregisters 'file' from being unlinked when the program terminates via
@@ -249,9 +244,9 @@ fatal_signal_remove_file_to_unlink(const char *file)
{
fatal_signal_init();
- xpthread_mutex_lock(&mutex);
+ ovs_mutex_lock(&mutex);
sset_find_and_delete(&files, file);
- xpthread_mutex_unlock(&mutex);
+ ovs_mutex_unlock(&mutex);
}
/* Like fatal_signal_remove_file_to_unlink(), but also unlinks 'file'.
@@ -263,7 +258,7 @@ fatal_signal_unlink_file_now(const char *file)
fatal_signal_init();
- xpthread_mutex_lock(&mutex);
+ ovs_mutex_lock(&mutex);
error = unlink(file) ? errno : 0;
if (error) {
@@ -272,7 +267,7 @@ fatal_signal_unlink_file_now(const char *file)
fatal_signal_remove_file_to_unlink(file);
- xpthread_mutex_unlock(&mutex);
+ ovs_mutex_unlock(&mutex);
return error;
}
diff --git a/lib/jsonrpc.c b/lib/jsonrpc.c
index a7d89f705..e02f03579 100644
--- a/lib/jsonrpc.c
+++ b/lib/jsonrpc.c
@@ -515,14 +515,10 @@ jsonrpc_create(enum jsonrpc_msg_type type, const char *method,
static struct json *
jsonrpc_create_id(void)
{
- static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
- static unsigned int next_id;
+ static atomic_uint next_id = ATOMIC_VAR_INIT(0);
unsigned int id;
- xpthread_mutex_lock(&mutex);
- id = next_id++;
- xpthread_mutex_unlock(&mutex);
-
+ atomic_add(&next_id, 1, &id);
return json_integer_create(id);
}
diff --git a/lib/lockfile.c b/lib/lockfile.c
index 50a4e0c07..43e55927e 100644
--- a/lib/lockfile.c
+++ b/lib/lockfile.c
@@ -53,10 +53,10 @@ struct lockfile {
* descriptor for a file on which a process holds a lock drops *all* locks on
* that file. That means that we can't afford to open a lockfile more than
* once. */
-static struct hmap lock_table = HMAP_INITIALIZER(&lock_table);
-
-/* Protects 'lock_table'. */
-static pthread_mutex_t lock_table_mutex = PTHREAD_MUTEX_INITIALIZER;
+static struct ovs_mutex lock_table_mutex = OVS_MUTEX_INITIALIZER;
+static struct hmap lock_table__ = HMAP_INITIALIZER(&lock_table__);
+static struct hmap *const lock_table OVS_GUARDED_BY(lock_table_mutex)
+ = &lock_table__;
static void lockfile_unhash(struct lockfile *);
static int lockfile_try_lock(const char *name, pid_t *pidp,
@@ -110,9 +110,9 @@ lockfile_lock(const char *file, struct lockfile **lockfilep)
lock_name = lockfile_name(file);
- xpthread_mutex_lock(&lock_table_mutex);
+ ovs_mutex_lock(&lock_table_mutex);
error = lockfile_try_lock(lock_name, &pid, lockfilep);
- xpthread_mutex_unlock(&lock_table_mutex);
+ ovs_mutex_unlock(&lock_table_mutex);
if (error) {
COVERAGE_INC(lockfile_error);
@@ -138,9 +138,9 @@ void
lockfile_unlock(struct lockfile *lockfile)
{
if (lockfile) {
- xpthread_mutex_lock(&lock_table_mutex);
+ ovs_mutex_lock(&lock_table_mutex);
lockfile_unhash(lockfile);
- xpthread_mutex_unlock(&lock_table_mutex);
+ ovs_mutex_unlock(&lock_table_mutex);
COVERAGE_INC(lockfile_unlock);
free(lockfile->name);
@@ -156,12 +156,14 @@ lockfile_postfork(void)
{
struct lockfile *lockfile;
- HMAP_FOR_EACH (lockfile, hmap_node, &lock_table) {
+ ovs_mutex_lock(&lock_table_mutex);
+ HMAP_FOR_EACH (lockfile, hmap_node, lock_table) {
if (lockfile->fd >= 0) {
VLOG_WARN("%s: child does not inherit lock", lockfile->name);
lockfile_unhash(lockfile);
}
}
+ ovs_mutex_unlock(&lock_table_mutex);
}
static uint32_t
@@ -172,12 +174,12 @@ lockfile_hash(dev_t device, ino_t inode)
}
static struct lockfile *
-lockfile_find(dev_t device, ino_t inode)
+lockfile_find(dev_t device, ino_t inode) OVS_REQUIRES(&lock_table_mutex)
{
struct lockfile *lockfile;
HMAP_FOR_EACH_WITH_HASH (lockfile, hmap_node,
- lockfile_hash(device, inode), &lock_table) {
+ lockfile_hash(device, inode), lock_table) {
if (lockfile->device == device && lockfile->inode == inode) {
return lockfile;
}
@@ -186,17 +188,18 @@ lockfile_find(dev_t device, ino_t inode)
}
static void
-lockfile_unhash(struct lockfile *lockfile)
+lockfile_unhash(struct lockfile *lockfile) OVS_REQUIRES(&lock_table_mutex)
{
if (lockfile->fd >= 0) {
close(lockfile->fd);
lockfile->fd = -1;
- hmap_remove(&lock_table, &lockfile->hmap_node);
+ hmap_remove(lock_table, &lockfile->hmap_node);
}
}
static struct lockfile *
lockfile_register(const char *name, dev_t device, ino_t inode, int fd)
+ OVS_REQUIRES(&lock_table_mutex)
{
struct lockfile *lockfile;
@@ -211,13 +214,14 @@ lockfile_register(const char *name, dev_t device, ino_t inode, int fd)
lockfile->device = device;
lockfile->inode = inode;
lockfile->fd = fd;
- hmap_insert(&lock_table, &lockfile->hmap_node,
+ hmap_insert(lock_table, &lockfile->hmap_node,
lockfile_hash(device, inode));
return lockfile;
}
static int
lockfile_try_lock(const char *name, pid_t *pidp, struct lockfile **lockfilep)
+ OVS_REQUIRES(&lock_table_mutex)
{
struct flock l;
struct stat s;
diff --git a/lib/netdev-dummy.c b/lib/netdev-dummy.c
index 82473a177..c4f58b7a2 100644
--- a/lib/netdev-dummy.c
+++ b/lib/netdev-dummy.c
@@ -241,16 +241,11 @@ static int
netdev_dummy_create(const struct netdev_class *class, const char *name,
struct netdev **netdevp)
{
- static unsigned int next_n = 0xaa550000;
- static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
-
+ static atomic_uint next_n = ATOMIC_VAR_INIT(0xaa550000);
struct netdev_dummy *netdev;
unsigned int n;
- xpthread_mutex_lock(&mutex);
- n = next_n++;
- xpthread_mutex_unlock(&mutex);
-
+ atomic_add(&next_n, 1, &n);
netdev = xzalloc(sizeof *netdev);
netdev_init(&netdev->up, name, class);
netdev->hwaddr[0] = 0xaa;
diff --git a/lib/netlink-socket.c b/lib/netlink-socket.c
index da32284b1..99bd4cc05 100644
--- a/lib/netlink-socket.c
+++ b/lib/netlink-socket.c
@@ -1012,8 +1012,8 @@ struct nl_pool {
int n;
};
-static struct nl_pool pools[MAX_LINKS];
-static pthread_mutex_t pool_mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
+static struct ovs_mutex pool_mutex = OVS_ADAPTIVE_MUTEX_INITIALIZER;
+static struct nl_pool pools[MAX_LINKS] OVS_GUARDED_BY(pool_mutex);
static int
nl_pool_alloc(int protocol, struct nl_sock **sockp)
@@ -1023,12 +1023,12 @@ nl_pool_alloc(int protocol, struct nl_sock **sockp)
ovs_assert(protocol >= 0 && protocol < ARRAY_SIZE(pools));
- xpthread_mutex_lock(&pool_mutex);
+ ovs_mutex_lock(&pool_mutex);
pool = &pools[protocol];
if (pool->n > 0) {
sock = pool->socks[--pool->n];
}
- xpthread_mutex_unlock(&pool_mutex);
+ ovs_mutex_unlock(&pool_mutex);
if (sock) {
*sockp = sock;
@@ -1044,12 +1044,12 @@ nl_pool_release(struct nl_sock *sock)
if (sock) {
struct nl_pool *pool = &pools[sock->protocol];
- xpthread_mutex_lock(&pool_mutex);
+ ovs_mutex_lock(&pool_mutex);
if (pool->n < ARRAY_SIZE(pool->socks)) {
pool->socks[pool->n++] = sock;
sock = NULL;
}
- xpthread_mutex_unlock(&pool_mutex);
+ ovs_mutex_unlock(&pool_mutex);
nl_sock_destroy(sock);
}
diff --git a/lib/ofp-msgs.c b/lib/ofp-msgs.c
index 8bb1754f6..d136f73d4 100644
--- a/lib/ofp-msgs.c
+++ b/lib/ofp-msgs.c
@@ -110,14 +110,10 @@ static enum ofperr ofpraw_from_ofphdrs(enum ofpraw *, const struct ofphdrs *);
static ovs_be32
alloc_xid(void)
{
- static uint32_t next_xid = 1;
- static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
+ static atomic_uint32_t next_xid = ATOMIC_VAR_INIT(1);
uint32_t xid;
- xpthread_mutex_lock(&mutex);
- xid = next_xid++;
- xpthread_mutex_unlock(&mutex);
-
+ atomic_add(&next_xid, 1, &xid);
return htonl(xid);
}
diff --git a/lib/ofp-parse.c b/lib/ofp-parse.c
index 4b8036db3..5cb39f5a6 100644
--- a/lib/ofp-parse.c
+++ b/lib/ofp-parse.c
@@ -1520,15 +1520,11 @@ static char * WARN_UNUSED_RESULT
parse_flow_monitor_request__(struct ofputil_flow_monitor_request *fmr,
const char *str_, char *string)
{
- static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
- static uint32_t id;
-
+ static atomic_uint32_t id = ATOMIC_VAR_INIT(0);
char *save_ptr = NULL;
char *name;
- xpthread_mutex_lock(&mutex);
- fmr->id = id++;
- xpthread_mutex_unlock(&mutex);
+ atomic_add(&id, 1, &fmr->id);
fmr->flags = (NXFMF_INITIAL | NXFMF_ADD | NXFMF_DELETE | NXFMF_MODIFY
| NXFMF_OWN | NXFMF_ACTIONS);
diff --git a/lib/ovs-atomic-gcc4+.c b/lib/ovs-atomic-gcc4+.c
index aeff84579..169384873 100644
--- a/lib/ovs-atomic-gcc4+.c
+++ b/lib/ovs-atomic-gcc4+.c
@@ -20,7 +20,7 @@
#include "ovs-thread.h"
#if OVS_ATOMIC_GCC4P_IMPL
-static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
+static struct ovs_mutex mutex = OVS_ADAPTIVE_MUTEX_INITIALIZER;
#define DEFINE_LOCKED_OP(TYPE, NAME, OPERATOR) \
TYPE##_t \
@@ -28,10 +28,10 @@ static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
{ \
TYPE##_t old_value; \
\
- xpthread_mutex_lock(&mutex); \
+ ovs_mutex_lock(&mutex); \
old_value = u->value; \
u->value OPERATOR arg; \
- xpthread_mutex_unlock(&mutex); \
+ ovs_mutex_unlock(&mutex); \
\
return old_value; \
}
@@ -42,9 +42,9 @@ static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
{ \
TYPE##_t value; \
\
- xpthread_mutex_lock(&mutex); \
+ ovs_mutex_lock(&mutex); \
value = u->value; \
- xpthread_mutex_unlock(&mutex); \
+ ovs_mutex_unlock(&mutex); \
\
return value; \
} \
@@ -52,9 +52,9 @@ static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
void \
locked_##TYPE##_store(struct locked_##TYPE *u, TYPE##_t value) \
{ \
- xpthread_mutex_lock(&mutex); \
+ ovs_mutex_lock(&mutex); \
u->value = value; \
- xpthread_mutex_unlock(&mutex); \
+ ovs_mutex_unlock(&mutex); \
} \
DEFINE_LOCKED_OP(TYPE, add, +=); \
DEFINE_LOCKED_OP(TYPE, sub, -=); \
diff --git a/lib/ovs-atomic-pthreads.c b/lib/ovs-atomic-pthreads.c
index a501b8256..7e7ef0508 100644
--- a/lib/ovs-atomic-pthreads.c
+++ b/lib/ovs-atomic-pthreads.c
@@ -26,10 +26,10 @@ atomic_flag_test_and_set(volatile atomic_flag *flag_)
atomic_flag *flag = CONST_CAST(atomic_flag *, flag_);
bool old_value;
- xpthread_mutex_lock(&flag->mutex);
+ ovs_mutex_lock(&flag->mutex);
old_value = flag->b;
flag->b = true;
- xpthread_mutex_unlock(&flag->mutex);
+ ovs_mutex_unlock(&flag->mutex);
return old_value;
}
@@ -46,9 +46,9 @@ atomic_flag_clear(volatile atomic_flag *flag_)
{
atomic_flag *flag = CONST_CAST(atomic_flag *, flag_);
- xpthread_mutex_lock(&flag->mutex);
+ ovs_mutex_lock(&flag->mutex);
flag->b = false;
- xpthread_mutex_unlock(&flag->mutex);
+ ovs_mutex_unlock(&flag->mutex);
}
void
diff --git a/lib/ovs-atomic-pthreads.h b/lib/ovs-atomic-pthreads.h
index 2f47a9c97..61a9771ac 100644
--- a/lib/ovs-atomic-pthreads.h
+++ b/lib/ovs-atomic-pthreads.h
@@ -144,7 +144,7 @@ atomic_signal_fence(memory_order order OVS_UNUSED)
typedef struct {
bool b;
- pthread_mutex_t mutex;
+ struct ovs_mutex mutex;
} atomic_flag;
#define ATOMIC_FLAG_INIT { false, PTHREAD_MUTEX_INITIALIZER }
diff --git a/lib/ovs-thread.c b/lib/ovs-thread.c
index abadeb9be..c8b2c1599 100644
--- a/lib/ovs-thread.c
+++ b/lib/ovs-thread.c
@@ -43,6 +43,58 @@ static const char *must_not_fork;
/* True if we created any threads beyond the main initial thread. */
static bool multithreaded;
+#define LOCK_FUNCTION(TYPE, FUN) \
+ void \
+ ovs_##TYPE##_##FUN##_at(const struct ovs_##TYPE *l_, \
+ const char *where) \
+ { \
+ struct ovs_##TYPE *l = CONST_CAST(struct ovs_##TYPE *, l_); \
+ int error = pthread_##TYPE##_##FUN(&l->lock); \
+ if (OVS_UNLIKELY(error)) { \
+ ovs_abort(error, "pthread_%s_%s failed", #TYPE, #FUN); \
+ } \
+ l->where = where; \
+ }
+LOCK_FUNCTION(mutex, lock);
+LOCK_FUNCTION(rwlock, rdlock);
+LOCK_FUNCTION(rwlock, wrlock);
+
+#define TRY_LOCK_FUNCTION(TYPE, FUN) \
+ int \
+ ovs_##TYPE##_##FUN##_at(const struct ovs_##TYPE *l_, \
+ const char *where) \
+ { \
+ struct ovs_##TYPE *l = CONST_CAST(struct ovs_##TYPE *, l_); \
+ int error = pthread_##TYPE##_##FUN(&l->lock); \
+ if (OVS_UNLIKELY(error) && error != EBUSY) { \
+ ovs_abort(error, "pthread_%s_%s failed", #TYPE, #FUN); \
+ } \
+ if (!error) { \
+ l->where = where; \
+ } \
+ return error; \
+ }
+TRY_LOCK_FUNCTION(mutex, trylock);
+TRY_LOCK_FUNCTION(rwlock, tryrdlock);
+TRY_LOCK_FUNCTION(rwlock, trywrlock);
+
+#define UNLOCK_FUNCTION(TYPE, FUN) \
+ void \
+ ovs_##TYPE##_##FUN(const struct ovs_##TYPE *l_) \
+ { \
+ struct ovs_##TYPE *l = CONST_CAST(struct ovs_##TYPE *, l_); \
+ int error; \
+ l->where = NULL; \
+ error = pthread_##TYPE##_##FUN(&l->lock); \
+ if (OVS_UNLIKELY(error)) { \
+ ovs_abort(error, "pthread_%s_%sfailed", #TYPE, #FUN); \
+ } \
+ }
+UNLOCK_FUNCTION(mutex, unlock);
+UNLOCK_FUNCTION(mutex, destroy);
+UNLOCK_FUNCTION(rwlock, unlock);
+UNLOCK_FUNCTION(rwlock, destroy);
+
#define XPTHREAD_FUNC1(FUNCTION, PARAM1) \
void \
x##FUNCTION(PARAM1 arg1) \
@@ -52,16 +104,6 @@ static bool multithreaded;
ovs_abort(error, "%s failed", #FUNCTION); \
} \
}
-#define XPTHREAD_TRY_FUNC1(FUNCTION, PARAM1) \
- int \
- x##FUNCTION(PARAM1 arg1) \
- { \
- int error = FUNCTION(arg1); \
- if (OVS_UNLIKELY(error && error != EBUSY)) { \
- ovs_abort(error, "%s failed", #FUNCTION); \
- } \
- return error; \
- }
#define XPTHREAD_FUNC2(FUNCTION, PARAM1, PARAM2) \
void \
x##FUNCTION(PARAM1 arg1, PARAM2 arg2) \
@@ -72,36 +114,60 @@ static bool multithreaded;
} \
}
-XPTHREAD_FUNC2(pthread_mutex_init, pthread_mutex_t *, pthread_mutexattr_t *);
-XPTHREAD_FUNC1(pthread_mutex_destroy, pthread_mutex_t *);
-XPTHREAD_FUNC1(pthread_mutex_lock, pthread_mutex_t *);
-XPTHREAD_FUNC1(pthread_mutex_unlock, pthread_mutex_t *);
-XPTHREAD_TRY_FUNC1(pthread_mutex_trylock, pthread_mutex_t *);
-
XPTHREAD_FUNC1(pthread_mutexattr_init, pthread_mutexattr_t *);
XPTHREAD_FUNC1(pthread_mutexattr_destroy, pthread_mutexattr_t *);
XPTHREAD_FUNC2(pthread_mutexattr_settype, pthread_mutexattr_t *, int);
XPTHREAD_FUNC2(pthread_mutexattr_gettype, pthread_mutexattr_t *, int *);
-XPTHREAD_FUNC2(pthread_rwlock_init,
- pthread_rwlock_t *, pthread_rwlockattr_t *);
-XPTHREAD_FUNC1(pthread_rwlock_destroy, pthread_rwlock_t *);
-XPTHREAD_FUNC1(pthread_rwlock_rdlock, pthread_rwlock_t *);
-XPTHREAD_FUNC1(pthread_rwlock_wrlock, pthread_rwlock_t *);
-XPTHREAD_FUNC1(pthread_rwlock_unlock, pthread_rwlock_t *);
-XPTHREAD_TRY_FUNC1(pthread_rwlock_tryrdlock, pthread_rwlock_t *);
-XPTHREAD_TRY_FUNC1(pthread_rwlock_trywrlock, pthread_rwlock_t *);
-
XPTHREAD_FUNC2(pthread_cond_init, pthread_cond_t *, pthread_condattr_t *);
XPTHREAD_FUNC1(pthread_cond_destroy, pthread_cond_t *);
XPTHREAD_FUNC1(pthread_cond_signal, pthread_cond_t *);
XPTHREAD_FUNC1(pthread_cond_broadcast, pthread_cond_t *);
-XPTHREAD_FUNC2(pthread_cond_wait, pthread_cond_t *, pthread_mutex_t *);
typedef void destructor_func(void *);
XPTHREAD_FUNC2(pthread_key_create, pthread_key_t *, destructor_func *);
void
+ovs_mutex_init(const struct ovs_mutex *l_, int type)
+{
+ struct ovs_mutex *l = CONST_CAST(struct ovs_mutex *, l_);
+ pthread_mutexattr_t attr;
+ int error;
+
+ l->where = NULL;
+ xpthread_mutexattr_init(&attr);
+ xpthread_mutexattr_settype(&attr, type);
+ error = pthread_mutex_init(&l->lock, &attr);
+ if (OVS_UNLIKELY(error)) {
+ ovs_abort(error, "pthread_mutex_init failed");
+ }
+ xpthread_mutexattr_destroy(&attr);
+}
+
+void
+ovs_rwlock_init(const struct ovs_rwlock *l_)
+{
+ struct ovs_rwlock *l = CONST_CAST(struct ovs_rwlock *, l_);
+ int error;
+
+ l->where = NULL;
+ error = pthread_rwlock_init(&l->lock, NULL);
+ if (OVS_UNLIKELY(error)) {
+ ovs_abort(error, "pthread_rwlock_init failed");
+ }
+}
+
+void
+ovs_mutex_cond_wait(pthread_cond_t *cond, const struct ovs_mutex *mutex_)
+{
+ struct ovs_mutex *mutex = CONST_CAST(struct ovs_mutex *, mutex_);
+ int error = pthread_cond_wait(cond, &mutex->lock);
+ if (OVS_UNLIKELY(error)) {
+ ovs_abort(error, "pthread_cond_wait failed");
+ }
+}
+
+void
xpthread_create(pthread_t *threadp, pthread_attr_t *attr,
void *(*start)(void *), void *arg)
{
@@ -120,19 +186,19 @@ xpthread_create(pthread_t *threadp, pthread_attr_t *attr,
bool
ovsthread_once_start__(struct ovsthread_once *once)
{
- xpthread_mutex_lock(&once->mutex);
+ ovs_mutex_lock(&once->mutex);
if (!ovsthread_once_is_done__(once)) {
return false;
}
- xpthread_mutex_unlock(&once->mutex);
+ ovs_mutex_unlock(&once->mutex);
return true;
}
-void OVS_RELEASES(once)
+void
ovsthread_once_done(struct ovsthread_once *once)
{
atomic_store(&once->done, true);
- xpthread_mutex_unlock(&once->mutex);
+ ovs_mutex_unlock(&once->mutex);
}
/* Asserts that the process has not yet created any threads (beyond the initial
@@ -169,7 +235,7 @@ xfork_at(const char *where)
pid = fork();
if (pid < 0) {
- VLOG_FATAL("fork failed (%s)", ovs_strerror(errno));
+ VLOG_FATAL("%s: fork failed (%s)", where, ovs_strerror(errno));
}
return pid;
}
diff --git a/lib/ovs-thread.h b/lib/ovs-thread.h
index 98428fd04..f5e171ac6 100644
--- a/lib/ovs-thread.h
+++ b/lib/ovs-thread.h
@@ -23,65 +23,118 @@
#include "ovs-atomic.h"
#include "util.h"
-/* glibc has some non-portable mutex types and initializers:
- *
- * - PTHREAD_MUTEX_ADAPTIVE_NP is a mutex type that works as a spinlock that
- * falls back to a mutex after spinning for some number of iterations.
+
+/* Mutex. */
+struct OVS_LOCKABLE ovs_mutex {
+ pthread_mutex_t lock;
+ const char *where;
+};
+
+/* "struct ovs_mutex" initializers:
*
- * - PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP is a non-portable initializer
- * for an error-checking mutex.
+ * - OVS_MUTEX_INITIALIZER: common case.
*
- * We use these definitions to fall back to PTHREAD_MUTEX_NORMAL instead in
- * these cases.
+ * - OVS_ADAPTIVE_MUTEX_INITIALIZER for a mutex that spins briefly then goes
+ * to sleeps after some number of iterations.
*
- * (glibc has other non-portable initializers, but we can't reasonably
- * substitute for them here.) */
+ * - OVS_ERRORCHECK_MUTEX_INITIALIZER for a mutex that is used for
+ * error-checking. */
+#define OVS_MUTEX_INITIALIZER { PTHREAD_MUTEX_INITIALIZER, NULL }
#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
-#define PTHREAD_MUTEX_ADAPTIVE PTHREAD_MUTEX_ADAPTIVE_NP
-#define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER \
- PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
+#define OVS_ADAPTIVE_MUTEX_INITIALIZER \
+ { PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, NULL }
#else
-#define PTHREAD_MUTEX_ADAPTIVE PTHREAD_MUTEX_NORMAL
-#define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+#define OVS_ADAPTIVE_MUTEX_INITIALIZER OVS_MUTEX_INITIALIZER
#endif
-
#ifdef PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
-#define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER \
- PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
+#define OVS_ERRORCHECK_MUTEX_INITIALIZER \
+ { PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, NULL }
#else
-#define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+#define OVS_ERRORCHECK_MUTEX_INITIALIZER OVS_MUTEX_INITIALIZER
#endif
-/* Simple wrappers for pthreads functions. Most of these functions abort the
- * process with an error message on any error. The *_trylock() functions are
- * exceptions: they pass through a 0 or EBUSY return value to the caller and
- * abort on any other error. */
+/* Mutex types, suitable for use with pthread_mutexattr_settype().
+ * There is only one nonstandard type:
+ *
+ * - PTHREAD_MUTEX_ADAPTIVE_NP, the type used for
+ * OVS_ADAPTIVE_MUTEX_INITIALIZER. */
+#ifdef PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP
+#define OVS_MUTEX_ADAPTIVE PTHREAD_MUTEX_ADAPTIVE_NP
+#else
+#define OVS_MUTEX_ADAPTIVE PTHREAD_MUTEX_NORMAL
+#endif
+
+/* ovs_mutex functions analogous to pthread_mutex_*() functions.
+ *
+ * Most of these functions abort the process with an error message on any
+ * error. ovs_mutex_trylock() is an exception: it passes through a 0 or EBUSY
+ * return value to the caller and aborts on any other error. */
+void ovs_mutex_init(const struct ovs_mutex *, int type);
+void ovs_mutex_destroy(const struct ovs_mutex *);
+void ovs_mutex_unlock(const struct ovs_mutex *mutex) OVS_RELEASES(mutex);
+void ovs_mutex_lock_at(const struct ovs_mutex *mutex, const char *where)
+ OVS_ACQUIRES(mutex);
+#define ovs_mutex_lock(mutex) \
+ ovs_mutex_lock_at(mutex, SOURCE_LOCATOR)
-void xpthread_mutex_init(pthread_mutex_t *, pthread_mutexattr_t *);
-void xpthread_mutex_destroy(pthread_mutex_t *);
-void xpthread_mutex_lock(pthread_mutex_t *mutex) OVS_ACQUIRES(mutex);
-void xpthread_mutex_unlock(pthread_mutex_t *mutex) OVS_RELEASES(mutex);
-int xpthread_mutex_trylock(pthread_mutex_t *);
+int ovs_mutex_trylock_at(const struct ovs_mutex *mutex, const char *where)
+ OVS_TRY_LOCK(0, mutex);
+#define ovs_mutex_trylock(mutex) \
+ ovs_mutex_trylock_at(mutex, SOURCE_LOCATOR)
+void ovs_mutex_cond_wait(pthread_cond_t *, const struct ovs_mutex *);
+
+/* Wrappers for pthread_mutexattr_*() that abort the process on any error. */
void xpthread_mutexattr_init(pthread_mutexattr_t *);
void xpthread_mutexattr_destroy(pthread_mutexattr_t *);
void xpthread_mutexattr_settype(pthread_mutexattr_t *, int type);
void xpthread_mutexattr_gettype(pthread_mutexattr_t *, int *typep);
-void xpthread_rwlock_init(pthread_rwlock_t *, pthread_rwlockattr_t *);
-void xpthread_rwlock_destroy(pthread_rwlock_t *);
-void xpthread_rwlock_rdlock(pthread_rwlock_t *rwlock) OVS_ACQUIRES(rwlock);
-void xpthread_rwlock_wrlock(pthread_rwlock_t *rwlock) OVS_ACQUIRES(rwlock);
-void xpthread_rwlock_unlock(pthread_rwlock_t *rwlock) OVS_RELEASES(rwlock);
-int xpthread_rwlock_tryrdlock(pthread_rwlock_t *);
-int xpthread_rwlock_trywrlock(pthread_rwlock_t *);
+/* Read-write lock. */
+struct OVS_LOCKABLE ovs_rwlock {
+ pthread_rwlock_t lock;
+ const char *where;
+};
+
+/* Initializer. */
+#define OVS_RWLOCK_INITIALIZER { PTHREAD_RWLOCK_INITIALIZER, NULL }
+
+/* ovs_rwlock functions analogous to pthread_rwlock_*() functions.
+ *
+ * Most of these functions abort the process with an error message on any
+ * error. The "trylock" functions are exception: they pass through a 0 or
+ * EBUSY return value to the caller and abort on any other error. */
+void ovs_rwlock_init(const struct ovs_rwlock *);
+void ovs_rwlock_destroy(const struct ovs_rwlock *);
+void ovs_rwlock_unlock(const struct ovs_rwlock *rwlock) OVS_RELEASES(rwlock);
+
+void ovs_rwlock_wrlock_at(const struct ovs_rwlock *rwlock, const char *where)
+ OVS_ACQ_WRLOCK(rwlock);
+#define ovs_rwlock_wrlock(rwlock) \
+ ovs_rwlock_wrlock_at(rwlock, SOURCE_LOCATOR);
+int ovs_rwlock_trywrlock_at(const struct ovs_rwlock *rwlock, const char *where)
+ OVS_TRY_WRLOCK(0, rwlock);
+#define ovs_rwlock_trywrlock(rwlock) \
+ ovs_rwlock_trywrlock_at(rwlock, SOURCE_LOCATOR)
+
+void ovs_rwlock_rdlock_at(const struct ovs_rwlock *rwlock, const char *where)
+ OVS_ACQ_RDLOCK(rwlock);
+#define ovs_rwlock_rdlock(rwlock) \
+ ovs_rwlock_rdlock_at(rwlock, SOURCE_LOCATOR);
+
+int ovs_rwlock_tryrdlock_at(const struct ovs_rwlock *rwlock, const char *where)
+ OVS_TRY_RDLOCK(0, rwlock);
+#define ovs_rwlock_tryrdlock(rwlock) \
+ ovs_rwlock_tryrdlock_at(rwlock, SOURCE_LOCATOR)
+
+/* Wrappers for xpthread_cond_*() that abort the process on any error.
+ *
+ * Use ovs_mutex_cond_wait() to wait for a condition. */
void xpthread_cond_init(pthread_cond_t *, pthread_condattr_t *);
void xpthread_cond_destroy(pthread_cond_t *);
void xpthread_cond_signal(pthread_cond_t *);
void xpthread_cond_broadcast(pthread_cond_t *);
-void xpthread_cond_wait(pthread_cond_t *, pthread_mutex_t *mutex)
- OVS_MUST_HOLD(mutex);
#ifdef __CHECKER__
/* Replace these functions by the macros already defined in the <pthread.h>
@@ -335,19 +388,22 @@ void xpthread_create(pthread_t *, pthread_attr_t *, void *(*)(void *), void *);
struct ovsthread_once {
atomic_bool done;
- pthread_mutex_t mutex;
+ struct ovs_mutex mutex;
};
#define OVSTHREAD_ONCE_INITIALIZER \
{ \
ATOMIC_VAR_INIT(false), \
- PTHREAD_ADAPTIVE_MUTEX_INITIALIZER, \
+ OVS_ADAPTIVE_MUTEX_INITIALIZER, \
}
-static inline bool ovsthread_once_start(struct ovsthread_once *);
-void ovsthread_once_done(struct ovsthread_once *once) OVS_RELEASES(once);
+static inline bool ovsthread_once_start(struct ovsthread_once *once)
+ OVS_TRY_LOCK(true, &once->mutex);
+void ovsthread_once_done(struct ovsthread_once *once)
+ OVS_RELEASES(&once->mutex);
-bool ovsthread_once_start__(struct ovsthread_once *);
+bool ovsthread_once_start__(struct ovsthread_once *once)
+ OVS_TRY_LOCK(false, &once->mutex);
static inline bool
ovsthread_once_is_done__(const struct ovsthread_once *once)
@@ -374,7 +430,7 @@ ovsthread_once_start(struct ovsthread_once *once)
#ifdef __CHECKER__
#define ovsthread_once_start(ONCE) \
- ((ONCE)->done ? false : ({ OVS_ACQUIRE(ONCE); true; }))
+ ((ONCE)->done ? false : ({ OVS_MACRO_LOCK((&ONCE->mutex)); true; }))
#endif
void assert_single_threaded_at(const char *where);
diff --git a/lib/timeval.c b/lib/timeval.c
index 4ed3ebff5..05da99e56 100644
--- a/lib/timeval.c
+++ b/lib/timeval.c
@@ -41,7 +41,7 @@ VLOG_DEFINE_THIS_MODULE(timeval);
struct clock {
clockid_t id; /* CLOCK_MONOTONIC or CLOCK_REALTIME. */
- pthread_rwlock_t rwlock; /* Mutual exclusion for 'cache'. */
+ struct ovs_rwlock rwlock; /* Mutual exclusion for 'cache'. */
/* Features for use by unit tests. Protected by 'rwlock'. */
struct timespec warp; /* Offset added for unit tests. */
@@ -83,7 +83,7 @@ init_clock(struct clock *c, clockid_t id)
{
memset(c, 0, sizeof *c);
c->id = id;
- xpthread_rwlock_init(&c->rwlock, NULL);
+ ovs_rwlock_init(&c->rwlock);
xclock_gettime(c->id, &c->cache);
}
@@ -178,21 +178,21 @@ time_timespec__(struct clock *c, struct timespec *ts)
for (;;) {
/* Use the cached time by preference, but fall through if there's been
* a clock tick. */
- xpthread_rwlock_rdlock(&c->rwlock);
+ ovs_rwlock_rdlock(&c->rwlock);
if (c->stopped || !c->tick) {
timespec_add(ts, &c->cache, &c->warp);
- xpthread_rwlock_unlock(&c->rwlock);
+ ovs_rwlock_unlock(&c->rwlock);
return;
}
- xpthread_rwlock_unlock(&c->rwlock);
+ ovs_rwlock_unlock(&c->rwlock);
/* Refresh the cache. */
- xpthread_rwlock_wrlock(&c->rwlock);
+ ovs_rwlock_wrlock(&c->rwlock);
if (c->tick) {
c->tick = false;
xclock_gettime(c->id, &c->cache);
}
- xpthread_rwlock_unlock(&c->rwlock);
+ ovs_rwlock_unlock(&c->rwlock);
}
}
@@ -568,9 +568,9 @@ timeval_stop_cb(struct unixctl_conn *conn,
int argc OVS_UNUSED, const char *argv[] OVS_UNUSED,
void *aux OVS_UNUSED)
{
- xpthread_rwlock_wrlock(&monotonic_clock.rwlock);
+ ovs_rwlock_wrlock(&monotonic_clock.rwlock);
monotonic_clock.stopped = true;
- xpthread_rwlock_unlock(&monotonic_clock.rwlock);
+ ovs_rwlock_unlock(&monotonic_clock.rwlock);
unixctl_command_reply(conn, NULL);
}
@@ -596,9 +596,9 @@ timeval_warp_cb(struct unixctl_conn *conn,
ts.tv_sec = msecs / 1000;
ts.tv_nsec = (msecs % 1000) * 1000 * 1000;
- xpthread_rwlock_wrlock(&monotonic_clock.rwlock);
+ ovs_rwlock_wrlock(&monotonic_clock.rwlock);
timespec_add(&monotonic_clock.warp, &monotonic_clock.warp, &ts);
- xpthread_rwlock_unlock(&monotonic_clock.rwlock);
+ ovs_rwlock_unlock(&monotonic_clock.rwlock);
unixctl_command_reply(conn, "warped");
}
diff --git a/lib/uuid.c b/lib/uuid.c
index 7b03fe0b6..18748ea95 100644
--- a/lib/uuid.c
+++ b/lib/uuid.c
@@ -81,19 +81,19 @@ uuid_init(void)
void
uuid_generate(struct uuid *uuid)
{
- static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
+ static struct ovs_mutex mutex = OVS_ADAPTIVE_MUTEX_INITIALIZER;
uint64_t copy[2];
uuid_init();
/* Copy out the counter's current value, then increment it. */
- xpthread_mutex_lock(&mutex);
+ ovs_mutex_lock(&mutex);
copy[0] = counter[0];
copy[1] = counter[1];
if (++counter[1] == 0) {
counter[0]++;
}
- xpthread_mutex_unlock(&mutex);
+ ovs_mutex_unlock(&mutex);
/* AES output is exactly 16 bytes, so we encrypt directly into 'uuid'. */
aes128_encrypt(&key, copy, uuid);
diff --git a/lib/vlog.c b/lib/vlog.c
index c2fc8e024..6f0a25692 100644
--- a/lib/vlog.c
+++ b/lib/vlog.c
@@ -82,10 +82,15 @@ struct vlog_module *vlog_modules[] = {
#define n_vlog_modules ARRAY_SIZE(vlog_modules)
#endif
+/* Protects the 'pattern' in all "struct facility"s, so that a race between
+ * changing and reading the pattern does not cause an access to freed
+ * memory. */
+static struct ovs_rwlock pattern_rwlock = OVS_RWLOCK_INITIALIZER;
+
/* Information about each facility. */
struct facility {
const char *name; /* Name. */
- char *pattern; /* Current pattern (see 'pattern_rwlock'). */
+ char *pattern OVS_GUARDED_BY(pattern_rwlock); /* Current pattern. */
bool default_pattern; /* Whether current pattern is the default. */
};
static struct facility facilities[VLF_N_FACILITIES] = {
@@ -94,11 +99,6 @@ static struct facility facilities[VLF_N_FACILITIES] = {
#undef VLOG_FACILITY
};
-/* Protects the 'pattern' in all "struct facility"s, so that a race between
- * changing and reading the pattern does not cause an access to freed
- * memory. */
-static pthread_rwlock_t pattern_rwlock = PTHREAD_RWLOCK_INITIALIZER;
-
/* Sequence number for the message currently being composed. */
DEFINE_PER_THREAD_DATA(unsigned int, msg_num, 0);
@@ -106,15 +106,15 @@ DEFINE_PER_THREAD_DATA(unsigned int, msg_num, 0);
*
* All of the following is protected by 'log_file_mutex', which nests inside
* pattern_rwlock. */
-static pthread_mutex_t log_file_mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
-static char *log_file_name;
-static int log_fd = -1;
-static struct async_append *log_writer;
+static struct ovs_mutex log_file_mutex = OVS_ADAPTIVE_MUTEX_INITIALIZER;
+static char *log_file_name OVS_GUARDED_BY(log_file_mutex);
+static int log_fd OVS_GUARDED_BY(log_file_mutex) = -1;
+static struct async_append *log_writer OVS_GUARDED_BY(log_file_mutex);
static void format_log_message(const struct vlog_module *, enum vlog_level,
enum vlog_facility,
const char *message, va_list, struct ds *)
- PRINTF_FORMAT(4, 0);
+ PRINTF_FORMAT(4, 0) OVS_REQ_RDLOCK(&pattern_rwlock);
/* Searches the 'n_names' in 'names'. Returns the index of a match for
* 'target', or 'n_names' if no name matches. */
@@ -201,8 +201,8 @@ vlog_get_level(const struct vlog_module *module, enum vlog_facility facility)
return module->levels[facility];
}
-static void OVS_MUST_HOLD(log_file_mutex)
-update_min_level(struct vlog_module *module)
+static void
+update_min_level(struct vlog_module *module) OVS_REQUIRES(&log_file_mutex)
{
enum vlog_facility facility;
@@ -224,7 +224,7 @@ set_facility_level(enum vlog_facility facility, struct vlog_module *module,
assert(facility >= 0 && facility < VLF_N_FACILITIES);
assert(level < VLL_N_LEVELS);
- xpthread_mutex_lock(&log_file_mutex);
+ ovs_mutex_lock(&log_file_mutex);
if (!module) {
struct vlog_module **mp;
@@ -236,7 +236,7 @@ set_facility_level(enum vlog_facility facility, struct vlog_module *module,
module->levels[facility] = level;
update_min_level(module);
}
- xpthread_mutex_unlock(&log_file_mutex);
+ ovs_mutex_unlock(&log_file_mutex);
}
/* Sets the logging level for the given 'module' and 'facility' to 'level'. A
@@ -261,14 +261,14 @@ do_set_pattern(enum vlog_facility facility, const char *pattern)
{
struct facility *f = &facilities[facility];
- xpthread_rwlock_wrlock(&pattern_rwlock);
+ ovs_rwlock_wrlock(&pattern_rwlock);
if (!f->default_pattern) {
free(f->pattern);
} else {
f->default_pattern = false;
}
f->pattern = xstrdup(pattern);
- xpthread_rwlock_unlock(&pattern_rwlock);
+ ovs_rwlock_unlock(&pattern_rwlock);
}
/* Sets the pattern for the given 'facility' to 'pattern'. */
@@ -297,6 +297,7 @@ vlog_set_log_file(const char *file_name)
struct stat new_stat;
int new_log_fd;
bool same_file;
+ bool log_close;
/* Open new log file. */
new_log_file_name = (file_name
@@ -311,14 +312,14 @@ vlog_set_log_file(const char *file_name)
}
/* If the new log file is the same one we already have open, bail out. */
- xpthread_mutex_lock(&log_file_mutex);
+ ovs_mutex_lock(&log_file_mutex);
same_file = (log_fd >= 0
&& new_log_fd >= 0
&& !fstat(log_fd, &old_stat)
&& !fstat(new_log_fd, &new_stat)
&& old_stat.st_dev == new_stat.st_dev
&& old_stat.st_ino == new_stat.st_ino);
- xpthread_mutex_unlock(&log_file_mutex);
+ ovs_mutex_unlock(&log_file_mutex);
if (same_file) {
close(new_log_fd);
free(new_log_file_name);
@@ -326,12 +327,15 @@ vlog_set_log_file(const char *file_name)
}
/* Log closing old log file (we can't log while holding log_file_mutex). */
- if (log_fd >= 0) {
+ ovs_mutex_lock(&log_file_mutex);
+ log_close = log_fd >= 0;
+ ovs_mutex_unlock(&log_file_mutex);
+ if (log_close) {
VLOG_INFO("closing log file");
}
/* Close old log file, if any, and install new one. */
- xpthread_mutex_lock(&log_file_mutex);
+ ovs_mutex_lock(&log_file_mutex);
if (log_fd >= 0) {
free(log_file_name);
close(log_fd);
@@ -345,7 +349,7 @@ vlog_set_log_file(const char *file_name)
for (mp = vlog_modules; mp < &vlog_modules[n_vlog_modules]; mp++) {
update_min_level(*mp);
}
- xpthread_mutex_unlock(&log_file_mutex);
+ ovs_mutex_unlock(&log_file_mutex);
/* Log opening new log file (we can't log while holding log_file_mutex). */
VLOG_INFO("opened log file %s", new_log_file_name);
@@ -362,9 +366,9 @@ vlog_reopen_log_file(void)
{
char *fn;
- xpthread_mutex_lock(&log_file_mutex);
+ ovs_mutex_lock(&log_file_mutex);
fn = log_file_name ? xstrdup(log_file_name) : NULL;
- xpthread_mutex_unlock(&log_file_mutex);
+ ovs_mutex_unlock(&log_file_mutex);
if (fn) {
int error = vlog_set_log_file(fn);
@@ -504,7 +508,13 @@ static void
vlog_unixctl_reopen(struct unixctl_conn *conn, int argc OVS_UNUSED,
const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
- if (log_file_name) {
+ bool has_log_file;
+
+ ovs_mutex_lock(&log_file_mutex);
+ has_log_file = log_file_name != NULL;
+ ovs_mutex_unlock(&log_file_mutex);
+
+ if (has_log_file) {
int error = vlog_reopen_log_file();
if (error) {
unixctl_command_reply_error(conn, ovs_strerror(errno));
@@ -786,7 +796,11 @@ vlog_valist(const struct vlog_module *module, enum vlog_level level,
{
bool log_to_console = module->levels[VLF_CONSOLE] >= level;
bool log_to_syslog = module->levels[VLF_SYSLOG] >= level;
- bool log_to_file = module->levels[VLF_FILE] >= level && log_fd >= 0;
+ bool log_to_file;
+
+ ovs_mutex_lock(&log_file_mutex);
+ log_to_file = module->levels[VLF_FILE] >= level && log_fd >= 0;
+ ovs_mutex_unlock(&log_file_mutex);
if (log_to_console || log_to_syslog || log_to_file) {
int save_errno = errno;
struct ds s;
@@ -797,7 +811,7 @@ vlog_valist(const struct vlog_module *module, enum vlog_level level,
ds_reserve(&s, 1024);
++*msg_num_get();
- xpthread_rwlock_rdlock(&pattern_rwlock);
+ ovs_rwlock_rdlock(&pattern_rwlock);
if (log_to_console) {
format_log_message(module, level, VLF_CONSOLE, message, args, &s);
ds_put_char(&s, '\n');
@@ -820,16 +834,16 @@ vlog_valist(const struct vlog_module *module, enum vlog_level level,
format_log_message(module, level, VLF_FILE, message, args, &s);
ds_put_char(&s, '\n');
- xpthread_mutex_lock(&log_file_mutex);
+ ovs_mutex_lock(&log_file_mutex);
if (log_fd >= 0) {
async_append_write(log_writer, s.string, s.length);
if (level == VLL_EMER) {
async_append_flush(log_writer);
}
}
- xpthread_mutex_unlock(&log_file_mutex);
+ ovs_mutex_unlock(&log_file_mutex);
}
- xpthread_rwlock_unlock(&pattern_rwlock);
+ ovs_rwlock_unlock(&pattern_rwlock);
ds_destroy(&s);
errno = save_errno;
@@ -929,7 +943,7 @@ vlog_should_drop(const struct vlog_module *module, enum vlog_level level,
return true;
}
- xpthread_mutex_lock(&rl->mutex);
+ ovs_mutex_lock(&rl->mutex);
if (!token_bucket_withdraw(&rl->token_bucket, VLOG_MSG_TOKENS)) {
time_t now = time_now();
if (!rl->n_dropped) {
@@ -937,19 +951,19 @@ vlog_should_drop(const struct vlog_module *module, enum vlog_level level,
}
rl->last_dropped = now;
rl->n_dropped++;
- xpthread_mutex_unlock(&rl->mutex);
+ ovs_mutex_unlock(&rl->mutex);
return true;
}
if (!rl->n_dropped) {
- xpthread_mutex_unlock(&rl->mutex);
+ ovs_mutex_unlock(&rl->mutex);
} else {
time_t now = time_now();
unsigned int n_dropped = rl->n_dropped;
unsigned int first_dropped_elapsed = now - rl->first_dropped;
unsigned int last_dropped_elapsed = now - rl->last_dropped;
rl->n_dropped = 0;
- xpthread_mutex_unlock(&rl->mutex);
+ ovs_mutex_unlock(&rl->mutex);
vlog(module, level,
"Dropped %u log messages in last %u seconds (most recently, "
diff --git a/lib/vlog.h b/lib/vlog.h
index c111ff6e9..901b3d38a 100644
--- a/lib/vlog.h
+++ b/lib/vlog.h
@@ -104,7 +104,7 @@ struct vlog_rate_limit {
time_t first_dropped; /* Time first message was dropped. */
time_t last_dropped; /* Time of most recent message drop. */
unsigned int n_dropped; /* Number of messages dropped. */
- pthread_mutex_t mutex; /* Mutual exclusion for rate limit. */
+ struct ovs_mutex mutex; /* Mutual exclusion for rate limit. */
};
/* Number of tokens to emit a message. We add 'rate' tokens per millisecond,
@@ -119,7 +119,7 @@ struct vlog_rate_limit {
0, /* first_dropped */ \
0, /* last_dropped */ \
0, /* n_dropped */ \
- PTHREAD_ADAPTIVE_MUTEX_INITIALIZER /* mutex */ \
+ OVS_ADAPTIVE_MUTEX_INITIALIZER /* mutex */ \
}
/* Configuring how each module logs messages. */
diff --git a/vswitchd/system-stats.c b/vswitchd/system-stats.c
index b1642284c..9e3d228ad 100644
--- a/vswitchd/system-stats.c
+++ b/vswitchd/system-stats.c
@@ -506,12 +506,12 @@ get_filesys_stats(struct smap *stats OVS_UNUSED)
#define SYSTEM_STATS_INTERVAL (5 * 1000) /* In milliseconds. */
-static pthread_mutex_t mutex = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER;
+static struct ovs_mutex mutex = OVS_ADAPTIVE_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
-static struct latch latch;
+static struct latch latch OVS_GUARDED_BY(mutex);
static bool enabled;
-static bool started;
-static struct smap *system_stats;
+static bool started OVS_GUARDED_BY(mutex);
+static struct smap *system_stats OVS_GUARDED_BY(mutex);
static void *system_stats_thread_func(void *);
static void discard_stats(void);
@@ -521,7 +521,7 @@ void
system_stats_enable(bool enable)
{
if (enabled != enable) {
- xpthread_mutex_lock(&mutex);
+ ovs_mutex_lock(&mutex);
if (enable) {
if (!started) {
xpthread_create(NULL, NULL, system_stats_thread_func, NULL);
@@ -532,7 +532,7 @@ system_stats_enable(bool enable)
xpthread_cond_signal(&cond);
}
enabled = enable;
- xpthread_mutex_unlock(&mutex);
+ ovs_mutex_unlock(&mutex);
}
}
@@ -549,7 +549,7 @@ system_stats_run(void)
{
struct smap *stats = NULL;
- xpthread_mutex_lock(&mutex);
+ ovs_mutex_lock(&mutex);
if (system_stats) {
latch_poll(&latch);
@@ -560,7 +560,7 @@ system_stats_run(void)
discard_stats();
}
}
- xpthread_mutex_unlock(&mutex);
+ ovs_mutex_unlock(&mutex);
return stats;
}
@@ -576,7 +576,7 @@ system_stats_wait(void)
}
static void
-discard_stats(void)
+discard_stats(void) OVS_REQUIRES(&mutex)
{
if (system_stats) {
smap_destroy(system_stats);
@@ -594,11 +594,11 @@ system_stats_thread_func(void *arg OVS_UNUSED)
long long int next_refresh;
struct smap *stats;
- xpthread_mutex_lock(&mutex);
+ ovs_mutex_lock(&mutex);
while (!enabled) {
- xpthread_cond_wait(&cond, &mutex);
+ ovs_mutex_cond_wait(&cond, &mutex);
}
- xpthread_mutex_unlock(&mutex);
+ ovs_mutex_unlock(&mutex);
stats = xmalloc(sizeof *stats);
smap_init(stats);
@@ -608,11 +608,11 @@ system_stats_thread_func(void *arg OVS_UNUSED)
get_process_stats(stats);
get_filesys_stats(stats);
- xpthread_mutex_lock(&mutex);
+ ovs_mutex_lock(&mutex);
discard_stats();
system_stats = stats;
latch_set(&latch);
- xpthread_mutex_unlock(&mutex);
+ ovs_mutex_unlock(&mutex);
next_refresh = time_msec() + SYSTEM_STATS_INTERVAL;
do {