summaryrefslogtreecommitdiff
path: root/event.c
diff options
context:
space:
mode:
authorDan Rosen <mergeconflict@google.com>2019-03-26 13:33:57 -0400
committerDan Rosen <mergeconflict@google.com>2019-04-03 12:44:50 -0400
commit2f184f8bbf23377bddc8daa1a2c7b40735ee7e2a (patch)
treeba9a64a7831c8a209af752efd9b406a9a2b04a2c /event.c
parent47d348a63130c91f2a6aadef291ff5687275df72 (diff)
downloadlibevent-2f184f8bbf23377bddc8daa1a2c7b40735ee7e2a.tar.gz
evwatch: Add "prepare" and "check" watchers.
Adds two new callbacks: "prepare" watchers, which fire immediately before we poll for I/O, and "check" watchers, which fire immediately after we finish polling and before we process events. This allows other event loops to be embedded into libevent's, and enables certain performance monitoring. Closes: #710
Diffstat (limited to 'event.c')
-rw-r--r-- event.c 34
1 file changed, 33 insertions(+), 1 deletion(-)
diff --git a/event.c b/event.c
index bfd94ebd..34f28ba2 100644
--- a/event.c
+++ b/event.c
@@ -59,6 +59,7 @@
#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/event_compat.h"
+#include "event2/watch.h"
#include "event-internal.h"
#include "defer-internal.h"
#include "evthread-internal.h"
@@ -737,6 +738,10 @@ event_base_new_with_config(const struct event_config *cfg)
event_base_start_iocp_(base, cfg->n_cpus_hint);
#endif
+ /* initialize watcher lists */
+ for (i = 0; i < EVWATCH_MAX; ++i)
+ TAILQ_INIT(&base->watchers[i]);
+
return (base);
}
@@ -839,6 +844,7 @@ event_base_free_(struct event_base *base, int run_finalizers)
{
int i, n_deleted=0;
struct event *ev;
+ struct evwatch *watcher;
/* XXXX grab the lock? If there is contention when one thread frees
* the base, then the contending thread will be very sad soon. */
@@ -939,6 +945,15 @@ event_base_free_(struct event_base *base, int run_finalizers)
EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
EVTHREAD_FREE_COND(base->current_event_cond);
+ /* Free all event watchers */
+ for (i = 0; i < EVWATCH_MAX; ++i) {
+ while (!TAILQ_EMPTY(&base->watchers[i])) {
+ watcher = TAILQ_FIRST(&base->watchers[i]);
+ TAILQ_REMOVE(&base->watchers[i], watcher, next);
+ mm_free(watcher);
+ }
+ }
+
/* If we're freeing current_base, there won't be a current_base. */
if (base == current_base)
current_base = NULL;
@@ -1926,9 +1941,12 @@ event_base_loop(struct event_base *base, int flags)
struct timeval tv;
struct timeval *tv_p;
int res, done, retval = 0;
+ struct evwatch_prepare_cb_info prepare_info;
+ struct evwatch_check_cb_info check_info;
+ struct evwatch *watcher;
/* Grab the lock. We will release it inside evsel.dispatch, and again
- * as we invoke user callbacks. */
+ * as we invoke watchers and user callbacks. */
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
if (base->running_loop) {
@@ -1987,6 +2005,13 @@ event_base_loop(struct event_base *base, int flags)
event_queue_make_later_events_active(base);
+ /* Invoke prepare watchers before polling for events */
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ prepare_info.timeout = tv_p;
+ TAILQ_FOREACH(watcher, &base->watchers[EVWATCH_PREPARE], next)
+ (*watcher->callback.prepare)(watcher, &prepare_info, watcher->arg);
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
clear_time_cache(base);
res = evsel->dispatch(base, tv_p);
@@ -2000,6 +2025,13 @@ event_base_loop(struct event_base *base, int flags)
update_time_cache(base);
+ /* Invoke check watchers after polling for events, and before
+ * processing them */
+ EVBASE_RELEASE_LOCK(base, th_base_lock);
+ TAILQ_FOREACH(watcher, &base->watchers[EVWATCH_CHECK], next)
+ (*watcher->callback.check)(watcher, &check_info, watcher->arg);
+ EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
timeout_process(base);
if (N_ACTIVE_CALLBACKS(base)) {