author     Rickard Green <rickard@erlang.org>    2020-09-22 17:18:11 +0200
committer  Rickard Green <rickard@erlang.org>    2020-09-29 15:32:34 +0200
commit     542bd199eb1ff8aa99e3efd684aff6e4940279c8
tree       eba2ec478f406de5cbed755c9caaf281667eac3a
parent     c2045f94c12c0768173f20a15d576102bf347af4
download   erlang-542bd199eb1ff8aa99e3efd684aff6e4940279c8.tar.gz
Fix thread progress handling in poll threads
-rw-r--r--   erts/emulator/beam/erl_alloc.types       |   1
-rw-r--r--   erts/emulator/beam/erl_lock_check.c      |   1
-rw-r--r--   erts/emulator/beam/erl_process.c         | 162
-rw-r--r--   erts/emulator/beam/erl_thr_progress.c    |  13
-rw-r--r--   erts/emulator/beam/erl_thr_progress.h    |   3
-rw-r--r--   erts/emulator/beam/erl_trace.c           |   2
-rw-r--r--   erts/emulator/sys/common/erl_check_io.c  |   8
-rw-r--r--   erts/emulator/sys/common/erl_check_io.h  |   6
-rw-r--r--   erts/emulator/sys/common/erl_poll.c      |  23
9 files changed, 129 insertions(+), 90 deletions(-)
diff --git a/erts/emulator/beam/erl_alloc.types b/erts/emulator/beam/erl_alloc.types
index 4f03a34390..458a97a6b7 100644
--- a/erts/emulator/beam/erl_alloc.types
+++ b/erts/emulator/beam/erl_alloc.types
@@ -353,6 +353,7 @@ type NIF_SEL_D_STATE FIXED_SIZE SYSTEM enif_select_data_state
type POLLSET LONG_LIVED SYSTEM pollset
type POLLSET_UPDREQ SHORT_LIVED SYSTEM pollset_update_req
type POLL_FDS LONG_LIVED SYSTEM poll_fds
+type BLOCK_PTHR_DATA LONG_LIVED SYSTEM block_poll_thread_data
type FD_STATUS LONG_LIVED SYSTEM fd_status
type SELECT_FDS LONG_LIVED SYSTEM select_fds
diff --git a/erts/emulator/beam/erl_lock_check.c b/erts/emulator/beam/erl_lock_check.c
index 1416c5f96c..730d3f0373 100644
--- a/erts/emulator/beam/erl_lock_check.c
+++ b/erts/emulator/beam/erl_lock_check.c
@@ -127,6 +127,7 @@ static erts_lc_lock_order_t erts_lock_order[] = {
{ "pollwaiter", "address" },
{ "break_waiter_lock", NULL },
#endif /* __WIN32__ */
+ { "block_poll_thread", "index" },
{ "alcu_init_atoms", NULL },
{ "mseg_init_atoms", NULL },
{ "mmap_init_atoms", NULL },
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 1f464e2e5a..4d58b3bbc3 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -181,7 +181,6 @@ sched_get_busy_wait_params(ErtsSchedulerData *esdp)
}
static ErtsAuxWorkData *aux_thread_aux_work_data;
-static ErtsAuxWorkData *poll_thread_aux_work_data;
#define ERTS_SCHDLR_SSPND_CHNG_NMSB (((erts_aint32_t) 1) << 0)
#define ERTS_SCHDLR_SSPND_CHNG_MSB (((erts_aint32_t) 1) << 1)
@@ -410,7 +409,21 @@ typedef union {
static ErtsAlignedSchedulerSleepInfo *aligned_sched_sleep_info;
static ErtsAlignedSchedulerSleepInfo *aligned_dirty_cpu_sched_sleep_info;
static ErtsAlignedSchedulerSleepInfo *aligned_dirty_io_sched_sleep_info;
-static ErtsAlignedSchedulerSleepInfo *aligned_poll_thread_sleep_info;
+
+typedef struct {
+ erts_mtx_t mtx;
+ erts_cnd_t cnd;
+ int blocked;
+ int id;
+} ErtsBlockPollThreadData;
+
+typedef union {
+ ErtsBlockPollThreadData block_data;
+ char align[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsBlockPollThreadData))];
+} ErtsAlignedBlockPollThreadData;
+
+
+static ErtsAlignedBlockPollThreadData *ERTS_WRITE_UNLIKELY(block_poll_thread_data);
static Uint last_reductions;
static Uint last_exact_reductions;
@@ -479,10 +492,6 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(proclist,
200,
ERTS_ALC_T_PROC_LIST)
-#define ERTS_POLL_THREAD_SLEEP_INFO_IX(IX) \
- (ASSERT(0 <= ((int) (IX)) \
- && ((int) (IX)) < ((int) erts_no_poll_threads)), \
- &aligned_poll_thread_sleep_info[(IX)].ssi)
#define ERTS_SCHED_SLEEP_INFO_IX(IX) \
(ASSERT(((int)-1) <= ((int) (IX)) \
&& ((int) (IX)) < ((int) erts_no_schedulers)), \
@@ -3094,7 +3103,7 @@ aux_thread(void *unused)
callbacks.wait = thr_prgr_wait;
callbacks.finalize_wait = thr_prgr_fin_wait;
- tpd = erts_thr_progress_register_managed_thread(NULL, &callbacks, 1);
+ tpd = erts_thr_progress_register_managed_thread(NULL, &callbacks, 1, 0);
init_aux_work_data(awdp, NULL, NULL);
awdp->ssi = ssi;
@@ -3143,7 +3152,7 @@ aux_thread(void *unused)
if (flgs & ERTS_SSI_FLG_SLEEPING) {
ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- erts_check_io(ssi->psi, ERTS_POLL_INF_TIMEOUT);
+ erts_check_io(ssi->psi, ERTS_POLL_INF_TIMEOUT, 0);
}
}
#else
@@ -3173,15 +3182,46 @@ aux_thread(void *unused)
return NULL;
}
+static void
+pt_wake(void *vbpt)
+{
+ ErtsBlockPollThreadData *bpt = (ErtsBlockPollThreadData *) vbpt;
+ erts_mtx_lock(&bpt->mtx);
+ bpt->blocked = 0;
+ erts_cnd_signal(&bpt->cnd);
+ erts_mtx_unlock(&bpt->mtx);
+}
+
+static void
+pt_wait(void *vbpt)
+{
+ ErtsBlockPollThreadData *bpt = (ErtsBlockPollThreadData *) vbpt;
+ erts_mtx_lock(&bpt->mtx);
+ while (bpt->blocked)
+ erts_cnd_wait(&bpt->cnd, &bpt->mtx);
+ erts_mtx_unlock(&bpt->mtx);
+}
+
+static void
+pt_prep_wait(void *vbpt)
+{
+ ErtsBlockPollThreadData *bpt = (ErtsBlockPollThreadData *) vbpt;
+ erts_mtx_lock(&bpt->mtx);
+ bpt->blocked = !0;
+ erts_mtx_unlock(&bpt->mtx);
+}
+
+static void
+pt_fin_wait(void *vbpt)
+{
+
+}
+
static void *
-poll_thread(void *arg)
+poll_thread(void *vbpt)
{
- int id = (int)(UWord)arg;
- ErtsAuxWorkData *awdp = poll_thread_aux_work_data+id;
- ErtsSchedulerSleepInfo *ssi = ERTS_POLL_THREAD_SLEEP_INFO_IX(id);
- erts_aint32_t aux_work;
+ ErtsBlockPollThreadData *bpt = (ErtsBlockPollThreadData *) vbpt;
ErtsThrPrgrCallbacks callbacks;
- int thr_prgr_active = 1;
struct erts_poll_thread *psi;
ErtsThrPrgrData *tpd;
ERTS_MSACC_DECLARE_CACHE();
@@ -3194,59 +3234,24 @@ poll_thread(void *arg)
#endif
erts_port_task_pre_alloc_init_thread();
- ssi->event = erts_tse_fetch();
- erts_msacc_init_thread("poll", id, 0);
+ erts_msacc_init_thread("poll", bpt->id, 0);
- callbacks.arg = (void *) ssi;
- callbacks.wakeup = thr_prgr_wakeup;
- callbacks.prepare_wait = thr_prgr_prep_wait;
- callbacks.wait = thr_prgr_wait;
- callbacks.finalize_wait = thr_prgr_fin_wait;
-
- tpd = erts_thr_progress_register_managed_thread(NULL, &callbacks, 0);
- init_aux_work_data(awdp, NULL, NULL);
- awdp->ssi = ssi;
+ callbacks.arg = vbpt;
+ callbacks.wakeup = pt_wake;
+ callbacks.prepare_wait = pt_prep_wait;
+ callbacks.wait = pt_wait;
+ callbacks.finalize_wait = pt_fin_wait;
- psi = erts_create_pollset_thread(id, tpd);
+ tpd = erts_thr_progress_register_managed_thread(NULL, &callbacks, 0, !0);
- ssi->psi = psi;
-
- sched_prep_spin_wait(ssi);
+ psi = erts_create_pollset_thread(bpt->id, tpd);
ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_OTHER);
while (1) {
- erts_aint32_t flgs;
-
- aux_work = erts_atomic32_read_acqb(&ssi->aux_work);
- if (aux_work) {
- if (!thr_prgr_active)
- erts_thr_progress_active(tpd, thr_prgr_active = 1);
- aux_work = handle_aux_work(awdp, aux_work, 1);
- ERTS_MSACC_UPDATE_CACHE();
- if (aux_work && erts_thr_progress_update(tpd))
- erts_thr_progress_leader_update(tpd);
- }
-
- if (!aux_work) {
- if (thr_prgr_active)
- erts_thr_progress_active(tpd, thr_prgr_active = 0);
-
- flgs = sched_spin_wait(ssi, 0);
-
- if (flgs & ERTS_SSI_FLG_SLEEPING) {
- ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- flgs = sched_set_sleeptype(ssi, ERTS_SSI_FLG_POLL_SLEEPING);
- if (flgs & ERTS_SSI_FLG_SLEEPING) {
- ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
- ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- erts_check_io(psi, ERTS_POLL_INF_TIMEOUT);
- }
- }
- }
-
- flgs = sched_prep_spin_wait(ssi);
+ erts_check_io_interrupt(psi, 0);
+ erts_check_io(psi, ERTS_POLL_INF_TIMEOUT, !0);
}
return NULL;
}
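
The pt_prep_wait()/pt_wait()/pt_wake() callbacks added above use a classic predicate-plus-condition-variable blocking pattern: the poll thread raises a blocked flag under the mutex before it is allowed to sleep, then waits on the condition variable until another thread clears the flag and signals. The following standalone sketch shows the same pattern with plain pthreads; it is not ERTS code, and all names (block_data, prep_wait, ...) are illustrative.

/* Standalone sketch (not ERTS code) of the blocking pattern used by the
 * pt_*() callbacks above; all names are illustrative. */
#include <pthread.h>

typedef struct {
    pthread_mutex_t mtx;
    pthread_cond_t cnd;
    int blocked;
} block_data;

static void prep_wait(block_data *b)      /* cf. pt_prep_wait() */
{
    pthread_mutex_lock(&b->mtx);
    b->blocked = 1;                       /* announce intent to sleep */
    pthread_mutex_unlock(&b->mtx);
}

static void do_wait(block_data *b)        /* cf. pt_wait() */
{
    pthread_mutex_lock(&b->mtx);
    while (b->blocked)                    /* loop guards against spurious wakeups */
        pthread_cond_wait(&b->cnd, &b->mtx);
    pthread_mutex_unlock(&b->mtx);
}

static void wake(block_data *b)           /* cf. pt_wake() */
{
    pthread_mutex_lock(&b->mtx);
    b->blocked = 0;
    pthread_cond_signal(&b->cnd);
    pthread_mutex_unlock(&b->mtx);
}

Because the flag is raised in a separate prepare step, a wakeup that arrives before the actual wait simply clears the flag and the while loop never sleeps, so no wakeup is lost.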
@@ -3431,7 +3436,7 @@ scheduler_wait(int *fcalls, ErtsSchedulerData *esdp, ErtsRunQueue *rq)
if (flgs & ERTS_SSI_FLG_SLEEPING) {
ASSERT(flgs & ERTS_SSI_FLG_POLL_SLEEPING);
ASSERT(flgs & ERTS_SSI_FLG_WAITING);
- erts_check_io(ssi->psi, timeout_time);
+ erts_check_io(ssi->psi, timeout_time, 0);
current_time = erts_get_monotonic_time(esdp);
}
}
@@ -6021,18 +6026,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online, int no_poll_th
erts_atomic32_init_nob(&ssi->aux_work, 0);
}
- aligned_poll_thread_sleep_info =
- erts_alloc_permanent_cache_aligned(
- ERTS_ALC_T_SCHDLR_SLP_INFO,
- no_poll_threads*sizeof(ErtsAlignedSchedulerSleepInfo));
- for (ix = 0; ix < no_poll_threads; ix++) {
- ErtsSchedulerSleepInfo *ssi = &aligned_poll_thread_sleep_info[ix].ssi;
- ssi->esdp = NULL;
- erts_atomic32_init_nob(&ssi->flags, 0);
- ssi->event = NULL; /* initialized in poll_thread */
- erts_atomic32_init_nob(&ssi->aux_work, 0);
- }
-
/* Create and initialize scheduler specific data */
daww_sz = ERTS_ALC_CACHE_LINE_ALIGN_SIZE((sizeof(ErtsDelayedAuxWorkWakeupJob)
@@ -6093,10 +6086,6 @@ erts_init_scheduling(int no_schedulers, int no_schedulers_online, int no_poll_th
erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
sizeof(ErtsAuxWorkData));
- poll_thread_aux_work_data =
- erts_alloc_permanent_cache_aligned(ERTS_ALC_T_SCHDLR_DATA,
- no_poll_threads * sizeof(ErtsAuxWorkData));
-
init_no_runqs(no_schedulers_online, no_schedulers_online);
balance_info.last_active_runqs = no_schedulers;
erts_mtx_init(&balance_info.update_mtx, "migration_info_update", NIL,
@@ -8427,7 +8416,7 @@ sched_thread_func(void *vesdp)
erts_msacc_init_thread("scheduler", no, 1);
- erts_thr_progress_register_managed_thread(esdp, &callbacks, 0);
+ erts_thr_progress_register_managed_thread(esdp, &callbacks, 0, 0);
#if ERTS_POLL_USE_SCHEDULER_POLLING
esdp->ssi->psi = erts_create_pollset_thread(-1, NULL);
@@ -8650,10 +8639,25 @@ erts_start_schedulers(void)
if (res != 0)
erts_exit(ERTS_ERROR_EXIT, "Failed to create aux thread, error = %d\n", res);
+ block_poll_thread_data = (ErtsAlignedBlockPollThreadData *)
+ erts_alloc_permanent_cache_aligned(ERTS_ALC_T_BLOCK_PTHR_DATA,
+ sizeof(ErtsAlignedBlockPollThreadData)
+ * erts_no_poll_threads);
+
+
for (ix = 0; ix < erts_no_poll_threads; ix++) {
+ ErtsBlockPollThreadData *bpt = &block_poll_thread_data[ix].block_data;
+ erts_mtx_init(&bpt->mtx, "block_poll_thread",
+ make_small(ix),
+ (ERTS_LOCK_FLAGS_PROPERTY_STATIC
+ | ERTS_LOCK_FLAGS_CATEGORY_IO));
+ erts_cnd_init(&bpt->cnd);
+ bpt->blocked = 0;
+ bpt->id = ix;
+
erts_snprintf(opts.name, 16, "%d_poller", ix);
- res = ethr_thr_create(&tid, poll_thread, (void*)(UWord)ix, &opts);
+ res = ethr_thr_create(&tid, poll_thread, (void*) bpt, &opts);
if (res != 0)
erts_exit(ERTS_ERROR_EXIT, "Failed to create poll thread\n");
}
@@ -9552,7 +9556,7 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
ERTS_MSACC_SET_STATE_CACHED_M(ERTS_MSACC_STATE_CHECK_IO);
LTTNG2(scheduler_poll, esdp->no, 1);
- erts_check_io(esdp->ssi->psi, ERTS_POLL_NO_TIMEOUT);
+ erts_check_io(esdp->ssi->psi, ERTS_POLL_NO_TIMEOUT, 0);
ERTS_MSACC_POP_STATE_M();
current_time = erts_get_monotonic_time(esdp);
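
In erts_start_schedulers() above, each poll thread now gets its own cache-aligned ErtsBlockPollThreadData block, initialized before the thread is created, and a pointer to that block is passed as the thread argument instead of a casted integer id. Below is a minimal standalone sketch of that start-up pattern, using pthread_create in place of ethr_thr_create; it is not ERTS code, and the names (block_data, start_poll_threads, poll_thread_main) are illustrative only.

/* Standalone sketch (not ERTS code); names are illustrative. */
#include <pthread.h>
#include <stdlib.h>

typedef struct {
    pthread_mutex_t mtx;
    pthread_cond_t cnd;
    int blocked;
    int id;
} block_data;

static void *poll_thread_main(void *arg)          /* stand-in for poll_thread() */
{
    block_data *bd = arg;                         /* per-thread data, no id cast */
    (void) bd;
    return NULL;
}

static void start_poll_threads(int no_poll_threads)
{
    block_data *bd = calloc(no_poll_threads, sizeof(block_data));
    if (!bd)
        return;
    for (int ix = 0; ix < no_poll_threads; ix++) {
        pthread_t tid;
        pthread_mutex_init(&bd[ix].mtx, NULL);    /* cf. erts_mtx_init() */
        pthread_cond_init(&bd[ix].cnd, NULL);     /* cf. erts_cnd_init() */
        bd[ix].blocked = 0;
        bd[ix].id = ix;
        pthread_create(&tid, NULL, poll_thread_main, &bd[ix]);
    }
}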
diff --git a/erts/emulator/beam/erl_thr_progress.c b/erts/emulator/beam/erl_thr_progress.c
index bac437efe9..a3851e43f4 100644
--- a/erts/emulator/beam/erl_thr_progress.c
+++ b/erts/emulator/beam/erl_thr_progress.c
@@ -558,7 +558,8 @@ erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *callbacks)
ErtsThrPrgrData *
erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
ErtsThrPrgrCallbacks *callbacks,
- int pref_wakeup)
+ int pref_wakeup,
+ int deep_sleeper)
{
ErtsThrPrgrData *tpd = perhaps_thr_prgr_data(NULL);
int is_blocking = 0, managed;
@@ -593,6 +594,7 @@ erts_thr_progress_register_managed_thread(ErtsSchedulerData *esdp,
tpd->is_managed = 1;
tpd->is_blocking = is_blocking;
tpd->is_temporary = 0;
+ tpd->is_deep_sleeper = deep_sleeper;
#ifdef ERTS_ENABLE_LOCK_CHECK
tpd->is_delaying = 1;
#endif
@@ -888,7 +890,10 @@ erts_thr_progress_prepare_wait(ErtsThrPrgrData *tpd)
== ERTS_THR_PRGR_LFLG_NO_LEADER
&& got_sched_wakeups()) {
/* Someone need to make progress */
- wakeup_managed(tpd->id);
+ if (tpd->is_deep_sleeper)
+ wakeup_managed(1);
+ else
+ wakeup_managed(tpd->id);
}
}
@@ -1072,11 +1077,13 @@ request_wakeup_managed(ErtsThrPrgrData *tpd, ErtsThrPrgrVal value)
/*
* Only managed threads that aren't in waiting state
- * are allowed to call this function.
+ * and aren't deep sleepers are allowed to call this
+ * function.
*/
ASSERT(tpd->is_managed);
ASSERT(tpd->confirmed != ERTS_THR_PRGR_VAL_WAITING);
+ ASSERT(!tpd->is_deep_sleeper);
if (has_reached_wakeup(value)) {
wakeup_managed(tpd->id);
diff --git a/erts/emulator/beam/erl_thr_progress.h b/erts/emulator/beam/erl_thr_progress.h
index 00a9e61407..3272926365 100644
--- a/erts/emulator/beam/erl_thr_progress.h
+++ b/erts/emulator/beam/erl_thr_progress.h
@@ -68,6 +68,7 @@ typedef struct {
int leader; /* Needs to be first in the managed threads part */
int active;
+ int is_deep_sleeper;
ErtsThrPrgrVal confirmed;
ErtsThrPrgrLeaderState leader_state;
} ErtsThrPrgrData;
@@ -124,7 +125,7 @@ extern ErtsThrPrgr erts_thr_prgr__;
void erts_thr_progress_pre_init(void);
void erts_thr_progress_init(int no_schedulers, int managed, int unmanaged);
ErtsThrPrgrData *erts_thr_progress_register_managed_thread(
- ErtsSchedulerData *esdp, ErtsThrPrgrCallbacks *, int);
+ ErtsSchedulerData *esdp, ErtsThrPrgrCallbacks *, int, int);
void erts_thr_progress_register_unmanaged_thread(ErtsThrPrgrCallbacks *);
void erts_thr_progress_active(ErtsThrPrgrData *, int on);
void erts_thr_progress_wakeup(ErtsThrPrgrData *,
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 139c231e7d..bb75498076 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -2206,7 +2206,7 @@ sys_msg_dispatcher_func(void *unused)
callbacks.wait = sys_msg_dispatcher_wait;
callbacks.finalize_wait = sys_msg_dispatcher_fin_wait;
- tpd = erts_thr_progress_register_managed_thread(NULL, &callbacks, 0);
+ tpd = erts_thr_progress_register_managed_thread(NULL, &callbacks, 0, 0);
while (1) {
int end_wait = 0;
diff --git a/erts/emulator/sys/common/erl_check_io.c b/erts/emulator/sys/common/erl_check_io.c
index c77a535105..c90a00c788 100644
--- a/erts/emulator/sys/common/erl_check_io.c
+++ b/erts/emulator/sys/common/erl_check_io.c
@@ -1603,7 +1603,7 @@ erts_create_pollset_thread(int id, ErtsThrPrgrData *tpd) {
}
void
-erts_check_io(ErtsPollThread *psi, ErtsMonotonicTime timeout_time)
+erts_check_io(ErtsPollThread *psi, ErtsMonotonicTime timeout_time, int poll_only_thread)
{
int pollres_len;
int poll_ret, i;
@@ -1617,6 +1617,9 @@ erts_check_io(ErtsPollThread *psi, ErtsMonotonicTime timeout_time)
pollres_len = psi->pollres_len;
+ if (poll_only_thread)
+ erts_thr_progress_active(psi->tpd, 0);
+
#if ERTS_POLL_USE_FALLBACK
if (psi->ps == get_fallback_pollset()) {
@@ -1628,6 +1631,9 @@ erts_check_io(ErtsPollThread *psi, ErtsMonotonicTime timeout_time)
poll_ret = erts_poll_wait(psi->ps, psi->pollres, &pollres_len, psi->tpd, timeout_time);
}
+ if (poll_only_thread)
+ erts_thr_progress_active(psi->tpd, 1);
+
#ifdef ERTS_ENABLE_LOCK_CHECK
erts_lc_check_exact(NULL, 0); /* No locks should be locked */
#endif
diff --git a/erts/emulator/sys/common/erl_check_io.h b/erts/emulator/sys/common/erl_check_io.h
index 31182be5ec..a422c78bd3 100644
--- a/erts/emulator/sys/common/erl_check_io.h
+++ b/erts/emulator/sys/common/erl_check_io.h
@@ -67,8 +67,12 @@ int erts_check_io_max_files(void);
* not return unless erts_check_io_interrupt(pt, 1) is called by another thread.
*
* @param pt the poll thread structure to use.
+ * @param timeout_time timeout
+ * @param poll_only_thread non-zero when polling is the only thing the
+ * calling thread does
*/
-void erts_check_io(struct erts_poll_thread *pt, ErtsMonotonicTime timeout_time);
+void erts_check_io(struct erts_poll_thread *pt, ErtsMonotonicTime timeout_time,
+ int poll_only_thread);
/**
* Initialize the check io framework. This function will parse the arguments
* and delete any entries that it is interested in.
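
The poll_only_thread argument documented above is used in erl_check_io.c to mark a dedicated poller as inactive for thread-progress purposes while it is blocked in the poll call, and active again once it returns. Below is a standalone model of the idea; it is not ERTS code, and progress_active() and check_io() are illustrative stand-ins for the ERTS internals.

/* Standalone model (not ERTS code); names are illustrative. */
#include <poll.h>
#include <stdio.h>

static void progress_active(int on)       /* stand-in for erts_thr_progress_active() */
{
    printf("poller is now %s for thread progress\n", on ? "active" : "inactive");
}

static int check_io(struct pollfd *fds, nfds_t nfds, int timeout_ms,
                    int poll_only_thread)
{
    int res;
    if (poll_only_thread)
        progress_active(0);               /* may block indefinitely; don't wait for it */
    res = poll(fds, nfds, timeout_ms);    /* blocking wait for I/O events */
    if (poll_only_thread)
        progress_active(1);
    return res;
}

In the patch itself, the dedicated poll threads pass a non-zero flag together with ERTS_POLL_INF_TIMEOUT, while schedulers and the aux thread keep passing 0.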
diff --git a/erts/emulator/sys/common/erl_poll.c b/erts/emulator/sys/common/erl_poll.c
index c71d23f58c..af014998c5 100644
--- a/erts/emulator/sys/common/erl_poll.c
+++ b/erts/emulator/sys/common/erl_poll.c
@@ -374,6 +374,7 @@ uint32_t epoll_events(int kp_fd, int fd);
#define ERTS_POLL_NOT_WOKEN 0
#define ERTS_POLL_WOKEN -1
#define ERTS_POLL_WOKEN_INTR 1
+#define ERTS_POLL_WSTATE_UNUSED ~0
static ERTS_INLINE void
reset_wakeup_state(ErtsPollSet *ps)
@@ -384,12 +385,16 @@ reset_wakeup_state(ErtsPollSet *ps)
static ERTS_INLINE int
is_woken(ErtsPollSet *ps)
{
+ if (!ERTS_POLL_USE_WAKEUP(ps))
+ return 0;
return erts_atomic32_read_acqb(&ps->wakeup_state) != ERTS_POLL_NOT_WOKEN;
}
static ERTS_INLINE int
is_interrupted_reset(ErtsPollSet *ps)
{
+ if (!ERTS_POLL_USE_WAKEUP(ps))
+ return 0;
return (erts_atomic32_xchg_acqb(&ps->wakeup_state, ERTS_POLL_NOT_WOKEN)
== ERTS_POLL_WOKEN_INTR);
}
@@ -397,7 +402,10 @@ is_interrupted_reset(ErtsPollSet *ps)
static ERTS_INLINE void
woke_up(ErtsPollSet *ps)
{
- erts_aint32_t wakeup_state = erts_atomic32_read_acqb(&ps->wakeup_state);
+ erts_aint32_t wakeup_state;
+ if (!ERTS_POLL_USE_WAKEUP(ps))
+ return;
+ wakeup_state = erts_atomic32_read_acqb(&ps->wakeup_state);
if (wakeup_state == ERTS_POLL_NOT_WOKEN)
(void) erts_atomic32_cmpxchg_nob(&ps->wakeup_state,
ERTS_POLL_WOKEN,
@@ -450,6 +458,7 @@ cleanup_wakeup_pipe(ErtsPollSet *ps)
int intr = 0;
int fd = ps->wake_fds[0];
int res;
+ ASSERT(ERTS_POLL_USE_WAKEUP(ps));
do {
char buf[32];
res = read(fd, buf, sizeof(buf));
@@ -475,6 +484,13 @@ create_wakeup_pipe(ErtsPollSet *ps)
int wake_fds[2];
ps->wake_fds[0] = -1;
ps->wake_fds[1] = -1;
+ if (!ERTS_POLL_USE_WAKEUP(ps)) {
+ erts_atomic32_init_nob(&ps->wakeup_state,
+ (erts_aint32_t) ERTS_POLL_WSTATE_UNUSED);
+ return;
+ }
+ erts_atomic32_init_nob(&ps->wakeup_state,
+ (erts_aint32_t) ERTS_POLL_NOT_WOKEN);
if (pipe(wake_fds) < 0) {
fatal_error("%s:%d:create_wakeup_pipe(): "
"Failed to create pipe: %s (%d)\n",
@@ -483,6 +499,7 @@ create_wakeup_pipe(ErtsPollSet *ps)
erl_errno_id(errno),
errno);
}
+
SET_NONBLOCKING(wake_fds[0]);
SET_NONBLOCKING(wake_fds[1]);
@@ -1938,8 +1955,7 @@ ERTS_POLL_EXPORT(erts_poll_wait)(ErtsPollSet *ps,
ERTS_MSACC_SET_STATE_CACHED(ERTS_MSACC_STATE_CHECK_IO);
}
- if (ERTS_POLL_USE_WAKEUP(ps))
- woke_up(ps);
+ woke_up(ps);
if (res < 0) {
#if ERTS_POLL_USE_SELECT
@@ -2117,7 +2133,6 @@ ERTS_POLL_EXPORT(erts_poll_create_pollset)(int id)
ps->oneshot = 1;
#endif
- erts_atomic32_init_nob(&ps->wakeup_state, (erts_aint32_t) 0);
create_wakeup_pipe(ps);
#if ERTS_POLL_USE_TIMERFD
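
The erl_poll.c changes above make the wakeup pipe optional: for a pollset where ERTS_POLL_USE_WAKEUP() is false, wakeup_state is initialized to ERTS_POLL_WSTATE_UNUSED, no pipe is created, and helpers such as is_woken() and woke_up() return early. Below is a standalone sketch of that conditional-initialization pattern; it is not ERTS code, and the names and types are illustrative.

/* Standalone sketch (not ERTS code); names and types are illustrative. */
#include <stdatomic.h>
#include <unistd.h>
#include <fcntl.h>

#define NOT_WOKEN      0
#define WSTATE_UNUSED (~0)

typedef struct {
    int use_wakeup;            /* stand-in for ERTS_POLL_USE_WAKEUP(ps) */
    int wake_fds[2];
    atomic_int wakeup_state;
} pollset;

static void create_wakeup_pipe(pollset *ps)
{
    ps->wake_fds[0] = ps->wake_fds[1] = -1;
    if (!ps->use_wakeup) {
        atomic_init(&ps->wakeup_state, WSTATE_UNUSED);
        return;                /* no pipe needed; state stays "unused" */
    }
    atomic_init(&ps->wakeup_state, NOT_WOKEN);
    if (pipe(ps->wake_fds) == 0) {
        fcntl(ps->wake_fds[0], F_SETFL, fcntl(ps->wake_fds[0], F_GETFL) | O_NONBLOCK);
        fcntl(ps->wake_fds[1], F_SETFL, fcntl(ps->wake_fds[1], F_GETFL) | O_NONBLOCK);
    }
}

static int is_woken(pollset *ps)
{
    if (!ps->use_wakeup)
        return 0;              /* wakeups never used for this pollset */
    return atomic_load(&ps->wakeup_state) != NOT_WOKEN;
}

In the patch, the ERTS_POLL_USE_WAKEUP() check that previously guarded the woke_up() call site in erts_poll_wait() now lives inside the helpers themselves.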