Diffstat (limited to 'Utilities/cmlibuv/src/unix')
-rw-r--r--  Utilities/cmlibuv/src/unix/aix-common.c  90
-rw-r--r--  Utilities/cmlibuv/src/unix/aix.c  1304
-rw-r--r--  Utilities/cmlibuv/src/unix/async.c  253
-rw-r--r--  Utilities/cmlibuv/src/unix/atomic-ops.h  70
-rw-r--r--  Utilities/cmlibuv/src/unix/bsd-ifaddrs.c  163
-rw-r--r--  Utilities/cmlibuv/src/unix/bsd-proctitle.c  99
-rw-r--r--  Utilities/cmlibuv/src/unix/cmake-bootstrap.c  148
-rw-r--r--  Utilities/cmlibuv/src/unix/core.c  1682
-rw-r--r--  Utilities/cmlibuv/src/unix/cygwin.c  53
-rw-r--r--  Utilities/cmlibuv/src/unix/darwin-proctitle.c  192
-rw-r--r--  Utilities/cmlibuv/src/unix/darwin-stub.h  113
-rw-r--r--  Utilities/cmlibuv/src/unix/darwin.c  379
-rw-r--r--  Utilities/cmlibuv/src/unix/dl.c  80
-rw-r--r--  Utilities/cmlibuv/src/unix/epoll.c  422
-rw-r--r--  Utilities/cmlibuv/src/unix/freebsd.c  304
-rw-r--r--  Utilities/cmlibuv/src/unix/fs.c  2270
-rw-r--r--  Utilities/cmlibuv/src/unix/fsevents.c  916
-rw-r--r--  Utilities/cmlibuv/src/unix/getaddrinfo.c  252
-rw-r--r--  Utilities/cmlibuv/src/unix/getnameinfo.c  121
-rw-r--r--  Utilities/cmlibuv/src/unix/haiku.c  167
-rw-r--r--  Utilities/cmlibuv/src/unix/hpux.c  30
-rw-r--r--  Utilities/cmlibuv/src/unix/hurd.c  167
-rw-r--r--  Utilities/cmlibuv/src/unix/ibmi.c  538
-rw-r--r--  Utilities/cmlibuv/src/unix/internal.h  379
-rw-r--r--  Utilities/cmlibuv/src/unix/kqueue.c  605
-rw-r--r--  Utilities/cmlibuv/src/unix/linux-core.c  834
-rw-r--r--  Utilities/cmlibuv/src/unix/linux-inotify.c  327
-rw-r--r--  Utilities/cmlibuv/src/unix/linux-syscalls.c  264
-rw-r--r--  Utilities/cmlibuv/src/unix/linux-syscalls.h  78
-rw-r--r--  Utilities/cmlibuv/src/unix/loop-watcher.c  68
-rw-r--r--  Utilities/cmlibuv/src/unix/loop.c  228
-rw-r--r--  Utilities/cmlibuv/src/unix/netbsd.c  259
-rw-r--r--  Utilities/cmlibuv/src/unix/no-fsevents.c  42
-rw-r--r--  Utilities/cmlibuv/src/unix/no-proctitle.c  45
-rw-r--r--  Utilities/cmlibuv/src/unix/openbsd.c  240
-rw-r--r--  Utilities/cmlibuv/src/unix/os390-proctitle.c  136
-rw-r--r--  Utilities/cmlibuv/src/unix/os390-syscalls.c  536
-rw-r--r--  Utilities/cmlibuv/src/unix/os390-syscalls.h  75
-rw-r--r--  Utilities/cmlibuv/src/unix/os390.c  1052
-rw-r--r--  Utilities/cmlibuv/src/unix/pipe.c  435
-rw-r--r--  Utilities/cmlibuv/src/unix/poll.c  160
-rw-r--r--  Utilities/cmlibuv/src/unix/posix-hrtime.c  74
-rw-r--r--  Utilities/cmlibuv/src/unix/posix-poll.c  374
-rw-r--r--  Utilities/cmlibuv/src/unix/process.c  1140
-rw-r--r--  Utilities/cmlibuv/src/unix/procfs-exepath.c  45
-rw-r--r--  Utilities/cmlibuv/src/unix/proctitle.c  157
-rw-r--r--  Utilities/cmlibuv/src/unix/pthread-fixes.c  58
-rw-r--r--  Utilities/cmlibuv/src/unix/qnx.c  137
-rw-r--r--  Utilities/cmlibuv/src/unix/random-devurandom.c  93
-rw-r--r--  Utilities/cmlibuv/src/unix/random-getentropy.c  57
-rw-r--r--  Utilities/cmlibuv/src/unix/random-getrandom.c  88
-rw-r--r--  Utilities/cmlibuv/src/unix/random-sysctl-linux.c  99
-rw-r--r--  Utilities/cmlibuv/src/unix/signal.c  558
-rw-r--r--  Utilities/cmlibuv/src/unix/spinlock.h  53
-rw-r--r--  Utilities/cmlibuv/src/unix/stream.c  1629
-rw-r--r--  Utilities/cmlibuv/src/unix/sunos.c  904
-rw-r--r--  Utilities/cmlibuv/src/unix/sysinfo-loadavg.c  36
-rw-r--r--  Utilities/cmlibuv/src/unix/sysinfo-memory.c  42
-rw-r--r--  Utilities/cmlibuv/src/unix/tcp.c  519
-rw-r--r--  Utilities/cmlibuv/src/unix/thread.c  864
-rw-r--r--  Utilities/cmlibuv/src/unix/tty.c  469
-rw-r--r--  Utilities/cmlibuv/src/unix/udp.c  1416
62 files changed, 24388 insertions, 0 deletions
diff --git a/Utilities/cmlibuv/src/unix/aix-common.c b/Utilities/cmlibuv/src/unix/aix-common.c
new file mode 100644
index 0000000000..5bd2a688e9
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/aix-common.c
@@ -0,0 +1,90 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <sys/types.h>
+
+#include <sys/time.h>
+#include <unistd.h>
+
+#include <procinfo.h>
+
+#include <ctype.h>
+
+extern char* original_exepath;
+extern uv_mutex_t process_title_mutex;
+extern uv_once_t process_title_mutex_once;
+extern void init_process_title_mutex_once(void);
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+ uint64_t G = 1000000000;
+ timebasestruct_t t;
+ read_wall_time(&t, TIMEBASE_SZ);
+ time_base_to_time(&t, TIMEBASE_SZ);
+ return (uint64_t) t.tb_high * G + t.tb_low;
+}
+
+
+/*
+ * We could use a static buffer for the path manipulations needed outside of
+ * this function, but it may be called by multiple consumers and we don't
+ * want to create a potential race condition in the use of snprintf.
+ * AIX offers no direct way to get the exe path, neither through /proc nor
+ * through any libc API, so the approach below parses argv[0] and combines
+ * it with the PATH environment variable to construct one.
+ */
+int uv_exepath(char* buffer, size_t* size) {
+ int res;
+ char args[UV__PATH_MAX];
+ size_t cached_len;
+ struct procsinfo pi;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+ if (original_exepath != NULL) {
+ cached_len = strlen(original_exepath);
+ *size -= 1;
+ if (*size > cached_len)
+ *size = cached_len;
+ memcpy(buffer, original_exepath, *size);
+ buffer[*size] = '\0';
+ uv_mutex_unlock(&process_title_mutex);
+ return 0;
+ }
+ uv_mutex_unlock(&process_title_mutex);
+ pi.pi_pid = getpid();
+ res = getargs(&pi, sizeof(pi), args, sizeof(args));
+
+ if (res < 0)
+ return UV_EINVAL;
+
+ return uv__search_path(args, buffer, size);
+}
+
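
As a reference for readers, a minimal caller of uv_exepath() looks like the sketch below (a hypothetical driver, not part of this patch). Note the in/out contract on *size: it carries the buffer capacity in, and the number of bytes copied (excluding the terminating NUL) out.

#include <stdio.h>
#include <uv.h>

int main(void) {
  char buf[4096];
  size_t len = sizeof(buf);  /* in: capacity; out: bytes copied */

  if (uv_exepath(buf, &len) != 0)
    return 1;
  printf("executable: %s (%zu bytes)\n", buf, len);
  return 0;
}
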
diff --git a/Utilities/cmlibuv/src/unix/aix.c b/Utilities/cmlibuv/src/unix/aix.c
new file mode 100644
index 0000000000..6a013d43e3
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/aix.c
@@ -0,0 +1,1304 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <net/if.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include <sys/time.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <utmp.h>
+#include <libgen.h>
+
+#include <sys/protosw.h>
+#include <libperfstat.h>
+#include <procinfo.h>
+#include <sys/proc.h>
+#include <sys/procfs.h>
+
+#include <sys/poll.h>
+
+#include <sys/pollset.h>
+#include <ctype.h>
+#ifdef HAVE_SYS_AHAFS_EVPRODS_H
+#include <sys/ahafs_evProds.h>
+#endif
+
+#include <sys/mntctl.h>
+#include <sys/vmount.h>
+#include <limits.h>
+#include <strings.h>
+#include <sys/vnode.h>
+
+#define RDWR_BUF_SIZE 4096
+#define EQ(a,b) (strcmp(a,b) == 0)
+
+char* original_exepath = NULL;
+uv_mutex_t process_title_mutex;
+uv_once_t process_title_mutex_once = UV_ONCE_INIT;
+static void* args_mem = NULL;
+static char** process_argv = NULL;
+static int process_argc = 0;
+static char* process_title_ptr = NULL;
+
+void init_process_title_mutex_once(void) {
+ uv_mutex_init(&process_title_mutex);
+}
+
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+ loop->fs_fd = -1;
+
+ /* A maxfd of -1 means the limit is determined by the process's
+ * ulimit or the system-wide limit, per the pollset_create()
+ * documentation. (See the standalone pollset sketch after this
+ * file's diff.) */
+ loop->backend_fd = pollset_create(-1);
+
+ if (loop->backend_fd == -1)
+ return -1;
+
+ return 0;
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+ if (loop->fs_fd != -1) {
+ uv__close(loop->fs_fd);
+ loop->fs_fd = -1;
+ }
+
+ if (loop->backend_fd != -1) {
+ pollset_destroy(loop->backend_fd);
+ loop->backend_fd = -1;
+ }
+}
+
+
+int uv__io_fork(uv_loop_t* loop) {
+ uv__platform_loop_delete(loop);
+
+ return uv__platform_loop_init(loop);
+}
+
+
+int uv__io_check_fd(uv_loop_t* loop, int fd) {
+ struct poll_ctl pc;
+
+ pc.events = POLLIN;
+ pc.cmd = PS_MOD; /* Equivalent to PS_ADD if the fd is not in the pollset. */
+ pc.fd = fd;
+
+ if (pollset_ctl(loop->backend_fd, &pc, 1))
+ return UV__ERR(errno);
+
+ pc.cmd = PS_DELETE;
+ if (pollset_ctl(loop->backend_fd, &pc, 1))
+ abort();
+
+ return 0;
+}
+
+
+void uv__io_poll(uv_loop_t* loop, int timeout) {
+ struct pollfd events[1024];
+ struct pollfd pqry;
+ struct pollfd* pe;
+ struct poll_ctl pc;
+ QUEUE* q;
+ uv__io_t* w;
+ uint64_t base;
+ uint64_t diff;
+ int have_signals;
+ int nevents;
+ int count;
+ int nfds;
+ int i;
+ int rc;
+ int add_failed;
+ int user_timeout;
+ int reset_timeout;
+
+ if (loop->nfds == 0) {
+ assert(QUEUE_EMPTY(&loop->watcher_queue));
+ return;
+ }
+
+ while (!QUEUE_EMPTY(&loop->watcher_queue)) {
+ q = QUEUE_HEAD(&loop->watcher_queue);
+ QUEUE_REMOVE(q);
+ QUEUE_INIT(q);
+
+ w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+ assert(w->pevents != 0);
+ assert(w->fd >= 0);
+ assert(w->fd < (int) loop->nwatchers);
+
+ pc.events = w->pevents;
+ pc.fd = w->fd;
+
+ add_failed = 0;
+ if (w->events == 0) {
+ pc.cmd = PS_ADD;
+ if (pollset_ctl(loop->backend_fd, &pc, 1)) {
+ if (errno != EINVAL) {
+ assert(0 && "Failed to add file descriptor (pc.fd) to pollset");
+ abort();
+ }
+ /* Check if the fd is already in the pollset */
+ pqry.fd = pc.fd;
+ rc = pollset_query(loop->backend_fd, &pqry);
+ switch (rc) {
+ case -1:
+ assert(0 && "Failed to query pollset for file descriptor");
+ abort();
+ case 0:
+ assert(0 && "Pollset does not contain file descriptor");
+ abort();
+ }
+ /* If we got here then the pollset already contained the file descriptor even though
+ * we didn't think it should. This probably shouldn't happen, but we can continue. */
+ add_failed = 1;
+ }
+ }
+ if (w->events != 0 || add_failed) {
+ /* Modify, potentially removing events -- need to delete then add.
+ * Could maybe mod if we knew for sure no events are removed, but
+ * content of w->events is handled above as not reliable (falls back)
+ * so may require a pollset_query() which would have to be pretty cheap
+ * compared to a PS_DELETE to be worth optimizing. Alternatively, could
+ * lazily remove events, squelching them in the mean time. */
+ pc.cmd = PS_DELETE;
+ if (pollset_ctl(loop->backend_fd, &pc, 1)) {
+ assert(0 && "Failed to delete file descriptor (pc.fd) from pollset");
+ abort();
+ }
+ pc.cmd = PS_ADD;
+ if (pollset_ctl(loop->backend_fd, &pc, 1)) {
+ assert(0 && "Failed to add file descriptor (pc.fd) to pollset");
+ abort();
+ }
+ }
+
+ w->events = w->pevents;
+ }
+
+ assert(timeout >= -1);
+ base = loop->time;
+ count = 48; /* Benchmarks suggest this gives the best throughput. */
+
+ if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
+ reset_timeout = 1;
+ user_timeout = timeout;
+ timeout = 0;
+ } else {
+ reset_timeout = 0;
+ }
+
+ for (;;) {
+ /* Only need to set the provider_entry_time if timeout != 0. The function
+ * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
+ */
+ if (timeout != 0)
+ uv__metrics_set_provider_entry_time(loop);
+
+ nfds = pollset_poll(loop->backend_fd,
+ events,
+ ARRAY_SIZE(events),
+ timeout);
+
+ /* Update loop->time unconditionally. It's tempting to skip the update when
+ * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
+ * operating system didn't reschedule our process while in the syscall.
+ */
+ SAVE_ERRNO(uv__update_time(loop));
+
+ if (nfds == 0) {
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ if (timeout == -1)
+ continue;
+ if (timeout > 0)
+ goto update_timeout;
+ }
+
+ assert(timeout != -1);
+ return;
+ }
+
+ if (nfds == -1) {
+ if (errno != EINTR) {
+ abort();
+ }
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (timeout == -1)
+ continue;
+
+ if (timeout == 0)
+ return;
+
+ /* Interrupted by a signal. Update timeout and poll again. */
+ goto update_timeout;
+ }
+
+ have_signals = 0;
+ nevents = 0;
+
+ assert(loop->watchers != NULL);
+ loop->watchers[loop->nwatchers] = (void*) events;
+ loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
+
+ for (i = 0; i < nfds; i++) {
+ pe = events + i;
+ pc.cmd = PS_DELETE;
+ pc.fd = pe->fd;
+
+ /* Skip invalidated events, see uv__platform_invalidate_fd */
+ if (pc.fd == -1)
+ continue;
+
+ assert(pc.fd >= 0);
+ assert((unsigned) pc.fd < loop->nwatchers);
+
+ w = loop->watchers[pc.fd];
+
+ if (w == NULL) {
+ /* File descriptor that we've stopped watching, disarm it.
+ *
+ * Ignore all errors because we may be racing with another thread
+ * when the file descriptor is closed.
+ */
+ pollset_ctl(loop->backend_fd, &pc, 1);
+ continue;
+ }
+
+ /* Run signal watchers last. This also affects child process watchers
+ * because those are implemented in terms of signal watchers.
+ */
+ if (w == &loop->signal_io_watcher) {
+ have_signals = 1;
+ } else {
+ uv__metrics_update_idle_time(loop);
+ w->cb(loop, w, pe->revents);
+ }
+
+ nevents++;
+ }
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (have_signals != 0) {
+ uv__metrics_update_idle_time(loop);
+ loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
+ }
+
+ loop->watchers[loop->nwatchers] = NULL;
+ loop->watchers[loop->nwatchers + 1] = NULL;
+
+ if (have_signals != 0)
+ return; /* Event loop should cycle now so don't poll again. */
+
+ if (nevents != 0) {
+ if (nfds == ARRAY_SIZE(events) && --count != 0) {
+ /* Poll for more events but don't block this time. */
+ timeout = 0;
+ continue;
+ }
+ return;
+ }
+
+ if (timeout == 0)
+ return;
+
+ if (timeout == -1)
+ continue;
+
+update_timeout:
+ assert(timeout > 0);
+
+ diff = loop->time - base;
+ if (diff >= (uint64_t) timeout)
+ return;
+
+ timeout -= diff;
+ }
+}
+
+
+uint64_t uv_get_free_memory(void) {
+ perfstat_memory_total_t mem_total;
+ int result = perfstat_memory_total(NULL, &mem_total, sizeof(mem_total), 1);
+ if (result == -1) {
+ return 0;
+ }
+ return mem_total.real_free * 4096;
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ perfstat_memory_total_t mem_total;
+ int result = perfstat_memory_total(NULL, &mem_total, sizeof(mem_total), 1);
+ if (result == -1) {
+ return 0;
+ }
+ return mem_total.real_total * 4096;
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0; /* Memory constraints are unknown. */
+}
+
+
+void uv_loadavg(double avg[3]) {
+ perfstat_cpu_total_t ps_total;
+ int result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1);
+ if (result == -1) {
+ avg[0] = 0.; avg[1] = 0.; avg[2] = 0.;
+ return;
+ }
+ avg[0] = ps_total.loadavg[0] / (double)(1 << SBITS);
+ avg[1] = ps_total.loadavg[1] / (double)(1 << SBITS);
+ avg[2] = ps_total.loadavg[2] / (double)(1 << SBITS);
+}
+
+
+#ifdef HAVE_SYS_AHAFS_EVPRODS_H
+static char* uv__rawname(const char* cp, char (*dst)[FILENAME_MAX+1]) {
+ char* dp;
+
+ dp = rindex(cp, '/');
+ if (dp == 0)
+ return 0;
+
+ snprintf(*dst, sizeof(*dst), "%.*s/r%s", (int) (dp - cp), cp, dp + 1);
+ return *dst;
+}
+
+
+/*
+ * Determine whether given pathname is a directory
+ * Returns 0 if the path is a directory, -1 if not
+ *
+ * Note: Opportunity here for more detailed error information but
+ * that requires changing callers of this function as well
+ */
+static int uv__path_is_a_directory(char* filename) {
+ struct stat statbuf;
+
+ if (stat(filename, &statbuf) < 0)
+ return -1; /* failed: not a directory, assume it is a file */
+
+ if (statbuf.st_type == VDIR)
+ return 0;
+
+ return -1;
+}
+
+
+/*
+ * Check whether AHAFS is mounted.
+ * Returns 0 if AHAFS is mounted, or an error code < 0 on failure
+ */
+static int uv__is_ahafs_mounted(void){
+ char rawbuf[FILENAME_MAX+1];
+ int rv, i = 2;
+ struct vmount *p;
+ int size_multiplier = 10;
+ size_t siz = sizeof(struct vmount)*size_multiplier;
+ struct vmount *vmt;
+ const char *dev = "/aha";
+ char *obj, *stub;
+
+ p = uv__malloc(siz);
+ if (p == NULL)
+ return UV__ERR(errno);
+
+ /* Retrieve all mounted filesystems */
+ rv = mntctl(MCTL_QUERY, siz, (char*)p);
+ if (rv < 0) {
+ uv__free(p);
+ return UV__ERR(errno);
+ }
+ if (rv == 0) {
+ /* buffer was not large enough, reallocate to correct size */
+ siz = *(int*)p;
+ uv__free(p);
+ p = uv__malloc(siz);
+ if (p == NULL)
+ return UV__ERR(errno);
+ rv = mntctl(MCTL_QUERY, siz, (char*)p);
+ if (rv < 0) {
+ uv__free(p);
+ return UV__ERR(errno);
+ }
+ }
+
+ /* Look for dev in filesystems mount info */
+ for(vmt = p, i = 0; i < rv; i++) {
+ obj = vmt2dataptr(vmt, VMT_OBJECT); /* device */
+ stub = vmt2dataptr(vmt, VMT_STUB); /* mount point */
+
+ if (EQ(obj, dev) || EQ(uv__rawname(obj, &rawbuf), dev) || EQ(stub, dev)) {
+ uv__free(p); /* Found a match */
+ return 0;
+ }
+ vmt = (struct vmount *) ((char *) vmt + vmt->vmt_length);
+ }
+
+ /* No match found. /aha is required for monitoring filesystem changes */
+ uv__free(p);
+ return -1;
+}
+
+/*
+ * Recursive call to mkdir() to create intermediate folders, if any
+ * Returns code from mkdir call
+ */
+static int uv__makedir_p(const char *dir) {
+ char tmp[256];
+ char *p = NULL;
+ size_t len;
+ int err;
+
+ /* TODO(bnoordhuis) Check uv__strscpy() return value. */
+ uv__strscpy(tmp, dir, sizeof(tmp));
+ len = strlen(tmp);
+ if (tmp[len - 1] == '/')
+ tmp[len - 1] = 0;
+ for (p = tmp + 1; *p; p++) {
+ if (*p == '/') {
+ *p = 0;
+ err = mkdir(tmp, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
+ if (err != 0 && errno != EEXIST)
+ return err;
+ *p = '/';
+ }
+ }
+ return mkdir(tmp, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
+}
+
+/*
+ * Creates necessary subdirectories in the AIX Event Infrastructure
+ * file system for monitoring the object specified.
+ * Returns code from mkdir call
+ */
+static int uv__make_subdirs_p(const char *filename) {
+ char cmd[2048];
+ char *p;
+ int rc = 0;
+
+ /* Strip off the monitor file name */
+ p = strrchr(filename, '/');
+
+ if (p == NULL)
+ return 0;
+
+ if (uv__path_is_a_directory((char*)filename) == 0) {
+ sprintf(cmd, "/aha/fs/modDir.monFactory");
+ } else {
+ sprintf(cmd, "/aha/fs/modFile.monFactory");
+ }
+
+ strncat(cmd, filename, (p - filename));
+ rc = uv__makedir_p(cmd);
+
+ if (rc == -1 && errno != EEXIST){
+ return UV__ERR(errno);
+ }
+
+ return rc;
+}
+
+
+/*
+ * Checks if /aha is mounted, then proceeds to set up the monitoring
+ * objects for the specified file.
+ * Returns 0 on success, or an error code < 0 on failure
+ */
+static int uv__setup_ahafs(const char* filename, int *fd) {
+ int rc = 0;
+ char mon_file_write_string[RDWR_BUF_SIZE];
+ char mon_file[PATH_MAX];
+ int file_is_directory = 0; /* 0 == is a directory, -1 == is not */
+
+ /* Create monitor file name for object */
+ file_is_directory = uv__path_is_a_directory((char*)filename);
+
+ if (file_is_directory == 0)
+ sprintf(mon_file, "/aha/fs/modDir.monFactory");
+ else
+ sprintf(mon_file, "/aha/fs/modFile.monFactory");
+
+ if ((strlen(mon_file) + strlen(filename) + 5) > PATH_MAX)
+ return UV_ENAMETOOLONG;
+
+ /* Make the necessary subdirectories for the monitor file */
+ rc = uv__make_subdirs_p(filename);
+ if (rc == -1 && errno != EEXIST)
+ return rc;
+
+ strcat(mon_file, filename);
+ strcat(mon_file, ".mon");
+
+ *fd = 0; errno = 0;
+
+ /* Open the monitor file, creating it if necessary */
+ *fd = open(mon_file, O_CREAT|O_RDWR);
+ if (*fd < 0)
+ return UV__ERR(errno);
+
+ /* Write out the monitoring specifications.
+ * In this case, we are monitoring for a state change event type
+ * CHANGED=YES
+ * We will be waiting in select call, rather than a read:
+ * WAIT_TYPE=WAIT_IN_SELECT
+ * We only want minimal information for files:
+ * INFO_LVL=1
+ * For directories, we want more information to track what file
+ * caused the change
+ * INFO_LVL=2
+ */
+
+ if (file_is_directory == 0)
+ sprintf(mon_file_write_string, "CHANGED=YES;WAIT_TYPE=WAIT_IN_SELECT;INFO_LVL=2");
+ else
+ sprintf(mon_file_write_string, "CHANGED=YES;WAIT_TYPE=WAIT_IN_SELECT;INFO_LVL=1");
+
+ rc = write(*fd, mon_file_write_string, strlen(mon_file_write_string)+1);
+ if (rc < 0 && errno != EBUSY)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+/*
+ * Skips a specified number of lines in the buffer passed in.
+ * Walks the buffer pointed to by p and attempts to skip n lines.
+ * Returns the total number of lines skipped
+ */
+static int uv__skip_lines(char **p, int n) {
+ int lines = 0;
+
+ while(n > 0) {
+ *p = strchr(*p, '\n');
+ if (*p == NULL)
+ return lines;
+
+ (*p)++;
+ n--;
+ lines++;
+ }
+ return lines;
+}
+
+
+/*
+ * Parse the event occurrence data to figure out what event just occurred
+ * and take proper action.
+ *
+ * The buf is a pointer to the buffer containing the event occurrence data
+ * Returns 0 on success, -1 if unrecoverable error in parsing
+ *
+ */
+static int uv__parse_data(char *buf, int *events, uv_fs_event_t* handle) {
+ int evp_rc, i;
+ char *p;
+ char filename[PATH_MAX]; /* To be used when handling directories */
+
+ p = buf;
+ *events = 0;
+
+ /* Clean the filename buffer*/
+ for(i = 0; i < PATH_MAX; i++) {
+ filename[i] = 0;
+ }
+ i = 0;
+
+ /* Check for BUF_WRAP */
+ if (strncmp(buf, "BUF_WRAP", strlen("BUF_WRAP")) == 0) {
+ assert(0 && "Buffer wrap detected, Some event occurrences lost!");
+ return 0;
+ }
+
+ /* Since we are using the default buffer size (4K), and have specified
+ * INFO_LVL=1, we won't see any EVENT_OVERFLOW conditions. Applications
+ * should check for this keyword if they are using an INFO_LVL of 2 or
+ * higher, and have a buffer size of <= 4K
+ */
+
+ /* Skip to RC_FROM_EVPROD */
+ if (uv__skip_lines(&p, 9) != 9)
+ return -1;
+
+ if (sscanf(p, "RC_FROM_EVPROD=%d\nEND_EVENT_DATA", &evp_rc) == 1) {
+ if (uv__path_is_a_directory(handle->path) == 0) { /* Directory */
+ if (evp_rc == AHAFS_MODDIR_UNMOUNT || evp_rc == AHAFS_MODDIR_REMOVE_SELF) {
+ /* The directory is no longer available for monitoring */
+ *events = UV_RENAME;
+ handle->dir_filename = NULL;
+ } else {
+ /* A file was added/removed inside the directory */
+ *events = UV_CHANGE;
+
+ /* Get the EVPROD_INFO */
+ if (uv__skip_lines(&p, 1) != 1)
+ return -1;
+
+ /* Scan out the name of the file that triggered the event*/
+ if (sscanf(p, "BEGIN_EVPROD_INFO\n%sEND_EVPROD_INFO", filename) == 1) {
+ handle->dir_filename = uv__strdup((const char*)&filename);
+ } else
+ return -1;
+ }
+ } else { /* Regular File */
+ if (evp_rc == AHAFS_MODFILE_RENAME)
+ *events = UV_RENAME;
+ else
+ *events = UV_CHANGE;
+ }
+ }
+ else
+ return -1;
+
+ return 0;
+}
+
+
+/* This is the internal callback */
+static void uv__ahafs_event(uv_loop_t* loop, uv__io_t* event_watch, unsigned int fflags) {
+ char result_data[RDWR_BUF_SIZE];
+ int bytes, rc = 0;
+ uv_fs_event_t* handle;
+ int events = 0;
+ char fname[PATH_MAX];
+ char *p;
+
+ handle = container_of(event_watch, uv_fs_event_t, event_watcher);
+
+ /* At this point, we assume that polling has been done on the
+ * file descriptor, so we can just read the AHAFS event occurrence
+ * data and parse its results without having to block anything
+ */
+ bytes = pread(event_watch->fd, result_data, RDWR_BUF_SIZE, 0);
+
+ assert((bytes >= 0) && "uv__ahafs_event - Error reading monitor file");
+
+ /* In file / directory move cases, AIX Event infrastructure
+ * produces a second event with no data.
+ * Ignore it and return gracefully.
+ */
+ if(bytes == 0)
+ return;
+
+ /* Parse the data */
+ if(bytes > 0)
+ rc = uv__parse_data(result_data, &events, handle);
+
+ /* Unrecoverable error */
+ if (rc == -1)
+ return;
+
+ /* For directory changes, the name of the files that triggered the change
+ * are never absolute pathnames
+ */
+ if (uv__path_is_a_directory(handle->path) == 0) {
+ p = handle->dir_filename;
+ } else {
+ p = strrchr(handle->path, '/');
+ if (p == NULL)
+ p = handle->path;
+ else
+ p++;
+ }
+
+ /* TODO(bnoordhuis) Check uv__strscpy() return value. */
+ uv__strscpy(fname, p, sizeof(fname));
+
+ handle->cb(handle, fname, events, 0);
+}
+#endif
+
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+#ifdef HAVE_SYS_AHAFS_EVPRODS_H
+ uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
+ return 0;
+#else
+ return UV_ENOSYS;
+#endif
+}
+
+
+int uv_fs_event_start(uv_fs_event_t* handle,
+ uv_fs_event_cb cb,
+ const char* filename,
+ unsigned int flags) {
+#ifdef HAVE_SYS_AHAFS_EVPRODS_H
+ int fd, rc, str_offset = 0;
+ char cwd[PATH_MAX];
+ char absolute_path[PATH_MAX];
+ char readlink_cwd[PATH_MAX];
+ struct timeval zt;
+ fd_set pollfd;
+
+
+ /* Figure out whether filename is absolute or not */
+ if (filename[0] == '\0') {
+ /* Missing a pathname */
+ return UV_ENOENT;
+ }
+ else if (filename[0] == '/') {
+ /* We have absolute pathname */
+ /* TODO(bnoordhuis) Check uv__strscpy() return value. */
+ uv__strscpy(absolute_path, filename, sizeof(absolute_path));
+ } else {
+ /* We have a relative pathname, compose the absolute pathname */
+ snprintf(cwd, sizeof(cwd), "/proc/%lu/cwd", (unsigned long) getpid());
+ rc = readlink(cwd, readlink_cwd, sizeof(readlink_cwd) - 1);
+ if (rc < 0)
+ return UV__ERR(errno);
+ /* readlink does not null terminate our string */
+ readlink_cwd[rc] = '\0';
+
+ if (filename[0] == '.' && filename[1] == '/')
+ str_offset = 2;
+
+ snprintf(absolute_path, sizeof(absolute_path), "%s%s", readlink_cwd,
+ filename + str_offset);
+ }
+
+ if (uv__is_ahafs_mounted() < 0) /* /aha checks failed */
+ return UV_ENOSYS;
+
+ /* Setup ahafs */
+ rc = uv__setup_ahafs((const char *)absolute_path, &fd);
+ if (rc != 0)
+ return rc;
+
+ /* Setup/Initialize all the libuv routines */
+ uv__handle_start(handle);
+ uv__io_init(&handle->event_watcher, uv__ahafs_event, fd);
+ handle->path = uv__strdup(filename);
+ handle->cb = cb;
+ handle->dir_filename = NULL;
+
+ uv__io_start(handle->loop, &handle->event_watcher, POLLIN);
+
+ /* AHAFS requires someone to poll the monitor fd before it starts
+ * monitoring, so kick-start it here to avoid missing an event that
+ * occurs during the current loop iteration. */
+ do {
+ memset(&zt, 0, sizeof(zt));
+ FD_ZERO(&pollfd);
+ FD_SET(fd, &pollfd);
+ rc = select(fd + 1, &pollfd, NULL, NULL, &zt);
+ } while (rc == -1 && errno == EINTR);
+ return 0;
+#else
+ return UV_ENOSYS;
+#endif
+}
+
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+#ifdef HAVE_SYS_AHAFS_EVPRODS_H
+ if (!uv__is_active(handle))
+ return 0;
+
+ uv__io_close(handle->loop, &handle->event_watcher);
+ uv__handle_stop(handle);
+
+ if (uv__path_is_a_directory(handle->path) == 0) {
+ uv__free(handle->dir_filename);
+ handle->dir_filename = NULL;
+ }
+
+ uv__free(handle->path);
+ handle->path = NULL;
+ uv__close(handle->event_watcher.fd);
+ handle->event_watcher.fd = -1;
+
+ return 0;
+#else
+ return UV_ENOSYS;
+#endif
+}
+
+
+void uv__fs_event_close(uv_fs_event_t* handle) {
+#ifdef HAVE_SYS_AHAFS_EVPRODS_H
+ uv_fs_event_stop(handle);
+#else
+ UNREACHABLE();
+#endif
+}
+
+
+char** uv_setup_args(int argc, char** argv) {
+ char exepath[UV__PATH_MAX];
+ char** new_argv;
+ size_t size;
+ char* s;
+ int i;
+
+ if (argc <= 0)
+ return argv;
+
+ /* Save the original pointer to argv.
+ * AIX uses argv to read the process name.
+ * (Not the memory pointed to by argv[0..n] as on Linux.)
+ */
+ process_argv = argv;
+ process_argc = argc;
+
+ /* Use argv[0] to determine value for uv_exepath(). */
+ size = sizeof(exepath);
+ if (uv__search_path(argv[0], exepath, &size) == 0) {
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+ original_exepath = uv__strdup(exepath);
+ uv_mutex_unlock(&process_title_mutex);
+ }
+
+ /* Calculate how much memory we need for the argv strings. */
+ size = 0;
+ for (i = 0; i < argc; i++)
+ size += strlen(argv[i]) + 1;
+
+ /* Add space for the argv pointers. */
+ size += (argc + 1) * sizeof(char*);
+
+ new_argv = uv__malloc(size);
+ if (new_argv == NULL)
+ return argv;
+ args_mem = new_argv;
+
+ /* Copy over the strings and set up the pointer table. */
+ s = (char*) &new_argv[argc + 1];
+ for (i = 0; i < argc; i++) {
+ size = strlen(argv[i]) + 1;
+ memcpy(s, argv[i], size);
+ new_argv[i] = s;
+ s += size;
+ }
+ new_argv[i] = NULL;
+
+ return new_argv;
+}
+
+
+int uv_set_process_title(const char* title) {
+ char* new_title;
+
+ /* If uv_setup_args wasn't called or failed, we can't continue. */
+ if (process_argv == NULL || args_mem == NULL)
+ return UV_ENOBUFS;
+
+ /* We cannot free this pointer when libuv shuts down,
+ * the process may still be using it.
+ */
+ new_title = uv__strdup(title);
+ if (new_title == NULL)
+ return UV_ENOMEM;
+
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
+ /* Free the previously set title, if any. On the first
+ * call there is nothing to free yet.
+ */
+ if (process_title_ptr != NULL)
+ uv__free(process_title_ptr);
+
+ process_title_ptr = new_title;
+
+ process_argv[0] = process_title_ptr;
+ if (process_argc > 1)
+ process_argv[1] = NULL;
+
+ uv_mutex_unlock(&process_title_mutex);
+
+ return 0;
+}
+
+
+int uv_get_process_title(char* buffer, size_t size) {
+ size_t len;
+ if (buffer == NULL || size == 0)
+ return UV_EINVAL;
+
+ /* If uv_setup_args wasn't called, we can't continue. */
+ if (process_argv == NULL)
+ return UV_ENOBUFS;
+
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
+ len = strlen(process_argv[0]);
+ if (size <= len) {
+ uv_mutex_unlock(&process_title_mutex);
+ return UV_ENOBUFS;
+ }
+
+ memcpy(buffer, process_argv[0], len);
+ buffer[len] = '\0';
+
+ uv_mutex_unlock(&process_title_mutex);
+
+ return 0;
+}
+
+
+void uv__process_title_cleanup(void) {
+ uv__free(args_mem); /* Keep valgrind happy. */
+ args_mem = NULL;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+ char pp[64];
+ psinfo_t psinfo;
+ int err;
+ int fd;
+
+ snprintf(pp, sizeof(pp), "/proc/%lu/psinfo", (unsigned long) getpid());
+
+ fd = open(pp, O_RDONLY);
+ if (fd == -1)
+ return UV__ERR(errno);
+
+ /* FIXME(bnoordhuis) Handle EINTR. */
+ err = UV_EINVAL;
+ if (read(fd, &psinfo, sizeof(psinfo)) == sizeof(psinfo)) {
+ *rss = (size_t)psinfo.pr_rssize * 1024;
+ err = 0;
+ }
+ uv__close(fd);
+
+ return err;
+}
+
+
+int uv_uptime(double* uptime) {
+ struct utmp *utmp_buf;
+ size_t entries = 0;
+ time_t boot_time;
+
+ boot_time = 0;
+ utmpname(UTMP_FILE);
+
+ setutent();
+
+ while ((utmp_buf = getutent()) != NULL) {
+ if (utmp_buf->ut_user[0] && utmp_buf->ut_type == USER_PROCESS)
+ ++entries;
+ if (utmp_buf->ut_type == BOOT_TIME)
+ boot_time = utmp_buf->ut_time;
+ }
+
+ endutent();
+
+ if (boot_time == 0)
+ return UV_ENOSYS;
+
+ *uptime = time(NULL) - boot_time;
+ return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ uv_cpu_info_t* cpu_info;
+ perfstat_cpu_total_t ps_total;
+ perfstat_cpu_t* ps_cpus;
+ perfstat_id_t cpu_id;
+ int result, ncpus, idx = 0;
+
+ result = perfstat_cpu_total(NULL, &ps_total, sizeof(ps_total), 1);
+ if (result == -1) {
+ return UV_ENOSYS;
+ }
+
+ ncpus = result = perfstat_cpu(NULL, NULL, sizeof(perfstat_cpu_t), 0);
+ if (result == -1) {
+ return UV_ENOSYS;
+ }
+
+ ps_cpus = (perfstat_cpu_t*) uv__malloc(ncpus * sizeof(perfstat_cpu_t));
+ if (!ps_cpus) {
+ return UV_ENOMEM;
+ }
+
+ /* TODO(bnoordhuis) Check uv__strscpy() return value. */
+ uv__strscpy(cpu_id.name, FIRST_CPU, sizeof(cpu_id.name));
+ result = perfstat_cpu(&cpu_id, ps_cpus, sizeof(perfstat_cpu_t), ncpus);
+ if (result == -1) {
+ uv__free(ps_cpus);
+ return UV_ENOSYS;
+ }
+
+ *cpu_infos = (uv_cpu_info_t*) uv__malloc(ncpus * sizeof(uv_cpu_info_t));
+ if (!*cpu_infos) {
+ uv__free(ps_cpus);
+ return UV_ENOMEM;
+ }
+
+ *count = ncpus;
+
+ cpu_info = *cpu_infos;
+ while (idx < ncpus) {
+ cpu_info->speed = (int)(ps_total.processorHZ / 1000000);
+ cpu_info->model = uv__strdup(ps_total.description);
+ cpu_info->cpu_times.user = ps_cpus[idx].user;
+ cpu_info->cpu_times.sys = ps_cpus[idx].sys;
+ cpu_info->cpu_times.idle = ps_cpus[idx].idle;
+ cpu_info->cpu_times.irq = ps_cpus[idx].wait;
+ cpu_info->cpu_times.nice = 0;
+ cpu_info++;
+ idx++;
+ }
+
+ uv__free(ps_cpus);
+ return 0;
+}
+
+
+int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
+ uv_interface_address_t* address;
+ int sockfd, sock6fd, inet6, i, r, size = 1;
+ struct ifconf ifc;
+ struct ifreq *ifr, *p, flg;
+ struct in6_ifreq if6;
+ struct sockaddr_dl* sa_addr;
+
+ ifc.ifc_req = NULL;
+ sock6fd = -1;
+ r = 0;
+ *count = 0;
+ *addresses = NULL;
+
+ if (0 > (sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP))) {
+ r = UV__ERR(errno);
+ goto cleanup;
+ }
+
+ if (0 > (sock6fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_IP))) {
+ r = UV__ERR(errno);
+ goto cleanup;
+ }
+
+ if (ioctl(sockfd, SIOCGSIZIFCONF, &size) == -1) {
+ r = UV__ERR(errno);
+ goto cleanup;
+ }
+
+ ifc.ifc_req = (struct ifreq*)uv__malloc(size);
+ if (ifc.ifc_req == NULL) {
+ r = UV_ENOMEM;
+ goto cleanup;
+ }
+ ifc.ifc_len = size;
+ if (ioctl(sockfd, SIOCGIFCONF, &ifc) == -1) {
+ r = UV__ERR(errno);
+ goto cleanup;
+ }
+
+#define ADDR_SIZE(p) MAX((p).sa_len, sizeof(p))
+
+ /* Count all up and running ipv4/ipv6 addresses */
+ ifr = ifc.ifc_req;
+ while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
+ p = ifr;
+ ifr = (struct ifreq*)
+ ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));
+
+ if (!(p->ifr_addr.sa_family == AF_INET6 ||
+ p->ifr_addr.sa_family == AF_INET))
+ continue;
+
+ memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
+ if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) {
+ r = UV__ERR(errno);
+ goto cleanup;
+ }
+
+ if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING))
+ continue;
+
+ (*count)++;
+ }
+
+ if (*count == 0)
+ goto cleanup;
+
+ /* Alloc the return interface structs */
+ *addresses = uv__calloc(*count, sizeof(**addresses));
+ if (!(*addresses)) {
+ r = UV_ENOMEM;
+ goto cleanup;
+ }
+ address = *addresses;
+
+ ifr = ifc.ifc_req;
+ while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
+ p = ifr;
+ ifr = (struct ifreq*)
+ ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));
+
+ if (!(p->ifr_addr.sa_family == AF_INET6 ||
+ p->ifr_addr.sa_family == AF_INET))
+ continue;
+
+ inet6 = (p->ifr_addr.sa_family == AF_INET6);
+
+ memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
+ if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1)
+ goto syserror;
+
+ if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING))
+ continue;
+
+ /* All conditions above must match count loop */
+
+ address->name = uv__strdup(p->ifr_name);
+
+ if (inet6)
+ address->address.address6 = *((struct sockaddr_in6*) &p->ifr_addr);
+ else
+ address->address.address4 = *((struct sockaddr_in*) &p->ifr_addr);
+
+ if (inet6) {
+ memset(&if6, 0, sizeof(if6));
+ r = uv__strscpy(if6.ifr_name, p->ifr_name, sizeof(if6.ifr_name));
+ if (r == UV_E2BIG)
+ goto cleanup;
+ r = 0;
+ memcpy(&if6.ifr_Addr, &p->ifr_addr, sizeof(if6.ifr_Addr));
+ if (ioctl(sock6fd, SIOCGIFNETMASK6, &if6) == -1)
+ goto syserror;
+ address->netmask.netmask6 = *((struct sockaddr_in6*) &if6.ifr_Addr);
+ /* Explicitly set family as the ioctl call appears to return it as 0. */
+ address->netmask.netmask6.sin6_family = AF_INET6;
+ } else {
+ if (ioctl(sockfd, SIOCGIFNETMASK, p) == -1)
+ goto syserror;
+ address->netmask.netmask4 = *((struct sockaddr_in*) &p->ifr_addr);
+ /* Explicitly set family as the ioctl call appears to return it as 0. */
+ address->netmask.netmask4.sin_family = AF_INET;
+ }
+
+ address->is_internal = flg.ifr_flags & IFF_LOOPBACK ? 1 : 0;
+
+ address++;
+ }
+
+ /* Fill in physical addresses. */
+ ifr = ifc.ifc_req;
+ while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
+ p = ifr;
+ ifr = (struct ifreq*)
+ ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));
+
+ if (p->ifr_addr.sa_family != AF_LINK)
+ continue;
+
+ address = *addresses;
+ for (i = 0; i < *count; i++) {
+ if (strcmp(address->name, p->ifr_name) == 0) {
+ sa_addr = (struct sockaddr_dl*) &p->ifr_addr;
+ memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
+ }
+ address++;
+ }
+ }
+
+#undef ADDR_SIZE
+ goto cleanup;
+
+syserror:
+ uv_free_interface_addresses(*addresses, *count);
+ *addresses = NULL;
+ *count = 0;
+ r = UV_ENOSYS;
+
+cleanup:
+ if (sockfd != -1)
+ uv__close(sockfd);
+ if (sock6fd != -1)
+ uv__close(sock6fd);
+ uv__free(ifc.ifc_req);
+ return r;
+}
+
+
+void uv_free_interface_addresses(uv_interface_address_t* addresses,
+ int count) {
+ int i;
+
+ for (i = 0; i < count; ++i) {
+ uv__free(addresses[i].name);
+ }
+
+ uv__free(addresses);
+}
+
+
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
+ struct pollfd* events;
+ uintptr_t i;
+ uintptr_t nfds;
+ struct poll_ctl pc;
+
+ assert(loop->watchers != NULL);
+ assert(fd >= 0);
+
+ events = (struct pollfd*) loop->watchers[loop->nwatchers];
+ nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
+
+ if (events != NULL)
+ /* Invalidate events with same file descriptor */
+ for (i = 0; i < nfds; i++)
+ if ((int) events[i].fd == fd)
+ events[i].fd = -1;
+
+ /* Remove the file descriptor from the poll set */
+ pc.events = 0;
+ pc.cmd = PS_DELETE;
+ pc.fd = fd;
+ if(loop->backend_fd >= 0)
+ pollset_ctl(loop->backend_fd, &pc, 1);
+}
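
The pollset calls used throughout aix.c are AIX-specific. As a standalone illustration of the same pollset_create()/pollset_ctl()/pollset_poll() cycle, a minimal wait-for-readable helper might look like this (a sketch only; it compiles only on AIX, and the fd and timeout are assumed to be supplied by the caller):

#include <sys/poll.h>
#include <sys/pollset.h>

int wait_readable(int fd, int timeout_ms) {
  struct poll_ctl pc;
  struct pollfd ev;
  pollset_t ps;
  int nfds;

  ps = pollset_create(-1);  /* limit comes from ulimit, as above */
  if (ps < 0)
    return -1;

  pc.cmd = PS_ADD;          /* register interest in readability */
  pc.events = POLLIN;
  pc.fd = fd;
  if (pollset_ctl(ps, &pc, 1) != 0) {
    pollset_destroy(ps);
    return -1;
  }

  /* Returns the number of ready descriptors, 0 on timeout. */
  nfds = pollset_poll(ps, &ev, 1, timeout_ms);
  pollset_destroy(ps);
  return nfds;
}
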
diff --git a/Utilities/cmlibuv/src/unix/async.c b/Utilities/cmlibuv/src/unix/async.c
new file mode 100644
index 0000000000..e1805c3237
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/async.c
@@ -0,0 +1,253 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/* This file contains both the uv__async internal infrastructure and the
+ * user-facing uv_async_t functions.
+ */
+
+#include "uv.h"
+#include "internal.h"
+#include "atomic-ops.h"
+
+#include <errno.h>
+#include <stdio.h> /* snprintf() */
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sched.h> /* sched_yield() */
+
+#ifdef __linux__
+#include <sys/eventfd.h>
+#endif
+
+static void uv__async_send(uv_loop_t* loop);
+static int uv__async_start(uv_loop_t* loop);
+
+
+int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
+ int err;
+
+ err = uv__async_start(loop);
+ if (err)
+ return err;
+
+ uv__handle_init(loop, (uv_handle_t*)handle, UV_ASYNC);
+ handle->async_cb = async_cb;
+ handle->pending = 0;
+
+ QUEUE_INSERT_TAIL(&loop->async_handles, &handle->queue);
+ uv__handle_start(handle);
+
+ return 0;
+}
+
+
+int uv_async_send(uv_async_t* handle) {
+ /* Do a cheap read first. */
+ if (ACCESS_ONCE(int, handle->pending) != 0)
+ return 0;
+
+ /* Tell the other thread we're busy with the handle. */
+ if (cmpxchgi(&handle->pending, 0, 1) != 0)
+ return 0;
+
+ /* Wake up the other thread's event loop. */
+ uv__async_send(handle->loop);
+
+ /* Tell the other thread we're done. */
+ if (cmpxchgi(&handle->pending, 1, 2) != 1)
+ abort();
+
+ return 0;
+}
+
+
+/* Only call this from the event loop thread. */
+static int uv__async_spin(uv_async_t* handle) {
+ int i;
+ int rc;
+
+ for (;;) {
+ /* 997 is not completely chosen at random. It's a prime number, acyclical
+ * by nature, and should therefore hopefully dampen sympathetic resonance.
+ */
+ for (i = 0; i < 997; i++) {
+ /* rc=0 -- handle is not pending.
+ * rc=1 -- handle is pending, other thread is still working with it.
+ * rc=2 -- handle is pending, other thread is done.
+ */
+ rc = cmpxchgi(&handle->pending, 2, 0);
+
+ if (rc != 1)
+ return rc;
+
+ /* Other thread is busy with this handle, spin until it's done. */
+ cpu_relax();
+ }
+
+ /* Yield the CPU. We may have preempted the other thread while it's
+ * inside the critical section and if it's running on the same CPU
+ * as us, we'll just burn CPU cycles until the end of our time slice.
+ */
+ sched_yield();
+ }
+}
+
+
+void uv__async_close(uv_async_t* handle) {
+ uv__async_spin(handle);
+ QUEUE_REMOVE(&handle->queue);
+ uv__handle_stop(handle);
+}
+
+
+static void uv__async_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
+ char buf[1024];
+ ssize_t r;
+ QUEUE queue;
+ QUEUE* q;
+ uv_async_t* h;
+
+ assert(w == &loop->async_io_watcher);
+
+ for (;;) {
+ r = read(w->fd, buf, sizeof(buf));
+
+ if (r == sizeof(buf))
+ continue;
+
+ if (r != -1)
+ break;
+
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ break;
+
+ if (errno == EINTR)
+ continue;
+
+ abort();
+ }
+
+ QUEUE_MOVE(&loop->async_handles, &queue);
+ while (!QUEUE_EMPTY(&queue)) {
+ q = QUEUE_HEAD(&queue);
+ h = QUEUE_DATA(q, uv_async_t, queue);
+
+ QUEUE_REMOVE(q);
+ QUEUE_INSERT_TAIL(&loop->async_handles, q);
+
+ if (0 == uv__async_spin(h))
+ continue; /* Not pending. */
+
+ if (h->async_cb == NULL)
+ continue;
+
+ h->async_cb(h);
+ }
+}
+
+
+static void uv__async_send(uv_loop_t* loop) {
+ const void* buf;
+ ssize_t len;
+ int fd;
+ int r;
+
+ buf = "";
+ len = 1;
+ fd = loop->async_wfd;
+
+#if defined(__linux__)
+ if (fd == -1) {
+ static const uint64_t val = 1;
+ buf = &val;
+ len = sizeof(val);
+ fd = loop->async_io_watcher.fd; /* eventfd */
+ }
+#endif
+
+ do
+ r = write(fd, buf, len);
+ while (r == -1 && errno == EINTR);
+
+ if (r == len)
+ return;
+
+ if (r == -1)
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ return;
+
+ abort();
+}
+
+
+static int uv__async_start(uv_loop_t* loop) {
+ int pipefd[2];
+ int err;
+
+ if (loop->async_io_watcher.fd != -1)
+ return 0;
+
+#ifdef __linux__
+ err = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+ if (err < 0)
+ return UV__ERR(errno);
+
+ pipefd[0] = err;
+ pipefd[1] = -1;
+#else
+ err = uv__make_pipe(pipefd, UV_NONBLOCK_PIPE);
+ if (err < 0)
+ return err;
+#endif
+
+ uv__io_init(&loop->async_io_watcher, uv__async_io, pipefd[0]);
+ uv__io_start(loop, &loop->async_io_watcher, POLLIN);
+ loop->async_wfd = pipefd[1];
+
+ return 0;
+}
+
+
+int uv__async_fork(uv_loop_t* loop) {
+ if (loop->async_io_watcher.fd == -1) /* never started */
+ return 0;
+
+ uv__async_stop(loop);
+
+ return uv__async_start(loop);
+}
+
+
+void uv__async_stop(uv_loop_t* loop) {
+ if (loop->async_io_watcher.fd == -1)
+ return;
+
+ if (loop->async_wfd != -1) {
+ if (loop->async_wfd != loop->async_io_watcher.fd)
+ uv__close(loop->async_wfd);
+ loop->async_wfd = -1;
+ }
+
+ uv__io_stop(loop, &loop->async_io_watcher, POLLIN);
+ uv__close(loop->async_io_watcher.fd);
+ loop->async_io_watcher.fd = -1;
+}
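
For context, the user-facing side of this machinery is the uv_async_t pattern: uv_async_send() is the one libuv call that is safe from any thread, and it wakes the loop through the pipe/eventfd set up above. A minimal sketch (hypothetical names, not part of this patch):

#include <uv.h>

static uv_async_t async_handle;

static void async_cb(uv_async_t* handle) {
  /* Runs on the loop thread. Several uv_async_send() calls may be
   * coalesced into a single callback invocation. */
  uv_close((uv_handle_t*) handle, NULL);  /* let the loop drain */
}

static void worker(void* arg) {
  uv_async_send(&async_handle);  /* safe to call from any thread */
}

int main(void) {
  uv_thread_t tid;
  uv_loop_t* loop = uv_default_loop();

  uv_async_init(loop, &async_handle, async_cb);
  uv_thread_create(&tid, worker, NULL);
  uv_run(loop, UV_RUN_DEFAULT);
  uv_thread_join(&tid);
  return 0;
}
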
diff --git a/Utilities/cmlibuv/src/unix/atomic-ops.h b/Utilities/cmlibuv/src/unix/atomic-ops.h
new file mode 100644
index 0000000000..2b58162153
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/atomic-ops.h
@@ -0,0 +1,70 @@
+/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef UV_ATOMIC_OPS_H_
+#define UV_ATOMIC_OPS_H_
+
+#include "internal.h" /* UV_UNUSED */
+
+#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+#include <atomic.h>
+#endif
+
+UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval));
+UV_UNUSED(static void cpu_relax(void));
+
+/* Prefer hand-rolled assembly over the gcc builtins because the latter also
+ * issue full memory barriers.
+ */
+UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval)) {
+#if defined(__i386__) || defined(__x86_64__)
+ int out;
+ __asm__ __volatile__ ("lock; cmpxchg %2, %1;"
+ : "=a" (out), "+m" (*(volatile int*) ptr)
+ : "r" (newval), "0" (oldval)
+ : "memory");
+ return out;
+#elif defined(_AIX) && defined(__ibmxl__)
+ /* FIXME: This is not actually atomic but XLClang 16.1 for AIX
+ does not provide __sync_val_compare_and_swap or an equivalent.
+ Its documentation suggests using C++11 atomics but this is C. */
+ __compare_and_swap((volatile int*)ptr, &oldval, newval);
+ return oldval;
+#elif defined(__MVS__)
+ /* Use hand-rolled assembly because codegen from builtin __plo_CSST results in
+ * a runtime bug.
+ */
+ __asm(" cs %0,%2,%1 \n " : "+r"(oldval), "+m"(*ptr) : "r"(newval) :);
+ return oldval;
+#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+ return atomic_cas_uint((uint_t *)ptr, (uint_t)oldval, (uint_t)newval);
+#else
+ return __sync_val_compare_and_swap(ptr, oldval, newval);
+#endif
+}
+
+UV_UNUSED(static void cpu_relax(void)) {
+#if defined(__i386__) || defined(__x86_64__)
+ __asm__ __volatile__ ("rep; nop" ::: "memory"); /* a.k.a. PAUSE */
+#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__)
+ __asm__ __volatile__ ("yield" ::: "memory");
+#elif (defined(__ppc__) || defined(__ppc64__)) && defined(__APPLE__)
+ __asm volatile ("" : : : "memory");
+#elif !defined(__APPLE__) && (defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__))
+ __asm__ __volatile__ ("or 1,1,1; or 2,2,2" ::: "memory");
+#endif
+}
+
+#endif /* UV_ATOMIC_OPS_H_ */
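
To illustrate the intended semantics: cmpxchgi() returns the value previously stored at *ptr, which is enough to build a toy test-and-set lock like the sketch below (illustration only; the plain-store unlock assumes a total-store-order target, and real code would use a release store, as libuv's own spinlock.h does with more care):

#include "atomic-ops.h"

static int lock_word;  /* 0 == unlocked, 1 == locked */

static void spin_lock(void) {
  /* The swap succeeds when the previous value was 0, i.e. we
   * observed the lock free and took it in one atomic step. */
  while (cmpxchgi(&lock_word, 0, 1) != 0)
    cpu_relax();  /* back off without a full memory barrier */
}

static void spin_unlock(void) {
  lock_word = 0;  /* plain store; see caveat above */
}
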
diff --git a/Utilities/cmlibuv/src/unix/bsd-ifaddrs.c b/Utilities/cmlibuv/src/unix/bsd-ifaddrs.c
new file mode 100644
index 0000000000..11ca95591f
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/bsd-ifaddrs.c
@@ -0,0 +1,163 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <errno.h>
+#include <stddef.h>
+
+#include <ifaddrs.h>
+#include <net/if.h>
+#if !defined(__CYGWIN__) && !defined(__MSYS__) && !defined(__GNU__)
+#include <net/if_dl.h>
+#endif
+
+#if defined(__HAIKU__)
+#define IFF_RUNNING IFF_LINK
+#endif
+
+static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
+ if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
+ return 1;
+ if (ent->ifa_addr == NULL)
+ return 1;
+#if !defined(__CYGWIN__) && !defined(__MSYS__) && !defined(__GNU__)
+ /*
+ * If `exclude_type` is `UV__EXCLUDE_IFPHYS`, return whether `sa_family`
+ * equals `AF_LINK`. Otherwise, the result depends on the operating
+ * system with `AF_LINK` or `PF_INET`.
+ */
+ if (exclude_type == UV__EXCLUDE_IFPHYS)
+ return (ent->ifa_addr->sa_family != AF_LINK);
+#endif
+#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__DragonFly__) || \
+ defined(__HAIKU__)
+ /*
+ * On BSD getifaddrs returns information related to the raw underlying
+ * devices. We're not interested in this information.
+ */
+ if (ent->ifa_addr->sa_family == AF_LINK)
+ return 1;
+#elif defined(__NetBSD__) || defined(__OpenBSD__)
+ if (ent->ifa_addr->sa_family != PF_INET &&
+ ent->ifa_addr->sa_family != PF_INET6)
+ return 1;
+#endif
+ return 0;
+}
+
+int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
+ struct ifaddrs* addrs;
+ struct ifaddrs* ent;
+ uv_interface_address_t* address;
+#if !(defined(__CYGWIN__) || defined(__MSYS__)) && !defined(__GNU__)
+ int i;
+#endif
+
+ *count = 0;
+ *addresses = NULL;
+
+ if (getifaddrs(&addrs) != 0)
+ return UV__ERR(errno);
+
+ /* Count the number of interfaces */
+ for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+ if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
+ continue;
+ (*count)++;
+ }
+
+ if (*count == 0) {
+ freeifaddrs(addrs);
+ return 0;
+ }
+
+ /* Make sure the memory is initialized to zero using calloc() */
+ *addresses = uv__calloc(*count, sizeof(**addresses));
+
+ if (*addresses == NULL) {
+ freeifaddrs(addrs);
+ return UV_ENOMEM;
+ }
+
+ address = *addresses;
+
+ for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+ if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
+ continue;
+
+ address->name = uv__strdup(ent->ifa_name);
+
+ if (ent->ifa_addr->sa_family == AF_INET6) {
+ address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
+ } else {
+ address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
+ }
+
+ if (ent->ifa_netmask == NULL) {
+ memset(&address->netmask, 0, sizeof(address->netmask));
+ } else if (ent->ifa_netmask->sa_family == AF_INET6) {
+ address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
+ } else {
+ address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
+ }
+
+ address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);
+
+ address++;
+ }
+
+#if !(defined(__CYGWIN__) || defined(__MSYS__)) && !defined(__GNU__)
+ /* Fill in physical addresses for each interface */
+ for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+ if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFPHYS))
+ continue;
+
+ address = *addresses;
+
+ for (i = 0; i < *count; i++) {
+ if (strcmp(address->name, ent->ifa_name) == 0) {
+ struct sockaddr_dl* sa_addr;
+ sa_addr = (struct sockaddr_dl*)(ent->ifa_addr);
+ memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
+ }
+ address++;
+ }
+ }
+#endif
+
+ freeifaddrs(addrs);
+
+ return 0;
+}
+
+
+void uv_free_interface_addresses(uv_interface_address_t* addresses,
+ int count) {
+ int i;
+
+ for (i = 0; i < count; i++) {
+ uv__free(addresses[i].name);
+ }
+
+ uv__free(addresses);
+}
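
A minimal consumer of this API (hypothetical driver, not part of the patch) that prints every reported address and then releases the array:

#include <stdio.h>
#include <uv.h>

int main(void) {
  uv_interface_address_t* info;
  int count;
  int i;
  char buf[64];

  if (uv_interface_addresses(&info, &count) != 0)
    return 1;

  for (i = 0; i < count; i++) {
    if (info[i].address.address4.sin_family == AF_INET)
      uv_ip4_name(&info[i].address.address4, buf, sizeof(buf));
    else
      uv_ip6_name(&info[i].address.address6, buf, sizeof(buf));
    printf("%s%s: %s\n", info[i].name,
           info[i].is_internal ? " (internal)" : "", buf);
  }

  uv_free_interface_addresses(info, count);
  return 0;
}
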
diff --git a/Utilities/cmlibuv/src/unix/bsd-proctitle.c b/Utilities/cmlibuv/src/unix/bsd-proctitle.c
new file mode 100644
index 0000000000..b0c01e2cb8
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/bsd-proctitle.c
@@ -0,0 +1,99 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <sys/types.h>
+#include <unistd.h>
+
+
+static uv_mutex_t process_title_mutex;
+static uv_once_t process_title_mutex_once = UV_ONCE_INIT;
+static char* process_title;
+
+
+static void init_process_title_mutex_once(void) {
+ if (uv_mutex_init(&process_title_mutex))
+ abort();
+}
+
+
+void uv__process_title_cleanup(void) {
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_destroy(&process_title_mutex);
+}
+
+
+char** uv_setup_args(int argc, char** argv) {
+ process_title = argc > 0 ? uv__strdup(argv[0]) : NULL;
+ return argv;
+}
+
+
+int uv_set_process_title(const char* title) {
+ char* new_title;
+
+ new_title = uv__strdup(title);
+ if (new_title == NULL)
+ return UV_ENOMEM;
+
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
+ uv__free(process_title);
+ process_title = new_title;
+ setproctitle("%s", title);
+
+ uv_mutex_unlock(&process_title_mutex);
+
+ return 0;
+}
+
+
+int uv_get_process_title(char* buffer, size_t size) {
+ size_t len;
+
+ if (buffer == NULL || size == 0)
+ return UV_EINVAL;
+
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
+ if (process_title != NULL) {
+ len = strlen(process_title) + 1;
+
+ if (size < len) {
+ uv_mutex_unlock(&process_title_mutex);
+ return UV_ENOBUFS;
+ }
+
+ memcpy(buffer, process_title, len);
+ } else {
+ len = 0;
+ }
+
+ uv_mutex_unlock(&process_title_mutex);
+
+ buffer[len] = '\0';
+
+ return 0;
+}
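
Usage follows the same shape on every platform; libuv requires uv_setup_args() to run before any title call. A minimal sketch (hypothetical, not part of the patch):

#include <string.h>
#include <uv.h>

int main(int argc, char** argv) {
  char title[64];

  argv = uv_setup_args(argc, argv);  /* must precede title calls */
  if (uv_set_process_title("my-worker") != 0)
    return 1;
  if (uv_get_process_title(title, sizeof(title)) != 0)
    return 1;
  return strcmp(title, "my-worker") != 0;  /* 0 on success */
}
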
diff --git a/Utilities/cmlibuv/src/unix/cmake-bootstrap.c b/Utilities/cmlibuv/src/unix/cmake-bootstrap.c
new file mode 100644
index 0000000000..394231d1be
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/cmake-bootstrap.c
@@ -0,0 +1,148 @@
+#include "uv.h"
+#include "internal.h"
+
+void uv__process_title_cleanup(void) {
+}
+
+void uv__threadpool_cleanup(void) {
+}
+
+int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) {
+ return -EINVAL;
+}
+
+void uv__udp_close(uv_udp_t* handle) {
+}
+
+void uv__udp_finish_close(uv_udp_t* handle) {
+}
+
+void uv__fs_poll_close(uv_fs_poll_t* handle) {
+}
+
+int uv_async_init(uv_loop_t* loop, uv_async_t* handle, uv_async_cb async_cb) {
+ return 0;
+}
+
+void uv__async_close(uv_async_t* handle) {
+}
+
+int uv__async_fork(uv_loop_t* loop) {
+ return 0;
+}
+
+void uv__async_stop(uv_loop_t* loop) {
+}
+
+void uv__work_submit(uv_loop_t* loop, struct uv__work* w,
+ enum uv__work_kind kind,
+ void (*work)(struct uv__work* w),
+ void (*done)(struct uv__work* w, int status)) {
+ abort();
+}
+
+void uv__work_done(uv_async_t* handle) {
+}
+
+int uv__pthread_atfork(void (*prepare)(void), void (*parent)(void),
+ void (*child)(void)) {
+ return 0;
+}
+
+int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset) {
+ return 0;
+}
+
+int uv_mutex_init(uv_mutex_t* mutex) {
+ return 0;
+}
+
+void uv_mutex_destroy(uv_mutex_t* mutex) {
+}
+
+void uv_mutex_lock(uv_mutex_t* mutex) {
+}
+
+void uv_mutex_unlock(uv_mutex_t* mutex) {
+}
+
+int uv_rwlock_init(uv_rwlock_t* rwlock) {
+ return 0;
+}
+
+void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
+}
+
+void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
+}
+
+void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
+}
+
+void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
+}
+
+void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
+}
+
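+/* Bootstrap stand-in for the real uv_once(): a plain flag with no atomics
+ * or locking, which is only sound because the CMake bootstrap build runs
+ * single-threaded (the mutex stubs above are no-ops for the same reason). */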
+void uv_once(uv_once_t* guard, void (*callback)(void)) {
+ if (*guard) {
+ return;
+ }
+ *guard = 1;
+ callback();
+}
+
+#if defined(__linux__)
+int uv__accept4(int fd, struct sockaddr* addr, socklen_t* addrlen, int flags) {
+ errno = ENOSYS;
+ return -1;
+}
+
+int uv__dup3(int oldfd, int newfd, int flags) {
+ errno = ENOSYS;
+ return -1;
+}
+
+int uv__pipe2(int pipefd[2], int flags) {
+ errno = ENOSYS;
+ return -1;
+}
+
+ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt,
+ int64_t offset) {
+ errno = ENOSYS;
+ return -1;
+}
+
+ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt,
+ int64_t offset) {
+ errno = ENOSYS;
+ return -1;
+}
+
+int uv__utimesat(int dirfd, const char* path, const struct timespec times[2],
+ int flags) {
+ errno = ENOSYS;
+ return -1;
+}
+
+int uv__statx(int dirfd,
+ const char* path,
+ int flags,
+ unsigned int mask,
+ struct uv__statx* statxbuf) {
+ errno = ENOSYS;
+ return -1;
+}
+#endif
+
+#if defined(__linux__) || defined(__FreeBSD__)
+ssize_t uv__fs_copy_file_range(int fd_in, off_t* off_in,
+ int fd_out, off_t* off_out,
+ size_t len, unsigned int flags)
+{
+ errno = ENOSYS;
+ return -1;
+}
+#endif
diff --git a/Utilities/cmlibuv/src/unix/core.c b/Utilities/cmlibuv/src/unix/core.c
new file mode 100644
index 0000000000..d0b0e0069b
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/core.c
@@ -0,0 +1,1682 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+#include "strtok.h"
+
+#include <stddef.h> /* NULL */
+#include <stdio.h> /* printf */
+#include <stdlib.h>
+#include <string.h> /* strerror */
+#include <errno.h>
+#include <assert.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h> /* O_CLOEXEC */
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <limits.h> /* INT_MAX, PATH_MAX, IOV_MAX */
+#include <sys/uio.h> /* writev */
+#include <sys/resource.h> /* getrusage */
+#include <pwd.h>
+#include <sched.h>
+#include <sys/utsname.h>
+#include <sys/time.h>
+
+#ifdef __sun
+# include <sys/filio.h>
+# include <sys/types.h>
+# include <sys/wait.h>
+#endif
+
+#if defined(__APPLE__)
+# include <sys/filio.h>
+#endif /* defined(__APPLE__) */
+
+
+#if defined(__APPLE__) && !TARGET_OS_IPHONE
+# include <crt_externs.h>
+# include <mach-o/dyld.h> /* _NSGetExecutablePath */
+# define environ (*_NSGetEnviron())
+#else /* defined(__APPLE__) && !TARGET_OS_IPHONE */
+extern char** environ;
+#endif /* !(defined(__APPLE__) && !TARGET_OS_IPHONE) */
+
+
+#if defined(__DragonFly__) || \
+ defined(__FreeBSD__) || \
+ defined(__FreeBSD_kernel__) || \
+ defined(__NetBSD__) || \
+ defined(__OpenBSD__)
+# include <sys/sysctl.h>
+# include <sys/filio.h>
+# include <sys/wait.h>
+# if defined(__FreeBSD__)
+# define uv__accept4 accept4
+# endif
+# if defined(__NetBSD__)
+# define uv__accept4(a, b, c, d) paccept((a), (b), (c), NULL, (d))
+# endif
+#endif
+
+#if defined(__FreeBSD__)
+# include <sys/param.h>
+# include <sys/cpuset.h>
+#endif
+
+#if defined(__MVS__)
+# include <sys/ioctl.h>
+# include "zos-sys-info.h"
+#endif
+
+#if defined(__linux__)
+# include <sched.h>
+# include <sys/syscall.h>
+# define uv__accept4 accept4
+#endif
+
+#if defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__)
+# include <sanitizer/linux_syscall_hooks.h>
+#endif
+
+static void uv__run_pending(uv_loop_t* loop);
+
+/* Verify that uv_buf_t is ABI-compatible with struct iovec. */
+STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
+STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->base) ==
+ sizeof(((struct iovec*) 0)->iov_base));
+STATIC_ASSERT(sizeof(((uv_buf_t*) 0)->len) ==
+ sizeof(((struct iovec*) 0)->iov_len));
+STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
+STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));
+
+
+uint64_t uv_hrtime(void) {
+ return uv__hrtime(UV_CLOCK_PRECISE);
+}
+
+
+void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
+ assert(!uv__is_closing(handle));
+
+ handle->flags |= UV_HANDLE_CLOSING;
+ handle->close_cb = close_cb;
+
+ switch (handle->type) {
+ case UV_NAMED_PIPE:
+ uv__pipe_close((uv_pipe_t*)handle);
+ break;
+
+ case UV_TTY:
+ uv__stream_close((uv_stream_t*)handle);
+ break;
+
+ case UV_TCP:
+ uv__tcp_close((uv_tcp_t*)handle);
+ break;
+
+ case UV_UDP:
+ uv__udp_close((uv_udp_t*)handle);
+ break;
+
+ case UV_PREPARE:
+ uv__prepare_close((uv_prepare_t*)handle);
+ break;
+
+ case UV_CHECK:
+ uv__check_close((uv_check_t*)handle);
+ break;
+
+ case UV_IDLE:
+ uv__idle_close((uv_idle_t*)handle);
+ break;
+
+ case UV_ASYNC:
+ uv__async_close((uv_async_t*)handle);
+ break;
+
+ case UV_TIMER:
+ uv__timer_close((uv_timer_t*)handle);
+ break;
+
+ case UV_PROCESS:
+ uv__process_close((uv_process_t*)handle);
+ break;
+
+ case UV_FS_EVENT:
+ uv__fs_event_close((uv_fs_event_t*)handle);
+#if defined(__sun) || defined(__MVS__)
+ /*
+ * On Solaris, illumos, and z/OS we will not be able to dissociate the
+ * watcher for an event which is pending delivery, so we cannot always call
+ * uv__make_close_pending() straight away. The backend will call the
+ * function once the event has cleared.
+ */
+ return;
+#endif
+ break;
+
+ case UV_POLL:
+ uv__poll_close((uv_poll_t*)handle);
+ break;
+
+ case UV_FS_POLL:
+ uv__fs_poll_close((uv_fs_poll_t*)handle);
+ /* Poll handles use file system requests, and one of them may still be
+ * running. The poll code will call uv__make_close_pending() for us. */
+ return;
+
+ case UV_SIGNAL:
+ uv__signal_close((uv_signal_t*) handle);
+ break;
+
+ default:
+ assert(0);
+ }
+
+ uv__make_close_pending(handle);
+}
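+
+/* Usage sketch (illustrative): the close callback is deferred; it fires from
+ * uv__run_closing_handles() on a later loop iteration, never synchronously
+ * from inside uv_close() itself.
+ *
+ *   static void on_close(uv_handle_t* h) { free(h); }
+ *   ...
+ *   uv_close((uv_handle_t*) tcp, on_close);
+ *   uv_run(loop, UV_RUN_DEFAULT);   (on_close runs in here)
+ */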
+
+int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
+ int r;
+ int fd;
+ socklen_t len;
+
+ if (handle == NULL || value == NULL)
+ return UV_EINVAL;
+
+ if (handle->type == UV_TCP || handle->type == UV_NAMED_PIPE)
+ fd = uv__stream_fd((uv_stream_t*) handle);
+ else if (handle->type == UV_UDP)
+ fd = ((uv_udp_t *) handle)->io_watcher.fd;
+ else
+ return UV_ENOTSUP;
+
+ len = sizeof(*value);
+
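+  /* By convention, a zero *value requests a query: the option is read into
+   * *value with getsockopt(); any other value is written with setsockopt(). */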
+ if (*value == 0)
+ r = getsockopt(fd, SOL_SOCKET, optname, value, &len);
+ else
+ r = setsockopt(fd, SOL_SOCKET, optname, (const void*) value, len);
+
+ if (r < 0)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+void uv__make_close_pending(uv_handle_t* handle) {
+ assert(handle->flags & UV_HANDLE_CLOSING);
+ assert(!(handle->flags & UV_HANDLE_CLOSED));
+ handle->next_closing = handle->loop->closing_handles;
+ handle->loop->closing_handles = handle;
+}
+
+int uv__getiovmax(void) {
+#if defined(IOV_MAX)
+ return IOV_MAX;
+#elif defined(_SC_IOV_MAX)
+ static int iovmax_cached = -1;
+ int iovmax;
+
+ iovmax = uv__load_relaxed(&iovmax_cached);
+ if (iovmax != -1)
+ return iovmax;
+
+  /* On some embedded devices (e.g. arm-linux-uclibc based IP cameras),
+   * sysconf(_SC_IOV_MAX) cannot report the correct value: it returns -1
+   * with errno set to EINPROGRESS. Degrade gracefully to a value of 1.
+   */
+ iovmax = sysconf(_SC_IOV_MAX);
+ if (iovmax == -1)
+ iovmax = 1;
+
+ uv__store_relaxed(&iovmax_cached, iovmax);
+
+ return iovmax;
+#else
+ return 1024;
+#endif
+}
+
+
+static void uv__finish_close(uv_handle_t* handle) {
+ uv_signal_t* sh;
+
+ /* Note: while the handle is in the UV_HANDLE_CLOSING state now, it's still
+ * possible for it to be active in the sense that uv__is_active() returns
+ * true.
+ *
+ * A good example is when the user calls uv_shutdown(), immediately followed
+ * by uv_close(). The handle is considered active at this point because the
+ * completion of the shutdown req is still pending.
+ */
+ assert(handle->flags & UV_HANDLE_CLOSING);
+ assert(!(handle->flags & UV_HANDLE_CLOSED));
+ handle->flags |= UV_HANDLE_CLOSED;
+
+ switch (handle->type) {
+ case UV_PREPARE:
+ case UV_CHECK:
+ case UV_IDLE:
+ case UV_ASYNC:
+ case UV_TIMER:
+ case UV_PROCESS:
+ case UV_FS_EVENT:
+ case UV_FS_POLL:
+ case UV_POLL:
+ break;
+
+ case UV_SIGNAL:
+ /* If there are any caught signals "trapped" in the signal pipe,
+ * we can't call the close callback yet. Reinserting the handle
+ * into the closing queue makes the event loop spin but that's
+ * okay because we only need to deliver the pending events.
+ */
+ sh = (uv_signal_t*) handle;
+ if (sh->caught_signals > sh->dispatched_signals) {
+ handle->flags ^= UV_HANDLE_CLOSED;
+ uv__make_close_pending(handle); /* Back into the queue. */
+ return;
+ }
+ break;
+
+ case UV_NAMED_PIPE:
+ case UV_TCP:
+ case UV_TTY:
+ uv__stream_destroy((uv_stream_t*)handle);
+ break;
+
+ case UV_UDP:
+ uv__udp_finish_close((uv_udp_t*)handle);
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+
+ uv__handle_unref(handle);
+ QUEUE_REMOVE(&handle->handle_queue);
+
+ if (handle->close_cb) {
+ handle->close_cb(handle);
+ }
+}
+
+
+static void uv__run_closing_handles(uv_loop_t* loop) {
+ uv_handle_t* p;
+ uv_handle_t* q;
+
+ p = loop->closing_handles;
+ loop->closing_handles = NULL;
+
+ while (p) {
+ q = p->next_closing;
+ uv__finish_close(p);
+ p = q;
+ }
+}
+
+
+int uv_is_closing(const uv_handle_t* handle) {
+ return uv__is_closing(handle);
+}
+
+
+int uv_backend_fd(const uv_loop_t* loop) {
+ return loop->backend_fd;
+}
+
+
+static int uv__loop_alive(const uv_loop_t* loop) {
+ return uv__has_active_handles(loop) ||
+ uv__has_active_reqs(loop) ||
+ !QUEUE_EMPTY(&loop->pending_queue) ||
+ loop->closing_handles != NULL;
+}
+
+
+static int uv__backend_timeout(const uv_loop_t* loop) {
+ if (loop->stop_flag == 0 &&
+ /* uv__loop_alive(loop) && */
+ (uv__has_active_handles(loop) || uv__has_active_reqs(loop)) &&
+ QUEUE_EMPTY(&loop->pending_queue) &&
+ QUEUE_EMPTY(&loop->idle_handles) &&
+ loop->closing_handles == NULL)
+ return uv__next_timeout(loop);
+ return 0;
+}
+
+
+int uv_backend_timeout(const uv_loop_t* loop) {
+ if (QUEUE_EMPTY(&loop->watcher_queue))
+ return uv__backend_timeout(loop);
+ /* Need to call uv_run to update the backend fd state. */
+ return 0;
+}
+
+
+int uv_loop_alive(const uv_loop_t* loop) {
+ return uv__loop_alive(loop);
+}
+
+
+int uv_run(uv_loop_t* loop, uv_run_mode mode) {
+ int timeout;
+ int r;
+ int can_sleep;
+
+ r = uv__loop_alive(loop);
+ if (!r)
+ uv__update_time(loop);
+
+ while (r != 0 && loop->stop_flag == 0) {
+ uv__update_time(loop);
+ uv__run_timers(loop);
+
+ can_sleep =
+ QUEUE_EMPTY(&loop->pending_queue) && QUEUE_EMPTY(&loop->idle_handles);
+
+ uv__run_pending(loop);
+ uv__run_idle(loop);
+ uv__run_prepare(loop);
+
+ timeout = 0;
+ if ((mode == UV_RUN_ONCE && can_sleep) || mode == UV_RUN_DEFAULT)
+ timeout = uv__backend_timeout(loop);
+
+ uv__io_poll(loop, timeout);
+
+    /* Process immediate callbacks (e.g. write_cb) a small fixed number of
+     * times to avoid loop starvation. */
+ for (r = 0; r < 8 && !QUEUE_EMPTY(&loop->pending_queue); r++)
+ uv__run_pending(loop);
+
+ /* Run one final update on the provider_idle_time in case uv__io_poll
+ * returned because the timeout expired, but no events were received. This
+ * call will be ignored if the provider_entry_time was either never set (if
+ * the timeout == 0) or was already updated b/c an event was received.
+ */
+ uv__metrics_update_idle_time(loop);
+
+ uv__run_check(loop);
+ uv__run_closing_handles(loop);
+
+ if (mode == UV_RUN_ONCE) {
+ /* UV_RUN_ONCE implies forward progress: at least one callback must have
+ * been invoked when it returns. uv__io_poll() can return without doing
+ * I/O (meaning: no callbacks) when its timeout expires - which means we
+ * have pending timers that satisfy the forward progress constraint.
+ *
+ * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
+ * the check.
+ */
+ uv__update_time(loop);
+ uv__run_timers(loop);
+ }
+
+ r = uv__loop_alive(loop);
+ if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
+ break;
+ }
+
+ /* The if statement lets gcc compile it to a conditional store. Avoids
+ * dirtying a cache line.
+ */
+ if (loop->stop_flag != 0)
+ loop->stop_flag = 0;
+
+ return r;
+}
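+
+/* Usage sketch (illustrative): how the three run modes are typically driven.
+ *
+ *   uv_run(loop, UV_RUN_DEFAULT);         (block until no live handles/reqs)
+ *   uv_run(loop, UV_RUN_ONCE);            (one iteration, may block in poll)
+ *   while (uv_run(loop, UV_RUN_NOWAIT))   (one non-blocking pass at a time)
+ *     do_other_work();                    (do_other_work is hypothetical)
+ */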
+
+
+void uv_update_time(uv_loop_t* loop) {
+ uv__update_time(loop);
+}
+
+
+int uv_is_active(const uv_handle_t* handle) {
+ return uv__is_active(handle);
+}
+
+
+/* Open a socket in non-blocking close-on-exec mode, atomically if possible. */
+int uv__socket(int domain, int type, int protocol) {
+ int sockfd;
+ int err;
+
+#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
+ sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol);
+ if (sockfd != -1)
+ return sockfd;
+
+ if (errno != EINVAL)
+ return UV__ERR(errno);
+#endif
+
+ sockfd = socket(domain, type, protocol);
+ if (sockfd == -1)
+ return UV__ERR(errno);
+
+ err = uv__nonblock(sockfd, 1);
+ if (err == 0)
+ err = uv__cloexec(sockfd, 1);
+
+ if (err) {
+ uv__close(sockfd);
+ return err;
+ }
+
+#if defined(SO_NOSIGPIPE)
+ {
+ int on = 1;
+ setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
+ }
+#endif
+
+ return sockfd;
+}
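+
+/* Usage sketch (illustrative): callers receive a descriptor that is already
+ * non-blocking and close-on-exec, whichever branch above was taken.
+ *
+ *   int fd = uv__socket(AF_INET, SOCK_STREAM, 0);
+ *   if (fd < 0)
+ *     return fd;   (already a UV_* error; no errno inspection needed)
+ */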
+
+/* Open a path as a read-only FILE stream with close-on-exec set. */
+FILE* uv__open_file(const char* path) {
+ int fd;
+ FILE* fp;
+
+ fd = uv__open_cloexec(path, O_RDONLY);
+ if (fd < 0)
+ return NULL;
+
+ fp = fdopen(fd, "r");
+ if (fp == NULL)
+ uv__close(fd);
+
+ return fp;
+}
+
+
+int uv__accept(int sockfd) {
+ int peerfd;
+ int err;
+
+  (void) &err;  /* Unused when uv__accept4 is available. */
+ assert(sockfd >= 0);
+
+ do
+#ifdef uv__accept4
+ peerfd = uv__accept4(sockfd, NULL, NULL, SOCK_NONBLOCK|SOCK_CLOEXEC);
+#else
+ peerfd = accept(sockfd, NULL, NULL);
+#endif
+ while (peerfd == -1 && errno == EINTR);
+
+ if (peerfd == -1)
+ return UV__ERR(errno);
+
+#ifndef uv__accept4
+ err = uv__cloexec(peerfd, 1);
+ if (err == 0)
+ err = uv__nonblock(peerfd, 1);
+
+ if (err != 0) {
+ uv__close(peerfd);
+ return err;
+ }
+#endif
+
+ return peerfd;
+}
+
+
+/* close() on macos has the "interesting" quirk that it fails with EINTR
+ * without closing the file descriptor when a thread is in the cancel state.
+ * That's why libuv calls close$NOCANCEL() instead.
+ *
+ * glibc on linux has a similar issue: close() is a cancellation point and
+ * will unwind the thread when it's in the cancel state. Work around that
+ * by making the system call directly. Musl libc is unaffected.
+ */
+int uv__close_nocancel(int fd) {
+#if defined(__APPLE__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdollar-in-identifier-extension"
+#if defined(__LP64__) || TARGET_OS_IPHONE
+ extern int close$NOCANCEL(int);
+ return close$NOCANCEL(fd);
+#else
+ extern int close$NOCANCEL$UNIX2003(int);
+ return close$NOCANCEL$UNIX2003(fd);
+#endif
+#pragma GCC diagnostic pop
+#elif defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__)
+ long rc;
+ __sanitizer_syscall_pre_close(fd);
+ rc = syscall(SYS_close, fd);
+ __sanitizer_syscall_post_close(rc, fd);
+ return rc;
+#elif defined(__linux__) && !defined(__SANITIZE_THREAD__)
+ return syscall(SYS_close, fd);
+#else
+ return close(fd);
+#endif
+}
+
+
+int uv__close_nocheckstdio(int fd) {
+ int saved_errno;
+ int rc;
+
+ assert(fd > -1); /* Catch uninitialized io_watcher.fd bugs. */
+
+ saved_errno = errno;
+ rc = uv__close_nocancel(fd);
+ if (rc == -1) {
+ rc = UV__ERR(errno);
+ if (rc == UV_EINTR || rc == UV__ERR(EINPROGRESS))
+ rc = 0; /* The close is in progress, not an error. */
+ errno = saved_errno;
+ }
+
+ return rc;
+}
+
+
+int uv__close(int fd) {
+ assert(fd > STDERR_FILENO); /* Catch stdio close bugs. */
+#if defined(__MVS__)
+ SAVE_ERRNO(epoll_file_close(fd));
+#endif
+ return uv__close_nocheckstdio(fd);
+}
+
+#if UV__NONBLOCK_IS_IOCTL
+int uv__nonblock_ioctl(int fd, int set) {
+ int r;
+
+ do
+ r = ioctl(fd, FIONBIO, &set);
+ while (r == -1 && errno == EINTR);
+
+ if (r)
+ return UV__ERR(errno);
+
+ return 0;
+}
+#endif
+
+
+int uv__nonblock_fcntl(int fd, int set) {
+ int flags;
+ int r;
+
+ do
+ r = fcntl(fd, F_GETFL);
+ while (r == -1 && errno == EINTR);
+
+ if (r == -1)
+ return UV__ERR(errno);
+
+ /* Bail out now if already set/clear. */
+ if (!!(r & O_NONBLOCK) == !!set)
+ return 0;
+
+ if (set)
+ flags = r | O_NONBLOCK;
+ else
+ flags = r & ~O_NONBLOCK;
+
+ do
+ r = fcntl(fd, F_SETFL, flags);
+ while (r == -1 && errno == EINTR);
+
+ if (r)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+int uv__cloexec(int fd, int set) {
+ int flags;
+ int r;
+
+ flags = 0;
+ if (set)
+ flags = FD_CLOEXEC;
+
+ do
+ r = fcntl(fd, F_SETFD, flags);
+ while (r == -1 && errno == EINTR);
+
+ if (r)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
+#if defined(__ANDROID__) || \
+ defined(__DragonFly__) || \
+ defined(__FreeBSD__) || \
+ defined(__NetBSD__) || \
+ defined(__OpenBSD__) || \
+ defined(__linux__)
+ ssize_t rc;
+ rc = recvmsg(fd, msg, flags | MSG_CMSG_CLOEXEC);
+ if (rc == -1)
+ return UV__ERR(errno);
+ return rc;
+#else
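+  /* Without MSG_CMSG_CLOEXEC, walk any SCM_RIGHTS control messages and mark
+   * each received descriptor close-on-exec by hand (best effort; there is a
+   * window where a concurrent fork+exec could leak the descriptor). */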
+ struct cmsghdr* cmsg;
+ int* pfd;
+ int* end;
+ ssize_t rc;
+ rc = recvmsg(fd, msg, flags);
+ if (rc == -1)
+ return UV__ERR(errno);
+ if (msg->msg_controllen == 0)
+ return rc;
+ for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg))
+ if (cmsg->cmsg_type == SCM_RIGHTS)
+ for (pfd = (int*) CMSG_DATA(cmsg),
+ end = (int*) ((char*) cmsg + cmsg->cmsg_len);
+ pfd < end;
+ pfd += 1)
+ uv__cloexec(*pfd, 1);
+ return rc;
+#endif
+}
+
+
+int uv_cwd(char* buffer, size_t* size) {
+ char scratch[1 + UV__PATH_MAX];
+
+ if (buffer == NULL || size == NULL)
+ return UV_EINVAL;
+
+ /* Try to read directly into the user's buffer first... */
+ if (getcwd(buffer, *size) != NULL)
+ goto fixup;
+
+ if (errno != ERANGE)
+ return UV__ERR(errno);
+
+ /* ...or into scratch space if the user's buffer is too small
+ * so we can report how much space to provide on the next try.
+ */
+ if (getcwd(scratch, sizeof(scratch)) == NULL)
+ return UV__ERR(errno);
+
+ buffer = scratch;
+
+fixup:
+
+ *size = strlen(buffer);
+
+ if (*size > 1 && buffer[*size - 1] == '/') {
+ *size -= 1;
+ buffer[*size] = '\0';
+ }
+
+ if (buffer == scratch) {
+ *size += 1;
+ return UV_ENOBUFS;
+ }
+
+ return 0;
+}
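+
+/* Usage sketch (illustrative): on UV_ENOBUFS, *size reports the required
+ * length including the terminating NUL, so a single retry always suffices.
+ *
+ *   char small[8];
+ *   size_t n = sizeof(small);
+ *   if (uv_cwd(small, &n) == UV_ENOBUFS) {
+ *     char* big = malloc(n);
+ *     if (big != NULL && uv_cwd(big, &n) == 0)
+ *       use_path(big, n);   (use_path is hypothetical)
+ *   }
+ */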
+
+
+int uv_chdir(const char* dir) {
+ if (chdir(dir))
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+void uv_disable_stdio_inheritance(void) {
+ int fd;
+
+ /* Set the CLOEXEC flag on all open descriptors. Unconditionally try the
+ * first 16 file descriptors. After that, bail out after the first error.
+ */
+ for (fd = 0; ; fd++)
+ if (uv__cloexec(fd, 1) && fd > 15)
+ break;
+}
+
+
+int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
+ int fd_out;
+
+ switch (handle->type) {
+ case UV_TCP:
+ case UV_NAMED_PIPE:
+ case UV_TTY:
+ fd_out = uv__stream_fd((uv_stream_t*) handle);
+ break;
+
+ case UV_UDP:
+ fd_out = ((uv_udp_t *) handle)->io_watcher.fd;
+ break;
+
+ case UV_POLL:
+ fd_out = ((uv_poll_t *) handle)->io_watcher.fd;
+ break;
+
+ default:
+ return UV_EINVAL;
+ }
+
+ if (uv__is_closing(handle) || fd_out == -1)
+ return UV_EBADF;
+
+ *fd = fd_out;
+ return 0;
+}
+
+
+static void uv__run_pending(uv_loop_t* loop) {
+ QUEUE* q;
+ QUEUE pq;
+ uv__io_t* w;
+
+ QUEUE_MOVE(&loop->pending_queue, &pq);
+
+ while (!QUEUE_EMPTY(&pq)) {
+ q = QUEUE_HEAD(&pq);
+ QUEUE_REMOVE(q);
+ QUEUE_INIT(q);
+ w = QUEUE_DATA(q, uv__io_t, pending_queue);
+ w->cb(loop, w, POLLOUT);
+ }
+}
+
+
+static unsigned int next_power_of_two(unsigned int val) {
+ val -= 1;
+ val |= val >> 1;
+ val |= val >> 2;
+ val |= val >> 4;
+ val |= val >> 8;
+ val |= val >> 16;
+ val += 1;
+ return val;
+}
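+
+/* Worked example: for val = 33, the decrement gives 32 (100000 in binary),
+ * the shifted ORs smear the top bit downward to 111111 (63), and the final
+ * increment yields 64. The initial decrement is what makes exact powers of
+ * two (e.g. 32) map to themselves instead of the next power up. */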
+
+static void maybe_resize(uv_loop_t* loop, unsigned int len) {
+ uv__io_t** watchers;
+ void* fake_watcher_list;
+ void* fake_watcher_count;
+ unsigned int nwatchers;
+ unsigned int i;
+
+ if (len <= loop->nwatchers)
+ return;
+
+ /* Preserve fake watcher list and count at the end of the watchers */
+ if (loop->watchers != NULL) {
+ fake_watcher_list = loop->watchers[loop->nwatchers];
+ fake_watcher_count = loop->watchers[loop->nwatchers + 1];
+ } else {
+ fake_watcher_list = NULL;
+ fake_watcher_count = NULL;
+ }
+
+ nwatchers = next_power_of_two(len + 2) - 2;
+ watchers = uv__reallocf(loop->watchers,
+ (nwatchers + 2) * sizeof(loop->watchers[0]));
+
+ if (watchers == NULL)
+ abort();
+ for (i = loop->nwatchers; i < nwatchers; i++)
+ watchers[i] = NULL;
+ watchers[nwatchers] = fake_watcher_list;
+ watchers[nwatchers + 1] = fake_watcher_count;
+
+ loop->watchers = watchers;
+ loop->nwatchers = nwatchers;
+}
+
+
+void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
+ assert(cb != NULL);
+ assert(fd >= -1);
+ QUEUE_INIT(&w->pending_queue);
+ QUEUE_INIT(&w->watcher_queue);
+ w->cb = cb;
+ w->fd = fd;
+ w->events = 0;
+ w->pevents = 0;
+
+#if defined(UV_HAVE_KQUEUE)
+ w->rcount = 0;
+ w->wcount = 0;
+#endif /* defined(UV_HAVE_KQUEUE) */
+}
+
+
+void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
+ assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
+ assert(0 != events);
+ assert(w->fd >= 0);
+ assert(w->fd < INT_MAX);
+
+ w->pevents |= events;
+ maybe_resize(loop, w->fd + 1);
+
+#if !defined(__sun)
+ /* The event ports backend needs to rearm all file descriptors on each and
+ * every tick of the event loop but the other backends allow us to
+ * short-circuit here if the event mask is unchanged.
+ */
+ if (w->events == w->pevents)
+ return;
+#endif
+
+ if (QUEUE_EMPTY(&w->watcher_queue))
+ QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
+
+ if (loop->watchers[w->fd] == NULL) {
+ loop->watchers[w->fd] = w;
+ loop->nfds++;
+ }
+}
+
+
+void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
+ assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
+ assert(0 != events);
+
+ if (w->fd == -1)
+ return;
+
+ assert(w->fd >= 0);
+
+ /* Happens when uv__io_stop() is called on a handle that was never started. */
+ if ((unsigned) w->fd >= loop->nwatchers)
+ return;
+
+ w->pevents &= ~events;
+
+ if (w->pevents == 0) {
+ QUEUE_REMOVE(&w->watcher_queue);
+ QUEUE_INIT(&w->watcher_queue);
+ w->events = 0;
+
+ if (w == loop->watchers[w->fd]) {
+ assert(loop->nfds > 0);
+ loop->watchers[w->fd] = NULL;
+ loop->nfds--;
+ }
+ }
+ else if (QUEUE_EMPTY(&w->watcher_queue))
+ QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
+}
+
+
+void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
+ uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
+ QUEUE_REMOVE(&w->pending_queue);
+
+ /* Remove stale events for this file descriptor */
+ if (w->fd != -1)
+ uv__platform_invalidate_fd(loop, w->fd);
+}
+
+
+void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
+ if (QUEUE_EMPTY(&w->pending_queue))
+ QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue);
+}
+
+
+int uv__io_active(const uv__io_t* w, unsigned int events) {
+ assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
+ assert(0 != events);
+ return 0 != (w->pevents & events);
+}
+
+
+int uv__fd_exists(uv_loop_t* loop, int fd) {
+ return (unsigned) fd < loop->nwatchers && loop->watchers[fd] != NULL;
+}
+
+
+int uv_getrusage(uv_rusage_t* rusage) {
+ struct rusage usage;
+
+ if (getrusage(RUSAGE_SELF, &usage))
+ return UV__ERR(errno);
+
+ rusage->ru_utime.tv_sec = usage.ru_utime.tv_sec;
+ rusage->ru_utime.tv_usec = usage.ru_utime.tv_usec;
+
+ rusage->ru_stime.tv_sec = usage.ru_stime.tv_sec;
+ rusage->ru_stime.tv_usec = usage.ru_stime.tv_usec;
+
+#if !defined(__MVS__) && !defined(__HAIKU__)
+ rusage->ru_maxrss = usage.ru_maxrss;
+ rusage->ru_ixrss = usage.ru_ixrss;
+ rusage->ru_idrss = usage.ru_idrss;
+ rusage->ru_isrss = usage.ru_isrss;
+ rusage->ru_minflt = usage.ru_minflt;
+ rusage->ru_majflt = usage.ru_majflt;
+ rusage->ru_nswap = usage.ru_nswap;
+ rusage->ru_inblock = usage.ru_inblock;
+ rusage->ru_oublock = usage.ru_oublock;
+ rusage->ru_msgsnd = usage.ru_msgsnd;
+ rusage->ru_msgrcv = usage.ru_msgrcv;
+ rusage->ru_nsignals = usage.ru_nsignals;
+ rusage->ru_nvcsw = usage.ru_nvcsw;
+ rusage->ru_nivcsw = usage.ru_nivcsw;
+#endif
+
+ return 0;
+}
+
+
+int uv__open_cloexec(const char* path, int flags) {
+#if defined(O_CLOEXEC)
+ int fd;
+
+ fd = open(path, flags | O_CLOEXEC);
+ if (fd == -1)
+ return UV__ERR(errno);
+
+ return fd;
+#else /* O_CLOEXEC */
+ int err;
+ int fd;
+
+ fd = open(path, flags);
+ if (fd == -1)
+ return UV__ERR(errno);
+
+ err = uv__cloexec(fd, 1);
+ if (err) {
+ uv__close(fd);
+ return err;
+ }
+
+ return fd;
+#endif /* O_CLOEXEC */
+}
+
+
+int uv__slurp(const char* filename, char* buf, size_t len) {
+ ssize_t n;
+ int fd;
+
+ assert(len > 0);
+
+ fd = uv__open_cloexec(filename, O_RDONLY);
+ if (fd < 0)
+ return fd;
+
+ do
+ n = read(fd, buf, len - 1);
+ while (n == -1 && errno == EINTR);
+
+ if (uv__close_nocheckstdio(fd))
+ abort();
+
+ if (n < 0)
+ return UV__ERR(errno);
+
+ buf[n] = '\0';
+
+ return 0;
+}
+
+
+int uv__dup2_cloexec(int oldfd, int newfd) {
+#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__linux__)
+ int r;
+
+ r = dup3(oldfd, newfd, O_CLOEXEC);
+ if (r == -1)
+ return UV__ERR(errno);
+
+ return r;
+#else
+ int err;
+ int r;
+
+ r = dup2(oldfd, newfd); /* Never retry. */
+ if (r == -1)
+ return UV__ERR(errno);
+
+ err = uv__cloexec(newfd, 1);
+ if (err != 0) {
+ uv__close(newfd);
+ return err;
+ }
+
+ return r;
+#endif
+}
+
+
+int uv_os_homedir(char* buffer, size_t* size) {
+ uv_passwd_t pwd;
+ size_t len;
+ int r;
+
+ /* Check if the HOME environment variable is set first. The task of
+ performing input validation on buffer and size is taken care of by
+ uv_os_getenv(). */
+ r = uv_os_getenv("HOME", buffer, size);
+
+ if (r != UV_ENOENT)
+ return r;
+
+ /* HOME is not set, so call uv__getpwuid_r() */
+ r = uv__getpwuid_r(&pwd);
+
+ if (r != 0) {
+ return r;
+ }
+
+ len = strlen(pwd.homedir);
+
+ if (len >= *size) {
+ *size = len + 1;
+ uv_os_free_passwd(&pwd);
+ return UV_ENOBUFS;
+ }
+
+ memcpy(buffer, pwd.homedir, len + 1);
+ *size = len;
+ uv_os_free_passwd(&pwd);
+
+ return 0;
+}
+
+
+int uv_os_tmpdir(char* buffer, size_t* size) {
+ const char* buf;
+ size_t len;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+#define CHECK_ENV_VAR(name) \
+ do { \
+ buf = getenv(name); \
+ if (buf != NULL) \
+ goto return_buffer; \
+ } \
+ while (0)
+
+ /* Check the TMPDIR, TMP, TEMP, and TEMPDIR environment variables in order */
+ CHECK_ENV_VAR("TMPDIR");
+ CHECK_ENV_VAR("TMP");
+ CHECK_ENV_VAR("TEMP");
+ CHECK_ENV_VAR("TEMPDIR");
+
+#undef CHECK_ENV_VAR
+
+  /* No temp environment variables defined. */
+#if defined(__ANDROID__)
+  buf = "/data/local/tmp";
+#else
+  buf = "/tmp";
+#endif
+
+return_buffer:
+ len = strlen(buf);
+
+ if (len >= *size) {
+ *size = len + 1;
+ return UV_ENOBUFS;
+ }
+
+ /* The returned directory should not have a trailing slash. */
+ if (len > 1 && buf[len - 1] == '/') {
+ len--;
+ }
+
+ memcpy(buffer, buf, len + 1);
+ buffer[len] = '\0';
+ *size = len;
+
+ return 0;
+}
+
+
+int uv__getpwuid_r(uv_passwd_t* pwd) {
+ struct passwd pw;
+ struct passwd* result;
+ char* buf;
+ uid_t uid;
+ size_t bufsize;
+ size_t name_size;
+ size_t homedir_size;
+ size_t shell_size;
+ int r;
+
+ if (pwd == NULL)
+ return UV_EINVAL;
+
+ uid = geteuid();
+
+  /* Calling sysconf(_SC_GETPW_R_SIZE_MAX) would yield the suggested buffer
+   * size, but it is frequently just 1024 or 4096, so start from a fixed
+   * guess and double it on ERANGE; the pwent is not usually large. */
+ for (bufsize = 2000;; bufsize *= 2) {
+ buf = uv__malloc(bufsize);
+
+ if (buf == NULL)
+ return UV_ENOMEM;
+
+ do
+ r = getpwuid_r(uid, &pw, buf, bufsize, &result);
+ while (r == EINTR);
+
+ if (r != 0 || result == NULL)
+ uv__free(buf);
+
+ if (r != ERANGE)
+ break;
+ }
+
+ if (r != 0)
+ return UV__ERR(r);
+
+ if (result == NULL)
+ return UV_ENOENT;
+
+ /* Allocate memory for the username, shell, and home directory */
+ name_size = strlen(pw.pw_name) + 1;
+ homedir_size = strlen(pw.pw_dir) + 1;
+ shell_size = strlen(pw.pw_shell) + 1;
+ pwd->username = uv__malloc(name_size + homedir_size + shell_size);
+
+ if (pwd->username == NULL) {
+ uv__free(buf);
+ return UV_ENOMEM;
+ }
+
+ /* Copy the username */
+ memcpy(pwd->username, pw.pw_name, name_size);
+
+ /* Copy the home directory */
+ pwd->homedir = pwd->username + name_size;
+ memcpy(pwd->homedir, pw.pw_dir, homedir_size);
+
+ /* Copy the shell */
+ pwd->shell = pwd->homedir + homedir_size;
+ memcpy(pwd->shell, pw.pw_shell, shell_size);
+
+ /* Copy the uid and gid */
+ pwd->uid = pw.pw_uid;
+ pwd->gid = pw.pw_gid;
+
+ uv__free(buf);
+
+ return 0;
+}
+
+
+void uv_os_free_passwd(uv_passwd_t* pwd) {
+ if (pwd == NULL)
+ return;
+
+ /*
+ The memory for name, shell, and homedir are allocated in a single
+ uv__malloc() call. The base of the pointer is stored in pwd->username, so
+ that is the field that needs to be freed.
+ */
+ uv__free(pwd->username);
+ pwd->username = NULL;
+ pwd->shell = NULL;
+ pwd->homedir = NULL;
+}
+
+
+int uv_os_get_passwd(uv_passwd_t* pwd) {
+ return uv__getpwuid_r(pwd);
+}
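+
+/* Usage sketch (illustrative): a successful uv_os_get_passwd() must be paired
+ * with uv_os_free_passwd(), since all three strings share one allocation.
+ *
+ *   uv_passwd_t pwd;
+ *   if (uv_os_get_passwd(&pwd) == 0) {
+ *     printf("%s -> %s\n", pwd.username, pwd.homedir);
+ *     uv_os_free_passwd(&pwd);
+ *   }
+ */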
+
+
+int uv_translate_sys_error(int sys_errno) {
+ /* If < 0 then it's already a libuv error. */
+ return sys_errno <= 0 ? sys_errno : -sys_errno;
+}
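+
+/* Worked example: uv_translate_sys_error(ENOENT) yields -ENOENT, which is
+ * UV_ENOENT on Unix, while a value that is already a (negative) libuv error
+ * passes through unchanged. */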
+
+
+int uv_os_environ(uv_env_item_t** envitems, int* count) {
+ int i, j, cnt;
+ uv_env_item_t* envitem;
+
+ *envitems = NULL;
+ *count = 0;
+
+ for (i = 0; environ[i] != NULL; i++);
+
+ *envitems = uv__calloc(i, sizeof(**envitems));
+
+ if (*envitems == NULL)
+ return UV_ENOMEM;
+
+ for (j = 0, cnt = 0; j < i; j++) {
+ char* buf;
+ char* ptr;
+
+ if (environ[j] == NULL)
+ break;
+
+ buf = uv__strdup(environ[j]);
+ if (buf == NULL)
+ goto fail;
+
+ ptr = strchr(buf, '=');
+ if (ptr == NULL) {
+ uv__free(buf);
+ continue;
+ }
+
+ *ptr = '\0';
+
+ envitem = &(*envitems)[cnt];
+ envitem->name = buf;
+ envitem->value = ptr + 1;
+
+ cnt++;
+ }
+
+ *count = cnt;
+ return 0;
+
+fail:
+ for (i = 0; i < cnt; i++) {
+    envitem = &(*envitems)[i];
+ uv__free(envitem->name);
+ }
+ uv__free(*envitems);
+
+ *envitems = NULL;
+ *count = 0;
+ return UV_ENOMEM;
+}
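+
+/* Usage sketch (illustrative): each entry owns a strdup()ed buffer, so the
+ * list must be released with uv_os_free_environ() when done.
+ *
+ *   uv_env_item_t* items;
+ *   int i, count;
+ *   if (uv_os_environ(&items, &count) == 0) {
+ *     for (i = 0; i < count; i++)
+ *       printf("%s=%s\n", items[i].name, items[i].value);
+ *     uv_os_free_environ(items, count);
+ *   }
+ */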
+
+
+int uv_os_getenv(const char* name, char* buffer, size_t* size) {
+ char* var;
+ size_t len;
+
+ if (name == NULL || buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ var = getenv(name);
+
+ if (var == NULL)
+ return UV_ENOENT;
+
+ len = strlen(var);
+
+ if (len >= *size) {
+ *size = len + 1;
+ return UV_ENOBUFS;
+ }
+
+ memcpy(buffer, var, len + 1);
+ *size = len;
+
+ return 0;
+}
+
+
+int uv_os_setenv(const char* name, const char* value) {
+ if (name == NULL || value == NULL)
+ return UV_EINVAL;
+
+ if (setenv(name, value, 1) != 0)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+int uv_os_unsetenv(const char* name) {
+ if (name == NULL)
+ return UV_EINVAL;
+
+ if (unsetenv(name) != 0)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+int uv_os_gethostname(char* buffer, size_t* size) {
+ /*
+ On some platforms, if the input buffer is not large enough, gethostname()
+ succeeds, but truncates the result. libuv can detect this and return ENOBUFS
+ instead by creating a large enough buffer and comparing the hostname length
+ to the size input.
+ */
+ char buf[UV_MAXHOSTNAMESIZE];
+ size_t len;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ if (gethostname(buf, sizeof(buf)) != 0)
+ return UV__ERR(errno);
+
+ buf[sizeof(buf) - 1] = '\0'; /* Null terminate, just to be safe. */
+ len = strlen(buf);
+
+ if (len >= *size) {
+ *size = len + 1;
+ return UV_ENOBUFS;
+ }
+
+ memcpy(buffer, buf, len + 1);
+ *size = len;
+ return 0;
+}
+
+
+int uv_cpumask_size(void) {
+#if defined(__linux__) || defined(__FreeBSD__)
+ return CPU_SETSIZE;
+#else
+ return UV_ENOTSUP;
+#endif
+}
+
+
+uv_os_fd_t uv_get_osfhandle(int fd) {
+ return fd;
+}
+
+int uv_open_osfhandle(uv_os_fd_t os_fd) {
+ return os_fd;
+}
+
+uv_pid_t uv_os_getpid(void) {
+ return getpid();
+}
+
+
+uv_pid_t uv_os_getppid(void) {
+ return getppid();
+}
+
+
+int uv_os_getpriority(uv_pid_t pid, int* priority) {
+ int r;
+
+ if (priority == NULL)
+ return UV_EINVAL;
+
+ errno = 0;
+ r = getpriority(PRIO_PROCESS, (int) pid);
+
+ if (r == -1 && errno != 0)
+ return UV__ERR(errno);
+
+ *priority = r;
+ return 0;
+}
+
+
+int uv_os_setpriority(uv_pid_t pid, int priority) {
+ if (priority < UV_PRIORITY_HIGHEST || priority > UV_PRIORITY_LOW)
+ return UV_EINVAL;
+
+ if (setpriority(PRIO_PROCESS, (int) pid, priority) != 0)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+int uv_os_uname(uv_utsname_t* buffer) {
+ struct utsname buf;
+ int r;
+
+ if (buffer == NULL)
+ return UV_EINVAL;
+
+ if (uname(&buf) == -1) {
+ r = UV__ERR(errno);
+ goto error;
+ }
+
+ r = uv__strscpy(buffer->sysname, buf.sysname, sizeof(buffer->sysname));
+ if (r == UV_E2BIG)
+ goto error;
+
+#ifdef _AIX
+ r = snprintf(buffer->release,
+ sizeof(buffer->release),
+ "%s.%s",
+ buf.version,
+ buf.release);
+ if (r >= sizeof(buffer->release)) {
+ r = UV_E2BIG;
+ goto error;
+ }
+#else
+ r = uv__strscpy(buffer->release, buf.release, sizeof(buffer->release));
+ if (r == UV_E2BIG)
+ goto error;
+#endif
+
+ r = uv__strscpy(buffer->version, buf.version, sizeof(buffer->version));
+ if (r == UV_E2BIG)
+ goto error;
+
+#if defined(_AIX) || defined(__PASE__)
+ r = uv__strscpy(buffer->machine, "ppc64", sizeof(buffer->machine));
+#else
+ r = uv__strscpy(buffer->machine, buf.machine, sizeof(buffer->machine));
+#endif
+
+ if (r == UV_E2BIG)
+ goto error;
+
+ return 0;
+
+error:
+ buffer->sysname[0] = '\0';
+ buffer->release[0] = '\0';
+ buffer->version[0] = '\0';
+ buffer->machine[0] = '\0';
+ return r;
+}
+
+int uv__getsockpeername(const uv_handle_t* handle,
+ uv__peersockfunc func,
+ struct sockaddr* name,
+ int* namelen) {
+ socklen_t socklen;
+ uv_os_fd_t fd;
+ int r;
+
+ r = uv_fileno(handle, &fd);
+ if (r < 0)
+ return r;
+
+ /* sizeof(socklen_t) != sizeof(int) on some systems. */
+ socklen = (socklen_t) *namelen;
+
+ if (func(fd, name, &socklen))
+ return UV__ERR(errno);
+
+ *namelen = (int) socklen;
+ return 0;
+}
+
+int uv_gettimeofday(uv_timeval64_t* tv) {
+ struct timeval time;
+
+ if (tv == NULL)
+ return UV_EINVAL;
+
+ if (gettimeofday(&time, NULL) != 0)
+ return UV__ERR(errno);
+
+ tv->tv_sec = (int64_t) time.tv_sec;
+ tv->tv_usec = (int32_t) time.tv_usec;
+ return 0;
+}
+
+void uv_sleep(unsigned int msec) {
+ struct timespec timeout;
+ int rc;
+
+ timeout.tv_sec = msec / 1000;
+ timeout.tv_nsec = (msec % 1000) * 1000 * 1000;
+
+ do
+ rc = nanosleep(&timeout, &timeout);
+ while (rc == -1 && errno == EINTR);
+
+ assert(rc == 0);
+}
+
+int uv__search_path(const char* prog, char* buf, size_t* buflen) {
+ char abspath[UV__PATH_MAX];
+ size_t abspath_size;
+ char trypath[UV__PATH_MAX];
+ char* cloned_path;
+ char* path_env;
+ char* token;
+ char* itr;
+
+ if (buf == NULL || buflen == NULL || *buflen == 0)
+ return UV_EINVAL;
+
+ /*
+ * Possibilities for prog:
+ * i) an absolute path such as: /home/user/myprojects/nodejs/node
+ * ii) a relative path such as: ./node or ../myprojects/nodejs/node
+ * iii) a bare filename such as "node", after exporting PATH variable
+ * to its location.
+ */
+
+ /* Case i) and ii) absolute or relative paths */
+ if (strchr(prog, '/') != NULL) {
+ if (realpath(prog, abspath) != abspath)
+ return UV__ERR(errno);
+
+ abspath_size = strlen(abspath);
+
+ *buflen -= 1;
+ if (*buflen > abspath_size)
+ *buflen = abspath_size;
+
+ memcpy(buf, abspath, *buflen);
+ buf[*buflen] = '\0';
+
+ return 0;
+ }
+
+ /* Case iii). Search PATH environment variable */
+ cloned_path = NULL;
+ token = NULL;
+ path_env = getenv("PATH");
+
+ if (path_env == NULL)
+ return UV_EINVAL;
+
+ cloned_path = uv__strdup(path_env);
+ if (cloned_path == NULL)
+ return UV_ENOMEM;
+
+ token = uv__strtok(cloned_path, ":", &itr);
+ while (token != NULL) {
+ snprintf(trypath, sizeof(trypath) - 1, "%s/%s", token, prog);
+ if (realpath(trypath, abspath) == abspath) {
+ /* Check the match is executable */
+ if (access(abspath, X_OK) == 0) {
+ abspath_size = strlen(abspath);
+
+ *buflen -= 1;
+ if (*buflen > abspath_size)
+ *buflen = abspath_size;
+
+ memcpy(buf, abspath, *buflen);
+ buf[*buflen] = '\0';
+
+ uv__free(cloned_path);
+ return 0;
+ }
+ }
+ token = uv__strtok(NULL, ":", &itr);
+ }
+ uv__free(cloned_path);
+
+ /* Out of tokens (path entries), and no match found */
+ return UV_EINVAL;
+}
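+
+/* Usage sketch (illustrative): resolves a program name the way a shell
+ * resolves argv[0], via PATH when the name contains no slash.
+ *
+ *   char exe[4096];
+ *   size_t len = sizeof(exe);
+ *   if (uv__search_path("node", exe, &len) == 0)
+ *     printf("%s (%zu bytes)\n", exe, len);
+ */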
+
+
+unsigned int uv_available_parallelism(void) {
+#ifdef __linux__
+ cpu_set_t set;
+ long rc;
+
+ memset(&set, 0, sizeof(set));
+
+ /* sysconf(_SC_NPROCESSORS_ONLN) in musl calls sched_getaffinity() but in
+ * glibc it's... complicated... so for consistency try sched_getaffinity()
+ * before falling back to sysconf(_SC_NPROCESSORS_ONLN).
+ */
+ if (0 == sched_getaffinity(0, sizeof(set), &set))
+ rc = CPU_COUNT(&set);
+ else
+ rc = sysconf(_SC_NPROCESSORS_ONLN);
+
+ if (rc < 1)
+ rc = 1;
+
+ return (unsigned) rc;
+#elif defined(__MVS__)
+ int rc;
+
+ rc = __get_num_online_cpus();
+ if (rc < 1)
+ rc = 1;
+
+ return (unsigned) rc;
+#else /* __linux__ */
+ long rc;
+
+ rc = sysconf(_SC_NPROCESSORS_ONLN);
+ if (rc < 1)
+ rc = 1;
+
+ return (unsigned) rc;
+#endif /* __linux__ */
+}
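+
+/* Usage sketch (illustrative): a typical consumer is thread-pool sizing; the
+ * result is always at least 1, so it is safe as a divisor or array length.
+ *
+ *   unsigned int nthreads = uv_available_parallelism();
+ */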
diff --git a/Utilities/cmlibuv/src/unix/cygwin.c b/Utilities/cmlibuv/src/unix/cygwin.c
new file mode 100644
index 0000000000..169958d55f
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/cygwin.c
@@ -0,0 +1,53 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <sys/sysinfo.h>
+#include <unistd.h>
+
+int uv_uptime(double* uptime) {
+ struct sysinfo info;
+
+ if (sysinfo(&info) < 0)
+ return UV__ERR(errno);
+
+ *uptime = info.uptime;
+ return 0;
+}
+
+int uv_resident_set_memory(size_t* rss) {
+ /* FIXME: read /proc/meminfo? */
+ *rss = 0;
+ return 0;
+}
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ /* FIXME: read /proc/stat? */
+ *cpu_infos = NULL;
+ *count = 0;
+ return UV_ENOSYS;
+}
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0; /* Memory constraints are unknown. */
+}
diff --git a/Utilities/cmlibuv/src/unix/darwin-proctitle.c b/Utilities/cmlibuv/src/unix/darwin-proctitle.c
new file mode 100644
index 0000000000..5288083ef0
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/darwin-proctitle.c
@@ -0,0 +1,192 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <TargetConditionals.h>
+
+#if !TARGET_OS_IPHONE
+#include "darwin-stub.h"
+#endif
+
+
+static int uv__pthread_setname_np(const char* name) {
+ char namebuf[64]; /* MAXTHREADNAMESIZE */
+ int err;
+
+ strncpy(namebuf, name, sizeof(namebuf) - 1);
+ namebuf[sizeof(namebuf) - 1] = '\0';
+
+ err = pthread_setname_np(namebuf);
+ if (err)
+ return UV__ERR(err);
+
+ return 0;
+}
+
+
+int uv__set_process_title(const char* title) {
+#if TARGET_OS_IPHONE
+ return uv__pthread_setname_np(title);
+#else
+ CFStringRef (*pCFStringCreateWithCString)(CFAllocatorRef,
+ const char*,
+ CFStringEncoding);
+ CFBundleRef (*pCFBundleGetBundleWithIdentifier)(CFStringRef);
+ void *(*pCFBundleGetDataPointerForName)(CFBundleRef, CFStringRef);
+ void *(*pCFBundleGetFunctionPointerForName)(CFBundleRef, CFStringRef);
+ CFTypeRef (*pLSGetCurrentApplicationASN)(void);
+ OSStatus (*pLSSetApplicationInformationItem)(int,
+ CFTypeRef,
+ CFStringRef,
+ CFStringRef,
+ CFDictionaryRef*);
+ void* application_services_handle;
+ void* core_foundation_handle;
+ CFBundleRef launch_services_bundle;
+ CFStringRef* display_name_key;
+ CFDictionaryRef (*pCFBundleGetInfoDictionary)(CFBundleRef);
+ CFBundleRef (*pCFBundleGetMainBundle)(void);
+ CFDictionaryRef (*pLSApplicationCheckIn)(int, CFDictionaryRef);
+ void (*pLSSetApplicationLaunchServicesServerConnectionStatus)(uint64_t,
+ void*);
+ CFTypeRef asn;
+ int err;
+
+ err = UV_ENOENT;
+ application_services_handle = dlopen("/System/Library/Frameworks/"
+ "ApplicationServices.framework/"
+ "Versions/A/ApplicationServices",
+ RTLD_LAZY | RTLD_LOCAL);
+ core_foundation_handle = dlopen("/System/Library/Frameworks/"
+ "CoreFoundation.framework/"
+ "Versions/A/CoreFoundation",
+ RTLD_LAZY | RTLD_LOCAL);
+
+ if (application_services_handle == NULL || core_foundation_handle == NULL)
+ goto out;
+
+ *(void **)(&pCFStringCreateWithCString) =
+ dlsym(core_foundation_handle, "CFStringCreateWithCString");
+ *(void **)(&pCFBundleGetBundleWithIdentifier) =
+ dlsym(core_foundation_handle, "CFBundleGetBundleWithIdentifier");
+ *(void **)(&pCFBundleGetDataPointerForName) =
+ dlsym(core_foundation_handle, "CFBundleGetDataPointerForName");
+ *(void **)(&pCFBundleGetFunctionPointerForName) =
+ dlsym(core_foundation_handle, "CFBundleGetFunctionPointerForName");
+
+ if (pCFStringCreateWithCString == NULL ||
+ pCFBundleGetBundleWithIdentifier == NULL ||
+ pCFBundleGetDataPointerForName == NULL ||
+ pCFBundleGetFunctionPointerForName == NULL) {
+ goto out;
+ }
+
+#define S(s) pCFStringCreateWithCString(NULL, (s), kCFStringEncodingUTF8)
+
+ launch_services_bundle =
+ pCFBundleGetBundleWithIdentifier(S("com.apple.LaunchServices"));
+
+ if (launch_services_bundle == NULL)
+ goto out;
+
+ *(void **)(&pLSGetCurrentApplicationASN) =
+ pCFBundleGetFunctionPointerForName(launch_services_bundle,
+ S("_LSGetCurrentApplicationASN"));
+
+ if (pLSGetCurrentApplicationASN == NULL)
+ goto out;
+
+ *(void **)(&pLSSetApplicationInformationItem) =
+ pCFBundleGetFunctionPointerForName(launch_services_bundle,
+ S("_LSSetApplicationInformationItem"));
+
+ if (pLSSetApplicationInformationItem == NULL)
+ goto out;
+
+ display_name_key = pCFBundleGetDataPointerForName(launch_services_bundle,
+ S("_kLSDisplayNameKey"));
+
+ if (display_name_key == NULL || *display_name_key == NULL)
+ goto out;
+
+ *(void **)(&pCFBundleGetInfoDictionary) = dlsym(core_foundation_handle,
+ "CFBundleGetInfoDictionary");
+ *(void **)(&pCFBundleGetMainBundle) = dlsym(core_foundation_handle,
+ "CFBundleGetMainBundle");
+ if (pCFBundleGetInfoDictionary == NULL || pCFBundleGetMainBundle == NULL)
+ goto out;
+
+ *(void **)(&pLSApplicationCheckIn) = pCFBundleGetFunctionPointerForName(
+ launch_services_bundle,
+ S("_LSApplicationCheckIn"));
+
+ if (pLSApplicationCheckIn == NULL)
+ goto out;
+
+ *(void **)(&pLSSetApplicationLaunchServicesServerConnectionStatus) =
+ pCFBundleGetFunctionPointerForName(
+ launch_services_bundle,
+ S("_LSSetApplicationLaunchServicesServerConnectionStatus"));
+
+ if (pLSSetApplicationLaunchServicesServerConnectionStatus == NULL)
+ goto out;
+
+ pLSSetApplicationLaunchServicesServerConnectionStatus(0, NULL);
+
+ /* Check into process manager?! */
+ pLSApplicationCheckIn(-2,
+ pCFBundleGetInfoDictionary(pCFBundleGetMainBundle()));
+
+ asn = pLSGetCurrentApplicationASN();
+
+ err = UV_EBUSY;
+ if (asn == NULL)
+ goto out;
+
+ err = UV_EINVAL;
+ if (pLSSetApplicationInformationItem(-2, /* Magic value. */
+ asn,
+ *display_name_key,
+ S(title),
+ NULL) != noErr) {
+ goto out;
+ }
+
+ uv__pthread_setname_np(title); /* Don't care if it fails. */
+ err = 0;
+
+out:
+ if (core_foundation_handle != NULL)
+ dlclose(core_foundation_handle);
+
+ if (application_services_handle != NULL)
+ dlclose(application_services_handle);
+
+ return err;
+#endif /* !TARGET_OS_IPHONE */
+}
diff --git a/Utilities/cmlibuv/src/unix/darwin-stub.h b/Utilities/cmlibuv/src/unix/darwin-stub.h
new file mode 100644
index 0000000000..433e3efa73
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/darwin-stub.h
@@ -0,0 +1,113 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_DARWIN_STUB_H_
+#define UV_DARWIN_STUB_H_
+
+#include <stdint.h>
+
+struct CFArrayCallBacks;
+struct CFRunLoopSourceContext;
+struct FSEventStreamContext;
+struct CFRange;
+
+typedef double CFAbsoluteTime;
+typedef double CFTimeInterval;
+typedef int FSEventStreamEventFlags;
+typedef int OSStatus;
+typedef long CFIndex;
+typedef struct CFArrayCallBacks CFArrayCallBacks;
+typedef struct CFRunLoopSourceContext CFRunLoopSourceContext;
+typedef struct FSEventStreamContext FSEventStreamContext;
+typedef uint32_t FSEventStreamCreateFlags;
+typedef uint64_t FSEventStreamEventId;
+typedef unsigned CFStringEncoding;
+typedef void* CFAllocatorRef;
+typedef void* CFArrayRef;
+typedef void* CFBundleRef;
+typedef void* CFDataRef;
+typedef void* CFDictionaryRef;
+typedef void* CFMutableDictionaryRef;
+typedef struct CFRange CFRange;
+typedef void* CFRunLoopRef;
+typedef void* CFRunLoopSourceRef;
+typedef void* CFStringRef;
+typedef void* CFTypeRef;
+typedef void* FSEventStreamRef;
+
+typedef uint32_t IOOptionBits;
+typedef unsigned int io_iterator_t;
+typedef unsigned int io_object_t;
+typedef unsigned int io_service_t;
+typedef unsigned int io_registry_entry_t;
+
+
+typedef void (*FSEventStreamCallback)(const FSEventStreamRef,
+ void*,
+ size_t,
+ void*,
+ const FSEventStreamEventFlags*,
+ const FSEventStreamEventId*);
+
+struct CFRunLoopSourceContext {
+ CFIndex version;
+ void* info;
+ void* pad[7];
+ void (*perform)(void*);
+};
+
+struct FSEventStreamContext {
+ CFIndex version;
+ void* info;
+ void* pad[3];
+};
+
+struct CFRange {
+ CFIndex location;
+ CFIndex length;
+};
+
+static const CFStringEncoding kCFStringEncodingUTF8 = 0x8000100;
+static const OSStatus noErr = 0;
+
+static const FSEventStreamEventId kFSEventStreamEventIdSinceNow = -1;
+
+static const int kFSEventStreamCreateFlagNoDefer = 2;
+static const int kFSEventStreamCreateFlagFileEvents = 16;
+
+static const int kFSEventStreamEventFlagEventIdsWrapped = 8;
+static const int kFSEventStreamEventFlagHistoryDone = 16;
+static const int kFSEventStreamEventFlagItemChangeOwner = 0x4000;
+static const int kFSEventStreamEventFlagItemCreated = 0x100;
+static const int kFSEventStreamEventFlagItemFinderInfoMod = 0x2000;
+static const int kFSEventStreamEventFlagItemInodeMetaMod = 0x400;
+static const int kFSEventStreamEventFlagItemIsDir = 0x20000;
+static const int kFSEventStreamEventFlagItemModified = 0x1000;
+static const int kFSEventStreamEventFlagItemRemoved = 0x200;
+static const int kFSEventStreamEventFlagItemRenamed = 0x800;
+static const int kFSEventStreamEventFlagItemXattrMod = 0x8000;
+static const int kFSEventStreamEventFlagKernelDropped = 4;
+static const int kFSEventStreamEventFlagMount = 64;
+static const int kFSEventStreamEventFlagRootChanged = 32;
+static const int kFSEventStreamEventFlagUnmount = 128;
+static const int kFSEventStreamEventFlagUserDropped = 2;
+
+#endif /* UV_DARWIN_STUB_H_ */
diff --git a/Utilities/cmlibuv/src/unix/darwin.c b/Utilities/cmlibuv/src/unix/darwin.c
new file mode 100644
index 0000000000..62f04d3154
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/darwin.c
@@ -0,0 +1,379 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <stdint.h>
+#include <errno.h>
+
+#include <dlfcn.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#include <mach-o/dyld.h> /* _NSGetExecutablePath */
+#include <sys/resource.h>
+#include <sys/sysctl.h>
+#include <unistd.h> /* sysconf */
+
+#include "darwin-stub.h"
+
+static uv_once_t once = UV_ONCE_INIT;
+static uint64_t (*time_func)(void);
+static mach_timebase_info_data_t timebase;
+
+typedef unsigned char UInt8;
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+ loop->cf_state = NULL;
+
+ if (uv__kqueue_init(loop))
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+ uv__fsevents_loop_delete(loop);
+}
+
+
+static void uv__hrtime_init_once(void) {
+ if (KERN_SUCCESS != mach_timebase_info(&timebase))
+ abort();
+
+ time_func = (uint64_t (*)(void)) dlsym(RTLD_DEFAULT, "mach_continuous_time");
+ if (time_func == NULL)
+ time_func = mach_absolute_time;
+}
+
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+ uv_once(&once, uv__hrtime_init_once);
+ return time_func() * timebase.numer / timebase.denom;
+}
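+
+/* Worked example (illustrative values): on Apple silicon the timebase is
+ * commonly 125/3, so a raw delta of 24 ticks converts to 24 * 125 / 3 =
+ * 1000 ns; on x86 the timebase is 1/1 and the conversion is the identity.
+ * mach_timebase_info() supplies the real ratio at runtime. */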
+
+
+int uv_exepath(char* buffer, size_t* size) {
+ /* realpath(exepath) may be > PATH_MAX so double it to be on the safe side. */
+ char abspath[PATH_MAX * 2 + 1];
+ char exepath[PATH_MAX + 1];
+ uint32_t exepath_size;
+ size_t abspath_size;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ exepath_size = sizeof(exepath);
+ if (_NSGetExecutablePath(exepath, &exepath_size))
+ return UV_EIO;
+
+ if (realpath(exepath, abspath) != abspath)
+ return UV__ERR(errno);
+
+ abspath_size = strlen(abspath);
+ if (abspath_size == 0)
+ return UV_EIO;
+
+ *size -= 1;
+ if (*size > abspath_size)
+ *size = abspath_size;
+
+ memcpy(buffer, abspath, *size);
+ buffer[*size] = '\0';
+
+ return 0;
+}
+
+
+uint64_t uv_get_free_memory(void) {
+ vm_statistics_data_t info;
+ mach_msg_type_number_t count = sizeof(info) / sizeof(integer_t);
+
+ if (host_statistics(mach_host_self(), HOST_VM_INFO,
+ (host_info_t)&info, &count) != KERN_SUCCESS) {
+ return UV_EINVAL; /* FIXME(bnoordhuis) Translate error. */
+ }
+
+ return (uint64_t) info.free_count * sysconf(_SC_PAGESIZE);
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ uint64_t info;
+ int which[] = {CTL_HW, HW_MEMSIZE};
+ size_t size = sizeof(info);
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ return (uint64_t) info;
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0; /* Memory constraints are unknown. */
+}
+
+
+void uv_loadavg(double avg[3]) {
+ struct loadavg info;
+ size_t size = sizeof(info);
+ int which[] = {CTL_VM, VM_LOADAVG};
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0) < 0) return;
+
+ avg[0] = (double) info.ldavg[0] / info.fscale;
+ avg[1] = (double) info.ldavg[1] / info.fscale;
+ avg[2] = (double) info.ldavg[2] / info.fscale;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+ mach_msg_type_number_t count;
+ task_basic_info_data_t info;
+ kern_return_t err;
+
+ count = TASK_BASIC_INFO_COUNT;
+ err = task_info(mach_task_self(),
+ TASK_BASIC_INFO,
+ (task_info_t) &info,
+ &count);
+ (void) &err;
+ /* task_info(TASK_BASIC_INFO) cannot really fail. Anything other than
+ * KERN_SUCCESS implies a libuv bug.
+ */
+ assert(err == KERN_SUCCESS);
+ *rss = info.resident_size;
+
+ return 0;
+}
+
+
+int uv_uptime(double* uptime) {
+ time_t now;
+ struct timeval info;
+ size_t size = sizeof(info);
+ static int which[] = {CTL_KERN, KERN_BOOTTIME};
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ now = time(NULL);
+ *uptime = now - info.tv_sec;
+
+ return 0;
+}
+
+static int uv__get_cpu_speed(uint64_t* speed) {
+ /* IOKit */
+ void (*pIOObjectRelease)(io_object_t);
+ kern_return_t (*pIOMasterPort)(mach_port_t, mach_port_t*);
+ CFMutableDictionaryRef (*pIOServiceMatching)(const char*);
+ kern_return_t (*pIOServiceGetMatchingServices)(mach_port_t,
+ CFMutableDictionaryRef,
+ io_iterator_t*);
+ io_service_t (*pIOIteratorNext)(io_iterator_t);
+ CFTypeRef (*pIORegistryEntryCreateCFProperty)(io_registry_entry_t,
+ CFStringRef,
+ CFAllocatorRef,
+ IOOptionBits);
+
+ /* CoreFoundation */
+ CFStringRef (*pCFStringCreateWithCString)(CFAllocatorRef,
+ const char*,
+ CFStringEncoding);
+ CFStringEncoding (*pCFStringGetSystemEncoding)(void);
+ UInt8 *(*pCFDataGetBytePtr)(CFDataRef);
+ CFIndex (*pCFDataGetLength)(CFDataRef);
+ void (*pCFDataGetBytes)(CFDataRef, CFRange, UInt8*);
+ void (*pCFRelease)(CFTypeRef);
+
+ void* core_foundation_handle;
+ void* iokit_handle;
+ int err;
+
+ kern_return_t kr;
+ mach_port_t mach_port;
+ io_iterator_t it;
+ io_object_t service;
+
+ mach_port = 0;
+ *speed = 0; /* Fallback value in case no clock-frequency entry is found. */
+
+ err = UV_ENOENT;
+ core_foundation_handle = dlopen("/System/Library/Frameworks/"
+ "CoreFoundation.framework/"
+ "CoreFoundation",
+ RTLD_LAZY | RTLD_LOCAL);
+ iokit_handle = dlopen("/System/Library/Frameworks/IOKit.framework/"
+ "IOKit",
+ RTLD_LAZY | RTLD_LOCAL);
+
+ if (core_foundation_handle == NULL || iokit_handle == NULL)
+ goto out;
+
+#define V(handle, symbol) \
+ do { \
+ *(void **)(&p ## symbol) = dlsym((handle), #symbol); \
+ if (p ## symbol == NULL) \
+ goto out; \
+ } \
+ while (0)
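+/* For example, V(iokit_handle, IOMasterPort) below expands (roughly) to:
+ *
+ * *(void **)(&pIOMasterPort) = dlsym(iokit_handle, "IOMasterPort");
+ * if (pIOMasterPort == NULL)
+ * goto out;
+ *
+ * so each lookup bails out to the cleanup path when a symbol is missing.
+ */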
+ V(iokit_handle, IOMasterPort);
+ V(iokit_handle, IOServiceMatching);
+ V(iokit_handle, IOServiceGetMatchingServices);
+ V(iokit_handle, IOIteratorNext);
+ V(iokit_handle, IOObjectRelease);
+ V(iokit_handle, IORegistryEntryCreateCFProperty);
+ V(core_foundation_handle, CFStringCreateWithCString);
+ V(core_foundation_handle, CFStringGetSystemEncoding);
+ V(core_foundation_handle, CFDataGetBytePtr);
+ V(core_foundation_handle, CFDataGetLength);
+ V(core_foundation_handle, CFDataGetBytes);
+ V(core_foundation_handle, CFRelease);
+#undef V
+
+#define S(s) pCFStringCreateWithCString(NULL, (s), kCFStringEncodingUTF8)
+
+ kr = pIOMasterPort(MACH_PORT_NULL, &mach_port);
+ assert(kr == KERN_SUCCESS);
+ CFMutableDictionaryRef classes_to_match
+ = pIOServiceMatching("IOPlatformDevice");
+ kr = pIOServiceGetMatchingServices(mach_port, classes_to_match, &it);
+ assert(kr == KERN_SUCCESS);
+ service = pIOIteratorNext(it);
+
+ CFStringRef device_type_str = S("device_type");
+ CFStringRef clock_frequency_str = S("clock-frequency");
+
+ while (service != 0) {
+ CFDataRef data;
+ data = pIORegistryEntryCreateCFProperty(service,
+ device_type_str,
+ NULL,
+ 0);
+ if (data) {
+ const UInt8* raw = pCFDataGetBytePtr(data);
+ if (strncmp((char*)raw, "cpu", 3) == 0 ||
+ strncmp((char*)raw, "processor", 9) == 0) {
+ CFDataRef freq_ref;
+ freq_ref = pIORegistryEntryCreateCFProperty(service,
+ clock_frequency_str,
+ NULL,
+ 0);
+ if (freq_ref) {
+ const UInt8* freq_ref_ptr = pCFDataGetBytePtr(freq_ref);
+ CFIndex len = pCFDataGetLength(freq_ref);
+ if (len == 8)
+ memcpy(speed, freq_ref_ptr, 8);
+ else if (len == 4) {
+ uint32_t v;
+ memcpy(&v, freq_ref_ptr, 4);
+ *speed = v;
+ } else {
+ *speed = 0;
+ }
+
+ pCFRelease(freq_ref);
+ pCFRelease(data);
+ break;
+ }
+ }
+ pCFRelease(data);
+ }
+
+ service = pIOIteratorNext(it);
+ }
+
+ pIOObjectRelease(it);
+
+ err = 0;
+
+ if (device_type_str != NULL)
+ pCFRelease(device_type_str);
+ if (clock_frequency_str != NULL)
+ pCFRelease(clock_frequency_str);
+
+out:
+ if (core_foundation_handle != NULL)
+ dlclose(core_foundation_handle);
+
+ if (iokit_handle != NULL)
+ dlclose(iokit_handle);
+
+ mach_port_deallocate(mach_task_self(), mach_port);
+
+ return err;
+}
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
+ multiplier = ((uint64_t)1000L / ticks);
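+ /* With the usual _SC_CLK_TCK value of 100, multiplier is 10, i.e. each
+ * clock tick accounts for 10 ms in the cpu_times sums below.
+ */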
+ char model[512];
+ size_t size;
+ unsigned int i;
+ natural_t numcpus;
+ mach_msg_type_number_t msg_type;
+ processor_cpu_load_info_data_t *info;
+ uv_cpu_info_t* cpu_info;
+ uint64_t cpuspeed;
+ int err;
+
+ size = sizeof(model);
+ if (sysctlbyname("machdep.cpu.brand_string", &model, &size, NULL, 0) &&
+ sysctlbyname("hw.model", &model, &size, NULL, 0)) {
+ return UV__ERR(errno);
+ }
+
+ err = uv__get_cpu_speed(&cpuspeed);
+ if (err < 0)
+ return err;
+
+ if (host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO, &numcpus,
+ (processor_info_array_t*)&info,
+ &msg_type) != KERN_SUCCESS) {
+ return UV_EINVAL; /* FIXME(bnoordhuis) Translate error. */
+ }
+
+ *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos));
+ if (!(*cpu_infos)) {
+ vm_deallocate(mach_task_self(), (vm_address_t)info, msg_type);
+ return UV_ENOMEM;
+ }
+
+ *count = numcpus;
+
+ for (i = 0; i < numcpus; i++) {
+ cpu_info = &(*cpu_infos)[i];
+
+ cpu_info->cpu_times.user = (uint64_t)(info[i].cpu_ticks[0]) * multiplier;
+ cpu_info->cpu_times.nice = (uint64_t)(info[i].cpu_ticks[3]) * multiplier;
+ cpu_info->cpu_times.sys = (uint64_t)(info[i].cpu_ticks[1]) * multiplier;
+ cpu_info->cpu_times.idle = (uint64_t)(info[i].cpu_ticks[2]) * multiplier;
+ cpu_info->cpu_times.irq = 0;
+
+ cpu_info->model = uv__strdup(model);
+ cpu_info->speed = cpuspeed/1000000;
+ }
+ vm_deallocate(mach_task_self(), (vm_address_t)info, msg_type);
+
+ return 0;
+}
diff --git a/Utilities/cmlibuv/src/unix/dl.c b/Utilities/cmlibuv/src/unix/dl.c
new file mode 100644
index 0000000000..80b3333ae2
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/dl.c
@@ -0,0 +1,80 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <string.h>
+#include <locale.h>
+
+static int uv__dlerror(uv_lib_t* lib);
+
+
+int uv_dlopen(const char* filename, uv_lib_t* lib) {
+ dlerror(); /* Reset error status. */
+ lib->errmsg = NULL;
+ lib->handle = dlopen(filename, RTLD_LAZY);
+ return lib->handle ? 0 : uv__dlerror(lib);
+}
+
+
+void uv_dlclose(uv_lib_t* lib) {
+ uv__free(lib->errmsg);
+ lib->errmsg = NULL;
+
+ if (lib->handle) {
+ /* Ignore errors. No good way to signal them without leaking memory. */
+ dlclose(lib->handle);
+ lib->handle = NULL;
+ }
+}
+
+
+int uv_dlsym(uv_lib_t* lib, const char* name, void** ptr) {
+ dlerror(); /* Reset error status. */
+ *ptr = dlsym(lib->handle, name);
+ return *ptr ? 0 : uv__dlerror(lib);
+}
+
+
+const char* uv_dlerror(const uv_lib_t* lib) {
+ return lib->errmsg ? lib->errmsg : "no error";
+}
+
+
+static int uv__dlerror(uv_lib_t* lib) {
+ const char* errmsg;
+
+ uv__free(lib->errmsg);
+
+ errmsg = dlerror();
+
+ if (errmsg) {
+ lib->errmsg = uv__strdup(errmsg);
+ return -1;
+ }
+ else {
+ lib->errmsg = NULL;
+ return 0;
+ }
+}
diff --git a/Utilities/cmlibuv/src/unix/epoll.c b/Utilities/cmlibuv/src/unix/epoll.c
new file mode 100644
index 0000000000..97348e254b
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/epoll.c
@@ -0,0 +1,422 @@
+/* Copyright libuv contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+#include <errno.h>
+#include <sys/epoll.h>
+
+int uv__epoll_init(uv_loop_t* loop) {
+ int fd;
+ fd = epoll_create1(O_CLOEXEC);
+
+ /* epoll_create1() can fail either because it's not implemented (old kernel)
+ * or because it doesn't understand the O_CLOEXEC flag.
+ */
+ if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) {
+ fd = epoll_create(256);
+
+ if (fd != -1)
+ uv__cloexec(fd, 1);
+ }
+
+ loop->backend_fd = fd;
+ if (fd == -1)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
+ struct epoll_event* events;
+ struct epoll_event dummy;
+ uintptr_t i;
+ uintptr_t nfds;
+
+ assert(loop->watchers != NULL);
+ assert(fd >= 0);
+
+ events = (struct epoll_event*) loop->watchers[loop->nwatchers];
+ nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
+ if (events != NULL)
+ /* Invalidate events with the same file descriptor */
+ for (i = 0; i < nfds; i++)
+ if (events[i].data.fd == fd)
+ events[i].data.fd = -1;
+
+ /* Remove the file descriptor from the epoll.
+ * This avoids a problem where the same file description remains open
+ * in another process, causing repeated junk epoll events.
+ *
+ * We pass in a dummy epoll_event, to work around a bug in old kernels.
+ */
+ if (loop->backend_fd >= 0) {
+ /* Work around a bug in kernels 3.10 to 3.19 where passing a struct that
+ * has the EPOLLWAKEUP flag set generates spurious audit syslog warnings.
+ */
+ memset(&dummy, 0, sizeof(dummy));
+ epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy);
+ }
+}
+
+
+int uv__io_check_fd(uv_loop_t* loop, int fd) {
+ struct epoll_event e;
+ int rc;
+
+ memset(&e, 0, sizeof(e));
+ e.events = POLLIN;
+ e.data.fd = -1;
+
+ rc = 0;
+ if (epoll_ctl(loop->backend_fd, EPOLL_CTL_ADD, fd, &e))
+ if (errno != EEXIST)
+ rc = UV__ERR(errno);
+
+ if (rc == 0)
+ if (epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &e))
+ abort();
+
+ return rc;
+}
+
+
+void uv__io_poll(uv_loop_t* loop, int timeout) {
+ /* A bug in kernels < 2.6.37 makes timeouts larger than ~30 minutes
+ * effectively infinite on 32 bits architectures. To avoid blocking
+ * indefinitely, we cap the timeout and poll again if necessary.
+ *
+ * Note that "30 minutes" is a simplification because it depends on
+ * the value of CONFIG_HZ. The magic constant assumes CONFIG_HZ=1200,
+ * that being the largest value I have seen in the wild (and only once.)
+ */
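+ /* Note on the constant below: it is INT32_MAX / 1200, i.e.
+ * 2147483647 / 1200 = 1789569 ms (~29.8 minutes), the largest timeout
+ * whose conversion to jiffies at CONFIG_HZ=1200 still fits in a signed
+ * 32-bit intermediate; presumably that is where the overflow happened.
+ */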
+ static const int max_safe_timeout = 1789569;
+ static int no_epoll_pwait_cached;
+ static int no_epoll_wait_cached;
+ int no_epoll_pwait;
+ int no_epoll_wait;
+ struct epoll_event events[1024];
+ struct epoll_event* pe;
+ struct epoll_event e;
+ int real_timeout;
+ QUEUE* q;
+ uv__io_t* w;
+ sigset_t sigset;
+ uint64_t sigmask;
+ uint64_t base;
+ int have_signals;
+ int nevents;
+ int count;
+ int nfds;
+ int fd;
+ int op;
+ int i;
+ int user_timeout;
+ int reset_timeout;
+
+ if (loop->nfds == 0) {
+ assert(QUEUE_EMPTY(&loop->watcher_queue));
+ return;
+ }
+
+ memset(&e, 0, sizeof(e));
+
+ while (!QUEUE_EMPTY(&loop->watcher_queue)) {
+ q = QUEUE_HEAD(&loop->watcher_queue);
+ QUEUE_REMOVE(q);
+ QUEUE_INIT(q);
+
+ w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+ assert(w->pevents != 0);
+ assert(w->fd >= 0);
+ assert(w->fd < (int) loop->nwatchers);
+
+ e.events = w->pevents;
+ e.data.fd = w->fd;
+
+ if (w->events == 0)
+ op = EPOLL_CTL_ADD;
+ else
+ op = EPOLL_CTL_MOD;
+
+ /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
+ * events, skip the syscall and squelch the events after epoll_wait().
+ */
+ if (epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
+ if (errno != EEXIST)
+ abort();
+
+ assert(op == EPOLL_CTL_ADD);
+
+ /* We've reactivated a file descriptor that's been watched before. */
+ if (epoll_ctl(loop->backend_fd, EPOLL_CTL_MOD, w->fd, &e))
+ abort();
+ }
+
+ w->events = w->pevents;
+ }
+
+ sigmask = 0;
+ if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
+ sigemptyset(&sigset);
+ sigaddset(&sigset, SIGPROF);
+ sigmask |= 1 << (SIGPROF - 1);
+ }
+
+ assert(timeout >= -1);
+ base = loop->time;
+ count = 48; /* Benchmarks suggest this gives the best throughput. */
+ real_timeout = timeout;
+
+ if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
+ reset_timeout = 1;
+ user_timeout = timeout;
+ timeout = 0;
+ } else {
+ reset_timeout = 0;
+ user_timeout = 0;
+ }
+
+ /* You could argue there is a dependency between these two but
+ * ultimately we don't care about their ordering with respect
+ * to one another. Worst case, we make a few system calls that
+ * could have been avoided because another thread already knows
+ * they fail with ENOSYS. Hardly the end of the world.
+ */
+ no_epoll_pwait = uv__load_relaxed(&no_epoll_pwait_cached);
+ no_epoll_wait = uv__load_relaxed(&no_epoll_wait_cached);
+
+ for (;;) {
+ /* Only need to set the provider_entry_time if timeout != 0. The function
+ * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
+ */
+ if (timeout != 0)
+ uv__metrics_set_provider_entry_time(loop);
+
+ /* See the comment for max_safe_timeout for an explanation of why
+ * this is necessary. Executive summary: kernel bug workaround.
+ */
+ if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
+ timeout = max_safe_timeout;
+
+ if (sigmask != 0 && no_epoll_pwait != 0)
+ if (pthread_sigmask(SIG_BLOCK, &sigset, NULL))
+ abort();
+
+ if (no_epoll_wait != 0 || (sigmask != 0 && no_epoll_pwait == 0)) {
+ nfds = epoll_pwait(loop->backend_fd,
+ events,
+ ARRAY_SIZE(events),
+ timeout,
+ &sigset);
+ if (nfds == -1 && errno == ENOSYS) {
+ uv__store_relaxed(&no_epoll_pwait_cached, 1);
+ no_epoll_pwait = 1;
+ }
+ } else {
+ nfds = epoll_wait(loop->backend_fd,
+ events,
+ ARRAY_SIZE(events),
+ timeout);
+ if (nfds == -1 && errno == ENOSYS) {
+ uv__store_relaxed(&no_epoll_wait_cached, 1);
+ no_epoll_wait = 1;
+ }
+ }
+
+ if (sigmask != 0 && no_epoll_pwait != 0)
+ if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL))
+ abort();
+
+ /* Update loop->time unconditionally. It's tempting to skip the update when
+ * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
+ * operating system didn't reschedule our process while in the syscall.
+ */
+ SAVE_ERRNO(uv__update_time(loop));
+
+ if (nfds == 0) {
+ assert(timeout != -1);
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (timeout == -1)
+ continue;
+
+ if (timeout == 0)
+ return;
+
+ /* We may have been inside the system call for longer than |timeout|
+ * milliseconds so we need to update the timestamp to avoid drift.
+ */
+ goto update_timeout;
+ }
+
+ if (nfds == -1) {
+ if (errno == ENOSYS) {
+ /* epoll_wait() or epoll_pwait() failed, try the other system call. */
+ assert(no_epoll_wait == 0 || no_epoll_pwait == 0);
+ continue;
+ }
+
+ if (errno != EINTR)
+ abort();
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (timeout == -1)
+ continue;
+
+ if (timeout == 0)
+ return;
+
+ /* Interrupted by a signal. Update timeout and poll again. */
+ goto update_timeout;
+ }
+
+ have_signals = 0;
+ nevents = 0;
+
+ {
+ /* Squelch a -Waddress-of-packed-member warning with gcc >= 9. */
+ union {
+ struct epoll_event* events;
+ uv__io_t* watchers;
+ } x;
+
+ x.events = events;
+ assert(loop->watchers != NULL);
+ loop->watchers[loop->nwatchers] = x.watchers;
+ loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
+ }
+
+ for (i = 0; i < nfds; i++) {
+ pe = events + i;
+ fd = pe->data.fd;
+
+ /* Skip invalidated events, see uv__platform_invalidate_fd */
+ if (fd == -1)
+ continue;
+
+ assert(fd >= 0);
+ assert((unsigned) fd < loop->nwatchers);
+
+ w = loop->watchers[fd];
+
+ if (w == NULL) {
+ /* File descriptor that we've stopped watching, disarm it.
+ *
+ * Ignore all errors because we may be racing with another thread
+ * when the file descriptor is closed.
+ */
+ epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, pe);
+ continue;
+ }
+
+ /* Give users only events they're interested in. Prevents spurious
+ * callbacks when a previous callback invocation in this loop has
+ * stopped the current watcher. Also filters out events that the user
+ * has not requested us to watch.
+ */
+ pe->events &= w->pevents | POLLERR | POLLHUP;
+
+ /* Work around an epoll quirk where it sometimes reports just the
+ * EPOLLERR or EPOLLHUP event. In order to force the event loop to
+ * move forward, we merge in the read/write events that the watcher
+ * is interested in; uv__read() and uv__write() will then deal with
+ * the error or hangup in the usual fashion.
+ *
+ * Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user
+ * reads the available data, calls uv_read_stop(), then sometime later
+ * calls uv_read_start() again. By then, libuv has forgotten about the
+ * hangup and the kernel won't report EPOLLIN again because there's
+ * nothing left to read. If anything, libuv is to blame here. The
+ * current hack is just a quick bandaid; to properly fix it, libuv
+ * needs to remember the error/hangup event. We should get that for
+ * free when we switch over to edge-triggered I/O.
+ */
+ if (pe->events == POLLERR || pe->events == POLLHUP)
+ pe->events |=
+ w->pevents & (POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
+
+ if (pe->events != 0) {
+ /* Run signal watchers last. This also affects child process watchers
+ * because those are implemented in terms of signal watchers.
+ */
+ if (w == &loop->signal_io_watcher) {
+ have_signals = 1;
+ } else {
+ uv__metrics_update_idle_time(loop);
+ w->cb(loop, w, pe->events);
+ }
+
+ nevents++;
+ }
+ }
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (have_signals != 0) {
+ uv__metrics_update_idle_time(loop);
+ loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
+ }
+
+ loop->watchers[loop->nwatchers] = NULL;
+ loop->watchers[loop->nwatchers + 1] = NULL;
+
+ if (have_signals != 0)
+ return; /* Event loop should cycle now so don't poll again. */
+
+ if (nevents != 0) {
+ if (nfds == ARRAY_SIZE(events) && --count != 0) {
+ /* Poll for more events but don't block this time. */
+ timeout = 0;
+ continue;
+ }
+ return;
+ }
+
+ if (timeout == 0)
+ return;
+
+ if (timeout == -1)
+ continue;
+
+update_timeout:
+ assert(timeout > 0);
+
+ real_timeout -= (loop->time - base);
+ if (real_timeout <= 0)
+ return;
+
+ timeout = real_timeout;
+ }
+}
+
diff --git a/Utilities/cmlibuv/src/unix/freebsd.c b/Utilities/cmlibuv/src/unix/freebsd.c
new file mode 100644
index 0000000000..658ff262d3
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/freebsd.c
@@ -0,0 +1,304 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <string.h>
+#include <errno.h>
+
+#include <paths.h>
+#include <sys/user.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+#include <sys/sysctl.h>
+#include <vm/vm_param.h> /* VM_LOADAVG */
+#include <time.h>
+#include <stdlib.h>
+#include <unistd.h> /* sysconf */
+#include <fcntl.h>
+
+#ifndef CPUSTATES
+# define CPUSTATES 5U
+#endif
+#ifndef CP_USER
+# define CP_USER 0
+# define CP_NICE 1
+# define CP_SYS 2
+# define CP_IDLE 3
+# define CP_INTR 4
+#endif
+
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+ return uv__kqueue_init(loop);
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+}
+
+int uv_exepath(char* buffer, size_t* size) {
+ char abspath[PATH_MAX * 2 + 1];
+ int mib[4];
+ size_t abspath_size;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC;
+ mib[2] = KERN_PROC_PATHNAME;
+ mib[3] = -1;
+
+ abspath_size = sizeof abspath;
+ if (sysctl(mib, ARRAY_SIZE(mib), abspath, &abspath_size, NULL, 0))
+ return UV__ERR(errno);
+
+ assert(abspath_size > 0);
+ abspath_size -= 1;
+ *size -= 1;
+
+ if (*size > abspath_size)
+ *size = abspath_size;
+
+ memcpy(buffer, abspath, *size);
+ buffer[*size] = '\0';
+
+ return 0;
+}
+
+uint64_t uv_get_free_memory(void) {
+ int freecount;
+ size_t size = sizeof(freecount);
+
+ if (sysctlbyname("vm.stats.vm.v_free_count", &freecount, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ return (uint64_t) freecount * sysconf(_SC_PAGESIZE);
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ unsigned long info;
+ int which[] = {CTL_HW, HW_PHYSMEM};
+
+ size_t size = sizeof(info);
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ return (uint64_t) info;
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0; /* Memory constraints are unknown. */
+}
+
+
+void uv_loadavg(double avg[3]) {
+ struct loadavg info;
+ size_t size = sizeof(info);
+ int which[] = {CTL_VM, VM_LOADAVG};
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0) < 0) return;
+
+ avg[0] = (double) info.ldavg[0] / info.fscale;
+ avg[1] = (double) info.ldavg[1] / info.fscale;
+ avg[2] = (double) info.ldavg[2] / info.fscale;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+ struct kinfo_proc kinfo;
+ size_t page_size;
+ size_t kinfo_size;
+ int mib[4];
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC;
+ mib[2] = KERN_PROC_PID;
+ mib[3] = getpid();
+
+ kinfo_size = sizeof(kinfo);
+
+ if (sysctl(mib, ARRAY_SIZE(mib), &kinfo, &kinfo_size, NULL, 0))
+ return UV__ERR(errno);
+
+ page_size = getpagesize();
+
+#ifdef __DragonFly__
+ *rss = kinfo.kp_vm_rssize * page_size;
+#else
+ *rss = kinfo.ki_rssize * page_size;
+#endif
+
+ return 0;
+}
+
+
+int uv_uptime(double* uptime) {
+ int r;
+ struct timespec sp;
+ r = clock_gettime(CLOCK_MONOTONIC, &sp);
+ if (r)
+ return UV__ERR(errno);
+
+ *uptime = sp.tv_sec;
+ return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK),
+ multiplier = ((uint64_t)1000L / ticks), cpuspeed, maxcpus,
+ cur = 0;
+ uv_cpu_info_t* cpu_info;
+ const char* maxcpus_key;
+ const char* cptimes_key;
+ const char* model_key;
+ char model[512];
+ long* cp_times;
+ int numcpus;
+ size_t size;
+ int i;
+
+#if defined(__DragonFly__)
+ /* This is not quite correct but DragonFlyBSD doesn't seem to have anything
+ * comparable to kern.smp.maxcpus or kern.cp_times (kern.cp_time is a total,
+ * not per CPU). At least this stops uv_cpu_info() from failing completely.
+ */
+ maxcpus_key = "hw.ncpu";
+ cptimes_key = "kern.cp_time";
+#else
+ maxcpus_key = "kern.smp.maxcpus";
+ cptimes_key = "kern.cp_times";
+#endif
+
+#if defined(__arm__) || defined(__aarch64__)
+ /* The keys hw.model and hw.clockrate are not available on FreeBSD ARM. */
+ model_key = "hw.machine";
+ cpuspeed = 0;
+#else
+ model_key = "hw.model";
+
+ size = sizeof(cpuspeed);
+ if (sysctlbyname("hw.clockrate", &cpuspeed, &size, NULL, 0))
+ return UV__ERR(errno);
+#endif
+
+ size = sizeof(model);
+ if (sysctlbyname(model_key, &model, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ size = sizeof(numcpus);
+ if (sysctlbyname("hw.ncpu", &numcpus, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos));
+ if (!(*cpu_infos))
+ return UV_ENOMEM;
+
+ *count = numcpus;
+
+ /* kern.cp_times on FreeBSD i386 gives an array up to maxcpus instead of
+ * ncpu.
+ */
+ size = sizeof(maxcpus);
+ if (sysctlbyname(maxcpus_key, &maxcpus, &size, NULL, 0)) {
+ uv__free(*cpu_infos);
+ return UV__ERR(errno);
+ }
+
+ size = maxcpus * CPUSTATES * sizeof(long);
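+ /* cp_times is a flattened [maxcpus][CPUSTATES] array of tick counters:
+ * CPU n's counters start at cp_times[n * CPUSTATES] and the CP_* indices
+ * defined above select the state within each row, which is what the
+ * `cur` offset in the copy loop below relies on.
+ */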
+
+ cp_times = uv__malloc(size);
+ if (cp_times == NULL) {
+ uv__free(*cpu_infos);
+ return UV_ENOMEM;
+ }
+
+ if (sysctlbyname(cptimes_key, cp_times, &size, NULL, 0)) {
+ uv__free(cp_times);
+ uv__free(*cpu_infos);
+ return UV__ERR(errno);
+ }
+
+ for (i = 0; i < numcpus; i++) {
+ cpu_info = &(*cpu_infos)[i];
+
+ cpu_info->cpu_times.user = (uint64_t)(cp_times[CP_USER+cur]) * multiplier;
+ cpu_info->cpu_times.nice = (uint64_t)(cp_times[CP_NICE+cur]) * multiplier;
+ cpu_info->cpu_times.sys = (uint64_t)(cp_times[CP_SYS+cur]) * multiplier;
+ cpu_info->cpu_times.idle = (uint64_t)(cp_times[CP_IDLE+cur]) * multiplier;
+ cpu_info->cpu_times.irq = (uint64_t)(cp_times[CP_INTR+cur]) * multiplier;
+
+ cpu_info->model = uv__strdup(model);
+ cpu_info->speed = cpuspeed;
+
+ cur+=CPUSTATES;
+ }
+
+ uv__free(cp_times);
+ return 0;
+}
+
+
+int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
+#if __FreeBSD__ >= 11 && !defined(__DragonFly__)
+ return sendmmsg(fd,
+ (struct mmsghdr*) mmsg,
+ vlen,
+ 0 /* flags */);
+#else
+ return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
+#if __FreeBSD__ >= 11 && !defined(__DragonFly__)
+ return recvmmsg(fd,
+ (struct mmsghdr*) mmsg,
+ vlen,
+ 0 /* flags */,
+ NULL /* timeout */);
+#else
+ return errno = ENOSYS, -1;
+#endif
+}
+
+ssize_t
+uv__fs_copy_file_range(int fd_in,
+ off_t* off_in,
+ int fd_out,
+ off_t* off_out,
+ size_t len,
+ unsigned int flags)
+{
+#if __FreeBSD__ >= 13 && !defined(__DragonFly__)
+ return copy_file_range(fd_in, off_in, fd_out, off_out, len, flags);
+#else
+ return errno = ENOSYS, -1;
+#endif
+}
diff --git a/Utilities/cmlibuv/src/unix/fs.c b/Utilities/cmlibuv/src/unix/fs.c
new file mode 100644
index 0000000000..e2db3ad668
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/fs.c
@@ -0,0 +1,2270 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/* Caveat emptor: this file deviates from the libuv convention of returning
+ * negated errno codes. Most uv_fs_*() functions map directly to the system
+ * call of the same name. For more complex wrappers, it's easier to just
+ * return -1 with errno set. The dispatcher in uv__fs_work() takes care of
+ * getting the errno to the right place (req->result or as the return value.)
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <errno.h>
+#include <dlfcn.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h> /* PATH_MAX */
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/uio.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <poll.h>
+
+#if defined(__DragonFly__) || \
+ defined(__FreeBSD__) || \
+ defined(__FreeBSD_kernel__) || \
+ defined(__OpenBSD__) || \
+ defined(__NetBSD__)
+# define HAVE_PREADV 1
+#else
+# define HAVE_PREADV 0
+#endif
+
+#if defined(__linux__)
+# include "sys/utsname.h"
+#endif
+
+#if defined(__linux__) || defined(__sun)
+# include <sys/sendfile.h>
+# include <sys/sysmacros.h>
+#endif
+
+#if defined(__APPLE__)
+# include <sys/sysctl.h>
+#elif defined(__linux__) && !defined(FICLONE)
+# include <sys/ioctl.h>
+# define FICLONE _IOW(0x94, 9, int)
+#endif
+
+#if defined(_AIX) && !defined(_AIX71)
+# include <utime.h>
+#endif
+
+#if defined(__APPLE__) || \
+ defined(__DragonFly__) || \
+ defined(__FreeBSD__) || \
+ defined(__FreeBSD_kernel__) || \
+ defined(__OpenBSD__) || \
+ defined(__NetBSD__)
+# include <sys/param.h>
+# include <sys/mount.h>
+#elif defined(__sun) || \
+ defined(__MVS__) || \
+ defined(__NetBSD__) || \
+ defined(__HAIKU__) || \
+ defined(__QNX__)
+# include <sys/statvfs.h>
+#else
+# include <sys/statfs.h>
+#endif
+
+#if defined(_AIX) && _XOPEN_SOURCE <= 600
+extern char *mkdtemp(char *template); /* See issue #740 on AIX < 7 */
+#endif
+
+#define INIT(subtype) \
+ do { \
+ if (req == NULL) \
+ return UV_EINVAL; \
+ UV_REQ_INIT(req, UV_FS); \
+ req->fs_type = UV_FS_ ## subtype; \
+ req->result = 0; \
+ req->ptr = NULL; \
+ req->loop = loop; \
+ req->path = NULL; \
+ req->new_path = NULL; \
+ req->bufs = NULL; \
+ req->cb = cb; \
+ } \
+ while (0)
+
+#define PATH \
+ do { \
+ assert(path != NULL); \
+ if (cb == NULL) { \
+ req->path = path; \
+ } else { \
+ req->path = uv__strdup(path); \
+ if (req->path == NULL) \
+ return UV_ENOMEM; \
+ } \
+ } \
+ while (0)
+
+#define PATH2 \
+ do { \
+ if (cb == NULL) { \
+ req->path = path; \
+ req->new_path = new_path; \
+ } else { \
+ size_t path_len; \
+ size_t new_path_len; \
+ path_len = strlen(path) + 1; \
+ new_path_len = strlen(new_path) + 1; \
+ req->path = uv__malloc(path_len + new_path_len); \
+ if (req->path == NULL) \
+ return UV_ENOMEM; \
+ req->new_path = req->path + path_len; \
+ memcpy((void*) req->path, path, path_len); \
+ memcpy((void*) req->new_path, new_path, new_path_len); \
+ } \
+ } \
+ while (0)
+
+#define POST \
+ do { \
+ if (cb != NULL) { \
+ uv__req_register(loop, req); \
+ uv__work_submit(loop, \
+ &req->work_req, \
+ UV__WORK_FAST_IO, \
+ uv__fs_work, \
+ uv__fs_done); \
+ return 0; \
+ } \
+ else { \
+ uv__fs_work(&req->work_req); \
+ return req->result; \
+ } \
+ } \
+ while (0)
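+/* Together, INIT/PATH/POST give every uv_fs_*() wrapper the same shape:
+ * with a callback the request is queued on the threadpool and 0 is
+ * returned immediately; with cb == NULL the work runs synchronously and
+ * the (possibly negative) result is returned directly. For example,
+ * uv_fs_open(NULL, &req, "/tmp/x", O_RDONLY, 0, NULL) blocks and returns
+ * an fd or a negated errno, while passing a callback makes it async.
+ */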
+
+
+static int uv__fs_close(int fd) {
+ int rc;
+
+ rc = uv__close_nocancel(fd);
+ if (rc == -1)
+ if (errno == EINTR || errno == EINPROGRESS)
+ rc = 0; /* The close is in progress, not an error. */
+
+ return rc;
+}
+
+
+static ssize_t uv__fs_fsync(uv_fs_t* req) {
+#if defined(__APPLE__)
+ /* Apple's fdatasync and fsync explicitly do NOT flush the drive write cache
+ * to the drive platters. This is in contrast to Linux's fdatasync and fsync
+ * which do, according to recent man pages. F_FULLFSYNC is Apple's equivalent
+ * for flushing buffered data to permanent storage. If F_FULLFSYNC is not
+ * supported by the file system we fall back to F_BARRIERFSYNC or fsync().
+ * This is the same approach taken by sqlite, except sqlite does not issue
+ * an F_BARRIERFSYNC call.
+ */
+ int r;
+
+ r = fcntl(req->file, F_FULLFSYNC);
+ if (r != 0)
+ r = fcntl(req->file, 85 /* F_BARRIERFSYNC */); /* fsync + barrier */
+ if (r != 0)
+ r = fsync(req->file);
+ return r;
+#else
+ return fsync(req->file);
+#endif
+}
+
+
+static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
+#if defined(__linux__) || defined(__sun) || defined(__NetBSD__)
+ return fdatasync(req->file);
+#elif defined(__APPLE__)
+ /* See the comment in uv__fs_fsync. */
+ return uv__fs_fsync(req);
+#else
+ return fsync(req->file);
+#endif
+}
+
+
+UV_UNUSED(static struct timespec uv__fs_to_timespec(double time)) {
+ struct timespec ts;
+ ts.tv_sec = time;
+ ts.tv_nsec = (time - ts.tv_sec) * 1e9;
+
+ /* TODO(bnoordhuis) Remove this. utimensat() has nanosecond resolution but we
+ * stick to microsecond resolution for the sake of consistency with other
+ * platforms. I'm the original author of this compatibility hack but I'm
+ * less convinced it's useful nowadays.
+ */
+ ts.tv_nsec -= ts.tv_nsec % 1000;
+
+ if (ts.tv_nsec < 0) {
+ ts.tv_nsec += 1e9;
+ ts.tv_sec -= 1;
+ }
+ return ts;
+}
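+/* Worked examples for the conversion above (values illustrative):
+ * time = 1.0000015 -> tv_sec = 1, tv_nsec = 1500 -> 1000 after the
+ * truncation to whole microseconds;
+ * time = -1.5 -> tv_sec = -1, tv_nsec = -500000000, which the final
+ * normalization turns into tv_sec = -2, tv_nsec = 500000000, the same
+ * instant expressed with a non-negative nanosecond field.
+ */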
+
+UV_UNUSED(static struct timeval uv__fs_to_timeval(double time)) {
+ struct timeval tv;
+ tv.tv_sec = time;
+ tv.tv_usec = (time - tv.tv_sec) * 1e6;
+ if (tv.tv_usec < 0) {
+ tv.tv_usec += 1e6;
+ tv.tv_sec -= 1;
+ }
+ return tv;
+}
+
+static ssize_t uv__fs_futime(uv_fs_t* req) {
+#if defined(__linux__) \
+ || defined(_AIX71) \
+ || defined(__HAIKU__) \
+ || defined(__GNU__)
+ struct timespec ts[2];
+ ts[0] = uv__fs_to_timespec(req->atime);
+ ts[1] = uv__fs_to_timespec(req->mtime);
+ return futimens(req->file, ts);
+#elif defined(__APPLE__) \
+ || defined(__DragonFly__) \
+ || defined(__FreeBSD__) \
+ || defined(__FreeBSD_kernel__) \
+ || defined(__NetBSD__) \
+ || defined(__OpenBSD__) \
+ || defined(__sun)
+ struct timeval tv[2];
+ tv[0] = uv__fs_to_timeval(req->atime);
+ tv[1] = uv__fs_to_timeval(req->mtime);
+# if defined(__sun)
+ return futimesat(req->file, NULL, tv);
+# else
+ return futimes(req->file, tv);
+# endif
+#elif defined(__MVS__)
+ attrib_t atr;
+ memset(&atr, 0, sizeof(atr));
+ atr.att_mtimechg = 1;
+ atr.att_atimechg = 1;
+ atr.att_mtime = req->mtime;
+ atr.att_atime = req->atime;
+ return __fchattr(req->file, &atr, sizeof(atr));
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+
+#if (defined(__sun) || defined(__hpux)) && (_XOPEN_SOURCE < 600 || defined(CMAKE_BOOTSTRAP))
+static char* uv__mkdtemp(char *template)
+{
+ if (!mktemp(template) || mkdir(template, 0700))
+ return NULL;
+ return template;
+}
+#else
+#define uv__mkdtemp mkdtemp
+#endif
+
+static ssize_t uv__fs_mkdtemp(uv_fs_t* req) {
+ return uv__mkdtemp((char*) req->path) ? 0 : -1;
+}
+
+
+static int (*uv__mkostemp)(char*, int);
+
+
+static void uv__mkostemp_initonce(void) {
+ /* z/os doesn't have RTLD_DEFAULT but that's okay
+ * because it doesn't have mkostemp(O_CLOEXEC) either.
+ */
+#ifdef RTLD_DEFAULT
+ uv__mkostemp = (int (*)(char*, int)) dlsym(RTLD_DEFAULT, "mkostemp");
+
+ /* We don't care about errors, but we do want to clear the error
+ * state. If there has been no error, dlerror() simply returns
+ * NULL.
+ */
+ dlerror();
+#endif /* RTLD_DEFAULT */
+}
+
+
+static int uv__fs_mkstemp(uv_fs_t* req) {
+ static uv_once_t once = UV_ONCE_INIT;
+ int r;
+#ifdef O_CLOEXEC
+ static int no_cloexec_support;
+#endif
+ static const char pattern[] = "XXXXXX";
+ static const size_t pattern_size = sizeof(pattern) - 1;
+ char* path;
+ size_t path_length;
+
+ path = (char*) req->path;
+ path_length = strlen(path);
+
+ /* EINVAL can be returned for 2 reasons:
+ 1. The template's last 6 characters were not XXXXXX
+ 2. open() didn't support O_CLOEXEC
+ We want to avoid taking the fallback path in case
+ of 1, so it is checked manually first. */
+ if (path_length < pattern_size ||
+ strcmp(path + path_length - pattern_size, pattern)) {
+ errno = EINVAL;
+ r = -1;
+ goto clobber;
+ }
+
+ uv_once(&once, uv__mkostemp_initonce);
+
+#ifdef O_CLOEXEC
+ if (uv__load_relaxed(&no_cloexec_support) == 0 && uv__mkostemp != NULL) {
+ r = uv__mkostemp(path, O_CLOEXEC);
+
+ if (r >= 0)
+ return r;
+
+ /* If mkostemp() returns EINVAL, it means the kernel doesn't
+ support O_CLOEXEC, so we just fall back to mkstemp() below. */
+ if (errno != EINVAL)
+ goto clobber;
+
+ /* We set the static variable so that subsequent calls don't even
+ try to use mkostemp(). */
+ uv__store_relaxed(&no_cloexec_support, 1);
+ }
+#endif /* O_CLOEXEC */
+
+ if (req->cb != NULL)
+ uv_rwlock_rdlock(&req->loop->cloexec_lock);
+
+ r = mkstemp(path);
+
+ /* In case of failure `uv__cloexec` will leave error in `errno`,
+ * so it is enough to just set `r` to `-1`.
+ */
+ if (r >= 0 && uv__cloexec(r, 1) != 0) {
+ r = uv__close(r);
+ if (r != 0)
+ abort();
+ r = -1;
+ }
+
+ if (req->cb != NULL)
+ uv_rwlock_rdunlock(&req->loop->cloexec_lock);
+
+clobber:
+ if (r < 0)
+ path[0] = '\0';
+ return r;
+}
+
+
+static ssize_t uv__fs_open(uv_fs_t* req) {
+#ifdef O_CLOEXEC
+ return open(req->path, req->flags | O_CLOEXEC, req->mode);
+#else /* O_CLOEXEC */
+ int r;
+
+ if (req->cb != NULL)
+ uv_rwlock_rdlock(&req->loop->cloexec_lock);
+
+ r = open(req->path, req->flags, req->mode);
+
+ /* In case of failure `uv__cloexec` will leave error in `errno`,
+ * so it is enough to just set `r` to `-1`.
+ */
+ if (r >= 0 && uv__cloexec(r, 1) != 0) {
+ r = uv__close(r);
+ if (r != 0)
+ abort();
+ r = -1;
+ }
+
+ if (req->cb != NULL)
+ uv_rwlock_rdunlock(&req->loop->cloexec_lock);
+
+ return r;
+#endif /* O_CLOEXEC */
+}
+
+
+#if !HAVE_PREADV
+static ssize_t uv__fs_preadv(uv_file fd,
+ uv_buf_t* bufs,
+ unsigned int nbufs,
+ off_t off) {
+ uv_buf_t* buf;
+ uv_buf_t* end;
+ ssize_t result;
+ ssize_t rc;
+ size_t pos;
+
+ assert(nbufs > 0);
+
+ result = 0;
+ pos = 0;
+ buf = bufs + 0;
+ end = bufs + nbufs;
+
+ for (;;) {
+ do
+ rc = pread(fd, buf->base + pos, buf->len - pos, off + result);
+ while (rc == -1 && errno == EINTR);
+
+ if (rc == 0)
+ break;
+
+ if (rc == -1 && result == 0)
+ return UV__ERR(errno);
+
+ if (rc == -1)
+ break; /* We read some data so return that, ignore the error. */
+
+ pos += rc;
+ result += rc;
+
+ if (pos < buf->len)
+ continue;
+
+ pos = 0;
+ buf += 1;
+
+ if (buf == end)
+ break;
+ }
+
+ return result;
+}
+#endif
+
+
+static ssize_t uv__fs_read(uv_fs_t* req) {
+#if defined(__linux__)
+ static int no_preadv;
+#endif
+ unsigned int iovmax;
+ ssize_t result;
+
+ iovmax = uv__getiovmax();
+ if (req->nbufs > iovmax)
+ req->nbufs = iovmax;
+
+ if (req->off < 0) {
+ if (req->nbufs == 1)
+ result = read(req->file, req->bufs[0].base, req->bufs[0].len);
+ else
+ result = readv(req->file, (struct iovec*) req->bufs, req->nbufs);
+ } else {
+ if (req->nbufs == 1) {
+ result = pread(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
+ goto done;
+ }
+
+#if HAVE_PREADV
+ result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
+#else
+# if defined(__linux__)
+ if (uv__load_relaxed(&no_preadv)) retry:
+# endif
+ {
+ result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off);
+ }
+# if defined(__linux__)
+ else {
+ result = uv__preadv(req->file,
+ (struct iovec*)req->bufs,
+ req->nbufs,
+ req->off);
+ if (result == -1 && errno == ENOSYS) {
+ uv__store_relaxed(&no_preadv, 1);
+ goto retry;
+ }
+ }
+# endif
+#endif
+ }
+
+done:
+ /* Early cleanup of bufs allocation, since we're done with it. */
+ if (req->bufs != req->bufsml)
+ uv__free(req->bufs);
+
+ req->bufs = NULL;
+ req->nbufs = 0;
+
+#ifdef __PASE__
+ /* PASE returns EOPNOTSUPP when reading a directory, convert to EISDIR */
+ if (result == -1 && errno == EOPNOTSUPP) {
+ struct stat buf;
+ ssize_t rc;
+ rc = fstat(req->file, &buf);
+ if (rc == 0 && S_ISDIR(buf.st_mode)) {
+ errno = EISDIR;
+ }
+ }
+#endif
+
+ return result;
+}
+
+
+#if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_8)
+#define UV_CONST_DIRENT uv__dirent_t
+#else
+#define UV_CONST_DIRENT const uv__dirent_t
+#endif
+
+
+static int uv__fs_scandir_filter(UV_CONST_DIRENT* dent) {
+ return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0;
+}
+
+
+static int uv__fs_scandir_sort(UV_CONST_DIRENT** a, UV_CONST_DIRENT** b) {
+ return strcmp((*a)->d_name, (*b)->d_name);
+}
+
+
+static ssize_t uv__fs_scandir(uv_fs_t* req) {
+ uv__dirent_t** dents;
+ int n;
+
+ dents = NULL;
+ n = scandir(req->path, &dents, uv__fs_scandir_filter, uv__fs_scandir_sort);
+
+ /* NOTE: We will use nbufs as an index field */
+ req->nbufs = 0;
+
+ if (n == 0) {
+ /* OS X still needs to deallocate some memory.
+ * Memory was allocated using the system allocator, so use free() here.
+ */
+ free(dents);
+ dents = NULL;
+ } else if (n == -1) {
+ return n;
+ }
+
+ req->ptr = dents;
+
+ return n;
+}
+
+static int uv__fs_opendir(uv_fs_t* req) {
+ uv_dir_t* dir;
+
+ dir = uv__malloc(sizeof(*dir));
+ if (dir == NULL)
+ goto error;
+
+ dir->dir = opendir(req->path);
+ if (dir->dir == NULL)
+ goto error;
+
+ req->ptr = dir;
+ return 0;
+
+error:
+ uv__free(dir);
+ req->ptr = NULL;
+ return -1;
+}
+
+static int uv__fs_readdir(uv_fs_t* req) {
+ uv_dir_t* dir;
+ uv_dirent_t* dirent;
+ struct dirent* res;
+ unsigned int dirent_idx;
+ unsigned int i;
+
+ dir = req->ptr;
+ dirent_idx = 0;
+
+ while (dirent_idx < dir->nentries) {
+ /* readdir() returns NULL on end of directory, as well as on error. errno
+ is used to differentiate between the two conditions. */
+ errno = 0;
+ res = readdir(dir->dir);
+
+ if (res == NULL) {
+ if (errno != 0)
+ goto error;
+ break;
+ }
+
+ if (strcmp(res->d_name, ".") == 0 || strcmp(res->d_name, "..") == 0)
+ continue;
+
+ dirent = &dir->dirents[dirent_idx];
+ dirent->name = uv__strdup(res->d_name);
+
+ if (dirent->name == NULL)
+ goto error;
+
+ dirent->type = uv__fs_get_dirent_type(res);
+ ++dirent_idx;
+ }
+
+ return dirent_idx;
+
+error:
+ for (i = 0; i < dirent_idx; ++i) {
+ uv__free((char*) dir->dirents[i].name);
+ dir->dirents[i].name = NULL;
+ }
+
+ return -1;
+}
+
+static int uv__fs_closedir(uv_fs_t* req) {
+ uv_dir_t* dir;
+
+ dir = req->ptr;
+
+ if (dir->dir != NULL) {
+ closedir(dir->dir);
+ dir->dir = NULL;
+ }
+
+ uv__free(req->ptr);
+ req->ptr = NULL;
+ return 0;
+}
+
+static int uv__fs_statfs(uv_fs_t* req) {
+ uv_statfs_t* stat_fs;
+#if defined(__sun) || \
+ defined(__MVS__) || \
+ defined(__NetBSD__) || \
+ defined(__HAIKU__) || \
+ defined(__QNX__)
+ struct statvfs buf;
+
+ if (0 != statvfs(req->path, &buf))
+#else
+ struct statfs buf;
+
+ if (0 != statfs(req->path, &buf))
+#endif /* defined(__sun) */
+ return -1;
+
+ stat_fs = uv__malloc(sizeof(*stat_fs));
+ if (stat_fs == NULL) {
+ errno = ENOMEM;
+ return -1;
+ }
+
+#if defined(__sun) || \
+ defined(__MVS__) || \
+ defined(__OpenBSD__) || \
+ defined(__NetBSD__) || \
+ defined(__HAIKU__) || \
+ defined(__QNX__)
+ stat_fs->f_type = 0; /* f_type is not supported. */
+#else
+ stat_fs->f_type = buf.f_type;
+#endif
+ stat_fs->f_bsize = buf.f_bsize;
+ stat_fs->f_blocks = buf.f_blocks;
+ stat_fs->f_bfree = buf.f_bfree;
+ stat_fs->f_bavail = buf.f_bavail;
+ stat_fs->f_files = buf.f_files;
+ stat_fs->f_ffree = buf.f_ffree;
+ req->ptr = stat_fs;
+ return 0;
+}
+
+static ssize_t uv__fs_pathmax_size(const char* path) {
+ ssize_t pathmax;
+
+ pathmax = pathconf(path, _PC_PATH_MAX);
+
+ if (pathmax == -1)
+ pathmax = UV__PATH_MAX;
+
+ return pathmax;
+}
+
+static ssize_t uv__fs_readlink(uv_fs_t* req) {
+ ssize_t maxlen;
+ ssize_t len;
+ char* buf;
+
+#if defined(_POSIX_PATH_MAX) || defined(PATH_MAX)
+ maxlen = uv__fs_pathmax_size(req->path);
+#else
+ /* We may not have a real PATH_MAX. Read size of link. */
+ struct stat st;
+ int ret;
+ ret = lstat(req->path, &st);
+ if (ret != 0)
+ return -1;
+ if (!S_ISLNK(st.st_mode)) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ maxlen = st.st_size;
+
+ /* According to readlink(2) lstat can report st_size == 0
+ for some symlinks, such as those in /proc or /sys. */
+ if (maxlen == 0)
+ maxlen = uv__fs_pathmax_size(req->path);
+#endif
+
+ buf = uv__malloc(maxlen);
+
+ if (buf == NULL) {
+ errno = ENOMEM;
+ return -1;
+ }
+
+#if defined(__MVS__)
+ len = os390_readlink(req->path, buf, maxlen);
+#else
+ len = readlink(req->path, buf, maxlen);
+#endif
+
+ if (len == -1) {
+ uv__free(buf);
+ return -1;
+ }
+
+ /* Uncommon case: resize to make room for the trailing nul byte. */
+ if (len == maxlen) {
+ buf = uv__reallocf(buf, len + 1);
+
+ if (buf == NULL)
+ return -1;
+ }
+
+ buf[len] = '\0';
+ req->ptr = buf;
+
+ return 0;
+}
+
+static ssize_t uv__fs_realpath(uv_fs_t* req) {
+ char* buf;
+
+#if defined(_POSIX_VERSION) && _POSIX_VERSION >= 200809L
+ buf = realpath(req->path, NULL);
+ if (buf == NULL)
+ return -1;
+#else
+ ssize_t len;
+
+ len = uv__fs_pathmax_size(req->path);
+ buf = uv__malloc(len + 1);
+
+ if (buf == NULL) {
+ errno = ENOMEM;
+ return -1;
+ }
+
+ if (realpath(req->path, buf) == NULL) {
+ uv__free(buf);
+ return -1;
+ }
+#endif
+
+ req->ptr = buf;
+
+ return 0;
+}
+
+static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) {
+ struct pollfd pfd;
+ int use_pread;
+ off_t offset;
+ ssize_t nsent;
+ ssize_t nread;
+ ssize_t nwritten;
+ size_t buflen;
+ size_t len;
+ ssize_t n;
+ int in_fd;
+ int out_fd;
+ char buf[8192];
+
+ len = req->bufsml[0].len;
+ in_fd = req->flags;
+ out_fd = req->file;
+ offset = req->off;
+ use_pread = 1;
+
+ /* Here are the rules regarding errors:
+ *
+ * 1. Read errors are reported only if nsent==0, otherwise we return nsent.
+ * The user needs to know that some data has already been sent, to stop
+ * them from sending it twice.
+ *
+ * 2. Write errors are always reported. Write errors are bad because they
+ * mean data loss: we've read data but now we can't write it out.
+ *
+ * We try to use pread() and fall back to regular read() if the source fd
+ * doesn't support positional reads, for example when it's a pipe fd.
+ *
+ * If we get EAGAIN when writing to the target fd, we poll() on it until
+ * it becomes writable again.
+ *
+ * FIXME: If we get a write error when use_pread==1, it should be safe to
+ * return the number of sent bytes instead of an error because pread()
+ * is, in theory, idempotent. However, special files in /dev or /proc
+ * may support pread() but not necessarily return the same data on
+ * successive reads.
+ *
+ * FIXME: There is no way now to signal that we managed to send *some* data
+ * before a write error.
+ */
+ for (nsent = 0; (size_t) nsent < len; ) {
+ buflen = len - nsent;
+
+ if (buflen > sizeof(buf))
+ buflen = sizeof(buf);
+
+ do
+ if (use_pread)
+ nread = pread(in_fd, buf, buflen, offset);
+ else
+ nread = read(in_fd, buf, buflen);
+ while (nread == -1 && errno == EINTR);
+
+ if (nread == 0)
+ goto out;
+
+ if (nread == -1) {
+ if (use_pread && nsent == 0 && (errno == EIO || errno == ESPIPE)) {
+ use_pread = 0;
+ continue;
+ }
+
+ if (nsent == 0)
+ nsent = -1;
+
+ goto out;
+ }
+
+ for (nwritten = 0; nwritten < nread; ) {
+ do
+ n = write(out_fd, buf + nwritten, nread - nwritten);
+ while (n == -1 && errno == EINTR);
+
+ if (n != -1) {
+ nwritten += n;
+ continue;
+ }
+
+ if (errno != EAGAIN && errno != EWOULDBLOCK) {
+ nsent = -1;
+ goto out;
+ }
+
+ pfd.fd = out_fd;
+ pfd.events = POLLOUT;
+ pfd.revents = 0;
+
+ do
+ n = poll(&pfd, 1, -1);
+ while (n == -1 && errno == EINTR);
+
+ if (n == -1 || (pfd.revents & ~POLLOUT) != 0) {
+ errno = EIO;
+ nsent = -1;
+ goto out;
+ }
+ }
+
+ offset += nread;
+ nsent += nread;
+ }
+
+out:
+ if (nsent != -1)
+ req->off = offset;
+
+ return nsent;
+}
+
+
+#ifdef __linux__
+static unsigned uv__kernel_version(void) {
+ static unsigned cached_version;
+ struct utsname u;
+ unsigned version;
+ unsigned major;
+ unsigned minor;
+ unsigned patch;
+
+ version = uv__load_relaxed(&cached_version);
+ if (version != 0)
+ return version;
+
+ if (-1 == uname(&u))
+ return 0;
+
+ if (3 != sscanf(u.release, "%u.%u.%u", &major, &minor, &patch))
+ return 0;
+
+ version = major * 65536 + minor * 256 + patch;
+ uv__store_relaxed(&cached_version, version);
+
+ return version;
+}
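+/* The encoding packs one version component per byte: major * 65536 +
+ * minor * 256 + patch. For example, 4.20.0 encodes to 0x041400, the
+ * constant uv__is_buggy_cephfs() compares against below.
+ */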
+
+
+/* Pre-4.20 kernels have a bug where CephFS uses the RADOS copy-from command
+ * in copy_file_range() when it shouldn't. There is no workaround except to
+ * fall back to a regular copy.
+ */
+static int uv__is_buggy_cephfs(int fd) {
+ struct statfs s;
+
+ if (-1 == fstatfs(fd, &s))
+ return 0;
+
+ if (s.f_type != /* CephFS */ 0xC36400)
+ return 0;
+
+ return uv__kernel_version() < /* 4.20.0 */ 0x041400;
+}
+
+
+static int uv__is_cifs_or_smb(int fd) {
+ struct statfs s;
+
+ if (-1 == fstatfs(fd, &s))
+ return 0;
+
+ switch ((unsigned) s.f_type) {
+ case 0x0000517Bu: /* SMB */
+ case 0xFE534D42u: /* SMB2 */
+ case 0xFF534D42u: /* CIFS */
+ return 1;
+ }
+
+ return 0;
+}
+
+
+static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off,
+ int out_fd, size_t len) {
+ static int no_copy_file_range_support;
+ ssize_t r;
+
+ if (uv__load_relaxed(&no_copy_file_range_support)) {
+ errno = ENOSYS;
+ return -1;
+ }
+
+ r = uv__fs_copy_file_range(in_fd, off, out_fd, NULL, len, 0);
+
+ if (r != -1)
+ return r;
+
+ switch (errno) {
+ case EACCES:
+ /* Pre-4.20 kernels have a bug where CephFS uses the RADOS
+ * copy-from command when it shouldn't.
+ */
+ if (uv__is_buggy_cephfs(in_fd))
+ errno = ENOSYS; /* Use fallback. */
+ break;
+ case ENOSYS:
+ uv__store_relaxed(&no_copy_file_range_support, 1);
+ break;
+ case EPERM:
+ /* It's been reported that CIFS spuriously fails.
+ * Consider it a transient error.
+ */
+ if (uv__is_cifs_or_smb(out_fd))
+ errno = ENOSYS; /* Use fallback. */
+ break;
+ case ENOTSUP:
+ case EXDEV:
+ /* ENOTSUP - it could work on another file system type.
+ * EXDEV - it will not work when in_fd and out_fd are not on the same
+ * mounted filesystem (pre Linux 5.3)
+ */
+ errno = ENOSYS; /* Use fallback. */
+ break;
+ }
+
+ return -1;
+}
+
+#endif /* __linux__ */
+
+
+static ssize_t uv__fs_sendfile(uv_fs_t* req) {
+ int in_fd;
+ int out_fd;
+
+ in_fd = req->flags;
+ out_fd = req->file;
+
+#if defined(__linux__) || defined(__sun)
+ {
+ off_t off;
+ ssize_t r;
+ size_t len;
+ int try_sendfile;
+
+ off = req->off;
+ len = req->bufsml[0].len;
+
+#ifdef __linux__
+ r = uv__fs_try_copy_file_range(in_fd, &off, out_fd, len);
+ try_sendfile = (r == -1 && errno == ENOSYS);
+#else
+ try_sendfile = 1;
+#endif
+
+ if (try_sendfile)
+ r = sendfile(out_fd, in_fd, &off, len);
+
+ /* sendfile() on SunOS returns EINVAL if the target fd is not a socket but
+ * it still writes out data. Fortunately, we can detect it by checking if
+ * the offset has been updated.
+ */
+ if (r != -1 || off > req->off) {
+ r = off - req->off;
+ req->off = off;
+ return r;
+ }
+
+ if (errno == EINVAL ||
+ errno == EIO ||
+ errno == ENOTSOCK ||
+ errno == EXDEV) {
+ errno = 0;
+ return uv__fs_sendfile_emul(req);
+ }
+
+ return -1;
+ }
+#elif defined(__APPLE__) || \
+ defined(__DragonFly__) || \
+ defined(__FreeBSD__) || \
+ defined(__FreeBSD_kernel__)
+ {
+ off_t len;
+ ssize_t r;
+
+ /* sendfile() on FreeBSD and Darwin returns EAGAIN if the target fd is in
+ * non-blocking mode and not all data could be written. If a non-zero
+ * number of bytes have been sent, we don't consider it an error.
+ */
+
+#if defined(__FreeBSD__) || defined(__DragonFly__)
+#if defined(__FreeBSD__)
+ off_t off;
+
+ off = req->off;
+ r = uv__fs_copy_file_range(in_fd, &off, out_fd, NULL, req->bufsml[0].len, 0);
+ if (r >= 0) {
+ r = off - req->off;
+ req->off = off;
+ return r;
+ }
+#endif
+ len = 0;
+ r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
+#elif defined(__FreeBSD_kernel__)
+ len = 0;
+ r = bsd_sendfile(in_fd,
+ out_fd,
+ req->off,
+ req->bufsml[0].len,
+ NULL,
+ &len,
+ 0);
+#else
+ /* The darwin sendfile takes len as an input for the length to send,
+ * so make sure to initialize it with the caller's value. */
+ len = req->bufsml[0].len;
+ r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0);
+#endif
+
+ /*
+ * The man page for sendfile(2) on DragonFly states that `len` contains
+ * a meaningful value ONLY in the case of EAGAIN and EINTR.
+ * Nothing is said about its value in the case of other errors, so it is
+ * better not to depend on the potentially wrong assumption that it was
+ * not modified by the syscall.
+ */
+ if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) {
+ req->off += len;
+ return (ssize_t) len;
+ }
+
+ if (errno == EINVAL ||
+ errno == EIO ||
+ errno == ENOTSOCK ||
+ errno == EXDEV) {
+ errno = 0;
+ return uv__fs_sendfile_emul(req);
+ }
+
+ return -1;
+ }
+#else
+ /* Squelch compiler warnings. */
+ (void) &in_fd;
+ (void) &out_fd;
+
+ return uv__fs_sendfile_emul(req);
+#endif
+}
+
+
+static ssize_t uv__fs_utime(uv_fs_t* req) {
+#if defined(__linux__) \
+ || defined(_AIX71) \
+ || defined(__sun) \
+ || defined(__HAIKU__)
+ struct timespec ts[2];
+ ts[0] = uv__fs_to_timespec(req->atime);
+ ts[1] = uv__fs_to_timespec(req->mtime);
+ return utimensat(AT_FDCWD, req->path, ts, 0);
+#elif defined(__APPLE__) \
+ || defined(__DragonFly__) \
+ || defined(__FreeBSD__) \
+ || defined(__FreeBSD_kernel__) \
+ || defined(__NetBSD__) \
+ || defined(__OpenBSD__)
+ struct timeval tv[2];
+ tv[0] = uv__fs_to_timeval(req->atime);
+ tv[1] = uv__fs_to_timeval(req->mtime);
+ return utimes(req->path, tv);
+#elif defined(_AIX) \
+ && !defined(_AIX71)
+ struct utimbuf buf;
+ buf.actime = req->atime;
+ buf.modtime = req->mtime;
+ return utime(req->path, &buf);
+#elif defined(__MVS__)
+ attrib_t atr;
+ memset(&atr, 0, sizeof(atr));
+ atr.att_mtimechg = 1;
+ atr.att_atimechg = 1;
+ atr.att_mtime = req->mtime;
+ atr.att_atime = req->atime;
+ return __lchattr((char*) req->path, &atr, sizeof(atr));
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+
+
+static ssize_t uv__fs_lutime(uv_fs_t* req) {
+#if defined(__linux__) || \
+ defined(_AIX71) || \
+ defined(__sun) || \
+ defined(__HAIKU__) || \
+ defined(__GNU__) || \
+ defined(__OpenBSD__)
+ struct timespec ts[2];
+ ts[0] = uv__fs_to_timespec(req->atime);
+ ts[1] = uv__fs_to_timespec(req->mtime);
+ return utimensat(AT_FDCWD, req->path, ts, AT_SYMLINK_NOFOLLOW);
+#elif defined(__APPLE__) || \
+ defined(__DragonFly__) || \
+ defined(__FreeBSD__) || \
+ defined(__FreeBSD_kernel__) || \
+ defined(__NetBSD__)
+ struct timeval tv[2];
+ tv[0] = uv__fs_to_timeval(req->atime);
+ tv[1] = uv__fs_to_timeval(req->mtime);
+ return lutimes(req->path, tv);
+#else
+ errno = ENOSYS;
+ return -1;
+#endif
+}
+
+
+static ssize_t uv__fs_write(uv_fs_t* req) {
+#if defined(__linux__)
+ static int no_pwritev;
+#endif
+ ssize_t r;
+
+ /* Serialize writes on OS X, concurrent write() and pwrite() calls result in
+ * data loss. We can't use a per-file descriptor lock, the descriptor may be
+ * a dup().
+ */
+#if defined(__APPLE__)
+ static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+
+ if (pthread_mutex_lock(&lock))
+ abort();
+#endif
+
+ if (req->off < 0) {
+ if (req->nbufs == 1)
+ r = write(req->file, req->bufs[0].base, req->bufs[0].len);
+ else
+ r = writev(req->file, (struct iovec*) req->bufs, req->nbufs);
+ } else {
+ if (req->nbufs == 1) {
+ r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
+ goto done;
+ }
+#if HAVE_PREADV
+ r = pwritev(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
+#else
+# if defined(__linux__)
+ if (uv__load_relaxed(&no_pwritev)) retry:
+# endif
+ {
+ r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
+ }
+# if defined(__linux__)
+ else {
+ r = uv__pwritev(req->file,
+ (struct iovec*) req->bufs,
+ req->nbufs,
+ req->off);
+ if (r == -1 && errno == ENOSYS) {
+ uv__store_relaxed(&no_pwritev, 1);
+ goto retry;
+ }
+ }
+# endif
+#endif
+ }
+
+done:
+#if defined(__APPLE__)
+ if (pthread_mutex_unlock(&lock))
+ abort();
+#endif
+
+ return r;
+}
+
+static ssize_t uv__fs_copyfile(uv_fs_t* req) {
+ uv_fs_t fs_req;
+ uv_file srcfd;
+ uv_file dstfd;
+ struct stat src_statsbuf;
+ struct stat dst_statsbuf;
+ int dst_flags;
+ int result;
+ int err;
+ off_t bytes_to_send;
+ off_t in_offset;
+ off_t bytes_written;
+ size_t bytes_chunk;
+
+ dstfd = -1;
+ err = 0;
+
+ /* Open the source file. */
+ srcfd = uv_fs_open(NULL, &fs_req, req->path, O_RDONLY, 0, NULL);
+ uv_fs_req_cleanup(&fs_req);
+
+ if (srcfd < 0)
+ return srcfd;
+
+ /* Get the source file's mode. */
+ if (fstat(srcfd, &src_statsbuf)) {
+ err = UV__ERR(errno);
+ goto out;
+ }
+
+ dst_flags = O_WRONLY | O_CREAT;
+
+ if (req->flags & UV_FS_COPYFILE_EXCL)
+ dst_flags |= O_EXCL;
+
+ /* Open the destination file. */
+ dstfd = uv_fs_open(NULL,
+ &fs_req,
+ req->new_path,
+ dst_flags,
+ src_statsbuf.st_mode,
+ NULL);
+ uv_fs_req_cleanup(&fs_req);
+
+ if (dstfd < 0) {
+ err = dstfd;
+ goto out;
+ }
+
+ /* If the file is not being opened exclusively, verify that the source and
+ destination are not the same file. If they are the same, bail out early. */
+ if ((req->flags & UV_FS_COPYFILE_EXCL) == 0) {
+ /* Get the destination file's mode. */
+ if (fstat(dstfd, &dst_statsbuf)) {
+ err = UV__ERR(errno);
+ goto out;
+ }
+
+ /* Check if srcfd and dstfd refer to the same file */
+ if (src_statsbuf.st_dev == dst_statsbuf.st_dev &&
+ src_statsbuf.st_ino == dst_statsbuf.st_ino) {
+ goto out;
+ }
+
+ /* Truncate the file in case the destination already existed. */
+ if (ftruncate(dstfd, 0) != 0) {
+ err = UV__ERR(errno);
+ goto out;
+ }
+ }
+
+ if (fchmod(dstfd, src_statsbuf.st_mode) == -1) {
+ err = UV__ERR(errno);
+#ifdef __linux__
+ /* fchmod() on CIFS shares always fails with EPERM unless the share is
+ * mounted with "noperm". As fchmod() is a meaningless operation on such
+ * shares anyway, detect that condition and squelch the error.
+ */
+ if (err != UV_EPERM)
+ goto out;
+
+ if (!uv__is_cifs_or_smb(dstfd))
+ goto out;
+
+ err = 0;
+#else /* !__linux__ */
+ goto out;
+#endif /* !__linux__ */
+ }
+
+#ifdef FICLONE
+ if (req->flags & UV_FS_COPYFILE_FICLONE ||
+ req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
+ if (ioctl(dstfd, FICLONE, srcfd) == 0) {
+ /* ioctl() with FICLONE succeeded. */
+ goto out;
+ }
+ /* If an error occurred and force was set, return the error to the caller;
+ * fall back to sendfile() when force was not set. */
+ if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
+ err = UV__ERR(errno);
+ goto out;
+ }
+ }
+#else
+ if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
+ err = UV_ENOSYS;
+ goto out;
+ }
+#endif
+
+ bytes_to_send = src_statsbuf.st_size;
+ in_offset = 0;
+ while (bytes_to_send != 0) {
+ bytes_chunk = SSIZE_MAX;
+ if (bytes_to_send < (off_t) bytes_chunk)
+ bytes_chunk = bytes_to_send;
+ uv_fs_sendfile(NULL, &fs_req, dstfd, srcfd, in_offset, bytes_chunk, NULL);
+ bytes_written = fs_req.result;
+ uv_fs_req_cleanup(&fs_req);
+
+ if (bytes_written < 0) {
+ err = bytes_written;
+ break;
+ }
+
+ bytes_to_send -= bytes_written;
+ in_offset += bytes_written;
+ }
+
+out:
+ if (err < 0)
+ result = err;
+ else
+ result = 0;
+
+ /* Close the source file. */
+ err = uv__close_nocheckstdio(srcfd);
+
+ /* Don't overwrite any existing errors. */
+ if (err != 0 && result == 0)
+ result = err;
+
+ /* Close the destination file if it is open. */
+ if (dstfd >= 0) {
+ err = uv__close_nocheckstdio(dstfd);
+
+ /* Don't overwrite any existing errors. */
+ if (err != 0 && result == 0)
+ result = err;
+
+ /* Remove the destination file if something went wrong. */
+ if (result != 0) {
+ uv_fs_unlink(NULL, &fs_req, req->new_path, NULL);
+ /* Ignore the unlink return value, as an error already happened. */
+ uv_fs_req_cleanup(&fs_req);
+ }
+ }
+
+ if (result == 0)
+ return 0;
+
+ errno = UV__ERR(result);
+ return -1;
+}
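+
+/* Usage sketch: with a NULL loop and a NULL callback the request runs
+ * synchronously, so a best-effort clone that falls back to a byte copy is:
+ *
+ *   uv_fs_t req;
+ *   int r = uv_fs_copyfile(NULL, &req, "src", "dst",
+ *                          UV_FS_COPYFILE_FICLONE, NULL);
+ *   uv_fs_req_cleanup(&req);
+ *
+ * With UV_FS_COPYFILE_FICLONE_FORCE the call instead fails when cloning is
+ * unavailable, rather than falling back to the sendfile() loop above.
+ */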
+
+static void uv__to_stat(struct stat* src, uv_stat_t* dst) {
+ dst->st_dev = src->st_dev;
+ dst->st_mode = src->st_mode;
+ dst->st_nlink = src->st_nlink;
+ dst->st_uid = src->st_uid;
+ dst->st_gid = src->st_gid;
+ dst->st_rdev = src->st_rdev;
+ dst->st_ino = src->st_ino;
+ dst->st_size = src->st_size;
+ dst->st_blksize = src->st_blksize;
+ dst->st_blocks = src->st_blocks;
+
+#if defined(__APPLE__)
+ dst->st_atim.tv_sec = src->st_atimespec.tv_sec;
+ dst->st_atim.tv_nsec = src->st_atimespec.tv_nsec;
+ dst->st_mtim.tv_sec = src->st_mtimespec.tv_sec;
+ dst->st_mtim.tv_nsec = src->st_mtimespec.tv_nsec;
+ dst->st_ctim.tv_sec = src->st_ctimespec.tv_sec;
+ dst->st_ctim.tv_nsec = src->st_ctimespec.tv_nsec;
+ dst->st_birthtim.tv_sec = src->st_birthtimespec.tv_sec;
+ dst->st_birthtim.tv_nsec = src->st_birthtimespec.tv_nsec;
+ dst->st_flags = src->st_flags;
+ dst->st_gen = src->st_gen;
+#elif defined(__ANDROID__)
+ dst->st_atim.tv_sec = src->st_atime;
+ dst->st_atim.tv_nsec = src->st_atimensec;
+ dst->st_mtim.tv_sec = src->st_mtime;
+ dst->st_mtim.tv_nsec = src->st_mtimensec;
+ dst->st_ctim.tv_sec = src->st_ctime;
+ dst->st_ctim.tv_nsec = src->st_ctimensec;
+ dst->st_birthtim.tv_sec = src->st_ctime;
+ dst->st_birthtim.tv_nsec = src->st_ctimensec;
+ dst->st_flags = 0;
+ dst->st_gen = 0;
+#elif !defined(_AIX) && \
+ !defined(__MVS__) && ( \
+ defined(__DragonFly__) || \
+ defined(__FreeBSD__) || \
+ defined(__OpenBSD__) || \
+ defined(__NetBSD__) || \
+ defined(_GNU_SOURCE) || \
+ defined(_BSD_SOURCE) || \
+ defined(_SVID_SOURCE) || \
+ defined(_XOPEN_SOURCE) || \
+ defined(_DEFAULT_SOURCE))
+ dst->st_atim.tv_sec = src->st_atim.tv_sec;
+ dst->st_atim.tv_nsec = src->st_atim.tv_nsec;
+ dst->st_mtim.tv_sec = src->st_mtim.tv_sec;
+ dst->st_mtim.tv_nsec = src->st_mtim.tv_nsec;
+ dst->st_ctim.tv_sec = src->st_ctim.tv_sec;
+ dst->st_ctim.tv_nsec = src->st_ctim.tv_nsec;
+# if defined(__FreeBSD__) || \
+ defined(__NetBSD__)
+ dst->st_birthtim.tv_sec = src->st_birthtim.tv_sec;
+ dst->st_birthtim.tv_nsec = src->st_birthtim.tv_nsec;
+ dst->st_flags = src->st_flags;
+ dst->st_gen = src->st_gen;
+# else
+ dst->st_birthtim.tv_sec = src->st_ctim.tv_sec;
+ dst->st_birthtim.tv_nsec = src->st_ctim.tv_nsec;
+ dst->st_flags = 0;
+ dst->st_gen = 0;
+# endif
+#else
+ dst->st_atim.tv_sec = src->st_atime;
+ dst->st_atim.tv_nsec = 0;
+ dst->st_mtim.tv_sec = src->st_mtime;
+ dst->st_mtim.tv_nsec = 0;
+ dst->st_ctim.tv_sec = src->st_ctime;
+ dst->st_ctim.tv_nsec = 0;
+ dst->st_birthtim.tv_sec = src->st_ctime;
+ dst->st_birthtim.tv_nsec = 0;
+ dst->st_flags = 0;
+ dst->st_gen = 0;
+#endif
+}
+
+
+static int uv__fs_statx(int fd,
+ const char* path,
+ int is_fstat,
+ int is_lstat,
+ uv_stat_t* buf) {
+ STATIC_ASSERT(UV_ENOSYS != -1);
+#ifdef __linux__
+ static int no_statx;
+ struct uv__statx statxbuf;
+ int dirfd;
+ int flags;
+ int mode;
+ int rc;
+
+ if (uv__load_relaxed(&no_statx))
+ return UV_ENOSYS;
+
+ dirfd = AT_FDCWD;
+ flags = 0; /* AT_STATX_SYNC_AS_STAT */
+ mode = 0xFFF; /* STATX_BASIC_STATS + STATX_BTIME */
+
+ if (is_fstat) {
+ dirfd = fd;
+ flags |= 0x1000; /* AT_EMPTY_PATH */
+ }
+
+ if (is_lstat)
+ flags |= AT_SYMLINK_NOFOLLOW;
+
+ rc = uv__statx(dirfd, path, flags, mode, &statxbuf);
+
+ switch (rc) {
+ case 0:
+ break;
+ case -1:
+ /* EPERM happens when a seccomp filter rejects the system call.
+ * Has been observed with libseccomp < 2.3.3 and docker < 18.04.
+     * EOPNOTSUPP is used on DVS-exported filesystems.
+ */
+ if (errno != EINVAL && errno != EPERM && errno != ENOSYS && errno != EOPNOTSUPP)
+ return -1;
+ /* Fall through. */
+ default:
+      /* Normally, zero is returned on success and -1 on error. However, on
+       * S390 RHEL running in a docker container without statx implemented,
+       * rc has been observed to be 1 with the error code set to 0; treat
+       * that case as ENOSYS as well.
+       */
+ uv__store_relaxed(&no_statx, 1);
+ return UV_ENOSYS;
+ }
+
+ buf->st_dev = makedev(statxbuf.stx_dev_major, statxbuf.stx_dev_minor);
+ buf->st_mode = statxbuf.stx_mode;
+ buf->st_nlink = statxbuf.stx_nlink;
+ buf->st_uid = statxbuf.stx_uid;
+ buf->st_gid = statxbuf.stx_gid;
+ buf->st_rdev = makedev(statxbuf.stx_rdev_major, statxbuf.stx_rdev_minor);
+ buf->st_ino = statxbuf.stx_ino;
+ buf->st_size = statxbuf.stx_size;
+ buf->st_blksize = statxbuf.stx_blksize;
+ buf->st_blocks = statxbuf.stx_blocks;
+ buf->st_atim.tv_sec = statxbuf.stx_atime.tv_sec;
+ buf->st_atim.tv_nsec = statxbuf.stx_atime.tv_nsec;
+ buf->st_mtim.tv_sec = statxbuf.stx_mtime.tv_sec;
+ buf->st_mtim.tv_nsec = statxbuf.stx_mtime.tv_nsec;
+ buf->st_ctim.tv_sec = statxbuf.stx_ctime.tv_sec;
+ buf->st_ctim.tv_nsec = statxbuf.stx_ctime.tv_nsec;
+ buf->st_birthtim.tv_sec = statxbuf.stx_btime.tv_sec;
+ buf->st_birthtim.tv_nsec = statxbuf.stx_btime.tv_nsec;
+ buf->st_flags = 0;
+ buf->st_gen = 0;
+
+ return 0;
+#else
+ return UV_ENOSYS;
+#endif /* __linux__ */
+}
+
+
+static int uv__fs_stat(const char *path, uv_stat_t *buf) {
+ struct stat pbuf;
+ int ret;
+
+ ret = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 0, buf);
+ if (ret != UV_ENOSYS)
+ return ret;
+
+ ret = stat(path, &pbuf);
+ if (ret == 0)
+ uv__to_stat(&pbuf, buf);
+
+ return ret;
+}
+
+
+static int uv__fs_lstat(const char *path, uv_stat_t *buf) {
+ struct stat pbuf;
+ int ret;
+
+ ret = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 1, buf);
+ if (ret != UV_ENOSYS)
+ return ret;
+
+ ret = lstat(path, &pbuf);
+ if (ret == 0)
+ uv__to_stat(&pbuf, buf);
+
+ return ret;
+}
+
+
+static int uv__fs_fstat(int fd, uv_stat_t *buf) {
+ struct stat pbuf;
+ int ret;
+
+ ret = uv__fs_statx(fd, "", /* is_fstat */ 1, /* is_lstat */ 0, buf);
+ if (ret != UV_ENOSYS)
+ return ret;
+
+ ret = fstat(fd, &pbuf);
+ if (ret == 0)
+ uv__to_stat(&pbuf, buf);
+
+ return ret;
+}
+
+static size_t uv__fs_buf_offset(uv_buf_t* bufs, size_t size) {
+ size_t offset;
+ /* Figure out which bufs are done */
+ for (offset = 0; size > 0 && bufs[offset].len <= size; ++offset)
+ size -= bufs[offset].len;
+
+ /* Fix a partial read/write */
+ if (size > 0) {
+ bufs[offset].base += size;
+ bufs[offset].len -= size;
+ }
+ return offset;
+}
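+
+/* Worked example: with bufs of lengths {3, 5} and size == 4 transferred
+ * bytes, the loop consumes bufs[0] (size drops to 1) and stops at bufs[1];
+ * the partial fix-up then advances bufs[1].base by 1 and shrinks
+ * bufs[1].len to 4, and the returned offset of 1 tells the caller where
+ * to resume. */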
+
+static ssize_t uv__fs_write_all(uv_fs_t* req) {
+ unsigned int iovmax;
+ unsigned int nbufs;
+ uv_buf_t* bufs;
+ ssize_t total;
+ ssize_t result;
+
+ iovmax = uv__getiovmax();
+ nbufs = req->nbufs;
+ bufs = req->bufs;
+ total = 0;
+
+ while (nbufs > 0) {
+ req->nbufs = nbufs;
+ if (req->nbufs > iovmax)
+ req->nbufs = iovmax;
+
+ do
+ result = uv__fs_write(req);
+ while (result < 0 && errno == EINTR);
+
+ if (result <= 0) {
+ if (total == 0)
+ total = result;
+ break;
+ }
+
+ if (req->off >= 0)
+ req->off += result;
+
+ req->nbufs = uv__fs_buf_offset(req->bufs, result);
+ req->bufs += req->nbufs;
+ nbufs -= req->nbufs;
+ total += result;
+ }
+
+ if (bufs != req->bufsml)
+ uv__free(bufs);
+
+ req->bufs = NULL;
+ req->nbufs = 0;
+
+ return total;
+}
+
+
+static void uv__fs_work(struct uv__work* w) {
+ int retry_on_eintr;
+ uv_fs_t* req;
+ ssize_t r;
+
+ req = container_of(w, uv_fs_t, work_req);
+ retry_on_eintr = !(req->fs_type == UV_FS_CLOSE ||
+ req->fs_type == UV_FS_READ);
+
+ do {
+ errno = 0;
+
+#define X(type, action) \
+ case UV_FS_ ## type: \
+ r = action; \
+ break;
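+
+/* For reference, X(ACCESS, access(req->path, req->flags)) expands to:
+ *   case UV_FS_ACCESS: r = access(req->path, req->flags); break;
+ * which keeps the dispatch table below compact and uniform. */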
+
+ switch (req->fs_type) {
+ X(ACCESS, access(req->path, req->flags));
+ X(CHMOD, chmod(req->path, req->mode));
+ X(CHOWN, chown(req->path, req->uid, req->gid));
+ X(CLOSE, uv__fs_close(req->file));
+ X(COPYFILE, uv__fs_copyfile(req));
+ X(FCHMOD, fchmod(req->file, req->mode));
+ X(FCHOWN, fchown(req->file, req->uid, req->gid));
+ X(LCHOWN, lchown(req->path, req->uid, req->gid));
+ X(FDATASYNC, uv__fs_fdatasync(req));
+ X(FSTAT, uv__fs_fstat(req->file, &req->statbuf));
+ X(FSYNC, uv__fs_fsync(req));
+ X(FTRUNCATE, ftruncate(req->file, req->off));
+ X(FUTIME, uv__fs_futime(req));
+ X(LUTIME, uv__fs_lutime(req));
+ X(LSTAT, uv__fs_lstat(req->path, &req->statbuf));
+ X(LINK, link(req->path, req->new_path));
+ X(MKDIR, mkdir(req->path, req->mode));
+ X(MKDTEMP, uv__fs_mkdtemp(req));
+ X(MKSTEMP, uv__fs_mkstemp(req));
+ X(OPEN, uv__fs_open(req));
+ X(READ, uv__fs_read(req));
+ X(SCANDIR, uv__fs_scandir(req));
+ X(OPENDIR, uv__fs_opendir(req));
+ X(READDIR, uv__fs_readdir(req));
+ X(CLOSEDIR, uv__fs_closedir(req));
+ X(READLINK, uv__fs_readlink(req));
+ X(REALPATH, uv__fs_realpath(req));
+ X(RENAME, rename(req->path, req->new_path));
+ X(RMDIR, rmdir(req->path));
+ X(SENDFILE, uv__fs_sendfile(req));
+ X(STAT, uv__fs_stat(req->path, &req->statbuf));
+ X(STATFS, uv__fs_statfs(req));
+ X(SYMLINK, symlink(req->path, req->new_path));
+ X(UNLINK, unlink(req->path));
+ X(UTIME, uv__fs_utime(req));
+ X(WRITE, uv__fs_write_all(req));
+ default: abort();
+ }
+#undef X
+ } while (r == -1 && errno == EINTR && retry_on_eintr);
+
+ if (r == -1)
+ req->result = UV__ERR(errno);
+ else
+ req->result = r;
+
+ if (r == 0 && (req->fs_type == UV_FS_STAT ||
+ req->fs_type == UV_FS_FSTAT ||
+ req->fs_type == UV_FS_LSTAT)) {
+ req->ptr = &req->statbuf;
+ }
+}
+
+
+static void uv__fs_done(struct uv__work* w, int status) {
+ uv_fs_t* req;
+
+ req = container_of(w, uv_fs_t, work_req);
+ uv__req_unregister(req->loop, req);
+
+ if (status == UV_ECANCELED) {
+ assert(req->result == 0);
+ req->result = UV_ECANCELED;
+ }
+
+ req->cb(req);
+}
+
+
+int uv_fs_access(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ int flags,
+ uv_fs_cb cb) {
+ INIT(ACCESS);
+ PATH;
+ req->flags = flags;
+ POST;
+}
+
+
+int uv_fs_chmod(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ int mode,
+ uv_fs_cb cb) {
+ INIT(CHMOD);
+ PATH;
+ req->mode = mode;
+ POST;
+}
+
+
+int uv_fs_chown(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ uv_uid_t uid,
+ uv_gid_t gid,
+ uv_fs_cb cb) {
+ INIT(CHOWN);
+ PATH;
+ req->uid = uid;
+ req->gid = gid;
+ POST;
+}
+
+
+int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
+ INIT(CLOSE);
+ req->file = file;
+ POST;
+}
+
+
+int uv_fs_fchmod(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file file,
+ int mode,
+ uv_fs_cb cb) {
+ INIT(FCHMOD);
+ req->file = file;
+ req->mode = mode;
+ POST;
+}
+
+
+int uv_fs_fchown(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file file,
+ uv_uid_t uid,
+ uv_gid_t gid,
+ uv_fs_cb cb) {
+ INIT(FCHOWN);
+ req->file = file;
+ req->uid = uid;
+ req->gid = gid;
+ POST;
+}
+
+
+int uv_fs_lchown(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ uv_uid_t uid,
+ uv_gid_t gid,
+ uv_fs_cb cb) {
+ INIT(LCHOWN);
+ PATH;
+ req->uid = uid;
+ req->gid = gid;
+ POST;
+}
+
+
+int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
+ INIT(FDATASYNC);
+ req->file = file;
+ POST;
+}
+
+
+int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
+ INIT(FSTAT);
+ req->file = file;
+ POST;
+}
+
+
+int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
+ INIT(FSYNC);
+ req->file = file;
+ POST;
+}
+
+
+int uv_fs_ftruncate(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file file,
+ int64_t off,
+ uv_fs_cb cb) {
+ INIT(FTRUNCATE);
+ req->file = file;
+ req->off = off;
+ POST;
+}
+
+
+int uv_fs_futime(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file file,
+ double atime,
+ double mtime,
+ uv_fs_cb cb) {
+ INIT(FUTIME);
+ req->file = file;
+ req->atime = atime;
+ req->mtime = mtime;
+ POST;
+}
+
+int uv_fs_lutime(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ double atime,
+ double mtime,
+ uv_fs_cb cb) {
+ INIT(LUTIME);
+ PATH;
+ req->atime = atime;
+ req->mtime = mtime;
+ POST;
+}
+
+
+int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
+ INIT(LSTAT);
+ PATH;
+ POST;
+}
+
+
+int uv_fs_link(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ const char* new_path,
+ uv_fs_cb cb) {
+ INIT(LINK);
+ PATH2;
+ POST;
+}
+
+
+int uv_fs_mkdir(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ int mode,
+ uv_fs_cb cb) {
+ INIT(MKDIR);
+ PATH;
+ req->mode = mode;
+ POST;
+}
+
+
+int uv_fs_mkdtemp(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* tpl,
+ uv_fs_cb cb) {
+ INIT(MKDTEMP);
+ req->path = uv__strdup(tpl);
+ if (req->path == NULL)
+ return UV_ENOMEM;
+ POST;
+}
+
+
+int uv_fs_mkstemp(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* tpl,
+ uv_fs_cb cb) {
+ INIT(MKSTEMP);
+ req->path = uv__strdup(tpl);
+ if (req->path == NULL)
+ return UV_ENOMEM;
+ POST;
+}
+
+
+int uv_fs_open(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ int flags,
+ int mode,
+ uv_fs_cb cb) {
+ INIT(OPEN);
+ PATH;
+ req->flags = flags;
+ req->mode = mode;
+ POST;
+}
+
+
+int uv_fs_read(uv_loop_t* loop, uv_fs_t* req,
+ uv_file file,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ int64_t off,
+ uv_fs_cb cb) {
+ INIT(READ);
+
+ if (bufs == NULL || nbufs == 0)
+ return UV_EINVAL;
+
+ req->file = file;
+
+ req->nbufs = nbufs;
+ req->bufs = req->bufsml;
+ if (nbufs > ARRAY_SIZE(req->bufsml))
+ req->bufs = uv__malloc(nbufs * sizeof(*bufs));
+
+ if (req->bufs == NULL)
+ return UV_ENOMEM;
+
+ memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
+
+ req->off = off;
+ POST;
+}
+
+
+int uv_fs_scandir(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ int flags,
+ uv_fs_cb cb) {
+ INIT(SCANDIR);
+ PATH;
+ req->flags = flags;
+ POST;
+}
+
+int uv_fs_opendir(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ uv_fs_cb cb) {
+ INIT(OPENDIR);
+ PATH;
+ POST;
+}
+
+int uv_fs_readdir(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_dir_t* dir,
+ uv_fs_cb cb) {
+ INIT(READDIR);
+
+ if (dir == NULL || dir->dir == NULL || dir->dirents == NULL)
+ return UV_EINVAL;
+
+ req->ptr = dir;
+ POST;
+}
+
+int uv_fs_closedir(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_dir_t* dir,
+ uv_fs_cb cb) {
+ INIT(CLOSEDIR);
+
+ if (dir == NULL)
+ return UV_EINVAL;
+
+ req->ptr = dir;
+ POST;
+}
+
+int uv_fs_readlink(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ uv_fs_cb cb) {
+ INIT(READLINK);
+ PATH;
+ POST;
+}
+
+
+int uv_fs_realpath(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char * path,
+ uv_fs_cb cb) {
+ INIT(REALPATH);
+ PATH;
+ POST;
+}
+
+
+int uv_fs_rename(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ const char* new_path,
+ uv_fs_cb cb) {
+ INIT(RENAME);
+ PATH2;
+ POST;
+}
+
+
+int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
+ INIT(RMDIR);
+ PATH;
+ POST;
+}
+
+
+int uv_fs_sendfile(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file out_fd,
+ uv_file in_fd,
+ int64_t off,
+ size_t len,
+ uv_fs_cb cb) {
+ INIT(SENDFILE);
+  req->flags = in_fd; /* hack: smuggle in_fd through the flags field */
+ req->file = out_fd;
+ req->off = off;
+ req->bufsml[0].len = len;
+ POST;
+}
+
+
+int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
+ INIT(STAT);
+ PATH;
+ POST;
+}
+
+
+int uv_fs_symlink(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ const char* new_path,
+ int flags,
+ uv_fs_cb cb) {
+ INIT(SYMLINK);
+ PATH2;
+ req->flags = flags;
+ POST;
+}
+
+
+int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
+ INIT(UNLINK);
+ PATH;
+ POST;
+}
+
+
+int uv_fs_utime(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ double atime,
+ double mtime,
+ uv_fs_cb cb) {
+ INIT(UTIME);
+ PATH;
+ req->atime = atime;
+ req->mtime = mtime;
+ POST;
+}
+
+
+int uv_fs_write(uv_loop_t* loop,
+ uv_fs_t* req,
+ uv_file file,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ int64_t off,
+ uv_fs_cb cb) {
+ INIT(WRITE);
+
+ if (bufs == NULL || nbufs == 0)
+ return UV_EINVAL;
+
+ req->file = file;
+
+ req->nbufs = nbufs;
+ req->bufs = req->bufsml;
+ if (nbufs > ARRAY_SIZE(req->bufsml))
+ req->bufs = uv__malloc(nbufs * sizeof(*bufs));
+
+ if (req->bufs == NULL)
+ return UV_ENOMEM;
+
+ memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));
+
+ req->off = off;
+ POST;
+}
+
+
+void uv_fs_req_cleanup(uv_fs_t* req) {
+ if (req == NULL)
+ return;
+
+  /* Only necessary for asynchronous requests, i.e., requests with a callback.
+ * Synchronous ones don't copy their arguments and have req->path and
+ * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP and
+ * UV_FS_MKSTEMP are the exception to the rule, they always allocate memory.
+ */
+ if (req->path != NULL &&
+ (req->cb != NULL ||
+ req->fs_type == UV_FS_MKDTEMP || req->fs_type == UV_FS_MKSTEMP))
+ uv__free((void*) req->path); /* Memory is shared with req->new_path. */
+
+ req->path = NULL;
+ req->new_path = NULL;
+
+ if (req->fs_type == UV_FS_READDIR && req->ptr != NULL)
+ uv__fs_readdir_cleanup(req);
+
+ if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL)
+ uv__fs_scandir_cleanup(req);
+
+ if (req->bufs != req->bufsml)
+ uv__free(req->bufs);
+ req->bufs = NULL;
+
+ if (req->fs_type != UV_FS_OPENDIR && req->ptr != &req->statbuf)
+ uv__free(req->ptr);
+ req->ptr = NULL;
+}
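+
+/* Usage sketch: synchronous requests need cleanup too, since req->ptr
+ * (e.g. scandir results) and, for MKDTEMP/MKSTEMP, req->path are heap
+ * allocations even without a callback:
+ *
+ *   uv_fs_t req;
+ *   if (uv_fs_stat(NULL, &req, "/tmp", NULL) == 0)
+ *     printf("size: %lld\n", (long long) req.statbuf.st_size);
+ *   uv_fs_req_cleanup(&req);
+ */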
+
+
+int uv_fs_copyfile(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ const char* new_path,
+ int flags,
+ uv_fs_cb cb) {
+ INIT(COPYFILE);
+
+ if (flags & ~(UV_FS_COPYFILE_EXCL |
+ UV_FS_COPYFILE_FICLONE |
+ UV_FS_COPYFILE_FICLONE_FORCE)) {
+ return UV_EINVAL;
+ }
+
+ PATH2;
+ req->flags = flags;
+ POST;
+}
+
+
+int uv_fs_statfs(uv_loop_t* loop,
+ uv_fs_t* req,
+ const char* path,
+ uv_fs_cb cb) {
+ INIT(STATFS);
+ PATH;
+ POST;
+}
+
+int uv_fs_get_system_error(const uv_fs_t* req) {
+ return -req->result;
+}
diff --git a/Utilities/cmlibuv/src/unix/fsevents.c b/Utilities/cmlibuv/src/unix/fsevents.c
new file mode 100644
index 0000000000..bf4f1f6a51
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/fsevents.c
@@ -0,0 +1,916 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#if TARGET_OS_IPHONE || MAC_OS_X_VERSION_MAX_ALLOWED < 1070
+
+/* iOS (currently) doesn't provide the FSEvents-API (nor CoreServices) */
+/* macOS prior to 10.7 doesn't provide the full FSEvents API so use kqueue */
+
+int uv__fsevents_init(uv_fs_event_t* handle) {
+ return 0;
+}
+
+
+int uv__fsevents_close(uv_fs_event_t* handle) {
+ return 0;
+}
+
+
+void uv__fsevents_loop_delete(uv_loop_t* loop) {
+}
+
+#else /* TARGET_OS_IPHONE */
+
+#include "darwin-stub.h"
+
+#include <dlfcn.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <pthread.h>
+
+static const int kFSEventsModified =
+ kFSEventStreamEventFlagItemChangeOwner |
+ kFSEventStreamEventFlagItemFinderInfoMod |
+ kFSEventStreamEventFlagItemInodeMetaMod |
+ kFSEventStreamEventFlagItemModified |
+ kFSEventStreamEventFlagItemXattrMod;
+
+static const int kFSEventsRenamed =
+ kFSEventStreamEventFlagItemCreated |
+ kFSEventStreamEventFlagItemRemoved |
+ kFSEventStreamEventFlagItemRenamed;
+
+static const int kFSEventsSystem =
+ kFSEventStreamEventFlagUserDropped |
+ kFSEventStreamEventFlagKernelDropped |
+ kFSEventStreamEventFlagEventIdsWrapped |
+ kFSEventStreamEventFlagHistoryDone |
+ kFSEventStreamEventFlagMount |
+ kFSEventStreamEventFlagUnmount |
+ kFSEventStreamEventFlagRootChanged;
+
+typedef struct uv__fsevents_event_s uv__fsevents_event_t;
+typedef struct uv__cf_loop_signal_s uv__cf_loop_signal_t;
+typedef struct uv__cf_loop_state_s uv__cf_loop_state_t;
+
+enum uv__cf_loop_signal_type_e {
+ kUVCFLoopSignalRegular,
+ kUVCFLoopSignalClosing
+};
+typedef enum uv__cf_loop_signal_type_e uv__cf_loop_signal_type_t;
+
+struct uv__cf_loop_signal_s {
+ QUEUE member;
+ uv_fs_event_t* handle;
+ uv__cf_loop_signal_type_t type;
+};
+
+struct uv__fsevents_event_s {
+ QUEUE member;
+ int events;
+ char path[1];
+};
+
+struct uv__cf_loop_state_s {
+ CFRunLoopRef loop;
+ CFRunLoopSourceRef signal_source;
+ int fsevent_need_reschedule;
+ FSEventStreamRef fsevent_stream;
+ uv_sem_t fsevent_sem;
+ uv_mutex_t fsevent_mutex;
+ void* fsevent_handles[2];
+ unsigned int fsevent_handle_count;
+};
+
+/* Forward declarations */
+static void uv__cf_loop_cb(void* arg);
+static void* uv__cf_loop_runner(void* arg);
+static int uv__cf_loop_signal(uv_loop_t* loop,
+ uv_fs_event_t* handle,
+ uv__cf_loop_signal_type_t type);
+
+/* Lazy-loaded by uv__fsevents_global_init(). */
+static CFArrayRef (*pCFArrayCreate)(CFAllocatorRef,
+ const void**,
+ CFIndex,
+ const CFArrayCallBacks*);
+static void (*pCFRelease)(CFTypeRef);
+static void (*pCFRunLoopAddSource)(CFRunLoopRef,
+ CFRunLoopSourceRef,
+ CFStringRef);
+static CFRunLoopRef (*pCFRunLoopGetCurrent)(void);
+static void (*pCFRunLoopRemoveSource)(CFRunLoopRef,
+ CFRunLoopSourceRef,
+ CFStringRef);
+static void (*pCFRunLoopRun)(void);
+static CFRunLoopSourceRef (*pCFRunLoopSourceCreate)(CFAllocatorRef,
+ CFIndex,
+ CFRunLoopSourceContext*);
+static void (*pCFRunLoopSourceSignal)(CFRunLoopSourceRef);
+static void (*pCFRunLoopStop)(CFRunLoopRef);
+static void (*pCFRunLoopWakeUp)(CFRunLoopRef);
+static CFStringRef (*pCFStringCreateWithFileSystemRepresentation)(
+ CFAllocatorRef,
+ const char*);
+static CFStringEncoding (*pCFStringGetSystemEncoding)(void);
+static CFStringRef (*pkCFRunLoopDefaultMode);
+static FSEventStreamRef (*pFSEventStreamCreate)(CFAllocatorRef,
+ FSEventStreamCallback,
+ FSEventStreamContext*,
+ CFArrayRef,
+ FSEventStreamEventId,
+ CFTimeInterval,
+ FSEventStreamCreateFlags);
+static void (*pFSEventStreamFlushSync)(FSEventStreamRef);
+static void (*pFSEventStreamInvalidate)(FSEventStreamRef);
+static void (*pFSEventStreamRelease)(FSEventStreamRef);
+static void (*pFSEventStreamScheduleWithRunLoop)(FSEventStreamRef,
+ CFRunLoopRef,
+ CFStringRef);
+static int (*pFSEventStreamStart)(FSEventStreamRef);
+static void (*pFSEventStreamStop)(FSEventStreamRef);
+
+#define UV__FSEVENTS_PROCESS(handle, block) \
+ do { \
+ QUEUE events; \
+ QUEUE* q; \
+ uv__fsevents_event_t* event; \
+ int err; \
+ uv_mutex_lock(&(handle)->cf_mutex); \
+ /* Split-off all events and empty original queue */ \
+ QUEUE_MOVE(&(handle)->cf_events, &events); \
+ /* Get error (if any) and zero original one */ \
+ err = (handle)->cf_error; \
+ (handle)->cf_error = 0; \
+ uv_mutex_unlock(&(handle)->cf_mutex); \
+ /* Loop through events, deallocating each after processing */ \
+ while (!QUEUE_EMPTY(&events)) { \
+ q = QUEUE_HEAD(&events); \
+ event = QUEUE_DATA(q, uv__fsevents_event_t, member); \
+ QUEUE_REMOVE(q); \
+    /* NOTE: Checking uv__is_active() is required here, because the         \
+     * handle's callback may close the handle; invoking it after that       \
+     * would lead to incorrect behaviour */                                  \
+ if (!uv__is_closing((handle)) && uv__is_active((handle))) \
+ block \
+ /* Free allocated data */ \
+ uv__free(event); \
+ } \
+ if (err != 0 && !uv__is_closing((handle)) && uv__is_active((handle))) \
+ (handle)->cb((handle), NULL, 0, err); \
+ } while (0)
+
+
+/* Runs in the UV loop's thread when there are events to report to the handle */
+static void uv__fsevents_cb(uv_async_t* cb) {
+ uv_fs_event_t* handle;
+
+ handle = cb->data;
+
+ UV__FSEVENTS_PROCESS(handle, {
+ handle->cb(handle, event->path[0] ? event->path : NULL, event->events, 0);
+ });
+}
+
+
+/* Runs in the CF thread; pushes events onto the handle's event list */
+static void uv__fsevents_push_event(uv_fs_event_t* handle,
+ QUEUE* events,
+ int err) {
+ assert(events != NULL || err != 0);
+ uv_mutex_lock(&handle->cf_mutex);
+
+ /* Concatenate two queues */
+ if (events != NULL)
+ QUEUE_ADD(&handle->cf_events, events);
+
+ /* Propagate error */
+ if (err != 0)
+ handle->cf_error = err;
+ uv_mutex_unlock(&handle->cf_mutex);
+
+ uv_async_send(handle->cf_cb);
+}
+
+
+/* Runs in the CF thread when there are events in the FSEventStream */
+static void uv__fsevents_event_cb(const FSEventStreamRef streamRef,
+ void* info,
+ size_t numEvents,
+ void* eventPaths,
+ const FSEventStreamEventFlags eventFlags[],
+ const FSEventStreamEventId eventIds[]) {
+ size_t i;
+ int len;
+ char** paths;
+ char* path;
+ char* pos;
+ uv_fs_event_t* handle;
+ QUEUE* q;
+ uv_loop_t* loop;
+ uv__cf_loop_state_t* state;
+ uv__fsevents_event_t* event;
+ FSEventStreamEventFlags flags;
+ QUEUE head;
+
+ loop = info;
+ state = loop->cf_state;
+ assert(state != NULL);
+ paths = eventPaths;
+
+ /* For each handle */
+ uv_mutex_lock(&state->fsevent_mutex);
+ QUEUE_FOREACH(q, &state->fsevent_handles) {
+ handle = QUEUE_DATA(q, uv_fs_event_t, cf_member);
+ QUEUE_INIT(&head);
+
+ /* Process and filter out events */
+ for (i = 0; i < numEvents; i++) {
+ flags = eventFlags[i];
+
+ /* Ignore system events */
+ if (flags & kFSEventsSystem)
+ continue;
+
+ path = paths[i];
+ len = strlen(path);
+
+ if (handle->realpath_len == 0)
+ continue; /* This should be unreachable */
+
+ /* Filter out paths that are outside handle's request */
+ if (len < handle->realpath_len)
+ continue;
+
+      /* Make sure that realpath actually named a directory (unless watching
+       * root, which alone keeps a trailing slash on the realpath), or that
+       * we matched the whole string */
+ if (handle->realpath_len != len &&
+ handle->realpath_len > 1 &&
+ path[handle->realpath_len] != '/')
+ continue;
+
+ if (memcmp(path, handle->realpath, handle->realpath_len) != 0)
+ continue;
+
+ if (!(handle->realpath_len == 1 && handle->realpath[0] == '/')) {
+ /* Remove common prefix, unless the watched folder is "/" */
+ path += handle->realpath_len;
+ len -= handle->realpath_len;
+
+ /* Ignore events with path equal to directory itself */
+ if (len <= 1 && (flags & kFSEventStreamEventFlagItemIsDir))
+ continue;
+
+ if (len == 0) {
+ /* Since we're using fsevents to watch the file itself,
+ * realpath == path, and we now need to get the basename of the file back
+ * (for commonality with other codepaths and platforms). */
+ while (len < handle->realpath_len && path[-1] != '/') {
+ path--;
+ len++;
+ }
+          /* Created and Removed seem to always be set, but don't make sense here */
+ flags &= ~kFSEventsRenamed;
+ } else {
+ /* Skip forward slash */
+ path++;
+ len--;
+ }
+ }
+
+ /* Do not emit events from subdirectories (without option set) */
+ if ((handle->cf_flags & UV_FS_EVENT_RECURSIVE) == 0 && *path != '\0') {
+ pos = strchr(path + 1, '/');
+ if (pos != NULL)
+ continue;
+ }
+
+ event = uv__malloc(sizeof(*event) + len);
+ if (event == NULL)
+ break;
+
+ memset(event, 0, sizeof(*event));
+ memcpy(event->path, path, len + 1);
+ event->events = UV_RENAME;
+
+ if (0 == (flags & kFSEventsRenamed)) {
+ if (0 != (flags & kFSEventsModified) ||
+ 0 == (flags & kFSEventStreamEventFlagItemIsDir))
+ event->events = UV_CHANGE;
+ }
+
+ QUEUE_INSERT_TAIL(&head, &event->member);
+ }
+
+ if (!QUEUE_EMPTY(&head))
+ uv__fsevents_push_event(handle, &head, 0);
+ }
+ uv_mutex_unlock(&state->fsevent_mutex);
+}
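+
+/* Worked example: when watching "/foo" (realpath_len == 4), an event for
+ * "/foo/bar" is trimmed to "bar" and delivered, "/foo/bar/baz" is dropped
+ * unless UV_FS_EVENT_RECURSIVE is set, and "/foobar" fails the
+ * path[realpath_len] == '/' check and is filtered out. */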
+
+
+/* Runs in CF thread */
+static int uv__fsevents_create_stream(uv_loop_t* loop, CFArrayRef paths) {
+ uv__cf_loop_state_t* state;
+ FSEventStreamContext ctx;
+ FSEventStreamRef ref;
+ CFAbsoluteTime latency;
+ FSEventStreamCreateFlags flags;
+
+ /* Initialize context */
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.info = loop;
+
+ latency = 0.05;
+
+  /* Explanation of selected flags:
+   * 1. NoDefer - without this flag, events that occur continuously (i.e.
+   *    each event arrives less than `latency` after the previous one) are
+   *    deferred and passed to the callback only once they fill the OS
+   *    buffer or once the continuous stream stops (i.e. the delay between
+   *    events grows beyond `latency`).
+   *    With this flag, the callback is invoked `latency` after each event.
+   * 2. FileEvents - fire the callback for file changes too (by default it
+   *    fires only for directory changes).
+   */
+ flags = kFSEventStreamCreateFlagNoDefer | kFSEventStreamCreateFlagFileEvents;
+
+ /*
+   * NOTE: It might sound like a good idea to remember the last seen
+   * StreamEventId, but in reality one directory might have a last
+   * StreamEventId lower than another directory that is being watched now,
+   * which would cause the FSEventStream API to report changes to files
+   * from the past.
+ */
+ ref = pFSEventStreamCreate(NULL,
+ &uv__fsevents_event_cb,
+ &ctx,
+ paths,
+ kFSEventStreamEventIdSinceNow,
+ latency,
+ flags);
+ assert(ref != NULL);
+
+ state = loop->cf_state;
+ pFSEventStreamScheduleWithRunLoop(ref,
+ state->loop,
+ *pkCFRunLoopDefaultMode);
+ if (!pFSEventStreamStart(ref)) {
+ pFSEventStreamInvalidate(ref);
+ pFSEventStreamRelease(ref);
+ return UV_EMFILE;
+ }
+
+ state->fsevent_stream = ref;
+ return 0;
+}
+
+
+/* Runs in CF thread */
+static void uv__fsevents_destroy_stream(uv_loop_t* loop) {
+ uv__cf_loop_state_t* state;
+
+ state = loop->cf_state;
+
+ if (state->fsevent_stream == NULL)
+ return;
+
+ /* Stop emitting events */
+ pFSEventStreamStop(state->fsevent_stream);
+
+ /* Release stream */
+ pFSEventStreamInvalidate(state->fsevent_stream);
+ pFSEventStreamRelease(state->fsevent_stream);
+ state->fsevent_stream = NULL;
+}
+
+
+/* Runs in the CF thread when there are new fsevent handles to add to the stream */
+static void uv__fsevents_reschedule(uv_fs_event_t* handle,
+ uv__cf_loop_signal_type_t type) {
+ uv__cf_loop_state_t* state;
+ QUEUE* q;
+ uv_fs_event_t* curr;
+ CFArrayRef cf_paths;
+ CFStringRef* paths;
+ unsigned int i;
+ int err;
+ unsigned int path_count;
+
+ state = handle->loop->cf_state;
+ paths = NULL;
+ cf_paths = NULL;
+ err = 0;
+ /* NOTE: `i` is used in deallocation loop below */
+ i = 0;
+
+ /* Optimization to prevent O(n^2) time spent when starting to watch
+ * many files simultaneously
+ */
+ uv_mutex_lock(&state->fsevent_mutex);
+ if (state->fsevent_need_reschedule == 0) {
+ uv_mutex_unlock(&state->fsevent_mutex);
+ goto final;
+ }
+ state->fsevent_need_reschedule = 0;
+ uv_mutex_unlock(&state->fsevent_mutex);
+
+ /* Destroy previous FSEventStream */
+ uv__fsevents_destroy_stream(handle->loop);
+
+ /* Any failure below will be a memory failure */
+ err = UV_ENOMEM;
+
+ /* Create list of all watched paths */
+ uv_mutex_lock(&state->fsevent_mutex);
+ path_count = state->fsevent_handle_count;
+ if (path_count != 0) {
+ paths = uv__malloc(sizeof(*paths) * path_count);
+ if (paths == NULL) {
+ uv_mutex_unlock(&state->fsevent_mutex);
+ goto final;
+ }
+
+ q = &state->fsevent_handles;
+ for (; i < path_count; i++) {
+ q = QUEUE_NEXT(q);
+ assert(q != &state->fsevent_handles);
+ curr = QUEUE_DATA(q, uv_fs_event_t, cf_member);
+
+ assert(curr->realpath != NULL);
+ paths[i] =
+ pCFStringCreateWithFileSystemRepresentation(NULL, curr->realpath);
+ if (paths[i] == NULL) {
+ uv_mutex_unlock(&state->fsevent_mutex);
+ goto final;
+ }
+ }
+ }
+ uv_mutex_unlock(&state->fsevent_mutex);
+ err = 0;
+
+ if (path_count != 0) {
+ /* Create new FSEventStream */
+ cf_paths = pCFArrayCreate(NULL, (const void**) paths, path_count, NULL);
+ if (cf_paths == NULL) {
+ err = UV_ENOMEM;
+ goto final;
+ }
+ err = uv__fsevents_create_stream(handle->loop, cf_paths);
+ }
+
+final:
+ /* Deallocate all paths in case of failure */
+ if (err != 0) {
+ if (cf_paths == NULL) {
+ while (i != 0)
+ pCFRelease(paths[--i]);
+ uv__free(paths);
+ } else {
+ /* CFArray takes ownership of both strings and original C-array */
+ pCFRelease(cf_paths);
+ }
+
+ /* Broadcast error to all handles */
+ uv_mutex_lock(&state->fsevent_mutex);
+ QUEUE_FOREACH(q, &state->fsevent_handles) {
+ curr = QUEUE_DATA(q, uv_fs_event_t, cf_member);
+ uv__fsevents_push_event(curr, NULL, err);
+ }
+ uv_mutex_unlock(&state->fsevent_mutex);
+ }
+
+ /*
+   * The main thread will block until the handle is removed from the list,
+   * so we must tell it when we're ready.
+ *
+ * NOTE: This is coupled with `uv_sem_wait()` in `uv__fsevents_close`
+ */
+ if (type == kUVCFLoopSignalClosing)
+ uv_sem_post(&state->fsevent_sem);
+}
+
+
+static int uv__fsevents_global_init(void) {
+ static pthread_mutex_t global_init_mutex = PTHREAD_MUTEX_INITIALIZER;
+ static void* core_foundation_handle;
+ static void* core_services_handle;
+ int err;
+
+ err = 0;
+ pthread_mutex_lock(&global_init_mutex);
+ if (core_foundation_handle != NULL)
+ goto out;
+
+ /* The libraries are never unloaded because we currently don't have a good
+ * mechanism for keeping a reference count. It's unlikely to be an issue
+ * but if it ever becomes one, we can turn the dynamic library handles into
+ * per-event loop properties and have the dynamic linker keep track for us.
+ */
+ err = UV_ENOSYS;
+ core_foundation_handle = dlopen("/System/Library/Frameworks/"
+ "CoreFoundation.framework/"
+ "Versions/A/CoreFoundation",
+ RTLD_LAZY | RTLD_LOCAL);
+ if (core_foundation_handle == NULL)
+ goto out;
+
+ core_services_handle = dlopen("/System/Library/Frameworks/"
+ "CoreServices.framework/"
+ "Versions/A/CoreServices",
+ RTLD_LAZY | RTLD_LOCAL);
+ if (core_services_handle == NULL)
+ goto out;
+
+ err = UV_ENOENT;
+#define V(handle, symbol) \
+ do { \
+ *(void **)(&p ## symbol) = dlsym((handle), #symbol); \
+ if (p ## symbol == NULL) \
+ goto out; \
+ } \
+ while (0)
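+  /* Each V(handle, Symbol) expands to roughly:
+   *   *(void **)(&pSymbol) = dlsym(handle, "Symbol");
+   *   if (pSymbol == NULL) goto out;
+   * The void** cast sidesteps the ISO C restriction on converting a void*
+   * to a function pointer.
+   */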
+ V(core_foundation_handle, CFArrayCreate);
+ V(core_foundation_handle, CFRelease);
+ V(core_foundation_handle, CFRunLoopAddSource);
+ V(core_foundation_handle, CFRunLoopGetCurrent);
+ V(core_foundation_handle, CFRunLoopRemoveSource);
+ V(core_foundation_handle, CFRunLoopRun);
+ V(core_foundation_handle, CFRunLoopSourceCreate);
+ V(core_foundation_handle, CFRunLoopSourceSignal);
+ V(core_foundation_handle, CFRunLoopStop);
+ V(core_foundation_handle, CFRunLoopWakeUp);
+ V(core_foundation_handle, CFStringCreateWithFileSystemRepresentation);
+ V(core_foundation_handle, CFStringGetSystemEncoding);
+ V(core_foundation_handle, kCFRunLoopDefaultMode);
+ V(core_services_handle, FSEventStreamCreate);
+ V(core_services_handle, FSEventStreamFlushSync);
+ V(core_services_handle, FSEventStreamInvalidate);
+ V(core_services_handle, FSEventStreamRelease);
+ V(core_services_handle, FSEventStreamScheduleWithRunLoop);
+ V(core_services_handle, FSEventStreamStart);
+ V(core_services_handle, FSEventStreamStop);
+#undef V
+ err = 0;
+
+out:
+ if (err && core_services_handle != NULL) {
+ dlclose(core_services_handle);
+ core_services_handle = NULL;
+ }
+
+ if (err && core_foundation_handle != NULL) {
+ dlclose(core_foundation_handle);
+ core_foundation_handle = NULL;
+ }
+
+ pthread_mutex_unlock(&global_init_mutex);
+ return err;
+}
+
+
+/* Runs in UV loop */
+static int uv__fsevents_loop_init(uv_loop_t* loop) {
+ CFRunLoopSourceContext ctx;
+ uv__cf_loop_state_t* state;
+ pthread_attr_t attr;
+ int err;
+
+ if (loop->cf_state != NULL)
+ return 0;
+
+ err = uv__fsevents_global_init();
+ if (err)
+ return err;
+
+ state = uv__calloc(1, sizeof(*state));
+ if (state == NULL)
+ return UV_ENOMEM;
+
+ err = uv_mutex_init(&loop->cf_mutex);
+ if (err)
+ goto fail_mutex_init;
+
+ err = uv_sem_init(&loop->cf_sem, 0);
+ if (err)
+ goto fail_sem_init;
+
+ QUEUE_INIT(&loop->cf_signals);
+
+ err = uv_sem_init(&state->fsevent_sem, 0);
+ if (err)
+ goto fail_fsevent_sem_init;
+
+ err = uv_mutex_init(&state->fsevent_mutex);
+ if (err)
+ goto fail_fsevent_mutex_init;
+
+ QUEUE_INIT(&state->fsevent_handles);
+ state->fsevent_need_reschedule = 0;
+ state->fsevent_handle_count = 0;
+
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.info = loop;
+ ctx.perform = uv__cf_loop_cb;
+ state->signal_source = pCFRunLoopSourceCreate(NULL, 0, &ctx);
+ if (state->signal_source == NULL) {
+ err = UV_ENOMEM;
+ goto fail_signal_source_create;
+ }
+
+ if (pthread_attr_init(&attr))
+ abort();
+
+ if (pthread_attr_setstacksize(&attr, uv__thread_stack_size()))
+ abort();
+
+ loop->cf_state = state;
+
+ /* uv_thread_t is an alias for pthread_t. */
+ err = UV__ERR(pthread_create(&loop->cf_thread, &attr, uv__cf_loop_runner, loop));
+
+ if (pthread_attr_destroy(&attr))
+ abort();
+
+ if (err)
+ goto fail_thread_create;
+
+ /* Synchronize threads */
+ uv_sem_wait(&loop->cf_sem);
+ return 0;
+
+fail_thread_create:
+ loop->cf_state = NULL;
+
+fail_signal_source_create:
+ uv_mutex_destroy(&state->fsevent_mutex);
+
+fail_fsevent_mutex_init:
+ uv_sem_destroy(&state->fsevent_sem);
+
+fail_fsevent_sem_init:
+ uv_sem_destroy(&loop->cf_sem);
+
+fail_sem_init:
+ uv_mutex_destroy(&loop->cf_mutex);
+
+fail_mutex_init:
+ uv__free(state);
+ return err;
+}
+
+
+/* Runs in UV loop */
+void uv__fsevents_loop_delete(uv_loop_t* loop) {
+ uv__cf_loop_signal_t* s;
+ uv__cf_loop_state_t* state;
+ QUEUE* q;
+
+ if (loop->cf_state == NULL)
+ return;
+
+ if (uv__cf_loop_signal(loop, NULL, kUVCFLoopSignalRegular) != 0)
+ abort();
+
+ uv_thread_join(&loop->cf_thread);
+ uv_sem_destroy(&loop->cf_sem);
+ uv_mutex_destroy(&loop->cf_mutex);
+
+ /* Free any remaining data */
+ while (!QUEUE_EMPTY(&loop->cf_signals)) {
+ q = QUEUE_HEAD(&loop->cf_signals);
+ s = QUEUE_DATA(q, uv__cf_loop_signal_t, member);
+ QUEUE_REMOVE(q);
+ uv__free(s);
+ }
+
+ /* Destroy state */
+ state = loop->cf_state;
+ uv_sem_destroy(&state->fsevent_sem);
+ uv_mutex_destroy(&state->fsevent_mutex);
+ pCFRelease(state->signal_source);
+ uv__free(state);
+ loop->cf_state = NULL;
+}
+
+
+/* Runs in CF thread. This is the CF loop's body */
+static void* uv__cf_loop_runner(void* arg) {
+ uv_loop_t* loop;
+ uv__cf_loop_state_t* state;
+
+ loop = arg;
+ state = loop->cf_state;
+ state->loop = pCFRunLoopGetCurrent();
+
+ pCFRunLoopAddSource(state->loop,
+ state->signal_source,
+ *pkCFRunLoopDefaultMode);
+
+ uv_sem_post(&loop->cf_sem);
+
+ pCFRunLoopRun();
+ pCFRunLoopRemoveSource(state->loop,
+ state->signal_source,
+ *pkCFRunLoopDefaultMode);
+
+ state->loop = NULL;
+
+ return NULL;
+}
+
+
+/* Runs in CF thread, executed after `uv__cf_loop_signal()` */
+static void uv__cf_loop_cb(void* arg) {
+ uv_loop_t* loop;
+ uv__cf_loop_state_t* state;
+ QUEUE* item;
+ QUEUE split_head;
+ uv__cf_loop_signal_t* s;
+
+ loop = arg;
+ state = loop->cf_state;
+
+ uv_mutex_lock(&loop->cf_mutex);
+ QUEUE_MOVE(&loop->cf_signals, &split_head);
+ uv_mutex_unlock(&loop->cf_mutex);
+
+ while (!QUEUE_EMPTY(&split_head)) {
+ item = QUEUE_HEAD(&split_head);
+ QUEUE_REMOVE(item);
+
+ s = QUEUE_DATA(item, uv__cf_loop_signal_t, member);
+
+ /* This was a termination signal */
+ if (s->handle == NULL)
+ pCFRunLoopStop(state->loop);
+ else
+ uv__fsevents_reschedule(s->handle, s->type);
+
+ uv__free(s);
+ }
+}
+
+
+/* Runs in UV loop to notify CF thread */
+int uv__cf_loop_signal(uv_loop_t* loop,
+ uv_fs_event_t* handle,
+ uv__cf_loop_signal_type_t type) {
+ uv__cf_loop_signal_t* item;
+ uv__cf_loop_state_t* state;
+
+ item = uv__malloc(sizeof(*item));
+ if (item == NULL)
+ return UV_ENOMEM;
+
+ item->handle = handle;
+ item->type = type;
+
+ uv_mutex_lock(&loop->cf_mutex);
+ QUEUE_INSERT_TAIL(&loop->cf_signals, &item->member);
+
+ state = loop->cf_state;
+ assert(state != NULL);
+ pCFRunLoopSourceSignal(state->signal_source);
+ pCFRunLoopWakeUp(state->loop);
+
+ uv_mutex_unlock(&loop->cf_mutex);
+
+ return 0;
+}
+
+
+/* Runs in UV loop to initialize handle */
+int uv__fsevents_init(uv_fs_event_t* handle) {
+ int err;
+ uv__cf_loop_state_t* state;
+
+ err = uv__fsevents_loop_init(handle->loop);
+ if (err)
+ return err;
+
+ /* Get absolute path to file */
+ handle->realpath = realpath(handle->path, NULL);
+ if (handle->realpath == NULL)
+ return UV__ERR(errno);
+ handle->realpath_len = strlen(handle->realpath);
+
+ /* Initialize event queue */
+ QUEUE_INIT(&handle->cf_events);
+ handle->cf_error = 0;
+
+ /*
+   * Events will occur on another thread.
+   * Initialize a callback to get them back onto the event loop's thread.
+ */
+ handle->cf_cb = uv__malloc(sizeof(*handle->cf_cb));
+ if (handle->cf_cb == NULL) {
+ err = UV_ENOMEM;
+ goto fail_cf_cb_malloc;
+ }
+
+ handle->cf_cb->data = handle;
+ uv_async_init(handle->loop, handle->cf_cb, uv__fsevents_cb);
+ handle->cf_cb->flags |= UV_HANDLE_INTERNAL;
+ uv_unref((uv_handle_t*) handle->cf_cb);
+
+ err = uv_mutex_init(&handle->cf_mutex);
+ if (err)
+ goto fail_cf_mutex_init;
+
+ /* Insert handle into the list */
+ state = handle->loop->cf_state;
+ uv_mutex_lock(&state->fsevent_mutex);
+ QUEUE_INSERT_TAIL(&state->fsevent_handles, &handle->cf_member);
+ state->fsevent_handle_count++;
+ state->fsevent_need_reschedule = 1;
+ uv_mutex_unlock(&state->fsevent_mutex);
+
+ /* Reschedule FSEventStream */
+ assert(handle != NULL);
+ err = uv__cf_loop_signal(handle->loop, handle, kUVCFLoopSignalRegular);
+ if (err)
+ goto fail_loop_signal;
+
+ return 0;
+
+fail_loop_signal:
+ uv_mutex_destroy(&handle->cf_mutex);
+
+fail_cf_mutex_init:
+ uv__free(handle->cf_cb);
+ handle->cf_cb = NULL;
+
+fail_cf_cb_malloc:
+ uv__free(handle->realpath);
+ handle->realpath = NULL;
+ handle->realpath_len = 0;
+
+ return err;
+}
+
+
+/* Runs in UV loop to de-initialize handle */
+int uv__fsevents_close(uv_fs_event_t* handle) {
+ int err;
+ uv__cf_loop_state_t* state;
+
+ if (handle->cf_cb == NULL)
+ return UV_EINVAL;
+
+ /* Remove handle from the list */
+ state = handle->loop->cf_state;
+ uv_mutex_lock(&state->fsevent_mutex);
+ QUEUE_REMOVE(&handle->cf_member);
+ state->fsevent_handle_count--;
+ state->fsevent_need_reschedule = 1;
+ uv_mutex_unlock(&state->fsevent_mutex);
+
+ /* Reschedule FSEventStream */
+ assert(handle != NULL);
+ err = uv__cf_loop_signal(handle->loop, handle, kUVCFLoopSignalClosing);
+ if (err)
+ return UV__ERR(err);
+
+ /* Wait for deinitialization */
+ uv_sem_wait(&state->fsevent_sem);
+
+ uv_close((uv_handle_t*) handle->cf_cb, (uv_close_cb) uv__free);
+ handle->cf_cb = NULL;
+
+ /* Free data in queue */
+ UV__FSEVENTS_PROCESS(handle, {
+ /* NOP */
+ });
+
+ uv_mutex_destroy(&handle->cf_mutex);
+ uv__free(handle->realpath);
+ handle->realpath = NULL;
+ handle->realpath_len = 0;
+
+ return 0;
+}
+
+#endif /* TARGET_OS_IPHONE */
diff --git a/Utilities/cmlibuv/src/unix/getaddrinfo.c b/Utilities/cmlibuv/src/unix/getaddrinfo.c
new file mode 100644
index 0000000000..77337ace94
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/getaddrinfo.c
@@ -0,0 +1,252 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/* Expose glibc-specific EAI_* error codes. Needs to be defined before we
+ * include any headers.
+ */
+
+#include "uv.h"
+#include "internal.h"
+#include "idna.h"
+
+#include <errno.h>
+#include <stddef.h> /* NULL */
+#include <stdlib.h>
+#include <string.h>
+#include <net/if.h> /* if_indextoname() */
+
+/* EAI_* constants. */
+#include <netdb.h>
+
+
+int uv__getaddrinfo_translate_error(int sys_err) {
+ switch (sys_err) {
+ case 0: return 0;
+#if defined(EAI_ADDRFAMILY)
+ case EAI_ADDRFAMILY: return UV_EAI_ADDRFAMILY;
+#endif
+#if defined(EAI_AGAIN)
+ case EAI_AGAIN: return UV_EAI_AGAIN;
+#endif
+#if defined(EAI_BADFLAGS)
+ case EAI_BADFLAGS: return UV_EAI_BADFLAGS;
+#endif
+#if defined(EAI_BADHINTS)
+ case EAI_BADHINTS: return UV_EAI_BADHINTS;
+#endif
+#if defined(EAI_CANCELED)
+ case EAI_CANCELED: return UV_EAI_CANCELED;
+#endif
+#if defined(EAI_FAIL)
+ case EAI_FAIL: return UV_EAI_FAIL;
+#endif
+#if defined(EAI_FAMILY)
+ case EAI_FAMILY: return UV_EAI_FAMILY;
+#endif
+#if defined(EAI_MEMORY)
+ case EAI_MEMORY: return UV_EAI_MEMORY;
+#endif
+#if defined(EAI_NODATA)
+ case EAI_NODATA: return UV_EAI_NODATA;
+#endif
+#if defined(EAI_NONAME)
+# if !defined(EAI_NODATA) || EAI_NODATA != EAI_NONAME
+ case EAI_NONAME: return UV_EAI_NONAME;
+# endif
+#endif
+#if defined(EAI_OVERFLOW)
+ case EAI_OVERFLOW: return UV_EAI_OVERFLOW;
+#endif
+#if defined(EAI_PROTOCOL)
+ case EAI_PROTOCOL: return UV_EAI_PROTOCOL;
+#endif
+#if defined(EAI_SERVICE)
+ case EAI_SERVICE: return UV_EAI_SERVICE;
+#endif
+#if defined(EAI_SOCKTYPE)
+ case EAI_SOCKTYPE: return UV_EAI_SOCKTYPE;
+#endif
+#if defined(EAI_SYSTEM)
+ case EAI_SYSTEM: return UV__ERR(errno);
+#endif
+ }
+ assert(!"unknown EAI_* error code");
+ abort();
+#ifndef __SUNPRO_C
+ return 0; /* Pacify compiler. */
+#endif
+}
+
+
+static void uv__getaddrinfo_work(struct uv__work* w) {
+ uv_getaddrinfo_t* req;
+ int err;
+
+ req = container_of(w, uv_getaddrinfo_t, work_req);
+ err = getaddrinfo(req->hostname, req->service, req->hints, &req->addrinfo);
+ req->retcode = uv__getaddrinfo_translate_error(err);
+}
+
+
+static void uv__getaddrinfo_done(struct uv__work* w, int status) {
+ uv_getaddrinfo_t* req;
+
+ req = container_of(w, uv_getaddrinfo_t, work_req);
+ uv__req_unregister(req->loop, req);
+
+ /* See initialization in uv_getaddrinfo(). */
+ if (req->hints)
+ uv__free(req->hints);
+ else if (req->service)
+ uv__free(req->service);
+ else if (req->hostname)
+ uv__free(req->hostname);
+ else
+ assert(0);
+
+ req->hints = NULL;
+ req->service = NULL;
+ req->hostname = NULL;
+
+ if (status == UV_ECANCELED) {
+ assert(req->retcode == 0);
+ req->retcode = UV_EAI_CANCELED;
+ }
+
+ if (req->cb)
+ req->cb(req, req->retcode, req->addrinfo);
+}
+
+
+int uv_getaddrinfo(uv_loop_t* loop,
+ uv_getaddrinfo_t* req,
+ uv_getaddrinfo_cb cb,
+ const char* hostname,
+ const char* service,
+ const struct addrinfo* hints) {
+ char hostname_ascii[256];
+ size_t hostname_len;
+ size_t service_len;
+ size_t hints_len;
+ size_t len;
+ char* buf;
+ long rc;
+
+ if (req == NULL || (hostname == NULL && service == NULL))
+ return UV_EINVAL;
+
+  /* FIXME(bnoordhuis) IDNA does not seem to work on z/OS,
+ * probably because it uses EBCDIC rather than ASCII.
+ */
+#ifdef __MVS__
+ (void) &hostname_ascii;
+#else
+ if (hostname != NULL) {
+ rc = uv__idna_toascii(hostname,
+ hostname + strlen(hostname),
+ hostname_ascii,
+ hostname_ascii + sizeof(hostname_ascii));
+ if (rc < 0)
+ return rc;
+ hostname = hostname_ascii;
+ }
+#endif
+
+ hostname_len = hostname ? strlen(hostname) + 1 : 0;
+ service_len = service ? strlen(service) + 1 : 0;
+ hints_len = hints ? sizeof(*hints) : 0;
+ buf = uv__malloc(hostname_len + service_len + hints_len);
+
+ if (buf == NULL)
+ return UV_ENOMEM;
+
+ uv__req_init(loop, req, UV_GETADDRINFO);
+ req->loop = loop;
+ req->cb = cb;
+ req->addrinfo = NULL;
+ req->hints = NULL;
+ req->service = NULL;
+ req->hostname = NULL;
+ req->retcode = 0;
+
+  /* Order matters; see uv__getaddrinfo_done(). */
+ len = 0;
+
+ if (hints) {
+ req->hints = memcpy(buf + len, hints, sizeof(*hints));
+ len += sizeof(*hints);
+ }
+
+ if (service) {
+ req->service = memcpy(buf + len, service, service_len);
+ len += service_len;
+ }
+
+ if (hostname)
+ req->hostname = memcpy(buf + len, hostname, hostname_len);
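+
+  /* The single allocation now looks like [hints][service][hostname];
+   * uv__getaddrinfo_done() frees whichever of the three pointers is set
+   * first in that order, which is always the start of `buf`. */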
+
+ if (cb) {
+ uv__work_submit(loop,
+ &req->work_req,
+ UV__WORK_SLOW_IO,
+ uv__getaddrinfo_work,
+ uv__getaddrinfo_done);
+ return 0;
+ } else {
+ uv__getaddrinfo_work(&req->work_req);
+ uv__getaddrinfo_done(&req->work_req, 0);
+ return req->retcode;
+ }
+}
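+
+/* Usage sketch: passing a NULL callback runs the lookup synchronously on
+ * the calling thread and returns the translated result directly:
+ *
+ *   uv_getaddrinfo_t req;
+ *   int r = uv_getaddrinfo(uv_default_loop(), &req, NULL,
+ *                          "localhost", "80", NULL);
+ *   if (r == 0)
+ *     uv_freeaddrinfo(req.addrinfo);
+ */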
+
+
+void uv_freeaddrinfo(struct addrinfo* ai) {
+ if (ai)
+ freeaddrinfo(ai);
+}
+
+
+int uv_if_indextoname(unsigned int ifindex, char* buffer, size_t* size) {
+ char ifname_buf[UV_IF_NAMESIZE];
+ size_t len;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ if (if_indextoname(ifindex, ifname_buf) == NULL)
+ return UV__ERR(errno);
+
+ len = strnlen(ifname_buf, sizeof(ifname_buf));
+
+ if (*size <= len) {
+ *size = len + 1;
+ return UV_ENOBUFS;
+ }
+
+ memcpy(buffer, ifname_buf, len);
+ buffer[len] = '\0';
+ *size = len;
+
+ return 0;
+}
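+
+/* Usage sketch: on UV_ENOBUFS the required size, including the terminating
+ * NUL, is reported back through *size so the caller can retry. Assuming an
+ * interface with index 1 exists:
+ *
+ *   char name[UV_IF_NAMESIZE];
+ *   size_t len = sizeof(name);
+ *   if (uv_if_indextoname(1, name, &len) == 0)
+ *     printf("%s (%zu chars)\n", name, len);
+ */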
+
+int uv_if_indextoiid(unsigned int ifindex, char* buffer, size_t* size) {
+ return uv_if_indextoname(ifindex, buffer, size);
+}
diff --git a/Utilities/cmlibuv/src/unix/getnameinfo.c b/Utilities/cmlibuv/src/unix/getnameinfo.c
new file mode 100644
index 0000000000..b695081b47
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/getnameinfo.c
@@ -0,0 +1,121 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a copy
+* of this software and associated documentation files (the "Software"), to
+* deal in the Software without restriction, including without limitation the
+* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+* sell copies of the Software, and to permit persons to whom the Software is
+* furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+* IN THE SOFTWARE.
+*/
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+
+static void uv__getnameinfo_work(struct uv__work* w) {
+ uv_getnameinfo_t* req;
+ int err;
+ socklen_t salen;
+
+ req = container_of(w, uv_getnameinfo_t, work_req);
+
+ if (req->storage.ss_family == AF_INET)
+ salen = sizeof(struct sockaddr_in);
+ else if (req->storage.ss_family == AF_INET6)
+ salen = sizeof(struct sockaddr_in6);
+ else
+ abort();
+
+ err = getnameinfo((struct sockaddr*) &req->storage,
+ salen,
+ req->host,
+ sizeof(req->host),
+ req->service,
+ sizeof(req->service),
+ req->flags);
+ req->retcode = uv__getaddrinfo_translate_error(err);
+}
+
+static void uv__getnameinfo_done(struct uv__work* w, int status) {
+ uv_getnameinfo_t* req;
+ char* host;
+ char* service;
+
+ req = container_of(w, uv_getnameinfo_t, work_req);
+ uv__req_unregister(req->loop, req);
+ host = service = NULL;
+
+ if (status == UV_ECANCELED) {
+ assert(req->retcode == 0);
+ req->retcode = UV_EAI_CANCELED;
+ } else if (req->retcode == 0) {
+ host = req->host;
+ service = req->service;
+ }
+
+ if (req->getnameinfo_cb)
+ req->getnameinfo_cb(req, req->retcode, host, service);
+}
+
+/*
+* Entry point for getnameinfo.
+* Returns 0 if a callback will be made, or an error code if validation fails.
+*/
+int uv_getnameinfo(uv_loop_t* loop,
+ uv_getnameinfo_t* req,
+ uv_getnameinfo_cb getnameinfo_cb,
+ const struct sockaddr* addr,
+ int flags) {
+ if (req == NULL || addr == NULL)
+ return UV_EINVAL;
+
+ if (addr->sa_family == AF_INET) {
+ memcpy(&req->storage,
+ addr,
+ sizeof(struct sockaddr_in));
+ } else if (addr->sa_family == AF_INET6) {
+ memcpy(&req->storage,
+ addr,
+ sizeof(struct sockaddr_in6));
+ } else {
+ return UV_EINVAL;
+ }
+
+ uv__req_init(loop, (uv_req_t*)req, UV_GETNAMEINFO);
+
+ req->getnameinfo_cb = getnameinfo_cb;
+ req->flags = flags;
+ req->type = UV_GETNAMEINFO;
+ req->loop = loop;
+ req->retcode = 0;
+
+ if (getnameinfo_cb) {
+ uv__work_submit(loop,
+ &req->work_req,
+ UV__WORK_SLOW_IO,
+ uv__getnameinfo_work,
+ uv__getnameinfo_done);
+ return 0;
+ } else {
+ uv__getnameinfo_work(&req->work_req);
+ uv__getnameinfo_done(&req->work_req, 0);
+ return req->retcode;
+ }
+}
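+
+/* Usage sketch: as with uv_getaddrinfo(), a NULL callback makes the call
+ * synchronous; the results land in req.host and req.service. Assuming
+ * `addr` holds a valid sockaddr_in:
+ *
+ *   uv_getnameinfo_t req;
+ *   int r = uv_getnameinfo(uv_default_loop(), &req, NULL,
+ *                          (const struct sockaddr*) &addr, 0);
+ *   if (r == 0)
+ *     printf("%s:%s\n", req.host, req.service);
+ */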
diff --git a/Utilities/cmlibuv/src/unix/haiku.c b/Utilities/cmlibuv/src/unix/haiku.c
new file mode 100644
index 0000000000..cf17d836b4
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/haiku.c
@@ -0,0 +1,167 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <FindDirectory.h> /* find_path() */
+#include <OS.h>
+
+
+void uv_loadavg(double avg[3]) {
+ avg[0] = 0;
+ avg[1] = 0;
+ avg[2] = 0;
+}
+
+
+int uv_exepath(char* buffer, size_t* size) {
+ char abspath[B_PATH_NAME_LENGTH];
+ status_t status;
+ ssize_t abspath_len;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ status = find_path(B_APP_IMAGE_SYMBOL, B_FIND_PATH_IMAGE_PATH, NULL, abspath,
+ sizeof(abspath));
+ if (status != B_OK)
+ return UV__ERR(status);
+
+ abspath_len = uv__strscpy(buffer, abspath, *size);
+ *size -= 1;
+ if (abspath_len >= 0 && *size > (size_t)abspath_len)
+ *size = (size_t)abspath_len;
+
+ return 0;
+}
+
+
+uint64_t uv_get_free_memory(void) {
+ status_t status;
+ system_info sinfo;
+
+ status = get_system_info(&sinfo);
+ if (status != B_OK)
+ return 0;
+
+ return (sinfo.max_pages - sinfo.used_pages) * B_PAGE_SIZE;
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ status_t status;
+ system_info sinfo;
+
+ status = get_system_info(&sinfo);
+ if (status != B_OK)
+ return 0;
+
+ return sinfo.max_pages * B_PAGE_SIZE;
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0; /* Memory constraints are unknown. */
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+ area_info area;
+ ssize_t cookie;
+ status_t status;
+ thread_info thread;
+
+ status = get_thread_info(find_thread(NULL), &thread);
+ if (status != B_OK)
+ return UV__ERR(status);
+
+ cookie = 0;
+ *rss = 0;
+ while (get_next_area_info(thread.team, &cookie, &area) == B_OK)
+ *rss += area.ram_size;
+
+ return 0;
+}
+
+
+int uv_uptime(double* uptime) {
+ /* system_time() returns time since booting in microseconds */
+ *uptime = (double)system_time() / 1000000;
+ return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ cpu_topology_node_info* topology_infos;
+ int i;
+ status_t status;
+ system_info system;
+ uint32_t topology_count;
+ uint64_t cpuspeed;
+ uv_cpu_info_t* cpu_info;
+
+ if (cpu_infos == NULL || count == NULL)
+ return UV_EINVAL;
+
+ status = get_cpu_topology_info(NULL, &topology_count);
+ if (status != B_OK)
+ return UV__ERR(status);
+
+ topology_infos = uv__malloc(topology_count * sizeof(*topology_infos));
+ if (topology_infos == NULL)
+ return UV_ENOMEM;
+
+ status = get_cpu_topology_info(topology_infos, &topology_count);
+ if (status != B_OK) {
+ uv__free(topology_infos);
+ return UV__ERR(status);
+ }
+
+ cpuspeed = 0;
+ for (i = 0; i < (int)topology_count; i++) {
+ if (topology_infos[i].type == B_TOPOLOGY_CORE) {
+ cpuspeed = topology_infos[i].data.core.default_frequency;
+ break;
+ }
+ }
+
+ uv__free(topology_infos);
+
+ status = get_system_info(&system);
+ if (status != B_OK)
+ return UV__ERR(status);
+
+ *cpu_infos = uv__calloc(system.cpu_count, sizeof(**cpu_infos));
+ if (*cpu_infos == NULL)
+ return UV_ENOMEM;
+
+ /* CPU time and model are not exposed by Haiku. */
+ cpu_info = *cpu_infos;
+ for (i = 0; i < (int)system.cpu_count; i++) {
+ cpu_info->model = uv__strdup("unknown");
+ cpu_info->speed = (int)(cpuspeed / 1000000);
+ cpu_info++;
+ }
+ *count = system.cpu_count;
+
+ return 0;
+}
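
Every platform backend in this directory fills the same uv_cpu_info_t contract (here with Haiku's topology-derived speed and an "unknown" model string), so the consuming side is uniform across systems. A hedged sketch of a caller, illustrative only:

  #include <uv.h>
  #include <stdio.h>

  int main(void) {
    uv_cpu_info_t* cpus;
    int count;
    int i;

    if (uv_cpu_info(&cpus, &count) != 0)
      return 1;

    for (i = 0; i < count; i++)
      printf("cpu %d: model=%s speed=%d MHz\n",
             i, cpus[i].model, cpus[i].speed);

    /* Releases the model strings and the array itself. */
    uv_free_cpu_info(cpus, count);
    return 0;
  }
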
diff --git a/Utilities/cmlibuv/src/unix/hpux.c b/Utilities/cmlibuv/src/unix/hpux.c
new file mode 100644
index 0000000000..4d3f628b75
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/hpux.c
@@ -0,0 +1,30 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdint.h>
+#include <time.h>
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+ return (uint64_t) gethrtime();
+}
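
gethrtime() already reports monotonic nanoseconds, which is the unit every uv__hrtime() backend must return. A small sketch timing an interval through the public uv_hrtime() wrapper, illustrative only:

  #include <uv.h>
  #include <stdio.h>

  int main(void) {
    uint64_t start;
    uint64_t elapsed_ns;

    start = uv_hrtime();
    /* ... some work ... */
    elapsed_ns = uv_hrtime() - start;

    printf("elapsed: %llu ns\n", (unsigned long long) elapsed_ns);
    return 0;
  }
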
diff --git a/Utilities/cmlibuv/src/unix/hurd.c b/Utilities/cmlibuv/src/unix/hurd.c
new file mode 100644
index 0000000000..d19ea63479
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/hurd.c
@@ -0,0 +1,167 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#define _GNU_SOURCE 1
+
+#include "uv.h"
+#include "internal.h"
+
+#include <hurd.h>
+#include <hurd/process.h>
+#include <mach/task_info.h>
+#include <mach/vm_statistics.h>
+#include <mach/vm_param.h>
+
+#include <inttypes.h>
+#include <stddef.h>
+#include <unistd.h>
+#include <string.h>
+#include <limits.h>
+
+int uv_exepath(char* buffer, size_t* size) {
+ kern_return_t err;
+ /* XXX in current Hurd, strings are char arrays of 1024 elements */
+ string_t exepath;
+ ssize_t copied;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ if (*size - 1 > 0) {
+ /* XXX limited length of buffer in current Hurd, this API will probably
+ * evolve in the future */
+ err = proc_get_exe(getproc(), getpid(), exepath);
+
+ if (err)
+ return UV__ERR(err);
+ }
+
+ copied = uv__strscpy(buffer, exepath, *size);
+
+ /* do not return error on UV_E2BIG failure */
+ *size = copied < 0 ? strlen(buffer) : (size_t) copied;
+
+ return 0;
+}
+
+int uv_resident_set_memory(size_t* rss) {
+ kern_return_t err;
+ struct task_basic_info bi;
+ mach_msg_type_number_t count;
+
+ count = TASK_BASIC_INFO_COUNT;
+ err = task_info(mach_task_self(), TASK_BASIC_INFO,
+ (task_info_t) &bi, &count);
+
+ if (err)
+ return UV__ERR(err);
+
+ *rss = bi.resident_size;
+
+ return 0;
+}
+
+uint64_t uv_get_free_memory(void) {
+ kern_return_t err;
+ struct vm_statistics vmstats;
+
+ err = vm_statistics(mach_task_self(), &vmstats);
+
+ if (err)
+ return 0;
+
+ return vmstats.free_count * vm_page_size;
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ kern_return_t err;
+ host_basic_info_data_t hbi;
+ mach_msg_type_number_t cnt;
+
+ cnt = HOST_BASIC_INFO_COUNT;
+ err = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t) &hbi, &cnt);
+
+ if (err)
+ return 0;
+
+ return hbi.memory_size;
+}
+
+
+int uv_uptime(double* uptime) {
+ char buf[128];
+
+ /* Try /proc/uptime first */
+ if (0 == uv__slurp("/proc/uptime", buf, sizeof(buf)))
+ if (1 == sscanf(buf, "%lf", uptime))
+ return 0;
+
+  /* TODO: reimplement procfs's uptime calculation if /proc is not mounted? */
+
+ return UV__ERR(EIO);
+}
+
+void uv_loadavg(double avg[3]) {
+ char buf[128]; /* Large enough to hold all of /proc/loadavg. */
+
+ if (0 == uv__slurp("/proc/loadavg", buf, sizeof(buf)))
+ if (3 == sscanf(buf, "%lf %lf %lf", &avg[0], &avg[1], &avg[2]))
+ return;
+
+  /* TODO: reimplement procfs's loadavg calculation if /proc is not mounted? */
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ kern_return_t err;
+ host_basic_info_data_t hbi;
+ mach_msg_type_number_t cnt;
+
+ /* Get count of cpus */
+ cnt = HOST_BASIC_INFO_COUNT;
+ err = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t) &hbi, &cnt);
+
+ if (err) {
+ err = UV__ERR(err);
+ goto abort;
+ }
+
+ /* XXX not implemented on the Hurd */
+ *cpu_infos = uv__calloc(hbi.avail_cpus, sizeof(**cpu_infos));
+ if (*cpu_infos == NULL) {
+ err = UV_ENOMEM;
+ goto abort;
+ }
+
+ *count = hbi.avail_cpus;
+
+ return 0;
+
+ abort:
+ *cpu_infos = NULL;
+ *count = 0;
+ return err;
+}
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0; /* Memory constraints are unknown. */
+}
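
uv_uptime() and uv_loadavg() above both depend on the internal uv__slurp() helper, whose definition is not part of this hunk. The sketch below is an assumption about its behavior, reading a small file into buf and NUL-terminating; it is a standalone stand-in, not the actual libuv code:

  #include <sys/types.h>
  #include <errno.h>
  #include <fcntl.h>
  #include <unistd.h>

  static int slurp_sketch(const char* filename, char* buf, size_t len) {
    ssize_t n;
    int fd;

    if (len == 0)
      return -EINVAL;

    do
      fd = open(filename, O_RDONLY);
    while (fd == -1 && errno == EINTR);

    if (fd == -1)
      return -errno;

    do
      n = read(fd, buf, len - 1);  /* leave room for the terminator */
    while (n == -1 && errno == EINTR);

    close(fd);

    if (n < 0)
      return -errno;

    buf[n] = '\0';
    return 0;
  }
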
diff --git a/Utilities/cmlibuv/src/unix/ibmi.c b/Utilities/cmlibuv/src/unix/ibmi.c
new file mode 100644
index 0000000000..580ea1f3a8
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/ibmi.c
@@ -0,0 +1,538 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <net/if.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include <sys/time.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <utmp.h>
+#include <libgen.h>
+
+#include <sys/protosw.h>
+#include <procinfo.h>
+#include <sys/proc.h>
+#include <sys/procfs.h>
+
+#include <ctype.h>
+
+#include <sys/mntctl.h>
+#include <sys/vmount.h>
+#include <limits.h>
+#include <strings.h>
+#include <sys/vnode.h>
+
+#include <as400_protos.h>
+#include <as400_types.h>
+
+char* original_exepath = NULL;
+uv_mutex_t process_title_mutex;
+uv_once_t process_title_mutex_once = UV_ONCE_INIT;
+
+typedef struct {
+ int bytes_available;
+ int bytes_returned;
+ char current_date_and_time[8];
+ char system_name[8];
+ char elapsed_time[6];
+ char restricted_state_flag;
+ char reserved;
+ int percent_processing_unit_used;
+ int jobs_in_system;
+ int percent_permanent_addresses;
+ int percent_temporary_addresses;
+ int system_asp;
+ int percent_system_asp_used;
+ int total_auxiliary_storage;
+ int current_unprotected_storage_used;
+ int maximum_unprotected_storage_used;
+ int percent_db_capability;
+ int main_storage_size;
+ int number_of_partitions;
+ int partition_identifier;
+ int reserved1;
+ int current_processing_capacity;
+ char processor_sharing_attribute;
+ char reserved2[3];
+ int number_of_processors;
+ int active_jobs_in_system;
+ int active_threads_in_system;
+ int maximum_jobs_in_system;
+ int percent_temporary_256mb_segments_used;
+ int percent_temporary_4gb_segments_used;
+ int percent_permanent_256mb_segments_used;
+ int percent_permanent_4gb_segments_used;
+ int percent_current_interactive_performance;
+ int percent_uncapped_cpu_capacity_used;
+ int percent_shared_processor_pool_used;
+ long main_storage_size_long;
+} SSTS0200;
+
+
+typedef struct {
+ char header[208];
+ unsigned char loca_adapter_address[12];
+} LIND0500;
+
+
+typedef struct {
+ int bytes_provided;
+ int bytes_available;
+ char msgid[7];
+} errcode_s;
+
+
+static const unsigned char e2a[256] = {
+ 0, 1, 2, 3, 156, 9, 134, 127, 151, 141, 142, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 157, 133, 8, 135, 24, 25, 146, 143, 28, 29, 30, 31,
+ 128, 129, 130, 131, 132, 10, 23, 27, 136, 137, 138, 139, 140, 5, 6, 7,
+ 144, 145, 22, 147, 148, 149, 150, 4, 152, 153, 154, 155, 20, 21, 158, 26,
+ 32, 160, 161, 162, 163, 164, 165, 166, 167, 168, 91, 46, 60, 40, 43, 33,
+ 38, 169, 170, 171, 172, 173, 174, 175, 176, 177, 93, 36, 42, 41, 59, 94,
+ 45, 47, 178, 179, 180, 181, 182, 183, 184, 185, 124, 44, 37, 95, 62, 63,
+ 186, 187, 188, 189, 190, 191, 192, 193, 194, 96, 58, 35, 64, 39, 61, 34,
+ 195, 97, 98, 99, 100, 101, 102, 103, 104, 105, 196, 197, 198, 199, 200, 201,
+ 202, 106, 107, 108, 109, 110, 111, 112, 113, 114, 203, 204, 205, 206, 207, 208,
+ 209, 126, 115, 116, 117, 118, 119, 120, 121, 122, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231,
+ 123, 65, 66, 67, 68, 69, 70, 71, 72, 73, 232, 233, 234, 235, 236, 237,
+ 125, 74, 75, 76, 77, 78, 79, 80, 81, 82, 238, 239, 240, 241, 242, 243,
+ 92, 159, 83, 84, 85, 86, 87, 88, 89, 90, 244, 245, 246, 247, 248, 249,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 250, 251, 252, 253, 254, 255};
+
+
+static const unsigned char a2e[256] = {
+ 0, 1, 2, 3, 55, 45, 46, 47, 22, 5, 37, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 60, 61, 50, 38, 24, 25, 63, 39, 28, 29, 30, 31,
+ 64, 79, 127, 123, 91, 108, 80, 125, 77, 93, 92, 78, 107, 96, 75, 97,
+ 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 122, 94, 76, 126, 110, 111,
+ 124, 193, 194, 195, 196, 197, 198, 199, 200, 201, 209, 210, 211, 212, 213, 214,
+ 215, 216, 217, 226, 227, 228, 229, 230, 231, 232, 233, 74, 224, 90, 95, 109,
+ 121, 129, 130, 131, 132, 133, 134, 135, 136, 137, 145, 146, 147, 148, 149, 150,
+ 151, 152, 153, 162, 163, 164, 165, 166, 167, 168, 169, 192, 106, 208, 161, 7,
+ 32, 33, 34, 35, 36, 21, 6, 23, 40, 41, 42, 43, 44, 9, 10, 27,
+ 48, 49, 26, 51, 52, 53, 54, 8, 56, 57, 58, 59, 4, 20, 62, 225,
+ 65, 66, 67, 68, 69, 70, 71, 72, 73, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 98, 99, 100, 101, 102, 103, 104, 105, 112, 113, 114, 115, 116, 117,
+ 118, 119, 120, 128, 138, 139, 140, 141, 142, 143, 144, 154, 155, 156, 157, 158,
+ 159, 160, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183,
+ 184, 185, 186, 187, 188, 189, 190, 191, 202, 203, 204, 205, 206, 207, 218, 219,
+ 220, 221, 222, 223, 234, 235, 236, 237, 238, 239, 250, 251, 252, 253, 254, 255};
+
+
+static void iconv_e2a(unsigned char src[], unsigned char dst[], size_t length) {
+ size_t i;
+ for (i = 0; i < length; i++)
+ dst[i] = e2a[src[i]];
+}
+
+
+static void iconv_a2e(const char* src, unsigned char dst[], size_t length) {
+ size_t srclen;
+ size_t i;
+
+ srclen = strlen(src);
+ if (srclen > length)
+ srclen = length;
+ for (i = 0; i < srclen; i++)
+ dst[i] = a2e[src[i]];
+ /* padding the remaining part with spaces */
+ for (; i < length; i++)
+ dst[i] = a2e[' '];
+}
+
+void init_process_title_mutex_once(void) {
+ uv_mutex_init(&process_title_mutex);
+}
+
+static int get_ibmi_system_status(SSTS0200* rcvr) {
+ /* rcvrlen is input parameter 2 to QWCRSSTS */
+ unsigned int rcvrlen = sizeof(*rcvr);
+ unsigned char format[8], reset_status[10];
+
+ /* format is input parameter 3 to QWCRSSTS */
+ iconv_a2e("SSTS0200", format, sizeof(format));
+ /* reset_status is input parameter 4 */
+ iconv_a2e("*NO", reset_status, sizeof(reset_status));
+
+ /* errcode is input parameter 5 to QWCRSSTS */
+ errcode_s errcode;
+
+ /* qwcrssts_pointer is the 16-byte tagged system pointer to QWCRSSTS */
+ ILEpointer __attribute__((aligned(16))) qwcrssts_pointer;
+
+ /* qwcrssts_argv is the array of argument pointers to QWCRSSTS */
+ void* qwcrssts_argv[6];
+
+ /* Set the IBM i pointer to the QSYS/QWCRSSTS *PGM object */
+ int rc = _RSLOBJ2(&qwcrssts_pointer, RSLOBJ_TS_PGM, "QWCRSSTS", "QSYS");
+
+ if (rc != 0)
+ return rc;
+
+ /* initialize the QWCRSSTS returned info structure */
+ memset(rcvr, 0, sizeof(*rcvr));
+
+ /* initialize the QWCRSSTS error code structure */
+ memset(&errcode, 0, sizeof(errcode));
+ errcode.bytes_provided = sizeof(errcode);
+
+ /* initialize the array of argument pointers for the QWCRSSTS API */
+ qwcrssts_argv[0] = rcvr;
+ qwcrssts_argv[1] = &rcvrlen;
+ qwcrssts_argv[2] = &format;
+ qwcrssts_argv[3] = &reset_status;
+ qwcrssts_argv[4] = &errcode;
+ qwcrssts_argv[5] = NULL;
+
+ /* Call the IBM i QWCRSSTS API from PASE */
+ rc = _PGMCALL(&qwcrssts_pointer, qwcrssts_argv, 0);
+
+ return rc;
+}
+
+
+uint64_t uv_get_free_memory(void) {
+ SSTS0200 rcvr;
+
+ if (get_ibmi_system_status(&rcvr))
+ return 0;
+
+ return (uint64_t)rcvr.main_storage_size * 1024ULL;
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ SSTS0200 rcvr;
+
+ if (get_ibmi_system_status(&rcvr))
+ return 0;
+
+ return (uint64_t)rcvr.main_storage_size * 1024ULL;
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0; /* Memory constraints are unknown. */
+}
+
+
+void uv_loadavg(double avg[3]) {
+ SSTS0200 rcvr;
+
+ if (get_ibmi_system_status(&rcvr)) {
+ avg[0] = avg[1] = avg[2] = 0;
+ return;
+ }
+
+  /* The elapsed-time average, in tenths of a percent, during which the
+   * processing units were in use: a raw value of 411 means 41.1%, so
+   * dividing by 1000.0 yields the fraction stored in avg[]. The value
+   * can exceed 100% for an uncapped partition.
+   */
+ double processing_unit_used_percent =
+ rcvr.percent_processing_unit_used / 1000.0;
+
+ avg[0] = avg[1] = avg[2] = processing_unit_used_percent;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+ *rss = 0;
+ return 0;
+}
+
+
+int uv_uptime(double* uptime) {
+ return UV_ENOSYS;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ unsigned int numcpus, idx = 0;
+ uv_cpu_info_t* cpu_info;
+
+ *cpu_infos = NULL;
+ *count = 0;
+
+ numcpus = sysconf(_SC_NPROCESSORS_ONLN);
+
+ *cpu_infos = uv__malloc(numcpus * sizeof(uv_cpu_info_t));
+ if (!*cpu_infos) {
+ return UV_ENOMEM;
+ }
+
+ cpu_info = *cpu_infos;
+ for (idx = 0; idx < numcpus; idx++) {
+ cpu_info->speed = 0;
+ cpu_info->model = uv__strdup("unknown");
+ cpu_info->cpu_times.user = 0;
+ cpu_info->cpu_times.sys = 0;
+ cpu_info->cpu_times.idle = 0;
+ cpu_info->cpu_times.irq = 0;
+ cpu_info->cpu_times.nice = 0;
+ cpu_info++;
+ }
+ *count = numcpus;
+
+ return 0;
+}
+
+
+static int get_ibmi_physical_address(const char* line, char (*phys_addr)[6]) {
+ LIND0500 rcvr;
+ /* rcvrlen is input parameter 2 to QDCRLIND */
+ unsigned int rcvrlen = sizeof(rcvr);
+ unsigned char format[8], line_name[10];
+ unsigned char mac_addr[sizeof(rcvr.loca_adapter_address)];
+ int c[6];
+
+ /* format is input parameter 3 to QDCRLIND */
+ iconv_a2e("LIND0500", format, sizeof(format));
+
+ /* line_name is input parameter 4 to QDCRLIND */
+ iconv_a2e(line, line_name, sizeof(line_name));
+
+ /* err is input parameter 5 to QDCRLIND */
+ errcode_s err;
+
+  /* qdcrlind_pointer is the 16-byte tagged system pointer to QDCRLIND */
+ ILEpointer __attribute__((aligned(16))) qdcrlind_pointer;
+
+  /* qdcrlind_argv is the array of argument pointers to QDCRLIND */
+ void* qdcrlind_argv[6];
+
+ /* Set the IBM i pointer to the QSYS/QDCRLIND *PGM object */
+ int rc = _RSLOBJ2(&qdcrlind_pointer, RSLOBJ_TS_PGM, "QDCRLIND", "QSYS");
+
+ if (rc != 0)
+ return rc;
+
+ /* initialize the QDCRLIND returned info structure */
+ memset(&rcvr, 0, sizeof(rcvr));
+
+ /* initialize the QDCRLIND error code structure */
+ memset(&err, 0, sizeof(err));
+ err.bytes_provided = sizeof(err);
+
+ /* initialize the array of argument pointers for the QDCRLIND API */
+ qdcrlind_argv[0] = &rcvr;
+ qdcrlind_argv[1] = &rcvrlen;
+ qdcrlind_argv[2] = &format;
+ qdcrlind_argv[3] = &line_name;
+ qdcrlind_argv[4] = &err;
+ qdcrlind_argv[5] = NULL;
+
+ /* Call the IBM i QDCRLIND API from PASE */
+ rc = _PGMCALL(&qdcrlind_pointer, qdcrlind_argv, 0);
+ if (rc != 0)
+ return rc;
+
+ if (err.bytes_available > 0) {
+ return -1;
+ }
+
+ /* convert ebcdic loca_adapter_address to ascii first */
+ iconv_e2a(rcvr.loca_adapter_address, mac_addr,
+ sizeof(rcvr.loca_adapter_address));
+
+ /* convert loca_adapter_address(char[12]) to phys_addr(char[6]) */
+  int r = sscanf((const char*) mac_addr, "%02x%02x%02x%02x%02x%02x",
+                 &c[0], &c[1], &c[2], &c[3], &c[4], &c[5]);
+
+ if (r == ARRAY_SIZE(c)) {
+ (*phys_addr)[0] = c[0];
+ (*phys_addr)[1] = c[1];
+ (*phys_addr)[2] = c[2];
+ (*phys_addr)[3] = c[3];
+ (*phys_addr)[4] = c[4];
+ (*phys_addr)[5] = c[5];
+ } else {
+ memset(*phys_addr, 0, sizeof(*phys_addr));
+ rc = -1;
+ }
+ return rc;
+}
+
+
+int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
+ uv_interface_address_t* address;
+ struct ifaddrs_pase *ifap = NULL, *cur;
+ int inet6, r = 0;
+
+ *count = 0;
+ *addresses = NULL;
+
+ if (Qp2getifaddrs(&ifap))
+ return UV_ENOSYS;
+
+  /* First pass: count the entries to size the allocation. */
+ for (cur = ifap; cur; cur = cur->ifa_next) {
+ if (!(cur->ifa_addr->sa_family == AF_INET6 ||
+ cur->ifa_addr->sa_family == AF_INET))
+ continue;
+
+ if (!(cur->ifa_flags & IFF_UP && cur->ifa_flags & IFF_RUNNING))
+ continue;
+
+ (*count)++;
+ }
+
+ if (*count == 0) {
+ Qp2freeifaddrs(ifap);
+ return 0;
+ }
+
+ /* Alloc the return interface structs */
+ *addresses = uv__calloc(*count, sizeof(**addresses));
+ if (*addresses == NULL) {
+ Qp2freeifaddrs(ifap);
+ return UV_ENOMEM;
+ }
+ address = *addresses;
+
+  /* Second pass: fill in the array. */
+ for (cur = ifap; cur; cur = cur->ifa_next) {
+ if (!(cur->ifa_addr->sa_family == AF_INET6 ||
+ cur->ifa_addr->sa_family == AF_INET))
+ continue;
+
+ if (!(cur->ifa_flags & IFF_UP && cur->ifa_flags & IFF_RUNNING))
+ continue;
+
+ address->name = uv__strdup(cur->ifa_name);
+
+ inet6 = (cur->ifa_addr->sa_family == AF_INET6);
+
+ if (inet6) {
+ address->address.address6 = *((struct sockaddr_in6*)cur->ifa_addr);
+ address->netmask.netmask6 = *((struct sockaddr_in6*)cur->ifa_netmask);
+ address->netmask.netmask6.sin6_family = AF_INET6;
+ } else {
+ address->address.address4 = *((struct sockaddr_in*)cur->ifa_addr);
+ address->netmask.netmask4 = *((struct sockaddr_in*)cur->ifa_netmask);
+ address->netmask.netmask4.sin_family = AF_INET;
+ }
+ address->is_internal = cur->ifa_flags & IFF_LOOPBACK ? 1 : 0;
+ if (!address->is_internal) {
+ int rc = -1;
+ size_t name_len = strlen(address->name);
+ /* To get the associated MAC address, we must convert the address to a
+ * line description. Normally, the name field contains the line
+ * description name, but for VLANs it has the VLAN appended with a
+ * period. Since object names can also contain periods and numbers, there
+     * is no way to know if a returned name is for a VLAN or not, e.g.
+ * *LIND ETH1.1 and *LIND ETH1, VLAN 1 both have the same name: ETH1.1
+ *
+ * Instead, we apply the same heuristic used by some of the XPF ioctls:
+ * - names > 10 *must* contain a VLAN
+ * - assume names <= 10 do not contain a VLAN and try directly
+ * - if >10 or QDCRLIND returned an error, try to strip off a VLAN
+ * and try again
+ * - if we still get an error or couldn't find a period, leave the MAC as
+ * 00:00:00:00:00:00
+ */
+ if (name_len <= 10) {
+ /* Assume name does not contain a VLAN ID */
+ rc = get_ibmi_physical_address(address->name, &address->phys_addr);
+ }
+
+ if (name_len > 10 || rc != 0) {
+ /* The interface name must contain a VLAN ID suffix. Attempt to strip
+ * it off so we can get the line description to pass to QDCRLIND.
+ */
+ char* temp_name = uv__strdup(address->name);
+ char* dot = strrchr(temp_name, '.');
+ if (dot != NULL) {
+ *dot = '\0';
+ if (strlen(temp_name) <= 10) {
+ rc = get_ibmi_physical_address(temp_name, &address->phys_addr);
+ }
+ }
+ uv__free(temp_name);
+ }
+ }
+
+ address++;
+ }
+
+ Qp2freeifaddrs(ifap);
+ return r;
+}
+
+
+void uv_free_interface_addresses(uv_interface_address_t* addresses, int count) {
+ int i;
+
+ for (i = 0; i < count; ++i) {
+ uv__free(addresses[i].name);
+ }
+
+ uv__free(addresses);
+}
+
+char** uv_setup_args(int argc, char** argv) {
+ char exepath[UV__PATH_MAX];
+ char* s;
+ size_t size;
+
+ if (argc > 0) {
+ /* Use argv[0] to determine value for uv_exepath(). */
+ size = sizeof(exepath);
+ if (uv__search_path(argv[0], exepath, &size) == 0) {
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+ original_exepath = uv__strdup(exepath);
+ uv_mutex_unlock(&process_title_mutex);
+ }
+ }
+
+ return argv;
+}
+
+int uv_set_process_title(const char* title) {
+ return 0;
+}
+
+int uv_get_process_title(char* buffer, size_t size) {
+ if (buffer == NULL || size == 0)
+ return UV_EINVAL;
+
+ buffer[0] = '\0';
+ return 0;
+}
+
+void uv__process_title_cleanup(void) {
+}
+
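
The QWCRSSTS and QDCRLIND program calls above take fixed-width EBCDIC parameters, which is why iconv_a2e() space-pads its output: a2e[' '] is 0x40, the EBCDIC space. A sketch of that padding behavior, assuming the file-local iconv_a2e() and a2e[] table from ibmi.c above are in scope; illustrative only:

  /* Assumes iconv_a2e() and the a2e[] table defined earlier in ibmi.c. */
  static void build_qwcrssts_params(unsigned char format[8],
                                    unsigned char reset_status[10]) {
    /* "SSTS0200" is exactly 8 chars: the field is filled, no padding. */
    iconv_a2e("SSTS0200", format, 8);

    /* "*NO" is 3 chars: the remaining 7 bytes become EBCDIC spaces (0x40). */
    iconv_a2e("*NO", reset_status, 10);
  }
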
diff --git a/Utilities/cmlibuv/src/unix/internal.h b/Utilities/cmlibuv/src/unix/internal.h
new file mode 100644
index 0000000000..f41ee3cd9a
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/internal.h
@@ -0,0 +1,379 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_UNIX_INTERNAL_H_
+#define UV_UNIX_INTERNAL_H_
+
+#include "uv-common.h"
+
+#include <assert.h>
+#include <limits.h> /* _POSIX_PATH_MAX, PATH_MAX */
+#include <stdlib.h> /* abort */
+#include <string.h> /* strrchr */
+#include <fcntl.h> /* O_CLOEXEC and O_NONBLOCK, if supported. */
+#include <stdio.h>
+#include <errno.h>
+#include <sys/socket.h>
+
+#if defined(__STRICT_ANSI__)
+# define inline __inline
+#endif
+
+#if defined(__linux__)
+# include "linux-syscalls.h"
+#endif /* __linux__ */
+
+#if defined(__MVS__)
+# include "os390-syscalls.h"
+#endif /* __MVS__ */
+
+#if defined(__sun)
+# include <sys/port.h>
+# include <port.h>
+#endif /* __sun */
+
+#if defined(_AIX)
+# define reqevents events
+# define rtnevents revents
+# include <sys/poll.h>
+#else
+# include <poll.h>
+#endif /* _AIX */
+
+#if defined(__APPLE__) && !TARGET_OS_IPHONE
+# include <AvailabilityMacros.h>
+#endif
+
+/*
+ * Define common detection for active Thread Sanitizer
+ * - clang uses __has_feature(thread_sanitizer)
+ * - gcc-7+ uses __SANITIZE_THREAD__
+ */
+#if defined(__has_feature)
+# if __has_feature(thread_sanitizer)
+# define __SANITIZE_THREAD__ 1
+# endif
+#endif
+
+#if defined(PATH_MAX)
+# define UV__PATH_MAX PATH_MAX
+#else
+# define UV__PATH_MAX 8192
+#endif
+
+#if defined(CMAKE_BOOTSTRAP)
+# undef pthread_atfork
+# define pthread_atfork(prepare, parent, child) \
+ uv__pthread_atfork(prepare, parent, child)
+int uv__pthread_atfork(void (*prepare)(void), void (*parent)(void),
+ void (*child)(void));
+# undef pthread_sigmask
+# define pthread_sigmask(how, set, oldset) \
+ uv__pthread_sigmask(how, set, oldset)
+int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset);
+#elif defined(__ANDROID__)
+int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset);
+# ifdef pthread_sigmask
+# undef pthread_sigmask
+# endif
+# define pthread_sigmask(how, set, oldset) uv__pthread_sigmask(how, set, oldset)
+#endif
+
+#define ACCESS_ONCE(type, var) \
+ (*(volatile type*) &(var))
+
+#define ROUND_UP(a, b) \
+ ((a) % (b) ? ((a) + (b)) - ((a) % (b)) : (a))
+
+#define UNREACHABLE() \
+ do { \
+ assert(0 && "unreachable code"); \
+ abort(); \
+ } \
+ while (0)
+
+#define SAVE_ERRNO(block) \
+ do { \
+ int _saved_errno = errno; \
+ do { block; } while (0); \
+ errno = _saved_errno; \
+ } \
+ while (0)
+
+/* The __clang__ and __INTEL_COMPILER checks are superfluous because they
+ * define __GNUC__. They are here to convey to you, dear reader, that these
+ * macros are enabled when compiling with clang or icc.
+ */
+#if defined(__clang__) || \
+ defined(__GNUC__) || \
+ defined(__INTEL_COMPILER)
+# define UV_UNUSED(declaration) __attribute__((unused)) declaration
+#else
+# define UV_UNUSED(declaration) declaration
+#endif
+
+/* Leans on the fact that, on Linux, POLLRDHUP == EPOLLRDHUP. */
+#ifdef POLLRDHUP
+# define UV__POLLRDHUP POLLRDHUP
+#else
+# define UV__POLLRDHUP 0x2000
+#endif
+
+#ifdef POLLPRI
+# define UV__POLLPRI POLLPRI
+#else
+# define UV__POLLPRI 0
+#endif
+
+#if !defined(O_CLOEXEC) && defined(__FreeBSD__)
+/*
+ * It may be that we are just missing `__POSIX_VISIBLE >= 200809`.
+ * Try the fixed constant value and give up if it doesn't work.
+ */
+# define O_CLOEXEC 0x00100000
+#endif
+
+typedef struct uv__stream_queued_fds_s uv__stream_queued_fds_t;
+
+/* loop flags */
+enum {
+ UV_LOOP_BLOCK_SIGPROF = 0x1,
+ UV_LOOP_REAP_CHILDREN = 0x2
+};
+
+/* flags for excluding interface addresses */
+enum {
+ UV__EXCLUDE_IFPHYS,
+ UV__EXCLUDE_IFADDR
+};
+
+typedef enum {
+ UV_CLOCK_PRECISE = 0, /* Use the highest resolution clock available. */
+ UV_CLOCK_FAST = 1 /* Use the fastest clock with <= 1ms granularity. */
+} uv_clocktype_t;
+
+struct uv__stream_queued_fds_s {
+ unsigned int size;
+ unsigned int offset;
+ int fds[1];
+};
+
+
+#if defined(_AIX) || \
+ defined(__APPLE__) || \
+ defined(__DragonFly__) || \
+ defined(__FreeBSD__) || \
+ defined(__FreeBSD_kernel__) || \
+ defined(__linux__) || \
+ defined(__OpenBSD__) || \
+ defined(__NetBSD__)
+#define uv__nonblock uv__nonblock_ioctl
+#define UV__NONBLOCK_IS_IOCTL 1
+#else
+#define uv__nonblock uv__nonblock_fcntl
+#define UV__NONBLOCK_IS_IOCTL 0
+#endif
+
+/* On Linux, uv__nonblock_fcntl() and uv__nonblock_ioctl() do not commute
+ * when O_NDELAY is not equal to O_NONBLOCK. Case in point: linux/sparc32
+ * and linux/sparc64, where O_NDELAY is O_NONBLOCK + another bit.
+ *
+ * Libuv uses uv__nonblock_fcntl() directly sometimes so ensure that it
+ * commutes with uv__nonblock().
+ */
+#if defined(__linux__) && O_NDELAY != O_NONBLOCK
+#undef uv__nonblock
+#define uv__nonblock uv__nonblock_fcntl
+#endif
+
+/* core */
+int uv__cloexec(int fd, int set);
+int uv__nonblock_ioctl(int fd, int set);
+int uv__nonblock_fcntl(int fd, int set);
+int uv__close(int fd); /* preserves errno */
+int uv__close_nocheckstdio(int fd);
+int uv__close_nocancel(int fd);
+int uv__socket(int domain, int type, int protocol);
+ssize_t uv__recvmsg(int fd, struct msghdr *msg, int flags);
+void uv__make_close_pending(uv_handle_t* handle);
+int uv__getiovmax(void);
+
+void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd);
+void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events);
+void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events);
+void uv__io_close(uv_loop_t* loop, uv__io_t* w);
+void uv__io_feed(uv_loop_t* loop, uv__io_t* w);
+int uv__io_active(const uv__io_t* w, unsigned int events);
+int uv__io_check_fd(uv_loop_t* loop, int fd);
+void uv__io_poll(uv_loop_t* loop, int timeout); /* in milliseconds or -1 */
+int uv__io_fork(uv_loop_t* loop);
+int uv__fd_exists(uv_loop_t* loop, int fd);
+
+/* async */
+void uv__async_stop(uv_loop_t* loop);
+int uv__async_fork(uv_loop_t* loop);
+
+
+/* loop */
+void uv__run_idle(uv_loop_t* loop);
+void uv__run_check(uv_loop_t* loop);
+void uv__run_prepare(uv_loop_t* loop);
+
+/* stream */
+void uv__stream_init(uv_loop_t* loop, uv_stream_t* stream,
+ uv_handle_type type);
+int uv__stream_open(uv_stream_t*, int fd, int flags);
+void uv__stream_destroy(uv_stream_t* stream);
+#if defined(__APPLE__)
+int uv__stream_try_select(uv_stream_t* stream, int* fd);
+#endif /* defined(__APPLE__) */
+void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events);
+int uv__accept(int sockfd);
+int uv__dup2_cloexec(int oldfd, int newfd);
+int uv__open_cloexec(const char* path, int flags);
+int uv__slurp(const char* filename, char* buf, size_t len);
+
+/* tcp */
+int uv__tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb);
+int uv__tcp_nodelay(int fd, int on);
+int uv__tcp_keepalive(int fd, int on, unsigned int delay);
+
+/* pipe */
+int uv__pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
+
+/* signal */
+void uv__signal_close(uv_signal_t* handle);
+void uv__signal_global_once_init(void);
+void uv__signal_loop_cleanup(uv_loop_t* loop);
+int uv__signal_loop_fork(uv_loop_t* loop);
+
+/* platform specific */
+uint64_t uv__hrtime(uv_clocktype_t type);
+int uv__kqueue_init(uv_loop_t* loop);
+int uv__epoll_init(uv_loop_t* loop);
+int uv__platform_loop_init(uv_loop_t* loop);
+void uv__platform_loop_delete(uv_loop_t* loop);
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd);
+
+/* various */
+void uv__async_close(uv_async_t* handle);
+void uv__check_close(uv_check_t* handle);
+void uv__fs_event_close(uv_fs_event_t* handle);
+void uv__idle_close(uv_idle_t* handle);
+void uv__pipe_close(uv_pipe_t* handle);
+void uv__poll_close(uv_poll_t* handle);
+void uv__prepare_close(uv_prepare_t* handle);
+void uv__process_close(uv_process_t* handle);
+void uv__stream_close(uv_stream_t* handle);
+void uv__tcp_close(uv_tcp_t* handle);
+size_t uv__thread_stack_size(void);
+void uv__udp_close(uv_udp_t* handle);
+void uv__udp_finish_close(uv_udp_t* handle);
+FILE* uv__open_file(const char* path);
+int uv__getpwuid_r(uv_passwd_t* pwd);
+int uv__search_path(const char* prog, char* buf, size_t* buflen);
+void uv__wait_children(uv_loop_t* loop);
+
+/* random */
+int uv__random_devurandom(void* buf, size_t buflen);
+int uv__random_getrandom(void* buf, size_t buflen);
+int uv__random_getentropy(void* buf, size_t buflen);
+int uv__random_readpath(const char* path, void* buf, size_t buflen);
+int uv__random_sysctl(void* buf, size_t buflen);
+
+#if defined(__APPLE__) && !defined(CMAKE_BOOTSTRAP)
+int uv___stream_fd(const uv_stream_t* handle);
+#define uv__stream_fd(handle) (uv___stream_fd((const uv_stream_t*) (handle)))
+#else
+#define uv__stream_fd(handle) ((handle)->io_watcher.fd)
+#endif /* defined(__APPLE__) */
+
+int uv__make_pipe(int fds[2], int flags);
+
+#if defined(__APPLE__)
+
+int uv__fsevents_init(uv_fs_event_t* handle);
+int uv__fsevents_close(uv_fs_event_t* handle);
+void uv__fsevents_loop_delete(uv_loop_t* loop);
+
+#endif /* defined(__APPLE__) */
+
+UV_UNUSED(static void uv__update_time(uv_loop_t* loop)) {
+ /* Use a fast time source if available. We only need millisecond precision.
+ */
+ loop->time = uv__hrtime(UV_CLOCK_FAST) / 1000000;
+}
+
+UV_UNUSED(static char* uv__basename_r(const char* path)) {
+ char* s;
+
+ s = strrchr(path, '/');
+ if (s == NULL)
+ return (char*) path;
+
+ return s + 1;
+}
+
+#if defined(__linux__)
+int uv__inotify_fork(uv_loop_t* loop, void* old_watchers);
+#endif
+
+typedef int (*uv__peersockfunc)(int, struct sockaddr*, socklen_t*);
+
+int uv__getsockpeername(const uv_handle_t* handle,
+ uv__peersockfunc func,
+ struct sockaddr* name,
+ int* namelen);
+
+#if defined(__linux__) || \
+ defined(__FreeBSD__) || \
+ defined(__FreeBSD_kernel__) || \
+ defined(__DragonFly__)
+#define HAVE_MMSG 1
+struct uv__mmsghdr {
+ struct msghdr msg_hdr;
+ unsigned int msg_len;
+};
+
+int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen);
+int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen);
+#else
+#define HAVE_MMSG 0
+#endif
+
+#if defined(__sun)
+#if !defined(_POSIX_VERSION) || _POSIX_VERSION < 200809L
+size_t strnlen(const char* s, size_t maxlen);
+#endif
+#endif
+
+#if defined(__FreeBSD__)
+ssize_t
+uv__fs_copy_file_range(int fd_in,
+ off_t* off_in,
+ int fd_out,
+ off_t* off_out,
+ size_t len,
+ unsigned int flags);
+#endif
+
+
+#endif /* UV_UNIX_INTERNAL_H_ */
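
Two of the helper macros defined in this header are easy to misread at a glance. A standalone sketch exercising them, with the macro bodies copied verbatim so the example compiles on its own; illustrative only:

  #include <assert.h>
  #include <errno.h>

  #define ROUND_UP(a, b) \
    ((a) % (b) ? ((a) + (b)) - ((a) % (b)) : (a))

  #define SAVE_ERRNO(block) \
    do { \
      int _saved_errno = errno; \
      do { block; } while (0); \
      errno = _saved_errno; \
    } \
    while (0)

  int main(void) {
    assert(ROUND_UP(10, 4) == 12);  /* rounded up to the next multiple of b */
    assert(ROUND_UP(12, 4) == 12);  /* exact multiples pass through */

    errno = EAGAIN;
    SAVE_ERRNO(errno = EINVAL);     /* the block may clobber errno... */
    assert(errno == EAGAIN);        /* ...but the saved value is restored */
    return 0;
  }
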
diff --git a/Utilities/cmlibuv/src/unix/kqueue.c b/Utilities/cmlibuv/src/unix/kqueue.c
new file mode 100644
index 0000000000..5dac76ae75
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/kqueue.c
@@ -0,0 +1,605 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <sys/sysctl.h>
+#include <sys/types.h>
+#include <sys/event.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <time.h>
+
+/*
+ * Defining EV_OOBAND ourselves is required on
+ * - FreeBSD, until at least 11.0
+ * - older versions of Mac OS X
+ *
+ * http://www.boost.org/doc/libs/1_61_0/boost/asio/detail/kqueue_reactor.hpp
+ */
+#ifndef EV_OOBAND
+#define EV_OOBAND EV_FLAG1
+#endif
+
+static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags);
+
+
+int uv__kqueue_init(uv_loop_t* loop) {
+ loop->backend_fd = kqueue();
+ if (loop->backend_fd == -1)
+ return UV__ERR(errno);
+
+ uv__cloexec(loop->backend_fd, 1);
+
+ return 0;
+}
+
+
+#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
+static int uv__has_forked_with_cfrunloop;
+#endif
+
+int uv__io_fork(uv_loop_t* loop) {
+ int err;
+ loop->backend_fd = -1;
+ err = uv__kqueue_init(loop);
+ if (err)
+ return err;
+
+#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
+ if (loop->cf_state != NULL) {
+ /* We cannot start another CFRunloop and/or thread in the child
+ process; CF aborts if you try or if you try to touch the thread
+ at all to kill it. So the best we can do is ignore it from now
+ on. This means we can't watch directories in the same way
+ anymore (like other BSDs). It also means we cannot properly
+ clean up the allocated resources; calling
+ uv__fsevents_loop_delete from uv_loop_close will crash the
+ process. So we sidestep the issue by pretending like we never
+ started it in the first place.
+ */
+ uv__store_relaxed(&uv__has_forked_with_cfrunloop, 1);
+ uv__free(loop->cf_state);
+ loop->cf_state = NULL;
+ }
+#endif /* #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 */
+ return err;
+}
+
+
+int uv__io_check_fd(uv_loop_t* loop, int fd) {
+ struct kevent ev;
+ int rc;
+
+ rc = 0;
+ EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
+ if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
+ rc = UV__ERR(errno);
+
+ EV_SET(&ev, fd, EVFILT_READ, EV_DELETE, 0, 0, 0);
+ if (rc == 0)
+ if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
+ abort();
+
+ return rc;
+}
+
+
+void uv__io_poll(uv_loop_t* loop, int timeout) {
+ struct kevent events[1024];
+ struct kevent* ev;
+ struct timespec spec;
+ unsigned int nevents;
+ unsigned int revents;
+ QUEUE* q;
+ uv__io_t* w;
+ uv_process_t* process;
+ sigset_t* pset;
+ sigset_t set;
+ uint64_t base;
+ uint64_t diff;
+ int have_signals;
+ int filter;
+ int fflags;
+ int count;
+ int nfds;
+ int fd;
+ int op;
+ int i;
+ int user_timeout;
+ int reset_timeout;
+
+ if (loop->nfds == 0) {
+ assert(QUEUE_EMPTY(&loop->watcher_queue));
+ return;
+ }
+
+ nevents = 0;
+
+ while (!QUEUE_EMPTY(&loop->watcher_queue)) {
+ q = QUEUE_HEAD(&loop->watcher_queue);
+ QUEUE_REMOVE(q);
+ QUEUE_INIT(q);
+
+ w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+ assert(w->pevents != 0);
+ assert(w->fd >= 0);
+ assert(w->fd < (int) loop->nwatchers);
+
+ if ((w->events & POLLIN) == 0 && (w->pevents & POLLIN) != 0) {
+ filter = EVFILT_READ;
+ fflags = 0;
+ op = EV_ADD;
+
+ if (w->cb == uv__fs_event) {
+ filter = EVFILT_VNODE;
+ fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
+ | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
+ op = EV_ADD | EV_ONESHOT; /* Stop the event from firing repeatedly. */
+ }
+
+ EV_SET(events + nevents, w->fd, filter, op, fflags, 0, 0);
+
+ if (++nevents == ARRAY_SIZE(events)) {
+ if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
+ abort();
+ nevents = 0;
+ }
+ }
+
+ if ((w->events & POLLOUT) == 0 && (w->pevents & POLLOUT) != 0) {
+ EV_SET(events + nevents, w->fd, EVFILT_WRITE, EV_ADD, 0, 0, 0);
+
+ if (++nevents == ARRAY_SIZE(events)) {
+ if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
+ abort();
+ nevents = 0;
+ }
+ }
+
+ if ((w->events & UV__POLLPRI) == 0 && (w->pevents & UV__POLLPRI) != 0) {
+ EV_SET(events + nevents, w->fd, EV_OOBAND, EV_ADD, 0, 0, 0);
+
+ if (++nevents == ARRAY_SIZE(events)) {
+ if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
+ abort();
+ nevents = 0;
+ }
+ }
+
+ w->events = w->pevents;
+ }
+
+ pset = NULL;
+ if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
+ pset = &set;
+ sigemptyset(pset);
+ sigaddset(pset, SIGPROF);
+ }
+
+ assert(timeout >= -1);
+ base = loop->time;
+ count = 48; /* Benchmarks suggest this gives the best throughput. */
+
+ if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
+ reset_timeout = 1;
+ user_timeout = timeout;
+ timeout = 0;
+ } else {
+ reset_timeout = 0;
+ }
+
+ for (;; nevents = 0) {
+ /* Only need to set the provider_entry_time if timeout != 0. The function
+ * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
+ */
+ if (timeout != 0)
+ uv__metrics_set_provider_entry_time(loop);
+
+ if (timeout != -1) {
+ spec.tv_sec = timeout / 1000;
+ spec.tv_nsec = (timeout % 1000) * 1000000;
+ }
+
+ if (pset != NULL)
+ pthread_sigmask(SIG_BLOCK, pset, NULL);
+
+ nfds = kevent(loop->backend_fd,
+ events,
+ nevents,
+ events,
+ ARRAY_SIZE(events),
+ timeout == -1 ? NULL : &spec);
+
+ if (pset != NULL)
+ pthread_sigmask(SIG_UNBLOCK, pset, NULL);
+
+ /* Update loop->time unconditionally. It's tempting to skip the update when
+ * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
+ * operating system didn't reschedule our process while in the syscall.
+ */
+ SAVE_ERRNO(uv__update_time(loop));
+
+ if (nfds == 0) {
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ if (timeout == -1)
+ continue;
+ if (timeout > 0)
+ goto update_timeout;
+ }
+
+ assert(timeout != -1);
+ return;
+ }
+
+ if (nfds == -1) {
+ if (errno != EINTR)
+ abort();
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (timeout == 0)
+ return;
+
+ if (timeout == -1)
+ continue;
+
+ /* Interrupted by a signal. Update timeout and poll again. */
+ goto update_timeout;
+ }
+
+ have_signals = 0;
+ nevents = 0;
+
+ assert(loop->watchers != NULL);
+ loop->watchers[loop->nwatchers] = (void*) events;
+ loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
+ for (i = 0; i < nfds; i++) {
+ ev = events + i;
+ fd = ev->ident;
+
+ /* Handle kevent NOTE_EXIT results */
+ if (ev->filter == EVFILT_PROC) {
+ QUEUE_FOREACH(q, &loop->process_handles) {
+ process = QUEUE_DATA(q, uv_process_t, queue);
+ if (process->pid == fd) {
+ process->flags |= UV_HANDLE_REAP;
+ loop->flags |= UV_LOOP_REAP_CHILDREN;
+ break;
+ }
+ }
+ nevents++;
+ continue;
+ }
+
+ /* Skip invalidated events, see uv__platform_invalidate_fd */
+ if (fd == -1)
+ continue;
+ w = loop->watchers[fd];
+
+ if (w == NULL) {
+ /* File descriptor that we've stopped watching, disarm it.
+ * TODO: batch up. */
+ struct kevent events[1];
+
+ EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
+ if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
+ if (errno != EBADF && errno != ENOENT)
+ abort();
+
+ continue;
+ }
+
+ if (ev->filter == EVFILT_VNODE) {
+ assert(w->events == POLLIN);
+ assert(w->pevents == POLLIN);
+ uv__metrics_update_idle_time(loop);
+ w->cb(loop, w, ev->fflags); /* XXX always uv__fs_event() */
+ nevents++;
+ continue;
+ }
+
+ revents = 0;
+
+ if (ev->filter == EVFILT_READ) {
+ if (w->pevents & POLLIN) {
+ revents |= POLLIN;
+ w->rcount = ev->data;
+ } else {
+ /* TODO batch up */
+ struct kevent events[1];
+ EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
+ if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
+ if (errno != ENOENT)
+ abort();
+ }
+ if ((ev->flags & EV_EOF) && (w->pevents & UV__POLLRDHUP))
+ revents |= UV__POLLRDHUP;
+ }
+
+ if (ev->filter == EV_OOBAND) {
+ if (w->pevents & UV__POLLPRI) {
+ revents |= UV__POLLPRI;
+ w->rcount = ev->data;
+ } else {
+ /* TODO batch up */
+ struct kevent events[1];
+ EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
+ if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
+ if (errno != ENOENT)
+ abort();
+ }
+ }
+
+ if (ev->filter == EVFILT_WRITE) {
+ if (w->pevents & POLLOUT) {
+ revents |= POLLOUT;
+ w->wcount = ev->data;
+ } else {
+ /* TODO batch up */
+ struct kevent events[1];
+ EV_SET(events + 0, fd, ev->filter, EV_DELETE, 0, 0, 0);
+ if (kevent(loop->backend_fd, events, 1, NULL, 0, NULL))
+ if (errno != ENOENT)
+ abort();
+ }
+ }
+
+ if (ev->flags & EV_ERROR)
+ revents |= POLLERR;
+
+ if (revents == 0)
+ continue;
+
+ /* Run signal watchers last. This also affects child process watchers
+ * because those are implemented in terms of signal watchers.
+ */
+ if (w == &loop->signal_io_watcher) {
+ have_signals = 1;
+ } else {
+ uv__metrics_update_idle_time(loop);
+ w->cb(loop, w, revents);
+ }
+
+ nevents++;
+ }
+
+ if (loop->flags & UV_LOOP_REAP_CHILDREN) {
+ loop->flags &= ~UV_LOOP_REAP_CHILDREN;
+ uv__wait_children(loop);
+ }
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (have_signals != 0) {
+ uv__metrics_update_idle_time(loop);
+ loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
+ }
+
+ loop->watchers[loop->nwatchers] = NULL;
+ loop->watchers[loop->nwatchers + 1] = NULL;
+
+ if (have_signals != 0)
+ return; /* Event loop should cycle now so don't poll again. */
+
+ if (nevents != 0) {
+ if (nfds == ARRAY_SIZE(events) && --count != 0) {
+ /* Poll for more events but don't block this time. */
+ timeout = 0;
+ continue;
+ }
+ return;
+ }
+
+ if (timeout == 0)
+ return;
+
+ if (timeout == -1)
+ continue;
+
+update_timeout:
+ assert(timeout > 0);
+
+ diff = loop->time - base;
+ if (diff >= (uint64_t) timeout)
+ return;
+
+ timeout -= diff;
+ }
+}
+
+
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
+ struct kevent* events;
+ uintptr_t i;
+ uintptr_t nfds;
+
+ assert(loop->watchers != NULL);
+ assert(fd >= 0);
+
+ events = (struct kevent*) loop->watchers[loop->nwatchers];
+ nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
+ if (events == NULL)
+ return;
+
+ /* Invalidate events with same file descriptor */
+ for (i = 0; i < nfds; i++)
+ if ((int) events[i].ident == fd && events[i].filter != EVFILT_PROC)
+ events[i].ident = -1;
+}
+
+
+static void uv__fs_event(uv_loop_t* loop, uv__io_t* w, unsigned int fflags) {
+ uv_fs_event_t* handle;
+ struct kevent ev;
+ int events;
+ const char* path;
+#if defined(F_GETPATH)
+ /* MAXPATHLEN == PATH_MAX but the former is what XNU calls it internally. */
+ char pathbuf[MAXPATHLEN];
+#endif
+
+ handle = container_of(w, uv_fs_event_t, event_watcher);
+
+ if (fflags & (NOTE_ATTRIB | NOTE_EXTEND))
+ events = UV_CHANGE;
+ else
+ events = UV_RENAME;
+
+ path = NULL;
+#if defined(F_GETPATH)
+ /* Also works when the file has been unlinked from the file system. Passing
+ * in the path when the file has been deleted is arguably a little strange
+ * but it's consistent with what the inotify backend does.
+ */
+ if (fcntl(handle->event_watcher.fd, F_GETPATH, pathbuf) == 0)
+ path = uv__basename_r(pathbuf);
+#endif
+ handle->cb(handle, path, events, 0);
+
+ if (handle->event_watcher.fd == -1)
+ return;
+
+ /* Watcher operates in one-shot mode, re-arm it. */
+ fflags = NOTE_ATTRIB | NOTE_WRITE | NOTE_RENAME
+ | NOTE_DELETE | NOTE_EXTEND | NOTE_REVOKE;
+
+ EV_SET(&ev, w->fd, EVFILT_VNODE, EV_ADD | EV_ONESHOT, fflags, 0, 0);
+
+ if (kevent(loop->backend_fd, &ev, 1, NULL, 0, NULL))
+ abort();
+}
+
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+ uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
+ return 0;
+}
+
+
+int uv_fs_event_start(uv_fs_event_t* handle,
+ uv_fs_event_cb cb,
+ const char* path,
+ unsigned int flags) {
+ int fd;
+#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
+ struct stat statbuf;
+#endif
+
+ if (uv__is_active(handle))
+ return UV_EINVAL;
+
+ handle->cb = cb;
+ handle->path = uv__strdup(path);
+ if (handle->path == NULL)
+ return UV_ENOMEM;
+
+ /* TODO open asynchronously - but how do we report back errors? */
+ fd = open(handle->path, O_RDONLY);
+ if (fd == -1) {
+ uv__free(handle->path);
+ handle->path = NULL;
+ return UV__ERR(errno);
+ }
+
+#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
+ /* Nullify field to perform checks later */
+ handle->cf_cb = NULL;
+ handle->realpath = NULL;
+ handle->realpath_len = 0;
+ handle->cf_flags = flags;
+
+ if (fstat(fd, &statbuf))
+ goto fallback;
+ /* FSEvents works only with directories */
+ if (!(statbuf.st_mode & S_IFDIR))
+ goto fallback;
+
+ if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop)) {
+ int r;
+ /* The fallback fd is no longer needed */
+ uv__close_nocheckstdio(fd);
+ handle->event_watcher.fd = -1;
+ r = uv__fsevents_init(handle);
+ if (r == 0) {
+ uv__handle_start(handle);
+ } else {
+ uv__free(handle->path);
+ handle->path = NULL;
+ }
+ return r;
+ }
+fallback:
+#endif /* #if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 */
+
+ uv__handle_start(handle);
+ uv__io_init(&handle->event_watcher, uv__fs_event, fd);
+ uv__io_start(handle->loop, &handle->event_watcher, POLLIN);
+
+ return 0;
+}
+
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+ int r;
+ r = 0;
+
+ if (!uv__is_active(handle))
+ return 0;
+
+ uv__handle_stop(handle);
+
+#if defined(__APPLE__) && MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
+ if (0 == uv__load_relaxed(&uv__has_forked_with_cfrunloop))
+ if (handle->cf_cb != NULL)
+ r = uv__fsevents_close(handle);
+#endif
+
+ if (handle->event_watcher.fd != -1) {
+ uv__io_close(handle->loop, &handle->event_watcher);
+ uv__close(handle->event_watcher.fd);
+ handle->event_watcher.fd = -1;
+ }
+
+ uv__free(handle->path);
+ handle->path = NULL;
+
+ return r;
+}
+
+
+void uv__fs_event_close(uv_fs_event_t* handle) {
+ uv_fs_event_stop(handle);
+}
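
uv__io_check_fd() above probes whether a file descriptor is kqueue-pollable by tentatively adding, then immediately deleting, an EVFILT_READ filter. A standalone sketch of that probe pattern for BSD/macOS, illustrative only; fd 0 (stdin) is an arbitrary choice:

  #include <sys/types.h>
  #include <sys/event.h>
  #include <sys/time.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void) {
    struct kevent ev;
    int pollable;
    int kq;
    int fd;

    kq = kqueue();
    if (kq == -1)
      return 1;

    fd = 0;  /* stdin */
    pollable = 1;

    EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
    if (kevent(kq, &ev, 1, NULL, 0, NULL))
      pollable = 0;  /* kqueue rejected the fd type */

    if (pollable) {
      /* Undo the probe so the fd is not left registered. */
      EV_SET(&ev, fd, EVFILT_READ, EV_DELETE, 0, 0, 0);
      kevent(kq, &ev, 1, NULL, 0, NULL);
    }

    printf("fd %d pollable: %s\n", fd, pollable ? "yes" : "no");
    close(kq);
    return 0;
  }
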
diff --git a/Utilities/cmlibuv/src/unix/linux-core.c b/Utilities/cmlibuv/src/unix/linux-core.c
new file mode 100644
index 0000000000..23a7dafec8
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/linux-core.c
@@ -0,0 +1,834 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/* We lean on the fact that POLL{IN,OUT,ERR,HUP} correspond with their
+ * EPOLL* counterparts. We use the POLL* variants in this file because that
+ * is what libuv uses elsewhere.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <net/if.h>
+#include <sys/epoll.h>
+#include <sys/param.h>
+#include <sys/prctl.h>
+#include <sys/sysinfo.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <time.h>
+
+#define HAVE_IFADDRS_H 1
+
+#if defined(__ANDROID_API__) && __ANDROID_API__ < 24
+# undef HAVE_IFADDRS_H
+#endif
+
+#ifdef __UCLIBC__
+# if __UCLIBC_MAJOR__ == 0 && \
+    (__UCLIBC_MINOR__ < 9 || \
+     (__UCLIBC_MINOR__ == 9 && __UCLIBC_SUBLEVEL__ < 32))
+# undef HAVE_IFADDRS_H
+# endif
+#endif
+
+#ifdef HAVE_IFADDRS_H
+# include <ifaddrs.h>
+# include <sys/socket.h>
+# include <net/ethernet.h>
+# include <netpacket/packet.h>
+#endif /* HAVE_IFADDRS_H */
+
+/* Available from 2.6.32 onwards. */
+#ifndef CLOCK_MONOTONIC_COARSE
+# define CLOCK_MONOTONIC_COARSE 6
+#endif
+
+/* This is rather annoying: CLOCK_BOOTTIME lives in <linux/time.h> but we can't
+ * include that file because it conflicts with <time.h>. We'll just have to
+ * define it ourselves.
+ */
+#ifndef CLOCK_BOOTTIME
+# define CLOCK_BOOTTIME 7
+#endif
+
+static int read_models(unsigned int numcpus, uv_cpu_info_t* ci);
+static int read_times(FILE* statfile_fp,
+ unsigned int numcpus,
+ uv_cpu_info_t* ci);
+static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci);
+static uint64_t read_cpufreq(unsigned int cpunum);
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+
+ loop->inotify_fd = -1;
+ loop->inotify_watchers = NULL;
+
+ return uv__epoll_init(loop);
+}
+
+
+int uv__io_fork(uv_loop_t* loop) {
+ int err;
+ void* old_watchers;
+
+ old_watchers = loop->inotify_watchers;
+
+ uv__close(loop->backend_fd);
+ loop->backend_fd = -1;
+ uv__platform_loop_delete(loop);
+
+ err = uv__platform_loop_init(loop);
+ if (err)
+ return err;
+
+ return uv__inotify_fork(loop, old_watchers);
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+ if (loop->inotify_fd == -1) return;
+ uv__io_stop(loop, &loop->inotify_read_watcher, POLLIN);
+ uv__close(loop->inotify_fd);
+ loop->inotify_fd = -1;
+}
+
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+ static clock_t fast_clock_id = -1;
+ struct timespec t;
+ clock_t clock_id;
+
+ /* Prefer CLOCK_MONOTONIC_COARSE if available but only when it has
+ * millisecond granularity or better. CLOCK_MONOTONIC_COARSE is
+ * serviced entirely from the vDSO, whereas CLOCK_MONOTONIC may
+ * decide to make a costly system call.
+ */
+ /* TODO(bnoordhuis) Use CLOCK_MONOTONIC_COARSE for UV_CLOCK_PRECISE
+ * when it has microsecond granularity or better (unlikely).
+ */
+ clock_id = CLOCK_MONOTONIC;
+ if (type != UV_CLOCK_FAST)
+ goto done;
+
+ clock_id = uv__load_relaxed(&fast_clock_id);
+ if (clock_id != -1)
+ goto done;
+
+ clock_id = CLOCK_MONOTONIC;
+ if (0 == clock_getres(CLOCK_MONOTONIC_COARSE, &t))
+ if (t.tv_nsec <= 1 * 1000 * 1000)
+ clock_id = CLOCK_MONOTONIC_COARSE;
+
+ uv__store_relaxed(&fast_clock_id, clock_id);
+
+done:
+
+ if (clock_gettime(clock_id, &t))
+ return 0; /* Not really possible. */
+
+ return t.tv_sec * (uint64_t) 1e9 + t.tv_nsec;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+ char buf[1024];
+ const char* s;
+ ssize_t n;
+ long val;
+ int fd;
+ int i;
+
+ do
+ fd = open("/proc/self/stat", O_RDONLY);
+ while (fd == -1 && errno == EINTR);
+
+ if (fd == -1)
+ return UV__ERR(errno);
+
+ do
+ n = read(fd, buf, sizeof(buf) - 1);
+ while (n == -1 && errno == EINTR);
+
+ uv__close(fd);
+ if (n == -1)
+ return UV__ERR(errno);
+ buf[n] = '\0';
+
+ s = strchr(buf, ' ');
+ if (s == NULL)
+ goto err;
+
+ s += 1;
+ if (*s != '(')
+ goto err;
+
+ s = strchr(s, ')');
+ if (s == NULL)
+ goto err;
+
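+  /* s now points at the ')' that closes the comm field (field 2). Skip
+   * the next 22 space-separated fields so it lands on field 24, rss,
+   * which proc(5) reports in pages.
+   */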
+ for (i = 1; i <= 22; i++) {
+ s = strchr(s + 1, ' ');
+ if (s == NULL)
+ goto err;
+ }
+
+ errno = 0;
+ val = strtol(s, NULL, 10);
+ if (errno != 0)
+ goto err;
+ if (val < 0)
+ goto err;
+
+ *rss = val * getpagesize();
+ return 0;
+
+err:
+ return UV_EINVAL;
+}
+
+int uv_uptime(double* uptime) {
+ static volatile int no_clock_boottime;
+ char buf[128];
+ struct timespec now;
+ int r;
+
+  /* Try /proc/uptime first, then fall back to clock_gettime(). */
+
+ if (0 == uv__slurp("/proc/uptime", buf, sizeof(buf)))
+ if (1 == sscanf(buf, "%lf", uptime))
+ return 0;
+
+ /* Try CLOCK_BOOTTIME first, fall back to CLOCK_MONOTONIC if not available
+ * (pre-2.6.39 kernels). CLOCK_MONOTONIC doesn't increase when the system
+ * is suspended.
+ */
+ if (no_clock_boottime) {
+retry_clock_gettime:
+    r = clock_gettime(CLOCK_MONOTONIC, &now);
+ }
+ else if ((r = clock_gettime(CLOCK_BOOTTIME, &now)) && errno == EINVAL) {
+ no_clock_boottime = 1;
+ goto retry_clock_gettime;
+ }
+
+ if (r)
+ return UV__ERR(errno);
+
+ *uptime = now.tv_sec;
+ return 0;
+}
+
+
+static int uv__cpu_num(FILE* statfile_fp, unsigned int* numcpus) {
+ unsigned int num;
+ char buf[1024];
+
+ if (!fgets(buf, sizeof(buf), statfile_fp))
+ return UV_EIO;
+
+ num = 0;
+ while (fgets(buf, sizeof(buf), statfile_fp)) {
+ if (strncmp(buf, "cpu", 3))
+ break;
+ num++;
+ }
+
+ if (num == 0)
+ return UV_EIO;
+
+ *numcpus = num;
+ return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ unsigned int numcpus;
+ uv_cpu_info_t* ci;
+ int err;
+ FILE* statfile_fp;
+
+ *cpu_infos = NULL;
+ *count = 0;
+
+ statfile_fp = uv__open_file("/proc/stat");
+ if (statfile_fp == NULL)
+ return UV__ERR(errno);
+
+ err = uv__cpu_num(statfile_fp, &numcpus);
+ if (err < 0)
+ goto out;
+
+ err = UV_ENOMEM;
+ ci = uv__calloc(numcpus, sizeof(*ci));
+ if (ci == NULL)
+ goto out;
+
+ err = read_models(numcpus, ci);
+ if (err == 0)
+ err = read_times(statfile_fp, numcpus, ci);
+
+ if (err) {
+ uv_free_cpu_info(ci, numcpus);
+ goto out;
+ }
+
+ /* read_models() on x86 also reads the CPU speed from /proc/cpuinfo.
+ * We don't check for errors here. Worst case, the field is left zero.
+ */
+ if (ci[0].speed == 0)
+ read_speeds(numcpus, ci);
+
+ *cpu_infos = ci;
+ *count = numcpus;
+ err = 0;
+
+out:
+
+ if (fclose(statfile_fp))
+ if (errno != EINTR && errno != EINPROGRESS)
+ abort();
+
+ return err;
+}
+
+
+static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci) {
+ unsigned int num;
+
+ for (num = 0; num < numcpus; num++)
+ ci[num].speed = read_cpufreq(num) / 1000;
+}
+
+
+/* Also reads the CPU frequency on ppc and x86. The other architectures only
+ * have a BogoMIPS field, which may not be very accurate.
+ *
+ * Note: Simply returns on error, uv_cpu_info() takes care of the cleanup.
+ */
+static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
+#if defined(__PPC__)
+ static const char model_marker[] = "cpu\t\t: ";
+ static const char speed_marker[] = "clock\t\t: ";
+#else
+ static const char model_marker[] = "model name\t: ";
+ static const char speed_marker[] = "cpu MHz\t\t: ";
+#endif
+ const char* inferred_model;
+ unsigned int model_idx;
+ unsigned int speed_idx;
+ unsigned int part_idx;
+ char buf[1024];
+ char* model;
+ FILE* fp;
+ int model_id;
+
+ /* Most are unused on non-ARM, non-MIPS and non-x86 architectures. */
+ (void) &model_marker;
+ (void) &speed_marker;
+ (void) &speed_idx;
+ (void) &part_idx;
+ (void) &model;
+ (void) &buf;
+ (void) &fp;
+ (void) &model_id;
+
+ model_idx = 0;
+ speed_idx = 0;
+ part_idx = 0;
+
+#if defined(__arm__) || \
+ defined(__i386__) || \
+ defined(__mips__) || \
+ defined(__aarch64__) || \
+ defined(__PPC__) || \
+ defined(__x86_64__)
+ fp = uv__open_file("/proc/cpuinfo");
+ if (fp == NULL)
+ return UV__ERR(errno);
+
+ while (fgets(buf, sizeof(buf), fp)) {
+ if (model_idx < numcpus) {
+ if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
+ model = buf + sizeof(model_marker) - 1;
+ model = uv__strndup(model, strlen(model) - 1); /* Strip newline. */
+ if (model == NULL) {
+ fclose(fp);
+ return UV_ENOMEM;
+ }
+ ci[model_idx++].model = model;
+ continue;
+ }
+ }
+#if defined(__arm__) || defined(__mips__) || defined(__aarch64__)
+ if (model_idx < numcpus) {
+#if defined(__arm__)
+ /* Fallback for pre-3.8 kernels. */
+ static const char model_marker[] = "Processor\t: ";
+#elif defined(__aarch64__)
+ static const char part_marker[] = "CPU part\t: ";
+
+ /* Adapted from: https://github.com/karelzak/util-linux */
+ struct vendor_part {
+ const int id;
+ const char* name;
+ };
+
+ static const struct vendor_part arm_chips[] = {
+ { 0x811, "ARM810" },
+ { 0x920, "ARM920" },
+ { 0x922, "ARM922" },
+ { 0x926, "ARM926" },
+ { 0x940, "ARM940" },
+ { 0x946, "ARM946" },
+ { 0x966, "ARM966" },
+ { 0xa20, "ARM1020" },
+ { 0xa22, "ARM1022" },
+ { 0xa26, "ARM1026" },
+ { 0xb02, "ARM11 MPCore" },
+ { 0xb36, "ARM1136" },
+ { 0xb56, "ARM1156" },
+ { 0xb76, "ARM1176" },
+ { 0xc05, "Cortex-A5" },
+ { 0xc07, "Cortex-A7" },
+ { 0xc08, "Cortex-A8" },
+ { 0xc09, "Cortex-A9" },
+ { 0xc0d, "Cortex-A17" }, /* Originally A12 */
+ { 0xc0f, "Cortex-A15" },
+ { 0xc0e, "Cortex-A17" },
+ { 0xc14, "Cortex-R4" },
+ { 0xc15, "Cortex-R5" },
+ { 0xc17, "Cortex-R7" },
+ { 0xc18, "Cortex-R8" },
+ { 0xc20, "Cortex-M0" },
+ { 0xc21, "Cortex-M1" },
+ { 0xc23, "Cortex-M3" },
+ { 0xc24, "Cortex-M4" },
+ { 0xc27, "Cortex-M7" },
+ { 0xc60, "Cortex-M0+" },
+ { 0xd01, "Cortex-A32" },
+ { 0xd03, "Cortex-A53" },
+ { 0xd04, "Cortex-A35" },
+ { 0xd05, "Cortex-A55" },
+ { 0xd06, "Cortex-A65" },
+ { 0xd07, "Cortex-A57" },
+ { 0xd08, "Cortex-A72" },
+ { 0xd09, "Cortex-A73" },
+ { 0xd0a, "Cortex-A75" },
+ { 0xd0b, "Cortex-A76" },
+ { 0xd0c, "Neoverse-N1" },
+ { 0xd0d, "Cortex-A77" },
+ { 0xd0e, "Cortex-A76AE" },
+ { 0xd13, "Cortex-R52" },
+ { 0xd20, "Cortex-M23" },
+ { 0xd21, "Cortex-M33" },
+ { 0xd41, "Cortex-A78" },
+ { 0xd42, "Cortex-A78AE" },
+ { 0xd4a, "Neoverse-E1" },
+ { 0xd4b, "Cortex-A78C" },
+ };
+
+ if (strncmp(buf, part_marker, sizeof(part_marker) - 1) == 0) {
+ model = buf + sizeof(part_marker) - 1;
+
+ errno = 0;
+ model_id = strtol(model, NULL, 16);
+ if ((errno != 0) || model_id < 0) {
+ fclose(fp);
+ return UV_EINVAL;
+ }
+
+ for (part_idx = 0; part_idx < ARRAY_SIZE(arm_chips); part_idx++) {
+ if (model_id == arm_chips[part_idx].id) {
+ model = uv__strdup(arm_chips[part_idx].name);
+ if (model == NULL) {
+ fclose(fp);
+ return UV_ENOMEM;
+ }
+ ci[model_idx++].model = model;
+ break;
+ }
+ }
+ }
+#else /* defined(__mips__) */
+ static const char model_marker[] = "cpu model\t\t: ";
+#endif
+ if (strncmp(buf, model_marker, sizeof(model_marker) - 1) == 0) {
+ model = buf + sizeof(model_marker) - 1;
+ model = uv__strndup(model, strlen(model) - 1); /* Strip newline. */
+ if (model == NULL) {
+ fclose(fp);
+ return UV_ENOMEM;
+ }
+ ci[model_idx++].model = model;
+ continue;
+ }
+ }
+#else /* !__arm__ && !__mips__ && !__aarch64__ */
+ if (speed_idx < numcpus) {
+ if (strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0) {
+ ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1);
+ continue;
+ }
+ }
+#endif /* __arm__ || __mips__ || __aarch64__ */
+ }
+
+ fclose(fp);
+#endif /* __arm__ || __i386__ || __mips__ || __PPC__ || __x86_64__ || __aarch64__ */
+
+ /* Now we want to make sure that all the models contain *something* because
+ * it's not safe to leave them as null. Copy the last entry unless there
+   * isn't one, in which case we simply put "unknown" into everything.
+ */
+ inferred_model = "unknown";
+ if (model_idx > 0)
+ inferred_model = ci[model_idx - 1].model;
+
+ while (model_idx < numcpus) {
+ model = uv__strndup(inferred_model, strlen(inferred_model));
+ if (model == NULL)
+ return UV_ENOMEM;
+ ci[model_idx++].model = model;
+ }
+
+ return 0;
+}
+
+
+static int read_times(FILE* statfile_fp,
+ unsigned int numcpus,
+ uv_cpu_info_t* ci) {
+ struct uv_cpu_times_s ts;
+ unsigned int ticks;
+ unsigned int multiplier;
+ uint64_t user;
+ uint64_t nice;
+ uint64_t sys;
+ uint64_t idle;
+ uint64_t dummy;
+ uint64_t irq;
+ uint64_t num;
+ uint64_t len;
+ char buf[1024];
+
+ ticks = (unsigned int)sysconf(_SC_CLK_TCK);
+ assert(ticks != (unsigned int) -1);
+ assert(ticks != 0);
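+  /* /proc/stat reports times in clock ticks; uv_cpu_times_s wants
+   * milliseconds, so scale by 1000 / (ticks per second).
+   */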
+ multiplier = ((uint64_t)1000L / ticks);
+
+ rewind(statfile_fp);
+
+ if (!fgets(buf, sizeof(buf), statfile_fp))
+ abort();
+
+ num = 0;
+
+ while (fgets(buf, sizeof(buf), statfile_fp)) {
+ if (num >= numcpus)
+ break;
+
+ if (strncmp(buf, "cpu", 3))
+ break;
+
+ /* skip "cpu<num> " marker */
+ {
+ unsigned int n;
+ int r = sscanf(buf, "cpu%u ", &n);
+ assert(r == 1);
+ (void) r; /* silence build warning */
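+      /* sizeof("cpu0") == 5 counts "cpu", one digit and the trailing
+       * space (the NUL stands in for the space); bump len once for each
+       * additional digit in the CPU number.
+       */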
+ for (len = sizeof("cpu0"); n /= 10; len++);
+ }
+
+ /* Line contains user, nice, system, idle, iowait, irq, softirq, steal,
+ * guest, guest_nice but we're only interested in the first four + irq.
+ *
+ * Don't use %*s to skip fields or %ll to read straight into the uint64_t
+ * fields, they're not allowed in C89 mode.
+ */
+ if (6 != sscanf(buf + len,
+ "%" PRIu64 " %" PRIu64 " %" PRIu64
+ "%" PRIu64 " %" PRIu64 " %" PRIu64,
+ &user,
+ &nice,
+ &sys,
+ &idle,
+ &dummy,
+ &irq))
+ abort();
+
+ ts.user = user * multiplier;
+ ts.nice = nice * multiplier;
+ ts.sys = sys * multiplier;
+ ts.idle = idle * multiplier;
+ ts.irq = irq * multiplier;
+ ci[num++].cpu_times = ts;
+ }
+ assert(num == numcpus);
+
+ return 0;
+}
+
+
+static uint64_t read_cpufreq(unsigned int cpunum) {
+ uint64_t val;
+ char buf[1024];
+ FILE* fp;
+
+ snprintf(buf,
+ sizeof(buf),
+ "/sys/devices/system/cpu/cpu%u/cpufreq/scaling_cur_freq",
+ cpunum);
+
+ fp = uv__open_file(buf);
+ if (fp == NULL)
+ return 0;
+
+ if (fscanf(fp, "%" PRIu64, &val) != 1)
+ val = 0;
+
+ fclose(fp);
+
+ return val;
+}
+
+
+#ifdef HAVE_IFADDRS_H
+static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
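+  /* A nonzero return means "skip this entry for the current pass":
+   * link-layer (PF_PACKET) entries are only of interest to the
+   * physical-address pass.
+   */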
+ if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
+ return 1;
+ if (ent->ifa_addr == NULL)
+ return 1;
+ /*
+ * On Linux getifaddrs returns information related to the raw underlying
+ * devices. We're not interested in this information yet.
+ */
+ if (ent->ifa_addr->sa_family == PF_PACKET)
+ return exclude_type;
+ return !exclude_type;
+}
+#endif
+
+int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
+#ifndef HAVE_IFADDRS_H
+ *count = 0;
+ *addresses = NULL;
+ return UV_ENOSYS;
+#else
+ struct ifaddrs *addrs, *ent;
+ uv_interface_address_t* address;
+ int i;
+ struct sockaddr_ll *sll;
+
+ *count = 0;
+ *addresses = NULL;
+
+ if (getifaddrs(&addrs))
+ return UV__ERR(errno);
+
+ /* Count the number of interfaces */
+ for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+ if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
+ continue;
+
+ (*count)++;
+ }
+
+ if (*count == 0) {
+ freeifaddrs(addrs);
+ return 0;
+ }
+
+  /* Make sure the memory is initialized to zero using calloc() */
+ *addresses = uv__calloc(*count, sizeof(**addresses));
+ if (!(*addresses)) {
+ freeifaddrs(addrs);
+ return UV_ENOMEM;
+ }
+
+ address = *addresses;
+
+ for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+ if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFADDR))
+ continue;
+
+ address->name = uv__strdup(ent->ifa_name);
+
+ if (ent->ifa_addr->sa_family == AF_INET6) {
+ address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
+ } else {
+ address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
+ }
+
+ if (ent->ifa_netmask->sa_family == AF_INET6) {
+ address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
+ } else {
+ address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
+ }
+
+ address->is_internal = !!(ent->ifa_flags & IFF_LOOPBACK);
+
+ address++;
+ }
+
+ /* Fill in physical addresses for each interface */
+ for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+ if (uv__ifaddr_exclude(ent, UV__EXCLUDE_IFPHYS))
+ continue;
+
+ address = *addresses;
+
+ for (i = 0; i < (*count); i++) {
+ size_t namelen = strlen(ent->ifa_name);
+      /* Alias interfaces share the same physical address */
+ if (strncmp(address->name, ent->ifa_name, namelen) == 0 &&
+ (address->name[namelen] == 0 || address->name[namelen] == ':')) {
+ sll = (struct sockaddr_ll*)ent->ifa_addr;
+ memcpy(address->phys_addr, sll->sll_addr, sizeof(address->phys_addr));
+ }
+ address++;
+ }
+ }
+
+ freeifaddrs(addrs);
+
+ return 0;
+#endif
+}
+
+
+void uv_free_interface_addresses(uv_interface_address_t* addresses,
+ int count) {
+ int i;
+
+ for (i = 0; i < count; i++) {
+ uv__free(addresses[i].name);
+ }
+
+ uv__free(addresses);
+}
+
+
+void uv__set_process_title(const char* title) {
+#if defined(PR_SET_NAME)
+ prctl(PR_SET_NAME, title); /* Only copies first 16 characters. */
+#endif
+}
+
+
+static uint64_t uv__read_proc_meminfo(const char* what) {
+ uint64_t rc;
+ char* p;
+ char buf[4096]; /* Large enough to hold all of /proc/meminfo. */
+
+ if (uv__slurp("/proc/meminfo", buf, sizeof(buf)))
+ return 0;
+
+ p = strstr(buf, what);
+
+ if (p == NULL)
+ return 0;
+
+ p += strlen(what);
+
+ rc = 0;
+ sscanf(p, "%" PRIu64 " kB", &rc);
+
+ return rc * 1024;
+}
+
+
+uint64_t uv_get_free_memory(void) {
+ struct sysinfo info;
+ uint64_t rc;
+
+ rc = uv__read_proc_meminfo("MemAvailable:");
+
+ if (rc != 0)
+ return rc;
+
+ if (0 == sysinfo(&info))
+ return (uint64_t) info.freeram * info.mem_unit;
+
+ return 0;
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ struct sysinfo info;
+ uint64_t rc;
+
+ rc = uv__read_proc_meminfo("MemTotal:");
+
+ if (rc != 0)
+ return rc;
+
+ if (0 == sysinfo(&info))
+ return (uint64_t) info.totalram * info.mem_unit;
+
+ return 0;
+}
+
+
+static uint64_t uv__read_cgroups_uint64(const char* cgroup, const char* param) {
+ char filename[256];
+ char buf[32]; /* Large enough to hold an encoded uint64_t. */
+ uint64_t rc;
+
+ rc = 0;
+ snprintf(filename, sizeof(filename), "/sys/fs/cgroup/%s/%s", cgroup, param);
+ if (0 == uv__slurp(filename, buf, sizeof(buf)))
+ sscanf(buf, "%" PRIu64, &rc);
+
+ return rc;
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ /*
+ * This might return 0 if there was a problem getting the memory limit from
+ * cgroups. This is OK because a return value of 0 signifies that the memory
+ * limit is unknown.
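+   *
+   * Note that memory.limit_in_bytes is the cgroup v1 interface; on a pure
+   * cgroup v2 host the file does not exist and this likewise returns 0.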
+ */
+ return uv__read_cgroups_uint64("memory", "memory.limit_in_bytes");
+}
+
+
+void uv_loadavg(double avg[3]) {
+ struct sysinfo info;
+ char buf[128]; /* Large enough to hold all of /proc/loadavg. */
+
+ if (0 == uv__slurp("/proc/loadavg", buf, sizeof(buf)))
+ if (3 == sscanf(buf, "%lf %lf %lf", &avg[0], &avg[1], &avg[2]))
+ return;
+
+ if (sysinfo(&info) < 0)
+ return;
+
+ avg[0] = (double) info.loads[0] / 65536.0;
+ avg[1] = (double) info.loads[1] / 65536.0;
+ avg[2] = (double) info.loads[2] / 65536.0;
+}
diff --git a/Utilities/cmlibuv/src/unix/linux-inotify.c b/Utilities/cmlibuv/src/unix/linux-inotify.c
new file mode 100644
index 0000000000..c1bd260e16
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/linux-inotify.c
@@ -0,0 +1,327 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "uv/tree.h"
+#include "internal.h"
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <sys/inotify.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+struct watcher_list {
+ RB_ENTRY(watcher_list) entry;
+ QUEUE watchers;
+ int iterating;
+ char* path;
+ int wd;
+};
+
+struct watcher_root {
+ struct watcher_list* rbh_root;
+};
+#define CAST(p) ((struct watcher_root*)(p))
+
+
+static int compare_watchers(const struct watcher_list* a,
+ const struct watcher_list* b) {
+ if (a->wd < b->wd) return -1;
+ if (a->wd > b->wd) return 1;
+ return 0;
+}
+
+
+RB_GENERATE_STATIC(watcher_root, watcher_list, entry, compare_watchers)
+
+
+static void uv__inotify_read(uv_loop_t* loop,
+ uv__io_t* w,
+ unsigned int revents);
+
+static void maybe_free_watcher_list(struct watcher_list* w,
+ uv_loop_t* loop);
+
+static int init_inotify(uv_loop_t* loop) {
+ int fd;
+
+ if (loop->inotify_fd != -1)
+ return 0;
+
+ fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
+ if (fd < 0)
+ return UV__ERR(errno);
+
+ loop->inotify_fd = fd;
+ uv__io_init(&loop->inotify_read_watcher, uv__inotify_read, loop->inotify_fd);
+ uv__io_start(loop, &loop->inotify_read_watcher, POLLIN);
+
+ return 0;
+}
+
+
+int uv__inotify_fork(uv_loop_t* loop, void* old_watchers) {
+ /* Open the inotify_fd, and re-arm all the inotify watchers. */
+ int err;
+ struct watcher_list* tmp_watcher_list_iter;
+ struct watcher_list* watcher_list;
+ struct watcher_list tmp_watcher_list;
+ QUEUE queue;
+ QUEUE* q;
+ uv_fs_event_t* handle;
+ char* tmp_path;
+
+ if (old_watchers != NULL) {
+ /* We must restore the old watcher list to be able to close items
+ * out of it.
+ */
+ loop->inotify_watchers = old_watchers;
+
+ QUEUE_INIT(&tmp_watcher_list.watchers);
+    /* Note that the queue we use is shared with the uv_fs_event_start()
+     * and uv_fs_event_stop() functions, making QUEUE_FOREACH unsafe to use.
+     * So we use the QUEUE_MOVE trick to iterate safely, and we don't free
+     * the watcher list until we're done iterating; cf. uv__inotify_read.
+ */
+ RB_FOREACH_SAFE(watcher_list, watcher_root,
+ CAST(&old_watchers), tmp_watcher_list_iter) {
+ watcher_list->iterating = 1;
+ QUEUE_MOVE(&watcher_list->watchers, &queue);
+ while (!QUEUE_EMPTY(&queue)) {
+ q = QUEUE_HEAD(&queue);
+ handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
+ /* It's critical to keep a copy of path here, because it
+ * will be set to NULL by stop() and then deallocated by
+ * maybe_free_watcher_list
+ */
+ tmp_path = uv__strdup(handle->path);
+ assert(tmp_path != NULL);
+ QUEUE_REMOVE(q);
+ QUEUE_INSERT_TAIL(&watcher_list->watchers, q);
+ uv_fs_event_stop(handle);
+
+ QUEUE_INSERT_TAIL(&tmp_watcher_list.watchers, &handle->watchers);
+ handle->path = tmp_path;
+ }
+ watcher_list->iterating = 0;
+ maybe_free_watcher_list(watcher_list, loop);
+ }
+
+ QUEUE_MOVE(&tmp_watcher_list.watchers, &queue);
+ while (!QUEUE_EMPTY(&queue)) {
+ q = QUEUE_HEAD(&queue);
+ QUEUE_REMOVE(q);
+ handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
+ tmp_path = handle->path;
+ handle->path = NULL;
+ err = uv_fs_event_start(handle, handle->cb, tmp_path, 0);
+ uv__free(tmp_path);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+
+static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) {
+ struct watcher_list w;
+ w.wd = wd;
+ return RB_FIND(watcher_root, CAST(&loop->inotify_watchers), &w);
+}
+
+static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) {
+  /* If watcher_list->watchers is being iterated over, we can't free it. */
+ if ((!w->iterating) && QUEUE_EMPTY(&w->watchers)) {
+ /* No watchers left for this path. Clean up. */
+ RB_REMOVE(watcher_root, CAST(&loop->inotify_watchers), w);
+ inotify_rm_watch(loop->inotify_fd, w->wd);
+ uv__free(w);
+ }
+}
+
+static void uv__inotify_read(uv_loop_t* loop,
+ uv__io_t* dummy,
+ unsigned int events) {
+ const struct inotify_event* e;
+ struct watcher_list* w;
+ uv_fs_event_t* h;
+ QUEUE queue;
+ QUEUE* q;
+ const char* path;
+ ssize_t size;
+ const char *p;
+ /* needs to be large enough for sizeof(inotify_event) + strlen(path) */
+ char buf[4096];
+
+ for (;;) {
+ do
+ size = read(loop->inotify_fd, buf, sizeof(buf));
+ while (size == -1 && errno == EINTR);
+
+ if (size == -1) {
+ assert(errno == EAGAIN || errno == EWOULDBLOCK);
+ break;
+ }
+
+ assert(size > 0); /* pre-2.6.21 thing, size=0 == read buffer too small */
+
+ /* Now we have one or more inotify_event structs. */
+ for (p = buf; p < buf + size; p += sizeof(*e) + e->len) {
+ e = (const struct inotify_event*) p;
+
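+      /* Collapse the inotify mask onto libuv's two event bits: content
+       * and attribute changes map to UV_CHANGE, everything else (create,
+       * delete, move) to UV_RENAME.
+       */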
+ events = 0;
+ if (e->mask & (IN_ATTRIB|IN_MODIFY))
+ events |= UV_CHANGE;
+ if (e->mask & ~(IN_ATTRIB|IN_MODIFY))
+ events |= UV_RENAME;
+
+ w = find_watcher(loop, e->wd);
+ if (w == NULL)
+ continue; /* Stale event, no watchers left. */
+
+ /* inotify does not return the filename when monitoring a single file
+ * for modifications. Repurpose the filename for API compatibility.
+ * I'm not convinced this is a good thing, maybe it should go.
+ */
+ path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path);
+
+ /* We're about to iterate over the queue and call user's callbacks.
+ * What can go wrong?
+ * A callback could call uv_fs_event_stop()
+ * and the queue can change under our feet.
+ * So, we use QUEUE_MOVE() trick to safely iterate over the queue.
+ * And we don't free the watcher_list until we're done iterating.
+ *
+ * First,
+ * tell uv_fs_event_stop() (that could be called from a user's callback)
+ * not to free watcher_list.
+ */
+ w->iterating = 1;
+ QUEUE_MOVE(&w->watchers, &queue);
+ while (!QUEUE_EMPTY(&queue)) {
+ q = QUEUE_HEAD(&queue);
+ h = QUEUE_DATA(q, uv_fs_event_t, watchers);
+
+ QUEUE_REMOVE(q);
+ QUEUE_INSERT_TAIL(&w->watchers, q);
+
+ h->cb(h, path, events, 0);
+ }
+ /* done iterating, time to (maybe) free empty watcher_list */
+ w->iterating = 0;
+ maybe_free_watcher_list(w, loop);
+ }
+ }
+}
+
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+ uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
+ return 0;
+}
+
+
+int uv_fs_event_start(uv_fs_event_t* handle,
+ uv_fs_event_cb cb,
+ const char* path,
+ unsigned int flags) {
+ struct watcher_list* w;
+ size_t len;
+ int events;
+ int err;
+ int wd;
+
+ if (uv__is_active(handle))
+ return UV_EINVAL;
+
+ err = init_inotify(handle->loop);
+ if (err)
+ return err;
+
+ events = IN_ATTRIB
+ | IN_CREATE
+ | IN_MODIFY
+ | IN_DELETE
+ | IN_DELETE_SELF
+ | IN_MOVE_SELF
+ | IN_MOVED_FROM
+ | IN_MOVED_TO;
+
+ wd = inotify_add_watch(handle->loop->inotify_fd, path, events);
+ if (wd == -1)
+ return UV__ERR(errno);
+
+ w = find_watcher(handle->loop, wd);
+ if (w)
+ goto no_insert;
+
+ len = strlen(path) + 1;
+ w = uv__malloc(sizeof(*w) + len);
+ if (w == NULL)
+ return UV_ENOMEM;
+
+ w->wd = wd;
+ w->path = memcpy(w + 1, path, len);
+ QUEUE_INIT(&w->watchers);
+ w->iterating = 0;
+ RB_INSERT(watcher_root, CAST(&handle->loop->inotify_watchers), w);
+
+no_insert:
+ uv__handle_start(handle);
+ QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers);
+ handle->path = w->path;
+ handle->cb = cb;
+ handle->wd = wd;
+
+ return 0;
+}
+
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+ struct watcher_list* w;
+
+ if (!uv__is_active(handle))
+ return 0;
+
+ w = find_watcher(handle->loop, handle->wd);
+ assert(w != NULL);
+
+ handle->wd = -1;
+ handle->path = NULL;
+ uv__handle_stop(handle);
+ QUEUE_REMOVE(&handle->watchers);
+
+ maybe_free_watcher_list(w, handle->loop);
+
+ return 0;
+}
+
+
+void uv__fs_event_close(uv_fs_event_t* handle) {
+ uv_fs_event_stop(handle);
+}
diff --git a/Utilities/cmlibuv/src/unix/linux-syscalls.c b/Utilities/cmlibuv/src/unix/linux-syscalls.c
new file mode 100644
index 0000000000..5071cd56d1
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/linux-syscalls.c
@@ -0,0 +1,264 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "linux-syscalls.h"
+#include <unistd.h>
+#include <signal.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <errno.h>
+
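+/* Legacy ARM OABI kernels number their syscalls from 0x900000; EABI and
+ * Thumb builds number them from zero.
+ */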
+#if defined(__arm__)
+# if defined(__thumb__) || defined(__ARM_EABI__)
+# define UV_SYSCALL_BASE 0
+# else
+# define UV_SYSCALL_BASE 0x900000
+# endif
+#endif /* __arm__ */
+
+#ifndef __NR_recvmmsg
+# if defined(__x86_64__)
+# define __NR_recvmmsg 299
+# elif defined(__arm__)
+# define __NR_recvmmsg (UV_SYSCALL_BASE + 365)
+# endif
+#endif /* __NR_recvmmsg */
+
+#ifndef __NR_sendmmsg
+# if defined(__x86_64__)
+# define __NR_sendmmsg 307
+# elif defined(__arm__)
+# define __NR_sendmmsg (UV_SYSCALL_BASE + 374)
+# endif
+#endif /* __NR_sendmmsg */
+
+#ifndef __NR_utimensat
+# if defined(__x86_64__)
+# define __NR_utimensat 280
+# elif defined(__i386__)
+# define __NR_utimensat 320
+# elif defined(__arm__)
+# define __NR_utimensat (UV_SYSCALL_BASE + 348)
+# endif
+#endif /* __NR_utimensat */
+
+#ifndef __NR_preadv
+# if defined(__x86_64__)
+# define __NR_preadv 295
+# elif defined(__i386__)
+# define __NR_preadv 333
+# elif defined(__arm__)
+# define __NR_preadv (UV_SYSCALL_BASE + 361)
+# endif
+#endif /* __NR_preadv */
+
+#ifndef __NR_pwritev
+# if defined(__x86_64__)
+# define __NR_pwritev 296
+# elif defined(__i386__)
+# define __NR_pwritev 334
+# elif defined(__arm__)
+# define __NR_pwritev (UV_SYSCALL_BASE + 362)
+# endif
+#endif /* __NR_pwritev */
+
+#ifndef __NR_dup3
+# if defined(__x86_64__)
+# define __NR_dup3 292
+# elif defined(__i386__)
+# define __NR_dup3 330
+# elif defined(__arm__)
+# define __NR_dup3 (UV_SYSCALL_BASE + 358)
+# endif
+#endif /* __NR_dup3 */
+
+#ifndef __NR_copy_file_range
+# if defined(__x86_64__)
+# define __NR_copy_file_range 326
+# elif defined(__i386__)
+# define __NR_copy_file_range 377
+# elif defined(__s390__)
+# define __NR_copy_file_range 375
+# elif defined(__arm__)
+# define __NR_copy_file_range (UV_SYSCALL_BASE + 391)
+# elif defined(__aarch64__)
+# define __NR_copy_file_range 285
+# elif defined(__powerpc__)
+# define __NR_copy_file_range 379
+# elif defined(__arc__)
+# define __NR_copy_file_range 285
+# endif
+#endif /* __NR_copy_file_range */
+
+#ifndef __NR_statx
+# if defined(__x86_64__)
+# define __NR_statx 332
+# elif defined(__i386__)
+# define __NR_statx 383
+# elif defined(__aarch64__)
+# define __NR_statx 397
+# elif defined(__arm__)
+# define __NR_statx (UV_SYSCALL_BASE + 397)
+# elif defined(__ppc__)
+# define __NR_statx 383
+# elif defined(__s390__)
+# define __NR_statx 379
+# endif
+#endif /* __NR_statx */
+
+#ifndef __NR_getrandom
+# if defined(__x86_64__)
+# define __NR_getrandom 318
+# elif defined(__i386__)
+# define __NR_getrandom 355
+# elif defined(__aarch64__)
+# define __NR_getrandom 384
+# elif defined(__arm__)
+# define __NR_getrandom (UV_SYSCALL_BASE + 384)
+# elif defined(__ppc__)
+# define __NR_getrandom 359
+# elif defined(__s390__)
+# define __NR_getrandom 349
+# endif
+#endif /* __NR_getrandom */
+
+struct uv__mmsghdr;
+
+int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
+#if defined(__i386__)
+ unsigned long args[4];
+ int rc;
+
+ args[0] = (unsigned long) fd;
+ args[1] = (unsigned long) mmsg;
+ args[2] = (unsigned long) vlen;
+ args[3] = /* flags */ 0;
+
+ /* socketcall() raises EINVAL when SYS_SENDMMSG is not supported. */
+ rc = syscall(/* __NR_socketcall */ 102, 20 /* SYS_SENDMMSG */, args);
+ if (rc == -1)
+ if (errno == EINVAL)
+ errno = ENOSYS;
+
+ return rc;
+#elif defined(__NR_sendmmsg)
+ return syscall(__NR_sendmmsg, fd, mmsg, vlen, /* flags */ 0);
+#else
+ return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
+#if defined(__i386__)
+ unsigned long args[5];
+ int rc;
+
+ args[0] = (unsigned long) fd;
+ args[1] = (unsigned long) mmsg;
+ args[2] = (unsigned long) vlen;
+ args[3] = /* flags */ 0;
+ args[4] = /* timeout */ 0;
+
+ /* socketcall() raises EINVAL when SYS_RECVMMSG is not supported. */
+ rc = syscall(/* __NR_socketcall */ 102, 19 /* SYS_RECVMMSG */, args);
+ if (rc == -1)
+ if (errno == EINVAL)
+ errno = ENOSYS;
+
+ return rc;
+#elif defined(__NR_recvmmsg)
+ return syscall(__NR_recvmmsg, fd, mmsg, vlen, /* flags */ 0, /* timeout */ 0);
+#else
+ return errno = ENOSYS, -1;
+#endif
+}
+
+
+ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset) {
+#if !defined(__NR_preadv) || defined(__ANDROID_API__) && __ANDROID_API__ < 24
+ return errno = ENOSYS, -1;
+#else
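+  /* The syscall takes the 64-bit offset as two long halves (low, high);
+   * 32-bit kernels recombine them and 64-bit kernels use only the low
+   * word, so this is correct for both.
+   */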
+ return syscall(__NR_preadv, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
+#endif
+}
+
+
+ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset) {
+#if !defined(__NR_pwritev) || defined(__ANDROID_API__) && __ANDROID_API__ < 24
+ return errno = ENOSYS, -1;
+#else
+ return syscall(__NR_pwritev, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
+#endif
+}
+
+
+int uv__dup3(int oldfd, int newfd, int flags) {
+#if !defined(__NR_dup3) || defined(__ANDROID_API__) && __ANDROID_API__ < 21
+ return errno = ENOSYS, -1;
+#else
+ return syscall(__NR_dup3, oldfd, newfd, flags);
+#endif
+}
+
+
+ssize_t
+uv__fs_copy_file_range(int fd_in,
+ off_t* off_in,
+ int fd_out,
+ off_t* off_out,
+ size_t len,
+ unsigned int flags)
+{
+#ifdef __NR_copy_file_range
+ return syscall(__NR_copy_file_range,
+ fd_in,
+ off_in,
+ fd_out,
+ off_out,
+ len,
+ flags);
+#else
+ return errno = ENOSYS, -1;
+#endif
+}
+
+
+int uv__statx(int dirfd,
+ const char* path,
+ int flags,
+ unsigned int mask,
+ struct uv__statx* statxbuf) {
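+  /* The Android API level check likely guards against seccomp policies
+   * that reject syscalls outside the allowed list; fall back to ENOSYS
+   * there.
+   */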
+#if !defined(__NR_statx) || defined(__ANDROID_API__) && __ANDROID_API__ < 30
+ return errno = ENOSYS, -1;
+#else
+ return syscall(__NR_statx, dirfd, path, flags, mask, statxbuf);
+#endif
+}
+
+
+ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags) {
+#if !defined(__NR_getrandom) || defined(__ANDROID_API__) && __ANDROID_API__ < 28
+ return errno = ENOSYS, -1;
+#else
+ return syscall(__NR_getrandom, buf, buflen, flags);
+#endif
+}
diff --git a/Utilities/cmlibuv/src/unix/linux-syscalls.h b/Utilities/cmlibuv/src/unix/linux-syscalls.h
new file mode 100644
index 0000000000..b4d9082d46
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/linux-syscalls.h
@@ -0,0 +1,78 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UV_LINUX_SYSCALL_H_
+#define UV_LINUX_SYSCALL_H_
+
+#include <stdint.h>
+#include <signal.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/socket.h>
+
+struct uv__statx_timestamp {
+ int64_t tv_sec;
+ uint32_t tv_nsec;
+ int32_t unused0;
+};
+
+struct uv__statx {
+ uint32_t stx_mask;
+ uint32_t stx_blksize;
+ uint64_t stx_attributes;
+ uint32_t stx_nlink;
+ uint32_t stx_uid;
+ uint32_t stx_gid;
+ uint16_t stx_mode;
+ uint16_t unused0;
+ uint64_t stx_ino;
+ uint64_t stx_size;
+ uint64_t stx_blocks;
+ uint64_t stx_attributes_mask;
+ struct uv__statx_timestamp stx_atime;
+ struct uv__statx_timestamp stx_btime;
+ struct uv__statx_timestamp stx_ctime;
+ struct uv__statx_timestamp stx_mtime;
+ uint32_t stx_rdev_major;
+ uint32_t stx_rdev_minor;
+ uint32_t stx_dev_major;
+ uint32_t stx_dev_minor;
+ uint64_t unused1[14];
+};
+
+ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset);
+ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset);
+int uv__dup3(int oldfd, int newfd, int flags);
+ssize_t
+uv__fs_copy_file_range(int fd_in,
+ off_t* off_in,
+ int fd_out,
+ off_t* off_out,
+ size_t len,
+ unsigned int flags);
+int uv__statx(int dirfd,
+ const char* path,
+ int flags,
+ unsigned int mask,
+ struct uv__statx* statxbuf);
+ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags);
+
+#endif /* UV_LINUX_SYSCALL_H_ */
diff --git a/Utilities/cmlibuv/src/unix/loop-watcher.c b/Utilities/cmlibuv/src/unix/loop-watcher.c
new file mode 100644
index 0000000000..b8c1c2a710
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/loop-watcher.c
@@ -0,0 +1,68 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#define UV_LOOP_WATCHER_DEFINE(name, type) \
+ int uv_##name##_init(uv_loop_t* loop, uv_##name##_t* handle) { \
+ uv__handle_init(loop, (uv_handle_t*)handle, UV_##type); \
+ handle->name##_cb = NULL; \
+ return 0; \
+ } \
+ \
+ int uv_##name##_start(uv_##name##_t* handle, uv_##name##_cb cb) { \
+ if (uv__is_active(handle)) return 0; \
+ if (cb == NULL) return UV_EINVAL; \
+ QUEUE_INSERT_HEAD(&handle->loop->name##_handles, &handle->queue); \
+ handle->name##_cb = cb; \
+ uv__handle_start(handle); \
+ return 0; \
+ } \
+ \
+ int uv_##name##_stop(uv_##name##_t* handle) { \
+ if (!uv__is_active(handle)) return 0; \
+ QUEUE_REMOVE(&handle->queue); \
+ uv__handle_stop(handle); \
+ return 0; \
+ } \
+ \
+ void uv__run_##name(uv_loop_t* loop) { \
+ uv_##name##_t* h; \
+ QUEUE queue; \
+ QUEUE* q; \
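+    /* Detach the list first: a callback may start or stop handles and    \
+     * mutate the live queue; each handle is re-queued before its         \
+     * callback runs so stopping from inside the callback stays safe.     \
+     */                                                                   \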
+ QUEUE_MOVE(&loop->name##_handles, &queue); \
+ while (!QUEUE_EMPTY(&queue)) { \
+ q = QUEUE_HEAD(&queue); \
+ h = QUEUE_DATA(q, uv_##name##_t, queue); \
+ QUEUE_REMOVE(q); \
+ QUEUE_INSERT_TAIL(&loop->name##_handles, q); \
+ h->name##_cb(h); \
+ } \
+ } \
+ \
+ void uv__##name##_close(uv_##name##_t* handle) { \
+ uv_##name##_stop(handle); \
+ }
+
+UV_LOOP_WATCHER_DEFINE(prepare, PREPARE)
+UV_LOOP_WATCHER_DEFINE(check, CHECK)
+UV_LOOP_WATCHER_DEFINE(idle, IDLE)
diff --git a/Utilities/cmlibuv/src/unix/loop.c b/Utilities/cmlibuv/src/unix/loop.c
new file mode 100644
index 0000000000..a88e71c339
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/loop.c
@@ -0,0 +1,228 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "uv/tree.h"
+#include "internal.h"
+#include "heap-inl.h"
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+int uv_loop_init(uv_loop_t* loop) {
+ uv__loop_internal_fields_t* lfields;
+ void* saved_data;
+ int err;
+
+ saved_data = loop->data;
+ memset(loop, 0, sizeof(*loop));
+ loop->data = saved_data;
+
+ lfields = (uv__loop_internal_fields_t*) uv__calloc(1, sizeof(*lfields));
+ if (lfields == NULL)
+ return UV_ENOMEM;
+ loop->internal_fields = lfields;
+
+ err = uv_mutex_init(&lfields->loop_metrics.lock);
+ if (err)
+ goto fail_metrics_mutex_init;
+
+ heap_init((struct heap*) &loop->timer_heap);
+ QUEUE_INIT(&loop->wq);
+ QUEUE_INIT(&loop->idle_handles);
+ QUEUE_INIT(&loop->async_handles);
+ QUEUE_INIT(&loop->check_handles);
+ QUEUE_INIT(&loop->prepare_handles);
+ QUEUE_INIT(&loop->handle_queue);
+
+ loop->active_handles = 0;
+ loop->active_reqs.count = 0;
+ loop->nfds = 0;
+ loop->watchers = NULL;
+ loop->nwatchers = 0;
+ QUEUE_INIT(&loop->pending_queue);
+ QUEUE_INIT(&loop->watcher_queue);
+
+ loop->closing_handles = NULL;
+ uv__update_time(loop);
+ loop->async_io_watcher.fd = -1;
+ loop->async_wfd = -1;
+ loop->signal_pipefd[0] = -1;
+ loop->signal_pipefd[1] = -1;
+ loop->backend_fd = -1;
+ loop->emfile_fd = -1;
+
+ loop->timer_counter = 0;
+ loop->stop_flag = 0;
+
+ err = uv__platform_loop_init(loop);
+ if (err)
+ goto fail_platform_init;
+
+ uv__signal_global_once_init();
+ err = uv_signal_init(loop, &loop->child_watcher);
+ if (err)
+ goto fail_signal_init;
+
+ uv__handle_unref(&loop->child_watcher);
+ loop->child_watcher.flags |= UV_HANDLE_INTERNAL;
+ QUEUE_INIT(&loop->process_handles);
+
+ err = uv_rwlock_init(&loop->cloexec_lock);
+ if (err)
+ goto fail_rwlock_init;
+
+ err = uv_mutex_init(&loop->wq_mutex);
+ if (err)
+ goto fail_mutex_init;
+
+ err = uv_async_init(loop, &loop->wq_async, uv__work_done);
+ if (err)
+ goto fail_async_init;
+
+ uv__handle_unref(&loop->wq_async);
+ loop->wq_async.flags |= UV_HANDLE_INTERNAL;
+
+ return 0;
+
+fail_async_init:
+ uv_mutex_destroy(&loop->wq_mutex);
+
+fail_mutex_init:
+ uv_rwlock_destroy(&loop->cloexec_lock);
+
+fail_rwlock_init:
+ uv__signal_loop_cleanup(loop);
+
+fail_signal_init:
+ uv__platform_loop_delete(loop);
+
+fail_platform_init:
+ uv_mutex_destroy(&lfields->loop_metrics.lock);
+
+fail_metrics_mutex_init:
+ uv__free(lfields);
+ loop->internal_fields = NULL;
+
+ uv__free(loop->watchers);
+ loop->nwatchers = 0;
+ return err;
+}
+
+
+int uv_loop_fork(uv_loop_t* loop) {
+ int err;
+ unsigned int i;
+ uv__io_t* w;
+
+ err = uv__io_fork(loop);
+ if (err)
+ return err;
+
+ err = uv__async_fork(loop);
+ if (err)
+ return err;
+
+ err = uv__signal_loop_fork(loop);
+ if (err)
+ return err;
+
+ /* Rearm all the watchers that aren't re-queued by the above. */
+ for (i = 0; i < loop->nwatchers; i++) {
+ w = loop->watchers[i];
+ if (w == NULL)
+ continue;
+
+ if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue)) {
+ w->events = 0; /* Force re-registration in uv__io_poll. */
+ QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
+ }
+ }
+
+ return 0;
+}
+
+
+void uv__loop_close(uv_loop_t* loop) {
+ uv__loop_internal_fields_t* lfields;
+
+ uv__signal_loop_cleanup(loop);
+ uv__platform_loop_delete(loop);
+ uv__async_stop(loop);
+
+ if (loop->emfile_fd != -1) {
+ uv__close(loop->emfile_fd);
+ loop->emfile_fd = -1;
+ }
+
+ if (loop->backend_fd != -1) {
+ uv__close(loop->backend_fd);
+ loop->backend_fd = -1;
+ }
+
+ uv_mutex_lock(&loop->wq_mutex);
+ assert(QUEUE_EMPTY(&loop->wq) && "thread pool work queue not empty!");
+ assert(!uv__has_active_reqs(loop));
+ uv_mutex_unlock(&loop->wq_mutex);
+ uv_mutex_destroy(&loop->wq_mutex);
+
+ /*
+ * Note that all thread pool stuff is finished at this point and
+ * it is safe to just destroy rw lock
+ */
+ uv_rwlock_destroy(&loop->cloexec_lock);
+
+#if 0
+ assert(QUEUE_EMPTY(&loop->pending_queue));
+ assert(QUEUE_EMPTY(&loop->watcher_queue));
+ assert(loop->nfds == 0);
+#endif
+
+ uv__free(loop->watchers);
+ loop->watchers = NULL;
+ loop->nwatchers = 0;
+
+ lfields = uv__get_internal_fields(loop);
+ uv_mutex_destroy(&lfields->loop_metrics.lock);
+ uv__free(lfields);
+ loop->internal_fields = NULL;
+}
+
+
+int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap) {
+ uv__loop_internal_fields_t* lfields;
+
+ lfields = uv__get_internal_fields(loop);
+ if (option == UV_METRICS_IDLE_TIME) {
+ lfields->flags |= UV_METRICS_IDLE_TIME;
+ return 0;
+ }
+
+ if (option != UV_LOOP_BLOCK_SIGNAL)
+ return UV_ENOSYS;
+
+ if (va_arg(ap, int) != SIGPROF)
+ return UV_EINVAL;
+
+ loop->flags |= UV_LOOP_BLOCK_SIGPROF;
+ return 0;
+}
diff --git a/Utilities/cmlibuv/src/unix/netbsd.c b/Utilities/cmlibuv/src/unix/netbsd.c
new file mode 100644
index 0000000000..c66333f522
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/netbsd.c
@@ -0,0 +1,259 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <string.h>
+#include <errno.h>
+
+#include <kvm.h>
+#include <paths.h>
+#include <unistd.h>
+#include <time.h>
+#include <stdlib.h>
+#include <fcntl.h>
+
+#include <sys/resource.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <uvm/uvm_extern.h>
+
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+ return uv__kqueue_init(loop);
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+}
+
+
+void uv_loadavg(double avg[3]) {
+ struct loadavg info;
+ size_t size = sizeof(info);
+ int which[] = {CTL_VM, VM_LOADAVG};
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0) == -1) return;
+
+ avg[0] = (double) info.ldavg[0] / info.fscale;
+ avg[1] = (double) info.ldavg[1] / info.fscale;
+ avg[2] = (double) info.ldavg[2] / info.fscale;
+}
+
+
+int uv_exepath(char* buffer, size_t* size) {
+  /* Use an intermediate buffer; retrieving a partial path name does not
+   * work. As of NetBSD-8(beta), the vnode->path translator does not handle
+   * file names longer than 31 characters.
+   */
+ char int_buf[PATH_MAX];
+ size_t int_size;
+ int mib[4];
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC_ARGS;
+ mib[2] = -1;
+ mib[3] = KERN_PROC_PATHNAME;
+ int_size = ARRAY_SIZE(int_buf);
+
+ if (sysctl(mib, 4, int_buf, &int_size, NULL, 0))
+ return UV__ERR(errno);
+
+ /* Copy string from the intermediate buffer to outer one with appropriate
+ * length.
+ */
+ /* TODO(bnoordhuis) Check uv__strscpy() return value. */
+ uv__strscpy(buffer, int_buf, *size);
+
+ /* Set new size. */
+ *size = strlen(buffer);
+
+ return 0;
+}
+
+
+uint64_t uv_get_free_memory(void) {
+ struct uvmexp info;
+ size_t size = sizeof(info);
+ int which[] = {CTL_VM, VM_UVMEXP};
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ return (uint64_t) info.free * sysconf(_SC_PAGESIZE);
+}
+
+
+uint64_t uv_get_total_memory(void) {
+#if defined(HW_PHYSMEM64)
+ uint64_t info;
+ int which[] = {CTL_HW, HW_PHYSMEM64};
+#else
+ unsigned int info;
+ int which[] = {CTL_HW, HW_PHYSMEM};
+#endif
+ size_t size = sizeof(info);
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ return (uint64_t) info;
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0; /* Memory constraints are unknown. */
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+ kvm_t *kd = NULL;
+ struct kinfo_proc2 *kinfo = NULL;
+ pid_t pid;
+ int nprocs;
+ int max_size = sizeof(struct kinfo_proc2);
+ int page_size;
+
+ page_size = getpagesize();
+ pid = getpid();
+
+ kd = kvm_open(NULL, NULL, NULL, KVM_NO_FILES, "kvm_open");
+
+ if (kd == NULL) goto error;
+
+ kinfo = kvm_getproc2(kd, KERN_PROC_PID, pid, max_size, &nprocs);
+ if (kinfo == NULL) goto error;
+
+ *rss = kinfo->p_vm_rssize * page_size;
+
+ kvm_close(kd);
+
+ return 0;
+
+error:
+ if (kd) kvm_close(kd);
+ return UV_EPERM;
+}
+
+
+int uv_uptime(double* uptime) {
+ time_t now;
+ struct timeval info;
+ size_t size = sizeof(info);
+ static int which[] = {CTL_KERN, KERN_BOOTTIME};
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ now = time(NULL);
+
+ *uptime = (double)(now - info.tv_sec);
+ return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK);
+ unsigned int multiplier = ((uint64_t)1000L / ticks);
+ unsigned int cur = 0;
+ uv_cpu_info_t* cpu_info;
+ u_int64_t* cp_times;
+ char model[512];
+ u_int64_t cpuspeed;
+ int numcpus;
+ size_t size;
+ int i;
+
+ size = sizeof(model);
+ if (sysctlbyname("machdep.cpu_brand", &model, &size, NULL, 0) &&
+ sysctlbyname("hw.model", &model, &size, NULL, 0)) {
+ return UV__ERR(errno);
+ }
+
+ size = sizeof(numcpus);
+ if (sysctlbyname("hw.ncpu", &numcpus, &size, NULL, 0))
+ return UV__ERR(errno);
+ *count = numcpus;
+
+ /* Only i386 and amd64 have machdep.tsc_freq */
+ size = sizeof(cpuspeed);
+ if (sysctlbyname("machdep.tsc_freq", &cpuspeed, &size, NULL, 0))
+ cpuspeed = 0;
+
+ size = numcpus * CPUSTATES * sizeof(*cp_times);
+ cp_times = uv__malloc(size);
+ if (cp_times == NULL)
+ return UV_ENOMEM;
+
+ if (sysctlbyname("kern.cp_time", cp_times, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos));
+ if (!(*cpu_infos)) {
+ uv__free(cp_times);
+ uv__free(*cpu_infos);
+ return UV_ENOMEM;
+ }
+
+ for (i = 0; i < numcpus; i++) {
+ cpu_info = &(*cpu_infos)[i];
+ cpu_info->cpu_times.user = (uint64_t)(cp_times[CP_USER+cur]) * multiplier;
+ cpu_info->cpu_times.nice = (uint64_t)(cp_times[CP_NICE+cur]) * multiplier;
+ cpu_info->cpu_times.sys = (uint64_t)(cp_times[CP_SYS+cur]) * multiplier;
+ cpu_info->cpu_times.idle = (uint64_t)(cp_times[CP_IDLE+cur]) * multiplier;
+ cpu_info->cpu_times.irq = (uint64_t)(cp_times[CP_INTR+cur]) * multiplier;
+ cpu_info->model = uv__strdup(model);
+ cpu_info->speed = (int)(cpuspeed/(uint64_t) 1e6);
+ cur += CPUSTATES;
+ }
+ uv__free(cp_times);
+ return 0;
+}
+
+int uv__random_sysctl(void* buf, size_t len) {
+ static int name[] = {CTL_KERN, KERN_ARND};
+ size_t count, req;
+ unsigned char* p;
+
+ p = buf;
+ while (len) {
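+    /* KERN_ARND caps how many random bytes it returns per request on
+     * some kernels, hence the 32-byte chunks.
+     */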
+ req = len < 32 ? len : 32;
+ count = req;
+
+ if (sysctl(name, ARRAY_SIZE(name), p, &count, NULL, 0) == -1)
+ return UV__ERR(errno);
+
+ if (count != req)
+ return UV_EIO; /* Can't happen. */
+
+ p += count;
+ len -= count;
+ }
+
+ return 0;
+}
diff --git a/Utilities/cmlibuv/src/unix/no-fsevents.c b/Utilities/cmlibuv/src/unix/no-fsevents.c
new file mode 100644
index 0000000000..158643af1e
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/no-fsevents.c
@@ -0,0 +1,42 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <errno.h>
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+ return UV_ENOSYS;
+}
+
+int uv_fs_event_start(uv_fs_event_t* handle, uv_fs_event_cb cb,
+ const char* filename, unsigned int flags) {
+ return UV_ENOSYS;
+}
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+ return UV_ENOSYS;
+}
+
+void uv__fs_event_close(uv_fs_event_t* handle) {
+ UNREACHABLE();
+}
diff --git a/Utilities/cmlibuv/src/unix/no-proctitle.c b/Utilities/cmlibuv/src/unix/no-proctitle.c
new file mode 100644
index 0000000000..32aa0af1f9
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/no-proctitle.c
@@ -0,0 +1,45 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <errno.h>
+#include <stddef.h>
+
+char** uv_setup_args(int argc, char** argv) {
+ return argv;
+}
+
+void uv__process_title_cleanup(void) {
+}
+
+int uv_set_process_title(const char* title) {
+ return 0;
+}
+
+int uv_get_process_title(char* buffer, size_t size) {
+ if (buffer == NULL || size == 0)
+ return UV_EINVAL;
+
+ buffer[0] = '\0';
+ return 0;
+}
diff --git a/Utilities/cmlibuv/src/unix/openbsd.c b/Utilities/cmlibuv/src/unix/openbsd.c
new file mode 100644
index 0000000000..f32a94df38
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/openbsd.c
@@ -0,0 +1,240 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/resource.h>
+#include <sys/sched.h>
+#include <sys/time.h>
+#include <sys/sysctl.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <paths.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+ return uv__kqueue_init(loop);
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+}
+
+
+void uv_loadavg(double avg[3]) {
+ struct loadavg info;
+ size_t size = sizeof(info);
+ int which[] = {CTL_VM, VM_LOADAVG};
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0) < 0) return;
+
+ avg[0] = (double) info.ldavg[0] / info.fscale;
+ avg[1] = (double) info.ldavg[1] / info.fscale;
+ avg[2] = (double) info.ldavg[2] / info.fscale;
+}
+
+
+int uv_exepath(char* buffer, size_t* size) {
+ int mib[4];
+ char **argsbuf = NULL;
+ size_t argsbuf_size = 100U;
+ size_t exepath_size;
+ pid_t mypid;
+ int err;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ mypid = getpid();
+ for (;;) {
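+    /* The required buffer size is not known up front; keep doubling it
+     * until the sysctl stops failing with ENOMEM.
+     */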
+ err = UV_ENOMEM;
+ argsbuf = uv__reallocf(argsbuf, argsbuf_size);
+ if (argsbuf == NULL)
+ goto out;
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC_ARGS;
+ mib[2] = mypid;
+ mib[3] = KERN_PROC_ARGV;
+ if (sysctl(mib, ARRAY_SIZE(mib), argsbuf, &argsbuf_size, NULL, 0) == 0) {
+ break;
+ }
+ if (errno != ENOMEM) {
+ err = UV__ERR(errno);
+ goto out;
+ }
+ argsbuf_size *= 2U;
+ }
+
+ if (argsbuf[0] == NULL) {
+ err = UV_EINVAL; /* FIXME(bnoordhuis) More appropriate error. */
+ goto out;
+ }
+
+ *size -= 1;
+ exepath_size = strlen(argsbuf[0]);
+ if (*size > exepath_size)
+ *size = exepath_size;
+
+ memcpy(buffer, argsbuf[0], *size);
+ buffer[*size] = '\0';
+ err = 0;
+
+out:
+ uv__free(argsbuf);
+
+ return err;
+}
+
+
+uint64_t uv_get_free_memory(void) {
+ struct uvmexp info;
+ size_t size = sizeof(info);
+ int which[] = {CTL_VM, VM_UVMEXP};
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ return (uint64_t) info.free * sysconf(_SC_PAGESIZE);
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ uint64_t info;
+ int which[] = {CTL_HW, HW_PHYSMEM64};
+ size_t size = sizeof(info);
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ return (uint64_t) info;
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0; /* Memory constraints are unknown. */
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+ struct kinfo_proc kinfo;
+ size_t page_size = getpagesize();
+ size_t size = sizeof(struct kinfo_proc);
+ int mib[6];
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC;
+ mib[2] = KERN_PROC_PID;
+ mib[3] = getpid();
+ mib[4] = sizeof(struct kinfo_proc);
+ mib[5] = 1;
+
+ if (sysctl(mib, ARRAY_SIZE(mib), &kinfo, &size, NULL, 0) < 0)
+ return UV__ERR(errno);
+
+ *rss = kinfo.p_vm_rssize * page_size;
+ return 0;
+}
+
+
+int uv_uptime(double* uptime) {
+ time_t now;
+ struct timeval info;
+ size_t size = sizeof(info);
+ static int which[] = {CTL_KERN, KERN_BOOTTIME};
+
+ if (sysctl(which, ARRAY_SIZE(which), &info, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ now = time(NULL);
+
+ *uptime = (double)(now - info.tv_sec);
+ return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+  unsigned int ticks = (unsigned int)sysconf(_SC_CLK_TCK);
+  unsigned int multiplier = ((uint64_t)1000L / ticks);
+  unsigned int cpuspeed;
+ uint64_t info[CPUSTATES];
+ char model[512];
+ int numcpus = 1;
+  int which[] = {CTL_HW, HW_MODEL};
+  int percpu[] = {CTL_KERN, KERN_CPTIME2, 0};
+ size_t size;
+ int i, j;
+ uv_cpu_info_t* cpu_info;
+
+ size = sizeof(model);
+ if (sysctl(which, ARRAY_SIZE(which), &model, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ which[1] = HW_NCPUONLINE;
+ size = sizeof(numcpus);
+ if (sysctl(which, ARRAY_SIZE(which), &numcpus, &size, NULL, 0))
+ return UV__ERR(errno);
+
+ *cpu_infos = uv__malloc(numcpus * sizeof(**cpu_infos));
+ if (!(*cpu_infos))
+ return UV_ENOMEM;
+
+ i = 0;
+ *count = numcpus;
+
+ which[1] = HW_CPUSPEED;
+ size = sizeof(cpuspeed);
+ if (sysctl(which, ARRAY_SIZE(which), &cpuspeed, &size, NULL, 0))
+ goto error;
+
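+  /* The kernel reports per-CPU times in clock ticks; scaling by 'multiplier'
+   * converts them to the milliseconds that uv_cpu_info_t reports.
+   */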
+ size = sizeof(info);
+ for (i = 0; i < numcpus; i++) {
+ percpu[2] = i;
+ if (sysctl(percpu, ARRAY_SIZE(percpu), &info, &size, NULL, 0))
+ goto error;
+
+ cpu_info = &(*cpu_infos)[i];
+
+ cpu_info->cpu_times.user = (uint64_t)(info[CP_USER]) * multiplier;
+ cpu_info->cpu_times.nice = (uint64_t)(info[CP_NICE]) * multiplier;
+ cpu_info->cpu_times.sys = (uint64_t)(info[CP_SYS]) * multiplier;
+ cpu_info->cpu_times.idle = (uint64_t)(info[CP_IDLE]) * multiplier;
+ cpu_info->cpu_times.irq = (uint64_t)(info[CP_INTR]) * multiplier;
+
+ cpu_info->model = uv__strdup(model);
+ cpu_info->speed = cpuspeed;
+ }
+
+ return 0;
+
+error:
+ *count = 0;
+ for (j = 0; j < i; j++)
+ uv__free((*cpu_infos)[j].model);
+
+ uv__free(*cpu_infos);
+ *cpu_infos = NULL;
+ return UV__ERR(errno);
+}
diff --git a/Utilities/cmlibuv/src/unix/os390-proctitle.c b/Utilities/cmlibuv/src/unix/os390-proctitle.c
new file mode 100644
index 0000000000..ccda97c9ac
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/os390-proctitle.c
@@ -0,0 +1,136 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+static uv_mutex_t process_title_mutex;
+static uv_once_t process_title_mutex_once = UV_ONCE_INIT;
+static char* process_title = NULL;
+static void* args_mem = NULL;
+
+
+static void init_process_title_mutex_once(void) {
+ uv_mutex_init(&process_title_mutex);
+}
+
+
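+/* Make a deep copy of argv. The copy is what libuv hands back to the
+ * application, and its presence (args_mem != NULL) is the signal that
+ * uv_setup_args() ran, which the title functions below require.
+ */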
+char** uv_setup_args(int argc, char** argv) {
+ char** new_argv;
+ size_t size;
+ char* s;
+ int i;
+
+ if (argc <= 0)
+ return argv;
+
+ /* Calculate how much memory we need for the argv strings. */
+ size = 0;
+ for (i = 0; i < argc; i++)
+ size += strlen(argv[i]) + 1;
+
+ /* Add space for the argv pointers. */
+ size += (argc + 1) * sizeof(char*);
+
+ new_argv = uv__malloc(size);
+ if (new_argv == NULL)
+ return argv;
+
+ /* Copy over the strings and set up the pointer table. */
+ s = (char*) &new_argv[argc + 1];
+ for (i = 0; i < argc; i++) {
+ size = strlen(argv[i]) + 1;
+ memcpy(s, argv[i], size);
+ new_argv[i] = s;
+ s += size;
+ }
+ new_argv[i] = NULL;
+
+ args_mem = new_argv;
+ process_title = uv__strdup(argv[0]);
+
+ return new_argv;
+}
+
+
+int uv_set_process_title(const char* title) {
+ char* new_title;
+
+ /* If uv_setup_args wasn't called or failed, we can't continue. */
+ if (args_mem == NULL)
+ return UV_ENOBUFS;
+
+  /* We cannot free this pointer when libuv shuts down because
+   * the process may still be using it.
+   */
+ new_title = uv__strdup(title);
+ if (new_title == NULL)
+ return UV_ENOMEM;
+
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
+ if (process_title != NULL)
+ uv__free(process_title);
+
+ process_title = new_title;
+
+ uv_mutex_unlock(&process_title_mutex);
+
+ return 0;
+}
+
+
+int uv_get_process_title(char* buffer, size_t size) {
+ size_t len;
+
+ if (buffer == NULL || size == 0)
+ return UV_EINVAL;
+
+ /* If uv_setup_args wasn't called or failed, we can't continue. */
+ if (args_mem == NULL || process_title == NULL)
+ return UV_ENOBUFS;
+
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
+ len = strlen(process_title);
+
+ if (size <= len) {
+ uv_mutex_unlock(&process_title_mutex);
+ return UV_ENOBUFS;
+ }
+
+ strcpy(buffer, process_title);
+
+ uv_mutex_unlock(&process_title_mutex);
+
+ return 0;
+}
+
+
+void uv__process_title_cleanup(void) {
+ uv__free(args_mem); /* Keep valgrind happy. */
+ args_mem = NULL;
+}
diff --git a/Utilities/cmlibuv/src/unix/os390-syscalls.c b/Utilities/cmlibuv/src/unix/os390-syscalls.c
new file mode 100644
index 0000000000..5861aaaa20
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/os390-syscalls.c
@@ -0,0 +1,536 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+
+#include "os390-syscalls.h"
+#include <errno.h>
+#include <stdlib.h>
+#include <search.h>
+#include <termios.h>
+#include <sys/msg.h>
+
+static QUEUE global_epoll_queue;
+static uv_mutex_t global_epoll_lock;
+static uv_once_t once = UV_ONCE_INIT;
+
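+/* Minimal scandir() replacement for z/OS: collect the entries that pass the
+ * caller-supplied filter, sort them with the caller-supplied comparator, and
+ * hand ownership of the array and its entries to the caller, as with POSIX
+ * scandir().
+ */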
+int scandir(const char* maindir, struct dirent*** namelist,
+ int (*filter)(const struct dirent*),
+ int (*compar)(const struct dirent**,
+ const struct dirent **)) {
+ struct dirent** nl;
+ struct dirent** nl_copy;
+ struct dirent* dirent;
+ unsigned count;
+ size_t allocated;
+ DIR* mdir;
+
+ nl = NULL;
+ count = 0;
+ allocated = 0;
+ mdir = opendir(maindir);
+ if (!mdir)
+ return -1;
+
+ for (;;) {
+ dirent = readdir(mdir);
+ if (!dirent)
+ break;
+ if (!filter || filter(dirent)) {
+ struct dirent* copy;
+ copy = uv__malloc(sizeof(*copy));
+ if (!copy)
+ goto error;
+ memcpy(copy, dirent, sizeof(*copy));
+
+ nl_copy = uv__realloc(nl, sizeof(*copy) * (count + 1));
+ if (nl_copy == NULL) {
+ uv__free(copy);
+ goto error;
+ }
+
+ nl = nl_copy;
+ nl[count++] = copy;
+ }
+ }
+
+ qsort(nl, count, sizeof(struct dirent *),
+ (int (*)(const void *, const void *)) compar);
+
+ closedir(mdir);
+
+ *namelist = nl;
+ return count;
+
+error:
+ while (count > 0) {
+ dirent = nl[--count];
+ uv__free(dirent);
+ }
+ uv__free(nl);
+ closedir(mdir);
+ errno = ENOMEM;
+ return -1;
+}
+
+
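+/* Round 'val' up to the next power of two by smearing its highest set bit
+ * into every lower bit position and then adding one.
+ */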
+static unsigned int next_power_of_two(unsigned int val) {
+ val -= 1;
+ val |= val >> 1;
+ val |= val >> 2;
+ val |= val >> 4;
+ val |= val >> 8;
+ val |= val >> 16;
+ val += 1;
+ return val;
+}
+
+
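+/* Grow the pollfd array to at least 'len' entries, rounded up to a power of
+ * two. The array is indexed by file descriptor and its last slot is always
+ * reserved for the message queue, so that entry is carried over to the end
+ * of the resized array.
+ */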
+static void maybe_resize(uv__os390_epoll* lst, unsigned int len) {
+ unsigned int newsize;
+ unsigned int i;
+ struct pollfd* newlst;
+ struct pollfd event;
+
+ if (len <= lst->size)
+ return;
+
+ if (lst->size == 0)
+ event.fd = -1;
+ else {
+ /* Extract the message queue at the end. */
+ event = lst->items[lst->size - 1];
+ lst->items[lst->size - 1].fd = -1;
+ }
+
+ newsize = next_power_of_two(len);
+ newlst = uv__reallocf(lst->items, newsize * sizeof(lst->items[0]));
+
+ if (newlst == NULL)
+ abort();
+ for (i = lst->size; i < newsize; ++i)
+ newlst[i].fd = -1;
+
+ /* Restore the message queue at the end */
+ newlst[newsize - 1] = event;
+
+ lst->items = newlst;
+ lst->size = newsize;
+}
+
+
+void uv__os390_cleanup(void) {
+ msgctl(uv_backend_fd(uv_default_loop()), IPC_RMID, NULL);
+}
+
+
+static void init_message_queue(uv__os390_epoll* lst) {
+ struct {
+ long int header;
+ char body;
+ } msg;
+
+ /* initialize message queue */
+ lst->msg_queue = msgget(IPC_PRIVATE, 0600 | IPC_CREAT);
+ if (lst->msg_queue == -1)
+ abort();
+
+ /*
+ On z/OS, the message queue will be affiliated with the process only
+ when a send is performed on it. Once this is done, the system
+ can be queried for all message queues belonging to our process id.
+ */
+ msg.header = 1;
+ if (msgsnd(lst->msg_queue, &msg, sizeof(msg.body), 0) != 0)
+ abort();
+
+ /* Clean up the dummy message sent above */
+ if (msgrcv(lst->msg_queue, &msg, sizeof(msg.body), 0, 0) != sizeof(msg.body))
+ abort();
+}
+
+
+static void before_fork(void) {
+ uv_mutex_lock(&global_epoll_lock);
+}
+
+
+static void after_fork(void) {
+ uv_mutex_unlock(&global_epoll_lock);
+}
+
+
+static void child_fork(void) {
+ QUEUE* q;
+ uv_once_t child_once = UV_ONCE_INIT;
+
+ /* reset once */
+ memcpy(&once, &child_once, sizeof(child_once));
+
+ /* reset epoll list */
+ while (!QUEUE_EMPTY(&global_epoll_queue)) {
+ uv__os390_epoll* lst;
+ q = QUEUE_HEAD(&global_epoll_queue);
+ QUEUE_REMOVE(q);
+ lst = QUEUE_DATA(q, uv__os390_epoll, member);
+ uv__free(lst->items);
+ lst->items = NULL;
+ lst->size = 0;
+ }
+
+ uv_mutex_unlock(&global_epoll_lock);
+ uv_mutex_destroy(&global_epoll_lock);
+}
+
+
+static void epoll_init(void) {
+ QUEUE_INIT(&global_epoll_queue);
+ if (uv_mutex_init(&global_epoll_lock))
+ abort();
+
+ if (pthread_atfork(&before_fork, &after_fork, &child_fork))
+ abort();
+}
+
+
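+/* User-space emulation of Linux epoll on top of z/OS poll(). Each instance
+ * owns a pollfd array indexed by file descriptor plus a message queue (kept
+ * in the final slot) that carries file-change notifications.
+ */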
+uv__os390_epoll* epoll_create1(int flags) {
+ uv__os390_epoll* lst;
+
+ lst = uv__malloc(sizeof(*lst));
+ if (lst != NULL) {
+ /* initialize list */
+ lst->size = 0;
+ lst->items = NULL;
+ init_message_queue(lst);
+ maybe_resize(lst, 1);
+ lst->items[lst->size - 1].fd = lst->msg_queue;
+ lst->items[lst->size - 1].events = POLLIN;
+ lst->items[lst->size - 1].revents = 0;
+ uv_once(&once, epoll_init);
+ uv_mutex_lock(&global_epoll_lock);
+ QUEUE_INSERT_TAIL(&global_epoll_queue, &lst->member);
+ uv_mutex_unlock(&global_epoll_lock);
+ }
+
+ return lst;
+}
+
+
+int epoll_ctl(uv__os390_epoll* lst,
+ int op,
+ int fd,
+ struct epoll_event *event) {
+ uv_mutex_lock(&global_epoll_lock);
+
+ if (op == EPOLL_CTL_DEL) {
+ if (fd >= lst->size || lst->items[fd].fd == -1) {
+ uv_mutex_unlock(&global_epoll_lock);
+ errno = ENOENT;
+ return -1;
+ }
+ lst->items[fd].fd = -1;
+ } else if (op == EPOLL_CTL_ADD) {
+
+ /* Resizing to 'fd + 1' would expand the list to contain at least
+ * 'fd'. But we need to guarantee that the last index on the list
+ * is reserved for the message queue. So specify 'fd + 2' instead.
+ */
+ maybe_resize(lst, fd + 2);
+ if (lst->items[fd].fd != -1) {
+ uv_mutex_unlock(&global_epoll_lock);
+ errno = EEXIST;
+ return -1;
+ }
+ lst->items[fd].fd = fd;
+ lst->items[fd].events = event->events;
+ lst->items[fd].revents = 0;
+ } else if (op == EPOLL_CTL_MOD) {
+ if (fd >= lst->size - 1 || lst->items[fd].fd == -1) {
+ uv_mutex_unlock(&global_epoll_lock);
+ errno = ENOENT;
+ return -1;
+ }
+ lst->items[fd].events = event->events;
+ lst->items[fd].revents = 0;
+ } else
+ abort();
+
+ uv_mutex_unlock(&global_epoll_lock);
+ return 0;
+}
+
+#define EP_MAX_PFDS (ULONG_MAX / sizeof(struct pollfd))
+#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))
+
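+/* z/OS poll() can wait on file descriptors and message queues in one call:
+ * the nmsgsfds_t argument packs both counts, and _NFDS()/_NMSGS() unpack how
+ * many of each are ready. Ready fds and the message-queue event are
+ * translated into epoll_event entries, with is_msg distinguishing them.
+ */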
+int epoll_wait(uv__os390_epoll* lst, struct epoll_event* events,
+ int maxevents, int timeout) {
+ nmsgsfds_t size;
+ struct pollfd* pfds;
+ int pollret;
+ int pollfdret;
+ int pollmsgret;
+ int reventcount;
+ int nevents;
+ struct pollfd msg_fd;
+ int i;
+
+ if (!lst || !lst->items || !events) {
+ errno = EFAULT;
+ return -1;
+ }
+
+ if (lst->size > EP_MAX_PFDS) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ if (maxevents <= 0 || maxevents > EP_MAX_EVENTS) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ assert(lst->size > 0);
+ _SET_FDS_MSGS(size, 1, lst->size - 1);
+ pfds = lst->items;
+ pollret = poll(pfds, size, timeout);
+ if (pollret <= 0)
+ return pollret;
+
+ pollfdret = _NFDS(pollret);
+ pollmsgret = _NMSGS(pollret);
+
+ reventcount = 0;
+ nevents = 0;
+ msg_fd = pfds[lst->size - 1]; /* message queue is always last entry */
+ maxevents = maxevents - pollmsgret; /* allow spot for message queue */
+ for (i = 0;
+ i < lst->size - 1 &&
+ nevents < maxevents &&
+ reventcount < pollfdret; ++i) {
+ struct epoll_event ev;
+ struct pollfd* pfd;
+
+ pfd = &pfds[i];
+ if (pfd->fd == -1 || pfd->revents == 0)
+ continue;
+
+ ev.fd = pfd->fd;
+ ev.events = pfd->revents;
+ ev.is_msg = 0;
+
+ reventcount++;
+ events[nevents++] = ev;
+ }
+
+ if (pollmsgret > 0 && msg_fd.revents != 0 && msg_fd.fd != -1) {
+ struct epoll_event ev;
+ ev.fd = msg_fd.fd;
+ ev.events = msg_fd.revents;
+ ev.is_msg = 1;
+ events[nevents++] = ev;
+ }
+
+ return nevents;
+}
+
+
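+/* Called whenever libuv closes a file descriptor: scrub it from every
+ * emulated epoll set so a recycled descriptor is not mistaken for the old
+ * one on the next poll.
+ */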
+int epoll_file_close(int fd) {
+ QUEUE* q;
+
+ uv_once(&once, epoll_init);
+ uv_mutex_lock(&global_epoll_lock);
+ QUEUE_FOREACH(q, &global_epoll_queue) {
+ uv__os390_epoll* lst;
+
+ lst = QUEUE_DATA(q, uv__os390_epoll, member);
+ if (fd < lst->size && lst->items != NULL && lst->items[fd].fd != -1)
+ lst->items[fd].fd = -1;
+ }
+
+ uv_mutex_unlock(&global_epoll_lock);
+ return 0;
+}
+
+void epoll_queue_close(uv__os390_epoll* lst) {
+ /* Remove epoll instance from global queue */
+ uv_mutex_lock(&global_epoll_lock);
+ QUEUE_REMOVE(&lst->member);
+ uv_mutex_unlock(&global_epoll_lock);
+
+ /* Free resources */
+ msgctl(lst->msg_queue, IPC_RMID, NULL);
+ lst->msg_queue = -1;
+ uv__free(lst->items);
+ lst->items = NULL;
+}
+
+
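+/* mkdtemp() replacement: overwrite the trailing "XXXXXX" in 'path' with
+ * characters drawn from /dev/urandom, retrying on EEXIST for up to TMP_MAX
+ * attempts.
+ */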
+char* mkdtemp(char* path) {
+ static const char* tempchars =
+ "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
+ static const size_t num_chars = 62;
+ static const size_t num_x = 6;
+ char *ep, *cp;
+ unsigned int tries, i;
+ size_t len;
+ uint64_t v;
+ int fd;
+ int retval;
+ int saved_errno;
+
+ len = strlen(path);
+ ep = path + len;
+ if (len < num_x || strncmp(ep - num_x, "XXXXXX", num_x)) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ fd = open("/dev/urandom", O_RDONLY);
+ if (fd == -1)
+ return NULL;
+
+ tries = TMP_MAX;
+ retval = -1;
+ do {
+ if (read(fd, &v, sizeof(v)) != sizeof(v))
+ break;
+
+ cp = ep - num_x;
+ for (i = 0; i < num_x; i++) {
+ *cp++ = tempchars[v % num_chars];
+ v /= num_chars;
+ }
+
+ if (mkdir(path, S_IRWXU) == 0) {
+ retval = 0;
+ break;
+ }
+ else if (errno != EEXIST)
+ break;
+ } while (--tries);
+
+ saved_errno = errno;
+ uv__close(fd);
+ if (tries == 0) {
+ errno = EEXIST;
+ return NULL;
+ }
+
+ if (retval == -1) {
+ errno = saved_errno;
+ return NULL;
+ }
+
+ return path;
+}
+
+
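+/* readlink() wrapper that also expands z/OS dynamic symlinks of the form
+ * "/$VARIABLE/...": the leading variable component is resolved through
+ * realpath() and spliced back together with the remainder of the target.
+ */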
+ssize_t os390_readlink(const char* path, char* buf, size_t len) {
+ ssize_t rlen;
+ ssize_t vlen;
+ ssize_t plen;
+ char* delimiter;
+ char old_delim;
+ char* tmpbuf;
+ char realpathstr[PATH_MAX + 1];
+
+ tmpbuf = uv__malloc(len + 1);
+ if (tmpbuf == NULL) {
+ errno = ENOMEM;
+ return -1;
+ }
+
+ rlen = readlink(path, tmpbuf, len);
+ if (rlen < 0) {
+ uv__free(tmpbuf);
+ return rlen;
+ }
+
+ if (rlen < 3 || strncmp("/$", tmpbuf, 2) != 0) {
+ /* Straightforward readlink. */
+ memcpy(buf, tmpbuf, rlen);
+ uv__free(tmpbuf);
+ return rlen;
+ }
+
+ /*
+ * There is a parmlib variable at the beginning
+ * which needs interpretation.
+ */
+ tmpbuf[rlen] = '\0';
+ delimiter = strchr(tmpbuf + 2, '/');
+ if (delimiter == NULL)
+ /* No slash at the end */
+ delimiter = strchr(tmpbuf + 2, '\0');
+
+ /* Read real path of the variable. */
+ old_delim = *delimiter;
+ *delimiter = '\0';
+ if (realpath(tmpbuf, realpathstr) == NULL) {
+ uv__free(tmpbuf);
+ return -1;
+ }
+
+ /* realpathstr is not guaranteed to end with null byte.*/
+ realpathstr[PATH_MAX] = '\0';
+
+ /* Reset the delimiter and fill up the buffer. */
+ *delimiter = old_delim;
+ plen = strlen(delimiter);
+ vlen = strlen(realpathstr);
+ rlen = plen + vlen;
+ if (rlen > len) {
+ uv__free(tmpbuf);
+ errno = ENAMETOOLONG;
+ return -1;
+ }
+ memcpy(buf, realpathstr, vlen);
+ memcpy(buf + vlen, delimiter, plen);
+
+ /* Done using temporary buffer. */
+ uv__free(tmpbuf);
+
+ return rlen;
+}
+
+
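+/* POSIX unnamed semaphores are not used on this platform; these stubs exist
+ * only to satisfy references and abort via UNREACHABLE() if ever called.
+ */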
+int sem_init(UV_PLATFORM_SEM_T* semid, int pshared, unsigned int value) {
+ UNREACHABLE();
+}
+
+
+int sem_destroy(UV_PLATFORM_SEM_T* semid) {
+ UNREACHABLE();
+}
+
+
+int sem_post(UV_PLATFORM_SEM_T* semid) {
+ UNREACHABLE();
+}
+
+
+int sem_trywait(UV_PLATFORM_SEM_T* semid) {
+ UNREACHABLE();
+}
+
+
+int sem_wait(UV_PLATFORM_SEM_T* semid) {
+ UNREACHABLE();
+}
diff --git a/Utilities/cmlibuv/src/unix/os390-syscalls.h b/Utilities/cmlibuv/src/unix/os390-syscalls.h
new file mode 100644
index 0000000000..9f504171d8
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/os390-syscalls.h
@@ -0,0 +1,75 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+
+#ifndef UV_OS390_SYSCALL_H_
+#define UV_OS390_SYSCALL_H_
+
+#include "uv.h"
+#include "internal.h"
+#include <dirent.h>
+#include <poll.h>
+#include <pthread.h>
+#include "zos-base.h"
+
+#define EPOLL_CTL_ADD 1
+#define EPOLL_CTL_DEL 2
+#define EPOLL_CTL_MOD 3
+#define MAX_EPOLL_INSTANCES 256
+#define MAX_ITEMS_PER_EPOLL 1024
+
+#define UV__O_CLOEXEC 0x80000
+
+struct epoll_event {
+ int events;
+ int fd;
+ int is_msg;
+};
+
+typedef struct {
+ QUEUE member;
+ struct pollfd* items;
+ unsigned long size;
+ int msg_queue;
+} uv__os390_epoll;
+
+/* epoll api */
+uv__os390_epoll* epoll_create1(int flags);
+int epoll_ctl(uv__os390_epoll* ep, int op, int fd, struct epoll_event *event);
+int epoll_wait(uv__os390_epoll* ep, struct epoll_event *events, int maxevents, int timeout);
+int epoll_file_close(int fd);
+
+/* utility functions */
+int scandir(const char* maindir, struct dirent*** namelist,
+ int (*filter)(const struct dirent *),
+ int (*compar)(const struct dirent **,
+ const struct dirent **));
+char* mkdtemp(char* path);
+ssize_t os390_readlink(const char* path, char* buf, size_t len);
+size_t strnlen(const char* str, size_t maxlen);
+int sem_init(UV_PLATFORM_SEM_T* semid, int pshared, unsigned int value);
+int sem_destroy(UV_PLATFORM_SEM_T* semid);
+int sem_post(UV_PLATFORM_SEM_T* semid);
+int sem_trywait(UV_PLATFORM_SEM_T* semid);
+int sem_wait(UV_PLATFORM_SEM_T* semid);
+void uv__os390_cleanup(void);
+
+#endif /* UV_OS390_SYSCALL_H_ */
diff --git a/Utilities/cmlibuv/src/unix/os390.c b/Utilities/cmlibuv/src/unix/os390.c
new file mode 100644
index 0000000000..3b16318ce2
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/os390.c
@@ -0,0 +1,1052 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "internal.h"
+#include <sys/ioctl.h>
+#include <net/if.h>
+#include <utmpx.h>
+#include <unistd.h>
+#include <sys/ps.h>
+#include <builtins.h>
+#include <termios.h>
+#include <sys/msg.h>
+#include <sys/resource.h>
+#include "zos-base.h"
+#if defined(__clang__)
+#include "csrsic.h"
+#else
+#include "//'SYS1.SAMPLIB(CSRSIC)'"
+#endif
+
+#define CVT_PTR 0x10
+#define PSA_PTR 0x00
+#define CSD_OFFSET 0x294
+
+/*
+ Long-term average CPU service used by this logical partition,
+ in millions of service units per hour. If this value is above
+ the partition's defined capacity, the partition will be capped.
+ It is calculated using the physical CPU adjustment factor
+ (RCTPCPUA) so it may not match other measures of service which
+ are based on the logical CPU adjustment factor. It is available
+ if the hardware supports LPAR cluster.
+*/
+#define RCTLACS_OFFSET 0xC4
+
+/* 32-bit count of alive CPUs. This includes both CPs and IFAs */
+#define CSD_NUMBER_ONLINE_CPUS 0xD4
+
+/* Address of system resources manager (SRM) control table */
+#define CVTOPCTP_OFFSET 0x25C
+
+/* Address of the RCT table */
+#define RMCTRCT_OFFSET 0xE4
+
+/* Address of the rsm control and enumeration area. */
+#define CVTRCEP_OFFSET 0x490
+
+/* Total number of frames currently on all available frame queues. */
+#define RCEAFC_OFFSET 0x088
+
+/* CPC model length from the CSRSI Service. */
+#define CPCMODEL_LENGTH 16
+
+/* Pointer to the home (current) ASCB. */
+#define PSAAOLD 0x224
+
+/* Pointer to rsm address space block extension. */
+#define ASCBRSME 0x16C
+
+/*
+  Number of frames currently in use by this address space.
+  It does not include 2G frames.
+*/
+#define RAXFMCT 0x2C
+
+/* Thread Entry constants */
+#define PGTH_CURRENT 1
+#define PGTH_LEN 26
+#define PGTHAPATH 0x20
+#pragma linkage(BPX4GTH, OS)
+#pragma linkage(BPX1GTH, OS)
+
+/* TOD Clock resolution in nanoseconds */
+#define TOD_RES 4.096
+
+typedef unsigned data_area_ptr_assign_type;
+
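+/* z/OS control blocks are addressed with 31-bit pointers. On 64-bit builds
+ * the union below writes such an address into the low word of an 8-byte
+ * pointer (z/OS is big-endian), leaving the high word zeroed, so 'deref'
+ * can then be used as an ordinary pointer.
+ */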
+typedef union {
+ struct {
+#if defined(_LP64)
+ data_area_ptr_assign_type lower;
+#endif
+ data_area_ptr_assign_type assign;
+ };
+ char* deref;
+} data_area_ptr;
+
+
+void uv_loadavg(double avg[3]) {
+ /* TODO: implement the following */
+ avg[0] = 0;
+ avg[1] = 0;
+ avg[2] = 0;
+}
+
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+ uv__os390_epoll* ep;
+
+ ep = epoll_create1(0);
+ loop->ep = ep;
+ if (ep == NULL)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+ if (loop->ep != NULL) {
+ epoll_queue_close(loop->ep);
+ loop->ep = NULL;
+ }
+}
+
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+ unsigned long long timestamp;
+ __stckf(&timestamp);
+ /* Convert to nanoseconds */
+ return timestamp / TOD_RES;
+}
+
+
+static int getexe(char* buf, size_t len) {
+ return uv__strscpy(buf, __getargv()[0], len);
+}
+
+
+/*
+ * We could use a static buffer for the path manipulation that we need outside
+ * of this function, but it may be called by multiple consumers and we don't
+ * want to risk a race condition in the use of snprintf. There is no direct
+ * way of getting the executable's path on z/OS, neither through /proc nor
+ * through any libc API. The approach below is to parse argv[0] and combine
+ * it with the PATH environment variable to craft an absolute path.
+ */
+int uv_exepath(char* buffer, size_t* size) {
+ int res;
+ char args[PATH_MAX];
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ res = getexe(args, sizeof(args));
+ if (res < 0)
+ return UV_EINVAL;
+
+ return uv__search_path(args, buffer, size);
+}
+
+
+uint64_t uv_get_free_memory(void) {
+ uint64_t freeram;
+
+ data_area_ptr cvt = {0};
+ data_area_ptr rcep = {0};
+ cvt.assign = *(data_area_ptr_assign_type*)(CVT_PTR);
+ rcep.assign = *(data_area_ptr_assign_type*)(cvt.deref + CVTRCEP_OFFSET);
+ freeram = (uint64_t)*((uint32_t*)(rcep.deref + RCEAFC_OFFSET)) * 4096;
+ return freeram;
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ /* Use CVTRLSTG to get the size of actual real storage online at IPL in K. */
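+  /* Index 4 loads the CVT pointer from PSA offset 0x10; index 214 reads the
+   * word at CVT offset 0x358 (CVTRLSTG), the online real storage in KB.
+   */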
+ return (uint64_t)((int)((char *__ptr32 *__ptr32 *)0)[4][214]) * 1024;
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ struct rlimit rl;
+
+ /* RLIMIT_MEMLIMIT return value is in megabytes rather than bytes. */
+ if (getrlimit(RLIMIT_MEMLIMIT, &rl) == 0)
+ return rl.rlim_cur * 1024 * 1024;
+
+ return 0; /* There is no memory limit set. */
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+ char* ascb;
+ char* rax;
+ size_t nframes;
+
+ ascb = *(char* __ptr32 *)(PSA_PTR + PSAAOLD);
+ rax = *(char* __ptr32 *)(ascb + ASCBRSME);
+ nframes = *(unsigned int*)(rax + RAXFMCT);
+
+ *rss = nframes * sysconf(_SC_PAGESIZE);
+ return 0;
+}
+
+
+int uv_uptime(double* uptime) {
+  struct utmpx u;
+ struct utmpx *v;
+ time64_t t;
+
+ u.ut_type = BOOT_TIME;
+ v = getutxid(&u);
+ if (v == NULL)
+ return -1;
+ *uptime = difftime64(time64(&t), v->ut_tv.tv_sec);
+ return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ uv_cpu_info_t* cpu_info;
+ int idx;
+ siv1v2 info;
+ data_area_ptr cvt = {0};
+ data_area_ptr csd = {0};
+ data_area_ptr rmctrct = {0};
+ data_area_ptr cvtopctp = {0};
+ int cpu_usage_avg;
+
+ cvt.assign = *(data_area_ptr_assign_type*)(CVT_PTR);
+
+ csd.assign = *((data_area_ptr_assign_type *) (cvt.deref + CSD_OFFSET));
+ cvtopctp.assign = *((data_area_ptr_assign_type *) (cvt.deref + CVTOPCTP_OFFSET));
+ rmctrct.assign = *((data_area_ptr_assign_type *) (cvtopctp.deref + RMCTRCT_OFFSET));
+
+ *count = *((int*) (csd.deref + CSD_NUMBER_ONLINE_CPUS));
+ cpu_usage_avg = *((unsigned short int*) (rmctrct.deref + RCTLACS_OFFSET));
+
+ *cpu_infos = uv__malloc(*count * sizeof(uv_cpu_info_t));
+ if (!*cpu_infos)
+ return UV_ENOMEM;
+
+ cpu_info = *cpu_infos;
+ idx = 0;
+ while (idx < *count) {
+ cpu_info->speed = *(int*)(info.siv1v2si22v1.si22v1cpucapability);
+ cpu_info->model = uv__malloc(CPCMODEL_LENGTH + 1);
+ memset(cpu_info->model, '\0', CPCMODEL_LENGTH + 1);
+ memcpy(cpu_info->model, info.siv1v2si11v1.si11v1cpcmodel, CPCMODEL_LENGTH);
+ cpu_info->cpu_times.user = cpu_usage_avg;
+ /* TODO: implement the following */
+ cpu_info->cpu_times.sys = 0;
+ cpu_info->cpu_times.idle = 0;
+ cpu_info->cpu_times.irq = 0;
+ cpu_info->cpu_times.nice = 0;
+ ++cpu_info;
+ ++idx;
+ }
+
+ return 0;
+}
+
+
+static int uv__interface_addresses_v6(uv_interface_address_t** addresses,
+ int* count) {
+ uv_interface_address_t* address;
+ int sockfd;
+ int maxsize;
+ __net_ifconf6header_t ifc;
+ __net_ifconf6entry_t* ifr;
+ __net_ifconf6entry_t* p;
+ unsigned int i;
+ int count_names;
+ unsigned char netmask[16] = {0};
+
+ *count = 0;
+ /* Assume maximum buffer size allowable */
+ maxsize = 16384;
+
+ if (0 > (sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP)))
+ return UV__ERR(errno);
+
+ ifc.__nif6h_buffer = uv__calloc(1, maxsize);
+
+ if (ifc.__nif6h_buffer == NULL) {
+ uv__close(sockfd);
+ return UV_ENOMEM;
+ }
+
+ ifc.__nif6h_version = 1;
+ ifc.__nif6h_buflen = maxsize;
+
+ if (ioctl(sockfd, SIOCGIFCONF6, &ifc) == -1) {
+ /* This will error on a system that does not support IPv6. However, we want
+ * to treat this as there being 0 interfaces so we can continue to get IPv4
+ * interfaces in uv_interface_addresses(). So return 0 instead of the error.
+ */
+ uv__free(ifc.__nif6h_buffer);
+ uv__close(sockfd);
+ errno = 0;
+ return 0;
+ }
+
+ ifr = (__net_ifconf6entry_t*)(ifc.__nif6h_buffer);
+ while ((char*)ifr < (char*)ifc.__nif6h_buffer + ifc.__nif6h_buflen) {
+ p = ifr;
+ ifr = (__net_ifconf6entry_t*)((char*)ifr + ifc.__nif6h_entrylen);
+
+ if (!(p->__nif6e_addr.sin6_family == AF_INET6))
+ continue;
+
+ if (!(p->__nif6e_flags & _NIF6E_FLAGS_ON_LINK_ACTIVE))
+ continue;
+
+ ++(*count);
+ }
+
+ if ((*count) == 0) {
+ uv__free(ifc.__nif6h_buffer);
+ uv__close(sockfd);
+ return 0;
+ }
+
+ /* Alloc the return interface structs */
+ *addresses = uv__calloc(1, *count * sizeof(uv_interface_address_t));
+ if (!(*addresses)) {
+ uv__free(ifc.__nif6h_buffer);
+ uv__close(sockfd);
+ return UV_ENOMEM;
+ }
+ address = *addresses;
+
+ count_names = 0;
+ ifr = (__net_ifconf6entry_t*)(ifc.__nif6h_buffer);
+ while ((char*)ifr < (char*)ifc.__nif6h_buffer + ifc.__nif6h_buflen) {
+ p = ifr;
+ ifr = (__net_ifconf6entry_t*)((char*)ifr + ifc.__nif6h_entrylen);
+
+ if (!(p->__nif6e_addr.sin6_family == AF_INET6))
+ continue;
+
+ if (!(p->__nif6e_flags & _NIF6E_FLAGS_ON_LINK_ACTIVE))
+ continue;
+
+ /* All conditions above must match count loop */
+
+ i = 0;
+ /* Ignore EBCDIC space (0x40) padding in name */
+ while (i < ARRAY_SIZE(p->__nif6e_name) &&
+ p->__nif6e_name[i] != 0x40 &&
+ p->__nif6e_name[i] != 0)
+ ++i;
+ address->name = uv__malloc(i + 1);
+ if (address->name == NULL) {
+ uv_free_interface_addresses(*addresses, count_names);
+ uv__free(ifc.__nif6h_buffer);
+ uv__close(sockfd);
+ return UV_ENOMEM;
+ }
+ memcpy(address->name, p->__nif6e_name, i);
+ address->name[i] = '\0';
+ __e2a_s(address->name);
+ count_names++;
+
+ address->address.address6 = *((struct sockaddr_in6*) &p->__nif6e_addr);
+
+ for (i = 0; i < (p->__nif6e_prefixlen / 8); i++)
+ netmask[i] = 0xFF;
+
+ if (p->__nif6e_prefixlen % 8)
+ netmask[i] = 0xFF << (8 - (p->__nif6e_prefixlen % 8));
+
+ address->netmask.netmask6.sin6_len = p->__nif6e_prefixlen;
+ memcpy(&(address->netmask.netmask6.sin6_addr), netmask, 16);
+ address->netmask.netmask6.sin6_family = AF_INET6;
+
+ address->is_internal = p->__nif6e_flags & _NIF6E_FLAGS_LOOPBACK ? 1 : 0;
+ address++;
+ }
+
+ uv__free(ifc.__nif6h_buffer);
+ uv__close(sockfd);
+ return 0;
+}
+
+
+int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
+ uv_interface_address_t* address;
+ int sockfd;
+ int maxsize;
+ struct ifconf ifc;
+ struct ifreq flg;
+ struct ifreq* ifr;
+ struct ifreq* p;
+ uv_interface_address_t* addresses_v6;
+ int count_v6;
+ unsigned int i;
+ int rc;
+ int count_names;
+
+ *count = 0;
+ *addresses = NULL;
+
+ /* get the ipv6 addresses first */
+ if ((rc = uv__interface_addresses_v6(&addresses_v6, &count_v6)) != 0)
+ return rc;
+
+ /* now get the ipv4 addresses */
+
+ /* Assume maximum buffer size allowable */
+ maxsize = 16384;
+
+ sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
+ if (0 > sockfd) {
+ if (count_v6)
+ uv_free_interface_addresses(addresses_v6, count_v6);
+ return UV__ERR(errno);
+ }
+
+ ifc.ifc_req = uv__calloc(1, maxsize);
+
+ if (ifc.ifc_req == NULL) {
+ if (count_v6)
+ uv_free_interface_addresses(addresses_v6, count_v6);
+ uv__close(sockfd);
+ return UV_ENOMEM;
+ }
+
+ ifc.ifc_len = maxsize;
+
+ if (ioctl(sockfd, SIOCGIFCONF, &ifc) == -1) {
+ if (count_v6)
+ uv_free_interface_addresses(addresses_v6, count_v6);
+ uv__free(ifc.ifc_req);
+ uv__close(sockfd);
+ return UV__ERR(errno);
+ }
+
+#define MAX(a,b) (((a)>(b))?(a):(b))
+#define ADDR_SIZE(p) MAX((p).sa_len, sizeof(p))
+
+ /* Count all up and running ipv4/ipv6 addresses */
+ ifr = ifc.ifc_req;
+ while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
+ p = ifr;
+ ifr = (struct ifreq*)
+ ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));
+
+ if (!(p->ifr_addr.sa_family == AF_INET6 ||
+ p->ifr_addr.sa_family == AF_INET))
+ continue;
+
+ memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
+ if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) {
+ if (count_v6)
+ uv_free_interface_addresses(addresses_v6, count_v6);
+ uv__free(ifc.ifc_req);
+ uv__close(sockfd);
+ return UV__ERR(errno);
+ }
+
+ if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING))
+ continue;
+
+ (*count)++;
+ }
+
+ if (*count == 0 && count_v6 == 0) {
+ uv__free(ifc.ifc_req);
+ uv__close(sockfd);
+ return 0;
+ }
+
+ /* Alloc the return interface structs */
+ *addresses = uv__calloc(1, (*count + count_v6) *
+ sizeof(uv_interface_address_t));
+
+ if (!(*addresses)) {
+ if (count_v6)
+ uv_free_interface_addresses(addresses_v6, count_v6);
+ uv__free(ifc.ifc_req);
+ uv__close(sockfd);
+ return UV_ENOMEM;
+ }
+ address = *addresses;
+
+ /* copy over the ipv6 addresses if any are found */
+ if (count_v6) {
+ memcpy(address, addresses_v6, count_v6 * sizeof(uv_interface_address_t));
+ address += count_v6;
+ *count += count_v6;
+ /* free ipv6 addresses, but keep address names */
+ uv__free(addresses_v6);
+ }
+
+ count_names = *count;
+ ifr = ifc.ifc_req;
+ while ((char*)ifr < (char*)ifc.ifc_req + ifc.ifc_len) {
+ p = ifr;
+ ifr = (struct ifreq*)
+ ((char*)ifr + sizeof(ifr->ifr_name) + ADDR_SIZE(ifr->ifr_addr));
+
+ if (!(p->ifr_addr.sa_family == AF_INET6 ||
+ p->ifr_addr.sa_family == AF_INET))
+ continue;
+
+ memcpy(flg.ifr_name, p->ifr_name, sizeof(flg.ifr_name));
+ if (ioctl(sockfd, SIOCGIFFLAGS, &flg) == -1) {
+ uv_free_interface_addresses(*addresses, count_names);
+ uv__free(ifc.ifc_req);
+ uv__close(sockfd);
+ return UV_ENOSYS;
+ }
+
+ if (!(flg.ifr_flags & IFF_UP && flg.ifr_flags & IFF_RUNNING))
+ continue;
+
+ /* All conditions above must match count loop */
+
+ i = 0;
+ /* Ignore EBCDIC space (0x40) padding in name */
+ while (i < ARRAY_SIZE(p->ifr_name) &&
+ p->ifr_name[i] != 0x40 &&
+ p->ifr_name[i] != 0)
+ ++i;
+ address->name = uv__malloc(i + 1);
+ if (address->name == NULL) {
+ uv_free_interface_addresses(*addresses, count_names);
+ uv__free(ifc.ifc_req);
+ uv__close(sockfd);
+ return UV_ENOMEM;
+ }
+ memcpy(address->name, p->ifr_name, i);
+ address->name[i] = '\0';
+ __e2a_s(address->name);
+ count_names++;
+
+ address->address.address4 = *((struct sockaddr_in*) &p->ifr_addr);
+
+ if (ioctl(sockfd, SIOCGIFNETMASK, p) == -1) {
+ uv_free_interface_addresses(*addresses, count_names);
+ uv__free(ifc.ifc_req);
+ uv__close(sockfd);
+ return UV__ERR(errno);
+ }
+
+ address->netmask.netmask4 = *((struct sockaddr_in*) &p->ifr_addr);
+ address->netmask.netmask4.sin_family = AF_INET;
+ address->is_internal = flg.ifr_flags & IFF_LOOPBACK ? 1 : 0;
+ address++;
+ }
+
+#undef ADDR_SIZE
+#undef MAX
+
+ uv__free(ifc.ifc_req);
+ uv__close(sockfd);
+ return 0;
+}
+
+
+void uv_free_interface_addresses(uv_interface_address_t* addresses,
+ int count) {
+ int i;
+ for (i = 0; i < count; ++i)
+ uv__free(addresses[i].name);
+ uv__free(addresses);
+}
+
+
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
+ struct epoll_event* events;
+ struct epoll_event dummy;
+ uintptr_t i;
+ uintptr_t nfds;
+
+ assert(loop->watchers != NULL);
+ assert(fd >= 0);
+
+ events = (struct epoll_event*) loop->watchers[loop->nwatchers];
+ nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
+ if (events != NULL)
+ /* Invalidate events with same file descriptor */
+ for (i = 0; i < nfds; i++)
+ if ((int) events[i].fd == fd)
+ events[i].fd = -1;
+
+ /* Remove the file descriptor from the epoll. */
+ if (loop->ep != NULL)
+ epoll_ctl(loop->ep, EPOLL_CTL_DEL, fd, &dummy);
+}
+
+
+int uv__io_check_fd(uv_loop_t* loop, int fd) {
+ struct pollfd p[1];
+ int rv;
+
+ p[0].fd = fd;
+ p[0].events = POLLIN;
+
+ do
+ rv = poll(p, 1, 0);
+ while (rv == -1 && errno == EINTR);
+
+ if (rv == -1)
+ abort();
+
+ if (p[0].revents & POLLNVAL)
+ return -1;
+
+ return 0;
+}
+
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+ uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
+ return 0;
+}
+
+
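+/* Register file interest with the kernel via __w_pioctl(_IOCC_REGFILEINT).
+ * Change notifications arrive as messages on the loop's epoll message queue;
+ * the handle pointer travels in the user token so the poller can map a
+ * notification back to its uv_fs_event_t.
+ */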
+static int os390_regfileint(uv_fs_event_t* handle, char* path) {
+ uv__os390_epoll* ep;
+ _RFIS reg_struct;
+ int rc;
+
+ ep = handle->loop->ep;
+ assert(ep->msg_queue != -1);
+
+ reg_struct.__rfis_cmd = _RFIS_REG;
+ reg_struct.__rfis_qid = ep->msg_queue;
+ reg_struct.__rfis_type = 1;
+ memcpy(reg_struct.__rfis_utok, &handle, sizeof(handle));
+
+ rc = __w_pioctl(path, _IOCC_REGFILEINT, sizeof(reg_struct), &reg_struct);
+ if (rc != 0)
+ return UV__ERR(errno);
+
+ memcpy(handle->rfis_rftok, reg_struct.__rfis_rftok,
+ sizeof(handle->rfis_rftok));
+
+ return 0;
+}
+
+
+int uv_fs_event_start(uv_fs_event_t* handle, uv_fs_event_cb cb,
+ const char* filename, unsigned int flags) {
+ char* path;
+ int rc;
+
+ if (uv__is_active(handle))
+ return UV_EINVAL;
+
+ path = uv__strdup(filename);
+ if (path == NULL)
+ return UV_ENOMEM;
+
+ rc = os390_regfileint(handle, path);
+ if (rc != 0) {
+ uv__free(path);
+ return rc;
+ }
+
+ uv__handle_start(handle);
+ handle->path = path;
+ handle->cb = cb;
+
+ return 0;
+}
+
+
+int uv__fs_event_stop(uv_fs_event_t* handle) {
+ uv__os390_epoll* ep;
+ _RFIS reg_struct;
+ int rc;
+
+ if (!uv__is_active(handle))
+ return 0;
+
+ ep = handle->loop->ep;
+ assert(ep->msg_queue != -1);
+
+ reg_struct.__rfis_cmd = _RFIS_UNREG;
+ reg_struct.__rfis_qid = ep->msg_queue;
+ reg_struct.__rfis_type = 1;
+ memcpy(reg_struct.__rfis_rftok, handle->rfis_rftok,
+ sizeof(handle->rfis_rftok));
+
+  /*
+   * Pass "/" as the path argument: unregistration does not need the
+   * original path, and the system simply ignores it.
+   */
+ rc = __w_pioctl("/", _IOCC_REGFILEINT, sizeof(reg_struct), &reg_struct);
+ if (rc != 0 && errno != EALREADY && errno != ENOENT)
+ abort();
+
+ if (handle->path != NULL) {
+ uv__free(handle->path);
+ handle->path = NULL;
+ }
+
+ if (rc != 0 && errno == EALREADY)
+ return -1;
+
+ uv__handle_stop(handle);
+
+ return 0;
+}
+
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+ uv__fs_event_stop(handle);
+ return 0;
+}
+
+
+void uv__fs_event_close(uv_fs_event_t* handle) {
+ /*
+ * If we were unable to unregister file interest here, then it is most likely
+ * that there is a pending queued change notification. When this happens, we
+ * don't want to complete the close as it will free the underlying memory for
+ * the handle, causing a use-after-free problem when the event is processed.
+ * We defer the final cleanup until after the event is consumed in
+ * os390_message_queue_handler().
+ */
+ if (uv__fs_event_stop(handle) == 0)
+ uv__make_close_pending((uv_handle_t*) handle);
+}
+
+
+static int os390_message_queue_handler(uv__os390_epoll* ep) {
+ uv_fs_event_t* handle;
+ int msglen;
+ int events;
+ _RFIM msg;
+
+ if (ep->msg_queue == -1)
+ return 0;
+
+ msglen = msgrcv(ep->msg_queue, &msg, sizeof(msg), 0, IPC_NOWAIT);
+
+ if (msglen == -1 && errno == ENOMSG)
+ return 0;
+
+ if (msglen == -1)
+ abort();
+
+ events = 0;
+ if (msg.__rfim_event == _RFIM_ATTR || msg.__rfim_event == _RFIM_WRITE)
+ events = UV_CHANGE;
+ else if (msg.__rfim_event == _RFIM_RENAME || msg.__rfim_event == _RFIM_UNLINK)
+ events = UV_RENAME;
+ else if (msg.__rfim_event == 156)
+ /* TODO(gabylb): zos - this event should not happen, need to investigate.
+ *
+ * This event seems to occur when the watched file is [re]moved, or an
+ * editor (like vim) renames then creates the file on save (for vim, that's
+ * when backupcopy=no|auto).
+ */
+ events = UV_RENAME;
+ else
+ /* Some event that we are not interested in. */
+ return 0;
+
+ /* `__rfim_utok` is treated as text when it should be treated as binary while
+ * running in ASCII mode, resulting in an unwanted autoconversion.
+ */
+ __a2e_l(msg.__rfim_utok, sizeof(msg.__rfim_utok));
+ handle = *(uv_fs_event_t**)(msg.__rfim_utok);
+ assert(handle != NULL);
+
+ assert((handle->flags & UV_HANDLE_CLOSED) == 0);
+ if (uv__is_closing(handle)) {
+ uv__handle_stop(handle);
+ uv__make_close_pending((uv_handle_t*) handle);
+ return 0;
+ } else if (handle->path == NULL) {
+ /* _RFIS_UNREG returned EALREADY. */
+ uv__handle_stop(handle);
+ return 0;
+ }
+
+ /* The file is implicitly unregistered when the change notification is
+ * sent, only one notification is sent per registration. So we need to
+ * re-register interest in a file after each change notification we
+ * receive.
+ */
+ assert(handle->path != NULL);
+ os390_regfileint(handle, handle->path);
+ handle->cb(handle, uv__basename_r(handle->path), events, 0);
+ return 1;
+}
+
+
+void uv__io_poll(uv_loop_t* loop, int timeout) {
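+  /* Cap blocking waits at just under 30 minutes. This mirrors the Linux
+   * backend, where kernels before 2.6.37 treat larger timeouts as infinite
+   * on 32-bit architectures.
+   */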
+ static const int max_safe_timeout = 1789569;
+ struct epoll_event events[1024];
+ struct epoll_event* pe;
+ struct epoll_event e;
+ uv__os390_epoll* ep;
+ int have_signals;
+ int real_timeout;
+ QUEUE* q;
+ uv__io_t* w;
+ uint64_t base;
+ int count;
+ int nfds;
+ int fd;
+ int op;
+ int i;
+ int user_timeout;
+  int reset_timeout;
+  int nevents;
+
+ if (loop->nfds == 0) {
+ assert(QUEUE_EMPTY(&loop->watcher_queue));
+ return;
+ }
+
+ while (!QUEUE_EMPTY(&loop->watcher_queue)) {
+ uv_stream_t* stream;
+
+ q = QUEUE_HEAD(&loop->watcher_queue);
+ QUEUE_REMOVE(q);
+ QUEUE_INIT(q);
+ w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+
+ assert(w->pevents != 0);
+ assert(w->fd >= 0);
+
+    stream = container_of(w, uv_stream_t, io_watcher);
+
+ assert(w->fd < (int) loop->nwatchers);
+
+ e.events = w->pevents;
+ e.fd = w->fd;
+
+ if (w->events == 0)
+ op = EPOLL_CTL_ADD;
+ else
+ op = EPOLL_CTL_MOD;
+
+ /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
+ * events, skip the syscall and squelch the events after epoll_wait().
+ */
+ if (epoll_ctl(loop->ep, op, w->fd, &e)) {
+ if (errno != EEXIST)
+ abort();
+
+ assert(op == EPOLL_CTL_ADD);
+
+ /* We've reactivated a file descriptor that's been watched before. */
+ if (epoll_ctl(loop->ep, EPOLL_CTL_MOD, w->fd, &e))
+ abort();
+ }
+
+ w->events = w->pevents;
+ }
+
+ assert(timeout >= -1);
+ base = loop->time;
+ count = 48; /* Benchmarks suggest this gives the best throughput. */
+ real_timeout = timeout;
+  nevents = 0;
+ have_signals = 0;
+
+ if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
+ reset_timeout = 1;
+ user_timeout = timeout;
+ timeout = 0;
+ } else {
+ reset_timeout = 0;
+ }
+
+ nfds = 0;
+ for (;;) {
+ /* Only need to set the provider_entry_time if timeout != 0. The function
+ * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
+ */
+ if (timeout != 0)
+ uv__metrics_set_provider_entry_time(loop);
+
+ if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
+ timeout = max_safe_timeout;
+
+ nfds = epoll_wait(loop->ep, events,
+ ARRAY_SIZE(events), timeout);
+
+ /* Update loop->time unconditionally. It's tempting to skip the update when
+ * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
+ * operating system didn't reschedule our process while in the syscall.
+ */
+ base = loop->time;
+ SAVE_ERRNO(uv__update_time(loop));
+ if (nfds == 0) {
+ assert(timeout != -1);
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (timeout == -1)
+ continue;
+
+ if (timeout == 0)
+ return;
+
+ /* We may have been inside the system call for longer than |timeout|
+ * milliseconds so we need to update the timestamp to avoid drift.
+ */
+ goto update_timeout;
+ }
+
+ if (nfds == -1) {
+
+ if (errno != EINTR)
+ abort();
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (timeout == -1)
+ continue;
+
+ if (timeout == 0)
+ return;
+
+ /* Interrupted by a signal. Update timeout and poll again. */
+ goto update_timeout;
+ }
+
+ assert(loop->watchers != NULL);
+ loop->watchers[loop->nwatchers] = (void*) events;
+ loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
+ for (i = 0; i < nfds; i++) {
+ pe = events + i;
+ fd = pe->fd;
+
+ /* Skip invalidated events, see uv__platform_invalidate_fd */
+ if (fd == -1)
+ continue;
+
+ ep = loop->ep;
+ if (pe->is_msg) {
+ os390_message_queue_handler(ep);
+ nevents++;
+ continue;
+ }
+
+ assert(fd >= 0);
+ assert((unsigned) fd < loop->nwatchers);
+
+ w = loop->watchers[fd];
+
+ if (w == NULL) {
+ /* File descriptor that we've stopped watching, disarm it.
+ *
+ * Ignore all errors because we may be racing with another thread
+ * when the file descriptor is closed.
+ */
+ epoll_ctl(loop->ep, EPOLL_CTL_DEL, fd, pe);
+ continue;
+ }
+
+      /* Give users only events they're interested in. This prevents spurious
+       * callbacks when a previous callback invocation in this loop has
+       * stopped the current watcher. It also filters out events that the
+       * user has not requested us to watch.
+       */
+ pe->events &= w->pevents | POLLERR | POLLHUP;
+
+ if (pe->events == POLLERR || pe->events == POLLHUP)
+ pe->events |= w->pevents & (POLLIN | POLLOUT);
+
+ if (pe->events != 0) {
+ /* Run signal watchers last. This also affects child process watchers
+ * because those are implemented in terms of signal watchers.
+ */
+ if (w == &loop->signal_io_watcher) {
+ have_signals = 1;
+ } else {
+ uv__metrics_update_idle_time(loop);
+ w->cb(loop, w, pe->events);
+ }
+ nevents++;
+ }
+ }
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (have_signals != 0) {
+ uv__metrics_update_idle_time(loop);
+ loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
+ }
+
+ loop->watchers[loop->nwatchers] = NULL;
+ loop->watchers[loop->nwatchers + 1] = NULL;
+
+ if (have_signals != 0)
+ return; /* Event loop should cycle now so don't poll again. */
+
+ if (nevents != 0) {
+ if (nfds == ARRAY_SIZE(events) && --count != 0) {
+ /* Poll for more events but don't block this time. */
+ timeout = 0;
+ continue;
+ }
+ return;
+ }
+
+ if (timeout == 0)
+ return;
+
+ if (timeout == -1)
+ continue;
+
+update_timeout:
+ assert(timeout > 0);
+
+ real_timeout -= (loop->time - base);
+ if (real_timeout <= 0)
+ return;
+
+ timeout = real_timeout;
+ }
+}
+
+
+int uv__io_fork(uv_loop_t* loop) {
+ /*
+ Nullify the msg queue but don't close it because
+ it is still being used by the parent.
+ */
+ loop->ep = NULL;
+
+ return uv__platform_loop_init(loop);
+}
diff --git a/Utilities/cmlibuv/src/unix/pipe.c b/Utilities/cmlibuv/src/unix/pipe.c
new file mode 100644
index 0000000000..e9b88c1f1f
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/pipe.c
@@ -0,0 +1,435 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/un.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+
+int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) {
+ uv__stream_init(loop, (uv_stream_t*)handle, UV_NAMED_PIPE);
+ handle->shutdown_req = NULL;
+ handle->connect_req = NULL;
+ handle->pipe_fname = NULL;
+ handle->ipc = ipc;
+ return 0;
+}
+
+
+int uv_pipe_bind(uv_pipe_t* handle, const char* name) {
+ struct sockaddr_un saddr;
+ const char* pipe_fname = NULL;
+ int sockfd = -1;
+ int err;
+
+ /* Already bound? */
+ if (uv__stream_fd(handle) >= 0)
+ return UV_EINVAL;
+ if (uv__is_closing(handle)) {
+ return UV_EINVAL;
+ }
+  /* Make a copy of the file name; it needs to outlive this function's scope. */
+ pipe_fname = uv__strdup(name);
+ if (pipe_fname == NULL)
+ return UV_ENOMEM;
+
+ /* We've got a copy, don't touch the original any more. */
+ name = NULL;
+
+ err = uv__socket(AF_UNIX, SOCK_STREAM, 0);
+ if (err < 0)
+ goto err_socket;
+ sockfd = err;
+
+ memset(&saddr, 0, sizeof saddr);
+ uv__strscpy(saddr.sun_path, pipe_fname, sizeof(saddr.sun_path));
+ saddr.sun_family = AF_UNIX;
+
+ if (bind(sockfd, (struct sockaddr*)&saddr, sizeof saddr)) {
+ err = UV__ERR(errno);
+ /* Convert ENOENT to EACCES for compatibility with Windows. */
+ if (err == UV_ENOENT)
+ err = UV_EACCES;
+
+ uv__close(sockfd);
+ goto err_socket;
+ }
+
+ /* Success. */
+ handle->flags |= UV_HANDLE_BOUND;
+ handle->pipe_fname = pipe_fname; /* Is a strdup'ed copy. */
+ handle->io_watcher.fd = sockfd;
+ return 0;
+
+err_socket:
+ uv__free((void*)pipe_fname);
+ return err;
+}
+
+
+int uv__pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb) {
+ if (uv__stream_fd(handle) == -1)
+ return UV_EINVAL;
+
+ if (handle->ipc)
+ return UV_EINVAL;
+
+#if defined(__MVS__) || defined(__PASE__)
+ /* On zOS, backlog=0 has undefined behaviour */
+ /* On IBMi PASE, backlog=0 leads to "Connection refused" error */
+ if (backlog == 0)
+ backlog = 1;
+ else if (backlog < 0)
+ backlog = SOMAXCONN;
+#endif
+
+ if (listen(uv__stream_fd(handle), backlog))
+ return UV__ERR(errno);
+
+ handle->connection_cb = cb;
+ handle->io_watcher.cb = uv__server_io;
+ uv__io_start(handle->loop, &handle->io_watcher, POLLIN);
+ return 0;
+}
+
+
+void uv__pipe_close(uv_pipe_t* handle) {
+ if (handle->pipe_fname) {
+ /*
+ * Unlink the file system entity before closing the file descriptor.
+ * Doing it the other way around introduces a race where our process
+ * unlinks a socket with the same name that's just been created by
+ * another thread or process.
+ */
+ unlink(handle->pipe_fname);
+ uv__free((void*)handle->pipe_fname);
+ handle->pipe_fname = NULL;
+ }
+
+ uv__stream_close((uv_stream_t*)handle);
+}
+
+
+int uv_pipe_open(uv_pipe_t* handle, uv_file fd) {
+ int flags;
+ int mode;
+ int err;
+ flags = 0;
+
+ if (uv__fd_exists(handle->loop, fd))
+ return UV_EEXIST;
+
+ do
+ mode = fcntl(fd, F_GETFL);
+ while (mode == -1 && errno == EINTR);
+
+ if (mode == -1)
+ return UV__ERR(errno); /* according to docs, must be EBADF */
+
+ err = uv__nonblock(fd, 1);
+ if (err)
+ return err;
+
+#if defined(__APPLE__) && !defined(CMAKE_BOOTSTRAP)
+ err = uv__stream_try_select((uv_stream_t*) handle, &fd);
+ if (err)
+ return err;
+#endif /* defined(__APPLE__) */
+
+ mode &= O_ACCMODE;
+ if (mode != O_WRONLY)
+ flags |= UV_HANDLE_READABLE;
+ if (mode != O_RDONLY)
+ flags |= UV_HANDLE_WRITABLE;
+
+ return uv__stream_open((uv_stream_t*)handle, fd, flags);
+}
+
+
+void uv_pipe_connect(uv_connect_t* req,
+ uv_pipe_t* handle,
+ const char* name,
+ uv_connect_cb cb) {
+ struct sockaddr_un saddr;
+ int new_sock;
+ int err;
+ int r;
+
+ new_sock = (uv__stream_fd(handle) == -1);
+
+ if (new_sock) {
+ err = uv__socket(AF_UNIX, SOCK_STREAM, 0);
+ if (err < 0)
+ goto out;
+ handle->io_watcher.fd = err;
+ }
+
+ memset(&saddr, 0, sizeof saddr);
+ uv__strscpy(saddr.sun_path, name, sizeof(saddr.sun_path));
+ saddr.sun_family = AF_UNIX;
+
+ do {
+ r = connect(uv__stream_fd(handle),
+ (struct sockaddr*)&saddr, sizeof saddr);
+ }
+ while (r == -1 && errno == EINTR);
+
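+  /* A nonblocking connect() may return EINPROGRESS; completion (or failure)
+   * is reported later via POLLOUT on the io watcher started below.
+   */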
+ if (r == -1 && errno != EINPROGRESS) {
+ err = UV__ERR(errno);
+#if defined(__CYGWIN__) || defined(__MSYS__)
+ /* EBADF is supposed to mean that the socket fd is bad, but
+ Cygwin reports EBADF instead of ENOTSOCK when the file is
+ not a socket. We do not expect to see a bad fd here
+ (e.g. due to new_sock), so translate the error. */
+ if (err == UV_EBADF)
+ err = UV_ENOTSOCK;
+#endif
+ goto out;
+ }
+
+ err = 0;
+ if (new_sock) {
+ err = uv__stream_open((uv_stream_t*)handle,
+ uv__stream_fd(handle),
+ UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
+ }
+
+ if (err == 0)
+ uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
+
+out:
+ handle->delayed_error = err;
+ handle->connect_req = req;
+
+ uv__req_init(handle->loop, req, UV_CONNECT);
+ req->handle = (uv_stream_t*)handle;
+ req->cb = cb;
+ QUEUE_INIT(&req->queue);
+
+ /* Force callback to run on next tick in case of error. */
+ if (err)
+ uv__io_feed(handle->loop, &handle->io_watcher);
+
+}
+
+
+static int uv__pipe_getsockpeername(const uv_pipe_t* handle,
+ uv__peersockfunc func,
+ char* buffer,
+ size_t* size) {
+ struct sockaddr_un sa;
+ socklen_t addrlen;
+ int err;
+
+ addrlen = sizeof(sa);
+ memset(&sa, 0, addrlen);
+ err = uv__getsockpeername((const uv_handle_t*) handle,
+ func,
+ (struct sockaddr*) &sa,
+ (int*) &addrlen);
+ if (err < 0) {
+ *size = 0;
+ return err;
+ }
+
+#if defined(__linux__)
+ if (sa.sun_path[0] == 0)
+ /* Linux abstract namespace */
+ addrlen -= offsetof(struct sockaddr_un, sun_path);
+ else
+#endif
+ addrlen = strlen(sa.sun_path);
+
+
+ if ((size_t)addrlen >= *size) {
+ *size = addrlen + 1;
+ return UV_ENOBUFS;
+ }
+
+ memcpy(buffer, sa.sun_path, addrlen);
+ *size = addrlen;
+
+ /* only null-terminate if it's not an abstract socket */
+ if (buffer[0] != '\0')
+ buffer[addrlen] = '\0';
+
+ return 0;
+}
+
+
+int uv_pipe_getsockname(const uv_pipe_t* handle, char* buffer, size_t* size) {
+ return uv__pipe_getsockpeername(handle, getsockname, buffer, size);
+}
+
+
+int uv_pipe_getpeername(const uv_pipe_t* handle, char* buffer, size_t* size) {
+ return uv__pipe_getsockpeername(handle, getpeername, buffer, size);
+}
+
+
+void uv_pipe_pending_instances(uv_pipe_t* handle, int count) {
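+  /* No-op on Unix; pending instance counts only apply to the Windows
+   * named-pipe implementation.
+   */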
+}
+
+
+int uv_pipe_pending_count(uv_pipe_t* handle) {
+ uv__stream_queued_fds_t* queued_fds;
+
+ if (!handle->ipc)
+ return 0;
+
+ if (handle->accepted_fd == -1)
+ return 0;
+
+ if (handle->queued_fds == NULL)
+ return 1;
+
+ queued_fds = handle->queued_fds;
+ return queued_fds->offset + 1;
+}
+
+
+uv_handle_type uv_pipe_pending_type(uv_pipe_t* handle) {
+ if (!handle->ipc)
+ return UV_UNKNOWN_HANDLE;
+
+ if (handle->accepted_fd == -1)
+ return UV_UNKNOWN_HANDLE;
+ else
+ return uv_guess_handle(handle->accepted_fd);
+}
+
+
+int uv_pipe_chmod(uv_pipe_t* handle, int mode) {
+ unsigned desired_mode;
+ struct stat pipe_stat;
+ char* name_buffer;
+ size_t name_len;
+ int r;
+
+ if (handle == NULL || uv__stream_fd(handle) == -1)
+ return UV_EBADF;
+
+ if (mode != UV_READABLE &&
+ mode != UV_WRITABLE &&
+ mode != (UV_WRITABLE | UV_READABLE))
+ return UV_EINVAL;
+
+  /* Unfortunately fchmod() does not work on all platforms, so we use chmod()
+   * on the socket's path instead. */
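+  /* Call uv_pipe_getsockname() twice: the first call, with a zero-length
+   * buffer, fails with UV_ENOBUFS and reports the required size in
+   * name_len.
+   */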
+ name_len = 0;
+ r = uv_pipe_getsockname(handle, NULL, &name_len);
+ if (r != UV_ENOBUFS)
+ return r;
+
+ name_buffer = uv__malloc(name_len);
+ if (name_buffer == NULL)
+ return UV_ENOMEM;
+
+ r = uv_pipe_getsockname(handle, name_buffer, &name_len);
+ if (r != 0) {
+ uv__free(name_buffer);
+ return r;
+ }
+
+ /* stat must be used as fstat has a bug on Darwin */
+ if (stat(name_buffer, &pipe_stat) == -1) {
+ uv__free(name_buffer);
+    return UV__ERR(errno);
+ }
+
+ desired_mode = 0;
+ if (mode & UV_READABLE)
+ desired_mode |= S_IRUSR | S_IRGRP | S_IROTH;
+ if (mode & UV_WRITABLE)
+ desired_mode |= S_IWUSR | S_IWGRP | S_IWOTH;
+
+ /* Exit early if pipe already has desired mode. */
+ if ((pipe_stat.st_mode & desired_mode) == desired_mode) {
+ uv__free(name_buffer);
+ return 0;
+ }
+
+ pipe_stat.st_mode |= desired_mode;
+
+ r = chmod(name_buffer, pipe_stat.st_mode);
+ uv__free(name_buffer);
+
+ return r != -1 ? 0 : UV__ERR(errno);
+}
+
+
+int uv_pipe(uv_os_fd_t fds[2], int read_flags, int write_flags) {
+ uv_os_fd_t temp[2];
+ int err;
+#if defined(__FreeBSD__) || defined(__linux__)
+ int flags = O_CLOEXEC;
+
+ if ((read_flags & UV_NONBLOCK_PIPE) && (write_flags & UV_NONBLOCK_PIPE))
+ flags |= UV_FS_O_NONBLOCK;
+
+ if (pipe2(temp, flags))
+ return UV__ERR(errno);
+
+ if (flags & UV_FS_O_NONBLOCK) {
+ fds[0] = temp[0];
+ fds[1] = temp[1];
+ return 0;
+ }
+#else
+ if (pipe(temp))
+ return UV__ERR(errno);
+
+ if ((err = uv__cloexec(temp[0], 1)))
+ goto fail;
+
+ if ((err = uv__cloexec(temp[1], 1)))
+ goto fail;
+#endif
+
+ if (read_flags & UV_NONBLOCK_PIPE)
+ if ((err = uv__nonblock(temp[0], 1)))
+ goto fail;
+
+ if (write_flags & UV_NONBLOCK_PIPE)
+ if ((err = uv__nonblock(temp[1], 1)))
+ goto fail;
+
+ fds[0] = temp[0];
+ fds[1] = temp[1];
+ return 0;
+
+fail:
+ uv__close(temp[0]);
+ uv__close(temp[1]);
+ return err;
+}
+
+
+int uv__make_pipe(int fds[2], int flags) {
+ return uv_pipe(fds,
+ flags & UV_NONBLOCK_PIPE,
+ flags & UV_NONBLOCK_PIPE);
+}
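+
+/* Illustrative sketch (not part of libuv): how a caller might use uv_pipe()
+ * above to create a close-on-exec pipe whose read end is non-blocking, then
+ * hand the write end to a child. Names are placeholders; error handling is
+ * abbreviated. */
+#if 0
+static int example_make_child_pipe(uv_os_fd_t* parent_end,
+                                   uv_os_fd_t* child_end) {
+  uv_os_fd_t fds[2];
+  int err;
+
+  /* Only the parent's (read) end needs to be non-blocking. */
+  err = uv_pipe(fds, UV_NONBLOCK_PIPE, 0);
+  if (err != 0)
+    return err;  /* Already a libuv error code (UV_E*). */
+
+  *parent_end = fds[0];
+  *child_end = fds[1];
+  return 0;
+}
+#endif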
diff --git a/Utilities/cmlibuv/src/unix/poll.c b/Utilities/cmlibuv/src/unix/poll.c
new file mode 100644
index 0000000000..7a12e2d148
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/poll.c
@@ -0,0 +1,160 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+
+
+static void uv__poll_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
+ uv_poll_t* handle;
+ int pevents;
+
+ handle = container_of(w, uv_poll_t, io_watcher);
+
+ /*
+ * As documented in the kernel source fs/kernfs/file.c #780
+ * poll will return POLLERR|POLLPRI in case of sysfs
+ * polling. This does not happen in case of out-of-band
+ * TCP messages.
+ *
+ * The above is the case on (at least) FreeBSD and Linux.
+ *
+ * So to properly determine a POLLPRI or a POLLERR we need
+ * to check for both.
+ */
+ if ((events & POLLERR) && !(events & UV__POLLPRI)) {
+ uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
+ uv__handle_stop(handle);
+ handle->poll_cb(handle, UV_EBADF, 0);
+ return;
+ }
+
+ pevents = 0;
+ if (events & POLLIN)
+ pevents |= UV_READABLE;
+ if (events & UV__POLLPRI)
+ pevents |= UV_PRIORITIZED;
+ if (events & POLLOUT)
+ pevents |= UV_WRITABLE;
+ if (events & UV__POLLRDHUP)
+ pevents |= UV_DISCONNECT;
+
+ handle->poll_cb(handle, 0, pevents);
+}
+
+
+int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) {
+ int err;
+
+ if (uv__fd_exists(loop, fd))
+ return UV_EEXIST;
+
+ err = uv__io_check_fd(loop, fd);
+ if (err)
+ return err;
+
+ /* If ioctl(FIONBIO) reports ENOTTY, try fcntl(F_GETFL) + fcntl(F_SETFL).
+ * Workaround for e.g. kqueue fds not supporting ioctls.
+ */
+ err = uv__nonblock(fd, 1);
+#if UV__NONBLOCK_IS_IOCTL
+ if (err == UV_ENOTTY)
+ err = uv__nonblock_fcntl(fd, 1);
+#endif
+
+ if (err)
+ return err;
+
+ uv__handle_init(loop, (uv_handle_t*) handle, UV_POLL);
+ uv__io_init(&handle->io_watcher, uv__poll_io, fd);
+ handle->poll_cb = NULL;
+ return 0;
+}
+
+
+int uv_poll_init_socket(uv_loop_t* loop, uv_poll_t* handle,
+ uv_os_sock_t socket) {
+ return uv_poll_init(loop, handle, socket);
+}
+
+
+static void uv__poll_stop(uv_poll_t* handle) {
+ uv__io_stop(handle->loop,
+ &handle->io_watcher,
+ POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
+ uv__handle_stop(handle);
+ uv__platform_invalidate_fd(handle->loop, handle->io_watcher.fd);
+}
+
+
+int uv_poll_stop(uv_poll_t* handle) {
+ assert(!uv__is_closing(handle));
+ uv__poll_stop(handle);
+ return 0;
+}
+
+
+int uv_poll_start(uv_poll_t* handle, int pevents, uv_poll_cb poll_cb) {
+ uv__io_t** watchers;
+ uv__io_t* w;
+ int events;
+
+ assert((pevents & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT |
+ UV_PRIORITIZED)) == 0);
+ assert(!uv__is_closing(handle));
+
+ watchers = handle->loop->watchers;
+ w = &handle->io_watcher;
+
+ if (uv__fd_exists(handle->loop, w->fd))
+ if (watchers[w->fd] != w)
+ return UV_EEXIST;
+
+ uv__poll_stop(handle);
+
+ if (pevents == 0)
+ return 0;
+
+ events = 0;
+ if (pevents & UV_READABLE)
+ events |= POLLIN;
+ if (pevents & UV_PRIORITIZED)
+ events |= UV__POLLPRI;
+ if (pevents & UV_WRITABLE)
+ events |= POLLOUT;
+ if (pevents & UV_DISCONNECT)
+ events |= UV__POLLRDHUP;
+
+ uv__io_start(handle->loop, &handle->io_watcher, events);
+ uv__handle_start(handle);
+ handle->poll_cb = poll_cb;
+
+ return 0;
+}
+
+
+void uv__poll_close(uv_poll_t* handle) {
+ uv__poll_stop(handle);
+}
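+
+/* Illustrative sketch (not part of libuv): the typical uv_poll lifecycle for
+ * an externally created socket, using the functions defined above. `sock`
+ * and `on_event` are placeholders. */
+#if 0
+static void on_event(uv_poll_t* handle, int status, int events) {
+  if (status < 0)
+    return;  /* e.g. UV_EBADF after the fd went away. */
+  if (events & UV_READABLE) { /* read from the fd... */ }
+  if (events & UV_DISCONNECT)
+    uv_poll_stop(handle);
+}
+
+static int example_watch_socket(uv_loop_t* loop, uv_poll_t* handle, int sock) {
+  int err;
+
+  err = uv_poll_init_socket(loop, handle, sock);
+  if (err != 0)
+    return err;
+
+  return uv_poll_start(handle, UV_READABLE | UV_DISCONNECT, on_event);
+}
+#endif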
diff --git a/Utilities/cmlibuv/src/unix/posix-hrtime.c b/Utilities/cmlibuv/src/unix/posix-hrtime.c
new file mode 100644
index 0000000000..870b45c76c
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/posix-hrtime.c
@@ -0,0 +1,74 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#if defined(__APPLE__)
+/* Special case for CMake bootstrap: no clock_gettime on macOS < 10.12 */
+
+#ifndef CMAKE_BOOTSTRAP
+#error "This code path meant only for use during CMake bootstrap."
+#endif
+
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+ static mach_timebase_info_data_t info;
+
+ if ((ACCESS_ONCE(uint32_t, info.numer) == 0 ||
+ ACCESS_ONCE(uint32_t, info.denom) == 0) &&
+ mach_timebase_info(&info) != KERN_SUCCESS)
+ abort();
+
+ return mach_absolute_time() * info.numer / info.denom;
+}
+
+#elif defined(__hpux)
+/* Special case for CMake bootstrap: no CLOCK_MONOTONIC on HP-UX */
+
+#ifndef CMAKE_BOOTSTRAP
+#error "This code path meant only for use during CMake bootstrap."
+#endif
+
+#include <stdint.h>
+#include <time.h>
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+ return (uint64_t) gethrtime();
+}
+
+#else
+
+#include <stdint.h>
+#include <time.h>
+
+#undef NANOSEC
+#define NANOSEC ((uint64_t) 1e9)
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+ struct timespec ts;
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return (((uint64_t) ts.tv_sec) * NANOSEC + ts.tv_nsec);
+}
+
+#endif
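+
+/* Illustrative sketch (not part of libuv): every variant of uv__hrtime()
+ * above returns monotonic nanoseconds, so elapsed time is a plain
+ * subtraction. The public wrapper uv_hrtime() exposes the same value. */
+#if 0
+static uint64_t example_elapsed_ns(void) {
+  uint64_t start;
+  uint64_t end;
+
+  start = uv_hrtime();
+  /* ... do some work ... */
+  end = uv_hrtime();
+
+  return end - start;  /* Well-defined even across wrap (unsigned math). */
+}
+#endif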
diff --git a/Utilities/cmlibuv/src/unix/posix-poll.c b/Utilities/cmlibuv/src/unix/posix-poll.c
new file mode 100644
index 0000000000..0f4bf93874
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/posix-poll.c
@@ -0,0 +1,374 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+/* POSIX defines poll() as a portable way to wait on file descriptors.
+ * Here we maintain a dynamically sized array of file descriptors and
+ * events to pass as the first argument to poll().
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <errno.h>
+#include <unistd.h>
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+ loop->poll_fds = NULL;
+ loop->poll_fds_used = 0;
+ loop->poll_fds_size = 0;
+ loop->poll_fds_iterating = 0;
+ return 0;
+}
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+ uv__free(loop->poll_fds);
+ loop->poll_fds = NULL;
+}
+
+int uv__io_fork(uv_loop_t* loop) {
+ uv__platform_loop_delete(loop);
+ return uv__platform_loop_init(loop);
+}
+
+/* Allocate or dynamically resize our poll fds array. */
+static void uv__pollfds_maybe_resize(uv_loop_t* loop) {
+ size_t i;
+ size_t n;
+ struct pollfd* p;
+
+ if (loop->poll_fds_used < loop->poll_fds_size)
+ return;
+
+ n = loop->poll_fds_size ? loop->poll_fds_size * 2 : 64;
+ p = uv__reallocf(loop->poll_fds, n * sizeof(*loop->poll_fds));
+ if (p == NULL)
+ abort();
+
+ loop->poll_fds = p;
+ for (i = loop->poll_fds_size; i < n; i++) {
+ loop->poll_fds[i].fd = -1;
+ loop->poll_fds[i].events = 0;
+ loop->poll_fds[i].revents = 0;
+ }
+ loop->poll_fds_size = n;
+}
+
+/* Primitive swap operation on poll fds array elements. */
+static void uv__pollfds_swap(uv_loop_t* loop, size_t l, size_t r) {
+ struct pollfd pfd;
+ pfd = loop->poll_fds[l];
+ loop->poll_fds[l] = loop->poll_fds[r];
+ loop->poll_fds[r] = pfd;
+}
+
+/* Add a watcher's fd to our poll fds array with its pending events. */
+static void uv__pollfds_add(uv_loop_t* loop, uv__io_t* w) {
+ size_t i;
+ struct pollfd* pe;
+
+ /* If the fd is already in the set just update its events. */
+ assert(!loop->poll_fds_iterating);
+ for (i = 0; i < loop->poll_fds_used; ++i) {
+ if (loop->poll_fds[i].fd == w->fd) {
+ loop->poll_fds[i].events = w->pevents;
+ return;
+ }
+ }
+
+ /* Otherwise, allocate a new slot in the set for the fd. */
+ uv__pollfds_maybe_resize(loop);
+ pe = &loop->poll_fds[loop->poll_fds_used++];
+ pe->fd = w->fd;
+ pe->events = w->pevents;
+}
+
+/* Remove a watcher's fd from our poll fds array. */
+static void uv__pollfds_del(uv_loop_t* loop, int fd) {
+ size_t i;
+ assert(!loop->poll_fds_iterating);
+ for (i = 0; i < loop->poll_fds_used;) {
+ if (loop->poll_fds[i].fd == fd) {
+ /* swap to last position and remove */
+ --loop->poll_fds_used;
+ uv__pollfds_swap(loop, i, loop->poll_fds_used);
+ loop->poll_fds[loop->poll_fds_used].fd = -1;
+ loop->poll_fds[loop->poll_fds_used].events = 0;
+ loop->poll_fds[loop->poll_fds_used].revents = 0;
+      /* This function is called with an fd of -1 to purge the invalidated
+       * fds, so there may be multiple entries to remove.
+       */
+ if (-1 != fd)
+ return;
+ } else {
+ /* We must only increment the loop counter when the fds do not match.
+ * Otherwise, when we are purging an invalidated fd, the value just
+ * swapped here from the previous end of the array will be skipped.
+ */
+ ++i;
+ }
+ }
+}
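+
+/* Illustrative sketch (not part of libuv): the swap-with-last removal idiom
+ * used above, shown on a plain int array. Order is not preserved, but each
+ * removal is O(1) and no holes are left behind. */
+#if 0
+static void example_swap_remove(int* items, size_t* used, size_t i) {
+  *used -= 1;
+  items[i] = items[*used];  /* Move the last element into the gap. */
+}
+#endif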
+
+
+void uv__io_poll(uv_loop_t* loop, int timeout) {
+ sigset_t* pset;
+ sigset_t set;
+ uint64_t time_base;
+ uint64_t time_diff;
+ QUEUE* q;
+ uv__io_t* w;
+ size_t i;
+ unsigned int nevents;
+ int nfds;
+ int have_signals;
+ struct pollfd* pe;
+ int fd;
+ int user_timeout;
+ int reset_timeout;
+
+ if (loop->nfds == 0) {
+ assert(QUEUE_EMPTY(&loop->watcher_queue));
+ return;
+ }
+
+ /* Take queued watchers and add their fds to our poll fds array. */
+ while (!QUEUE_EMPTY(&loop->watcher_queue)) {
+ q = QUEUE_HEAD(&loop->watcher_queue);
+ QUEUE_REMOVE(q);
+ QUEUE_INIT(q);
+
+ w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+ assert(w->pevents != 0);
+ assert(w->fd >= 0);
+ assert(w->fd < (int) loop->nwatchers);
+
+ uv__pollfds_add(loop, w);
+
+ w->events = w->pevents;
+ }
+
+ /* Prepare a set of signals to block around poll(), if any. */
+ pset = NULL;
+ if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
+ pset = &set;
+ sigemptyset(pset);
+ sigaddset(pset, SIGPROF);
+ }
+
+ assert(timeout >= -1);
+ time_base = loop->time;
+
+ if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
+ reset_timeout = 1;
+ user_timeout = timeout;
+ timeout = 0;
+ } else {
+ reset_timeout = 0;
+ }
+
+ /* Loop calls to poll() and processing of results. If we get some
+ * results from poll() but they turn out not to be interesting to
+ * our caller then we need to loop around and poll() again.
+ */
+ for (;;) {
+ /* Only need to set the provider_entry_time if timeout != 0. The function
+ * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
+ */
+ if (timeout != 0)
+ uv__metrics_set_provider_entry_time(loop);
+
+ if (pset != NULL)
+ if (pthread_sigmask(SIG_BLOCK, pset, NULL))
+ abort();
+ nfds = poll(loop->poll_fds, (nfds_t)loop->poll_fds_used, timeout);
+ if (pset != NULL)
+ if (pthread_sigmask(SIG_UNBLOCK, pset, NULL))
+ abort();
+
+ /* Update loop->time unconditionally. It's tempting to skip the update when
+ * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
+ * operating system didn't reschedule our process while in the syscall.
+ */
+ SAVE_ERRNO(uv__update_time(loop));
+
+ if (nfds == 0) {
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ if (timeout == -1)
+ continue;
+ if (timeout > 0)
+ goto update_timeout;
+ }
+
+ assert(timeout != -1);
+ return;
+ }
+
+ if (nfds == -1) {
+ if (errno != EINTR)
+ abort();
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (timeout == -1)
+ continue;
+
+ if (timeout == 0)
+ return;
+
+ /* Interrupted by a signal. Update timeout and poll again. */
+ goto update_timeout;
+ }
+
+ /* Tell uv__platform_invalidate_fd not to manipulate our array
+ * while we are iterating over it.
+ */
+ loop->poll_fds_iterating = 1;
+
+ /* Initialize a count of events that we care about. */
+ nevents = 0;
+ have_signals = 0;
+
+ /* Loop over the entire poll fds array looking for returned events. */
+ for (i = 0; i < loop->poll_fds_used; i++) {
+ pe = loop->poll_fds + i;
+ fd = pe->fd;
+
+ /* Skip invalidated events, see uv__platform_invalidate_fd. */
+ if (fd == -1)
+ continue;
+
+ assert(fd >= 0);
+ assert((unsigned) fd < loop->nwatchers);
+
+ w = loop->watchers[fd];
+
+ if (w == NULL) {
+ /* File descriptor that we've stopped watching, ignore. */
+ uv__platform_invalidate_fd(loop, fd);
+ continue;
+ }
+
+ /* Filter out events that user has not requested us to watch
+ * (e.g. POLLNVAL).
+ */
+ pe->revents &= w->pevents | POLLERR | POLLHUP;
+
+ if (pe->revents != 0) {
+ /* Run signal watchers last. */
+ if (w == &loop->signal_io_watcher) {
+ have_signals = 1;
+ } else {
+ uv__metrics_update_idle_time(loop);
+ w->cb(loop, w, pe->revents);
+ }
+
+ nevents++;
+ }
+ }
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (have_signals != 0) {
+ uv__metrics_update_idle_time(loop);
+ loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
+ }
+
+ loop->poll_fds_iterating = 0;
+
+ /* Purge invalidated fds from our poll fds array. */
+ uv__pollfds_del(loop, -1);
+
+ if (have_signals != 0)
+ return; /* Event loop should cycle now so don't poll again. */
+
+ if (nevents != 0)
+ return;
+
+ if (timeout == 0)
+ return;
+
+ if (timeout == -1)
+ continue;
+
+update_timeout:
+ assert(timeout > 0);
+
+ time_diff = loop->time - time_base;
+ if (time_diff >= (uint64_t) timeout)
+ return;
+
+ timeout -= time_diff;
+ }
+}
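+
+/* Illustrative sketch (not part of libuv): the timeout bookkeeping used
+ * above, reduced to a bare poll() retry loop. `now_ms` stands in for any
+ * monotonic millisecond clock supplied by the caller. */
+#if 0
+static int example_poll_full_timeout(struct pollfd* fds,
+                                     nfds_t nfds,
+                                     int timeout,
+                                     uint64_t (*now_ms)(void)) {
+  uint64_t base;
+  uint64_t diff;
+  int r;
+
+  base = now_ms();
+  for (;;) {
+    r = poll(fds, nfds, timeout);
+    if (r >= 0 || errno != EINTR)
+      return r;  /* Results, or a real error. */
+    if (timeout == -1)
+      continue;  /* Infinite wait: just retry. */
+    if (timeout == 0)
+      return 0;
+    diff = now_ms() - base;  /* Charge the interrupted wait against it. */
+    if (diff >= (uint64_t) timeout)
+      return 0;  /* The full timeout has elapsed. */
+    timeout -= (int) diff;
+    base += diff;
+  }
+}
+#endif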
+
+/* Remove the given fd from our poll fds array because no one
+ * is interested in its events anymore.
+ */
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
+ size_t i;
+
+ assert(fd >= 0);
+
+ if (loop->poll_fds_iterating) {
+ /* uv__io_poll is currently iterating. Just invalidate fd. */
+ for (i = 0; i < loop->poll_fds_used; i++)
+ if (loop->poll_fds[i].fd == fd) {
+ loop->poll_fds[i].fd = -1;
+ loop->poll_fds[i].events = 0;
+ loop->poll_fds[i].revents = 0;
+ }
+ } else {
+ /* uv__io_poll is not iterating. Delete fd from the set. */
+ uv__pollfds_del(loop, fd);
+ }
+}
+
+/* Check whether the given fd is supported by poll(). */
+int uv__io_check_fd(uv_loop_t* loop, int fd) {
+ struct pollfd p[1];
+ int rv;
+
+ p[0].fd = fd;
+ p[0].events = POLLIN;
+
+ do
+ rv = poll(p, 1, 0);
+ while (rv == -1 && (errno == EINTR || errno == EAGAIN));
+
+ if (rv == -1)
+ return UV__ERR(errno);
+
+ if (p[0].revents & POLLNVAL)
+ return UV_EINVAL;
+
+ return 0;
+}
diff --git a/Utilities/cmlibuv/src/unix/process.c b/Utilities/cmlibuv/src/unix/process.c
new file mode 100644
index 0000000000..0de5c4695b
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/process.c
@@ -0,0 +1,1140 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include <string.h>
+
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <sched.h>
+
+#if defined(__APPLE__)
+# include <spawn.h>
+# include <paths.h>
+# include <sys/kauth.h>
+# include <sys/types.h>
+# include <sys/sysctl.h>
+# include <dlfcn.h>
+# include <crt_externs.h>
+# include <xlocale.h>
+# define environ (*_NSGetEnviron())
+
+/* macOS 10.14 and earlier do not define this constant */
+# ifndef POSIX_SPAWN_SETSID
+# define POSIX_SPAWN_SETSID 1024
+# endif
+
+#else
+extern char **environ;
+#endif
+
+#if defined(__linux__) || defined(__GLIBC__)
+# include <grp.h>
+#endif
+
+#if defined(__MVS__)
+# include "zos-base.h"
+#endif
+
+#ifndef CMAKE_BOOTSTRAP
+#if defined(__linux__)
+# define uv__cpu_set_t cpu_set_t
+#elif defined(__FreeBSD__)
+# include <sys/param.h>
+# include <sys/cpuset.h>
+# include <pthread_np.h>
+# define uv__cpu_set_t cpuset_t
+#endif
+#endif
+
+#if defined(__APPLE__) || \
+ defined(__DragonFly__) || \
+ defined(__FreeBSD__) || \
+ defined(__NetBSD__) || \
+ defined(__OpenBSD__)
+#include <sys/event.h>
+#else
+#define UV_USE_SIGCHLD
+#endif
+
+#ifdef UV_USE_SIGCHLD
+static void uv__chld(uv_signal_t* handle, int signum) {
+ assert(signum == SIGCHLD);
+ uv__wait_children(handle->loop);
+}
+#endif
+
+void uv__wait_children(uv_loop_t* loop) {
+ uv_process_t* process;
+ int exit_status;
+ int term_signal;
+ int status;
+ int options;
+ pid_t pid;
+ QUEUE pending;
+ QUEUE* q;
+ QUEUE* h;
+
+ QUEUE_INIT(&pending);
+
+ h = &loop->process_handles;
+ q = QUEUE_HEAD(h);
+ while (q != h) {
+ process = QUEUE_DATA(q, uv_process_t, queue);
+ q = QUEUE_NEXT(q);
+
+#ifndef UV_USE_SIGCHLD
+ if ((process->flags & UV_HANDLE_REAP) == 0)
+ continue;
+ options = 0;
+ process->flags &= ~UV_HANDLE_REAP;
+#else
+ options = WNOHANG;
+#endif
+
+ do
+ pid = waitpid(process->pid, &status, options);
+ while (pid == -1 && errno == EINTR);
+
+#ifdef UV_USE_SIGCHLD
+ if (pid == 0) /* Not yet exited */
+ continue;
+#endif
+
+ if (pid == -1) {
+ if (errno != ECHILD)
+ abort();
+ /* The child died, and we missed it. This probably means someone else
+ * stole the waitpid from us. Handle this by not handling it at all. */
+ continue;
+ }
+
+ assert(pid == process->pid);
+ process->status = status;
+ QUEUE_REMOVE(&process->queue);
+ QUEUE_INSERT_TAIL(&pending, &process->queue);
+ }
+
+ h = &pending;
+ q = QUEUE_HEAD(h);
+ while (q != h) {
+ process = QUEUE_DATA(q, uv_process_t, queue);
+ q = QUEUE_NEXT(q);
+
+ QUEUE_REMOVE(&process->queue);
+ QUEUE_INIT(&process->queue);
+ uv__handle_stop(process);
+
+ if (process->exit_cb == NULL)
+ continue;
+
+ exit_status = 0;
+ if (WIFEXITED(process->status))
+ exit_status = WEXITSTATUS(process->status);
+
+ term_signal = 0;
+ if (WIFSIGNALED(process->status))
+ term_signal = WTERMSIG(process->status);
+
+ process->exit_cb(process, exit_status, term_signal);
+ }
+ assert(QUEUE_EMPTY(&pending));
+}
+
+/*
+ * Used for initializing the stdio streams declared in options->stdio.
+ * Returns zero on success. See also the cleanup section in uv_spawn().
+ */
+static int uv__process_init_stdio(uv_stdio_container_t* container, int fds[2]) {
+ int mask;
+ int fd;
+
+ mask = UV_IGNORE | UV_CREATE_PIPE | UV_INHERIT_FD | UV_INHERIT_STREAM;
+
+ switch (container->flags & mask) {
+ case UV_IGNORE:
+ return 0;
+
+ case UV_CREATE_PIPE:
+ assert(container->data.stream != NULL);
+ if (container->data.stream->type != UV_NAMED_PIPE)
+ return UV_EINVAL;
+ else
+ return uv_socketpair(SOCK_STREAM, 0, fds, 0, 0);
+
+ case UV_INHERIT_FD:
+ case UV_INHERIT_STREAM:
+ if (container->flags & UV_INHERIT_FD)
+ fd = container->data.fd;
+ else
+ fd = uv__stream_fd(container->data.stream);
+
+ if (fd == -1)
+ return UV_EINVAL;
+
+ fds[1] = fd;
+ return 0;
+
+ default:
+ assert(0 && "Unexpected flags");
+ return UV_EINVAL;
+ }
+}
+
+
+static int uv__process_open_stream(uv_stdio_container_t* container,
+ int pipefds[2]) {
+ int flags;
+ int err;
+
+ if (!(container->flags & UV_CREATE_PIPE) || pipefds[0] < 0)
+ return 0;
+
+ err = uv__close(pipefds[1]);
+ if (err != 0)
+ abort();
+
+ pipefds[1] = -1;
+ uv__nonblock(pipefds[0], 1);
+
+ flags = 0;
+ if (container->flags & UV_WRITABLE_PIPE)
+ flags |= UV_HANDLE_READABLE;
+ if (container->flags & UV_READABLE_PIPE)
+ flags |= UV_HANDLE_WRITABLE;
+
+ return uv__stream_open(container->data.stream, pipefds[0], flags);
+}
+
+
+static void uv__process_close_stream(uv_stdio_container_t* container) {
+ if (!(container->flags & UV_CREATE_PIPE)) return;
+ uv__stream_close(container->data.stream);
+}
+
+
+static void uv__write_int(int fd, int val) {
+ ssize_t n;
+
+ do
+ n = write(fd, &val, sizeof(val));
+ while (n == -1 && errno == EINTR);
+
+  /* The write might have failed (e.g. if the parent process has died),
+   * but there is nothing left to do now except _exit ourselves. */
+ _exit(127);
+}
+
+
+static void uv__write_errno(int error_fd) {
+ uv__write_int(error_fd, UV__ERR(errno));
+}
+
+
+#if !(defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH))
+/* execvp is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED, so must be
+ * avoided. Since this isn't called on those targets, the function
+ * doesn't even need to be defined for them.
+ */
+static void uv__process_child_init(const uv_process_options_t* options,
+ int stdio_count,
+ int (*pipes)[2],
+ int error_fd) {
+ sigset_t signewset;
+ int close_fd;
+ int use_fd;
+ int fd;
+ int n;
+#ifndef CMAKE_BOOTSTRAP
+#if defined(__linux__) || defined(__FreeBSD__)
+ int r;
+ int i;
+ int cpumask_size;
+ uv__cpu_set_t cpuset;
+#endif
+#endif
+
+ /* Reset signal disposition first. Use a hard-coded limit because NSIG is not
+ * fixed on Linux: it's either 32, 34 or 64, depending on whether RT signals
+ * are enabled. We are not allowed to touch RT signal handlers, glibc uses
+ * them internally.
+ */
+ for (n = 1; n < 32; n += 1) {
+ if (n == SIGKILL || n == SIGSTOP)
+ continue; /* Can't be changed. */
+
+#if defined(__HAIKU__)
+ if (n == SIGKILLTHR)
+ continue; /* Can't be changed. */
+#endif
+
+ if (SIG_ERR != signal(n, SIG_DFL))
+ continue;
+
+ uv__write_errno(error_fd);
+ }
+
+ if (options->flags & UV_PROCESS_DETACHED)
+ setsid();
+
+  /* Duplicate low-numbered fds first, since it is not safe to leave them in
+   * place: they could be overwritten during redirection. Example: when
+   * swapping stdout and stderr, without this step fd 2 (stderr) would be
+   * duplicated into fd 1, making both stdout and stderr go to the same fd,
+   * which was not the intention. */
+ for (fd = 0; fd < stdio_count; fd++) {
+ use_fd = pipes[fd][1];
+ if (use_fd < 0 || use_fd >= fd)
+ continue;
+#ifdef F_DUPFD_CLOEXEC /* POSIX 2008 */
+ pipes[fd][1] = fcntl(use_fd, F_DUPFD_CLOEXEC, stdio_count);
+#else
+ pipes[fd][1] = fcntl(use_fd, F_DUPFD, stdio_count);
+#endif
+ if (pipes[fd][1] == -1)
+ uv__write_errno(error_fd);
+#ifndef F_DUPFD_CLOEXEC /* POSIX 2008 */
+ n = uv__cloexec(pipes[fd][1], 1);
+ if (n)
+ uv__write_int(error_fd, n);
+#endif
+ }
+
+ for (fd = 0; fd < stdio_count; fd++) {
+ close_fd = -1;
+ use_fd = pipes[fd][1];
+
+ if (use_fd < 0) {
+ if (fd >= 3)
+ continue;
+ else {
+ /* Redirect stdin, stdout and stderr to /dev/null even if UV_IGNORE is
+ * set. */
+ uv__close_nocheckstdio(fd); /* Free up fd, if it happens to be open. */
+ use_fd = open("/dev/null", fd == 0 ? O_RDONLY : O_RDWR);
+ close_fd = use_fd;
+
+ if (use_fd < 0)
+ uv__write_errno(error_fd);
+ }
+ }
+
+ if (fd == use_fd) {
+ if (close_fd == -1) {
+ n = uv__cloexec(use_fd, 0);
+ if (n)
+ uv__write_int(error_fd, n);
+ }
+ }
+ else {
+ fd = dup2(use_fd, fd);
+ }
+
+ if (fd == -1)
+ uv__write_errno(error_fd);
+
+ if (fd <= 2 && close_fd == -1)
+ uv__nonblock_fcntl(fd, 0);
+
+ if (close_fd >= stdio_count)
+ uv__close(close_fd);
+ }
+
+ if (options->cwd != NULL && chdir(options->cwd))
+ uv__write_errno(error_fd);
+
+ if (options->flags & (UV_PROCESS_SETUID | UV_PROCESS_SETGID)) {
+ /* When dropping privileges from root, the `setgroups` call will
+ * remove any extraneous groups. If we don't call this, then
+ * even though our uid has dropped, we may still have groups
+ * that enable us to do super-user things. This will fail if we
+ * aren't root, so don't bother checking the return value, this
+ * is just done as an optimistic privilege dropping function.
+ */
+ SAVE_ERRNO(setgroups(0, NULL));
+ }
+
+ if ((options->flags & UV_PROCESS_SETGID) && setgid(options->gid))
+ uv__write_errno(error_fd);
+
+ if ((options->flags & UV_PROCESS_SETUID) && setuid(options->uid))
+ uv__write_errno(error_fd);
+
+#ifndef CMAKE_BOOTSTRAP
+#if defined(__linux__) || defined(__FreeBSD__)
+ if (options->cpumask != NULL) {
+ cpumask_size = uv_cpumask_size();
+ assert(options->cpumask_size >= (size_t)cpumask_size);
+
+ CPU_ZERO(&cpuset);
+ for (i = 0; i < cpumask_size; ++i) {
+ if (options->cpumask[i]) {
+ CPU_SET(i, &cpuset);
+ }
+ }
+
+ r = -pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
+ if (r != 0) {
+ uv__write_int(error_fd, r);
+ _exit(127);
+ }
+ }
+#endif
+#endif
+
+ if (options->env != NULL)
+ environ = options->env;
+
+ /* Reset signal mask just before exec. */
+ sigemptyset(&signewset);
+ if (sigprocmask(SIG_SETMASK, &signewset, NULL) != 0)
+ abort();
+
+#ifdef __MVS__
+ execvpe(options->file, options->args, environ);
+#else
+ execvp(options->file, options->args);
+#endif
+
+ uv__write_errno(error_fd);
+}
+#endif
+
+
+#if defined(__APPLE__)
+typedef struct uv__posix_spawn_fncs_tag {
+ struct {
+ int (*addchdir_np)(const posix_spawn_file_actions_t *, const char *);
+ } file_actions;
+} uv__posix_spawn_fncs_t;
+
+
+static uv_once_t posix_spawn_init_once = UV_ONCE_INIT;
+static uv__posix_spawn_fncs_t posix_spawn_fncs;
+static int posix_spawn_can_use_setsid;
+
+
+static void uv__spawn_init_posix_spawn_fncs(void) {
+ /* Try to locate all non-portable functions at runtime */
+ posix_spawn_fncs.file_actions.addchdir_np =
+ dlsym(RTLD_DEFAULT, "posix_spawn_file_actions_addchdir_np");
+}
+
+
+static void uv__spawn_init_can_use_setsid(void) {
+ int which[] = {CTL_KERN, KERN_OSRELEASE};
+ unsigned major;
+ unsigned minor;
+ unsigned patch;
+ char buf[256];
+ size_t len;
+
+ len = sizeof(buf);
+ if (sysctl(which, ARRAY_SIZE(which), buf, &len, NULL, 0))
+ return;
+
+ /* NULL specifies to use LC_C_LOCALE */
+ if (3 != sscanf_l(buf, NULL, "%u.%u.%u", &major, &minor, &patch))
+ return;
+
+ posix_spawn_can_use_setsid = (major >= 19); /* macOS Catalina */
+}
+
+
+static void uv__spawn_init_posix_spawn(void) {
+ /* Init handles to all potentially non-defined functions */
+ uv__spawn_init_posix_spawn_fncs();
+
+ /* Init feature detection for POSIX_SPAWN_SETSID flag */
+ uv__spawn_init_can_use_setsid();
+}
+
+
+static int uv__spawn_set_posix_spawn_attrs(
+ posix_spawnattr_t* attrs,
+ const uv__posix_spawn_fncs_t* posix_spawn_fncs,
+ const uv_process_options_t* options) {
+ int err;
+ unsigned int flags;
+ sigset_t signal_set;
+
+ err = posix_spawnattr_init(attrs);
+ if (err != 0) {
+ /* If initialization fails, no need to de-init, just return */
+ return err;
+ }
+
+ if (options->flags & (UV_PROCESS_SETUID | UV_PROCESS_SETGID)) {
+    /* kauth_cred_issuser currently requires exactly uid == 0 for these
+     * posix_spawn attributes (set_groups_np, setuid_np, setgid_np), which
+     * deviates from the normal specification of setuid (which also honors
+     * euid), and they are also undocumented syscalls, so we do not use them. */
+ err = ENOSYS;
+ goto error;
+ }
+
+ /* Set flags for spawn behavior
+ * 1) POSIX_SPAWN_CLOEXEC_DEFAULT: (Apple Extension) All descriptors in the
+ * parent will be treated as if they had been created with O_CLOEXEC. The
+ * only fds that will be passed on to the child are those manipulated by
+ * the file actions
+ * 2) POSIX_SPAWN_SETSIGDEF: Signals mentioned in spawn-sigdefault in the
+ * spawn attributes will be reset to behave as their default
+ * 3) POSIX_SPAWN_SETSIGMASK: Signal mask will be set to the value of
+ * spawn-sigmask in attributes
+ * 4) POSIX_SPAWN_SETSID: Make the process a new session leader if a detached
+ * session was requested. */
+ flags = POSIX_SPAWN_CLOEXEC_DEFAULT |
+ POSIX_SPAWN_SETSIGDEF |
+ POSIX_SPAWN_SETSIGMASK;
+ if (options->flags & UV_PROCESS_DETACHED) {
+ /* If running on a version of macOS where this flag is not supported,
+ * revert back to the fork/exec flow. Otherwise posix_spawn will
+ * silently ignore the flag. */
+ if (!posix_spawn_can_use_setsid) {
+ err = ENOSYS;
+ goto error;
+ }
+
+ flags |= POSIX_SPAWN_SETSID;
+ }
+ err = posix_spawnattr_setflags(attrs, flags);
+ if (err != 0)
+ goto error;
+
+  /* Reset all signals in the child to their default behavior */
+ sigfillset(&signal_set);
+ err = posix_spawnattr_setsigdefault(attrs, &signal_set);
+ if (err != 0)
+ goto error;
+
+ /* Reset the signal mask for all signals */
+ sigemptyset(&signal_set);
+ err = posix_spawnattr_setsigmask(attrs, &signal_set);
+ if (err != 0)
+ goto error;
+
+ return err;
+
+error:
+ (void) posix_spawnattr_destroy(attrs);
+ return err;
+}
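+
+/* Illustrative sketch (not part of libuv): the minimal posix_spawn() call
+ * sequence that the attribute setup above feeds into. "/bin/echo" is a
+ * placeholder target; error handling is abbreviated. */
+#if 0
+static int example_posix_spawn(void) {
+  char* argv[] = { "echo", "hello", NULL };
+  posix_spawnattr_t attrs;
+  pid_t pid;
+  int status;
+  int err;
+
+  err = posix_spawnattr_init(&attrs);
+  if (err != 0)
+    return err;
+
+  err = posix_spawn(&pid, "/bin/echo", NULL, &attrs, argv, environ);
+  (void) posix_spawnattr_destroy(&attrs);
+  if (err != 0)
+    return err;  /* posix_spawn returns an errno value directly. */
+
+  while (waitpid(pid, &status, 0) == -1 && errno == EINTR)
+    ;
+
+  return 0;
+}
+#endif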
+
+
+static int uv__spawn_set_posix_spawn_file_actions(
+ posix_spawn_file_actions_t* actions,
+ const uv__posix_spawn_fncs_t* posix_spawn_fncs,
+ const uv_process_options_t* options,
+ int stdio_count,
+ int (*pipes)[2]) {
+ int fd;
+ int fd2;
+ int use_fd;
+ int err;
+
+ err = posix_spawn_file_actions_init(actions);
+ if (err != 0) {
+ /* If initialization fails, no need to de-init, just return */
+ return err;
+ }
+
+ /* Set the current working directory if requested */
+ if (options->cwd != NULL) {
+ if (posix_spawn_fncs->file_actions.addchdir_np == NULL) {
+ err = ENOSYS;
+ goto error;
+ }
+
+ err = posix_spawn_fncs->file_actions.addchdir_np(actions, options->cwd);
+ if (err != 0)
+ goto error;
+ }
+
+ /* Do not return ENOSYS after this point, as we may mutate pipes. */
+
+  /* Duplicate low-numbered fds first, since it is not safe to leave them in
+   * place: they could be overwritten during redirection. Example: when
+   * swapping stdout and stderr, without this step fd 2 (stderr) would be
+   * duplicated into fd 1, making both stdout and stderr go to the same fd,
+   * which was not the intention. */
+ for (fd = 0; fd < stdio_count; fd++) {
+ use_fd = pipes[fd][1];
+ if (use_fd < 0 || use_fd >= fd)
+ continue;
+ use_fd = stdio_count;
+ for (fd2 = 0; fd2 < stdio_count; fd2++) {
+ /* If we were not setting POSIX_SPAWN_CLOEXEC_DEFAULT, we would need to
+ * also consider whether fcntl(fd, F_GETFD) returned without the
+ * FD_CLOEXEC flag set. */
+ if (pipes[fd2][1] == use_fd) {
+ use_fd++;
+ fd2 = 0;
+ }
+ }
+ err = posix_spawn_file_actions_adddup2(
+ actions,
+ pipes[fd][1],
+ use_fd);
+ assert(err != ENOSYS);
+ if (err != 0)
+ goto error;
+ pipes[fd][1] = use_fd;
+ }
+
+ /* Second, move the descriptors into their respective places */
+ for (fd = 0; fd < stdio_count; fd++) {
+ use_fd = pipes[fd][1];
+ if (use_fd < 0) {
+ if (fd >= 3)
+ continue;
+ else {
+        /* If ignored, redirect to (or from) /dev/null. */
+ err = posix_spawn_file_actions_addopen(
+ actions,
+ fd,
+ "/dev/null",
+ fd == 0 ? O_RDONLY : O_RDWR,
+ 0);
+ assert(err != ENOSYS);
+ if (err != 0)
+ goto error;
+ continue;
+ }
+ }
+
+ if (fd == use_fd)
+ err = posix_spawn_file_actions_addinherit_np(actions, fd);
+ else
+ err = posix_spawn_file_actions_adddup2(actions, use_fd, fd);
+ assert(err != ENOSYS);
+ if (err != 0)
+ goto error;
+
+ /* Make sure the fd is marked as non-blocking (state shared between child
+ * and parent). */
+ uv__nonblock_fcntl(use_fd, 0);
+ }
+
+ /* Finally, close all the superfluous descriptors */
+ for (fd = 0; fd < stdio_count; fd++) {
+ use_fd = pipes[fd][1];
+ if (use_fd < stdio_count)
+ continue;
+
+ /* Check if we already closed this. */
+ for (fd2 = 0; fd2 < fd; fd2++) {
+ if (pipes[fd2][1] == use_fd)
+ break;
+ }
+ if (fd2 < fd)
+ continue;
+
+ err = posix_spawn_file_actions_addclose(actions, use_fd);
+ assert(err != ENOSYS);
+ if (err != 0)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ (void) posix_spawn_file_actions_destroy(actions);
+ return err;
+}
+
+char* uv__spawn_find_path_in_env(char** env) {
+ char** env_iterator;
+ const char path_var[] = "PATH=";
+
+ /* Look for an environment variable called PATH in the
+ * provided env array, and return its value if found */
+ for (env_iterator = env; *env_iterator != NULL; env_iterator++) {
+ if (strncmp(*env_iterator, path_var, sizeof(path_var) - 1) == 0) {
+ /* Found "PATH=" at the beginning of the string */
+ return *env_iterator + sizeof(path_var) - 1;
+ }
+ }
+
+ return NULL;
+}
+
+
+static int uv__spawn_resolve_and_spawn(const uv_process_options_t* options,
+ posix_spawnattr_t* attrs,
+ posix_spawn_file_actions_t* actions,
+ pid_t* pid) {
+ const char *p;
+ const char *z;
+ const char *path;
+ size_t l;
+ size_t k;
+ int err;
+ int seen_eacces;
+
+ path = NULL;
+ err = -1;
+ seen_eacces = 0;
+
+ /* Short circuit for erroneous case */
+ if (options->file == NULL)
+ return ENOENT;
+
+  /* The environment for the child process is that of the parent unless
+   * overridden by options->env. */
+ char** env = environ;
+ if (options->env != NULL)
+ env = options->env;
+
+ /* If options->file contains a slash, posix_spawn/posix_spawnp should behave
+ * the same, and do not involve PATH resolution at all. The libc
+ * `posix_spawnp` provided by Apple is buggy (since 10.15), so we now emulate it
+ * here, per https://github.com/libuv/libuv/pull/3583. */
+ if (strchr(options->file, '/') != NULL) {
+ do
+ err = posix_spawn(pid, options->file, actions, attrs, options->args, env);
+ while (err == EINTR);
+ return err;
+ }
+
+ /* Look for the definition of PATH in the provided env */
+ path = uv__spawn_find_path_in_env(env);
+
+ /* The following resolution logic (execvpe emulation) is copied from
+ * https://git.musl-libc.org/cgit/musl/tree/src/process/execvp.c
+ * and adapted to work for our specific usage */
+
+ /* If no path was provided in env, use the default value
+ * to look for the executable */
+ if (path == NULL)
+ path = _PATH_DEFPATH;
+
+ k = strnlen(options->file, NAME_MAX + 1);
+ if (k > NAME_MAX)
+ return ENAMETOOLONG;
+
+ l = strnlen(path, PATH_MAX - 1) + 1;
+
+ for (p = path;; p = z) {
+ /* Compose the new process file from the entry in the PATH
+ * environment variable and the actual file name */
+ char b[PATH_MAX + NAME_MAX];
+ z = strchr(p, ':');
+ if (!z)
+ z = p + strlen(p);
+ if ((size_t)(z - p) >= l) {
+ if (!*z++)
+ break;
+
+ continue;
+ }
+ memcpy(b, p, z - p);
+ b[z - p] = '/';
+ memcpy(b + (z - p) + (z > p), options->file, k + 1);
+
+ /* Try to spawn the new process file. If it fails with ENOENT, the
+ * new process file is not in this PATH entry, continue with the next
+ * PATH entry. */
+ do
+ err = posix_spawn(pid, b, actions, attrs, options->args, env);
+ while (err == EINTR);
+
+ switch (err) {
+ case EACCES:
+ seen_eacces = 1;
+ break; /* continue search */
+ case ENOENT:
+ case ENOTDIR:
+ break; /* continue search */
+ default:
+ return err;
+ }
+
+ if (!*z++)
+ break;
+ }
+
+ if (seen_eacces)
+ return EACCES;
+ return err;
+}
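+
+/* Illustrative sketch (not part of libuv): the colon-separated PATH walk used
+ * above, reduced to printing each candidate path that would be tried for a
+ * given file name. snprintf truncation stands in for the length checks. */
+#if 0
+static void example_walk_path(const char* path, const char* file) {
+  char candidate[PATH_MAX + NAME_MAX];
+  const char* p;
+  const char* z;
+
+  for (p = path;; p = z) {
+    z = strchr(p, ':');
+    if (z == NULL)
+      z = p + strlen(p);
+    /* An empty PATH entry means the current directory. */
+    snprintf(candidate, sizeof(candidate), "%.*s%s%s",
+             (int) (z - p), p, z > p ? "/" : "", file);
+    printf("trying %s\n", candidate);
+    if (*z == '\0')
+      break;
+    z++;  /* Skip the ':' separator. */
+  }
+}
+#endif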
+
+
+static int uv__spawn_and_init_child_posix_spawn(
+ const uv_process_options_t* options,
+ int stdio_count,
+ int (*pipes)[2],
+ pid_t* pid,
+ const uv__posix_spawn_fncs_t* posix_spawn_fncs) {
+ int err;
+ posix_spawnattr_t attrs;
+ posix_spawn_file_actions_t actions;
+
+ err = uv__spawn_set_posix_spawn_attrs(&attrs, posix_spawn_fncs, options);
+ if (err != 0)
+ goto error;
+
+ /* This may mutate pipes. */
+ err = uv__spawn_set_posix_spawn_file_actions(&actions,
+ posix_spawn_fncs,
+ options,
+ stdio_count,
+ pipes);
+ if (err != 0) {
+ (void) posix_spawnattr_destroy(&attrs);
+ goto error;
+ }
+
+ /* Try to spawn options->file resolving in the provided environment
+ * if any */
+ err = uv__spawn_resolve_and_spawn(options, &attrs, &actions, pid);
+ assert(err != ENOSYS);
+
+ /* Destroy the actions/attributes */
+ (void) posix_spawn_file_actions_destroy(&actions);
+ (void) posix_spawnattr_destroy(&attrs);
+
+error:
+ /* In an error situation, the attributes and file actions are
+ * already destroyed, only the happy path requires cleanup */
+ return UV__ERR(err);
+}
+#endif
+
+static int uv__spawn_and_init_child_fork(const uv_process_options_t* options,
+ int stdio_count,
+ int (*pipes)[2],
+ int error_fd,
+ pid_t* pid) {
+ sigset_t signewset;
+ sigset_t sigoldset;
+
+ /* Start the child with most signals blocked, to avoid any issues before we
+ * can reset them, but allow program failures to exit (and not hang). */
+ sigfillset(&signewset);
+ sigdelset(&signewset, SIGKILL);
+ sigdelset(&signewset, SIGSTOP);
+ sigdelset(&signewset, SIGTRAP);
+ sigdelset(&signewset, SIGSEGV);
+ sigdelset(&signewset, SIGBUS);
+ sigdelset(&signewset, SIGILL);
+ sigdelset(&signewset, SIGSYS);
+ sigdelset(&signewset, SIGABRT);
+ if (pthread_sigmask(SIG_BLOCK, &signewset, &sigoldset) != 0)
+ abort();
+
+ *pid = fork();
+
+ if (*pid == 0) {
+ /* Fork succeeded, in the child process */
+ uv__process_child_init(options, stdio_count, pipes, error_fd);
+ abort();
+ }
+
+ if (pthread_sigmask(SIG_SETMASK, &sigoldset, NULL) != 0)
+ abort();
+
+ if (*pid == -1)
+ /* Failed to fork */
+ return UV__ERR(errno);
+
+ /* Fork succeeded, in the parent process */
+ return 0;
+}
+
+static int uv__spawn_and_init_child(
+ uv_loop_t* loop,
+ const uv_process_options_t* options,
+ int stdio_count,
+ int (*pipes)[2],
+ pid_t* pid) {
+ int signal_pipe[2] = { -1, -1 };
+ int status;
+ int err;
+ int exec_errorno;
+ ssize_t r;
+
+#if defined(__APPLE__)
+ uv_once(&posix_spawn_init_once, uv__spawn_init_posix_spawn);
+
+ /* Special child process spawn case for macOS Big Sur (11.0) onwards
+ *
+   * Big Sur introduced a significant performance degradation in fork/exec
+   * when the process has many pages mmapped in with MAP_JIT, as is the case
+   * for, say, a JavaScript interpreter. Electron-based applications, for
+   * example, are impacted; the magnitude of the impact depends on how much
+   * the app relies on subprocesses.
+   *
+   * posix_spawn, however, is implemented on macOS in a way that does not
+   * exhibit the problem. This block implements the forking and preparation
+   * logic with posix_spawn and its related primitives. It also takes
+   * advantage of the macOS extension POSIX_SPAWN_CLOEXEC_DEFAULT, which
+   * makes it impossible to leak descriptors to the child process. */
+ err = uv__spawn_and_init_child_posix_spawn(options,
+ stdio_count,
+ pipes,
+ pid,
+ &posix_spawn_fncs);
+
+ /* The posix_spawn flow will return UV_ENOSYS if any of the posix_spawn_x_np
+ * non-standard functions is both _needed_ and _undefined_. In those cases,
+   * fall back to the fork/execve strategy. For all other errors, just fail. */
+ if (err != UV_ENOSYS)
+ return err;
+
+#endif
+
+ /* This pipe is used by the parent to wait until
+ * the child has called `execve()`. We need this
+ * to avoid the following race condition:
+ *
+ * if ((pid = fork()) > 0) {
+ * kill(pid, SIGTERM);
+ * }
+ * else if (pid == 0) {
+ * execve("/bin/cat", argp, envp);
+ * }
+ *
+ * The parent sends a signal immediately after forking.
+ * Since the child may not have called `execve()` yet,
+ * there is no telling what process receives the signal,
+ * our fork or /bin/cat.
+ *
+ * To avoid ambiguity, we create a pipe with both ends
+ * marked close-on-exec. Then, after the call to `fork()`,
+ * the parent polls the read end until it EOFs or errors with EPIPE.
+ */
+ err = uv__make_pipe(signal_pipe, 0);
+ if (err)
+ return err;
+
+ /* Acquire write lock to prevent opening new fds in worker threads */
+ uv_rwlock_wrlock(&loop->cloexec_lock);
+
+ err = uv__spawn_and_init_child_fork(options, stdio_count, pipes, signal_pipe[1], pid);
+
+ /* Release lock in parent process */
+ uv_rwlock_wrunlock(&loop->cloexec_lock);
+
+ uv__close(signal_pipe[1]);
+
+ if (err == 0) {
+ do
+ r = read(signal_pipe[0], &exec_errorno, sizeof(exec_errorno));
+ while (r == -1 && errno == EINTR);
+
+ if (r == 0)
+ ; /* okay, EOF */
+ else if (r == sizeof(exec_errorno)) {
+ do
+ err = waitpid(*pid, &status, 0); /* okay, read errorno */
+ while (err == -1 && errno == EINTR);
+ assert(err == *pid);
+ err = exec_errorno;
+ } else if (r == -1 && errno == EPIPE) {
+ /* Something unknown happened to our child before spawn */
+ do
+ err = waitpid(*pid, &status, 0); /* okay, got EPIPE */
+ while (err == -1 && errno == EINTR);
+ assert(err == *pid);
+ err = UV_EPIPE;
+ } else
+ abort();
+ }
+
+ uv__close_nocheckstdio(signal_pipe[0]);
+
+ return err;
+}
+
+int uv_spawn(uv_loop_t* loop,
+ uv_process_t* process,
+ const uv_process_options_t* options) {
+#if defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH)
+ /* fork is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED. */
+ return UV_ENOSYS;
+#else
+ int pipes_storage[8][2];
+ int (*pipes)[2];
+ int stdio_count;
+ pid_t pid;
+ int err;
+ int exec_errorno;
+ int i;
+
+ if (options->cpumask != NULL) {
+#ifndef CMAKE_BOOTSTRAP
+#if defined(__linux__) || defined(__FreeBSD__)
+ if (options->cpumask_size < (size_t)uv_cpumask_size()) {
+ return UV_EINVAL;
+ }
+#else
+ return UV_ENOTSUP;
+#endif
+#else
+ return UV_ENOTSUP;
+#endif
+ }
+
+ assert(options->file != NULL);
+ assert(!(options->flags & ~(UV_PROCESS_DETACHED |
+ UV_PROCESS_SETGID |
+ UV_PROCESS_SETUID |
+ UV_PROCESS_WINDOWS_HIDE |
+ UV_PROCESS_WINDOWS_HIDE_CONSOLE |
+ UV_PROCESS_WINDOWS_HIDE_GUI |
+ UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS)));
+
+ uv__handle_init(loop, (uv_handle_t*)process, UV_PROCESS);
+ QUEUE_INIT(&process->queue);
+ process->status = 0;
+
+ stdio_count = options->stdio_count;
+ if (stdio_count < 3)
+ stdio_count = 3;
+
+ err = UV_ENOMEM;
+ pipes = pipes_storage;
+ if (stdio_count > (int) ARRAY_SIZE(pipes_storage))
+ pipes = uv__malloc(stdio_count * sizeof(*pipes));
+
+ if (pipes == NULL)
+ goto error;
+
+ for (i = 0; i < stdio_count; i++) {
+ pipes[i][0] = -1;
+ pipes[i][1] = -1;
+ }
+
+ for (i = 0; i < options->stdio_count; i++) {
+ err = uv__process_init_stdio(options->stdio + i, pipes[i]);
+ if (err)
+ goto error;
+ }
+
+#ifdef UV_USE_SIGCHLD
+ uv_signal_start(&loop->child_watcher, uv__chld, SIGCHLD);
+#endif
+
+ /* Spawn the child */
+ exec_errorno = uv__spawn_and_init_child(loop, options, stdio_count, pipes, &pid);
+
+#if 0
+ /* This runs into a nodejs issue (it expects initialized streams, even if the
+ * exec failed).
+ * See https://github.com/libuv/libuv/pull/3107#issuecomment-782482608 */
+ if (exec_errorno != 0)
+ goto error;
+#endif
+
+ /* Activate this handle if exec() happened successfully, even if we later
+ * fail to open a stdio handle. This ensures we can eventually reap the child
+ * with waitpid. */
+ if (exec_errorno == 0) {
+#ifndef UV_USE_SIGCHLD
+ struct kevent event;
+ EV_SET(&event, pid, EVFILT_PROC, EV_ADD | EV_ONESHOT, NOTE_EXIT, 0, 0);
+ if (kevent(loop->backend_fd, &event, 1, NULL, 0, NULL)) {
+ if (errno != ESRCH)
+ abort();
+ /* Process already exited. Call waitpid on the next loop iteration. */
+ process->flags |= UV_HANDLE_REAP;
+ loop->flags |= UV_LOOP_REAP_CHILDREN;
+ }
+#endif
+
+ process->pid = pid;
+ process->exit_cb = options->exit_cb;
+ QUEUE_INSERT_TAIL(&loop->process_handles, &process->queue);
+ uv__handle_start(process);
+ }
+
+ for (i = 0; i < options->stdio_count; i++) {
+ err = uv__process_open_stream(options->stdio + i, pipes[i]);
+ if (err == 0)
+ continue;
+
+ while (i--)
+ uv__process_close_stream(options->stdio + i);
+
+ goto error;
+ }
+
+ if (pipes != pipes_storage)
+ uv__free(pipes);
+
+ return exec_errorno;
+
+error:
+ if (pipes != NULL) {
+ for (i = 0; i < stdio_count; i++) {
+ if (i < options->stdio_count)
+ if (options->stdio[i].flags & (UV_INHERIT_FD | UV_INHERIT_STREAM))
+ continue;
+ if (pipes[i][0] != -1)
+ uv__close_nocheckstdio(pipes[i][0]);
+ if (pipes[i][1] != -1)
+ uv__close_nocheckstdio(pipes[i][1]);
+ }
+
+ if (pipes != pipes_storage)
+ uv__free(pipes);
+ }
+
+ return err;
+#endif
+}
+
+
+int uv_process_kill(uv_process_t* process, int signum) {
+ return uv_kill(process->pid, signum);
+}
+
+
+int uv_kill(int pid, int signum) {
+ if (kill(pid, signum)) {
+#if defined(__MVS__)
+ /* EPERM is returned if the process is a zombie. */
+ siginfo_t infop;
+ if (errno == EPERM &&
+ waitid(P_PID, pid, &infop, WNOHANG | WNOWAIT | WEXITED) == 0)
+ return 0;
+#endif
+ return UV__ERR(errno);
+ } else
+ return 0;
+}
+
+
+void uv__process_close(uv_process_t* handle) {
+ QUEUE_REMOVE(&handle->queue);
+ uv__handle_stop(handle);
+ if (QUEUE_EMPTY(&handle->loop->process_handles))
+ uv_signal_stop(&handle->loop->child_watcher);
+}
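+
+/* Illustrative sketch (not part of libuv): a minimal uv_spawn() call with an
+ * exit callback, assuming a "sleep" executable is available on PATH. */
+#if 0
+static void example_on_exit(uv_process_t* req,
+                            int64_t exit_status,
+                            int term_signal) {
+  printf("exited with status %d, signal %d\n",
+         (int) exit_status, term_signal);
+  uv_close((uv_handle_t*) req, NULL);
+}
+
+static int example_spawn(uv_loop_t* loop) {
+  static uv_process_t child;
+  static char* args[] = { "sleep", "1", NULL };
+  uv_process_options_t options;
+
+  memset(&options, 0, sizeof(options));
+  options.file = "sleep";
+  options.args = args;
+  options.exit_cb = example_on_exit;
+
+  return uv_spawn(loop, &child, &options);  /* 0 on success. */
+}
+#endif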
diff --git a/Utilities/cmlibuv/src/unix/procfs-exepath.c b/Utilities/cmlibuv/src/unix/procfs-exepath.c
new file mode 100644
index 0000000000..00dc021f21
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/procfs-exepath.c
@@ -0,0 +1,45 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stddef.h>
+#include <unistd.h>
+
+int uv_exepath(char* buffer, size_t* size) {
+ ssize_t n;
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ n = *size - 1;
+ if (n > 0)
+ n = readlink("/proc/self/exe", buffer, n);
+
+ if (n == -1)
+ return UV__ERR(errno);
+
+ buffer[n] = '\0';
+ *size = n;
+
+ return 0;
+}
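+
+/* Illustrative sketch (not part of libuv): calling uv_exepath() with an
+ * in/out size, as the API above requires. */
+#if 0
+static void example_print_exepath(void) {
+  char buf[4096];
+  size_t size = sizeof(buf);
+
+  if (uv_exepath(buf, &size) == 0)
+    write(STDOUT_FILENO, buf, size);  /* `size` excludes the trailing NUL. */
+}
+#endif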
diff --git a/Utilities/cmlibuv/src/unix/proctitle.c b/Utilities/cmlibuv/src/unix/proctitle.c
new file mode 100644
index 0000000000..9d1f00ddf6
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/proctitle.c
@@ -0,0 +1,157 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+struct uv__process_title {
+ char* str;
+ size_t len; /* Length of the current process title. */
+ size_t cap; /* Maximum capacity. Computed once in uv_setup_args(). */
+};
+
+extern void uv__set_process_title(const char* title);
+
+static uv_mutex_t process_title_mutex;
+static uv_once_t process_title_mutex_once = UV_ONCE_INIT;
+static struct uv__process_title process_title;
+static void* args_mem;
+
+
+static void init_process_title_mutex_once(void) {
+ uv_mutex_init(&process_title_mutex);
+}
+
+
+char** uv_setup_args(int argc, char** argv) {
+ struct uv__process_title pt;
+ char** new_argv;
+ size_t size;
+ char* s;
+ int i;
+
+ if (argc <= 0)
+ return argv;
+
+ pt.str = argv[0];
+ pt.len = strlen(argv[0]);
+ pt.cap = pt.len + 1;
+
+ /* Calculate how much memory we need for the argv strings. */
+ size = pt.cap;
+ for (i = 1; i < argc; i++)
+ size += strlen(argv[i]) + 1;
+
+ /* Add space for the argv pointers. */
+ size += (argc + 1) * sizeof(char*);
+
+ new_argv = uv__malloc(size);
+ if (new_argv == NULL)
+ return argv;
+
+ /* Copy over the strings and set up the pointer table. */
+ i = 0;
+ s = (char*) &new_argv[argc + 1];
+ size = pt.cap;
+  /* The first string's size (pt.cap) was already computed above, so jump
+   * straight into the loop body for i == 0. */
+  goto loop;
+
+ for (/* empty */; i < argc; i++) {
+ size = strlen(argv[i]) + 1;
+ loop:
+ memcpy(s, argv[i], size);
+ new_argv[i] = s;
+ s += size;
+ }
+ new_argv[i] = NULL;
+
+ pt.cap = argv[i - 1] + size - argv[0];
+
+ args_mem = new_argv;
+ process_title = pt;
+
+ return new_argv;
+}
+
+
+int uv_set_process_title(const char* title) {
+ struct uv__process_title* pt;
+ size_t len;
+
+ /* If uv_setup_args wasn't called or failed, we can't continue. */
+ if (args_mem == NULL)
+ return UV_ENOBUFS;
+
+ pt = &process_title;
+ len = strlen(title);
+
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
+ if (len >= pt->cap) {
+ len = 0;
+ if (pt->cap > 0)
+ len = pt->cap - 1;
+ }
+
+ memcpy(pt->str, title, len);
+ memset(pt->str + len, '\0', pt->cap - len);
+ pt->len = len;
+ uv__set_process_title(pt->str);
+
+ uv_mutex_unlock(&process_title_mutex);
+
+ return 0;
+}
+
+
+int uv_get_process_title(char* buffer, size_t size) {
+ if (buffer == NULL || size == 0)
+ return UV_EINVAL;
+
+ /* If uv_setup_args wasn't called or failed, we can't continue. */
+ if (args_mem == NULL)
+ return UV_ENOBUFS;
+
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
+ if (size <= process_title.len) {
+ uv_mutex_unlock(&process_title_mutex);
+ return UV_ENOBUFS;
+ }
+
+ if (process_title.len != 0)
+ memcpy(buffer, process_title.str, process_title.len + 1);
+
+ buffer[process_title.len] = '\0';
+
+ uv_mutex_unlock(&process_title_mutex);
+
+ return 0;
+}
+
+
+void uv__process_title_cleanup(void) {
+ uv__free(args_mem); /* Keep valgrind happy. */
+ args_mem = NULL;
+}
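+
+/* Illustrative sketch (not part of libuv): the required call order. argv
+ * must be replaced by uv_setup_args()'s return value before any process
+ * title calls can succeed. */
+#if 0
+int main(int argc, char** argv) {
+  char title[64];
+
+  argv = uv_setup_args(argc, argv);  /* Copies argv; enables title support. */
+
+  uv_set_process_title("my-daemon: idle");
+  if (uv_get_process_title(title, sizeof(title)) == 0) {
+    /* `title` now holds the current process title. */
+  }
+  return 0;
+}
+#endif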
diff --git a/Utilities/cmlibuv/src/unix/pthread-fixes.c b/Utilities/cmlibuv/src/unix/pthread-fixes.c
new file mode 100644
index 0000000000..022d79c4e2
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/pthread-fixes.c
@@ -0,0 +1,58 @@
+/* Copyright (c) 2013, Sony Mobile Communications AB
+ * Copyright (c) 2012, Google Inc.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/* Android versions < 4.1 have a broken pthread_sigmask. */
+#include "uv-common.h"
+
+#include <errno.h>
+#include <pthread.h>
+#include <signal.h>
+
+int uv__pthread_sigmask(int how, const sigset_t* set, sigset_t* oset) {
+ static int workaround;
+ int err;
+
+ if (uv__load_relaxed(&workaround)) {
+ return sigprocmask(how, set, oset);
+ } else {
+ err = pthread_sigmask(how, set, oset);
+ if (err) {
+ if (err == EINVAL && sigprocmask(how, set, oset) == 0) {
+ uv__store_relaxed(&workaround, 1);
+ return 0;
+ } else {
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
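+
+/* Editor's sketch (illustrative, not part of libuv): the wrapper is used
+ * exactly like pthread_sigmask(3); once a broken implementation returns
+ * EINVAL and sigprocmask() succeeds, the `workaround` flag is latched and
+ * later calls take the fallback path directly.
+ *
+ *   sigset_t set, old;
+ *   sigemptyset(&set);
+ *   sigaddset(&set, SIGPIPE);
+ *   if (uv__pthread_sigmask(SIG_BLOCK, &set, &old))
+ *     abort();
+ */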
diff --git a/Utilities/cmlibuv/src/unix/qnx.c b/Utilities/cmlibuv/src/unix/qnx.c
new file mode 100644
index 0000000000..ca148d349f
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/qnx.c
@@ -0,0 +1,137 @@
+/* Copyright libuv contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <string.h>
+#include <sys/process.h>
+#include <sys/neutrino.h>
+#include <sys/memmsg.h>
+#include <sys/syspage.h>
+#include <sys/procfs.h>
+
+static void
+get_mem_info(uint64_t* totalmem, uint64_t* freemem) {
+ mem_info_t msg;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.i.type = _MEM_INFO;
+ msg.i.fd = -1;
+
+ if (MsgSend(MEMMGR_COID, &msg.i, sizeof(msg.i), &msg.o, sizeof(msg.o))
+ != -1) {
+ *totalmem = msg.o.info.__posix_tmi_total;
+ *freemem = msg.o.info.posix_tmi_length;
+ } else {
+ *totalmem = 0;
+ *freemem = 0;
+ }
+}
+
+
+void uv_loadavg(double avg[3]) {
+ avg[0] = 0.0;
+ avg[1] = 0.0;
+ avg[2] = 0.0;
+}
+
+
+int uv_exepath(char* buffer, size_t* size) {
+ char path[PATH_MAX];
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+  if (realpath(_cmdname(NULL), path) == NULL)
+    return UV__ERR(errno);
+ strlcpy(buffer, path, *size);
+ *size = strlen(buffer);
+ return 0;
+}
+
+
+uint64_t uv_get_free_memory(void) {
+ uint64_t totalmem;
+ uint64_t freemem;
+ get_mem_info(&totalmem, &freemem);
+ return freemem;
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ uint64_t totalmem;
+ uint64_t freemem;
+ get_mem_info(&totalmem, &freemem);
+ return totalmem;
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0;
+}
+
+
+int uv_resident_set_memory(size_t* rss) {
+ int fd;
+ procfs_asinfo asinfo;
+
+ fd = uv__open_cloexec("/proc/self/ctl", O_RDONLY);
+ if (fd == -1)
+ return UV__ERR(errno);
+
+ if (devctl(fd, DCMD_PROC_ASINFO, &asinfo, sizeof(asinfo), 0) == -1) {
+ uv__close(fd);
+ return UV__ERR(errno);
+ }
+
+ uv__close(fd);
+ *rss = asinfo.rss;
+ return 0;
+}
+
+
+int uv_uptime(double* uptime) {
+ struct qtime_entry* qtime = _SYSPAGE_ENTRY(_syspage_ptr, qtime);
+ *uptime = (qtime->nsec / 1000000000.0);
+ return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ struct cpuinfo_entry* cpuinfo =
+ (struct cpuinfo_entry*)_SYSPAGE_ENTRY(_syspage_ptr, new_cpuinfo);
+ size_t cpuinfo_size = _SYSPAGE_ELEMENT_SIZE(_syspage_ptr, cpuinfo);
+ struct strings_entry* strings = _SYSPAGE_ENTRY(_syspage_ptr, strings);
+ int num_cpus = _syspage_ptr->num_cpu;
+ int i;
+
+ *count = num_cpus;
+ *cpu_infos = uv__malloc(num_cpus * sizeof(**cpu_infos));
+ if (*cpu_infos == NULL)
+ return UV_ENOMEM;
+
+ for (i = 0; i < num_cpus; i++) {
+ (*cpu_infos)[i].model = strdup(&strings->data[cpuinfo->name]);
+ (*cpu_infos)[i].speed = cpuinfo->speed;
+ SYSPAGE_ARRAY_ADJ_OFFSET(cpuinfo, cpuinfo, cpuinfo_size);
+ }
+
+ return 0;
+}
diff --git a/Utilities/cmlibuv/src/unix/random-devurandom.c b/Utilities/cmlibuv/src/unix/random-devurandom.c
new file mode 100644
index 0000000000..05e52a56a3
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/random-devurandom.c
@@ -0,0 +1,93 @@
+/* Copyright libuv contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <sys/stat.h>
+#include <unistd.h>
+
+static uv_once_t once = UV_ONCE_INIT;
+static int status;
+
+
+int uv__random_readpath(const char* path, void* buf, size_t buflen) {
+ struct stat s;
+ size_t pos;
+ ssize_t n;
+ int fd;
+
+ fd = uv__open_cloexec(path, O_RDONLY);
+
+ if (fd < 0)
+ return fd;
+
+ if (fstat(fd, &s)) {
+ uv__close(fd);
+ return UV__ERR(errno);
+ }
+
+ if (!S_ISCHR(s.st_mode)) {
+ uv__close(fd);
+ return UV_EIO;
+ }
+
+ for (pos = 0; pos != buflen; pos += n) {
+ do
+ n = read(fd, (char*) buf + pos, buflen - pos);
+ while (n == -1 && errno == EINTR);
+
+ if (n == -1) {
+ uv__close(fd);
+ return UV__ERR(errno);
+ }
+
+ if (n == 0) {
+ uv__close(fd);
+ return UV_EIO;
+ }
+ }
+
+ uv__close(fd);
+ return 0;
+}
+
+
+static void uv__random_devurandom_init(void) {
+ char c;
+
+ /* Linux's random(4) man page suggests applications should read at least
+ * once from /dev/random before switching to /dev/urandom in order to seed
+ * the system RNG. Reads from /dev/random can of course block indefinitely
+ * until entropy is available but that's the point.
+ */
+ status = uv__random_readpath("/dev/random", &c, 1);
+}
+
+
+int uv__random_devurandom(void* buf, size_t buflen) {
+ uv_once(&once, uv__random_devurandom_init);
+
+ if (status != 0)
+ return status;
+
+ return uv__random_readpath("/dev/urandom", buf, buflen);
+}
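+
+/* Editor's sketch (illustrative, not part of libuv): a caller that wants
+ * 32 random bytes through this fallback; real consumers normally arrive
+ * here via uv_random() after the other backends have been tried.
+ *
+ *   char key[32];
+ *   int rc = uv__random_devurandom(key, sizeof(key));
+ *   // rc is 0, UV_EIO, or a UV__ERR(errno) translation from open/read
+ */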
diff --git a/Utilities/cmlibuv/src/unix/random-getentropy.c b/Utilities/cmlibuv/src/unix/random-getentropy.c
new file mode 100644
index 0000000000..c45d9fd4a2
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/random-getentropy.c
@@ -0,0 +1,57 @@
+/* Copyright libuv contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stddef.h>
+#include <dlfcn.h>
+
+typedef int (*uv__getentropy_cb)(void *, size_t);
+
+static uv__getentropy_cb uv__getentropy;
+static uv_once_t once = UV_ONCE_INIT;
+
+
+static void uv__random_getentropy_init(void) {
+ uv__getentropy = (uv__getentropy_cb) dlsym(RTLD_DEFAULT, "getentropy");
+}
+
+
+int uv__random_getentropy(void* buf, size_t buflen) {
+ size_t pos;
+ size_t stride;
+
+ uv_once(&once, uv__random_getentropy_init);
+
+ if (uv__getentropy == NULL)
+ return UV_ENOSYS;
+
+ /* getentropy() returns an error for requests > 256 bytes. */
+ for (pos = 0, stride = 256; pos + stride < buflen; pos += stride)
+ if (uv__getentropy((char *) buf + pos, stride))
+ return UV__ERR(errno);
+
+ if (uv__getentropy((char *) buf + pos, buflen - pos))
+ return UV__ERR(errno);
+
+ return 0;
+}
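+
+/* Editor's note (illustrative, not part of libuv): the loop above issues
+ * full 256-byte requests while more than 256 bytes remain, then one final
+ * request for the tail; buflen == 600 becomes calls of 256 + 256 + 88
+ * bytes, each within getentropy()'s documented limit.
+ */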
diff --git a/Utilities/cmlibuv/src/unix/random-getrandom.c b/Utilities/cmlibuv/src/unix/random-getrandom.c
new file mode 100644
index 0000000000..bcc94089bc
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/random-getrandom.c
@@ -0,0 +1,88 @@
+/* Copyright libuv contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#ifdef __linux__
+
+#include "linux-syscalls.h"
+
+#define uv__random_getrandom_init() 0
+
+#else /* !__linux__ */
+
+#include <stddef.h>
+#include <dlfcn.h>
+
+typedef ssize_t (*uv__getrandom_cb)(void *, size_t, unsigned);
+
+static uv__getrandom_cb uv__getrandom;
+static uv_once_t once = UV_ONCE_INIT;
+
+static void uv__random_getrandom_init_once(void) {
+ uv__getrandom = (uv__getrandom_cb) dlsym(RTLD_DEFAULT, "getrandom");
+}
+
+static int uv__random_getrandom_init(void) {
+ uv_once(&once, uv__random_getrandom_init_once);
+
+ if (uv__getrandom == NULL)
+ return UV_ENOSYS;
+
+ return 0;
+}
+
+#endif /* !__linux__ */
+
+int uv__random_getrandom(void* buf, size_t buflen) {
+ ssize_t n;
+ size_t pos;
+ int rc;
+
+ rc = uv__random_getrandom_init();
+ if (rc != 0)
+ return rc;
+
+ for (pos = 0; pos != buflen; pos += n) {
+ do {
+ n = buflen - pos;
+
+ /* Most getrandom() implementations promise that reads <= 256 bytes
+ * will always succeed and won't be interrupted by signals.
+       * It's therefore useful to split the request into smaller reads because
+ * one big read may, in theory, continuously fail with EINTR.
+ */
+ if (n > 256)
+ n = 256;
+
+ n = uv__getrandom((char *) buf + pos, n, 0);
+ } while (n == -1 && errno == EINTR);
+
+ if (n == -1)
+ return UV__ERR(errno);
+
+ if (n == 0)
+ return UV_EIO;
+ }
+
+ return 0;
+}
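+
+/* Editor's note (illustrative, not part of libuv): unlike the getentropy()
+ * path, the 256-byte chunking here is folded into the EINTR retry loop; a
+ * short read (0 < n < 256) simply advances `pos` and the outer loop issues
+ * the next request until the buffer is full.
+ */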
diff --git a/Utilities/cmlibuv/src/unix/random-sysctl-linux.c b/Utilities/cmlibuv/src/unix/random-sysctl-linux.c
new file mode 100644
index 0000000000..66ba8d74ec
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/random-sysctl-linux.c
@@ -0,0 +1,99 @@
+/* Copyright libuv contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <errno.h>
+#include <string.h>
+
+#include <syscall.h>
+#include <unistd.h>
+
+
+struct uv__sysctl_args {
+ int* name;
+ int nlen;
+ void* oldval;
+ size_t* oldlenp;
+ void* newval;
+ size_t newlen;
+ unsigned long unused[4];
+};
+
+
+int uv__random_sysctl(void* buf, size_t buflen) {
+ static int name[] = {1 /*CTL_KERN*/, 40 /*KERN_RANDOM*/, 6 /*RANDOM_UUID*/};
+ struct uv__sysctl_args args;
+ char uuid[16];
+ char* p;
+ char* pe;
+ size_t n;
+
+ p = buf;
+ pe = p + buflen;
+
+ while (p < pe) {
+ memset(&args, 0, sizeof(args));
+
+ args.name = name;
+ args.nlen = ARRAY_SIZE(name);
+ args.oldval = uuid;
+ args.oldlenp = &n;
+ n = sizeof(uuid);
+
+ /* Emits a deprecation warning with some kernels but that seems like
+ * an okay trade-off for the fallback of the fallback: this function is
+ * only called when neither getrandom(2) nor /dev/urandom are available.
+ * Fails with ENOSYS on kernels configured without CONFIG_SYSCTL_SYSCALL.
+ * At least arm64 never had a _sysctl system call and therefore doesn't
+ * have a SYS__sysctl define either.
+ */
+#ifdef SYS__sysctl
+ if (syscall(SYS__sysctl, &args) == -1)
+ return UV__ERR(errno);
+#else
+ {
+ (void) &args;
+ return UV_ENOSYS;
+ }
+#endif
+
+ if (n != sizeof(uuid))
+ return UV_EIO; /* Can't happen. */
+
+ /* uuid[] is now a type 4 UUID. Bytes 6 and 8 (counting from zero) contain
+ * 4 and 5 bits of entropy, respectively. For ease of use, we skip those
+ * and only use 14 of the 16 bytes.
+ */
+ uuid[6] = uuid[14];
+ uuid[8] = uuid[15];
+
+ n = pe - p;
+ if (n > 14)
+ n = 14;
+
+ memcpy(p, uuid, n);
+ p += n;
+ }
+
+ return 0;
+}
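+
+/* Editor's note (illustrative, not part of libuv): in a version-4 UUID,
+ * byte 6 carries the version nibble and byte 8 the variant bits, so only
+ * part of each is random. Overwriting them with the two otherwise unused
+ * tail bytes yields 14 fully random bytes per kernel round trip.
+ */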
diff --git a/Utilities/cmlibuv/src/unix/signal.c b/Utilities/cmlibuv/src/unix/signal.c
new file mode 100644
index 0000000000..1133c73a95
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/signal.c
@@ -0,0 +1,558 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#ifndef SA_RESTART
+# define SA_RESTART 0
+#endif
+
+typedef struct {
+ uv_signal_t* handle;
+ int signum;
+} uv__signal_msg_t;
+
+RB_HEAD(uv__signal_tree_s, uv_signal_s);
+
+
+static int uv__signal_unlock(void);
+static int uv__signal_start(uv_signal_t* handle,
+ uv_signal_cb signal_cb,
+ int signum,
+ int oneshot);
+static void uv__signal_event(uv_loop_t* loop, uv__io_t* w, unsigned int events);
+static int uv__signal_compare(uv_signal_t* w1, uv_signal_t* w2);
+static void uv__signal_stop(uv_signal_t* handle);
+static void uv__signal_unregister_handler(int signum);
+
+
+static uv_once_t uv__signal_global_init_guard = UV_ONCE_INIT;
+static struct uv__signal_tree_s uv__signal_tree =
+ RB_INITIALIZER(uv__signal_tree);
+static int uv__signal_lock_pipefd[2] = { -1, -1 };
+
+RB_GENERATE_STATIC(uv__signal_tree_s,
+ uv_signal_s, tree_entry,
+ uv__signal_compare)
+
+static void uv__signal_global_reinit(void);
+
+static void uv__signal_global_init(void) {
+ if (uv__signal_lock_pipefd[0] == -1)
+    /* pthread_atfork can register prepare, parent and child handlers;
+     * this only registers a child handler. That registration is both
+     * persistent and cumulative, so if we kept doing it the handler
+     * would be called multiple times. Thus we only want to do it once.
+     */
+ if (pthread_atfork(NULL, NULL, &uv__signal_global_reinit))
+ abort();
+
+ uv__signal_global_reinit();
+}
+
+
+void uv__signal_cleanup(void) {
+ /* We can only use signal-safe functions here.
+ * That includes read/write and close, fortunately.
+ * We do all of this directly here instead of resetting
+ * uv__signal_global_init_guard because
+ * uv__signal_global_once_init is only called from uv_loop_init
+ * and this needs to function in existing loops.
+ */
+ if (uv__signal_lock_pipefd[0] != -1) {
+ uv__close(uv__signal_lock_pipefd[0]);
+ uv__signal_lock_pipefd[0] = -1;
+ }
+
+ if (uv__signal_lock_pipefd[1] != -1) {
+ uv__close(uv__signal_lock_pipefd[1]);
+ uv__signal_lock_pipefd[1] = -1;
+ }
+}
+
+
+static void uv__signal_global_reinit(void) {
+ uv__signal_cleanup();
+
+ if (uv__make_pipe(uv__signal_lock_pipefd, 0))
+ abort();
+
+ if (uv__signal_unlock())
+ abort();
+}
+
+
+void uv__signal_global_once_init(void) {
+ uv_once(&uv__signal_global_init_guard, uv__signal_global_init);
+}
+
+
+static int uv__signal_lock(void) {
+ int r;
+ char data;
+
+ do {
+ r = read(uv__signal_lock_pipefd[0], &data, sizeof data);
+ } while (r < 0 && errno == EINTR);
+
+ return (r < 0) ? -1 : 0;
+}
+
+
+static int uv__signal_unlock(void) {
+ int r;
+ char data = 42;
+
+ do {
+ r = write(uv__signal_lock_pipefd[1], &data, sizeof data);
+ } while (r < 0 && errno == EINTR);
+
+ return (r < 0) ? -1 : 0;
+}
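+
+/* Editor's note (illustrative, not part of libuv): the pipe doubles as an
+ * async-signal-safe binary semaphore. uv__signal_global_reinit() deposits
+ * one token byte via uv__signal_unlock(); taking the lock is a blocking
+ * read() of that byte and releasing it writes the byte back, operations
+ * that - unlike pthread mutexes - are safe inside a signal handler.
+ */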
+
+
+static void uv__signal_block_and_lock(sigset_t* saved_sigmask) {
+ sigset_t new_mask;
+
+ if (sigfillset(&new_mask))
+ abort();
+
+ /* to shut up valgrind */
+ sigemptyset(saved_sigmask);
+ if (pthread_sigmask(SIG_SETMASK, &new_mask, saved_sigmask))
+ abort();
+
+ if (uv__signal_lock())
+ abort();
+}
+
+
+static void uv__signal_unlock_and_unblock(sigset_t* saved_sigmask) {
+ if (uv__signal_unlock())
+ abort();
+
+ if (pthread_sigmask(SIG_SETMASK, saved_sigmask, NULL))
+ abort();
+}
+
+
+static uv_signal_t* uv__signal_first_handle(int signum) {
+ /* This function must be called with the signal lock held. */
+ uv_signal_t lookup;
+ uv_signal_t* handle;
+
+ lookup.signum = signum;
+ lookup.flags = 0;
+ lookup.loop = NULL;
+
+ handle = RB_NFIND(uv__signal_tree_s, &uv__signal_tree, &lookup);
+
+ if (handle != NULL && handle->signum == signum)
+ return handle;
+
+ return NULL;
+}
+
+
+static void uv__signal_handler(int signum) {
+ uv__signal_msg_t msg;
+ uv_signal_t* handle;
+ int saved_errno;
+
+ saved_errno = errno;
+ memset(&msg, 0, sizeof msg);
+
+ if (uv__signal_lock()) {
+ errno = saved_errno;
+ return;
+ }
+
+ for (handle = uv__signal_first_handle(signum);
+ handle != NULL && handle->signum == signum;
+ handle = RB_NEXT(uv__signal_tree_s, &uv__signal_tree, handle)) {
+ int r;
+
+ msg.signum = signum;
+ msg.handle = handle;
+
+ /* write() should be atomic for small data chunks, so the entire message
+ * should be written at once. In theory the pipe could become full, in
+ * which case the user is out of luck.
+ */
+ do {
+ r = write(handle->loop->signal_pipefd[1], &msg, sizeof msg);
+ } while (r == -1 && errno == EINTR);
+
+ assert(r == sizeof msg ||
+ (r == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)));
+
+ if (r != -1)
+ handle->caught_signals++;
+ }
+
+ uv__signal_unlock();
+ errno = saved_errno;
+}
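+
+/* Editor's note (illustrative, not part of libuv): this is the classic
+ * self-pipe trick. Each delivery is framed as one fixed-size
+ * uv__signal_msg_t { handle, signum } written to the loop's pipe, and
+ * uv__signal_event() below reads in multiples of sizeof(uv__signal_msg_t)
+ * to recover whole frames on the loop thread.
+ */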
+
+
+static int uv__signal_register_handler(int signum, int oneshot) {
+ /* When this function is called, the signal lock must be held. */
+ struct sigaction sa;
+
+ /* XXX use a separate signal stack? */
+ memset(&sa, 0, sizeof(sa));
+ if (sigfillset(&sa.sa_mask))
+ abort();
+ sa.sa_handler = uv__signal_handler;
+ sa.sa_flags = SA_RESTART;
+ if (oneshot)
+ sa.sa_flags |= SA_RESETHAND;
+
+ /* XXX save old action so we can restore it later on? */
+ if (sigaction(signum, &sa, NULL))
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+static void uv__signal_unregister_handler(int signum) {
+ /* When this function is called, the signal lock must be held. */
+ struct sigaction sa;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = SIG_DFL;
+
+ /* sigaction can only fail with EINVAL or EFAULT; an attempt to deregister a
+ * signal implies that it was successfully registered earlier, so EINVAL
+ * should never happen.
+ */
+ if (sigaction(signum, &sa, NULL))
+ abort();
+}
+
+
+static int uv__signal_loop_once_init(uv_loop_t* loop) {
+ int err;
+
+ /* Return if already initialized. */
+ if (loop->signal_pipefd[0] != -1)
+ return 0;
+
+ err = uv__make_pipe(loop->signal_pipefd, UV_NONBLOCK_PIPE);
+ if (err)
+ return err;
+
+ uv__io_init(&loop->signal_io_watcher,
+ uv__signal_event,
+ loop->signal_pipefd[0]);
+ uv__io_start(loop, &loop->signal_io_watcher, POLLIN);
+
+ return 0;
+}
+
+
+int uv__signal_loop_fork(uv_loop_t* loop) {
+ uv__io_stop(loop, &loop->signal_io_watcher, POLLIN);
+ uv__close(loop->signal_pipefd[0]);
+ uv__close(loop->signal_pipefd[1]);
+ loop->signal_pipefd[0] = -1;
+ loop->signal_pipefd[1] = -1;
+ return uv__signal_loop_once_init(loop);
+}
+
+
+void uv__signal_loop_cleanup(uv_loop_t* loop) {
+ QUEUE* q;
+
+ /* Stop all the signal watchers that are still attached to this loop. This
+   * ensures that the (shared) signal tree doesn't contain any invalid
+   * entries, and that signal handlers are removed when appropriate.
+ * It's safe to use QUEUE_FOREACH here because the handles and the handle
+ * queue are not modified by uv__signal_stop().
+ */
+ QUEUE_FOREACH(q, &loop->handle_queue) {
+ uv_handle_t* handle = QUEUE_DATA(q, uv_handle_t, handle_queue);
+
+ if (handle->type == UV_SIGNAL)
+ uv__signal_stop((uv_signal_t*) handle);
+ }
+
+ if (loop->signal_pipefd[0] != -1) {
+ uv__close(loop->signal_pipefd[0]);
+ loop->signal_pipefd[0] = -1;
+ }
+
+ if (loop->signal_pipefd[1] != -1) {
+ uv__close(loop->signal_pipefd[1]);
+ loop->signal_pipefd[1] = -1;
+ }
+}
+
+
+int uv_signal_init(uv_loop_t* loop, uv_signal_t* handle) {
+ int err;
+
+ err = uv__signal_loop_once_init(loop);
+ if (err)
+ return err;
+
+ uv__handle_init(loop, (uv_handle_t*) handle, UV_SIGNAL);
+ handle->signum = 0;
+ handle->caught_signals = 0;
+ handle->dispatched_signals = 0;
+
+ return 0;
+}
+
+
+void uv__signal_close(uv_signal_t* handle) {
+ uv__signal_stop(handle);
+}
+
+
+int uv_signal_start(uv_signal_t* handle, uv_signal_cb signal_cb, int signum) {
+ return uv__signal_start(handle, signal_cb, signum, 0);
+}
+
+
+int uv_signal_start_oneshot(uv_signal_t* handle,
+ uv_signal_cb signal_cb,
+ int signum) {
+ return uv__signal_start(handle, signal_cb, signum, 1);
+}
+
+
+static int uv__signal_start(uv_signal_t* handle,
+ uv_signal_cb signal_cb,
+ int signum,
+ int oneshot) {
+ sigset_t saved_sigmask;
+ int err;
+ uv_signal_t* first_handle;
+
+ assert(!uv__is_closing(handle));
+
+ /* If the user supplies signum == 0, then return an error already. If the
+ * signum is otherwise invalid then uv__signal_register will find out
+ * eventually.
+ */
+ if (signum == 0)
+ return UV_EINVAL;
+
+ /* Short circuit: if the signal watcher is already watching {signum} don't
+ * go through the process of deregistering and registering the handler.
+ * Additionally, this avoids pending signals getting lost in the small
+ * time frame that handle->signum == 0.
+ */
+ if (signum == handle->signum) {
+ handle->signal_cb = signal_cb;
+ return 0;
+ }
+
+ /* If the signal handler was already active, stop it first. */
+ if (handle->signum != 0) {
+ uv__signal_stop(handle);
+ }
+
+ uv__signal_block_and_lock(&saved_sigmask);
+
+  /* If at this point there are no active signal watchers for this signum (in
+   * any of the loops), it's time to try and register a handler for it here.
+   * Do the same when only one-shot handlers are registered so far and a
+   * regular handler comes in.
+   */
+ first_handle = uv__signal_first_handle(signum);
+ if (first_handle == NULL ||
+ (!oneshot && (first_handle->flags & UV_SIGNAL_ONE_SHOT))) {
+ err = uv__signal_register_handler(signum, oneshot);
+ if (err) {
+ /* Registering the signal handler failed. Must be an invalid signal. */
+ uv__signal_unlock_and_unblock(&saved_sigmask);
+ return err;
+ }
+ }
+
+ handle->signum = signum;
+ if (oneshot)
+ handle->flags |= UV_SIGNAL_ONE_SHOT;
+
+ RB_INSERT(uv__signal_tree_s, &uv__signal_tree, handle);
+
+ uv__signal_unlock_and_unblock(&saved_sigmask);
+
+ handle->signal_cb = signal_cb;
+ uv__handle_start(handle);
+
+ return 0;
+}
+
+
+static void uv__signal_event(uv_loop_t* loop,
+ uv__io_t* w,
+ unsigned int events) {
+ uv__signal_msg_t* msg;
+ uv_signal_t* handle;
+ char buf[sizeof(uv__signal_msg_t) * 32];
+ size_t bytes, end, i;
+ int r;
+
+ bytes = 0;
+ end = 0;
+
+ do {
+ r = read(loop->signal_pipefd[0], buf + bytes, sizeof(buf) - bytes);
+
+ if (r == -1 && errno == EINTR)
+ continue;
+
+ if (r == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
+      /* If there are bytes in the buffer already (which really is extremely
+       * unlikely, if possible at all), we can't exit the function here. We'll
+ * spin until more bytes are read instead.
+ */
+ if (bytes > 0)
+ continue;
+
+ /* Otherwise, there was nothing there. */
+ return;
+ }
+
+ /* Other errors really should never happen. */
+ if (r == -1)
+ abort();
+
+ bytes += r;
+
+ /* `end` is rounded down to a multiple of sizeof(uv__signal_msg_t). */
+ end = (bytes / sizeof(uv__signal_msg_t)) * sizeof(uv__signal_msg_t);
+
+ for (i = 0; i < end; i += sizeof(uv__signal_msg_t)) {
+ msg = (uv__signal_msg_t*) (buf + i);
+ handle = msg->handle;
+
+ if (msg->signum == handle->signum) {
+ assert(!(handle->flags & UV_HANDLE_CLOSING));
+ handle->signal_cb(handle, handle->signum);
+ }
+
+ handle->dispatched_signals++;
+
+ if (handle->flags & UV_SIGNAL_ONE_SHOT)
+ uv__signal_stop(handle);
+ }
+
+ bytes -= end;
+
+    /* If there are any "partial" messages left, move them to the start of
+ * the buffer, and spin. This should not happen.
+ */
+ if (bytes) {
+ memmove(buf, buf + end, bytes);
+ continue;
+ }
+ } while (end == sizeof buf);
+}
+
+
+static int uv__signal_compare(uv_signal_t* w1, uv_signal_t* w2) {
+ int f1;
+ int f2;
+  /* Compare signums first so all watchers with the same signum end up
+ * adjacent.
+ */
+ if (w1->signum < w2->signum) return -1;
+ if (w1->signum > w2->signum) return 1;
+
+ /* Handlers without UV_SIGNAL_ONE_SHOT set will come first, so if the first
+ * handler returned is a one-shot handler, the rest will be too.
+ */
+ f1 = w1->flags & UV_SIGNAL_ONE_SHOT;
+ f2 = w2->flags & UV_SIGNAL_ONE_SHOT;
+ if (f1 < f2) return -1;
+ if (f1 > f2) return 1;
+
+ /* Sort by loop pointer, so we can easily look up the first item after
+ * { .signum = x, .loop = NULL }.
+ */
+ if (w1->loop < w2->loop) return -1;
+ if (w1->loop > w2->loop) return 1;
+
+ if (w1 < w2) return -1;
+ if (w1 > w2) return 1;
+
+ return 0;
+}
+
+
+int uv_signal_stop(uv_signal_t* handle) {
+ assert(!uv__is_closing(handle));
+ uv__signal_stop(handle);
+ return 0;
+}
+
+
+static void uv__signal_stop(uv_signal_t* handle) {
+ uv_signal_t* removed_handle;
+ sigset_t saved_sigmask;
+ uv_signal_t* first_handle;
+ int rem_oneshot;
+ int first_oneshot;
+ int ret;
+
+ /* If the watcher wasn't started, this is a no-op. */
+ if (handle->signum == 0)
+ return;
+
+ uv__signal_block_and_lock(&saved_sigmask);
+
+ removed_handle = RB_REMOVE(uv__signal_tree_s, &uv__signal_tree, handle);
+ assert(removed_handle == handle);
+ (void) removed_handle;
+
+ /* Check if there are other active signal watchers observing this signal. If
+ * not, unregister the signal handler.
+ */
+ first_handle = uv__signal_first_handle(handle->signum);
+ if (first_handle == NULL) {
+ uv__signal_unregister_handler(handle->signum);
+ } else {
+ rem_oneshot = handle->flags & UV_SIGNAL_ONE_SHOT;
+ first_oneshot = first_handle->flags & UV_SIGNAL_ONE_SHOT;
+ if (first_oneshot && !rem_oneshot) {
+ ret = uv__signal_register_handler(handle->signum, 1);
+ assert(ret == 0);
+ (void)ret;
+ }
+ }
+
+ uv__signal_unlock_and_unblock(&saved_sigmask);
+
+ handle->signum = 0;
+ uv__handle_stop(handle);
+}
diff --git a/Utilities/cmlibuv/src/unix/spinlock.h b/Utilities/cmlibuv/src/unix/spinlock.h
new file mode 100644
index 0000000000..a20c83cc60
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/spinlock.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2013, Ben Noordhuis <info@bnoordhuis.nl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef UV_SPINLOCK_H_
+#define UV_SPINLOCK_H_
+
+#include "internal.h" /* ACCESS_ONCE, UV_UNUSED */
+#include "atomic-ops.h"
+
+#define UV_SPINLOCK_INITIALIZER { 0 }
+
+typedef struct {
+ int lock;
+} uv_spinlock_t;
+
+UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock));
+UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock));
+UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock));
+UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock));
+
+UV_UNUSED(static void uv_spinlock_init(uv_spinlock_t* spinlock)) {
+ ACCESS_ONCE(int, spinlock->lock) = 0;
+}
+
+UV_UNUSED(static void uv_spinlock_lock(uv_spinlock_t* spinlock)) {
+ while (!uv_spinlock_trylock(spinlock)) cpu_relax();
+}
+
+UV_UNUSED(static void uv_spinlock_unlock(uv_spinlock_t* spinlock)) {
+ ACCESS_ONCE(int, spinlock->lock) = 0;
+}
+
+UV_UNUSED(static int uv_spinlock_trylock(uv_spinlock_t* spinlock)) {
+ /* TODO(bnoordhuis) Maybe change to a ticket lock to guarantee fair queueing.
+ * Not really critical until we have locks that are (frequently) contended
+ * for by several threads.
+ */
+ return 0 == cmpxchgi(&spinlock->lock, 0, 1);
+}
+
+#endif /* UV_SPINLOCK_H_ */
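+
+/* Editor's sketch (illustrative, not part of libuv):
+ *
+ *   static uv_spinlock_t lock = UV_SPINLOCK_INITIALIZER;
+ *
+ *   uv_spinlock_lock(&lock);
+ *   ... short critical section ...
+ *   uv_spinlock_unlock(&lock);
+ *
+ * uv_spinlock_trylock() returns non-zero when the lock was acquired, since
+ * cmpxchgi() hands back the previous value (0 for an unheld lock).
+ */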
diff --git a/Utilities/cmlibuv/src/unix/stream.c b/Utilities/cmlibuv/src/unix/stream.c
new file mode 100644
index 0000000000..1ce75257b2
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/stream.c
@@ -0,0 +1,1629 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/uio.h>
+#include <sys/un.h>
+#include <unistd.h>
+#include <limits.h> /* IOV_MAX */
+
+#if defined(__APPLE__)
+# include <sys/event.h>
+# include <sys/time.h>
+# include <sys/select.h>
+
+/* Forward declaration */
+typedef struct uv__stream_select_s uv__stream_select_t;
+
+struct uv__stream_select_s {
+ uv_stream_t* stream;
+ uv_thread_t thread;
+ uv_sem_t close_sem;
+ uv_sem_t async_sem;
+ uv_async_t async;
+ int events;
+ int fake_fd;
+ int int_fd;
+ int fd;
+ fd_set* sread;
+ size_t sread_sz;
+ fd_set* swrite;
+ size_t swrite_sz;
+};
+#endif /* defined(__APPLE__) */
+
+static void uv__stream_connect(uv_stream_t*);
+static void uv__write(uv_stream_t* stream);
+static void uv__read(uv_stream_t* stream);
+static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, unsigned int events);
+static void uv__write_callbacks(uv_stream_t* stream);
+static size_t uv__write_req_size(uv_write_t* req);
+static void uv__drain(uv_stream_t* stream);
+
+
+void uv__stream_init(uv_loop_t* loop,
+ uv_stream_t* stream,
+ uv_handle_type type) {
+ int err;
+
+ uv__handle_init(loop, (uv_handle_t*)stream, type);
+ stream->read_cb = NULL;
+ stream->alloc_cb = NULL;
+ stream->close_cb = NULL;
+ stream->connection_cb = NULL;
+ stream->connect_req = NULL;
+ stream->shutdown_req = NULL;
+ stream->accepted_fd = -1;
+ stream->queued_fds = NULL;
+ stream->delayed_error = 0;
+ QUEUE_INIT(&stream->write_queue);
+ QUEUE_INIT(&stream->write_completed_queue);
+ stream->write_queue_size = 0;
+
+ if (loop->emfile_fd == -1) {
+ err = uv__open_cloexec("/dev/null", O_RDONLY);
+ if (err < 0)
+      /* In the rare case that "/dev/null" isn't mounted, open "/"
+ * instead.
+ */
+ err = uv__open_cloexec("/", O_RDONLY);
+ if (err >= 0)
+ loop->emfile_fd = err;
+ }
+
+#if defined(__APPLE__) && !defined(CMAKE_BOOTSTRAP)
+ stream->select = NULL;
+#endif /* defined(__APPLE__) */
+
+ uv__io_init(&stream->io_watcher, uv__stream_io, -1);
+}
+
+
+static void uv__stream_osx_interrupt_select(uv_stream_t* stream) {
+#if defined(__APPLE__) && !defined(CMAKE_BOOTSTRAP)
+ /* Notify select() thread about state change */
+ uv__stream_select_t* s;
+ int r;
+
+ s = stream->select;
+ if (s == NULL)
+ return;
+
+  /* Interrupt the select() loop.
+   * NOTE: fake_fd and int_fd come from a socketpair(), so writing to one
+   * raises a read event on the other side.
+   */
+ do
+ r = write(s->fake_fd, "x", 1);
+ while (r == -1 && errno == EINTR);
+
+ assert(r == 1);
+#else /* !defined(__APPLE__) */
+ /* No-op on any other platform */
+#endif /* !defined(__APPLE__) */
+}
+
+
+#if defined(__APPLE__) && !defined(CMAKE_BOOTSTRAP)
+static void uv__stream_osx_select(void* arg) {
+ uv_stream_t* stream;
+ uv__stream_select_t* s;
+ char buf[1024];
+ int events;
+ int fd;
+ int r;
+ int max_fd;
+
+ stream = arg;
+ s = stream->select;
+ fd = s->fd;
+
+ if (fd > s->int_fd)
+ max_fd = fd;
+ else
+ max_fd = s->int_fd;
+
+ for (;;) {
+ /* Terminate on semaphore */
+ if (uv_sem_trywait(&s->close_sem) == 0)
+ break;
+
+ /* Watch fd using select(2) */
+ memset(s->sread, 0, s->sread_sz);
+ memset(s->swrite, 0, s->swrite_sz);
+
+ if (uv__io_active(&stream->io_watcher, POLLIN))
+ FD_SET(fd, s->sread);
+ if (uv__io_active(&stream->io_watcher, POLLOUT))
+ FD_SET(fd, s->swrite);
+ FD_SET(s->int_fd, s->sread);
+
+ /* Wait indefinitely for fd events */
+ r = select(max_fd + 1, s->sread, s->swrite, NULL, NULL);
+ if (r == -1) {
+ if (errno == EINTR)
+ continue;
+
+ /* XXX: Possible?! */
+ abort();
+ }
+
+ /* Ignore timeouts */
+ if (r == 0)
+ continue;
+
+ /* Empty socketpair's buffer in case of interruption */
+ if (FD_ISSET(s->int_fd, s->sread))
+ for (;;) {
+ r = read(s->int_fd, buf, sizeof(buf));
+
+ if (r == sizeof(buf))
+ continue;
+
+ if (r != -1)
+ break;
+
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ break;
+
+ if (errno == EINTR)
+ continue;
+
+ abort();
+ }
+
+ /* Handle events */
+ events = 0;
+ if (FD_ISSET(fd, s->sread))
+ events |= POLLIN;
+ if (FD_ISSET(fd, s->swrite))
+ events |= POLLOUT;
+
+ assert(events != 0 || FD_ISSET(s->int_fd, s->sread));
+ if (events != 0) {
+ ACCESS_ONCE(int, s->events) = events;
+
+ uv_async_send(&s->async);
+ uv_sem_wait(&s->async_sem);
+
+ /* Should be processed at this stage */
+ assert((s->events == 0) || (stream->flags & UV_HANDLE_CLOSING));
+ }
+ }
+}
+
+
+static void uv__stream_osx_select_cb(uv_async_t* handle) {
+ uv__stream_select_t* s;
+ uv_stream_t* stream;
+ int events;
+
+ s = container_of(handle, uv__stream_select_t, async);
+ stream = s->stream;
+
+ /* Get and reset stream's events */
+ events = s->events;
+ ACCESS_ONCE(int, s->events) = 0;
+
+ assert(events != 0);
+ assert(events == (events & (POLLIN | POLLOUT)));
+
+ /* Invoke callback on event-loop */
+ if ((events & POLLIN) && uv__io_active(&stream->io_watcher, POLLIN))
+ uv__stream_io(stream->loop, &stream->io_watcher, POLLIN);
+
+ if ((events & POLLOUT) && uv__io_active(&stream->io_watcher, POLLOUT))
+ uv__stream_io(stream->loop, &stream->io_watcher, POLLOUT);
+
+ if (stream->flags & UV_HANDLE_CLOSING)
+ return;
+
+ /* NOTE: It is important to do it here, otherwise `select()` might be called
+ * before the actual `uv__read()`, leading to the blocking syscall
+ */
+ uv_sem_post(&s->async_sem);
+}
+
+
+static void uv__stream_osx_cb_close(uv_handle_t* async) {
+ uv__stream_select_t* s;
+
+ s = container_of(async, uv__stream_select_t, async);
+ uv__free(s);
+}
+
+
+int uv__stream_try_select(uv_stream_t* stream, int* fd) {
+  /*
+   * kqueue doesn't work with some files from the /dev mount on macOS, so
+   * run select(2) in a separate thread for those fds.
+   */
+
+ struct kevent filter[1];
+ struct kevent events[1];
+ struct timespec timeout;
+ uv__stream_select_t* s;
+ int fds[2];
+ int err;
+ int ret;
+ int kq;
+ int old_fd;
+ int max_fd;
+ size_t sread_sz;
+ size_t swrite_sz;
+
+ kq = kqueue();
+ if (kq == -1) {
+ perror("(libuv) kqueue()");
+ return UV__ERR(errno);
+ }
+
+ EV_SET(&filter[0], *fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, 0);
+
+  /* Use a small timeout, because we only want to capture EINVALs */
+ timeout.tv_sec = 0;
+ timeout.tv_nsec = 1;
+
+ do
+ ret = kevent(kq, filter, 1, events, 1, &timeout);
+ while (ret == -1 && errno == EINTR);
+
+ uv__close(kq);
+
+ if (ret == -1)
+ return UV__ERR(errno);
+
+ if (ret == 0 || (events[0].flags & EV_ERROR) == 0 || events[0].data != EINVAL)
+ return 0;
+
+ /* At this point we definitely know that this fd won't work with kqueue */
+
+ /*
+ * Create fds for io watcher and to interrupt the select() loop.
+ * NOTE: do it ahead of malloc below to allocate enough space for fd_sets
+ */
+ if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds))
+ return UV__ERR(errno);
+
+ max_fd = *fd;
+ if (fds[1] > max_fd)
+ max_fd = fds[1];
+
+ sread_sz = ROUND_UP(max_fd + 1, sizeof(uint32_t) * NBBY) / NBBY;
+ swrite_sz = sread_sz;
+
+ s = uv__malloc(sizeof(*s) + sread_sz + swrite_sz);
+ if (s == NULL) {
+ err = UV_ENOMEM;
+ goto failed_malloc;
+ }
+
+ s->events = 0;
+ s->fd = *fd;
+ s->sread = (fd_set*) ((char*) s + sizeof(*s));
+ s->sread_sz = sread_sz;
+ s->swrite = (fd_set*) ((char*) s->sread + sread_sz);
+ s->swrite_sz = swrite_sz;
+
+ err = uv_async_init(stream->loop, &s->async, uv__stream_osx_select_cb);
+ if (err)
+ goto failed_async_init;
+
+ s->async.flags |= UV_HANDLE_INTERNAL;
+ uv__handle_unref(&s->async);
+
+ err = uv_sem_init(&s->close_sem, 0);
+ if (err != 0)
+ goto failed_close_sem_init;
+
+ err = uv_sem_init(&s->async_sem, 0);
+ if (err != 0)
+ goto failed_async_sem_init;
+
+ s->fake_fd = fds[0];
+ s->int_fd = fds[1];
+
+ old_fd = *fd;
+ s->stream = stream;
+ stream->select = s;
+ *fd = s->fake_fd;
+
+ err = uv_thread_create(&s->thread, uv__stream_osx_select, stream);
+ if (err != 0)
+ goto failed_thread_create;
+
+ return 0;
+
+failed_thread_create:
+ s->stream = NULL;
+ stream->select = NULL;
+ *fd = old_fd;
+
+ uv_sem_destroy(&s->async_sem);
+
+failed_async_sem_init:
+ uv_sem_destroy(&s->close_sem);
+
+failed_close_sem_init:
+ uv__close(fds[0]);
+ uv__close(fds[1]);
+ uv_close((uv_handle_t*) &s->async, uv__stream_osx_cb_close);
+ return err;
+
+failed_async_init:
+ uv__free(s);
+
+failed_malloc:
+ uv__close(fds[0]);
+ uv__close(fds[1]);
+
+ return err;
+}
+#endif /* defined(__APPLE__) */
+
+
+int uv__stream_open(uv_stream_t* stream, int fd, int flags) {
+#if defined(__APPLE__)
+ int enable;
+#endif
+
+ if (!(stream->io_watcher.fd == -1 || stream->io_watcher.fd == fd))
+ return UV_EBUSY;
+
+ assert(fd >= 0);
+ stream->flags |= flags;
+
+ if (stream->type == UV_TCP) {
+ if ((stream->flags & UV_HANDLE_TCP_NODELAY) && uv__tcp_nodelay(fd, 1))
+ return UV__ERR(errno);
+
+ /* TODO Use delay the user passed in. */
+ if ((stream->flags & UV_HANDLE_TCP_KEEPALIVE) &&
+ uv__tcp_keepalive(fd, 1, 60)) {
+ return UV__ERR(errno);
+ }
+ }
+
+#if defined(__APPLE__)
+ enable = 1;
+ if (setsockopt(fd, SOL_SOCKET, SO_OOBINLINE, &enable, sizeof(enable)) &&
+ errno != ENOTSOCK &&
+ errno != EINVAL) {
+ return UV__ERR(errno);
+ }
+#endif
+
+ stream->io_watcher.fd = fd;
+
+ return 0;
+}
+
+
+void uv__stream_flush_write_queue(uv_stream_t* stream, int error) {
+ uv_write_t* req;
+ QUEUE* q;
+ while (!QUEUE_EMPTY(&stream->write_queue)) {
+ q = QUEUE_HEAD(&stream->write_queue);
+ QUEUE_REMOVE(q);
+
+ req = QUEUE_DATA(q, uv_write_t, queue);
+ req->error = error;
+
+ QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue);
+ }
+}
+
+
+void uv__stream_destroy(uv_stream_t* stream) {
+ assert(!uv__io_active(&stream->io_watcher, POLLIN | POLLOUT));
+ assert(stream->flags & UV_HANDLE_CLOSED);
+
+ if (stream->connect_req) {
+ uv__req_unregister(stream->loop, stream->connect_req);
+ stream->connect_req->cb(stream->connect_req, UV_ECANCELED);
+ stream->connect_req = NULL;
+ }
+
+ uv__stream_flush_write_queue(stream, UV_ECANCELED);
+ uv__write_callbacks(stream);
+ uv__drain(stream);
+
+ assert(stream->write_queue_size == 0);
+}
+
+
+/* Implements a best effort approach to mitigating accept() EMFILE errors.
+ * We have a spare file descriptor stashed away that we close to get below
+ * the EMFILE limit. Next, we accept all pending connections and close them
+ * immediately to signal the clients that we're overloaded - and we are, but
+ * we still keep on trucking.
+ *
+ * There is one caveat: it's not reliable in a multi-threaded environment.
+ * The file descriptor limit is per process. Our party trick fails if another
+ * thread opens a file or creates a socket in the time window between us
+ * calling close() and accept().
+ */
+static int uv__emfile_trick(uv_loop_t* loop, int accept_fd) {
+ int err;
+ int emfile_fd;
+
+ if (loop->emfile_fd == -1)
+ return UV_EMFILE;
+
+ uv__close(loop->emfile_fd);
+ loop->emfile_fd = -1;
+
+ do {
+ err = uv__accept(accept_fd);
+ if (err >= 0)
+ uv__close(err);
+ } while (err >= 0 || err == UV_EINTR);
+
+ emfile_fd = uv__open_cloexec("/", O_RDONLY);
+ if (emfile_fd >= 0)
+ loop->emfile_fd = emfile_fd;
+
+ return err;
+}
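+
+/* Editor's note (illustrative, not part of libuv): loop->emfile_fd is the
+ * spare descriptor reserved in uv__stream_init(); closing it frees exactly
+ * one slot so the pending connections can be accept()ed and dropped, after
+ * which the spare is re-acquired from "/".
+ */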
+
+
+#if defined(UV_HAVE_KQUEUE)
+# define UV_DEC_BACKLOG(w) w->rcount--;
+#else
+# define UV_DEC_BACKLOG(w) /* no-op */
+#endif /* defined(UV_HAVE_KQUEUE) */
+
+
+void uv__server_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
+ uv_stream_t* stream;
+ int err;
+
+ stream = container_of(w, uv_stream_t, io_watcher);
+ assert(events & POLLIN);
+ assert(stream->accepted_fd == -1);
+ assert(!(stream->flags & UV_HANDLE_CLOSING));
+
+ uv__io_start(stream->loop, &stream->io_watcher, POLLIN);
+
+ /* connection_cb can close the server socket while we're
+ * in the loop so check it on each iteration.
+ */
+ while (uv__stream_fd(stream) != -1) {
+ assert(stream->accepted_fd == -1);
+
+#if defined(UV_HAVE_KQUEUE)
+ if (w->rcount <= 0)
+ return;
+#endif /* defined(UV_HAVE_KQUEUE) */
+
+ err = uv__accept(uv__stream_fd(stream));
+ if (err < 0) {
+ if (err == UV_EAGAIN || err == UV__ERR(EWOULDBLOCK))
+ return; /* Not an error. */
+
+ if (err == UV_ECONNABORTED)
+ continue; /* Ignore. Nothing we can do about that. */
+
+ if (err == UV_EMFILE || err == UV_ENFILE) {
+ err = uv__emfile_trick(loop, uv__stream_fd(stream));
+ if (err == UV_EAGAIN || err == UV__ERR(EWOULDBLOCK))
+ break;
+ }
+
+ stream->connection_cb(stream, err);
+ continue;
+ }
+
+ UV_DEC_BACKLOG(w)
+ stream->accepted_fd = err;
+ stream->connection_cb(stream, 0);
+
+ if (stream->accepted_fd != -1) {
+      /* The user hasn't yet called uv_accept(). */
+ uv__io_stop(loop, &stream->io_watcher, POLLIN);
+ return;
+ }
+
+ if (stream->type == UV_TCP &&
+ (stream->flags & UV_HANDLE_TCP_SINGLE_ACCEPT)) {
+ /* Give other processes a chance to accept connections. */
+ struct timespec timeout = { 0, 1 };
+ nanosleep(&timeout, NULL);
+ }
+ }
+}
+
+
+#undef UV_DEC_BACKLOG
+
+
+int uv_accept(uv_stream_t* server, uv_stream_t* client) {
+ int err;
+
+ assert(server->loop == client->loop);
+
+ if (server->accepted_fd == -1)
+ return UV_EAGAIN;
+
+ switch (client->type) {
+ case UV_NAMED_PIPE:
+ case UV_TCP:
+ err = uv__stream_open(client,
+ server->accepted_fd,
+ UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
+ if (err) {
+ /* TODO handle error */
+ uv__close(server->accepted_fd);
+ goto done;
+ }
+ break;
+
+ case UV_UDP:
+ err = uv_udp_open((uv_udp_t*) client, server->accepted_fd);
+ if (err) {
+ uv__close(server->accepted_fd);
+ goto done;
+ }
+ break;
+
+ default:
+ return UV_EINVAL;
+ }
+
+ client->flags |= UV_HANDLE_BOUND;
+
+done:
+ /* Process queued fds */
+ if (server->queued_fds != NULL) {
+ uv__stream_queued_fds_t* queued_fds;
+
+ queued_fds = server->queued_fds;
+
+ /* Read first */
+ server->accepted_fd = queued_fds->fds[0];
+
+ /* All read, free */
+ assert(queued_fds->offset > 0);
+ if (--queued_fds->offset == 0) {
+ uv__free(queued_fds);
+ server->queued_fds = NULL;
+ } else {
+ /* Shift rest */
+ memmove(queued_fds->fds,
+ queued_fds->fds + 1,
+ queued_fds->offset * sizeof(*queued_fds->fds));
+ }
+ } else {
+ server->accepted_fd = -1;
+ if (err == 0)
+ uv__io_start(server->loop, &server->io_watcher, POLLIN);
+ }
+ return err;
+}
+
+
+int uv_listen(uv_stream_t* stream, int backlog, uv_connection_cb cb) {
+ int err;
+ if (uv__is_closing(stream)) {
+ return UV_EINVAL;
+ }
+ switch (stream->type) {
+ case UV_TCP:
+ err = uv__tcp_listen((uv_tcp_t*)stream, backlog, cb);
+ break;
+
+ case UV_NAMED_PIPE:
+ err = uv__pipe_listen((uv_pipe_t*)stream, backlog, cb);
+ break;
+
+ default:
+ err = UV_EINVAL;
+ }
+
+ if (err == 0)
+ uv__handle_start(stream);
+
+ return err;
+}
+
+
+static void uv__drain(uv_stream_t* stream) {
+ uv_shutdown_t* req;
+ int err;
+
+ assert(QUEUE_EMPTY(&stream->write_queue));
+ if (!(stream->flags & UV_HANDLE_CLOSING)) {
+ uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
+ uv__stream_osx_interrupt_select(stream);
+ }
+
+ if (!(stream->flags & UV_HANDLE_SHUTTING))
+ return;
+
+ req = stream->shutdown_req;
+ assert(req);
+
+ if ((stream->flags & UV_HANDLE_CLOSING) ||
+ !(stream->flags & UV_HANDLE_SHUT)) {
+ stream->shutdown_req = NULL;
+ stream->flags &= ~UV_HANDLE_SHUTTING;
+ uv__req_unregister(stream->loop, req);
+
+ err = 0;
+ if (stream->flags & UV_HANDLE_CLOSING)
+ /* The user destroyed the stream before we got to do the shutdown. */
+ err = UV_ECANCELED;
+ else if (shutdown(uv__stream_fd(stream), SHUT_WR))
+ err = UV__ERR(errno);
+ else /* Success. */
+ stream->flags |= UV_HANDLE_SHUT;
+
+ if (req->cb != NULL)
+ req->cb(req, err);
+ }
+}
+
+
+static ssize_t uv__writev(int fd, struct iovec* vec, size_t n) {
+ if (n == 1)
+ return write(fd, vec->iov_base, vec->iov_len);
+ else
+ return writev(fd, vec, n);
+}
+
+
+static size_t uv__write_req_size(uv_write_t* req) {
+ size_t size;
+
+ assert(req->bufs != NULL);
+ size = uv__count_bufs(req->bufs + req->write_index,
+ req->nbufs - req->write_index);
+ assert(req->handle->write_queue_size >= size);
+
+ return size;
+}
+
+
+/* Returns 1 if all write request data has been written, or 0 if there is still
+ * more data to write.
+ *
+ * Note: the return value only says something about the *current* request.
+ * There may still be other write requests sitting in the queue.
+ */
+static int uv__write_req_update(uv_stream_t* stream,
+ uv_write_t* req,
+ size_t n) {
+ uv_buf_t* buf;
+ size_t len;
+
+ assert(n <= stream->write_queue_size);
+ stream->write_queue_size -= n;
+
+ buf = req->bufs + req->write_index;
+
+ do {
+ len = n < buf->len ? n : buf->len;
+ buf->base += len;
+ buf->len -= len;
+ buf += (buf->len == 0); /* Advance to next buffer if this one is empty. */
+ n -= len;
+ } while (n > 0);
+
+ req->write_index = buf - req->bufs;
+
+ return req->write_index == req->nbufs;
+}
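+
+/* Editor's walk-through (illustrative, not part of libuv): with two
+ * 10-byte buffers and n == 14, the loop drains all of bufs[0] (len -> 0),
+ * advances, then takes 4 bytes from bufs[1], leaving write_index == 1 and
+ * 6 bytes pending, so 0 is returned and uv__write() either retries
+ * (blocking streams) or arms POLLOUT.
+ */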
+
+
+static void uv__write_req_finish(uv_write_t* req) {
+ uv_stream_t* stream = req->handle;
+
+ /* Pop the req off tcp->write_queue. */
+ QUEUE_REMOVE(&req->queue);
+
+ /* Only free when there was no error. On error, we touch up write_queue_size
+ * right before making the callback. The reason we don't do that right away
+ * is that a write_queue_size > 0 is our only way to signal to the user that
+ * they should stop writing - which they should if we got an error. Something
+ * to revisit in future revisions of the libuv API.
+ */
+ if (req->error == 0) {
+ if (req->bufs != req->bufsml)
+ uv__free(req->bufs);
+ req->bufs = NULL;
+ }
+
+ /* Add it to the write_completed_queue where it will have its
+ * callback called in the near future.
+ */
+ QUEUE_INSERT_TAIL(&stream->write_completed_queue, &req->queue);
+ uv__io_feed(stream->loop, &stream->io_watcher);
+}
+
+
+static int uv__handle_fd(uv_handle_t* handle) {
+ switch (handle->type) {
+ case UV_NAMED_PIPE:
+ case UV_TCP:
+ return ((uv_stream_t*) handle)->io_watcher.fd;
+
+ case UV_UDP:
+ return ((uv_udp_t*) handle)->io_watcher.fd;
+
+ default:
+ return -1;
+ }
+}
+
+static int uv__try_write(uv_stream_t* stream,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ uv_stream_t* send_handle) {
+ struct iovec* iov;
+ int iovmax;
+ int iovcnt;
+ ssize_t n;
+
+ /*
+ * Cast to iovec. We had to have our own uv_buf_t instead of iovec
+ * because Windows's WSABUF is not an iovec.
+ */
+ iov = (struct iovec*) bufs;
+ iovcnt = nbufs;
+
+ iovmax = uv__getiovmax();
+
+ /* Limit iov count to avoid EINVALs from writev() */
+ if (iovcnt > iovmax)
+ iovcnt = iovmax;
+
+ /*
+ * Now do the actual writev. Note that we've been updating the pointers
+ * inside the iov each time we write. So there is no need to offset it.
+ */
+ if (send_handle != NULL) {
+ int fd_to_send;
+ struct msghdr msg;
+ struct cmsghdr *cmsg;
+ union {
+ char data[64];
+ struct cmsghdr alias;
+ } scratch;
+
+ if (uv__is_closing(send_handle))
+ return UV_EBADF;
+
+ fd_to_send = uv__handle_fd((uv_handle_t*) send_handle);
+
+ memset(&scratch, 0, sizeof(scratch));
+
+ assert(fd_to_send >= 0);
+
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_iov = iov;
+ msg.msg_iovlen = iovcnt;
+ msg.msg_flags = 0;
+
+ msg.msg_control = &scratch.alias;
+ msg.msg_controllen = CMSG_SPACE(sizeof(fd_to_send));
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(fd_to_send));
+
+ /* silence aliasing warning */
+ {
+ void* pv = CMSG_DATA(cmsg);
+ int* pi = pv;
+ *pi = fd_to_send;
+ }
+
+ do
+ n = sendmsg(uv__stream_fd(stream), &msg, 0);
+ while (n == -1 && errno == EINTR);
+ } else {
+ do
+ n = uv__writev(uv__stream_fd(stream), iov, iovcnt);
+ while (n == -1 && errno == EINTR);
+ }
+
+ if (n >= 0)
+ return n;
+
+ if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
+ return UV_EAGAIN;
+
+#ifdef __APPLE__
+  /* macOS versions 10.10 and 10.15 - and presumably 10.11 to 10.14, too -
+ * have a bug where a race condition causes the kernel to return EPROTOTYPE
+ * because the socket isn't fully constructed. It's probably the result of
+ * the peer closing the connection and that is why libuv translates it to
+ * ECONNRESET. Previously, libuv retried until the EPROTOTYPE error went
+ * away but some VPN software causes the same behavior except the error is
+ * permanent, not transient, turning the retry mechanism into an infinite
+ * loop. See https://github.com/libuv/libuv/pull/482.
+ */
+ if (errno == EPROTOTYPE)
+ return UV_ECONNRESET;
+#endif /* __APPLE__ */
+
+ return UV__ERR(errno);
+}
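+
+/* Editor's note (illustrative, not part of libuv): the scratch union gives
+ * CMSG_DATA() correctly aligned storage, and a single SCM_RIGHTS control
+ * message carrying one fd rides along with the payload. uv__write() below
+ * clears req->send_handle after any successful byte count, so a partial
+ * write cannot transmit the descriptor twice.
+ */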
+
+static void uv__write(uv_stream_t* stream) {
+ QUEUE* q;
+ uv_write_t* req;
+ ssize_t n;
+
+ assert(uv__stream_fd(stream) >= 0);
+
+ for (;;) {
+ if (QUEUE_EMPTY(&stream->write_queue))
+ return;
+
+ q = QUEUE_HEAD(&stream->write_queue);
+ req = QUEUE_DATA(q, uv_write_t, queue);
+ assert(req->handle == stream);
+
+ n = uv__try_write(stream,
+ &(req->bufs[req->write_index]),
+ req->nbufs - req->write_index,
+ req->send_handle);
+
+ /* Ensure the handle isn't sent again in case this is a partial write. */
+ if (n >= 0) {
+ req->send_handle = NULL;
+ if (uv__write_req_update(stream, req, n)) {
+ uv__write_req_finish(req);
+ return; /* TODO(bnoordhuis) Start trying to write the next request. */
+ }
+ } else if (n != UV_EAGAIN)
+ break;
+
+ /* If this is a blocking stream, try again. */
+ if (stream->flags & UV_HANDLE_BLOCKING_WRITES)
+ continue;
+
+ /* We're not done. */
+ uv__io_start(stream->loop, &stream->io_watcher, POLLOUT);
+
+ /* Notify select() thread about state change */
+ uv__stream_osx_interrupt_select(stream);
+
+ return;
+ }
+
+ req->error = n;
+ uv__write_req_finish(req);
+ uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
+ uv__stream_osx_interrupt_select(stream);
+}
+
+
+static void uv__write_callbacks(uv_stream_t* stream) {
+ uv_write_t* req;
+ QUEUE* q;
+ QUEUE pq;
+
+ if (QUEUE_EMPTY(&stream->write_completed_queue))
+ return;
+
+ QUEUE_MOVE(&stream->write_completed_queue, &pq);
+
+ while (!QUEUE_EMPTY(&pq)) {
+ /* Pop a req off write_completed_queue. */
+ q = QUEUE_HEAD(&pq);
+ req = QUEUE_DATA(q, uv_write_t, queue);
+ QUEUE_REMOVE(q);
+ uv__req_unregister(stream->loop, req);
+
+ if (req->bufs != NULL) {
+ stream->write_queue_size -= uv__write_req_size(req);
+ if (req->bufs != req->bufsml)
+ uv__free(req->bufs);
+ req->bufs = NULL;
+ }
+
+ /* NOTE: call callback AFTER freeing the request data. */
+ if (req->cb)
+ req->cb(req, req->error);
+ }
+}
+
+
+static void uv__stream_eof(uv_stream_t* stream, const uv_buf_t* buf) {
+ stream->flags |= UV_HANDLE_READ_EOF;
+ stream->flags &= ~UV_HANDLE_READING;
+ uv__io_stop(stream->loop, &stream->io_watcher, POLLIN);
+ uv__handle_stop(stream);
+ uv__stream_osx_interrupt_select(stream);
+ stream->read_cb(stream, UV_EOF, buf);
+}
+
+
+static int uv__stream_queue_fd(uv_stream_t* stream, int fd) {
+ uv__stream_queued_fds_t* queued_fds;
+ unsigned int queue_size;
+
+ queued_fds = stream->queued_fds;
+ if (queued_fds == NULL) {
+ queue_size = 8;
+ queued_fds = uv__malloc((queue_size - 1) * sizeof(*queued_fds->fds) +
+ sizeof(*queued_fds));
+ if (queued_fds == NULL)
+ return UV_ENOMEM;
+ queued_fds->size = queue_size;
+ queued_fds->offset = 0;
+ stream->queued_fds = queued_fds;
+
+ /* Grow */
+ } else if (queued_fds->size == queued_fds->offset) {
+ queue_size = queued_fds->size + 8;
+ queued_fds = uv__realloc(queued_fds,
+ (queue_size - 1) * sizeof(*queued_fds->fds) +
+ sizeof(*queued_fds));
+
+ /*
+ * Allocation failure, report back.
+ * NOTE: if it is fatal - sockets will be closed in uv__stream_close
+ */
+ if (queued_fds == NULL)
+ return UV_ENOMEM;
+ queued_fds->size = queue_size;
+ stream->queued_fds = queued_fds;
+ }
+
+ /* Put fd in a queue */
+ queued_fds->fds[queued_fds->offset++] = fd;
+
+ return 0;
+}
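+
+/* Editor's note (illustrative, not part of libuv): the
+ * "(queue_size - 1) * sizeof(fd) + sizeof(*queued_fds)" arithmetic assumes
+ * uv__stream_queued_fds_t ends in a one-element fds[] array, so the struct
+ * header already pays for the first slot; growing by 8 at a time keeps
+ * realloc traffic low for the usual handful of in-flight descriptors.
+ */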
+
+
+#if defined(__PASE__)
+/* on IBMi PASE the control message length cannot exceed 256. */
+# define UV__CMSG_FD_COUNT 60
+#else
+# define UV__CMSG_FD_COUNT 64
+#endif
+#define UV__CMSG_FD_SIZE (UV__CMSG_FD_COUNT * sizeof(int))
+
+
+static int uv__stream_recv_cmsg(uv_stream_t* stream, struct msghdr* msg) {
+ struct cmsghdr* cmsg;
+
+ for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+ char* start;
+ char* end;
+ int err;
+ void* pv;
+ int* pi;
+ unsigned int i;
+ unsigned int count;
+
+ if (cmsg->cmsg_type != SCM_RIGHTS) {
+ fprintf(stderr, "ignoring non-SCM_RIGHTS ancillary data: %d\n",
+ cmsg->cmsg_type);
+ continue;
+ }
+
+ /* silence aliasing warning */
+ pv = CMSG_DATA(cmsg);
+ pi = pv;
+
+ /* Count available fds */
+ start = (char*) cmsg;
+ end = (char*) cmsg + cmsg->cmsg_len;
+ count = 0;
+ while (start + CMSG_LEN(count * sizeof(*pi)) < end)
+ count++;
+ assert(start + CMSG_LEN(count * sizeof(*pi)) == end);
+
+ for (i = 0; i < count; i++) {
+ /* Already has accepted fd, queue now */
+ if (stream->accepted_fd != -1) {
+ err = uv__stream_queue_fd(stream, pi[i]);
+ if (err != 0) {
+ /* Close rest */
+ for (; i < count; i++)
+ uv__close(pi[i]);
+ return err;
+ }
+ } else {
+ stream->accepted_fd = pi[i];
+ }
+ }
+ }
+
+ return 0;
+}
+
+
+#ifdef __clang__
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wgnu-folding-constant"
+# pragma clang diagnostic ignored "-Wvla-extension"
+#endif
+
+static void uv__read(uv_stream_t* stream) {
+ uv_buf_t buf;
+ ssize_t nread;
+ struct msghdr msg;
+ char cmsg_space[CMSG_SPACE(UV__CMSG_FD_SIZE)];
+ int count;
+ int err;
+ int is_ipc;
+
+ stream->flags &= ~UV_HANDLE_READ_PARTIAL;
+
+ /* Prevent loop starvation when the data comes in as fast as (or faster than)
+ * we can read it. XXX Need to rearm fd if we switch to edge-triggered I/O.
+ */
+ count = 32;
+
+ is_ipc = stream->type == UV_NAMED_PIPE && ((uv_pipe_t*) stream)->ipc;
+
+ /* XXX: Maybe instead of having UV_HANDLE_READING we just test if
+ * tcp->read_cb is NULL or not?
+ */
+ while (stream->read_cb
+ && (stream->flags & UV_HANDLE_READING)
+ && (count-- > 0)) {
+ assert(stream->alloc_cb != NULL);
+
+ buf = uv_buf_init(NULL, 0);
+ stream->alloc_cb((uv_handle_t*)stream, 64 * 1024, &buf);
+ if (buf.base == NULL || buf.len == 0) {
+ /* User indicates it can't or won't handle the read. */
+ stream->read_cb(stream, UV_ENOBUFS, &buf);
+ return;
+ }
+
+ assert(buf.base != NULL);
+ assert(uv__stream_fd(stream) >= 0);
+
+ if (!is_ipc) {
+ do {
+ nread = read(uv__stream_fd(stream), buf.base, buf.len);
+ }
+ while (nread < 0 && errno == EINTR);
+ } else {
+ /* ipc uses recvmsg */
+ msg.msg_flags = 0;
+ msg.msg_iov = (struct iovec*) &buf;
+ msg.msg_iovlen = 1;
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ /* Set up to receive a descriptor even if one isn't in the message */
+ msg.msg_controllen = sizeof(cmsg_space);
+ msg.msg_control = cmsg_space;
+
+ do {
+ nread = uv__recvmsg(uv__stream_fd(stream), &msg, 0);
+ }
+ while (nread < 0 && errno == EINTR);
+ }
+
+ if (nread < 0) {
+ /* Error */
+ if (errno == EAGAIN || errno == EWOULDBLOCK) {
+ /* Wait for the next one. */
+ if (stream->flags & UV_HANDLE_READING) {
+ uv__io_start(stream->loop, &stream->io_watcher, POLLIN);
+ uv__stream_osx_interrupt_select(stream);
+ }
+ stream->read_cb(stream, 0, &buf);
+#if defined(__CYGWIN__) || defined(__MSYS__)
+ } else if (errno == ECONNRESET && stream->type == UV_NAMED_PIPE) {
+ uv__stream_eof(stream, &buf);
+ return;
+#endif
+ } else {
+ /* Error. User should call uv_close(). */
+ stream->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
+ stream->read_cb(stream, UV__ERR(errno), &buf);
+ if (stream->flags & UV_HANDLE_READING) {
+ stream->flags &= ~UV_HANDLE_READING;
+ uv__io_stop(stream->loop, &stream->io_watcher, POLLIN);
+ uv__handle_stop(stream);
+ uv__stream_osx_interrupt_select(stream);
+ }
+ }
+ return;
+ } else if (nread == 0) {
+ uv__stream_eof(stream, &buf);
+ return;
+ } else {
+ /* Successful read */
+ ssize_t buflen = buf.len;
+
+ if (is_ipc) {
+ err = uv__stream_recv_cmsg(stream, &msg);
+ if (err != 0) {
+ stream->read_cb(stream, err, &buf);
+ return;
+ }
+ }
+
+#if defined(__MVS__)
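+      /* z/OS may deliver ancillary data separately from the payload; drain
+       * any remaining control messages with an empty iovec. */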
+ if (is_ipc && msg.msg_controllen > 0) {
+ uv_buf_t blankbuf;
+ int nread;
+ struct iovec *old;
+
+ blankbuf.base = 0;
+ blankbuf.len = 0;
+ old = msg.msg_iov;
+ msg.msg_iov = (struct iovec*) &blankbuf;
+ nread = 0;
+ do {
+ nread = uv__recvmsg(uv__stream_fd(stream), &msg, 0);
+ err = uv__stream_recv_cmsg(stream, &msg);
+ if (err != 0) {
+ stream->read_cb(stream, err, &buf);
+ msg.msg_iov = old;
+ return;
+ }
+ } while (nread == 0 && msg.msg_controllen > 0);
+ msg.msg_iov = old;
+ }
+#endif
+ stream->read_cb(stream, nread, &buf);
+
+      /* Return if we didn't fill the buffer; there is no more data to read. */
+ if (nread < buflen) {
+ stream->flags |= UV_HANDLE_READ_PARTIAL;
+ return;
+ }
+ }
+ }
+}
+
+
+#ifdef __clang__
+# pragma clang diagnostic pop
+#endif
+
+#undef UV__CMSG_FD_COUNT
+#undef UV__CMSG_FD_SIZE
+
+
+int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
+ assert(stream->type == UV_TCP ||
+ stream->type == UV_TTY ||
+ stream->type == UV_NAMED_PIPE);
+
+ if (!(stream->flags & UV_HANDLE_WRITABLE) ||
+ stream->flags & UV_HANDLE_SHUT ||
+ stream->flags & UV_HANDLE_SHUTTING ||
+ uv__is_closing(stream)) {
+ return UV_ENOTCONN;
+ }
+
+ assert(uv__stream_fd(stream) >= 0);
+
+ /* Initialize request. The `shutdown(2)` call will always be deferred until
+ * `uv__drain`, just before the callback is run. */
+ uv__req_init(stream->loop, req, UV_SHUTDOWN);
+ req->handle = stream;
+ req->cb = cb;
+ stream->shutdown_req = req;
+ stream->flags |= UV_HANDLE_SHUTTING;
+ stream->flags &= ~UV_HANDLE_WRITABLE;
+
+ if (QUEUE_EMPTY(&stream->write_queue))
+ uv__io_feed(stream->loop, &stream->io_watcher);
+
+ return 0;
+}
+
+
+static void uv__stream_io(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
+ uv_stream_t* stream;
+
+ stream = container_of(w, uv_stream_t, io_watcher);
+
+ assert(stream->type == UV_TCP ||
+ stream->type == UV_NAMED_PIPE ||
+ stream->type == UV_TTY);
+ assert(!(stream->flags & UV_HANDLE_CLOSING));
+
+ if (stream->connect_req) {
+ uv__stream_connect(stream);
+ return;
+ }
+
+ assert(uv__stream_fd(stream) >= 0);
+
+ /* Ignore POLLHUP here. Even if it's set, there may still be data to read. */
+ if (events & (POLLIN | POLLERR | POLLHUP))
+ uv__read(stream);
+
+ if (uv__stream_fd(stream) == -1)
+ return; /* read_cb closed stream. */
+
+ /* Short-circuit iff POLLHUP is set, the user is still interested in read
+ * events and uv__read() reported a partial read but not EOF. If the EOF
+ * flag is set, uv__read() called read_cb with err=UV_EOF and we don't
+ * have to do anything. If the partial read flag is not set, we can't
+ * report the EOF yet because there is still data to read.
+ */
+ if ((events & POLLHUP) &&
+ (stream->flags & UV_HANDLE_READING) &&
+ (stream->flags & UV_HANDLE_READ_PARTIAL) &&
+ !(stream->flags & UV_HANDLE_READ_EOF)) {
+ uv_buf_t buf = { NULL, 0 };
+ uv__stream_eof(stream, &buf);
+ }
+
+ if (uv__stream_fd(stream) == -1)
+ return; /* read_cb closed stream. */
+
+ if (events & (POLLOUT | POLLERR | POLLHUP)) {
+ uv__write(stream);
+ uv__write_callbacks(stream);
+
+ /* Write queue drained. */
+ if (QUEUE_EMPTY(&stream->write_queue))
+ uv__drain(stream);
+ }
+}
+
+
+/**
+ * We get here directly following a call to connect(2).
+ * In order to determine whether we've errored out or succeeded we must
+ * call getsockopt().
+ */
+static void uv__stream_connect(uv_stream_t* stream) {
+ int error;
+ uv_connect_t* req = stream->connect_req;
+ socklen_t errorsize = sizeof(int);
+
+ assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE);
+ assert(req);
+
+ if (stream->delayed_error) {
+    /* To smooth over differences between unixes, errors that were reported
+     * synchronously on the first connect can be delayed until the next
+     * tick--which is now.
+     */
+ error = stream->delayed_error;
+ stream->delayed_error = 0;
+ } else {
+ /* Normal situation: we need to get the socket error from the kernel. */
+ assert(uv__stream_fd(stream) >= 0);
+ getsockopt(uv__stream_fd(stream),
+ SOL_SOCKET,
+ SO_ERROR,
+ &error,
+ &errorsize);
+ error = UV__ERR(error);
+ }
+
+ if (error == UV__ERR(EINPROGRESS))
+ return;
+
+ stream->connect_req = NULL;
+ uv__req_unregister(stream->loop, req);
+
+ if (error < 0 || QUEUE_EMPTY(&stream->write_queue)) {
+ uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
+ }
+
+ if (req->cb)
+ req->cb(req, error);
+
+ if (uv__stream_fd(stream) == -1)
+ return;
+
+ if (error < 0) {
+ uv__stream_flush_write_queue(stream, UV_ECANCELED);
+ uv__write_callbacks(stream);
+ }
+}
+
+
+static int uv__check_before_write(uv_stream_t* stream,
+ unsigned int nbufs,
+ uv_stream_t* send_handle) {
+ assert(nbufs > 0);
+ assert((stream->type == UV_TCP ||
+ stream->type == UV_NAMED_PIPE ||
+ stream->type == UV_TTY) &&
+ "uv_write (unix) does not yet support other types of streams");
+
+ if (uv__stream_fd(stream) < 0)
+ return UV_EBADF;
+
+ if (!(stream->flags & UV_HANDLE_WRITABLE))
+ return UV_EPIPE;
+
+ if (send_handle != NULL) {
+ if (stream->type != UV_NAMED_PIPE || !((uv_pipe_t*)stream)->ipc)
+ return UV_EINVAL;
+
+ /* XXX We abuse uv_write2() to send over UDP handles to child processes.
+ * Don't call uv__stream_fd() on those handles, it's a macro that on OS X
+ * evaluates to a function that operates on a uv_stream_t with a couple of
+ * OS X specific fields. On other Unices it does (handle)->io_watcher.fd,
+ * which works but only by accident.
+ */
+ if (uv__handle_fd((uv_handle_t*) send_handle) < 0)
+ return UV_EBADF;
+
+#if defined(__CYGWIN__) || defined(__MSYS__)
+ /* Cygwin recvmsg always sets msg_controllen to zero, so we cannot send it.
+ See https://github.com/mirror/newlib-cygwin/blob/86fc4bf0/winsup/cygwin/fhandler_socket.cc#L1736-L1743 */
+ return UV_ENOSYS;
+#endif
+ }
+
+ return 0;
+}
+
+int uv_write2(uv_write_t* req,
+ uv_stream_t* stream,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ uv_stream_t* send_handle,
+ uv_write_cb cb) {
+ int empty_queue;
+ int err;
+
+ err = uv__check_before_write(stream, nbufs, send_handle);
+ if (err < 0)
+ return err;
+
+ /* It's legal for write_queue_size > 0 even when the write_queue is empty;
+ * it means there are error-state requests in the write_completed_queue that
+ * will touch up write_queue_size later, see also uv__write_req_finish().
+ * We could check that write_queue is empty instead but that implies making
+ * a write() syscall when we know that the handle is in error mode.
+ */
+ empty_queue = (stream->write_queue_size == 0);
+
+ /* Initialize the req */
+ uv__req_init(stream->loop, req, UV_WRITE);
+ req->cb = cb;
+ req->handle = stream;
+ req->error = 0;
+ req->send_handle = send_handle;
+ QUEUE_INIT(&req->queue);
+
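+  /* Use the request's small built-in buffer array when it is large enough;
+   * otherwise fall back to a heap allocation. */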
+ req->bufs = req->bufsml;
+ if (nbufs > ARRAY_SIZE(req->bufsml))
+ req->bufs = uv__malloc(nbufs * sizeof(bufs[0]));
+
+ if (req->bufs == NULL)
+ return UV_ENOMEM;
+
+ memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
+ req->nbufs = nbufs;
+ req->write_index = 0;
+ stream->write_queue_size += uv__count_bufs(bufs, nbufs);
+
+ /* Append the request to write_queue. */
+ QUEUE_INSERT_TAIL(&stream->write_queue, &req->queue);
+
+ /* If the queue was empty when this function began, we should attempt to
+ * do the write immediately. Otherwise start the write_watcher and wait
+ * for the fd to become writable.
+ */
+ if (stream->connect_req) {
+ /* Still connecting, do nothing. */
+ }
+ else if (empty_queue) {
+ uv__write(stream);
+ }
+ else {
+    /*
+     * Blocking streams should never have anything in the queue.
+     * If this assert fires then somehow the blocking stream isn't being
+     * sufficiently flushed in uv__write().
+     */
+ assert(!(stream->flags & UV_HANDLE_BLOCKING_WRITES));
+ uv__io_start(stream->loop, &stream->io_watcher, POLLOUT);
+ uv__stream_osx_interrupt_select(stream);
+ }
+
+ return 0;
+}
+
+
+/* The buffers to be written must remain valid until the callback is called.
+ * This is not required for the uv_buf_t array.
+ */
+int uv_write(uv_write_t* req,
+ uv_stream_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ uv_write_cb cb) {
+ return uv_write2(req, handle, bufs, nbufs, NULL, cb);
+}
+
+
+int uv_try_write(uv_stream_t* stream,
+ const uv_buf_t bufs[],
+ unsigned int nbufs) {
+ return uv_try_write2(stream, bufs, nbufs, NULL);
+}
+
+
+int uv_try_write2(uv_stream_t* stream,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ uv_stream_t* send_handle) {
+ int err;
+
+ /* Connecting or already writing some data */
+ if (stream->connect_req != NULL || stream->write_queue_size != 0)
+ return UV_EAGAIN;
+
+ err = uv__check_before_write(stream, nbufs, NULL);
+ if (err < 0)
+ return err;
+
+ return uv__try_write(stream, bufs, nbufs, send_handle);
+}
+
+
+int uv__read_start(uv_stream_t* stream,
+ uv_alloc_cb alloc_cb,
+ uv_read_cb read_cb) {
+ assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE ||
+ stream->type == UV_TTY);
+
+  /* The UV_HANDLE_READING flag is independent of the actual state of the
+   * stream - it just expresses the user's desired state. */
+ stream->flags |= UV_HANDLE_READING;
+ stream->flags &= ~UV_HANDLE_READ_EOF;
+
+ /* TODO: try to do the read inline? */
+ assert(uv__stream_fd(stream) >= 0);
+ assert(alloc_cb);
+
+ stream->read_cb = read_cb;
+ stream->alloc_cb = alloc_cb;
+
+ uv__io_start(stream->loop, &stream->io_watcher, POLLIN);
+ uv__handle_start(stream);
+ uv__stream_osx_interrupt_select(stream);
+
+ return 0;
+}
+
+
+int uv_read_stop(uv_stream_t* stream) {
+ if (!(stream->flags & UV_HANDLE_READING))
+ return 0;
+
+ stream->flags &= ~UV_HANDLE_READING;
+ uv__io_stop(stream->loop, &stream->io_watcher, POLLIN);
+ uv__handle_stop(stream);
+ uv__stream_osx_interrupt_select(stream);
+
+ stream->read_cb = NULL;
+ stream->alloc_cb = NULL;
+ return 0;
+}
+
+
+int uv_is_readable(const uv_stream_t* stream) {
+ return !!(stream->flags & UV_HANDLE_READABLE);
+}
+
+
+int uv_is_writable(const uv_stream_t* stream) {
+ return !!(stream->flags & UV_HANDLE_WRITABLE);
+}
+
+
+#if defined(__APPLE__) && !defined(CMAKE_BOOTSTRAP)
+int uv___stream_fd(const uv_stream_t* handle) {
+ const uv__stream_select_t* s;
+
+ assert(handle->type == UV_TCP ||
+ handle->type == UV_TTY ||
+ handle->type == UV_NAMED_PIPE);
+
+ s = handle->select;
+ if (s != NULL)
+ return s->fd;
+
+ return handle->io_watcher.fd;
+}
+#endif /* defined(__APPLE__) */
+
+
+void uv__stream_close(uv_stream_t* handle) {
+ unsigned int i;
+ uv__stream_queued_fds_t* queued_fds;
+
+#if defined(__APPLE__) && !defined(CMAKE_BOOTSTRAP)
+ /* Terminate select loop first */
+ if (handle->select != NULL) {
+ uv__stream_select_t* s;
+
+ s = handle->select;
+
+ uv_sem_post(&s->close_sem);
+ uv_sem_post(&s->async_sem);
+ uv__stream_osx_interrupt_select(handle);
+ uv_thread_join(&s->thread);
+ uv_sem_destroy(&s->close_sem);
+ uv_sem_destroy(&s->async_sem);
+ uv__close(s->fake_fd);
+ uv__close(s->int_fd);
+ uv_close((uv_handle_t*) &s->async, uv__stream_osx_cb_close);
+
+ handle->select = NULL;
+ }
+#endif /* defined(__APPLE__) */
+
+ uv__io_close(handle->loop, &handle->io_watcher);
+ uv_read_stop(handle);
+ uv__handle_stop(handle);
+ handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
+
+ if (handle->io_watcher.fd != -1) {
+ /* Don't close stdio file descriptors. Nothing good comes from it. */
+ if (handle->io_watcher.fd > STDERR_FILENO)
+ uv__close(handle->io_watcher.fd);
+ handle->io_watcher.fd = -1;
+ }
+
+ if (handle->accepted_fd != -1) {
+ uv__close(handle->accepted_fd);
+ handle->accepted_fd = -1;
+ }
+
+ /* Close all queued fds */
+ if (handle->queued_fds != NULL) {
+ queued_fds = handle->queued_fds;
+ for (i = 0; i < queued_fds->offset; i++)
+ uv__close(queued_fds->fds[i]);
+ uv__free(handle->queued_fds);
+ handle->queued_fds = NULL;
+ }
+
+ assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT));
+}
+
+
+int uv_stream_set_blocking(uv_stream_t* handle, int blocking) {
+  /* There is no need to check the file descriptor: uv__nonblock()
+   * will fail with EBADF if it's not valid.
+   */
+ return uv__nonblock(uv__stream_fd(handle), !blocking);
+}
diff --git a/Utilities/cmlibuv/src/unix/sunos.c b/Utilities/cmlibuv/src/unix/sunos.c
new file mode 100644
index 0000000000..85e4d60253
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/sunos.c
@@ -0,0 +1,904 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#if !defined(SUNOS_NO_IFADDRS) && _XOPEN_SOURCE < 600
+#define SUNOS_NO_IFADDRS
+#endif
+
+#ifndef SUNOS_NO_IFADDRS
+# include <ifaddrs.h>
+#endif
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/if_arp.h>
+#include <sys/sockio.h>
+
+#include <sys/loadavg.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <kstat.h>
+#include <fcntl.h>
+
+#include <sys/port.h>
+#include <port.h>
+
+#define PORT_FIRED 0x69
+#define PORT_UNUSED 0x0
+#define PORT_LOADED 0x99
+#define PORT_DELETED -1
+
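+/* <procfs.h> cannot be included in a 32-bit compilation with 64-bit file
+ * offsets, so temporarily drop _FILE_OFFSET_BITS around the include and
+ * restore it afterwards. */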
+#if (!defined(_LP64)) && (_FILE_OFFSET_BITS - 0 == 64)
+#define PROCFS_FILE_OFFSET_BITS_HACK 1
+#undef _FILE_OFFSET_BITS
+#else
+#define PROCFS_FILE_OFFSET_BITS_HACK 0
+#endif
+
+#include <procfs.h>
+
+#if (PROCFS_FILE_OFFSET_BITS_HACK - 0 == 1)
+#define _FILE_OFFSET_BITS 64
+#endif
+
+
+int uv__platform_loop_init(uv_loop_t* loop) {
+ int err;
+ int fd;
+
+ loop->fs_fd = -1;
+ loop->backend_fd = -1;
+
+ fd = port_create();
+ if (fd == -1)
+ return UV__ERR(errno);
+
+ err = uv__cloexec(fd, 1);
+ if (err) {
+ uv__close(fd);
+ return err;
+ }
+ loop->backend_fd = fd;
+
+ return 0;
+}
+
+
+void uv__platform_loop_delete(uv_loop_t* loop) {
+ if (loop->fs_fd != -1) {
+ uv__close(loop->fs_fd);
+ loop->fs_fd = -1;
+ }
+
+ if (loop->backend_fd != -1) {
+ uv__close(loop->backend_fd);
+ loop->backend_fd = -1;
+ }
+}
+
+
+int uv__io_fork(uv_loop_t* loop) {
+#if defined(PORT_SOURCE_FILE)
+ if (loop->fs_fd != -1) {
+ /* stop the watcher before we blow away its fileno */
+ uv__io_stop(loop, &loop->fs_event_watcher, POLLIN);
+ }
+#endif
+ uv__platform_loop_delete(loop);
+ return uv__platform_loop_init(loop);
+}
+
+
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
+ struct port_event* events;
+ uintptr_t i;
+ uintptr_t nfds;
+
+ assert(loop->watchers != NULL);
+ assert(fd >= 0);
+
+ events = (struct port_event*) loop->watchers[loop->nwatchers];
+ nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
+ if (events == NULL)
+ return;
+
+ /* Invalidate events with same file descriptor */
+ for (i = 0; i < nfds; i++)
+ if ((int) events[i].portev_object == fd)
+ events[i].portev_object = -1;
+}
+
+
+int uv__io_check_fd(uv_loop_t* loop, int fd) {
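+  /* Probe the descriptor by briefly associating it with the event port;
+   * if the association fails, the fd cannot be polled. */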
+ if (port_associate(loop->backend_fd, PORT_SOURCE_FD, fd, POLLIN, 0))
+ return UV__ERR(errno);
+
+ if (port_dissociate(loop->backend_fd, PORT_SOURCE_FD, fd)) {
+ perror("(libuv) port_dissociate()");
+ abort();
+ }
+
+ return 0;
+}
+
+
+void uv__io_poll(uv_loop_t* loop, int timeout) {
+ struct port_event events[1024];
+ struct port_event* pe;
+ struct timespec spec;
+ QUEUE* q;
+ uv__io_t* w;
+ sigset_t* pset;
+ sigset_t set;
+ uint64_t base;
+ uint64_t diff;
+ unsigned int nfds;
+ unsigned int i;
+ int saved_errno;
+ int have_signals;
+ int nevents;
+ int count;
+ int err;
+ int fd;
+ int user_timeout;
+ int reset_timeout;
+
+ if (loop->nfds == 0) {
+ assert(QUEUE_EMPTY(&loop->watcher_queue));
+ return;
+ }
+
+ while (!QUEUE_EMPTY(&loop->watcher_queue)) {
+ q = QUEUE_HEAD(&loop->watcher_queue);
+ QUEUE_REMOVE(q);
+ QUEUE_INIT(q);
+
+ w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+ assert(w->pevents != 0);
+
+ if (port_associate(loop->backend_fd,
+ PORT_SOURCE_FD,
+ w->fd,
+ w->pevents,
+ 0)) {
+ perror("(libuv) port_associate()");
+ abort();
+ }
+
+ w->events = w->pevents;
+ }
+
+ pset = NULL;
+ if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
+ pset = &set;
+ sigemptyset(pset);
+ sigaddset(pset, SIGPROF);
+ }
+
+ assert(timeout >= -1);
+ base = loop->time;
+ count = 48; /* Benchmarks suggest this gives the best throughput. */
+
+ if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
+ reset_timeout = 1;
+ user_timeout = timeout;
+ timeout = 0;
+ } else {
+ reset_timeout = 0;
+ }
+
+ for (;;) {
+ /* Only need to set the provider_entry_time if timeout != 0. The function
+ * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
+ */
+ if (timeout != 0)
+ uv__metrics_set_provider_entry_time(loop);
+
+ if (timeout != -1) {
+ spec.tv_sec = timeout / 1000;
+ spec.tv_nsec = (timeout % 1000) * 1000000;
+ }
+
+ /* Work around a kernel bug where nfds is not updated. */
+ events[0].portev_source = 0;
+
+ nfds = 1;
+ saved_errno = 0;
+
+ if (pset != NULL)
+ pthread_sigmask(SIG_BLOCK, pset, NULL);
+
+ err = port_getn(loop->backend_fd,
+ events,
+ ARRAY_SIZE(events),
+ &nfds,
+ timeout == -1 ? NULL : &spec);
+
+ if (pset != NULL)
+ pthread_sigmask(SIG_UNBLOCK, pset, NULL);
+
+ if (err) {
+ /* Work around another kernel bug: port_getn() may return events even
+ * on error.
+ */
+ if (errno == EINTR || errno == ETIME) {
+ saved_errno = errno;
+ } else {
+ perror("(libuv) port_getn()");
+ abort();
+ }
+ }
+
+ /* Update loop->time unconditionally. It's tempting to skip the update when
+ * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
+ * operating system didn't reschedule our process while in the syscall.
+ */
+ SAVE_ERRNO(uv__update_time(loop));
+
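+    /* If the sentinel written before port_getn() is still intact, no events
+     * were delivered: the call timed out or was interrupted. */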
+ if (events[0].portev_source == 0) {
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (timeout == 0)
+ return;
+
+ if (timeout == -1)
+ continue;
+
+ goto update_timeout;
+ }
+
+ if (nfds == 0) {
+ assert(timeout != -1);
+ return;
+ }
+
+ have_signals = 0;
+ nevents = 0;
+
+ assert(loop->watchers != NULL);
+ loop->watchers[loop->nwatchers] = (void*) events;
+ loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
+ for (i = 0; i < nfds; i++) {
+ pe = events + i;
+ fd = pe->portev_object;
+
+ /* Skip invalidated events, see uv__platform_invalidate_fd */
+ if (fd == -1)
+ continue;
+
+ assert(fd >= 0);
+ assert((unsigned) fd < loop->nwatchers);
+
+ w = loop->watchers[fd];
+
+ /* File descriptor that we've stopped watching, ignore. */
+ if (w == NULL)
+ continue;
+
+ /* Run signal watchers last. This also affects child process watchers
+ * because those are implemented in terms of signal watchers.
+ */
+ if (w == &loop->signal_io_watcher) {
+ have_signals = 1;
+ } else {
+ uv__metrics_update_idle_time(loop);
+ w->cb(loop, w, pe->portev_events);
+ }
+
+ nevents++;
+
+ if (w != loop->watchers[fd])
+ continue; /* Disabled by callback. */
+
+      /* Event ports operate in oneshot mode, so rearm the watcher on the
+       * next loop iteration. */
+ if (w->pevents != 0 && QUEUE_EMPTY(&w->watcher_queue))
+ QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
+ }
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (have_signals != 0) {
+ uv__metrics_update_idle_time(loop);
+ loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
+ }
+
+ loop->watchers[loop->nwatchers] = NULL;
+ loop->watchers[loop->nwatchers + 1] = NULL;
+
+ if (have_signals != 0)
+ return; /* Event loop should cycle now so don't poll again. */
+
+ if (nevents != 0) {
+ if (nfds == ARRAY_SIZE(events) && --count != 0) {
+ /* Poll for more events but don't block this time. */
+ timeout = 0;
+ continue;
+ }
+ return;
+ }
+
+ if (saved_errno == ETIME) {
+ assert(timeout != -1);
+ return;
+ }
+
+ if (timeout == 0)
+ return;
+
+ if (timeout == -1)
+ continue;
+
+update_timeout:
+ assert(timeout > 0);
+
+ diff = loop->time - base;
+ if (diff >= (uint64_t) timeout)
+ return;
+
+ timeout -= diff;
+ }
+}
+
+
+uint64_t uv__hrtime(uv_clocktype_t type) {
+ return gethrtime();
+}
+
+
+/*
+ * We could use a static buffer for the path, but this function may be called
+ * by multiple consumers concurrently and we don't want to create a race
+ * condition in the use of snprintf.
+ */
+int uv_exepath(char* buffer, size_t* size) {
+ ssize_t res;
+ char buf[128];
+
+ if (buffer == NULL || size == NULL || *size == 0)
+ return UV_EINVAL;
+
+ snprintf(buf, sizeof(buf), "/proc/%lu/path/a.out", (unsigned long) getpid());
+
+ res = *size - 1;
+ if (res > 0)
+ res = readlink(buf, buffer, res);
+
+ if (res == -1)
+ return UV__ERR(errno);
+
+ buffer[res] = '\0';
+ *size = res;
+ return 0;
+}
+
+
+uint64_t uv_get_free_memory(void) {
+ return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_AVPHYS_PAGES);
+}
+
+
+uint64_t uv_get_total_memory(void) {
+ return (uint64_t) sysconf(_SC_PAGESIZE) * sysconf(_SC_PHYS_PAGES);
+}
+
+
+uint64_t uv_get_constrained_memory(void) {
+ return 0; /* Memory constraints are unknown. */
+}
+
+
+void uv_loadavg(double avg[3]) {
+ (void) getloadavg(avg, 3);
+}
+
+
+#if defined(PORT_SOURCE_FILE)
+
+static int uv__fs_event_rearm(uv_fs_event_t *handle) {
+ if (handle->fd == PORT_DELETED)
+ return UV_EBADF;
+
+ if (port_associate(handle->loop->fs_fd,
+ PORT_SOURCE_FILE,
+ (uintptr_t) &handle->fo,
+ FILE_ATTRIB | FILE_MODIFIED,
+ handle) == -1) {
+ return UV__ERR(errno);
+ }
+ handle->fd = PORT_LOADED;
+
+ return 0;
+}
+
+
+static void uv__fs_event_read(uv_loop_t* loop,
+ uv__io_t* w,
+ unsigned int revents) {
+ uv_fs_event_t *handle = NULL;
+ timespec_t timeout;
+ port_event_t pe;
+ int events;
+ int r;
+
+ (void) w;
+ (void) revents;
+
+ do {
+ uint_t n = 1;
+
+ /*
+ * Note that our use of port_getn() here (and not port_get()) is deliberate:
+ * there is a bug in event ports (Sun bug 6456558) whereby a zeroed timeout
+ * causes port_get() to return success instead of ETIME when there aren't
+ * actually any events (!); by using port_getn() in lieu of port_get(),
+     * we can at least work around the bug by checking for zero returned
+     * events and treating it as we would ETIME.
+ */
+ do {
+ memset(&timeout, 0, sizeof timeout);
+ r = port_getn(loop->fs_fd, &pe, 1, &n, &timeout);
+ }
+ while (r == -1 && errno == EINTR);
+
+ if ((r == -1 && errno == ETIME) || n == 0)
+ break;
+
+ handle = (uv_fs_event_t*) pe.portev_user;
+ assert((r == 0) && "unexpected port_get() error");
+
+ if (uv__is_closing(handle)) {
+ uv__handle_stop(handle);
+ uv__make_close_pending((uv_handle_t*) handle);
+ break;
+ }
+
+ events = 0;
+ if (pe.portev_events & (FILE_ATTRIB | FILE_MODIFIED))
+ events |= UV_CHANGE;
+ if (pe.portev_events & ~(FILE_ATTRIB | FILE_MODIFIED))
+ events |= UV_RENAME;
+ assert(events != 0);
+ handle->fd = PORT_FIRED;
+ handle->cb(handle, NULL, events, 0);
+
+ if (handle->fd != PORT_DELETED) {
+ r = uv__fs_event_rearm(handle);
+ if (r != 0)
+ handle->cb(handle, NULL, 0, r);
+ }
+ }
+ while (handle->fd != PORT_DELETED);
+}
+
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+ uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
+ return 0;
+}
+
+
+int uv_fs_event_start(uv_fs_event_t* handle,
+ uv_fs_event_cb cb,
+ const char* path,
+ unsigned int flags) {
+ int portfd;
+ int first_run;
+ int err;
+
+ if (uv__is_active(handle))
+ return UV_EINVAL;
+
+ first_run = 0;
+ if (handle->loop->fs_fd == -1) {
+ portfd = port_create();
+ if (portfd == -1)
+ return UV__ERR(errno);
+ handle->loop->fs_fd = portfd;
+ first_run = 1;
+ }
+
+ uv__handle_start(handle);
+ handle->path = uv__strdup(path);
+ handle->fd = PORT_UNUSED;
+ handle->cb = cb;
+
+ memset(&handle->fo, 0, sizeof handle->fo);
+ handle->fo.fo_name = handle->path;
+ err = uv__fs_event_rearm(handle);
+ if (err != 0) {
+ uv_fs_event_stop(handle);
+ return err;
+ }
+
+ if (first_run) {
+ uv__io_init(&handle->loop->fs_event_watcher, uv__fs_event_read, portfd);
+ uv__io_start(handle->loop, &handle->loop->fs_event_watcher, POLLIN);
+ }
+
+ return 0;
+}
+
+
+static int uv__fs_event_stop(uv_fs_event_t* handle) {
+ int ret = 0;
+
+ if (!uv__is_active(handle))
+ return 0;
+
+ if (handle->fd == PORT_LOADED) {
+ ret = port_dissociate(handle->loop->fs_fd,
+ PORT_SOURCE_FILE,
+ (uintptr_t) &handle->fo);
+ }
+
+ handle->fd = PORT_DELETED;
+ uv__free(handle->path);
+ handle->path = NULL;
+ handle->fo.fo_name = NULL;
+ if (ret == 0)
+ uv__handle_stop(handle);
+
+ return ret;
+}
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+ (void) uv__fs_event_stop(handle);
+ return 0;
+}
+
+void uv__fs_event_close(uv_fs_event_t* handle) {
+ /*
+ * If we were unable to dissociate the port here, then it is most likely
+ * that there is a pending queued event. When this happens, we don't want
+ * to complete the close as it will free the underlying memory for the
+ * handle, causing a use-after-free problem when the event is processed.
+ * We defer the final cleanup until after the event is consumed in
+ * uv__fs_event_read().
+ */
+ if (uv__fs_event_stop(handle) == 0)
+ uv__make_close_pending((uv_handle_t*) handle);
+}
+
+#else /* !defined(PORT_SOURCE_FILE) */
+
+int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
+ return UV_ENOSYS;
+}
+
+
+int uv_fs_event_start(uv_fs_event_t* handle,
+ uv_fs_event_cb cb,
+ const char* filename,
+ unsigned int flags) {
+ return UV_ENOSYS;
+}
+
+
+int uv_fs_event_stop(uv_fs_event_t* handle) {
+ return UV_ENOSYS;
+}
+
+
+void uv__fs_event_close(uv_fs_event_t* handle) {
+ UNREACHABLE();
+}
+
+#endif /* defined(PORT_SOURCE_FILE) */
+
+
+int uv_resident_set_memory(size_t* rss) {
+ psinfo_t psinfo;
+ int err;
+ int fd;
+
+ fd = open("/proc/self/psinfo", O_RDONLY);
+ if (fd == -1)
+ return UV__ERR(errno);
+
+ /* FIXME(bnoordhuis) Handle EINTR. */
+ err = UV_EINVAL;
+ if (read(fd, &psinfo, sizeof(psinfo)) == sizeof(psinfo)) {
+ *rss = (size_t)psinfo.pr_rssize * 1024;
+ err = 0;
+ }
+ uv__close(fd);
+
+ return err;
+}
+
+
+int uv_uptime(double* uptime) {
+ kstat_ctl_t *kc;
+ kstat_t *ksp;
+ kstat_named_t *knp;
+
+ long hz = sysconf(_SC_CLK_TCK);
+
+ kc = kstat_open();
+ if (kc == NULL)
+ return UV_EPERM;
+
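+  /* Uptime is derived from the kernel's clock-interrupt counter divided by
+   * the clock tick rate. */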
+ ksp = kstat_lookup(kc, (char*) "unix", 0, (char*) "system_misc");
+ if (kstat_read(kc, ksp, NULL) == -1) {
+ *uptime = -1;
+ } else {
+ knp = (kstat_named_t*) kstat_data_lookup(ksp, (char*) "clk_intr");
+ *uptime = knp->value.ul / hz;
+ }
+ kstat_close(kc);
+
+ return 0;
+}
+
+
+int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
+ int lookup_instance;
+ kstat_ctl_t *kc;
+ kstat_t *ksp;
+ kstat_named_t *knp;
+ uv_cpu_info_t* cpu_info;
+
+ kc = kstat_open();
+ if (kc == NULL)
+ return UV_EPERM;
+
+ /* Get count of cpus */
+ lookup_instance = 0;
+ while ((ksp = kstat_lookup(kc, (char*) "cpu_info", lookup_instance, NULL))) {
+ lookup_instance++;
+ }
+
+ *cpu_infos = uv__malloc(lookup_instance * sizeof(**cpu_infos));
+ if (!(*cpu_infos)) {
+ kstat_close(kc);
+ return UV_ENOMEM;
+ }
+
+ *count = lookup_instance;
+
+ cpu_info = *cpu_infos;
+ lookup_instance = 0;
+ while ((ksp = kstat_lookup(kc, (char*) "cpu_info", lookup_instance, NULL))) {
+ if (kstat_read(kc, ksp, NULL) == -1) {
+ cpu_info->speed = 0;
+ cpu_info->model = NULL;
+ } else {
+ knp = kstat_data_lookup(ksp, (char*) "clock_MHz");
+ assert(knp->data_type == KSTAT_DATA_INT32 ||
+ knp->data_type == KSTAT_DATA_INT64);
+ cpu_info->speed = (knp->data_type == KSTAT_DATA_INT32) ? knp->value.i32
+ : knp->value.i64;
+
+ knp = kstat_data_lookup(ksp, (char*) "brand");
+ assert(knp->data_type == KSTAT_DATA_STRING);
+ cpu_info->model = uv__strdup(KSTAT_NAMED_STR_PTR(knp));
+ }
+
+ lookup_instance++;
+ cpu_info++;
+ }
+
+ cpu_info = *cpu_infos;
+ lookup_instance = 0;
+ for (;;) {
+ ksp = kstat_lookup(kc, (char*) "cpu", lookup_instance, (char*) "sys");
+
+ if (ksp == NULL)
+ break;
+
+ if (kstat_read(kc, ksp, NULL) == -1) {
+ cpu_info->cpu_times.user = 0;
+ cpu_info->cpu_times.nice = 0;
+ cpu_info->cpu_times.sys = 0;
+ cpu_info->cpu_times.idle = 0;
+ cpu_info->cpu_times.irq = 0;
+ } else {
+ knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_user");
+ assert(knp->data_type == KSTAT_DATA_UINT64);
+ cpu_info->cpu_times.user = knp->value.ui64;
+
+ knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_kernel");
+ assert(knp->data_type == KSTAT_DATA_UINT64);
+ cpu_info->cpu_times.sys = knp->value.ui64;
+
+ knp = kstat_data_lookup(ksp, (char*) "cpu_ticks_idle");
+ assert(knp->data_type == KSTAT_DATA_UINT64);
+ cpu_info->cpu_times.idle = knp->value.ui64;
+
+ knp = kstat_data_lookup(ksp, (char*) "intr");
+ assert(knp->data_type == KSTAT_DATA_UINT64);
+ cpu_info->cpu_times.irq = knp->value.ui64;
+ cpu_info->cpu_times.nice = 0;
+ }
+
+ lookup_instance++;
+ cpu_info++;
+ }
+
+ kstat_close(kc);
+
+ return 0;
+}
+
+
+#ifdef SUNOS_NO_IFADDRS
+int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
+ *count = 0;
+ *addresses = NULL;
+ return UV_ENOSYS;
+}
+#else /* SUNOS_NO_IFADDRS */
+/*
+ * Inspired By:
+ * https://blogs.oracle.com/paulie/entry/retrieving_mac_address_in_solaris
+ * http://www.pauliesworld.org/project/getmac.c
+ */
+static int uv__set_phys_addr(uv_interface_address_t* address,
+ struct ifaddrs* ent) {
+
+ struct sockaddr_dl* sa_addr;
+ int sockfd;
+ size_t i;
+ struct arpreq arpreq;
+
+ /* This appears to only work as root */
+ sa_addr = (struct sockaddr_dl*)(ent->ifa_addr);
+ memcpy(address->phys_addr, LLADDR(sa_addr), sizeof(address->phys_addr));
+ for (i = 0; i < sizeof(address->phys_addr); i++) {
+    /* If the link-layer address is non-zero, keep it; otherwise fall back
+     * to the SIOCGARP lookup below. */
+ if (address->phys_addr[i] != 0)
+ return 0;
+ }
+ memset(&arpreq, 0, sizeof(arpreq));
+ if (address->address.address4.sin_family == AF_INET) {
+ struct sockaddr_in* sin = ((struct sockaddr_in*)&arpreq.arp_pa);
+ sin->sin_addr.s_addr = address->address.address4.sin_addr.s_addr;
+ } else if (address->address.address4.sin_family == AF_INET6) {
+ struct sockaddr_in6* sin = ((struct sockaddr_in6*)&arpreq.arp_pa);
+ memcpy(sin->sin6_addr.s6_addr,
+ address->address.address6.sin6_addr.s6_addr,
+ sizeof(address->address.address6.sin6_addr.s6_addr));
+ } else {
+ return 0;
+ }
+
+ sockfd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sockfd < 0)
+ return UV__ERR(errno);
+
+ if (ioctl(sockfd, SIOCGARP, (char*)&arpreq) == -1) {
+ uv__close(sockfd);
+ return UV__ERR(errno);
+ }
+ memcpy(address->phys_addr, arpreq.arp_ha.sa_data, sizeof(address->phys_addr));
+ uv__close(sockfd);
+ return 0;
+}
+
+
+static int uv__ifaddr_exclude(struct ifaddrs *ent) {
+ if (!((ent->ifa_flags & IFF_UP) && (ent->ifa_flags & IFF_RUNNING)))
+ return 1;
+ if (ent->ifa_addr == NULL)
+ return 1;
+ if (ent->ifa_addr->sa_family != AF_INET &&
+ ent->ifa_addr->sa_family != AF_INET6)
+ return 1;
+ return 0;
+}
+
+int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
+ uv_interface_address_t* address;
+ struct ifaddrs* addrs;
+ struct ifaddrs* ent;
+
+ *count = 0;
+ *addresses = NULL;
+
+ if (getifaddrs(&addrs))
+ return UV__ERR(errno);
+
+ /* Count the number of interfaces */
+ for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+ if (uv__ifaddr_exclude(ent))
+ continue;
+ (*count)++;
+ }
+
+ if (*count == 0) {
+ freeifaddrs(addrs);
+ return 0;
+ }
+
+ *addresses = uv__malloc(*count * sizeof(**addresses));
+ if (!(*addresses)) {
+ freeifaddrs(addrs);
+ return UV_ENOMEM;
+ }
+
+ address = *addresses;
+
+ for (ent = addrs; ent != NULL; ent = ent->ifa_next) {
+ if (uv__ifaddr_exclude(ent))
+ continue;
+
+ address->name = uv__strdup(ent->ifa_name);
+
+ if (ent->ifa_addr->sa_family == AF_INET6) {
+ address->address.address6 = *((struct sockaddr_in6*) ent->ifa_addr);
+ } else {
+ address->address.address4 = *((struct sockaddr_in*) ent->ifa_addr);
+ }
+
+ if (ent->ifa_netmask->sa_family == AF_INET6) {
+ address->netmask.netmask6 = *((struct sockaddr_in6*) ent->ifa_netmask);
+ } else {
+ address->netmask.netmask4 = *((struct sockaddr_in*) ent->ifa_netmask);
+ }
+
+ address->is_internal = !!((ent->ifa_flags & IFF_PRIVATE) ||
+ (ent->ifa_flags & IFF_LOOPBACK));
+
+ uv__set_phys_addr(address, ent);
+ address++;
+ }
+
+ freeifaddrs(addrs);
+
+ return 0;
+}
+#endif /* SUNOS_NO_IFADDRS */
+
+void uv_free_interface_addresses(uv_interface_address_t* addresses,
+ int count) {
+ int i;
+
+ for (i = 0; i < count; i++) {
+ uv__free(addresses[i].name);
+ }
+
+ uv__free(addresses);
+}
+
+
+#if !defined(_POSIX_VERSION) || _POSIX_VERSION < 200809L
+size_t strnlen(const char* s, size_t maxlen) {
+ const char* end;
+ end = memchr(s, '\0', maxlen);
+ if (end == NULL)
+ return maxlen;
+ return end - s;
+}
+#endif
diff --git a/Utilities/cmlibuv/src/unix/sysinfo-loadavg.c b/Utilities/cmlibuv/src/unix/sysinfo-loadavg.c
new file mode 100644
index 0000000000..ebad0e89db
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/sysinfo-loadavg.c
@@ -0,0 +1,36 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdint.h>
+#include <sys/sysinfo.h>
+
+void uv_loadavg(double avg[3]) {
+ struct sysinfo info;
+
+ if (sysinfo(&info) < 0) return;
+
+ avg[0] = (double) info.loads[0] / 65536.0;
+ avg[1] = (double) info.loads[1] / 65536.0;
+ avg[2] = (double) info.loads[2] / 65536.0;
+}
diff --git a/Utilities/cmlibuv/src/unix/sysinfo-memory.c b/Utilities/cmlibuv/src/unix/sysinfo-memory.c
new file mode 100644
index 0000000000..23b4fc6e91
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/sysinfo-memory.c
@@ -0,0 +1,42 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdint.h>
+#include <sys/sysinfo.h>
+
+uint64_t uv_get_free_memory(void) {
+ struct sysinfo info;
+
+ if (sysinfo(&info) == 0)
+ return (uint64_t) info.freeram * info.mem_unit;
+ return 0;
+}
+
+uint64_t uv_get_total_memory(void) {
+ struct sysinfo info;
+
+ if (sysinfo(&info) == 0)
+ return (uint64_t) info.totalram * info.mem_unit;
+ return 0;
+}
diff --git a/Utilities/cmlibuv/src/unix/tcp.c b/Utilities/cmlibuv/src/unix/tcp.c
new file mode 100644
index 0000000000..73fc657a86
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/tcp.c
@@ -0,0 +1,519 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+
+
+static int new_socket(uv_tcp_t* handle, int domain, unsigned long flags) {
+ struct sockaddr_storage saddr;
+ socklen_t slen;
+ int sockfd;
+ int err;
+
+ err = uv__socket(domain, SOCK_STREAM, 0);
+ if (err < 0)
+ return err;
+ sockfd = err;
+
+ err = uv__stream_open((uv_stream_t*) handle, sockfd, flags);
+ if (err) {
+ uv__close(sockfd);
+ return err;
+ }
+
+ if (flags & UV_HANDLE_BOUND) {
+ /* Bind this new socket to an arbitrary port */
+ slen = sizeof(saddr);
+ memset(&saddr, 0, sizeof(saddr));
+ if (getsockname(uv__stream_fd(handle), (struct sockaddr*) &saddr, &slen)) {
+ uv__close(sockfd);
+ return UV__ERR(errno);
+ }
+
+ if (bind(uv__stream_fd(handle), (struct sockaddr*) &saddr, slen)) {
+ uv__close(sockfd);
+ return UV__ERR(errno);
+ }
+ }
+
+ return 0;
+}
+
+
+static int maybe_new_socket(uv_tcp_t* handle, int domain, unsigned long flags) {
+ struct sockaddr_storage saddr;
+ socklen_t slen;
+
+ if (domain == AF_UNSPEC) {
+ handle->flags |= flags;
+ return 0;
+ }
+
+ if (uv__stream_fd(handle) != -1) {
+
+ if (flags & UV_HANDLE_BOUND) {
+
+ if (handle->flags & UV_HANDLE_BOUND) {
+ /* It is already bound to a port. */
+ handle->flags |= flags;
+ return 0;
+ }
+
+ /* Query to see if tcp socket is bound. */
+ slen = sizeof(saddr);
+ memset(&saddr, 0, sizeof(saddr));
+ if (getsockname(uv__stream_fd(handle), (struct sockaddr*) &saddr, &slen))
+ return UV__ERR(errno);
+
+ if ((saddr.ss_family == AF_INET6 &&
+ ((struct sockaddr_in6*) &saddr)->sin6_port != 0) ||
+ (saddr.ss_family == AF_INET &&
+ ((struct sockaddr_in*) &saddr)->sin_port != 0)) {
+ /* Handle is already bound to a port. */
+ handle->flags |= flags;
+ return 0;
+ }
+
+ /* Bind to arbitrary port */
+ if (bind(uv__stream_fd(handle), (struct sockaddr*) &saddr, slen))
+ return UV__ERR(errno);
+ }
+
+ handle->flags |= flags;
+ return 0;
+ }
+
+ return new_socket(handle, domain, flags);
+}
+
+
+int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* tcp, unsigned int flags) {
+ int domain;
+
+ /* Use the lower 8 bits for the domain */
+ domain = flags & 0xFF;
+ if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC)
+ return UV_EINVAL;
+
+ if (flags & ~0xFF)
+ return UV_EINVAL;
+
+ uv__stream_init(loop, (uv_stream_t*)tcp, UV_TCP);
+
+ /* If anything fails beyond this point we need to remove the handle from
+ * the handle queue, since it was added by uv__handle_init in uv_stream_init.
+ */
+
+ if (domain != AF_UNSPEC) {
+ int err = maybe_new_socket(tcp, domain, 0);
+ if (err) {
+ QUEUE_REMOVE(&tcp->handle_queue);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+
+int uv_tcp_init(uv_loop_t* loop, uv_tcp_t* tcp) {
+ return uv_tcp_init_ex(loop, tcp, AF_UNSPEC);
+}
+
+
+int uv__tcp_bind(uv_tcp_t* tcp,
+ const struct sockaddr* addr,
+ unsigned int addrlen,
+ unsigned int flags) {
+ int err;
+ int on;
+
+ /* Cannot set IPv6-only mode on non-IPv6 socket. */
+ if ((flags & UV_TCP_IPV6ONLY) && addr->sa_family != AF_INET6)
+ return UV_EINVAL;
+
+ err = maybe_new_socket(tcp, addr->sa_family, 0);
+ if (err)
+ return err;
+
+ on = 1;
+ if (setsockopt(tcp->io_watcher.fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)))
+ return UV__ERR(errno);
+
+#ifndef __OpenBSD__
+#ifdef IPV6_V6ONLY
+ if (addr->sa_family == AF_INET6) {
+ on = (flags & UV_TCP_IPV6ONLY) != 0;
+ if (setsockopt(tcp->io_watcher.fd,
+ IPPROTO_IPV6,
+ IPV6_V6ONLY,
+ &on,
+ sizeof on) == -1) {
+#if defined(__MVS__)
+ if (errno == EOPNOTSUPP)
+ return UV_EINVAL;
+#endif
+ return UV__ERR(errno);
+ }
+ }
+#endif
+#endif
+
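+  /* An EADDRINUSE failure is stashed in delayed_error so that bind() itself
+   * succeeds and the error is reported later, on listen or connect. */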
+ errno = 0;
+ err = bind(tcp->io_watcher.fd, addr, addrlen);
+ if (err == -1 && errno != EADDRINUSE) {
+ if (errno == EAFNOSUPPORT)
+      /* OSX, other BSDs and SunOS fail with EAFNOSUPPORT when binding a
+       * socket created with AF_INET to an AF_INET6 address or vice versa. */
+ return UV_EINVAL;
+ return UV__ERR(errno);
+ }
+ tcp->delayed_error = (err == -1) ? UV__ERR(errno) : 0;
+
+ tcp->flags |= UV_HANDLE_BOUND;
+ if (addr->sa_family == AF_INET6)
+ tcp->flags |= UV_HANDLE_IPV6;
+
+ return 0;
+}
+
+
+int uv__tcp_connect(uv_connect_t* req,
+ uv_tcp_t* handle,
+ const struct sockaddr* addr,
+ unsigned int addrlen,
+ uv_connect_cb cb) {
+ int err;
+ int r;
+
+ assert(handle->type == UV_TCP);
+
+ if (handle->connect_req != NULL)
+ return UV_EALREADY; /* FIXME(bnoordhuis) UV_EINVAL or maybe UV_EBUSY. */
+
+ if (handle->delayed_error != 0)
+ goto out;
+
+ err = maybe_new_socket(handle,
+ addr->sa_family,
+ UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
+ if (err)
+ return err;
+
+ do {
+ errno = 0;
+ r = connect(uv__stream_fd(handle), addr, addrlen);
+ } while (r == -1 && errno == EINTR);
+
+  /* We check both the return value and errno because in rare cases connect()
+   * returns -1 while errno is 0 (observed on Android 4.3, OnePlus phone
+   * A0001_12_150227) even though the TCP three-way handshake actually
+   * completed.
+   */
+ if (r == -1 && errno != 0) {
+ if (errno == EINPROGRESS)
+ ; /* not an error */
+ else if (errno == ECONNREFUSED
+#if defined(__OpenBSD__)
+ || errno == EINVAL
+#endif
+ )
+      /* If we get ECONNREFUSED (Solaris) or EINVAL (OpenBSD), wait until the
+       * next tick to report the error. Solaris and OpenBSD want to report it
+       * immediately -- other unixes want to wait.
+       */
+ handle->delayed_error = UV__ERR(ECONNREFUSED);
+ else
+ return UV__ERR(errno);
+ }
+
+out:
+
+ uv__req_init(handle->loop, req, UV_CONNECT);
+ req->cb = cb;
+ req->handle = (uv_stream_t*) handle;
+ QUEUE_INIT(&req->queue);
+ handle->connect_req = req;
+
+ uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
+
+ if (handle->delayed_error)
+ uv__io_feed(handle->loop, &handle->io_watcher);
+
+ return 0;
+}
+
+
+int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock) {
+ int err;
+
+ if (uv__fd_exists(handle->loop, sock))
+ return UV_EEXIST;
+
+ err = uv__nonblock(sock, 1);
+ if (err)
+ return err;
+
+ return uv__stream_open((uv_stream_t*)handle,
+ sock,
+ UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
+}
+
+
+int uv_tcp_getsockname(const uv_tcp_t* handle,
+ struct sockaddr* name,
+ int* namelen) {
+
+ if (handle->delayed_error)
+ return handle->delayed_error;
+
+ return uv__getsockpeername((const uv_handle_t*) handle,
+ getsockname,
+ name,
+ namelen);
+}
+
+
+int uv_tcp_getpeername(const uv_tcp_t* handle,
+ struct sockaddr* name,
+ int* namelen) {
+
+ if (handle->delayed_error)
+ return handle->delayed_error;
+
+ return uv__getsockpeername((const uv_handle_t*) handle,
+ getpeername,
+ name,
+ namelen);
+}
+
+
+int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) {
+ int fd;
+ struct linger l = { 1, 0 };
+
+  /* Disallow a close-reset once a shutdown has started: setting SO_LINGER
+   * to zero at that point behaves inconsistently across platforms. */
+ if (handle->flags & UV_HANDLE_SHUTTING)
+ return UV_EINVAL;
+
+ fd = uv__stream_fd(handle);
+ if (0 != setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l))) {
+ if (errno == EINVAL) {
+ /* Open Group Specifications Issue 7, 2018 edition states that
+ * EINVAL may mean the socket has been shut down already.
+ * Behavior observed on Solaris, illumos and macOS. */
+ errno = 0;
+ } else {
+ return UV__ERR(errno);
+ }
+ }
+
+ uv_close((uv_handle_t*) handle, close_cb);
+ return 0;
+}
+
+
+int uv__tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
+ static int single_accept_cached = -1;
+ unsigned long flags;
+ int single_accept;
+ int err;
+
+ if (tcp->delayed_error)
+ return tcp->delayed_error;
+
+ single_accept = uv__load_relaxed(&single_accept_cached);
+ if (single_accept == -1) {
+ const char* val = getenv("UV_TCP_SINGLE_ACCEPT");
+ single_accept = (val != NULL && atoi(val) != 0); /* Off by default. */
+ uv__store_relaxed(&single_accept_cached, single_accept);
+ }
+
+ if (single_accept)
+ tcp->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;
+
+ flags = 0;
+#if defined(__MVS__)
+  /* On z/OS, listen() does not automatically bind an unbound socket,
+     so we must bind it to an arbitrary port first.
+  */
+ flags |= UV_HANDLE_BOUND;
+#endif
+ err = maybe_new_socket(tcp, AF_INET, flags);
+ if (err)
+ return err;
+
+ if (listen(tcp->io_watcher.fd, backlog))
+ return UV__ERR(errno);
+
+ tcp->connection_cb = cb;
+ tcp->flags |= UV_HANDLE_BOUND;
+
+ /* Start listening for connections. */
+ tcp->io_watcher.cb = uv__server_io;
+ uv__io_start(tcp->loop, &tcp->io_watcher, POLLIN);
+
+ return 0;
+}
+
+
+int uv__tcp_nodelay(int fd, int on) {
+ if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)))
+ return UV__ERR(errno);
+ return 0;
+}
+
+
+int uv__tcp_keepalive(int fd, int on, unsigned int delay) {
+ if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)))
+ return UV__ERR(errno);
+
+#ifdef TCP_KEEPIDLE
+ if (on) {
+ int intvl = 1; /* 1 second; same as default on Win32 */
+ int cnt = 10; /* 10 retries; same as hardcoded on Win32 */
+ if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &delay, sizeof(delay)))
+ return UV__ERR(errno);
+ if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)))
+ return UV__ERR(errno);
+ if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt)))
+ return UV__ERR(errno);
+ }
+#endif
+
+ /* Solaris/SmartOS, if you don't support keep-alive,
+ * then don't advertise it in your system headers...
+ */
+ /* FIXME(bnoordhuis) That's possibly because sizeof(delay) should be 1. */
+#if defined(TCP_KEEPALIVE) && !defined(__sun)
+ if (on && setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &delay, sizeof(delay)))
+ return UV__ERR(errno);
+#endif
+
+ return 0;
+}
+
+
+int uv_tcp_nodelay(uv_tcp_t* handle, int on) {
+ int err;
+
+ if (uv__stream_fd(handle) != -1) {
+ err = uv__tcp_nodelay(uv__stream_fd(handle), on);
+ if (err)
+ return err;
+ }
+
+ if (on)
+ handle->flags |= UV_HANDLE_TCP_NODELAY;
+ else
+ handle->flags &= ~UV_HANDLE_TCP_NODELAY;
+
+ return 0;
+}
+
+
+int uv_tcp_keepalive(uv_tcp_t* handle, int on, unsigned int delay) {
+ int err;
+
+ if (uv__stream_fd(handle) != -1) {
+    err = uv__tcp_keepalive(uv__stream_fd(handle), on, delay);
+ if (err)
+ return err;
+ }
+
+ if (on)
+ handle->flags |= UV_HANDLE_TCP_KEEPALIVE;
+ else
+ handle->flags &= ~UV_HANDLE_TCP_KEEPALIVE;
+
+ /* TODO Store delay if uv__stream_fd(handle) == -1 but don't want to enlarge
+ * uv_tcp_t with an int that's almost never used...
+ */
+
+ return 0;
+}
+
+
+int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
+ if (enable)
+ handle->flags &= ~UV_HANDLE_TCP_SINGLE_ACCEPT;
+ else
+ handle->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;
+ return 0;
+}
+
+
+void uv__tcp_close(uv_tcp_t* handle) {
+ uv__stream_close((uv_stream_t*)handle);
+}
+
+
+int uv_socketpair(int type, int protocol, uv_os_sock_t fds[2], int flags0, int flags1) {
+ uv_os_sock_t temp[2];
+ int err;
+#if defined(__FreeBSD__) || defined(__linux__)
+ int flags;
+
+ flags = type | SOCK_CLOEXEC;
+ if ((flags0 & UV_NONBLOCK_PIPE) && (flags1 & UV_NONBLOCK_PIPE))
+ flags |= SOCK_NONBLOCK;
+
+ if (socketpair(AF_UNIX, flags, protocol, temp))
+ return UV__ERR(errno);
+
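+  /* If socketpair() already created both ends non-blocking, there is
+   * nothing left to do. */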
+  if (flags & SOCK_NONBLOCK) {
+ fds[0] = temp[0];
+ fds[1] = temp[1];
+ return 0;
+ }
+#else
+ if (socketpair(AF_UNIX, type, protocol, temp))
+ return UV__ERR(errno);
+
+ if ((err = uv__cloexec(temp[0], 1)))
+ goto fail;
+ if ((err = uv__cloexec(temp[1], 1)))
+ goto fail;
+#endif
+
+ if (flags0 & UV_NONBLOCK_PIPE)
+ if ((err = uv__nonblock(temp[0], 1)))
+ goto fail;
+ if (flags1 & UV_NONBLOCK_PIPE)
+ if ((err = uv__nonblock(temp[1], 1)))
+ goto fail;
+
+ fds[0] = temp[0];
+ fds[1] = temp[1];
+ return 0;
+
+fail:
+ uv__close(temp[0]);
+ uv__close(temp[1]);
+ return err;
+}
diff --git a/Utilities/cmlibuv/src/unix/thread.c b/Utilities/cmlibuv/src/unix/thread.c
new file mode 100644
index 0000000000..5a07b027ea
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/thread.c
@@ -0,0 +1,864 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <pthread.h>
+#include <assert.h>
+#include <errno.h>
+
+#include <sys/time.h>
+#include <sys/resource.h> /* getrlimit() */
+#include <unistd.h> /* getpagesize() */
+
+#include <limits.h>
+
+#ifdef __MVS__
+#include <sys/ipc.h>
+#include <sys/sem.h>
+#endif
+
+#if defined(__GLIBC__) && !defined(__UCLIBC__)
+#include <gnu/libc-version.h> /* gnu_get_libc_version() */
+#endif
+
+#undef NANOSEC
+#define NANOSEC ((uint64_t) 1e9)
+
+#if defined(PTHREAD_BARRIER_SERIAL_THREAD)
+STATIC_ASSERT(sizeof(uv_barrier_t) == sizeof(pthread_barrier_t));
+#endif
+
+/* Note: guard clauses should match uv_barrier_t's in include/uv/unix.h. */
+#if defined(_AIX) || \
+ defined(__OpenBSD__) || \
+ !defined(PTHREAD_BARRIER_SERIAL_THREAD)
+int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
+ struct _uv_barrier* b;
+ int rc;
+
+ if (barrier == NULL || count == 0)
+ return UV_EINVAL;
+
+ b = uv__malloc(sizeof(*b));
+ if (b == NULL)
+ return UV_ENOMEM;
+
+ b->in = 0;
+ b->out = 0;
+ b->threshold = count;
+
+ rc = uv_mutex_init(&b->mutex);
+ if (rc != 0)
+ goto error2;
+
+ rc = uv_cond_init(&b->cond);
+ if (rc != 0)
+ goto error;
+
+ barrier->b = b;
+ return 0;
+
+error:
+ uv_mutex_destroy(&b->mutex);
+error2:
+ uv__free(b);
+ return rc;
+}
+
+
+int uv_barrier_wait(uv_barrier_t* barrier) {
+ struct _uv_barrier* b;
+ int last;
+
+ if (barrier == NULL || barrier->b == NULL)
+ return UV_EINVAL;
+
+ b = barrier->b;
+ uv_mutex_lock(&b->mutex);
+
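+  /* The last thread to arrive resets the entry counter and wakes the
+   * waiters; b->out counts the threads that still have to leave before the
+   * barrier can be reused or destroyed. */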
+ if (++b->in == b->threshold) {
+ b->in = 0;
+ b->out = b->threshold;
+ uv_cond_signal(&b->cond);
+ } else {
+ do
+ uv_cond_wait(&b->cond, &b->mutex);
+ while (b->in != 0);
+ }
+
+ last = (--b->out == 0);
+ uv_cond_signal(&b->cond);
+
+ uv_mutex_unlock(&b->mutex);
+ return last;
+}
+
+
+void uv_barrier_destroy(uv_barrier_t* barrier) {
+ struct _uv_barrier* b;
+
+ b = barrier->b;
+ uv_mutex_lock(&b->mutex);
+
+ assert(b->in == 0);
+ while (b->out != 0)
+ uv_cond_wait(&b->cond, &b->mutex);
+
+ if (b->in != 0)
+ abort();
+
+ uv_mutex_unlock(&b->mutex);
+ uv_mutex_destroy(&b->mutex);
+ uv_cond_destroy(&b->cond);
+
+ uv__free(barrier->b);
+ barrier->b = NULL;
+}
+
+#else
+
+int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
+ return UV__ERR(pthread_barrier_init(barrier, NULL, count));
+}
+
+
+int uv_barrier_wait(uv_barrier_t* barrier) {
+ int rc;
+
+ rc = pthread_barrier_wait(barrier);
+ if (rc != 0)
+ if (rc != PTHREAD_BARRIER_SERIAL_THREAD)
+ abort();
+
+ return rc == PTHREAD_BARRIER_SERIAL_THREAD;
+}
+
+
+void uv_barrier_destroy(uv_barrier_t* barrier) {
+ if (pthread_barrier_destroy(barrier))
+ abort();
+}
+
+#endif
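+
+/* Example (illustrative sketch, not part of the original source): typical
+ * use of the public barrier API. uv_barrier_wait() returns non-zero in
+ * exactly one of the participating threads, which makes that thread a
+ * natural place to call uv_barrier_destroy(). The worker() body is a
+ * hypothetical placeholder.
+ *
+ *   static uv_barrier_t blocker;
+ *
+ *   static void worker(void* arg) {
+ *     // ... per-thread setup ...
+ *     if (uv_barrier_wait(&blocker))
+ *       uv_barrier_destroy(&blocker);
+ *   }
+ *
+ *   // elsewhere: uv_barrier_init(&blocker, 4), then start four threads
+ *   // with uv_thread_create(&tid[i], worker, NULL).
+ */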
+
+
+/* Musl's PTHREAD_STACK_MIN is 2 KB on all architectures, which is
+ * too small to safely receive signals on.
+ *
+ * Musl's PTHREAD_STACK_MIN + MINSIGSTKSZ == 8192 on arm64 (which has
+ * the largest MINSIGSTKSZ of the architectures that musl supports) so
+ * let's use that as a lower bound.
+ *
+ * We use a hardcoded value because PTHREAD_STACK_MIN + MINSIGSTKSZ
+ * is between 28 and 133 KB when compiling against glibc, depending
+ * on the architecture.
+ */
+static size_t uv__min_stack_size(void) {
+ static const size_t min = 8192;
+
+#ifdef PTHREAD_STACK_MIN /* Not defined on NetBSD. */
+ if (min < (size_t) PTHREAD_STACK_MIN)
+ return PTHREAD_STACK_MIN;
+#endif /* PTHREAD_STACK_MIN */
+
+ return min;
+}
+
+
+/* On Linux, threads created by musl have a much smaller stack than threads
+ * created by glibc (80 kB vs. 2048 or 4096 kB). Follow glibc for consistency.
+ */
+static size_t uv__default_stack_size(void) {
+#if !defined(__linux__)
+ return 0;
+#elif defined(__PPC__) || defined(__ppc__) || defined(__powerpc__)
+ return 4 << 20; /* glibc default. */
+#else
+ return 2 << 20; /* glibc default. */
+#endif
+}
+
+
+/* On macOS, threads other than the main thread are created with a reduced
+ * stack size by default. Adjust to RLIMIT_STACK, aligned to the page size.
+ */
+size_t uv__thread_stack_size(void) {
+#if defined(__APPLE__) || defined(__linux__)
+ struct rlimit lim;
+
+ /* getrlimit() can fail on some aarch64 systems due to a glibc bug where
+ * the system call wrapper invokes the wrong system call. Don't treat
+ * that as fatal, just use the default stack size instead.
+ */
+ if (getrlimit(RLIMIT_STACK, &lim))
+ return uv__default_stack_size();
+
+ if (lim.rlim_cur == RLIM_INFINITY)
+ return uv__default_stack_size();
+
+ /* pthread_attr_setstacksize() expects page-aligned values. */
+ lim.rlim_cur -= lim.rlim_cur % (rlim_t) getpagesize();
+
+ if (lim.rlim_cur >= (rlim_t) uv__min_stack_size())
+ return lim.rlim_cur;
+#endif
+
+ return uv__default_stack_size();
+}
+
+
+int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {
+ uv_thread_options_t params;
+ params.flags = UV_THREAD_NO_FLAGS;
+ return uv_thread_create_ex(tid, &params, entry, arg);
+}
+
+int uv_thread_create_ex(uv_thread_t* tid,
+ const uv_thread_options_t* params,
+ void (*entry)(void *arg),
+ void *arg) {
+ int err;
+ pthread_attr_t* attr;
+ pthread_attr_t attr_storage;
+ size_t pagesize;
+ size_t stack_size;
+ size_t min_stack_size;
+
+ /* Used to squelch a -Wcast-function-type warning. */
+ union {
+ void (*in)(void*);
+ void* (*out)(void*);
+ } f;
+
+ stack_size =
+ params->flags & UV_THREAD_HAS_STACK_SIZE ? params->stack_size : 0;
+
+ attr = NULL;
+ if (stack_size == 0) {
+ stack_size = uv__thread_stack_size();
+ } else {
+ pagesize = (size_t)getpagesize();
+ /* Round up to the nearest page boundary. */
+ stack_size = (stack_size + pagesize - 1) &~ (pagesize - 1);
+ min_stack_size = uv__min_stack_size();
+ if (stack_size < min_stack_size)
+ stack_size = min_stack_size;
+ }
+
+ if (stack_size > 0) {
+ attr = &attr_storage;
+
+ if (pthread_attr_init(attr))
+ abort();
+
+ if (pthread_attr_setstacksize(attr, stack_size))
+ abort();
+ }
+
+ f.in = entry;
+ err = pthread_create(tid, attr, f.out, arg);
+
+ if (attr != NULL)
+ pthread_attr_destroy(attr);
+
+ return UV__ERR(err);
+}
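+
+/* Example (illustrative, not part of the original source): requesting a
+ * custom stack size through the public API. The value is rounded up to a
+ * page boundary and clamped to uv__min_stack_size() by the code above;
+ * worker() is a hypothetical thread entry point.
+ *
+ *   uv_thread_t tid;
+ *   uv_thread_options_t opts;
+ *   opts.flags = UV_THREAD_HAS_STACK_SIZE;
+ *   opts.stack_size = 1 << 20;  // ask for 1 MiB
+ *   if (uv_thread_create_ex(&tid, &opts, worker, NULL) == 0)
+ *     uv_thread_join(&tid);
+ */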
+
+
+uv_thread_t uv_thread_self(void) {
+ return pthread_self();
+}
+
+int uv_thread_join(uv_thread_t *tid) {
+ return UV__ERR(pthread_join(*tid, NULL));
+}
+
+
+int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) {
+ return pthread_equal(*t1, *t2);
+}
+
+
+int uv_mutex_init(uv_mutex_t* mutex) {
+#if defined(NDEBUG) || !defined(PTHREAD_MUTEX_ERRORCHECK)
+ return UV__ERR(pthread_mutex_init(mutex, NULL));
+#else
+ pthread_mutexattr_t attr;
+ int err;
+
+ if (pthread_mutexattr_init(&attr))
+ abort();
+
+ if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK))
+ abort();
+
+ err = pthread_mutex_init(mutex, &attr);
+
+ if (pthread_mutexattr_destroy(&attr))
+ abort();
+
+ return UV__ERR(err);
+#endif
+}
+
+
+int uv_mutex_init_recursive(uv_mutex_t* mutex) {
+ pthread_mutexattr_t attr;
+ int err;
+
+ if (pthread_mutexattr_init(&attr))
+ abort();
+
+ if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE))
+ abort();
+
+ err = pthread_mutex_init(mutex, &attr);
+
+ if (pthread_mutexattr_destroy(&attr))
+ abort();
+
+ return UV__ERR(err);
+}
+
+
+void uv_mutex_destroy(uv_mutex_t* mutex) {
+ if (pthread_mutex_destroy(mutex))
+ abort();
+}
+
+
+void uv_mutex_lock(uv_mutex_t* mutex) {
+ if (pthread_mutex_lock(mutex))
+ abort();
+}
+
+
+int uv_mutex_trylock(uv_mutex_t* mutex) {
+ int err;
+
+ err = pthread_mutex_trylock(mutex);
+ if (err) {
+ if (err != EBUSY && err != EAGAIN)
+ abort();
+ return UV_EBUSY;
+ }
+
+ return 0;
+}
+
+
+void uv_mutex_unlock(uv_mutex_t* mutex) {
+ if (pthread_mutex_unlock(mutex))
+ abort();
+}
+
+
+int uv_rwlock_init(uv_rwlock_t* rwlock) {
+ return UV__ERR(pthread_rwlock_init(rwlock, NULL));
+}
+
+
+void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
+ if (pthread_rwlock_destroy(rwlock))
+ abort();
+}
+
+
+void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
+ if (pthread_rwlock_rdlock(rwlock))
+ abort();
+}
+
+
+int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
+ int err;
+
+ err = pthread_rwlock_tryrdlock(rwlock);
+ if (err) {
+ if (err != EBUSY && err != EAGAIN)
+ abort();
+ return UV_EBUSY;
+ }
+
+ return 0;
+}
+
+
+void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
+ if (pthread_rwlock_unlock(rwlock))
+ abort();
+}
+
+
+void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
+ if (pthread_rwlock_wrlock(rwlock))
+ abort();
+}
+
+
+int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
+ int err;
+
+ err = pthread_rwlock_trywrlock(rwlock);
+ if (err) {
+ if (err != EBUSY && err != EAGAIN)
+ abort();
+ return UV_EBUSY;
+ }
+
+ return 0;
+}
+
+
+void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
+ if (pthread_rwlock_unlock(rwlock))
+ abort();
+}
+
+
+void uv_once(uv_once_t* guard, void (*callback)(void)) {
+ if (pthread_once(guard, callback))
+ abort();
+}
+
+#if defined(__APPLE__) && defined(__MACH__)
+
+int uv_sem_init(uv_sem_t* sem, unsigned int value) {
+ kern_return_t err;
+
+ err = semaphore_create(mach_task_self(), sem, SYNC_POLICY_FIFO, value);
+ if (err == KERN_SUCCESS)
+ return 0;
+ if (err == KERN_INVALID_ARGUMENT)
+ return UV_EINVAL;
+ if (err == KERN_RESOURCE_SHORTAGE)
+ return UV_ENOMEM;
+
+ abort();
+ return UV_EINVAL; /* Satisfy the compiler. */
+}
+
+
+void uv_sem_destroy(uv_sem_t* sem) {
+ if (semaphore_destroy(mach_task_self(), *sem))
+ abort();
+}
+
+
+void uv_sem_post(uv_sem_t* sem) {
+ if (semaphore_signal(*sem))
+ abort();
+}
+
+
+void uv_sem_wait(uv_sem_t* sem) {
+ int r;
+
+ do
+ r = semaphore_wait(*sem);
+ while (r == KERN_ABORTED);
+
+ if (r != KERN_SUCCESS)
+ abort();
+}
+
+
+int uv_sem_trywait(uv_sem_t* sem) {
+ mach_timespec_t interval;
+ kern_return_t err;
+
+ interval.tv_sec = 0;
+ interval.tv_nsec = 0;
+
+ err = semaphore_timedwait(*sem, interval);
+ if (err == KERN_SUCCESS)
+ return 0;
+ if (err == KERN_OPERATION_TIMED_OUT)
+ return UV_EAGAIN;
+
+ abort();
+ return UV_EINVAL; /* Satisfy the compiler. */
+}
+
+#else /* !(defined(__APPLE__) && defined(__MACH__)) */
+
+#if defined(__GLIBC__) && !defined(__UCLIBC__)
+
+/* Hack around https://sourceware.org/bugzilla/show_bug.cgi?id=12674
+ * by providing a custom implementation for glibc < 2.21 in terms of other
+ * concurrency primitives.
+ * Refs: https://github.com/nodejs/node/issues/19903 */
+
+/* To preserve ABI compatibility, we treat the uv_sem_t as storage for
+ * a pointer to the actual struct we're using underneath. */
+
+static uv_once_t glibc_version_check_once = UV_ONCE_INIT;
+static int platform_needs_custom_semaphore = 0;
+
+static void glibc_version_check(void) {
+ const char* version = gnu_get_libc_version();
+ platform_needs_custom_semaphore =
+ version[0] == '2' && version[1] == '.' &&
+ atoi(version + 2) < 21;
+}
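+
+/* For example, gnu_get_libc_version() returning "2.17" selects the custom
+ * semaphore below, while "2.24" (or anything >= 2.21) keeps the native
+ * sem_* implementation. */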
+
+#elif defined(__MVS__)
+
+#define platform_needs_custom_semaphore 1
+
+#else /* !defined(__GLIBC__) && !defined(__MVS__) */
+
+#define platform_needs_custom_semaphore 0
+
+#endif
+
+typedef struct uv_semaphore_s {
+ uv_mutex_t mutex;
+ uv_cond_t cond;
+ unsigned int value;
+} uv_semaphore_t;
+
+#if (defined(__GLIBC__) && !defined(__UCLIBC__)) || \
+ platform_needs_custom_semaphore
+STATIC_ASSERT(sizeof(uv_sem_t) >= sizeof(uv_semaphore_t*));
+#endif
+
+static int uv__custom_sem_init(uv_sem_t* sem_, unsigned int value) {
+ int err;
+ uv_semaphore_t* sem;
+
+ sem = uv__malloc(sizeof(*sem));
+ if (sem == NULL)
+ return UV_ENOMEM;
+
+ if ((err = uv_mutex_init(&sem->mutex)) != 0) {
+ uv__free(sem);
+ return err;
+ }
+
+ if ((err = uv_cond_init(&sem->cond)) != 0) {
+ uv_mutex_destroy(&sem->mutex);
+ uv__free(sem);
+ return err;
+ }
+
+ sem->value = value;
+ *(uv_semaphore_t**)sem_ = sem;
+ return 0;
+}
+
+
+static void uv__custom_sem_destroy(uv_sem_t* sem_) {
+ uv_semaphore_t* sem;
+
+ sem = *(uv_semaphore_t**)sem_;
+ uv_cond_destroy(&sem->cond);
+ uv_mutex_destroy(&sem->mutex);
+ uv__free(sem);
+}
+
+
+static void uv__custom_sem_post(uv_sem_t* sem_) {
+ uv_semaphore_t* sem;
+
+ sem = *(uv_semaphore_t**)sem_;
+ uv_mutex_lock(&sem->mutex);
+ sem->value++;
+ if (sem->value == 1)
+ uv_cond_signal(&sem->cond);
+ uv_mutex_unlock(&sem->mutex);
+}
+
+
+static void uv__custom_sem_wait(uv_sem_t* sem_) {
+ uv_semaphore_t* sem;
+
+ sem = *(uv_semaphore_t**)sem_;
+ uv_mutex_lock(&sem->mutex);
+ while (sem->value == 0)
+ uv_cond_wait(&sem->cond, &sem->mutex);
+ sem->value--;
+ uv_mutex_unlock(&sem->mutex);
+}
+
+
+static int uv__custom_sem_trywait(uv_sem_t* sem_) {
+ uv_semaphore_t* sem;
+
+ sem = *(uv_semaphore_t**)sem_;
+ if (uv_mutex_trylock(&sem->mutex) != 0)
+ return UV_EAGAIN;
+
+ if (sem->value == 0) {
+ uv_mutex_unlock(&sem->mutex);
+ return UV_EAGAIN;
+ }
+
+ sem->value--;
+ uv_mutex_unlock(&sem->mutex);
+
+ return 0;
+}
+
+static int uv__sem_init(uv_sem_t* sem, unsigned int value) {
+ if (sem_init(sem, 0, value))
+ return UV__ERR(errno);
+ return 0;
+}
+
+
+static void uv__sem_destroy(uv_sem_t* sem) {
+ if (sem_destroy(sem))
+ abort();
+}
+
+
+static void uv__sem_post(uv_sem_t* sem) {
+ if (sem_post(sem))
+ abort();
+}
+
+
+static void uv__sem_wait(uv_sem_t* sem) {
+ int r;
+
+ do
+ r = sem_wait(sem);
+ while (r == -1 && errno == EINTR);
+
+ if (r)
+ abort();
+}
+
+
+static int uv__sem_trywait(uv_sem_t* sem) {
+ int r;
+
+ do
+ r = sem_trywait(sem);
+ while (r == -1 && errno == EINTR);
+
+ if (r) {
+ if (errno == EAGAIN)
+ return UV_EAGAIN;
+ abort();
+ }
+
+ return 0;
+}
+
+int uv_sem_init(uv_sem_t* sem, unsigned int value) {
+#if defined(__GLIBC__) && !defined(__UCLIBC__)
+ uv_once(&glibc_version_check_once, glibc_version_check);
+#endif
+
+ if (platform_needs_custom_semaphore)
+ return uv__custom_sem_init(sem, value);
+ else
+ return uv__sem_init(sem, value);
+}
+
+
+void uv_sem_destroy(uv_sem_t* sem) {
+ if (platform_needs_custom_semaphore)
+ uv__custom_sem_destroy(sem);
+ else
+ uv__sem_destroy(sem);
+}
+
+
+void uv_sem_post(uv_sem_t* sem) {
+ if (platform_needs_custom_semaphore)
+ uv__custom_sem_post(sem);
+ else
+ uv__sem_post(sem);
+}
+
+
+void uv_sem_wait(uv_sem_t* sem) {
+ if (platform_needs_custom_semaphore)
+ uv__custom_sem_wait(sem);
+ else
+ uv__sem_wait(sem);
+}
+
+
+int uv_sem_trywait(uv_sem_t* sem) {
+ if (platform_needs_custom_semaphore)
+ return uv__custom_sem_trywait(sem);
+ else
+ return uv__sem_trywait(sem);
+}
+
+#endif /* defined(__APPLE__) && defined(__MACH__) */
+
+
+#if defined(__APPLE__) && defined(__MACH__) || defined(__MVS__)
+
+int uv_cond_init(uv_cond_t* cond) {
+ return UV__ERR(pthread_cond_init(cond, NULL));
+}
+
+#else /* !(defined(__APPLE__) && defined(__MACH__) || defined(__MVS__)) */
+
+int uv_cond_init(uv_cond_t* cond) {
+ pthread_condattr_t attr;
+ int err;
+
+ err = pthread_condattr_init(&attr);
+ if (err)
+ return UV__ERR(err);
+
+#if !defined(__hpux)
+ err = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
+ if (err)
+ goto error2;
+#endif
+
+ err = pthread_cond_init(cond, &attr);
+ if (err)
+ goto error2;
+
+ err = pthread_condattr_destroy(&attr);
+ if (err)
+ goto error;
+
+ return 0;
+
+error:
+ pthread_cond_destroy(cond);
+error2:
+ pthread_condattr_destroy(&attr);
+ return UV__ERR(err);
+}
+
+#endif /* defined(__APPLE__) && defined(__MACH__) || defined(__MVS__) */
+
+void uv_cond_destroy(uv_cond_t* cond) {
+#if defined(__APPLE__) && defined(__MACH__)
+ /* It has been reported that destroying condition variables that have been
+ * signalled but not waited on can sometimes result in application crashes.
+ * See https://codereview.chromium.org/1323293005.
+ */
+ pthread_mutex_t mutex;
+ struct timespec ts;
+ int err;
+
+ if (pthread_mutex_init(&mutex, NULL))
+ abort();
+
+ if (pthread_mutex_lock(&mutex))
+ abort();
+
+ ts.tv_sec = 0;
+ ts.tv_nsec = 1;
+
+ err = pthread_cond_timedwait_relative_np(cond, &mutex, &ts);
+ if (err != 0 && err != ETIMEDOUT)
+ abort();
+
+ if (pthread_mutex_unlock(&mutex))
+ abort();
+
+ if (pthread_mutex_destroy(&mutex))
+ abort();
+#endif /* defined(__APPLE__) && defined(__MACH__) */
+
+ if (pthread_cond_destroy(cond))
+ abort();
+}
+
+void uv_cond_signal(uv_cond_t* cond) {
+ if (pthread_cond_signal(cond))
+ abort();
+}
+
+void uv_cond_broadcast(uv_cond_t* cond) {
+ if (pthread_cond_broadcast(cond))
+ abort();
+}
+
+void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {
+ if (pthread_cond_wait(cond, mutex))
+ abort();
+}
+
+
+int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {
+ int r;
+ struct timespec ts;
+#if defined(__MVS__)
+ struct timeval tv;
+#endif
+
+#if defined(__APPLE__) && defined(__MACH__)
+ ts.tv_sec = timeout / NANOSEC;
+ ts.tv_nsec = timeout % NANOSEC;
+ r = pthread_cond_timedwait_relative_np(cond, mutex, &ts);
+#else
+#if defined(__MVS__)
+ if (gettimeofday(&tv, NULL))
+ abort();
+ timeout += tv.tv_sec * NANOSEC + tv.tv_usec * 1e3;
+#else
+ timeout += uv__hrtime(UV_CLOCK_PRECISE);
+#endif
+ ts.tv_sec = timeout / NANOSEC;
+ ts.tv_nsec = timeout % NANOSEC;
+ r = pthread_cond_timedwait(cond, mutex, &ts);
+#endif
+
+ if (r == 0)
+ return 0;
+
+ if (r == ETIMEDOUT)
+ return UV_ETIMEDOUT;
+
+ abort();
+#ifndef __SUNPRO_C
+ return UV_EINVAL; /* Satisfy the compiler. */
+#endif
+}
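+
+/* Example (illustrative, not part of the original source): the timeout is
+ * relative and expressed in nanoseconds; `ready`, `cond` and `mutex` are
+ * hypothetical placeholders.
+ *
+ *   uv_mutex_lock(&mutex);
+ *   while (!ready) {
+ *     // wait up to 250 ms for a signal before giving up
+ *     if (uv_cond_timedwait(&cond, &mutex, 250 * 1000000ULL) == UV_ETIMEDOUT)
+ *       break;
+ *   }
+ *   uv_mutex_unlock(&mutex);
+ */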
+
+
+int uv_key_create(uv_key_t* key) {
+ return UV__ERR(pthread_key_create(key, NULL));
+}
+
+
+void uv_key_delete(uv_key_t* key) {
+ if (pthread_key_delete(*key))
+ abort();
+}
+
+
+void* uv_key_get(uv_key_t* key) {
+ return pthread_getspecific(*key);
+}
+
+
+void uv_key_set(uv_key_t* key, void* value) {
+ if (pthread_setspecific(*key, value))
+ abort();
+}
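+
+/* Example (illustrative, not part of the original source): thread-local
+ * storage with the uv_key_* wrappers; `state` is a hypothetical per-thread
+ * pointer.
+ *
+ *   static uv_key_t key;
+ *
+ *   // once, before any thread touches it:
+ *   if (uv_key_create(&key)) abort();
+ *
+ *   // in each thread:
+ *   uv_key_set(&key, state);
+ *   void* s = uv_key_get(&key);  // returns `state` for this thread only
+ *
+ *   // at shutdown: uv_key_delete(&key);
+ */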
diff --git a/Utilities/cmlibuv/src/unix/tty.c b/Utilities/cmlibuv/src/unix/tty.c
new file mode 100644
index 0000000000..44fdb9c189
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/tty.c
@@ -0,0 +1,469 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+#include "spinlock.h"
+
+#include <stdlib.h>
+#include <assert.h>
+#include <unistd.h>
+#include <termios.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+
+#if defined(__MVS__) && !defined(IMAXBEL)
+#define IMAXBEL 0
+#endif
+
+#if defined(__PASE__)
+/* On IBM i PASE, for better compatibility with running interactive programs in
+ * a 5250 environment, isatty() will return true for the stdin/stdout/stderr
+ * streams created by QSH/QP2TERM.
+ *
+ * For more, see docs on PASE_STDIO_ISATTY in
+ * https://www.ibm.com/support/knowledgecenter/ssw_ibm_i_74/apis/pase_environ.htm
+ *
+ * This behavior causes problems for Node, which expects that when isatty()
+ * returns true, TTY ioctls are supported by that fd (not an unreasonable
+ * expectation) and crashes with assertion errors when they are not.
+ *
+ * Here, we create our own version of isatty() that uses ioctl() to identify
+ * whether the fd is *really* a TTY or not.
+ */
+static int isreallyatty(int file) {
+ int rc;
+
+ rc = !ioctl(file, TXISATTY + 0x81, NULL);
+ if (!rc && errno != EBADF)
+ errno = ENOTTY;
+
+ return rc;
+}
+#define isatty(fd) isreallyatty(fd)
+#endif
+
+#if !defined(CMAKE_BOOTSTRAP)
+
+static int orig_termios_fd = -1;
+static struct termios orig_termios;
+static uv_spinlock_t termios_spinlock = UV_SPINLOCK_INITIALIZER;
+
+int uv__tcsetattr(int fd, int how, const struct termios *term) {
+ int rc;
+
+ do
+ rc = tcsetattr(fd, how, term);
+ while (rc == -1 && errno == EINTR);
+
+ if (rc == -1)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+static int uv__tty_is_slave(const int fd) {
+ int result;
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
+ int dummy;
+
+ result = ioctl(fd, TIOCGPTN, &dummy) != 0;
+#elif defined(__APPLE__)
+ char dummy[256];
+
+ result = ioctl(fd, TIOCPTYGNAME, &dummy) != 0;
+#elif defined(__NetBSD__)
+  /*
+   * As an extension, NetBSD's ptsname(3) and ptsname_r(3) return the slave
+   * device name for both descriptors, the master one and the slave one.
+   *
+   * Instead, compare the descriptor's major device number against that of
+   * the pts driver.
+   *
+   * The major numbers are machine-dependent; on NetBSD/amd64 they are:
+   * - master tty: ptc - major 6
+   * - slave tty: pts - major 5
+   */
+
+ struct stat sb;
+ /* Lookup device's major for the pts driver and cache it. */
+ static devmajor_t pts = NODEVMAJOR;
+
+ if (pts == NODEVMAJOR) {
+ pts = getdevmajor("pts", S_IFCHR);
+ if (pts == NODEVMAJOR)
+ abort();
+ }
+
+ /* Lookup stat structure behind the file descriptor. */
+ if (fstat(fd, &sb) != 0)
+ abort();
+
+ /* Assert character device. */
+ if (!S_ISCHR(sb.st_mode))
+ abort();
+
+ /* Assert valid major. */
+ if (major(sb.st_rdev) == NODEVMAJOR)
+ abort();
+
+ result = (pts == major(sb.st_rdev));
+#else
+  /* Fall back to ptsname(3), which succeeds only for master descriptors. */
+ result = ptsname(fd) == NULL;
+#endif
+ return result;
+}
+
+int uv_tty_init(uv_loop_t* loop, uv_tty_t* tty, int fd, int unused) {
+ uv_handle_type type;
+ int flags = 0;
+ int newfd = -1;
+ int r;
+ int saved_flags;
+ int mode;
+ char path[256];
+ (void)unused; /* deprecated parameter is no longer needed */
+
+ /* File descriptors that refer to files cannot be monitored with epoll.
+ * That restriction also applies to character devices like /dev/random
+ * (but obviously not /dev/tty.)
+ */
+ type = uv_guess_handle(fd);
+ if (type == UV_FILE || type == UV_UNKNOWN_HANDLE)
+ return UV_EINVAL;
+
+ /* Save the fd flags in case we need to restore them due to an error. */
+ do
+ saved_flags = fcntl(fd, F_GETFL);
+ while (saved_flags == -1 && errno == EINTR);
+
+ if (saved_flags == -1)
+ return UV__ERR(errno);
+ mode = saved_flags & O_ACCMODE;
+
+ /* Reopen the file descriptor when it refers to a tty. This lets us put the
+ * tty in non-blocking mode without affecting other processes that share it
+ * with us.
+ *
+ * Example: `node | cat` - if we put our fd 0 in non-blocking mode, it also
+ * affects fd 1 of `cat` because both file descriptors refer to the same
+ * struct file in the kernel. When we reopen our fd 0, it points to a
+ * different struct file, hence changing its properties doesn't affect
+ * other processes.
+ */
+ if (type == UV_TTY) {
+ /* Reopening a pty in master mode won't work either because the reopened
+ * pty will be in slave mode (*BSD) or reopening will allocate a new
+ * master/slave pair (Linux). Therefore check if the fd points to a
+ * slave device.
+ */
+ if (uv__tty_is_slave(fd) && ttyname_r(fd, path, sizeof(path)) == 0)
+ r = uv__open_cloexec(path, mode | O_NOCTTY);
+ else
+ r = -1;
+
+ if (r < 0) {
+ /* fallback to using blocking writes */
+ if (mode != O_RDONLY)
+ flags |= UV_HANDLE_BLOCKING_WRITES;
+ goto skip;
+ }
+
+ newfd = r;
+
+ r = uv__dup2_cloexec(newfd, fd);
+ if (r < 0 && r != UV_EINVAL) {
+ /* EINVAL means newfd == fd which could conceivably happen if another
+ * thread called close(fd) between our calls to isatty() and open().
+ * That's a rather unlikely event but let's handle it anyway.
+ */
+ uv__close(newfd);
+ return r;
+ }
+
+ fd = newfd;
+ }
+
+skip:
+ uv__stream_init(loop, (uv_stream_t*) tty, UV_TTY);
+
+ /* If anything fails beyond this point we need to remove the handle from
+ * the handle queue, since it was added by uv__handle_init in uv_stream_init.
+ */
+
+ if (!(flags & UV_HANDLE_BLOCKING_WRITES))
+ uv__nonblock(fd, 1);
+
+#if defined(__APPLE__)
+ r = uv__stream_try_select((uv_stream_t*) tty, &fd);
+ if (r) {
+ int rc = r;
+ if (newfd != -1)
+ uv__close(newfd);
+ QUEUE_REMOVE(&tty->handle_queue);
+ do
+ r = fcntl(fd, F_SETFL, saved_flags);
+ while (r == -1 && errno == EINTR);
+ return rc;
+ }
+#endif
+
+ if (mode != O_WRONLY)
+ flags |= UV_HANDLE_READABLE;
+ if (mode != O_RDONLY)
+ flags |= UV_HANDLE_WRITABLE;
+
+ uv__stream_open((uv_stream_t*) tty, fd, flags);
+ tty->mode = UV_TTY_MODE_NORMAL;
+
+ return 0;
+}
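+
+/* Example (illustrative, not part of the original source): putting stdin
+ * into raw mode through the public API; `loop` is a hypothetical running
+ * loop.
+ *
+ *   uv_tty_t tty;
+ *   uv_tty_init(loop, &tty, 0, 0);          // fd 0; last arg is ignored
+ *   uv_tty_set_mode(&tty, UV_TTY_MODE_RAW);
+ *   // ... read input via uv_read_start((uv_stream_t*) &tty, ...) ...
+ *   uv_tty_reset_mode();                    // restore terminal on exit
+ */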
+
+static void uv__tty_make_raw(struct termios* tio) {
+ assert(tio != NULL);
+
+#if defined __sun || defined __MVS__ || defined __hpux
+ /*
+ * This implementation of cfmakeraw for Solaris and derivatives is taken from
+ * http://www.perkin.org.uk/posts/solaris-portability-cfmakeraw.html.
+ */
+ tio->c_iflag &= ~(IMAXBEL | IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR |
+ IGNCR | ICRNL | IXON);
+ tio->c_oflag &= ~OPOST;
+ tio->c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
+ tio->c_cflag &= ~(CSIZE | PARENB);
+ tio->c_cflag |= CS8;
+
+ /*
+ * By default, most software expects a pending read to block until at
+ * least one byte becomes available. As per termio(7I), this requires
+ * setting the MIN and TIME parameters appropriately.
+ *
+ * As a somewhat unfortunate artifact of history, the MIN and TIME slots
+ * in the control character array overlap with the EOF and EOL slots used
+ * for canonical mode processing. Because the EOF character needs to be
+ * the ASCII EOT value (aka Control-D), it has the byte value 4. When
+ * switching to raw mode, this is interpreted as a MIN value of 4; i.e.,
+ * reads will block until at least four bytes have been input.
+ *
+ * Other platforms with a distinct MIN slot like Linux and FreeBSD appear
+ * to default to a MIN value of 1, so we'll force that value here:
+ */
+ tio->c_cc[VMIN] = 1;
+ tio->c_cc[VTIME] = 0;
+#else
+ cfmakeraw(tio);
+#endif /* defined(__sun) || defined(__MVS__) || defined(__hpux) */
+}
+
+int uv_tty_set_mode(uv_tty_t* tty, uv_tty_mode_t mode) {
+ struct termios tmp;
+ int fd;
+ int rc;
+
+ if (tty->mode == (int) mode)
+ return 0;
+
+ fd = uv__stream_fd(tty);
+ if (tty->mode == UV_TTY_MODE_NORMAL && mode != UV_TTY_MODE_NORMAL) {
+ do
+ rc = tcgetattr(fd, &tty->orig_termios);
+ while (rc == -1 && errno == EINTR);
+
+ if (rc == -1)
+ return UV__ERR(errno);
+
+ /* This is used for uv_tty_reset_mode() */
+ uv_spinlock_lock(&termios_spinlock);
+ if (orig_termios_fd == -1) {
+ orig_termios = tty->orig_termios;
+ orig_termios_fd = fd;
+ }
+ uv_spinlock_unlock(&termios_spinlock);
+ }
+
+ tmp = tty->orig_termios;
+ switch (mode) {
+ case UV_TTY_MODE_NORMAL:
+ break;
+ case UV_TTY_MODE_RAW:
+ tmp.c_iflag &= ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON);
+ tmp.c_oflag |= (ONLCR);
+ tmp.c_cflag |= (CS8);
+ tmp.c_lflag &= ~(ECHO | ICANON | IEXTEN | ISIG);
+ tmp.c_cc[VMIN] = 1;
+ tmp.c_cc[VTIME] = 0;
+ break;
+ case UV_TTY_MODE_IO:
+ uv__tty_make_raw(&tmp);
+ break;
+ }
+
+ /* Apply changes after draining */
+ rc = uv__tcsetattr(fd, TCSADRAIN, &tmp);
+ if (rc == 0)
+ tty->mode = mode;
+
+ return rc;
+}
+
+
+int uv_tty_get_winsize(uv_tty_t* tty, int* width, int* height) {
+ struct winsize ws;
+ int err;
+
+ do
+ err = ioctl(uv__stream_fd(tty), TIOCGWINSZ, &ws);
+ while (err == -1 && errno == EINTR);
+
+ if (err == -1)
+ return UV__ERR(errno);
+
+ *width = ws.ws_col;
+ *height = ws.ws_row;
+
+ return 0;
+}
+
+#endif
+
+uv_handle_type uv_guess_handle(uv_file file) {
+ struct sockaddr_storage ss;
+ struct stat s;
+ socklen_t len;
+ int type;
+
+ if (file < 0)
+ return UV_UNKNOWN_HANDLE;
+
+ if (isatty(file))
+ return UV_TTY;
+
+ if (fstat(file, &s)) {
+#if defined(__PASE__)
+    /* On IBM i, receiving an RST from TCP instead of a FIN immediately puts
+     * the fd into an error state. fstat will return EINVAL, and getsockname
+     * will also return EINVAL even if sockaddr_storage is valid. (If the file
+     * does not refer to a socket, ENOTSOCK is returned instead.)
+     * In such cases we still permit the user to open the connection as
+     * uv_tcp, so that the user is immediately notified of the error in their
+     * read callback and can close this fd.
+     */
+ len = sizeof(ss);
+ if (getsockname(file, (struct sockaddr*) &ss, &len)) {
+ if (errno == EINVAL)
+ return UV_TCP;
+ }
+#endif
+ return UV_UNKNOWN_HANDLE;
+ }
+
+ if (S_ISREG(s.st_mode))
+ return UV_FILE;
+
+ if (S_ISCHR(s.st_mode))
+ return UV_FILE; /* XXX UV_NAMED_PIPE? */
+
+ if (S_ISFIFO(s.st_mode))
+ return UV_NAMED_PIPE;
+
+ if (!S_ISSOCK(s.st_mode))
+ return UV_UNKNOWN_HANDLE;
+
+ len = sizeof(ss);
+ if (getsockname(file, (struct sockaddr*) &ss, &len)) {
+#if defined(_AIX)
+    /* On AIX, receiving an RST from TCP instead of a FIN immediately puts
+     * the fd into an error state; getsockname will then return EINVAL even
+     * if sockaddr_storage is valid.
+     * In such cases we still permit the user to open the connection as
+     * uv_tcp, so that the user is immediately notified of the error in their
+     * read callback and can close this fd.
+     */
+ if (errno == EINVAL) {
+ return UV_TCP;
+ }
+#endif
+ return UV_UNKNOWN_HANDLE;
+ }
+
+ len = sizeof(type);
+ if (getsockopt(file, SOL_SOCKET, SO_TYPE, &type, &len))
+ return UV_UNKNOWN_HANDLE;
+
+ if (type == SOCK_DGRAM)
+ if (ss.ss_family == AF_INET || ss.ss_family == AF_INET6)
+ return UV_UDP;
+
+ if (type == SOCK_STREAM) {
+#if defined(_AIX) || defined(__DragonFly__)
+    /* On AIX and DragonFly, the getsockname call returns an empty sa
+     * structure for sockets of type AF_UNIX. For all other types it returns
+     * a properly filled-in structure.
+     */
+ if (len == 0)
+ return UV_NAMED_PIPE;
+#endif /* defined(_AIX) || defined(__DragonFly__) */
+
+ if (ss.ss_family == AF_INET || ss.ss_family == AF_INET6)
+ return UV_TCP;
+ if (ss.ss_family == AF_UNIX)
+ return UV_NAMED_PIPE;
+ }
+
+ return UV_UNKNOWN_HANDLE;
+}
+
+#if !defined(CMAKE_BOOTSTRAP)
+
+/* This function is async signal-safe, meaning that it's safe to call from
+ * inside a signal handler _unless_ execution was inside uv_tty_set_mode()'s
+ * critical section when the signal was raised.
+ */
+int uv_tty_reset_mode(void) {
+ int saved_errno;
+ int err;
+
+ saved_errno = errno;
+ if (!uv_spinlock_trylock(&termios_spinlock))
+ return UV_EBUSY; /* In uv_tty_set_mode(). */
+
+ err = 0;
+ if (orig_termios_fd != -1)
+ err = uv__tcsetattr(orig_termios_fd, TCSANOW, &orig_termios);
+
+ uv_spinlock_unlock(&termios_spinlock);
+ errno = saved_errno;
+
+ return err;
+}
+
+void uv_tty_set_vterm_state(uv_tty_vtermstate_t state) {
+}
+
+int uv_tty_get_vterm_state(uv_tty_vtermstate_t* state) {
+ return UV_ENOTSUP;
+}
+
+#endif
diff --git a/Utilities/cmlibuv/src/unix/udp.c b/Utilities/cmlibuv/src/unix/udp.c
new file mode 100644
index 0000000000..4d985b88ba
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/udp.c
@@ -0,0 +1,1416 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <assert.h>
+#include <string.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+#if defined(__MVS__)
+#include <xti.h>
+#endif
+#include <sys/un.h>
+
+#if defined(IPV6_JOIN_GROUP) && !defined(IPV6_ADD_MEMBERSHIP)
+# define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP
+#endif
+
+#if defined(IPV6_LEAVE_GROUP) && !defined(IPV6_DROP_MEMBERSHIP)
+# define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP
+#endif
+
+union uv__sockaddr {
+ struct sockaddr_in6 in6;
+ struct sockaddr_in in;
+ struct sockaddr addr;
+};
+
+static void uv__udp_run_completed(uv_udp_t* handle);
+static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents);
+static void uv__udp_recvmsg(uv_udp_t* handle);
+static void uv__udp_sendmsg(uv_udp_t* handle);
+static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
+ int domain,
+ unsigned int flags);
+
+#if HAVE_MMSG
+
+#define UV__MMSG_MAXWIDTH 20
+
+static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf);
+static void uv__udp_sendmmsg(uv_udp_t* handle);
+
+static int uv__recvmmsg_avail;
+static int uv__sendmmsg_avail;
+static uv_once_t once = UV_ONCE_INIT;
+
+static void uv__udp_mmsg_init(void) {
+ int ret;
+ int s;
+ s = uv__socket(AF_INET, SOCK_DGRAM, 0);
+ if (s < 0)
+ return;
+ ret = uv__sendmmsg(s, NULL, 0);
+ if (ret == 0 || errno != ENOSYS) {
+ uv__sendmmsg_avail = 1;
+ uv__recvmmsg_avail = 1;
+ } else {
+ ret = uv__recvmmsg(s, NULL, 0);
+ if (ret == 0 || errno != ENOSYS)
+ uv__recvmmsg_avail = 1;
+ }
+ uv__close(s);
+}
+
+#endif
+
+void uv__udp_close(uv_udp_t* handle) {
+ uv__io_close(handle->loop, &handle->io_watcher);
+ uv__handle_stop(handle);
+
+ if (handle->io_watcher.fd != -1) {
+ uv__close(handle->io_watcher.fd);
+ handle->io_watcher.fd = -1;
+ }
+}
+
+
+void uv__udp_finish_close(uv_udp_t* handle) {
+ uv_udp_send_t* req;
+ QUEUE* q;
+
+ assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT));
+ assert(handle->io_watcher.fd == -1);
+
+ while (!QUEUE_EMPTY(&handle->write_queue)) {
+ q = QUEUE_HEAD(&handle->write_queue);
+ QUEUE_REMOVE(q);
+
+ req = QUEUE_DATA(q, uv_udp_send_t, queue);
+ req->status = UV_ECANCELED;
+ QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
+ }
+
+ uv__udp_run_completed(handle);
+
+ assert(handle->send_queue_size == 0);
+ assert(handle->send_queue_count == 0);
+
+ /* Now tear down the handle. */
+ handle->recv_cb = NULL;
+ handle->alloc_cb = NULL;
+ /* but _do not_ touch close_cb */
+}
+
+
+static void uv__udp_run_completed(uv_udp_t* handle) {
+ uv_udp_send_t* req;
+ QUEUE* q;
+
+ assert(!(handle->flags & UV_HANDLE_UDP_PROCESSING));
+ handle->flags |= UV_HANDLE_UDP_PROCESSING;
+
+ while (!QUEUE_EMPTY(&handle->write_completed_queue)) {
+ q = QUEUE_HEAD(&handle->write_completed_queue);
+ QUEUE_REMOVE(q);
+
+ req = QUEUE_DATA(q, uv_udp_send_t, queue);
+ uv__req_unregister(handle->loop, req);
+
+ handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs);
+ handle->send_queue_count--;
+
+ if (req->bufs != req->bufsml)
+ uv__free(req->bufs);
+ req->bufs = NULL;
+
+ if (req->send_cb == NULL)
+ continue;
+
+ /* req->status >= 0 == bytes written
+ * req->status < 0 == errno
+ */
+ if (req->status >= 0)
+ req->send_cb(req, 0);
+ else
+ req->send_cb(req, req->status);
+ }
+
+ if (QUEUE_EMPTY(&handle->write_queue)) {
+ /* Pending queue and completion queue empty, stop watcher. */
+ uv__io_stop(handle->loop, &handle->io_watcher, POLLOUT);
+ if (!uv__io_active(&handle->io_watcher, POLLIN))
+ uv__handle_stop(handle);
+ }
+
+ handle->flags &= ~UV_HANDLE_UDP_PROCESSING;
+}
+
+
+static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents) {
+ uv_udp_t* handle;
+
+ handle = container_of(w, uv_udp_t, io_watcher);
+ assert(handle->type == UV_UDP);
+
+ if (revents & POLLIN)
+ uv__udp_recvmsg(handle);
+
+ if (revents & POLLOUT) {
+ uv__udp_sendmsg(handle);
+ uv__udp_run_completed(handle);
+ }
+}
+
+#if HAVE_MMSG
+static int uv__udp_recvmmsg(uv_udp_t* handle, uv_buf_t* buf) {
+ struct sockaddr_in6 peers[UV__MMSG_MAXWIDTH];
+ struct iovec iov[UV__MMSG_MAXWIDTH];
+ struct uv__mmsghdr msgs[UV__MMSG_MAXWIDTH];
+ ssize_t nread;
+ uv_buf_t chunk_buf;
+ size_t chunks;
+ int flags;
+ size_t k;
+
+ /* prepare structures for recvmmsg */
+ chunks = buf->len / UV__UDP_DGRAM_MAXSIZE;
+ if (chunks > ARRAY_SIZE(iov))
+ chunks = ARRAY_SIZE(iov);
+ for (k = 0; k < chunks; ++k) {
+ iov[k].iov_base = buf->base + k * UV__UDP_DGRAM_MAXSIZE;
+ iov[k].iov_len = UV__UDP_DGRAM_MAXSIZE;
+ memset(&msgs[k].msg_hdr, 0, sizeof(msgs[k].msg_hdr));
+ msgs[k].msg_hdr.msg_iov = iov + k;
+ msgs[k].msg_hdr.msg_iovlen = 1;
+ msgs[k].msg_hdr.msg_name = peers + k;
+ msgs[k].msg_hdr.msg_namelen = sizeof(peers[0]);
+ msgs[k].msg_hdr.msg_control = NULL;
+ msgs[k].msg_hdr.msg_controllen = 0;
+ msgs[k].msg_hdr.msg_flags = 0;
+ }
+
+ do
+ nread = uv__recvmmsg(handle->io_watcher.fd, msgs, chunks);
+ while (nread == -1 && errno == EINTR);
+
+ if (nread < 1) {
+ if (nread == 0 || errno == EAGAIN || errno == EWOULDBLOCK)
+ handle->recv_cb(handle, 0, buf, NULL, 0);
+ else
+ handle->recv_cb(handle, UV__ERR(errno), buf, NULL, 0);
+ } else {
+ /* pass each chunk to the application */
+ for (k = 0; k < (size_t) nread && handle->recv_cb != NULL; k++) {
+ flags = UV_UDP_MMSG_CHUNK;
+ if (msgs[k].msg_hdr.msg_flags & MSG_TRUNC)
+ flags |= UV_UDP_PARTIAL;
+
+ chunk_buf = uv_buf_init(iov[k].iov_base, iov[k].iov_len);
+ handle->recv_cb(handle,
+ msgs[k].msg_len,
+ &chunk_buf,
+ msgs[k].msg_hdr.msg_name,
+ flags);
+ }
+
+ /* one last callback so the original buffer is freed */
+ if (handle->recv_cb != NULL)
+ handle->recv_cb(handle, 0, buf, NULL, UV_UDP_MMSG_FREE);
+ }
+ return nread;
+}
+#endif
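+
+/* Example (illustrative, not part of the original source): recvmmsg is
+ * opt-in from the public API. The handle must be created with the
+ * UV_UDP_RECVMMSG flag, and the alloc_cb should hand out a buffer spanning
+ * several UV__UDP_DGRAM_MAXSIZE chunks; recv_cb then fires once per
+ * datagram with UV_UDP_MMSG_CHUNK set, plus a final call with
+ * UV_UDP_MMSG_FREE so the original buffer can be released.
+ *
+ *   uv_udp_t udp;
+ *   uv_udp_init_ex(loop, &udp, AF_INET | UV_UDP_RECVMMSG);
+ */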
+
+static void uv__udp_recvmsg(uv_udp_t* handle) {
+ struct sockaddr_storage peer;
+ struct msghdr h;
+ ssize_t nread;
+ uv_buf_t buf;
+ int flags;
+ int count;
+
+ assert(handle->recv_cb != NULL);
+ assert(handle->alloc_cb != NULL);
+
+ /* Prevent loop starvation when the data comes in as fast as (or faster than)
+ * we can read it. XXX Need to rearm fd if we switch to edge-triggered I/O.
+ */
+ count = 32;
+
+ do {
+ buf = uv_buf_init(NULL, 0);
+ handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &buf);
+ if (buf.base == NULL || buf.len == 0) {
+ handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
+ return;
+ }
+ assert(buf.base != NULL);
+
+#if HAVE_MMSG
+ if (uv_udp_using_recvmmsg(handle)) {
+ nread = uv__udp_recvmmsg(handle, &buf);
+ if (nread > 0)
+ count -= nread;
+ continue;
+ }
+#endif
+
+ memset(&h, 0, sizeof(h));
+ memset(&peer, 0, sizeof(peer));
+ h.msg_name = &peer;
+ h.msg_namelen = sizeof(peer);
+ h.msg_iov = (void*) &buf;
+ h.msg_iovlen = 1;
+
+ do {
+ nread = recvmsg(handle->io_watcher.fd, &h, 0);
+ }
+ while (nread == -1 && errno == EINTR);
+
+ if (nread == -1) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK)
+ handle->recv_cb(handle, 0, &buf, NULL, 0);
+ else
+ handle->recv_cb(handle, UV__ERR(errno), &buf, NULL, 0);
+ }
+ else {
+ flags = 0;
+ if (h.msg_flags & MSG_TRUNC)
+ flags |= UV_UDP_PARTIAL;
+
+ handle->recv_cb(handle, nread, &buf, (const struct sockaddr*) &peer, flags);
+ }
+ count--;
+ }
+  /* The recv_cb may decide to pause or close the handle. */
+ while (nread != -1
+ && count > 0
+ && handle->io_watcher.fd != -1
+ && handle->recv_cb != NULL);
+}
+
+#if HAVE_MMSG
+static void uv__udp_sendmmsg(uv_udp_t* handle) {
+ uv_udp_send_t* req;
+ struct uv__mmsghdr h[UV__MMSG_MAXWIDTH];
+ struct uv__mmsghdr *p;
+ QUEUE* q;
+ ssize_t npkts;
+ size_t pkts;
+ size_t i;
+
+ if (QUEUE_EMPTY(&handle->write_queue))
+ return;
+
+write_queue_drain:
+ for (pkts = 0, q = QUEUE_HEAD(&handle->write_queue);
+ pkts < UV__MMSG_MAXWIDTH && q != &handle->write_queue;
+ ++pkts, q = QUEUE_HEAD(q)) {
+ assert(q != NULL);
+ req = QUEUE_DATA(q, uv_udp_send_t, queue);
+ assert(req != NULL);
+
+ p = &h[pkts];
+ memset(p, 0, sizeof(*p));
+ if (req->addr.ss_family == AF_UNSPEC) {
+ p->msg_hdr.msg_name = NULL;
+ p->msg_hdr.msg_namelen = 0;
+ } else {
+ p->msg_hdr.msg_name = &req->addr;
+ if (req->addr.ss_family == AF_INET6)
+ p->msg_hdr.msg_namelen = sizeof(struct sockaddr_in6);
+ else if (req->addr.ss_family == AF_INET)
+ p->msg_hdr.msg_namelen = sizeof(struct sockaddr_in);
+ else if (req->addr.ss_family == AF_UNIX)
+ p->msg_hdr.msg_namelen = sizeof(struct sockaddr_un);
+ else {
+ assert(0 && "unsupported address family");
+ abort();
+ }
+ }
+ h[pkts].msg_hdr.msg_iov = (struct iovec*) req->bufs;
+ h[pkts].msg_hdr.msg_iovlen = req->nbufs;
+ }
+
+ do
+ npkts = uv__sendmmsg(handle->io_watcher.fd, h, pkts);
+ while (npkts == -1 && errno == EINTR);
+
+ if (npkts < 1) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
+ return;
+ for (i = 0, q = QUEUE_HEAD(&handle->write_queue);
+ i < pkts && q != &handle->write_queue;
+ ++i, q = QUEUE_HEAD(&handle->write_queue)) {
+ assert(q != NULL);
+ req = QUEUE_DATA(q, uv_udp_send_t, queue);
+ assert(req != NULL);
+
+ req->status = UV__ERR(errno);
+ QUEUE_REMOVE(&req->queue);
+ QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
+ }
+ uv__io_feed(handle->loop, &handle->io_watcher);
+ return;
+ }
+
+  /* Safety: npkts is known to be > 0 in the loop below, hence the cast
+   * from ssize_t to size_t is safe.
+   */
+ for (i = 0, q = QUEUE_HEAD(&handle->write_queue);
+ i < (size_t)npkts && q != &handle->write_queue;
+ ++i, q = QUEUE_HEAD(&handle->write_queue)) {
+ assert(q != NULL);
+ req = QUEUE_DATA(q, uv_udp_send_t, queue);
+ assert(req != NULL);
+
+ req->status = req->bufs[0].len;
+
+ /* Sending a datagram is an atomic operation: either all data
+ * is written or nothing is (and EMSGSIZE is raised). That is
+ * why we don't handle partial writes. Just pop the request
+ * off the write queue and onto the completed queue, done.
+ */
+ QUEUE_REMOVE(&req->queue);
+ QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
+ }
+
+ /* couldn't batch everything, continue sending (jump to avoid stack growth) */
+ if (!QUEUE_EMPTY(&handle->write_queue))
+ goto write_queue_drain;
+ uv__io_feed(handle->loop, &handle->io_watcher);
+ return;
+}
+#endif
+
+static void uv__udp_sendmsg(uv_udp_t* handle) {
+ uv_udp_send_t* req;
+ struct msghdr h;
+ QUEUE* q;
+ ssize_t size;
+
+#if HAVE_MMSG
+ uv_once(&once, uv__udp_mmsg_init);
+ if (uv__sendmmsg_avail) {
+ uv__udp_sendmmsg(handle);
+ return;
+ }
+#endif
+
+ while (!QUEUE_EMPTY(&handle->write_queue)) {
+ q = QUEUE_HEAD(&handle->write_queue);
+ assert(q != NULL);
+
+ req = QUEUE_DATA(q, uv_udp_send_t, queue);
+ assert(req != NULL);
+
+ memset(&h, 0, sizeof h);
+ if (req->addr.ss_family == AF_UNSPEC) {
+ h.msg_name = NULL;
+ h.msg_namelen = 0;
+ } else {
+ h.msg_name = &req->addr;
+ if (req->addr.ss_family == AF_INET6)
+ h.msg_namelen = sizeof(struct sockaddr_in6);
+ else if (req->addr.ss_family == AF_INET)
+ h.msg_namelen = sizeof(struct sockaddr_in);
+ else if (req->addr.ss_family == AF_UNIX)
+ h.msg_namelen = sizeof(struct sockaddr_un);
+ else {
+ assert(0 && "unsupported address family");
+ abort();
+ }
+ }
+ h.msg_iov = (struct iovec*) req->bufs;
+ h.msg_iovlen = req->nbufs;
+
+ do {
+ size = sendmsg(handle->io_watcher.fd, &h, 0);
+ } while (size == -1 && errno == EINTR);
+
+ if (size == -1) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
+ break;
+ }
+
+ req->status = (size == -1 ? UV__ERR(errno) : size);
+
+ /* Sending a datagram is an atomic operation: either all data
+ * is written or nothing is (and EMSGSIZE is raised). That is
+ * why we don't handle partial writes. Just pop the request
+ * off the write queue and onto the completed queue, done.
+ */
+ QUEUE_REMOVE(&req->queue);
+ QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
+ uv__io_feed(handle->loop, &handle->io_watcher);
+ }
+}
+
+/* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional
+ * refinements for programs that use multicast.
+ *
+ * Linux as of 3.9 has a SO_REUSEPORT socket option, but with semantics that
+ * differ from the BSDs': it _shares_ the port rather than stealing it from
+ * the current listener. While useful, it's not something we can emulate on
+ * other platforms so we don't enable it.
+ *
+ * z/OS does not support getsockname with the SO_REUSEPORT option when using
+ * AF_UNIX.
+ */
+static int uv__set_reuse(int fd) {
+ int yes;
+ yes = 1;
+
+#if defined(SO_REUSEPORT) && defined(__MVS__)
+ struct sockaddr_in sockfd;
+ unsigned int sockfd_len = sizeof(sockfd);
+ if (getsockname(fd, (struct sockaddr*) &sockfd, &sockfd_len) == -1)
+ return UV__ERR(errno);
+ if (sockfd.sin_family == AF_UNIX) {
+ if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)))
+ return UV__ERR(errno);
+ } else {
+ if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
+ return UV__ERR(errno);
+ }
+#elif defined(SO_REUSEPORT) && !defined(__linux__) && !defined(__GNU__)
+ if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
+ return UV__ERR(errno);
+#else
+ if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)))
+ return UV__ERR(errno);
+#endif
+
+ return 0;
+}
+
+/*
+ * The Linux kernel suppresses some ICMP error messages by default for UDP
+ * sockets. Setting IP_RECVERR/IPV6_RECVERR on the socket enables full ICMP
+ * error reporting, hopefully resulting in faster failover to working name
+ * servers.
+ */
+static int uv__set_recverr(int fd, sa_family_t ss_family) {
+#if defined(__linux__)
+ int yes;
+
+ yes = 1;
+ if (ss_family == AF_INET) {
+ if (setsockopt(fd, IPPROTO_IP, IP_RECVERR, &yes, sizeof(yes)))
+ return UV__ERR(errno);
+ } else if (ss_family == AF_INET6) {
+ if (setsockopt(fd, IPPROTO_IPV6, IPV6_RECVERR, &yes, sizeof(yes)))
+ return UV__ERR(errno);
+ }
+#endif
+ return 0;
+}
+
+
+int uv__udp_bind(uv_udp_t* handle,
+ const struct sockaddr* addr,
+ unsigned int addrlen,
+ unsigned int flags) {
+ int err;
+ int yes;
+ int fd;
+
+ /* Check for bad flags. */
+ if (flags & ~(UV_UDP_IPV6ONLY | UV_UDP_REUSEADDR | UV_UDP_LINUX_RECVERR))
+ return UV_EINVAL;
+
+ /* Cannot set IPv6-only mode on non-IPv6 socket. */
+ if ((flags & UV_UDP_IPV6ONLY) && addr->sa_family != AF_INET6)
+ return UV_EINVAL;
+
+ fd = handle->io_watcher.fd;
+ if (fd == -1) {
+ err = uv__socket(addr->sa_family, SOCK_DGRAM, 0);
+ if (err < 0)
+ return err;
+ fd = err;
+ handle->io_watcher.fd = fd;
+ }
+
+ if (flags & UV_UDP_LINUX_RECVERR) {
+ err = uv__set_recverr(fd, addr->sa_family);
+ if (err)
+ return err;
+ }
+
+ if (flags & UV_UDP_REUSEADDR) {
+ err = uv__set_reuse(fd);
+ if (err)
+ return err;
+ }
+
+ if (flags & UV_UDP_IPV6ONLY) {
+#ifdef IPV6_V6ONLY
+ yes = 1;
+ if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &yes, sizeof yes) == -1) {
+ err = UV__ERR(errno);
+ return err;
+ }
+#else
+ err = UV_ENOTSUP;
+ return err;
+#endif
+ }
+
+ if (bind(fd, addr, addrlen)) {
+ err = UV__ERR(errno);
+ if (errno == EAFNOSUPPORT)
+      /* macOS, other BSDs and SunOS fail with EAFNOSUPPORT when binding a
+       * socket created with AF_INET to an AF_INET6 address or vice versa. */
+ err = UV_EINVAL;
+ return err;
+ }
+
+ if (addr->sa_family == AF_INET6)
+ handle->flags |= UV_HANDLE_IPV6;
+
+ handle->flags |= UV_HANDLE_BOUND;
+ return 0;
+}
+
+
+static int uv__udp_maybe_deferred_bind(uv_udp_t* handle,
+ int domain,
+ unsigned int flags) {
+ union uv__sockaddr taddr;
+ socklen_t addrlen;
+
+ if (handle->io_watcher.fd != -1)
+ return 0;
+
+ switch (domain) {
+ case AF_INET:
+ {
+ struct sockaddr_in* addr = &taddr.in;
+ memset(addr, 0, sizeof *addr);
+ addr->sin_family = AF_INET;
+ addr->sin_addr.s_addr = INADDR_ANY;
+ addrlen = sizeof *addr;
+ break;
+ }
+ case AF_INET6:
+ {
+ struct sockaddr_in6* addr = &taddr.in6;
+ memset(addr, 0, sizeof *addr);
+ addr->sin6_family = AF_INET6;
+ addr->sin6_addr = in6addr_any;
+ addrlen = sizeof *addr;
+ break;
+ }
+ default:
+ assert(0 && "unsupported address family");
+ abort();
+ }
+
+ return uv__udp_bind(handle, &taddr.addr, addrlen, flags);
+}
+
+
+int uv__udp_connect(uv_udp_t* handle,
+ const struct sockaddr* addr,
+ unsigned int addrlen) {
+ int err;
+
+ err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
+ if (err)
+ return err;
+
+ do {
+ errno = 0;
+ err = connect(handle->io_watcher.fd, addr, addrlen);
+ } while (err == -1 && errno == EINTR);
+
+ if (err)
+ return UV__ERR(errno);
+
+ handle->flags |= UV_HANDLE_UDP_CONNECTED;
+
+ return 0;
+}
+
+/* From https://pubs.opengroup.org/onlinepubs/9699919799/functions/connect.html
+ * Every UNIX kernel that libuv supports nominally follows the same standard,
+ * but the implementation logic differs, so let's use pseudocode to explain
+ * the UDP disconnect behaviors:
+ *
+ * Predefined stubs for the pseudocode:
+ * 1. sodisconnect: the function that performs the real UDP disconnect
+ * 2. pru_connect: the function that performs the real UDP connect
+ * 3. so: the kernel object matching the socket fd
+ * 4. addr: the sockaddr parameter from user space
+ *
+ * BSDs:
+ * if (sodisconnect(so) == 0) { // udp disconnect succeeded
+ *   if (addr->sa_len != so->addr->sa_len) return EINVAL;
+ *   if (addr->sa_family != so->addr->sa_family) return EAFNOSUPPORT;
+ *   pru_connect(so);
+ * }
+ * else return EISCONN;
+ *
+ * z/OS (same as Windows):
+ * if (addr->sa_len < so->addr->sa_len) return EINVAL;
+ * if (addr->sa_family == AF_UNSPEC) sodisconnect(so);
+ *
+ * AIX:
+ * if (addr->sa_len != sizeof(struct sockaddr)) return EINVAL; // IP version ignored
+ * if (addr->sa_family == AF_UNSPEC) sodisconnect(so);
+ *
+ * Linux, others:
+ * if (addr->sa_len < sizeof(struct sockaddr)) return EINVAL;
+ * if (addr->sa_family == AF_UNSPEC) sodisconnect(so);
+ */
+int uv__udp_disconnect(uv_udp_t* handle) {
+ int r;
+#if defined(__MVS__)
+ struct sockaddr_storage addr;
+#else
+ struct sockaddr addr;
+#endif
+
+ memset(&addr, 0, sizeof(addr));
+
+#if defined(__MVS__)
+ addr.ss_family = AF_UNSPEC;
+#else
+ addr.sa_family = AF_UNSPEC;
+#endif
+
+ do {
+ errno = 0;
+#ifdef __PASE__
+    /* On IBM i, a connectionless transport socket can be disconnected by
+     * issuing another connect() with either the addr parameter set to NULL
+     * or the addr_length parameter set to zero.
+     * https://www.ibm.com/docs/en/i/7.4?topic=ssw_ibm_i_74/apis/connec.htm
+     */
+ r = connect(handle->io_watcher.fd, (struct sockaddr*) NULL, 0);
+#else
+ r = connect(handle->io_watcher.fd, (struct sockaddr*) &addr, sizeof(addr));
+#endif
+ } while (r == -1 && errno == EINTR);
+
+ if (r == -1) {
+#if defined(BSD) /* The macro BSD is from sys/param.h */
+ if (errno != EAFNOSUPPORT && errno != EINVAL)
+ return UV__ERR(errno);
+#else
+ return UV__ERR(errno);
+#endif
+ }
+
+ handle->flags &= ~UV_HANDLE_UDP_CONNECTED;
+ return 0;
+}
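+
+/* Example (illustrative, not part of the original source): at the public
+ * API level, passing a NULL address to uv_udp_connect() takes the
+ * disconnect path above; `peer` is a hypothetical sockaddr_in.
+ *
+ *   uv_udp_connect(&udp, (const struct sockaddr*) &peer);  // connect
+ *   uv_udp_connect(&udp, NULL);                            // disconnect
+ */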
+
+int uv__udp_send(uv_udp_send_t* req,
+ uv_udp_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ const struct sockaddr* addr,
+ unsigned int addrlen,
+ uv_udp_send_cb send_cb) {
+ int err;
+ int empty_queue;
+
+ assert(nbufs > 0);
+
+ if (addr) {
+ err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
+ if (err)
+ return err;
+ }
+
+ /* It's legal for send_queue_count > 0 even when the write_queue is empty;
+ * it means there are error-state requests in the write_completed_queue that
+ * will touch up send_queue_size/count later.
+ */
+ empty_queue = (handle->send_queue_count == 0);
+
+ uv__req_init(handle->loop, req, UV_UDP_SEND);
+ assert(addrlen <= sizeof(req->addr));
+ if (addr == NULL)
+ req->addr.ss_family = AF_UNSPEC;
+ else
+ memcpy(&req->addr, addr, addrlen);
+ req->send_cb = send_cb;
+ req->handle = handle;
+ req->nbufs = nbufs;
+
+ req->bufs = req->bufsml;
+ if (nbufs > ARRAY_SIZE(req->bufsml))
+ req->bufs = uv__malloc(nbufs * sizeof(bufs[0]));
+
+ if (req->bufs == NULL) {
+ uv__req_unregister(handle->loop, req);
+ return UV_ENOMEM;
+ }
+
+ memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
+ handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs);
+ handle->send_queue_count++;
+ QUEUE_INSERT_TAIL(&handle->write_queue, &req->queue);
+ uv__handle_start(handle);
+
+ if (empty_queue && !(handle->flags & UV_HANDLE_UDP_PROCESSING)) {
+ uv__udp_sendmsg(handle);
+
+ /* `uv__udp_sendmsg` may not be able to do non-blocking write straight
+ * away. In such cases the `io_watcher` has to be queued for asynchronous
+ * write.
+ */
+ if (!QUEUE_EMPTY(&handle->write_queue))
+ uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
+ } else {
+ uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
+ }
+
+ return 0;
+}
+
+
+int uv__udp_try_send(uv_udp_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ const struct sockaddr* addr,
+ unsigned int addrlen) {
+ int err;
+ struct msghdr h;
+ ssize_t size;
+
+ assert(nbufs > 0);
+
+ /* already sending a message */
+ if (handle->send_queue_count != 0)
+ return UV_EAGAIN;
+
+ if (addr) {
+ err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
+ if (err)
+ return err;
+ } else {
+ assert(handle->flags & UV_HANDLE_UDP_CONNECTED);
+ }
+
+ memset(&h, 0, sizeof h);
+ h.msg_name = (struct sockaddr*) addr;
+ h.msg_namelen = addrlen;
+ h.msg_iov = (struct iovec*) bufs;
+ h.msg_iovlen = nbufs;
+
+ do {
+ size = sendmsg(handle->io_watcher.fd, &h, 0);
+ } while (size == -1 && errno == EINTR);
+
+ if (size == -1) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
+ return UV_EAGAIN;
+ else
+ return UV__ERR(errno);
+ }
+
+ return size;
+}
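+
+/* Example (illustrative, not part of the original source): the public
+ * uv_udp_try_send() maps onto this helper. It either writes the whole
+ * datagram synchronously or fails with UV_EAGAIN, in which case the caller
+ * can fall back to the queued uv_udp_send(); `dest` and `on_send` are
+ * hypothetical placeholders.
+ *
+ *   uv_buf_t buf = uv_buf_init(data, len);
+ *   int r = uv_udp_try_send(&udp, &buf, 1, (const struct sockaddr*) &dest);
+ *   if (r == UV_EAGAIN)
+ *     uv_udp_send(&req, &udp, &buf, 1,
+ *                 (const struct sockaddr*) &dest, on_send);
+ */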
+
+
+static int uv__udp_set_membership4(uv_udp_t* handle,
+ const struct sockaddr_in* multicast_addr,
+ const char* interface_addr,
+ uv_membership membership) {
+ struct ip_mreq mreq;
+ int optname;
+ int err;
+
+ memset(&mreq, 0, sizeof mreq);
+
+ if (interface_addr) {
+ err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
+ if (err)
+ return err;
+ } else {
+ mreq.imr_interface.s_addr = htonl(INADDR_ANY);
+ }
+
+ mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;
+
+ switch (membership) {
+ case UV_JOIN_GROUP:
+ optname = IP_ADD_MEMBERSHIP;
+ break;
+ case UV_LEAVE_GROUP:
+ optname = IP_DROP_MEMBERSHIP;
+ break;
+ default:
+ return UV_EINVAL;
+ }
+
+ if (setsockopt(handle->io_watcher.fd,
+ IPPROTO_IP,
+ optname,
+ &mreq,
+ sizeof(mreq))) {
+#if defined(__MVS__)
+ if (errno == ENXIO)
+ return UV_ENODEV;
+#endif
+ return UV__ERR(errno);
+ }
+
+ return 0;
+}
+
+
+static int uv__udp_set_membership6(uv_udp_t* handle,
+ const struct sockaddr_in6* multicast_addr,
+ const char* interface_addr,
+ uv_membership membership) {
+ int optname;
+ struct ipv6_mreq mreq;
+ struct sockaddr_in6 addr6;
+
+ memset(&mreq, 0, sizeof mreq);
+
+ if (interface_addr) {
+ if (uv_ip6_addr(interface_addr, 0, &addr6))
+ return UV_EINVAL;
+ mreq.ipv6mr_interface = addr6.sin6_scope_id;
+ } else {
+ mreq.ipv6mr_interface = 0;
+ }
+
+ mreq.ipv6mr_multiaddr = multicast_addr->sin6_addr;
+
+ switch (membership) {
+ case UV_JOIN_GROUP:
+ optname = IPV6_ADD_MEMBERSHIP;
+ break;
+ case UV_LEAVE_GROUP:
+ optname = IPV6_DROP_MEMBERSHIP;
+ break;
+ default:
+ return UV_EINVAL;
+ }
+
+ if (setsockopt(handle->io_watcher.fd,
+ IPPROTO_IPV6,
+ optname,
+ &mreq,
+ sizeof(mreq))) {
+#if defined(__MVS__)
+ if (errno == ENXIO)
+ return UV_ENODEV;
+#endif
+ return UV__ERR(errno);
+ }
+
+ return 0;
+}
+
+
+#if !defined(__OpenBSD__) && \
+ !defined(__NetBSD__) && \
+ !defined(__ANDROID__) && \
+ !defined(__DragonFly__) && \
+ !defined(__QNX__) && \
+ !defined(__GNU__)
+static int uv__udp_set_source_membership4(uv_udp_t* handle,
+ const struct sockaddr_in* multicast_addr,
+ const char* interface_addr,
+ const struct sockaddr_in* source_addr,
+ uv_membership membership) {
+ struct ip_mreq_source mreq;
+ int optname;
+ int err;
+
+ err = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR);
+ if (err)
+ return err;
+
+ memset(&mreq, 0, sizeof(mreq));
+
+ if (interface_addr != NULL) {
+ err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr);
+ if (err)
+ return err;
+ } else {
+ mreq.imr_interface.s_addr = htonl(INADDR_ANY);
+ }
+
+ mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr;
+ mreq.imr_sourceaddr.s_addr = source_addr->sin_addr.s_addr;
+
+ if (membership == UV_JOIN_GROUP)
+ optname = IP_ADD_SOURCE_MEMBERSHIP;
+ else if (membership == UV_LEAVE_GROUP)
+ optname = IP_DROP_SOURCE_MEMBERSHIP;
+ else
+ return UV_EINVAL;
+
+ if (setsockopt(handle->io_watcher.fd,
+ IPPROTO_IP,
+ optname,
+ &mreq,
+ sizeof(mreq))) {
+ return UV__ERR(errno);
+ }
+
+ return 0;
+}
+
+
+static int uv__udp_set_source_membership6(uv_udp_t* handle,
+ const struct sockaddr_in6* multicast_addr,
+ const char* interface_addr,
+ const struct sockaddr_in6* source_addr,
+ uv_membership membership) {
+ struct group_source_req mreq;
+ struct sockaddr_in6 addr6;
+ int optname;
+ int err;
+
+ err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR);
+ if (err)
+ return err;
+
+ memset(&mreq, 0, sizeof(mreq));
+
+ if (interface_addr != NULL) {
+ err = uv_ip6_addr(interface_addr, 0, &addr6);
+ if (err)
+ return err;
+ mreq.gsr_interface = addr6.sin6_scope_id;
+ } else {
+ mreq.gsr_interface = 0;
+ }
+
+ STATIC_ASSERT(sizeof(mreq.gsr_group) >= sizeof(*multicast_addr));
+ STATIC_ASSERT(sizeof(mreq.gsr_source) >= sizeof(*source_addr));
+ memcpy(&mreq.gsr_group, multicast_addr, sizeof(*multicast_addr));
+ memcpy(&mreq.gsr_source, source_addr, sizeof(*source_addr));
+
+ if (membership == UV_JOIN_GROUP)
+ optname = MCAST_JOIN_SOURCE_GROUP;
+ else if (membership == UV_LEAVE_GROUP)
+ optname = MCAST_LEAVE_SOURCE_GROUP;
+ else
+ return UV_EINVAL;
+
+ if (setsockopt(handle->io_watcher.fd,
+ IPPROTO_IPV6,
+ optname,
+ &mreq,
+ sizeof(mreq))) {
+ return UV__ERR(errno);
+ }
+
+ return 0;
+}
+#endif
+
+
+int uv__udp_init_ex(uv_loop_t* loop,
+ uv_udp_t* handle,
+ unsigned flags,
+ int domain) {
+ int fd;
+
+ fd = -1;
+ if (domain != AF_UNSPEC) {
+ fd = uv__socket(domain, SOCK_DGRAM, 0);
+ if (fd < 0)
+ return fd;
+ }
+
+ uv__handle_init(loop, (uv_handle_t*)handle, UV_UDP);
+ handle->alloc_cb = NULL;
+ handle->recv_cb = NULL;
+ handle->send_queue_size = 0;
+ handle->send_queue_count = 0;
+ uv__io_init(&handle->io_watcher, uv__udp_io, fd);
+ QUEUE_INIT(&handle->write_queue);
+ QUEUE_INIT(&handle->write_completed_queue);
+
+ return 0;
+}
+
+
+int uv_udp_using_recvmmsg(const uv_udp_t* handle) {
+#if HAVE_MMSG
+ if (handle->flags & UV_HANDLE_UDP_RECVMMSG) {
+ uv_once(&once, uv__udp_mmsg_init);
+ return uv__recvmmsg_avail;
+ }
+#endif
+ return 0;
+}
+
+
+int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) {
+ int err;
+
+ /* Check for already active socket. */
+ if (handle->io_watcher.fd != -1)
+ return UV_EBUSY;
+
+ if (uv__fd_exists(handle->loop, sock))
+ return UV_EEXIST;
+
+ err = uv__nonblock(sock, 1);
+ if (err)
+ return err;
+
+ err = uv__set_reuse(sock);
+ if (err)
+ return err;
+
+ handle->io_watcher.fd = sock;
+ if (uv__udp_is_connected(handle))
+ handle->flags |= UV_HANDLE_UDP_CONNECTED;
+
+ return 0;
+}
+
+
+int uv_udp_set_membership(uv_udp_t* handle,
+ const char* multicast_addr,
+ const char* interface_addr,
+ uv_membership membership) {
+ int err;
+ struct sockaddr_in addr4;
+ struct sockaddr_in6 addr6;
+
+ if (uv_ip4_addr(multicast_addr, 0, &addr4) == 0) {
+ err = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR);
+ if (err)
+ return err;
+ return uv__udp_set_membership4(handle, &addr4, interface_addr, membership);
+ } else if (uv_ip6_addr(multicast_addr, 0, &addr6) == 0) {
+ err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR);
+ if (err)
+ return err;
+ return uv__udp_set_membership6(handle, &addr6, interface_addr, membership);
+ } else {
+ return UV_EINVAL;
+ }
+}
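+
+/* Illustrative sketch, not part of this file: joining an IPv4 group on
+ * the default interface; 239.255.0.1 is a placeholder administratively
+ * scoped group address:
+ *
+ *   struct sockaddr_in addr;
+ *   uv_ip4_addr("0.0.0.0", 9000, &addr);
+ *   uv_udp_bind(&h, (const struct sockaddr*) &addr, UV_UDP_REUSEADDR);
+ *   uv_udp_set_membership(&h, "239.255.0.1", NULL, UV_JOIN_GROUP);
+ */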
+
+
+int uv_udp_set_source_membership(uv_udp_t* handle,
+ const char* multicast_addr,
+ const char* interface_addr,
+ const char* source_addr,
+ uv_membership membership) {
+#if !defined(__OpenBSD__) && \
+ !defined(__NetBSD__) && \
+ !defined(__ANDROID__) && \
+ !defined(__DragonFly__) && \
+ !defined(__QNX__) && \
+ !defined(__GNU__)
+ int err;
+ union uv__sockaddr mcast_addr;
+ union uv__sockaddr src_addr;
+
+ err = uv_ip4_addr(multicast_addr, 0, &mcast_addr.in);
+ if (err) {
+ err = uv_ip6_addr(multicast_addr, 0, &mcast_addr.in6);
+ if (err)
+ return err;
+ err = uv_ip6_addr(source_addr, 0, &src_addr.in6);
+ if (err)
+ return err;
+ return uv__udp_set_source_membership6(handle,
+ &mcast_addr.in6,
+ interface_addr,
+ &src_addr.in6,
+ membership);
+ }
+
+ err = uv_ip4_addr(source_addr, 0, &src_addr.in);
+ if (err)
+ return err;
+ return uv__udp_set_source_membership4(handle,
+ &mcast_addr.in,
+ interface_addr,
+ &src_addr.in,
+ membership);
+#else
+ return UV_ENOSYS;
+#endif
+}
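+
+/* Illustrative sketch, not part of this file: source-specific multicast,
+ * i.e. accepting a group's traffic from one sender only. The group and
+ * source addresses below are placeholders (232/8 is the IPv4 SSM range):
+ *
+ *   uv_udp_set_source_membership(&h,
+ *                                "232.1.1.1",     // SSM group
+ *                                NULL,            // default interface
+ *                                "198.51.100.7",  // permitted source
+ *                                UV_JOIN_GROUP);
+ */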
+
+
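+/* Apply one socket option at the protocol level that matches the handle's
+ * address family: option6 at IPPROTO_IPV6 for IPv6 handles, otherwise
+ * option4 at IPPROTO_IP. */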
+static int uv__setsockopt(uv_udp_t* handle,
+ int option4,
+ int option6,
+ const void* val,
+ socklen_t size) {
+ int r;
+
+ if (handle->flags & UV_HANDLE_IPV6)
+ r = setsockopt(handle->io_watcher.fd,
+ IPPROTO_IPV6,
+ option6,
+ val,
+ size);
+ else
+ r = setsockopt(handle->io_watcher.fd,
+ IPPROTO_IP,
+ option4,
+ val,
+ size);
+ if (r)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
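+/* Like uv__setsockopt(), but marshal `val` through the integer type the
+ * platform expects for these options: char on Solaris/AIX/z/OS, unsigned
+ * char on OpenBSD, plain int elsewhere. */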
+static int uv__setsockopt_maybe_char(uv_udp_t* handle,
+ int option4,
+ int option6,
+ int val) {
+#if defined(__sun) || defined(_AIX) || defined(__MVS__)
+ char arg = val;
+#elif defined(__OpenBSD__)
+ unsigned char arg = val;
+#else
+ int arg = val;
+#endif
+
+ if (val < 0 || val > 255)
+ return UV_EINVAL;
+
+ return uv__setsockopt(handle, option4, option6, &arg, sizeof(arg));
+}
+
+
+int uv_udp_set_broadcast(uv_udp_t* handle, int on) {
+ if (setsockopt(handle->io_watcher.fd,
+ SOL_SOCKET,
+ SO_BROADCAST,
+ &on,
+ sizeof(on))) {
+ return UV__ERR(errno);
+ }
+
+ return 0;
+}
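+
+/* Illustrative sketch, not part of this file: sending to a broadcast
+ * address requires opting in first:
+ *
+ *   uv_udp_set_broadcast(&h, 1);
+ *   // ... then uv_udp_send()/uv_udp_try_send() to e.g. 255.255.255.255
+ */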
+
+
+int uv_udp_set_ttl(uv_udp_t* handle, int ttl) {
+ if (ttl < 1 || ttl > 255)
+ return UV_EINVAL;
+
+#if defined(__MVS__)
+ if (!(handle->flags & UV_HANDLE_IPV6))
+    return UV_ENOTSUP; /* z/OS does not support setting TTL for IPv4 */
+#endif
+
+/*
+ * On Solaris and derivatives such as SmartOS, the socket option length for
+ * IP_TTL and IPV6_UNICAST_HOPS is sizeof(int), so pass the option with its
+ * full int size on those platforms and use the general
+ * uv__setsockopt_maybe_char call on the others.
+ */
+#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
+ defined(__MVS__) || defined(__QNX__)
+
+ return uv__setsockopt(handle,
+ IP_TTL,
+ IPV6_UNICAST_HOPS,
+ &ttl,
+ sizeof(ttl));
+
+#else /* !(defined(__sun) || defined(_AIX) || defined(__OpenBSD__) ||
+           defined(__MVS__) || defined(__QNX__)) */
+
+ return uv__setsockopt_maybe_char(handle,
+ IP_TTL,
+ IPV6_UNICAST_HOPS,
+ ttl);
+
+#endif /* defined(__sun) || defined(_AIX) || defined(__OpenBSD__) ||
+          defined(__MVS__) || defined(__QNX__) */
+}
+
+
+int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl) {
+/*
+ * On Solaris and derivatives such as SmartOS, the length of socket options
+ * is sizeof(int) for IPV6_MULTICAST_HOPS and sizeof(char) for
+ * IP_MULTICAST_TTL, so hardcode the size of the option in the IPv6 case,
+ * and use the general uv__setsockopt_maybe_char call otherwise.
+ */
+#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
+ defined(__MVS__) || defined(__QNX__)
+ if (handle->flags & UV_HANDLE_IPV6)
+ return uv__setsockopt(handle,
+ IP_MULTICAST_TTL,
+ IPV6_MULTICAST_HOPS,
+ &ttl,
+ sizeof(ttl));
+#endif /* defined(__sun) || defined(_AIX) || defined(__OpenBSD__) ||
+          defined(__MVS__) || defined(__QNX__) */
+
+ return uv__setsockopt_maybe_char(handle,
+ IP_MULTICAST_TTL,
+ IPV6_MULTICAST_HOPS,
+ ttl);
+}
+
+
+int uv_udp_set_multicast_loop(uv_udp_t* handle, int on) {
+/*
+ * On Solaris and derivatives such as SmartOS, the length of socket options
+ * is sizeof(int) for IPV6_MULTICAST_LOOP and sizeof(char) for
+ * IP_MULTICAST_LOOP, so hardcode the size of the option in the IPv6 case,
+ * and use the general uv__setsockopt_maybe_char call otherwise.
+ */
+#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
+ defined(__MVS__) || defined(__QNX__)
+ if (handle->flags & UV_HANDLE_IPV6)
+ return uv__setsockopt(handle,
+ IP_MULTICAST_LOOP,
+ IPV6_MULTICAST_LOOP,
+ &on,
+ sizeof(on));
+#endif /* defined(__sun) || defined(_AIX) || defined(__OpenBSD__) ||
+          defined(__MVS__) || defined(__QNX__) */
+
+ return uv__setsockopt_maybe_char(handle,
+ IP_MULTICAST_LOOP,
+ IPV6_MULTICAST_LOOP,
+ on);
+}
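+
+/* Illustrative sketch, not part of this file: a typical multicast sender
+ * setup; the values are examples only:
+ *
+ *   uv_udp_set_multicast_ttl(&h, 1);   // keep traffic on the local net
+ *   uv_udp_set_multicast_loop(&h, 0);  // don't echo to local listeners
+ */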
+
+int uv_udp_set_multicast_interface(uv_udp_t* handle,
+                                   const char* interface_addr) {
+ struct sockaddr_storage addr_st;
+ struct sockaddr_in* addr4;
+ struct sockaddr_in6* addr6;
+
+ addr4 = (struct sockaddr_in*) &addr_st;
+ addr6 = (struct sockaddr_in6*) &addr_st;
+
+ if (!interface_addr) {
+ memset(&addr_st, 0, sizeof addr_st);
+ if (handle->flags & UV_HANDLE_IPV6) {
+ addr_st.ss_family = AF_INET6;
+ addr6->sin6_scope_id = 0;
+ } else {
+ addr_st.ss_family = AF_INET;
+ addr4->sin_addr.s_addr = htonl(INADDR_ANY);
+ }
+ } else if (uv_ip4_addr(interface_addr, 0, addr4) == 0) {
+ /* nothing, address was parsed */
+ } else if (uv_ip6_addr(interface_addr, 0, addr6) == 0) {
+ /* nothing, address was parsed */
+ } else {
+ return UV_EINVAL;
+ }
+
+ if (addr_st.ss_family == AF_INET) {
+ if (setsockopt(handle->io_watcher.fd,
+ IPPROTO_IP,
+ IP_MULTICAST_IF,
+ (void*) &addr4->sin_addr,
+ sizeof(addr4->sin_addr)) == -1) {
+ return UV__ERR(errno);
+ }
+ } else if (addr_st.ss_family == AF_INET6) {
+ if (setsockopt(handle->io_watcher.fd,
+ IPPROTO_IPV6,
+ IPV6_MULTICAST_IF,
+ &addr6->sin6_scope_id,
+ sizeof(addr6->sin6_scope_id)) == -1) {
+ return UV__ERR(errno);
+ }
+ } else {
+ assert(0 && "unexpected address family");
+ abort();
+ }
+
+ return 0;
+}
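+
+/* Illustrative sketch, not part of this file: selecting the outgoing
+ * multicast interface. For IPv4 the interface is named by one of its
+ * unicast addresses (192.0.2.10 is a placeholder); for IPv6 the scope id
+ * of the parsed address selects the interface, per the code above:
+ *
+ *   uv_udp_set_multicast_interface(&h, "192.0.2.10");
+ */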
+
+int uv_udp_getpeername(const uv_udp_t* handle,
+ struct sockaddr* name,
+ int* namelen) {
+
+ return uv__getsockpeername((const uv_handle_t*) handle,
+ getpeername,
+ name,
+ namelen);
+}
+
+int uv_udp_getsockname(const uv_udp_t* handle,
+ struct sockaddr* name,
+ int* namelen) {
+
+ return uv__getsockpeername((const uv_handle_t*) handle,
+ getsockname,
+ name,
+ namelen);
+}
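+
+/* Illustrative sketch, not part of this file: recovering the port the
+ * kernel picked after binding to port 0 (IPv4 assumed here):
+ *
+ *   struct sockaddr_storage ss;
+ *   int len = sizeof(ss);
+ *   uv_udp_getsockname(&h, (struct sockaddr*) &ss, &len);
+ *   int port = ntohs(((struct sockaddr_in*) &ss)->sin_port);
+ */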
+
+
+int uv__udp_recv_start(uv_udp_t* handle,
+ uv_alloc_cb alloc_cb,
+ uv_udp_recv_cb recv_cb) {
+ int err;
+
+ if (alloc_cb == NULL || recv_cb == NULL)
+ return UV_EINVAL;
+
+ if (uv__io_active(&handle->io_watcher, POLLIN))
+ return UV_EALREADY; /* FIXME(bnoordhuis) Should be UV_EBUSY. */
+
+ err = uv__udp_maybe_deferred_bind(handle, AF_INET, 0);
+ if (err)
+ return err;
+
+ handle->alloc_cb = alloc_cb;
+ handle->recv_cb = recv_cb;
+
+ uv__io_start(handle->loop, &handle->io_watcher, POLLIN);
+ uv__handle_start(handle);
+
+ return 0;
+}
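+
+/* Illustrative sketch, not part of this file: the public
+ * uv_udp_recv_start() wrapper takes an allocator plus a receive callback;
+ * on_alloc and on_recv are hypothetical caller-defined functions:
+ *
+ *   static void on_alloc(uv_handle_t* h, size_t suggested, uv_buf_t* buf) {
+ *     buf->base = malloc(suggested);
+ *     buf->len = suggested;
+ *   }
+ *
+ *   static void on_recv(uv_udp_t* h, ssize_t nread, const uv_buf_t* buf,
+ *                       const struct sockaddr* from, unsigned flags) {
+ *     if (nread > 0) { ... consume the datagram ... }
+ *     if (!(flags & UV_UDP_MMSG_CHUNK)) free(buf->base);
+ *   }
+ *
+ *   uv_udp_recv_start(&h, on_alloc, on_recv);
+ */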
+
+
+int uv__udp_recv_stop(uv_udp_t* handle) {
+ uv__io_stop(handle->loop, &handle->io_watcher, POLLIN);
+
+ if (!uv__io_active(&handle->io_watcher, POLLOUT))
+ uv__handle_stop(handle);
+
+ handle->alloc_cb = NULL;
+ handle->recv_cb = NULL;
+
+ return 0;
+}