author      Brad King <brad.king@kitware.com>   2021-11-17 06:40:02 -0500
committer   Brad King <brad.king@kitware.com>   2021-11-17 06:40:02 -0500
commit      74a05716a453d1cffbba4a1b300c7cafea17b8f5 (patch)
tree        a8b1b793607796938219d94a57ba4ce2924e3c8f /Utilities/cmlibuv
parent      65fb29fdfbe1663fe0f050214c43bf24238fbbfe (diff)
parent      27e34e6190ba1014c20ef5e8ffa7d653595ced12 (diff)
download    cmake-74a05716a453d1cffbba4a1b300c7cafea17b8f5.tar.gz
Merge branch 'upstream-libuv' into update-libuv
* upstream-libuv:
  libuv 2021-11-09 (0f696da5)
Diffstat (limited to 'Utilities/cmlibuv')
-rw-r--r--  Utilities/cmlibuv/include/uv.h | 30
-rw-r--r--  Utilities/cmlibuv/include/uv/errno.h | 14
-rw-r--r--  Utilities/cmlibuv/include/uv/tree.h | 2
-rw-r--r--  Utilities/cmlibuv/include/uv/unix.h | 6
-rw-r--r--  Utilities/cmlibuv/include/uv/version.h | 2
-rw-r--r--  Utilities/cmlibuv/include/uv/win.h | 7
-rw-r--r--  Utilities/cmlibuv/src/idna.c | 49
-rw-r--r--  Utilities/cmlibuv/src/inet.c | 3
-rw-r--r--  Utilities/cmlibuv/src/threadpool.c | 2
-rw-r--r--  Utilities/cmlibuv/src/timer.c | 1
-rw-r--r--  Utilities/cmlibuv/src/unix/async.c | 2
-rw-r--r--  Utilities/cmlibuv/src/unix/atomic-ops.h | 6
-rw-r--r--  Utilities/cmlibuv/src/unix/bsd-ifaddrs.c | 6
-rw-r--r--  Utilities/cmlibuv/src/unix/bsd-proctitle.c | 4
-rw-r--r--  Utilities/cmlibuv/src/unix/core.c | 28
-rw-r--r--  Utilities/cmlibuv/src/unix/darwin.c | 12
-rw-r--r--  Utilities/cmlibuv/src/unix/epoll.c | 422
-rw-r--r--  Utilities/cmlibuv/src/unix/freebsd.c | 15
-rw-r--r--  Utilities/cmlibuv/src/unix/fs.c | 193
-rw-r--r--  Utilities/cmlibuv/src/unix/fsevents.c | 23
-rw-r--r--  Utilities/cmlibuv/src/unix/getaddrinfo.c | 3
-rw-r--r--  Utilities/cmlibuv/src/unix/ibmi.c | 47
-rw-r--r--  Utilities/cmlibuv/src/unix/internal.h | 30
-rw-r--r--  Utilities/cmlibuv/src/unix/linux-core.c | 567
-rw-r--r--  Utilities/cmlibuv/src/unix/linux-inotify.c | 2
-rw-r--r--  Utilities/cmlibuv/src/unix/linux-syscalls.c | 37
-rw-r--r--  Utilities/cmlibuv/src/unix/linux-syscalls.h | 7
-rw-r--r--  Utilities/cmlibuv/src/unix/os390-proctitle.c | 136
-rw-r--r--  Utilities/cmlibuv/src/unix/os390-syscalls.c | 57
-rw-r--r--  Utilities/cmlibuv/src/unix/os390-syscalls.h | 2
-rw-r--r--  Utilities/cmlibuv/src/unix/os390.c | 136
-rw-r--r--  Utilities/cmlibuv/src/unix/pipe.c | 54
-rw-r--r--  Utilities/cmlibuv/src/unix/poll.c | 14
-rw-r--r--  Utilities/cmlibuv/src/unix/process.c | 210
-rw-r--r--  Utilities/cmlibuv/src/unix/proctitle.c | 4
-rw-r--r--  Utilities/cmlibuv/src/unix/signal.c | 2
-rw-r--r--  Utilities/cmlibuv/src/unix/stream.c | 219
-rw-r--r--  Utilities/cmlibuv/src/unix/sunos.c | 11
-rw-r--r--  Utilities/cmlibuv/src/unix/tcp.c | 53
-rw-r--r--  Utilities/cmlibuv/src/unix/thread.c | 12
-rw-r--r--  Utilities/cmlibuv/src/unix/tty.c | 18
-rw-r--r--  Utilities/cmlibuv/src/unix/udp.c | 39
-rw-r--r--  Utilities/cmlibuv/src/uv-common.c | 25
-rw-r--r--  Utilities/cmlibuv/src/uv-common.h | 9
-rw-r--r--  Utilities/cmlibuv/src/win/error.c | 2
-rw-r--r--  Utilities/cmlibuv/src/win/fs-event.c | 6
-rw-r--r--  Utilities/cmlibuv/src/win/fs.c | 67
-rw-r--r--  Utilities/cmlibuv/src/win/internal.h | 4
-rw-r--r--  Utilities/cmlibuv/src/win/pipe.c | 229
-rw-r--r--  Utilities/cmlibuv/src/win/poll.c | 3
-rw-r--r--  Utilities/cmlibuv/src/win/process-stdio.c | 96
-rw-r--r--  Utilities/cmlibuv/src/win/process.c | 9
-rw-r--r--  Utilities/cmlibuv/src/win/stream.c | 23
-rw-r--r--  Utilities/cmlibuv/src/win/tcp.c | 260
-rw-r--r--  Utilities/cmlibuv/src/win/udp.c | 4
-rw-r--r--  Utilities/cmlibuv/src/win/util.c | 20
-rw-r--r--  Utilities/cmlibuv/src/win/winapi.c | 10
-rw-r--r--  Utilities/cmlibuv/src/win/winapi.h | 7
58 files changed, 2005 insertions, 1256 deletions
diff --git a/Utilities/cmlibuv/include/uv.h b/Utilities/cmlibuv/include/uv.h
index 11891df8e2..48213e2077 100644
--- a/Utilities/cmlibuv/include/uv.h
+++ b/Utilities/cmlibuv/include/uv.h
@@ -130,6 +130,7 @@ extern "C" {
XX(ENOTEMPTY, "directory not empty") \
XX(ENOTSOCK, "socket operation on non-socket") \
XX(ENOTSUP, "operation not supported on socket") \
+ XX(EOVERFLOW, "value too large for defined data type") \
XX(EPERM, "operation not permitted") \
XX(EPIPE, "broken pipe") \
XX(EPROTO, "protocol error") \
@@ -152,6 +153,7 @@ extern "C" {
XX(ENOTTY, "inappropriate ioctl for device") \
XX(EFTYPE, "inappropriate file type or format") \
XX(EILSEQ, "illegal byte sequence") \
+ XX(ESOCKTNOSUPPORT, "socket type not supported") \
#define UV_HANDLE_TYPE_MAP(XX) \
XX(ASYNC, async) \
@@ -479,6 +481,12 @@ UV_EXTERN int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd);
UV_EXTERN uv_buf_t uv_buf_init(char* base, unsigned int len);
+UV_EXTERN int uv_pipe(uv_file fds[2], int read_flags, int write_flags);
+UV_EXTERN int uv_socketpair(int type,
+ int protocol,
+ uv_os_sock_t socket_vector[2],
+ int flags0,
+ int flags1);
#define UV_STREAM_FIELDS \
/* number of bytes queued for writing */ \
@@ -524,6 +532,10 @@ UV_EXTERN int uv_write2(uv_write_t* req,
UV_EXTERN int uv_try_write(uv_stream_t* handle,
const uv_buf_t bufs[],
unsigned int nbufs);
+UV_EXTERN int uv_try_write2(uv_stream_t* handle,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ uv_stream_t* send_handle);
/* uv_write_t is a subclass of uv_req_t. */
struct uv_write_s {
@@ -624,7 +636,14 @@ enum uv_udp_flags {
* in uv_udp_recv_cb, nread will always be 0 and addr will always be NULL.
*/
UV_UDP_MMSG_FREE = 16,
-
+ /*
+ * Indicates if IP_RECVERR/IPV6_RECVERR will be set when binding the handle.
+ * This sets IP_RECVERR for IPv4 and IPV6_RECVERR for IPv6 UDP sockets on
+ * Linux. This stops the Linux kernel from suppressing some ICMP error
+ * messages and enables full ICMP error reporting for faster failover.
+ * This flag is a no-op on platforms other than Linux.
+ */
+ UV_UDP_LINUX_RECVERR = 32,
/*
* Indicates that recvmmsg should be used, if available.
*/
@@ -937,10 +956,13 @@ typedef enum {
UV_WRITABLE_PIPE = 0x20,
/*
- * Open the child pipe handle in overlapped mode on Windows.
- * On Unix it is silently ignored.
+ * When UV_CREATE_PIPE is specified, specifying UV_NONBLOCK_PIPE opens the
+ * handle in non-blocking mode in the child. This may cause loss of data
+ * if the child is not designed to handle this mode, but it can also be
+ * significantly more efficient.
*/
- UV_OVERLAPPED_PIPE = 0x40
+ UV_NONBLOCK_PIPE = 0x40,
+ UV_OVERLAPPED_PIPE = 0x40 /* old name, for compatibility */
} uv_stdio_flags;
typedef struct uv_stdio_container_s {
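The uv.h hunks above add two new descriptor-pair helpers, uv_pipe() and uv_socketpair(), a uv_try_write2() variant that can carry a handle, and the UV_NONBLOCK_PIPE stdio flag. The following is a minimal, POSIX-flavored sketch of calling the new uv_pipe() declaration; treating UV_NONBLOCK_PIPE as an accepted flag value and fds[0]/fds[1] as the read/write ends is an assumption based on the names in this header, and the I/O and error handling are illustrative only.

    #include <uv.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void) {
      uv_file fds[2];
      int r;

      /* Ask for both ends in non-blocking mode; pass 0 for blocking ends. */
      r = uv_pipe(fds, UV_NONBLOCK_PIPE, UV_NONBLOCK_PIPE);
      if (r != 0) {
        fprintf(stderr, "uv_pipe: %s\n", uv_strerror(r));
        return 1;
      }

      /* uv_file is a plain file descriptor on Unix, so the usual
       * read/write/close calls apply to both ends. */
      (void) write(fds[1], "hi", 2);
      close(fds[0]);
      close(fds[1]);
      return 0;
    }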
diff --git a/Utilities/cmlibuv/include/uv/errno.h b/Utilities/cmlibuv/include/uv/errno.h
index 8d4d7686d2..71906b3f5e 100644
--- a/Utilities/cmlibuv/include/uv/errno.h
+++ b/Utilities/cmlibuv/include/uv/errno.h
@@ -317,7 +317,7 @@
#if defined(EPROTO) && !defined(_WIN32)
# define UV__EPROTO UV__ERR(EPROTO)
#else
-# define UV__EPROTO UV__ERR(-4046)
+# define UV__EPROTO (-4046)
#endif
#if defined(EPROTONOSUPPORT) && !defined(_WIN32)
@@ -445,4 +445,16 @@
# define UV__EILSEQ (-4027)
#endif
+#if defined(EOVERFLOW) && !defined(_WIN32)
+# define UV__EOVERFLOW UV__ERR(EOVERFLOW)
+#else
+# define UV__EOVERFLOW (-4026)
+#endif
+
+#if defined(ESOCKTNOSUPPORT) && !defined(_WIN32)
+# define UV__ESOCKTNOSUPPORT UV__ERR(ESOCKTNOSUPPORT)
+#else
+# define UV__ESOCKTNOSUPPORT (-4025)
+#endif
+
#endif /* UV_ERRNO_H_ */
diff --git a/Utilities/cmlibuv/include/uv/tree.h b/Utilities/cmlibuv/include/uv/tree.h
index f936416e3d..2b28835fde 100644
--- a/Utilities/cmlibuv/include/uv/tree.h
+++ b/Utilities/cmlibuv/include/uv/tree.h
@@ -251,7 +251,7 @@ void name##_SPLAY_MINMAX(struct name *head, int __comp) \
SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL; \
__left = __right = &__node; \
\
- while (1) { \
+ for (;;) { \
if (__comp < 0) { \
__tmp = SPLAY_LEFT((head)->sph_root, field); \
if (__tmp == NULL) \
diff --git a/Utilities/cmlibuv/include/uv/unix.h b/Utilities/cmlibuv/include/uv/unix.h
index a59192fa41..7a5a3cb54b 100644
--- a/Utilities/cmlibuv/include/uv/unix.h
+++ b/Utilities/cmlibuv/include/uv/unix.h
@@ -72,12 +72,10 @@
# include "bsd.h"
#elif defined(__CYGWIN__) || \
defined(__MSYS__) || \
+ defined(__HAIKU__) || \
+ defined(__QNX__) || \
defined(__GNU__)
# include "posix.h"
-#elif defined(__HAIKU__)
-# include "posix.h"
-#elif defined(__QNX__)
-# include "posix.h"
#endif
#ifndef NI_MAXHOST
diff --git a/Utilities/cmlibuv/include/uv/version.h b/Utilities/cmlibuv/include/uv/version.h
index 96c1c130f2..ab5ed00954 100644
--- a/Utilities/cmlibuv/include/uv/version.h
+++ b/Utilities/cmlibuv/include/uv/version.h
@@ -31,7 +31,7 @@
*/
#define UV_VERSION_MAJOR 1
-#define UV_VERSION_MINOR 39
+#define UV_VERSION_MINOR 42
#define UV_VERSION_PATCH 1
#define UV_VERSION_IS_RELEASE 0
#define UV_VERSION_SUFFIX "dev"
diff --git a/Utilities/cmlibuv/include/uv/win.h b/Utilities/cmlibuv/include/uv/win.h
index f86357b4ed..9b5d5dc63a 100644
--- a/Utilities/cmlibuv/include/uv/win.h
+++ b/Utilities/cmlibuv/include/uv/win.h
@@ -45,7 +45,14 @@ typedef struct pollfd {
#endif
#include <mswsock.h>
+// Disable the typedef in mstcpip.h of MinGW.
+#define _TCP_INITIAL_RTO_PARAMETERS _TCP_INITIAL_RTO_PARAMETERS__AVOID
+#define TCP_INITIAL_RTO_PARAMETERS TCP_INITIAL_RTO_PARAMETERS__AVOID
+#define PTCP_INITIAL_RTO_PARAMETERS PTCP_INITIAL_RTO_PARAMETERS__AVOID
#include <ws2tcpip.h>
+#undef _TCP_INITIAL_RTO_PARAMETERS
+#undef TCP_INITIAL_RTO_PARAMETERS
+#undef PTCP_INITIAL_RTO_PARAMETERS
#include <windows.h>
#include <process.h>
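The three #define/#undef pairs above are a rename-before-include workaround: while <ws2tcpip.h> (and the <mstcpip.h> it pulls in on MinGW) is preprocessed, the colliding TCP_INITIAL_RTO_PARAMETERS identifiers expand to throwaway *__AVOID names, so MinGW's declarations are emitted under names nothing else references; after the #undef, the real names remain free for libuv's own declarations (winapi.h, also touched in this diff, appears to be where they are needed).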
diff --git a/Utilities/cmlibuv/src/idna.c b/Utilities/cmlibuv/src/idna.c
index 13ffac6be8..b44cb16a1e 100644
--- a/Utilities/cmlibuv/src/idna.c
+++ b/Utilities/cmlibuv/src/idna.c
@@ -19,6 +19,7 @@
#include "uv.h"
#include "idna.h"
+#include <assert.h>
#include <string.h>
static unsigned uv__utf8_decode1_slow(const char** p,
@@ -32,7 +33,7 @@ static unsigned uv__utf8_decode1_slow(const char** p,
if (a > 0xF7)
return -1;
- switch (*p - pe) {
+ switch (pe - *p) {
default:
if (a > 0xEF) {
min = 0x10000;
@@ -62,6 +63,8 @@ static unsigned uv__utf8_decode1_slow(const char** p,
a = 0;
break;
}
+ /* Fall through. */
+ case 0:
return -1; /* Invalid continuation byte. */
}
@@ -88,6 +91,8 @@ static unsigned uv__utf8_decode1_slow(const char** p,
unsigned uv__utf8_decode1(const char** p, const char* pe) {
unsigned a;
+ assert(*p < pe);
+
a = (unsigned char) *(*p)++;
if (a < 128)
@@ -96,9 +101,6 @@ unsigned uv__utf8_decode1(const char** p, const char* pe) {
return uv__utf8_decode1_slow(p, pe, a);
}
-#define foreach_codepoint(c, p, pe) \
- for (; (void) (*p <= pe && (c = uv__utf8_decode1(p, pe))), *p <= pe;)
-
static int uv__idna_toascii_label(const char* s, const char* se,
char** d, char* de) {
static const char alphabet[] = "abcdefghijklmnopqrstuvwxyz0123456789";
@@ -121,15 +123,22 @@ static int uv__idna_toascii_label(const char* s, const char* se,
ss = s;
todo = 0;
- foreach_codepoint(c, &s, se) {
+ /* Note: after this loop we've visited all UTF-8 characters and know
+ * they're legal so we no longer need to check for decode errors.
+ */
+ while (s < se) {
+ c = uv__utf8_decode1(&s, se);
+
+ if (c == -1u)
+ return UV_EINVAL;
+
if (c < 128)
h++;
- else if (c == (unsigned) -1)
- return UV_EINVAL;
else
todo++;
}
+ /* Only write "xn--" when there are non-ASCII characters. */
if (todo > 0) {
if (*d < de) *(*d)++ = 'x';
if (*d < de) *(*d)++ = 'n';
@@ -137,9 +146,13 @@ static int uv__idna_toascii_label(const char* s, const char* se,
if (*d < de) *(*d)++ = '-';
}
+ /* Write ASCII characters. */
x = 0;
s = ss;
- foreach_codepoint(c, &s, se) {
+ while (s < se) {
+ c = uv__utf8_decode1(&s, se);
+ assert(c != -1u);
+
if (c > 127)
continue;
@@ -166,10 +179,15 @@ static int uv__idna_toascii_label(const char* s, const char* se,
while (todo > 0) {
m = -1;
s = ss;
- foreach_codepoint(c, &s, se)
+
+ while (s < se) {
+ c = uv__utf8_decode1(&s, se);
+ assert(c != -1u);
+
if (c >= n)
if (c < m)
m = c;
+ }
x = m - n;
y = h + 1;
@@ -181,7 +199,10 @@ static int uv__idna_toascii_label(const char* s, const char* se,
n = m;
s = ss;
- foreach_codepoint(c, &s, se) {
+ while (s < se) {
+ c = uv__utf8_decode1(&s, se);
+ assert(c != -1u);
+
if (c < n)
if (++delta == 0)
return UV_E2BIG; /* Overflow. */
@@ -245,8 +266,6 @@ static int uv__idna_toascii_label(const char* s, const char* se,
return 0;
}
-#undef foreach_codepoint
-
long uv__idna_toascii(const char* s, const char* se, char* d, char* de) {
const char* si;
const char* st;
@@ -256,10 +275,14 @@ long uv__idna_toascii(const char* s, const char* se, char* d, char* de) {
ds = d;
- for (si = s; si < se; /* empty */) {
+ si = s;
+ while (si < se) {
st = si;
c = uv__utf8_decode1(&si, se);
+ if (c == -1u)
+ return UV_EINVAL;
+
if (c != '.')
if (c != 0x3002) /* 。 */
if (c != 0xFF0E) /* . */
diff --git a/Utilities/cmlibuv/src/inet.c b/Utilities/cmlibuv/src/inet.c
index 58238dcbb6..384c0f7261 100644
--- a/Utilities/cmlibuv/src/inet.c
+++ b/Utilities/cmlibuv/src/inet.c
@@ -141,8 +141,9 @@ static int inet_ntop6(const unsigned char *src, char *dst, size_t size) {
if (best.base != -1 && (best.base + best.len) == ARRAY_SIZE(words))
*tp++ = ':';
*tp++ = '\0';
- if (UV_E2BIG == uv__strscpy(dst, tmp, size))
+ if ((size_t) (tp - tmp) > size)
return UV_ENOSPC;
+ uv__strscpy(dst, tmp, size);
return 0;
}
diff --git a/Utilities/cmlibuv/src/threadpool.c b/Utilities/cmlibuv/src/threadpool.c
index 0998938f3e..869ae95f58 100644
--- a/Utilities/cmlibuv/src/threadpool.c
+++ b/Utilities/cmlibuv/src/threadpool.c
@@ -161,7 +161,6 @@ static void post(QUEUE* q, enum uv__work_kind kind) {
void uv__threadpool_cleanup(void) {
-#ifndef _WIN32
unsigned int i;
if (nthreads == 0)
@@ -181,7 +180,6 @@ void uv__threadpool_cleanup(void) {
threads = NULL;
nthreads = 0;
-#endif
}
diff --git a/Utilities/cmlibuv/src/timer.c b/Utilities/cmlibuv/src/timer.c
index 1bea2a8bd2..bc680e71a9 100644
--- a/Utilities/cmlibuv/src/timer.c
+++ b/Utilities/cmlibuv/src/timer.c
@@ -58,6 +58,7 @@ static int timer_less_than(const struct heap_node* ha,
int uv_timer_init(uv_loop_t* loop, uv_timer_t* handle) {
uv__handle_init(loop, (uv_handle_t*)handle, UV_TIMER);
handle->timer_cb = NULL;
+ handle->timeout = 0;
handle->repeat = 0;
return 0;
}
diff --git a/Utilities/cmlibuv/src/unix/async.c b/Utilities/cmlibuv/src/unix/async.c
index 5f58fb88d6..e1805c3237 100644
--- a/Utilities/cmlibuv/src/unix/async.c
+++ b/Utilities/cmlibuv/src/unix/async.c
@@ -214,7 +214,7 @@ static int uv__async_start(uv_loop_t* loop) {
pipefd[0] = err;
pipefd[1] = -1;
#else
- err = uv__make_pipe(pipefd, UV__F_NONBLOCK);
+ err = uv__make_pipe(pipefd, UV_NONBLOCK_PIPE);
if (err < 0)
return err;
#endif
diff --git a/Utilities/cmlibuv/src/unix/atomic-ops.h b/Utilities/cmlibuv/src/unix/atomic-ops.h
index 2518a0680f..63d8268ab5 100644
--- a/Utilities/cmlibuv/src/unix/atomic-ops.h
+++ b/Utilities/cmlibuv/src/unix/atomic-ops.h
@@ -58,9 +58,11 @@ UV_UNUSED(static int cmpxchgi(int* ptr, int oldval, int newval)) {
UV_UNUSED(static void cpu_relax(void)) {
#if defined(__i386__) || defined(__x86_64__)
- __asm__ __volatile__ ("rep; nop"); /* a.k.a. PAUSE */
+ __asm__ __volatile__ ("rep; nop" ::: "memory"); /* a.k.a. PAUSE */
#elif (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__)
- __asm__ volatile("yield");
+ __asm__ __volatile__ ("yield" ::: "memory");
+#elif defined(__powerpc64__) || defined(__ppc64__) || defined(__PPC64__)
+ __asm__ __volatile__ ("or 1,1,1; or 2,2,2" ::: "memory");
#endif
}
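The "memory" clobbers added to cpu_relax() above also make it act as a compiler barrier, which is what busy-wait callers rely on. A small illustrative sketch of such a caller, written against the cpu_relax() shown in this header; the flag and function names are hypothetical, not libuv code.

    static int ready;  /* hypothetical flag flipped to 1 by another thread */

    static void wait_until_ready(void) {
      /* The asm "memory" clobber in cpu_relax() keeps the compiler from
       * hoisting the load of 'ready' out of the loop and spinning forever
       * on a stale register copy. Real code would additionally use an
       * atomic or acquire load for inter-thread ordering. */
      while (ready == 0)
        cpu_relax();
    }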
diff --git a/Utilities/cmlibuv/src/unix/bsd-ifaddrs.c b/Utilities/cmlibuv/src/unix/bsd-ifaddrs.c
index 5223ab4879..e48934bce2 100644
--- a/Utilities/cmlibuv/src/unix/bsd-ifaddrs.c
+++ b/Utilities/cmlibuv/src/unix/bsd-ifaddrs.c
@@ -42,8 +42,8 @@ static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
return 1;
#if !defined(__CYGWIN__) && !defined(__MSYS__)
/*
- * If `exclude_type` is `UV__EXCLUDE_IFPHYS`, just see whether `sa_family`
- * equals to `AF_LINK` or not. Otherwise, the result depends on the operation
+ * If `exclude_type` is `UV__EXCLUDE_IFPHYS`, return whether `sa_family`
+ * equals `AF_LINK`. Otherwise, the result depends on the operating
* system with `AF_LINK` or `PF_INET`.
*/
if (exclude_type == UV__EXCLUDE_IFPHYS)
@@ -53,7 +53,7 @@ static int uv__ifaddr_exclude(struct ifaddrs *ent, int exclude_type) {
defined(__HAIKU__)
/*
* On BSD getifaddrs returns information related to the raw underlying
- * devices. We're not interested in this information.
+ * devices. We're not interested in this information.
*/
if (ent->ifa_addr->sa_family == AF_LINK)
return 1;
diff --git a/Utilities/cmlibuv/src/unix/bsd-proctitle.c b/Utilities/cmlibuv/src/unix/bsd-proctitle.c
index 723b81c01c..4f4e9e5183 100644
--- a/Utilities/cmlibuv/src/unix/bsd-proctitle.c
+++ b/Utilities/cmlibuv/src/unix/bsd-proctitle.c
@@ -38,9 +38,7 @@ static void init_process_title_mutex_once(void) {
void uv__process_title_cleanup(void) {
- /* TODO(bnoordhuis) uv_mutex_destroy(&process_title_mutex)
- * and reset process_title_mutex_once?
- */
+ uv_mutex_destroy(&process_title_mutex);
}
diff --git a/Utilities/cmlibuv/src/unix/core.c b/Utilities/cmlibuv/src/unix/core.c
index 4245e027a6..225f775825 100644
--- a/Utilities/cmlibuv/src/unix/core.c
+++ b/Utilities/cmlibuv/src/unix/core.c
@@ -94,6 +94,10 @@ extern char** environ;
# define uv__accept4 accept4
#endif
+#if defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__)
+# include <sanitizer/linux_syscall_hooks.h>
+#endif
+
static int uv__run_pending(uv_loop_t* loop);
/* Verify that uv_buf_t is ABI-compatible with struct iovec. */
@@ -545,7 +549,13 @@ int uv__close_nocancel(int fd) {
return close$NOCANCEL$UNIX2003(fd);
#endif
#pragma GCC diagnostic pop
-#elif defined(__linux__)
+#elif defined(__linux__) && defined(__SANITIZE_THREAD__) && defined(__clang__)
+ long rc;
+ __sanitizer_syscall_pre_close(fd);
+ rc = syscall(SYS_close, fd);
+ __sanitizer_syscall_post_close(rc, fd);
+ return rc;
+#elif defined(__linux__) && !defined(__SANITIZE_THREAD__)
return syscall(SYS_close, fd);
#else
return close(fd);
@@ -580,7 +590,7 @@ int uv__close(int fd) {
return uv__close_nocheckstdio(fd);
}
-
+#if UV__NONBLOCK_IS_IOCTL
int uv__nonblock_ioctl(int fd, int set) {
int r;
@@ -595,7 +605,6 @@ int uv__nonblock_ioctl(int fd, int set) {
}
-#if !defined(__hpux) && !defined(__CYGWIN__) && !defined(__MSYS__) && !defined(__HAIKU__)
int uv__cloexec_ioctl(int fd, int set) {
int r;
@@ -931,13 +940,12 @@ void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
if (w->pevents == 0) {
QUEUE_REMOVE(&w->watcher_queue);
QUEUE_INIT(&w->watcher_queue);
+ w->events = 0;
- if (loop->watchers[w->fd] != NULL) {
- assert(loop->watchers[w->fd] == w);
+ if (w == loop->watchers[w->fd]) {
assert(loop->nfds > 0);
loop->watchers[w->fd] = NULL;
loop->nfds--;
- w->events = 0;
}
}
else if (QUEUE_EMPTY(&w->watcher_queue))
@@ -1181,7 +1189,9 @@ int uv__getpwuid_r(uv_passwd_t* pwd) {
if (buf == NULL)
return UV_ENOMEM;
- r = getpwuid_r(uid, &pw, buf, bufsize, &result);
+ do
+ r = getpwuid_r(uid, &pw, buf, bufsize, &result);
+ while (r == EINTR);
if (r != ERANGE)
break;
@@ -1191,7 +1201,7 @@ int uv__getpwuid_r(uv_passwd_t* pwd) {
if (r != 0) {
uv__free(buf);
- return -r;
+ return UV__ERR(r);
}
if (result == NULL) {
@@ -1586,7 +1596,7 @@ int uv__search_path(const char* prog, char* buf, size_t* buflen) {
buf[*buflen] = '\0';
return 0;
- }
+ }
/* Case iii). Search PATH environment variable */
cloned_path = NULL;
diff --git a/Utilities/cmlibuv/src/unix/darwin.c b/Utilities/cmlibuv/src/unix/darwin.c
index d0ecd452d8..a7be0dd2f3 100644
--- a/Utilities/cmlibuv/src/unix/darwin.c
+++ b/Utilities/cmlibuv/src/unix/darwin.c
@@ -33,9 +33,7 @@
#include <sys/sysctl.h>
#include <unistd.h> /* sysconf */
-#if !TARGET_OS_IPHONE
#include "darwin-stub.h"
-#endif
static uv_once_t once = UV_ONCE_INIT;
static uint64_t (*time_func)(void);
@@ -223,10 +221,10 @@ static int uv__get_cpu_speed(uint64_t* speed) {
err = UV_ENOENT;
core_foundation_handle = dlopen("/System/Library/Frameworks/"
"CoreFoundation.framework/"
- "Versions/A/CoreFoundation",
+ "CoreFoundation",
RTLD_LAZY | RTLD_LOCAL);
iokit_handle = dlopen("/System/Library/Frameworks/IOKit.framework/"
- "Versions/A/IOKit",
+ "IOKit",
RTLD_LAZY | RTLD_LOCAL);
if (core_foundation_handle == NULL || iokit_handle == NULL)
@@ -304,6 +302,12 @@ static int uv__get_cpu_speed(uint64_t* speed) {
pIOObjectRelease(it);
err = 0;
+
+ if (device_type_str != NULL)
+ pCFRelease(device_type_str);
+ if (clock_frequency_str != NULL)
+ pCFRelease(clock_frequency_str);
+
out:
if (core_foundation_handle != NULL)
dlclose(core_foundation_handle);
diff --git a/Utilities/cmlibuv/src/unix/epoll.c b/Utilities/cmlibuv/src/unix/epoll.c
new file mode 100644
index 0000000000..97348e254b
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/epoll.c
@@ -0,0 +1,422 @@
+/* Copyright libuv contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+#include <errno.h>
+#include <sys/epoll.h>
+
+int uv__epoll_init(uv_loop_t* loop) {
+ int fd;
+ fd = epoll_create1(O_CLOEXEC);
+
+ /* epoll_create1() can fail either because it's not implemented (old kernel)
+ * or because it doesn't understand the O_CLOEXEC flag.
+ */
+ if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) {
+ fd = epoll_create(256);
+
+ if (fd != -1)
+ uv__cloexec(fd, 1);
+ }
+
+ loop->backend_fd = fd;
+ if (fd == -1)
+ return UV__ERR(errno);
+
+ return 0;
+}
+
+
+void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
+ struct epoll_event* events;
+ struct epoll_event dummy;
+ uintptr_t i;
+ uintptr_t nfds;
+
+ assert(loop->watchers != NULL);
+ assert(fd >= 0);
+
+ events = (struct epoll_event*) loop->watchers[loop->nwatchers];
+ nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
+ if (events != NULL)
+ /* Invalidate events with same file descriptor */
+ for (i = 0; i < nfds; i++)
+ if (events[i].data.fd == fd)
+ events[i].data.fd = -1;
+
+ /* Remove the file descriptor from the epoll.
+ * This avoids a problem where the same file description remains open
+ * in another process, causing repeated junk epoll events.
+ *
+ * We pass in a dummy epoll_event, to work around a bug in old kernels.
+ */
+ if (loop->backend_fd >= 0) {
+ /* Work around a bug in kernels 3.10 to 3.19 where passing a struct that
+ * has the EPOLLWAKEUP flag set generates spurious audit syslog warnings.
+ */
+ memset(&dummy, 0, sizeof(dummy));
+ epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy);
+ }
+}
+
+
+int uv__io_check_fd(uv_loop_t* loop, int fd) {
+ struct epoll_event e;
+ int rc;
+
+ memset(&e, 0, sizeof(e));
+ e.events = POLLIN;
+ e.data.fd = -1;
+
+ rc = 0;
+ if (epoll_ctl(loop->backend_fd, EPOLL_CTL_ADD, fd, &e))
+ if (errno != EEXIST)
+ rc = UV__ERR(errno);
+
+ if (rc == 0)
+ if (epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &e))
+ abort();
+
+ return rc;
+}
+
+
+void uv__io_poll(uv_loop_t* loop, int timeout) {
+ /* A bug in kernels < 2.6.37 makes timeouts larger than ~30 minutes
+ * effectively infinite on 32 bits architectures. To avoid blocking
+ * indefinitely, we cap the timeout and poll again if necessary.
+ *
+ * Note that "30 minutes" is a simplification because it depends on
+ * the value of CONFIG_HZ. The magic constant assumes CONFIG_HZ=1200,
+ * that being the largest value I have seen in the wild (and only once.)
+ */
+ static const int max_safe_timeout = 1789569;
+ static int no_epoll_pwait_cached;
+ static int no_epoll_wait_cached;
+ int no_epoll_pwait;
+ int no_epoll_wait;
+ struct epoll_event events[1024];
+ struct epoll_event* pe;
+ struct epoll_event e;
+ int real_timeout;
+ QUEUE* q;
+ uv__io_t* w;
+ sigset_t sigset;
+ uint64_t sigmask;
+ uint64_t base;
+ int have_signals;
+ int nevents;
+ int count;
+ int nfds;
+ int fd;
+ int op;
+ int i;
+ int user_timeout;
+ int reset_timeout;
+
+ if (loop->nfds == 0) {
+ assert(QUEUE_EMPTY(&loop->watcher_queue));
+ return;
+ }
+
+ memset(&e, 0, sizeof(e));
+
+ while (!QUEUE_EMPTY(&loop->watcher_queue)) {
+ q = QUEUE_HEAD(&loop->watcher_queue);
+ QUEUE_REMOVE(q);
+ QUEUE_INIT(q);
+
+ w = QUEUE_DATA(q, uv__io_t, watcher_queue);
+ assert(w->pevents != 0);
+ assert(w->fd >= 0);
+ assert(w->fd < (int) loop->nwatchers);
+
+ e.events = w->pevents;
+ e.data.fd = w->fd;
+
+ if (w->events == 0)
+ op = EPOLL_CTL_ADD;
+ else
+ op = EPOLL_CTL_MOD;
+
+ /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
+ * events, skip the syscall and squelch the events after epoll_wait().
+ */
+ if (epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
+ if (errno != EEXIST)
+ abort();
+
+ assert(op == EPOLL_CTL_ADD);
+
+ /* We've reactivated a file descriptor that's been watched before. */
+ if (epoll_ctl(loop->backend_fd, EPOLL_CTL_MOD, w->fd, &e))
+ abort();
+ }
+
+ w->events = w->pevents;
+ }
+
+ sigmask = 0;
+ if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
+ sigemptyset(&sigset);
+ sigaddset(&sigset, SIGPROF);
+ sigmask |= 1 << (SIGPROF - 1);
+ }
+
+ assert(timeout >= -1);
+ base = loop->time;
+ count = 48; /* Benchmarks suggest this gives the best throughput. */
+ real_timeout = timeout;
+
+ if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
+ reset_timeout = 1;
+ user_timeout = timeout;
+ timeout = 0;
+ } else {
+ reset_timeout = 0;
+ user_timeout = 0;
+ }
+
+ /* You could argue there is a dependency between these two but
+ * ultimately we don't care about their ordering with respect
+ * to one another. Worst case, we make a few system calls that
+ * could have been avoided because another thread already knows
+ * they fail with ENOSYS. Hardly the end of the world.
+ */
+ no_epoll_pwait = uv__load_relaxed(&no_epoll_pwait_cached);
+ no_epoll_wait = uv__load_relaxed(&no_epoll_wait_cached);
+
+ for (;;) {
+ /* Only need to set the provider_entry_time if timeout != 0. The function
+ * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
+ */
+ if (timeout != 0)
+ uv__metrics_set_provider_entry_time(loop);
+
+ /* See the comment for max_safe_timeout for an explanation of why
+ * this is necessary. Executive summary: kernel bug workaround.
+ */
+ if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
+ timeout = max_safe_timeout;
+
+ if (sigmask != 0 && no_epoll_pwait != 0)
+ if (pthread_sigmask(SIG_BLOCK, &sigset, NULL))
+ abort();
+
+ if (no_epoll_wait != 0 || (sigmask != 0 && no_epoll_pwait == 0)) {
+ nfds = epoll_pwait(loop->backend_fd,
+ events,
+ ARRAY_SIZE(events),
+ timeout,
+ &sigset);
+ if (nfds == -1 && errno == ENOSYS) {
+ uv__store_relaxed(&no_epoll_pwait_cached, 1);
+ no_epoll_pwait = 1;
+ }
+ } else {
+ nfds = epoll_wait(loop->backend_fd,
+ events,
+ ARRAY_SIZE(events),
+ timeout);
+ if (nfds == -1 && errno == ENOSYS) {
+ uv__store_relaxed(&no_epoll_wait_cached, 1);
+ no_epoll_wait = 1;
+ }
+ }
+
+ if (sigmask != 0 && no_epoll_pwait != 0)
+ if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL))
+ abort();
+
+ /* Update loop->time unconditionally. It's tempting to skip the update when
+ * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
+ * operating system didn't reschedule our process while in the syscall.
+ */
+ SAVE_ERRNO(uv__update_time(loop));
+
+ if (nfds == 0) {
+ assert(timeout != -1);
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (timeout == -1)
+ continue;
+
+ if (timeout == 0)
+ return;
+
+ /* We may have been inside the system call for longer than |timeout|
+ * milliseconds so we need to update the timestamp to avoid drift.
+ */
+ goto update_timeout;
+ }
+
+ if (nfds == -1) {
+ if (errno == ENOSYS) {
+ /* epoll_wait() or epoll_pwait() failed, try the other system call. */
+ assert(no_epoll_wait == 0 || no_epoll_pwait == 0);
+ continue;
+ }
+
+ if (errno != EINTR)
+ abort();
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (timeout == -1)
+ continue;
+
+ if (timeout == 0)
+ return;
+
+ /* Interrupted by a signal. Update timeout and poll again. */
+ goto update_timeout;
+ }
+
+ have_signals = 0;
+ nevents = 0;
+
+ {
+ /* Squelch a -Waddress-of-packed-member warning with gcc >= 9. */
+ union {
+ struct epoll_event* events;
+ uv__io_t* watchers;
+ } x;
+
+ x.events = events;
+ assert(loop->watchers != NULL);
+ loop->watchers[loop->nwatchers] = x.watchers;
+ loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
+ }
+
+ for (i = 0; i < nfds; i++) {
+ pe = events + i;
+ fd = pe->data.fd;
+
+ /* Skip invalidated events, see uv__platform_invalidate_fd */
+ if (fd == -1)
+ continue;
+
+ assert(fd >= 0);
+ assert((unsigned) fd < loop->nwatchers);
+
+ w = loop->watchers[fd];
+
+ if (w == NULL) {
+ /* File descriptor that we've stopped watching, disarm it.
+ *
+ * Ignore all errors because we may be racing with another thread
+ * when the file descriptor is closed.
+ */
+ epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, pe);
+ continue;
+ }
+
+ /* Give users only events they're interested in. Prevents spurious
+ * callbacks when a previous callback invocation in this loop has stopped
+ * the current watcher. It also filters out events that the user has not
+ * requested us to watch.
+ */
+ pe->events &= w->pevents | POLLERR | POLLHUP;
+
+ /* Work around an epoll quirk where it sometimes reports just the
+ * EPOLLERR or EPOLLHUP event. In order to force the event loop to
+ * move forward, we merge in the read/write events that the watcher
+ * is interested in; uv__read() and uv__write() will then deal with
+ * the error or hangup in the usual fashion.
+ *
+ * Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user
+ * reads the available data, calls uv_read_stop(), then sometime later
+ * calls uv_read_start() again. By then, libuv has forgotten about the
+ * hangup and the kernel won't report EPOLLIN again because there's
+ * nothing left to read. If anything, libuv is to blame here. The
+ * current hack is just a quick bandaid; to properly fix it, libuv
+ * needs to remember the error/hangup event. We should get that for
+ * free when we switch over to edge-triggered I/O.
+ */
+ if (pe->events == POLLERR || pe->events == POLLHUP)
+ pe->events |=
+ w->pevents & (POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
+
+ if (pe->events != 0) {
+ /* Run signal watchers last. This also affects child process watchers
+ * because those are implemented in terms of signal watchers.
+ */
+ if (w == &loop->signal_io_watcher) {
+ have_signals = 1;
+ } else {
+ uv__metrics_update_idle_time(loop);
+ w->cb(loop, w, pe->events);
+ }
+
+ nevents++;
+ }
+ }
+
+ if (reset_timeout != 0) {
+ timeout = user_timeout;
+ reset_timeout = 0;
+ }
+
+ if (have_signals != 0) {
+ uv__metrics_update_idle_time(loop);
+ loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
+ }
+
+ loop->watchers[loop->nwatchers] = NULL;
+ loop->watchers[loop->nwatchers + 1] = NULL;
+
+ if (have_signals != 0)
+ return; /* Event loop should cycle now so don't poll again. */
+
+ if (nevents != 0) {
+ if (nfds == ARRAY_SIZE(events) && --count != 0) {
+ /* Poll for more events but don't block this time. */
+ timeout = 0;
+ continue;
+ }
+ return;
+ }
+
+ if (timeout == 0)
+ return;
+
+ if (timeout == -1)
+ continue;
+
+update_timeout:
+ assert(timeout > 0);
+
+ real_timeout -= (loop->time - base);
+ if (real_timeout <= 0)
+ return;
+
+ timeout = real_timeout;
+ }
+}
+
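A note on the magic constant in uv__io_poll() above: 1789569 is floor(2^31 / 1200) milliseconds (2147483648 / 1200 is roughly 1789569.7, about 29.8 minutes), i.e. the largest timeout whose product with the assumed CONFIG_HZ of 1200 still fits in a signed 32-bit value (1789569 * 1200 = 2147482800 < 2^31). Anything larger trips the 32-bit conversion overflow described in the comment, which is where the "~30 minutes" figure comes from.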
diff --git a/Utilities/cmlibuv/src/unix/freebsd.c b/Utilities/cmlibuv/src/unix/freebsd.c
index fe795a0e75..170b897e20 100644
--- a/Utilities/cmlibuv/src/unix/freebsd.c
+++ b/Utilities/cmlibuv/src/unix/freebsd.c
@@ -265,8 +265,11 @@ int uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
-#if __FreeBSD__ >= 11
- return sendmmsg(fd, mmsg, vlen, /* flags */ 0);
+#if __FreeBSD__ >= 11 && !defined(__DragonFly__)
+ return sendmmsg(fd,
+ (struct mmsghdr*) mmsg,
+ vlen,
+ 0 /* flags */);
#else
return errno = ENOSYS, -1;
#endif
@@ -274,8 +277,12 @@ int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
-#if __FreeBSD__ >= 11
- return recvmmsg(fd, mmsg, vlen, 0 /* flags */, NULL /* timeout */);
+#if __FreeBSD__ >= 11 && !defined(__DragonFly__)
+ return recvmmsg(fd,
+ (struct mmsghdr*) mmsg,
+ vlen,
+ 0 /* flags */,
+ NULL /* timeout */);
#else
return errno = ENOSYS, -1;
#endif
diff --git a/Utilities/cmlibuv/src/unix/fs.c b/Utilities/cmlibuv/src/unix/fs.c
index 6d57cee904..6b41f88ec5 100644
--- a/Utilities/cmlibuv/src/unix/fs.c
+++ b/Utilities/cmlibuv/src/unix/fs.c
@@ -56,8 +56,13 @@
# define HAVE_PREADV 0
#endif
+#if defined(__linux__)
+# include "sys/utsname.h"
+#endif
+
#if defined(__linux__) || defined(__sun)
# include <sys/sendfile.h>
+# include <sys/sysmacros.h>
#endif
#if defined(__APPLE__)
@@ -212,14 +217,30 @@ static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
UV_UNUSED(static struct timespec uv__fs_to_timespec(double time)) {
struct timespec ts;
ts.tv_sec = time;
- ts.tv_nsec = (uint64_t)(time * 1000000) % 1000000 * 1000;
+ ts.tv_nsec = (time - ts.tv_sec) * 1e9;
+
+  /* TODO(bnoordhuis) Remove this. utimensat() has nanosecond resolution but we
+ * stick to microsecond resolution for the sake of consistency with other
+ * platforms. I'm the original author of this compatibility hack but I'm
+ * less convinced it's useful nowadays.
+ */
+ ts.tv_nsec -= ts.tv_nsec % 1000;
+
+ if (ts.tv_nsec < 0) {
+ ts.tv_nsec += 1e9;
+ ts.tv_sec -= 1;
+ }
return ts;
}
UV_UNUSED(static struct timeval uv__fs_to_timeval(double time)) {
struct timeval tv;
tv.tv_sec = time;
- tv.tv_usec = (uint64_t)(time * 1000000) % 1000000;
+ tv.tv_usec = (time - tv.tv_sec) * 1e6;
+ if (tv.tv_usec < 0) {
+ tv.tv_usec += 1e6;
+ tv.tv_sec -= 1;
+ }
return tv;
}
@@ -227,9 +248,6 @@ static ssize_t uv__fs_futime(uv_fs_t* req) {
#if defined(__linux__) \
|| defined(_AIX71) \
|| defined(__HAIKU__)
- /* utimesat() has nanosecond resolution but we stick to microseconds
- * for the sake of consistency with other platforms.
- */
struct timespec ts[2];
ts[0] = uv__fs_to_timespec(req->atime);
ts[1] = uv__fs_to_timespec(req->mtime);
@@ -897,6 +915,115 @@ out:
}
+#ifdef __linux__
+static unsigned uv__kernel_version(void) {
+ static unsigned cached_version;
+ struct utsname u;
+ unsigned version;
+ unsigned major;
+ unsigned minor;
+ unsigned patch;
+
+ version = uv__load_relaxed(&cached_version);
+ if (version != 0)
+ return version;
+
+ if (-1 == uname(&u))
+ return 0;
+
+ if (3 != sscanf(u.release, "%u.%u.%u", &major, &minor, &patch))
+ return 0;
+
+ version = major * 65536 + minor * 256 + patch;
+ uv__store_relaxed(&cached_version, version);
+
+ return version;
+}
+
+
+/* Pre-4.20 kernels have a bug where CephFS uses the RADOS copy-from command
+ * in copy_file_range() when it shouldn't. There is no workaround except to
+ * fall back to a regular copy.
+ */
+static int uv__is_buggy_cephfs(int fd) {
+ struct statfs s;
+
+ if (-1 == fstatfs(fd, &s))
+ return 0;
+
+ if (s.f_type != /* CephFS */ 0xC36400)
+ return 0;
+
+ return uv__kernel_version() < /* 4.20.0 */ 0x041400;
+}
+
+
+static int uv__is_cifs_or_smb(int fd) {
+ struct statfs s;
+
+ if (-1 == fstatfs(fd, &s))
+ return 0;
+
+ switch ((unsigned) s.f_type) {
+ case 0x0000517Bu: /* SMB */
+ case 0xFE534D42u: /* SMB2 */
+ case 0xFF534D42u: /* CIFS */
+ return 1;
+ }
+
+ return 0;
+}
+
+
+static ssize_t uv__fs_try_copy_file_range(int in_fd, off_t* off,
+ int out_fd, size_t len) {
+ static int no_copy_file_range_support;
+ ssize_t r;
+
+ if (uv__load_relaxed(&no_copy_file_range_support)) {
+ errno = ENOSYS;
+ return -1;
+ }
+
+ r = uv__fs_copy_file_range(in_fd, off, out_fd, NULL, len, 0);
+
+ if (r != -1)
+ return r;
+
+ switch (errno) {
+ case EACCES:
+ /* Pre-4.20 kernels have a bug where CephFS uses the RADOS
+ * copy-from command when it shouldn't.
+ */
+ if (uv__is_buggy_cephfs(in_fd))
+ errno = ENOSYS; /* Use fallback. */
+ break;
+ case ENOSYS:
+ uv__store_relaxed(&no_copy_file_range_support, 1);
+ break;
+ case EPERM:
+ /* It's been reported that CIFS spuriously fails.
+ * Consider it a transient error.
+ */
+ if (uv__is_cifs_or_smb(out_fd))
+ errno = ENOSYS; /* Use fallback. */
+ break;
+ case ENOTSUP:
+ case EXDEV:
+ /* ENOTSUP - it could work on another file system type.
+ * EXDEV - it will not work when in_fd and out_fd are not on the same
+ * mounted filesystem (pre Linux 5.3)
+ */
+ errno = ENOSYS; /* Use fallback. */
+ break;
+ }
+
+ return -1;
+}
+
+#endif /* __linux__ */
+
+
static ssize_t uv__fs_sendfile(uv_fs_t* req) {
int in_fd;
int out_fd;
@@ -908,29 +1035,21 @@ static ssize_t uv__fs_sendfile(uv_fs_t* req) {
{
off_t off;
ssize_t r;
+ size_t len;
+ int try_sendfile;
off = req->off;
+ len = req->bufsml[0].len;
+ try_sendfile = 1;
#ifdef __linux__
- {
- static int copy_file_range_support = 1;
-
- if (copy_file_range_support) {
- r = uv__fs_copy_file_range(in_fd, NULL, out_fd, &off, req->bufsml[0].len, 0);
-
- if (r == -1 && errno == ENOSYS) {
- errno = 0;
- copy_file_range_support = 0;
- } else {
- goto ok;
- }
- }
- }
+ r = uv__fs_try_copy_file_range(in_fd, &off, out_fd, len);
+ try_sendfile = (r == -1 && errno == ENOSYS);
#endif
- r = sendfile(out_fd, in_fd, &off, req->bufsml[0].len);
+ if (try_sendfile)
+ r = sendfile(out_fd, in_fd, &off, len);
-ok:
/* sendfile() on SunOS returns EINVAL if the target fd is not a socket but
* it still writes out data. Fortunately, we can detect it by checking if
* the offset has been updated.
@@ -1020,9 +1139,6 @@ static ssize_t uv__fs_utime(uv_fs_t* req) {
|| defined(_AIX71) \
|| defined(__sun) \
|| defined(__HAIKU__)
- /* utimesat() has nanosecond resolution but we stick to microseconds
- * for the sake of consistency with other platforms.
- */
struct timespec ts[2];
ts[0] = uv__fs_to_timespec(req->atime);
ts[1] = uv__fs_to_timespec(req->mtime);
@@ -1217,22 +1333,15 @@ static ssize_t uv__fs_copyfile(uv_fs_t* req) {
if (fchmod(dstfd, src_statsbuf.st_mode) == -1) {
err = UV__ERR(errno);
#ifdef __linux__
+ /* fchmod() on CIFS shares always fails with EPERM unless the share is
+ * mounted with "noperm". As fchmod() is a meaningless operation on such
+ * shares anyway, detect that condition and squelch the error.
+ */
if (err != UV_EPERM)
goto out;
- {
- struct statfs s;
-
- /* fchmod() on CIFS shares always fails with EPERM unless the share is
- * mounted with "noperm". As fchmod() is a meaningless operation on such
- * shares anyway, detect that condition and squelch the error.
- */
- if (fstatfs(dstfd, &s) == -1)
- goto out;
-
- if (s.f_type != /* CIFS */ 0xFF534D42u)
- goto out;
- }
+ if (!uv__is_cifs_or_smb(dstfd))
+ goto out;
err = 0;
#else /* !__linux__ */
@@ -1350,7 +1459,8 @@ static void uv__to_stat(struct stat* src, uv_stat_t* dst) {
dst->st_birthtim.tv_nsec = src->st_ctimensec;
dst->st_flags = 0;
dst->st_gen = 0;
-#elif !defined(_AIX) && ( \
+#elif !defined(_AIX) && \
+ !defined(__MVS__) && ( \
defined(__DragonFly__) || \
defined(__FreeBSD__) || \
defined(__OpenBSD__) || \
@@ -1430,8 +1540,9 @@ static int uv__fs_statx(int fd,
case -1:
/* EPERM happens when a seccomp filter rejects the system call.
* Has been observed with libseccomp < 2.3.3 and docker < 18.04.
+ * EOPNOTSUPP is used on DVS exported filesystems
*/
- if (errno != EINVAL && errno != EPERM && errno != ENOSYS)
+ if (errno != EINVAL && errno != EPERM && errno != ENOSYS && errno != EOPNOTSUPP)
return -1;
/* Fall through. */
default:
@@ -1444,12 +1555,12 @@ static int uv__fs_statx(int fd,
return UV_ENOSYS;
}
- buf->st_dev = 256 * statxbuf.stx_dev_major + statxbuf.stx_dev_minor;
+ buf->st_dev = makedev(statxbuf.stx_dev_major, statxbuf.stx_dev_minor);
buf->st_mode = statxbuf.stx_mode;
buf->st_nlink = statxbuf.stx_nlink;
buf->st_uid = statxbuf.stx_uid;
buf->st_gid = statxbuf.stx_gid;
- buf->st_rdev = statxbuf.stx_rdev_major;
+ buf->st_rdev = makedev(statxbuf.stx_rdev_major, statxbuf.stx_rdev_minor);
buf->st_ino = statxbuf.stx_ino;
buf->st_size = statxbuf.stx_size;
buf->st_blksize = statxbuf.stx_blksize;
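Two arithmetic details in the fs.c changes above. First, uv__kernel_version() packs major.minor.patch as major * 65536 + minor * 256 + patch, so the 0x041400 threshold in uv__is_buggy_cephfs() is exactly 4.20.0 (4 * 65536 + 20 * 256 + 0 = 267264 = 0x41400). Second, the reworked uv__fs_to_timespec() needs its negative-nanoseconds branch for pre-epoch times: for example, -1.5 truncates to tv_sec = -1 and tv_nsec = -500000000, which the adjustment normalizes to tv_sec = -2, tv_nsec = 500000000 (the microsecond rounding leaves this value unchanged).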
diff --git a/Utilities/cmlibuv/src/unix/fsevents.c b/Utilities/cmlibuv/src/unix/fsevents.c
index a51f29b3f6..bf4f1f6a51 100644
--- a/Utilities/cmlibuv/src/unix/fsevents.c
+++ b/Utilities/cmlibuv/src/unix/fsevents.c
@@ -595,8 +595,7 @@ out:
static int uv__fsevents_loop_init(uv_loop_t* loop) {
CFRunLoopSourceContext ctx;
uv__cf_loop_state_t* state;
- pthread_attr_t attr_storage;
- pthread_attr_t* attr;
+ pthread_attr_t attr;
int err;
if (loop->cf_state != NULL)
@@ -641,25 +640,19 @@ static int uv__fsevents_loop_init(uv_loop_t* loop) {
goto fail_signal_source_create;
}
- /* In the unlikely event that pthread_attr_init() fails, create the thread
- * with the default stack size. We'll use a little more address space but
- * that in itself is not a fatal error.
- */
- attr = &attr_storage;
- if (pthread_attr_init(attr))
- attr = NULL;
+ if (pthread_attr_init(&attr))
+ abort();
- if (attr != NULL)
- if (pthread_attr_setstacksize(attr, 4 * PTHREAD_STACK_MIN))
- abort();
+ if (pthread_attr_setstacksize(&attr, uv__thread_stack_size()))
+ abort();
loop->cf_state = state;
/* uv_thread_t is an alias for pthread_t. */
- err = UV__ERR(pthread_create(&loop->cf_thread, attr, uv__cf_loop_runner, loop));
+ err = UV__ERR(pthread_create(&loop->cf_thread, &attr, uv__cf_loop_runner, loop));
- if (attr != NULL)
- pthread_attr_destroy(attr);
+ if (pthread_attr_destroy(&attr))
+ abort();
if (err)
goto fail_thread_create;
diff --git a/Utilities/cmlibuv/src/unix/getaddrinfo.c b/Utilities/cmlibuv/src/unix/getaddrinfo.c
index d7ca7d1a44..77337ace94 100644
--- a/Utilities/cmlibuv/src/unix/getaddrinfo.c
+++ b/Utilities/cmlibuv/src/unix/getaddrinfo.c
@@ -21,9 +21,6 @@
/* Expose glibc-specific EAI_* error codes. Needs to be defined before we
* include any headers.
*/
-#ifndef _GNU_SOURCE
-# define _GNU_SOURCE
-#endif
#include "uv.h"
#include "internal.h"
diff --git a/Utilities/cmlibuv/src/unix/ibmi.c b/Utilities/cmlibuv/src/unix/ibmi.c
index 73ab02a7ac..580ea1f3a8 100644
--- a/Utilities/cmlibuv/src/unix/ibmi.c
+++ b/Utilities/cmlibuv/src/unix/ibmi.c
@@ -26,7 +26,6 @@
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
-#include <assert.h>
#include <errno.h>
#include <sys/types.h>
@@ -166,7 +165,7 @@ static void iconv_a2e(const char* src, unsigned char dst[], size_t length) {
srclen = strlen(src);
if (srclen > length)
- abort();
+ srclen = length;
for (i = 0; i < srclen; i++)
dst[i] = a2e[src[i]];
/* padding the remaining part with spaces */
@@ -360,6 +359,10 @@ static int get_ibmi_physical_address(const char* line, char (*phys_addr)[6]) {
if (rc != 0)
return rc;
+ if (err.bytes_available > 0) {
+ return -1;
+ }
+
/* convert ebcdic loca_adapter_address to ascii first */
iconv_e2a(rcvr.loca_adapter_address, mac_addr,
sizeof(rcvr.loca_adapter_address));
@@ -443,9 +446,42 @@ int uv_interface_addresses(uv_interface_address_t** addresses, int* count) {
}
address->is_internal = cur->ifa_flags & IFF_LOOPBACK ? 1 : 0;
if (!address->is_internal) {
- int rc = get_ibmi_physical_address(address->name, &address->phys_addr);
- if (rc != 0)
- r = rc;
+ int rc = -1;
+ size_t name_len = strlen(address->name);
+ /* To get the associated MAC address, we must convert the address to a
+ * line description. Normally, the name field contains the line
+ * description name, but for VLANs it has the VLAN appended with a
+ * period. Since object names can also contain periods and numbers, there
+       * is no way to know if a returned name is for a VLAN or not. e.g.
+ * *LIND ETH1.1 and *LIND ETH1, VLAN 1 both have the same name: ETH1.1
+ *
+ * Instead, we apply the same heuristic used by some of the XPF ioctls:
+ * - names > 10 *must* contain a VLAN
+ * - assume names <= 10 do not contain a VLAN and try directly
+ * - if >10 or QDCRLIND returned an error, try to strip off a VLAN
+ * and try again
+ * - if we still get an error or couldn't find a period, leave the MAC as
+ * 00:00:00:00:00:00
+ */
+ if (name_len <= 10) {
+ /* Assume name does not contain a VLAN ID */
+ rc = get_ibmi_physical_address(address->name, &address->phys_addr);
+ }
+
+ if (name_len > 10 || rc != 0) {
+ /* The interface name must contain a VLAN ID suffix. Attempt to strip
+ * it off so we can get the line description to pass to QDCRLIND.
+ */
+ char* temp_name = uv__strdup(address->name);
+ char* dot = strrchr(temp_name, '.');
+ if (dot != NULL) {
+ *dot = '\0';
+ if (strlen(temp_name) <= 10) {
+ rc = get_ibmi_physical_address(temp_name, &address->phys_addr);
+ }
+ }
+ uv__free(temp_name);
+ }
}
address++;
@@ -499,3 +535,4 @@ int uv_get_process_title(char* buffer, size_t size) {
void uv__process_title_cleanup(void) {
}
+
diff --git a/Utilities/cmlibuv/src/unix/internal.h b/Utilities/cmlibuv/src/unix/internal.h
index 444dc85c59..586ae390d3 100644
--- a/Utilities/cmlibuv/src/unix/internal.h
+++ b/Utilities/cmlibuv/src/unix/internal.h
@@ -62,6 +62,17 @@
# include <AvailabilityMacros.h>
#endif
+/*
+ * Define common detection for active Thread Sanitizer
+ * - clang uses __has_feature(thread_sanitizer)
+ * - gcc-7+ uses __SANITIZE_THREAD__
+ */
+#if defined(__has_feature)
+# if __has_feature(thread_sanitizer)
+# define __SANITIZE_THREAD__ 1
+# endif
+#endif
+
#if defined(PATH_MAX)
# define UV__PATH_MAX PATH_MAX
#else
@@ -175,9 +186,11 @@ struct uv__stream_queued_fds_s {
defined(__NetBSD__)
#define uv__cloexec uv__cloexec_ioctl
#define uv__nonblock uv__nonblock_ioctl
+#define UV__NONBLOCK_IS_IOCTL 1
#else
#define uv__cloexec uv__cloexec_fcntl
#define uv__nonblock uv__nonblock_fcntl
+#define UV__NONBLOCK_IS_IOCTL 0
#endif
/* On Linux, uv__nonblock_fcntl() and uv__nonblock_ioctl() do not commute
@@ -256,6 +269,7 @@ int uv__signal_loop_fork(uv_loop_t* loop);
/* platform specific */
uint64_t uv__hrtime(uv_clocktype_t type);
int uv__kqueue_init(uv_loop_t* loop);
+int uv__epoll_init(uv_loop_t* loop);
int uv__platform_loop_init(uv_loop_t* loop);
void uv__platform_loop_delete(uv_loop_t* loop);
void uv__platform_invalidate_fd(uv_loop_t* loop, int fd);
@@ -271,6 +285,7 @@ void uv__prepare_close(uv_prepare_t* handle);
void uv__process_close(uv_process_t* handle);
void uv__stream_close(uv_stream_t* handle);
void uv__tcp_close(uv_tcp_t* handle);
+size_t uv__thread_stack_size(void);
void uv__udp_close(uv_udp_t* handle);
void uv__udp_finish_close(uv_udp_t* handle);
uv_handle_type uv__handle_type(int fd);
@@ -292,12 +307,6 @@ int uv___stream_fd(const uv_stream_t* handle);
#define uv__stream_fd(handle) ((handle)->io_watcher.fd)
#endif /* defined(__APPLE__) */
-#ifdef O_NONBLOCK
-# define UV__F_NONBLOCK O_NONBLOCK
-#else
-# define UV__F_NONBLOCK 1
-#endif
-
int uv__make_pipe(int fds[2], int flags);
#if defined(__APPLE__)
@@ -337,7 +346,8 @@ int uv__getsockpeername(const uv_handle_t* handle,
#if defined(__linux__) || \
defined(__FreeBSD__) || \
- defined(__FreeBSD_kernel__)
+ defined(__FreeBSD_kernel__) || \
+ defined(__DragonFly__)
#define HAVE_MMSG 1
struct uv__mmsghdr {
struct msghdr msg_hdr;
@@ -350,5 +360,11 @@ int uv__sendmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen);
#define HAVE_MMSG 0
#endif
+#if defined(__sun)
+#if !defined(_POSIX_VERSION) || _POSIX_VERSION < 200809L
+size_t strnlen(const char* s, size_t maxlen);
+#endif
+#endif
+
#endif /* UV_UNIX_INTERNAL_H_ */
diff --git a/Utilities/cmlibuv/src/unix/linux-core.c b/Utilities/cmlibuv/src/unix/linux-core.c
index 4db2f05053..8c9bbb75f7 100644
--- a/Utilities/cmlibuv/src/unix/linux-core.c
+++ b/Utilities/cmlibuv/src/unix/linux-core.c
@@ -82,29 +82,12 @@ static int read_times(FILE* statfile_fp,
static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci);
static uint64_t read_cpufreq(unsigned int cpunum);
-
int uv__platform_loop_init(uv_loop_t* loop) {
- int fd;
- fd = epoll_create1(O_CLOEXEC);
-
- /* epoll_create1() can fail either because it's not implemented (old kernel)
- * or because it doesn't understand the O_CLOEXEC flag.
- */
- if (fd == -1 && (errno == ENOSYS || errno == EINVAL)) {
- fd = epoll_create(256);
-
- if (fd != -1)
- uv__cloexec(fd, 1);
- }
-
- loop->backend_fd = fd;
+
loop->inotify_fd = -1;
loop->inotify_watchers = NULL;
- if (fd == -1)
- return UV__ERR(errno);
-
- return 0;
+ return uv__epoll_init(loop);
}
@@ -134,380 +117,6 @@ void uv__platform_loop_delete(uv_loop_t* loop) {
}
-void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
- struct epoll_event* events;
- struct epoll_event dummy;
- uintptr_t i;
- uintptr_t nfds;
-
- assert(loop->watchers != NULL);
- assert(fd >= 0);
-
- events = (struct epoll_event*) loop->watchers[loop->nwatchers];
- nfds = (uintptr_t) loop->watchers[loop->nwatchers + 1];
- if (events != NULL)
- /* Invalidate events with same file descriptor */
- for (i = 0; i < nfds; i++)
- if (events[i].data.fd == fd)
- events[i].data.fd = -1;
-
- /* Remove the file descriptor from the epoll.
- * This avoids a problem where the same file description remains open
- * in another process, causing repeated junk epoll events.
- *
- * We pass in a dummy epoll_event, to work around a bug in old kernels.
- */
- if (loop->backend_fd >= 0) {
- /* Work around a bug in kernels 3.10 to 3.19 where passing a struct that
- * has the EPOLLWAKEUP flag set generates spurious audit syslog warnings.
- */
- memset(&dummy, 0, sizeof(dummy));
- epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy);
- }
-}
-
-
-int uv__io_check_fd(uv_loop_t* loop, int fd) {
- struct epoll_event e;
- int rc;
-
- memset(&e, 0, sizeof(e));
- e.events = POLLIN;
- e.data.fd = -1;
-
- rc = 0;
- if (epoll_ctl(loop->backend_fd, EPOLL_CTL_ADD, fd, &e))
- if (errno != EEXIST)
- rc = UV__ERR(errno);
-
- if (rc == 0)
- if (epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &e))
- abort();
-
- return rc;
-}
-
-
-void uv__io_poll(uv_loop_t* loop, int timeout) {
- /* A bug in kernels < 2.6.37 makes timeouts larger than ~30 minutes
- * effectively infinite on 32 bits architectures. To avoid blocking
- * indefinitely, we cap the timeout and poll again if necessary.
- *
- * Note that "30 minutes" is a simplification because it depends on
- * the value of CONFIG_HZ. The magic constant assumes CONFIG_HZ=1200,
- * that being the largest value I have seen in the wild (and only once.)
- */
- static const int max_safe_timeout = 1789569;
- static int no_epoll_pwait_cached;
- static int no_epoll_wait_cached;
- int no_epoll_pwait;
- int no_epoll_wait;
- struct epoll_event events[1024];
- struct epoll_event* pe;
- struct epoll_event e;
- int real_timeout;
- QUEUE* q;
- uv__io_t* w;
- sigset_t sigset;
- uint64_t sigmask;
- uint64_t base;
- int have_signals;
- int nevents;
- int count;
- int nfds;
- int fd;
- int op;
- int i;
- int user_timeout;
- int reset_timeout;
-
- if (loop->nfds == 0) {
- assert(QUEUE_EMPTY(&loop->watcher_queue));
- return;
- }
-
- memset(&e, 0, sizeof(e));
-
- while (!QUEUE_EMPTY(&loop->watcher_queue)) {
- q = QUEUE_HEAD(&loop->watcher_queue);
- QUEUE_REMOVE(q);
- QUEUE_INIT(q);
-
- w = QUEUE_DATA(q, uv__io_t, watcher_queue);
- assert(w->pevents != 0);
- assert(w->fd >= 0);
- assert(w->fd < (int) loop->nwatchers);
-
- e.events = w->pevents;
- e.data.fd = w->fd;
-
- if (w->events == 0)
- op = EPOLL_CTL_ADD;
- else
- op = EPOLL_CTL_MOD;
-
- /* XXX Future optimization: do EPOLL_CTL_MOD lazily if we stop watching
- * events, skip the syscall and squelch the events after epoll_wait().
- */
- if (epoll_ctl(loop->backend_fd, op, w->fd, &e)) {
- if (errno != EEXIST)
- abort();
-
- assert(op == EPOLL_CTL_ADD);
-
- /* We've reactivated a file descriptor that's been watched before. */
- if (epoll_ctl(loop->backend_fd, EPOLL_CTL_MOD, w->fd, &e))
- abort();
- }
-
- w->events = w->pevents;
- }
-
- sigmask = 0;
- if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
- sigemptyset(&sigset);
- sigaddset(&sigset, SIGPROF);
- sigmask |= 1 << (SIGPROF - 1);
- }
-
- assert(timeout >= -1);
- base = loop->time;
- count = 48; /* Benchmarks suggest this gives the best throughput. */
- real_timeout = timeout;
-
- if (uv__get_internal_fields(loop)->flags & UV_METRICS_IDLE_TIME) {
- reset_timeout = 1;
- user_timeout = timeout;
- timeout = 0;
- } else {
- reset_timeout = 0;
- user_timeout = 0;
- }
-
- /* You could argue there is a dependency between these two but
- * ultimately we don't care about their ordering with respect
- * to one another. Worst case, we make a few system calls that
- * could have been avoided because another thread already knows
- * they fail with ENOSYS. Hardly the end of the world.
- */
- no_epoll_pwait = uv__load_relaxed(&no_epoll_pwait_cached);
- no_epoll_wait = uv__load_relaxed(&no_epoll_wait_cached);
-
- for (;;) {
- /* Only need to set the provider_entry_time if timeout != 0. The function
- * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
- */
- if (timeout != 0)
- uv__metrics_set_provider_entry_time(loop);
-
- /* See the comment for max_safe_timeout for an explanation of why
- * this is necessary. Executive summary: kernel bug workaround.
- */
- if (sizeof(int32_t) == sizeof(long) && timeout >= max_safe_timeout)
- timeout = max_safe_timeout;
-
- if (sigmask != 0 && no_epoll_pwait != 0)
- if (pthread_sigmask(SIG_BLOCK, &sigset, NULL))
- abort();
-
- if (no_epoll_wait != 0 || (sigmask != 0 && no_epoll_pwait == 0)) {
- nfds = epoll_pwait(loop->backend_fd,
- events,
- ARRAY_SIZE(events),
- timeout,
- &sigset);
- if (nfds == -1 && errno == ENOSYS) {
- uv__store_relaxed(&no_epoll_pwait_cached, 1);
- no_epoll_pwait = 1;
- }
- } else {
- nfds = epoll_wait(loop->backend_fd,
- events,
- ARRAY_SIZE(events),
- timeout);
- if (nfds == -1 && errno == ENOSYS) {
- uv__store_relaxed(&no_epoll_wait_cached, 1);
- no_epoll_wait = 1;
- }
- }
-
- if (sigmask != 0 && no_epoll_pwait != 0)
- if (pthread_sigmask(SIG_UNBLOCK, &sigset, NULL))
- abort();
-
- /* Update loop->time unconditionally. It's tempting to skip the update when
- * timeout == 0 (i.e. non-blocking poll) but there is no guarantee that the
- * operating system didn't reschedule our process while in the syscall.
- */
- SAVE_ERRNO(uv__update_time(loop));
-
- if (nfds == 0) {
- assert(timeout != -1);
-
- if (reset_timeout != 0) {
- timeout = user_timeout;
- reset_timeout = 0;
- }
-
- if (timeout == -1)
- continue;
-
- if (timeout == 0)
- return;
-
- /* We may have been inside the system call for longer than |timeout|
- * milliseconds so we need to update the timestamp to avoid drift.
- */
- goto update_timeout;
- }
-
- if (nfds == -1) {
- if (errno == ENOSYS) {
- /* epoll_wait() or epoll_pwait() failed, try the other system call. */
- assert(no_epoll_wait == 0 || no_epoll_pwait == 0);
- continue;
- }
-
- if (errno != EINTR)
- abort();
-
- if (reset_timeout != 0) {
- timeout = user_timeout;
- reset_timeout = 0;
- }
-
- if (timeout == -1)
- continue;
-
- if (timeout == 0)
- return;
-
- /* Interrupted by a signal. Update timeout and poll again. */
- goto update_timeout;
- }
-
- have_signals = 0;
- nevents = 0;
-
- {
- /* Squelch a -Waddress-of-packed-member warning with gcc >= 9. */
- union {
- struct epoll_event* events;
- uv__io_t* watchers;
- } x;
-
- x.events = events;
- assert(loop->watchers != NULL);
- loop->watchers[loop->nwatchers] = x.watchers;
- loop->watchers[loop->nwatchers + 1] = (void*) (uintptr_t) nfds;
- }
-
- for (i = 0; i < nfds; i++) {
- pe = events + i;
- fd = pe->data.fd;
-
- /* Skip invalidated events, see uv__platform_invalidate_fd */
- if (fd == -1)
- continue;
-
- assert(fd >= 0);
- assert((unsigned) fd < loop->nwatchers);
-
- w = loop->watchers[fd];
-
- if (w == NULL) {
- /* File descriptor that we've stopped watching, disarm it.
- *
- * Ignore all errors because we may be racing with another thread
- * when the file descriptor is closed.
- */
- epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, pe);
- continue;
- }
-
- /* Give users only events they're interested in. Prevents spurious
- * callbacks when previous callback invocation in this loop has stopped
- * the current watcher. Also, filters out events that users has not
- * requested us to watch.
- */
- pe->events &= w->pevents | POLLERR | POLLHUP;
-
- /* Work around an epoll quirk where it sometimes reports just the
- * EPOLLERR or EPOLLHUP event. In order to force the event loop to
- * move forward, we merge in the read/write events that the watcher
- * is interested in; uv__read() and uv__write() will then deal with
- * the error or hangup in the usual fashion.
- *
- * Note to self: happens when epoll reports EPOLLIN|EPOLLHUP, the user
- * reads the available data, calls uv_read_stop(), then sometime later
- * calls uv_read_start() again. By then, libuv has forgotten about the
- * hangup and the kernel won't report EPOLLIN again because there's
- * nothing left to read. If anything, libuv is to blame here. The
- * current hack is just a quick bandaid; to properly fix it, libuv
- * needs to remember the error/hangup event. We should get that for
- * free when we switch over to edge-triggered I/O.
- */
- if (pe->events == POLLERR || pe->events == POLLHUP)
- pe->events |=
- w->pevents & (POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
-
- if (pe->events != 0) {
- /* Run signal watchers last. This also affects child process watchers
- * because those are implemented in terms of signal watchers.
- */
- if (w == &loop->signal_io_watcher) {
- have_signals = 1;
- } else {
- uv__metrics_update_idle_time(loop);
- w->cb(loop, w, pe->events);
- }
-
- nevents++;
- }
- }
-
- if (reset_timeout != 0) {
- timeout = user_timeout;
- reset_timeout = 0;
- }
-
- if (have_signals != 0) {
- uv__metrics_update_idle_time(loop);
- loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
- }
-
- loop->watchers[loop->nwatchers] = NULL;
- loop->watchers[loop->nwatchers + 1] = NULL;
-
- if (have_signals != 0)
- return; /* Event loop should cycle now so don't poll again. */
-
- if (nevents != 0) {
- if (nfds == ARRAY_SIZE(events) && --count != 0) {
- /* Poll for more events but don't block this time. */
- timeout = 0;
- continue;
- }
- return;
- }
-
- if (timeout == 0)
- return;
-
- if (timeout == -1)
- continue;
-
-update_timeout:
- assert(timeout > 0);
-
- real_timeout -= (loop->time - base);
- if (real_timeout <= 0)
- return;
-
- timeout = real_timeout;
- }
-}
-
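
The uv__io_poll() body removed above (the epoll backend is reorganized in this libuv update) documents two recurring techniques: recomputing the remaining timeout from a monotonic clock whenever the wait is cut short, and merging the watcher's read/write interest back in when epoll reports a bare EPOLLERR/EPOLLHUP. A standalone sketch of the timeout-recompute part, independent of libuv's internals and using only plain epoll calls:

#include <errno.h>
#include <stdint.h>
#include <sys/epoll.h>
#include <time.h>

static int64_t now_ms(void) {
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return (int64_t) ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Wait up to `timeout` ms on `epfd`; after EINTR, subtract the time that
 * already elapsed so repeated signals cannot stretch the overall wait. */
static int wait_with_deadline(int epfd, struct epoll_event* ev, int timeout) {
  int64_t base = now_ms();
  for (;;) {
    int nfds = epoll_wait(epfd, ev, 1, timeout);
    if (nfds >= 0 || errno != EINTR)
      return nfds;
    if (timeout < 0)
      continue;                      /* Infinite wait: just retry. */
    timeout -= (int) (now_ms() - base);
    if (timeout <= 0)
      return 0;                      /* Deadline already passed. */
    base = now_ms();
  }
}
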
uint64_t uv__hrtime(uv_clocktype_t type) {
static clock_t fast_clock_id = -1;
@@ -602,22 +211,53 @@ err:
return UV_EINVAL;
}
+static int uv__slurp(const char* filename, char* buf, size_t len) {
+ ssize_t n;
+ int fd;
+
+ assert(len > 0);
+
+ fd = uv__open_cloexec(filename, O_RDONLY);
+ if (fd < 0)
+ return fd;
+
+ do
+ n = read(fd, buf, len - 1);
+ while (n == -1 && errno == EINTR);
+
+ if (uv__close_nocheckstdio(fd))
+ abort();
+
+ if (n < 0)
+ return UV__ERR(errno);
+
+ buf[n] = '\0';
+
+ return 0;
+}
int uv_uptime(double* uptime) {
static volatile int no_clock_boottime;
+ char buf[128];
struct timespec now;
int r;
+  /* Try /proc/uptime first, then fall back to clock_gettime(). */
+
+ if (0 == uv__slurp("/proc/uptime", buf, sizeof(buf)))
+ if (1 == sscanf(buf, "%lf", uptime))
+ return 0;
+
/* Try CLOCK_BOOTTIME first, fall back to CLOCK_MONOTONIC if not available
* (pre-2.6.39 kernels). CLOCK_MONOTONIC doesn't increase when the system
* is suspended.
*/
if (no_clock_boottime) {
- retry: r = clock_gettime(CLOCK_MONOTONIC, &now);
+ retry_clock_gettime: r = clock_gettime(CLOCK_MONOTONIC, &now);
}
else if ((r = clock_gettime(CLOCK_BOOTTIME, &now)) && errno == EINVAL) {
no_clock_boottime = 1;
- goto retry;
+ goto retry_clock_gettime;
}
if (r)
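
uv__slurp() (added above, and removed from its old location further down in this file) reads up to len-1 bytes of a file into a NUL-terminated buffer, retrying reads interrupted by signals; uv_uptime() then parses /proc/uptime out of that buffer before falling back to clock_gettime(). A standalone sketch of the same read-and-parse pattern using plain POSIX calls (file name and buffer size as in the code above):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Read /proc/uptime and parse the first field (seconds since boot). */
static int read_uptime(double* uptime) {
  char buf[128];
  ssize_t n;
  int fd;

  fd = open("/proc/uptime", O_RDONLY | O_CLOEXEC);
  if (fd < 0)
    return -errno;

  do
    n = read(fd, buf, sizeof(buf) - 1);
  while (n == -1 && errno == EINTR);   /* Retry interrupted reads. */

  close(fd);

  if (n < 0)
    return -errno;

  buf[n] = '\0';                       /* Make it a C string for sscanf. */
  if (sscanf(buf, "%lf", uptime) != 1)
    return -EINVAL;
  return 0;
}
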
@@ -709,35 +349,47 @@ static void read_speeds(unsigned int numcpus, uv_cpu_info_t* ci) {
}
-/* Also reads the CPU frequency on x86. The other architectures only have
- * a BogoMIPS field, which may not be very accurate.
+/* Also reads the CPU frequency on ppc and x86. The other architectures only
+ * have a BogoMIPS field, which may not be very accurate.
*
* Note: Simply returns on error, uv_cpu_info() takes care of the cleanup.
*/
static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
+#if defined(__PPC__)
+ static const char model_marker[] = "cpu\t\t: ";
+ static const char speed_marker[] = "clock\t\t: ";
+#else
static const char model_marker[] = "model name\t: ";
static const char speed_marker[] = "cpu MHz\t\t: ";
+#endif
const char* inferred_model;
unsigned int model_idx;
unsigned int speed_idx;
+ unsigned int part_idx;
char buf[1024];
char* model;
FILE* fp;
+ int model_id;
/* Most are unused on non-ARM, non-MIPS and non-x86 architectures. */
(void) &model_marker;
(void) &speed_marker;
(void) &speed_idx;
+ (void) &part_idx;
(void) &model;
(void) &buf;
(void) &fp;
+ (void) &model_id;
model_idx = 0;
speed_idx = 0;
+ part_idx = 0;
#if defined(__arm__) || \
defined(__i386__) || \
defined(__mips__) || \
+ defined(__aarch64__) || \
+ defined(__PPC__) || \
defined(__x86_64__)
fp = uv__open_file("/proc/cpuinfo");
if (fp == NULL)
@@ -756,11 +408,96 @@ static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
continue;
}
}
-#if defined(__arm__) || defined(__mips__)
+#if defined(__arm__) || defined(__mips__) || defined(__aarch64__)
if (model_idx < numcpus) {
#if defined(__arm__)
/* Fallback for pre-3.8 kernels. */
static const char model_marker[] = "Processor\t: ";
+#elif defined(__aarch64__)
+ static const char part_marker[] = "CPU part\t: ";
+
+ /* Adapted from: https://github.com/karelzak/util-linux */
+ struct vendor_part {
+ const int id;
+ const char* name;
+ };
+
+ static const struct vendor_part arm_chips[] = {
+ { 0x811, "ARM810" },
+ { 0x920, "ARM920" },
+ { 0x922, "ARM922" },
+ { 0x926, "ARM926" },
+ { 0x940, "ARM940" },
+ { 0x946, "ARM946" },
+ { 0x966, "ARM966" },
+ { 0xa20, "ARM1020" },
+ { 0xa22, "ARM1022" },
+ { 0xa26, "ARM1026" },
+ { 0xb02, "ARM11 MPCore" },
+ { 0xb36, "ARM1136" },
+ { 0xb56, "ARM1156" },
+ { 0xb76, "ARM1176" },
+ { 0xc05, "Cortex-A5" },
+ { 0xc07, "Cortex-A7" },
+ { 0xc08, "Cortex-A8" },
+ { 0xc09, "Cortex-A9" },
+ { 0xc0d, "Cortex-A17" }, /* Originally A12 */
+ { 0xc0f, "Cortex-A15" },
+ { 0xc0e, "Cortex-A17" },
+ { 0xc14, "Cortex-R4" },
+ { 0xc15, "Cortex-R5" },
+ { 0xc17, "Cortex-R7" },
+ { 0xc18, "Cortex-R8" },
+ { 0xc20, "Cortex-M0" },
+ { 0xc21, "Cortex-M1" },
+ { 0xc23, "Cortex-M3" },
+ { 0xc24, "Cortex-M4" },
+ { 0xc27, "Cortex-M7" },
+ { 0xc60, "Cortex-M0+" },
+ { 0xd01, "Cortex-A32" },
+ { 0xd03, "Cortex-A53" },
+ { 0xd04, "Cortex-A35" },
+ { 0xd05, "Cortex-A55" },
+ { 0xd06, "Cortex-A65" },
+ { 0xd07, "Cortex-A57" },
+ { 0xd08, "Cortex-A72" },
+ { 0xd09, "Cortex-A73" },
+ { 0xd0a, "Cortex-A75" },
+ { 0xd0b, "Cortex-A76" },
+ { 0xd0c, "Neoverse-N1" },
+ { 0xd0d, "Cortex-A77" },
+ { 0xd0e, "Cortex-A76AE" },
+ { 0xd13, "Cortex-R52" },
+ { 0xd20, "Cortex-M23" },
+ { 0xd21, "Cortex-M33" },
+ { 0xd41, "Cortex-A78" },
+ { 0xd42, "Cortex-A78AE" },
+ { 0xd4a, "Neoverse-E1" },
+ { 0xd4b, "Cortex-A78C" },
+ };
+
+ if (strncmp(buf, part_marker, sizeof(part_marker) - 1) == 0) {
+ model = buf + sizeof(part_marker) - 1;
+
+ errno = 0;
+ model_id = strtol(model, NULL, 16);
+ if ((errno != 0) || model_id < 0) {
+ fclose(fp);
+ return UV_EINVAL;
+ }
+
+ for (part_idx = 0; part_idx < ARRAY_SIZE(arm_chips); part_idx++) {
+ if (model_id == arm_chips[part_idx].id) {
+ model = uv__strdup(arm_chips[part_idx].name);
+ if (model == NULL) {
+ fclose(fp);
+ return UV_ENOMEM;
+ }
+ ci[model_idx++].model = model;
+ break;
+ }
+ }
+ }
#else /* defined(__mips__) */
static const char model_marker[] = "cpu model\t\t: ";
#endif
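
On aarch64 there is no human-readable "model name" line in /proc/cpuinfo, so the added code scans for the "CPU part" field, parses the hexadecimal part number, and maps it to a name via the table adapted from util-linux. A minimal sketch of the same scan-and-lookup, with a deliberately tiny table for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct part { int id; const char* name; };

/* Small subset of the table used above, for illustration only. */
static const struct part parts[] = {
  { 0xd03, "Cortex-A53" },
  { 0xd08, "Cortex-A72" },
  { 0xd0c, "Neoverse-N1" },
};

static const char* cpu_part_name(const char* cpuinfo_path) {
  static const char marker[] = "CPU part\t: ";
  char line[256];
  FILE* fp = fopen(cpuinfo_path, "r");
  if (fp == NULL)
    return NULL;

  while (fgets(line, sizeof(line), fp) != NULL) {
    if (strncmp(line, marker, sizeof(marker) - 1) != 0)
      continue;
    long id = strtol(line + sizeof(marker) - 1, NULL, 16);  /* Hex part id. */
    fclose(fp);
    for (size_t i = 0; i < sizeof(parts) / sizeof(parts[0]); i++)
      if (parts[i].id == id)
        return parts[i].name;
    return "unknown";
  }

  fclose(fp);
  return NULL;
}
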
@@ -775,18 +512,18 @@ static int read_models(unsigned int numcpus, uv_cpu_info_t* ci) {
continue;
}
}
-#else /* !__arm__ && !__mips__ */
+#else /* !__arm__ && !__mips__ && !__aarch64__ */
if (speed_idx < numcpus) {
if (strncmp(buf, speed_marker, sizeof(speed_marker) - 1) == 0) {
ci[speed_idx++].speed = atoi(buf + sizeof(speed_marker) - 1);
continue;
}
}
-#endif /* __arm__ || __mips__ */
+#endif /* __arm__ || __mips__ || __aarch64__ */
}
fclose(fp);
-#endif /* __arm__ || __i386__ || __mips__ || __x86_64__ */
+#endif /* __arm__ || __i386__ || __mips__ || __PPC__ || __x86_64__ || __aarch64__ */
/* Now we want to make sure that all the models contain *something* because
* it's not safe to leave them as null. Copy the last entry unless there
@@ -824,9 +561,9 @@ static int read_times(FILE* statfile_fp,
char buf[1024];
ticks = (unsigned int)sysconf(_SC_CLK_TCK);
- multiplier = ((uint64_t)1000L / ticks);
assert(ticks != (unsigned int) -1);
assert(ticks != 0);
+ multiplier = ((uint64_t)1000L / ticks);
rewind(statfile_fp);
@@ -1025,32 +762,6 @@ void uv__set_process_title(const char* title) {
}
-static int uv__slurp(const char* filename, char* buf, size_t len) {
- ssize_t n;
- int fd;
-
- assert(len > 0);
-
- fd = uv__open_cloexec(filename, O_RDONLY);
- if (fd < 0)
- return fd;
-
- do
- n = read(fd, buf, len - 1);
- while (n == -1 && errno == EINTR);
-
- if (uv__close_nocheckstdio(fd))
- abort();
-
- if (n < 0)
- return UV__ERR(errno);
-
- buf[n] = '\0';
-
- return 0;
-}
-
-
static uint64_t uv__read_proc_meminfo(const char* what) {
uint64_t rc;
char* p;
diff --git a/Utilities/cmlibuv/src/unix/linux-inotify.c b/Utilities/cmlibuv/src/unix/linux-inotify.c
index 42b601adbf..c1bd260e16 100644
--- a/Utilities/cmlibuv/src/unix/linux-inotify.c
+++ b/Utilities/cmlibuv/src/unix/linux-inotify.c
@@ -178,7 +178,7 @@ static void uv__inotify_read(uv_loop_t* loop,
/* needs to be large enough for sizeof(inotify_event) + strlen(path) */
char buf[4096];
- while (1) {
+ for (;;) {
do
size = read(loop->inotify_fd, buf, sizeof(buf));
while (size == -1 && errno == EINTR);
diff --git a/Utilities/cmlibuv/src/unix/linux-syscalls.c b/Utilities/cmlibuv/src/unix/linux-syscalls.c
index 44daaf12d4..5071cd56d1 100644
--- a/Utilities/cmlibuv/src/unix/linux-syscalls.c
+++ b/Utilities/cmlibuv/src/unix/linux-syscalls.c
@@ -194,37 +194,37 @@ int uv__recvmmsg(int fd, struct uv__mmsghdr* mmsg, unsigned int vlen) {
ssize_t uv__preadv(int fd, const struct iovec *iov, int iovcnt, int64_t offset) {
-#if defined(__NR_preadv)
- return syscall(__NR_preadv, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
-#else
+#if !defined(__NR_preadv) || defined(__ANDROID_API__) && __ANDROID_API__ < 24
return errno = ENOSYS, -1;
+#else
+ return syscall(__NR_preadv, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
#endif
}
ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset) {
-#if defined(__NR_pwritev)
- return syscall(__NR_pwritev, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
-#else
+#if !defined(__NR_pwritev) || defined(__ANDROID_API__) && __ANDROID_API__ < 24
return errno = ENOSYS, -1;
+#else
+ return syscall(__NR_pwritev, fd, iov, iovcnt, (long)offset, (long)(offset >> 32));
#endif
}
int uv__dup3(int oldfd, int newfd, int flags) {
-#if defined(__NR_dup3)
- return syscall(__NR_dup3, oldfd, newfd, flags);
-#else
+#if !defined(__NR_dup3) || defined(__ANDROID_API__) && __ANDROID_API__ < 21
return errno = ENOSYS, -1;
+#else
+ return syscall(__NR_dup3, oldfd, newfd, flags);
#endif
}
ssize_t
uv__fs_copy_file_range(int fd_in,
- ssize_t* off_in,
+ off_t* off_in,
int fd_out,
- ssize_t* off_out,
+ off_t* off_out,
size_t len,
unsigned int flags)
{
@@ -247,21 +247,18 @@ int uv__statx(int dirfd,
int flags,
unsigned int mask,
struct uv__statx* statxbuf) {
- /* __NR_statx make Android box killed by SIGSYS.
- * That looks like a seccomp2 sandbox filter rejecting the system call.
- */
-#if defined(__NR_statx) && !defined(__ANDROID__)
- return syscall(__NR_statx, dirfd, path, flags, mask, statxbuf);
-#else
+#if !defined(__NR_statx) || defined(__ANDROID_API__) && __ANDROID_API__ < 30
return errno = ENOSYS, -1;
+#else
+ return syscall(__NR_statx, dirfd, path, flags, mask, statxbuf);
#endif
}
ssize_t uv__getrandom(void* buf, size_t buflen, unsigned flags) {
-#if defined(__NR_getrandom)
- return syscall(__NR_getrandom, buf, buflen, flags);
-#else
+#if !defined(__NR_getrandom) || defined(__ANDROID_API__) && __ANDROID_API__ < 28
return errno = ENOSYS, -1;
+#else
+ return syscall(__NR_getrandom, buf, buflen, flags);
#endif
}
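
These wrappers are rewritten so that Android builds targeting an API level older than the one that permits the syscall simply report ENOSYS, rather than risking the seccomp SIGSYS kill described in the comment removed from uv__statx(). Callers are expected to detect ENOSYS and take an older code path; a hedged sketch of that caller-side pattern (my_getrandom stands in for a uv__getrandom-style wrapper and is an assumption, not libuv API):

#include <errno.h>
#include <fcntl.h>
#include <stddef.h>
#include <unistd.h>

/* Stand-in for a wrapper like the ones above: returns -1 with errno set to
 * ENOSYS when the syscall is compiled out or blocked on this platform. */
extern ssize_t my_getrandom(void* buf, size_t buflen, unsigned flags);

static int fill_random(void* buf, size_t len) {
  ssize_t n = my_getrandom(buf, len, 0);
  if (n == (ssize_t) len)
    return 0;
  if (n < 0 && errno != ENOSYS)
    return -errno;                   /* Real failure, not "syscall missing". */

  /* Fallback: read from /dev/urandom when getrandom() is unavailable. */
  int fd = open("/dev/urandom", O_RDONLY | O_CLOEXEC);
  if (fd < 0)
    return -errno;
  n = read(fd, buf, len);
  close(fd);
  return n == (ssize_t) len ? 0 : -EIO;
}
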
diff --git a/Utilities/cmlibuv/src/unix/linux-syscalls.h b/Utilities/cmlibuv/src/unix/linux-syscalls.h
index 761ff32e21..b4d9082d46 100644
--- a/Utilities/cmlibuv/src/unix/linux-syscalls.h
+++ b/Utilities/cmlibuv/src/unix/linux-syscalls.h
@@ -22,9 +22,6 @@
#ifndef UV_LINUX_SYSCALL_H_
#define UV_LINUX_SYSCALL_H_
-#undef _GNU_SOURCE
-#define _GNU_SOURCE
-
#include <stdint.h>
#include <signal.h>
#include <sys/types.h>
@@ -66,9 +63,9 @@ ssize_t uv__pwritev(int fd, const struct iovec *iov, int iovcnt, int64_t offset)
int uv__dup3(int oldfd, int newfd, int flags);
ssize_t
uv__fs_copy_file_range(int fd_in,
- ssize_t* off_in,
+ off_t* off_in,
int fd_out,
- ssize_t* off_out,
+ off_t* off_out,
size_t len,
unsigned int flags);
int uv__statx(int dirfd,
diff --git a/Utilities/cmlibuv/src/unix/os390-proctitle.c b/Utilities/cmlibuv/src/unix/os390-proctitle.c
new file mode 100644
index 0000000000..ccda97c9ac
--- /dev/null
+++ b/Utilities/cmlibuv/src/unix/os390-proctitle.c
@@ -0,0 +1,136 @@
+/* Copyright libuv project contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "uv.h"
+#include "internal.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+static uv_mutex_t process_title_mutex;
+static uv_once_t process_title_mutex_once = UV_ONCE_INIT;
+static char* process_title = NULL;
+static void* args_mem = NULL;
+
+
+static void init_process_title_mutex_once(void) {
+ uv_mutex_init(&process_title_mutex);
+}
+
+
+char** uv_setup_args(int argc, char** argv) {
+ char** new_argv;
+ size_t size;
+ char* s;
+ int i;
+
+ if (argc <= 0)
+ return argv;
+
+ /* Calculate how much memory we need for the argv strings. */
+ size = 0;
+ for (i = 0; i < argc; i++)
+ size += strlen(argv[i]) + 1;
+
+ /* Add space for the argv pointers. */
+ size += (argc + 1) * sizeof(char*);
+
+ new_argv = uv__malloc(size);
+ if (new_argv == NULL)
+ return argv;
+
+ /* Copy over the strings and set up the pointer table. */
+ s = (char*) &new_argv[argc + 1];
+ for (i = 0; i < argc; i++) {
+ size = strlen(argv[i]) + 1;
+ memcpy(s, argv[i], size);
+ new_argv[i] = s;
+ s += size;
+ }
+ new_argv[i] = NULL;
+
+ args_mem = new_argv;
+ process_title = uv__strdup(argv[0]);
+
+ return new_argv;
+}
+
+
+int uv_set_process_title(const char* title) {
+ char* new_title;
+
+ /* If uv_setup_args wasn't called or failed, we can't continue. */
+ if (args_mem == NULL)
+ return UV_ENOBUFS;
+
+ /* We cannot free this pointer when libuv shuts down,
+ * the process may still be using it.
+ */
+ new_title = uv__strdup(title);
+ if (new_title == NULL)
+ return UV_ENOMEM;
+
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
+ if (process_title != NULL)
+ uv__free(process_title);
+
+ process_title = new_title;
+
+ uv_mutex_unlock(&process_title_mutex);
+
+ return 0;
+}
+
+
+int uv_get_process_title(char* buffer, size_t size) {
+ size_t len;
+
+ if (buffer == NULL || size == 0)
+ return UV_EINVAL;
+
+ /* If uv_setup_args wasn't called or failed, we can't continue. */
+ if (args_mem == NULL || process_title == NULL)
+ return UV_ENOBUFS;
+
+ uv_once(&process_title_mutex_once, init_process_title_mutex_once);
+ uv_mutex_lock(&process_title_mutex);
+
+ len = strlen(process_title);
+
+ if (size <= len) {
+ uv_mutex_unlock(&process_title_mutex);
+ return UV_ENOBUFS;
+ }
+
+ strcpy(buffer, process_title);
+
+ uv_mutex_unlock(&process_title_mutex);
+
+ return 0;
+}
+
+
+void uv__process_title_cleanup(void) {
+ uv__free(args_mem); /* Keep valgrind happy. */
+ args_mem = NULL;
+}
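
The new z/OS implementation keeps its own heap copy of argv and of the title, guarded by a mutex, instead of rewriting argv memory in place. From the application's side the API is unchanged on every platform; a minimal usage sketch:

#include <stdio.h>
#include <string.h>
#include <uv.h>

int main(int argc, char** argv) {
  char title[64];

  /* Must be called before the title APIs; it may return a replacement argv. */
  argv = uv_setup_args(argc, argv);

  if (uv_set_process_title("my-worker: idle") != 0)
    return 1;

  if (uv_get_process_title(title, sizeof(title)) == 0)
    printf("current title: %s\n", title);

  return 0;
}
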
diff --git a/Utilities/cmlibuv/src/unix/os390-syscalls.c b/Utilities/cmlibuv/src/unix/os390-syscalls.c
index 491e950c50..c19155339c 100644
--- a/Utilities/cmlibuv/src/unix/os390-syscalls.c
+++ b/Utilities/cmlibuv/src/unix/os390-syscalls.c
@@ -27,12 +27,6 @@
#include <termios.h>
#include <sys/msg.h>
-#define CW_INTRPT 1
-#define CW_CONDVAR 32
-
-#pragma linkage(BPX4CTW, OS)
-#pragma linkage(BPX1CTW, OS)
-
static QUEUE global_epoll_queue;
static uv_mutex_t global_epoll_lock;
static uv_once_t once = UV_ONCE_INIT;
@@ -55,7 +49,7 @@ int scandir(const char* maindir, struct dirent*** namelist,
if (!mdir)
return -1;
- while (1) {
+ for (;;) {
dirent = readdir(mdir);
if (!dirent)
break;
@@ -381,46 +375,6 @@ void epoll_queue_close(uv__os390_epoll* lst) {
}
-int nanosleep(const struct timespec* req, struct timespec* rem) {
- unsigned nano;
- unsigned seconds;
- unsigned events;
- unsigned secrem;
- unsigned nanorem;
- int rv;
- int err;
- int rsn;
-
- nano = (int)req->tv_nsec;
- seconds = req->tv_sec;
- events = CW_CONDVAR | CW_INTRPT;
- secrem = 0;
- nanorem = 0;
-
-#if defined(_LP64)
- BPX4CTW(&seconds, &nano, &events, &secrem, &nanorem, &rv, &err, &rsn);
-#else
- BPX1CTW(&seconds, &nano, &events, &secrem, &nanorem, &rv, &err, &rsn);
-#endif
-
- /* Don't clobber errno unless BPX1CTW/BPX4CTW errored.
- * Don't leak EAGAIN, that just means the timeout expired.
- */
- if (rv == -1)
- if (err == EAGAIN)
- rv = 0;
- else
- errno = err;
-
- if (rem != NULL && (rv == 0 || err == EINTR)) {
- rem->tv_nsec = nanorem;
- rem->tv_sec = secrem;
- }
-
- return rv;
-}
-
-
char* mkdtemp(char* path) {
static const char* tempchars =
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
@@ -550,15 +504,6 @@ ssize_t os390_readlink(const char* path, char* buf, size_t len) {
}
-size_t strnlen(const char* str, size_t maxlen) {
- char* p = memchr(str, 0, maxlen);
- if (p == NULL)
- return maxlen;
- else
- return p - str;
-}
-
-
int sem_init(UV_PLATFORM_SEM_T* semid, int pshared, unsigned int value) {
UNREACHABLE();
}
diff --git a/Utilities/cmlibuv/src/unix/os390-syscalls.h b/Utilities/cmlibuv/src/unix/os390-syscalls.h
index 86416bbc55..7d59b75e47 100644
--- a/Utilities/cmlibuv/src/unix/os390-syscalls.h
+++ b/Utilities/cmlibuv/src/unix/os390-syscalls.h
@@ -28,6 +28,7 @@
#include <dirent.h>
#include <poll.h>
#include <pthread.h>
+#include "zos-base.h"
#define EPOLL_CTL_ADD 1
#define EPOLL_CTL_DEL 2
@@ -57,7 +58,6 @@ int epoll_wait(uv__os390_epoll* ep, struct epoll_event *events, int maxevents, i
int epoll_file_close(int fd);
/* utility functions */
-int nanosleep(const struct timespec* req, struct timespec* rem);
int scandir(const char* maindir, struct dirent*** namelist,
int (*filter)(const struct dirent *),
int (*compar)(const struct dirent **,
diff --git a/Utilities/cmlibuv/src/unix/os390.c b/Utilities/cmlibuv/src/unix/os390.c
index 3bb44266d9..bf0448b519 100644
--- a/Utilities/cmlibuv/src/unix/os390.c
+++ b/Utilities/cmlibuv/src/unix/os390.c
@@ -28,6 +28,8 @@
#include <builtins.h>
#include <termios.h>
#include <sys/msg.h>
+#include <sys/resource.h>
+#include "zos-base.h"
#if defined(__clang__)
#include "csrsic.h"
#else
@@ -61,12 +63,6 @@
/* Address of the rsm control and enumeration area. */
#define CVTRCEP_OFFSET 0x490
-/*
- Number of frames currently available to system.
- Excluded are frames backing perm storage, frames offline, and bad frames.
-*/
-#define RCEPOOL_OFFSET 0x004
-
/* Total number of frames currently on all available frame queues. */
#define RCEAFC_OFFSET 0x088
@@ -144,102 +140,8 @@ uint64_t uv__hrtime(uv_clocktype_t type) {
}
-/*
- Get the exe path using the thread entry information
- in the address space.
-*/
-static int getexe(const int pid, char* buf, size_t len) {
- struct {
- int pid;
- int thid[2];
- char accesspid;
- char accessthid;
- char asid[2];
- char loginname[8];
- char flag;
- char len;
- } Input_data;
-
- union {
- struct {
- char gthb[4];
- int pid;
- int thid[2];
- char accesspid;
- char accessthid[3];
- int lenused;
- int offsetProcess;
- int offsetConTTY;
- int offsetPath;
- int offsetCommand;
- int offsetFileData;
- int offsetThread;
- } Output_data;
- char buf[2048];
- } Output_buf;
-
- struct Output_path_type {
- char gthe[4];
- short int len;
- char path[1024];
- };
-
- int Input_length;
- int Output_length;
- void* Input_address;
- void* Output_address;
- struct Output_path_type* Output_path;
- int rv;
- int rc;
- int rsn;
-
- Input_length = PGTH_LEN;
- Output_length = sizeof(Output_buf);
- Output_address = &Output_buf;
- Input_address = &Input_data;
- memset(&Input_data, 0, sizeof Input_data);
- Input_data.flag |= PGTHAPATH;
- Input_data.pid = pid;
- Input_data.accesspid = PGTH_CURRENT;
-
-#ifdef _LP64
- BPX4GTH(&Input_length,
- &Input_address,
- &Output_length,
- &Output_address,
- &rv,
- &rc,
- &rsn);
-#else
- BPX1GTH(&Input_length,
- &Input_address,
- &Output_length,
- &Output_address,
- &rv,
- &rc,
- &rsn);
-#endif
-
- if (rv == -1) {
- errno = rc;
- return -1;
- }
-
- /* Check highest byte to ensure data availability */
- assert(((Output_buf.Output_data.offsetPath >>24) & 0xFF) == 'A');
-
- /* Get the offset from the lowest 3 bytes */
- Output_path = (struct Output_path_type*) ((char*) (&Output_buf) +
- (Output_buf.Output_data.offsetPath & 0x00FFFFFF));
-
- if (Output_path->len >= len) {
- errno = ENOBUFS;
- return -1;
- }
-
- uv__strscpy(buf, Output_path->path, len);
-
- return 0;
+static int getexe(char* buf, size_t len) {
+ return uv__strscpy(buf, __getargv()[0], len);
}
@@ -259,8 +161,7 @@ int uv_exepath(char* buffer, size_t* size) {
if (buffer == NULL || size == NULL || *size == 0)
return UV_EINVAL;
- pid = getpid();
- res = getexe(pid, args, sizeof(args));
+ res = getexe(args, sizeof(args));
if (res < 0)
return UV_EINVAL;
@@ -275,25 +176,25 @@ uint64_t uv_get_free_memory(void) {
data_area_ptr rcep = {0};
cvt.assign = *(data_area_ptr_assign_type*)(CVT_PTR);
rcep.assign = *(data_area_ptr_assign_type*)(cvt.deref + CVTRCEP_OFFSET);
- freeram = *((uint64_t*)(rcep.deref + RCEAFC_OFFSET)) * 4;
+ freeram = (uint64_t)*((uint32_t*)(rcep.deref + RCEAFC_OFFSET)) * 4096;
return freeram;
}
uint64_t uv_get_total_memory(void) {
- uint64_t totalram;
-
- data_area_ptr cvt = {0};
- data_area_ptr rcep = {0};
- cvt.assign = *(data_area_ptr_assign_type*)(CVT_PTR);
- rcep.assign = *(data_area_ptr_assign_type*)(cvt.deref + CVTRCEP_OFFSET);
- totalram = *((uint64_t*)(rcep.deref + RCEPOOL_OFFSET)) * 4;
- return totalram;
+ /* Use CVTRLSTG to get the size of actual real storage online at IPL in K. */
+ return (uint64_t)((int)((char *__ptr32 *__ptr32 *)0)[4][214]) * 1024;
}
uint64_t uv_get_constrained_memory(void) {
- return 0; /* Memory constraints are unknown. */
+ struct rlimit rl;
+
+ /* RLIMIT_MEMLIMIT return value is in megabytes rather than bytes. */
+ if (getrlimit(RLIMIT_MEMLIMIT, &rl) == 0)
+ return rl.rlim_cur * 1024 * 1024;
+
+ return 0; /* There is no memory limit set. */
}
@@ -733,6 +634,10 @@ static int os390_message_queue_handler(uv__os390_epoll* ep) {
/* Some event that we are not interested in. */
return 0;
+ /* `__rfim_utok` is treated as text when it should be treated as binary while
+ * running in ASCII mode, resulting in an unwanted autoconversion.
+ */
+ __a2e_l(msg.__rfim_utok, sizeof(msg.__rfim_utok));
handle = *(uv_fs_event_t**)(msg.__rfim_utok);
handle->cb(handle, uv__basename_r(handle->path), events, 0);
return 1;
@@ -959,9 +864,6 @@ update_timeout:
}
}
-void uv__set_process_title(const char* title) {
- /* do nothing */
-}
int uv__io_fork(uv_loop_t* loop) {
/*
diff --git a/Utilities/cmlibuv/src/unix/pipe.c b/Utilities/cmlibuv/src/unix/pipe.c
index 52a8fd52fa..fc7c4eae8d 100644
--- a/Utilities/cmlibuv/src/unix/pipe.c
+++ b/Utilities/cmlibuv/src/unix/pipe.c
@@ -377,3 +377,57 @@ int uv_pipe_chmod(uv_pipe_t* handle, int mode) {
return r != -1 ? 0 : UV__ERR(errno);
}
+
+
+int uv_pipe(uv_os_fd_t fds[2], int read_flags, int write_flags) {
+ uv_os_fd_t temp[2];
+ int err;
+#if defined(__FreeBSD__) || defined(__linux__)
+ int flags = O_CLOEXEC;
+
+ if ((read_flags & UV_NONBLOCK_PIPE) && (write_flags & UV_NONBLOCK_PIPE))
+ flags |= UV_FS_O_NONBLOCK;
+
+ if (pipe2(temp, flags))
+ return UV__ERR(errno);
+
+ if (flags & UV_FS_O_NONBLOCK) {
+ fds[0] = temp[0];
+ fds[1] = temp[1];
+ return 0;
+ }
+#else
+ if (pipe(temp))
+ return UV__ERR(errno);
+
+ if ((err = uv__cloexec(temp[0], 1)))
+ goto fail;
+
+ if ((err = uv__cloexec(temp[1], 1)))
+ goto fail;
+#endif
+
+ if (read_flags & UV_NONBLOCK_PIPE)
+ if ((err = uv__nonblock(temp[0], 1)))
+ goto fail;
+
+ if (write_flags & UV_NONBLOCK_PIPE)
+ if ((err = uv__nonblock(temp[1], 1)))
+ goto fail;
+
+ fds[0] = temp[0];
+ fds[1] = temp[1];
+ return 0;
+
+fail:
+ uv__close(temp[0]);
+ uv__close(temp[1]);
+ return err;
+}
+
+
+int uv__make_pipe(int fds[2], int flags) {
+ return uv_pipe(fds,
+ flags & UV_NONBLOCK_PIPE,
+ flags & UV_NONBLOCK_PIPE);
+}
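
uv_pipe() is the new public counterpart of the private uv__make_pipe() helper: it produces a pair of descriptors with close-on-exec set, and either end can be made non-blocking via UV_NONBLOCK_PIPE. A minimal sketch using the signature shown above (error handling reduced to asserts):

#include <assert.h>
#include <stdio.h>
#include <unistd.h>
#include <uv.h>

int main(void) {
  uv_os_fd_t fds[2];
  char buf[16];

  /* Read end non-blocking, write end blocking. */
  int r = uv_pipe(fds, UV_NONBLOCK_PIPE, 0);
  assert(r == 0);

  assert(write(fds[1], "ping", 4) == 4);
  assert(read(fds[0], buf, sizeof(buf)) == 4);
  buf[4] = '\0';
  printf("%s\n", buf);

  close(fds[0]);
  close(fds[1]);
  return 0;
}
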
diff --git a/Utilities/cmlibuv/src/unix/poll.c b/Utilities/cmlibuv/src/unix/poll.c
index 3d5022b22e..7a12e2d148 100644
--- a/Utilities/cmlibuv/src/unix/poll.c
+++ b/Utilities/cmlibuv/src/unix/poll.c
@@ -79,9 +79,10 @@ int uv_poll_init(uv_loop_t* loop, uv_poll_t* handle, int fd) {
* Workaround for e.g. kqueue fds not supporting ioctls.
*/
err = uv__nonblock(fd, 1);
+#if UV__NONBLOCK_IS_IOCTL
if (err == UV_ENOTTY)
- if (uv__nonblock == uv__nonblock_ioctl)
- err = uv__nonblock_fcntl(fd, 1);
+ err = uv__nonblock_fcntl(fd, 1);
+#endif
if (err)
return err;
@@ -116,12 +117,21 @@ int uv_poll_stop(uv_poll_t* handle) {
int uv_poll_start(uv_poll_t* handle, int pevents, uv_poll_cb poll_cb) {
+ uv__io_t** watchers;
+ uv__io_t* w;
int events;
assert((pevents & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT |
UV_PRIORITIZED)) == 0);
assert(!uv__is_closing(handle));
+ watchers = handle->loop->watchers;
+ w = &handle->io_watcher;
+
+ if (uv__fd_exists(handle->loop, w->fd))
+ if (watchers[w->fd] != w)
+ return UV_EEXIST;
+
uv__poll_stop(handle);
if (pevents == 0)
diff --git a/Utilities/cmlibuv/src/unix/process.c b/Utilities/cmlibuv/src/unix/process.c
index 08aa2f3cc6..2af2088f90 100644
--- a/Utilities/cmlibuv/src/unix/process.c
+++ b/Utilities/cmlibuv/src/unix/process.c
@@ -26,6 +26,7 @@
#include <stdlib.h>
#include <assert.h>
#include <errno.h>
+#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
@@ -45,6 +46,10 @@ extern char **environ;
# include <grp.h>
#endif
+#if defined(__MVS__)
+# include "zos-base.h"
+#endif
+
#ifndef CMAKE_BOOTSTRAP
#if defined(__linux__)
# define uv__cpu_set_t cpu_set_t
@@ -122,68 +127,6 @@ static void uv__chld(uv_signal_t* handle, int signum) {
assert(QUEUE_EMPTY(&pending));
}
-
-static int uv__make_socketpair(int fds[2]) {
-#if defined(__FreeBSD__) || defined(__linux__)
- if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, fds))
- return UV__ERR(errno);
-
- return 0;
-#else
- int err;
-
- if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds))
- return UV__ERR(errno);
-
- err = uv__cloexec(fds[0], 1);
- if (err == 0)
- err = uv__cloexec(fds[1], 1);
-
- if (err != 0) {
- uv__close(fds[0]);
- uv__close(fds[1]);
- return UV__ERR(errno);
- }
-
- return 0;
-#endif
-}
-
-
-int uv__make_pipe(int fds[2], int flags) {
-#if defined(__FreeBSD__) || defined(__linux__)
- if (pipe2(fds, flags | O_CLOEXEC))
- return UV__ERR(errno);
-
- return 0;
-#else
- if (pipe(fds))
- return UV__ERR(errno);
-
- if (uv__cloexec(fds[0], 1))
- goto fail;
-
- if (uv__cloexec(fds[1], 1))
- goto fail;
-
- if (flags & UV__F_NONBLOCK) {
- if (uv__nonblock(fds[0], 1))
- goto fail;
-
- if (uv__nonblock(fds[1], 1))
- goto fail;
- }
-
- return 0;
-
-fail:
- uv__close(fds[0]);
- uv__close(fds[1]);
- return UV__ERR(errno);
-#endif
-}
-
-
/*
* Used for initializing stdio streams like options.stdin_stream. Returns
* zero on success. See also the cleanup section in uv_spawn().
@@ -203,7 +146,7 @@ static int uv__process_init_stdio(uv_stdio_container_t* container, int fds[2]) {
if (container->data.stream->type != UV_NAMED_PIPE)
return UV_EINVAL;
else
- return uv__make_socketpair(fds);
+ return uv_socketpair(SOCK_STREAM, 0, fds, 0, 0);
case UV_INHERIT_FD:
case UV_INHERIT_STREAM:
@@ -270,6 +213,12 @@ static void uv__write_int(int fd, int val) {
}
+static void uv__write_errno(int error_fd) {
+ uv__write_int(error_fd, UV__ERR(errno));
+ _exit(127);
+}
+
+
#if !(defined(__APPLE__) && (TARGET_OS_TV || TARGET_OS_WATCH))
/* execvp is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED, so must be
* avoided. Since this isn't called on those targets, the function
@@ -279,10 +228,9 @@ static void uv__process_child_init(const uv_process_options_t* options,
int stdio_count,
int (*pipes)[2],
int error_fd) {
- sigset_t set;
+ sigset_t signewset;
int close_fd;
int use_fd;
- int err;
int fd;
int n;
#ifndef CMAKE_BOOTSTRAP
@@ -294,6 +242,26 @@ static void uv__process_child_init(const uv_process_options_t* options,
#endif
#endif
+ /* Reset signal disposition first. Use a hard-coded limit because NSIG is not
+ * fixed on Linux: it's either 32, 34 or 64, depending on whether RT signals
+ * are enabled. We are not allowed to touch RT signal handlers, glibc uses
+ * them internally.
+ */
+ for (n = 1; n < 32; n += 1) {
+ if (n == SIGKILL || n == SIGSTOP)
+ continue; /* Can't be changed. */
+
+#if defined(__HAIKU__)
+ if (n == SIGKILLTHR)
+ continue; /* Can't be changed. */
+#endif
+
+ if (SIG_ERR != signal(n, SIG_DFL))
+ continue;
+
+ uv__write_errno(error_fd);
+ }
+
if (options->flags & UV_PROCESS_DETACHED)
setsid();
@@ -306,10 +274,8 @@ static void uv__process_child_init(const uv_process_options_t* options,
if (use_fd < 0 || use_fd >= fd)
continue;
pipes[fd][1] = fcntl(use_fd, F_DUPFD, stdio_count);
- if (pipes[fd][1] == -1) {
- uv__write_int(error_fd, UV__ERR(errno));
- _exit(127);
- }
+ if (pipes[fd][1] == -1)
+ uv__write_errno(error_fd);
}
for (fd = 0; fd < stdio_count; fd++) {
@@ -326,10 +292,8 @@ static void uv__process_child_init(const uv_process_options_t* options,
use_fd = open("/dev/null", fd == 0 ? O_RDONLY : O_RDWR);
close_fd = use_fd;
- if (use_fd < 0) {
- uv__write_int(error_fd, UV__ERR(errno));
- _exit(127);
- }
+ if (use_fd < 0)
+ uv__write_errno(error_fd);
}
}
@@ -338,10 +302,8 @@ static void uv__process_child_init(const uv_process_options_t* options,
else
fd = dup2(use_fd, fd);
- if (fd == -1) {
- uv__write_int(error_fd, UV__ERR(errno));
- _exit(127);
- }
+ if (fd == -1)
+ uv__write_errno(error_fd);
if (fd <= 2)
uv__nonblock_fcntl(fd, 0);
@@ -357,10 +319,8 @@ static void uv__process_child_init(const uv_process_options_t* options,
uv__close(use_fd);
}
- if (options->cwd != NULL && chdir(options->cwd)) {
- uv__write_int(error_fd, UV__ERR(errno));
- _exit(127);
- }
+ if (options->cwd != NULL && chdir(options->cwd))
+ uv__write_errno(error_fd);
if (options->flags & (UV_PROCESS_SETUID | UV_PROCESS_SETGID)) {
/* When dropping privileges from root, the `setgroups` call will
@@ -373,15 +333,11 @@ static void uv__process_child_init(const uv_process_options_t* options,
SAVE_ERRNO(setgroups(0, NULL));
}
- if ((options->flags & UV_PROCESS_SETGID) && setgid(options->gid)) {
- uv__write_int(error_fd, UV__ERR(errno));
- _exit(127);
- }
+ if ((options->flags & UV_PROCESS_SETGID) && setgid(options->gid))
+ uv__write_errno(error_fd);
- if ((options->flags & UV_PROCESS_SETUID) && setuid(options->uid)) {
- uv__write_int(error_fd, UV__ERR(errno));
- _exit(127);
- }
+ if ((options->flags & UV_PROCESS_SETUID) && setuid(options->uid))
+ uv__write_errno(error_fd);
#ifndef CMAKE_BOOTSTRAP
#if defined(__linux__) || defined(__FreeBSD__)
@@ -409,39 +365,19 @@ static void uv__process_child_init(const uv_process_options_t* options,
environ = options->env;
}
- /* Reset signal disposition. Use a hard-coded limit because NSIG
- * is not fixed on Linux: it's either 32, 34 or 64, depending on
- * whether RT signals are enabled. We are not allowed to touch
- * RT signal handlers, glibc uses them internally.
- */
- for (n = 1; n < 32; n += 1) {
- if (n == SIGKILL || n == SIGSTOP)
- continue; /* Can't be changed. */
+ /* Reset signal mask just before exec. */
+ sigemptyset(&signewset);
+ if (sigprocmask(SIG_SETMASK, &signewset, NULL) != 0)
+ abort();
-#if defined(__HAIKU__)
- if (n == SIGKILLTHR)
- continue; /* Can't be changed. */
+#ifdef __MVS__
+ execvpe(options->file, options->args, environ);
+#else
+ execvp(options->file, options->args);
#endif
- if (SIG_ERR != signal(n, SIG_DFL))
- continue;
-
- uv__write_int(error_fd, UV__ERR(errno));
- _exit(127);
- }
-
- /* Reset signal mask. */
- sigemptyset(&set);
- err = pthread_sigmask(SIG_SETMASK, &set, NULL);
-
- if (err != 0) {
- uv__write_int(error_fd, UV__ERR(err));
- _exit(127);
- }
-
- execvp(options->file, options->args);
- uv__write_int(error_fd, UV__ERR(errno));
- _exit(127);
+ uv__write_errno(error_fd);
+ abort();
}
#endif
@@ -453,6 +389,8 @@ int uv_spawn(uv_loop_t* loop,
/* fork is marked __WATCHOS_PROHIBITED __TVOS_PROHIBITED. */
return UV_ENOSYS;
#else
+ sigset_t signewset;
+ sigset_t sigoldset;
int signal_pipe[2] = { -1, -1 };
int pipes_storage[8][2];
int (*pipes)[2];
@@ -541,25 +479,41 @@ int uv_spawn(uv_loop_t* loop,
/* Acquire write lock to prevent opening new fds in worker threads */
uv_rwlock_wrlock(&loop->cloexec_lock);
- pid = fork();
- if (pid == -1) {
+ /* Start the child with most signals blocked, to avoid any issues before we
+ * can reset them, but allow program failures to exit (and not hang). */
+ sigfillset(&signewset);
+ sigdelset(&signewset, SIGKILL);
+ sigdelset(&signewset, SIGSTOP);
+ sigdelset(&signewset, SIGTRAP);
+ sigdelset(&signewset, SIGSEGV);
+ sigdelset(&signewset, SIGBUS);
+ sigdelset(&signewset, SIGILL);
+ sigdelset(&signewset, SIGSYS);
+ sigdelset(&signewset, SIGABRT);
+ if (pthread_sigmask(SIG_BLOCK, &signewset, &sigoldset) != 0)
+ abort();
+
+ pid = fork();
+ if (pid == -1)
err = UV__ERR(errno);
- uv_rwlock_wrunlock(&loop->cloexec_lock);
- uv__close(signal_pipe[0]);
- uv__close(signal_pipe[1]);
- goto error;
- }
- if (pid == 0) {
+ if (pid == 0)
uv__process_child_init(options, stdio_count, pipes, signal_pipe[1]);
+
+ if (pthread_sigmask(SIG_SETMASK, &sigoldset, NULL) != 0)
abort();
- }
/* Release lock in parent process */
uv_rwlock_wrunlock(&loop->cloexec_lock);
+
uv__close(signal_pipe[1]);
+ if (pid == -1) {
+ uv__close(signal_pipe[0]);
+ goto error;
+ }
+
process->status = 0;
exec_errorno = 0;
do
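
Taken together, the changes above block almost every signal in the parent before fork(), so no handler inherited from the embedding application can run in the child between fork() and exec(); the child then restores default dispositions and clears the mask immediately before exec. A standalone sketch of that technique outside libuv (fatal signals are left deliverable so a crashing child still terminates):

#include <signal.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

/* Fork-and-exec with signals blocked across the fork, as the code above does. */
static pid_t spawn(char* const argv[]) {
  sigset_t newset, oldset;
  pid_t pid;
  int n;

  sigfillset(&newset);
  sigdelset(&newset, SIGSEGV);
  sigdelset(&newset, SIGBUS);
  sigdelset(&newset, SIGILL);
  sigdelset(&newset, SIGABRT);
  if (pthread_sigmask(SIG_BLOCK, &newset, &oldset) != 0)
    abort();

  pid = fork();
  if (pid == 0) {
    /* Child: restore default dispositions (RT signals left alone). */
    for (n = 1; n < 32; n++)
      if (n != SIGKILL && n != SIGSTOP)
        signal(n, SIG_DFL);
    sigemptyset(&newset);
    sigprocmask(SIG_SETMASK, &newset, NULL);  /* Unblock everything. */
    execvp(argv[0], argv);
    _exit(127);
  }

  /* Parent: restore its original mask whether or not fork() succeeded. */
  if (pthread_sigmask(SIG_SETMASK, &oldset, NULL) != 0)
    abort();
  return pid;
}
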
diff --git a/Utilities/cmlibuv/src/unix/proctitle.c b/Utilities/cmlibuv/src/unix/proctitle.c
index 9ffe5b629c..9d1f00ddf6 100644
--- a/Utilities/cmlibuv/src/unix/proctitle.c
+++ b/Utilities/cmlibuv/src/unix/proctitle.c
@@ -84,10 +84,7 @@ char** uv_setup_args(int argc, char** argv) {
}
new_argv[i] = NULL;
- /* argv is not adjacent on z/os, we use just argv[0] on that platform. */
-#ifndef __MVS__
pt.cap = argv[i - 1] + size - argv[0];
-#endif
args_mem = new_argv;
process_title = pt;
@@ -119,6 +116,7 @@ int uv_set_process_title(const char* title) {
memcpy(pt->str, title, len);
memset(pt->str + len, '\0', pt->cap - len);
pt->len = len;
+ uv__set_process_title(pt->str);
uv_mutex_unlock(&process_title_mutex);
diff --git a/Utilities/cmlibuv/src/unix/signal.c b/Utilities/cmlibuv/src/unix/signal.c
index f40a3e54eb..1133c73a95 100644
--- a/Utilities/cmlibuv/src/unix/signal.c
+++ b/Utilities/cmlibuv/src/unix/signal.c
@@ -265,7 +265,7 @@ static int uv__signal_loop_once_init(uv_loop_t* loop) {
if (loop->signal_pipefd[0] != -1)
return 0;
- err = uv__make_pipe(loop->signal_pipefd, UV__F_NONBLOCK);
+ err = uv__make_pipe(loop->signal_pipefd, UV_NONBLOCK_PIPE);
if (err)
return err;
diff --git a/Utilities/cmlibuv/src/unix/stream.c b/Utilities/cmlibuv/src/unix/stream.c
index 3b6da8d464..dd0e0f7f58 100644
--- a/Utilities/cmlibuv/src/unix/stream.c
+++ b/Utilities/cmlibuv/src/unix/stream.c
@@ -164,7 +164,7 @@ static void uv__stream_osx_select(void* arg) {
else
max_fd = s->int_fd;
- while (1) {
+ for (;;) {
/* Terminate on semaphore */
if (uv_sem_trywait(&s->close_sem) == 0)
break;
@@ -195,7 +195,7 @@ static void uv__stream_osx_select(void* arg) {
/* Empty socketpair's buffer in case of interruption */
if (FD_ISSET(s->int_fd, s->sread))
- while (1) {
+ for (;;) {
r = read(s->int_fd, buf, sizeof(buf));
if (r == sizeof(buf))
@@ -799,33 +799,21 @@ static int uv__handle_fd(uv_handle_t* handle) {
}
}
-static void uv__write(uv_stream_t* stream) {
+static int uv__try_write(uv_stream_t* stream,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ uv_stream_t* send_handle) {
struct iovec* iov;
- QUEUE* q;
- uv_write_t* req;
int iovmax;
int iovcnt;
ssize_t n;
- int err;
-
-start:
-
- assert(uv__stream_fd(stream) >= 0);
-
- if (QUEUE_EMPTY(&stream->write_queue))
- return;
-
- q = QUEUE_HEAD(&stream->write_queue);
- req = QUEUE_DATA(q, uv_write_t, queue);
- assert(req->handle == stream);
/*
* Cast to iovec. We had to have our own uv_buf_t instead of iovec
* because Windows's WSABUF is not an iovec.
*/
- assert(sizeof(uv_buf_t) == sizeof(struct iovec));
- iov = (struct iovec*) &(req->bufs[req->write_index]);
- iovcnt = req->nbufs - req->write_index;
+ iov = (struct iovec*) bufs;
+ iovcnt = nbufs;
iovmax = uv__getiovmax();
@@ -837,8 +825,7 @@ start:
* Now do the actual writev. Note that we've been updating the pointers
* inside the iov each time we write. So there is no need to offset it.
*/
-
- if (req->send_handle) {
+ if (send_handle != NULL) {
int fd_to_send;
struct msghdr msg;
struct cmsghdr *cmsg;
@@ -847,12 +834,10 @@ start:
struct cmsghdr alias;
} scratch;
- if (uv__is_closing(req->send_handle)) {
- err = UV_EBADF;
- goto error;
- }
+ if (uv__is_closing(send_handle))
+ return UV_EBADF;
- fd_to_send = uv__handle_fd((uv_handle_t*) req->send_handle);
+ fd_to_send = uv__handle_fd((uv_handle_t*) send_handle);
memset(&scratch, 0, sizeof(scratch));
@@ -882,44 +867,68 @@ start:
do
n = sendmsg(uv__stream_fd(stream), &msg, 0);
while (n == -1 && RETRY_ON_WRITE_ERROR(errno));
-
- /* Ensure the handle isn't sent again in case this is a partial write. */
- if (n >= 0)
- req->send_handle = NULL;
} else {
do
n = uv__writev(uv__stream_fd(stream), iov, iovcnt);
while (n == -1 && RETRY_ON_WRITE_ERROR(errno));
}
- if (n == -1 && !IS_TRANSIENT_WRITE_ERROR(errno, req->send_handle)) {
- err = UV__ERR(errno);
- goto error;
- }
+ if (n >= 0)
+ return n;
- if (n >= 0 && uv__write_req_update(stream, req, n)) {
- uv__write_req_finish(req);
- return; /* TODO(bnoordhuis) Start trying to write the next request. */
- }
+ if (IS_TRANSIENT_WRITE_ERROR(errno, send_handle))
+ return UV_EAGAIN;
- /* If this is a blocking stream, try again. */
- if (stream->flags & UV_HANDLE_BLOCKING_WRITES)
- goto start;
+ return UV__ERR(errno);
+}
- /* We're not done. */
- uv__io_start(stream->loop, &stream->io_watcher, POLLOUT);
+static void uv__write(uv_stream_t* stream) {
+ QUEUE* q;
+ uv_write_t* req;
+ ssize_t n;
- /* Notify select() thread about state change */
- uv__stream_osx_interrupt_select(stream);
+ assert(uv__stream_fd(stream) >= 0);
+
+ for (;;) {
+ if (QUEUE_EMPTY(&stream->write_queue))
+ return;
+
+ q = QUEUE_HEAD(&stream->write_queue);
+ req = QUEUE_DATA(q, uv_write_t, queue);
+ assert(req->handle == stream);
+
+ n = uv__try_write(stream,
+ &(req->bufs[req->write_index]),
+ req->nbufs - req->write_index,
+ req->send_handle);
+
+ /* Ensure the handle isn't sent again in case this is a partial write. */
+ if (n >= 0) {
+ req->send_handle = NULL;
+ if (uv__write_req_update(stream, req, n)) {
+ uv__write_req_finish(req);
+ return; /* TODO(bnoordhuis) Start trying to write the next request. */
+ }
+ } else if (n != UV_EAGAIN)
+ break;
+
+ /* If this is a blocking stream, try again. */
+ if (stream->flags & UV_HANDLE_BLOCKING_WRITES)
+ continue;
+
+ /* We're not done. */
+ uv__io_start(stream->loop, &stream->io_watcher, POLLOUT);
- return;
+ /* Notify select() thread about state change */
+ uv__stream_osx_interrupt_select(stream);
+
+ return;
+ }
-error:
- req->error = err;
+ req->error = n;
+ // XXX(jwn): this must call uv__stream_flush_write_queue(stream, n) here, since we won't generate any more events
uv__write_req_finish(req);
uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
- if (!uv__io_active(&stream->io_watcher, POLLIN))
- uv__handle_stop(stream);
uv__stream_osx_interrupt_select(stream);
}
@@ -1001,9 +1010,9 @@ uv_handle_type uv__handle_type(int fd) {
static void uv__stream_eof(uv_stream_t* stream, const uv_buf_t* buf) {
stream->flags |= UV_HANDLE_READ_EOF;
stream->flags &= ~UV_HANDLE_READING;
+ stream->flags &= ~UV_HANDLE_READABLE;
uv__io_stop(stream->loop, &stream->io_watcher, POLLIN);
- if (!uv__io_active(&stream->io_watcher, POLLOUT))
- uv__handle_stop(stream);
+ uv__handle_stop(stream);
uv__stream_osx_interrupt_select(stream);
stream->read_cb(stream, UV_EOF, buf);
}
@@ -1188,12 +1197,12 @@ static void uv__read(uv_stream_t* stream) {
#endif
} else {
/* Error. User should call uv_close(). */
+ stream->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
stream->read_cb(stream, UV__ERR(errno), &buf);
if (stream->flags & UV_HANDLE_READING) {
stream->flags &= ~UV_HANDLE_READING;
uv__io_stop(stream->loop, &stream->io_watcher, POLLIN);
- if (!uv__io_active(&stream->io_watcher, POLLOUT))
- uv__handle_stop(stream);
+ uv__handle_stop(stream);
uv__stream_osx_interrupt_select(stream);
}
}
@@ -1276,6 +1285,7 @@ int uv_shutdown(uv_shutdown_t* req, uv_stream_t* stream, uv_shutdown_cb cb) {
req->cb = cb;
stream->shutdown_req = req;
stream->flags |= UV_HANDLE_SHUTTING;
+ stream->flags &= ~UV_HANDLE_WRITABLE;
uv__io_start(stream->loop, &stream->io_watcher, POLLOUT);
uv__stream_osx_interrupt_select(stream);
@@ -1390,14 +1400,9 @@ static void uv__stream_connect(uv_stream_t* stream) {
}
-int uv_write2(uv_write_t* req,
- uv_stream_t* stream,
- const uv_buf_t bufs[],
- unsigned int nbufs,
- uv_stream_t* send_handle,
- uv_write_cb cb) {
- int empty_queue;
-
+static int uv__check_before_write(uv_stream_t* stream,
+ unsigned int nbufs,
+ uv_stream_t* send_handle) {
assert(nbufs > 0);
assert((stream->type == UV_TCP ||
stream->type == UV_NAMED_PIPE ||
@@ -1410,7 +1415,7 @@ int uv_write2(uv_write_t* req,
if (!(stream->flags & UV_HANDLE_WRITABLE))
return UV_EPIPE;
- if (send_handle) {
+ if (send_handle != NULL) {
if (stream->type != UV_NAMED_PIPE || !((uv_pipe_t*)stream)->ipc)
return UV_EINVAL;
@@ -1430,6 +1435,22 @@ int uv_write2(uv_write_t* req,
#endif
}
+ return 0;
+}
+
+int uv_write2(uv_write_t* req,
+ uv_stream_t* stream,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ uv_stream_t* send_handle,
+ uv_write_cb cb) {
+ int empty_queue;
+ int err;
+
+ err = uv__check_before_write(stream, nbufs, send_handle);
+ if (err < 0)
+ return err;
+
/* It's legal for write_queue_size > 0 even when the write_queue is empty;
* it means there are error-state requests in the write_completed_queue that
* will touch up write_queue_size later, see also uv__write_req_finish().
@@ -1498,72 +1519,37 @@ int uv_write(uv_write_t* req,
}
-void uv_try_write_cb(uv_write_t* req, int status) {
- /* Should not be called */
- abort();
-}
-
-
int uv_try_write(uv_stream_t* stream,
const uv_buf_t bufs[],
unsigned int nbufs) {
- int r;
- int has_pollout;
- size_t written;
- size_t req_size;
- uv_write_t req;
+ return uv_try_write2(stream, bufs, nbufs, NULL);
+}
+
+
+int uv_try_write2(uv_stream_t* stream,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ uv_stream_t* send_handle) {
+ int err;
/* Connecting or already writing some data */
if (stream->connect_req != NULL || stream->write_queue_size != 0)
return UV_EAGAIN;
- has_pollout = uv__io_active(&stream->io_watcher, POLLOUT);
+ err = uv__check_before_write(stream, nbufs, NULL);
+ if (err < 0)
+ return err;
- r = uv_write(&req, stream, bufs, nbufs, uv_try_write_cb);
- if (r != 0)
- return r;
-
- /* Remove not written bytes from write queue size */
- written = uv__count_bufs(bufs, nbufs);
- if (req.bufs != NULL)
- req_size = uv__write_req_size(&req);
- else
- req_size = 0;
- written -= req_size;
- stream->write_queue_size -= req_size;
-
- /* Unqueue request, regardless of immediateness */
- QUEUE_REMOVE(&req.queue);
- uv__req_unregister(stream->loop, &req);
- if (req.bufs != req.bufsml)
- uv__free(req.bufs);
- req.bufs = NULL;
-
-  /* Do not poll for writable if we weren't before calling this */
- if (!has_pollout) {
- uv__io_stop(stream->loop, &stream->io_watcher, POLLOUT);
- uv__stream_osx_interrupt_select(stream);
- }
-
- if (written == 0 && req_size != 0)
- return req.error < 0 ? req.error : UV_EAGAIN;
- else
- return written;
+ return uv__try_write(stream, bufs, nbufs, send_handle);
}
-int uv_read_start(uv_stream_t* stream,
- uv_alloc_cb alloc_cb,
- uv_read_cb read_cb) {
+int uv__read_start(uv_stream_t* stream,
+ uv_alloc_cb alloc_cb,
+ uv_read_cb read_cb) {
assert(stream->type == UV_TCP || stream->type == UV_NAMED_PIPE ||
stream->type == UV_TTY);
- if (stream->flags & UV_HANDLE_CLOSING)
- return UV_EINVAL;
-
- if (!(stream->flags & UV_HANDLE_READABLE))
- return UV_ENOTCONN;
-
/* The UV_HANDLE_READING flag is irrelevant of the state of the tcp - it just
* expresses the desired state of the user.
*/
@@ -1593,8 +1579,7 @@ int uv_read_stop(uv_stream_t* stream) {
stream->flags &= ~UV_HANDLE_READING;
uv__io_stop(stream->loop, &stream->io_watcher, POLLIN);
- if (!uv__io_active(&stream->io_watcher, POLLOUT))
- uv__handle_stop(stream);
+ uv__handle_stop(stream);
uv__stream_osx_interrupt_select(stream);
stream->read_cb = NULL;
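
The write path is reworked so uv_try_write(), the new uv_try_write2() and the queued path all share uv__try_write(). From the caller's side the usual pattern is unchanged: attempt a synchronous write first and queue the remainder with uv_write() on UV_EAGAIN or a partial write. A hedged sketch (the buffer passed in must stay valid until the completion callback runs):

#include <stdlib.h>
#include <uv.h>

static void on_write(uv_write_t* req, int status) {
  free(req);
}

/* Try to write synchronously; queue the unwritten remainder if needed. */
static int send_buf(uv_stream_t* stream, const char* data, size_t len) {
  uv_buf_t buf = uv_buf_init((char*) data, (unsigned int) len);

  int n = uv_try_write(stream, &buf, 1);
  if (n == (int) len)
    return 0;                       /* Everything went out immediately. */
  if (n < 0 && n != UV_EAGAIN)
    return n;                       /* Hard error. */
  if (n > 0) {                      /* Partial write: queue the rest. */
    buf.base += n;
    buf.len -= n;
  }

  uv_write_t* req = malloc(sizeof(*req));
  if (req == NULL)
    return UV_ENOMEM;
  return uv_write(req, stream, &buf, 1, on_write);
}
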
diff --git a/Utilities/cmlibuv/src/unix/sunos.c b/Utilities/cmlibuv/src/unix/sunos.c
index 2166e8f08e..eab2e40a67 100644
--- a/Utilities/cmlibuv/src/unix/sunos.c
+++ b/Utilities/cmlibuv/src/unix/sunos.c
@@ -869,3 +869,14 @@ void uv_free_interface_addresses(uv_interface_address_t* addresses,
uv__free(addresses);
}
+
+
+#if !defined(_POSIX_VERSION) || _POSIX_VERSION < 200809L
+size_t strnlen(const char* s, size_t maxlen) {
+ const char* end;
+ end = memchr(s, '\0', maxlen);
+ if (end == NULL)
+ return maxlen;
+ return end - s;
+}
+#endif
diff --git a/Utilities/cmlibuv/src/unix/tcp.c b/Utilities/cmlibuv/src/unix/tcp.c
index 18acd20df1..bc0fb661f1 100644
--- a/Utilities/cmlibuv/src/unix/tcp.c
+++ b/Utilities/cmlibuv/src/unix/tcp.c
@@ -214,14 +214,15 @@ int uv__tcp_connect(uv_connect_t* req,
if (handle->connect_req != NULL)
return UV_EALREADY; /* FIXME(bnoordhuis) UV_EINVAL or maybe UV_EBUSY. */
+ if (handle->delayed_error != 0)
+ goto out;
+
err = maybe_new_socket(handle,
addr->sa_family,
UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
if (err)
return err;
- handle->delayed_error = 0;
-
do {
errno = 0;
r = connect(uv__stream_fd(handle), addr, addrlen);
@@ -249,6 +250,8 @@ int uv__tcp_connect(uv_connect_t* req,
return UV__ERR(errno);
}
+out:
+
uv__req_init(handle->loop, req, UV_CONNECT);
req->cb = cb;
req->handle = (uv_stream_t*) handle;
@@ -459,3 +462,49 @@ int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
void uv__tcp_close(uv_tcp_t* handle) {
uv__stream_close((uv_stream_t*)handle);
}
+
+
+int uv_socketpair(int type, int protocol, uv_os_sock_t fds[2], int flags0, int flags1) {
+ uv_os_sock_t temp[2];
+ int err;
+#if defined(__FreeBSD__) || defined(__linux__)
+ int flags;
+
+ flags = type | SOCK_CLOEXEC;
+ if ((flags0 & UV_NONBLOCK_PIPE) && (flags1 & UV_NONBLOCK_PIPE))
+ flags |= SOCK_NONBLOCK;
+
+ if (socketpair(AF_UNIX, flags, protocol, temp))
+ return UV__ERR(errno);
+
+ if (flags & UV_FS_O_NONBLOCK) {
+ fds[0] = temp[0];
+ fds[1] = temp[1];
+ return 0;
+ }
+#else
+ if (socketpair(AF_UNIX, type, protocol, temp))
+ return UV__ERR(errno);
+
+ if ((err = uv__cloexec(temp[0], 1)))
+ goto fail;
+ if ((err = uv__cloexec(temp[1], 1)))
+ goto fail;
+#endif
+
+ if (flags0 & UV_NONBLOCK_PIPE)
+ if ((err = uv__nonblock(temp[0], 1)))
+ goto fail;
+ if (flags1 & UV_NONBLOCK_PIPE)
+ if ((err = uv__nonblock(temp[1], 1)))
+ goto fail;
+
+ fds[0] = temp[0];
+ fds[1] = temp[1];
+ return 0;
+
+fail:
+ uv__close(temp[0]);
+ uv__close(temp[1]);
+ return err;
+}
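
uv_socketpair() becomes a public helper (and replaces uv__make_socketpair() in the spawn stdio setup elsewhere in this diff): it returns a connected pair of AF_UNIX sockets with CLOEXEC set and, optionally, non-blocking ends. A minimal sketch:

#include <assert.h>
#include <sys/socket.h>
#include <unistd.h>
#include <uv.h>

int main(void) {
  uv_os_sock_t fds[2];
  char buf[16];

  int r = uv_socketpair(SOCK_STREAM, 0, fds, UV_NONBLOCK_PIPE, UV_NONBLOCK_PIPE);
  assert(r == 0);

  assert(write(fds[1], "hello", 5) == 5);
  assert(read(fds[0], buf, sizeof(buf)) == 5);

  close(fds[0]);
  close(fds[1]);
  return 0;
}
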
diff --git a/Utilities/cmlibuv/src/unix/thread.c b/Utilities/cmlibuv/src/unix/thread.c
index 8aeb0ca382..cbddf173f7 100644
--- a/Utilities/cmlibuv/src/unix/thread.c
+++ b/Utilities/cmlibuv/src/unix/thread.c
@@ -107,8 +107,7 @@ int uv_barrier_wait(uv_barrier_t* barrier) {
}
last = (--b->out == 0);
- if (!last)
- uv_cond_signal(&b->cond); /* Not needed for last thread. */
+ uv_cond_signal(&b->cond);
uv_mutex_unlock(&b->mutex);
return last;
@@ -122,9 +121,10 @@ void uv_barrier_destroy(uv_barrier_t* barrier) {
uv_mutex_lock(&b->mutex);
assert(b->in == 0);
- assert(b->out == 0);
+ while (b->out != 0)
+ uv_cond_wait(&b->cond, &b->mutex);
- if (b->in != 0 || b->out != 0)
+ if (b->in != 0)
abort();
uv_mutex_unlock(&b->mutex);
@@ -168,7 +168,7 @@ void uv_barrier_destroy(uv_barrier_t* barrier) {
* On Linux, threads created by musl have a much smaller stack than threads
* created by glibc (80 vs. 2048 or 4096 kB.) Follow glibc for consistency.
*/
-static size_t thread_stack_size(void) {
+size_t uv__thread_stack_size(void) {
#if defined(__APPLE__) || defined(__linux__)
struct rlimit lim;
@@ -234,7 +234,7 @@ int uv_thread_create_ex(uv_thread_t* tid,
attr = NULL;
if (stack_size == 0) {
- stack_size = thread_stack_size();
+ stack_size = uv__thread_stack_size();
} else {
pagesize = (size_t)getpagesize();
/* Round up to the nearest page boundary. */
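
The uv_barrier changes above signal the condition whenever a waiter leaves and make uv_barrier_destroy() wait until the last waiter has actually left uv_barrier_wait(), so the common "last one out tears the barrier down" idiom is safe. A small usage sketch:

#include <uv.h>

static uv_barrier_t barrier;

static void worker(void* arg) {
  /* ... per-thread setup would go here ... */
  if (uv_barrier_wait(&barrier) > 0)
    uv_barrier_destroy(&barrier);   /* Exactly one waiter gets > 0. */
}

int main(void) {
  uv_thread_t threads[4];
  int i;

  if (uv_barrier_init(&barrier, 4) != 0)
    return 1;

  for (i = 0; i < 4; i++)
    uv_thread_create(&threads[i], worker, NULL);
  for (i = 0; i < 4; i++)
    uv_thread_join(&threads[i]);
  return 0;
}
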
diff --git a/Utilities/cmlibuv/src/unix/tty.c b/Utilities/cmlibuv/src/unix/tty.c
index 82cd723d74..8cefc150bb 100644
--- a/Utilities/cmlibuv/src/unix/tty.c
+++ b/Utilities/cmlibuv/src/unix/tty.c
@@ -239,6 +239,24 @@ static void uv__tty_make_raw(struct termios* tio) {
tio->c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
tio->c_cflag &= ~(CSIZE | PARENB);
tio->c_cflag |= CS8;
+
+ /*
+ * By default, most software expects a pending read to block until at
+ * least one byte becomes available. As per termio(7I), this requires
+ * setting the MIN and TIME parameters appropriately.
+ *
+ * As a somewhat unfortunate artifact of history, the MIN and TIME slots
+ * in the control character array overlap with the EOF and EOL slots used
+ * for canonical mode processing. Because the EOF character needs to be
+ * the ASCII EOT value (aka Control-D), it has the byte value 4. When
+ * switching to raw mode, this is interpreted as a MIN value of 4; i.e.,
+ * reads will block until at least four bytes have been input.
+ *
+ * Other platforms with a distinct MIN slot like Linux and FreeBSD appear
+ * to default to a MIN value of 1, so we'll force that value here:
+ */
+ tio->c_cc[VMIN] = 1;
+ tio->c_cc[VTIME] = 0;
#else
cfmakeraw(tio);
#endif /* #ifdef __sun */
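
The comment added above explains why raw mode needs MIN/TIME pinned explicitly on SunOS: those slots overlap VEOF/VEOL, so leaving them untouched makes reads block until four bytes arrive. A portable sketch of hand-rolled raw mode that always sets MIN=1/TIME=0, roughly what cfmakeraw() plus this fix achieves:

#include <termios.h>
#include <unistd.h>

/* Put `fd` (a tty) into raw mode: no echo, no line buffering, and reads
 * that return as soon as a single byte is available. */
static int tty_make_raw(int fd, struct termios* saved) {
  struct termios tio;

  if (tcgetattr(fd, &tio) != 0)
    return -1;
  if (saved != NULL)
    *saved = tio;                   /* Keep a copy so the caller can restore. */

  tio.c_iflag &= ~(BRKINT | ICRNL | INPCK | ISTRIP | IXON);
  tio.c_oflag &= ~OPOST;
  tio.c_lflag &= ~(ECHO | ICANON | IEXTEN | ISIG);
  tio.c_cflag &= ~(CSIZE | PARENB);
  tio.c_cflag |= CS8;

  tio.c_cc[VMIN] = 1;               /* Block until at least one byte... */
  tio.c_cc[VTIME] = 0;              /* ...with no inter-byte timer. */

  return tcsetattr(fd, TCSADRAIN, &tio);
}
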
diff --git a/Utilities/cmlibuv/src/unix/udp.c b/Utilities/cmlibuv/src/unix/udp.c
index 7d699a1675..71bfbf7f79 100644
--- a/Utilities/cmlibuv/src/unix/udp.c
+++ b/Utilities/cmlibuv/src/unix/udp.c
@@ -32,8 +32,6 @@
#endif
#include <sys/un.h>
-#define UV__UDP_DGRAM_MAXSIZE (64 * 1024)
-
#if defined(IPV6_JOIN_GROUP) && !defined(IPV6_ADD_MEMBERSHIP)
# define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP
#endif
@@ -377,8 +375,11 @@ write_queue_drain:
return;
}
+  /* Safety: npkts is known to be > 0 below, hence the cast from ssize_t
+   * to size_t is safe.
+   */
for (i = 0, q = QUEUE_HEAD(&handle->write_queue);
- i < pkts && q != &handle->write_queue;
+ i < (size_t)npkts && q != &handle->write_queue;
++i, q = QUEUE_HEAD(&handle->write_queue)) {
assert(q != NULL);
req = QUEUE_DATA(q, uv_udp_send_t, queue);
@@ -504,6 +505,28 @@ static int uv__set_reuse(int fd) {
return 0;
}
+/*
+ * The Linux kernel suppresses some ICMP error messages by default for UDP
+ * sockets. Setting IP_RECVERR/IPV6_RECVERR on the socket enables full ICMP
+ * error reporting, hopefully resulting in faster failover to working name
+ * servers.
+ */
+static int uv__set_recverr(int fd, sa_family_t ss_family) {
+#if defined(__linux__)
+ int yes;
+
+ yes = 1;
+ if (ss_family == AF_INET) {
+ if (setsockopt(fd, IPPROTO_IP, IP_RECVERR, &yes, sizeof(yes)))
+ return UV__ERR(errno);
+ } else if (ss_family == AF_INET6) {
+ if (setsockopt(fd, IPPROTO_IPV6, IPV6_RECVERR, &yes, sizeof(yes)))
+ return UV__ERR(errno);
+ }
+#endif
+ return 0;
+}
+
int uv__udp_bind(uv_udp_t* handle,
const struct sockaddr* addr,
@@ -514,7 +537,7 @@ int uv__udp_bind(uv_udp_t* handle,
int fd;
/* Check for bad flags. */
- if (flags & ~(UV_UDP_IPV6ONLY | UV_UDP_REUSEADDR))
+ if (flags & ~(UV_UDP_IPV6ONLY | UV_UDP_REUSEADDR | UV_UDP_LINUX_RECVERR))
return UV_EINVAL;
/* Cannot set IPv6-only mode on non-IPv6 socket. */
@@ -530,6 +553,12 @@ int uv__udp_bind(uv_udp_t* handle,
handle->io_watcher.fd = fd;
}
+ if (flags & UV_UDP_LINUX_RECVERR) {
+ err = uv__set_recverr(fd, addr->sa_family);
+ if (err)
+ return err;
+ }
+
if (flags & UV_UDP_REUSEADDR) {
err = uv__set_reuse(fd);
if (err)
@@ -854,7 +883,7 @@ static int uv__udp_set_membership6(uv_udp_t* handle,
#if !defined(__OpenBSD__) && \
!defined(__NetBSD__) && \
!defined(__ANDROID__) && \
- !defined(__DragonFly__) & \
+ !defined(__DragonFly__) && \
!defined(__QNX__)
static int uv__udp_set_source_membership4(uv_udp_t* handle,
const struct sockaddr_in* multicast_addr,
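
With the new UV_UDP_LINUX_RECVERR bind flag, uv__set_recverr() above turns on IP_RECVERR/IPV6_RECVERR so ICMP errors are reported back through the socket instead of being silently dropped; on non-Linux unices the helper is a no-op. A minimal bind sketch (the port number is just for illustration):

#include <uv.h>

static int bind_udp_with_recverr(uv_loop_t* loop, uv_udp_t* handle) {
  struct sockaddr_in addr;
  int r;

  r = uv_udp_init(loop, handle);
  if (r != 0)
    return r;

  r = uv_ip4_addr("0.0.0.0", 5353, &addr);   /* Port chosen for illustration. */
  if (r != 0)
    return r;

  /* UV_UDP_LINUX_RECVERR enables IP_RECVERR on Linux (see uv__set_recverr). */
  return uv_udp_bind(handle, (const struct sockaddr*) &addr, UV_UDP_LINUX_RECVERR);
}
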
diff --git a/Utilities/cmlibuv/src/uv-common.c b/Utilities/cmlibuv/src/uv-common.c
index 213c7c638b..8bce62edf6 100644
--- a/Utilities/cmlibuv/src/uv-common.c
+++ b/Utilities/cmlibuv/src/uv-common.c
@@ -834,6 +834,25 @@ void uv_loop_delete(uv_loop_t* loop) {
}
+int uv_read_start(uv_stream_t* stream,
+ uv_alloc_cb alloc_cb,
+ uv_read_cb read_cb) {
+ if (stream == NULL || alloc_cb == NULL || read_cb == NULL)
+ return UV_EINVAL;
+
+ if (stream->flags & UV_HANDLE_CLOSING)
+ return UV_EINVAL;
+
+ if (stream->flags & UV_HANDLE_READING)
+ return UV_EALREADY;
+
+ if (!(stream->flags & UV_HANDLE_READABLE))
+ return UV_ENOTCONN;
+
+ return uv__read_start(stream, alloc_cb, read_cb);
+}
+
+
void uv_os_free_environ(uv_env_item_t* envitems, int count) {
int i;
@@ -855,7 +874,11 @@ void uv_free_cpu_info(uv_cpu_info_t* cpu_infos, int count) {
}
-#ifdef __GNUC__ /* Also covers __clang__, __LCC__, and __INTEL_COMPILER. */
+/* Also covers __clang__ and __INTEL_COMPILER. Disabled on Windows because
+ * threads have already been forcibly terminated by the operating system
+ * by the time destructors run, ergo, it's not safe to try to clean them up.
+ */
+#if defined(__GNUC__) && !defined(_WIN32)
__attribute__((destructor))
#endif
void uv_library_shutdown(void) {
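
uv_read_start() now performs its argument and state checks here in uv-common.c before dispatching to the platform-specific uv__read_start(): NULL callbacks and closing handles yield UV_EINVAL, a handle that is already reading yields UV_EALREADY, and an unconnected stream yields UV_ENOTCONN. A usage sketch showing the callback pair it expects:

#include <stdlib.h>
#include <uv.h>

static void on_alloc(uv_handle_t* handle, size_t suggested, uv_buf_t* buf) {
  buf->base = malloc(suggested);
  buf->len = suggested;
}

static void on_read(uv_stream_t* stream, ssize_t nread, const uv_buf_t* buf) {
  if (nread < 0)                    /* UV_EOF or an error: stop reading. */
    uv_read_stop(stream);
  /* ... consume buf->base[0..nread) when nread > 0 ... */
  free(buf->base);
}

/* Returns 0, or UV_EINVAL / UV_EALREADY / UV_ENOTCONN per the checks above. */
static int start_reading(uv_stream_t* stream) {
  return uv_read_start(stream, on_alloc, on_read);
}
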
diff --git a/Utilities/cmlibuv/src/uv-common.h b/Utilities/cmlibuv/src/uv-common.h
index e851291cc0..8a190bf8fa 100644
--- a/Utilities/cmlibuv/src/uv-common.h
+++ b/Utilities/cmlibuv/src/uv-common.h
@@ -68,6 +68,8 @@ extern int snprintf(char*, size_t, const char*, ...);
#define uv__store_relaxed(p, v) do *p = v; while (0)
#endif
+#define UV__UDP_DGRAM_MAXSIZE (64 * 1024)
+
/* Handle flags. Some flags are specific to Windows or UNIX. */
enum {
/* Used by all handles. */
@@ -106,8 +108,7 @@ enum {
UV_HANDLE_TCP_KEEPALIVE = 0x02000000,
UV_HANDLE_TCP_SINGLE_ACCEPT = 0x04000000,
UV_HANDLE_TCP_ACCEPT_STATE_CHANGING = 0x08000000,
- UV_HANDLE_TCP_SOCKET_CLOSED = 0x10000000,
- UV_HANDLE_SHARED_TCP_SOCKET = 0x20000000,
+ UV_HANDLE_SHARED_TCP_SOCKET = 0x10000000,
/* Only used by uv_udp_t handles. */
UV_HANDLE_UDP_PROCESSING = 0x01000000,
@@ -136,6 +137,10 @@ int uv__loop_configure(uv_loop_t* loop, uv_loop_option option, va_list ap);
void uv__loop_close(uv_loop_t* loop);
+int uv__read_start(uv_stream_t* stream,
+ uv_alloc_cb alloc_cb,
+ uv_read_cb read_cb);
+
int uv__tcp_bind(uv_tcp_t* tcp,
const struct sockaddr* addr,
unsigned int addrlen,
diff --git a/Utilities/cmlibuv/src/win/error.c b/Utilities/cmlibuv/src/win/error.c
index 3ec984c83e..3a269da87a 100644
--- a/Utilities/cmlibuv/src/win/error.c
+++ b/Utilities/cmlibuv/src/win/error.c
@@ -105,7 +105,6 @@ int uv_translate_sys_error(int sys_errno) {
case ERROR_SYMLINK_NOT_SUPPORTED: return UV_EINVAL;
case WSAEINVAL: return UV_EINVAL;
case WSAEPFNOSUPPORT: return UV_EINVAL;
- case WSAESOCKTNOSUPPORT: return UV_EINVAL;
case ERROR_BEGINNING_OF_MEDIA: return UV_EIO;
case ERROR_BUS_RESET: return UV_EIO;
case ERROR_CRC: return UV_EIO;
@@ -168,6 +167,7 @@ int uv_translate_sys_error(int sys_errno) {
case ERROR_NOT_SAME_DEVICE: return UV_EXDEV;
case ERROR_INVALID_FUNCTION: return UV_EISDIR;
case ERROR_META_EXPANSION_TOO_LONG: return UV_E2BIG;
+ case WSAESOCKTNOSUPPORT: return UV_ESOCKTNOSUPPORT;
default: return UV_UNKNOWN;
}
}
diff --git a/Utilities/cmlibuv/src/win/fs-event.c b/Utilities/cmlibuv/src/win/fs-event.c
index 0126c5eded..76da077551 100644
--- a/Utilities/cmlibuv/src/win/fs-event.c
+++ b/Utilities/cmlibuv/src/win/fs-event.c
@@ -574,10 +574,10 @@ void uv_process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
handle->cb(handle, NULL, 0, uv_translate_sys_error(err));
}
- if (!(handle->flags & UV_HANDLE_CLOSING)) {
- uv_fs_event_queue_readdirchanges(loop, handle);
- } else {
+ if (handle->flags & UV_HANDLE_CLOSING) {
uv_want_endgame(loop, (uv_handle_t*)handle);
+ } else if (uv__is_active(handle)) {
+ uv_fs_event_queue_readdirchanges(loop, handle);
}
}
diff --git a/Utilities/cmlibuv/src/win/fs.c b/Utilities/cmlibuv/src/win/fs.c
index d6ff7ed291..903764144f 100644
--- a/Utilities/cmlibuv/src/win/fs.c
+++ b/Utilities/cmlibuv/src/win/fs.c
@@ -92,30 +92,24 @@
return; \
}
-#define MILLIONu (1000U * 1000U)
-#define BILLIONu (1000U * 1000U * 1000U)
+#define MILLION ((int64_t) 1000 * 1000)
+#define BILLION ((int64_t) 1000 * 1000 * 1000)
-#define FILETIME_TO_UINT(filetime) \
- (*((uint64_t*) &(filetime)) - (uint64_t) 116444736 * BILLIONu)
-
-#define FILETIME_TO_TIME_T(filetime) \
- (FILETIME_TO_UINT(filetime) / (10u * MILLIONu))
-
-#define FILETIME_TO_TIME_NS(filetime, secs) \
- ((FILETIME_TO_UINT(filetime) - (secs * (uint64_t) 10 * MILLIONu)) * 100U)
-
-#define FILETIME_TO_TIMESPEC(ts, filetime) \
- do { \
- (ts).tv_sec = (long) FILETIME_TO_TIME_T(filetime); \
- (ts).tv_nsec = (long) FILETIME_TO_TIME_NS(filetime, (ts).tv_sec); \
- } while(0)
+static void uv__filetime_to_timespec(uv_timespec_t *ts, int64_t filetime) {
+ filetime -= 116444736 * BILLION;
+ ts->tv_sec = (long) (filetime / (10 * MILLION));
+ ts->tv_nsec = (long) ((filetime - ts->tv_sec * 10 * MILLION) * 100U);
+ if (ts->tv_nsec < 0) {
+ ts->tv_sec -= 1;
+ ts->tv_nsec += 1e9;
+ }
+}
#define TIME_T_TO_FILETIME(time, filetime_ptr) \
do { \
- uint64_t bigtime = ((uint64_t) ((time) * (uint64_t) 10 * MILLIONu)) + \
- (uint64_t) 116444736 * BILLIONu; \
- (filetime_ptr)->dwLowDateTime = bigtime & 0xFFFFFFFF; \
- (filetime_ptr)->dwHighDateTime = bigtime >> 32; \
+ int64_t bigtime = ((time) * 10 * MILLION + 116444736 * BILLION); \
+ (filetime_ptr)->dwLowDateTime = (uint64_t) bigtime & 0xFFFFFFFF; \
+ (filetime_ptr)->dwHighDateTime = (uint64_t) bigtime >> 32; \
} while(0)
#define IS_SLASH(c) ((c) == L'\\' || (c) == L'/')
@@ -764,7 +758,7 @@ void fs__read_filemap(uv_fs_t* req, struct uv__fd_info_s* fd_info) {
void* view;
if (rw_flags == UV_FS_O_WRONLY) {
- SET_REQ_WIN32_ERROR(req, ERROR_ACCESS_DENIED);
+ SET_REQ_WIN32_ERROR(req, ERROR_INVALID_FLAGS);
return;
}
if (fd_info->is_directory) {
@@ -918,6 +912,11 @@ void fs__read(uv_fs_t* req) {
SET_REQ_RESULT(req, bytes);
} else {
error = GetLastError();
+
+ if (error == ERROR_ACCESS_DENIED) {
+ error = ERROR_INVALID_FLAGS;
+ }
+
if (error == ERROR_HANDLE_EOF) {
SET_REQ_RESULT(req, bytes);
} else {
@@ -942,7 +941,7 @@ void fs__write_filemap(uv_fs_t* req, HANDLE file,
FILETIME ft;
if (rw_flags == UV_FS_O_RDONLY) {
- SET_REQ_WIN32_ERROR(req, ERROR_ACCESS_DENIED);
+ SET_REQ_WIN32_ERROR(req, ERROR_INVALID_FLAGS);
return;
}
if (fd_info->is_directory) {
@@ -1058,6 +1057,7 @@ void fs__write(uv_fs_t* req) {
OVERLAPPED overlapped, *overlapped_ptr;
LARGE_INTEGER offset_;
DWORD bytes;
+ DWORD error;
int result;
unsigned int index;
LARGE_INTEGER original_position;
@@ -1117,7 +1117,13 @@ void fs__write(uv_fs_t* req) {
if (result || bytes > 0) {
SET_REQ_RESULT(req, bytes);
} else {
- SET_REQ_WIN32_ERROR(req, GetLastError());
+ error = GetLastError();
+
+ if (error == ERROR_ACCESS_DENIED) {
+ error = ERROR_INVALID_FLAGS;
+ }
+
+ SET_REQ_WIN32_ERROR(req, error);
}
}
@@ -1224,7 +1230,8 @@ void fs__mkdir(uv_fs_t* req) {
SET_REQ_RESULT(req, 0);
} else {
SET_REQ_WIN32_ERROR(req, GetLastError());
- if (req->sys_errno_ == ERROR_INVALID_NAME)
+ if (req->sys_errno_ == ERROR_INVALID_NAME ||
+ req->sys_errno_ == ERROR_DIRECTORY)
req->result = UV_EINVAL;
}
}
@@ -1791,10 +1798,14 @@ INLINE static int fs__stat_handle(HANDLE handle, uv_stat_t* statbuf,
statbuf->st_mode |= (_S_IREAD | _S_IWRITE) | ((_S_IREAD | _S_IWRITE) >> 3) |
((_S_IREAD | _S_IWRITE) >> 6);
- FILETIME_TO_TIMESPEC(statbuf->st_atim, file_info.BasicInformation.LastAccessTime);
- FILETIME_TO_TIMESPEC(statbuf->st_ctim, file_info.BasicInformation.ChangeTime);
- FILETIME_TO_TIMESPEC(statbuf->st_mtim, file_info.BasicInformation.LastWriteTime);
- FILETIME_TO_TIMESPEC(statbuf->st_birthtim, file_info.BasicInformation.CreationTime);
+ uv__filetime_to_timespec(&statbuf->st_atim,
+ file_info.BasicInformation.LastAccessTime.QuadPart);
+ uv__filetime_to_timespec(&statbuf->st_ctim,
+ file_info.BasicInformation.ChangeTime.QuadPart);
+ uv__filetime_to_timespec(&statbuf->st_mtim,
+ file_info.BasicInformation.LastWriteTime.QuadPart);
+ uv__filetime_to_timespec(&statbuf->st_birthtim,
+ file_info.BasicInformation.CreationTime.QuadPart);
statbuf->st_ino = file_info.InternalInformation.IndexNumber.QuadPart;
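
For reference, the same epoch arithmetic as the new uv__filetime_to_timespec() helper, written as a standalone sketch with a quick self-check. FILETIME counts 100-nanosecond ticks since 1601-01-01, and 116444736 * 10^9 ticks separate that epoch from the Unix epoch; the timespec_like struct and constants are local to this example:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct timespec_like { long tv_sec; long tv_nsec; };

/* Mirrors uv__filetime_to_timespec(): subtract the 1601->1970 offset,
 * split into whole seconds and 100-ns remainders, then normalize so
 * tv_nsec stays in [0, 1e9) for pre-1970 timestamps. */
static void filetime_to_timespec(struct timespec_like* ts, int64_t filetime) {
  const int64_t kEpochDelta = INT64_C(116444736) * 1000000000;
  filetime -= kEpochDelta;
  ts->tv_sec = (long) (filetime / 10000000);
  ts->tv_nsec = (long) ((filetime % 10000000) * 100);
  if (ts->tv_nsec < 0) {
    ts->tv_sec -= 1;
    ts->tv_nsec += 1000000000;
  }
}

int main(void) {
  struct timespec_like ts;
  /* 1970-01-01T00:00:01.5Z expressed as FILETIME ticks. */
  filetime_to_timespec(&ts, INT64_C(116444736) * 1000000000 + 15000000);
  assert(ts.tv_sec == 1 && ts.tv_nsec == 500000000);
  printf("%ld.%09ld\n", ts.tv_sec, ts.tv_nsec);
  return 0;
}
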
diff --git a/Utilities/cmlibuv/src/win/internal.h b/Utilities/cmlibuv/src/win/internal.h
index 3f56777c61..7e1aef444a 100644
--- a/Utilities/cmlibuv/src/win/internal.h
+++ b/Utilities/cmlibuv/src/win/internal.h
@@ -119,8 +119,8 @@ void uv_udp_endgame(uv_loop_t* loop, uv_udp_t* handle);
/*
* Pipes
*/
-int uv_stdio_pipe_server(uv_loop_t* loop, uv_pipe_t* handle, DWORD access,
- char* name, size_t nameSize);
+int uv__create_stdio_pipe_pair(uv_loop_t* loop,
+ uv_pipe_t* parent_pipe, HANDLE* child_pipe_ptr, unsigned int flags);
int uv_pipe_listen(uv_pipe_t* handle, int backlog, uv_connection_cb cb);
int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client);
diff --git a/Utilities/cmlibuv/src/win/pipe.c b/Utilities/cmlibuv/src/win/pipe.c
index f81245ec60..912aed9b62 100644
--- a/Utilities/cmlibuv/src/win/pipe.c
+++ b/Utilities/cmlibuv/src/win/pipe.c
@@ -202,17 +202,17 @@ static void close_pipe(uv_pipe_t* pipe) {
}
-int uv_stdio_pipe_server(uv_loop_t* loop, uv_pipe_t* handle, DWORD access,
- char* name, size_t nameSize) {
+static int uv__pipe_server(
+ HANDLE* pipeHandle_ptr, DWORD access,
+ char* name, size_t nameSize, char* random) {
HANDLE pipeHandle;
int err;
- char* ptr = (char*)handle;
for (;;) {
- uv_unique_pipe_name(ptr, name, nameSize);
+ uv_unique_pipe_name(random, name, nameSize);
pipeHandle = CreateNamedPipeA(name,
- access | FILE_FLAG_OVERLAPPED | FILE_FLAG_FIRST_PIPE_INSTANCE | WRITE_DAC,
+ access | FILE_FLAG_FIRST_PIPE_INSTANCE,
PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT, 1, 65536, 65536, 0,
NULL);
@@ -226,26 +226,225 @@ int uv_stdio_pipe_server(uv_loop_t* loop, uv_pipe_t* handle, DWORD access,
goto error;
}
- /* Pipe name collision. Increment the pointer and try again. */
- ptr++;
+ /* Pipe name collision. Increment the random number and try again. */
+ random++;
}
- if (CreateIoCompletionPort(pipeHandle,
+ *pipeHandle_ptr = pipeHandle;
+
+ return 0;
+
+ error:
+ if (pipeHandle != INVALID_HANDLE_VALUE)
+ CloseHandle(pipeHandle);
+
+ return err;
+}
+
+
+static int uv__create_pipe_pair(
+ HANDLE* server_pipe_ptr, HANDLE* client_pipe_ptr,
+ unsigned int server_flags, unsigned int client_flags,
+ int inherit_client, char* random) {
+ /* allowed flags are: UV_READABLE_PIPE | UV_WRITABLE_PIPE | UV_NONBLOCK_PIPE */
+ char pipe_name[64];
+ SECURITY_ATTRIBUTES sa;
+ DWORD server_access;
+ DWORD client_access;
+ HANDLE server_pipe;
+ HANDLE client_pipe;
+ int err;
+
+ server_pipe = INVALID_HANDLE_VALUE;
+ client_pipe = INVALID_HANDLE_VALUE;
+
+ server_access = 0;
+ if (server_flags & UV_READABLE_PIPE)
+ server_access |= PIPE_ACCESS_INBOUND;
+ if (server_flags & UV_WRITABLE_PIPE)
+ server_access |= PIPE_ACCESS_OUTBOUND;
+ if (server_flags & UV_NONBLOCK_PIPE)
+ server_access |= FILE_FLAG_OVERLAPPED;
+ server_access |= WRITE_DAC;
+
+ client_access = 0;
+ if (client_flags & UV_READABLE_PIPE)
+ client_access |= GENERIC_READ;
+ else
+ client_access |= FILE_READ_ATTRIBUTES;
+ if (client_flags & UV_WRITABLE_PIPE)
+ client_access |= GENERIC_WRITE;
+ else
+ client_access |= FILE_WRITE_ATTRIBUTES;
+ client_access |= WRITE_DAC;
+
+ /* Create server pipe handle. */
+ err = uv__pipe_server(&server_pipe,
+ server_access,
+ pipe_name,
+ sizeof(pipe_name),
+ random);
+ if (err)
+ goto error;
+
+ /* Create client pipe handle. */
+ sa.nLength = sizeof sa;
+ sa.lpSecurityDescriptor = NULL;
+ sa.bInheritHandle = inherit_client;
+
+ client_pipe = CreateFileA(pipe_name,
+ client_access,
+ 0,
+ &sa,
+ OPEN_EXISTING,
+ (client_flags & UV_NONBLOCK_PIPE) ? FILE_FLAG_OVERLAPPED : 0,
+ NULL);
+ if (client_pipe == INVALID_HANDLE_VALUE) {
+ err = GetLastError();
+ goto error;
+ }
+
+#ifndef NDEBUG
+ /* Validate that the pipe was opened in the right mode. */
+ {
+ DWORD mode;
+ BOOL r;
+ r = GetNamedPipeHandleState(client_pipe, &mode, NULL, NULL, NULL, NULL, 0);
+ if (r == TRUE) {
+ assert(mode == (PIPE_READMODE_BYTE | PIPE_WAIT));
+ } else {
+ fprintf(stderr, "libuv assertion failure: GetNamedPipeHandleState failed\n");
+ }
+ }
+#endif
+
+ /* Do a blocking ConnectNamedPipe. This should not block because we have
+ * both ends of the pipe created. */
+ if (!ConnectNamedPipe(server_pipe, NULL)) {
+ if (GetLastError() != ERROR_PIPE_CONNECTED) {
+ err = GetLastError();
+ goto error;
+ }
+ }
+
+ *client_pipe_ptr = client_pipe;
+ *server_pipe_ptr = server_pipe;
+ return 0;
+
+ error:
+ if (server_pipe != INVALID_HANDLE_VALUE)
+ CloseHandle(server_pipe);
+
+ if (client_pipe != INVALID_HANDLE_VALUE)
+ CloseHandle(client_pipe);
+
+ return err;
+}
+
+
+int uv_pipe(uv_file fds[2], int read_flags, int write_flags) {
+ uv_file temp[2];
+ int err;
+ HANDLE readh;
+ HANDLE writeh;
+
+ /* Make the server side the inbound (read) end, */
+ /* so that both ends will have FILE_READ_ATTRIBUTES permission. */
+ /* TODO: better source of local randomness than &fds? */
+ read_flags |= UV_READABLE_PIPE;
+ write_flags |= UV_WRITABLE_PIPE;
+ err = uv__create_pipe_pair(&readh, &writeh, read_flags, write_flags, 0, (char*) &fds[0]);
+ if (err != 0)
+ return err;
+ temp[0] = _open_osfhandle((intptr_t) readh, 0);
+ if (temp[0] == -1) {
+ if (errno == UV_EMFILE)
+ err = UV_EMFILE;
+ else
+ err = UV_UNKNOWN;
+ CloseHandle(readh);
+ CloseHandle(writeh);
+ return err;
+ }
+ temp[1] = _open_osfhandle((intptr_t) writeh, 0);
+ if (temp[1] == -1) {
+ if (errno == UV_EMFILE)
+ err = UV_EMFILE;
+ else
+ err = UV_UNKNOWN;
+ _close(temp[0]);
+ CloseHandle(writeh);
+ return err;
+ }
+ fds[0] = temp[0];
+ fds[1] = temp[1];
+ return 0;
+}
+
+
+int uv__create_stdio_pipe_pair(uv_loop_t* loop,
+ uv_pipe_t* parent_pipe, HANDLE* child_pipe_ptr, unsigned int flags) {
+ /* The parent_pipe is always the server_pipe and kept by libuv.
+ * The child_pipe is always the client_pipe and is passed to the child.
+ * The flags are specified with respect to their usage in the child. */
+ HANDLE server_pipe;
+ HANDLE client_pipe;
+ unsigned int server_flags;
+ unsigned int client_flags;
+ int err;
+
+ server_pipe = INVALID_HANDLE_VALUE;
+ client_pipe = INVALID_HANDLE_VALUE;
+
+ server_flags = 0;
+ client_flags = 0;
+ if (flags & UV_READABLE_PIPE) {
+ /* The server needs inbound (read) access too, otherwise CreateNamedPipe()
+   * won't give us the FILE_READ_ATTRIBUTES permission. We need that to probe
+   * the state of the write buffer when we're trying to shut down the pipe. */
+ server_flags |= UV_READABLE_PIPE | UV_WRITABLE_PIPE;
+ client_flags |= UV_READABLE_PIPE;
+ }
+ if (flags & UV_WRITABLE_PIPE) {
+ server_flags |= UV_READABLE_PIPE;
+ client_flags |= UV_WRITABLE_PIPE;
+ }
+ server_flags |= UV_NONBLOCK_PIPE;
+ if (flags & UV_NONBLOCK_PIPE || parent_pipe->ipc) {
+ client_flags |= UV_NONBLOCK_PIPE;
+ }
+
+ err = uv__create_pipe_pair(&server_pipe, &client_pipe,
+ server_flags, client_flags, 1, (char*) server_pipe);
+ if (err)
+ goto error;
+
+ if (CreateIoCompletionPort(server_pipe,
loop->iocp,
- (ULONG_PTR)handle,
+ (ULONG_PTR) parent_pipe,
0) == NULL) {
err = GetLastError();
goto error;
}
- uv_pipe_connection_init(handle);
- handle->handle = pipeHandle;
+ uv_pipe_connection_init(parent_pipe);
+ parent_pipe->handle = server_pipe;
+ *child_pipe_ptr = client_pipe;
+
+ /* The server end is now readable and/or writable. */
+ if (flags & UV_READABLE_PIPE)
+ parent_pipe->flags |= UV_HANDLE_WRITABLE;
+ if (flags & UV_WRITABLE_PIPE)
+ parent_pipe->flags |= UV_HANDLE_READABLE;
return 0;
error:
- if (pipeHandle != INVALID_HANDLE_VALUE)
- CloseHandle(pipeHandle);
+ if (server_pipe != INVALID_HANDLE_VALUE)
+ CloseHandle(server_pipe);
+
+ if (client_pipe != INVALID_HANDLE_VALUE)
+ CloseHandle(client_pipe);
return err;
}
@@ -712,9 +911,8 @@ error:
handle->name = NULL;
}
- if (pipeHandle != INVALID_HANDLE_VALUE) {
+ if (pipeHandle != INVALID_HANDLE_VALUE)
CloseHandle(pipeHandle);
- }
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, err);
@@ -1054,7 +1252,6 @@ static DWORD WINAPI uv_pipe_writefile_thread_proc(void* parameter) {
assert(req != NULL);
assert(req->type == UV_WRITE);
assert(handle->type == UV_NAMED_PIPE);
- assert(req->write_buffer.base);
result = WriteFile(handle->handle,
req->write_buffer.base,
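
The new public uv_pipe() builds on uv__create_pipe_pair() above and hands back a pair of CRT file descriptors. A minimal usage sketch; make_pipe() is an illustrative helper, and the write end is left for the caller to manage:

#include <uv.h>

int make_pipe(uv_loop_t* loop, uv_pipe_t* reader, uv_file* write_fd) {
  uv_file fds[2];
  int rc;

  /* Request both ends overlapped/non-blocking so either can join a loop. */
  rc = uv_pipe(fds, UV_NONBLOCK_PIPE, UV_NONBLOCK_PIPE);
  if (rc != 0)
    return rc;

  rc = uv_pipe_init(loop, reader, 0);
  if (rc == 0)
    rc = uv_pipe_open(reader, fds[0]);   /* reader now owns fds[0] */
  *write_fd = fds[1];
  return rc;
}
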
diff --git a/Utilities/cmlibuv/src/win/poll.c b/Utilities/cmlibuv/src/win/poll.c
index 87858590c8..9d37759606 100644
--- a/Utilities/cmlibuv/src/win/poll.c
+++ b/Utilities/cmlibuv/src/win/poll.c
@@ -488,7 +488,8 @@ static int uv__poll_set(uv_poll_t* handle, int events, uv_poll_cb cb) {
assert(handle->type == UV_POLL);
assert(!(handle->flags & UV_HANDLE_CLOSING));
- assert((events & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT)) == 0);
+ assert((events & ~(UV_READABLE | UV_WRITABLE | UV_DISCONNECT |
+ UV_PRIORITIZED)) == 0);
handle->events = events;
handle->poll_cb = cb;
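
The relaxed assertion admits UV_PRIORITIZED (out-of-band data) in the requested event mask on Windows poll handles. A small caller sketch; watch_socket() and on_poll() are illustrative:

#include <uv.h>

static void on_poll(uv_poll_t* handle, int status, int events) {
  if (status < 0)
    return;
  if (events & UV_PRIORITIZED) {
    /* urgent/OOB data is available on the socket */
  }
  if (events & UV_READABLE) {
    /* regular data is available */
  }
}

int watch_socket(uv_loop_t* loop, uv_poll_t* watcher, uv_os_sock_t sock) {
  int rc = uv_poll_init_socket(loop, watcher, sock);
  if (rc == 0)
    rc = uv_poll_start(watcher, UV_READABLE | UV_PRIORITIZED, on_poll);
  return rc;
}
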
diff --git a/Utilities/cmlibuv/src/win/process-stdio.c b/Utilities/cmlibuv/src/win/process-stdio.c
index 355d618808..0db3572373 100644
--- a/Utilities/cmlibuv/src/win/process-stdio.c
+++ b/Utilities/cmlibuv/src/win/process-stdio.c
@@ -95,102 +95,6 @@ void uv_disable_stdio_inheritance(void) {
}
-static int uv__create_stdio_pipe_pair(uv_loop_t* loop,
- uv_pipe_t* server_pipe, HANDLE* child_pipe_ptr, unsigned int flags) {
- char pipe_name[64];
- SECURITY_ATTRIBUTES sa;
- DWORD server_access = 0;
- DWORD client_access = 0;
- HANDLE child_pipe = INVALID_HANDLE_VALUE;
- int err;
- int overlap;
-
- if (flags & UV_READABLE_PIPE) {
- /* The server needs inbound access too, otherwise CreateNamedPipe() won't
- * give us the FILE_READ_ATTRIBUTES permission. We need that to probe the
- * state of the write buffer when we're trying to shutdown the pipe. */
- server_access |= PIPE_ACCESS_OUTBOUND | PIPE_ACCESS_INBOUND;
- client_access |= GENERIC_READ | FILE_WRITE_ATTRIBUTES;
- }
- if (flags & UV_WRITABLE_PIPE) {
- server_access |= PIPE_ACCESS_INBOUND;
- client_access |= GENERIC_WRITE | FILE_READ_ATTRIBUTES;
- }
-
- /* Create server pipe handle. */
- err = uv_stdio_pipe_server(loop,
- server_pipe,
- server_access,
- pipe_name,
- sizeof(pipe_name));
- if (err)
- goto error;
-
- /* Create child pipe handle. */
- sa.nLength = sizeof sa;
- sa.lpSecurityDescriptor = NULL;
- sa.bInheritHandle = TRUE;
-
- overlap = server_pipe->ipc || (flags & UV_OVERLAPPED_PIPE);
- child_pipe = CreateFileA(pipe_name,
- client_access,
- 0,
- &sa,
- OPEN_EXISTING,
- overlap ? FILE_FLAG_OVERLAPPED : 0,
- NULL);
- if (child_pipe == INVALID_HANDLE_VALUE) {
- err = GetLastError();
- goto error;
- }
-
-#ifndef NDEBUG
- /* Validate that the pipe was opened in the right mode. */
- {
- DWORD mode;
- BOOL r = GetNamedPipeHandleState(child_pipe,
- &mode,
- NULL,
- NULL,
- NULL,
- NULL,
- 0);
- assert(r == TRUE);
- assert(mode == (PIPE_READMODE_BYTE | PIPE_WAIT));
- }
-#endif
-
- /* Do a blocking ConnectNamedPipe. This should not block because we have both
- * ends of the pipe created. */
- if (!ConnectNamedPipe(server_pipe->handle, NULL)) {
- if (GetLastError() != ERROR_PIPE_CONNECTED) {
- err = GetLastError();
- goto error;
- }
- }
-
- /* The server end is now readable and/or writable. */
- if (flags & UV_READABLE_PIPE)
- server_pipe->flags |= UV_HANDLE_WRITABLE;
- if (flags & UV_WRITABLE_PIPE)
- server_pipe->flags |= UV_HANDLE_READABLE;
-
- *child_pipe_ptr = child_pipe;
- return 0;
-
- error:
- if (server_pipe->handle != INVALID_HANDLE_VALUE) {
- uv_pipe_cleanup(loop, server_pipe);
- }
-
- if (child_pipe != INVALID_HANDLE_VALUE) {
- CloseHandle(child_pipe);
- }
-
- return err;
-}
-
-
static int uv__duplicate_handle(uv_loop_t* loop, HANDLE handle, HANDLE* dup) {
HANDLE current_process;
diff --git a/Utilities/cmlibuv/src/win/process.c b/Utilities/cmlibuv/src/win/process.c
index aada889e16..9ca4122caa 100644
--- a/Utilities/cmlibuv/src/win/process.c
+++ b/Utilities/cmlibuv/src/win/process.c
@@ -169,10 +169,9 @@ static WCHAR* search_path_join_test(const WCHAR* dir,
size_t cwd_len) {
WCHAR *result, *result_pos;
DWORD attrs;
- if (
- (dir_len > 2 && dir[0] == L'\\' && dir[1] == L'\\') ||
- (dir_len > 2 && dir[0] == L'/' && dir[1] == L'/')
- ) {
+ if (dir_len > 2 &&
+ ((dir[0] == L'\\' || dir[0] == L'/') &&
+ (dir[1] == L'\\' || dir[1] == L'/'))) {
/* It's a UNC path so ignore cwd */
cwd_len = 0;
} else if (dir_len >= 1 && (dir[0] == L'/' || dir[0] == L'\\')) {
@@ -645,7 +644,7 @@ int env_strncmp(const wchar_t* a, int na, const wchar_t* b) {
assert(r==nb);
B[nb] = L'\0';
- while (1) {
+ for (;;) {
wchar_t AA = *A++;
wchar_t BB = *B++;
if (AA < BB) {
diff --git a/Utilities/cmlibuv/src/win/stream.c b/Utilities/cmlibuv/src/win/stream.c
index 46a0709a38..abf477f644 100644
--- a/Utilities/cmlibuv/src/win/stream.c
+++ b/Utilities/cmlibuv/src/win/stream.c
@@ -65,18 +65,11 @@ int uv_accept(uv_stream_t* server, uv_stream_t* client) {
}
-int uv_read_start(uv_stream_t* handle, uv_alloc_cb alloc_cb,
- uv_read_cb read_cb) {
+int uv__read_start(uv_stream_t* handle,
+ uv_alloc_cb alloc_cb,
+ uv_read_cb read_cb) {
int err;
- if (handle->flags & UV_HANDLE_READING) {
- return UV_EALREADY;
- }
-
- if (!(handle->flags & UV_HANDLE_READABLE)) {
- return UV_ENOTCONN;
- }
-
err = ERROR_INVALID_PARAMETER;
switch (handle->type) {
case UV_TCP:
@@ -195,6 +188,16 @@ int uv_try_write(uv_stream_t* stream,
}
+int uv_try_write2(uv_stream_t* stream,
+ const uv_buf_t bufs[],
+ unsigned int nbufs,
+ uv_stream_t* send_handle) {
+ if (send_handle != NULL)
+ return UV_EAGAIN;
+ return uv_try_write(stream, bufs, nbufs);
+}
+
+
int uv_shutdown(uv_shutdown_t* req, uv_stream_t* handle, uv_shutdown_cb cb) {
uv_loop_t* loop = handle->loop;
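
uv_try_write2() is new here; the Windows stub refuses to carry a handle by returning UV_EAGAIN, so callers fall back to a queued uv_write2(). A hedged sketch of that pattern (send_with_handle() and write_cb() are illustrative, and partial synchronous writes are not handled):

#include <stdlib.h>
#include <uv.h>

static void write_cb(uv_write_t* req, int status) {
  free(req);
}

int send_with_handle(uv_stream_t* stream, uv_buf_t buf,
                     uv_stream_t* send_handle) {
  int rc = uv_try_write2(stream, &buf, 1, send_handle);
  if (rc >= 0)
    return 0;                          /* rc bytes were written synchronously */
  if (rc == UV_EAGAIN) {
    uv_write_t* req = malloc(sizeof(*req));
    if (req == NULL)
      return UV_ENOMEM;
    return uv_write2(req, stream, &buf, 1, send_handle, write_cb);
  }
  return rc;
}
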
diff --git a/Utilities/cmlibuv/src/win/tcp.c b/Utilities/cmlibuv/src/win/tcp.c
index 0dcaa97df7..cf2dbd85fd 100644
--- a/Utilities/cmlibuv/src/win/tcp.c
+++ b/Utilities/cmlibuv/src/win/tcp.c
@@ -236,12 +236,7 @@ void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
if (handle->flags & UV_HANDLE_CLOSING &&
handle->reqs_pending == 0) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
-
- if (!(handle->flags & UV_HANDLE_TCP_SOCKET_CLOSED)) {
- closesocket(handle->socket);
- handle->socket = INVALID_SOCKET;
- handle->flags |= UV_HANDLE_TCP_SOCKET_CLOSED;
- }
+ assert(handle->socket == INVALID_SOCKET);
if (!(handle->flags & UV_HANDLE_CONNECTION) && handle->tcp.serv.accept_reqs) {
if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
@@ -599,6 +594,7 @@ int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
}
}
+ /* If this flag is set, we already made this listen call in xfer. */
if (!(handle->flags & UV_HANDLE_SHARED_TCP_SOCKET) &&
listen(handle->socket, backlog) == SOCKET_ERROR) {
return WSAGetLastError();
@@ -769,7 +765,7 @@ static int uv__is_loopback(const struct sockaddr_storage* storage) {
}
// Check if Windows version is 10.0.16299 or later
-static int uv__is_fast_loopback_fail_supported() {
+static int uv__is_fast_loopback_fail_supported(void) {
OSVERSIONINFOW os_info;
if (!pRtlGetVersion)
return 0;
@@ -800,9 +796,8 @@ static int uv_tcp_try_connect(uv_connect_t* req,
if (err)
return err;
- if (handle->delayed_error) {
- return handle->delayed_error;
- }
+ if (handle->delayed_error != 0)
+ goto out;
if (!(handle->flags & UV_HANDLE_BOUND)) {
if (addrlen == sizeof(uv_addr_ip4_any_)) {
@@ -815,8 +810,8 @@ static int uv_tcp_try_connect(uv_connect_t* req,
err = uv_tcp_try_bind(handle, bind_addr, addrlen, 0);
if (err)
return err;
- if (handle->delayed_error)
- return handle->delayed_error;
+ if (handle->delayed_error != 0)
+ goto out;
}
if (!handle->tcp.conn.func_connectex) {
@@ -844,11 +839,21 @@ static int uv_tcp_try_connect(uv_connect_t* req,
NULL);
}
+out:
+
UV_REQ_INIT(req, UV_CONNECT);
req->handle = (uv_stream_t*) handle;
req->cb = cb;
memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
+ if (handle->delayed_error != 0) {
+ /* Process the req without IOCP. */
+ handle->reqs_pending++;
+ REGISTER_HANDLE_REQ(loop, handle, req);
+ uv_insert_pending_req(loop, (uv_req_t*)req);
+ return 0;
+ }
+
success = handle->tcp.conn.func_connectex(handle->socket,
(const struct sockaddr*) &converted,
addrlen,
@@ -1015,6 +1020,7 @@ void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle,
*/
err = WSAECONNRESET;
}
+ handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
handle->read_cb((uv_stream_t*)handle,
uv_translate_sys_error(err),
@@ -1096,6 +1102,7 @@ void uv_process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle,
* Unix. */
err = WSAECONNRESET;
}
+ handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
handle->read_cb((uv_stream_t*)handle,
uv_translate_sys_error(err),
@@ -1149,9 +1156,14 @@ void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
}
handle->stream.conn.write_reqs_pending--;
- if (handle->stream.conn.shutdown_req != NULL &&
- handle->stream.conn.write_reqs_pending == 0) {
- uv_want_endgame(loop, (uv_handle_t*)handle);
+ if (handle->stream.conn.write_reqs_pending == 0) {
+ if (handle->flags & UV_HANDLE_CLOSING) {
+ closesocket(handle->socket);
+ handle->socket = INVALID_SOCKET;
+ }
+ if (handle->stream.conn.shutdown_req != NULL) {
+ uv_want_endgame(loop, (uv_handle_t*)handle);
+ }
}
DECREASE_PENDING_REQ_COUNT(handle);
@@ -1215,7 +1227,14 @@ void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
UNREGISTER_HANDLE_REQ(loop, handle, req);
err = 0;
- if (REQ_SUCCESS(req)) {
+ if (handle->delayed_error) {
+    /* To smooth over the differences between unixes, errors that
+     * were reported synchronously on the first connect can be delayed
+     * until the next tick, which is now.
+     */
+ err = handle->delayed_error;
+ handle->delayed_error = 0;
+ } else if (REQ_SUCCESS(req)) {
if (handle->flags & UV_HANDLE_CLOSING) {
/* use UV_ECANCELED for consistency with Unix */
err = ERROR_OPERATION_ABORTED;
@@ -1320,7 +1339,7 @@ int uv_tcp_nodelay(uv_tcp_t* handle, int enable) {
if (handle->socket != INVALID_SOCKET) {
err = uv__tcp_nodelay(handle, handle->socket, enable);
if (err)
- return err;
+ return uv_translate_sys_error(err);
}
if (enable) {
@@ -1339,7 +1358,7 @@ int uv_tcp_keepalive(uv_tcp_t* handle, int enable, unsigned int delay) {
if (handle->socket != INVALID_SOCKET) {
err = uv__tcp_keepalive(handle, handle->socket, enable, delay);
if (err)
- return err;
+ return uv_translate_sys_error(err);
}
if (enable) {
@@ -1386,9 +1405,24 @@ int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
}
-static int uv_tcp_try_cancel_io(uv_tcp_t* tcp) {
- SOCKET socket = tcp->socket;
+static void uv_tcp_try_cancel_reqs(uv_tcp_t* tcp) {
+ SOCKET socket;
int non_ifs_lsp;
+ int reading;
+ int writing;
+
+ socket = tcp->socket;
+ reading = tcp->flags & UV_HANDLE_READING;
+ writing = tcp->stream.conn.write_reqs_pending > 0;
+ if (!reading && !writing)
+ return;
+
+ /* TODO: in libuv v2, keep explicit track of write_reqs, so we can cancel
+ * them each explicitly with CancelIoEx (like unix). */
+ if (reading)
+ CancelIoEx((HANDLE) socket, &tcp->read_req.u.io.overlapped);
+ if (writing)
+ CancelIo((HANDLE) socket);
/* Check if we have any non-IFS LSPs stacked on top of TCP */
non_ifs_lsp = (tcp->flags & UV_HANDLE_IPV6) ? uv_tcp_non_ifs_lsp_ipv6 :
@@ -1408,71 +1442,41 @@ static int uv_tcp_try_cancel_io(uv_tcp_t* tcp) {
NULL,
NULL) != 0) {
/* Failed. We can't do CancelIo. */
- return -1;
+ return;
}
}
assert(socket != 0 && socket != INVALID_SOCKET);
- if (!CancelIo((HANDLE) socket)) {
- return GetLastError();
+ if (socket != tcp->socket) {
+ if (reading)
+ CancelIoEx((HANDLE) socket, &tcp->read_req.u.io.overlapped);
+ if (writing)
+ CancelIo((HANDLE) socket);
}
-
- /* It worked. */
- return 0;
}
void uv_tcp_close(uv_loop_t* loop, uv_tcp_t* tcp) {
- int close_socket = 1;
-
- if (tcp->flags & UV_HANDLE_READ_PENDING) {
- /* In order for winsock to do a graceful close there must not be any any
- * pending reads, or the socket must be shut down for writing */
- if (!(tcp->flags & UV_HANDLE_SHARED_TCP_SOCKET)) {
- /* Just do shutdown on non-shared sockets, which ensures graceful close. */
- shutdown(tcp->socket, SD_SEND);
-
- } else if (uv_tcp_try_cancel_io(tcp) == 0) {
- /* In case of a shared socket, we try to cancel all outstanding I/O,. If
- * that works, don't close the socket yet - wait for the read req to
- * return and close the socket in uv_tcp_endgame. */
- close_socket = 0;
-
- } else {
- /* When cancelling isn't possible - which could happen when an LSP is
- * present on an old Windows version, we will have to close the socket
- * with a read pending. That is not nice because trailing sent bytes may
- * not make it to the other side. */
+ if (tcp->flags & UV_HANDLE_CONNECTION) {
+ uv_tcp_try_cancel_reqs(tcp);
+ if (tcp->flags & UV_HANDLE_READING) {
+ uv_read_stop((uv_stream_t*) tcp);
}
-
- } else if ((tcp->flags & UV_HANDLE_SHARED_TCP_SOCKET) &&
- tcp->tcp.serv.accept_reqs != NULL) {
- /* Under normal circumstances closesocket() will ensure that all pending
- * accept reqs are canceled. However, when the socket is shared the
- * presence of another reference to the socket in another process will keep
- * the accept reqs going, so we have to ensure that these are canceled. */
- if (uv_tcp_try_cancel_io(tcp) != 0) {
- /* When cancellation is not possible, there is another option: we can
- * close the incoming sockets, which will also cancel the accept
- * operations. However this is not cool because we might inadvertently
- * close a socket that just accepted a new connection, which will cause
- * the connection to be aborted. */
+ } else {
+ if (tcp->tcp.serv.accept_reqs != NULL) {
+ /* First close the incoming sockets to cancel the accept operations before
+ * we free their resources. */
unsigned int i;
for (i = 0; i < uv_simultaneous_server_accepts; i++) {
uv_tcp_accept_t* req = &tcp->tcp.serv.accept_reqs[i];
- if (req->accept_socket != INVALID_SOCKET &&
- !HasOverlappedIoCompleted(&req->u.io.overlapped)) {
+ if (req->accept_socket != INVALID_SOCKET) {
closesocket(req->accept_socket);
req->accept_socket = INVALID_SOCKET;
}
}
}
- }
-
- if (tcp->flags & UV_HANDLE_READING) {
- tcp->flags &= ~UV_HANDLE_READING;
- DECREASE_ACTIVE_COUNT(loop, tcp);
+ assert(!(tcp->flags & UV_HANDLE_READING));
}
if (tcp->flags & UV_HANDLE_LISTENING) {
@@ -1480,10 +1484,15 @@ void uv_tcp_close(uv_loop_t* loop, uv_tcp_t* tcp) {
DECREASE_ACTIVE_COUNT(loop, tcp);
}
- if (close_socket) {
+ /* If any overlapped req failed to cancel, calling `closesocket` now would
+ * cause Win32 to send an RST packet. Try to avoid that for writes, if
+ * possibly applicable, by waiting to process the completion notifications
+ * first (which typically should be cancellations). There's not much we can
+ * do about canceled reads, which also will generate an RST packet. */
+ if (!(tcp->flags & UV_HANDLE_CONNECTION) ||
+ tcp->stream.conn.write_reqs_pending == 0) {
closesocket(tcp->socket);
tcp->socket = INVALID_SOCKET;
- tcp->flags |= UV_HANDLE_TCP_SOCKET_CLOSED;
}
tcp->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
@@ -1571,3 +1580,118 @@ int uv__tcp_connect(uv_connect_t* req,
return 0;
}
+
+#ifndef WSA_FLAG_NO_HANDLE_INHERIT
+/* Added in Windows 7 SP1. Specify this to avoid race conditions, */
+/* but also manually clear the inherit flag in case this failed. */
+#define WSA_FLAG_NO_HANDLE_INHERIT 0x80
+#endif
+
+int uv_socketpair(int type, int protocol, uv_os_sock_t fds[2], int flags0, int flags1) {
+ SOCKET server = INVALID_SOCKET;
+ SOCKET client0 = INVALID_SOCKET;
+ SOCKET client1 = INVALID_SOCKET;
+ SOCKADDR_IN name;
+ LPFN_ACCEPTEX func_acceptex;
+ WSAOVERLAPPED overlap;
+ char accept_buffer[sizeof(struct sockaddr_storage) * 2 + 32];
+ int namelen;
+ int err;
+ DWORD bytes;
+ DWORD flags;
+ DWORD client0_flags = WSA_FLAG_NO_HANDLE_INHERIT;
+ DWORD client1_flags = WSA_FLAG_NO_HANDLE_INHERIT;
+
+ if (flags0 & UV_NONBLOCK_PIPE)
+ client0_flags |= WSA_FLAG_OVERLAPPED;
+ if (flags1 & UV_NONBLOCK_PIPE)
+ client1_flags |= WSA_FLAG_OVERLAPPED;
+
+ server = WSASocketW(AF_INET, type, protocol, NULL, 0,
+ WSA_FLAG_OVERLAPPED | WSA_FLAG_NO_HANDLE_INHERIT);
+ if (server == INVALID_SOCKET)
+ goto wsaerror;
+ if (!SetHandleInformation((HANDLE) server, HANDLE_FLAG_INHERIT, 0))
+ goto error;
+ name.sin_family = AF_INET;
+ name.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ name.sin_port = 0;
+ if (bind(server, (SOCKADDR*) &name, sizeof(name)) != 0)
+ goto wsaerror;
+ if (listen(server, 1) != 0)
+ goto wsaerror;
+ namelen = sizeof(name);
+ if (getsockname(server, (SOCKADDR*) &name, &namelen) != 0)
+ goto wsaerror;
+ client0 = WSASocketW(AF_INET, type, protocol, NULL, 0, client0_flags);
+ if (client0 == INVALID_SOCKET)
+ goto wsaerror;
+ if (!SetHandleInformation((HANDLE) client0, HANDLE_FLAG_INHERIT, 0))
+ goto error;
+ if (connect(client0, (SOCKADDR*) &name, sizeof(name)) != 0)
+ goto wsaerror;
+ client1 = WSASocketW(AF_INET, type, protocol, NULL, 0, client1_flags);
+ if (client1 == INVALID_SOCKET)
+ goto wsaerror;
+ if (!SetHandleInformation((HANDLE) client1, HANDLE_FLAG_INHERIT, 0))
+ goto error;
+ if (!uv_get_acceptex_function(server, &func_acceptex)) {
+ err = WSAEAFNOSUPPORT;
+ goto cleanup;
+ }
+ memset(&overlap, 0, sizeof(overlap));
+ if (!func_acceptex(server,
+ client1,
+ accept_buffer,
+ 0,
+ sizeof(struct sockaddr_storage),
+ sizeof(struct sockaddr_storage),
+ &bytes,
+ &overlap)) {
+ err = WSAGetLastError();
+ if (err == ERROR_IO_PENDING) {
+ /* Result should complete immediately, since we already called connect,
+       * but empirically, we sometimes have to poll the kernel a couple of times
+ * until it notices that. */
+ while (!WSAGetOverlappedResult(client1, &overlap, &bytes, FALSE, &flags)) {
+ err = WSAGetLastError();
+ if (err != WSA_IO_INCOMPLETE)
+ goto cleanup;
+ SwitchToThread();
+ }
+ }
+ else {
+ goto cleanup;
+ }
+ }
+ if (setsockopt(client1, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
+ (char*) &server, sizeof(server)) != 0) {
+ goto wsaerror;
+ }
+
+ closesocket(server);
+
+ fds[0] = client0;
+ fds[1] = client1;
+
+ return 0;
+
+ wsaerror:
+ err = WSAGetLastError();
+ goto cleanup;
+
+ error:
+ err = GetLastError();
+ goto cleanup;
+
+ cleanup:
+ if (server != INVALID_SOCKET)
+ closesocket(server);
+ if (client0 != INVALID_SOCKET)
+ closesocket(client0);
+ if (client1 != INVALID_SOCKET)
+ closesocket(client1);
+
+ assert(err);
+ return uv_translate_sys_error(err);
+}
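
uv_socketpair() gives Windows callers a socketpair()-like primitive over a loopback accept. A usage sketch that attaches one end to a uv_tcp_t; make_local_pair() is an illustrative helper and error handling is minimal:

#include <uv.h>

int make_local_pair(uv_loop_t* loop, uv_tcp_t* handle, uv_os_sock_t* peer) {
  uv_os_sock_t fds[2];
  int rc;

  /* UV_NONBLOCK_PIPE maps to WSA_FLAG_OVERLAPPED, so either end can be
   * handed to an event loop. */
  rc = uv_socketpair(SOCK_STREAM, 0, fds, UV_NONBLOCK_PIPE, UV_NONBLOCK_PIPE);
  if (rc != 0)
    return rc;

  rc = uv_tcp_init(loop, handle);
  if (rc == 0)
    rc = uv_tcp_open(handle, fds[0]);  /* handle now owns fds[0] */
  *peer = fds[1];
  return rc;
}
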
diff --git a/Utilities/cmlibuv/src/win/udp.c b/Utilities/cmlibuv/src/win/udp.c
index 68ca728aa5..3043f2da88 100644
--- a/Utilities/cmlibuv/src/win/udp.c
+++ b/Utilities/cmlibuv/src/win/udp.c
@@ -284,7 +284,7 @@ static void uv_udp_queue_recv(uv_loop_t* loop, uv_udp_t* handle) {
handle->flags &= ~UV_HANDLE_ZERO_READ;
handle->recv_buffer = uv_buf_init(NULL, 0);
- handle->alloc_cb((uv_handle_t*) handle, 65536, &handle->recv_buffer);
+ handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &handle->recv_buffer);
if (handle->recv_buffer.base == NULL || handle->recv_buffer.len == 0) {
handle->recv_cb(handle, UV_ENOBUFS, &handle->recv_buffer, NULL, 0);
return;
@@ -501,7 +501,7 @@ void uv_process_udp_recv_req(uv_loop_t* loop, uv_udp_t* handle,
/* Do a nonblocking receive.
* TODO: try to read multiple datagrams at once. FIONREAD maybe? */
buf = uv_buf_init(NULL, 0);
- handle->alloc_cb((uv_handle_t*) handle, 65536, &buf);
+ handle->alloc_cb((uv_handle_t*) handle, UV__UDP_DGRAM_MAXSIZE, &buf);
if (buf.base == NULL || buf.len == 0) {
handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
goto done;
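
Both receive paths now ask the allocator for UV__UDP_DGRAM_MAXSIZE (64 KiB) rather than a bare 65536. A sketch of callbacks for uv_udp_recv_start() that simply honor the suggested size; the names are illustrative:

#include <stdlib.h>
#include <uv.h>

/* Allocator: on Windows suggested_size is now UV__UDP_DGRAM_MAXSIZE,
 * large enough for any single UDP datagram. */
static void udp_alloc_cb(uv_handle_t* handle, size_t suggested_size,
                         uv_buf_t* buf) {
  buf->base = malloc(suggested_size);
  buf->len = buf->base != NULL ? suggested_size : 0;
}

static void udp_recv_cb(uv_udp_t* handle, ssize_t nread, const uv_buf_t* buf,
                        const struct sockaddr* addr, unsigned flags) {
  /* ... consume nread bytes of buf->base here ... */
  free(buf->base);
}
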
diff --git a/Utilities/cmlibuv/src/win/util.c b/Utilities/cmlibuv/src/win/util.c
index aad8f1a15e..33e874ac44 100644
--- a/Utilities/cmlibuv/src/win/util.c
+++ b/Utilities/cmlibuv/src/win/util.c
@@ -1664,26 +1664,36 @@ int uv_os_unsetenv(const char* name) {
int uv_os_gethostname(char* buffer, size_t* size) {
- char buf[UV_MAXHOSTNAMESIZE];
+ WCHAR buf[UV_MAXHOSTNAMESIZE];
size_t len;
+ char* utf8_str;
+ int convert_result;
if (buffer == NULL || size == NULL || *size == 0)
return UV_EINVAL;
uv__once_init(); /* Initialize winsock */
- if (gethostname(buf, sizeof(buf)) != 0)
+ if (pGetHostNameW == NULL)
+ return UV_ENOSYS;
+
+ if (pGetHostNameW(buf, UV_MAXHOSTNAMESIZE) != 0)
return uv_translate_sys_error(WSAGetLastError());
- buf[sizeof(buf) - 1] = '\0'; /* Null terminate, just to be safe. */
- len = strlen(buf);
+ convert_result = uv__convert_utf16_to_utf8(buf, -1, &utf8_str);
+
+ if (convert_result != 0)
+ return convert_result;
+ len = strlen(utf8_str);
if (len >= *size) {
*size = len + 1;
+ uv__free(utf8_str);
return UV_ENOBUFS;
}
- memcpy(buffer, buf, len + 1);
+ memcpy(buffer, utf8_str, len + 1);
+ uv__free(utf8_str);
*size = len;
return 0;
}
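
uv_os_gethostname() now goes through GetHostNameW() and converts the result to UTF-8, but the caller contract (in/out size, UV_ENOBUFS when the buffer is too small) is unchanged. A short usage sketch; print_hostname() is illustrative:

#include <stdio.h>
#include <uv.h>

int print_hostname(void) {
  char name[UV_MAXHOSTNAMESIZE];
  size_t size = sizeof(name);
  int rc = uv_os_gethostname(name, &size);
  if (rc == UV_ENOBUFS)
    fprintf(stderr, "need %zu bytes for the host name\n", size);
  else if (rc == 0)
    printf("%s (%zu bytes)\n", name, size);
  return rc;
}
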
diff --git a/Utilities/cmlibuv/src/win/winapi.c b/Utilities/cmlibuv/src/win/winapi.c
index bb86ec8cea..bf306cd83b 100644
--- a/Utilities/cmlibuv/src/win/winapi.c
+++ b/Utilities/cmlibuv/src/win/winapi.c
@@ -45,12 +45,15 @@ sPowerRegisterSuspendResumeNotification pPowerRegisterSuspendResumeNotification;
/* User32.dll function pointer */
sSetWinEventHook pSetWinEventHook;
+/* ws2_32.dll function pointer */
+uv_sGetHostNameW pGetHostNameW;
void uv_winapi_init(void) {
HMODULE ntdll_module;
HMODULE powrprof_module;
HMODULE user32_module;
HMODULE kernel32_module;
+ HMODULE ws2_32_module;
ntdll_module = GetModuleHandleA("ntdll.dll");
if (ntdll_module == NULL) {
@@ -134,4 +137,11 @@ void uv_winapi_init(void) {
pSetWinEventHook = (sSetWinEventHook)
GetProcAddress(user32_module, "SetWinEventHook");
}
+
+ ws2_32_module = LoadLibraryA("ws2_32.dll");
+ if (ws2_32_module != NULL) {
+ pGetHostNameW = (uv_sGetHostNameW) GetProcAddress(
+ ws2_32_module,
+ "GetHostNameW");
+ }
}
diff --git a/Utilities/cmlibuv/src/win/winapi.h b/Utilities/cmlibuv/src/win/winapi.h
index 9bc455934a..e815f8f95e 100644
--- a/Utilities/cmlibuv/src/win/winapi.h
+++ b/Utilities/cmlibuv/src/win/winapi.h
@@ -4768,4 +4768,11 @@ extern sPowerRegisterSuspendResumeNotification pPowerRegisterSuspendResumeNotifi
/* User32.dll function pointer */
extern sSetWinEventHook pSetWinEventHook;
+/* ws2_32.dll function pointer */
+/* mingw doesn't have this definition, so let's declare it here locally */
+typedef int (WINAPI *uv_sGetHostNameW)
+ (PWSTR,
+ int);
+extern uv_sGetHostNameW pGetHostNameW;
+
#endif /* UV_WIN_WINAPI_H_ */