/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "internal.h"

#include <stddef.h> /* NULL */
#include <stdio.h> /* printf */
#include <stdlib.h>
#include <string.h> /* strerror */
#include <errno.h>
#include <assert.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <limits.h> /* INT_MAX, PATH_MAX, IOV_MAX */
#include <sys/uio.h> /* writev */
#include <sys/resource.h> /* getrusage */
#include <pwd.h>
#include <sys/utsname.h>
#include <sys/time.h>

#ifdef __sun
# include <sys/filio.h>
# include <sys/types.h>
# include <sys/wait.h>
#endif

#ifdef __APPLE__
# include <crt_externs.h>
# include <mach-o/dyld.h> /* _NSGetExecutablePath */
# include <sys/filio.h>
# if defined(O_CLOEXEC)
#  define UV__O_CLOEXEC O_CLOEXEC
# endif
# define environ (*_NSGetEnviron())
#else
extern char** environ;
#endif

#if defined(__DragonFly__) || \
    defined(__FreeBSD__) || \
    defined(__FreeBSD_kernel__) || \
    defined(__NetBSD__)
# include <sys/sysctl.h>
# include <sys/filio.h>
# include <sys/wait.h>
# define UV__O_CLOEXEC O_CLOEXEC
# if defined(__FreeBSD__) && __FreeBSD__ >= 10
#  define uv__accept4 accept4
# endif
# if defined(__NetBSD__)
#  define uv__accept4(a, b, c, d) paccept((a), (b), (c), NULL, (d))
# endif
# if (defined(__FreeBSD__) && __FreeBSD__ >= 10) || defined(__NetBSD__)
#  define UV__SOCK_NONBLOCK SOCK_NONBLOCK
#  define UV__SOCK_CLOEXEC SOCK_CLOEXEC
# endif
# if !defined(F_DUP2FD_CLOEXEC) && defined(_F_DUP2FD_CLOEXEC)
#  define F_DUP2FD_CLOEXEC _F_DUP2FD_CLOEXEC
# endif
#endif

#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
# include <dlfcn.h> /* for dlsym */
#endif

#if defined(__MVS__)
#include <sys/ioctl.h>
#endif

#if defined(__linux__)
#include <sys/syscall.h>
#endif

static int uv__run_pending(uv_loop_t* loop);

/* Verify that uv_buf_t is ABI-compatible with struct iovec. */
STATIC_ASSERT(sizeof(uv_buf_t) == sizeof(struct iovec));
STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->base) ==
              sizeof(((struct iovec*) 0)->iov_base));
STATIC_ASSERT(sizeof(&((uv_buf_t*) 0)->len) ==
              sizeof(((struct iovec*) 0)->iov_len));
STATIC_ASSERT(offsetof(uv_buf_t, base) == offsetof(struct iovec, iov_base));
STATIC_ASSERT(offsetof(uv_buf_t, len) == offsetof(struct iovec, iov_len));
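
/* These asserts are what make it safe to hand an array of uv_buf_t straight
 * to writev()-style system calls by casting, with no copying or repacking.
 * Hedged caller-side sketch (assumes `fd` is some writable descriptor; this
 * snippet is illustrative and not part of libuv itself):
 *
 *   char msg[] = "hello\n";
 *   uv_buf_t buf = uv_buf_init(msg, sizeof(msg) - 1);
 *   ssize_t n = writev(fd, (struct iovec*) &buf, 1);
 *
 * The stream write path relies on the same layout equivalence internally.
 */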


uint64_t uv_hrtime(void) {
  return uv__hrtime(UV_CLOCK_PRECISE);
}


void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
  assert(!uv__is_closing(handle));

  handle->flags |= UV_HANDLE_CLOSING;
  handle->close_cb = close_cb;

  switch (handle->type) {
  case UV_NAMED_PIPE:
    uv__pipe_close((uv_pipe_t*)handle);
    break;

  case UV_TTY:
    uv__stream_close((uv_stream_t*)handle);
    break;

  case UV_TCP:
    uv__tcp_close((uv_tcp_t*)handle);
    break;

  case UV_UDP:
    uv__udp_close((uv_udp_t*)handle);
    break;

  case UV_PREPARE:
    uv__prepare_close((uv_prepare_t*)handle);
    break;

  case UV_CHECK:
    uv__check_close((uv_check_t*)handle);
    break;

  case UV_IDLE:
    uv__idle_close((uv_idle_t*)handle);
    break;

  case UV_ASYNC:
    uv__async_close((uv_async_t*)handle);
    break;

  case UV_TIMER:
    uv__timer_close((uv_timer_t*)handle);
    break;

  case UV_PROCESS:
    uv__process_close((uv_process_t*)handle);
    break;

  case UV_FS_EVENT:
    uv__fs_event_close((uv_fs_event_t*)handle);
    break;

  case UV_POLL:
    uv__poll_close((uv_poll_t*)handle);
    break;

  case UV_FS_POLL:
    uv__fs_poll_close((uv_fs_poll_t*)handle);
    /* Poll handles use file system requests, and one of them may still be
     * running. The poll code will call uv__make_close_pending() for us. */
    return;

  case UV_SIGNAL:
    uv__signal_close((uv_signal_t*) handle);
    /* Signal handles may not be closed immediately. The signal code will
     * call uv__make_close_pending() itself whenever appropriate. */
    return;

  default:
    assert(0);
  }

  uv__make_close_pending(handle);
}
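
/* Typical caller-side pattern (illustrative sketch, not part of libuv, with
 * `tcp` standing in for some previously initialized handle): a handle is only
 * fully dead once its close callback has run on a later loop iteration, so
 * heap-allocated handles are usually freed in that callback rather than right
 * after calling uv_close().
 *
 *   static void on_close(uv_handle_t* handle) {
 *     free(handle);
 *   }
 *
 *   uv_close((uv_handle_t*) tcp, on_close);
 */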

int uv__socket_sockopt(uv_handle_t* handle, int optname, int* value) {
  int r;
  int fd;
  socklen_t len;

  if (handle == NULL || value == NULL)
    return UV_EINVAL;

  if (handle->type == UV_TCP || handle->type == UV_NAMED_PIPE)
    fd = uv__stream_fd((uv_stream_t*) handle);
  else if (handle->type == UV_UDP)
    fd = ((uv_udp_t *) handle)->io_watcher.fd;
  else
    return UV_ENOTSUP;

  len = sizeof(*value);

  if (*value == 0)
    r = getsockopt(fd, SOL_SOCKET, optname, value, &len);
  else
    r = setsockopt(fd, SOL_SOCKET, optname, (const void*) value, len);

  if (r < 0)
    return UV__ERR(errno);

  return 0;
}
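
/* The single in/out `value` parameter doubles as a direction switch: callers
 * pass *value == 0 to read the current option and a non-zero value to set it.
 * Hedged sketch of the calling convention, with `h` standing in for any TCP,
 * pipe, or UDP handle:
 *
 *   int sz = 0;
 *   uv__socket_sockopt((uv_handle_t*) h, SO_SNDBUF, &sz);   reads SO_SNDBUF
 *   sz = 64 * 1024;
 *   uv__socket_sockopt((uv_handle_t*) h, SO_SNDBUF, &sz);   requests 64 KiB
 *
 * This is the same get-when-zero convention that the public
 * uv_send_buffer_size() and uv_recv_buffer_size() wrappers expose.
 */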

void uv__make_close_pending(uv_handle_t* handle) {
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  handle->next_closing = handle->loop->closing_handles;
  handle->loop->closing_handles = handle;
}

int uv__getiovmax(void) {
#if defined(IOV_MAX)
  return IOV_MAX;
#elif defined(_SC_IOV_MAX)
  static int iovmax = -1;
  if (iovmax == -1) {
    iovmax = sysconf(_SC_IOV_MAX);
    /* On some embedded devices (e.g. arm-linux-uclibc based IP cameras),
     * sysconf(_SC_IOV_MAX) cannot report the correct value: it returns -1
     * with errno set to EINPROGRESS. Fall back to a value of 1 in that case.
     */
    if (iovmax == -1) iovmax = 1;
  }
  return iovmax;
#else
  return 1024;
#endif
}


static void uv__finish_close(uv_handle_t* handle) {
  /* Note: while the handle is in the UV_HANDLE_CLOSING state now, it's still
   * possible for it to be active in the sense that uv__is_active() returns
   * true.
   *
   * A good example is when the user calls uv_shutdown(), immediately followed
   * by uv_close(). The handle is considered active at this point because the
   * completion of the shutdown req is still pending.
   */
  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  handle->flags |= UV_HANDLE_CLOSED;

  switch (handle->type) {
  case UV_PREPARE:
  case UV_CHECK:
  case UV_IDLE:
  case UV_ASYNC:
  case UV_TIMER:
  case UV_PROCESS:
  case UV_FS_EVENT:
  case UV_FS_POLL:
  case UV_POLL:
  case UV_SIGNAL:
    break;

  case UV_NAMED_PIPE:
  case UV_TCP:
  case UV_TTY:
    uv__stream_destroy((uv_stream_t*)handle);
    break;

  case UV_UDP:
    uv__udp_finish_close((uv_udp_t*)handle);
    break;

  default:
    assert(0);
    break;
  }

  uv__handle_unref(handle);
  QUEUE_REMOVE(&handle->handle_queue);

  if (handle->close_cb) {
    handle->close_cb(handle);
  }
}


static void uv__run_closing_handles(uv_loop_t* loop) {
  uv_handle_t* p;
  uv_handle_t* q;

  p = loop->closing_handles;
  loop->closing_handles = NULL;

  while (p) {
    q = p->next_closing;
    uv__finish_close(p);
    p = q;
  }
}


int uv_is_closing(const uv_handle_t* handle) {
  return uv__is_closing(handle);
}


int uv_backend_fd(const uv_loop_t* loop) {
  return loop->backend_fd;
}


int uv_backend_timeout(const uv_loop_t* loop) {
  if (loop->stop_flag != 0)
    return 0;

  if (!uv__has_active_handles(loop) && !uv__has_active_reqs(loop))
    return 0;

  if (!QUEUE_EMPTY(&loop->idle_handles))
    return 0;

  if (!QUEUE_EMPTY(&loop->pending_queue))
    return 0;

  if (loop->closing_handles)
    return 0;

  return uv__next_timeout(loop);
}


static int uv__loop_alive(const uv_loop_t* loop) {
  return uv__has_active_handles(loop) ||
         uv__has_active_reqs(loop) ||
         loop->closing_handles != NULL;
}


int uv_loop_alive(const uv_loop_t* loop) {
  return uv__loop_alive(loop);
}


int uv_run(uv_loop_t* loop, uv_run_mode mode) {
  int timeout;
  int r;
  int ran_pending;

  r = uv__loop_alive(loop);
  if (!r)
    uv__update_time(loop);

  while (r != 0 && loop->stop_flag == 0) {
    uv__update_time(loop);
    uv__run_timers(loop);
    ran_pending = uv__run_pending(loop);
    uv__run_idle(loop);
    uv__run_prepare(loop);

    timeout = 0;
    if ((mode == UV_RUN_ONCE && !ran_pending) || mode == UV_RUN_DEFAULT)
      timeout = uv_backend_timeout(loop);

    uv__io_poll(loop, timeout);
    uv__run_check(loop);
    uv__run_closing_handles(loop);

    if (mode == UV_RUN_ONCE) {
      /* UV_RUN_ONCE implies forward progress: at least one callback must have
       * been invoked when it returns. uv__io_poll() can return without doing
       * I/O (meaning: no callbacks) when its timeout expires - which means we
       * have pending timers that satisfy the forward progress constraint.
       *
       * UV_RUN_NOWAIT makes no guarantees about progress so it's omitted from
       * the check.
       */
      uv__update_time(loop);
      uv__run_timers(loop);
    }

    r = uv__loop_alive(loop);
    if (mode == UV_RUN_ONCE || mode == UV_RUN_NOWAIT)
      break;
  }

  /* The if statement lets gcc compile it to a conditional store. Avoids
   * dirtying a cache line.
   */
  if (loop->stop_flag != 0)
    loop->stop_flag = 0;

  return r;
}
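
/* Hedged usage sketch (application code, not part of libuv): UV_RUN_DEFAULT
 * blocks until no referenced handles or requests remain, while UV_RUN_NOWAIT
 * lets an embedder poll the loop from its own main loop.
 *
 *   uv_loop_t loop;
 *   uv_loop_init(&loop);
 *   ... start handles and requests here ...
 *   uv_run(&loop, UV_RUN_DEFAULT);      returns once nothing is left alive
 *   uv_loop_close(&loop);
 *
 * Embedding variant: call uv_run(&loop, UV_RUN_NOWAIT) each frame, or wait on
 * uv_backend_fd() with a timeout of uv_backend_timeout() before each call.
 */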


void uv_update_time(uv_loop_t* loop) {
  uv__update_time(loop);
}


int uv_is_active(const uv_handle_t* handle) {
  return uv__is_active(handle);
}


/* Open a socket in non-blocking close-on-exec mode, atomically if possible. */
int uv__socket(int domain, int type, int protocol) {
  int sockfd;
  int err;

#if defined(SOCK_NONBLOCK) && defined(SOCK_CLOEXEC)
  sockfd = socket(domain, type | SOCK_NONBLOCK | SOCK_CLOEXEC, protocol);
  if (sockfd != -1)
    return sockfd;

  if (errno != EINVAL)
    return UV__ERR(errno);
#endif

  sockfd = socket(domain, type, protocol);
  if (sockfd == -1)
    return UV__ERR(errno);

  err = uv__nonblock(sockfd, 1);
  if (err == 0)
    err = uv__cloexec(sockfd, 1);

  if (err) {
    uv__close(sockfd);
    return err;
  }

#if defined(SO_NOSIGPIPE)
  {
    int on = 1;
    setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
  }
#endif

  return sockfd;
}

/* Open a FILE* for the given path in read-only, close-on-exec mode. */
FILE* uv__open_file(const char* path) {
  int fd;
  FILE* fp;

  fd = uv__open_cloexec(path, O_RDONLY);
  if (fd < 0)
    return NULL;

  fp = fdopen(fd, "r");
  if (fp == NULL)
    uv__close(fd);

  return fp;
}


int uv__accept(int sockfd) {
  int peerfd;
  int err;

  assert(sockfd >= 0);

  while (1) {
#if defined(__linux__) || \
    (defined(__FreeBSD__) && __FreeBSD__ >= 10) || \
    defined(__NetBSD__)
    static int no_accept4;

    if (no_accept4)
      goto skip;

    peerfd = uv__accept4(sockfd,
                         NULL,
                         NULL,
                         UV__SOCK_NONBLOCK|UV__SOCK_CLOEXEC);
    if (peerfd != -1)
      return peerfd;

    if (errno == EINTR)
      continue;

    if (errno != ENOSYS)
      return UV__ERR(errno);

    no_accept4 = 1;
skip:
#endif

    peerfd = accept(sockfd, NULL, NULL);
    if (peerfd == -1) {
      if (errno == EINTR)
        continue;
      return UV__ERR(errno);
    }

    err = uv__cloexec(peerfd, 1);
    if (err == 0)
      err = uv__nonblock(peerfd, 1);

    if (err) {
      uv__close(peerfd);
      return err;
    }

    return peerfd;
  }
}

/* close() on macOS has the "interesting" quirk that it fails with EINTR
 * without closing the file descriptor when a thread is in the cancel state.
 * That's why libuv calls close$NOCANCEL() instead.
 *
 * glibc on Linux has a similar issue: close() is a cancellation point and
 * will unwind the thread when it's in the cancel state. Work around that
 * by making the system call directly. Musl libc is unaffected.
 */
int uv__close_nocancel(int fd) {
#if defined(__APPLE__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdollar-in-identifier-extension"
#if defined(__LP64__)
  extern int close$NOCANCEL(int);
  return close$NOCANCEL(fd);
#else
  extern int close$NOCANCEL$UNIX2003(int);
  return close$NOCANCEL$UNIX2003(fd);
#endif
#pragma GCC diagnostic pop
#elif defined(__linux__)
  return syscall(SYS_close, fd);
#else
  return close(fd);
#endif
}


int uv__close_nocheckstdio(int fd) {
  int saved_errno;
  int rc;

  assert(fd > -1);  /* Catch uninitialized io_watcher.fd bugs. */

  saved_errno = errno;
  rc = uv__close_nocancel(fd);
  if (rc == -1) {
    rc = UV__ERR(errno);
    if (rc == UV_EINTR || rc == UV__ERR(EINPROGRESS))
      rc = 0;  /* The close is in progress, not an error. */
    errno = saved_errno;
  }

  return rc;
}


int uv__close(int fd) {
  assert(fd > STDERR_FILENO);  /* Catch stdio close bugs. */
#if defined(__MVS__)
  SAVE_ERRNO(epoll_file_close(fd));
#endif
  return uv__close_nocheckstdio(fd);
}


int uv__nonblock_ioctl(int fd, int set) {
  int r;

  do
    r = ioctl(fd, FIONBIO, &set);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}


#if !defined(__CYGWIN__) && !defined(__MSYS__) && !defined(__HAIKU__)
int uv__cloexec_ioctl(int fd, int set) {
  int r;

  do
    r = ioctl(fd, set ? FIOCLEX : FIONCLEX);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}
#endif


int uv__nonblock_fcntl(int fd, int set) {
  int flags;
  int r;

  do
    r = fcntl(fd, F_GETFL);
  while (r == -1 && errno == EINTR);

  if (r == -1)
    return UV__ERR(errno);

  /* Bail out now if already set/clear. */
  if (!!(r & O_NONBLOCK) == !!set)
    return 0;

  if (set)
    flags = r | O_NONBLOCK;
  else
    flags = r & ~O_NONBLOCK;

  do
    r = fcntl(fd, F_SETFL, flags);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}


int uv__cloexec_fcntl(int fd, int set) {
  int flags;
  int r;

  do
    r = fcntl(fd, F_GETFD);
  while (r == -1 && errno == EINTR);

  if (r == -1)
    return UV__ERR(errno);

  /* Bail out now if already set/clear. */
  if (!!(r & FD_CLOEXEC) == !!set)
    return 0;

  if (set)
    flags = r | FD_CLOEXEC;
  else
    flags = r & ~FD_CLOEXEC;

  do
    r = fcntl(fd, F_SETFD, flags);
  while (r == -1 && errno == EINTR);

  if (r)
    return UV__ERR(errno);

  return 0;
}


ssize_t uv__recvmsg(int fd, struct msghdr* msg, int flags) {
  struct cmsghdr* cmsg;
  ssize_t rc;
  int* pfd;
  int* end;
#if defined(__linux__)
  static int no_msg_cmsg_cloexec;
  if (no_msg_cmsg_cloexec == 0) {
    rc = recvmsg(fd, msg, flags | 0x40000000);  /* MSG_CMSG_CLOEXEC */
    if (rc != -1)
      return rc;
    if (errno != EINVAL)
      return UV__ERR(errno);
    rc = recvmsg(fd, msg, flags);
    if (rc == -1)
      return UV__ERR(errno);
    no_msg_cmsg_cloexec = 1;
  } else {
    rc = recvmsg(fd, msg, flags);
  }
#else
  rc = recvmsg(fd, msg, flags);
#endif
  if (rc == -1)
    return UV__ERR(errno);
  if (msg->msg_controllen == 0)
    return rc;
  for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg))
    if (cmsg->cmsg_type == SCM_RIGHTS)
      for (pfd = (int*) CMSG_DATA(cmsg),
           end = (int*) ((char*) cmsg + cmsg->cmsg_len);
           pfd < end;
           pfd += 1)
        uv__cloexec(*pfd, 1);
  return rc;
}


int uv_cwd(char* buffer, size_t* size) {
  char scratch[1 + UV__PATH_MAX];

  if (buffer == NULL || size == NULL)
    return UV_EINVAL;

  /* Try to read directly into the user's buffer first... */
  if (getcwd(buffer, *size) != NULL)
    goto fixup;

  if (errno != ERANGE)
    return UV__ERR(errno);

  /* ...or into scratch space if the user's buffer is too small
   * so we can report how much space to provide on the next try.
   */
  if (getcwd(scratch, sizeof(scratch)) == NULL)
    return UV__ERR(errno);

  buffer = scratch;

fixup:

  *size = strlen(buffer);

  if (*size > 1 && buffer[*size - 1] == '/') {
    *size -= 1;
    buffer[*size] = '\0';
  }

  if (buffer == scratch) {
    *size += 1;
    return UV_ENOBUFS;
  }

  return 0;
}
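
/* Caller-side retry sketch (illustrative, not part of libuv): on UV_ENOBUFS
 * the function has already stored the required size, including the NUL byte,
 * back into *size, so the natural pattern is to grow the buffer and retry.
 *
 *   char stackbuf[64];
 *   size_t len = sizeof(stackbuf);
 *   char* p = stackbuf;
 *   if (uv_cwd(p, &len) == UV_ENOBUFS) {
 *     p = malloc(len);
 *     if (p != NULL)
 *       uv_cwd(p, &len);
 *   }
 */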


int uv_chdir(const char* dir) {
  if (chdir(dir))
    return UV__ERR(errno);

  return 0;
}


void uv_disable_stdio_inheritance(void) {
  int fd;

  /* Set the CLOEXEC flag on all open descriptors. Unconditionally try the
   * first 16 file descriptors. After that, bail out after the first error.
   */
  for (fd = 0; ; fd++)
    if (uv__cloexec(fd, 1) && fd > 15)
      break;
}


int uv_fileno(const uv_handle_t* handle, uv_os_fd_t* fd) {
  int fd_out;

  switch (handle->type) {
  case UV_TCP:
  case UV_NAMED_PIPE:
  case UV_TTY:
    fd_out = uv__stream_fd((uv_stream_t*) handle);
    break;

  case UV_UDP:
    fd_out = ((uv_udp_t *) handle)->io_watcher.fd;
    break;

  case UV_POLL:
    fd_out = ((uv_poll_t *) handle)->io_watcher.fd;
    break;

  default:
    return UV_EINVAL;
  }

  if (uv__is_closing(handle) || fd_out == -1)
    return UV_EBADF;

  *fd = fd_out;
  return 0;
}


static int uv__run_pending(uv_loop_t* loop) {
  QUEUE* q;
  QUEUE pq;
  uv__io_t* w;

  if (QUEUE_EMPTY(&loop->pending_queue))
    return 0;

  QUEUE_MOVE(&loop->pending_queue, &pq);

  while (!QUEUE_EMPTY(&pq)) {
    q = QUEUE_HEAD(&pq);
    QUEUE_REMOVE(q);
    QUEUE_INIT(q);
    w = QUEUE_DATA(q, uv__io_t, pending_queue);
    w->cb(loop, w, POLLOUT);
  }

  return 1;
}


static unsigned int next_power_of_two(unsigned int val) {
  val -= 1;
  val |= val >> 1;
  val |= val >> 2;
  val |= val >> 4;
  val |= val >> 8;
  val |= val >> 16;
  val += 1;
  return val;
}
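
/* Worked example: for val = 33 the decrement gives 32 (0b100000); the shifted
 * ORs smear the top bit into every lower position, producing 0b111111 (63);
 * the final increment yields 64, the next power of two. Values that are
 * already powers of two map to themselves, and an input of 0 wraps to 0.
 */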

static void maybe_resize(uv_loop_t* loop, unsigned int len) {
  uv__io_t** watchers;
  void* fake_watcher_list;
  void* fake_watcher_count;
  unsigned int nwatchers;
  unsigned int i;

  if (len <= loop->nwatchers)
    return;

  /* Preserve fake watcher list and count at the end of the watchers */
  if (loop->watchers != NULL) {
    fake_watcher_list = loop->watchers[loop->nwatchers];
    fake_watcher_count = loop->watchers[loop->nwatchers + 1];
  } else {
    fake_watcher_list = NULL;
    fake_watcher_count = NULL;
  }

  nwatchers = next_power_of_two(len + 2) - 2;
  watchers = uv__realloc(loop->watchers,
                         (nwatchers + 2) * sizeof(loop->watchers[0]));

  if (watchers == NULL)
    abort();
  for (i = loop->nwatchers; i < nwatchers; i++)
    watchers[i] = NULL;
  watchers[nwatchers] = fake_watcher_list;
  watchers[nwatchers + 1] = fake_watcher_count;

  loop->watchers = watchers;
  loop->nwatchers = nwatchers;
}


void uv__io_init(uv__io_t* w, uv__io_cb cb, int fd) {
  assert(cb != NULL);
  assert(fd >= -1);
  QUEUE_INIT(&w->pending_queue);
  QUEUE_INIT(&w->watcher_queue);
  w->cb = cb;
  w->fd = fd;
  w->events = 0;
  w->pevents = 0;

#if defined(UV_HAVE_KQUEUE)
  w->rcount = 0;
  w->wcount = 0;
#endif /* defined(UV_HAVE_KQUEUE) */
}


void uv__io_start(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);
  assert(w->fd >= 0);
  assert(w->fd < INT_MAX);

  w->pevents |= events;
  maybe_resize(loop, w->fd + 1);

#if !defined(__sun)
  /* The event ports backend needs to rearm all file descriptors on each and
   * every tick of the event loop but the other backends allow us to
   * short-circuit here if the event mask is unchanged.
   */
  if (w->events == w->pevents)
    return;
#endif

  if (QUEUE_EMPTY(&w->watcher_queue))
    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);

  if (loop->watchers[w->fd] == NULL) {
    loop->watchers[w->fd] = w;
    loop->nfds++;
  }
}


void uv__io_stop(uv_loop_t* loop, uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);

  if (w->fd == -1)
    return;

  assert(w->fd >= 0);

  /* Happens when uv__io_stop() is called on a handle that was never started. */
  if ((unsigned) w->fd >= loop->nwatchers)
    return;

  w->pevents &= ~events;

  if (w->pevents == 0) {
    QUEUE_REMOVE(&w->watcher_queue);
    QUEUE_INIT(&w->watcher_queue);

    if (loop->watchers[w->fd] != NULL) {
      assert(loop->watchers[w->fd] == w);
      assert(loop->nfds > 0);
      loop->watchers[w->fd] = NULL;
      loop->nfds--;
      w->events = 0;
    }
  }
  else if (QUEUE_EMPTY(&w->watcher_queue))
    QUEUE_INSERT_TAIL(&loop->watcher_queue, &w->watcher_queue);
}


void uv__io_close(uv_loop_t* loop, uv__io_t* w) {
  uv__io_stop(loop, w, POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI);
  QUEUE_REMOVE(&w->pending_queue);

  /* Remove stale events for this file descriptor */
  if (w->fd != -1)
    uv__platform_invalidate_fd(loop, w->fd);
}


void uv__io_feed(uv_loop_t* loop, uv__io_t* w) {
  if (QUEUE_EMPTY(&w->pending_queue))
    QUEUE_INSERT_TAIL(&loop->pending_queue, &w->pending_queue);
}


int uv__io_active(const uv__io_t* w, unsigned int events) {
  assert(0 == (events & ~(POLLIN | POLLOUT | UV__POLLRDHUP | UV__POLLPRI)));
  assert(0 != events);
  return 0 != (w->pevents & events);
}


int uv__fd_exists(uv_loop_t* loop, int fd) {
  return (unsigned) fd < loop->nwatchers && loop->watchers[fd] != NULL;
}


int uv_getrusage(uv_rusage_t* rusage) {
  struct rusage usage;

  if (getrusage(RUSAGE_SELF, &usage))
    return UV__ERR(errno);

  rusage->ru_utime.tv_sec = usage.ru_utime.tv_sec;
  rusage->ru_utime.tv_usec = usage.ru_utime.tv_usec;

  rusage->ru_stime.tv_sec = usage.ru_stime.tv_sec;
  rusage->ru_stime.tv_usec = usage.ru_stime.tv_usec;

#if !defined(__MVS__) && !defined(__HAIKU__)
  rusage->ru_maxrss = usage.ru_maxrss;
  rusage->ru_ixrss = usage.ru_ixrss;
  rusage->ru_idrss = usage.ru_idrss;
  rusage->ru_isrss = usage.ru_isrss;
  rusage->ru_minflt = usage.ru_minflt;
  rusage->ru_majflt = usage.ru_majflt;
  rusage->ru_nswap = usage.ru_nswap;
  rusage->ru_inblock = usage.ru_inblock;
  rusage->ru_oublock = usage.ru_oublock;
  rusage->ru_msgsnd = usage.ru_msgsnd;
  rusage->ru_msgrcv = usage.ru_msgrcv;
  rusage->ru_nsignals = usage.ru_nsignals;
  rusage->ru_nvcsw = usage.ru_nvcsw;
  rusage->ru_nivcsw = usage.ru_nivcsw;
#endif

  return 0;
}


int uv__open_cloexec(const char* path, int flags) {
  int err;
  int fd;

#if defined(UV__O_CLOEXEC)
  static int no_cloexec;

  if (!no_cloexec) {
    fd = open(path, flags | UV__O_CLOEXEC);
    if (fd != -1)
      return fd;

    if (errno != EINVAL)
      return UV__ERR(errno);

    /* O_CLOEXEC not supported. */
    no_cloexec = 1;
  }
#endif

  fd = open(path, flags);
  if (fd == -1)
    return UV__ERR(errno);

  err = uv__cloexec(fd, 1);
  if (err) {
    uv__close(fd);
    return err;
  }

  return fd;
}


int uv__dup2_cloexec(int oldfd, int newfd) {
  int r;
#if (defined(__FreeBSD__) && __FreeBSD__ >= 10) || defined(__NetBSD__)
  r = dup3(oldfd, newfd, O_CLOEXEC);
  if (r == -1)
    return UV__ERR(errno);
  return r;
#elif defined(__FreeBSD__) && defined(F_DUP2FD_CLOEXEC)
  r = fcntl(oldfd, F_DUP2FD_CLOEXEC, newfd);
  if (r != -1)
    return r;
  if (errno != EINVAL)
    return UV__ERR(errno);
  /* Fall through. */
#elif defined(__linux__)
  static int no_dup3;
  if (!no_dup3) {
    do
      r = uv__dup3(oldfd, newfd, UV__O_CLOEXEC);
    while (r == -1 && errno == EBUSY);
    if (r != -1)
      return r;
    if (errno != ENOSYS)
      return UV__ERR(errno);
    /* Fall through. */
    no_dup3 = 1;
  }
#endif
  {
    int err;
    do
      r = dup2(oldfd, newfd);
#if defined(__linux__)
    while (r == -1 && errno == EBUSY);
#else
    while (0);  /* Never retry. */
#endif

    if (r == -1)
      return UV__ERR(errno);

    err = uv__cloexec(newfd, 1);
    if (err) {
      uv__close(newfd);
      return err;
    }

    return r;
  }
}


int uv_os_homedir(char* buffer, size_t* size) {
  uv_passwd_t pwd;
  size_t len;
  int r;

  /* Check if the HOME environment variable is set first. The task of
     performing input validation on buffer and size is taken care of by
     uv_os_getenv(). */
  r = uv_os_getenv("HOME", buffer, size);

  if (r != UV_ENOENT)
    return r;

  /* HOME is not set, so call uv__getpwuid_r() */
  r = uv__getpwuid_r(&pwd);

  if (r != 0) {
    return r;
  }

  len = strlen(pwd.homedir);

  if (len >= *size) {
    *size = len + 1;
    uv_os_free_passwd(&pwd);
    return UV_ENOBUFS;
  }

  memcpy(buffer, pwd.homedir, len + 1);
  *size = len;
  uv_os_free_passwd(&pwd);

  return 0;
}


int uv_os_tmpdir(char* buffer, size_t* size) {
  const char* buf;
  size_t len;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

#define CHECK_ENV_VAR(name) \
  do { \
    buf = getenv(name); \
    if (buf != NULL) \
      goto return_buffer; \
  } \
  while (0)

  /* Check the TMPDIR, TMP, TEMP, and TEMPDIR environment variables in order */
  CHECK_ENV_VAR("TMPDIR");
  CHECK_ENV_VAR("TMP");
  CHECK_ENV_VAR("TEMP");
  CHECK_ENV_VAR("TEMPDIR");

#undef CHECK_ENV_VAR

  /* No temp environment variables defined */
  #if defined(__ANDROID__)
  buf = "/data/local/tmp";
  #else
  buf = "/tmp";
  #endif

return_buffer:
  len = strlen(buf);

  if (len >= *size) {
    *size = len + 1;
    return UV_ENOBUFS;
  }

  /* The returned directory should not have a trailing slash. */
  if (len > 1 && buf[len - 1] == '/') {
    len--;
  }

  memcpy(buffer, buf, len + 1);
  buffer[len] = '\0';
  *size = len;

  return 0;
}


int uv__getpwuid_r(uv_passwd_t* pwd) {
  struct passwd pw;
  struct passwd* result;
  char* buf;
  uid_t uid;
  size_t bufsize;
  size_t name_size;
  size_t homedir_size;
  size_t shell_size;
  long initsize;
  int r;
#if defined(__ANDROID_API__) && __ANDROID_API__ < 21
  int (*getpwuid_r)(uid_t, struct passwd*, char*, size_t, struct passwd**);

  getpwuid_r = dlsym(RTLD_DEFAULT, "getpwuid_r");
  if (getpwuid_r == NULL)
    return UV_ENOSYS;
#endif

  if (pwd == NULL)
    return UV_EINVAL;

  initsize = sysconf(_SC_GETPW_R_SIZE_MAX);

  if (initsize <= 0)
    bufsize = 4096;
  else
    bufsize = (size_t) initsize;

  uid = geteuid();
  buf = NULL;

  for (;;) {
    uv__free(buf);
    buf = uv__malloc(bufsize);

    if (buf == NULL)
      return UV_ENOMEM;

    r = getpwuid_r(uid, &pw, buf, bufsize, &result);

    if (r != ERANGE)
      break;

    bufsize *= 2;
  }

  if (r != 0) {
    uv__free(buf);
    return -r;
  }

  if (result == NULL) {
    uv__free(buf);
    return UV_ENOENT;
  }

  /* Allocate memory for the username, shell, and home directory */
  name_size = strlen(pw.pw_name) + 1;
  homedir_size = strlen(pw.pw_dir) + 1;
  shell_size = strlen(pw.pw_shell) + 1;
  pwd->username = uv__malloc(name_size + homedir_size + shell_size);

  if (pwd->username == NULL) {
    uv__free(buf);
    return UV_ENOMEM;
  }

  /* Copy the username */
  memcpy(pwd->username, pw.pw_name, name_size);

  /* Copy the home directory */
  pwd->homedir = pwd->username + name_size;
  memcpy(pwd->homedir, pw.pw_dir, homedir_size);

  /* Copy the shell */
  pwd->shell = pwd->homedir + homedir_size;
  memcpy(pwd->shell, pw.pw_shell, shell_size);

  /* Copy the uid and gid */
  pwd->uid = pw.pw_uid;
  pwd->gid = pw.pw_gid;

  uv__free(buf);

  return 0;
}


void uv_os_free_passwd(uv_passwd_t* pwd) {
  if (pwd == NULL)
    return;

  /*
    The memory for name, shell, and homedir is allocated in a single
    uv__malloc() call. The base of that allocation is stored in pwd->username,
    so that is the only field that needs to be freed.
  */
  uv__free(pwd->username);
  pwd->username = NULL;
  pwd->shell = NULL;
  pwd->homedir = NULL;
}


int uv_os_get_passwd(uv_passwd_t* pwd) {
  return uv__getpwuid_r(pwd);
}


int uv_translate_sys_error(int sys_errno) {
  /* If < 0 then it's already a libuv error. */
  return sys_errno <= 0 ? sys_errno : -sys_errno;
}


int uv_os_environ(uv_env_item_t** envitems, int* count) {
  int i, j, cnt;
  uv_env_item_t* envitem;

  *envitems = NULL;
  *count = 0;

  for (i = 0; environ[i] != NULL; i++);

  *envitems = uv__calloc(i, sizeof(**envitems));

  if (*envitems == NULL)
    return UV_ENOMEM;

  for (j = 0, cnt = 0; j < i; j++) {
    char* buf;
    char* ptr;

    if (environ[j] == NULL)
      break;

    buf = uv__strdup(environ[j]);
    if (buf == NULL)
      goto fail;

    ptr = strchr(buf, '=');
    if (ptr == NULL) {
      uv__free(buf);
      continue;
    }

    *ptr = '\0';

    envitem = &(*envitems)[cnt];
    envitem->name = buf;
    envitem->value = ptr + 1;

    cnt++;
  }

  *count = cnt;
  return 0;

fail:
  for (i = 0; i < cnt; i++) {
    envitem = &(*envitems)[i];
    uv__free(envitem->name);
  }
  uv__free(*envitems);

  *envitems = NULL;
  *count = 0;
  return UV_ENOMEM;
}
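
/* Hedged usage sketch (application code, not part of this file): the returned
 * array and the strings it points into are owned by the caller and should be
 * released with uv_os_free_environ() once they are no longer needed.
 *
 *   uv_env_item_t* items;
 *   int count;
 *   if (uv_os_environ(&items, &count) == 0) {
 *     for (int i = 0; i < count; i++)
 *       printf("%s=%s\n", items[i].name, items[i].value);
 *     uv_os_free_environ(items, count);
 *   }
 */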


int uv_os_getenv(const char* name, char* buffer, size_t* size) {
  char* var;
  size_t len;

  if (name == NULL || buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  var = getenv(name);

  if (var == NULL)
    return UV_ENOENT;

  len = strlen(var);

  if (len >= *size) {
    *size = len + 1;
    return UV_ENOBUFS;
  }

  memcpy(buffer, var, len + 1);
  *size = len;

  return 0;
}


int uv_os_setenv(const char* name, const char* value) {
  if (name == NULL || value == NULL)
    return UV_EINVAL;

  if (setenv(name, value, 1) != 0)
    return UV__ERR(errno);

  return 0;
}


int uv_os_unsetenv(const char* name) {
  if (name == NULL)
    return UV_EINVAL;

  if (unsetenv(name) != 0)
    return UV__ERR(errno);

  return 0;
}


int uv_os_gethostname(char* buffer, size_t* size) {
  /*
    On some platforms, if the input buffer is not large enough, gethostname()
    succeeds, but truncates the result. libuv can detect this and return ENOBUFS
    instead by creating a large enough buffer and comparing the hostname length
    to the size input.
  */
  char buf[UV_MAXHOSTNAMESIZE];
  size_t len;

  if (buffer == NULL || size == NULL || *size == 0)
    return UV_EINVAL;

  if (gethostname(buf, sizeof(buf)) != 0)
    return UV__ERR(errno);

  buf[sizeof(buf) - 1] = '\0'; /* Null terminate, just to be safe. */
  len = strlen(buf);

  if (len >= *size) {
    *size = len + 1;
    return UV_ENOBUFS;
  }

  memcpy(buffer, buf, len + 1);
  *size = len;
  return 0;
}


uv_os_fd_t uv_get_osfhandle(int fd) {
  return fd;
}

int uv_open_osfhandle(uv_os_fd_t os_fd) {
  return os_fd;
}

uv_pid_t uv_os_getpid(void) {
  return getpid();
}


uv_pid_t uv_os_getppid(void) {
  return getppid();
}


int uv_os_getpriority(uv_pid_t pid, int* priority) {
  int r;

  if (priority == NULL)
    return UV_EINVAL;

  errno = 0;
  r = getpriority(PRIO_PROCESS, (int) pid);

  if (r == -1 && errno != 0)
    return UV__ERR(errno);

  *priority = r;
  return 0;
}


int uv_os_setpriority(uv_pid_t pid, int priority) {
  if (priority < UV_PRIORITY_HIGHEST || priority > UV_PRIORITY_LOW)
    return UV_EINVAL;

  if (setpriority(PRIO_PROCESS, (int) pid, priority) != 0)
    return UV__ERR(errno);

  return 0;
}


int uv_os_uname(uv_utsname_t* buffer) {
  struct utsname buf;
  int r;

  if (buffer == NULL)
    return UV_EINVAL;

  if (uname(&buf) == -1) {
    r = UV__ERR(errno);
    goto error;
  }

  r = uv__strscpy(buffer->sysname, buf.sysname, sizeof(buffer->sysname));
  if (r == UV_E2BIG)
    goto error;

#ifdef _AIX
  r = snprintf(buffer->release,
               sizeof(buffer->release),
               "%s.%s",
               buf.version,
               buf.release);
  if (r >= sizeof(buffer->release)) {
    r = UV_E2BIG;
    goto error;
  }
#else
  r = uv__strscpy(buffer->release, buf.release, sizeof(buffer->release));
  if (r == UV_E2BIG)
    goto error;
#endif

  r = uv__strscpy(buffer->version, buf.version, sizeof(buffer->version));
  if (r == UV_E2BIG)
    goto error;

#if defined(_AIX) || defined(__PASE__)
  r = uv__strscpy(buffer->machine, "ppc64", sizeof(buffer->machine));
#else
  r = uv__strscpy(buffer->machine, buf.machine, sizeof(buffer->machine));
#endif

  if (r == UV_E2BIG)
    goto error;

  return 0;

error:
  buffer->sysname[0] = '\0';
  buffer->release[0] = '\0';
  buffer->version[0] = '\0';
  buffer->machine[0] = '\0';
  return r;
}

int uv__getsockpeername(const uv_handle_t* handle,
                        uv__peersockfunc func,
                        struct sockaddr* name,
                        int* namelen) {
  socklen_t socklen;
  uv_os_fd_t fd;
  int r;

  r = uv_fileno(handle, &fd);
  if (r < 0)
    return r;

  /* sizeof(socklen_t) != sizeof(int) on some systems. */
  socklen = (socklen_t) *namelen;

  if (func(fd, name, &socklen))
    return UV__ERR(errno);

  *namelen = (int) socklen;
  return 0;
}

int uv_gettimeofday(uv_timeval64_t* tv) {
  struct timeval time;

  if (tv == NULL)
    return UV_EINVAL;

  if (gettimeofday(&time, NULL) != 0)
    return UV__ERR(errno);

  tv->tv_sec = (int64_t) time.tv_sec;
  tv->tv_usec = (int32_t) time.tv_usec;
  return 0;
}