1 | /* Copyright Joyent, Inc. and other Node contributors. All rights reserved. |
2 | * |
3 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
4 | * of this software and associated documentation files (the "Software"), to |
5 | * deal in the Software without restriction, including without limitation the |
6 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
7 | * sell copies of the Software, and to permit persons to whom the Software is |
8 | * furnished to do so, subject to the following conditions: |
9 | * |
10 | * The above copyright notice and this permission notice shall be included in |
11 | * all copies or substantial portions of the Software. |
12 | * |
13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
15 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
16 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
17 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
18 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
19 | * IN THE SOFTWARE. |
20 | */ |
21 | |
22 | #include "uv.h" |
23 | #include "internal.h" |
24 | |
25 | #include <assert.h> |
26 | #include <string.h> |
27 | #include <errno.h> |
28 | #include <stdlib.h> |
29 | #include <unistd.h> |
30 | #if defined(__MVS__) |
31 | #include <xti.h> |
32 | #endif |
33 | #include <sys/un.h> |
34 | |
35 | #if defined(IPV6_JOIN_GROUP) && !defined(IPV6_ADD_MEMBERSHIP) |
36 | # define IPV6_ADD_MEMBERSHIP IPV6_JOIN_GROUP |
37 | #endif |
38 | |
39 | #if defined(IPV6_LEAVE_GROUP) && !defined(IPV6_DROP_MEMBERSHIP) |
40 | # define IPV6_DROP_MEMBERSHIP IPV6_LEAVE_GROUP |
41 | #endif |
42 | |
43 | |
44 | static void uv__udp_run_completed(uv_udp_t* handle); |
45 | static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents); |
46 | static void uv__udp_recvmsg(uv_udp_t* handle); |
47 | static void uv__udp_sendmsg(uv_udp_t* handle); |
48 | static int uv__udp_maybe_deferred_bind(uv_udp_t* handle, |
49 | int domain, |
50 | unsigned int flags); |
51 | |
52 | |
53 | void uv__udp_close(uv_udp_t* handle) { |
54 | uv__io_close(handle->loop, &handle->io_watcher); |
55 | uv__handle_stop(handle); |
56 | |
57 | if (handle->io_watcher.fd != -1) { |
58 | uv__close(handle->io_watcher.fd); |
59 | handle->io_watcher.fd = -1; |
60 | } |
61 | } |
62 | |
63 | |
/* Second stage of closing a UDP handle, run once the socket is already
 * closed (io_watcher.fd == -1): cancel every queued send request so its
 * callback fires with UV_ECANCELED, then drop the receive callbacks.
 */
void uv__udp_finish_close(uv_udp_t* handle) {
  uv_udp_send_t* req;
  QUEUE* q;

  assert(!uv__io_active(&handle->io_watcher, POLLIN | POLLOUT));
  assert(handle->io_watcher.fd == -1);

  /* Move all still-pending requests to the completed queue with a
   * cancellation status; uv__udp_run_completed() below invokes their
   * callbacks and releases their buffers.
   */
  while (!QUEUE_EMPTY(&handle->write_queue)) {
    q = QUEUE_HEAD(&handle->write_queue);
    QUEUE_REMOVE(q);

    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    req->status = UV_ECANCELED;
    QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
  }

  uv__udp_run_completed(handle);

  /* Every request has been accounted for at this point. */
  assert(handle->send_queue_size == 0);
  assert(handle->send_queue_count == 0);

  /* Now tear down the handle. */
  handle->recv_cb = NULL;
  handle->alloc_cb = NULL;
  /* but _do not_ touch close_cb */
}
90 | |
91 | |
/* Drain the write_completed_queue: unregister each finished request,
 * update the send-queue accounting, free any heap-allocated buffer copy
 * and invoke the user's send callback.  Stops the POLLOUT watcher (and
 * the handle, if it is not also reading) once both queues are empty.
 * UV_HANDLE_UDP_PROCESSING is set for the duration; uv__udp_send()
 * checks it to avoid a synchronous send from inside a send callback.
 */
static void uv__udp_run_completed(uv_udp_t* handle) {
  uv_udp_send_t* req;
  QUEUE* q;

  assert(!(handle->flags & UV_HANDLE_UDP_PROCESSING));
  handle->flags |= UV_HANDLE_UDP_PROCESSING;

  while (!QUEUE_EMPTY(&handle->write_completed_queue)) {
    q = QUEUE_HEAD(&handle->write_completed_queue);
    QUEUE_REMOVE(q);

    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    uv__req_unregister(handle->loop, req);

    /* The request leaves the queues for good: deduct its payload. */
    handle->send_queue_size -= uv__count_bufs(req->bufs, req->nbufs);
    handle->send_queue_count--;

    /* Free the buffer list only if it was heap-allocated (more bufs
     * than fit in the inline bufsml array). */
    if (req->bufs != req->bufsml)
      uv__free(req->bufs);
    req->bufs = NULL;

    if (req->send_cb == NULL)
      continue;

    /* req->status >= 0 == bytes written
     * req->status < 0 == errno
     */
    if (req->status >= 0)
      req->send_cb(req, 0);
    else
      req->send_cb(req, req->status);
  }

  if (QUEUE_EMPTY(&handle->write_queue)) {
    /* Pending queue and completion queue empty, stop watcher. */
    uv__io_stop(handle->loop, &handle->io_watcher, POLLOUT);
    if (!uv__io_active(&handle->io_watcher, POLLIN))
      uv__handle_stop(handle);
  }

  handle->flags &= ~UV_HANDLE_UDP_PROCESSING;
}
134 | |
135 | |
/* I/O watcher callback: dispatch readiness events on the UDP socket.
 * POLLIN drives the receive path; POLLOUT flushes the write queue and
 * then runs the completion callbacks.
 */
static void uv__udp_io(uv_loop_t* loop, uv__io_t* w, unsigned int revents) {
  uv_udp_t* handle;

  handle = container_of(w, uv_udp_t, io_watcher);
  assert(handle->type == UV_UDP);

  if (revents & POLLIN)
    uv__udp_recvmsg(handle);

  if (revents & POLLOUT) {
    uv__udp_sendmsg(handle);
    uv__udp_run_completed(handle);
  }
}
150 | |
151 | |
/* Receive datagrams and deliver them via handle->recv_cb.  Each
 * iteration asks alloc_cb for a buffer, reads at most one datagram into
 * it with recvmsg() and reports data, would-block (as nread == 0 with a
 * NULL addr) or an error to recv_cb.
 */
static void uv__udp_recvmsg(uv_udp_t* handle) {
  struct sockaddr_storage peer;
  struct msghdr h;
  ssize_t nread;
  uv_buf_t buf;
  int flags;
  int count;

  assert(handle->recv_cb != NULL);
  assert(handle->alloc_cb != NULL);

  /* Prevent loop starvation when the data comes in as fast as (or faster than)
   * we can read it. XXX Need to rearm fd if we switch to edge-triggered I/O.
   */
  count = 32;

  memset(&h, 0, sizeof(h));
  h.msg_name = &peer;

  do {
    /* Ask the user for a buffer; a NULL/empty one means "no memory". */
    buf = uv_buf_init(NULL, 0);
    handle->alloc_cb((uv_handle_t*) handle, 64 * 1024, &buf);
    if (buf.base == NULL || buf.len == 0) {
      handle->recv_cb(handle, UV_ENOBUFS, &buf, NULL, 0);
      return;
    }
    assert(buf.base != NULL);

    /* msg_namelen is value-result: reset it before every recvmsg(). */
    h.msg_namelen = sizeof(peer);
    h.msg_iov = (void*) &buf;
    h.msg_iovlen = 1;

    /* Retry if interrupted by a signal. */
    do {
      nread = recvmsg(handle->io_watcher.fd, &h, 0);
    }
    while (nread == -1 && errno == EINTR);

    if (nread == -1) {
      /* EAGAIN/EWOULDBLOCK is reported as a zero-byte read so the user
       * gets the buffer back; everything else is a real error. */
      if (errno == EAGAIN || errno == EWOULDBLOCK)
        handle->recv_cb(handle, 0, &buf, NULL, 0);
      else
        handle->recv_cb(handle, UV__ERR(errno), &buf, NULL, 0);
    }
    else {
      const struct sockaddr *addr;
      if (h.msg_namelen == 0)
        addr = NULL;
      else
        addr = (const struct sockaddr*) &peer;

      /* MSG_TRUNC means the datagram did not fit in the buffer. */
      flags = 0;
      if (h.msg_flags & MSG_TRUNC)
        flags |= UV_UDP_PARTIAL;

      handle->recv_cb(handle, nread, &buf, addr, flags);
    }
  }
  /* recv_cb callback may decide to pause or close the handle */
  while (nread != -1
      && count-- > 0
      && handle->io_watcher.fd != -1
      && handle->recv_cb != NULL);
}
215 | |
216 | |
/* Flush the write_queue: transmit each queued request with sendmsg().
 * Stops early when the kernel would block.  Finished requests (success
 * or hard error) move to the write_completed_queue; uv__io_feed() makes
 * the loop run uv__udp_io() so their callbacks fire.
 */
static void uv__udp_sendmsg(uv_udp_t* handle) {
  uv_udp_send_t* req;
  QUEUE* q;
  struct msghdr h;
  ssize_t size;

  while (!QUEUE_EMPTY(&handle->write_queue)) {
    q = QUEUE_HEAD(&handle->write_queue);
    assert(q != NULL);

    req = QUEUE_DATA(q, uv_udp_send_t, queue);
    assert(req != NULL);

    memset(&h, 0, sizeof h);
    /* AF_UNSPEC marks a request on a connected socket: no destination. */
    if (req->addr.ss_family == AF_UNSPEC) {
      h.msg_name = NULL;
      h.msg_namelen = 0;
    } else {
      h.msg_name = &req->addr;
      if (req->addr.ss_family == AF_INET6)
        h.msg_namelen = sizeof(struct sockaddr_in6);
      else if (req->addr.ss_family == AF_INET)
        h.msg_namelen = sizeof(struct sockaddr_in);
      else if (req->addr.ss_family == AF_UNIX)
        h.msg_namelen = sizeof(struct sockaddr_un);
      else {
        assert(0 && "unsupported address family" );
        abort();
      }
    }
    h.msg_iov = (struct iovec*) req->bufs;
    h.msg_iovlen = req->nbufs;

    /* Retry if interrupted by a signal. */
    do {
      size = sendmsg(handle->io_watcher.fd, &h, 0);
    } while (size == -1 && errno == EINTR);

    if (size == -1) {
      /* Would block: leave the request queued and try again when the
       * socket becomes writable.  Other errors fall through and are
       * recorded in req->status below. */
      if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
        break;
    }

    req->status = (size == -1 ? UV__ERR(errno) : size);

    /* Sending a datagram is an atomic operation: either all data
     * is written or nothing is (and EMSGSIZE is raised). That is
     * why we don't handle partial writes. Just pop the request
     * off the write queue and onto the completed queue, done.
     */
    QUEUE_REMOVE(&req->queue);
    QUEUE_INSERT_TAIL(&handle->write_completed_queue, &req->queue);
    uv__io_feed(handle->loop, &handle->io_watcher);
  }
}
271 | |
272 | |
273 | /* On the BSDs, SO_REUSEPORT implies SO_REUSEADDR but with some additional |
274 | * refinements for programs that use multicast. |
275 | * |
276 | * Linux as of 3.9 has a SO_REUSEPORT socket option but with semantics that |
277 | * are different from the BSDs: it _shares_ the port rather than steal it |
278 | * from the current listener. While useful, it's not something we can emulate |
279 | * on other platforms so we don't enable it. |
280 | * |
281 | * zOS does not support getsockname with SO_REUSEPORT option when using |
282 | * AF_UNIX. |
283 | */ |
/* Enable address/port reuse on `fd`.  Which option is used depends on
 * the platform; see the comment block above for the rationale.
 * Returns 0 or a negative libuv error code.
 */
static int uv__set_reuse(int fd) {
  int yes;
  yes = 1;

#if defined(SO_REUSEPORT) && defined(__MVS__)
  /* zOS: SO_REUSEPORT breaks getsockname() on AF_UNIX sockets, so
   * check the socket's family first and fall back to SO_REUSEADDR. */
  struct sockaddr_in sockfd;
  unsigned int sockfd_len = sizeof(sockfd);
  if (getsockname(fd, (struct sockaddr*) &sockfd, &sockfd_len) == -1)
      return UV__ERR(errno);
  if (sockfd.sin_family == AF_UNIX) {
    if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)))
      return UV__ERR(errno);
  } else {
    if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
       return UV__ERR(errno);
  }
#elif defined(SO_REUSEPORT) && !defined(__linux__)
  /* BSDs: SO_REUSEPORT implies SO_REUSEADDR (Linux semantics differ,
   * hence the exclusion). */
  if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &yes, sizeof(yes)))
    return UV__ERR(errno);
#else
  if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)))
    return UV__ERR(errno);
#endif

  return 0;
}
310 | |
311 | |
/* Bind the handle to `addr`, creating the datagram socket on demand.
 * `flags` may contain UV_UDP_IPV6ONLY and/or UV_UDP_REUSEADDR.
 * Returns 0 or a negative libuv error code.
 */
int uv__udp_bind(uv_udp_t* handle,
                 const struct sockaddr* addr,
                 unsigned int addrlen,
                 unsigned int flags) {
  int err;
  int yes;
  int fd;

  /* Check for bad flags. */
  if (flags & ~(UV_UDP_IPV6ONLY | UV_UDP_REUSEADDR))
    return UV_EINVAL;

  /* Cannot set IPv6-only mode on non-IPv6 socket. */
  if ((flags & UV_UDP_IPV6ONLY) && addr->sa_family != AF_INET6)
    return UV_EINVAL;

  /* Create the socket lazily; uv_udp_init() may have left fd == -1. */
  fd = handle->io_watcher.fd;
  if (fd == -1) {
    err = uv__socket(addr->sa_family, SOCK_DGRAM, 0);
    if (err < 0)
      return err;
    fd = err;
    handle->io_watcher.fd = fd;
  }

  if (flags & UV_UDP_REUSEADDR) {
    err = uv__set_reuse(fd);
    if (err)
      return err;
  }

  if (flags & UV_UDP_IPV6ONLY) {
#ifdef IPV6_V6ONLY
    yes = 1;
    if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &yes, sizeof yes) == -1) {
      err = UV__ERR(errno);
      return err;
    }
#else
    err = UV_ENOTSUP;
    return err;
#endif
  }

  if (bind(fd, addr, addrlen)) {
    err = UV__ERR(errno);
    if (errno == EAFNOSUPPORT)
      /* OSX, other BSDs and SunoS fail with EAFNOSUPPORT when binding a
       * socket created with AF_INET to an AF_INET6 address or vice versa. */
      err = UV_EINVAL;
    return err;
  }

  /* Remember the family so setsockopt helpers pick the right level. */
  if (addr->sa_family == AF_INET6)
    handle->flags |= UV_HANDLE_IPV6;

  handle->flags |= UV_HANDLE_BOUND;
  return 0;
}
371 | |
372 | |
373 | static int uv__udp_maybe_deferred_bind(uv_udp_t* handle, |
374 | int domain, |
375 | unsigned int flags) { |
376 | union { |
377 | struct sockaddr_in6 in6; |
378 | struct sockaddr_in in; |
379 | struct sockaddr addr; |
380 | } taddr; |
381 | socklen_t addrlen; |
382 | |
383 | if (handle->io_watcher.fd != -1) |
384 | return 0; |
385 | |
386 | switch (domain) { |
387 | case AF_INET: |
388 | { |
389 | struct sockaddr_in* addr = &taddr.in; |
390 | memset(addr, 0, sizeof *addr); |
391 | addr->sin_family = AF_INET; |
392 | addr->sin_addr.s_addr = INADDR_ANY; |
393 | addrlen = sizeof *addr; |
394 | break; |
395 | } |
396 | case AF_INET6: |
397 | { |
398 | struct sockaddr_in6* addr = &taddr.in6; |
399 | memset(addr, 0, sizeof *addr); |
400 | addr->sin6_family = AF_INET6; |
401 | addr->sin6_addr = in6addr_any; |
402 | addrlen = sizeof *addr; |
403 | break; |
404 | } |
405 | default: |
406 | assert(0 && "unsupported address family" ); |
407 | abort(); |
408 | } |
409 | |
410 | return uv__udp_bind(handle, &taddr.addr, addrlen, flags); |
411 | } |
412 | |
413 | |
414 | int uv__udp_connect(uv_udp_t* handle, |
415 | const struct sockaddr* addr, |
416 | unsigned int addrlen) { |
417 | int err; |
418 | |
419 | err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0); |
420 | if (err) |
421 | return err; |
422 | |
423 | do { |
424 | errno = 0; |
425 | err = connect(handle->io_watcher.fd, addr, addrlen); |
426 | } while (err == -1 && errno == EINTR); |
427 | |
428 | if (err) |
429 | return UV__ERR(errno); |
430 | |
431 | handle->flags |= UV_HANDLE_UDP_CONNECTED; |
432 | |
433 | return 0; |
434 | } |
435 | |
436 | |
437 | int uv__udp_disconnect(uv_udp_t* handle) { |
438 | int r; |
439 | struct sockaddr addr; |
440 | |
441 | memset(&addr, 0, sizeof(addr)); |
442 | |
443 | addr.sa_family = AF_UNSPEC; |
444 | |
445 | do { |
446 | errno = 0; |
447 | r = connect(handle->io_watcher.fd, &addr, sizeof(addr)); |
448 | } while (r == -1 && errno == EINTR); |
449 | |
450 | if (r == -1 && errno != EAFNOSUPPORT) |
451 | return UV__ERR(errno); |
452 | |
453 | handle->flags &= ~UV_HANDLE_UDP_CONNECTED; |
454 | return 0; |
455 | } |
456 | |
457 | |
/* Queue a send request for `bufs` to `addr` (NULL on a connected
 * socket).  Tries a synchronous sendmsg() when the queue is empty;
 * otherwise (or when the kernel would block) arms the POLLOUT watcher
 * so uv__udp_sendmsg() flushes the queue later.  Returns 0 or a
 * negative libuv error code.
 */
int uv__udp_send(uv_udp_send_t* req,
                 uv_udp_t* handle,
                 const uv_buf_t bufs[],
                 unsigned int nbufs,
                 const struct sockaddr* addr,
                 unsigned int addrlen,
                 uv_udp_send_cb send_cb) {
  int err;
  int empty_queue;

  assert(nbufs > 0);

  if (addr) {
    err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
    if (err)
      return err;
  }

  /* It's legal for send_queue_count > 0 even when the write_queue is empty;
   * it means there are error-state requests in the write_completed_queue that
   * will touch up send_queue_size/count later.
   */
  empty_queue = (handle->send_queue_count == 0);

  uv__req_init(handle->loop, req, UV_UDP_SEND);
  assert(addrlen <= sizeof(req->addr));
  /* AF_UNSPEC in req->addr tells uv__udp_sendmsg() to send without a
   * destination (connected socket). */
  if (addr == NULL)
    req->addr.ss_family = AF_UNSPEC;
  else
    memcpy(&req->addr, addr, addrlen);
  req->send_cb = send_cb;
  req->handle = handle;
  req->nbufs = nbufs;

  /* Copy the buffer list: inline storage if it fits, heap otherwise. */
  req->bufs = req->bufsml;
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(bufs[0]));

  if (req->bufs == NULL) {
    uv__req_unregister(handle->loop, req);
    return UV_ENOMEM;
  }

  memcpy(req->bufs, bufs, nbufs * sizeof(bufs[0]));
  handle->send_queue_size += uv__count_bufs(req->bufs, req->nbufs);
  handle->send_queue_count++;
  QUEUE_INSERT_TAIL(&handle->write_queue, &req->queue);
  uv__handle_start(handle);

  /* Only attempt an immediate send when nothing is ahead of us and we
   * are not being called from inside uv__udp_run_completed(). */
  if (empty_queue && !(handle->flags & UV_HANDLE_UDP_PROCESSING)) {
    uv__udp_sendmsg(handle);

    /* `uv__udp_sendmsg` may not be able to do non-blocking write straight
     * away. In such cases the `io_watcher` has to be queued for asynchronous
     * write.
     */
    if (!QUEUE_EMPTY(&handle->write_queue))
      uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
  } else {
    uv__io_start(handle->loop, &handle->io_watcher, POLLOUT);
  }

  return 0;
}
522 | |
523 | |
/* Synchronous, non-queuing send.  Returns the number of bytes sent,
 * UV_EAGAIN when the kernel would block or requests are already queued,
 * or another negative libuv error code.
 */
int uv__udp_try_send(uv_udp_t* handle,
                     const uv_buf_t bufs[],
                     unsigned int nbufs,
                     const struct sockaddr* addr,
                     unsigned int addrlen) {
  int err;
  struct msghdr h;
  ssize_t size;

  assert(nbufs > 0);

  /* already sending a message */
  if (handle->send_queue_count != 0)
    return UV_EAGAIN;

  if (addr) {
    err = uv__udp_maybe_deferred_bind(handle, addr->sa_family, 0);
    if (err)
      return err;
  } else {
    /* No destination is only valid on a connected socket. */
    assert(handle->flags & UV_HANDLE_UDP_CONNECTED);
  }

  memset(&h, 0, sizeof h);
  h.msg_name = (struct sockaddr*) addr;
  h.msg_namelen = addrlen;
  h.msg_iov = (struct iovec*) bufs;
  h.msg_iovlen = nbufs;

  /* Retry if interrupted by a signal. */
  do {
    size = sendmsg(handle->io_watcher.fd, &h, 0);
  } while (size == -1 && errno == EINTR);

  if (size == -1) {
    if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ENOBUFS)
      return UV_EAGAIN;
    else
      return UV__ERR(errno);
  }

  return size;
}
566 | |
567 | |
568 | static int uv__udp_set_membership4(uv_udp_t* handle, |
569 | const struct sockaddr_in* multicast_addr, |
570 | const char* interface_addr, |
571 | uv_membership membership) { |
572 | struct ip_mreq mreq; |
573 | int optname; |
574 | int err; |
575 | |
576 | memset(&mreq, 0, sizeof mreq); |
577 | |
578 | if (interface_addr) { |
579 | err = uv_inet_pton(AF_INET, interface_addr, &mreq.imr_interface.s_addr); |
580 | if (err) |
581 | return err; |
582 | } else { |
583 | mreq.imr_interface.s_addr = htonl(INADDR_ANY); |
584 | } |
585 | |
586 | mreq.imr_multiaddr.s_addr = multicast_addr->sin_addr.s_addr; |
587 | |
588 | switch (membership) { |
589 | case UV_JOIN_GROUP: |
590 | optname = IP_ADD_MEMBERSHIP; |
591 | break; |
592 | case UV_LEAVE_GROUP: |
593 | optname = IP_DROP_MEMBERSHIP; |
594 | break; |
595 | default: |
596 | return UV_EINVAL; |
597 | } |
598 | |
599 | if (setsockopt(handle->io_watcher.fd, |
600 | IPPROTO_IP, |
601 | optname, |
602 | &mreq, |
603 | sizeof(mreq))) { |
604 | #if defined(__MVS__) |
605 | if (errno == ENXIO) |
606 | return UV_ENODEV; |
607 | #endif |
608 | return UV__ERR(errno); |
609 | } |
610 | |
611 | return 0; |
612 | } |
613 | |
614 | |
615 | static int uv__udp_set_membership6(uv_udp_t* handle, |
616 | const struct sockaddr_in6* multicast_addr, |
617 | const char* interface_addr, |
618 | uv_membership membership) { |
619 | int optname; |
620 | struct ipv6_mreq mreq; |
621 | struct sockaddr_in6 addr6; |
622 | |
623 | memset(&mreq, 0, sizeof mreq); |
624 | |
625 | if (interface_addr) { |
626 | if (uv_ip6_addr(interface_addr, 0, &addr6)) |
627 | return UV_EINVAL; |
628 | mreq.ipv6mr_interface = addr6.sin6_scope_id; |
629 | } else { |
630 | mreq.ipv6mr_interface = 0; |
631 | } |
632 | |
633 | mreq.ipv6mr_multiaddr = multicast_addr->sin6_addr; |
634 | |
635 | switch (membership) { |
636 | case UV_JOIN_GROUP: |
637 | optname = IPV6_ADD_MEMBERSHIP; |
638 | break; |
639 | case UV_LEAVE_GROUP: |
640 | optname = IPV6_DROP_MEMBERSHIP; |
641 | break; |
642 | default: |
643 | return UV_EINVAL; |
644 | } |
645 | |
646 | if (setsockopt(handle->io_watcher.fd, |
647 | IPPROTO_IPV6, |
648 | optname, |
649 | &mreq, |
650 | sizeof(mreq))) { |
651 | #if defined(__MVS__) |
652 | if (errno == ENXIO) |
653 | return UV_ENODEV; |
654 | #endif |
655 | return UV__ERR(errno); |
656 | } |
657 | |
658 | return 0; |
659 | } |
660 | |
661 | |
662 | int uv_udp_init_ex(uv_loop_t* loop, uv_udp_t* handle, unsigned int flags) { |
663 | int domain; |
664 | int err; |
665 | int fd; |
666 | |
667 | /* Use the lower 8 bits for the domain */ |
668 | domain = flags & 0xFF; |
669 | if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC) |
670 | return UV_EINVAL; |
671 | |
672 | if (flags & ~0xFF) |
673 | return UV_EINVAL; |
674 | |
675 | if (domain != AF_UNSPEC) { |
676 | err = uv__socket(domain, SOCK_DGRAM, 0); |
677 | if (err < 0) |
678 | return err; |
679 | fd = err; |
680 | } else { |
681 | fd = -1; |
682 | } |
683 | |
684 | uv__handle_init(loop, (uv_handle_t*)handle, UV_UDP); |
685 | handle->alloc_cb = NULL; |
686 | handle->recv_cb = NULL; |
687 | handle->send_queue_size = 0; |
688 | handle->send_queue_count = 0; |
689 | uv__io_init(&handle->io_watcher, uv__udp_io, fd); |
690 | QUEUE_INIT(&handle->write_queue); |
691 | QUEUE_INIT(&handle->write_completed_queue); |
692 | |
693 | return 0; |
694 | } |
695 | |
696 | |
/* Initialize a UDP handle with no preset address family; the socket is
 * created lazily on first bind/send/recv. */
int uv_udp_init(uv_loop_t* loop, uv_udp_t* handle) {
  return uv_udp_init_ex(loop, handle, AF_UNSPEC);
}
700 | |
701 | |
702 | int uv_udp_open(uv_udp_t* handle, uv_os_sock_t sock) { |
703 | int err; |
704 | |
705 | /* Check for already active socket. */ |
706 | if (handle->io_watcher.fd != -1) |
707 | return UV_EBUSY; |
708 | |
709 | if (uv__fd_exists(handle->loop, sock)) |
710 | return UV_EEXIST; |
711 | |
712 | err = uv__nonblock(sock, 1); |
713 | if (err) |
714 | return err; |
715 | |
716 | err = uv__set_reuse(sock); |
717 | if (err) |
718 | return err; |
719 | |
720 | handle->io_watcher.fd = sock; |
721 | if (uv__udp_is_connected(handle)) |
722 | handle->flags |= UV_HANDLE_UDP_CONNECTED; |
723 | |
724 | return 0; |
725 | } |
726 | |
727 | |
728 | int uv_udp_set_membership(uv_udp_t* handle, |
729 | const char* multicast_addr, |
730 | const char* interface_addr, |
731 | uv_membership membership) { |
732 | int err; |
733 | struct sockaddr_in addr4; |
734 | struct sockaddr_in6 addr6; |
735 | |
736 | if (uv_ip4_addr(multicast_addr, 0, &addr4) == 0) { |
737 | err = uv__udp_maybe_deferred_bind(handle, AF_INET, UV_UDP_REUSEADDR); |
738 | if (err) |
739 | return err; |
740 | return uv__udp_set_membership4(handle, &addr4, interface_addr, membership); |
741 | } else if (uv_ip6_addr(multicast_addr, 0, &addr6) == 0) { |
742 | err = uv__udp_maybe_deferred_bind(handle, AF_INET6, UV_UDP_REUSEADDR); |
743 | if (err) |
744 | return err; |
745 | return uv__udp_set_membership6(handle, &addr6, interface_addr, membership); |
746 | } else { |
747 | return UV_EINVAL; |
748 | } |
749 | } |
750 | |
751 | static int uv__setsockopt(uv_udp_t* handle, |
752 | int option4, |
753 | int option6, |
754 | const void* val, |
755 | size_t size) { |
756 | int r; |
757 | |
758 | if (handle->flags & UV_HANDLE_IPV6) |
759 | r = setsockopt(handle->io_watcher.fd, |
760 | IPPROTO_IPV6, |
761 | option6, |
762 | val, |
763 | size); |
764 | else |
765 | r = setsockopt(handle->io_watcher.fd, |
766 | IPPROTO_IP, |
767 | option4, |
768 | val, |
769 | size); |
770 | if (r) |
771 | return UV__ERR(errno); |
772 | |
773 | return 0; |
774 | } |
775 | |
/* Like uv__setsockopt(), but passes the value with whatever integer
 * width the platform expects for byte-ranged options (char on Solaris,
 * AIX and zOS; unsigned char on OpenBSD; int elsewhere).  `val` must be
 * in [0, 255].  Returns 0 or a negative libuv error code.
 */
static int uv__setsockopt_maybe_char(uv_udp_t* handle,
                                     int option4,
                                     int option6,
                                     int val) {
#if defined(__sun) || defined(_AIX) || defined(__MVS__)
  char arg = val;
#elif defined(__OpenBSD__)
  unsigned char arg = val;
#else
  int arg = val;
#endif

  if (val < 0 || val > 255)
    return UV_EINVAL;

  return uv__setsockopt(handle, option4, option6, &arg, sizeof(arg));
}
793 | |
794 | |
795 | int uv_udp_set_broadcast(uv_udp_t* handle, int on) { |
796 | if (setsockopt(handle->io_watcher.fd, |
797 | SOL_SOCKET, |
798 | SO_BROADCAST, |
799 | &on, |
800 | sizeof(on))) { |
801 | return UV__ERR(errno); |
802 | } |
803 | |
804 | return 0; |
805 | } |
806 | |
807 | |
/* Set the unicast TTL (IP_TTL / IPV6_UNICAST_HOPS) for outgoing
 * packets.  `ttl` must be in [1, 255].  Returns 0 or a negative libuv
 * error code.
 */
int uv_udp_set_ttl(uv_udp_t* handle, int ttl) {
  if (ttl < 1 || ttl > 255)
    return UV_EINVAL;

#if defined(__MVS__)
  if (!(handle->flags & UV_HANDLE_IPV6))
    return UV_ENOTSUP;  /* zOS does not support setting ttl for IPv4 */
#endif

/*
 * On Solaris and derivatives such as SmartOS, the length of socket options
 * is sizeof(int) for IP_TTL and IPV6_UNICAST_HOPS,
 * so hardcode the size of these options on this platform,
 * and use the general uv__setsockopt_maybe_char call on other platforms.
 */
#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
    defined(__MVS__)

  return uv__setsockopt(handle,
                        IP_TTL,
                        IPV6_UNICAST_HOPS,
                        &ttl,
                        sizeof(ttl));

#else /* !(defined(__sun) || defined(_AIX) || defined (__OpenBSD__) ||
       defined(__MVS__)) */

  return uv__setsockopt_maybe_char(handle,
                                   IP_TTL,
                                   IPV6_UNICAST_HOPS,
                                   ttl);

#endif /* defined(__sun) || defined(_AIX) || defined (__OpenBSD__) ||
          defined(__MVS__) */
}
843 | |
844 | |
/* Set the multicast TTL (IP_MULTICAST_TTL / IPV6_MULTICAST_HOPS) for
 * outgoing multicast packets.  Returns 0 or a negative libuv error
 * code.
 */
int uv_udp_set_multicast_ttl(uv_udp_t* handle, int ttl) {
/*
 * On Solaris and derivatives such as SmartOS, the length of socket options
 * is sizeof(int) for IPV6_MULTICAST_HOPS and sizeof(char) for
 * IP_MULTICAST_TTL, so hardcode the size of the option in the IPv6 case,
 * and use the general uv__setsockopt_maybe_char call otherwise.
 */
#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
    defined(__MVS__)
  if (handle->flags & UV_HANDLE_IPV6)
    return uv__setsockopt(handle,
                          IP_MULTICAST_TTL,
                          IPV6_MULTICAST_HOPS,
                          &ttl,
                          sizeof(ttl));
#endif /* defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
    defined(__MVS__) */

  return uv__setsockopt_maybe_char(handle,
                                   IP_MULTICAST_TTL,
                                   IPV6_MULTICAST_HOPS,
                                   ttl);
}
868 | |
869 | |
/* Enable or disable local loopback of outgoing multicast packets
 * (IP_MULTICAST_LOOP / IPV6_MULTICAST_LOOP).  Returns 0 or a negative
 * libuv error code.
 */
int uv_udp_set_multicast_loop(uv_udp_t* handle, int on) {
/*
 * On Solaris and derivatives such as SmartOS, the length of socket options
 * is sizeof(int) for IPV6_MULTICAST_LOOP and sizeof(char) for
 * IP_MULTICAST_LOOP, so hardcode the size of the option in the IPv6 case,
 * and use the general uv__setsockopt_maybe_char call otherwise.
 */
#if defined(__sun) || defined(_AIX) || defined(__OpenBSD__) || \
    defined(__MVS__)
  if (handle->flags & UV_HANDLE_IPV6)
    return uv__setsockopt(handle,
                          IP_MULTICAST_LOOP,
                          IPV6_MULTICAST_LOOP,
                          &on,
                          sizeof(on));
#endif /* defined(__sun) || defined(_AIX) ||defined(__OpenBSD__) ||
    defined(__MVS__) */

  return uv__setsockopt_maybe_char(handle,
                                   IP_MULTICAST_LOOP,
                                   IPV6_MULTICAST_LOOP,
                                   on);
}
893 | |
/* Select the outgoing interface for multicast packets.
 * `interface_addr` may be an IPv4 address, an IPv6 address (the scope
 * id selects the interface), or NULL for the system default.  Returns 0
 * or a negative libuv error code.
 */
int uv_udp_set_multicast_interface(uv_udp_t* handle, const char* interface_addr) {
  struct sockaddr_storage addr_st;
  struct sockaddr_in* addr4;
  struct sockaddr_in6* addr6;

  /* addr4 and addr6 both alias addr_st; which view is valid is decided
   * by addr_st.ss_family below. */
  addr4 = (struct sockaddr_in*) &addr_st;
  addr6 = (struct sockaddr_in6*) &addr_st;

  if (!interface_addr) {
    /* Default interface, in the family the handle was bound with. */
    memset(&addr_st, 0, sizeof addr_st);
    if (handle->flags & UV_HANDLE_IPV6) {
      addr_st.ss_family = AF_INET6;
      addr6->sin6_scope_id = 0;
    } else {
      addr_st.ss_family = AF_INET;
      addr4->sin_addr.s_addr = htonl(INADDR_ANY);
    }
  } else if (uv_ip4_addr(interface_addr, 0, addr4) == 0) {
    /* nothing, address was parsed */
  } else if (uv_ip6_addr(interface_addr, 0, addr6) == 0) {
    /* nothing, address was parsed */
  } else {
    return UV_EINVAL;
  }

  if (addr_st.ss_family == AF_INET) {
    if (setsockopt(handle->io_watcher.fd,
                   IPPROTO_IP,
                   IP_MULTICAST_IF,
                   (void*) &addr4->sin_addr,
                   sizeof(addr4->sin_addr)) == -1) {
      return UV__ERR(errno);
    }
  } else if (addr_st.ss_family == AF_INET6) {
    if (setsockopt(handle->io_watcher.fd,
                   IPPROTO_IPV6,
                   IPV6_MULTICAST_IF,
                   &addr6->sin6_scope_id,
                   sizeof(addr6->sin6_scope_id)) == -1) {
      return UV__ERR(errno);
    }
  } else {
    assert(0 && "unexpected address family" );
    abort();
  }

  return 0;
}
942 | |
/* Fill `name` with the address of the connected peer.  `namelen` is a
 * value-result parameter.  Returns 0 or a negative libuv error code. */
int uv_udp_getpeername(const uv_udp_t* handle,
                       struct sockaddr* name,
                       int* namelen) {

  return uv__getsockpeername((const uv_handle_t*) handle,
                             getpeername,
                             name,
                             namelen);
}
952 | |
/* Fill `name` with the local address the socket is bound to.  `namelen`
 * is a value-result parameter.  Returns 0 or a negative libuv error
 * code. */
int uv_udp_getsockname(const uv_udp_t* handle,
                       struct sockaddr* name,
                       int* namelen) {

  return uv__getsockpeername((const uv_handle_t*) handle,
                             getsockname,
                             name,
                             namelen);
}
962 | |
963 | |
/* Start receiving datagrams: store the callbacks, lazily bind to the
 * IPv4 wildcard address if needed, and arm the POLLIN watcher.  Both
 * callbacks are mandatory.  Returns 0 or a negative libuv error code.
 */
int uv__udp_recv_start(uv_udp_t* handle,
                       uv_alloc_cb alloc_cb,
                       uv_udp_recv_cb recv_cb) {
  int err;

  if (alloc_cb == NULL || recv_cb == NULL)
    return UV_EINVAL;

  /* Already reading. */
  if (uv__io_active(&handle->io_watcher, POLLIN))
    return UV_EALREADY;  /* FIXME(bnoordhuis) Should be UV_EBUSY. */

  err = uv__udp_maybe_deferred_bind(handle, AF_INET, 0);
  if (err)
    return err;

  handle->alloc_cb = alloc_cb;
  handle->recv_cb = recv_cb;

  uv__io_start(handle->loop, &handle->io_watcher, POLLIN);
  uv__handle_start(handle);

  return 0;
}
987 | |
988 | |
/* Stop receiving: disarm the POLLIN watcher and clear the callbacks.
 * The handle is stopped entirely unless sends are still pending
 * (POLLOUT active).  Always returns 0.
 */
int uv__udp_recv_stop(uv_udp_t* handle) {
  uv__io_stop(handle->loop, &handle->io_watcher, POLLIN);

  if (!uv__io_active(&handle->io_watcher, POLLOUT))
    uv__handle_stop(handle);

  handle->alloc_cb = NULL;
  handle->recv_cb = NULL;

  return 0;
}
1000 | |