1/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to
5 * deal in the Software without restriction, including without limitation the
6 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 * sell copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19 * IN THE SOFTWARE.
20 */
21
22/* Caveat emptor: this file deviates from the libuv convention of returning
23 * negated errno codes. Most uv_fs_*() functions map directly to the system
24 * call of the same name. For more complex wrappers, it's easier to just
25 * return -1 with errno set. The dispatcher in uv__fs_work() takes care of
26 * getting the errno to the right place (req->result or as the return value.)
27 */
28
29#include "uv.h"
30#include "internal.h"
31
32#include <errno.h>
33#include <stdio.h>
34#include <stdlib.h>
35#include <string.h>
36#include <limits.h> /* PATH_MAX */
37
38#include <sys/types.h>
39#include <sys/socket.h>
40#include <sys/stat.h>
41#include <sys/time.h>
42#include <sys/uio.h>
43#include <pthread.h>
44#include <unistd.h>
45#include <fcntl.h>
46#include <poll.h>
47
48#if defined(__DragonFly__) || \
49 defined(__FreeBSD__) || \
50 defined(__FreeBSD_kernel__) || \
51 defined(__OpenBSD__) || \
52 defined(__NetBSD__)
53# define HAVE_PREADV 1
54#else
55# define HAVE_PREADV 0
56#endif
57
58#if defined(__linux__) || defined(__sun)
59# include <sys/sendfile.h>
60#endif
61
62#if defined(__APPLE__)
63# include <sys/sysctl.h>
64#elif defined(__linux__) && !defined(FICLONE)
65# include <sys/ioctl.h>
66# define FICLONE _IOW(0x94, 9, int)
67#endif
68
69#if defined(_AIX) && !defined(_AIX71)
70# include <utime.h>
71#endif
72
73#if defined(__APPLE__) || \
74 defined(__DragonFly__) || \
75 defined(__FreeBSD__) || \
76 defined(__FreeBSD_kernel__) || \
77 defined(__OpenBSD__) || \
78 defined(__NetBSD__)
79# include <sys/param.h>
80# include <sys/mount.h>
81#elif defined(__sun) || defined(__MVS__)
82# include <sys/statvfs.h>
83#else
84# include <sys/statfs.h>
85#endif
86
87#if defined(_AIX) && _XOPEN_SOURCE <= 600
88extern char *mkdtemp(char *template); /* See issue #740 on AIX < 7 */
89#endif
90
/* Shared initialization for every uv_fs_*() entry point: validate `req`,
 * tag it with the UV_FS_<subtype> request type and reset all result and
 * ownership fields. Expects `loop`, `req` and `cb` in the calling scope. */
#define INIT(subtype)                                                         \
  do {                                                                        \
    if (req == NULL)                                                          \
      return UV_EINVAL;                                                       \
    UV_REQ_INIT(req, UV_FS);                                                  \
    req->fs_type = UV_FS_ ## subtype;                                         \
    req->result = 0;                                                          \
    req->ptr = NULL;                                                          \
    req->loop = loop;                                                         \
    req->path = NULL;                                                         \
    req->new_path = NULL;                                                     \
    req->bufs = NULL;                                                         \
    req->cb = cb;                                                             \
  }                                                                           \
  while (0)
106
/* Capture `path` on the request. Synchronous calls (cb == NULL) borrow the
 * caller's string; asynchronous calls take a private copy because the
 * caller's buffer may be gone by the time the threadpool runs. */
#define PATH                                                                  \
  do {                                                                        \
    assert(path != NULL);                                                     \
    if (cb == NULL) {                                                         \
      req->path = path;                                                       \
    } else {                                                                  \
      req->path = uv__strdup(path);                                           \
      if (req->path == NULL)                                                  \
        return UV_ENOMEM;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)
119
/* Capture `path` and `new_path` on the request. For asynchronous calls both
 * strings are copied into one allocation, with req->new_path pointing just
 * past the first string's NUL terminator, so freeing req->path releases
 * both. Synchronous calls borrow the caller's strings. */
#define PATH2                                                                 \
  do {                                                                        \
    if (cb == NULL) {                                                         \
      req->path = path;                                                       \
      req->new_path = new_path;                                               \
    } else {                                                                  \
      size_t path_len;                                                        \
      size_t new_path_len;                                                    \
      path_len = strlen(path) + 1;                                            \
      new_path_len = strlen(new_path) + 1;                                    \
      req->path = uv__malloc(path_len + new_path_len);                        \
      if (req->path == NULL)                                                  \
        return UV_ENOMEM;                                                     \
      req->new_path = req->path + path_len;                                   \
      memcpy((void*) req->path, path, path_len);                              \
      memcpy((void*) req->new_path, new_path, new_path_len);                  \
    }                                                                         \
  }                                                                           \
  while (0)
139
/* Dispatch the prepared request. Asynchronous calls (cb != NULL) register
 * the request and submit it to the threadpool, returning 0 immediately;
 * synchronous calls run the work inline and return its result. */
#define POST                                                                  \
  do {                                                                        \
    if (cb != NULL) {                                                         \
      uv__req_register(loop, req);                                            \
      uv__work_submit(loop,                                                   \
                      &req->work_req,                                         \
                      UV__WORK_FAST_IO,                                       \
                      uv__fs_work,                                            \
                      uv__fs_done);                                           \
      return 0;                                                               \
    }                                                                         \
    else {                                                                    \
      uv__fs_work(&req->work_req);                                            \
      return req->result;                                                     \
    }                                                                         \
  }                                                                           \
  while (0)
157
158
/* close() wrapper used by the fs workers. An interrupted (EINTR) or still
 * in-flight (EINPROGRESS) close has already released the descriptor, so
 * neither is reported as a failure. */
static int uv__fs_close(int fd) {
  int r;

  r = uv__close_nocancel(fd);
  if (r == -1 && (errno == EINTR || errno == EINPROGRESS))
    r = 0;  /* The close is in progress, not an error. */

  return r;
}
169
170
/* Worker for uv_fs_fsync(): flush req->file's data and metadata to stable
 * storage. Returns 0 on success or -1 with errno set. */
static ssize_t uv__fs_fsync(uv_fs_t* req) {
#if defined(__APPLE__)
  /* Apple's fdatasync and fsync explicitly do NOT flush the drive write cache
   * to the drive platters. This is in contrast to Linux's fdatasync and fsync
   * which do, according to recent man pages. F_FULLFSYNC is Apple's equivalent
   * for flushing buffered data to permanent storage. If F_FULLFSYNC is not
   * supported by the file system we fall back to F_BARRIERFSYNC or fsync().
   * This is the same approach taken by sqlite, except sqlite does not issue
   * an F_BARRIERFSYNC call.
   */
  int r;

  r = fcntl(req->file, F_FULLFSYNC);
  if (r != 0)
    r = fcntl(req->file, 85 /* F_BARRIERFSYNC */); /* fsync + barrier */
  if (r != 0)
    r = fsync(req->file);  /* Last resort: the (weaker) plain fsync. */
  return r;
#else
  return fsync(req->file);
#endif
}
193
194
/* Worker for uv_fs_fdatasync(): flush req->file's data (without forcing a
 * metadata-only update) where the platform offers fdatasync(); otherwise
 * fall back to a full fsync. */
static ssize_t uv__fs_fdatasync(uv_fs_t* req) {
#if defined(__linux__) || defined(__sun) || defined(__NetBSD__)
  return fdatasync(req->file);
#elif defined(__APPLE__)
  /* See the comment in uv__fs_fsync. */
  return uv__fs_fsync(req);
#else
  return fsync(req->file);
#endif
}
205
206
/* Worker for uv_fs_futime(): set access/modification times of the open
 * descriptor req->file from the fractional-second doubles req->atime and
 * req->mtime. Returns 0 on success or -1 with errno set; platforms with
 * no suitable syscall report ENOSYS. */
static ssize_t uv__fs_futime(uv_fs_t* req) {
#if defined(__linux__)                                                        \
    || defined(_AIX71)                                                        \
    || defined(__HAIKU__)
  /* utimesat() has nanosecond resolution but we stick to microseconds
   * for the sake of consistency with other platforms.
   */
  struct timespec ts[2];
  /* Extract the sub-second part as whole microseconds, then scale to ns.
   * NOTE(review): the uint64_t cast truncates; pre-epoch (negative)
   * timestamps likely yield a wrong fractional part -- confirm if needed. */
  ts[0].tv_sec  = req->atime;
  ts[0].tv_nsec = (uint64_t)(req->atime * 1000000) % 1000000 * 1000;
  ts[1].tv_sec  = req->mtime;
  ts[1].tv_nsec = (uint64_t)(req->mtime * 1000000) % 1000000 * 1000;
  return futimens(req->file, ts);
#elif defined(__APPLE__)                                                      \
    || defined(__DragonFly__)                                                 \
    || defined(__FreeBSD__)                                                   \
    || defined(__FreeBSD_kernel__)                                            \
    || defined(__NetBSD__)                                                    \
    || defined(__OpenBSD__)                                                   \
    || defined(__sun)
  struct timeval tv[2];
  tv[0].tv_sec  = req->atime;
  tv[0].tv_usec = (uint64_t)(req->atime * 1000000) % 1000000;
  tv[1].tv_sec  = req->mtime;
  tv[1].tv_usec = (uint64_t)(req->mtime * 1000000) % 1000000;
# if defined(__sun)
  /* SunOS has no futimes(); futimesat() with a NULL path targets the fd. */
  return futimesat(req->file, NULL, tv);
# else
  return futimes(req->file, tv);
# endif
#elif defined(__MVS__)
  attrib_t atr;
  memset(&atr, 0, sizeof(atr));
  atr.att_mtimechg = 1;
  atr.att_atimechg = 1;
  atr.att_mtime = req->mtime;
  atr.att_atime = req->atime;
  return __fchattr(req->file, &atr, sizeof(atr));
#else
  errno = ENOSYS;
  return -1;
#endif
}
250
251
252static ssize_t uv__fs_mkdtemp(uv_fs_t* req) {
253 return mkdtemp((char*) req->path) ? 0 : -1;
254}
255
256
/* Worker for uv_fs_open(): open req->path with req->flags/req->mode and
 * make the resulting descriptor close-on-exec. Returns the fd or -1 with
 * errno set. */
static ssize_t uv__fs_open(uv_fs_t* req) {
  static int no_cloexec_support;  /* Set once the kernel rejects O_CLOEXEC. */
  int r;

  /* Try O_CLOEXEC before entering locks */
  if (no_cloexec_support == 0) {
#ifdef O_CLOEXEC
    r = open(req->path, req->flags | O_CLOEXEC, req->mode);
    if (r >= 0)
      return r;
    /* EINVAL signals an unsupported flag; any other error is the real
     * result of the open and is returned as-is. */
    if (errno != EINVAL)
      return r;
    no_cloexec_support = 1;
#endif  /* O_CLOEXEC */
  }

  /* Hold the loop's cloexec read-lock around open()+uv__cloexec();
   * presumably this keeps a concurrent fork from inheriting the fd in the
   * window before FD_CLOEXEC is set -- confirm against the lock's writers. */
  if (req->cb != NULL)
    uv_rwlock_rdlock(&req->loop->cloexec_lock);

  r = open(req->path, req->flags, req->mode);

  /* In case of failure `uv__cloexec` will leave error in `errno`,
   * so it is enough to just set `r` to `-1`.
   */
  if (r >= 0 && uv__cloexec(r, 1) != 0) {
    r = uv__close(r);
    if (r != 0)
      abort();  /* Failing to close a freshly opened fd is unrecoverable. */
    r = -1;
  }

  if (req->cb != NULL)
    uv_rwlock_rdunlock(&req->loop->cloexec_lock);

  return r;
}
293
294
295#if !HAVE_PREADV
296static ssize_t uv__fs_preadv(uv_file fd,
297 uv_buf_t* bufs,
298 unsigned int nbufs,
299 off_t off) {
300 uv_buf_t* buf;
301 uv_buf_t* end;
302 ssize_t result;
303 ssize_t rc;
304 size_t pos;
305
306 assert(nbufs > 0);
307
308 result = 0;
309 pos = 0;
310 buf = bufs + 0;
311 end = bufs + nbufs;
312
313 for (;;) {
314 do
315 rc = pread(fd, buf->base + pos, buf->len - pos, off + result);
316 while (rc == -1 && errno == EINTR);
317
318 if (rc == 0)
319 break;
320
321 if (rc == -1 && result == 0)
322 return UV__ERR(errno);
323
324 if (rc == -1)
325 break; /* We read some data so return that, ignore the error. */
326
327 pos += rc;
328 result += rc;
329
330 if (pos < buf->len)
331 continue;
332
333 pos = 0;
334 buf += 1;
335
336 if (buf == end)
337 break;
338 }
339
340 return result;
341}
342#endif
343
344
/* Worker for uv_fs_read(): scatter-read into req->bufs at offset req->off,
 * or at the current file position when req->off < 0. Frees the bufs array
 * as soon as the read is done. Returns bytes read or -1 with errno set. */
static ssize_t uv__fs_read(uv_fs_t* req) {
#if defined(__linux__)
  static int no_preadv;  /* Set once the preadv syscall reports ENOSYS. */
#endif
  unsigned int iovmax;
  ssize_t result;

  /* Clamp the buffer count to the platform's per-call iovec limit. */
  iovmax = uv__getiovmax();
  if (req->nbufs > iovmax)
    req->nbufs = iovmax;

  if (req->off < 0) {
    if (req->nbufs == 1)
      result = read(req->file, req->bufs[0].base, req->bufs[0].len);
    else
      result = readv(req->file, (struct iovec*) req->bufs, req->nbufs);
  } else {
    if (req->nbufs == 1) {
      result = pread(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
      goto done;
    }

#if HAVE_PREADV
    result = preadv(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else
# if defined(__linux__)
    /* Try the raw preadv syscall once; after an ENOSYS it permanently
     * falls through to the pread()-loop emulation below. */
    if (no_preadv) retry:
# endif
    {
      result = uv__fs_preadv(req->file, req->bufs, req->nbufs, req->off);
    }
# if defined(__linux__)
    else {
      result = uv__preadv(req->file,
                          (struct iovec*)req->bufs,
                          req->nbufs,
                          req->off);
      if (result == -1 && errno == ENOSYS) {
        no_preadv = 1;
        goto retry;
      }
    }
# endif
#endif
  }

done:
  /* Early cleanup of bufs allocation, since we're done with it. */
  if (req->bufs != req->bufsml)
    uv__free(req->bufs);

  req->bufs = NULL;
  req->nbufs = 0;

#ifdef __PASE__
  /* PASE returns EOPNOTSUPP when reading a directory, convert to EISDIR */
  if (result == -1 && errno == EOPNOTSUPP) {
    struct stat buf;
    ssize_t rc;
    rc = fstat(req->file, &buf);
    if (rc == 0 && S_ISDIR(buf.st_mode)) {
      errno = EISDIR;
    }
  }
#endif

  return result;
}
413
414
/* Qualifier for the scandir() callback parameters below: on macOS SDKs
 * older than 10.8 the callbacks take non-const dirent pointers (presumably
 * the SDK's scandir() prototype lacks const there -- confirm), everywhere
 * else they are const. */
#if defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_8)
#define UV_CONST_DIRENT uv__dirent_t
#else
#define UV_CONST_DIRENT const uv__dirent_t
#endif
420
421
422static int uv__fs_scandir_filter(UV_CONST_DIRENT* dent) {
423 return strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0;
424}
425
426
427static int uv__fs_scandir_sort(UV_CONST_DIRENT** a, UV_CONST_DIRENT** b) {
428 return strcmp((*a)->d_name, (*b)->d_name);
429}
430
431
432static ssize_t uv__fs_scandir(uv_fs_t* req) {
433 uv__dirent_t** dents;
434 int n;
435
436 dents = NULL;
437 n = scandir(req->path, &dents, uv__fs_scandir_filter, uv__fs_scandir_sort);
438
439 /* NOTE: We will use nbufs as an index field */
440 req->nbufs = 0;
441
442 if (n == 0) {
443 /* OS X still needs to deallocate some memory.
444 * Memory was allocated using the system allocator, so use free() here.
445 */
446 free(dents);
447 dents = NULL;
448 } else if (n == -1) {
449 return n;
450 }
451
452 req->ptr = dents;
453
454 return n;
455}
456
457static int uv__fs_opendir(uv_fs_t* req) {
458 uv_dir_t* dir;
459
460 dir = uv__malloc(sizeof(*dir));
461 if (dir == NULL)
462 goto error;
463
464 dir->dir = opendir(req->path);
465 if (dir->dir == NULL)
466 goto error;
467
468 req->ptr = dir;
469 return 0;
470
471error:
472 uv__free(dir);
473 req->ptr = NULL;
474 return -1;
475}
476
/* Worker for uv_fs_readdir(): read up to dir->nentries entries from the
 * open stream in req->ptr into dir->dirents, duplicating each name and
 * skipping "." and "..". Returns the number of entries stored (0 at end
 * of directory) or -1 with errno set; on error every name allocated
 * during this call is freed again. */
static int uv__fs_readdir(uv_fs_t* req) {
  uv_dir_t* dir;
  uv_dirent_t* dirent;
  struct dirent* res;
  unsigned int dirent_idx;
  unsigned int i;

  dir = req->ptr;
  dirent_idx = 0;

  while (dirent_idx < dir->nentries) {
    /* readdir() returns NULL on end of directory, as well as on error. errno
       is used to differentiate between the two conditions. */
    errno = 0;
    res = readdir(dir->dir);

    if (res == NULL) {
      if (errno != 0)
        goto error;
      break;
    }

    if (strcmp(res->d_name, ".") == 0 || strcmp(res->d_name, "..") == 0)
      continue;

    dirent = &dir->dirents[dirent_idx];
    dirent->name = uv__strdup(res->d_name);

    if (dirent->name == NULL)
      goto error;

    dirent->type = uv__fs_get_dirent_type(res);
    ++dirent_idx;
  }

  return dirent_idx;

error:
  /* Roll back this call's allocations so the caller sees all-or-nothing. */
  for (i = 0; i < dirent_idx; ++i) {
    uv__free((char*) dir->dirents[i].name);
    dir->dirents[i].name = NULL;
  }

  return -1;
}
522
523static int uv__fs_closedir(uv_fs_t* req) {
524 uv_dir_t* dir;
525
526 dir = req->ptr;
527
528 if (dir->dir != NULL) {
529 closedir(dir->dir);
530 dir->dir = NULL;
531 }
532
533 uv__free(req->ptr);
534 req->ptr = NULL;
535 return 0;
536}
537
/* Worker for uv_fs_statfs(): stat the filesystem containing req->path and
 * store a heap-allocated uv_statfs_t in req->ptr (presumably released by
 * uv_fs_req_cleanup() -- confirm against the cleanup path). Returns 0 on
 * success or -1 with errno set. */
static int uv__fs_statfs(uv_fs_t* req) {
  uv_statfs_t* stat_fs;
#if defined(__sun) || defined(__MVS__)
  /* SunOS and z/OS only provide the statvfs() interface. */
  struct statvfs buf;

  if (0 != statvfs(req->path, &buf))
#else
  struct statfs buf;

  if (0 != statfs(req->path, &buf))
#endif /* defined(__sun) */
    return -1;

  stat_fs = uv__malloc(sizeof(*stat_fs));
  if (stat_fs == NULL) {
    errno = ENOMEM;
    return -1;
  }

#if defined(__sun) || defined(__MVS__)
  stat_fs->f_type = 0;  /* f_type is not supported. */
#else
  stat_fs->f_type = buf.f_type;
#endif
  stat_fs->f_bsize = buf.f_bsize;
  stat_fs->f_blocks = buf.f_blocks;
  stat_fs->f_bfree = buf.f_bfree;
  stat_fs->f_bavail = buf.f_bavail;
  stat_fs->f_files = buf.f_files;
  stat_fs->f_ffree = buf.f_ffree;
  req->ptr = stat_fs;
  return 0;
}
571
572static ssize_t uv__fs_pathmax_size(const char* path) {
573 ssize_t pathmax;
574
575 pathmax = pathconf(path, _PC_PATH_MAX);
576
577 if (pathmax == -1)
578 pathmax = UV__PATH_MAX;
579
580 return pathmax;
581}
582
/* Worker for uv_fs_readlink(): read the target of the symlink req->path
 * into a NUL-terminated heap buffer stored in req->ptr. Returns 0 on
 * success or -1 with errno set. */
static ssize_t uv__fs_readlink(uv_fs_t* req) {
  ssize_t maxlen;
  ssize_t len;
  char* buf;
  char* newbuf;

#if defined(_POSIX_PATH_MAX) || defined(PATH_MAX)
  maxlen = uv__fs_pathmax_size(req->path);
#else
  /* We may not have a real PATH_MAX.  Read size of link.  */
  struct stat st;
  int ret;
  ret = lstat(req->path, &st);
  if (ret != 0)
    return -1;
  if (!S_ISLNK(st.st_mode)) {
    errno = EINVAL;
    return -1;
  }

  maxlen = st.st_size;

  /* According to readlink(2) lstat can report st_size == 0
     for some symlinks, such as those in /proc or /sys.  */
  if (maxlen == 0)
    maxlen = uv__fs_pathmax_size(req->path);
#endif

  buf = uv__malloc(maxlen);

  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }

#if defined(__MVS__)
  /* z/OS needs encoding conversion, done by the os390 helper. */
  len = os390_readlink(req->path, buf, maxlen);
#else
  len = readlink(req->path, buf, maxlen);
#endif

  if (len == -1) {
    uv__free(buf);
    return -1;
  }

  /* Uncommon case: resize to make room for the trailing nul byte. */
  if (len == maxlen) {
    newbuf = uv__realloc(buf, len + 1);

    if (newbuf == NULL) {
      uv__free(buf);
      return -1;
    }

    buf = newbuf;
  }

  /* readlink() does not NUL-terminate; do it ourselves. */
  buf[len] = '\0';
  req->ptr = buf;

  return 0;
}
646
/* Worker for uv_fs_realpath(): canonicalize req->path into a heap buffer
 * stored in req->ptr. POSIX.1-2008 realpath() allocates the result itself;
 * older systems get a pre-sized buffer. Returns 0 on success or -1 with
 * errno set. */
static ssize_t uv__fs_realpath(uv_fs_t* req) {
  char* buf;

#if defined(_POSIX_VERSION) && _POSIX_VERSION >= 200809L
  buf = realpath(req->path, NULL);
  if (buf == NULL)
    return -1;
#else
  ssize_t len;

  len = uv__fs_pathmax_size(req->path);
  buf = uv__malloc(len + 1);

  if (buf == NULL) {
    errno = ENOMEM;
    return -1;
  }

  if (realpath(req->path, buf) == NULL) {
    uv__free(buf);
    return -1;
  }
#endif

  req->ptr = buf;

  return 0;
}
675
/* Userspace emulation of sendfile() via a read()/write() copy loop, used
 * when the native call is unavailable or bails out with a retryable error.
 * Request layout (set up by uv_fs_sendfile): req->flags holds the source
 * fd, req->file the destination fd, req->off the source offset and
 * req->bufsml[0].len the byte count. On success req->off is advanced past
 * the copied bytes. Returns bytes sent, or -1 with errno set when nothing
 * was sent. */
static ssize_t uv__fs_sendfile_emul(uv_fs_t* req) {
  struct pollfd pfd;
  int use_pread;
  off_t offset;
  ssize_t nsent;
  ssize_t nread;
  ssize_t nwritten;
  size_t buflen;
  size_t len;
  ssize_t n;
  int in_fd;
  int out_fd;
  char buf[8192];  /* Fixed-size copy chunk. */

  len = req->bufsml[0].len;
  in_fd = req->flags;
  out_fd = req->file;
  offset = req->off;
  use_pread = 1;

  /* Here are the rules regarding errors:
   *
   * 1. Read errors are reported only if nsent==0, otherwise we return nsent.
   *    The user needs to know that some data has already been sent, to stop
   *    them from sending it twice.
   *
   * 2. Write errors are always reported. Write errors are bad because they
   *    mean data loss: we've read data but now we can't write it out.
   *
   * We try to use pread() and fall back to regular read() if the source fd
   * doesn't support positional reads, for example when it's a pipe fd.
   *
   * If we get EAGAIN when writing to the target fd, we poll() on it until
   * it becomes writable again.
   *
   * FIXME: If we get a write error when use_pread==1, it should be safe to
   *        return the number of sent bytes instead of an error because pread()
   *        is, in theory, idempotent. However, special files in /dev or /proc
   *        may support pread() but not necessarily return the same data on
   *        successive reads.
   *
   * FIXME: There is no way now to signal that we managed to send *some* data
   *        before a write error.
   */
  for (nsent = 0; (size_t) nsent < len; ) {
    buflen = len - nsent;

    if (buflen > sizeof(buf))
      buflen = sizeof(buf);

    do
      if (use_pread)
        nread = pread(in_fd, buf, buflen, offset);
      else
        nread = read(in_fd, buf, buflen);
    while (nread == -1 && errno == EINTR);

    if (nread == 0)
      goto out;  /* End of input. */

    if (nread == -1) {
      /* Source fd rejects positional reads (e.g. a pipe): retry with
       * plain read(), but only while nothing has been sent yet. */
      if (use_pread && nsent == 0 && (errno == EIO || errno == ESPIPE)) {
        use_pread = 0;
        continue;
      }

      if (nsent == 0)
        nsent = -1;

      goto out;
    }

    /* Drain the chunk to the destination, polling through EAGAIN. */
    for (nwritten = 0; nwritten < nread; ) {
      do
        n = write(out_fd, buf + nwritten, nread - nwritten);
      while (n == -1 && errno == EINTR);

      if (n != -1) {
        nwritten += n;
        continue;
      }

      if (errno != EAGAIN && errno != EWOULDBLOCK) {
        nsent = -1;
        goto out;
      }

      pfd.fd = out_fd;
      pfd.events = POLLOUT;
      pfd.revents = 0;

      do
        n = poll(&pfd, 1, -1);
      while (n == -1 && errno == EINTR);

      /* POLLERR/POLLHUP/POLLNVAL on the destination means no further
       * progress is possible. */
      if (n == -1 || (pfd.revents & ~POLLOUT) != 0) {
        errno = EIO;
        nsent = -1;
        goto out;
      }
    }

    offset += nread;
    nsent += nread;
  }

out:
  if (nsent != -1)
    req->off = offset;

  return nsent;
}
788
789
/* Worker for uv_fs_sendfile(): copy req->bufsml[0].len bytes from the fd
 * stashed in req->flags to req->file starting at offset req->off, using
 * the platform's sendfile() where available and falling back to
 * uv__fs_sendfile_emul() for fd combinations the native call rejects.
 * Advances req->off and returns the byte count, or -1 with errno set. */
static ssize_t uv__fs_sendfile(uv_fs_t* req) {
  int in_fd;
  int out_fd;

  in_fd = req->flags;
  out_fd = req->file;

#if defined(__linux__) || defined(__sun)
  {
    off_t off;
    ssize_t r;

    off = req->off;
    r = sendfile(out_fd, in_fd, &off, req->bufsml[0].len);

    /* sendfile() on SunOS returns EINVAL if the target fd is not a socket but
     * it still writes out data. Fortunately, we can detect it by checking if
     * the offset has been updated.
     */
    if (r != -1 || off > req->off) {
      r = off - req->off;
      req->off = off;
      return r;
    }

    /* These errors mean the fds are unsuitable for native sendfile();
     * retry with the userspace copy loop. */
    if (errno == EINVAL ||
        errno == EIO ||
        errno == ENOTSOCK ||
        errno == EXDEV) {
      errno = 0;
      return uv__fs_sendfile_emul(req);
    }

    return -1;
  }
#elif defined(__APPLE__)           || \
      defined(__DragonFly__)       || \
      defined(__FreeBSD__)         || \
      defined(__FreeBSD_kernel__)
  {
    off_t len;
    ssize_t r;

    /* sendfile() on FreeBSD and Darwin returns EAGAIN if the target fd is in
     * non-blocking mode and not all data could be written. If a non-zero
     * number of bytes have been sent, we don't consider it an error.
     */

#if defined(__FreeBSD__) || defined(__DragonFly__)
    len = 0;
    r = sendfile(in_fd, out_fd, req->off, req->bufsml[0].len, NULL, &len, 0);
#elif defined(__FreeBSD_kernel__)
    len = 0;
    r = bsd_sendfile(in_fd,
                     out_fd,
                     req->off,
                     req->bufsml[0].len,
                     NULL,
                     &len,
                     0);
#else
    /* The darwin sendfile takes len as an input for the length to send,
     * so make sure to initialize it with the caller's value. */
    len = req->bufsml[0].len;
    r = sendfile(in_fd, out_fd, req->off, &len, NULL, 0);
#endif

     /*
     * The man page for sendfile(2) on DragonFly states that `len` contains
     * a meaningful value ONLY in case of EAGAIN and EINTR.
     * Nothing is said about it's value in case of other errors, so better
     * not depend on the potential wrong assumption that is was not modified
     * by the syscall.
     */
    if (r == 0 || ((errno == EAGAIN || errno == EINTR) && len != 0)) {
      req->off += len;
      return (ssize_t) len;
    }

    /* Same retryable-error set as the Linux/SunOS branch above. */
    if (errno == EINVAL ||
        errno == EIO ||
        errno == ENOTSOCK ||
        errno == EXDEV) {
      errno = 0;
      return uv__fs_sendfile_emul(req);
    }

    return -1;
  }
#else
  /* Squelch compiler warnings. */
  (void) &in_fd;
  (void) &out_fd;

  return uv__fs_sendfile_emul(req);
#endif
}
887
888
/* Worker for uv_fs_utime(): set access/modification times of req->path
 * from the fractional-second doubles req->atime and req->mtime. Returns 0
 * on success or -1 with errno set; unsupported platforms report ENOSYS. */
static ssize_t uv__fs_utime(uv_fs_t* req) {
#if defined(__linux__)                                                         \
    || defined(_AIX71)                                                         \
    || defined(__sun)                                                          \
    || defined(__HAIKU__)
  /* utimesat() has nanosecond resolution but we stick to microseconds
   * for the sake of consistency with other platforms.
   */
  struct timespec ts[2];
  /* Extract the sub-second part as whole microseconds, then scale to ns.
   * NOTE(review): the uint64_t cast truncates; pre-epoch (negative)
   * timestamps likely yield a wrong fractional part -- confirm if needed. */
  ts[0].tv_sec  = req->atime;
  ts[0].tv_nsec = (uint64_t)(req->atime * 1000000) % 1000000 * 1000;
  ts[1].tv_sec  = req->mtime;
  ts[1].tv_nsec = (uint64_t)(req->mtime * 1000000) % 1000000 * 1000;
  return utimensat(AT_FDCWD, req->path, ts, 0);
#elif defined(__APPLE__)                                                      \
    || defined(__DragonFly__)                                                 \
    || defined(__FreeBSD__)                                                   \
    || defined(__FreeBSD_kernel__)                                            \
    || defined(__NetBSD__)                                                    \
    || defined(__OpenBSD__)
  struct timeval tv[2];
  tv[0].tv_sec  = req->atime;
  tv[0].tv_usec = (uint64_t)(req->atime * 1000000) % 1000000;
  tv[1].tv_sec  = req->mtime;
  tv[1].tv_usec = (uint64_t)(req->mtime * 1000000) % 1000000;
  return utimes(req->path, tv);
#elif defined(_AIX)                                                           \
    && !defined(_AIX71)
  /* Old AIX only has second-resolution utime(). */
  struct utimbuf buf;
  buf.actime = req->atime;
  buf.modtime = req->mtime;
  return utime(req->path, &buf);
#elif defined(__MVS__)
  attrib_t atr;
  memset(&atr, 0, sizeof(atr));
  atr.att_mtimechg = 1;
  atr.att_atimechg = 1;
  atr.att_mtime = req->mtime;
  atr.att_atime = req->atime;
  return __lchattr((char*) req->path, &atr, sizeof(atr));
#else
  errno = ENOSYS;
  return -1;
#endif
}
934
935
/* Worker for uv_fs_write(): gather-write req->bufs to req->file at offset
 * req->off, or at the current file position when req->off < 0. Returns
 * bytes written or -1 with errno set. */
static ssize_t uv__fs_write(uv_fs_t* req) {
#if defined(__linux__)
  static int no_pwritev;  /* Set once the pwritev syscall reports ENOSYS. */
#endif
  ssize_t r;

  /* Serialize writes on OS X, concurrent write() and pwrite() calls result in
   * data loss. We can't use a per-file descriptor lock, the descriptor may be
   * a dup().
   */
#if defined(__APPLE__)
  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

  if (pthread_mutex_lock(&lock))
    abort();
#endif

  if (req->off < 0) {
    if (req->nbufs == 1)
      r = write(req->file, req->bufs[0].base, req->bufs[0].len);
    else
      r = writev(req->file, (struct iovec*) req->bufs, req->nbufs);
  } else {
    if (req->nbufs == 1) {
      r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
      goto done;
    }
#if HAVE_PREADV
    r = pwritev(req->file, (struct iovec*) req->bufs, req->nbufs, req->off);
#else
# if defined(__linux__)
    /* Try the raw pwritev syscall once; after an ENOSYS it permanently
     * falls through to the single-pwrite fallback below. */
    if (no_pwritev) retry:
# endif
    {
      /* Only the first buffer is written here; the caller observes a
       * short write for the remaining buffers. */
      r = pwrite(req->file, req->bufs[0].base, req->bufs[0].len, req->off);
    }
# if defined(__linux__)
    else {
      r = uv__pwritev(req->file,
                      (struct iovec*) req->bufs,
                      req->nbufs,
                      req->off);
      if (r == -1 && errno == ENOSYS) {
        no_pwritev = 1;
        goto retry;
      }
    }
# endif
#endif
  }

done:
#if defined(__APPLE__)
  if (pthread_mutex_unlock(&lock))
    abort();
#endif

  return r;
}
995
/* Worker for uv_fs_copyfile(): copy req->path to req->new_path.
 * req->flags carries the UV_FS_COPYFILE_* options: EXCL adds O_EXCL on the
 * destination; FICLONE/FICLONE_FORCE first try an ioctl(FICLONE) clone
 * where available. Otherwise data is moved with uv_fs_sendfile() in a
 * loop. On failure the (possibly partial) destination is unlinked.
 * Returns 0 on success or -1 with errno set. */
static ssize_t uv__fs_copyfile(uv_fs_t* req) {
  uv_fs_t fs_req;
  uv_file srcfd;
  uv_file dstfd;
  struct stat src_statsbuf;
  struct stat dst_statsbuf;
  int dst_flags;
  int result;
  int err;
  size_t bytes_to_send;
  int64_t in_offset;

  dstfd = -1;
  err = 0;

  /* Open the source file. */
  srcfd = uv_fs_open(NULL, &fs_req, req->path, O_RDONLY, 0, NULL);
  uv_fs_req_cleanup(&fs_req);

  if (srcfd < 0)
    return srcfd;

  /* Get the source file's mode. */
  if (fstat(srcfd, &src_statsbuf)) {
    err = UV__ERR(errno);
    goto out;
  }

  dst_flags = O_WRONLY | O_CREAT | O_TRUNC;

  if (req->flags & UV_FS_COPYFILE_EXCL)
    dst_flags |= O_EXCL;

  /* Open the destination file. */
  dstfd = uv_fs_open(NULL,
                     &fs_req,
                     req->new_path,
                     dst_flags,
                     src_statsbuf.st_mode,
                     NULL);
  uv_fs_req_cleanup(&fs_req);

  if (dstfd < 0) {
    err = dstfd;
    goto out;
  }

  /* Get the destination file's mode. */
  if (fstat(dstfd, &dst_statsbuf)) {
    err = UV__ERR(errno);
    goto out;
  }

  /* Check if srcfd and dstfd refer to the same file */
  if (src_statsbuf.st_dev == dst_statsbuf.st_dev &&
      src_statsbuf.st_ino == dst_statsbuf.st_ino) {
    goto out;  /* Nothing to copy; reported as success. */
  }

  /* Mirror the source file's permission bits onto the destination. */
  if (fchmod(dstfd, src_statsbuf.st_mode) == -1) {
    err = UV__ERR(errno);
    goto out;
  }

#ifdef FICLONE
  if (req->flags & UV_FS_COPYFILE_FICLONE ||
      req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
    if (ioctl(dstfd, FICLONE, srcfd) == -1) {
      /* If an error occurred that the sendfile fallback also won't handle, or
         this is a force clone then exit. Otherwise, fall through to try using
         sendfile(). */
      if (errno != ENOTTY && errno != EOPNOTSUPP && errno != EXDEV) {
        err = UV__ERR(errno);
        goto out;
      } else if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
        err = UV_ENOTSUP;
        goto out;
      }
    } else {
      goto out;  /* Clone succeeded: the copy is complete. */
    }
  }
#else
  if (req->flags & UV_FS_COPYFILE_FICLONE_FORCE) {
    err = UV_ENOSYS;
    goto out;
  }
#endif

  /* Stream the file contents; sendfile() may send less than requested per
   * call, so loop until everything is transferred or an error occurs. */
  bytes_to_send = src_statsbuf.st_size;
  in_offset = 0;
  while (bytes_to_send != 0) {
    err = uv_fs_sendfile(NULL,
                         &fs_req,
                         dstfd,
                         srcfd,
                         in_offset,
                         bytes_to_send,
                         NULL);
    uv_fs_req_cleanup(&fs_req);
    if (err < 0)
      break;
    bytes_to_send -= fs_req.result;
    in_offset += fs_req.result;
  }

out:
  if (err < 0)
    result = err;
  else
    result = 0;

  /* Close the source file. */
  err = uv__close_nocheckstdio(srcfd);

  /* Don't overwrite any existing errors. */
  if (err != 0 && result == 0)
    result = err;

  /* Close the destination file if it is open. */
  if (dstfd >= 0) {
    err = uv__close_nocheckstdio(dstfd);

    /* Don't overwrite any existing errors. */
    if (err != 0 && result == 0)
      result = err;

    /* Remove the destination file if something went wrong. */
    if (result != 0) {
      uv_fs_unlink(NULL, &fs_req, req->new_path, NULL);
      /* Ignore the unlink return value, as an error already happened. */
      uv_fs_req_cleanup(&fs_req);
    }
  }

  if (result == 0)
    return 0;

  /* `result` holds a negated UV error; hand it back via errno as the
   * dispatcher expects (see the caveat at the top of this file). */
  errno = UV__ERR(result);
  return -1;
}
1137
/* Copy a platform `struct stat` into libuv's portable uv_stat_t, filling
 * the nanosecond and birth-time fields as precisely as the platform's
 * struct allows; platforms without them get zero nanoseconds and ctime as
 * the birth time. */
static void uv__to_stat(struct stat* src, uv_stat_t* dst) {
  dst->st_dev = src->st_dev;
  dst->st_mode = src->st_mode;
  dst->st_nlink = src->st_nlink;
  dst->st_uid = src->st_uid;
  dst->st_gid = src->st_gid;
  dst->st_rdev = src->st_rdev;
  dst->st_ino = src->st_ino;
  dst->st_size = src->st_size;
  dst->st_blksize = src->st_blksize;
  dst->st_blocks = src->st_blocks;

#if defined(__APPLE__)
  /* Darwin spells the timespec fields st_*timespec. */
  dst->st_atim.tv_sec = src->st_atimespec.tv_sec;
  dst->st_atim.tv_nsec = src->st_atimespec.tv_nsec;
  dst->st_mtim.tv_sec = src->st_mtimespec.tv_sec;
  dst->st_mtim.tv_nsec = src->st_mtimespec.tv_nsec;
  dst->st_ctim.tv_sec = src->st_ctimespec.tv_sec;
  dst->st_ctim.tv_nsec = src->st_ctimespec.tv_nsec;
  dst->st_birthtim.tv_sec = src->st_birthtimespec.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_birthtimespec.tv_nsec;
  dst->st_flags = src->st_flags;
  dst->st_gen = src->st_gen;
#elif defined(__ANDROID__)
  /* Bionic exposes separate st_*timensec fields. */
  dst->st_atim.tv_sec = src->st_atime;
  dst->st_atim.tv_nsec = src->st_atimensec;
  dst->st_mtim.tv_sec = src->st_mtime;
  dst->st_mtim.tv_nsec = src->st_mtimensec;
  dst->st_ctim.tv_sec = src->st_ctime;
  dst->st_ctim.tv_nsec = src->st_ctimensec;
  dst->st_birthtim.tv_sec = src->st_ctime;
  dst->st_birthtim.tv_nsec = src->st_ctimensec;
  dst->st_flags = 0;
  dst->st_gen = 0;
#elif !defined(_AIX) && (       \
    defined(__DragonFly__)   || \
    defined(__FreeBSD__)     || \
    defined(__OpenBSD__)     || \
    defined(__NetBSD__)      || \
    defined(_GNU_SOURCE)     || \
    defined(_BSD_SOURCE)     || \
    defined(_SVID_SOURCE)    || \
    defined(_XOPEN_SOURCE)   || \
    defined(_DEFAULT_SOURCE))
  dst->st_atim.tv_sec = src->st_atim.tv_sec;
  dst->st_atim.tv_nsec = src->st_atim.tv_nsec;
  dst->st_mtim.tv_sec = src->st_mtim.tv_sec;
  dst->st_mtim.tv_nsec = src->st_mtim.tv_nsec;
  dst->st_ctim.tv_sec = src->st_ctim.tv_sec;
  dst->st_ctim.tv_nsec = src->st_ctim.tv_nsec;
# if defined(__FreeBSD__)    || \
     defined(__NetBSD__)
  dst->st_birthtim.tv_sec = src->st_birthtim.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_birthtim.tv_nsec;
  dst->st_flags = src->st_flags;
  dst->st_gen = src->st_gen;
# else
  dst->st_birthtim.tv_sec = src->st_ctim.tv_sec;
  dst->st_birthtim.tv_nsec = src->st_ctim.tv_nsec;
  dst->st_flags = 0;
  dst->st_gen = 0;
# endif
#else
  /* Second-resolution fallback. */
  dst->st_atim.tv_sec = src->st_atime;
  dst->st_atim.tv_nsec = 0;
  dst->st_mtim.tv_sec = src->st_mtime;
  dst->st_mtim.tv_nsec = 0;
  dst->st_ctim.tv_sec = src->st_ctime;
  dst->st_ctim.tv_nsec = 0;
  dst->st_birthtim.tv_sec = src->st_ctime;
  dst->st_birthtim.tv_nsec = 0;
  dst->st_flags = 0;
  dst->st_gen = 0;
#endif
}
1213
1214
1215static int uv__fs_statx(int fd,
1216 const char* path,
1217 int is_fstat,
1218 int is_lstat,
1219 uv_stat_t* buf) {
1220 STATIC_ASSERT(UV_ENOSYS != -1);
1221#ifdef __linux__
1222 static int no_statx;
1223 struct uv__statx statxbuf;
1224 int dirfd;
1225 int flags;
1226 int mode;
1227 int rc;
1228
1229 if (no_statx)
1230 return UV_ENOSYS;
1231
1232 dirfd = AT_FDCWD;
1233 flags = 0; /* AT_STATX_SYNC_AS_STAT */
1234 mode = 0xFFF; /* STATX_BASIC_STATS + STATX_BTIME */
1235
1236 if (is_fstat) {
1237 dirfd = fd;
1238 flags |= 0x1000; /* AT_EMPTY_PATH */
1239 }
1240
1241 if (is_lstat)
1242 flags |= AT_SYMLINK_NOFOLLOW;
1243
1244 rc = uv__statx(dirfd, path, flags, mode, &statxbuf);
1245
1246 if (rc == -1) {
1247 /* EPERM happens when a seccomp filter rejects the system call.
1248 * Has been observed with libseccomp < 2.3.3 and docker < 18.04.
1249 */
1250 if (errno != EINVAL && errno != EPERM && errno != ENOSYS)
1251 return -1;
1252
1253 no_statx = 1;
1254 return UV_ENOSYS;
1255 }
1256
1257 buf->st_dev = 256 * statxbuf.stx_dev_major + statxbuf.stx_dev_minor;
1258 buf->st_mode = statxbuf.stx_mode;
1259 buf->st_nlink = statxbuf.stx_nlink;
1260 buf->st_uid = statxbuf.stx_uid;
1261 buf->st_gid = statxbuf.stx_gid;
1262 buf->st_rdev = statxbuf.stx_rdev_major;
1263 buf->st_ino = statxbuf.stx_ino;
1264 buf->st_size = statxbuf.stx_size;
1265 buf->st_blksize = statxbuf.stx_blksize;
1266 buf->st_blocks = statxbuf.stx_blocks;
1267 buf->st_atim.tv_sec = statxbuf.stx_atime.tv_sec;
1268 buf->st_atim.tv_nsec = statxbuf.stx_atime.tv_nsec;
1269 buf->st_mtim.tv_sec = statxbuf.stx_mtime.tv_sec;
1270 buf->st_mtim.tv_nsec = statxbuf.stx_mtime.tv_nsec;
1271 buf->st_ctim.tv_sec = statxbuf.stx_ctime.tv_sec;
1272 buf->st_ctim.tv_nsec = statxbuf.stx_ctime.tv_nsec;
1273 buf->st_birthtim.tv_sec = statxbuf.stx_btime.tv_sec;
1274 buf->st_birthtim.tv_nsec = statxbuf.stx_btime.tv_nsec;
1275 buf->st_flags = 0;
1276 buf->st_gen = 0;
1277
1278 return 0;
1279#else
1280 return UV_ENOSYS;
1281#endif /* __linux__ */
1282}
1283
1284
1285static int uv__fs_stat(const char *path, uv_stat_t *buf) {
1286 struct stat pbuf;
1287 int ret;
1288
1289 ret = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 0, buf);
1290 if (ret != UV_ENOSYS)
1291 return ret;
1292
1293 ret = stat(path, &pbuf);
1294 if (ret == 0)
1295 uv__to_stat(&pbuf, buf);
1296
1297 return ret;
1298}
1299
1300
1301static int uv__fs_lstat(const char *path, uv_stat_t *buf) {
1302 struct stat pbuf;
1303 int ret;
1304
1305 ret = uv__fs_statx(-1, path, /* is_fstat */ 0, /* is_lstat */ 1, buf);
1306 if (ret != UV_ENOSYS)
1307 return ret;
1308
1309 ret = lstat(path, &pbuf);
1310 if (ret == 0)
1311 uv__to_stat(&pbuf, buf);
1312
1313 return ret;
1314}
1315
1316
1317static int uv__fs_fstat(int fd, uv_stat_t *buf) {
1318 struct stat pbuf;
1319 int ret;
1320
1321 ret = uv__fs_statx(fd, "", /* is_fstat */ 1, /* is_lstat */ 0, buf);
1322 if (ret != UV_ENOSYS)
1323 return ret;
1324
1325 ret = fstat(fd, &pbuf);
1326 if (ret == 0)
1327 uv__to_stat(&pbuf, buf);
1328
1329 return ret;
1330}
1331
1332static size_t uv__fs_buf_offset(uv_buf_t* bufs, size_t size) {
1333 size_t offset;
1334 /* Figure out which bufs are done */
1335 for (offset = 0; size > 0 && bufs[offset].len <= size; ++offset)
1336 size -= bufs[offset].len;
1337
1338 /* Fix a partial read/write */
1339 if (size > 0) {
1340 bufs[offset].base += size;
1341 bufs[offset].len -= size;
1342 }
1343 return offset;
1344}
1345
/* Write every buf in req->bufs, restarting short writes until all bytes
 * are written or an error occurs.  Returns the total number of bytes
 * written; if the very first write fails, returns that write's result.
 * Writes are issued in batches of at most uv__getiovmax() bufs.
 * On return, req->bufs is freed (when heap-allocated) and reset.
 */
static ssize_t uv__fs_write_all(uv_fs_t* req) {
  unsigned int iovmax;
  unsigned int nbufs;
  uv_buf_t* bufs;
  ssize_t total;
  ssize_t result;

  iovmax = uv__getiovmax();
  nbufs = req->nbufs;  /* bufs still to be written */
  bufs = req->bufs;    /* original array pointer, kept for the free below */
  total = 0;

  while (nbufs > 0) {
    /* Clamp this batch to the iov limit. */
    req->nbufs = nbufs;
    if (req->nbufs > iovmax)
      req->nbufs = iovmax;

    /* Retry the low-level write when interrupted by a signal. */
    do
      result = uv__fs_write(req);
    while (result < 0 && errno == EINTR);

    if (result <= 0) {
      /* Only surface the error if nothing was written yet; otherwise
       * report the bytes that did make it out. */
      if (total == 0)
        total = result;
      break;
    }

    /* A negative offset means "current file position" — only an
     * explicit offset needs to be advanced here. */
    if (req->off >= 0)
      req->off += result;

    /* Skip fully-written bufs; uv__fs_buf_offset() also trims a
     * partially-written buf so the next batch resumes mid-buf. */
    req->nbufs = uv__fs_buf_offset(req->bufs, result);
    req->bufs += req->nbufs;
    nbufs -= req->nbufs;
    total += result;
  }

  /* Free the heap copy of the buf array (not the inline bufsml). */
  if (bufs != req->bufsml)
    uv__free(bufs);

  req->bufs = NULL;
  req->nbufs = 0;

  return total;
}
1390
1391
/* Worker-thread entry point: dispatch the request to the matching
 * system call or uv__fs_* helper and record the outcome in req->result
 * (the raw result on success, UV__ERR(errno) on failure — see the
 * caveat comment at the top of this file).
 */
static void uv__fs_work(struct uv__work* w) {
  int retry_on_eintr;
  uv_fs_t* req;
  ssize_t r;

  req = container_of(w, uv_fs_t, work_req);
  /* Everything is retried on EINTR except close and read; those two
   * handle (or must not retry) interruption themselves. */
  retry_on_eintr = !(req->fs_type == UV_FS_CLOSE ||
                     req->fs_type == UV_FS_READ);

  do {
    errno = 0;

/* Expand to one switch case per request type: run `action`, keep the
 * return value in r. */
#define X(type, action)                                                       \
  case UV_FS_ ## type:                                                        \
    r = action;                                                               \
    break;

    switch (req->fs_type) {
    X(ACCESS, access(req->path, req->flags));
    X(CHMOD, chmod(req->path, req->mode));
    X(CHOWN, chown(req->path, req->uid, req->gid));
    X(CLOSE, uv__fs_close(req->file));
    X(COPYFILE, uv__fs_copyfile(req));
    X(FCHMOD, fchmod(req->file, req->mode));
    X(FCHOWN, fchown(req->file, req->uid, req->gid));
    X(LCHOWN, lchown(req->path, req->uid, req->gid));
    X(FDATASYNC, uv__fs_fdatasync(req));
    X(FSTAT, uv__fs_fstat(req->file, &req->statbuf));
    X(FSYNC, uv__fs_fsync(req));
    X(FTRUNCATE, ftruncate(req->file, req->off));
    X(FUTIME, uv__fs_futime(req));
    X(LSTAT, uv__fs_lstat(req->path, &req->statbuf));
    X(LINK, link(req->path, req->new_path));
    X(MKDIR, mkdir(req->path, req->mode));
    X(MKDTEMP, uv__fs_mkdtemp(req));
    X(OPEN, uv__fs_open(req));
    X(READ, uv__fs_read(req));
    X(SCANDIR, uv__fs_scandir(req));
    X(OPENDIR, uv__fs_opendir(req));
    X(READDIR, uv__fs_readdir(req));
    X(CLOSEDIR, uv__fs_closedir(req));
    X(READLINK, uv__fs_readlink(req));
    X(REALPATH, uv__fs_realpath(req));
    X(RENAME, rename(req->path, req->new_path));
    X(RMDIR, rmdir(req->path));
    X(SENDFILE, uv__fs_sendfile(req));
    X(STAT, uv__fs_stat(req->path, &req->statbuf));
    X(STATFS, uv__fs_statfs(req));
    X(SYMLINK, symlink(req->path, req->new_path));
    X(UNLINK, unlink(req->path));
    X(UTIME, uv__fs_utime(req));
    X(WRITE, uv__fs_write_all(req));
    default: abort();  /* unknown fs_type: programming error */
    }
#undef X
  } while (r == -1 && errno == EINTR && retry_on_eintr);

  if (r == -1)
    req->result = UV__ERR(errno);
  else
    req->result = r;

  /* Successful stat family: point req->ptr at the embedded statbuf so
   * callers can read it via req->ptr as well. */
  if (r == 0 && (req->fs_type == UV_FS_STAT ||
                 req->fs_type == UV_FS_FSTAT ||
                 req->fs_type == UV_FS_LSTAT)) {
    req->ptr = &req->statbuf;
  }
}
1460
1461
/* Loop-thread completion callback: unregister the request from the loop
 * and invoke the user's callback.  A cancelled request never ran, so
 * its result is rewritten to UV_ECANCELED before the callback sees it. */
static void uv__fs_done(struct uv__work* w, int status) {
  uv_fs_t* req;

  req = container_of(w, uv_fs_t, work_req);
  uv__req_unregister(req->loop, req);

  if (status == UV_ECANCELED) {
    assert(req->result == 0);  /* cancelled work never set a result */
    req->result = UV_ECANCELED;
  }

  req->cb(req);
}
1475
1476
/* Queue (or run synchronously when cb == NULL) an access(path, flags)
 * check — see the ACCESS case in uv__fs_work(). */
int uv_fs_access(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 int flags,
                 uv_fs_cb cb) {
  INIT(ACCESS);
  PATH;
  req->flags = flags;  /* access mode, e.g. F_OK/R_OK/W_OK/X_OK */
  POST;
}
1487
1488
/* chmod(path, mode) — see the CHMOD case in uv__fs_work(). */
int uv_fs_chmod(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                int mode,
                uv_fs_cb cb) {
  INIT(CHMOD);
  PATH;
  req->mode = mode;
  POST;
}
1499
1500
/* chown(path, uid, gid) — see the CHOWN case in uv__fs_work(). */
int uv_fs_chown(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                uv_uid_t uid,
                uv_gid_t gid,
                uv_fs_cb cb) {
  INIT(CHOWN);
  PATH;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1513
1514
/* Close `file` via uv__fs_close() — see the CLOSE case in uv__fs_work()
 * (note: CLOSE is one of the two types never retried on EINTR). */
int uv_fs_close(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(CLOSE);
  req->file = file;
  POST;
}
1520
1521
/* fchmod(file, mode) — see the FCHMOD case in uv__fs_work(). */
int uv_fs_fchmod(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 int mode,
                 uv_fs_cb cb) {
  INIT(FCHMOD);
  req->file = file;
  req->mode = mode;
  POST;
}
1532
1533
/* fchown(file, uid, gid) — see the FCHOWN case in uv__fs_work(). */
int uv_fs_fchown(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 uv_uid_t uid,
                 uv_gid_t gid,
                 uv_fs_cb cb) {
  INIT(FCHOWN);
  req->file = file;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1546
1547
/* lchown(path, uid, gid): chown without following a trailing symlink —
 * see the LCHOWN case in uv__fs_work(). */
int uv_fs_lchown(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 uv_uid_t uid,
                 uv_gid_t gid,
                 uv_fs_cb cb) {
  INIT(LCHOWN);
  PATH;
  req->uid = uid;
  req->gid = gid;
  POST;
}
1560
1561
/* Flush `file`'s data via uv__fs_fdatasync() — see the FDATASYNC case
 * in uv__fs_work(). */
int uv_fs_fdatasync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FDATASYNC);
  req->file = file;
  POST;
}
1567
1568
/* Stat the open descriptor via uv__fs_fstat() into req->statbuf — see
 * the FSTAT case in uv__fs_work(). */
int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FSTAT);
  req->file = file;
  POST;
}
1574
1575
/* Flush `file` (data + metadata) via uv__fs_fsync() — see the FSYNC
 * case in uv__fs_work(). */
int uv_fs_fsync(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
  INIT(FSYNC);
  req->file = file;
  POST;
}
1581
1582
/* ftruncate(file, off) — see the FTRUNCATE case in uv__fs_work(). */
int uv_fs_ftruncate(uv_loop_t* loop,
                    uv_fs_t* req,
                    uv_file file,
                    int64_t off,
                    uv_fs_cb cb) {
  INIT(FTRUNCATE);
  req->file = file;
  req->off = off;  /* new file length */
  POST;
}
1593
1594
/* Set access/modification times on an open descriptor via
 * uv__fs_futime() — see the FUTIME case in uv__fs_work().
 * atime/mtime are passed through as doubles (seconds, presumably with
 * fractional part — semantics live in uv__fs_futime). */
int uv_fs_futime(uv_loop_t* loop,
                 uv_fs_t* req,
                 uv_file file,
                 double atime,
                 double mtime,
                 uv_fs_cb cb) {
  INIT(FUTIME);
  req->file = file;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}
1607
1608
/* Stat `path` without following symlinks, via uv__fs_lstat() into
 * req->statbuf — see the LSTAT case in uv__fs_work(). */
int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(LSTAT);
  PATH;
  POST;
}
1614
1615
/* link(path, new_path): create a hard link — see the LINK case in
 * uv__fs_work(). */
int uv_fs_link(uv_loop_t* loop,
               uv_fs_t* req,
               const char* path,
               const char* new_path,
               uv_fs_cb cb) {
  INIT(LINK);
  PATH2;
  POST;
}
1625
1626
/* mkdir(path, mode) — see the MKDIR case in uv__fs_work(). */
int uv_fs_mkdir(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                int mode,
                uv_fs_cb cb) {
  INIT(MKDIR);
  PATH;
  req->mode = mode;
  POST;
}
1637
1638
/* Create a unique temporary directory from template `tpl` via
 * uv__fs_mkdtemp() — see the MKDTEMP case in uv__fs_work().
 * Unlike PATH-based requests, MKDTEMP always heap-copies the template
 * (it is mutated in place); uv_fs_req_cleanup() frees it even for
 * synchronous requests. */
int uv_fs_mkdtemp(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* tpl,
                  uv_fs_cb cb) {
  INIT(MKDTEMP);
  req->path = uv__strdup(tpl);
  if (req->path == NULL)
    return UV_ENOMEM;
  POST;
}
1649
1650
/* Open `path` with the given flags/mode via uv__fs_open() — see the
 * OPEN case in uv__fs_work(). */
int uv_fs_open(uv_loop_t* loop,
               uv_fs_t* req,
               const char* path,
               int flags,
               int mode,
               uv_fs_cb cb) {
  INIT(OPEN);
  PATH;
  req->flags = flags;  /* open(2)-style flags */
  req->mode = mode;    /* creation mode, used with O_CREAT */
  POST;
}
1663
1664
/* Read from `file` at offset `off` (negative: current position) into
 * the caller's bufs — see the READ case in uv__fs_work().
 * Returns UV_EINVAL for a NULL/empty buf list and UV_ENOMEM when the
 * buf array cannot be copied.  The bufs themselves are caller-owned;
 * only the uv_buf_t descriptors are copied (inline into req->bufsml
 * when they fit, otherwise heap-allocated and freed later). */
int uv_fs_read(uv_loop_t* loop, uv_fs_t* req,
               uv_file file,
               const uv_buf_t bufs[],
               unsigned int nbufs,
               int64_t off,
               uv_fs_cb cb) {
  INIT(READ);

  if (bufs == NULL || nbufs == 0)
    return UV_EINVAL;

  req->file = file;

  req->nbufs = nbufs;
  req->bufs = req->bufsml;  /* small inline array, avoids malloc */
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(*bufs));

  if (req->bufs == NULL)
    return UV_ENOMEM;

  memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));

  req->off = off;
  POST;
}
1691
1692
/* Read a directory's entries in one shot via uv__fs_scandir() — see the
 * SCANDIR case in uv__fs_work(). */
int uv_fs_scandir(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  int flags,
                  uv_fs_cb cb) {
  INIT(SCANDIR);
  PATH;
  req->flags = flags;
  POST;
}
1703
/* Open a directory stream for incremental iteration via
 * uv__fs_opendir() — see the OPENDIR case in uv__fs_work(). */
int uv_fs_opendir(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  uv_fs_cb cb) {
  INIT(OPENDIR);
  PATH;
  POST;
}
1712
/* Read the next batch of entries from an open directory stream via
 * uv__fs_readdir() — see the READDIR case in uv__fs_work().
 * The dir handle and its dirents array must both be set up by the
 * caller; otherwise UV_EINVAL. */
int uv_fs_readdir(uv_loop_t* loop,
                  uv_fs_t* req,
                  uv_dir_t* dir,
                  uv_fs_cb cb) {
  INIT(READDIR);

  if (dir == NULL || dir->dir == NULL || dir->dirents == NULL)
    return UV_EINVAL;

  req->ptr = dir;  /* worker reads the dir handle from req->ptr */
  POST;
}
1725
/* Close a directory stream opened with uv_fs_opendir() via
 * uv__fs_closedir() — see the CLOSEDIR case in uv__fs_work(). */
int uv_fs_closedir(uv_loop_t* loop,
                   uv_fs_t* req,
                   uv_dir_t* dir,
                   uv_fs_cb cb) {
  INIT(CLOSEDIR);

  if (dir == NULL)
    return UV_EINVAL;

  req->ptr = dir;  /* worker reads the dir handle from req->ptr */
  POST;
}
1738
/* Read a symlink's target via uv__fs_readlink() — see the READLINK case
 * in uv__fs_work(). */
int uv_fs_readlink(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   uv_fs_cb cb) {
  INIT(READLINK);
  PATH;
  POST;
}
1747
1748
/* Resolve `path` to a canonical absolute path via uv__fs_realpath() —
 * see the REALPATH case in uv__fs_work(). */
int uv_fs_realpath(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char * path,
                  uv_fs_cb cb) {
  INIT(REALPATH);
  PATH;
  POST;
}
1757
1758
/* rename(path, new_path) — see the RENAME case in uv__fs_work(). */
int uv_fs_rename(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 const char* new_path,
                 uv_fs_cb cb) {
  INIT(RENAME);
  PATH2;
  POST;
}
1768
1769
/* rmdir(path) — see the RMDIR case in uv__fs_work(). */
int uv_fs_rmdir(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(RMDIR);
  PATH;
  POST;
}
1775
1776
/* Copy `len` bytes from in_fd (starting at `off`) to out_fd via
 * uv__fs_sendfile() — see the SENDFILE case in uv__fs_work().
 * Request fields are repurposed: in_fd rides in req->flags and the
 * length in bufsml[0].len, because uv_fs_t has no dedicated slots. */
int uv_fs_sendfile(uv_loop_t* loop,
                   uv_fs_t* req,
                   uv_file out_fd,
                   uv_file in_fd,
                   int64_t off,
                   size_t len,
                   uv_fs_cb cb) {
  INIT(SENDFILE);
  req->flags = in_fd; /* hack */
  req->file = out_fd;
  req->off = off;
  req->bufsml[0].len = len;  /* length smuggled through the inline buf */
  POST;
}
1791
1792
/* Stat `path`, following symlinks, via uv__fs_stat() into req->statbuf
 * — see the STAT case in uv__fs_work(). */
int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(STAT);
  PATH;
  POST;
}
1798
1799
/* symlink(path, new_path) — see the SYMLINK case in uv__fs_work().
 * Note the dispatcher ignores req->flags on Unix; it is stored here
 * presumably for API symmetry with the Windows implementation. */
int uv_fs_symlink(uv_loop_t* loop,
                  uv_fs_t* req,
                  const char* path,
                  const char* new_path,
                  int flags,
                  uv_fs_cb cb) {
  INIT(SYMLINK);
  PATH2;
  req->flags = flags;
  POST;
}
1811
1812
/* unlink(path) — see the UNLINK case in uv__fs_work(). */
int uv_fs_unlink(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
  INIT(UNLINK);
  PATH;
  POST;
}
1818
1819
/* Set access/modification times on `path` via uv__fs_utime() — see the
 * UTIME case in uv__fs_work().  Times are doubles (seconds). */
int uv_fs_utime(uv_loop_t* loop,
                uv_fs_t* req,
                const char* path,
                double atime,
                double mtime,
                uv_fs_cb cb) {
  INIT(UTIME);
  PATH;
  req->atime = atime;
  req->mtime = mtime;
  POST;
}
1832
1833
/* Write the caller's bufs to `file` at offset `off` (negative: current
 * position) — executed as uv__fs_write_all() by uv__fs_work(), which
 * keeps writing until every byte is out or an error occurs.
 * Returns UV_EINVAL for a NULL/empty buf list and UV_ENOMEM when the
 * buf array cannot be copied.  Only the uv_buf_t descriptors are
 * copied (inline into req->bufsml when they fit, else heap-allocated);
 * the data they point at is caller-owned. */
int uv_fs_write(uv_loop_t* loop,
                uv_fs_t* req,
                uv_file file,
                const uv_buf_t bufs[],
                unsigned int nbufs,
                int64_t off,
                uv_fs_cb cb) {
  INIT(WRITE);

  if (bufs == NULL || nbufs == 0)
    return UV_EINVAL;

  req->file = file;

  req->nbufs = nbufs;
  req->bufs = req->bufsml;  /* small inline array, avoids malloc */
  if (nbufs > ARRAY_SIZE(req->bufsml))
    req->bufs = uv__malloc(nbufs * sizeof(*bufs));

  if (req->bufs == NULL)
    return UV_ENOMEM;

  memcpy(req->bufs, bufs, nbufs * sizeof(*bufs));

  req->off = off;
  POST;
}
1861
1862
/* Release all memory owned by a completed (or failed-to-start) fs
 * request.  Safe to call with NULL and safe to call more than once:
 * every freed pointer is reset. */
void uv_fs_req_cleanup(uv_fs_t* req) {
  if (req == NULL)
    return;

  /* Only necessary for asychronous requests, i.e., requests with a callback.
   * Synchronous ones don't copy their arguments and have req->path and
   * req->new_path pointing to user-owned memory. UV_FS_MKDTEMP is the
   * exception to the rule, it always allocates memory.
   */
  if (req->path != NULL && (req->cb != NULL || req->fs_type == UV_FS_MKDTEMP))
    uv__free((void*) req->path); /* Memory is shared with req->new_path. */

  req->path = NULL;
  req->new_path = NULL;

  /* Type-specific teardown of whatever hangs off req->ptr. */
  if (req->fs_type == UV_FS_READDIR && req->ptr != NULL)
    uv__fs_readdir_cleanup(req);

  if (req->fs_type == UV_FS_SCANDIR && req->ptr != NULL)
    uv__fs_scandir_cleanup(req);

  /* Free a heap-allocated buf array (read/write with many bufs). */
  if (req->bufs != req->bufsml)
    uv__free(req->bufs);
  req->bufs = NULL;

  /* Don't free req->ptr when it aliases the embedded statbuf, and not
   * for OPENDIR, whose uv_dir_t is owned by the caller until closedir. */
  if (req->fs_type != UV_FS_OPENDIR && req->ptr != &req->statbuf)
    uv__free(req->ptr);
  req->ptr = NULL;
}
1892
1893
/* Copy `path` to `new_path` via uv__fs_copyfile() — see the COPYFILE
 * case in uv__fs_work().  Rejects any flag outside the supported
 * EXCL/FICLONE/FICLONE_FORCE set with UV_EINVAL. */
int uv_fs_copyfile(uv_loop_t* loop,
                   uv_fs_t* req,
                   const char* path,
                   const char* new_path,
                   int flags,
                   uv_fs_cb cb) {
  INIT(COPYFILE);

  if (flags & ~(UV_FS_COPYFILE_EXCL |
                UV_FS_COPYFILE_FICLONE |
                UV_FS_COPYFILE_FICLONE_FORCE)) {
    return UV_EINVAL;
  }

  PATH2;
  req->flags = flags;
  POST;
}
1912
1913
/* Filesystem statistics for `path` via uv__fs_statfs() — see the
 * STATFS case in uv__fs_work(). */
int uv_fs_statfs(uv_loop_t* loop,
                 uv_fs_t* req,
                 const char* path,
                 uv_fs_cb cb) {
  INIT(STATFS);
  PATH;
  POST;
}
1922