/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv.h"
#include "uv/tree.h"
#include "internal.h"

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>

#include <sys/types.h>
#include <unistd.h>

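/* One watcher_list node exists per inotify watch descriptor. Handles that
 * resolve to the same watch descriptor share the `watchers` queue below.
 */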
struct watcher_list {
  RB_ENTRY(watcher_list) entry;
  QUEUE watchers;
  int iterating;
  char* path;
  int wd;
};

struct watcher_root {
  struct watcher_list* rbh_root;
};
#define CAST(p) ((struct watcher_root*)(p))


static int compare_watchers(const struct watcher_list* a,
                            const struct watcher_list* b) {
  if (a->wd < b->wd) return -1;
  if (a->wd > b->wd) return 1;
  return 0;
}


RB_GENERATE_STATIC(watcher_root, watcher_list, entry, compare_watchers)


static void uv__inotify_read(uv_loop_t* loop,
                             uv__io_t* w,
                             unsigned int revents);

static void maybe_free_watcher_list(struct watcher_list* w,
                                    uv_loop_t* loop);

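/* Create an inotify fd that is non-blocking and close-on-exec. Prefer
 * inotify_init1(); fall back to inotify_init() plus uv__cloexec() and
 * uv__nonblock() on kernels that lack it.
 */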
static int new_inotify_fd(void) {
  int err;
  int fd;

  fd = uv__inotify_init1(UV__IN_NONBLOCK | UV__IN_CLOEXEC);
  if (fd != -1)
    return fd;

  if (errno != ENOSYS)
    return UV__ERR(errno);

  fd = uv__inotify_init();
  if (fd == -1)
    return UV__ERR(errno);

  err = uv__cloexec(fd, 1);
  if (err == 0)
    err = uv__nonblock(fd, 1);

  if (err) {
    uv__close(fd);
    return err;
  }

  return fd;
}


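/* Lazily create the loop's shared inotify fd and start polling it for
 * readability. No-op if the fd already exists.
 */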
static int init_inotify(uv_loop_t* loop) {
  int err;

  if (loop->inotify_fd != -1)
    return 0;

  err = new_inotify_fd();
  if (err < 0)
    return err;

  loop->inotify_fd = err;
  uv__io_init(&loop->inotify_read_watcher, uv__inotify_read, loop->inotify_fd);
  uv__io_start(loop, &loop->inotify_read_watcher, POLLIN);

  return 0;
}


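/* Called after a fork to re-arm fs-event handles in the child. old_watchers
 * is the watcher tree that was in use before the fork; each handle is
 * stopped against it and restarted against the freshly opened inotify fd.
 */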
int uv__inotify_fork(uv_loop_t* loop, void* old_watchers) {
  /* Open the inotify_fd, and re-arm all the inotify watchers. */
  int err;
  struct watcher_list* tmp_watcher_list_iter;
  struct watcher_list* watcher_list;
  struct watcher_list tmp_watcher_list;
  QUEUE queue;
  QUEUE* q;
  uv_fs_event_t* handle;
  char* tmp_path;

  if (old_watchers != NULL) {
    /* We must restore the old watcher list to be able to close items
     * out of it.
     */
    loop->inotify_watchers = old_watchers;

    QUEUE_INIT(&tmp_watcher_list.watchers);
    /* Note that the queue we use is shared with uv_fs_event_start() and
     * uv_fs_event_stop(), which makes QUEUE_FOREACH unsafe to use. So we
     * use the QUEUE_MOVE trick to iterate safely, and we don't free the
     * watcher list until we're done iterating. cf. uv__inotify_read.
     */
    RB_FOREACH_SAFE(watcher_list, watcher_root,
                    CAST(&old_watchers), tmp_watcher_list_iter) {
      watcher_list->iterating = 1;
      QUEUE_MOVE(&watcher_list->watchers, &queue);
      while (!QUEUE_EMPTY(&queue)) {
        q = QUEUE_HEAD(&queue);
        handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
        /* It's critical to keep a copy of path here, because it will be
         * set to NULL by uv_fs_event_stop() and then deallocated by
         * maybe_free_watcher_list().
         */
        tmp_path = uv__strdup(handle->path);
        assert(tmp_path != NULL);
        QUEUE_REMOVE(q);
        QUEUE_INSERT_TAIL(&watcher_list->watchers, q);
        uv_fs_event_stop(handle);

        QUEUE_INSERT_TAIL(&tmp_watcher_list.watchers, &handle->watchers);
        handle->path = tmp_path;
      }
      watcher_list->iterating = 0;
      maybe_free_watcher_list(watcher_list, loop);
    }

    QUEUE_MOVE(&tmp_watcher_list.watchers, &queue);
    while (!QUEUE_EMPTY(&queue)) {
      q = QUEUE_HEAD(&queue);
      QUEUE_REMOVE(q);
      handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
      tmp_path = handle->path;
      handle->path = NULL;
      err = uv_fs_event_start(handle, handle->cb, tmp_path, 0);
      uv__free(tmp_path);
      if (err)
        return err;
    }
  }

  return 0;
}


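/* Look up the watcher_list for an inotify watch descriptor, or NULL. */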
static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) {
  struct watcher_list w;
  w.wd = wd;
  return RB_FIND(watcher_root, CAST(&loop->inotify_watchers), &w);
}

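/* Free the watcher_list and remove its kernel watch once no handles use it
 * and no iteration over its queue is in progress.
 */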
static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) {
  /* If watcher_list->watchers is being iterated over, we can't free it. */
  if ((!w->iterating) && QUEUE_EMPTY(&w->watchers)) {
    /* No watchers left for this path. Clean up. */
    RB_REMOVE(watcher_root, CAST(&loop->inotify_watchers), w);
    uv__inotify_rm_watch(loop->inotify_fd, w->wd);
    uv__free(w);
  }
}

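/* Called when the inotify fd becomes readable: drain it and dispatch every
 * event to each handle registered for the affected watch descriptor.
 */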
static void uv__inotify_read(uv_loop_t* loop,
                             uv__io_t* dummy,
                             unsigned int events) {
  const struct uv__inotify_event* e;
  struct watcher_list* w;
  uv_fs_event_t* h;
  QUEUE queue;
  QUEUE* q;
  const char* path;
  ssize_t size;
  const char *p;
  /* needs to be large enough for sizeof(inotify_event) + strlen(path) */
  char buf[4096];

  while (1) {
    do
      size = read(loop->inotify_fd, buf, sizeof(buf));
    while (size == -1 && errno == EINTR);

    if (size == -1) {
      assert(errno == EAGAIN || errno == EWOULDBLOCK);
      break;
    }

    assert(size > 0);  /* Pre-2.6.21 kernels return 0 when the read buffer
                        * is too small. */

    /* Now we have one or more inotify_event structs. */
    for (p = buf; p < buf + size; p += sizeof(*e) + e->len) {
      e = (const struct uv__inotify_event*) p;

      events = 0;
      if (e->mask & (UV__IN_ATTRIB|UV__IN_MODIFY))
        events |= UV_CHANGE;
      if (e->mask & ~(UV__IN_ATTRIB|UV__IN_MODIFY))
        events |= UV_RENAME;

      w = find_watcher(loop, e->wd);
      if (w == NULL)
        continue;  /* Stale event, no watchers left. */

      /* inotify does not return the filename when monitoring a single file
       * for modifications. Repurpose the filename for API compatibility.
       * I'm not convinced this is a good thing, maybe it should go.
       */
      path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path);

      /* We're about to iterate over the queue and call the user's callbacks.
       * What can go wrong? A callback could call uv_fs_event_stop() and the
       * queue could change under our feet. So we use the QUEUE_MOVE() trick
       * to iterate over the queue safely, and we don't free the watcher_list
       * until we're done iterating.
       *
       * First, tell uv_fs_event_stop() (which could be called from a user's
       * callback) not to free the watcher_list.
       */
      w->iterating = 1;
      QUEUE_MOVE(&w->watchers, &queue);
      while (!QUEUE_EMPTY(&queue)) {
        q = QUEUE_HEAD(&queue);
        h = QUEUE_DATA(q, uv_fs_event_t, watchers);

        QUEUE_REMOVE(q);
        QUEUE_INSERT_TAIL(&w->watchers, q);

        h->cb(h, path, events, 0);
      }
      /* Done iterating; time to (maybe) free an empty watcher_list. */
      w->iterating = 0;
      maybe_free_watcher_list(w, loop);
    }
  }
}


int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
  uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
  return 0;
}


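/* Add an inotify watch for `path`, or reuse the existing watcher_list when
 * the kernel returns a watch descriptor we already track, then link the
 * handle into that list.
 */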
int uv_fs_event_start(uv_fs_event_t* handle,
                      uv_fs_event_cb cb,
                      const char* path,
                      unsigned int flags) {
  struct watcher_list* w;
  size_t len;
  int events;
  int err;
  int wd;

  if (uv__is_active(handle))
    return UV_EINVAL;

  err = init_inotify(handle->loop);
  if (err)
    return err;

  events = UV__IN_ATTRIB
         | UV__IN_CREATE
         | UV__IN_MODIFY
         | UV__IN_DELETE
         | UV__IN_DELETE_SELF
         | UV__IN_MOVE_SELF
         | UV__IN_MOVED_FROM
         | UV__IN_MOVED_TO;

  wd = uv__inotify_add_watch(handle->loop->inotify_fd, path, events);
  if (wd == -1)
    return UV__ERR(errno);

  w = find_watcher(handle->loop, wd);
  if (w)
    goto no_insert;

  len = strlen(path) + 1;
  w = uv__malloc(sizeof(*w) + len);
  if (w == NULL)
    return UV_ENOMEM;

  w->wd = wd;
  w->path = memcpy(w + 1, path, len);
  QUEUE_INIT(&w->watchers);
  w->iterating = 0;
  RB_INSERT(watcher_root, CAST(&handle->loop->inotify_watchers), w);

no_insert:
  uv__handle_start(handle);
  QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers);
  handle->path = w->path;
  handle->cb = cb;
  handle->wd = wd;

  return 0;
}


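/* Unlink the handle from its watcher_list; the list and its kernel watch
 * are removed once the last handle for that watch descriptor is gone.
 */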
int uv_fs_event_stop(uv_fs_event_t* handle) {
  struct watcher_list* w;

  if (!uv__is_active(handle))
    return 0;

  w = find_watcher(handle->loop, handle->wd);
  assert(w != NULL);

  handle->wd = -1;
  handle->path = NULL;
  uv__handle_stop(handle);
  QUEUE_REMOVE(&handle->watchers);

  maybe_free_watcher_list(w, handle->loop);

  return 0;
}


void uv__fs_event_close(uv_fs_event_t* handle) {
  uv_fs_event_stop(handle);
}