/src/CMake/Utilities/cmlibuv/src/unix/linux-inotify.c
Line | Count | Source |
1 | | /* Copyright Joyent, Inc. and other Node contributors. All rights reserved. |
2 | | * Permission is hereby granted, free of charge, to any person obtaining a copy |
3 | | * of this software and associated documentation files (the "Software"), to |
4 | | * deal in the Software without restriction, including without limitation the |
5 | | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
6 | | * sell copies of the Software, and to permit persons to whom the Software is |
7 | | * furnished to do so, subject to the following conditions: |
8 | | * |
9 | | * The above copyright notice and this permission notice shall be included in |
10 | | * all copies or substantial portions of the Software. |
11 | | * |
12 | | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
13 | | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
14 | | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
15 | | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
16 | | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
17 | | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
18 | | * IN THE SOFTWARE. |
19 | | */ |
20 | | |
21 | | #include "uv.h" |
22 | | #include "uv/tree.h" |
23 | | #include "internal.h" |
24 | | |
25 | | #include <stdint.h> |
26 | | #include <stdio.h> |
27 | | #include <stdlib.h> |
28 | | #include <string.h> |
29 | | #include <assert.h> |
30 | | #include <errno.h> |
31 | | |
32 | | #include <sys/inotify.h> |
33 | | #include <sys/types.h> |
34 | | #include <unistd.h> |
35 | | |
/* Per-watch-descriptor bookkeeping.  One watcher_list exists per inotify
 * watch descriptor (wd); all uv_fs_event_t handles that resolved to the
 * same wd share one watcher_list (see the no_insert path in
 * uv_fs_event_start).
 */
struct watcher_list {
  RB_ENTRY(watcher_list) entry; /* red-black tree linkage, keyed on wd */
  QUEUE watchers;               /* queue of uv_fs_event_t handles for this wd */
  int iterating;                /* nonzero while callbacks walk `watchers`;
                                 * defers freeing (see maybe_free_watcher_list) */
  char* path;                   /* path given to inotify_add_watch; storage is
                                 * co-allocated right after this struct */
  int wd;                       /* inotify watch descriptor (tree key) */
};

struct watcher_root {
  struct watcher_list* rbh_root;
};
/* loop->inotify_watchers is stored untyped; CAST recovers the tree root. */
#define CAST(p) ((struct watcher_root*)(p))
48 | | |
49 | | |
50 | | static int compare_watchers(const struct watcher_list* a, |
51 | 0 | const struct watcher_list* b) { |
52 | 0 | if (a->wd < b->wd) return -1; |
53 | 0 | if (a->wd > b->wd) return 1; |
54 | 0 | return 0; |
55 | 0 | } |
56 | | |
57 | | |
58 | 0 | RB_GENERATE_STATIC(watcher_root, watcher_list, entry, compare_watchers) Unexecuted instantiation: linux-inotify.c:watcher_root_RB_MINMAX Unexecuted instantiation: linux-inotify.c:watcher_root_RB_REMOVE Unexecuted instantiation: linux-inotify.c:watcher_root_RB_REMOVE_COLOR Unexecuted instantiation: linux-inotify.c:watcher_root_RB_FIND Unexecuted instantiation: linux-inotify.c:watcher_root_RB_INSERT Unexecuted instantiation: linux-inotify.c:watcher_root_RB_INSERT_COLOR |
59 | 0 |
|
60 | 0 |
|
61 | 0 | static void uv__inotify_read(uv_loop_t* loop, |
62 | 0 | uv__io_t* w, |
63 | 0 | unsigned int revents); |
64 | 0 |
|
65 | 0 | static void maybe_free_watcher_list(struct watcher_list* w, |
66 | 0 | uv_loop_t* loop); |
67 | 0 |
|
68 | 0 | static int init_inotify(uv_loop_t* loop) { |
69 | 0 | int fd; |
70 | |
|
71 | 0 | if (loop->inotify_fd != -1) |
72 | 0 | return 0; |
73 | | |
74 | 0 | fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC); |
75 | 0 | if (fd < 0) |
76 | 0 | return UV__ERR(errno); |
77 | | |
78 | 0 | loop->inotify_fd = fd; |
79 | 0 | uv__io_init(&loop->inotify_read_watcher, uv__inotify_read, loop->inotify_fd); |
80 | 0 | uv__io_start(loop, &loop->inotify_read_watcher, POLLIN); |
81 | |
|
82 | 0 | return 0; |
83 | 0 | } |
84 | | |
85 | | |
int uv__inotify_fork(uv_loop_t* loop, void* old_watchers) {
  /* Open the inotify_fd, and re-arm all the inotify watchers. */
  /* Called after fork(): old_watchers is the parent's watcher tree.  Phase 1
   * stops every handle (releasing parent-side wds and watcher_lists) while
   * parking the handles on a temporary queue; phase 2 restarts each parked
   * handle, which re-creates the inotify fd and fresh watches via
   * uv_fs_event_start().
   */
  int err;
  struct watcher_list* tmp_watcher_list_iter;
  struct watcher_list* watcher_list;
  struct watcher_list tmp_watcher_list;  /* local anchor for parked handles */
  QUEUE queue;
  QUEUE* q;
  uv_fs_event_t* handle;
  char* tmp_path;

  if (old_watchers != NULL) {
    /* We must restore the old watcher list to be able to close items
     * out of it.
     */
    loop->inotify_watchers = old_watchers;

    QUEUE_INIT(&tmp_watcher_list.watchers);
    /* Note that the queue we use is shared with the start and stop()
     * functions, making QUEUE_FOREACH unsafe to use. So we use the
     * QUEUE_MOVE trick to safely iterate. Also don't free the watcher
     * list until we're done iterating. c.f. uv__inotify_read.
     */
    RB_FOREACH_SAFE(watcher_list, watcher_root,
                    CAST(&old_watchers), tmp_watcher_list_iter) {
      watcher_list->iterating = 1;
      QUEUE_MOVE(&watcher_list->watchers, &queue);
      while (!QUEUE_EMPTY(&queue)) {
        q = QUEUE_HEAD(&queue);
        handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
        /* It's critical to keep a copy of path here, because it
         * will be set to NULL by stop() and then deallocated by
         * maybe_free_watcher_list
         */
        tmp_path = uv__strdup(handle->path);
        assert(tmp_path != NULL);
        QUEUE_REMOVE(q);
        QUEUE_INSERT_TAIL(&watcher_list->watchers, q);
        uv_fs_event_stop(handle);

        /* Park the stopped handle; it keeps the duplicated path so it can
         * be restarted in phase 2 below. */
        QUEUE_INSERT_TAIL(&tmp_watcher_list.watchers, &handle->watchers);
        handle->path = tmp_path;
      }
      /* Iteration over this watcher_list is done; now it is safe to free
       * it if it became empty. */
      watcher_list->iterating = 0;
      maybe_free_watcher_list(watcher_list, loop);
    }

    /* Phase 2: restart every parked handle, re-arming its watch. */
    QUEUE_MOVE(&tmp_watcher_list.watchers, &queue);
    while (!QUEUE_EMPTY(&queue)) {
      q = QUEUE_HEAD(&queue);
      QUEUE_REMOVE(q);
      handle = QUEUE_DATA(q, uv_fs_event_t, watchers);
      tmp_path = handle->path;
      /* Clear path first: uv_fs_event_start() sets handle->path itself and
       * the duplicated copy is freed right after. */
      handle->path = NULL;
      err = uv_fs_event_start(handle, handle->cb, tmp_path, 0);
      uv__free(tmp_path);
      if (err)
        return err;
    }
  }

  return 0;
}
149 | | |
150 | | |
151 | 0 | static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) { |
152 | 0 | struct watcher_list w; |
153 | 0 | w.wd = wd; |
154 | 0 | return RB_FIND(watcher_root, CAST(&loop->inotify_watchers), &w); |
155 | 0 | } |
156 | | |
157 | 0 | static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) { |
158 | | /* if the watcher_list->watchers is being iterated over, we can't free it. */ |
159 | 0 | if ((!w->iterating) && QUEUE_EMPTY(&w->watchers)) { |
160 | | /* No watchers left for this path. Clean up. */ |
161 | 0 | RB_REMOVE(watcher_root, CAST(&loop->inotify_watchers), w); |
162 | 0 | inotify_rm_watch(loop->inotify_fd, w->wd); |
163 | 0 | uv__free(w); |
164 | 0 | } |
165 | 0 | } |
166 | | |
/* I/O callback for loop->inotify_fd: drain all pending inotify events and
 * dispatch them to the uv_fs_event_t handles registered for each wd.
 * `dummy` is unused; `events` is repurposed as the per-event UV flag set.
 */
static void uv__inotify_read(uv_loop_t* loop,
                             uv__io_t* dummy,
                             unsigned int events) {
  const struct inotify_event* e;
  struct watcher_list* w;
  uv_fs_event_t* h;
  QUEUE queue;
  QUEUE* q;
  const char* path;
  ssize_t size;
  const char *p;
  /* needs to be large enough for sizeof(inotify_event) + strlen(path) */
  char buf[4096];

  for (;;) {
    /* Retry reads interrupted by signals. */
    do
      size = read(loop->inotify_fd, buf, sizeof(buf));
    while (size == -1 && errno == EINTR);

    if (size == -1) {
      /* Fd is non-blocking (IN_NONBLOCK): EAGAIN means fully drained. */
      assert(errno == EAGAIN || errno == EWOULDBLOCK);
      break;
    }

    assert(size > 0); /* pre-2.6.21 thing, size=0 == read buffer too small */

    /* Now we have one or more inotify_event structs. */
    for (p = buf; p < buf + size; p += sizeof(*e) + e->len) {
      e = (const struct inotify_event*) p;

      /* Collapse the inotify mask onto libuv's two event kinds. */
      events = 0;
      if (e->mask & (IN_ATTRIB|IN_MODIFY))
        events |= UV_CHANGE;
      if (e->mask & ~(IN_ATTRIB|IN_MODIFY))
        events |= UV_RENAME;

      w = find_watcher(loop, e->wd);
      if (w == NULL)
        continue; /* Stale event, no watchers left. */

      /* inotify does not return the filename when monitoring a single file
       * for modifications. Repurpose the filename for API compatibility.
       * I'm not convinced this is a good thing, maybe it should go.
       */
      path = e->len ? (const char*) (e + 1) : uv__basename_r(w->path);

      /* We're about to iterate over the queue and call user's callbacks.
       * What can go wrong?
       * A callback could call uv_fs_event_stop()
       * and the queue can change under our feet.
       * So, we use QUEUE_MOVE() trick to safely iterate over the queue.
       * And we don't free the watcher_list until we're done iterating.
       *
       * First,
       * tell uv_fs_event_stop() (that could be called from a user's callback)
       * not to free watcher_list.
       */
      w->iterating = 1;
      QUEUE_MOVE(&w->watchers, &queue);
      while (!QUEUE_EMPTY(&queue)) {
        q = QUEUE_HEAD(&queue);
        h = QUEUE_DATA(q, uv_fs_event_t, watchers);

        /* Re-home the handle onto the live queue BEFORE the callback, so a
         * uv_fs_event_stop() inside the callback unlinks it correctly. */
        QUEUE_REMOVE(q);
        QUEUE_INSERT_TAIL(&w->watchers, q);

        h->cb(h, path, events, 0);
      }
      /* done iterating, time to (maybe) free empty watcher_list */
      w->iterating = 0;
      maybe_free_watcher_list(w, loop);
    }
  }
}
241 | | |
242 | | |
243 | 0 | int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) { |
244 | 0 | uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT); |
245 | 0 | return 0; |
246 | 0 | } |
247 | | |
248 | | |
249 | | int uv_fs_event_start(uv_fs_event_t* handle, |
250 | | uv_fs_event_cb cb, |
251 | | const char* path, |
252 | 0 | unsigned int flags) { |
253 | 0 | struct watcher_list* w; |
254 | 0 | size_t len; |
255 | 0 | int events; |
256 | 0 | int err; |
257 | 0 | int wd; |
258 | |
|
259 | 0 | if (uv__is_active(handle)) |
260 | 0 | return UV_EINVAL; |
261 | | |
262 | 0 | err = init_inotify(handle->loop); |
263 | 0 | if (err) |
264 | 0 | return err; |
265 | | |
266 | 0 | events = IN_ATTRIB |
267 | 0 | | IN_CREATE |
268 | 0 | | IN_MODIFY |
269 | 0 | | IN_DELETE |
270 | 0 | | IN_DELETE_SELF |
271 | 0 | | IN_MOVE_SELF |
272 | 0 | | IN_MOVED_FROM |
273 | 0 | | IN_MOVED_TO; |
274 | |
|
275 | 0 | wd = inotify_add_watch(handle->loop->inotify_fd, path, events); |
276 | 0 | if (wd == -1) |
277 | 0 | return UV__ERR(errno); |
278 | | |
279 | 0 | w = find_watcher(handle->loop, wd); |
280 | 0 | if (w) |
281 | 0 | goto no_insert; |
282 | | |
283 | 0 | len = strlen(path) + 1; |
284 | 0 | w = uv__malloc(sizeof(*w) + len); |
285 | 0 | if (w == NULL) |
286 | 0 | return UV_ENOMEM; |
287 | | |
288 | 0 | w->wd = wd; |
289 | 0 | w->path = memcpy(w + 1, path, len); |
290 | 0 | QUEUE_INIT(&w->watchers); |
291 | 0 | w->iterating = 0; |
292 | 0 | RB_INSERT(watcher_root, CAST(&handle->loop->inotify_watchers), w); |
293 | |
|
294 | 0 | no_insert: |
295 | 0 | uv__handle_start(handle); |
296 | 0 | QUEUE_INSERT_TAIL(&w->watchers, &handle->watchers); |
297 | 0 | handle->path = w->path; |
298 | 0 | handle->cb = cb; |
299 | 0 | handle->wd = wd; |
300 | |
|
301 | 0 | return 0; |
302 | 0 | } |
303 | | |
304 | | |
305 | 0 | int uv_fs_event_stop(uv_fs_event_t* handle) { |
306 | 0 | struct watcher_list* w; |
307 | |
|
308 | 0 | if (!uv__is_active(handle)) |
309 | 0 | return 0; |
310 | | |
311 | 0 | w = find_watcher(handle->loop, handle->wd); |
312 | 0 | assert(w != NULL); |
313 | |
|
314 | 0 | handle->wd = -1; |
315 | 0 | handle->path = NULL; |
316 | 0 | uv__handle_stop(handle); |
317 | 0 | QUEUE_REMOVE(&handle->watchers); |
318 | |
|
319 | 0 | maybe_free_watcher_list(w, handle->loop); |
320 | |
|
321 | 0 | return 0; |
322 | 0 | } |
323 | | |
324 | | |
/* Close hook for fs-event handles: closing simply stops the watch;
 * uv_fs_event_stop() releases the shared watcher_list when appropriate. */
void uv__fs_event_close(uv_fs_event_t* handle) {
  uv_fs_event_stop(handle);
}