/src/glib/gio/inotify/inotify-kernel.c
/*
   Copyright (C) 2005 John McCutchan
   Copyright © 2015 Canonical Limited

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this library; if not, see <http://www.gnu.org/licenses/>.

   Authors:
     Ryan Lortie <desrt@desrt.ca>
     John McCutchan <john@johnmccutchan.com>
*/

#include "config.h"

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <glib.h>
#include "inotify-kernel.h"
#include <sys/inotify.h>
#ifdef HAVE_SYS_FILIO_H
#include <sys/filio.h>
#endif
#include <glib/glib-unix.h>

#include "glib-private.h"

/* From inotify(7) */
#define MAX_EVENT_SIZE       (sizeof(struct inotify_event) + NAME_MAX + 1)

/* Amount of time to sleep on receipt of uninteresting events */
#define BOREDOM_SLEEP_TIME   (100 * G_TIME_SPAN_MILLISECOND)

/* Limits on the maximum amount of time and the maximum number of
 * intervening events between an IN_MOVED_FROM/IN_MOVED_TO pair that can
 * still be merged.
 */
#define MOVE_PAIR_DELAY      (10 * G_TIME_SPAN_MILLISECOND)
#define MOVE_PAIR_DISTANCE   (100)
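
/* These limits govern rename-pair merging.  A rename within the watched
 * set arrives from the kernel as an IN_MOVED_FROM event followed by an
 * IN_MOVED_TO event carrying the same cookie (see inotify(7)).  An
 * unpaired IN_MOVED_FROM at the head of the queue is therefore held back
 * for up to MOVE_PAIR_DELAY, or until the queue has grown past
 * MOVE_PAIR_DISTANCE events, in the hope that its IN_MOVED_TO arrives
 * before it has to be dispatched on its own.
 */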

/* We use the lock from inotify-helper.c
 *
 * We only have to take it on our read callback.
 *
 * The rest of locking is taken care of in inotify-helper.c
 */
G_LOCK_EXTERN (inotify_lock);

static ik_event_t *
ik_event_new (struct inotify_event *kevent,
              gint64                now)
{
  ik_event_t *event = g_new0 (ik_event_t, 1);

  event->wd = kevent->wd;
  event->mask = kevent->mask;
  event->cookie = kevent->cookie;
  event->len = kevent->len;
  event->timestamp = now;
  if (event->len)
    event->name = g_strdup (kevent->name);
  else
    event->name = NULL;

  return event;
}

void
_ik_event_free (ik_event_t *event)
{
  if (event->pair)
    {
      event->pair->pair = NULL;
      _ik_event_free (event->pair);
    }

  g_free (event->name);
  g_free (event);
}

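/* Our custom GSource.  It owns the inotify file descriptor, a queue of
 * decoded events awaiting dispatch, and a table of IN_MOVED_FROM events
 * (keyed by cookie) that are still waiting for their IN_MOVED_TO partner.
 */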
typedef struct
{
  GSource     source;

  GQueue      queue;
  gpointer    fd_tag;
  gint        fd;

  GHashTable *unmatched_moves;
  gboolean    is_bored;
} InotifyKernelSource;

static InotifyKernelSource *inotify_source;

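/* Returns the monotonic time at which the head of the queue may be
 * dispatched: -1 if the queue is empty, 0 if the head is dispatchable
 * right away, or timestamp + MOVE_PAIR_DELAY for an unpaired
 * IN_MOVED_FROM that we are still hoping to pair.
 */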
static gint64
ik_source_get_dispatch_time (InotifyKernelSource *iks)
{
  ik_event_t *head;

  head = g_queue_peek_head (&iks->queue);

  /* nothing in the queue: not ready */
  if (!head)
    return -1;

  /* if it's not an unpaired move, it is ready now */
  if (~head->mask & IN_MOVED_FROM || head->pair)
    return 0;

  /* if the queue is too long then it's ready now */
  if (iks->queue.length > MOVE_PAIR_DISTANCE)
    return 0;

  /* otherwise, it's ready after the delay */
  return head->timestamp + MOVE_PAIR_DELAY;
}

static gboolean
ik_source_can_dispatch_now (InotifyKernelSource *iks,
                            gint64               now)
{
  gint64 dispatch_time;

  dispatch_time = ik_source_get_dispatch_time (iks);

  return 0 <= dispatch_time && dispatch_time <= now;
}

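/* Reads whatever is currently available from the inotify fd into the
 * given buffer, retrying on EINTR.  Returns the number of bytes read,
 * or 0 if the read would have blocked.
 */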
static gsize
ik_source_read_some_events (InotifyKernelSource *iks,
                            gchar               *buffer,
                            gsize                buffer_len)
{
  gssize result;
  int errsv;

again:
  result = read (iks->fd, buffer, buffer_len);
  errsv = errno;

  if (result < 0)
    {
      if (errsv == EINTR)
        goto again;

      if (errsv == EAGAIN)
        return 0;

      g_error ("inotify read(): %s", g_strerror (errsv));
    }
  else if (result == 0)
    g_error ("inotify unexpectedly hit eof");

  return result;
}

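/* Drains the inotify fd in at most two reads: one into the caller's
 * buffer and, if that buffer may have been too small, a second one into
 * a heap-allocated buffer sized via FIONREAD.  Returns the buffer that
 * was actually used; the caller must free it if it is not the one that
 * was passed in.
 */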
static gchar *
ik_source_read_all_the_events (InotifyKernelSource *iks,
                               gchar               *buffer,
                               gsize                buffer_len,
                               gsize               *length_out)
{
  gsize n_read;

  n_read = ik_source_read_some_events (iks, buffer, buffer_len);

  /* Check if we might have gotten another event if we had passed in a
   * bigger buffer...
   */
  if (n_read + MAX_EVENT_SIZE > buffer_len)
    {
      gchar *new_buffer;
      guint n_readable;
      gint result;
      int errsv;

      /* figure out how many more bytes there are to read */
      result = ioctl (iks->fd, FIONREAD, &n_readable);
      errsv = errno;
      if (result != 0)
        g_error ("inotify ioctl(FIONREAD): %s", g_strerror (errsv));

      if (n_readable != 0)
        {
          /* there is in fact more data.  allocate a new buffer, copy
           * the existing data, and then append the remaining.
           */
          new_buffer = g_malloc (n_read + n_readable);
          memcpy (new_buffer, buffer, n_read);
          n_read += ik_source_read_some_events (iks, new_buffer + n_read, n_readable);

          buffer = new_buffer;

          /* There may be more events in the kernel's queue that arrived
           * after the FIONREAD was performed, but we can't risk getting
           * into a loop.  We'll get them next time.
           */
        }
    }

  *length_out = n_read;

  return buffer;
}

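/* The GSource dispatch function: drains the fd (unless we are "bored"),
 * decodes the raw events, pairs up renames by cookie, and hands every
 * event whose delay has expired to the user callback under inotify_lock.
 * Finally, it decides whether to keep polling the fd or to back off with
 * the boredom timeout.
 */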
static gboolean
ik_source_dispatch (GSource     *source,
                    GSourceFunc  func,
                    gpointer     user_data)
{
  InotifyKernelSource *iks = (InotifyKernelSource *) source;
  gboolean (*user_callback) (ik_event_t *event) = (void *) func;
  gboolean interesting = FALSE;
  gint64 now;

  now = g_source_get_time (source);

  if (iks->is_bored || g_source_query_unix_fd (source, iks->fd_tag))
    {
      gchar stack_buffer[4096];
      gsize buffer_len;
      gchar *buffer;
      gsize offset;

      /* We want to read all of the available events.
       *
       * We need to do it in a finite number of steps so that we don't
       * get caught in a loop of read() with another process
       * continuously adding events each time we drain them.
       *
       * In the normal case we will have only a few events in the queue,
       * so start out by reading into a small stack-allocated buffer.
       * Even though we're on a fresh stack frame, there is no need to
       * pointlessly blow up the size of the worker thread's stack with a
       * huge buffer here.
       *
       * If the result is large enough to cause us to suspect that
       * another event may be pending then we allocate a buffer on the
       * heap that can hold all of the events and read (once!) into that
       * buffer.
       */
      buffer = ik_source_read_all_the_events (iks, stack_buffer, sizeof stack_buffer, &buffer_len);

      offset = 0;

      while (offset < buffer_len)
        {
          struct inotify_event *kevent = (struct inotify_event *) (buffer + offset);
          ik_event_t *event;

          event = ik_event_new (kevent, now);

          offset += sizeof (struct inotify_event) + event->len;

          if (event->mask & IN_MOVED_TO)
            {
              ik_event_t *pair;

              pair = g_hash_table_lookup (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie));
              if (pair != NULL)
                {
                  g_assert (!pair->pair);

                  g_hash_table_remove (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie));
                  event->is_second_in_pair = TRUE;
                  event->pair = pair;
                  pair->pair = event;
                  continue;
                }

              interesting = TRUE;
            }

          else if (event->mask & IN_MOVED_FROM)
            {
              gboolean new;

              new = g_hash_table_insert (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie), event);
              if G_UNLIKELY (!new)
                g_warning ("inotify: got IN_MOVED_FROM event with already-pending cookie %#x", event->cookie);

              interesting = TRUE;
            }

          g_queue_push_tail (&iks->queue, event);
        }

      if (buffer_len == 0)
        {
          /* We can end up reading nothing if we arrived here due to a
           * boredom timer but the stream of events stopped meanwhile.
           *
           * In that case, we need to switch back to polling the file
           * descriptor in the usual way.
           */
          g_assert (iks->is_bored);
          interesting = TRUE;
        }

      if (buffer != stack_buffer)
        g_free (buffer);
    }

  while (ik_source_can_dispatch_now (iks, now))
    {
      ik_event_t *event;

      /* callback will free the event */
      event = g_queue_pop_head (&iks->queue);

      if (event->mask & IN_MOVED_FROM && !event->pair)
        g_hash_table_remove (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie));

      G_LOCK (inotify_lock);

      interesting |= (* user_callback) (event);

      G_UNLOCK (inotify_lock);
    }

  /* The queue gets blocked iff we have unmatched moves */
  g_assert ((iks->queue.length > 0) == (g_hash_table_size (iks->unmatched_moves) > 0));

  /* Here's where we decide what will wake us up next.
   *
   * If the last event was interesting then we will wake up on the fd or
   * when the timeout is reached on an unpaired move (if any).
   *
   * If the last event was uninteresting then we will wake up after the
   * shorter of the boredom sleep or any timeout for an unpaired move.
   */
  if (interesting)
    {
      if (iks->is_bored)
        {
          g_source_modify_unix_fd (source, iks->fd_tag, G_IO_IN);
          iks->is_bored = FALSE;
        }

      g_source_set_ready_time (source, ik_source_get_dispatch_time (iks));
    }
  else
    {
      /* Note: ik_source_get_dispatch_time() returns -1 when nothing is
       * queued; stored in an unsigned integer that becomes G_MAXUINT64,
       * so the boredom timeout always wins the MIN() in that case.
       */
      guint64 dispatch_time = ik_source_get_dispatch_time (iks);
      guint64 boredom_time = now + BOREDOM_SLEEP_TIME;

      if (!iks->is_bored)
        {
          g_source_modify_unix_fd (source, iks->fd_tag, 0);
          iks->is_bored = TRUE;
        }

      g_source_set_ready_time (source, MIN (dispatch_time, boredom_time));
    }

  return TRUE;
}

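/* Creates the singleton source: opens the inotify fd (preferring
 * inotify_init1() with IN_CLOEXEC | IN_NONBLOCK, falling back to
 * inotify_init() plus g_unix_set_fd_nonblocking()), registers the fd
 * with the source, and attaches the source to GLib's private worker
 * context so that events are handled on the worker thread.
 */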
static InotifyKernelSource *
ik_source_new (gboolean (* callback) (ik_event_t *event))
{
  static GSourceFuncs source_funcs = {
    NULL, NULL,
    ik_source_dispatch,
    NULL, NULL, NULL
  };
  InotifyKernelSource *iks;
  GSource *source;
  gboolean should_set_nonblock = FALSE;

  source = g_source_new (&source_funcs, sizeof (InotifyKernelSource));
  iks = (InotifyKernelSource *) source;

  g_source_set_static_name (source, "inotify kernel source");

  iks->unmatched_moves = g_hash_table_new (NULL, NULL);
  iks->fd = inotify_init1 (IN_CLOEXEC | IN_NONBLOCK);

  if (iks->fd < 0)
    {
      should_set_nonblock = TRUE;
      iks->fd = inotify_init ();
    }

  if (iks->fd >= 0)
    {
      GError *error = NULL;

      if (should_set_nonblock)
        {
          g_unix_set_fd_nonblocking (iks->fd, TRUE, &error);
          g_assert_no_error (error);
        }

      iks->fd_tag = g_source_add_unix_fd (source, iks->fd, G_IO_IN);
    }

  g_source_set_callback (source, (GSourceFunc) callback, NULL, NULL);

  g_source_attach (source, GLIB_PRIVATE_CALL (g_get_worker_context) ());

  return iks;
}

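/* Lazily creates the singleton kernel source (thread-safe via
 * g_once_init_enter()).  Returns TRUE if the inotify fd could be opened.
 */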
gboolean
_ik_startup (gboolean (*cb)(ik_event_t *event))
{
  if (g_once_init_enter (&inotify_source))
    g_once_init_leave (&inotify_source, ik_source_new (cb));

  return inotify_source->fd >= 0;
}

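/* Adds an inotify watch on @path with the given event @mask.  Returns
 * the (non-negative) watch descriptor on success; on failure returns a
 * negative value and stores errno in @err (if non-NULL).
 */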
gint32
_ik_watch (const char *path,
           guint32     mask,
           int        *err)
{
  gint32 wd = -1;

  g_assert (path != NULL);
  g_assert (inotify_source && inotify_source->fd >= 0);

  wd = inotify_add_watch (inotify_source->fd, path, mask);

  if (wd < 0)
    {
      int e = errno;
      /* FIXME: debug msg failed to add watch */
      if (err)
        *err = e;
      return wd;
    }

  g_assert (wd >= 0);
  return wd;
}

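/* Removes the watch identified by @wd.  Returns 0 on success or -1 if
 * inotify_rm_watch() failed.  (@path is not used by this function.)
 */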
int
_ik_ignore (const char *path,
            gint32      wd)
{
  g_assert (wd >= 0);
  g_assert (inotify_source && inotify_source->fd >= 0);

  if (inotify_rm_watch (inotify_source->fd, wd) < 0)
    {
      /* int e = errno; */
      /* failed to rm watch */
      return -1;
    }

  return 0;
}