Coverage Report

Created: 2025-07-23 08:13

/src/pango/subprojects/glib/gio/inotify/inotify-kernel.c
Line
Count
Source (jump to first uncovered line)
1
/*
2
   Copyright (C) 2005 John McCutchan
3
   Copyright © 2015 Canonical Limited
4
   Copyright © 2024 Future Crew LLC
5
6
   SPDX-License-Identifier: LGPL-2.1-or-later
7
8
   This library is free software; you can redistribute it and/or
9
   modify it under the terms of the GNU Lesser General Public
10
   License as published by the Free Software Foundation; either
11
   version 2.1 of the License, or (at your option) any later version.
12
13
   This library is distributed in the hope that it will be useful,
14
   but WITHOUT ANY WARRANTY; without even the implied warranty of
15
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16
   Lesser General Public License for more details.
17
18
   You should have received a copy of the GNU Lesser General Public License
19
   along with this library; if not, see <http://www.gnu.org/licenses/>.
20
21
   Authors:
22
     Ryan Lortie <desrt@desrt.ca>
23
     John McCutchan <john@johnmccutchan.com>
24
     Gleb Popov <arrowd@FreeBSD.org>
25
*/
26
27
#include "config.h"
28
29
#include <stdio.h>
30
#include <sys/ioctl.h>
31
#include <unistd.h>
32
#include <errno.h>
33
#include <string.h>
34
#include <glib.h>
35
#include "inotify-kernel.h"
36
#include <sys/inotify.h>
37
#ifdef HAVE_SYS_UIO_H
38
#include <sys/uio.h>
39
#endif
40
#ifdef HAVE_SYS_FILIO_H
41
#include <sys/filio.h>
42
#endif
43
#include <glib/glib-unix.h>
44
45
#include "glib-private.h"
46
47
/* From inotify(7): the largest possible single event record — the fixed
 * struct inotify_event header plus the longest possible NUL-terminated
 * file name. */
#define MAX_EVENT_SIZE       (sizeof(struct inotify_event) + NAME_MAX + 1)

/* Amount of time to sleep on receipt of uninteresting events */
#define BOREDOM_SLEEP_TIME   (100 * G_TIME_SPAN_MILLISECOND)

/* Define limits on the maximum amount of time and maximum amount of
 * interceding events between FROM/TO that can be merged.
 */
#define MOVE_PAIR_DELAY      (10 * G_TIME_SPAN_MILLISECOND)
#define MOVE_PAIR_DISTANCE   (100)

/* We use the lock from inotify-helper.c
 *
 * We only have to take it on our read callback.
 *
 * The rest of locking is taken care of in inotify-helper.c
 */
G_LOCK_EXTERN (inotify_lock);
67
static ik_event_t *
68
ik_event_new (struct inotify_event *kevent,
69
              gint64                now)
70
0
{
71
0
  ik_event_t *event = g_new0 (ik_event_t, 1);
72
73
0
  event->wd = kevent->wd;
74
0
  event->mask = kevent->mask;
75
0
  event->cookie = kevent->cookie;
76
0
  event->len = kevent->len;
77
0
  event->timestamp = now;
78
0
  if (event->len)
79
0
    event->name = g_strdup (kevent->name);
80
0
  else
81
0
    event->name = NULL;
82
83
0
  return event;
84
0
}
85
86
void
87
_ik_event_free (ik_event_t *event)
88
0
{
89
0
  if (event->pair)
90
0
    {
91
0
      event->pair->pair = NULL;
92
0
      _ik_event_free (event->pair);
93
0
    }
94
95
0
  g_free (event->name);
96
0
  g_free (event);
97
0
}
98
99
/* The GSource that owns the inotify file descriptor and all pending
 * events.  It lives on the GLib worker context (see ik_source_new). */
typedef struct
{
  GSource     source;           /* must be first: GSource* is cast to this */

  GQueue      queue;  /* (element-type ik_event_t) — events not yet dispatched */
  gpointer    fd_tag;           /* tag returned by g_source_add_unix_fd() */
  gint        fd;               /* the inotify descriptor, or -1 after finalize */

  GHashTable *unmatched_moves;  /* (element-type guint ik_event_t) — IN_MOVED_FROM
                                 * events keyed by cookie, awaiting their IN_MOVED_TO */
  gboolean    is_bored;         /* TRUE while we poll on a timer instead of the fd */
} InotifyKernelSource;

/* Singleton, created once by _ik_startup() */
static InotifyKernelSource *inotify_source;
112
113
static gint64
114
ik_source_get_dispatch_time (InotifyKernelSource *iks)
115
0
{
116
0
  ik_event_t *head;
117
118
0
  head = g_queue_peek_head (&iks->queue);
119
120
  /* nothing in the queue: not ready */
121
0
  if (!head)
122
0
    return -1;
123
124
  /* if it's not an unpaired move, it is ready now */
125
0
  if (~head->mask & IN_MOVED_FROM || head->pair)
126
0
    return 0;
127
128
  /* if the queue is too long then it's ready now */
129
0
  if (iks->queue.length > MOVE_PAIR_DISTANCE)
130
0
    return 0;
131
132
  /* otherwise, it's ready after the delay */
133
0
  return head->timestamp + MOVE_PAIR_DELAY;
134
0
}
135
136
static gboolean
137
ik_source_can_dispatch_now (InotifyKernelSource *iks,
138
                            gint64               now)
139
0
{
140
0
  gint64 dispatch_time;
141
142
0
  dispatch_time = ik_source_get_dispatch_time (iks);
143
144
0
  return 0 <= dispatch_time && dispatch_time <= now;
145
0
}
146
147
static gsize
148
ik_source_read_some_events (InotifyKernelSource *iks,
149
                            gchar               *buffer,
150
                            gsize                buffer_len)
151
0
{
152
0
  gssize result;
153
0
  int errsv;
154
155
0
again:
156
0
  result = read (iks->fd, buffer, buffer_len);
157
0
  errsv = errno;
158
159
0
  if (result < 0)
160
0
    {
161
0
      if (errsv == EINTR)
162
0
        goto again;
163
164
0
      if (errsv == EAGAIN)
165
0
        return 0;
166
167
0
      g_error ("inotify read(): %s", g_strerror (errsv));
168
0
    }
169
0
  else if (result == 0)
170
0
    g_error ("inotify unexpectedly hit eof");
171
172
0
  return result;
173
0
}
174
175
/* Read as many pending events as possible, first into the caller's
 * @buffer and then — if the kernel reports more data queued — into a
 * single heap-allocated buffer sized by FIONREAD.
 *
 * Returns either @buffer itself or a new heap buffer; the caller must
 * compare the result against its original pointer and g_free() it when
 * they differ.  The total number of bytes read is stored in @length_out
 * (may be 0 if nothing was available). */
static gchar *
ik_source_read_all_the_events (InotifyKernelSource *iks,
                               gchar               *buffer,
                               gsize                buffer_len,
                               gsize               *length_out)
{
  gsize n_read;

  n_read = ik_source_read_some_events (iks, buffer, buffer_len);

  /* Check if we might have gotten another event if we had passed in a
   * bigger buffer...
   */
  if (n_read + MAX_EVENT_SIZE > buffer_len)
    {
      gchar *new_buffer;
      guint n_readable;  /* NOTE(review): FIONREAD conventionally takes int*; guint
                          * matches in width here — confirm on all target platforms */
      gint result;
      int errsv;

      /* figure out how many more bytes there are to read */
      result = ioctl (iks->fd, FIONREAD, &n_readable);
      errsv = errno;
      if (result != 0)
        g_error ("inotify ioctl(FIONREAD): %s", g_strerror (errsv));

      if (n_readable != 0)
        {
          /* there is in fact more data.  allocate a new buffer, copy
           * the existing data, and then append the remaining.
           */
          new_buffer = g_malloc (n_read + n_readable);
          memcpy (new_buffer, buffer, n_read);
          n_read += ik_source_read_some_events (iks, new_buffer + n_read, n_readable);

          buffer = new_buffer;

          /* There may be new events in the buffer that were added after
           * the FIONREAD was performed, but we can't risk getting into
           * a loop.  We'll get them next time.
           */
        }
    }

  *length_out = n_read;

  return buffer;
}
223
224
/* GSource dispatch function: drain the kernel event stream, pair up the
 * two halves of renames (IN_MOVED_FROM / IN_MOVED_TO sharing a cookie),
 * hand ready events to the user callback, and then decide what should
 * wake us next (the fd, a move-pairing timeout, or a boredom timer). */
static gboolean
ik_source_dispatch (GSource     *source,
                    GSourceFunc  func,
                    gpointer     user_data)
{
  InotifyKernelSource *iks = (InotifyKernelSource *) source;
  gboolean (*user_callback) (ik_event_t *event) = (void *) func;
  gboolean interesting = FALSE;
  gint64 now;

  now = g_source_get_time (source);

  /* Read from the fd either when it polled readable, or when a boredom
   * timer (not the fd) woke us and we must check for data ourselves. */
  if (iks->is_bored || g_source_query_unix_fd (source, iks->fd_tag))
    {
#if defined(FILE_MONITOR_BACKEND_INOTIFY)
      gchar stack_buffer[4096];
      gsize buffer_len;
      gchar *buffer;
      gsize offset;

      /* We want to read all of the available events.
       *
       * We need to do it in a finite number of steps so that we don't
       * get caught in a loop of read() with another process
       * continuously adding events each time we drain them.
       *
       * In the normal case we will have only a few events in the queue,
       * so start out by reading into a small stack-allocated buffer.
       * Even though we're on a fresh stack frame, there is no need to
       * pointlessly blow up with the size of the worker thread stack
       * with a huge buffer here.
       *
       * If the result is large enough to cause us to suspect that
       * another event may be pending then we allocate a buffer on the
       * heap that can hold all of the events and read (once!) into that
       * buffer.
       */
      buffer = ik_source_read_all_the_events (iks, stack_buffer, sizeof stack_buffer, &buffer_len);

      offset = 0;

      while (offset < buffer_len)
        {
          struct inotify_event *kevent = (struct inotify_event *) (buffer + offset);
          ik_event_t *event;

          event = ik_event_new (kevent, now);

          /* records are variable-length: header plus the name bytes */
          offset += sizeof (struct inotify_event) + event->len;

          if (event->mask & IN_MOVED_TO)
            {
              ik_event_t *pair;

              /* If the matching FROM half is pending, link the two and
               * do NOT queue the TO half — it rides along via ->pair. */
              if (g_hash_table_steal_extended (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie), NULL, (gpointer*)&pair))
                {
                  g_assert (!pair->pair);

                  event->is_second_in_pair = TRUE;
                  event->pair = pair;
                  pair->pair = event;
                  continue;
                }

              interesting = TRUE;
            }

          else if (event->mask & IN_MOVED_FROM)
            {
              gboolean new;

              /* Remember this FROM half by cookie until its TO arrives
               * (or the pairing timeout in get_dispatch_time expires). */
              new = g_hash_table_insert (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie), event);
              if G_UNLIKELY (!new)
                g_warning ("inotify: got IN_MOVED_FROM event with already-pending cookie %#x", event->cookie);

              interesting = TRUE;
            }

          g_queue_push_tail (&iks->queue, event);
        }

      if (buffer_len == 0)
        {
          /* We can end up reading nothing if we arrived here due to a
           * boredom timer but the stream of events stopped meanwhile.
           *
           * In that case, we need to switch back to polling the file
           * descriptor in the usual way.
           */
          g_assert (iks->is_bored);
          interesting = TRUE;
        }

      /* read_all_the_events may have switched us to a heap buffer */
      if (buffer != stack_buffer)
        g_free (buffer);
#elif defined(FILE_MONITOR_BACKEND_LIBINOTIFY_KQUEUE)
      struct iovec *received[5];
      int num_events = libinotify_direct_readv (iks->fd, received, G_N_ELEMENTS(received), /* no_block=*/ 1);

      if (num_events < 0)
        {
          int errsv = errno;
          g_warning ("Failed to read inotify events: %s", g_strerror (errsv));
          /* fall through and skip the next few blocks */
        }

      for (int i = 0; i < num_events; i++)
        {
          struct iovec *cur_event = received[i];

          /* each received[i] is a NULL-terminated vector of events */
          while (cur_event->iov_base)
            {
              struct inotify_event *kevent = (struct inotify_event *) cur_event->iov_base;

              ik_event_t *event;

              event = ik_event_new (kevent, now);

              if (event->mask & IN_MOVED_TO)
                {
                  ik_event_t *pair;

                  /* same rename-pairing scheme as the inotify backend */
                  if (g_hash_table_steal_extended (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie), NULL, (gpointer*)&pair))
                    {
                      g_assert (!pair->pair);

                      event->is_second_in_pair = TRUE;
                      event->pair = pair;
                      pair->pair = event;

                      cur_event++;
                      continue;
                    }

                  interesting = TRUE;
                }
              else if (event->mask & IN_MOVED_FROM)
                {
                  gboolean new;

                  new = g_hash_table_insert (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie), event);
                  if G_UNLIKELY (!new)
                    g_warning ("inotify: got IN_MOVED_FROM event with already-pending cookie %#x", event->cookie);

                  interesting = TRUE;
                }

              g_queue_push_tail (&iks->queue, event);

              cur_event++;
            }
          libinotify_free_iovec (received[i]);
        }

      if (num_events == 0)
        {
          /* We can end up reading nothing if we arrived here due to a
           * boredom timer but the stream of events stopped meanwhile.
           *
           * In that case, we need to switch back to polling the file
           * descriptor in the usual way.
           */
          g_assert (iks->is_bored);
          interesting = TRUE;
        }
#endif
    }

  /* Dispatch every event whose time has come (see get_dispatch_time). */
  while (ik_source_can_dispatch_now (iks, now))
    {
      ik_event_t *event;

      /* callback will free the event */
      event = g_queue_pop_head (&iks->queue);

      /* a FROM half that never found its TO must leave the cookie table */
      if (event->mask & IN_MOVED_FROM && !event->pair)
        g_hash_table_remove (iks->unmatched_moves, GUINT_TO_POINTER (event->cookie));

      G_LOCK (inotify_lock);

      interesting |= (* user_callback) (event);

      G_UNLOCK (inotify_lock);
    }

  /* The queue gets blocked iff we have unmatched moves */
  g_assert ((iks->queue.length > 0) == (g_hash_table_size (iks->unmatched_moves) > 0));

  /* Here's where we decide what will wake us up next.
   *
   * If the last event was interesting then we will wake up on the fd or
   * when the timeout is reached on an unpaired move (if any).
   *
   * If the last event was uninteresting then we will wake up after the
   * shorter of the boredom sleep or any timeout for an unpaired move.
   */
  if (interesting)
    {
      if (iks->is_bored)
        {
          g_source_modify_unix_fd (source, iks->fd_tag, G_IO_IN);
          iks->is_bored = FALSE;
        }

      g_source_set_ready_time (source, ik_source_get_dispatch_time (iks));
    }
  else
    {
      /* stored unsigned on purpose: a -1 "never" result wraps to
       * G_MAXUINT64 and therefore always loses the MIN() below */
      guint64 dispatch_time = ik_source_get_dispatch_time (iks);
      guint64 boredom_time = now + BOREDOM_SLEEP_TIME;

      if (!iks->is_bored)
        {
          g_source_modify_unix_fd (source, iks->fd_tag, 0);
          iks->is_bored = TRUE;
        }

      g_source_set_ready_time (source, MIN (dispatch_time, boredom_time));
    }

  return TRUE;
}
445
446
/* GSource finalize: close the backend file descriptor and mark it
 * invalid so _ik_startup's fd >= 0 check reports failure thereafter. */
static void
ik_source_finalize (GSource *source)
{
  InotifyKernelSource *iks;

  iks = (InotifyKernelSource *) source;

#if defined(FILE_MONITOR_BACKEND_INOTIFY)
  close (iks->fd);
#elif defined(FILE_MONITOR_BACKEND_LIBINOTIFY_KQUEUE)
  libinotify_direct_close (iks->fd);
#endif

  iks->fd = -1;
}
461
462
/* Create the singleton source: open the inotify descriptor, register it
 * with the GLib worker context, and arrange for @callback to receive
 * each ik_event_t (from ik_source_dispatch, under inotify_lock).
 *
 * On fd-open failure the source is still created and attached, with
 * fd < 0; _ik_startup reports that as startup failure. */
static InotifyKernelSource *
ik_source_new (gboolean (* callback) (ik_event_t *event))
{
  static GSourceFuncs source_funcs = {
    NULL, NULL,   /* no prepare/check: we use fds and ready-time instead */
    ik_source_dispatch,
    ik_source_finalize,
    NULL, NULL
  };
  InotifyKernelSource *iks;
  GSource *source;
  gboolean should_set_nonblock = FALSE;

  source = g_source_new (&source_funcs, sizeof (InotifyKernelSource));
  iks = (InotifyKernelSource *) source;

  g_source_set_static_name (source, "inotify kernel source");

  /* keys are cookies passed via GUINT_TO_POINTER: direct hash/equal is fine */
  iks->unmatched_moves = g_hash_table_new (NULL, NULL);
#if defined(FILE_MONITOR_BACKEND_INOTIFY)
  iks->fd = inotify_init1 (IN_CLOEXEC | IN_NONBLOCK);
#elif defined(FILE_MONITOR_BACKEND_LIBINOTIFY_KQUEUE)
  iks->fd = inotify_init1 (IN_CLOEXEC | IN_NONBLOCK | IN_DIRECT);
#endif

#ifdef FILE_MONITOR_BACKEND_INOTIFY
  /* fallback for kernels without inotify_init1: open blocking, then
   * switch the fd to non-blocking mode below */
  if (iks->fd < 0)
    {
      should_set_nonblock = TRUE;
      iks->fd = inotify_init ();
    }
#endif

  if (iks->fd >= 0)
    {
      GError *error = NULL;

#ifdef FILE_MONITOR_BACKEND_INOTIFY
      if (should_set_nonblock)
        {
          g_unix_set_fd_nonblocking (iks->fd, TRUE, &error);
          g_assert_no_error (error);
        }
#endif

      iks->fd_tag = g_source_add_unix_fd (source, iks->fd, G_IO_IN);
    }

  g_source_set_callback (source, (GSourceFunc) callback, NULL, NULL);

  g_source_attach (source, GLIB_PRIVATE_CALL (g_get_worker_context) ());

  return iks;
}
516
517
/* Thread-safe one-time initialization of the inotify machinery.
 * @cb is installed as the event callback on first call; later calls
 * ignore their argument.  Returns TRUE iff the inotify fd was opened. */
gboolean
_ik_startup (gboolean (*cb)(ik_event_t *event))
{
  if (g_once_init_enter_pointer (&inotify_source))
    g_once_init_leave_pointer (&inotify_source, ik_source_new (cb));

  return inotify_source->fd >= 0;
}
525
526
gint32
527
_ik_watch (const char *path,
528
           guint32     mask,
529
           int        *err)
530
0
{
531
0
  gint32 wd = -1;
532
533
0
  g_assert (path != NULL);
534
0
  g_assert (inotify_source && inotify_source->fd >= 0);
535
536
0
  wd = inotify_add_watch (inotify_source->fd, path, mask);
537
538
0
  if (wd < 0)
539
0
    {
540
0
      int e = errno;
541
      /* FIXME: debug msg failed to add watch */
542
0
      if (err)
543
0
        *err = e;
544
0
      return wd;
545
0
    }
546
547
0
  g_assert (wd >= 0);
548
0
  return wd;
549
0
}
550
551
int
552
_ik_ignore (const char *path,
553
            gint32      wd)
554
0
{
555
0
  g_assert (wd >= 0);
556
0
  g_assert (inotify_source && inotify_source->fd >= 0);
557
558
0
  if (inotify_rm_watch (inotify_source->fd, wd) < 0)
559
0
    {
560
      /* int e = errno; */
561
      /* failed to rm watch */
562
0
      return -1;
563
0
    }
564
565
0
  return 0;
566
0
}