Coverage Report

Created: 2025-08-18 06:36

/src/nspr/pr/src/md/unix/unix.c
Line
Count
Source (jump to first uncovered line)
1
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2
/* This Source Code Form is subject to the terms of the Mozilla Public
3
 * License, v. 2.0. If a copy of the MPL was not distributed with this
4
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
5
6
#include "primpl.h"
7
8
#include <string.h>
9
#include <signal.h>
10
#include <unistd.h>
11
#include <fcntl.h>
12
#include <sys/types.h>
13
#include <sys/socket.h>
14
#include <sys/time.h>
15
#include <sys/ioctl.h>
16
#include <sys/mman.h>
17
#include <unistd.h>
18
#include <sys/utsname.h>
19
20
#ifdef _PR_POLL_AVAILABLE
21
#  include <poll.h>
22
#endif
23
24
#if defined(ANDROID)
25
#  include <android/api-level.h>
26
#endif
27
28
#if defined(NTO)
29
#  include <sys/statvfs.h>
30
#endif
31
32
/*
33
 * Make sure _PRSockLen_t is 32-bit, because we will cast a PRUint32* or
34
 * PRInt32* pointer to a _PRSockLen_t* pointer.
35
 */
36
#if defined(HAVE_SOCKLEN_T) || (defined(__GLIBC__) && __GLIBC__ >= 2)
37
0
#  define _PRSockLen_t socklen_t
38
#elif defined(HPUX) || defined(SOLARIS) || defined(AIX4_1) || \
39
    defined(LINUX) || defined(DARWIN) || defined(QNX)
40
#  define _PRSockLen_t int
41
#elif (defined(AIX) && !defined(AIX4_1)) || defined(FREEBSD) || \
42
    defined(NETBSD) || defined(OPENBSD) \
43
    || defined(NTO) || \
44
    defined(RISCOS)
45
#  define _PRSockLen_t size_t
46
#else
47
#  error "Cannot determine architecture"
48
#endif
49
50
/*
51
** Global lock variable used to bracket calls into rusty libraries that
52
** aren't thread safe (like libc, libX, etc).
53
*/
54
static PRLock* _pr_unix_rename_lock = NULL;
55
static PRMonitor* _pr_Xfe_mon = NULL;
56
57
static PRInt64 minus_one;
58
59
sigset_t timer_set;
60
61
#if !defined(_PR_PTHREADS)
62
63
static sigset_t empty_set;
64
65
#  ifdef SOLARIS
66
#    include <sys/file.h>
67
#    include <sys/filio.h>
68
#  endif
69
70
#  ifndef PIPE_BUF
71
#    define PIPE_BUF 512
72
#  endif
73
74
/*
75
 * _nspr_noclock - if set clock interrupts are disabled
76
 */
77
int _nspr_noclock = 1;
78
79
/*
80
 * There is an assertion in this code that NSPR's definition of PRIOVec
81
 * is bit compatible with UNIX' definition of a struct iovec. This is
82
 * applicable to the 'writev()' operations where the types are casually
83
 * cast to avoid warnings.
84
 */
85
86
int _pr_md_pipefd[2] = {-1, -1};
87
static char _pr_md_pipebuf[PIPE_BUF];
88
static PRInt32 local_io_wait(PRInt32 osfd, PRInt32 wait_flag,
89
                             PRIntervalTime timeout);
90
91
_PRInterruptTable _pr_interruptTable[] = {{
92
                                              "clock",
93
                                              _PR_MISSED_CLOCK,
94
                                              _PR_ClockInterrupt,
95
                                          },
96
                                          {0}};
97
98
void _MD_unix_init_running_cpu(_PRCPU* cpu) {
99
  PR_INIT_CLIST(&(cpu->md.md_unix.ioQ));
100
  cpu->md.md_unix.ioq_max_osfd = -1;
101
  cpu->md.md_unix.ioq_timeout = PR_INTERVAL_NO_TIMEOUT;
102
}
103
104
PRStatus _MD_open_dir(_MDDir* d, const char* name) {
105
  int err;
106
107
  d->d = opendir(name);
108
  if (!d->d) {
109
    err = _MD_ERRNO();
110
    _PR_MD_MAP_OPENDIR_ERROR(err);
111
    return PR_FAILURE;
112
  }
113
  return PR_SUCCESS;
114
}
115
116
PRInt32 _MD_close_dir(_MDDir* d) {
117
  int rv = 0, err;
118
119
  if (d->d) {
120
    rv = closedir(d->d);
121
    if (rv == -1) {
122
      err = _MD_ERRNO();
123
      _PR_MD_MAP_CLOSEDIR_ERROR(err);
124
    }
125
  }
126
  return rv;
127
}
128
129
char* _MD_read_dir(_MDDir* d, PRIntn flags) {
130
  struct dirent* de;
131
  int err;
132
133
  for (;;) {
134
    /*
135
     * XXX: readdir() is not MT-safe. There is an MT-safe version
136
     * readdir_r() on some systems.
137
     */
138
    _MD_ERRNO() = 0;
139
    de = readdir(d->d);
140
    if (!de) {
141
      err = _MD_ERRNO();
142
      _PR_MD_MAP_READDIR_ERROR(err);
143
      return 0;
144
    }
145
    if ((flags & PR_SKIP_DOT) && (de->d_name[0] == '.') &&
146
        (de->d_name[1] == 0)) {
147
      continue;
148
    }
149
    if ((flags & PR_SKIP_DOT_DOT) && (de->d_name[0] == '.') &&
150
        (de->d_name[1] == '.') && (de->d_name[2] == 0)) {
151
      continue;
152
    }
153
    if ((flags & PR_SKIP_HIDDEN) && (de->d_name[0] == '.')) {
154
      continue;
155
    }
156
    break;
157
  }
158
  return de->d_name;
159
}
160
161
PRInt32 _MD_delete(const char* name) {
162
  PRInt32 rv, err;
163
164
  rv = unlink(name);
165
  if (rv == -1) {
166
    err = _MD_ERRNO();
167
    _PR_MD_MAP_UNLINK_ERROR(err);
168
  }
169
  return (rv);
170
}
171
172
PRInt32 _MD_rename(const char* from, const char* to) {
173
  PRInt32 rv = -1, err;
174
175
  /*
176
  ** This is trying to enforce the semantics of WINDOZE' rename
177
  ** operation. That means one is not allowed to rename over top
178
  ** of an existing file. Holding a lock across these two function
179
  ** and the open function is known to be a bad idea, but ....
180
  */
181
  if (NULL != _pr_unix_rename_lock) {
182
    PR_Lock(_pr_unix_rename_lock);
183
  }
184
  if (0 == access(to, F_OK)) {
185
    PR_SetError(PR_FILE_EXISTS_ERROR, 0);
186
  } else {
187
    rv = rename(from, to);
188
    if (rv < 0) {
189
      err = _MD_ERRNO();
190
      _PR_MD_MAP_RENAME_ERROR(err);
191
    }
192
  }
193
  if (NULL != _pr_unix_rename_lock) {
194
    PR_Unlock(_pr_unix_rename_lock);
195
  }
196
  return rv;
197
}
198
199
PRInt32 _MD_access(const char* name, PRAccessHow how) {
200
  PRInt32 rv, err;
201
  int amode;
202
203
  switch (how) {
204
    case PR_ACCESS_WRITE_OK:
205
      amode = W_OK;
206
      break;
207
    case PR_ACCESS_READ_OK:
208
      amode = R_OK;
209
      break;
210
    case PR_ACCESS_EXISTS:
211
      amode = F_OK;
212
      break;
213
    default:
214
      PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
215
      rv = -1;
216
      goto done;
217
  }
218
  rv = access(name, amode);
219
220
  if (rv < 0) {
221
    err = _MD_ERRNO();
222
    _PR_MD_MAP_ACCESS_ERROR(err);
223
  }
224
225
done:
226
  return (rv);
227
}
228
229
PRInt32 _MD_mkdir(const char* name, PRIntn mode) {
230
  int rv, err;
231
232
  /*
233
  ** This lock is used to enforce rename semantics as described
234
  ** in PR_Rename. Look there for more fun details.
235
  */
236
  if (NULL != _pr_unix_rename_lock) {
237
    PR_Lock(_pr_unix_rename_lock);
238
  }
239
  rv = mkdir(name, mode);
240
  if (rv < 0) {
241
    err = _MD_ERRNO();
242
    _PR_MD_MAP_MKDIR_ERROR(err);
243
  }
244
  if (NULL != _pr_unix_rename_lock) {
245
    PR_Unlock(_pr_unix_rename_lock);
246
  }
247
  return rv;
248
}
249
250
PRInt32 _MD_rmdir(const char* name) {
251
  int rv, err;
252
253
  rv = rmdir(name);
254
  if (rv == -1) {
255
    err = _MD_ERRNO();
256
    _PR_MD_MAP_RMDIR_ERROR(err);
257
  }
258
  return rv;
259
}
260
261
PRInt32 _MD_read(PRFileDesc* fd, void* buf, PRInt32 amount) {
262
  PRThread* me = _PR_MD_CURRENT_THREAD();
263
  PRInt32 rv, err;
264
#  ifndef _PR_USE_POLL
265
  fd_set rd;
266
#  else
267
  struct pollfd pfd;
268
#  endif /* _PR_USE_POLL */
269
  PRInt32 osfd = fd->secret->md.osfd;
270
271
#  ifndef _PR_USE_POLL
272
  FD_ZERO(&rd);
273
  FD_SET(osfd, &rd);
274
#  else
275
  pfd.fd = osfd;
276
  pfd.events = POLLIN;
277
#  endif /* _PR_USE_POLL */
278
  while ((rv = read(osfd, buf, amount)) == -1) {
279
    err = _MD_ERRNO();
280
    if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
281
      if (fd->secret->nonblocking) {
282
        break;
283
      }
284
      if (!_PR_IS_NATIVE_THREAD(me)) {
285
        if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_READ,
286
                                PR_INTERVAL_NO_TIMEOUT)) < 0) {
287
          goto done;
288
        }
289
      } else {
290
#  ifndef _PR_USE_POLL
291
        while ((rv = _MD_SELECT(osfd + 1, &rd, NULL, NULL, NULL)) == -1 &&
292
               (err = _MD_ERRNO()) == EINTR) {
293
          /* retry _MD_SELECT() if it is interrupted */
294
        }
295
#  else  /* _PR_USE_POLL */
296
        while ((rv = _MD_POLL(&pfd, 1, -1)) == -1 &&
297
               (err = _MD_ERRNO()) == EINTR) {
298
          /* retry _MD_POLL() if it is interrupted */
299
        }
300
#  endif /* _PR_USE_POLL */
301
        if (rv == -1) {
302
          break;
303
        }
304
      }
305
      if (_PR_PENDING_INTERRUPT(me)) {
306
        break;
307
      }
308
    } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))) {
309
      continue;
310
    } else {
311
      break;
312
    }
313
  }
314
  if (rv < 0) {
315
    if (_PR_PENDING_INTERRUPT(me)) {
316
      me->flags &= ~_PR_INTERRUPT;
317
      PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
318
    } else {
319
      _PR_MD_MAP_READ_ERROR(err);
320
    }
321
  }
322
done:
323
  return (rv);
324
}
325
326
PRInt32 _MD_write(PRFileDesc* fd, const void* buf, PRInt32 amount) {
327
  PRThread* me = _PR_MD_CURRENT_THREAD();
328
  PRInt32 rv, err;
329
#  ifndef _PR_USE_POLL
330
  fd_set wd;
331
#  else
332
  struct pollfd pfd;
333
#  endif /* _PR_USE_POLL */
334
  PRInt32 osfd = fd->secret->md.osfd;
335
336
#  ifndef _PR_USE_POLL
337
  FD_ZERO(&wd);
338
  FD_SET(osfd, &wd);
339
#  else
340
  pfd.fd = osfd;
341
  pfd.events = POLLOUT;
342
#  endif /* _PR_USE_POLL */
343
  while ((rv = write(osfd, buf, amount)) == -1) {
344
    err = _MD_ERRNO();
345
    if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
346
      if (fd->secret->nonblocking) {
347
        break;
348
      }
349
      if (!_PR_IS_NATIVE_THREAD(me)) {
350
        if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE,
351
                                PR_INTERVAL_NO_TIMEOUT)) < 0) {
352
          goto done;
353
        }
354
      } else {
355
#  ifndef _PR_USE_POLL
356
        while ((rv = _MD_SELECT(osfd + 1, NULL, &wd, NULL, NULL)) == -1 &&
357
               (err = _MD_ERRNO()) == EINTR) {
358
          /* retry _MD_SELECT() if it is interrupted */
359
        }
360
#  else  /* _PR_USE_POLL */
361
        while ((rv = _MD_POLL(&pfd, 1, -1)) == -1 &&
362
               (err = _MD_ERRNO()) == EINTR) {
363
          /* retry _MD_POLL() if it is interrupted */
364
        }
365
#  endif /* _PR_USE_POLL */
366
        if (rv == -1) {
367
          break;
368
        }
369
      }
370
      if (_PR_PENDING_INTERRUPT(me)) {
371
        break;
372
      }
373
    } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))) {
374
      continue;
375
    } else {
376
      break;
377
    }
378
  }
379
  if (rv < 0) {
380
    if (_PR_PENDING_INTERRUPT(me)) {
381
      me->flags &= ~_PR_INTERRUPT;
382
      PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
383
    } else {
384
      _PR_MD_MAP_WRITE_ERROR(err);
385
    }
386
  }
387
done:
388
  return (rv);
389
}
390
391
PRInt32 _MD_fsync(PRFileDesc* fd) {
392
  PRInt32 rv, err;
393
394
  rv = fsync(fd->secret->md.osfd);
395
  if (rv == -1) {
396
    err = _MD_ERRNO();
397
    _PR_MD_MAP_FSYNC_ERROR(err);
398
  }
399
  return (rv);
400
}
401
402
PRInt32 _MD_close(PRInt32 osfd) {
403
  PRInt32 rv, err;
404
405
  rv = close(osfd);
406
  if (rv == -1) {
407
    err = _MD_ERRNO();
408
    _PR_MD_MAP_CLOSE_ERROR(err);
409
  }
410
  return (rv);
411
}
412
413
PRInt32 _MD_socket(PRInt32 domain, PRInt32 type, PRInt32 proto) {
414
  PRInt32 osfd, err;
415
416
  osfd = socket(domain, type, proto);
417
418
  if (osfd == -1) {
419
    err = _MD_ERRNO();
420
    _PR_MD_MAP_SOCKET_ERROR(err);
421
    return (osfd);
422
  }
423
424
  return (osfd);
425
}
426
427
PRInt32 _MD_socketavailable(PRFileDesc* fd) {
428
  PRInt32 result;
429
430
  if (ioctl(fd->secret->md.osfd, FIONREAD, &result) < 0) {
431
    _PR_MD_MAP_SOCKETAVAILABLE_ERROR(_MD_ERRNO());
432
    return -1;
433
  }
434
  return result;
435
}
436
437
PRInt64 _MD_socketavailable64(PRFileDesc* fd) {
438
  PRInt64 result;
439
  LL_I2L(result, _MD_socketavailable(fd));
440
  return result;
441
} /* _MD_socketavailable64 */
442
443
#  define READ_FD 1
444
#  define WRITE_FD 2
445
446
/*
447
 * socket_io_wait --
448
 *
449
 * wait for socket i/o, periodically checking for interrupt
450
 *
451
 * The first implementation uses select(), for platforms without
452
 * poll().  The second (preferred) implementation uses poll().
453
 */
454
455
#  ifndef _PR_USE_POLL
456
457
static PRInt32 socket_io_wait(PRInt32 osfd, PRInt32 fd_type,
458
                              PRIntervalTime timeout) {
459
  PRInt32 rv = -1;
460
  struct timeval tv;
461
  PRThread* me = _PR_MD_CURRENT_THREAD();
462
  PRIntervalTime epoch, now, elapsed, remaining;
463
  PRBool wait_for_remaining;
464
  PRInt32 syserror;
465
  fd_set rd_wr;
466
467
  switch (timeout) {
468
    case PR_INTERVAL_NO_WAIT:
469
      PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
470
      break;
471
    case PR_INTERVAL_NO_TIMEOUT:
472
      /*
473
       * This is a special case of the 'default' case below.
474
       * Please see the comments there.
475
       */
476
      tv.tv_sec = _PR_INTERRUPT_CHECK_INTERVAL_SECS;
477
      tv.tv_usec = 0;
478
      FD_ZERO(&rd_wr);
479
      do {
480
        FD_SET(osfd, &rd_wr);
481
        if (fd_type == READ_FD) {
482
          rv = _MD_SELECT(osfd + 1, &rd_wr, NULL, NULL, &tv);
483
        } else {
484
          rv = _MD_SELECT(osfd + 1, NULL, &rd_wr, NULL, &tv);
485
        }
486
        if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) {
487
          _PR_MD_MAP_SELECT_ERROR(syserror);
488
          break;
489
        }
490
        if (_PR_PENDING_INTERRUPT(me)) {
491
          me->flags &= ~_PR_INTERRUPT;
492
          PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
493
          rv = -1;
494
          break;
495
        }
496
      } while (rv == 0 || (rv == -1 && syserror == EINTR));
497
      break;
498
    default:
499
      now = epoch = PR_IntervalNow();
500
      remaining = timeout;
501
      FD_ZERO(&rd_wr);
502
      do {
503
        /*
504
         * We block in _MD_SELECT for at most
505
         * _PR_INTERRUPT_CHECK_INTERVAL_SECS seconds,
506
         * so that there is an upper limit on the delay
507
         * before the interrupt bit is checked.
508
         */
509
        wait_for_remaining = PR_TRUE;
510
        tv.tv_sec = PR_IntervalToSeconds(remaining);
511
        if (tv.tv_sec > _PR_INTERRUPT_CHECK_INTERVAL_SECS) {
512
          wait_for_remaining = PR_FALSE;
513
          tv.tv_sec = _PR_INTERRUPT_CHECK_INTERVAL_SECS;
514
          tv.tv_usec = 0;
515
        } else {
516
          tv.tv_usec = PR_IntervalToMicroseconds(
517
              remaining - PR_SecondsToInterval(tv.tv_sec));
518
        }
519
        FD_SET(osfd, &rd_wr);
520
        if (fd_type == READ_FD) {
521
          rv = _MD_SELECT(osfd + 1, &rd_wr, NULL, NULL, &tv);
522
        } else {
523
          rv = _MD_SELECT(osfd + 1, NULL, &rd_wr, NULL, &tv);
524
        }
525
        /*
526
         * we don't consider EINTR a real error
527
         */
528
        if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) {
529
          _PR_MD_MAP_SELECT_ERROR(syserror);
530
          break;
531
        }
532
        if (_PR_PENDING_INTERRUPT(me)) {
533
          me->flags &= ~_PR_INTERRUPT;
534
          PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
535
          rv = -1;
536
          break;
537
        }
538
        /*
539
         * We loop again if _MD_SELECT timed out or got interrupted
540
         * by a signal, and the timeout deadline has not passed yet.
541
         */
542
        if (rv == 0 || (rv == -1 && syserror == EINTR)) {
543
          /*
544
           * If _MD_SELECT timed out, we know how much time
545
           * we spent in blocking, so we can avoid a
546
           * PR_IntervalNow() call.
547
           */
548
          if (rv == 0) {
549
            if (wait_for_remaining) {
550
              now += remaining;
551
            } else {
552
              now += PR_SecondsToInterval(tv.tv_sec) +
553
                     PR_MicrosecondsToInterval(tv.tv_usec);
554
            }
555
          } else {
556
            now = PR_IntervalNow();
557
          }
558
          elapsed = (PRIntervalTime)(now - epoch);
559
          if (elapsed >= timeout) {
560
            PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
561
            rv = -1;
562
            break;
563
          } else {
564
            remaining = timeout - elapsed;
565
          }
566
        }
567
      } while (rv == 0 || (rv == -1 && syserror == EINTR));
568
      break;
569
  }
570
  return (rv);
571
}
572
573
#  else /* _PR_USE_POLL */
574
575
static PRInt32 socket_io_wait(PRInt32 osfd, PRInt32 fd_type,
576
                              PRIntervalTime timeout) {
577
  PRInt32 rv = -1;
578
  int msecs;
579
  PRThread* me = _PR_MD_CURRENT_THREAD();
580
  PRIntervalTime epoch, now, elapsed, remaining;
581
  PRBool wait_for_remaining;
582
  PRInt32 syserror;
583
  struct pollfd pfd;
584
585
  switch (timeout) {
586
    case PR_INTERVAL_NO_WAIT:
587
      PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
588
      break;
589
    case PR_INTERVAL_NO_TIMEOUT:
590
      /*
591
       * This is a special case of the 'default' case below.
592
       * Please see the comments there.
593
       */
594
      msecs = _PR_INTERRUPT_CHECK_INTERVAL_SECS * 1000;
595
      pfd.fd = osfd;
596
      if (fd_type == READ_FD) {
597
        pfd.events = POLLIN;
598
      } else {
599
        pfd.events = POLLOUT;
600
      }
601
      do {
602
        rv = _MD_POLL(&pfd, 1, msecs);
603
        if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) {
604
          _PR_MD_MAP_POLL_ERROR(syserror);
605
          break;
606
        }
607
        /*
608
         * If POLLERR is set, don't process it; retry the operation
609
         */
610
        if ((rv == 1) && (pfd.revents & (POLLHUP | POLLNVAL))) {
611
          rv = -1;
612
          _PR_MD_MAP_POLL_REVENTS_ERROR(pfd.revents);
613
          break;
614
        }
615
        if (_PR_PENDING_INTERRUPT(me)) {
616
          me->flags &= ~_PR_INTERRUPT;
617
          PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
618
          rv = -1;
619
          break;
620
        }
621
      } while (rv == 0 || (rv == -1 && syserror == EINTR));
622
      break;
623
    default:
624
      now = epoch = PR_IntervalNow();
625
      remaining = timeout;
626
      pfd.fd = osfd;
627
      if (fd_type == READ_FD) {
628
        pfd.events = POLLIN;
629
      } else {
630
        pfd.events = POLLOUT;
631
      }
632
      do {
633
        /*
634
         * We block in _MD_POLL for at most
635
         * _PR_INTERRUPT_CHECK_INTERVAL_SECS seconds,
636
         * so that there is an upper limit on the delay
637
         * before the interrupt bit is checked.
638
         */
639
        wait_for_remaining = PR_TRUE;
640
        msecs = PR_IntervalToMilliseconds(remaining);
641
        if (msecs > _PR_INTERRUPT_CHECK_INTERVAL_SECS * 1000) {
642
          wait_for_remaining = PR_FALSE;
643
          msecs = _PR_INTERRUPT_CHECK_INTERVAL_SECS * 1000;
644
        }
645
        rv = _MD_POLL(&pfd, 1, msecs);
646
        /*
647
         * we don't consider EINTR a real error
648
         */
649
        if (rv == -1 && (syserror = _MD_ERRNO()) != EINTR) {
650
          _PR_MD_MAP_POLL_ERROR(syserror);
651
          break;
652
        }
653
        if (_PR_PENDING_INTERRUPT(me)) {
654
          me->flags &= ~_PR_INTERRUPT;
655
          PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
656
          rv = -1;
657
          break;
658
        }
659
        /*
660
         * If POLLERR is set, don't process it; retry the operation
661
         */
662
        if ((rv == 1) && (pfd.revents & (POLLHUP | POLLNVAL))) {
663
          rv = -1;
664
          _PR_MD_MAP_POLL_REVENTS_ERROR(pfd.revents);
665
          break;
666
        }
667
        /*
668
         * We loop again if _MD_POLL timed out or got interrupted
669
         * by a signal, and the timeout deadline has not passed yet.
670
         */
671
        if (rv == 0 || (rv == -1 && syserror == EINTR)) {
672
          /*
673
           * If _MD_POLL timed out, we know how much time
674
           * we spent in blocking, so we can avoid a
675
           * PR_IntervalNow() call.
676
           */
677
          if (rv == 0) {
678
            if (wait_for_remaining) {
679
              now += remaining;
680
            } else {
681
              now += PR_MillisecondsToInterval(msecs);
682
            }
683
          } else {
684
            now = PR_IntervalNow();
685
          }
686
          elapsed = (PRIntervalTime)(now - epoch);
687
          if (elapsed >= timeout) {
688
            PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
689
            rv = -1;
690
            break;
691
          } else {
692
            remaining = timeout - elapsed;
693
          }
694
        }
695
      } while (rv == 0 || (rv == -1 && syserror == EINTR));
696
      break;
697
  }
698
  return (rv);
699
}
700
701
#  endif /* _PR_USE_POLL */
702
703
static PRInt32 local_io_wait(PRInt32 osfd, PRInt32 wait_flag,
704
                             PRIntervalTime timeout) {
705
  _PRUnixPollDesc pd;
706
  PRInt32 rv;
707
708
  PR_LOG(_pr_io_lm, PR_LOG_MIN,
709
         ("waiting to %s on osfd=%d",
710
          (wait_flag == _PR_UNIX_POLL_READ) ? "read" : "write", osfd));
711
712
  if (timeout == PR_INTERVAL_NO_WAIT) {
713
    return 0;
714
  }
715
716
  pd.osfd = osfd;
717
  pd.in_flags = wait_flag;
718
  pd.out_flags = 0;
719
720
  rv = _PR_WaitForMultipleFDs(&pd, 1, timeout);
721
722
  if (rv == 0) {
723
    PR_SetError(PR_IO_TIMEOUT_ERROR, 0);
724
    rv = -1;
725
  }
726
  return rv;
727
}
728
729
PRInt32 _MD_recv(PRFileDesc* fd, void* buf, PRInt32 amount, PRInt32 flags,
730
                 PRIntervalTime timeout) {
731
  PRInt32 osfd = fd->secret->md.osfd;
732
  PRInt32 rv, err;
733
  PRThread* me = _PR_MD_CURRENT_THREAD();
734
735
  /*
736
   * Many OS's (ex: Solaris) have a broken recv which won't read
737
   * from socketpairs.  As long as we don't use flags on socketpairs, this
738
   * is a decent fix. - mikep
739
   */
740
#  if defined(SOLARIS)
741
  while ((rv = read(osfd, buf, amount)) == -1) {
742
#  else
743
  while ((rv = recv(osfd, buf, amount, flags)) == -1) {
744
#  endif
745
    err = _MD_ERRNO();
746
    if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
747
      if (fd->secret->nonblocking) {
748
        break;
749
      }
750
      if (!_PR_IS_NATIVE_THREAD(me)) {
751
        if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_READ, timeout)) < 0) {
752
          goto done;
753
        }
754
      } else {
755
        if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0) {
756
          goto done;
757
        }
758
      }
759
    } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))) {
760
      continue;
761
    } else {
762
      break;
763
    }
764
  }
765
  if (rv < 0) {
766
    _PR_MD_MAP_RECV_ERROR(err);
767
  }
768
done:
769
  return (rv);
770
}
771
772
PRInt32 _MD_recvfrom(PRFileDesc* fd, void* buf, PRInt32 amount, PRIntn flags,
773
                     PRNetAddr* addr, PRUint32* addrlen,
774
                     PRIntervalTime timeout) {
775
  PRInt32 osfd = fd->secret->md.osfd;
776
  PRInt32 rv, err;
777
  PRThread* me = _PR_MD_CURRENT_THREAD();
778
779
  while ((*addrlen = PR_NETADDR_SIZE(addr)),
780
         ((rv = recvfrom(osfd, buf, amount, flags, (struct sockaddr*)addr,
781
                         (_PRSockLen_t*)addrlen)) == -1)) {
782
    err = _MD_ERRNO();
783
    if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
784
      if (fd->secret->nonblocking) {
785
        break;
786
      }
787
      if (!_PR_IS_NATIVE_THREAD(me)) {
788
        if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_READ, timeout)) < 0) {
789
          goto done;
790
        }
791
      } else {
792
        if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0) {
793
          goto done;
794
        }
795
      }
796
    } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))) {
797
      continue;
798
    } else {
799
      break;
800
    }
801
  }
802
  if (rv < 0) {
803
    _PR_MD_MAP_RECVFROM_ERROR(err);
804
  }
805
done:
806
#  ifdef _PR_HAVE_SOCKADDR_LEN
807
  if (rv != -1) {
808
    /* ignore the sa_len field of struct sockaddr */
809
    if (addr) {
810
      addr->raw.family = ((struct sockaddr*)addr)->sa_family;
811
    }
812
  }
813
#  endif /* _PR_HAVE_SOCKADDR_LEN */
814
  return (rv);
815
}
816
817
PRInt32 _MD_send(PRFileDesc* fd, const void* buf, PRInt32 amount, PRInt32 flags,
818
                 PRIntervalTime timeout) {
819
  PRInt32 osfd = fd->secret->md.osfd;
820
  PRInt32 rv, err;
821
  PRThread* me = _PR_MD_CURRENT_THREAD();
822
#  if defined(SOLARIS)
823
  PRInt32 tmp_amount = amount;
824
#  endif
825
826
  /*
827
   * On pre-2.6 Solaris, send() is much slower than write().
828
   * On 2.6 and beyond, with in-kernel sockets, send() and
829
   * write() are fairly equivalent in performance.
830
   */
831
#  if defined(SOLARIS)
832
  PR_ASSERT(0 == flags);
833
  while ((rv = write(osfd, buf, tmp_amount)) == -1) {
834
#  else
835
  while ((rv = send(osfd, buf, amount, flags)) == -1) {
836
#  endif
837
    err = _MD_ERRNO();
838
    if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
839
      if (fd->secret->nonblocking) {
840
        break;
841
      }
842
      if (!_PR_IS_NATIVE_THREAD(me)) {
843
        if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0) {
844
          goto done;
845
        }
846
      } else {
847
        if ((rv = socket_io_wait(osfd, WRITE_FD, timeout)) < 0) {
848
          goto done;
849
        }
850
      }
851
    } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))) {
852
      continue;
853
    } else {
854
#  if defined(SOLARIS)
855
      /*
856
       * The write system call has been reported to return the ERANGE
857
       * error on occasion. Try to write in smaller chunks to workaround
858
       * this bug.
859
       */
860
      if (err == ERANGE) {
861
        if (tmp_amount > 1) {
862
          tmp_amount = tmp_amount / 2; /* half the bytes */
863
          continue;
864
        }
865
      }
866
#  endif
867
      break;
868
    }
869
  }
870
  /*
871
   * optimization; if bytes sent is less than "amount" call
872
   * select before returning. This is because it is likely that
873
   * the next send() call will return EWOULDBLOCK.
874
   */
875
  if ((!fd->secret->nonblocking) && (rv > 0) && (rv < amount) &&
876
      (timeout != PR_INTERVAL_NO_WAIT)) {
877
    if (_PR_IS_NATIVE_THREAD(me)) {
878
      if (socket_io_wait(osfd, WRITE_FD, timeout) < 0) {
879
        rv = -1;
880
        goto done;
881
      }
882
    } else {
883
      if (local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout) < 0) {
884
        rv = -1;
885
        goto done;
886
      }
887
    }
888
  }
889
  if (rv < 0) {
890
    _PR_MD_MAP_SEND_ERROR(err);
891
  }
892
done:
893
  return (rv);
894
}
895
896
PRInt32 _MD_sendto(PRFileDesc* fd, const void* buf, PRInt32 amount,
897
                   PRIntn flags, const PRNetAddr* addr, PRUint32 addrlen,
898
                   PRIntervalTime timeout) {
899
  PRInt32 osfd = fd->secret->md.osfd;
900
  PRInt32 rv, err;
901
  PRThread* me = _PR_MD_CURRENT_THREAD();
902
#  ifdef _PR_HAVE_SOCKADDR_LEN
903
  PRNetAddr addrCopy;
904
905
  addrCopy = *addr;
906
  ((struct sockaddr*)&addrCopy)->sa_len = addrlen;
907
  ((struct sockaddr*)&addrCopy)->sa_family = addr->raw.family;
908
909
  while ((rv = sendto(osfd, buf, amount, flags, (struct sockaddr*)&addrCopy,
910
                      addrlen)) == -1) {
911
#  else
912
  while ((rv = sendto(osfd, buf, amount, flags, (struct sockaddr*)addr,
913
                      addrlen)) == -1) {
914
#  endif
915
    err = _MD_ERRNO();
916
    if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
917
      if (fd->secret->nonblocking) {
918
        break;
919
      }
920
      if (!_PR_IS_NATIVE_THREAD(me)) {
921
        if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0) {
922
          goto done;
923
        }
924
      } else {
925
        if ((rv = socket_io_wait(osfd, WRITE_FD, timeout)) < 0) {
926
          goto done;
927
        }
928
      }
929
    } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))) {
930
      continue;
931
    } else {
932
      break;
933
    }
934
  }
935
  if (rv < 0) {
936
    _PR_MD_MAP_SENDTO_ERROR(err);
937
  }
938
done:
939
  return (rv);
940
}
941
942
PRInt32 _MD_writev(PRFileDesc* fd, const PRIOVec* iov, PRInt32 iov_size,
943
                   PRIntervalTime timeout) {
944
  PRInt32 rv, err;
945
  PRThread* me = _PR_MD_CURRENT_THREAD();
946
  PRInt32 index, amount = 0;
947
  PRInt32 osfd = fd->secret->md.osfd;
948
949
  /*
950
   * Calculate the total number of bytes to be sent; needed for
951
   * optimization later.
952
   * We could avoid this if this number was passed in; but it is
953
   * probably not a big deal because iov_size is usually small (less than
954
   * 3)
955
   */
956
  if (!fd->secret->nonblocking) {
957
    for (index = 0; index < iov_size; index++) {
958
      amount += iov[index].iov_len;
959
    }
960
  }
961
962
  while ((rv = writev(osfd, (const struct iovec*)iov, iov_size)) == -1) {
963
    err = _MD_ERRNO();
964
    if ((err == EAGAIN) || (err == EWOULDBLOCK)) {
965
      if (fd->secret->nonblocking) {
966
        break;
967
      }
968
      if (!_PR_IS_NATIVE_THREAD(me)) {
969
        if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0) {
970
          goto done;
971
        }
972
      } else {
973
        if ((rv = socket_io_wait(osfd, WRITE_FD, timeout)) < 0) {
974
          goto done;
975
        }
976
      }
977
    } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))) {
978
      continue;
979
    } else {
980
      break;
981
    }
982
  }
983
  /*
984
   * optimization; if bytes sent is less than "amount" call
985
   * select before returning. This is because it is likely that
986
   * the next writev() call will return EWOULDBLOCK.
987
   */
988
  if ((!fd->secret->nonblocking) && (rv > 0) && (rv < amount) &&
989
      (timeout != PR_INTERVAL_NO_WAIT)) {
990
    if (_PR_IS_NATIVE_THREAD(me)) {
991
      if (socket_io_wait(osfd, WRITE_FD, timeout) < 0) {
992
        rv = -1;
993
        goto done;
994
      }
995
    } else {
996
      if (local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout) < 0) {
997
        rv = -1;
998
        goto done;
999
      }
1000
    }
1001
  }
1002
  if (rv < 0) {
1003
    _PR_MD_MAP_WRITEV_ERROR(err);
1004
  }
1005
done:
1006
  return (rv);
1007
}
1008
1009
PRInt32 _MD_accept(PRFileDesc* fd, PRNetAddr* addr, PRUint32* addrlen,
1010
                   PRIntervalTime timeout) {
1011
  PRInt32 osfd = fd->secret->md.osfd;
1012
  PRInt32 rv, err;
1013
  PRThread* me = _PR_MD_CURRENT_THREAD();
1014
1015
  while ((rv = accept(osfd, (struct sockaddr*)addr, (_PRSockLen_t*)addrlen)) ==
1016
         -1) {
1017
    err = _MD_ERRNO();
1018
    if ((err == EAGAIN) || (err == EWOULDBLOCK) || (err == ECONNABORTED)) {
1019
      if (fd->secret->nonblocking) {
1020
        break;
1021
      }
1022
      if (!_PR_IS_NATIVE_THREAD(me)) {
1023
        if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_READ, timeout)) < 0) {
1024
          goto done;
1025
        }
1026
      } else {
1027
        if ((rv = socket_io_wait(osfd, READ_FD, timeout)) < 0) {
1028
          goto done;
1029
        }
1030
      }
1031
    } else if ((err == EINTR) && (!_PR_PENDING_INTERRUPT(me))) {
1032
      continue;
1033
    } else {
1034
      break;
1035
    }
1036
  }
1037
  if (rv < 0) {
1038
    _PR_MD_MAP_ACCEPT_ERROR(err);
1039
  }
1040
done:
1041
#  ifdef _PR_HAVE_SOCKADDR_LEN
1042
  if (rv != -1) {
1043
    /* ignore the sa_len field of struct sockaddr */
1044
    if (addr) {
1045
      addr->raw.family = ((struct sockaddr*)addr)->sa_family;
1046
    }
1047
  }
1048
#  endif /* _PR_HAVE_SOCKADDR_LEN */
1049
  return (rv);
1050
}
1051
1052
extern int _connect(int s, const struct sockaddr* name, int namelen);
/*
** Machine-dependent connect(): initiate a connection on an NSPR socket.
** Returns 0 on success, -1 with the NSPR error set on failure.
** NOTE(review): the "nonblocking" in case 2 below refers to the OS-level
** fd (which NSPR always sets nonblocking); the wait-for-writable path is
** taken only when the NSPR fd is in blocking mode
** (!fd->secret->nonblocking) — confirm against _MD_MakeNonblock usage.
*/
PRInt32 _MD_connect(PRFileDesc* fd, const PRNetAddr* addr, PRUint32 addrlen,
                    PRIntervalTime timeout) {
  PRInt32 rv, err;
  PRThread* me = _PR_MD_CURRENT_THREAD();
  PRInt32 osfd = fd->secret->md.osfd;
#  ifdef _PR_HAVE_SOCKADDR_LEN
  /* BSD-style sockaddr: fill in sa_len on a private copy of the address */
  PRNetAddr addrCopy;

  addrCopy = *addr;
  ((struct sockaddr*)&addrCopy)->sa_len = addrlen;
  ((struct sockaddr*)&addrCopy)->sa_family = addr->raw.family;
#  endif

  /*
   * We initiate the connection setup by making a nonblocking connect()
   * call.  If the connect() call fails, there are two cases we handle
   * specially:
   * 1. The connect() call was interrupted by a signal.  In this case
   *    we simply retry connect().
   * 2. The NSPR socket is nonblocking and connect() fails with
   *    EINPROGRESS.  We first wait until the socket becomes writable.
   *    Then we try to find out whether the connection setup succeeded
   *    or failed.
   */

retry:
#  ifdef _PR_HAVE_SOCKADDR_LEN
  if ((rv = connect(osfd, (struct sockaddr*)&addrCopy, addrlen)) == -1) {
#  else
  if ((rv = connect(osfd, (struct sockaddr*)addr, addrlen)) == -1) {
#  endif
    err = _MD_ERRNO();

    if (err == EINTR) {
      /* honor a pending NSPR interrupt; otherwise restart the syscall */
      if (_PR_PENDING_INTERRUPT(me)) {
        me->flags &= ~_PR_INTERRUPT;
        PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
        return -1;
      }
      goto retry;
    }

    if (!fd->secret->nonblocking && (err == EINPROGRESS)) {
      if (!_PR_IS_NATIVE_THREAD(me)) {
        if ((rv = local_io_wait(osfd, _PR_UNIX_POLL_WRITE, timeout)) < 0) {
          return -1;
        }
      } else {
        /*
         * socket_io_wait() may return -1 or 1.
         */

        rv = socket_io_wait(osfd, WRITE_FD, timeout);
        if (rv == -1) {
          return -1;
        }
      }

      PR_ASSERT(rv == 1);
      if (_PR_PENDING_INTERRUPT(me)) {
        me->flags &= ~_PR_INTERRUPT;
        PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
        return -1;
      }
      /* socket became writable: query SO_ERROR-style status to learn
       * whether the deferred connect actually succeeded */
      err = _MD_unix_get_nonblocking_connect_error(osfd);
      if (err != 0) {
        _PR_MD_MAP_CONNECT_ERROR(err);
        return -1;
      }
      return 0;
    }

    _PR_MD_MAP_CONNECT_ERROR(err);
  }

  return rv;
} /* _MD_connect */
1130
1131
/*
** Machine-dependent bind(): binds the underlying OS socket to the given
** address.  Returns the bind() result; on failure the OS error is mapped
** into the NSPR error space.
*/
PRInt32 _MD_bind(PRFileDesc* fd, const PRNetAddr* addr, PRUint32 addrlen) {
  PRInt32 result;
#  ifdef _PR_HAVE_SOCKADDR_LEN
  /* BSD-style sockaddr: fill in sa_len on a private copy of the address. */
  PRNetAddr addrCopy;

  addrCopy = *addr;
  ((struct sockaddr*)&addrCopy)->sa_len = addrlen;
  ((struct sockaddr*)&addrCopy)->sa_family = addr->raw.family;
  result = bind(fd->secret->md.osfd, (struct sockaddr*)&addrCopy, (int)addrlen);
#  else
  result = bind(fd->secret->md.osfd, (struct sockaddr*)addr, (int)addrlen);
#  endif
  if (result < 0) {
    PRInt32 oserr = _MD_ERRNO();
    _PR_MD_MAP_BIND_ERROR(oserr);
  }
  return result;
}
1149
1150
/*
** Machine-dependent listen(): marks the underlying OS socket as passive
** with the given backlog.  Returns the listen() result; on failure the
** OS error is mapped into the NSPR error space.
*/
PRInt32 _MD_listen(PRFileDesc* fd, PRIntn backlog) {
  PRInt32 result = listen(fd->secret->md.osfd, backlog);

  if (result < 0) {
    PRInt32 oserr = _MD_ERRNO();
    _PR_MD_MAP_LISTEN_ERROR(oserr);
  }
  return result;
}
1160
1161
/*
** Machine-dependent shutdown(): closes one or both directions of the
** socket connection (per `how`).  Returns the shutdown() result; on
** failure the OS error is mapped into the NSPR error space.
*/
PRInt32 _MD_shutdown(PRFileDesc* fd, PRIntn how) {
  PRInt32 result = shutdown(fd->secret->md.osfd, how);

  if (result < 0) {
    PRInt32 oserr = _MD_ERRNO();
    _PR_MD_MAP_SHUTDOWN_ERROR(oserr);
  }
  return result;
}
1171
1172
/*
** Machine-dependent socketpair(): creates a pair of connected sockets,
** storing the two descriptors in osfd[0] and osfd[1].  Returns the
** socketpair() result; on failure the OS error is mapped into the NSPR
** error space.
*/
PRInt32 _MD_socketpair(int af, int type, int flags, PRInt32* osfd) {
  PRInt32 result = socketpair(af, type, flags, osfd);

  if (result < 0) {
    PRInt32 oserr = _MD_ERRNO();
    _PR_MD_MAP_SOCKETPAIR_ERROR(oserr);
  }
  return result;
}
1182
1183
/*
** Machine-dependent getsockname(): retrieves the locally bound address
** of the socket into *addr/*addrlen.  Returns PR_SUCCESS or PR_FAILURE;
** on failure the OS error is mapped into the NSPR error space.
*/
PRStatus _MD_getsockname(PRFileDesc* fd, PRNetAddr* addr, PRUint32* addrlen) {
  PRInt32 rv, err;

  rv = getsockname(fd->secret->md.osfd, (struct sockaddr*)addr,
                   (_PRSockLen_t*)addrlen);
#  ifdef _PR_HAVE_SOCKADDR_LEN
  if (rv == 0) {
    /* ignore the sa_len field of struct sockaddr */
    if (addr) {
      addr->raw.family = ((struct sockaddr*)addr)->sa_family;
    }
  }
#  endif /* _PR_HAVE_SOCKADDR_LEN */
  if (rv < 0) {
    err = _MD_ERRNO();
    _PR_MD_MAP_GETSOCKNAME_ERROR(err);
  }
  return rv == 0 ? PR_SUCCESS : PR_FAILURE;
}
1202
1203
/*
** Machine-dependent getpeername(): retrieves the remote peer's address
** into *addr/*addrlen.  Returns PR_SUCCESS or PR_FAILURE; on failure
** the OS error is mapped into the NSPR error space.
*/
PRStatus _MD_getpeername(PRFileDesc* fd, PRNetAddr* addr, PRUint32* addrlen) {
  PRInt32 rv, err;

  rv = getpeername(fd->secret->md.osfd, (struct sockaddr*)addr,
                   (_PRSockLen_t*)addrlen);
#  ifdef _PR_HAVE_SOCKADDR_LEN
  if (rv == 0) {
    /* ignore the sa_len field of struct sockaddr */
    if (addr) {
      addr->raw.family = ((struct sockaddr*)addr)->sa_family;
    }
  }
#  endif /* _PR_HAVE_SOCKADDR_LEN */
  if (rv < 0) {
    err = _MD_ERRNO();
    _PR_MD_MAP_GETPEERNAME_ERROR(err);
  }
  return rv == 0 ? PR_SUCCESS : PR_FAILURE;
}
1222
1223
/*
** Machine-dependent getsockopt(): fetches a socket option value into
** optval/optlen.  Returns PR_SUCCESS or PR_FAILURE; on failure the OS
** error is mapped into the NSPR error space.
*/
PRStatus _MD_getsockopt(PRFileDesc* fd, PRInt32 level, PRInt32 optname,
                        char* optval, PRInt32* optlen) {
  PRInt32 result = getsockopt(fd->secret->md.osfd, level, optname, optval,
                              (_PRSockLen_t*)optlen);

  if (result < 0) {
    PRInt32 oserr = _MD_ERRNO();
    _PR_MD_MAP_GETSOCKOPT_ERROR(oserr);
  }
  return (result == 0) ? PR_SUCCESS : PR_FAILURE;
}
1235
1236
/*
** Machine-dependent setsockopt(): sets a socket option from
** optval/optlen.  Returns PR_SUCCESS or PR_FAILURE; on failure the OS
** error is mapped into the NSPR error space.
*/
PRStatus _MD_setsockopt(PRFileDesc* fd, PRInt32 level, PRInt32 optname,
                        const char* optval, PRInt32 optlen) {
  PRInt32 result =
      setsockopt(fd->secret->md.osfd, level, optname, optval, optlen);

  if (result < 0) {
    PRInt32 oserr = _MD_ERRNO();
    _PR_MD_MAP_SETSOCKOPT_ERROR(oserr);
  }
  return (result == 0) ? PR_SUCCESS : PR_FAILURE;
}
1247
1248
/*
** Set or clear the close-on-exec flag so the fd is (inheritable==PR_TRUE)
** or is not (PR_FALSE) inherited across exec.  Returns PR_FAILURE with
** PR_UNKNOWN_ERROR on fcntl() failure, else PR_SUCCESS.
*/
PRStatus _MD_set_fd_inheritable(PRFileDesc* fd, PRBool inheritable) {
  int fdflag = inheritable ? 0 : FD_CLOEXEC;

  if (fcntl(fd->secret->md.osfd, F_SETFD, fdflag) == -1) {
    PR_SetError(PR_UNKNOWN_ERROR, _MD_ERRNO());
    return PR_FAILURE;
  }
  return PR_SUCCESS;
}
1258
1259
/*
** Initialize the cached inheritable (close-on-exec) state of a new fd.
** For fds imported from outside NSPR the state is unknown; for fds
** created by NSPR the Unix default (inherited across exec) applies.
*/
void _MD_init_fd_inheritable(PRFileDesc* fd, PRBool imported) {
  if (imported) {
    fd->secret->inheritable = _PR_TRI_UNKNOWN;
  } else {
    /* By default, a Unix fd is not closed on exec. */
#  ifdef DEBUG
    {
      /* Sanity-check that no fd flags (e.g. FD_CLOEXEC) are set. */
      int flags = fcntl(fd->secret->md.osfd, F_GETFD, 0);
      PR_ASSERT(0 == flags);
    }
#  endif
    fd->secret->inheritable = _PR_TRI_TRUE;
  }
}
1273
1274
/************************************************************************/
1275
#  if !defined(_PR_USE_POLL)
1276
1277
/*
1278
** Scan through io queue and find any bad fd's that triggered the error
1279
** from _MD_SELECT
1280
*/
1281
/*
** Walk the cpu's i/o queue probing each registered fd with
** fcntl(F_GETFL); any fd the kernel rejects is marked _PR_UNIX_POLL_NVAL
** and its poll queue entry is removed, waking the waiting thread so it
** can observe the error.  Also recomputes the cpu's max fd and minimum
** i/o timeout as a side effect.
*/
static void FindBadFDs(void) {
  PRCList* q;
  PRThread* me = _MD_CURRENT_THREAD();

  PR_ASSERT(!_PR_IS_NATIVE_THREAD(me));
  q = (_PR_IOQ(me->cpu)).next;
  _PR_IOQ_MAX_OSFD(me->cpu) = -1;
  _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT;
  while (q != &_PR_IOQ(me->cpu)) {
    PRPollQueue* pq = _PR_POLLQUEUE_PTR(q);
    PRBool notify = PR_FALSE;
    _PRUnixPollDesc* pds = pq->pds;
    _PRUnixPollDesc* epds = pds + pq->npds;
    PRInt32 pq_max_osfd = -1;

    /* advance now: pq may be unlinked from the queue below */
    q = q->next;
    for (; pds < epds; pds++) {
      PRInt32 osfd = pds->osfd;
      pds->out_flags = 0;
      PR_ASSERT(osfd >= 0 || pds->in_flags == 0);
      if (pds->in_flags == 0) {
        continue; /* skip this fd */
      }
      /* cheap validity probe: fcntl fails (EBADF) on a closed/bad fd */
      if (fcntl(osfd, F_GETFL, 0) == -1) {
        /* Found a bad descriptor, remove it from the fd_sets. */
        PR_LOG(_pr_io_lm, PR_LOG_MAX, ("file descriptor %d is bad", osfd));
        pds->out_flags = _PR_UNIX_POLL_NVAL;
        notify = PR_TRUE;
      }
      if (osfd > pq_max_osfd) {
        pq_max_osfd = osfd;
      }
    }

    if (notify) {
      PRIntn pri;
      PR_REMOVE_LINK(&pq->links);
      pq->on_ioq = PR_FALSE;

      /*
       * Decrement the count of descriptors for each desciptor/event
       * because this I/O request is being removed from the
       * ioq
       */
      pds = pq->pds;
      for (; pds < epds; pds++) {
        PRInt32 osfd = pds->osfd;
        PRInt16 in_flags = pds->in_flags;
        PR_ASSERT(osfd >= 0 || in_flags == 0);
        if (in_flags & _PR_UNIX_POLL_READ) {
          if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0) {
            FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu));
          }
        }
        if (in_flags & _PR_UNIX_POLL_WRITE) {
          if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0) {
            FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu));
          }
        }
        if (in_flags & _PR_UNIX_POLL_EXCEPT) {
          if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0) {
            FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
          }
        }
      }

      /* wake the thread that was waiting on this poll queue entry */
      _PR_THREAD_LOCK(pq->thr);
      if (pq->thr->flags & (_PR_ON_PAUSEQ | _PR_ON_SLEEPQ)) {
        _PRCPU* cpu = pq->thr->cpu;
        _PR_SLEEPQ_LOCK(pq->thr->cpu);
        _PR_DEL_SLEEPQ(pq->thr, PR_TRUE);
        _PR_SLEEPQ_UNLOCK(pq->thr->cpu);

        if (pq->thr->flags & _PR_SUSPENDING) {
          /*
           * set thread state to SUSPENDED;
           * a Resume operation on the thread
           * will move it to the runQ
           */
          pq->thr->state = _PR_SUSPENDED;
          _PR_MISCQ_LOCK(pq->thr->cpu);
          _PR_ADD_SUSPENDQ(pq->thr, pq->thr->cpu);
          _PR_MISCQ_UNLOCK(pq->thr->cpu);
        } else {
          pri = pq->thr->priority;
          pq->thr->state = _PR_RUNNABLE;

          _PR_RUNQ_LOCK(cpu);
          _PR_ADD_RUNQ(pq->thr, cpu, pri);
          _PR_RUNQ_UNLOCK(cpu);
        }
      }
      _PR_THREAD_UNLOCK(pq->thr);
    } else {
      /* entry stays on the ioq; fold it into the recomputed aggregates */
      if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu)) {
        _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout;
      }
      if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd) {
        _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd;
      }
    }
  }
  if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
    /* keep the wakeup pipe's read end in the select() fd range */
    if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0]) {
      _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0];
    }
  }
}
1389
#  endif /* !defined(_PR_USE_POLL) */
1390
1391
/************************************************************************/
1392
1393
/*
1394
** Called by the scheduler when there is nothing to do. This means that
1395
** all threads are blocked on some monitor somewhere.
1396
**
1397
** Note: this code doesn't release the scheduler lock.
1398
*/
1399
/*
1400
** Pause the current CPU. longjmp to the cpu's pause stack
1401
**
1402
** This must be called with the scheduler locked
1403
*/
1404
/*
** Block the current (virtual) cpu in poll()/select() until an i/o event
** registered on this cpu's i/o queue fires, the first sleeping thread's
** timeout (or `ticks`) expires, or another thread writes to the wakeup
** pipe.  Called with the scheduler locked and interrupts off; clock
** interrupts are masked around the blocking syscall.  On return, any
** threads whose i/o is ready are moved to the run queue.
*/
void _MD_PauseCPU(PRIntervalTime ticks) {
  PRThread* me = _MD_CURRENT_THREAD();
#  ifdef _PR_USE_POLL
  int timeout;
  struct pollfd* pollfds;   /* an array of pollfd structures */
  struct pollfd* pollfdPtr; /* a pointer that steps through the array */
  unsigned long npollfds;   /* number of pollfd structures in array */
  unsigned long pollfds_size;
  int nfd; /* to hold the return value of poll() */
#  else
  struct timeval timeout, *tvp;
  fd_set r, w, e;
  fd_set *rp, *wp, *ep;
  PRInt32 max_osfd, nfd;
#  endif /* _PR_USE_POLL */
  PRInt32 rv;
  PRCList* q;
  PRUint32 min_timeout;
  sigset_t oldset;

  PR_ASSERT(_PR_MD_GET_INTSOFF() != 0);

  _PR_MD_IOQ_LOCK();

#  ifdef _PR_USE_POLL
  /* Build up the pollfd structure array to wait on */

  /* Find out how many pollfd structures are needed */
  npollfds = _PR_IOQ_OSFD_CNT(me->cpu);
  PR_ASSERT(npollfds >= 0);

  /*
   * We use a pipe to wake up a native thread.  An fd is needed
   * for the pipe and we poll it for reading.
   */
  if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
    npollfds++;
  }

  /*
   * if the cpu's pollfd array is not big enough, release it and allocate a new
   * one
   */
  if (npollfds > _PR_IOQ_POLLFDS_SIZE(me->cpu)) {
    if (_PR_IOQ_POLLFDS(me->cpu) != NULL) {
      PR_DELETE(_PR_IOQ_POLLFDS(me->cpu));
    }
    pollfds_size = PR_MAX(_PR_IOQ_MIN_POLLFDS_SIZE(me->cpu), npollfds);
    pollfds = (struct pollfd*)PR_MALLOC(pollfds_size * sizeof(struct pollfd));
    _PR_IOQ_POLLFDS(me->cpu) = pollfds;
    _PR_IOQ_POLLFDS_SIZE(me->cpu) = pollfds_size;
  } else {
    pollfds = _PR_IOQ_POLLFDS(me->cpu);
  }
  pollfdPtr = pollfds;

  /*
   * If we need to poll the pipe for waking up a native thread,
   * the pipe's fd is the first element in the pollfds array.
   */
  if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
    pollfdPtr->fd = _pr_md_pipefd[0];
    pollfdPtr->events = POLLIN;
    pollfdPtr++;
  }

  min_timeout = PR_INTERVAL_NO_TIMEOUT;
  for (q = _PR_IOQ(me->cpu).next; q != &_PR_IOQ(me->cpu); q = q->next) {
    PRPollQueue* pq = _PR_POLLQUEUE_PTR(q);
    _PRUnixPollDesc* pds = pq->pds;
    _PRUnixPollDesc* epds = pds + pq->npds;

    if (pq->timeout < min_timeout) {
      min_timeout = pq->timeout;
    }
    for (; pds < epds; pds++, pollfdPtr++) {
      /*
       * Assert that the pollfdPtr pointer does not go
       * beyond the end of the pollfds array
       */
      PR_ASSERT(pollfdPtr < pollfds + npollfds);
      pollfdPtr->fd = pds->osfd;
      /* direct copy of poll flags */
      pollfdPtr->events = pds->in_flags;
    }
  }
  _PR_IOQ_TIMEOUT(me->cpu) = min_timeout;
#  else
  /*
   * assigment of fd_sets
   */
  r = _PR_FD_READ_SET(me->cpu);
  w = _PR_FD_WRITE_SET(me->cpu);
  e = _PR_FD_EXCEPTION_SET(me->cpu);

  rp = &r;
  wp = &w;
  ep = &e;

  max_osfd = _PR_IOQ_MAX_OSFD(me->cpu) + 1;
  min_timeout = _PR_IOQ_TIMEOUT(me->cpu);
#  endif /* _PR_USE_POLL */
  /*
  ** Compute the minimum timeout value: make it the smaller of the
  ** timeouts specified by the i/o pollers or the timeout of the first
  ** sleeping thread.
  */
  q = _PR_SLEEPQ(me->cpu).next;

  if (q != &_PR_SLEEPQ(me->cpu)) {
    PRThread* t = _PR_THREAD_PTR(q);

    if (t->sleep < min_timeout) {
      min_timeout = t->sleep;
    }
  }
  if (min_timeout > ticks) {
    min_timeout = ticks;
  }

#  ifdef _PR_USE_POLL
  if (min_timeout == PR_INTERVAL_NO_TIMEOUT) {
    timeout = -1; /* poll() waits indefinitely on -1 */
  } else {
    timeout = PR_IntervalToMilliseconds(min_timeout);
  }
#  else
  if (min_timeout == PR_INTERVAL_NO_TIMEOUT) {
    tvp = NULL; /* select() waits indefinitely on a NULL timeval */
  } else {
    timeout.tv_sec = PR_IntervalToSeconds(min_timeout);
    timeout.tv_usec = PR_IntervalToMicroseconds(min_timeout) % PR_USEC_PER_SEC;
    tvp = &timeout;
  }
#  endif /* _PR_USE_POLL */

  _PR_MD_IOQ_UNLOCK();
  _MD_CHECK_FOR_EXIT();
  /*
   * check for i/o operations
   */
#  ifndef _PR_NO_CLOCK_TIMER
  /*
   * Disable the clock interrupts while we are in select, if clock interrupts
   * are enabled. Otherwise, when the select/poll calls are interrupted, the
   * timer value starts ticking from zero again when the system call is
   * restarted.
   */
  if (!_nspr_noclock) {
    PR_ASSERT(sigismember(&timer_set, SIGALRM));
  }
  sigprocmask(SIG_BLOCK, &timer_set, &oldset);
#  endif /* !_PR_NO_CLOCK_TIMER */

#  ifndef _PR_USE_POLL
  PR_ASSERT(FD_ISSET(_pr_md_pipefd[0], rp));
  nfd = _MD_SELECT(max_osfd, rp, wp, ep, tvp);
#  else
  nfd = _MD_POLL(pollfds, npollfds, timeout);
#  endif /* !_PR_USE_POLL */

#  ifndef _PR_NO_CLOCK_TIMER
  if (!_nspr_noclock) {
    sigprocmask(SIG_SETMASK, &oldset, 0);
  }
#  endif /* !_PR_NO_CLOCK_TIMER */

  _MD_CHECK_FOR_EXIT();

  _PR_MD_primordial_cpu();

  _PR_MD_IOQ_LOCK();
  /*
  ** Notify monitors that are associated with the selected descriptors.
  */
#  ifdef _PR_USE_POLL
  if (nfd > 0) {
    pollfdPtr = pollfds;
    if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
      /*
       * Assert that the pipe is the first element in the
       * pollfds array.
       */
      PR_ASSERT(pollfds[0].fd == _pr_md_pipefd[0]);
      if ((pollfds[0].revents & POLLIN) && (nfd == 1)) {
        /*
         * woken up by another thread; read all the data
         * in the pipe to empty the pipe
         */
        while ((rv = read(_pr_md_pipefd[0], _pr_md_pipebuf, PIPE_BUF)) ==
               PIPE_BUF) {
        }
        PR_ASSERT((rv > 0) || ((rv == -1) && (errno == EAGAIN)));
      }
      pollfdPtr++;
    }
    for (q = _PR_IOQ(me->cpu).next; q != &_PR_IOQ(me->cpu); q = q->next) {
      PRPollQueue* pq = _PR_POLLQUEUE_PTR(q);
      PRBool notify = PR_FALSE;
      _PRUnixPollDesc* pds = pq->pds;
      _PRUnixPollDesc* epds = pds + pq->npds;

      for (; pds < epds; pds++, pollfdPtr++) {
        /*
         * Assert that the pollfdPtr pointer does not go beyond
         * the end of the pollfds array.
         */
        PR_ASSERT(pollfdPtr < pollfds + npollfds);
        /*
         * Assert that the fd's in the pollfds array (stepped
         * through by pollfdPtr) are in the same order as
         * the fd's in _PR_IOQ() (stepped through by q and pds).
         * This is how the pollfds array was created earlier.
         */
        PR_ASSERT(pollfdPtr->fd == pds->osfd);
        pds->out_flags = pollfdPtr->revents;
        /* Negative fd's are ignored by poll() */
        if (pds->osfd >= 0 && pds->out_flags) {
          notify = PR_TRUE;
        }
      }
      if (notify) {
        PRIntn pri;
        PRThread* thred;

        PR_REMOVE_LINK(&pq->links);
        pq->on_ioq = PR_FALSE;

        thred = pq->thr;
        _PR_THREAD_LOCK(thred);
        if (pq->thr->flags & (_PR_ON_PAUSEQ | _PR_ON_SLEEPQ)) {
          _PRCPU* cpu = pq->thr->cpu;
          _PR_SLEEPQ_LOCK(pq->thr->cpu);
          _PR_DEL_SLEEPQ(pq->thr, PR_TRUE);
          _PR_SLEEPQ_UNLOCK(pq->thr->cpu);

          if (pq->thr->flags & _PR_SUSPENDING) {
            /*
             * set thread state to SUSPENDED;
             * a Resume operation on the thread
             * will move it to the runQ
             */
            pq->thr->state = _PR_SUSPENDED;
            _PR_MISCQ_LOCK(pq->thr->cpu);
            _PR_ADD_SUSPENDQ(pq->thr, pq->thr->cpu);
            _PR_MISCQ_UNLOCK(pq->thr->cpu);
          } else {
            pri = pq->thr->priority;
            pq->thr->state = _PR_RUNNABLE;

            _PR_RUNQ_LOCK(cpu);
            _PR_ADD_RUNQ(pq->thr, cpu, pri);
            _PR_RUNQ_UNLOCK(cpu);
            if (_pr_md_idle_cpus > 1) {
              _PR_MD_WAKEUP_WAITER(thred);
            }
          }
        }
        _PR_THREAD_UNLOCK(thred);
        _PR_IOQ_OSFD_CNT(me->cpu) -= pq->npds;
        PR_ASSERT(_PR_IOQ_OSFD_CNT(me->cpu) >= 0);
      }
    }
  } else if (nfd == -1) {
    PR_LOG(_pr_io_lm, PR_LOG_MAX, ("poll() failed with errno %d", errno));
  }

#  else
  if (nfd > 0) {
    q = _PR_IOQ(me->cpu).next;
    _PR_IOQ_MAX_OSFD(me->cpu) = -1;
    _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT;
    while (q != &_PR_IOQ(me->cpu)) {
      PRPollQueue* pq = _PR_POLLQUEUE_PTR(q);
      PRBool notify = PR_FALSE;
      _PRUnixPollDesc* pds = pq->pds;
      _PRUnixPollDesc* epds = pds + pq->npds;
      PRInt32 pq_max_osfd = -1;

      /* advance now: pq may be unlinked from the queue below */
      q = q->next;
      for (; pds < epds; pds++) {
        PRInt32 osfd = pds->osfd;
        PRInt16 in_flags = pds->in_flags;
        PRInt16 out_flags = 0;
        PR_ASSERT(osfd >= 0 || in_flags == 0);
        if ((in_flags & _PR_UNIX_POLL_READ) && FD_ISSET(osfd, rp)) {
          out_flags |= _PR_UNIX_POLL_READ;
        }
        if ((in_flags & _PR_UNIX_POLL_WRITE) && FD_ISSET(osfd, wp)) {
          out_flags |= _PR_UNIX_POLL_WRITE;
        }
        if ((in_flags & _PR_UNIX_POLL_EXCEPT) && FD_ISSET(osfd, ep)) {
          out_flags |= _PR_UNIX_POLL_EXCEPT;
        }
        pds->out_flags = out_flags;
        if (out_flags) {
          notify = PR_TRUE;
        }
        if (osfd > pq_max_osfd) {
          pq_max_osfd = osfd;
        }
      }
      if (notify == PR_TRUE) {
        PRIntn pri;
        PRThread* thred;

        PR_REMOVE_LINK(&pq->links);
        pq->on_ioq = PR_FALSE;

        /*
         * Decrement the count of descriptors for each desciptor/event
         * because this I/O request is being removed from the
         * ioq
         */
        pds = pq->pds;
        for (; pds < epds; pds++) {
          PRInt32 osfd = pds->osfd;
          PRInt16 in_flags = pds->in_flags;
          PR_ASSERT(osfd >= 0 || in_flags == 0);
          if (in_flags & _PR_UNIX_POLL_READ) {
            if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0) {
              FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu));
            }
          }
          if (in_flags & _PR_UNIX_POLL_WRITE) {
            if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0) {
              FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu));
            }
          }
          if (in_flags & _PR_UNIX_POLL_EXCEPT) {
            if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0) {
              FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
            }
          }
        }

        /*
         * Because this thread can run on a different cpu right
         * after being added to the run queue, do not dereference
         * pq
         */
        thred = pq->thr;
        _PR_THREAD_LOCK(thred);
        if (pq->thr->flags & (_PR_ON_PAUSEQ | _PR_ON_SLEEPQ)) {
          _PRCPU* cpu = thred->cpu;
          _PR_SLEEPQ_LOCK(pq->thr->cpu);
          _PR_DEL_SLEEPQ(pq->thr, PR_TRUE);
          _PR_SLEEPQ_UNLOCK(pq->thr->cpu);

          if (pq->thr->flags & _PR_SUSPENDING) {
            /*
             * set thread state to SUSPENDED;
             * a Resume operation on the thread
             * will move it to the runQ
             */
            pq->thr->state = _PR_SUSPENDED;
            _PR_MISCQ_LOCK(pq->thr->cpu);
            _PR_ADD_SUSPENDQ(pq->thr, pq->thr->cpu);
            _PR_MISCQ_UNLOCK(pq->thr->cpu);
          } else {
            pri = pq->thr->priority;
            pq->thr->state = _PR_RUNNABLE;

            pq->thr->cpu = cpu;
            _PR_RUNQ_LOCK(cpu);
            _PR_ADD_RUNQ(pq->thr, cpu, pri);
            _PR_RUNQ_UNLOCK(cpu);
            if (_pr_md_idle_cpus > 1) {
              _PR_MD_WAKEUP_WAITER(thred);
            }
          }
        }
        _PR_THREAD_UNLOCK(thred);
      } else {
        /* entry stays on the ioq; fold it into the recomputed aggregates */
        if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu)) {
          _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout;
        }
        if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd) {
          _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd;
        }
      }
    }
    if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
      if ((FD_ISSET(_pr_md_pipefd[0], rp)) && (nfd == 1)) {
        /*
         * woken up by another thread; read all the data
         * in the pipe to empty the pipe
         */
        while ((rv = read(_pr_md_pipefd[0], _pr_md_pipebuf, PIPE_BUF)) ==
               PIPE_BUF) {
        }
        PR_ASSERT((rv > 0) || ((rv == -1) && (errno == EAGAIN)));
      }
      if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0]) {
        _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0];
      }
    }
  } else if (nfd < 0) {
    if (errno == EBADF) {
      /* a registered fd was closed behind our back; purge it */
      FindBadFDs();
    } else {
      PR_LOG(_pr_io_lm, PR_LOG_MAX, ("select() failed with errno %d", errno));
    }
  } else {
    PR_ASSERT(nfd == 0);
    /*
     * compute the new value of _PR_IOQ_TIMEOUT
     */
    q = _PR_IOQ(me->cpu).next;
    _PR_IOQ_MAX_OSFD(me->cpu) = -1;
    _PR_IOQ_TIMEOUT(me->cpu) = PR_INTERVAL_NO_TIMEOUT;
    while (q != &_PR_IOQ(me->cpu)) {
      PRPollQueue* pq = _PR_POLLQUEUE_PTR(q);
      _PRUnixPollDesc* pds = pq->pds;
      _PRUnixPollDesc* epds = pds + pq->npds;
      PRInt32 pq_max_osfd = -1;

      q = q->next;
      for (; pds < epds; pds++) {
        if (pds->osfd > pq_max_osfd) {
          pq_max_osfd = pds->osfd;
        }
      }
      if (pq->timeout < _PR_IOQ_TIMEOUT(me->cpu)) {
        _PR_IOQ_TIMEOUT(me->cpu) = pq->timeout;
      }
      if (_PR_IOQ_MAX_OSFD(me->cpu) < pq_max_osfd) {
        _PR_IOQ_MAX_OSFD(me->cpu) = pq_max_osfd;
      }
    }
    if (_PR_IS_NATIVE_THREAD_SUPPORTED()) {
      if (_PR_IOQ_MAX_OSFD(me->cpu) < _pr_md_pipefd[0]) {
        _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0];
      }
    }
  }
#  endif /* _PR_USE_POLL */
  _PR_MD_IOQ_UNLOCK();
}
1843
1844
/*
** Wake any cpu blocked in poll()/select() by writing one byte to the
** wakeup pipe.  If the (nonblocking) pipe is full (EAGAIN), drain it
** and retry the write so the wakeup is never lost.
*/
void _MD_Wakeup_CPUs() {
  PRInt32 rv, data;

  data = 0;
  rv = write(_pr_md_pipefd[1], &data, 1);

  while ((rv < 0) && (errno == EAGAIN)) {
    /*
     * pipe full, read all data in pipe to empty it
     */
    while ((rv = read(_pr_md_pipefd[0], _pr_md_pipebuf, PIPE_BUF)) ==
           PIPE_BUF) {
    }
    PR_ASSERT((rv > 0) || ((rv == -1) && (errno == EAGAIN)));
    rv = write(_pr_md_pipefd[1], &data, 1);
  }
}
1861
1862
/*
** Create the wakeup pipe used by _MD_Wakeup_CPUs and register its read
** end with the current cpu's i/o wait machinery.  Both pipe ends are
** made nonblocking: the writer must never stall, and the reader drains
** until EAGAIN.
*/
void _MD_InitCPUS() {
  PRInt32 rv, flags;
  PRThread* me = _MD_CURRENT_THREAD();

  rv = pipe(_pr_md_pipefd);
  PR_ASSERT(rv == 0);
  _PR_IOQ_MAX_OSFD(me->cpu) = _pr_md_pipefd[0];
#  ifndef _PR_USE_POLL
  /* select() path: the pipe's read end is always in the read set */
  FD_SET(_pr_md_pipefd[0], &_PR_FD_READ_SET(me->cpu));
#  endif

  flags = fcntl(_pr_md_pipefd[0], F_GETFL, 0);
  fcntl(_pr_md_pipefd[0], F_SETFL, flags | O_NONBLOCK);
  flags = fcntl(_pr_md_pipefd[1], F_GETFL, 0);
  fcntl(_pr_md_pipefd[1], F_SETFL, flags | O_NONBLOCK);
}
1878
1879
/*
1880
** Unix SIGALRM (clock) signal handler
1881
*/
1882
/*
** Unix SIGALRM (clock) signal handler: drives the NSPR virtual clock.
** If interrupts are currently off, the tick is recorded in the cpu's
** missed-interrupt mask instead of being processed.  Otherwise it runs
** _PR_ClockInterrupt() and, if a reschedule is needed, context-switches
** away from the interrupted thread.
*/
static void ClockInterruptHandler() {
  int olderrno;
  PRUintn pri;
  _PRCPU* cpu = _PR_MD_CURRENT_CPU();
  PRThread* me = _MD_CURRENT_THREAD();

#  ifdef SOLARIS
  /* signal delivered to a native thread: defer to the primordial cpu */
  if (!me || _PR_IS_NATIVE_THREAD(me)) {
    _pr_primordialCPU->u.missed[_pr_primordialCPU->where] |= _PR_MISSED_CLOCK;
    return;
  }
#  endif

  if (_PR_MD_GET_INTSOFF() != 0) {
    /* interrupts are masked: remember the tick for later processing */
    cpu->u.missed[cpu->where] |= _PR_MISSED_CLOCK;
    return;
  }
  _PR_MD_SET_INTSOFF(1);

  /* preserve errno across the clock processing, as required of handlers */
  olderrno = errno;
  _PR_ClockInterrupt();
  errno = olderrno;

  /*
  ** If the interrupt wants a resched or if some other thread at
  ** the same priority needs the cpu, reschedule.
  */
  pri = me->priority;
  if ((cpu->u.missed[3] || (_PR_RUNQREADYMASK(me->cpu) >> pri))) {
#  ifdef _PR_NO_PREEMPT
    cpu->resched = PR_TRUE;
    if (pr_interruptSwitchHook) {
      (*pr_interruptSwitchHook)(pr_interruptSwitchHookArg);
    }
#  else  /* _PR_NO_PREEMPT */
    /*
    ** Re-enable unix interrupts (so that we can use
    ** setjmp/longjmp for context switching without having to
    ** worry about the signal state)
    */
    sigprocmask(SIG_SETMASK, &empty_set, 0);
    PR_LOG(_pr_sched_lm, PR_LOG_MIN, ("clock caused context switch"));

    if (!(me->flags & _PR_IDLE_THREAD)) {
      /* put the interrupted thread back on the run queue before switching */
      _PR_THREAD_LOCK(me);
      me->state = _PR_RUNNABLE;
      me->cpu = cpu;
      _PR_RUNQ_LOCK(cpu);
      _PR_ADD_RUNQ(me, cpu, pri);
      _PR_RUNQ_UNLOCK(cpu);
      _PR_THREAD_UNLOCK(me);
    } else {
      me->state = _PR_RUNNABLE;
    }
    _MD_SWITCH_CONTEXT(me);
    PR_LOG(_pr_sched_lm, PR_LOG_MIN, ("clock back from context switch"));
#  endif /* _PR_NO_PREEMPT */
  }
  /*
   * Because this thread could be running on a different cpu after
   * a context switch the current cpu should be accessed and the
   * value of the 'cpu' variable should not be used.
   */
  _PR_MD_SET_INTSOFF(0);
}
1947
1948
/* # of milliseconds per clock tick that we will use */
1949
#  define MSEC_PER_TICK 50
1950
1951
void _MD_StartInterrupts() {
1952
  char* eval;
1953
1954
  if ((eval = getenv("NSPR_NOCLOCK")) != NULL) {
1955
    if (atoi(eval) == 0) {
1956
      _nspr_noclock = 0;
1957
    } else {
1958
      _nspr_noclock = 1;
1959
    }
1960
  }
1961
1962
#  ifndef _PR_NO_CLOCK_TIMER
1963
  if (!_nspr_noclock) {
1964
    _MD_EnableClockInterrupts();
1965
  }
1966
#  endif
1967
}
1968
1969
/* Stop delivery of the virtual-clock signal by blocking SIGALRM (via timer_set). */
void _MD_StopInterrupts() { sigprocmask(SIG_BLOCK, &timer_set, 0); }
1970
1971
/*
** Install ClockInterruptHandler for SIGALRM and start the real-time
** interval timer ticking every MSEC_PER_TICK milliseconds.
*/
void _MD_EnableClockInterrupts() {
  struct itimerval itval;
  extern PRUintn _pr_numCPU;
  struct sigaction vtact;

  vtact.sa_handler = (void (*)())ClockInterruptHandler;
  sigemptyset(&vtact.sa_mask);
  /* SA_RESTART: restart interrupted syscalls instead of failing EINTR */
  vtact.sa_flags = SA_RESTART;
  sigaction(SIGALRM, &vtact, 0);

  /* the virtual clock is only used in the single-cpu configuration */
  PR_ASSERT(_pr_numCPU == 1);
  itval.it_interval.tv_sec = 0;
  itval.it_interval.tv_usec = MSEC_PER_TICK * PR_USEC_PER_MSEC;
  itval.it_value = itval.it_interval;
  setitimer(ITIMER_REAL, &itval, 0);
}
1987
1988
void _MD_DisableClockInterrupts() {
1989
  struct itimerval itval;
1990
  extern PRUintn _pr_numCPU;
1991
1992
  PR_ASSERT(_pr_numCPU == 1);
1993
  itval.it_interval.tv_sec = 0;
1994
  itval.it_interval.tv_usec = 0;
1995
  itval.it_value = itval.it_interval;
1996
  setitimer(ITIMER_REAL, &itval, 0);
1997
}
1998
1999
/* Temporarily block SIGALRM so a critical section cannot be interrupted by the clock. */
void _MD_BlockClockInterrupts() { sigprocmask(SIG_BLOCK, &timer_set, 0); }
2000
2001
/* Re-allow SIGALRM delivery after _MD_BlockClockInterrupts(). */
void _MD_UnblockClockInterrupts() { sigprocmask(SIG_UNBLOCK, &timer_set, 0); }
2002
2003
/*
 * Put the underlying OS descriptor of 'fd' into non-blocking mode.
 * stdin/stdout/stderr are deliberately left alone.
 */
void _MD_MakeNonblock(PRFileDesc* fd) {
  PRInt32 osfd = fd->secret->md.osfd;
  int flags;

  if (osfd <= 2) {
    /* Don't mess around with stdin, stdout or stderr */
    return;
  }
  flags = fcntl(osfd, F_GETFL, 0);
  if (flags == -1) {
    /*
     * F_GETFL failed; bail out rather than OR'ing O_NONBLOCK into -1
     * and clobbering the descriptor's real flags with F_SETFL.
     */
    return;
  }

  /*
   * Use O_NONBLOCK (POSIX-style non-blocking I/O) whenever possible.
   * On SunOS 4, we must use FNDELAY (BSD-style non-blocking I/O),
   * otherwise connect() still blocks and can be interrupted by SIGALRM.
   */

  fcntl(osfd, F_SETFL, flags | O_NONBLOCK);
}
2021
2022
/*
** Translate NSPR open flags into native open(2) flags and open the file
** through the platform's large-file entry point (_md_iovector._open64).
** Returns the OS file descriptor, or a negative value with the NSPR
** error already mapped from errno.
*/
PRInt32 _MD_open(const char* name, PRIntn flags, PRIntn mode) {
  PRInt32 osflags;
  PRInt32 rv, err;

  /* Access mode: PR_RDWR takes precedence over PR_WRONLY. */
  if (flags & PR_RDWR) {
    osflags = O_RDWR;
  } else if (flags & PR_WRONLY) {
    osflags = O_WRONLY;
  } else {
    osflags = O_RDONLY;
  }

  if (flags & PR_EXCL) {
    osflags |= O_EXCL;
  }
  if (flags & PR_APPEND) {
    osflags |= O_APPEND;
  }
  if (flags & PR_TRUNCATE) {
    osflags |= O_TRUNC;
  }
  if (flags & PR_SYNC) {
#  if defined(O_SYNC)
    osflags |= O_SYNC;
#  elif defined(O_FSYNC)
    osflags |= O_FSYNC;
#  else
#    error "Neither O_SYNC nor O_FSYNC is defined on this platform"
#  endif
  }

  /*
  ** On creations we hold the 'create' lock in order to enforce
  ** the semantics of PR_Rename. (see the latter for more details)
  */
  if (flags & PR_CREATE_FILE) {
    osflags |= O_CREAT;
    /* The lock may not exist yet: _PR_InitLog opens its file before
     * _PR_UnixInit creates _pr_unix_rename_lock. */
    if (NULL != _pr_unix_rename_lock) {
      PR_Lock(_pr_unix_rename_lock);
    }
  }

#  if defined(ANDROID)
  /* Old Android has no open64; request large-file support via a flag. */
  osflags |= O_LARGEFILE;
#  endif

  rv = _md_iovector._open64(name, osflags, mode);

  if (rv < 0) {
    err = _MD_ERRNO();
    _PR_MD_MAP_OPEN_ERROR(err);
  }

  if ((flags & PR_CREATE_FILE) && (NULL != _pr_unix_rename_lock)) {
    PR_Unlock(_pr_unix_rename_lock);
  }
  return rv;
}
2080
2081
/* Interval between interrupt checks for threads blocked in I/O waits;
 * set in _PR_UnixInit from _PR_INTERRUPT_CHECK_INTERVAL_SECS. */
PRIntervalTime intr_timeout_ticks;
2082
2083
#  if defined(SOLARIS)
2084
/*
** Solaris debugging aids, installed by _PR_UnixInit only when the
** corresponding NSPR_SIG*_HANDLE environment variables are set: report
** the fatal signal on stderr and then pause() indefinitely so that a
** debugger can be attached to the still-live process.
*/
static void sigsegvhandler() {
  fprintf(stderr, "Received SIGSEGV\n");
  fflush(stderr);
  pause();
}

static void sigaborthandler() {
  fprintf(stderr, "Received SIGABRT\n");
  fflush(stderr);
  pause();
}

static void sigbushandler() {
  fprintf(stderr, "Received SIGBUS\n");
  fflush(stderr);
  pause();
}
2101
#  endif /* SOLARIS */
2102
2103
#endif /* !defined(_PR_PTHREADS) */
2104
2105
0
/*
 * Resolve an as-yet-unknown inheritable state for 'fd' by querying the
 * descriptor's FD_CLOEXEC flag: close-on-exec means not inheritable.
 */
void _MD_query_fd_inheritable(PRFileDesc* fd) {
  int fdflags;

  PR_ASSERT(_PR_TRI_UNKNOWN == fd->secret->inheritable);
  fdflags = fcntl(fd->secret->md.osfd, F_GETFD, 0);
  PR_ASSERT(-1 != fdflags);
  if (fdflags & FD_CLOEXEC) {
    fd->secret->inheritable = _PR_TRI_FALSE;
  } else {
    fd->secret->inheritable = _PR_TRI_TRUE;
  }
}
2113
2114
0
/*
 * 32-bit seek: map the NSPR whence value onto the native one, perform
 * the lseek, and translate any errno into an NSPR error.  Returns the
 * new offset or -1.
 */
PROffset32 _MD_lseek(PRFileDesc* fd, PROffset32 offset, PRSeekWhence whence) {
  PROffset32 result;
  PROffset32 native_whence;

  switch (whence) {
    case PR_SEEK_SET:
      native_whence = SEEK_SET;
      break;
    case PR_SEEK_CUR:
      native_whence = SEEK_CUR;
      break;
    case PR_SEEK_END:
      native_whence = SEEK_END;
      break;
    default:
      PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
      return -1;
  }

  result = lseek(fd->secret->md.osfd, offset, native_whence);
  if (-1 == result) {
    _PR_MD_MAP_LSEEK_ERROR(_MD_ERRNO());
  }
  return result;
}
2140
2141
0
/*
 * 64-bit seek via the platform's large-file vector.  Returns the new
 * offset, or the 64-bit -1 sentinel with the NSPR error set.
 */
PROffset64 _MD_lseek64(PRFileDesc* fd, PROffset64 offset, PRSeekWhence whence) {
  PRInt32 native_whence;
  PROffset64 result;

  switch (whence) {
    case PR_SEEK_SET:
      native_whence = SEEK_SET;
      break;
    case PR_SEEK_CUR:
      native_whence = SEEK_CUR;
      break;
    case PR_SEEK_END:
      native_whence = SEEK_END;
      break;
    default:
      PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
      return minus_one;
  }

  result = _md_iovector._lseek64(fd->secret->md.osfd, offset, native_whence);
  if (LL_EQ(result, minus_one)) {
    _PR_MD_MAP_LSEEK_ERROR(_MD_ERRNO());
  }
  return result;
} /* _MD_lseek64 */
2168
2169
/*
2170
** _MD_set_fileinfo_times --
2171
**     Set the modifyTime and creationTime of the PRFileInfo
2172
**     structure using the values in struct stat.
2173
**
2174
** _MD_set_fileinfo64_times --
2175
**     Set the modifyTime and creationTime of the PRFileInfo64
2176
**     structure using the values in _MDStat64.
2177
*/
2178
2179
/*
** Each branch below converts the platform-specific stat timestamp
** representation into the PRTime (microseconds since the epoch)
** modifyTime/creationTime fields of the PRFileInfo/PRFileInfo64.
** Only the member names differ between branches; the arithmetic is
** identical:  time = seconds * PR_USEC_PER_SEC + nanoseconds / 1000.
*/
#if defined(_PR_STAT_HAS_ST_ATIM)
/*
** struct stat has st_atim, st_mtim, and st_ctim fields of
** type timestruc_t.
*/
static void _MD_set_fileinfo_times(const struct stat* sb, PRFileInfo* info) {
  PRInt64 us, s2us;

  LL_I2L(s2us, PR_USEC_PER_SEC);
  LL_I2L(info->modifyTime, sb->st_mtim.tv_sec);
  LL_MUL(info->modifyTime, info->modifyTime, s2us);
  LL_I2L(us, sb->st_mtim.tv_nsec / 1000);
  LL_ADD(info->modifyTime, info->modifyTime, us);
  LL_I2L(info->creationTime, sb->st_ctim.tv_sec);
  LL_MUL(info->creationTime, info->creationTime, s2us);
  LL_I2L(us, sb->st_ctim.tv_nsec / 1000);
  LL_ADD(info->creationTime, info->creationTime, us);
}

static void _MD_set_fileinfo64_times(const _MDStat64* sb, PRFileInfo64* info) {
  PRInt64 us, s2us;

  LL_I2L(s2us, PR_USEC_PER_SEC);
  LL_I2L(info->modifyTime, sb->st_mtim.tv_sec);
  LL_MUL(info->modifyTime, info->modifyTime, s2us);
  LL_I2L(us, sb->st_mtim.tv_nsec / 1000);
  LL_ADD(info->modifyTime, info->modifyTime, us);
  LL_I2L(info->creationTime, sb->st_ctim.tv_sec);
  LL_MUL(info->creationTime, info->creationTime, s2us);
  LL_I2L(us, sb->st_ctim.tv_nsec / 1000);
  LL_ADD(info->creationTime, info->creationTime, us);
}
#elif defined(_PR_STAT_HAS_ST_ATIM_UNION)
/*
** The st_atim, st_mtim, and st_ctim fields in struct stat are
** unions with a st__tim union member of type timestruc_t.
*/
static void _MD_set_fileinfo_times(const struct stat* sb, PRFileInfo* info) {
  PRInt64 us, s2us;

  LL_I2L(s2us, PR_USEC_PER_SEC);
  LL_I2L(info->modifyTime, sb->st_mtim.st__tim.tv_sec);
  LL_MUL(info->modifyTime, info->modifyTime, s2us);
  LL_I2L(us, sb->st_mtim.st__tim.tv_nsec / 1000);
  LL_ADD(info->modifyTime, info->modifyTime, us);
  LL_I2L(info->creationTime, sb->st_ctim.st__tim.tv_sec);
  LL_MUL(info->creationTime, info->creationTime, s2us);
  LL_I2L(us, sb->st_ctim.st__tim.tv_nsec / 1000);
  LL_ADD(info->creationTime, info->creationTime, us);
}

static void _MD_set_fileinfo64_times(const _MDStat64* sb, PRFileInfo64* info) {
  PRInt64 us, s2us;

  LL_I2L(s2us, PR_USEC_PER_SEC);
  LL_I2L(info->modifyTime, sb->st_mtim.st__tim.tv_sec);
  LL_MUL(info->modifyTime, info->modifyTime, s2us);
  LL_I2L(us, sb->st_mtim.st__tim.tv_nsec / 1000);
  LL_ADD(info->modifyTime, info->modifyTime, us);
  LL_I2L(info->creationTime, sb->st_ctim.st__tim.tv_sec);
  LL_MUL(info->creationTime, info->creationTime, s2us);
  LL_I2L(us, sb->st_ctim.st__tim.tv_nsec / 1000);
  LL_ADD(info->creationTime, info->creationTime, us);
}
#elif defined(_PR_STAT_HAS_ST_ATIMESPEC)
/*
** struct stat has st_atimespec, st_mtimespec, and st_ctimespec
** fields of type struct timespec.
*/
#  if defined(_PR_TIMESPEC_HAS_TS_SEC)
static void _MD_set_fileinfo_times(const struct stat* sb, PRFileInfo* info) {
  PRInt64 us, s2us;

  LL_I2L(s2us, PR_USEC_PER_SEC);
  LL_I2L(info->modifyTime, sb->st_mtimespec.ts_sec);
  LL_MUL(info->modifyTime, info->modifyTime, s2us);
  LL_I2L(us, sb->st_mtimespec.ts_nsec / 1000);
  LL_ADD(info->modifyTime, info->modifyTime, us);
  LL_I2L(info->creationTime, sb->st_ctimespec.ts_sec);
  LL_MUL(info->creationTime, info->creationTime, s2us);
  LL_I2L(us, sb->st_ctimespec.ts_nsec / 1000);
  LL_ADD(info->creationTime, info->creationTime, us);
}

static void _MD_set_fileinfo64_times(const _MDStat64* sb, PRFileInfo64* info) {
  PRInt64 us, s2us;

  LL_I2L(s2us, PR_USEC_PER_SEC);
  LL_I2L(info->modifyTime, sb->st_mtimespec.ts_sec);
  LL_MUL(info->modifyTime, info->modifyTime, s2us);
  LL_I2L(us, sb->st_mtimespec.ts_nsec / 1000);
  LL_ADD(info->modifyTime, info->modifyTime, us);
  LL_I2L(info->creationTime, sb->st_ctimespec.ts_sec);
  LL_MUL(info->creationTime, info->creationTime, s2us);
  LL_I2L(us, sb->st_ctimespec.ts_nsec / 1000);
  LL_ADD(info->creationTime, info->creationTime, us);
}
#  else  /* _PR_TIMESPEC_HAS_TS_SEC */
/*
** The POSIX timespec structure has tv_sec and tv_nsec.
*/
static void _MD_set_fileinfo_times(const struct stat* sb, PRFileInfo* info) {
  PRInt64 us, s2us;

  LL_I2L(s2us, PR_USEC_PER_SEC);
  LL_I2L(info->modifyTime, sb->st_mtimespec.tv_sec);
  LL_MUL(info->modifyTime, info->modifyTime, s2us);
  LL_I2L(us, sb->st_mtimespec.tv_nsec / 1000);
  LL_ADD(info->modifyTime, info->modifyTime, us);
  LL_I2L(info->creationTime, sb->st_ctimespec.tv_sec);
  LL_MUL(info->creationTime, info->creationTime, s2us);
  LL_I2L(us, sb->st_ctimespec.tv_nsec / 1000);
  LL_ADD(info->creationTime, info->creationTime, us);
}

static void _MD_set_fileinfo64_times(const _MDStat64* sb, PRFileInfo64* info) {
  PRInt64 us, s2us;

  LL_I2L(s2us, PR_USEC_PER_SEC);
  LL_I2L(info->modifyTime, sb->st_mtimespec.tv_sec);
  LL_MUL(info->modifyTime, info->modifyTime, s2us);
  LL_I2L(us, sb->st_mtimespec.tv_nsec / 1000);
  LL_ADD(info->modifyTime, info->modifyTime, us);
  LL_I2L(info->creationTime, sb->st_ctimespec.tv_sec);
  LL_MUL(info->creationTime, info->creationTime, s2us);
  LL_I2L(us, sb->st_ctimespec.tv_nsec / 1000);
  LL_ADD(info->creationTime, info->creationTime, us);
}
#  endif /* _PR_TIMESPEC_HAS_TS_SEC */
#elif defined(_PR_STAT_HAS_ONLY_ST_ATIME)
/*
** struct stat only has st_atime, st_mtime, and st_ctime fields
** of type time_t (whole seconds; no sub-second precision available).
*/
static void _MD_set_fileinfo_times(const struct stat* sb, PRFileInfo* info) {
  PRInt64 s, s2us;
  LL_I2L(s2us, PR_USEC_PER_SEC);
  LL_I2L(s, sb->st_mtime);
  LL_MUL(s, s, s2us);
  info->modifyTime = s;
  LL_I2L(s, sb->st_ctime);
  LL_MUL(s, s, s2us);
  info->creationTime = s;
}

static void _MD_set_fileinfo64_times(const _MDStat64* sb, PRFileInfo64* info) {
  PRInt64 s, s2us;
  LL_I2L(s2us, PR_USEC_PER_SEC);
  LL_I2L(s, sb->st_mtime);
  LL_MUL(s, s, s2us);
  info->modifyTime = s;
  LL_I2L(s, sb->st_ctime);
  LL_MUL(s, s, s2us);
  info->creationTime = s;
}
#else
#  error "I don't know yet"
#endif
2337
2338
static int _MD_convert_stat_to_fileinfo(const struct stat* sb,
2339
0
                                        PRFileInfo* info) {
2340
0
  if (S_IFREG & sb->st_mode) {
2341
0
    info->type = PR_FILE_FILE;
2342
0
  } else if (S_IFDIR & sb->st_mode) {
2343
0
    info->type = PR_FILE_DIRECTORY;
2344
0
  } else {
2345
0
    info->type = PR_FILE_OTHER;
2346
0
  }
2347
2348
#if defined(_PR_HAVE_LARGE_OFF_T)
2349
  if (0x7fffffffL < sb->st_size) {
2350
    PR_SetError(PR_FILE_TOO_BIG_ERROR, 0);
2351
    return -1;
2352
  }
2353
#endif /* defined(_PR_HAVE_LARGE_OFF_T) */
2354
0
  info->size = sb->st_size;
2355
2356
0
  _MD_set_fileinfo_times(sb, info);
2357
0
  return 0;
2358
0
} /* _MD_convert_stat_to_fileinfo */
2359
2360
static int _MD_convert_stat64_to_fileinfo64(const _MDStat64* sb,
2361
0
                                            PRFileInfo64* info) {
2362
0
  if (S_IFREG & sb->st_mode) {
2363
0
    info->type = PR_FILE_FILE;
2364
0
  } else if (S_IFDIR & sb->st_mode) {
2365
0
    info->type = PR_FILE_DIRECTORY;
2366
0
  } else {
2367
0
    info->type = PR_FILE_OTHER;
2368
0
  }
2369
2370
0
  LL_I2L(info->size, sb->st_size);
2371
2372
0
  _MD_set_fileinfo64_times(sb, info);
2373
0
  return 0;
2374
0
} /* _MD_convert_stat64_to_fileinfo64 */
2375
2376
0
PRInt32 _MD_getfileinfo(const char* fn, PRFileInfo* info) {
2377
0
  PRInt32 rv;
2378
0
  struct stat sb;
2379
2380
0
  rv = stat(fn, &sb);
2381
0
  if (rv < 0) {
2382
0
    _PR_MD_MAP_STAT_ERROR(_MD_ERRNO());
2383
0
  } else if (NULL != info) {
2384
0
    rv = _MD_convert_stat_to_fileinfo(&sb, info);
2385
0
  }
2386
0
  return rv;
2387
0
}
2388
2389
0
PRInt32 _MD_getfileinfo64(const char* fn, PRFileInfo64* info) {
2390
0
  _MDStat64 sb;
2391
0
  PRInt32 rv = _md_iovector._stat64(fn, &sb);
2392
0
  if (rv < 0) {
2393
0
    _PR_MD_MAP_STAT_ERROR(_MD_ERRNO());
2394
0
  } else if (NULL != info) {
2395
0
    rv = _MD_convert_stat64_to_fileinfo64(&sb, info);
2396
0
  }
2397
0
  return rv;
2398
0
}
2399
2400
0
PRInt32 _MD_getopenfileinfo(const PRFileDesc* fd, PRFileInfo* info) {
2401
0
  struct stat sb;
2402
0
  PRInt32 rv = fstat(fd->secret->md.osfd, &sb);
2403
0
  if (rv < 0) {
2404
0
    _PR_MD_MAP_FSTAT_ERROR(_MD_ERRNO());
2405
0
  } else if (NULL != info) {
2406
0
    rv = _MD_convert_stat_to_fileinfo(&sb, info);
2407
0
  }
2408
0
  return rv;
2409
0
}
2410
2411
0
PRInt32 _MD_getopenfileinfo64(const PRFileDesc* fd, PRFileInfo64* info) {
2412
0
  _MDStat64 sb;
2413
0
  PRInt32 rv = _md_iovector._fstat64(fd->secret->md.osfd, &sb);
2414
0
  if (rv < 0) {
2415
0
    _PR_MD_MAP_FSTAT_ERROR(_MD_ERRNO());
2416
0
  } else if (NULL != info) {
2417
0
    rv = _MD_convert_stat64_to_fileinfo64(&sb, info);
2418
0
  }
2419
0
  return rv;
2420
0
}
2421
2422
/*
2423
 * _md_iovector._open64 must be initialized to 'open' so that _PR_InitLog can
2424
 * open the log file during NSPR initialization, before _md_iovector is
2425
 * initialized by _PR_MD_FINAL_INIT.  This means the log file cannot be a
2426
 * large file on some platforms.
2427
 */
2428
/* Bootstrap value: only _open64 is set statically; the remaining
 * members are filled in by _PR_InitIOV(). */
struct _MD_IOVector _md_iovector = {open};
2429
2430
/*
2431
** These implementations are to emulate large file routines on systems that
2432
** don't have them. Their goal is to check in case overflow occurs. Otherwise
2433
** they will just operate as normal using 32-bit file routines.
2434
**
2435
** The checking might be pre- or post-op, depending on the semantics.
2436
*/
2437
2438
#if defined(SOLARIS2_5)
2439
2440
/*
** Emulated fstat64 for Solaris 2.5: run the 32-bit fstat and widen the
** handful of fields NSPR actually consumes.
*/
static PRIntn _MD_solaris25_fstat64(PRIntn osfd, _MDStat64* buf) {
  struct stat narrow;
  PRInt32 rv = fstat(osfd, &narrow);

  if (rv >= 0) {
    /*
    ** Only the fields that are immediately needed are copied; every
    ** other field of 'buf' is left zeroed.
    */
    (void)memset(buf, 0, sizeof(_MDStat64));
    buf->st_mode = narrow.st_mode;
    buf->st_ctim = narrow.st_ctim;
    buf->st_mtim = narrow.st_mtim;
    buf->st_size = narrow.st_size;
  }
  return rv;
} /* _MD_solaris25_fstat64 */

/*
** Emulated stat64 for Solaris 2.5; see _MD_solaris25_fstat64.
*/
static PRIntn _MD_solaris25_stat64(const char* fn, _MDStat64* buf) {
  struct stat narrow;
  PRInt32 rv = stat(fn, &narrow);

  if (rv >= 0) {
    (void)memset(buf, 0, sizeof(_MDStat64));
    buf->st_mode = narrow.st_mode;
    buf->st_ctim = narrow.st_ctim;
    buf->st_mtim = narrow.st_mtim;
    buf->st_size = narrow.st_size;
  }
  return rv;
} /* _MD_solaris25_stat64 */
2479
#endif /* defined(SOLARIS2_5) */
2480
2481
#if defined(_PR_NO_LARGE_FILES) || defined(SOLARIS2_5)
2482
2483
static PROffset64 _MD_Unix_lseek64(PRIntn osfd, PROffset64 offset,
2484
                                   PRIntn whence) {
2485
  PRUint64 maxoff;
2486
  PROffset64 rv = minus_one;
2487
  LL_I2L(maxoff, 0x7fffffff);
2488
  if (LL_CMP(offset, <=, maxoff)) {
2489
    off_t off;
2490
    LL_L2I(off, offset);
2491
    LL_I2L(rv, lseek(osfd, off, whence));
2492
  } else {
2493
    errno = EFBIG; /* we can't go there */
2494
  }
2495
  return rv;
2496
} /* _MD_Unix_lseek64 */
2497
2498
static void* _MD_Unix_mmap64(void* addr, PRSize len, PRIntn prot, PRIntn flags,
2499
                             PRIntn fildes, PRInt64 offset) {
2500
  PR_SetError(PR_FILE_TOO_BIG_ERROR, 0);
2501
  return NULL;
2502
} /* _MD_Unix_mmap64 */
2503
#endif /* defined(_PR_NO_LARGE_FILES) || defined(SOLARIS2_5) */
2504
2505
/* NDK non-unified headers for API < 21 don't have mmap64. However,
2506
 * NDK unified headers do provide mmap64 for all API versions when building
2507
 * with clang. Therefore, we should provide mmap64 here for API < 21 if we're
2508
 * not using clang or if we're using non-unified headers. We check for
2509
 * non-unified headers by the lack of __ANDROID_API_L__ macro. */
2510
#if defined(ANDROID) && __ANDROID_API__ < 21 && \
2511
    (!defined(__clang__) || !defined(__ANDROID_API_L__))
2512
/* Bionic's low-level primitive; takes its offset in PAGES, not bytes. */
PR_IMPORT(void) * __mmap2(void*, size_t, int, int, int, size_t);

/* assumes 4 KiB pages on these Android targets — TODO confirm */
#  define ANDROID_PAGE_SIZE 4096

/* mmap64 shim for old Android: reject offsets that are not
 * page-aligned (they cannot be expressed as a whole page count), then
 * forward to __mmap2 with the offset converted to pages. */
static void* mmap64(void* addr, size_t len, int prot, int flags, int fd,
                    loff_t offset) {
  if (offset & (ANDROID_PAGE_SIZE - 1)) {
    errno = EINVAL;
    return MAP_FAILED;
  }
  return __mmap2(addr, len, prot, flags, fd, offset / ANDROID_PAGE_SIZE);
}
2524
#endif
2525
2526
0
/*
** Populate _md_iovector with the large-file (64-bit offset) I/O entry
** points appropriate for this platform, falling back to emulation or
** the plain 32-bit routines where necessary.  Called once from
** _PR_UnixInit.
*/
static void _PR_InitIOV(void) {
#if defined(SOLARIS2_5)
  PRLibrary* lib;
  void* open64_func;

  /* Probe at runtime: Solaris 2.5 may or may not provide the *64 API. */
  open64_func = PR_FindSymbolAndLibrary("open64", &lib);
  if (NULL != open64_func) {
    PR_ASSERT(NULL != lib);
    _md_iovector._open64 = (_MD_Open64)open64_func;
    _md_iovector._mmap64 = (_MD_Mmap64)PR_FindSymbol(lib, "mmap64");
    _md_iovector._fstat64 = (_MD_Fstat64)PR_FindSymbol(lib, "fstat64");
    _md_iovector._stat64 = (_MD_Stat64)PR_FindSymbol(lib, "stat64");
    _md_iovector._lseek64 = (_MD_Lseek64)PR_FindSymbol(lib, "lseek64");
    (void)PR_UnloadLibrary(lib);
  } else {
    /* No native large-file API; use the emulation wrappers above. */
    _md_iovector._open64 = open;
    _md_iovector._mmap64 = _MD_Unix_mmap64;
    _md_iovector._fstat64 = _MD_solaris25_fstat64;
    _md_iovector._stat64 = _MD_solaris25_stat64;
    _md_iovector._lseek64 = _MD_Unix_lseek64;
  }
#elif defined(_PR_NO_LARGE_FILES)
  _md_iovector._open64 = open;
  _md_iovector._mmap64 = _MD_Unix_mmap64;
  _md_iovector._fstat64 = fstat;
  _md_iovector._stat64 = stat;
  _md_iovector._lseek64 = _MD_Unix_lseek64;
#elif defined(_PR_HAVE_OFF64_T)
#  if (defined(ANDROID) && __ANDROID_API__ < 21)
  /*
   * Android < 21 doesn't have open64.  We pass the O_LARGEFILE flag to open
   * in _MD_open.
   */
  _md_iovector._open64 = open;
#  else
  _md_iovector._open64 = open64;
#  endif
  _md_iovector._mmap64 = mmap64;
#  if (defined(ANDROID) && __ANDROID_API__ < 21)
  /* Same as the open64 case for Android. */
  _md_iovector._fstat64 = (_MD_Fstat64)fstat;
  _md_iovector._stat64 = (_MD_Stat64)stat;
#  else
  _md_iovector._fstat64 = fstat64;
  _md_iovector._stat64 = stat64;
#  endif
  _md_iovector._lseek64 = lseek64;
#elif defined(_PR_HAVE_LARGE_OFF_T)
  /* off_t is already 64-bit; the ordinary routines suffice. */
  _md_iovector._open64 = open;
  _md_iovector._mmap64 = mmap;
  _md_iovector._fstat64 = fstat;
  _md_iovector._stat64 = stat;
  _md_iovector._lseek64 = lseek;
#else
#  error "I don't know yet"
#endif
  /* 64-bit -1 sentinel used for lseek64 error comparisons. */
  LL_I2L(minus_one, -1);
} /* _PR_InitIOV */
2584
2585
0
/*
** One-time Unix-specific NSPR initialization: build the clock signal
** mask (plus, without pthreads, the SIGALRM bookkeeping and optional
** Solaris debug handlers), ignore SIGPIPE, create the rename lock and
** the Xfe monitor, and fill in the large-file I/O vector.
*/
void _PR_UnixInit(void) {
  struct sigaction sigact;
  int rv;

  sigemptyset(&timer_set);

#if !defined(_PR_PTHREADS)

  sigaddset(&timer_set, SIGALRM);
  sigemptyset(&empty_set);
  intr_timeout_ticks = PR_SecondsToInterval(_PR_INTERRUPT_CHECK_INTERVAL_SECS);

#  if defined(SOLARIS)

  /* Optional debugging aids: handlers that print the signal name and
   * pause() so a debugger can attach; enabled per environment variable. */
  if (getenv("NSPR_SIGSEGV_HANDLE")) {
    sigact.sa_handler = sigsegvhandler;
    sigact.sa_flags = 0;
    sigact.sa_mask = timer_set;
    sigaction(SIGSEGV, &sigact, 0);
  }

  if (getenv("NSPR_SIGABRT_HANDLE")) {
    sigact.sa_handler = sigaborthandler;
    sigact.sa_flags = 0;
    sigact.sa_mask = timer_set;
    sigaction(SIGABRT, &sigact, 0);
  }

  if (getenv("NSPR_SIGBUS_HANDLE")) {
    sigact.sa_handler = sigbushandler;
    sigact.sa_flags = 0;
    sigact.sa_mask = timer_set;
    sigaction(SIGBUS, &sigact, 0);
  }

#  endif
#endif /* !defined(_PR_PTHREADS) */

  /* Ignore SIGPIPE so writes to closed pipes/sockets fail with EPIPE
   * instead of killing the process. */
  sigact.sa_handler = SIG_IGN;
  sigemptyset(&sigact.sa_mask);
  sigact.sa_flags = 0;
  rv = sigaction(SIGPIPE, &sigact, 0);
  PR_ASSERT(0 == rv);

  _pr_unix_rename_lock = PR_NewLock();
  PR_ASSERT(NULL != _pr_unix_rename_lock);
  _pr_Xfe_mon = PR_NewMonitor();
  PR_ASSERT(NULL != _pr_Xfe_mon);

  _PR_InitIOV(); /* one last hack */
}
2636
2637
0
void _PR_UnixCleanup(void) {
2638
0
  if (_pr_unix_rename_lock) {
2639
0
    PR_DestroyLock(_pr_unix_rename_lock);
2640
0
    _pr_unix_rename_lock = NULL;
2641
0
  }
2642
0
  if (_pr_Xfe_mon) {
2643
0
    PR_DestroyMonitor(_pr_Xfe_mon);
2644
0
    _pr_Xfe_mon = NULL;
2645
0
  }
2646
0
}
2647
2648
#if !defined(_PR_PTHREADS)
2649
2650
/*
2651
 * Variables used by the GC code, initialized in _MD_InitSegs().
2652
 */
2653
/* fd for /dev/zero; a negative value makes _MD_AllocSegment fall back
 * to heap allocation (-2 means mmap was disabled via NSPR_NO_MMAP). */
static PRInt32 _pr_zero_fd = -1;
/* Serializes _MD_AllocSegment's use of lastaddr and _pr_zero_fd. */
static PRLock* _pr_md_lock = NULL;
2655
2656
/*
2657
 * _MD_InitSegs --
2658
 *
2659
 * This is Unix's version of _PR_MD_INIT_SEGS(), which is
2660
 * called by _PR_InitSegs(), which in turn is called by
2661
 * PR_Init().
2662
 */
2663
void _MD_InitSegs(void) {
2664
#  ifdef DEBUG
2665
  /*
2666
  ** Disable using mmap(2) if NSPR_NO_MMAP is set
2667
  */
2668
  if (getenv("NSPR_NO_MMAP")) {
2669
    _pr_zero_fd = -2;
2670
    return;
2671
  }
2672
#  endif
2673
  _pr_zero_fd = open("/dev/zero", O_RDWR, 0);
2674
  /* Prevent the fd from being inherited by child processes */
2675
  fcntl(_pr_zero_fd, F_SETFD, FD_CLOEXEC);
2676
  _pr_md_lock = PR_NewLock();
2677
}
2678
2679
/*
 * Allocate a 'size'-byte memory segment for the user-level thread
 * machinery.  Prefers mmap of /dev/zero (at the caller-requested
 * 'vaddr', or at a rolling hint starting from _PR_STACK_VMBASE); falls
 * back to the heap when /dev/zero is unavailable (_pr_zero_fd < 0) or
 * the mmap fails.  Returns PR_SUCCESS or PR_FAILURE.
 */
PRStatus _MD_AllocSegment(PRSegment* seg, PRUint32 size, void* vaddr) {
  static char* lastaddr = (char*)_PR_STACK_VMBASE; /* guarded by _pr_md_lock */
  PRStatus retval = PR_SUCCESS;
  int prot;
  void* rv;

  PR_ASSERT(seg != 0);
  PR_ASSERT(size != 0);

  PR_Lock(_pr_md_lock);
  if (_pr_zero_fd < 0) {
  from_heap:
    /* Heap fallback; also reached via goto when mmap fails below.
     * NOTE(review): seg->flags is not set on this path — assumes it is
     * zeroed by the caller so _MD_FreeSegment picks PR_DELETE; confirm. */
    seg->vaddr = PR_MALLOC(size);
    if (!seg->vaddr) {
      retval = PR_FAILURE;
    } else {
      seg->size = size;
    }
    goto exit;
  }

  prot = PROT_READ | PROT_WRITE;
  /*
   * On Alpha Linux, the user-level thread stack needs
   * to be made executable because longjmp/signal seem
   * to put machine instructions on the stack.
   */
#  if defined(LINUX) && defined(__alpha)
  prot |= PROT_EXEC;
#  endif
  rv = mmap((vaddr != 0) ? vaddr : lastaddr, size, prot, _MD_MMAP_FLAGS,
            _pr_zero_fd, 0);
  if (rv == (void*)-1) {
    goto from_heap;
  }
  lastaddr += size; /* advance the placement hint for the next segment */
  seg->vaddr = rv;
  seg->size = size;
  seg->flags = _PR_SEG_VM;

exit:
  PR_Unlock(_pr_md_lock);
  return retval;
}
2723
2724
/* Release a segment obtained from _MD_AllocSegment: munmap it if it
 * came from mmap (_PR_SEG_VM), otherwise free the heap allocation. */
void _MD_FreeSegment(PRSegment* seg) {
  if ((seg->flags & _PR_SEG_VM) != 0) {
    (void)munmap(seg->vaddr, seg->size);
    return;
  }
  PR_DELETE(seg->vaddr);
}
2731
2732
#endif /* _PR_PTHREADS */
2733
2734
/*
2735
 *-----------------------------------------------------------------------
2736
 *
2737
 * PR_Now --
2738
 *
2739
 *     Returns the current time in microseconds since the epoch.
2740
 *     The epoch is midnight January 1, 1970 GMT.
2741
 *     The implementation is machine dependent.  This is the Unix
2742
 *     implementation.
2743
 *     Cf. time_t time(time_t *tp)
2744
 *
2745
 *-----------------------------------------------------------------------
2746
 */
2747
2748
PR_IMPLEMENT(PRTime)
2749
0
PR_Now(void) {
2750
0
  struct timeval tv;
2751
0
  PRInt64 s, us, s2us;
2752
2753
0
  GETTIMEOFDAY(&tv);
2754
0
  LL_I2L(s2us, PR_USEC_PER_SEC);
2755
0
  LL_I2L(s, tv.tv_sec);
2756
0
  LL_I2L(us, tv.tv_usec);
2757
0
  LL_MUL(s, s, s2us);
2758
0
  LL_ADD(s, s, us);
2759
0
  return s;
2760
0
}
2761
2762
#if defined(_MD_INTERVAL_USE_GTOD)
2763
/*
2764
 * This version of interval times is based on the time of day
2765
 * capability offered by the system. This isn't valid for two reasons:
2766
 * 1) The time of day is neither linear nor monotonically increasing
2767
 * 2) The units here are milliseconds. That's not appropriate for our use.
2768
 */
2769
PRIntervalTime _PR_UNIX_GetInterval() {
2770
  struct timeval time;
2771
  PRIntervalTime ticks;
2772
2773
  (void)GETTIMEOFDAY(&time);                       /* fallicy of course */
2774
  ticks = (PRUint32)time.tv_sec * PR_MSEC_PER_SEC; /* that's in milliseconds */
2775
  ticks += (PRUint32)time.tv_usec / PR_USEC_PER_MSEC; /* so's that */
2776
  return ticks;
2777
} /* _PR_UNIX_GetInterval */
2778
2779
PRIntervalTime _PR_UNIX_TicksPerSecond() {
2780
  return 1000; /* this needs some work :) */
2781
}
2782
#endif
2783
2784
#if defined(_PR_HAVE_CLOCK_MONOTONIC)
2785
0
PRIntervalTime _PR_UNIX_GetInterval2() {
2786
0
  struct timespec time;
2787
0
  PRIntervalTime ticks;
2788
2789
0
  if (clock_gettime(CLOCK_MONOTONIC, &time) != 0) {
2790
0
    fprintf(stderr, "clock_gettime failed: %d\n", errno);
2791
0
    abort();
2792
0
  }
2793
2794
0
  ticks = (PRUint32)time.tv_sec * PR_MSEC_PER_SEC;
2795
0
  ticks += (PRUint32)time.tv_nsec / PR_NSEC_PER_MSEC;
2796
0
  return ticks;
2797
0
}
2798
2799
0
PRIntervalTime _PR_UNIX_TicksPerSecond2() { return 1000; }
2800
#endif
2801
2802
#if !defined(_PR_PTHREADS)
2803
/*
2804
 * Wait for I/O on multiple descriptors.
2805
 *
2806
 * Return 0 if timed out, return -1 if interrupted,
2807
 * else return the number of ready descriptors.
2808
 */
2809
PRInt32 _PR_WaitForMultipleFDs(_PRUnixPollDesc* unixpds, PRInt32 pdcnt,
                               PRIntervalTime timeout) {
  PRPollQueue pq;
  PRIntn is;
  PRInt32 rv;
  _PRCPU* io_cpu;
  _PRUnixPollDesc *unixpd, *eunixpd;
  PRThread* me = _PR_MD_CURRENT_THREAD();

  PR_ASSERT(!(me->flags & _PR_IDLE_THREAD));

  /* Honor a pending interrupt before blocking at all. */
  if (_PR_PENDING_INTERRUPT(me)) {
    me->flags &= ~_PR_INTERRUPT;
    PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
    return -1;
  }

  pq.pds = unixpds;
  pq.npds = pdcnt;

  /* Interrupts off, then I/O-queue lock, then thread lock — this
   * ordering is relied on throughout the block below. */
  _PR_INTSOFF(is);
  _PR_MD_IOQ_LOCK();
  _PR_THREAD_LOCK(me);

  /* Register this request on the current CPU's I/O queue. */
  pq.thr = me;
  io_cpu = me->cpu;
  pq.on_ioq = PR_TRUE;
  pq.timeout = timeout;
  _PR_ADD_TO_IOQ(pq, me->cpu);

#  if !defined(_PR_USE_POLL)
  /* select() path: merge each descriptor's interest bits into the
   * CPU-wide fd_sets and bump the per-fd reference counts. */
  eunixpd = unixpds + pdcnt;
  for (unixpd = unixpds; unixpd < eunixpd; unixpd++) {
    PRInt32 osfd = unixpd->osfd;
    if (unixpd->in_flags & _PR_UNIX_POLL_READ) {
      FD_SET(osfd, &_PR_FD_READ_SET(me->cpu));
      _PR_FD_READ_CNT(me->cpu)[osfd]++;
    }
    if (unixpd->in_flags & _PR_UNIX_POLL_WRITE) {
      FD_SET(osfd, &_PR_FD_WRITE_SET(me->cpu));
      (_PR_FD_WRITE_CNT(me->cpu))[osfd]++;
    }
    if (unixpd->in_flags & _PR_UNIX_POLL_EXCEPT) {
      FD_SET(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
      (_PR_FD_EXCEPTION_CNT(me->cpu))[osfd]++;
    }
    if (osfd > _PR_IOQ_MAX_OSFD(me->cpu)) {
      _PR_IOQ_MAX_OSFD(me->cpu) = osfd;
    }
  }
#  endif /* !defined(_PR_USE_POLL) */

  /* Shrink the CPU's wakeup timeout if this request expires sooner. */
  if (_PR_IOQ_TIMEOUT(me->cpu) > timeout) {
    _PR_IOQ_TIMEOUT(me->cpu) = timeout;
  }

  _PR_IOQ_OSFD_CNT(me->cpu) += pdcnt;

  /* Park this thread on the sleep queue and block in the MD layer. */
  _PR_SLEEPQ_LOCK(me->cpu);
  _PR_ADD_SLEEPQ(me, timeout);
  me->state = _PR_IO_WAIT;
  me->io_pending = PR_TRUE;
  me->io_suspended = PR_FALSE;
  _PR_SLEEPQ_UNLOCK(me->cpu);
  _PR_THREAD_UNLOCK(me);
  _PR_MD_IOQ_UNLOCK();

  _PR_MD_WAIT(me, timeout);

  me->io_pending = PR_FALSE;
  me->io_suspended = PR_FALSE;

  /*
   * This thread should run on the same cpu on which it was blocked; when
   * the IO request times out the fd sets and fd counts for the
   * cpu are updated below.
   */
  PR_ASSERT(me->cpu == io_cpu);

  /*
  ** If we timed out the pollq might still be on the ioq. Remove it
  ** before continuing.
  */
  if (pq.on_ioq) {
    _PR_MD_IOQ_LOCK();
    /*
     * Need to check pq.on_ioq again (it may have been dequeued between
     * the unlocked test above and acquiring the I/O-queue lock).
     */
    if (pq.on_ioq) {
      PR_REMOVE_LINK(&pq.links);
#  ifndef _PR_USE_POLL
      /* Undo the fd_set bookkeeping performed before blocking. */
      eunixpd = unixpds + pdcnt;
      for (unixpd = unixpds; unixpd < eunixpd; unixpd++) {
        PRInt32 osfd = unixpd->osfd;
        PRInt16 in_flags = unixpd->in_flags;

        if (in_flags & _PR_UNIX_POLL_READ) {
          if (--(_PR_FD_READ_CNT(me->cpu))[osfd] == 0) {
            FD_CLR(osfd, &_PR_FD_READ_SET(me->cpu));
          }
        }
        if (in_flags & _PR_UNIX_POLL_WRITE) {
          if (--(_PR_FD_WRITE_CNT(me->cpu))[osfd] == 0) {
            FD_CLR(osfd, &_PR_FD_WRITE_SET(me->cpu));
          }
        }
        if (in_flags & _PR_UNIX_POLL_EXCEPT) {
          if (--(_PR_FD_EXCEPTION_CNT(me->cpu))[osfd] == 0) {
            FD_CLR(osfd, &_PR_FD_EXCEPTION_SET(me->cpu));
          }
        }
      }
#  endif /* _PR_USE_POLL */
      PR_ASSERT(pq.npds == pdcnt);
      _PR_IOQ_OSFD_CNT(me->cpu) -= pdcnt;
      PR_ASSERT(_PR_IOQ_OSFD_CNT(me->cpu) >= 0);
    }
    _PR_MD_IOQ_UNLOCK();
  }
  /* XXX Should we use _PR_FAST_INTSON or _PR_INTSON? */
  if (1 == pdcnt) {
    _PR_FAST_INTSON(is);
  } else {
    _PR_INTSON(is);
  }

  /* An interrupt may have arrived while we were blocked. */
  if (_PR_PENDING_INTERRUPT(me)) {
    me->flags &= ~_PR_INTERRUPT;
    PR_SetError(PR_PENDING_INTERRUPT_ERROR, 0);
    return -1;
  }

  /* pq.on_ioq == PR_FALSE means the poller dequeued us: some
   * descriptors are ready.  Otherwise we timed out and rv stays 0. */
  rv = 0;
  if (pq.on_ioq == PR_FALSE) {
    /* Count the number of ready descriptors */
    while (--pdcnt >= 0) {
      if (unixpds->out_flags != 0) {
        rv++;
      }
      unixpds++;
    }
  }

  return rv;
}
2954
2955
/*
2956
 * Unblock threads waiting for I/O
2957
 *    used when interrupting threads
2958
 *
2959
 * NOTE: The thread lock should be held when this function is called.
2960
 * On return, the thread lock is released.
2961
 */
2962
void _PR_Unblock_IO_Wait(PRThread* thr) {
  int pri = thr->priority;
  _PRCPU* cpu = thr->cpu;

  /*
   * GLOBAL threads wakeup periodically to check for interrupt
   */
  if (_PR_IS_NATIVE_THREAD(thr)) {
    _PR_THREAD_UNLOCK(thr);
    return;
  }

  PR_ASSERT(thr->flags & (_PR_ON_SLEEPQ | _PR_ON_PAUSEQ));
  /* Pull the thread off its CPU's sleep queue... */
  _PR_SLEEPQ_LOCK(cpu);
  _PR_DEL_SLEEPQ(thr, PR_TRUE);
  _PR_SLEEPQ_UNLOCK(cpu);

  PR_ASSERT(!(thr->flags & _PR_IDLE_THREAD));
  /* ...mark it runnable, requeue it at its priority, then release the
   * thread lock (as documented above) and wake the CPU. */
  thr->state = _PR_RUNNABLE;
  _PR_RUNQ_LOCK(cpu);
  _PR_ADD_RUNQ(thr, cpu, pri);
  _PR_RUNQ_UNLOCK(cpu);
  _PR_THREAD_UNLOCK(thr);
  _PR_MD_WAKEUP_WAITER(thr);
}
2987
#endif /* !defined(_PR_PTHREADS) */
2988
2989
/*
2990
 * When a nonblocking connect has completed, determine whether it
2991
 * succeeded or failed, and if it failed, what the error code is.
2992
 *
2993
 * The function returns the error code.  An error code of 0 means
2994
 * that the nonblocking connect succeeded.
2995
 */
2996
2997
0
int _MD_unix_get_nonblocking_connect_error(int osfd) {
#if defined(NTO)
  /* Neutrino does not support the SO_ERROR socket option */
  PRInt32 rv;
  PRNetAddr addr;
  _PRSockLen_t addrlen = sizeof(addr);

  /* Test to see if we are using the Tiny TCP/IP Stack or the Full one. */
  struct statvfs superblock;
  rv = fstatvfs(osfd, &superblock);
  if (rv == 0) {
    if (strcmp(superblock.f_basetype, "ttcpip") == 0) {
      /* Using the Tiny Stack! */
      /*
       * The tiny stack lacks SO_ERROR, so probe with getpeername():
       * success means the connect completed.
       */
      rv = getpeername(osfd, (struct sockaddr*)&addr, (_PRSockLen_t*)&addrlen);
      if (rv == -1) {
        int errno_copy = errno; /* make a copy so I don't
                                 * accidentally reset */

        if (errno_copy == ENOTCONN) {
          struct stat StatInfo;
          rv = fstat(osfd, &StatInfo);
          if (rv == 0) {
            time_t current_time = time(NULL);

            /*
             * this is a real hack, can't explain why it
             * works it just does
             */
            /* Recent access time is taken to mean an active refusal;
             * an old one is taken to mean the attempt timed out. */
            if (abs(current_time - StatInfo.st_atime) < 5) {
              return ECONNREFUSED;
            } else {
              return ETIMEDOUT;
            }
          } else {
            return ECONNREFUSED;
          }
        } else {
          return errno_copy;
        }
      } else {
        /* No Error */
        return 0;
      }
    } else {
      /* Have the FULL Stack which supports SO_ERROR */
      /* Hasn't been written yet, never been tested! */
      /* Jerry.Kirk@Nexwarecorp.com */

      int err;
      _PRSockLen_t optlen = sizeof(err);

      if (getsockopt(osfd, SOL_SOCKET, SO_ERROR, (char*)&err, &optlen) == -1) {
        return errno;
      } else {
        return err;
      }
    }
  } else {
    return ECONNREFUSED;
  }
#else
  /* Standard path: SO_ERROR yields (and clears) the pending socket error. */
  int err;
  _PRSockLen_t optlen = sizeof(err);
  if (getsockopt(osfd, SOL_SOCKET, SO_ERROR, (char*)&err, &optlen) == -1) {
    /* getsockopt itself failed; report that errno instead. */
    return errno;
  }
  return err;

#endif
}
3067
3068
/************************************************************************/
3069
3070
/*
3071
** Special hacks for xlib. Xlib/Xt/Xm is not re-entrant nor is it thread
3072
** safe.  Unfortunately, neither is mozilla. To make these programs work
3073
** in a pre-emptive threaded environment, we need to use a lock.
3074
*/
3075
3076
0
void _PR_XLock(void) {
  /* Serialize access to the non-reentrant Xlib/Xt/Xm libraries. */
  PR_EnterMonitor(_pr_Xfe_mon);
}
3077
3078
0
void _PR_XUnlock(void) {
  /* Release the monitor guarding the Xlib/Xt/Xm libraries. */
  PR_ExitMonitor(_pr_Xfe_mon);
}
3079
3080
0
PRBool _PR_XIsLocked(void) {
3081
0
  return (PR_InMonitor(_pr_Xfe_mon)) ? PR_TRUE : PR_FALSE;
3082
0
}
3083
3084
#if defined(HAVE_FCNTL_FILE_LOCKING)
3085
3086
0
/*
 * fcntl()-based advisory file locking.
 *
 * The three public operations were triplicated copies of the same code;
 * they share identical struct flock setup, so it is factored into one
 * helper.  The lock always covers the whole file (l_start = 0 with
 * l_len = 0 means "from start until EOF").
 *
 * Returns PR_SUCCESS on success; otherwise maps errno via
 * _PR_MD_MAP_FLOCK_ERROR and returns PR_FAILURE.
 */
static PRStatus _MD_FcntlLockOp(PRInt32 f, int cmd, short type) {
  PRInt32 rv;
  struct flock arg;

  arg.l_type = type;
  arg.l_whence = SEEK_SET;
  arg.l_start = 0;
  arg.l_len = 0; /* until EOF */
  rv = fcntl(f, cmd, &arg);
  if (rv == 0) {
    return PR_SUCCESS;
  }
  _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
  return PR_FAILURE;
}

/* Acquire an exclusive (write) lock on the whole file, blocking. */
PRStatus _MD_LockFile(PRInt32 f) {
  return _MD_FcntlLockOp(f, F_SETLKW, F_WRLCK);
}

/* Try to acquire an exclusive lock without blocking. */
PRStatus _MD_TLockFile(PRInt32 f) {
  return _MD_FcntlLockOp(f, F_SETLK, F_WRLCK);
}

/* Release the lock held on the whole file. */
PRStatus _MD_UnlockFile(PRInt32 f) {
  return _MD_FcntlLockOp(f, F_SETLK, F_UNLCK);
}
3133
3134
#elif defined(HAVE_BSD_FLOCK)
3135
3136
#  include <sys/file.h>
3137
3138
/* Acquire an exclusive whole-file lock via BSD flock(), blocking. */
PRStatus _MD_LockFile(PRInt32 f) {
  if (flock(f, LOCK_EX) == 0) {
    return PR_SUCCESS;
  }
  _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
  return PR_FAILURE;
}

/* Try to acquire an exclusive flock() lock without blocking. */
PRStatus _MD_TLockFile(PRInt32 f) {
  if (flock(f, LOCK_EX | LOCK_NB) == 0) {
    return PR_SUCCESS;
  }
  _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
  return PR_FAILURE;
}

/* Release a lock previously acquired with flock(). */
PRStatus _MD_UnlockFile(PRInt32 f) {
  if (flock(f, LOCK_UN) == 0) {
    return PR_SUCCESS;
  }
  _PR_MD_MAP_FLOCK_ERROR(_MD_ERRNO());
  return PR_FAILURE;
}
3167
#else
3168
3169
/* Acquire a whole-file lock via lockf(), blocking until available. */
PRStatus _MD_LockFile(PRInt32 f) {
  if (lockf(f, F_LOCK, 0) == 0) {
    return PR_SUCCESS;
  }
  _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO());
  return PR_FAILURE;
}

/* Try to acquire a lockf() lock without blocking. */
PRStatus _MD_TLockFile(PRInt32 f) {
  if (lockf(f, F_TLOCK, 0) == 0) {
    return PR_SUCCESS;
  }
  _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO());
  return PR_FAILURE;
}

/* Release a lock previously acquired with lockf(). */
PRStatus _MD_UnlockFile(PRInt32 f) {
  if (lockf(f, F_ULOCK, 0) == 0) {
    return PR_SUCCESS;
  }
  _PR_MD_MAP_LOCKF_ERROR(_MD_ERRNO());
  return PR_FAILURE;
}
3198
#endif
3199
3200
0
/*
 * Thin wrapper over gethostname() that maps a failure's errno into the
 * NSPR error space.  Returns PR_SUCCESS or PR_FAILURE.
 */
PRStatus _MD_gethostname(char* name, PRUint32 namelen) {
  if (gethostname(name, namelen) != 0) {
    _PR_MD_MAP_GETHOSTNAME_ERROR(_MD_ERRNO());
    return PR_FAILURE;
  }
  return PR_SUCCESS;
}
3210
3211
0
/*
 * Copy one uname() field (OS name, release, or build/version string)
 * into the caller's buffer, NUL-terminated and truncated to namelen.
 * Returns PR_FAILURE if uname() fails or cmd is unrecognized.
 */
PRStatus _MD_getsysinfo(PRSysInfo cmd, char* name, PRUint32 namelen) {
  struct utsname info;

  PR_ASSERT((cmd == PR_SI_SYSNAME) || (cmd == PR_SI_RELEASE) ||
            (cmd == PR_SI_RELEASE_BUILD));

  if (uname(&info) == -1) {
    _PR_MD_MAP_DEFAULT_ERROR(errno);
    return PR_FAILURE;
  }
  /*
   * BUGFIX: the utsname strings come from the OS and were previously
   * passed as the *format* argument of PR_snprintf; a '%' in any of them
   * would have been interpreted as a conversion specifier (undefined
   * behavior / garbage output).  Always format through "%s".
   */
  if (PR_SI_SYSNAME == cmd) {
    (void)PR_snprintf(name, namelen, "%s", info.sysname);
  } else if (PR_SI_RELEASE == cmd) {
    (void)PR_snprintf(name, namelen, "%s", info.release);
  } else if (PR_SI_RELEASE_BUILD == cmd) {
    (void)PR_snprintf(name, namelen, "%s", info.version);
  } else {
    return PR_FAILURE;
  }
  return PR_SUCCESS;
}
3232
3233
/*
3234
 *******************************************************************
3235
 *
3236
 * Memory-mapped files
3237
 *
3238
 *******************************************************************
3239
 */
3240
3241
0
PRStatus _MD_CreateFileMap(PRFileMap* fmap, PRInt64 size) {
  /*
   * Prepare fmap for mmap(): grow the underlying file to `size` bytes if
   * needed, then translate the PR_PROT_* mode into mmap prot/flags stored
   * in fmap->md.  Returns PR_SUCCESS or PR_FAILURE.
   */
  PRFileInfo info;
  PRUint32 sz;

  /* NOTE(review): LL_L2UI narrows the 64-bit size to 32 bits; sizes above
   * 4GB are presumably not supported on this path — confirm. */
  LL_L2UI(sz, size);
  if (sz) {
    if (PR_GetOpenFileInfo(fmap->fd, &info) == PR_FAILURE) {
      return PR_FAILURE;
    }
    if (sz > info.size) {
      /*
       * Need to extend the file
       */
      if (fmap->prot != PR_PROT_READWRITE) {
        /* Cannot extend a file we are not allowed to write. */
        PR_SetError(PR_NO_ACCESS_RIGHTS_ERROR, 0);
        return PR_FAILURE;
      }
      /* Seek to the last requested byte and write one byte there; this
       * extends the file to exactly `sz` bytes. */
      if (PR_Seek(fmap->fd, sz - 1, PR_SEEK_SET) == -1) {
        return PR_FAILURE;
      }
      if (PR_Write(fmap->fd, "", 1) != 1) {
        return PR_FAILURE;
      }
    }
  }
  if (fmap->prot == PR_PROT_READONLY) {
    fmap->md.prot = PROT_READ;
#if defined(DARWIN) || defined(ANDROID)
    /*
     * This is needed on OS X because its implementation of
     * POSIX shared memory returns an error for MAP_PRIVATE, even
     * when the mapping is read-only.
     *
     * And this is needed on Android, because mapping ashmem with
     * MAP_PRIVATE creates a mapping of zeroed memory instead of
     * the shm contents.
     */
    fmap->md.flags = MAP_SHARED;
#else
    fmap->md.flags = MAP_PRIVATE;
#endif
  } else if (fmap->prot == PR_PROT_READWRITE) {
    fmap->md.prot = PROT_READ | PROT_WRITE;
    fmap->md.flags = MAP_SHARED;
  } else {
    /* Copy-on-write: writes are visible to this process only. */
    PR_ASSERT(fmap->prot == PR_PROT_WRITECOPY);
    fmap->md.prot = PROT_READ | PROT_WRITE;
    fmap->md.flags = MAP_PRIVATE;
  }
  return PR_SUCCESS;
}
3292
3293
0
/*
 * Map `len` bytes of fmap's file at `offset` using the prot/flags set up
 * by _MD_CreateFileMap.  Returns the mapped address, or NULL on failure
 * (with the NSPR error set from errno).
 */
void* _MD_MemMap(PRFileMap* fmap, PRInt64 offset, PRUint32 len) {
  PRInt32 off;
  void* addr;

  /* NOTE(review): LL_L2I narrows the 64-bit offset to 32 bits; offsets
   * beyond 2GB are presumably unsupported here — confirm. */
  LL_L2I(off, offset);
  /* Use MAP_FAILED (the documented sentinel) rather than a hand-rolled
   * (void*)-1 cast to detect mmap failure. */
  if ((addr = mmap(0, len, fmap->md.prot, fmap->md.flags,
                   fmap->fd->secret->md.osfd, off)) == MAP_FAILED) {
    _PR_MD_MAP_MMAP_ERROR(_MD_ERRNO());
    addr = NULL;
  }
  return addr;
}
3305
3306
0
/*
 * Unmap a region previously mapped by _MD_MemMap.  Returns PR_SUCCESS on
 * success; otherwise sets the NSPR error from errno and fails.
 */
PRStatus _MD_MemUnmap(void* addr, PRUint32 len) {
  if (munmap(addr, len) != 0) {
    _PR_MD_MAP_DEFAULT_ERROR(errno);
    return PR_FAILURE;
  }
  return PR_SUCCESS;
}
3313
3314
0
/*
 * Dispose of a PRFileMap.  For anonymous file maps this code owns the
 * backing fd and closes it; for ordinary maps the caller keeps ownership
 * of fmap->fd.  On success the fmap structure itself is freed.
 */
PRStatus _MD_CloseFileMap(PRFileMap* fmap) {
  if (PR_TRUE == fmap->md.isAnonFM) {
    PRStatus rc = PR_Close(fmap->fd);
    if (PR_FAILURE == rc) {
      /* BUGFIX: corrected the misspelling "anonymnous" in the log text. */
      PR_LOG(_pr_io_lm, PR_LOG_DEBUG,
             ("_MD_CloseFileMap(): error closing anonymous file map osfd"));
      /* Note: fmap is intentionally NOT freed here, so the caller could
       * retry; it leaks if the caller simply gives up. */
      return PR_FAILURE;
    }
  }
  PR_DELETE(fmap);
  return PR_SUCCESS;
}
3326
3327
0
/*
 * Synchronously flush a mapped region to disk.  The fd parameter is
 * unused on this platform.
 */
PRStatus _MD_SyncMemMap(PRFileDesc* fd, void* addr, PRUint32 len) {
  /* msync(..., MS_SYNC) alone is sufficient to flush modified data to disk
   * synchronously. It is not necessary to call fsync. */
  if (msync(addr, len, MS_SYNC) != 0) {
    _PR_MD_MAP_DEFAULT_ERROR(errno);
    return PR_FAILURE;
  }
  return PR_SUCCESS;
}
3336
3337
#if defined(_PR_NEED_FAKE_POLL)
3338
3339
/*
3340
 * Some platforms don't have poll().  For easier porting of code
3341
 * that calls poll(), we emulate poll() using select().
3342
 */
3343
3344
/*
 * Emulate poll() on top of select() for platforms lacking poll().
 * Maps poll events onto the three select fd_sets, runs select, then
 * translates the ready sets back into revents.  timeout is in
 * milliseconds; -1 means wait forever.
 */
int poll(struct pollfd* filedes, unsigned long nfds, int timeout) {
  int i; /* NOTE(review): int index compared against unsigned long nfds;
          * fine for FD_SETSIZE-scale counts but mixes signedness. */
  int rv;
  int maxfd;
  fd_set rd, wr, ex;
  struct timeval tv, *tvp;

  if (timeout < 0 && timeout != -1) {
    errno = EINVAL;
    return -1;
  }

  /* Convert the millisecond timeout into select()'s struct timeval. */
  if (timeout == -1) {
    tvp = NULL;
  } else {
    tv.tv_sec = timeout / 1000;
    tv.tv_usec = (timeout % 1000) * 1000;
    tvp = &tv;
  }

  maxfd = -1;
  FD_ZERO(&rd);
  FD_ZERO(&wr);
  FD_ZERO(&ex);

  for (i = 0; i < nfds; i++) {
    int osfd = filedes[i].fd;
    int events = filedes[i].events;
    PRBool fdHasEvent = PR_FALSE;

    /* select() cannot represent fds >= FD_SETSIZE; such entries (and
     * negative fds) are skipped here and flagged POLLNVAL below. */
    PR_ASSERT(osfd < FD_SETSIZE);
    if (osfd < 0 || osfd >= FD_SETSIZE) {
      continue; /* Skip this osfd. */
    }

    /*
     * Map the poll events to the select fd_sets.
     *     POLLIN, POLLRDNORM  ===> readable
     *     POLLOUT, POLLWRNORM ===> writable
     *     POLLPRI, POLLRDBAND ===> exception
     *     POLLNORM, POLLWRBAND (and POLLMSG on some platforms)
     *     are ignored.
     *
     * The output events POLLERR and POLLHUP are never turned on.
     * POLLNVAL may be turned on.
     */

    if (events & (POLLIN | POLLRDNORM)) {
      FD_SET(osfd, &rd);
      fdHasEvent = PR_TRUE;
    }
    if (events & (POLLOUT | POLLWRNORM)) {
      FD_SET(osfd, &wr);
      fdHasEvent = PR_TRUE;
    }
    if (events & (POLLPRI | POLLRDBAND)) {
      FD_SET(osfd, &ex);
      fdHasEvent = PR_TRUE;
    }
    /* Track the highest fd actually registered, for select's nfds arg. */
    if (fdHasEvent && osfd > maxfd) {
      maxfd = osfd;
    }
  }

  rv = select(maxfd + 1, &rd, &wr, &ex, tvp);

  /* Compute poll results */
  if (rv > 0) {
    /* select reports a set count; recount in poll terms (one per fd
     * with any revents), only echoing events the caller asked for. */
    rv = 0;
    for (i = 0; i < nfds; i++) {
      PRBool fdHasEvent = PR_FALSE;

      filedes[i].revents = 0;
      if (filedes[i].fd < 0) {
        continue;
      }
      if (filedes[i].fd >= FD_SETSIZE) {
        /* NOTE(review): POLLNVAL is set but not counted into rv here —
         * looks intentional (select never reported this fd), verify. */
        filedes[i].revents |= POLLNVAL;
        continue;
      }
      if (FD_ISSET(filedes[i].fd, &rd)) {
        if (filedes[i].events & POLLIN) {
          filedes[i].revents |= POLLIN;
        }
        if (filedes[i].events & POLLRDNORM) {
          filedes[i].revents |= POLLRDNORM;
        }
        fdHasEvent = PR_TRUE;
      }
      if (FD_ISSET(filedes[i].fd, &wr)) {
        if (filedes[i].events & POLLOUT) {
          filedes[i].revents |= POLLOUT;
        }
        if (filedes[i].events & POLLWRNORM) {
          filedes[i].revents |= POLLWRNORM;
        }
        fdHasEvent = PR_TRUE;
      }
      if (FD_ISSET(filedes[i].fd, &ex)) {
        if (filedes[i].events & POLLPRI) {
          filedes[i].revents |= POLLPRI;
        }
        if (filedes[i].events & POLLRDBAND) {
          filedes[i].revents |= POLLRDBAND;
        }
        fdHasEvent = PR_TRUE;
      }
      if (fdHasEvent) {
        rv++;
      }
    }
    PR_ASSERT(rv > 0);
  } else if (rv == -1 && errno == EBADF) {
    /* select rejects the whole call on a bad fd; poll instead marks the
     * offending entries POLLNVAL.  Probe each fd with F_GETFL to find
     * the invalid ones. */
    rv = 0;
    for (i = 0; i < nfds; i++) {
      filedes[i].revents = 0;
      if (filedes[i].fd < 0) {
        continue;
      }
      if (fcntl(filedes[i].fd, F_GETFL, 0) == -1) {
        filedes[i].revents = POLLNVAL;
        rv++;
      }
    }
    PR_ASSERT(rv > 0);
  }
  /* With an infinite timeout select must not return 0 (no timeout). */
  PR_ASSERT(-1 != timeout || rv != 0);

  return rv;
}
3474
#endif /* _PR_NEED_FAKE_POLL */