/src/nspr/pr/src/io/prfdcach.c
Line | Count | Source |
1 | | /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
3 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
4 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
5 | | |
6 | | #include "primpl.h" |
7 | | |
8 | | #include <string.h> |
9 | | |
10 | | /*****************************************************************************/ |
11 | | /*****************************************************************************/ |
12 | | /************************** File descriptor caching **************************/ |
13 | | /*****************************************************************************/ |
14 | | /*****************************************************************************/ |
15 | | |
16 | | /* |
17 | | ** This code is built into debuggable versions of NSPR to assist in |
18 | | ** finding misused file descriptors. Since file descriptors (PRFileDesc) |
19 | | ** are identified by a pointer to their structure, they can be the |
20 | | ** target of dangling references. Furthermore, NSPR caches and tries |
21 | | ** to aggressively reuse file descriptors, leading to more ambiguity. |
22 | | ** The following code will allow a debugging client to set environment |
23 | | ** variables and control the number of file descriptors that will be |
24 | | ** preserved before they are recycled. The environment variables are |
25 | | ** NSPR_FD_CACHE_SIZE_LOW and NSPR_FD_CACHE_SIZE_HIGH. The former sets |
26 | | ** the number of descriptors NSPR will allocate before beginning to |
27 | | ** recycle. The latter is the maximum number permitted in the cache |
28 | | ** (exclusive of those in use) at a time. |
29 | | */ |
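A minimal sketch of how a debugging client could seed these variables before NSPR initializes (an illustration, not part of this file; setenv() assumes a POSIX platform, and the function name and the limits 16/256 are hypothetical). _PR_InitFdCache() below reads the values via PR_GetEnv() during NSPR's implicit initialization, so this must run before the first NSPR call:

#include <stdlib.h> /* setenv(); POSIX-specific, illustration only */

static void enable_fd_cache_debugging(void) {
  /* Must run before the first NSPR call so that _PR_InitFdCache() sees
  ** the values when implicit initialization reads the environment. */
  setenv("NSPR_FD_CACHE_SIZE_LOW", "16", 1);   /* allocate 16 descriptors before recycling begins */
  setenv("NSPR_FD_CACHE_SIZE_HIGH", "256", 1); /* keep at most 256 idle descriptors cached */
}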
30 | | typedef struct _PR_Fd_Cache { |
31 | | PRLock* ml; |
32 | | PRIntn count; |
33 | | PRFileDesc *head, *tail; |
34 | | PRIntn limit_low, limit_high; |
35 | | } _PR_Fd_Cache; |
36 | | |
37 | | static _PR_Fd_Cache _pr_fd_cache; |
38 | | |
39 | | /* |
40 | | ** Get a FileDescriptor from the cache if one exists. If not, allocate |
41 | | ** a new one from the heap. |
42 | | */ |
43 | 0 | PRFileDesc* _PR_Getfd(void) { |
44 | 0 | PRFileDesc* fd; |
45 | | /* |
46 | | ** $$$ |
47 | | ** This may look a little wasteful. We'll see. Right now I want to |
48 | | ** be able to toggle between caching and not at runtime to measure |
49 | | ** the differences. If it isn't too annoying, I'll leave it in. |
50 | | ** $$$$ |
51 | | ** |
52 | | ** The test is against _pr_fd_cache.limit_high. If that's zero, |
53 | | ** we're not doing the extended cache but going for performance. |
54 | | */ |
55 | 0 | if (0 == _pr_fd_cache.limit_high) { |
56 | 0 | goto allocate; |
57 | 0 | } else { |
58 | 0 | do { |
59 | 0 | if (NULL == _pr_fd_cache.head) { |
60 | 0 | goto allocate; /* nothing there */ |
61 | 0 | } |
62 | 0 | if (_pr_fd_cache.count < _pr_fd_cache.limit_low) { |
63 | 0 | goto allocate; |
64 | 0 | } |
65 | | |
66 | | /* we "should" be able to extract an fd from the cache */ |
67 | 0 | PR_Lock(_pr_fd_cache.ml); /* need the lock to do this safely */ |
68 | 0 | fd = _pr_fd_cache.head; /* protected extraction */ |
69 | 0 | if (NULL == fd) /* unexpected, but not fatal */ |
70 | 0 | { |
71 | 0 | PR_ASSERT(0 == _pr_fd_cache.count); |
72 | 0 | PR_ASSERT(NULL == _pr_fd_cache.tail); |
73 | 0 | } else { |
74 | 0 | _pr_fd_cache.count -= 1; |
75 | 0 | _pr_fd_cache.head = fd->higher; |
76 | 0 | if (NULL == _pr_fd_cache.head) { |
77 | 0 | PR_ASSERT(0 == _pr_fd_cache.count); |
78 | 0 | _pr_fd_cache.tail = NULL; |
79 | 0 | } |
80 | 0 | PR_ASSERT(&_pr_faulty_methods == fd->methods); |
81 | 0 | PR_ASSERT(PR_INVALID_IO_LAYER == fd->identity); |
82 | 0 | PR_ASSERT(_PR_FILEDESC_FREED == fd->secret->state); |
83 | 0 | } |
84 | 0 | PR_Unlock(_pr_fd_cache.ml); |
85 | |
86 | 0 | } while (NULL == fd); /* then go around and allocate a new one */ |
87 | 0 | } |
88 | | |
89 | 0 | finished: |
90 | 0 | fd->dtor = NULL; |
91 | 0 | fd->lower = fd->higher = NULL; |
92 | 0 | fd->identity = PR_NSPR_IO_LAYER; |
93 | 0 | memset(fd->secret, 0, sizeof(PRFilePrivate)); |
94 | 0 | return fd; |
95 | | |
96 | 0 | allocate: |
97 | 0 | fd = PR_NEW(PRFileDesc); |
98 | 0 | if (NULL != fd) { |
99 | 0 | fd->secret = PR_NEW(PRFilePrivate); |
100 | 0 | if (NULL == fd->secret) { |
101 | 0 | PR_DELETE(fd); |
102 | 0 | } |
103 | 0 | } |
104 | 0 | if (NULL != fd) { |
105 | 0 | goto finished; |
106 | 0 | } else { |
107 | 0 | return NULL; |
108 | 0 | } |
109 | |
110 | 0 | } /* _PR_Getfd */ |
111 | | |
112 | | /* |
113 | | ** Return a file descriptor to the cache unless there are too many in |
114 | | ** there already. If put in cache, clear the fields first. |
115 | | */ |
116 | 0 | void _PR_Putfd(PRFileDesc* fd) { |
117 | 0 | PR_ASSERT(PR_NSPR_IO_LAYER == fd->identity); |
118 | 0 | fd->methods = &_pr_faulty_methods; |
119 | 0 | fd->identity = PR_INVALID_IO_LAYER; |
120 | 0 | fd->secret->state = _PR_FILEDESC_FREED; |
121 | |
122 | 0 | if (0 != _pr_fd_cache.limit_high) { |
123 | 0 | if (_pr_fd_cache.count < _pr_fd_cache.limit_high) { |
124 | 0 | PR_Lock(_pr_fd_cache.ml); |
125 | 0 | if (NULL == _pr_fd_cache.tail) { |
126 | 0 | PR_ASSERT(0 == _pr_fd_cache.count); |
127 | 0 | PR_ASSERT(NULL == _pr_fd_cache.head); |
128 | 0 | _pr_fd_cache.head = _pr_fd_cache.tail = fd; |
129 | 0 | } else { |
130 | 0 | PR_ASSERT(NULL == _pr_fd_cache.tail->higher); |
131 | 0 | _pr_fd_cache.tail->higher = fd; |
132 | 0 | _pr_fd_cache.tail = fd; /* new value */ |
133 | 0 | } |
134 | 0 | fd->higher = NULL; /* always so */ |
135 | 0 | _pr_fd_cache.count += 1; /* count the new entry */ |
136 | 0 | PR_Unlock(_pr_fd_cache.ml); |
137 | 0 | return; |
138 | 0 | } |
139 | 0 | } |
140 | | |
141 | 0 | PR_Free(fd->secret); |
142 | 0 | PR_Free(fd); |
143 | 0 | } /* _PR_Putfd */ |
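Taken together, _PR_Getfd and _PR_Putfd maintain a singly linked FIFO threaded through fd->higher: _PR_Putfd appends at the tail and _PR_Getfd removes from the head, so the descriptor that has been idle longest is recycled first, which maximizes the window for catching dangling references. A simplified standalone model of that list discipline (an illustration with hypothetical Node/Queue types, not NSPR code):

#include <stddef.h>

typedef struct Node { struct Node* higher; } Node;            /* "higher" is the next link */
typedef struct Queue { Node *head, *tail; int count; } Queue;

static void put_node(Queue* q, Node* n) { /* mirrors _PR_Putfd's append at the tail */
  n->higher = NULL;
  if (NULL == q->tail) {
    q->head = q->tail = n;
  } else {
    q->tail->higher = n;
    q->tail = n;
  }
  q->count += 1;
}

static Node* get_node(Queue* q) { /* mirrors _PR_Getfd's removal from the head */
  Node* n = q->head;
  if (NULL != n) {
    q->head = n->higher;
    if (NULL == q->head) {
      q->tail = NULL;
    }
    q->count -= 1;
  }
  return n;
}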
144 | | |
145 | 0 | PR_IMPLEMENT(PRStatus) PR_SetFDCacheSize(PRIntn low, PRIntn high) { |
146 | | /* |
147 | | ** This can be called at any time, may adjust the cache sizes, |
148 | | ** turn the caches off, or turn them on. It is not dependent |
149 | | ** on the compilation setting of DEBUG. |
150 | | */ |
151 | 0 | if (!_pr_initialized) { |
152 | 0 | _PR_ImplicitInitialization(); |
153 | 0 | } |
154 | |
155 | 0 | if (low > high) { |
156 | 0 | low = high; /* sanity check the params */ |
157 | 0 | } |
158 | |
159 | 0 | PR_Lock(_pr_fd_cache.ml); |
160 | 0 | _pr_fd_cache.limit_high = high; |
161 | 0 | _pr_fd_cache.limit_low = low; |
162 | 0 | PR_Unlock(_pr_fd_cache.ml); |
163 | 0 | return PR_SUCCESS; |
164 | 0 | } /* PR_SetFDCacheSize */ |
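A hedged usage sketch of this API (the toggle_fd_cache name and the limit values are hypothetical; "nspr.h" is assumed to pull in the PR_SetFDCacheSize declaration):

#include "nspr.h"

static void toggle_fd_cache(void) {
  PR_SetFDCacheSize(0, 512); /* recycle immediately, keep up to 512 idle descriptors */
  PR_SetFDCacheSize(0, 0);   /* a zero high-water mark turns the extended cache off */
}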
165 | | |
166 | 0 | void _PR_InitFdCache(void) { |
167 | | /* |
168 | | ** The fd caching is enabled by default for DEBUG builds, |
169 | | ** disabled by default for OPT builds. That default can |
170 | | ** be overridden at runtime using environment variables |
171 | | ** or a super-wiz-bang API. |
172 | | */ |
173 | 0 | const char* low = PR_GetEnv("NSPR_FD_CACHE_SIZE_LOW"); |
174 | 0 | const char* high = PR_GetEnv("NSPR_FD_CACHE_SIZE_HIGH"); |
175 | | |
176 | | /* |
177 | | ** _low is allowed to be zero, _high is not. |
178 | | ** If _high is zero, we're not doing the caching. |
179 | | */ |
180 | |
181 | 0 | _pr_fd_cache.limit_low = 0; |
182 | 0 | #if defined(DEBUG) |
183 | 0 | _pr_fd_cache.limit_high = FD_SETSIZE; |
184 | | #else |
185 | | _pr_fd_cache.limit_high = 0; |
186 | | #endif /* defined(DEBUG) */ |
187 | |
188 | 0 | if (NULL != low) { |
189 | 0 | _pr_fd_cache.limit_low = atoi(low); |
190 | 0 | } |
191 | 0 | if (NULL != high) { |
192 | 0 | _pr_fd_cache.limit_high = atoi(high); |
193 | 0 | } |
194 | |
195 | 0 | if (_pr_fd_cache.limit_low < 0) { |
196 | 0 | _pr_fd_cache.limit_low = 0; |
197 | 0 | } |
198 | 0 | if (_pr_fd_cache.limit_low > FD_SETSIZE) { |
199 | 0 | _pr_fd_cache.limit_low = FD_SETSIZE; |
200 | 0 | } |
201 | |
202 | 0 | if (_pr_fd_cache.limit_high > FD_SETSIZE) { |
203 | 0 | _pr_fd_cache.limit_high = FD_SETSIZE; |
204 | 0 | } |
205 | |
206 | 0 | if (_pr_fd_cache.limit_high < _pr_fd_cache.limit_low) { |
207 | 0 | _pr_fd_cache.limit_high = _pr_fd_cache.limit_low; |
208 | 0 | } |
209 | |
210 | 0 | _pr_fd_cache.ml = PR_NewLock(); |
211 | 0 | PR_ASSERT(NULL != _pr_fd_cache.ml); |
212 | |
213 | 0 | } /* _PR_InitFdCache */ |
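As a worked example of the clamping above (hypothetical values, and assuming FD_SETSIZE is 1024 on the platform): NSPR_FD_CACHE_SIZE_LOW=2048 and NSPR_FD_CACHE_SIZE_HIGH=100 first clamp limit_low down to 1024, leave limit_high at 100, and finally raise limit_high to 1024, since it may not be smaller than limit_low.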
214 | | |
215 | 0 | void _PR_CleanupFdCache(void) { |
216 | 0 | PRFileDesc *fd, *next; |
217 | |
218 | 0 | for (fd = _pr_fd_cache.head; fd != NULL; fd = next) { |
219 | 0 | next = fd->higher; |
220 | 0 | PR_DELETE(fd->secret); |
221 | 0 | PR_DELETE(fd); |
222 | 0 | } |
223 | 0 | _pr_fd_cache.head = NULL; |
224 | 0 | _pr_fd_cache.tail = NULL; |
225 | 0 | _pr_fd_cache.count = 0; |
226 | 0 | PR_DestroyLock(_pr_fd_cache.ml); |
227 | 0 | _pr_fd_cache.ml = NULL; |
228 | 0 | } /* _PR_CleanupFdCache */ |
229 | | |
230 | | /* prfdcach.c */ |