/src/open62541/arch/common/timer.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
2 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
3 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. |
4 | | * |
5 | | * Copyright 2017, 2018, 2021 (c) Fraunhofer IOSB (Author: Julius Pfrommer) |
6 | | * Copyright 2017 (c) Stefan Profanter, fortiss GmbH |
7 | | */ |
8 | | |
9 | | #include "timer.h" |
10 | | |
11 | | static enum ZIP_CMP |
12 | 0 | cmpDateTime(const UA_DateTime *a, const UA_DateTime *b) { |
13 | 0 | if(*a == *b) |
14 | 0 | return ZIP_CMP_EQ; |
15 | 0 | return (*a < *b) ? ZIP_CMP_LESS : ZIP_CMP_MORE; |
16 | 0 | } |
17 | | |
18 | | static enum ZIP_CMP |
19 | 0 | cmpId(const UA_UInt64 *a, const UA_UInt64 *b) { |
20 | 0 | if(*a == *b) |
21 | 0 | return ZIP_CMP_EQ; |
22 | 0 | return (*a < *b) ? ZIP_CMP_LESS : ZIP_CMP_MORE; |
23 | 0 | } |
24 | | |
/* Instantiate the ziptree functions: one tree ordered by the next execution
 * time (for processing) and one ordered by the unique callback id (for
 * lookup/modify/remove) */
ZIP_FUNCTIONS(UA_TimerTree, UA_TimerEntry, treeEntry, UA_DateTime, nextTime, cmpDateTime)
ZIP_FUNCTIONS(UA_TimerIdTree, UA_TimerEntry, idTreeEntry, UA_UInt64, id, cmpId)
27 | | |
28 | | static UA_DateTime |
29 | | calculateNextTime(UA_DateTime currentTime, UA_DateTime baseTime, |
30 | 0 | UA_DateTime interval) { |
31 | | /* Take the difference between current and base time */ |
32 | 0 | UA_DateTime diffCurrentTimeBaseTime = currentTime - baseTime; |
33 | | |
34 | | /* Take modulo of the diff time with the interval. This is the duration we |
35 | | * are already "into" the current interval. Subtract it from (current + |
36 | | * interval) to get the next execution time. */ |
37 | 0 | UA_DateTime cycleDelay = diffCurrentTimeBaseTime % interval; |
38 | | |
39 | | /* Handle the special case where the baseTime is in the future */ |
40 | 0 | if(UA_UNLIKELY(cycleDelay < 0)) |
41 | 0 | cycleDelay += interval; |
42 | |
|
43 | 0 | return currentTime + interval - cycleDelay; |
44 | 0 | } |
45 | | |
46 | | void |
47 | 201 | UA_Timer_init(UA_Timer *t) { |
48 | 201 | memset(t, 0, sizeof(UA_Timer)); |
49 | 201 | UA_LOCK_INIT(&t->timerMutex); |
50 | 201 | } |
51 | | |
/* Search window and result for batching cyclic callbacks (used by
 * batchTimerEntry / findTimer2Batch). ZIP_ITER takes a single context
 * pointer, which already carries the timer entry being batched, so the
 * auxiliary state lives in file-scope variables. They must only be
 * accessed while holding the timerMutex. */
static UA_DateTime earliest, latest, adjustedNextTime;
54 | | |
55 | | static void * |
56 | 0 | findTimer2Batch(void *context, UA_TimerEntry *compare) { |
57 | 0 | UA_TimerEntry *te = (UA_TimerEntry*)context; |
58 | | |
59 | | /* NextTime deviation within interval? */ |
60 | 0 | if(compare->nextTime < earliest || compare->nextTime > latest) |
61 | 0 | return NULL; |
62 | | |
63 | | /* Check if one interval is a multiple of the other */ |
64 | 0 | if(te->interval < compare->interval && compare->interval % te->interval != 0) |
65 | 0 | return NULL; |
66 | 0 | if(te->interval > compare->interval && te->interval % compare->interval != 0) |
67 | 0 | return NULL; |
68 | | |
69 | 0 | adjustedNextTime = compare->nextTime; /* Candidate found */ |
70 | | |
71 | | /* Abort when a perfect match is found */ |
72 | 0 | return (te->interval == compare->interval) ? te : NULL; |
73 | 0 | } |
74 | | |
75 | | /* Adjust the nextTime to batch cyclic callbacks. Look in an interval around the |
76 | | * original nextTime. Deviate from the original nextTime by at most 1/4 of the |
77 | | * interval and at most by 1s. */ |
78 | | static void |
79 | 0 | batchTimerEntry(UA_Timer *t, UA_TimerEntry *te) { |
80 | 0 | if(te->timerPolicy != UA_TIMERPOLICY_CURRENTTIME) |
81 | 0 | return; |
82 | 0 | UA_UInt64 deviate = te->interval / 4; |
83 | 0 | if(deviate > UA_DATETIME_SEC) |
84 | 0 | deviate = UA_DATETIME_SEC; |
85 | 0 | earliest = te->nextTime - deviate; |
86 | 0 | latest = te->nextTime + deviate; |
87 | 0 | adjustedNextTime = te->nextTime; |
88 | 0 | ZIP_ITER(UA_TimerIdTree, &t->idTree, findTimer2Batch, te); |
89 | 0 | te->nextTime = adjustedNextTime; |
90 | 0 | } |
91 | | |
92 | | /* Adding repeated callbacks: Add an entry with the "nextTime" timestamp in the |
93 | | * future. This will be picked up in the next iteration and inserted at the |
94 | | * correct place. So that the next execution takes place ät "nextTime". */ |
95 | | UA_StatusCode |
96 | | UA_Timer_add(UA_Timer *t, UA_ApplicationCallback callback, |
97 | | void *application, void *data, UA_Double interval_ms, |
98 | | UA_DateTime now, UA_DateTime *baseTime, |
99 | 0 | UA_TimerPolicy timerPolicy, UA_UInt64 *callbackId) { |
100 | | /* A callback method needs to be present */ |
101 | 0 | if(!callback) |
102 | 0 | return UA_STATUSCODE_BADINTERNALERROR; |
103 | | |
104 | | /* The interval needs to be positive. The exception is for the "once" policy |
105 | | * where we allow baseTime + interval < now. Then the timer executes once in |
106 | | * the next processing iteration. */ |
107 | 0 | UA_DateTime interval = (UA_DateTime)(interval_ms * UA_DATETIME_MSEC); |
108 | 0 | if(interval <= 0) { |
109 | 0 | if(timerPolicy != UA_TIMERPOLICY_ONCE) |
110 | 0 | return UA_STATUSCODE_BADINTERNALERROR; |
111 | | /* Ensure that (now + interval) == *baseTime for setting nextTime */ |
112 | 0 | if(baseTime) { |
113 | 0 | interval = *baseTime - now; |
114 | 0 | baseTime = NULL; |
115 | 0 | } |
116 | 0 | } |
117 | | |
118 | | /* Compute the first time for execution */ |
119 | 0 | UA_DateTime nextTime = (baseTime == NULL) ? |
120 | 0 | now + interval : calculateNextTime(now, *baseTime, interval); |
121 | | |
122 | | /* Allocate the repeated callback structure */ |
123 | 0 | UA_TimerEntry *te = (UA_TimerEntry*)UA_malloc(sizeof(UA_TimerEntry)); |
124 | 0 | if(!te) |
125 | 0 | return UA_STATUSCODE_BADOUTOFMEMORY; |
126 | | |
127 | | /* Set the repeated callback */ |
128 | 0 | te->interval = interval; |
129 | 0 | te->callback = callback; |
130 | 0 | te->application = application; |
131 | 0 | te->data = data; |
132 | 0 | te->nextTime = nextTime; |
133 | 0 | te->timerPolicy = timerPolicy; |
134 | | |
135 | | /* Adjust the nextTime to batch cyclic callbacks */ |
136 | 0 | batchTimerEntry(t, te); |
137 | | |
138 | | /* Insert into the timer */ |
139 | 0 | UA_LOCK(&t->timerMutex); |
140 | 0 | te->id = ++t->idCounter; |
141 | 0 | if(callbackId) |
142 | 0 | *callbackId = te->id; |
143 | 0 | ZIP_INSERT(UA_TimerTree, &t->tree, te); |
144 | 0 | ZIP_INSERT(UA_TimerIdTree, &t->idTree, te); |
145 | 0 | UA_UNLOCK(&t->timerMutex); |
146 | |
|
147 | 0 | return UA_STATUSCODE_GOOD; |
148 | 0 | } |
149 | | |
/* Change the interval, baseTime and policy of the registered callback with
 * the given id. The callback/application/data of the entry stay untouched.
 * Returns UA_STATUSCODE_BADNOTFOUND if no entry with that id exists. */
UA_StatusCode
UA_Timer_modify(UA_Timer *t, UA_UInt64 callbackId,
                UA_Double interval_ms, UA_DateTime now,
                UA_DateTime *baseTime, UA_TimerPolicy timerPolicy) {
    /* The interval needs to be positive. The exception is for the "once" policy
     * where we allow baseTime + interval < now. Then the timer executes once in
     * the next processing iteration. */
    UA_DateTime interval = (UA_DateTime)(interval_ms * UA_DATETIME_MSEC);
    if(interval <= 0) {
        if(timerPolicy != UA_TIMERPOLICY_ONCE)
            return UA_STATUSCODE_BADINTERNALERROR;
        /* Ensure that (now + interval) == *baseTime for setting nextTime */
        if(baseTime) {
            interval = *baseTime - now;
            baseTime = NULL;
        }
    }

    UA_LOCK(&t->timerMutex);

    /* Find timer entry based on id */
    UA_TimerEntry *te = ZIP_FIND(UA_TimerIdTree, &t->idTree, &callbackId);
    if(!te) {
        UA_UNLOCK(&t->timerMutex);
        return UA_STATUSCODE_BADNOTFOUND;
    }

    /* The entry is either in the timer tree or currently processed. If
     * in-process, the entry is re-added to the timer-tree right after.
     * ZIP_REMOVE returns NULL when the entry was not in the tree. */
    UA_Boolean processing = (ZIP_REMOVE(UA_TimerTree, &t->tree, te) == NULL);

    /* The nextTime must only be modified after ZIP_REMOVE (nextTime is the
     * tree's sort key). The logic is identical to the creation of a new
     * timer. */
    te->nextTime = (baseTime == NULL) ?
        now + interval : calculateNextTime(now, *baseTime, interval);
    te->interval = interval;
    te->timerPolicy = timerPolicy;

    /* Adjust the nextTime to batch cyclic callbacks */
    batchTimerEntry(t, te);

    /* If the entry is being processed, processEntryCallback will add
     * te->interval before re-inserting it; pre-subtract the interval here so
     * the net nextTime is the one computed above. Otherwise insert back into
     * the time-ordered tree directly. */
    if(processing)
        te->nextTime -= interval; /* adjust for re-adding after processing */
    else
        ZIP_INSERT(UA_TimerTree, &t->tree, te);

    UA_UNLOCK(&t->timerMutex);
    return UA_STATUSCODE_GOOD;
}
199 | | |
200 | | void |
201 | 0 | UA_Timer_remove(UA_Timer *t, UA_UInt64 callbackId) { |
202 | 0 | UA_LOCK(&t->timerMutex); |
203 | 0 | UA_TimerEntry *te = ZIP_FIND(UA_TimerIdTree, &t->idTree, &callbackId); |
204 | 0 | if(!te) { |
205 | 0 | UA_UNLOCK(&t->timerMutex); |
206 | 0 | return; |
207 | 0 | } |
208 | | |
209 | | /* The entry is either in the timer tree or in the process tree. If in the |
210 | | * process tree, leave a sentinel (callback == NULL) to delete it during |
211 | | * processing. Do not edit the process tree while iterating over it. */ |
212 | 0 | UA_Boolean processing = (ZIP_REMOVE(UA_TimerTree, &t->tree, te) == NULL); |
213 | 0 | if(!processing) { |
214 | 0 | ZIP_REMOVE(UA_TimerIdTree, &t->idTree, te); |
215 | 0 | UA_free(te); |
216 | 0 | } else { |
217 | 0 | te->callback = NULL; |
218 | 0 | } |
219 | |
|
220 | 0 | UA_UNLOCK(&t->timerMutex); |
221 | 0 | } |
222 | | |
/* Context handed through ZIP_ITER to processEntryCallback: the timer and the
 * "now" timestamp of the current processing run */
struct TimerProcessContext {
    UA_Timer *t;
    UA_DateTime now;
};
227 | | |
/* ZIP_ITER callback for one due timer entry: execute it, then either delete
 * it (sentinel or "once" policy) or compute the next execution time and move
 * it back into the time-ordered tree. Always returns NULL so the iteration
 * continues over all due entries. */
static void *
processEntryCallback(void *context, UA_TimerEntry *te) {
    struct TimerProcessContext *tpc = (struct TimerProcessContext*)context;
    UA_Timer *t = tpc->t;

    /* Execute the callback. callback == NULL is the deletion sentinel set by
     * UA_Timer_remove while the entry was in-process. */
    if(te->callback) {
        te->callback(te->application, te->data);
    }

    /* Remove the entry if marked for deletion or a "once" policy */
    if(!te->callback || te->timerPolicy == UA_TIMERPOLICY_ONCE) {
        ZIP_REMOVE(UA_TimerIdTree, &t->idTree, te);
        UA_free(te);
        return NULL;
    }

    /* Set the time for the next regular execution */
    te->nextTime += te->interval;

    /* Handle the case where the execution "window" was missed. E.g. due to
     * congestion of the application or if the clock was shifted.
     *
     * If the timer policy is "CurrentTime", then there is at least the
     * interval between executions. This is used for MonitoredItems, for
     * which the spec says: The sampling interval indicates the fastest rate
     * at which the Server should sample its underlying source for data
     * changes. (Part 4, 5.12.1.2).
     *
     * Otherwise calculate the next execution time based on the original base
     * time. */
    if(te->nextTime < tpc->now) {
        te->nextTime = (te->timerPolicy == UA_TIMERPOLICY_CURRENTTIME) ?
            tpc->now + te->interval :
            calculateNextTime(tpc->now, te->nextTime, te->interval);
    }

    /* Insert back into the time-sorted tree */
    ZIP_INSERT(UA_TimerTree, &t->tree, te);
    return NULL;
}
269 | | |
/* Execute all entries that have become due (nextTime <= now) and return the
 * timestamp of the earliest remaining callback (UA_INT64_MAX if none). Due
 * entries are first split off into a temporary processTree so that the main
 * tree can be modified (add/modify/remove) while the due entries execute.
 * NOTE(review): the callbacks run with timerMutex held — presumably the lock
 * is recursive or callbacks avoid re-entering the timer API; verify. */
UA_DateTime
UA_Timer_process(UA_Timer *t, UA_DateTime now) {
    UA_LOCK(&t->timerMutex);

    /* Move all entries <= now to the processTree */
    UA_TimerTree processTree;
    ZIP_INIT(&processTree);
    ZIP_UNZIP(UA_TimerTree, &t->tree, &now, &processTree, &t->tree);

    /* Consistency check. The smallest not-processed entry isn't ready. */
    UA_assert(!ZIP_MIN(UA_TimerTree, &t->tree) ||
              ZIP_MIN(UA_TimerTree, &t->tree)->nextTime > now);

    /* Iterate over the entries that need processing in-order. This also
     * moves them back to the regular time-ordered tree. */
    struct TimerProcessContext ctx;
    ctx.t = t;
    ctx.now = now;
    ZIP_ITER(UA_TimerTree, &processTree, processEntryCallback, &ctx);

    /* Compute the timestamp of the earliest next callback */
    UA_TimerEntry *first = ZIP_MIN(UA_TimerTree, &t->tree);
    UA_DateTime next = (first) ? first->nextTime : UA_INT64_MAX;
    UA_UNLOCK(&t->timerMutex);
    return next;
}
296 | | |
297 | | UA_DateTime |
298 | 0 | UA_Timer_next(UA_Timer *t) { |
299 | 0 | UA_LOCK(&t->timerMutex); |
300 | 0 | UA_TimerEntry *first = ZIP_MIN(UA_TimerTree, &t->tree); |
301 | 0 | UA_DateTime next = (first) ? first->nextTime : UA_INT64_MAX; |
302 | 0 | UA_UNLOCK(&t->timerMutex); |
303 | 0 | return next; |
304 | 0 | } |
305 | | |
306 | | static void * |
307 | 0 | freeEntryCallback(void *context, UA_TimerEntry *entry) { |
308 | 0 | UA_free(entry); |
309 | 0 | return NULL; |
310 | 0 | } |
311 | | |
/* Free all entries and reset the timer. The mutex is destroyed afterwards
 * (under UA_MULTITHREADING), so the timer must be re-initialized with
 * UA_Timer_init before it can be used again. Must not run concurrently with
 * UA_Timer_process. */
void
UA_Timer_clear(UA_Timer *t) {
    UA_LOCK(&t->timerMutex);

    /* Free every entry via the id-tree: it contains all entries, whereas the
     * time-tree may be missing entries that are currently in-process.
     * NOTE(review): assumes ZIP_ITER tolerates freeing the visited node —
     * confirm against the ziptree implementation. */
    ZIP_ITER(UA_TimerIdTree, &t->idTree, freeEntryCallback, NULL);
    t->tree.root = NULL;
    t->idTree.root = NULL;
    t->idCounter = 0;

    UA_UNLOCK(&t->timerMutex);

#if UA_MULTITHREADING >= 100
    UA_LOCK_DESTROY(&t->timerMutex);
#endif
}