/src/systemd/src/core/transaction.c
Line | Count | Source (jump to first uncovered line) |
1 | | /* SPDX-License-Identifier: LGPL-2.1+ */ |
2 | | |
3 | | #include <fcntl.h> |
4 | | #include <unistd.h> |
5 | | |
6 | | #include "alloc-util.h" |
7 | | #include "bus-common-errors.h" |
8 | | #include "bus-error.h" |
9 | | #include "terminal-util.h" |
10 | | #include "transaction.h" |
11 | | #include "dbus-unit.h" |
12 | | |
13 | | static void transaction_unlink_job(Transaction *tr, Job *j, bool delete_dependencies); |
14 | | |
15 | 0 | static void transaction_delete_job(Transaction *tr, Job *j, bool delete_dependencies) { |
16 | 0 | assert(tr); |
17 | 0 | assert(j); |
18 | 0 |
|
19 | 0 | /* Deletes one job from the transaction */ |
20 | 0 |
|
21 | 0 | transaction_unlink_job(tr, j, delete_dependencies); |
22 | 0 |
|
23 | 0 | job_free(j); |
24 | 0 | } |
25 | | |
26 | 0 | static void transaction_delete_unit(Transaction *tr, Unit *u) { |
27 | 0 | Job *j; |
28 | 0 |
|
29 | 0 | /* Deletes all jobs associated with a certain unit from the |
30 | 0 | * transaction */ |
31 | 0 |
|
32 | 0 | while ((j = hashmap_get(tr->jobs, u))) |
33 | 0 | transaction_delete_job(tr, j, true); |
34 | 0 | } |
35 | | |
36 | 0 | void transaction_abort(Transaction *tr) { |
37 | 0 | Job *j; |
38 | 0 |
|
39 | 0 | assert(tr); |
40 | 0 |
|
41 | 0 | while ((j = hashmap_first(tr->jobs))) |
42 | 0 | transaction_delete_job(tr, j, false); |
43 | 0 |
|
44 | 0 | assert(hashmap_isempty(tr->jobs)); |
45 | 0 | } |
46 | | |
47 | 0 | static void transaction_find_jobs_that_matter_to_anchor(Job *j, unsigned generation) { |
48 | 0 | JobDependency *l; |
49 | 0 |
|
50 | 0 | /* A recursive sweep through the graph that marks all units |
51 | 0 | * that matter to the anchor job, i.e. are directly or |
52 | 0 | * indirectly a dependency of the anchor job via paths that |
53 | 0 | * are fully marked as mattering. */ |
54 | 0 |
|
55 | 0 | j->matters_to_anchor = true; |
56 | 0 | j->generation = generation; |
57 | 0 |
|
58 | 0 | LIST_FOREACH(subject, l, j->subject_list) { |
59 | 0 |
|
60 | 0 | /* This link does not matter */ |
61 | 0 | if (!l->matters) |
62 | 0 | continue; |
63 | 0 | |
64 | 0 | /* This unit has already been marked */ |
65 | 0 | if (l->object->generation == generation) |
66 | 0 | continue; |
67 | 0 | |
68 | 0 | transaction_find_jobs_that_matter_to_anchor(l->object, generation); |
69 | 0 | } |
70 | 0 | } |
71 | | |
/* Merges 'other' into 'j' (both jobs must belong to the same unit) and
 * then deletes 'other'. 'j' takes on the merged job type 't', inherits
 * irreversibility and anchor relevance, and adopts all of 'other's
 * JobDependency links on both the subject and the object side. */
static void transaction_merge_and_delete_job(Transaction *tr, Job *j, Job *other, JobType t) {
        JobDependency *l, *last;

        assert(j);
        assert(other);
        assert(j->unit == other->unit);
        assert(!j->installed);

        /* Merges 'other' into 'j' and then deletes 'other'. */

        j->type = t;
        j->state = JOB_WAITING;
        /* Sticky flags: set on the merged job if either source had them. */
        j->irreversible = j->irreversible || other->irreversible;
        j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;

        /* Patch us in as new owner of the JobDependency objects */
        last = NULL;
        LIST_FOREACH(subject, l, other->subject_list) {
                assert(l->subject == other);
                l->subject = j;
                last = l;
        }

        /* Merge both lists */
        /* 'last' is the tail of other's list; splice j's existing list
         * after it and make other's head the new head of j's list. */
        if (last) {
                last->subject_next = j->subject_list;
                if (j->subject_list)
                        j->subject_list->subject_prev = last;
                j->subject_list = other->subject_list;
        }

        /* Patch us in as new owner of the JobDependency objects */
        last = NULL;
        LIST_FOREACH(object, l, other->object_list) {
                assert(l->object == other);
                l->object = j;
                last = l;
        }

        /* Merge both lists */
        if (last) {
                last->object_next = j->object_list;
                if (j->object_list)
                        j->object_list->object_prev = last;
                j->object_list = other->object_list;
        }

        /* Kill the other job */
        /* Clear other's list heads first so unlinking it does not free
         * the JobDependency objects we just transferred to j. */
        other->subject_list = NULL;
        other->object_list = NULL;
        transaction_delete_job(tr, other, true);
}
124 | | |
125 | 0 | _pure_ static bool job_is_conflicted_by(Job *j) { |
126 | 0 | JobDependency *l; |
127 | 0 |
|
128 | 0 | assert(j); |
129 | 0 |
|
130 | 0 | /* Returns true if this job is pulled in by a least one |
131 | 0 | * ConflictedBy dependency. */ |
132 | 0 |
|
133 | 0 | LIST_FOREACH(object, l, j->object_list) |
134 | 0 | if (l->conflicts) |
135 | 0 | return true; |
136 | 0 |
|
137 | 0 | return false; |
138 | 0 | } |
139 | | |
/* Scans the per-unit list of transaction jobs starting at 'j' for a
 * pair with unmergeable job types and deletes whichever member of the
 * pair is safer to drop. Returns 0 if one job was deleted, -ENOEXEC if
 * a conflicting pair was found but both members matter to the anchor,
 * and -EINVAL if no conflicting pair exists at all. */
static int delete_one_unmergeable_job(Transaction *tr, Job *j) {
        Job *k;

        assert(j);

        /* Tries to delete one item in the linked list
         * j->transaction_next->transaction_next->... that conflicts
         * with another one, in an attempt to make an inconsistent
         * transaction work. */

        /* We rely here on the fact that if a merged with b does not
         * merge with c, either a or b merge with c neither */
        /* NOTE: the parameter 'j' is reused as the outer loop cursor;
         * the inner loop then pairs it with every later entry 'k'. */
        LIST_FOREACH(transaction, j, j)
                LIST_FOREACH(transaction, k, j->transaction_next) {
                        Job *d;

                        /* Is this one mergeable? Then skip it */
                        if (job_type_is_mergeable(j->type, k->type))
                                continue;

                        /* Ok, we found two that conflict, let's see if we can
                         * drop one of them */
                        if (!j->matters_to_anchor && !k->matters_to_anchor) {

                                /* Both jobs don't matter, so let's
                                 * find the one that is smarter to
                                 * remove. Let's think positive and
                                 * rather remove stops then starts --
                                 * except if something is being
                                 * stopped because it is conflicted by
                                 * another unit in which case we
                                 * rather remove the start. */

                                log_unit_debug(j->unit,
                                               "Looking at job %s/%s conflicted_by=%s",
                                               j->unit->id, job_type_to_string(j->type),
                                               yes_no(j->type == JOB_STOP && job_is_conflicted_by(j)));
                                log_unit_debug(k->unit,
                                               "Looking at job %s/%s conflicted_by=%s",
                                               k->unit->id, job_type_to_string(k->type),
                                               yes_no(k->type == JOB_STOP && job_is_conflicted_by(k)));

                                if (j->type == JOB_STOP) {

                                        if (job_is_conflicted_by(j))
                                                d = k;
                                        else
                                                d = j;

                                } else if (k->type == JOB_STOP) {

                                        if (job_is_conflicted_by(k))
                                                d = j;
                                        else
                                                d = k;
                                } else
                                        d = j;

                        } else if (!j->matters_to_anchor)
                                d = j;
                        else if (!k->matters_to_anchor)
                                d = k;
                        else
                                /* Both sides matter to the anchor; we may
                                 * not drop either, so give up. */
                                return -ENOEXEC;

                        /* Ok, we can drop one, so let's do so. */
                        log_unit_debug(d->unit,
                                       "Fixing conflicting jobs %s/%s,%s/%s by deleting job %s/%s",
                                       j->unit->id, job_type_to_string(j->type),
                                       k->unit->id, job_type_to_string(k->type),
                                       d->unit->id, job_type_to_string(d->type));
                        transaction_delete_job(tr, d, true);
                        return 0;
                }

        return -EINVAL;
}
217 | | |
/* Collapses each unit's list of transaction jobs into a single merged
 * job. Returns 0 on success, -EAGAIN if a conflicting job was dropped
 * and the caller should garbage-collect and retry, or a negative
 * sd_bus error if the conflict cannot be resolved. */
static int transaction_merge_jobs(Transaction *tr, sd_bus_error *e) {
        Job *j;
        Iterator i;
        int r;

        assert(tr);

        /* First step, check whether any of the jobs for one specific
         * task conflict. If so, try to drop one of them. */
        HASHMAP_FOREACH(j, tr->jobs, i) {
                JobType t;
                Job *k;

                t = j->type;
                LIST_FOREACH(transaction, k, j->transaction_next) {
                        if (job_type_merge_and_collapse(&t, k->type, j->unit) >= 0)
                                continue;

                        /* OK, we could not merge all jobs for this
                         * action. Let's see if we can get rid of one
                         * of them */

                        r = delete_one_unmergeable_job(tr, j);
                        if (r >= 0)
                                /* Ok, we managed to drop one, now
                                 * let's ask our callers to call us
                                 * again after garbage collecting */
                                return -EAGAIN;

                        /* We couldn't merge anything. Failure */
                        return sd_bus_error_setf(e, BUS_ERROR_TRANSACTION_JOBS_CONFLICTING,
                                                 "Transaction contains conflicting jobs '%s' and '%s' for %s. "
                                                 "Probably contradicting requirement dependencies configured.",
                                                 job_type_to_string(t),
                                                 job_type_to_string(k->type),
                                                 k->unit->id);
                }
        }

        /* Second step, merge the jobs. */
        HASHMAP_FOREACH(j, tr->jobs, i) {
                JobType t = j->type;
                Job *k;

                /* Merge all transaction jobs for j->unit */
                /* The first pass proved this cannot fail anymore. */
                LIST_FOREACH(transaction, k, j->transaction_next)
                        assert_se(job_type_merge_and_collapse(&t, k->type, j->unit) == 0);

                /* Fold the list into one job. If the anchor job is in
                 * the list it must survive the merge, so merge into it
                 * rather than into j in that case. */
                while ((k = j->transaction_next)) {
                        if (tr->anchor_job == k) {
                                transaction_merge_and_delete_job(tr, k, j, t);
                                j = k;
                        } else
                                transaction_merge_and_delete_job(tr, j, k, t);
                }

                assert(!j->transaction_next);
                assert(!j->transaction_prev);
        }

        return 0;
}
280 | | |
static void transaction_drop_redundant(Transaction *tr) {
        bool again;

        /* Goes through the transaction and removes all jobs of the units whose jobs are all noops. If not
         * all of a unit's jobs are redundant, they are kept. */

        assert(tr);

        /* Deleting a job invalidates the hashmap iterator, so after
         * each deletion we break out and rescan from the start until a
         * full pass drops nothing. */
        do {
                Iterator i;
                Job *j;

                again = false;

                HASHMAP_FOREACH(j, tr->jobs, i) {
                        bool keep = false;
                        Job *k;

                        /* Keep the unit's jobs if any of them is the
                         * anchor, is not a no-op for the unit's current
                         * state, or conflicts with an installed job. */
                        LIST_FOREACH(transaction, k, j)
                                if (tr->anchor_job == k ||
                                    !job_type_is_redundant(k->type, unit_active_state(k->unit)) ||
                                    (k->unit->job && job_type_is_conflicting(k->type, k->unit->job->type))) {
                                        keep = true;
                                        break;
                                }

                        if (!keep) {
                                log_trace("Found redundant job %s/%s, dropping from transaction.",
                                          j->unit->id, job_type_to_string(j->type));
                                transaction_delete_job(tr, j, false);
                                again = true;
                                break;
                        }
                }
        } while (again);
}
317 | | |
318 | 0 | _pure_ static bool unit_matters_to_anchor(Unit *u, Job *j) { |
319 | 0 | assert(u); |
320 | 0 | assert(!j->transaction_prev); |
321 | 0 |
|
322 | 0 | /* Checks whether at least one of the jobs for this unit |
323 | 0 | * matters to the anchor. */ |
324 | 0 |
|
325 | 0 | LIST_FOREACH(transaction, j, j) |
326 | 0 | if (j->matters_to_anchor) |
327 | 0 | return true; |
328 | 0 |
|
329 | 0 | return false; |
330 | 0 | } |
331 | | |
/* Builds a newline-separated string "<field><unit>\n<field><unit>..."
 * from the unit-id entries of the (id, job-type) pair strv 'pairs'.
 * Returns a newly allocated string owned by the caller, or NULL on
 * allocation failure (or when 'pairs' is empty). */
static char* merge_unit_ids(const char* unit_log_field, char **pairs) {
        char **unit_id, **job_type, *ans = NULL;
        size_t alloc = 0, size = 0, next;

        STRV_FOREACH_PAIR(unit_id, job_type, pairs) {
                /* Bytes needed for this entry, excluding the trailing
                 * separator/terminator byte accounted for below. */
                next = strlen(unit_log_field) + strlen(*unit_id);
                if (!GREEDY_REALLOC(ans, alloc, size + next + 1)) {
                        return mfree(ans);
                }

                /* sprintf terminates at ans[size + next]; for all but
                 * the last entry that NUL is overwritten with '\n', so
                 * the final entry leaves the string terminated. */
                sprintf(ans + size, "%s%s", unit_log_field, *unit_id);
                if (*(unit_id+1))
                        ans[size + next] = '\n';
                size += next + 1;
        }

        return ans;
}
350 | | |
/* Depth-first walk over the ordering graph starting at job 'j',
 * arriving from job 'from' (NULL for the root of a walk). Detects
 * ordering cycles and tries to break them by deleting a job that does
 * not matter to the anchor. Returns 0 if no cycle remains below this
 * node, -EAGAIN if a job was deleted to break a cycle (caller should
 * re-verify), or a negative sd_bus error if the cycle is unbreakable. */
static int transaction_verify_order_one(Transaction *tr, Job *j, Job *from, unsigned generation, sd_bus_error *e) {
        Iterator i;
        Unit *u;
        void *v;
        int r;

        assert(tr);
        assert(j);
        assert(!j->transaction_prev);

        /* Does a recursive sweep through the ordering graph, looking
         * for a cycle. If we find a cycle we try to break it. */

        /* Have we seen this before? */
        if (j->generation == generation) {
                Job *k, *delete = NULL;
                /* 'array' holds borrowed pointers (unit ids and static
                 * job-type strings), so a plain free of the vector via
                 * _cleanup_free_ is sufficient. */
                _cleanup_free_ char **array = NULL, *unit_ids = NULL;
                char **unit_id, **job_type;

                /* If the marker is NULL we have been here already and
                 * decided the job was loop-free from here. Hence
                 * shortcut things and return right-away. */
                if (!j->marker)
                        return 0;

                /* So, the marker is not NULL and we already have been here. We have
                 * a cycle. Let's try to break it. We go backwards in our path and
                 * try to find a suitable job to remove. We use the marker to find
                 * our way back, since smart how we are we stored our way back in
                 * there. */

                /* Walk the marker chain from 'from' back towards 'j';
                 * a self-marker (k->marker == k) denotes the walk's
                 * starting point, so we stop there too. */
                for (k = from; k; k = ((k->generation == generation && k->marker != k) ? k->marker : NULL)) {

                        /* For logging below */
                        if (strv_push_pair(&array, k->unit->id, (char*) job_type_to_string(k->type)) < 0)
                                log_oom();

                        if (!delete && hashmap_get(tr->jobs, k->unit) && !unit_matters_to_anchor(k->unit, k))
                                /* Ok, we can drop this one, so let's do so. */
                                delete = k;

                        /* Check if this in fact was the beginning of the cycle */
                        if (k == j)
                                break;
                }

                unit_ids = merge_unit_ids(j->manager->unit_log_field, array); /* ignore error */

                STRV_FOREACH_PAIR(unit_id, job_type, array)
                        /* logging for j not k here to provide a consistent narrative */
                        log_struct(LOG_WARNING,
                                   "MESSAGE=%s: Found %s on %s/%s",
                                   j->unit->id,
                                   unit_id == array ? "ordering cycle" : "dependency",
                                   *unit_id, *job_type,
                                   unit_ids);

                if (delete) {
                        const char *status;
                        /* logging for j not k here to provide a consistent narrative */
                        log_struct(LOG_ERR,
                                   "MESSAGE=%s: Job %s/%s deleted to break ordering cycle starting with %s/%s",
                                   j->unit->id, delete->unit->id, job_type_to_string(delete->type),
                                   j->unit->id, job_type_to_string(j->type),
                                   unit_ids);

                        if (log_get_show_color())
                                status = ANSI_HIGHLIGHT_RED " SKIP " ANSI_NORMAL;
                        else
                                status = " SKIP ";

                        unit_status_printf(delete->unit, status,
                                           "Ordering cycle found, skipping %s");
                        transaction_delete_unit(tr, delete->unit);
                        return -EAGAIN;
                }

                log_struct(LOG_ERR,
                           "MESSAGE=%s: Unable to break cycle starting with %s/%s",
                           j->unit->id, j->unit->id, job_type_to_string(j->type),
                           unit_ids);

                return sd_bus_error_setf(e, BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC,
                                         "Transaction order is cyclic. See system logs for details.");
        }

        /* Make the marker point to where we come from, so that we can
         * find our way backwards if we want to break a cycle. We use
         * a special marker for the beginning: we point to
         * ourselves. */
        j->marker = from ? from : j;
        j->generation = generation;

        /* We assume that the dependencies are bidirectional, and
         * hence can ignore UNIT_AFTER */
        HASHMAP_FOREACH_KEY(v, u, j->unit->dependencies[UNIT_BEFORE], i) {
                Job *o;

                /* Is there a job for this unit? */
                o = hashmap_get(tr->jobs, u);
                if (!o) {
                        /* Ok, there is no job for this in the
                         * transaction, but maybe there is already one
                         * running? */
                        o = u->job;
                        if (!o)
                                continue;
                }

                r = transaction_verify_order_one(tr, o, j, generation, e);
                if (r < 0)
                        return r;
        }

        /* Ok, let's backtrack, and remember that this entry is not on
         * our path anymore. */
        j->marker = NULL;

        return 0;
}
471 | | |
472 | 0 | static int transaction_verify_order(Transaction *tr, unsigned *generation, sd_bus_error *e) { |
473 | 0 | Job *j; |
474 | 0 | int r; |
475 | 0 | Iterator i; |
476 | 0 | unsigned g; |
477 | 0 |
|
478 | 0 | assert(tr); |
479 | 0 | assert(generation); |
480 | 0 |
|
481 | 0 | /* Check if the ordering graph is cyclic. If it is, try to fix |
482 | 0 | * that up by dropping one of the jobs. */ |
483 | 0 |
|
484 | 0 | g = (*generation)++; |
485 | 0 |
|
486 | 0 | HASHMAP_FOREACH(j, tr->jobs, i) { |
487 | 0 | r = transaction_verify_order_one(tr, j, NULL, g, e); |
488 | 0 | if (r < 0) |
489 | 0 | return r; |
490 | 0 | } |
491 | 0 |
|
492 | 0 | return 0; |
493 | 0 | } |
494 | | |
495 | 0 | static void transaction_collect_garbage(Transaction *tr) { |
496 | 0 | bool again; |
497 | 0 |
|
498 | 0 | assert(tr); |
499 | 0 |
|
500 | 0 | /* Drop jobs that are not required by any other job */ |
501 | 0 |
|
502 | 0 | do { |
503 | 0 | Iterator i; |
504 | 0 | Job *j; |
505 | 0 |
|
506 | 0 | again = false; |
507 | 0 |
|
508 | 0 | HASHMAP_FOREACH(j, tr->jobs, i) { |
509 | 0 | if (tr->anchor_job == j) |
510 | 0 | continue; |
511 | 0 | |
512 | 0 | if (!j->object_list) { |
513 | 0 | log_trace("Garbage collecting job %s/%s", j->unit->id, job_type_to_string(j->type)); |
514 | 0 | transaction_delete_job(tr, j, true); |
515 | 0 | again = true; |
516 | 0 | break; |
517 | 0 | } |
518 | 0 |
|
519 | 0 | log_trace("Keeping job %s/%s because of %s/%s", |
520 | 0 | j->unit->id, job_type_to_string(j->type), |
521 | 0 | j->object_list->subject ? j->object_list->subject->unit->id : "root", |
522 | 0 | j->object_list->subject ? job_type_to_string(j->object_list->subject->type) : "root"); |
523 | 0 | } |
524 | 0 |
|
525 | 0 | } while (again); |
526 | 0 | } |
527 | | |
528 | 0 | static int transaction_is_destructive(Transaction *tr, JobMode mode, sd_bus_error *e) { |
529 | 0 | Iterator i; |
530 | 0 | Job *j; |
531 | 0 |
|
532 | 0 | assert(tr); |
533 | 0 |
|
534 | 0 | /* Checks whether applying this transaction means that |
535 | 0 | * existing jobs would be replaced */ |
536 | 0 |
|
537 | 0 | HASHMAP_FOREACH(j, tr->jobs, i) { |
538 | 0 |
|
539 | 0 | /* Assume merged */ |
540 | 0 | assert(!j->transaction_prev); |
541 | 0 | assert(!j->transaction_next); |
542 | 0 |
|
543 | 0 | if (j->unit->job && (mode == JOB_FAIL || j->unit->job->irreversible) && |
544 | 0 | job_type_is_conflicting(j->unit->job->type, j->type)) |
545 | 0 | return sd_bus_error_setf(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE, |
546 | 0 | "Transaction for %s/%s is destructive (%s has '%s' job queued, but '%s' is included in transaction).", |
547 | 0 | tr->anchor_job->unit->id, job_type_to_string(tr->anchor_job->type), |
548 | 0 | j->unit->id, job_type_to_string(j->unit->job->type), job_type_to_string(j->type)); |
549 | 0 | } |
550 | 0 |
|
551 | 0 | return 0; |
552 | 0 | } |
553 | | |
/* Drops all jobs that do not matter to the anchor but would stop a
 * running service or reverse an already installed job, keeping the
 * transaction's footprint minimal. */
static void transaction_minimize_impact(Transaction *tr) {
        Job *j;
        Iterator i;

        assert(tr);

        /* Drops all unnecessary jobs that reverse already active jobs
         * or that stop a running service. */

        /* Deleting a job invalidates both iterators (the hashmap's and
         * the per-unit list's, which reuses 'j' as cursor), so after
         * each deletion we restart the whole scan. */
rescan:
        HASHMAP_FOREACH(j, tr->jobs, i) {
                LIST_FOREACH(transaction, j, j) {
                        bool stops_running_service, changes_existing_job;

                        /* If it matters, we shouldn't drop it */
                        if (j->matters_to_anchor)
                                continue;

                        /* Would this stop a running service?
                         * Would this change an existing job?
                         * If so, let's drop this entry */

                        stops_running_service =
                                j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));

                        changes_existing_job =
                                j->unit->job &&
                                job_type_is_conflicting(j->type, j->unit->job->type);

                        if (!stops_running_service && !changes_existing_job)
                                continue;

                        if (stops_running_service)
                                log_unit_debug(j->unit,
                                               "%s/%s would stop a running service.",
                                               j->unit->id, job_type_to_string(j->type));

                        if (changes_existing_job)
                                log_unit_debug(j->unit,
                                               "%s/%s would change existing job.",
                                               j->unit->id, job_type_to_string(j->type));

                        /* Ok, let's get rid of this */
                        log_unit_debug(j->unit,
                                       "Deleting %s/%s to minimize impact.",
                                       j->unit->id, job_type_to_string(j->type));

                        transaction_delete_job(tr, j, true);
                        goto rescan;
                }
        }
}
606 | | |
/* Moves the transaction's jobs into the manager's set of installed
 * jobs. For isolate/flush modes, first cancels installed jobs that are
 * not part of the new transaction. On allocation failure the manager's
 * job table is rolled back and a negative errno is returned; on
 * success tr->jobs is left empty and 0 is returned. Jobs touched are
 * optionally recorded in 'affected_jobs'. */
static int transaction_apply(
                Transaction *tr,
                Manager *m,
                JobMode mode,
                Set *affected_jobs) {

        Iterator i;
        Job *j;
        int r;

        /* Moves the transaction jobs to the set of active jobs */

        if (IN_SET(mode, JOB_ISOLATE, JOB_FLUSH)) {

                /* When isolating first kill all installed jobs which
                 * aren't part of the new transaction */
                HASHMAP_FOREACH(j, m->jobs, i) {
                        assert(j->installed);

                        if (j->unit->ignore_on_isolate)
                                continue;

                        if (hashmap_get(tr->jobs, j->unit))
                                continue;

                        /* Not invalidating recursively. Avoids triggering
                         * OnFailure= actions of dependent jobs. Also avoids
                         * invalidating our iterator. */
                        job_finish_and_invalidate(j, JOB_CANCELED, false, false);
                }
        }

        /* Register all transaction jobs in the manager's table first,
         * so a failure can be rolled back before any job is started. */
        HASHMAP_FOREACH(j, tr->jobs, i) {
                /* Assume merged */
                assert(!j->transaction_prev);
                assert(!j->transaction_next);

                r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j);
                if (r < 0)
                        goto rollback;
        }

        /* Drain the transaction, installing each job. */
        while ((j = hashmap_steal_first(tr->jobs))) {
                Job *installed_job;

                /* Clean the job dependencies */
                transaction_unlink_job(tr, j, false);

                installed_job = job_install(j);
                if (installed_job != j) {
                        /* j has been merged into a previously installed job */
                        if (tr->anchor_job == j)
                                tr->anchor_job = installed_job;
                        hashmap_remove(m->jobs, UINT32_TO_PTR(j->id));
                        job_free(j);
                        j = installed_job;
                }

                job_add_to_run_queue(j);
                job_add_to_dbus_queue(j);
                job_start_timer(j, false);
                job_shutdown_magic(j);

                /* When 'affected' is specified, let's track all in it all jobs that were touched because of
                 * this transaction. */
                if (affected_jobs)
                        (void) set_put(affected_jobs, j);
        }

        return 0;

rollback:

        /* Undo the registrations made above; the jobs themselves stay
         * in tr->jobs and remain owned by the transaction. */
        HASHMAP_FOREACH(j, tr->jobs, i)
                hashmap_remove(m->jobs, UINT32_TO_PTR(j->id));

        return r;
}
685 | | |
/* Validates and installs the transaction: marks which jobs matter to
 * the anchor, minimizes impact (in fail mode), drops redundant jobs,
 * breaks ordering cycles, merges per-unit jobs, refuses destructive
 * transactions, and finally applies the remaining jobs to manager 'm'.
 * Returns 0 on success or a negative errno, with 'e' describing bus
 * errors for the caller. */
int transaction_activate(
                Transaction *tr,
                Manager *m,
                JobMode mode,
                Set *affected_jobs,
                sd_bus_error *e) {

        Iterator i;
        Job *j;
        int r;
        unsigned generation = 1;

        assert(tr);

        /* This applies the changes recorded in tr->jobs to
         * the actual list of jobs, if possible. */

        /* Reset the generation counter of all installed jobs. The detection of cycles
         * looks at installed jobs. If they had a non-zero generation from some previous
         * walk of the graph, the algorithm would break. */
        HASHMAP_FOREACH(j, m->jobs, i)
                j->generation = 0;

        /* First step: figure out which jobs matter */
        transaction_find_jobs_that_matter_to_anchor(tr->anchor_job, generation++);

        /* Second step: Try not to stop any running services if
         * we don't have to. Don't try to reverse running
         * jobs if we don't have to. */
        if (mode == JOB_FAIL)
                transaction_minimize_impact(tr);

        /* Third step: Drop redundant jobs */
        transaction_drop_redundant(tr);

        for (;;) {
                /* Fourth step: Let's remove unneeded jobs that might
                 * be lurking. */
                if (mode != JOB_ISOLATE)
                        transaction_collect_garbage(tr);

                /* Fifth step: verify order makes sense and correct
                 * cycles if necessary and possible */
                r = transaction_verify_order(tr, &generation, e);
                if (r >= 0)
                        break;

                if (r != -EAGAIN)
                        return log_warning_errno(r, "Requested transaction contains an unfixable cyclic ordering dependency: %s", bus_error_message(e, r));

                /* Let's see if the resulting transaction ordering
                 * graph is still cyclic... */
        }

        for (;;) {
                /* Sixth step: let's drop unmergeable entries if
                 * necessary and possible, merge entries we can
                 * merge */
                r = transaction_merge_jobs(tr, e);
                if (r >= 0)
                        break;

                if (r != -EAGAIN)
                        return log_warning_errno(r, "Requested transaction contains unmergeable jobs: %s", bus_error_message(e, r));

                /* Seventh step: an entry got dropped, let's garbage
                 * collect its dependencies. */
                if (mode != JOB_ISOLATE)
                        transaction_collect_garbage(tr);

                /* Let's see if the resulting transaction still has
                 * unmergeable entries ... */
        }

        /* Eights step: Drop redundant jobs again, if the merging now allows us to drop more. */
        transaction_drop_redundant(tr);

        /* Ninth step: check whether we can actually apply this */
        r = transaction_is_destructive(tr, mode, e);
        if (r < 0)
                return log_notice_errno(r, "Requested transaction contradicts existing jobs: %s", bus_error_message(e, r));

        /* Tenth step: apply changes */
        r = transaction_apply(tr, m, mode, affected_jobs);
        if (r < 0)
                return log_warning_errno(r, "Failed to apply transaction: %m");

        assert(hashmap_isempty(tr->jobs));

        if (!hashmap_isempty(m->jobs)) {
                /* Are there any jobs now? Then make sure we have the
                 * idle pipe around. We don't really care too much
                 * whether this works or not, as the idle pipe is a
                 * feature for cosmetics, not actually useful for
                 * anything beyond that. */

                if (m->idle_pipe[0] < 0 && m->idle_pipe[1] < 0 &&
                    m->idle_pipe[2] < 0 && m->idle_pipe[3] < 0) {
                        (void) pipe2(m->idle_pipe, O_NONBLOCK|O_CLOEXEC);
                        (void) pipe2(m->idle_pipe + 2, O_NONBLOCK|O_CLOEXEC);
                }
        }

        return 0;
}
791 | | |
/* Returns the prospective job of the given type for 'unit', creating
 * and registering it in the transaction if it does not exist yet.
 * '*is_new' (if non-NULL) reports whether a new job was created.
 * Returns NULL on allocation failure. */
static Job* transaction_add_one_job(Transaction *tr, JobType type, Unit *unit, bool *is_new) {
        Job *j, *f;

        assert(tr);
        assert(unit);

        /* Looks for an existing prospective job and returns that. If
         * it doesn't exist it is created and added to the prospective
         * jobs list. */

        f = hashmap_get(tr->jobs, unit);

        LIST_FOREACH(transaction, j, f) {
                assert(j->unit == unit);

                if (j->type == type) {
                        if (is_new)
                                *is_new = false;
                        return j;
                }
        }

        j = job_new(unit, type);
        if (!j)
                return NULL;

        j->generation = 0;
        j->marker = NULL;
        j->matters_to_anchor = false;
        j->irreversible = tr->irreversible;

        /* Prepend the new job to the unit's list, then make the new
         * head the hashmap entry for the unit. */
        LIST_PREPEND(transaction, f, j);

        if (hashmap_replace(tr->jobs, unit, f) < 0) {
                /* Undo the prepend before freeing, so the old list
                 * stays intact. */
                LIST_REMOVE(transaction, f, j);
                job_free(j);
                return NULL;
        }

        if (is_new)
                *is_new = true;

        log_trace("Added job %s/%s to transaction.", unit->id, job_type_to_string(type));

        return j;
}
838 | | |
/* Detaches job 'j' from the transaction: removes it from its unit's
 * per-unit job list (fixing up the hashmap entry if it was the head)
 * and frees all of its JobDependency links. If delete_dependencies is
 * true, jobs that were pulled in by 'j' via mattering links are deleted
 * from the transaction as well. */
static void transaction_unlink_job(Transaction *tr, Job *j, bool delete_dependencies) {
        assert(tr);
        assert(j);

        /* Three cases: middle/tail of the list (just relink), head
         * with a successor (successor becomes the hashmap entry), or
         * sole entry (drop the unit's mapping entirely). */
        if (j->transaction_prev)
                j->transaction_prev->transaction_next = j->transaction_next;
        else if (j->transaction_next)
                hashmap_replace(tr->jobs, j->unit, j->transaction_next);
        else
                hashmap_remove_value(tr->jobs, j->unit, j);

        if (j->transaction_next)
                j->transaction_next->transaction_prev = j->transaction_prev;

        j->transaction_prev = j->transaction_next = NULL;

        /* Jobs this one pulled in: just drop the links. */
        while (j->subject_list)
                job_dependency_free(j->subject_list);

        /* Jobs that pulled this one in: drop the links, and optionally
         * delete the puller if the link mattered. */
        while (j->object_list) {
                Job *other = j->object_list->matters ? j->object_list->subject : NULL;

                job_dependency_free(j->object_list);

                if (other && delete_dependencies) {
                        log_unit_debug(other->unit,
                                       "Deleting job %s/%s as dependency of job %s/%s",
                                       other->unit->id, job_type_to_string(other->type),
                                       j->unit->id, job_type_to_string(j->type));
                        transaction_delete_job(tr, other, delete_dependencies);
                }
        }
}
872 | | |
873 | 0 | void transaction_add_propagate_reload_jobs(Transaction *tr, Unit *unit, Job *by, bool ignore_order, sd_bus_error *e) { |
874 | 0 | Iterator i; |
875 | 0 | JobType nt; |
876 | 0 | Unit *dep; |
877 | 0 | void *v; |
878 | 0 | int r; |
879 | 0 |
|
880 | 0 | assert(tr); |
881 | 0 | assert(unit); |
882 | 0 |
|
883 | 0 | HASHMAP_FOREACH_KEY(v, dep, unit->dependencies[UNIT_PROPAGATES_RELOAD_TO], i) { |
884 | 0 | nt = job_type_collapse(JOB_TRY_RELOAD, dep); |
885 | 0 | if (nt == JOB_NOP) |
886 | 0 | continue; |
887 | 0 | |
888 | 0 | r = transaction_add_job_and_dependencies(tr, nt, dep, by, false, false, false, ignore_order, e); |
889 | 0 | if (r < 0) { |
890 | 0 | log_unit_warning(dep, |
891 | 0 | "Cannot add dependency reload job, ignoring: %s", |
892 | 0 | bus_error_message(e, r)); |
893 | 0 | sd_bus_error_free(e); |
894 | 0 | } |
895 | 0 | } |
896 | 0 | } |
897 | | |
898 | | int transaction_add_job_and_dependencies( |
899 | | Transaction *tr, |
900 | | JobType type, |
901 | | Unit *unit, |
902 | | Job *by, |
903 | | bool matters, |
904 | | bool conflicts, |
905 | | bool ignore_requirements, |
906 | | bool ignore_order, |
907 | 0 | sd_bus_error *e) { |
908 | 0 |
|
909 | 0 | bool is_new; |
910 | 0 | Iterator i; |
911 | 0 | Unit *dep; |
912 | 0 | Job *ret; |
913 | 0 | void *v; |
914 | 0 | int r; |
915 | 0 |
|
916 | 0 | assert(tr); |
917 | 0 | assert(type < _JOB_TYPE_MAX); |
918 | 0 | assert(type < _JOB_TYPE_MAX_IN_TRANSACTION); |
919 | 0 | assert(unit); |
920 | 0 |
|
921 | 0 | /* Before adding jobs for this unit, let's ensure that its state has been loaded |
922 | 0 | * This matters when jobs are spawned as part of coldplugging itself (see e. g. path_coldplug()). |
923 | 0 | * This way, we "recursively" coldplug units, ensuring that we do not look at state of |
924 | 0 | * not-yet-coldplugged units. */ |
925 | 0 | if (MANAGER_IS_RELOADING(unit->manager)) |
926 | 0 | unit_coldplug(unit); |
927 | 0 |
|
928 | 0 | if (by) |
929 | 0 | log_trace("Pulling in %s/%s from %s/%s", unit->id, job_type_to_string(type), by->unit->id, job_type_to_string(by->type)); |
930 | 0 |
|
931 | 0 | /* Safety check that the unit is a valid state, i.e. not in UNIT_STUB or UNIT_MERGED which should only be set |
932 | 0 | * temporarily. */ |
933 | 0 | if (!IN_SET(unit->load_state, UNIT_LOADED, UNIT_ERROR, UNIT_NOT_FOUND, UNIT_BAD_SETTING, UNIT_MASKED)) |
934 | 0 | return sd_bus_error_setf(e, BUS_ERROR_LOAD_FAILED, "Unit %s is not loaded properly.", unit->id); |
935 | 0 |
|
936 | 0 | if (type != JOB_STOP) { |
937 | 0 | r = bus_unit_validate_load_state(unit, e); |
938 | 0 | if (r < 0) |
939 | 0 | return r; |
940 | 0 | } |
941 | 0 | |
942 | 0 | if (!unit_job_is_applicable(unit, type)) |
943 | 0 | return sd_bus_error_setf(e, BUS_ERROR_JOB_TYPE_NOT_APPLICABLE, |
944 | 0 | "Job type %s is not applicable for unit %s.", |
945 | 0 | job_type_to_string(type), unit->id); |
946 | 0 |
|
947 | 0 | /* First add the job. */ |
948 | 0 | ret = transaction_add_one_job(tr, type, unit, &is_new); |
949 | 0 | if (!ret) |
950 | 0 | return -ENOMEM; |
951 | 0 | |
952 | 0 | ret->ignore_order = ret->ignore_order || ignore_order; |
953 | 0 |
|
954 | 0 | /* Then, add a link to the job. */ |
955 | 0 | if (by) { |
956 | 0 | if (!job_dependency_new(by, ret, matters, conflicts)) |
957 | 0 | return -ENOMEM; |
958 | 0 | } else { |
959 | 0 | /* If the job has no parent job, it is the anchor job. */ |
960 | 0 | assert(!tr->anchor_job); |
961 | 0 | tr->anchor_job = ret; |
962 | 0 | } |
963 | 0 |
|
964 | 0 | if (is_new && !ignore_requirements && type != JOB_NOP) { |
965 | 0 | Set *following; |
966 | 0 |
|
967 | 0 | /* If we are following some other unit, make sure we |
968 | 0 | * add all dependencies of everybody following. */ |
969 | 0 | if (unit_following_set(ret->unit, &following) > 0) { |
970 | 0 | SET_FOREACH(dep, following, i) { |
971 | 0 | r = transaction_add_job_and_dependencies(tr, type, dep, ret, false, false, false, ignore_order, e); |
972 | 0 | if (r < 0) { |
973 | 0 | log_unit_full(dep, |
974 | 0 | r == -ERFKILL ? LOG_INFO : LOG_WARNING, |
975 | 0 | r, "Cannot add dependency job, ignoring: %s", |
976 | 0 | bus_error_message(e, r)); |
977 | 0 | sd_bus_error_free(e); |
978 | 0 | } |
979 | 0 | } |
980 | 0 |
|
981 | 0 | set_free(following); |
982 | 0 | } |
983 | 0 |
|
984 | 0 | /* Finally, recursively add in all dependencies. */ |
985 | 0 | if (IN_SET(type, JOB_START, JOB_RESTART)) { |
986 | 0 | HASHMAP_FOREACH_KEY(v, dep, ret->unit->dependencies[UNIT_REQUIRES], i) { |
987 | 0 | r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, true, false, false, ignore_order, e); |
988 | 0 | if (r < 0) { |
989 | 0 | if (r != -EBADR) /* job type not applicable */ |
990 | 0 | goto fail; |
991 | 0 | |
992 | 0 | sd_bus_error_free(e); |
993 | 0 | } |
994 | 0 | } |
995 | 0 |
|
996 | 0 | HASHMAP_FOREACH_KEY(v, dep, ret->unit->dependencies[UNIT_BINDS_TO], i) { |
997 | 0 | r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, true, false, false, ignore_order, e); |
998 | 0 | if (r < 0) { |
999 | 0 | if (r != -EBADR) /* job type not applicable */ |
1000 | 0 | goto fail; |
1001 | 0 | |
1002 | 0 | sd_bus_error_free(e); |
1003 | 0 | } |
1004 | 0 | } |
1005 | 0 |
|
1006 | 0 | HASHMAP_FOREACH_KEY(v, dep, ret->unit->dependencies[UNIT_WANTS], i) { |
1007 | 0 | r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, false, false, false, ignore_order, e); |
1008 | 0 | if (r < 0) { |
1009 | 0 | /* unit masked, job type not applicable and unit not found are not considered as errors. */ |
1010 | 0 | log_unit_full(dep, |
1011 | 0 | IN_SET(r, -ERFKILL, -EBADR, -ENOENT) ? LOG_DEBUG : LOG_WARNING, |
1012 | 0 | r, "Cannot add dependency job, ignoring: %s", |
1013 | 0 | bus_error_message(e, r)); |
1014 | 0 | sd_bus_error_free(e); |
1015 | 0 | } |
1016 | 0 | } |
1017 | 0 |
|
1018 | 0 | HASHMAP_FOREACH_KEY(v, dep, ret->unit->dependencies[UNIT_REQUISITE], i) { |
1019 | 0 | r = transaction_add_job_and_dependencies(tr, JOB_VERIFY_ACTIVE, dep, ret, true, false, false, ignore_order, e); |
1020 | 0 | if (r < 0) { |
1021 | 0 | if (r != -EBADR) /* job type not applicable */ |
1022 | 0 | goto fail; |
1023 | 0 | |
1024 | 0 | sd_bus_error_free(e); |
1025 | 0 | } |
1026 | 0 | } |
1027 | 0 |
|
1028 | 0 | HASHMAP_FOREACH_KEY(v, dep, ret->unit->dependencies[UNIT_CONFLICTS], i) { |
1029 | 0 | r = transaction_add_job_and_dependencies(tr, JOB_STOP, dep, ret, true, true, false, ignore_order, e); |
1030 | 0 | if (r < 0) { |
1031 | 0 | if (r != -EBADR) /* job type not applicable */ |
1032 | 0 | goto fail; |
1033 | 0 | |
1034 | 0 | sd_bus_error_free(e); |
1035 | 0 | } |
1036 | 0 | } |
1037 | 0 |
|
1038 | 0 | HASHMAP_FOREACH_KEY(v, dep, ret->unit->dependencies[UNIT_CONFLICTED_BY], i) { |
1039 | 0 | r = transaction_add_job_and_dependencies(tr, JOB_STOP, dep, ret, false, false, false, ignore_order, e); |
1040 | 0 | if (r < 0) { |
1041 | 0 | log_unit_warning(dep, |
1042 | 0 | "Cannot add dependency job, ignoring: %s", |
1043 | 0 | bus_error_message(e, r)); |
1044 | 0 | sd_bus_error_free(e); |
1045 | 0 | } |
1046 | 0 | } |
1047 | 0 |
|
1048 | 0 | } |
1049 | 0 |
|
1050 | 0 | if (IN_SET(type, JOB_STOP, JOB_RESTART)) { |
1051 | 0 | static const UnitDependency propagate_deps[] = { |
1052 | 0 | UNIT_REQUIRED_BY, |
1053 | 0 | UNIT_REQUISITE_OF, |
1054 | 0 | UNIT_BOUND_BY, |
1055 | 0 | UNIT_CONSISTS_OF, |
1056 | 0 | }; |
1057 | 0 |
|
1058 | 0 | JobType ptype; |
1059 | 0 | unsigned j; |
1060 | 0 |
|
1061 | 0 | /* We propagate STOP as STOP, but RESTART only |
1062 | 0 | * as TRY_RESTART, in order not to start |
1063 | 0 | * dependencies that are not around. */ |
1064 | 0 | ptype = type == JOB_RESTART ? JOB_TRY_RESTART : type; |
1065 | 0 |
|
1066 | 0 | for (j = 0; j < ELEMENTSOF(propagate_deps); j++) |
1067 | 0 | HASHMAP_FOREACH_KEY(v, dep, ret->unit->dependencies[propagate_deps[j]], i) { |
1068 | 0 | JobType nt; |
1069 | 0 |
|
1070 | 0 | nt = job_type_collapse(ptype, dep); |
1071 | 0 | if (nt == JOB_NOP) |
1072 | 0 | continue; |
1073 | 0 | |
1074 | 0 | r = transaction_add_job_and_dependencies(tr, nt, dep, ret, true, false, false, ignore_order, e); |
1075 | 0 | if (r < 0) { |
1076 | 0 | if (r != -EBADR) /* job type not applicable */ |
1077 | 0 | goto fail; |
1078 | 0 | |
1079 | 0 | sd_bus_error_free(e); |
1080 | 0 | } |
1081 | 0 | } |
1082 | 0 | } |
1083 | 0 |
|
1084 | 0 | if (type == JOB_RELOAD) |
1085 | 0 | transaction_add_propagate_reload_jobs(tr, ret->unit, ret, ignore_order, e); |
1086 | 0 |
|
1087 | 0 | /* JOB_VERIFY_ACTIVE requires no dependency handling */ |
1088 | 0 | } |
1089 | 0 |
|
1090 | 0 | return 0; |
1091 | 0 | |
1092 | 0 | fail: |
1093 | 0 | return r; |
1094 | 0 | } |
1095 | | |
1096 | 0 | int transaction_add_isolate_jobs(Transaction *tr, Manager *m) { |
1097 | 0 | Iterator i; |
1098 | 0 | Unit *u; |
1099 | 0 | char *k; |
1100 | 0 | int r; |
1101 | 0 |
|
1102 | 0 | assert(tr); |
1103 | 0 | assert(m); |
1104 | 0 |
|
1105 | 0 | HASHMAP_FOREACH_KEY(u, k, m->units, i) { |
1106 | 0 |
|
1107 | 0 | /* ignore aliases */ |
1108 | 0 | if (u->id != k) |
1109 | 0 | continue; |
1110 | 0 | |
1111 | 0 | if (u->ignore_on_isolate) |
1112 | 0 | continue; |
1113 | 0 | |
1114 | 0 | /* No need to stop inactive jobs */ |
1115 | 0 | if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) && !u->job) |
1116 | 0 | continue; |
1117 | 0 | |
1118 | 0 | /* Is there already something listed for this? */ |
1119 | 0 | if (hashmap_get(tr->jobs, u)) |
1120 | 0 | continue; |
1121 | 0 | |
1122 | 0 | r = transaction_add_job_and_dependencies(tr, JOB_STOP, u, tr->anchor_job, true, false, false, false, NULL); |
1123 | 0 | if (r < 0) |
1124 | 0 | log_unit_warning_errno(u, r, "Cannot add isolate job, ignoring: %m"); |
1125 | 0 | } |
1126 | 0 |
|
1127 | 0 | return 0; |
1128 | 0 | } |
1129 | | |
1130 | 0 | Transaction *transaction_new(bool irreversible) { |
1131 | 0 | Transaction *tr; |
1132 | 0 |
|
1133 | 0 | tr = new0(Transaction, 1); |
1134 | 0 | if (!tr) |
1135 | 0 | return NULL; |
1136 | 0 | |
1137 | 0 | tr->jobs = hashmap_new(NULL); |
1138 | 0 | if (!tr->jobs) |
1139 | 0 | return mfree(tr); |
1140 | 0 | |
1141 | 0 | tr->irreversible = irreversible; |
1142 | 0 |
|
1143 | 0 | return tr; |
1144 | 0 | } |
1145 | | |
1146 | 0 | void transaction_free(Transaction *tr) { |
1147 | 0 | assert(hashmap_isempty(tr->jobs)); |
1148 | 0 | hashmap_free(tr->jobs); |
1149 | 0 | free(tr); |
1150 | 0 | } |