/src/systemd/src/core/transaction.c
Line | Count | Source |
1 | | /* SPDX-License-Identifier: LGPL-2.1-or-later */ |
2 | | |
3 | | #include "sd-bus.h" |
4 | | #include "sd-messages.h" |
5 | | |
6 | | #include "alloc-util.h" |
7 | | #include "ansi-color.h" |
8 | | #include "bus-common-errors.h" |
9 | | #include "bus-error.h" |
10 | | #include "dbus-unit.h" |
11 | | #include "hash-funcs.h" |
12 | | #include "manager.h" |
13 | | #include "set.h" |
14 | | #include "slice.h" |
15 | | #include "string-util.h" |
16 | | #include "strv.h" |
17 | | #include "transaction.h" |
18 | | |
19 | 0 | #define CYCLIC_TRANSACTIONS_MAX 4096U |
20 | | |
21 | | static bool job_matters_to_anchor(Job *job); |
22 | | static void transaction_unlink_job(Transaction *tr, Job *j, bool delete_dependencies); |
23 | | |
24 | 0 | static void transaction_delete_job(Transaction *tr, Job *j, bool delete_dependencies) { |
25 | 0 | assert(tr); |
26 | 0 | assert(j); |
27 | | |
28 | | /* Deletes one job from the transaction. */ |
29 | |
|
30 | 0 | transaction_unlink_job(tr, j, delete_dependencies); |
31 | |
|
32 | 0 | job_free(j); |
33 | 0 | } |
34 | | |
35 | 0 | static void transaction_delete_unit(Transaction *tr, Unit *u) { |
36 | 0 | Job *j; |
37 | | |
38 | | /* Deletes all jobs associated with a certain unit from the transaction. */ |
39 | |
|
40 | 0 | while ((j = hashmap_get(tr->jobs, u))) |
41 | 0 | transaction_delete_job(tr, j, true); |
42 | 0 | } |
43 | | |
44 | 0 | static void transaction_abort(Transaction *tr) { |
45 | 0 | Job *j; |
46 | |
|
47 | 0 | assert(tr); |
48 | |
|
49 | 0 | while ((j = hashmap_first(tr->jobs))) |
50 | 0 | transaction_delete_job(tr, j, false); |
51 | |
|
52 | 0 | assert(hashmap_isempty(tr->jobs)); |
53 | 0 | } |
54 | | |
55 | 0 | static void transaction_find_jobs_that_matter_to_anchor(Job *j, unsigned generation) { |
56 | 0 | assert(j); |
57 | | |
58 | | /* A recursive sweep through the graph that marks all units that matter to the anchor job, i.e. are |
59 | | * directly or indirectly a dependency of the anchor job via paths that are fully marked as |
60 | | * mattering. */ |
61 | |
|
62 | 0 | j->matters_to_anchor = true; |
63 | 0 | j->generation = generation; |
64 | |
|
65 | 0 | LIST_FOREACH(subject, l, j->subject_list) { |
66 | | |
67 | | /* This link does not matter. */ |
68 | 0 | if (!l->matters) |
69 | 0 | continue; |
70 | | |
71 | | /* This unit has already been marked. */ |
72 | 0 | if (l->object->generation == generation) |
73 | 0 | continue; |
74 | | |
75 | 0 | transaction_find_jobs_that_matter_to_anchor(l->object, generation); |
76 | 0 | } |
77 | 0 | } |
78 | | |
/* Merges 'other' into 'j' (with 'j' taking the collapsed job type 't'), re-owning all of other's
 * JobDependency links, and then deletes 'other'. Both jobs must belong to the same unit. */
static void transaction_merge_and_delete_job(Transaction *tr, Job *j, Job *other, JobType t) {
        JobDependency *last;

        assert(j);
        assert(other);
        assert(j->unit == other->unit);
        assert(!j->installed);

        /* Merges 'other' into 'j' and then deletes 'other'. */

        j->type = t;
        j->state = JOB_WAITING;
        j->irreversible = j->irreversible || other->irreversible;
        j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;

        /* Patch us in as new owner of the JobDependency objects (subject side). */
        last = NULL;
        LIST_FOREACH(subject, l, other->subject_list) {
                assert(l->subject == other);
                l->subject = j;
                last = l; /* remember the tail of other's list for the splice below */
        }

        /* Merge both lists: link other's subject list in front of j's. */
        if (last) {
                last->subject_next = j->subject_list;
                if (j->subject_list)
                        j->subject_list->subject_prev = last;
                j->subject_list = other->subject_list;
        }

        /* Patch us in as new owner of the JobDependency objects (object side). */
        last = NULL;
        LIST_FOREACH(object, l, other->object_list) {
                assert(l->object == other);
                l->object = j;
                last = l;
        }

        /* Merge both lists: link other's object list in front of j's. */
        if (last) {
                last->object_next = j->object_list;
                if (j->object_list)
                        j->object_list->object_prev = last;
                j->object_list = other->object_list;
        }

        /* Kill the other job. Its dependency lists were handed over above, so clear them first lest
         * transaction_unlink_job() frees them. */
        other->subject_list = NULL;
        other->object_list = NULL;
        transaction_delete_job(tr, other, true);
}
131 | | |
132 | 0 | static bool job_is_conflicted_by(Job *j) { |
133 | 0 | assert(j); |
134 | | |
135 | | /* Returns true if this job is pulled in by a least one ConflictedBy= dependency. */ |
136 | |
|
137 | 0 | LIST_FOREACH(object, l, j->object_list) |
138 | 0 | if (l->conflicts) |
139 | 0 | return true; |
140 | | |
141 | 0 | return false; |
142 | 0 | } |
143 | | |
/* Tries to delete one job from the per-unit transaction list that conflicts with another one, in an
 * attempt to make an inconsistent transaction work. Returns 0 if a job was dropped, -ENOEXEC if both
 * conflicting jobs matter to the anchor (nothing droppable), -EINVAL if no conflict was found. */
static int delete_one_unmergeable_job(Transaction *tr, Job *job) {
        assert(job);

        /* Tries to delete one item in the linked list
         * j->transaction_next->transaction_next->... that conflicts with another one, in an attempt to make
         * an inconsistent transaction work. */

        /* We rely here on the fact that if a merged with b does not merge with c, then neither a nor b
         * alone merges with c either. */
        LIST_FOREACH(transaction, j, job)
                LIST_FOREACH(transaction, k, j->transaction_next) {
                        Job *d;

                        /* Is this one mergeable? Then skip it. */
                        if (job_type_is_mergeable(j->type, k->type))
                                continue;

                        /* Ok, we found two that conflict, let's see if we can drop one of them. */
                        if (!j->matters_to_anchor && !k->matters_to_anchor) {

                                /* Both jobs don't matter, so let's find the one that is smarter to remove.
                                 * Let's think positive and rather remove stops than starts -- except if
                                 * something is being stopped because it is conflicted by another unit in
                                 * which case we rather remove the start. */

                                bool j_is_conflicted_by = job_is_conflicted_by(j),
                                     k_is_conflicted_by = job_is_conflicted_by(k);

                                /* Update test/units/TEST-87-AUX-UTILS-VM.sh when logs below are changed. */
                                log_unit_debug(j->unit,
                                               "Looking at job %s/%s conflicted_by=%s",
                                               j->unit->id, job_type_to_string(j->type),
                                               yes_no(j->type == JOB_STOP && j_is_conflicted_by));
                                log_unit_debug(k->unit,
                                               "Looking at job %s/%s conflicted_by=%s",
                                               k->unit->id, job_type_to_string(k->type),
                                               yes_no(k->type == JOB_STOP && k_is_conflicted_by));

                                /* Preference order: drop the non-stop job if the stop was pulled in via
                                 * Conflicts=; otherwise drop a stop; otherwise drop j arbitrarily. */
                                if (j->type == JOB_STOP && j_is_conflicted_by)
                                        d = k;
                                else if (k->type == JOB_STOP && k_is_conflicted_by)
                                        d = j;
                                else if (j->type == JOB_STOP)
                                        d = j;
                                else if (k->type == JOB_STOP)
                                        d = k;
                                else
                                        d = j;

                        } else if (!j->matters_to_anchor)
                                d = j;
                        else if (!k->matters_to_anchor)
                                d = k;
                        else
                                return -ENOEXEC;

                        /* Ok, we can drop one, so let's do so. */
                        log_unit_debug(d->unit,
                                       "Fixing conflicting jobs %s/%s,%s/%s by deleting job %s/%s",
                                       j->unit->id, job_type_to_string(j->type),
                                       k->unit->id, job_type_to_string(k->type),
                                       d->unit->id, job_type_to_string(d->type));
                        transaction_delete_job(tr, d, true);
                        return 0;
                }

        return -EINVAL;
}
212 | | |
/* Verifies that, for every unit whose jobs match the given matters_to_anchor flag, all queued job
 * types can be collapsed into one mergeable type. Returns -EAGAIN if a conflicting job was dropped
 * (caller should garbage-collect and retry), or a bus error if the conflict cannot be resolved. */
static int transaction_ensure_mergeable(Transaction *tr, bool matters_to_anchor, sd_bus_error *e) {
        Job *j;
        int r;

        assert(tr);

        HASHMAP_FOREACH(j, tr->jobs) {
                JobType t;

                /* Handle only the requested half (anchored vs. non-anchored) in this pass. */
                if (job_matters_to_anchor(j) != matters_to_anchor)
                        continue;

                t = j->type;
                LIST_FOREACH(transaction, k, j->transaction_next) {
                        if (job_type_merge_and_collapse(&t, k->type, j->unit) >= 0)
                                continue;

                        /* OK, we could not merge all jobs for this action. Let's see if we can get rid of
                         * one of them. */

                        r = delete_one_unmergeable_job(tr, j);
                        if (r >= 0)
                                /* Ok, we managed to drop one, now let's ask our callers to call us again
                                 * after garbage collecting. */
                                return -EAGAIN;

                        /* We couldn't merge anything. Failure. */
                        return sd_bus_error_setf(e, BUS_ERROR_TRANSACTION_JOBS_CONFLICTING,
                                                 "Transaction contains conflicting jobs '%s' and '%s' for %s. "
                                                 "Probably contradicting requirement dependencies configured.",
                                                 job_type_to_string(t),
                                                 job_type_to_string(k->type),
                                                 k->unit->id);
                }
        }

        return 0;
}
251 | | |
/* Collapses all per-unit job lists into a single job per unit. Returns -EAGAIN if a job had to be
 * dropped first (caller loops), or a bus error if jobs conflict irreconcilably. */
static int transaction_merge_jobs(Transaction *tr, sd_bus_error *e) {
        Job *j;
        int r;

        assert(tr);

        /* First step, try to drop unmergeable jobs for jobs that matter to anchor. */
        r = transaction_ensure_mergeable(tr, /* matters_to_anchor= */ true, e);
        if (r < 0)
                return r;

        /* Second step, do the same for jobs that do not matter to anchor. */
        r = transaction_ensure_mergeable(tr, /* matters_to_anchor= */ false, e);
        if (r < 0)
                return r;

        /* Third step, merge the jobs. */
        HASHMAP_FOREACH(j, tr->jobs) {
                JobType t = j->type;

                /* Merge all transaction jobs for j->unit. After the two passes above this must succeed. */
                LIST_FOREACH(transaction, k, j->transaction_next)
                        assert_se(job_type_merge_and_collapse(&t, k->type, j->unit) == 0);

                Job *k;
                while ((k = j->transaction_next)) {
                        if (tr->anchor_job == k) {
                                /* Keep the anchor Job object itself alive: fold j into k instead, and
                                 * continue iterating from k. */
                                transaction_merge_and_delete_job(tr, k, j, t);
                                j = k;
                        } else
                                transaction_merge_and_delete_job(tr, j, k, t);
                }

                assert(!j->transaction_next);
                assert(!j->transaction_prev);
        }

        return 0;
}
291 | | |
static void transaction_drop_redundant(Transaction *tr) {
        bool again;

        /* Goes through the transaction and removes all jobs of the units whose jobs are all noops. If not
         * all of a unit's jobs are redundant, they are kept. */

        assert(tr);

        do {
                Job *j;

                again = false;

                HASHMAP_FOREACH(j, tr->jobs) {
                        bool keep = false;

                        /* Keep the unit's jobs if any of them is the anchor, is not a noop for the unit's
                         * current state, or conflicts with an already installed job. */
                        LIST_FOREACH(transaction, k, j)
                                if (tr->anchor_job == k ||
                                    !job_type_is_redundant(k->type, unit_active_state(k->unit)) ||
                                    (k->unit->job && job_type_is_conflicting(k->type, k->unit->job->type))) {
                                        keep = true;
                                        break;
                                }

                        if (!keep) {
                                log_trace("Found redundant job %s/%s, dropping from transaction.",
                                          j->unit->id, job_type_to_string(j->type));
                                /* Deleting invalidates the hashmap iterator, hence break out and rescan. */
                                transaction_delete_job(tr, j, false);
                                again = true;
                                break;
                        }
                }
        } while (again);
}
326 | | |
327 | 0 | static bool job_matters_to_anchor(Job *job) { |
328 | 0 | assert(job); |
329 | 0 | assert(!job->transaction_prev); |
330 | | |
331 | | /* Checks whether at least one of the jobs for this transaction matters to the anchor. */ |
332 | |
|
333 | 0 | LIST_FOREACH(transaction, j, job) |
334 | 0 | if (j->matters_to_anchor) |
335 | 0 | return true; |
336 | | |
337 | 0 | return false; |
338 | 0 | } |
339 | | |
/* Depth-first walk over the ordering graph starting at job 'j', arriving from 'from' (NULL for the
 * roots). Returns 0 if no cycle reachable from here, -EAGAIN if a cycle was broken by deleting a job
 * (caller should re-verify), or a bus error if an unbreakable cycle was found. */
static int transaction_verify_order_one(Transaction *tr, Job *j, Job *from, unsigned generation, sd_bus_error *e) {

        static const UnitDependencyAtom directions[] = {
                UNIT_ATOM_BEFORE,
                UNIT_ATOM_AFTER,
        };

        int r;

        assert(tr);
        assert(j);
        assert(!j->transaction_prev);

        /* Does a recursive sweep through the ordering graph, looking for a cycle. If we find a cycle we try
         * to break it. */

        /* Have we seen this before? */
        if (j->generation == generation) {
                _cleanup_free_ char **array = NULL;
                Job *delete = NULL;

                /* If the marker is NULL we have been here already and decided the job was loop-free from
                 * here. Hence shortcut things and return right-away. */
                if (!j->marker)
                        return 0;

                /* So, the marker is not NULL and we already have been here. We have a cycle. Let's try to
                 * break it. We go backwards in our path and try to find a suitable job to remove. We use the
                 * marker to find our way back, since smart how we are we stored our way back in there. */
                for (Job *k = from; k; k = (k->generation == generation && k->marker != k) ? k->marker : NULL) {

                        /* For logging below. */
                        if (strv_push_pair(&array, k->unit->id, (char*) job_type_to_string(k->type)) < 0)
                                (void) log_oom_warning();

                        /* Pick the first deletable candidate: still in the transaction and not needed by
                         * the anchor. */
                        if (!delete && hashmap_contains(tr->jobs, k->unit) && !job_matters_to_anchor(k))
                                /* Ok, we can drop this one, so let's do so. */
                                delete = k;

                        /* Check if this in fact was the beginning of the cycle. */
                        if (k == j)
                                break;
                }

                _cleanup_free_ char *unit_ids = NULL;
                STRV_FOREACH_PAIR(unit_id, job_type, array)
                        (void) strextendf_with_separator(&unit_ids, "\n", "%s%s", unit_log_field(j->unit), *unit_id);

                /* Render the cycle path as "a/start after b/start ... - after a" for the log. */
                _cleanup_free_ char *cycle_path_text = strdup("Found ordering cycle");
                if (!strv_isempty(array)) {
                        (void) strextendf(&cycle_path_text, ": %s/%s", array[0], array[1]);

                        STRV_FOREACH_PAIR(unit_id, job_type, strv_skip(array, 2))
                                (void) strextendf(&cycle_path_text, " after %s/%s", *unit_id, *job_type);

                        (void) strextendf(&cycle_path_text, " - after %s", array[0]);
                }

                /* logging for j not k here to provide a consistent narrative */
                if (cycle_path_text)
                        log_struct(LOG_ERR,
                                   LOG_UNIT_MESSAGE(j->unit, "%s", cycle_path_text),
                                   LOG_MESSAGE_ID(SD_MESSAGE_UNIT_ORDERING_CYCLE_STR),
                                   LOG_ITEM("%s", strempty(unit_ids)));

                /* Remember this transaction had a cycle, but cap the record set (CYCLIC_TRANSACTIONS_MAX)
                 * so it cannot grow without bound. */
                if (set_size(j->manager->transactions_with_cycle) >= CYCLIC_TRANSACTIONS_MAX)
                        log_warning("Too many transactions with ordering cycle, suppressing record.");
                else {
                        uint64_t *id_buf = newdup(uint64_t, &tr->id, 1);
                        if (!id_buf)
                                log_oom_warning();
                        else
                                (void) set_ensure_consume(&j->manager->transactions_with_cycle, &uint64_hash_ops_value_free, id_buf);
                }

                if (delete) {
                        const char *status;
                        /* logging for j not k here to provide a consistent narrative */
                        log_struct(LOG_WARNING,
                                   LOG_UNIT_MESSAGE(j->unit,
                                                    "Job %s/%s deleted to break ordering cycle starting with %s/%s",
                                                    delete->unit->id, job_type_to_string(delete->type),
                                                    j->unit->id, job_type_to_string(j->type)),
                                   LOG_MESSAGE_ID(SD_MESSAGE_DELETING_JOB_BECAUSE_ORDERING_CYCLE_STR),
                                   LOG_ITEM("DELETED_UNIT=%s", delete->unit->id),
                                   LOG_ITEM("DELETED_TYPE=%s", job_type_to_string(delete->type)),
                                   LOG_ITEM("%s", strempty(unit_ids)));

                        if (log_get_show_color())
                                status = ANSI_HIGHLIGHT_RED " SKIP " ANSI_NORMAL;
                        else
                                status = " SKIP ";

                        unit_status_printf(delete->unit,
                                           STATUS_TYPE_NOTICE,
                                           status,
                                           "Ordering cycle found, skipping %s",
                                           unit_status_string(delete->unit, NULL));
                        transaction_delete_unit(tr, delete->unit);
                        return -EAGAIN;
                }

                log_struct(LOG_ERR,
                           LOG_UNIT_MESSAGE(j->unit, "Unable to break cycle starting with %s/%s",
                                            j->unit->id, job_type_to_string(j->type)),
                           LOG_MESSAGE_ID(SD_MESSAGE_CANT_BREAK_ORDERING_CYCLE_STR),
                           LOG_ITEM("%s", strempty(unit_ids)));

                return sd_bus_error_setf(e, BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC,
                                         "Transaction order is cyclic. See system logs for details.");
        }

        /* Make the marker point to where we come from, so that we can find our way backwards if we want to
         * break a cycle. We use a special marker for the beginning: we point to ourselves. */
        j->marker = from ?: j;
        j->generation = generation;

        /* Actual ordering of jobs depends on the unit ordering dependency and job types. We need to traverse
         * the graph over 'before' edges in the actual job execution order. We traverse over both unit
         * ordering dependencies and we test with job_compare() whether it is the 'before' edge in the job
         * execution ordering. */
        FOREACH_ELEMENT(d, directions) {
                Unit *u;

                UNIT_FOREACH_DEPENDENCY(u, j->unit, *d) {
                        Job *o;

                        /* Is there a job for this unit? */
                        o = hashmap_get(tr->jobs, u);
                        if (!o) {
                                /* Ok, there is no job for this in the transaction, but maybe there is
                                 * already one running? */
                                o = u->job;
                                if (!o)
                                        continue;
                        }

                        /* Cut traversing if the job j is not really *before* o. */
                        if (job_compare(j, o, *d) >= 0)
                                continue;

                        r = transaction_verify_order_one(tr, o, j, generation, e);
                        if (r < 0)
                                return r;
                }
        }

        /* Ok, let's backtrack, and remember that this entry is not on our path anymore. */
        j->marker = NULL;

        return 0;
}
492 | | |
493 | 0 | static int transaction_verify_order(Transaction *tr, unsigned *generation, sd_bus_error *e) { |
494 | 0 | Job *j; |
495 | 0 | int r; |
496 | 0 | unsigned g; |
497 | |
|
498 | 0 | assert(tr); |
499 | 0 | assert(generation); |
500 | | |
501 | | /* Check if the ordering graph is cyclic. If it is, try to fix that up by dropping one of the jobs. */ |
502 | |
|
503 | 0 | g = (*generation)++; |
504 | |
|
505 | 0 | HASHMAP_FOREACH(j, tr->jobs) { |
506 | 0 | r = transaction_verify_order_one(tr, j, NULL, g, e); |
507 | 0 | if (r < 0) |
508 | 0 | return r; |
509 | 0 | } |
510 | | |
511 | 0 | return 0; |
512 | 0 | } |
513 | | |
static void transaction_collect_garbage(Transaction *tr) {
        bool again;

        assert(tr);

        /* Drop jobs that are not required by any other job (empty object_list), except the anchor job
         * itself. Repeats until a fixed point, since each removal may orphan further jobs. */

        do {
                Job *j;

                again = false;

                HASHMAP_FOREACH(j, tr->jobs) {
                        if (tr->anchor_job == j)
                                continue;

                        if (!j->object_list) {
                                log_trace("Garbage collecting job %s/%s", j->unit->id, job_type_to_string(j->type));
                                /* Deleting invalidates the hashmap iterator, hence break out and rescan. */
                                transaction_delete_job(tr, j, true);
                                again = true;
                                break;
                        }

                        log_trace("Keeping job %s/%s because of %s/%s",
                                  j->unit->id, job_type_to_string(j->type),
                                  j->object_list->subject ? j->object_list->subject->unit->id : "root",
                                  j->object_list->subject ? job_type_to_string(j->object_list->subject->type) : "root");
                }

        } while (again);
}
545 | | |
/* Checks whether applying this transaction means that existing jobs would be replaced. Returns 0 if
 * applying is fine, or a bus error if an installed job would be destructively overridden. */
static int transaction_is_destructive(Transaction *tr, JobMode mode, sd_bus_error *e) {
        Job *j;

        assert(tr);

        HASHMAP_FOREACH(j, tr->jobs) {

                /* Assume merged: each unit has exactly one job left at this point. */
                assert(!j->transaction_prev);
                assert(!j->transaction_next);

                /* A conflict with an installed job is destructive in JOB_FAIL/JOB_LENIENT modes, or
                 * whenever the installed job is marked irreversible. */
                if (j->unit->job && (IN_SET(mode, JOB_FAIL, JOB_LENIENT) || j->unit->job->irreversible) &&
                    job_type_is_conflicting(j->unit->job->type, j->type))
                        return sd_bus_error_setf(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE,
                                                 "Transaction for %s/%s is destructive (%s has '%s' job queued, but '%s' is included in transaction).",
                                                 tr->anchor_job->unit->id, job_type_to_string(tr->anchor_job->type),
                                                 j->unit->id, job_type_to_string(j->unit->job->type), job_type_to_string(j->type));
        }

        return 0;
}
569 | | |
/* Drops all unnecessary jobs that reverse already active jobs or that stop a running service.
 * Only active in JOB_FAIL/JOB_LENIENT modes; in JOB_LENIENT mode a conflicting job that matters to
 * the anchor makes the whole transaction fail with a bus error instead. */
static int transaction_minimize_impact(Transaction *tr, JobMode mode, sd_bus_error *e) {
        Job *head;

        assert(tr);

        if (!IN_SET(mode, JOB_FAIL, JOB_LENIENT))
                return 0;

rescan:
        HASHMAP_FOREACH(head, tr->jobs) {
                LIST_FOREACH(transaction, j, head) {
                        bool stops_running_service, changes_existing_job;

                        /* If it matters, we shouldn't drop it — except in lenient mode, where we check
                         * below whether it is conflicting and bail out in that case. */
                        if (j->matters_to_anchor && mode != JOB_LENIENT)
                                continue;

                        /* Would this stop a running service? Would this change an existing job? If so, let's
                         * drop this entry. */

                        stops_running_service =
                                j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));

                        changes_existing_job =
                                j->unit->job &&
                                job_type_is_conflicting(j->type, j->unit->job->type);

                        if (!stops_running_service && !changes_existing_job)
                                continue;

                        if (j->matters_to_anchor) {
                                assert(mode == JOB_LENIENT);
                                return sd_bus_error_setf(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE,
                                                         "%s/%s would stop a running unit or change existing job, bailing",
                                                         j->unit->id, job_type_to_string(j->type));
                        }

                        if (stops_running_service)
                                log_unit_debug(j->unit,
                                               "%s/%s would stop a running service.",
                                               j->unit->id, job_type_to_string(j->type));

                        if (changes_existing_job)
                                log_unit_debug(j->unit,
                                               "%s/%s would change existing job.",
                                               j->unit->id, job_type_to_string(j->type));

                        /* Ok, let's get rid of this. */
                        log_unit_debug(j->unit,
                                       "Deleting %s/%s to minimize impact.",
                                       j->unit->id, job_type_to_string(j->type));

                        /* Deletion invalidates both iterators, hence restart the scan from scratch. */
                        transaction_delete_job(tr, j, true);
                        goto rescan;
                }
        }

        return 0;
}
631 | | |
/* Moves the transaction jobs to the manager's set of active jobs and installs them. On allocation
 * failure during registration, already-registered jobs are rolled back out of m->jobs. After a
 * successful return, tr->jobs is empty and ownership of all jobs has passed to the manager. */
static int transaction_apply(
                Transaction *tr,
                Manager *m,
                JobMode mode,
                Set *affected_jobs) {

        Job *j;
        int r;

        assert(tr);
        assert(m);

        if (IN_SET(mode, JOB_ISOLATE, JOB_FLUSH)) {

                /* When isolating first kill all installed jobs which aren't part of the new transaction. */
                HASHMAP_FOREACH(j, m->jobs) {
                        assert(j->installed);

                        if (j->unit->ignore_on_isolate)
                                continue;

                        if (hashmap_contains(tr->jobs, j->unit))
                                continue;

                        /* Not invalidating recursively. Avoids triggering OnFailure= actions of dependent
                         * jobs. Also avoids invalidating our iterator. */
                        job_finish_and_invalidate(j, JOB_CANCELED, false, false);
                }
        }

        /* First pass: register all transaction jobs in the manager's job table, keyed by job id. */
        HASHMAP_FOREACH(j, tr->jobs) {
                /* Assume merged. */
                assert(!j->transaction_prev);
                assert(!j->transaction_next);

                r = hashmap_ensure_put(&m->jobs, NULL, UINT32_TO_PTR(j->id), j);
                if (r < 0)
                        goto rollback;
        }

        /* Second pass: steal each job from the transaction and install it. */
        while ((j = hashmap_steal_first(tr->jobs))) {
                Job *installed_job;

                /* Clean the job dependencies. */
                transaction_unlink_job(tr, j, false);

                installed_job = job_install(j);
                if (installed_job != j) {
                        /* j has been merged into a previously installed job. */
                        if (tr->anchor_job == j)
                                tr->anchor_job = installed_job;

                        hashmap_remove_value(m->jobs, UINT32_TO_PTR(j->id), j);
                        free_and_replace_full(j, installed_job, job_free);
                }

                job_add_to_run_queue(j);
                job_add_to_dbus_queue(j);
                job_start_timer(j, false);
                job_shutdown_magic(j);

                /* When 'affected' is specified, let's track in it all jobs that were touched because of
                 * this transaction. */
                if (affected_jobs)
                        (void) set_put(affected_jobs, j);
        }

        return 0;

rollback:
        /* Undo the first pass: the jobs still belong to the transaction, only drop them from m->jobs. */
        HASHMAP_FOREACH(j, tr->jobs)
                hashmap_remove_value(m->jobs, UINT32_TO_PTR(j->id), j);

        return r;
}
710 | | |
/* This applies the changes recorded in tr->jobs to the actual list of jobs, if possible. Returns 0 on
 * success, or a negative errno (with 'e' populated) if the transaction cannot be activated. */
int transaction_activate(
                Transaction *tr,
                Manager *m,
                JobMode mode,
                Set *affected_jobs,
                sd_bus_error *e) {

        Job *j;
        int r;
        unsigned generation = 1;

        assert(tr);
        assert(m);

        /* Reset the generation counter of all installed jobs. The detection of cycles looks at installed
         * jobs. If they had a non-zero generation from some previous walk of the graph, the algorithm would
         * break. */
        HASHMAP_FOREACH(j, m->jobs)
                j->generation = 0;

        /* First step: figure out which jobs matter. */
        transaction_find_jobs_that_matter_to_anchor(tr->anchor_job, generation++);

        /* Second step: Try not to stop any running services if we don't have to. Don't try to reverse
         * running jobs if we don't have to. */
        r = transaction_minimize_impact(tr, mode, e);
        if (r < 0)
                return r; /* Note that we don't log here, because for JOB_LENIENT conflicts are very much
                           * expected and shouldn't appear to be fatal for the unit. Only inform the caller
                           * via bus error. */

        /* Third step: Drop redundant jobs. */
        transaction_drop_redundant(tr);

        for (;;) {
                /* Fourth step: Let's remove unneeded jobs that might be lurking. */
                if (mode != JOB_ISOLATE)
                        transaction_collect_garbage(tr);

                /* Fifth step: verify order makes sense and correct cycles if necessary and possible. */
                r = transaction_verify_order(tr, &generation, e);
                if (r >= 0)
                        break;
                if (r != -EAGAIN)
                        return log_warning_errno(r, "Requested transaction contains an unfixable cyclic ordering dependency: %s",
                                                 bus_error_message(e, r));

                /* Let's see if the resulting transaction ordering graph is still cyclic... */
        }

        for (;;) {
                /* Sixth step: let's drop unmergeable entries if necessary and possible, merge entries we can
                 * merge. */
                r = transaction_merge_jobs(tr, e);
                if (r >= 0)
                        break;
                if (r != -EAGAIN)
                        return log_warning_errno(r, "Requested transaction contains unmergeable jobs: %s",
                                                 bus_error_message(e, r));

                /* Seventh step: an entry got dropped, let's garbage collect its dependencies. */
                if (mode != JOB_ISOLATE)
                        transaction_collect_garbage(tr);

                /* Let's see if the resulting transaction still has unmergeable entries... */
        }

        /* Eighth step: Drop redundant jobs again, if the merging now allows us to drop more. */
        transaction_drop_redundant(tr);

        /* Ninth step: check whether we can actually apply this. */
        r = transaction_is_destructive(tr, mode, e);
        if (r < 0)
                return log_notice_errno(r, "Requested transaction contradicts existing jobs: %s",
                                        bus_error_message(e, r));

        /* Tenth step: apply changes. */
        r = transaction_apply(tr, m, mode, affected_jobs);
        if (r < 0)
                return log_warning_errno(r, "Failed to apply transaction: %m");

        assert(hashmap_isempty(tr->jobs));

        /* Are there any jobs now? Then make sure we have the idle pipe around. We don't really care too much
         * whether this works or not, as the idle pipe is a feature for cosmetics, not actually useful for
         * anything beyond that. */
        if (!hashmap_isempty(m->jobs))
                (void) manager_allocate_idle_pipe(m);

        return 0;
}
804 | | |
/* Looks for an existing prospective job of the given type for the unit and returns it. If it doesn't
 * exist, a new job is created and prepended to the unit's per-transaction job list. Returns NULL on
 * allocation failure. '*is_new' (optional) reports whether the job was newly created. */
static Job* transaction_add_one_job(Transaction *tr, JobType type, Unit *unit, bool *is_new) {
        Job *j, *f;

        assert(tr);
        assert(unit);

        /* 'f' is the head of the unit's existing job list in the transaction, or NULL. */
        f = hashmap_get(tr->jobs, unit);

        LIST_FOREACH(transaction, i, f) {
                assert(i->unit == unit);

                if (i->type == type) {
                        if (is_new)
                                *is_new = false;
                        return i;
                }
        }

        j = job_new(unit, type);
        if (!j)
                return NULL;

        j->irreversible = tr->irreversible;

        LIST_PREPEND(transaction, f, j);

        /* Register the new list head; undo the prepend and free the job on failure. */
        if (hashmap_replace(tr->jobs, unit, f) < 0) {
                LIST_REMOVE(transaction, f, j);
                job_free(j);
                return NULL;
        }

        if (is_new)
                *is_new = true;

        log_trace("Added job %s/%s to transaction.", unit->id, job_type_to_string(type));

        return j;
}
847 | | |
/* Detaches job 'j' from the transaction: removes it from its unit's per-transaction job list (fixing
 * up the hashmap head if needed) and frees all its JobDependency links. If delete_dependencies is
 * true, jobs that were pulled in via a 'matters' link from 'j' are recursively deleted too. */
static void transaction_unlink_job(Transaction *tr, Job *j, bool delete_dependencies) {
        assert(tr);
        assert(j);

        if (j->transaction_prev)
                j->transaction_prev->transaction_next = j->transaction_next;
        else if (j->transaction_next)
                /* j was the list head: promote its successor in the hashmap. */
                hashmap_replace(tr->jobs, j->unit, j->transaction_next);
        else
                hashmap_remove_value(tr->jobs, j->unit, j);

        if (j->transaction_next)
                j->transaction_next->transaction_prev = j->transaction_prev;

        j->transaction_prev = j->transaction_next = NULL;

        while (j->subject_list)
                job_dependency_free(j->subject_list);

        while (j->object_list) {
                /* Remember the subject before freeing the link, so we can chase it afterwards. */
                Job *other = j->object_list->matters ? j->object_list->subject : NULL;

                job_dependency_free(j->object_list);

                if (other && delete_dependencies) {
                        log_unit_debug(other->unit,
                                       "Deleting job %s/%s as dependency of job %s/%s",
                                       other->unit->id, job_type_to_string(other->type),
                                       j->unit->id, job_type_to_string(j->type));
                        transaction_delete_job(tr, other, delete_dependencies);
                }
        }
}
881 | | |
882 | | void transaction_add_propagate_reload_jobs( |
883 | | Transaction *tr, |
884 | | Unit *unit, |
885 | | Job *by, |
886 | 0 | TransactionAddFlags flags) { |
887 | |
|
888 | 0 | JobType nt; |
889 | 0 | Unit *dep; |
890 | 0 | int r; |
891 | |
|
892 | 0 | assert(tr); |
893 | 0 | assert(unit); |
894 | |
|
895 | 0 | UNIT_FOREACH_DEPENDENCY_SAFE(dep, unit, UNIT_ATOM_PROPAGATES_RELOAD_TO) { |
896 | 0 | _cleanup_(sd_bus_error_free) sd_bus_error e = SD_BUS_ERROR_NULL; |
897 | |
|
898 | 0 | nt = job_type_collapse(JOB_TRY_RELOAD, dep); |
899 | 0 | if (nt == JOB_NOP) |
900 | 0 | continue; |
901 | | |
902 | 0 | r = transaction_add_job_and_dependencies(tr, nt, dep, by, flags, &e); |
903 | 0 | if (r < 0) |
904 | 0 | log_unit_warning(dep, |
905 | 0 | "Cannot add dependency reload job, ignoring: %s", |
906 | 0 | bus_error_message(&e, r)); |
907 | 0 | } |
908 | 0 | } |
909 | | |
910 | 0 | static JobType job_type_propagate_stop_graceful(Job *j) { |
911 | 0 | JobType type; |
912 | |
|
913 | 0 | if (!j) |
914 | 0 | return JOB_STOP; |
915 | | |
916 | 0 | type = JOB_STOP; |
917 | |
|
918 | 0 | LIST_FOREACH(transaction, i, j) |
919 | 0 | switch (i->type) { |
920 | | |
921 | 0 | case JOB_STOP: |
922 | 0 | case JOB_RESTART: |
923 | | /* Nothing to worry about, an appropriate job is in-place. */ |
924 | 0 | return JOB_NOP; |
925 | | |
926 | 0 | case JOB_START: |
927 | | /* This unit is pulled in by other dependency types in this transaction. We will run |
928 | | * into job type conflict if we enqueue a stop job, so let's enqueue a restart job |
929 | | * instead. */ |
930 | 0 | type = JOB_RESTART; |
931 | 0 | break; |
932 | | |
933 | 0 | default: /* We don't care about others. */ |
934 | 0 | ; |
935 | |
|
936 | 0 | } |
937 | | |
938 | 0 | return type; |
939 | 0 | } |
940 | | |
/* Adds a job of the given type for 'unit' to the transaction, then recursively adds jobs for all
 * units pulled in via the relevant dependency atoms (requirements, conflicts, propagated stops,
 * restarts and reloads).
 *
 * 'by' is the requiring job, or NULL exactly once per transaction for the anchor job (the job this
 * transaction was created for). 'flags' controls how the dependency is recorded and propagated,
 * 'e' receives bus error details on failure.
 *
 * Returns 0 on success, negative errno-style error on failure. Note the three distinct error
 * policies for recursive calls below: hard failures delete the top-level job again (goto fail),
 * some propagate the error directly (return r), and best-effort dependencies merely log. */
int transaction_add_job_and_dependencies(
                Transaction *tr,
                JobType type,
                Unit *unit,
                Job *by,
                TransactionAddFlags flags,
                sd_bus_error *e) {

        bool is_new;
        Job *job;
        int r;

        assert(tr);
        assert(type >= 0);
        assert(type < _JOB_TYPE_MAX);
        assert(type < _JOB_TYPE_MAX_IN_TRANSACTION);
        assert(unit);

        /* Before adding jobs for this unit, let's ensure that its state has been loaded. This matters when
         * jobs are spawned as part of coldplugging itself (see e. g. path_coldplug()). This way, we
         * "recursively" coldplug units, ensuring that we do not look at state of not-yet-coldplugged units. */
        if (MANAGER_IS_RELOADING(unit->manager))
                unit_coldplug(unit);

        if (by)
                log_trace("Pulling in %s/%s from %s/%s",
                          unit->id, job_type_to_string(type),
                          by->unit->id, job_type_to_string(by->type));

        /* Safety check that the unit is a valid state, i.e. not in UNIT_STUB or UNIT_MERGED which should
         * only be set temporarily. */
        if (!UNIT_IS_LOAD_COMPLETE(unit->load_state))
                return sd_bus_error_setf(e, BUS_ERROR_LOAD_FAILED, "Unit %s is not loaded properly.", unit->id);

        if (type != JOB_STOP) {
                /* The time-based cache allows new units to be started without daemon-reload, but if they are
                 * already referenced (because of dependencies or ordering) then we have to force a load of
                 * the fragment. As an optimization, check first if anything in the usual paths was modified
                 * since the last time the cache was loaded. Also check if the last time an attempt to load
                 * the unit was made was before the most recent cache refresh, so that we know we need to try
                 * again — even if the cache is current, it might have been updated in a different context
                 * before we had a chance to retry loading this particular unit.
                 *
                 * Given building up the transaction is a synchronous operation, attempt to load the unit
                 * immediately. */
                if (manager_unit_cache_should_retry_load(unit)) {
                        assert(unit->load_state == UNIT_NOT_FOUND);
                        unit->load_state = UNIT_STUB;
                        unit->load_error = 0;
                        (void) unit_load(unit);
                        assert(unit->load_state != UNIT_STUB);
                }

                r = bus_unit_validate_load_state(unit, e);
                if (r < 0)
                        return r;
        }

        if (!unit_job_is_applicable(unit, type))
                return sd_bus_error_setf(e, BUS_ERROR_JOB_TYPE_NOT_APPLICABLE,
                                         "Job type %s is not applicable for unit %s.",
                                         job_type_to_string(type), unit->id);

        if (type == JOB_START) {
                /* The hard concurrency limit for slice units we already enforce when a job is enqueued. */
                Slice *slice = SLICE(UNIT_GET_SLICE(unit));
                if (slice && slice_concurrency_hard_max_reached(slice, unit))
                        return sd_bus_error_setf(
                                        e, BUS_ERROR_CONCURRENCY_LIMIT_REACHED,
                                        "Concurrency limit of the slice unit '%s' (or any of its parents) the unit '%s' is contained in has been reached, refusing start job.",
                                        UNIT(slice)->id, unit->id);
        }

        /* First add the job. */
        job = transaction_add_one_job(tr, type, unit, &is_new);
        if (!job)
                return -ENOMEM;

        if (FLAGS_SET(flags, TRANSACTION_IGNORE_ORDER))
                job->ignore_order = true;

        /* Then, add a link to the job. */
        if (by) {
                if (!job_dependency_new(by, job, FLAGS_SET(flags, TRANSACTION_MATTERS), FLAGS_SET(flags, TRANSACTION_CONFLICTS)))
                        return -ENOMEM;
        } else {
                /* If the job has no parent job, it is the anchor job. */
                assert(!tr->anchor_job);
                tr->anchor_job = job;

                if (FLAGS_SET(flags, TRANSACTION_REENQUEUE_ANCHOR))
                        job->refuse_late_merge = true;
        }

        /* If the job already existed, its dependencies were processed when it was first added; NOP
         * jobs pull in nothing by definition. */
        if (!is_new || FLAGS_SET(flags, TRANSACTION_IGNORE_REQUIREMENTS) || type == JOB_NOP)
                return 0;

        _cleanup_set_free_ Set *following = NULL;
        Unit *dep;

        /* If we are following some other unit, make sure we add all dependencies of everybody following. */
        if (unit_following_set(job->unit, &following) > 0)
                SET_FOREACH(dep, following) {
                        r = transaction_add_job_and_dependencies(tr, type, dep, job, flags & TRANSACTION_IGNORE_ORDER, e);
                        if (r < 0) {
                                /* Best-effort: failures here never fail the whole transaction. */
                                log_unit_full_errno(dep, r == -ERFKILL ? LOG_INFO : LOG_WARNING, r,
                                                    "Cannot add dependency job, ignoring: %s",
                                                    bus_error_message(e, r));
                                sd_bus_error_free(e);
                        }
                }

        /* Finally, recursively add in all dependencies. */
        if (IN_SET(type, JOB_START, JOB_RESTART)) {
                /* Hard requirements: failure to add them (other than "job type not applicable")
                 * invalidates this job too. */
                UNIT_FOREACH_DEPENDENCY_SAFE(dep, job->unit, UNIT_ATOM_PULL_IN_START) {
                        r = transaction_add_job_and_dependencies(tr, JOB_START, dep, job, TRANSACTION_MATTERS | (flags & TRANSACTION_IGNORE_ORDER), e);
                        if (r < 0) {
                                if (r != -EBADR) /* job type not applicable */
                                        goto fail;

                                sd_bus_error_free(e);
                        }
                }

                /* Soft requirements (e.g. Wants=): failures are logged, never fatal. */
                UNIT_FOREACH_DEPENDENCY_SAFE(dep, job->unit, UNIT_ATOM_PULL_IN_START_IGNORED) {
                        r = transaction_add_job_and_dependencies(tr, JOB_START, dep, job, flags & TRANSACTION_IGNORE_ORDER, e);
                        if (r < 0) {
                                /* unit masked, job type not applicable and unit not found are not considered
                                 * as errors. */
                                log_unit_full_errno(dep,
                                                    IN_SET(r, -ERFKILL, -EBADR, -ENOENT) ? LOG_DEBUG : LOG_WARNING,
                                                    r, "Cannot add dependency job, ignoring: %s",
                                                    bus_error_message(e, r));
                                sd_bus_error_free(e);
                        }
                }

                /* Units that must already be active (rather than started by us). */
                UNIT_FOREACH_DEPENDENCY_SAFE(dep, job->unit, UNIT_ATOM_PULL_IN_VERIFY) {
                        r = transaction_add_job_and_dependencies(tr, JOB_VERIFY_ACTIVE, dep, job, TRANSACTION_MATTERS | (flags & TRANSACTION_IGNORE_ORDER), e);
                        if (r < 0) {
                                if (r != -EBADR) /* job type not applicable */
                                        goto fail;

                                sd_bus_error_free(e);
                        }
                }

                /* Conflicting units that must be stopped, with the conflict recorded. */
                UNIT_FOREACH_DEPENDENCY_SAFE(dep, job->unit, UNIT_ATOM_PULL_IN_STOP) {
                        r = transaction_add_job_and_dependencies(tr, JOB_STOP, dep, job, TRANSACTION_MATTERS | TRANSACTION_CONFLICTS | (flags & TRANSACTION_IGNORE_ORDER), e);
                        if (r < 0) {
                                if (r != -EBADR) /* job type not applicable */
                                        goto fail;

                                sd_bus_error_free(e);
                        }
                }

                /* Conflicts we stop on a best-effort basis only. */
                UNIT_FOREACH_DEPENDENCY_SAFE(dep, job->unit, UNIT_ATOM_PULL_IN_STOP_IGNORED) {
                        r = transaction_add_job_and_dependencies(tr, JOB_STOP, dep, job, flags & TRANSACTION_IGNORE_ORDER, e);
                        if (r < 0) {
                                log_unit_warning(dep,
                                                 "Cannot add dependency job, ignoring: %s",
                                                 bus_error_message(e, r));
                                sd_bus_error_free(e);
                        }
                }
        }

        if (IN_SET(type, JOB_RESTART, JOB_STOP) || (type == JOB_START && FLAGS_SET(flags, TRANSACTION_PROPAGATE_START_AS_RESTART))) {
                bool is_stop = type == JOB_STOP;

                UNIT_FOREACH_DEPENDENCY_SAFE(dep, job->unit, UNIT_ATOM_PROPAGATE_STOP) {
                        /* We propagate RESTART only as TRY_RESTART, in order not to start dependencies that
                         * are not around. */
                        JobType nt;

                        nt = job_type_collapse(is_stop ? JOB_STOP : JOB_TRY_RESTART, dep);
                        if (nt == JOB_NOP)
                                continue;

                        r = transaction_add_job_and_dependencies(tr, nt, dep, job, TRANSACTION_MATTERS | (flags & TRANSACTION_IGNORE_ORDER), e);
                        if (r < 0) {
                                if (r != -EBADR) /* job type not applicable */
                                        return r;

                                sd_bus_error_free(e);
                        }
                }

                /* Process UNIT_ATOM_PROPAGATE_STOP_GRACEFUL (PropagatesStopTo=) units. We need to wait until
                 * all other dependencies are processed, i.e. we're the anchor job or already in the
                 * recursion that handles it. */
                if (!by || FLAGS_SET(flags, TRANSACTION_PROCESS_PROPAGATE_STOP_GRACEFUL))
                        UNIT_FOREACH_DEPENDENCY_SAFE(dep, job->unit, UNIT_ATOM_PROPAGATE_STOP_GRACEFUL) {
                                JobType nt;
                                Job *j;

                                /* Pick stop/restart/nop depending on what is already queued for 'dep'. */
                                j = hashmap_get(tr->jobs, dep);
                                nt = job_type_propagate_stop_graceful(j);

                                if (nt == JOB_NOP)
                                        continue;

                                r = transaction_add_job_and_dependencies(tr, nt, dep, job, TRANSACTION_MATTERS | (flags & TRANSACTION_IGNORE_ORDER) | TRANSACTION_PROCESS_PROPAGATE_STOP_GRACEFUL, e);
                                if (r < 0) {
                                        if (r != -EBADR) /* job type not applicable */
                                                return r;

                                        sd_bus_error_free(e);
                                }
                        }
        }

        if (type == JOB_RELOAD)
                transaction_add_propagate_reload_jobs(tr, job->unit, job, flags & TRANSACTION_IGNORE_ORDER);

        /* JOB_VERIFY_ACTIVE requires no dependency handling. */

        return 0;

fail:
        /* Recursive call failed to add required jobs so let's drop top level job as well. */
        log_unit_debug_errno(unit, r, "Cannot add dependency job to transaction, deleting job %s/%s again: %s",
                             unit->id, job_type_to_string(type), bus_error_message(e, r));

        transaction_delete_job(tr, job, /* delete_dependencies= */ false);
        return r;
}
1169 | | |
1170 | 0 | static bool shall_stop_on_isolate(Transaction *tr, Unit *u) { |
1171 | 0 | assert(tr); |
1172 | 0 | assert(u); |
1173 | |
|
1174 | 0 | if (u->ignore_on_isolate) |
1175 | 0 | return false; |
1176 | | |
1177 | | /* Is there already something listed for this? */ |
1178 | 0 | if (hashmap_contains(tr->jobs, u)) |
1179 | 0 | return false; |
1180 | | |
1181 | | /* Keep units that are triggered by units we want to keep around. */ |
1182 | 0 | Unit *other; |
1183 | 0 | UNIT_FOREACH_DEPENDENCY(other, u, UNIT_ATOM_TRIGGERED_BY) { |
1184 | 0 | if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(other))) |
1185 | 0 | continue; |
1186 | | |
1187 | | /* Is the trigger about to go down? */ |
1188 | 0 | Job *other_job = hashmap_get(tr->jobs, other); |
1189 | |
|
1190 | 0 | bool has_stop = false; |
1191 | 0 | LIST_FOREACH(transaction, j, other_job) |
1192 | 0 | if (j->type == JOB_STOP) { |
1193 | 0 | has_stop = true; |
1194 | 0 | break; |
1195 | 0 | } |
1196 | 0 | if (has_stop) |
1197 | 0 | continue; |
1198 | | |
1199 | 0 | if (other->ignore_on_isolate || other_job) |
1200 | 0 | return false; |
1201 | 0 | } |
1202 | | |
1203 | 0 | return true; |
1204 | 0 | } |
1205 | | |
1206 | 0 | int transaction_add_isolate_jobs(Transaction *tr, Manager *m) { |
1207 | 0 | Unit *u; |
1208 | 0 | char *k; |
1209 | 0 | int r; |
1210 | |
|
1211 | 0 | assert(tr); |
1212 | 0 | assert(m); |
1213 | |
|
1214 | 0 | HASHMAP_FOREACH_KEY(u, k, m->units) { |
1215 | 0 | _cleanup_(sd_bus_error_free) sd_bus_error e = SD_BUS_ERROR_NULL; |
1216 | | |
1217 | | /* Ignore aliases. */ |
1218 | 0 | if (u->id != k) |
1219 | 0 | continue; |
1220 | | |
1221 | | /* No need to stop inactive units. */ |
1222 | 0 | if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) && !u->job) |
1223 | 0 | continue; |
1224 | | |
1225 | 0 | if (!shall_stop_on_isolate(tr, u)) |
1226 | 0 | continue; |
1227 | | |
1228 | 0 | r = transaction_add_job_and_dependencies(tr, JOB_STOP, u, tr->anchor_job, TRANSACTION_MATTERS, &e); |
1229 | 0 | if (r < 0) |
1230 | 0 | log_unit_warning_errno(u, r, "Cannot add isolate job, ignoring: %s", bus_error_message(&e, r)); |
1231 | 0 | } |
1232 | | |
1233 | 0 | return 0; |
1234 | 0 | } |
1235 | | |
1236 | 0 | int transaction_add_triggering_jobs(Transaction *tr, Unit *u) { |
1237 | 0 | Unit *trigger; |
1238 | 0 | int r; |
1239 | |
|
1240 | 0 | assert(tr); |
1241 | 0 | assert(u); |
1242 | |
|
1243 | 0 | UNIT_FOREACH_DEPENDENCY_SAFE(trigger, u, UNIT_ATOM_TRIGGERED_BY) { |
1244 | 0 | _cleanup_(sd_bus_error_free) sd_bus_error e = SD_BUS_ERROR_NULL; |
1245 | | |
1246 | | /* No need to stop inactive jobs. */ |
1247 | 0 | if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(trigger)) && !trigger->job) |
1248 | 0 | continue; |
1249 | | |
1250 | | /* Is there already something listed for this? */ |
1251 | 0 | if (hashmap_contains(tr->jobs, trigger)) |
1252 | 0 | continue; |
1253 | | |
1254 | 0 | r = transaction_add_job_and_dependencies(tr, JOB_STOP, trigger, tr->anchor_job, TRANSACTION_MATTERS, &e); |
1255 | 0 | if (r < 0) |
1256 | 0 | log_unit_warning_errno(u, r, "Cannot add triggered by job, ignoring: %s", bus_error_message(&e, r)); |
1257 | 0 | } |
1258 | | |
1259 | 0 | return 0; |
1260 | 0 | } |
1261 | | |
1262 | 0 | Transaction* transaction_new(bool irreversible, uint64_t id) { |
1263 | 0 | _cleanup_free_ Transaction *tr = NULL; |
1264 | |
|
1265 | 0 | assert(id != 0); |
1266 | |
|
1267 | 0 | tr = new(Transaction, 1); |
1268 | 0 | if (!tr) |
1269 | 0 | return NULL; |
1270 | | |
1271 | 0 | *tr = (Transaction) { |
1272 | 0 | .jobs = hashmap_new(NULL), |
1273 | 0 | .irreversible = irreversible, |
1274 | 0 | .id = id, |
1275 | 0 | }; |
1276 | 0 | if (!tr->jobs) |
1277 | 0 | return NULL; |
1278 | | |
1279 | 0 | return TAKE_PTR(tr); |
1280 | 0 | } |
1281 | | |
1282 | 0 | Transaction* transaction_free(Transaction *tr) { |
1283 | 0 | if (!tr) |
1284 | 0 | return NULL; |
1285 | | |
1286 | 0 | assert(hashmap_isempty(tr->jobs)); |
1287 | 0 | hashmap_free(tr->jobs); |
1288 | |
|
1289 | 0 | return mfree(tr); |
1290 | 0 | } |
1291 | | |
1292 | 0 | Transaction* transaction_abort_and_free(Transaction *tr) { |
1293 | 0 | if (!tr) |
1294 | 0 | return NULL; |
1295 | | |
1296 | 0 | transaction_abort(tr); |
1297 | |
|
1298 | 0 | return transaction_free(tr); |
1299 | 0 | } |