/src/qubes-os/qubes-core-qubesdb/daemon/db-daemon.c
Line | Count | Source (jump to first uncovered line) |
1 | | #define _GNU_SOURCE 1 |
2 | | #include <stdio.h> |
3 | | #include <stdlib.h> |
4 | | #include <fcntl.h> |
5 | | #include <errno.h> |
6 | | #ifndef WIN32 |
7 | | #include <sys/socket.h> |
8 | | #include <sys/un.h> |
9 | | #include <unistd.h> |
10 | | #include <sys/types.h> |
11 | | #include <sys/stat.h> |
12 | | #include <signal.h> |
13 | | #include <poll.h> |
14 | | #else |
15 | | #include <windows.h> |
16 | | #include <sddl.h> |
17 | | #include <lmcons.h> |
18 | | #include <strsafe.h> |
19 | | |
20 | | #include <log.h> |
21 | | #include <pipe-server.h> |
22 | | #include <service.h> |
23 | | #include <list.h> |
24 | | #include <vchan-common.h> |
25 | | #endif |
26 | | |
27 | | #ifndef WIN32 |
28 | | #ifdef HAVE_SYSTEMD |
29 | | #include <systemd/sd-daemon.h> |
30 | | #endif |
31 | | #else // !WIN32 |
// parameters for a client pipe thread (passed to pipe_thread_client;
// allocated in client_connected_callback, freed by the client thread itself)
struct thread_param {
    struct db_daemon_data *daemon;  // daemon global state (shared, guarded by daemon->lock)
    LONGLONG id;                    // pipe-server client id this thread serves
};
37 | | #endif |
38 | | |
39 | | #include "buffer.h" |
40 | | #include <qubesdb.h> |
41 | | #include "qubesdb_internal.h" |
42 | | |
43 | | int init_vchan(struct db_daemon_data *d); |
44 | | |
45 | | #ifndef WIN32 |
/* Set from the SIGTERM handler and polled in mainloop() to trigger a graceful
 * shutdown.  volatile sig_atomic_t is the only object type the C standard
 * guarantees may be safely written from an asynchronous signal handler
 * (a plain int here was formally undefined behavior). */
volatile sig_atomic_t sigterm_received = 0;

/** SIGTERM handler: only record the request; mainloop() performs the
 * actual shutdown (nothing async-signal-unsafe may run here). */
static void sigterm_handler(int s) {
    (void)s;  /* only SIGTERM is routed here */
    sigterm_received = 1;
}
50 | | |
51 | | /** Register new client |
52 | | * @param d Daemon global data |
53 | | * @param c Socket of new client |
54 | | * @return 1 on success, 0 on failure |
55 | | */ |
56 | 0 | static int add_client(struct db_daemon_data *d, client_socket_t c) { |
57 | 0 | struct client *client; |
58 | |
|
59 | 0 | client = malloc(sizeof(*client)); |
60 | 0 | if (!client) { |
61 | 0 | fprintf(stderr, "ERROR: cannot allocate memory for new client\n"); |
62 | 0 | return 0; |
63 | 0 | } |
64 | 0 | client->fd = c; |
65 | |
|
66 | 0 | client->write_queue = buffer_create(); |
67 | 0 | if (!client->write_queue) { |
68 | 0 | fprintf(stderr, "ERROR: cannot allocate memory for new client buffer\n"); |
69 | 0 | free(client); |
70 | 0 | return 0; |
71 | 0 | } |
72 | 0 | client->next = d->client_list; |
73 | 0 | d->client_list = client; |
74 | |
|
75 | 0 | return handle_client_connect(d, client); |
76 | 0 | } |
77 | | |
78 | | /** Disconnect client |
79 | | * @param d Daemon global data |
80 | | * @param c Socket of client to disconnect |
81 | | * @return 1 on success, 0 on failure |
82 | | */ |
83 | 0 | static int disconnect_client(struct db_daemon_data *d, struct client *c) { |
84 | 0 | struct client *client, *prev_client; |
85 | |
|
86 | 0 | if (!handle_client_disconnect(d, c)) |
87 | 0 | return 0; |
88 | | |
89 | 0 | close(c->fd); |
90 | 0 | buffer_free(c->write_queue); |
91 | |
|
92 | 0 | client = d->client_list; |
93 | 0 | prev_client = NULL; |
94 | 0 | while (client) { |
95 | 0 | if (client == c) { |
96 | 0 | if (prev_client) |
97 | 0 | prev_client->next = client->next; |
98 | 0 | else |
99 | 0 | d->client_list = client->next; |
100 | 0 | free(client); |
101 | 0 | break; |
102 | 0 | } |
103 | 0 | prev_client = client; |
104 | 0 | client = client->next; |
105 | 0 | } |
106 | |
|
107 | 0 | return 1; |
108 | 0 | } |
109 | | |
110 | | /** Receive new client connection and register such client |
111 | | * @param d Daemon global data |
112 | | * @return 1 on success, 0 on failure |
113 | | */ |
114 | 0 | static int accept_new_client(struct db_daemon_data *d) { |
115 | 0 | client_socket_t new_client_fd; |
116 | 0 | struct sockaddr_un peer; |
117 | 0 | unsigned int addrlen; |
118 | |
|
119 | 0 | addrlen = sizeof(peer); |
120 | 0 | new_client_fd = accept(d->socket_fd, (struct sockaddr *) &peer, &addrlen); |
121 | 0 | if (new_client_fd == -1) { |
122 | 0 | perror("unix accept"); |
123 | 0 | exit(1); |
124 | 0 | } |
125 | 0 | return add_client(d, new_client_fd); |
126 | 0 | } |
127 | | |
128 | | #else // !WIN32 |
129 | | |
130 | | /* Main pipe server processing loop (separate thread). |
131 | | * Takes care of accepting clients and receiving data. |
132 | | */ |
133 | | DWORD WINAPI pipe_thread_main(PVOID param) { |
134 | | PIPE_SERVER ps = (PIPE_SERVER)param; |
135 | | |
136 | | // only returns on error |
137 | | return QpsMainLoop(ps); |
138 | | } |
139 | | |
/* Windows main loop (runs on the service worker thread).
 * Brings up the vchan, performs the initial full DB sync when running as the
 * VM-side daemon, then starts the pipe-server thread and keeps processing
 * vchan traffic until the pipe thread dies, the service stop event fires,
 * or a fatal vchan error occurs.
 * @return 1 on clean (service-stop) shutdown, 0 on error
 */
int mainloop(struct db_daemon_data *d) {
    DWORD ret;
    DWORD status;
    HANDLE pipe_thread;
    HANDLE wait_objects[3];

    if (!init_vchan(d)) {
        perror("vchan initialization failed");
        return 0;
    }

    if (!d->remote_name) {
        /* request database sync from dom0 */
        if (!request_full_db_sync(d)) {
            LogError("FATAL: failed to request DB sync");
            return 0;
        }
        d->multiread_requested = 1;
        /* wait for complete response; the loop ends once vchan processing
         * clears multiread_requested */
        while (d->multiread_requested) {
            AcquireSRWLockExclusive(&d->lock);
            if (!handle_vchan_data(d)) {
                LogError("FATAL: vchan error");
                ReleaseSRWLockExclusive(&d->lock);
                return 0;
            }
            ReleaseSRWLockExclusive(&d->lock);
        }
    }

    // Create the thread that will handle client pipes
    pipe_thread = CreateThread(NULL, 0, pipe_thread_main, d->pipe_server, 0, NULL);
    if (!pipe_thread) {
        win_perror("CreateThread(main pipe thread)");
        return 0;
    }

    // We'll wait for the pipe thread to exit, if it terminates
    // we're going down as well.
    wait_objects[0] = pipe_thread;

    // Also exit if the service is being stopped.
    wait_objects[1] = d->service_stop_event;

    // This loop will just process vchan data.
    while (1) {
        /* re-query the vchan handle each iteration: init_vchan() below may
         * have replaced d->vchan after a reconnect */
        wait_objects[2] = libvchan_fd_for_select(d->vchan);
        /* TODO: add one more event for service termination */
        ret = WaitForMultipleObjects(3, wait_objects, FALSE, INFINITE) - WAIT_OBJECT_0;

        switch (ret) {
        case 0: {
            // pipe thread terminated, abort
            GetExitCodeThread(pipe_thread, &status);
            win_perror2(status, "pipe thread");
            return 0;
        }

        case 1: {
            // service stopped
            LogInfo("service stopped, exiting");
            goto cleanup;
        }

        case 2: {
            // vchan read
            if (d->remote_connected && !libvchan_is_open(d->vchan)) {
                fprintf(stderr, "vchan closed\n");
                if (!d->remote_name) {
                    /* In the VM, wait for possible qubesdb-daemon dom0 restart.
                     * This can be a case for DispVM */
                    /* FIXME: in such case dom0 daemon will have no entries
                     * currently present in VM instance; perhaps we should
                     * clear VM instance? */
                    if (!init_vchan(d)) {
                        fprintf(stderr, "vchan reconnection failed\n");
                        break;
                    }
                    /* request database sync from dom0 */
                    if (!request_full_db_sync(d)) {
                        fprintf(stderr, "FATAL: failed to request DB sync\n");
                        return 0;
                    }
                    d->multiread_requested = 1;
                } else {
                    /* do not send further updates, until VM's daemon restart
                     * and re-sync */
                    d->remote_connected = 0;
                    break;
                }
                break;
            }

            if (d->remote_connected || libvchan_is_open(d->vchan)) {
                /* drain everything currently buffered on the vchan,
                 * holding the daemon lock per message */
                while (libvchan_data_ready(d->vchan)) {
                    AcquireSRWLockExclusive(&d->lock);
                    if (!handle_vchan_data(d)) {
                        fprintf(stderr, "FATAL: vchan data processing failed\n");
                        ReleaseSRWLockExclusive(&d->lock);
                        return 0;
                    }
                    ReleaseSRWLockExclusive(&d->lock);
                }
            }
            break;
        }

        default: {
            // wait failed
            win_perror("WaitForMultipleObjects");
            return 0;
        }
        }
    }

cleanup:
    /* give the pipe thread a moment to exit on its own, then kill it */
    if (WaitForSingleObject(pipe_thread, 1000) != WAIT_OBJECT_0)
    {
        TerminateThread(pipe_thread, 0);
        CloseHandle(pipe_thread);
    }
    QpsDestroy(d->pipe_server);
    d->pipe_server = NULL;
    if (d->vchan)
    {
        libvchan_close(d->vchan);
        d->vchan = NULL;
    }

    return 1;
}
271 | | |
272 | | DWORD WINAPI pipe_thread_client(PVOID param) { |
273 | | struct thread_param *p = param; |
274 | | struct client c; |
275 | | struct qdb_hdr hdr; |
276 | | DWORD status; |
277 | | |
278 | | c.id = p->id; |
279 | | |
280 | | while (1) { |
281 | | // blocking read |
282 | | status = QpsRead(p->daemon->pipe_server, p->id, &hdr, sizeof(hdr)); |
283 | | if (ERROR_SUCCESS != status) { |
284 | | LogWarning("QpsRead from client %lu failed: %d", p->id, (int)status); |
285 | | AcquireSRWLockExclusive(&p->daemon->lock); |
286 | | handle_client_disconnect(p->daemon, &c); |
287 | | QpsDisconnectClient(p->daemon->pipe_server, p->id); |
288 | | ReleaseSRWLockExclusive(&p->daemon->lock); |
289 | | free(param); |
290 | | return status; |
291 | | } |
292 | | |
293 | | AcquireSRWLockExclusive(&p->daemon->lock); |
294 | | if (!handle_client_data(p->daemon, &c, (char*)&hdr, sizeof(hdr))) { |
295 | | LogWarning("handle_client_data failed, disconnecting client %lu", p->id); |
296 | | handle_client_disconnect(p->daemon, &c); |
297 | | QpsDisconnectClient(p->daemon->pipe_server, p->id); |
298 | | ReleaseSRWLockExclusive(&p->daemon->lock); |
299 | | free(param); |
300 | | return 1; |
301 | | } |
302 | | ReleaseSRWLockExclusive(&p->daemon->lock); |
303 | | } |
304 | | return 0; |
305 | | } |
306 | | |
307 | | void client_connected_callback(PIPE_SERVER server, LONGLONG id, PVOID context) { |
308 | | HANDLE client_thread; |
309 | | struct thread_param *param; |
310 | | |
311 | | param = malloc(sizeof(struct thread_param)); |
312 | | if (!param) { |
313 | | LogError("no memory"); |
314 | | QpsDisconnectClient(server, id); |
315 | | return; |
316 | | } |
317 | | |
318 | | param->id = id; |
319 | | param->daemon = context; |
320 | | client_thread = CreateThread(NULL, 0, pipe_thread_client, param, 0, NULL); |
321 | | if (!client_thread) { |
322 | | win_perror("CreateThread"); |
323 | | free(param); |
324 | | return; |
325 | | } |
326 | | CloseHandle(client_thread); |
327 | | // the client thread will take care of processing client's data |
328 | | } |
329 | | |
/* Create the named-pipe server that clients connect to (Windows counterpart
 * of the UNIX socket setup).  The pipe name embeds the remote domain name in
 * dom0 mode; otherwise the local daemon pipe name is used.
 * @return 1 on success, 0 on failure
 */
int init_server_socket(struct db_daemon_data *d) {
    WCHAR pipe_name[MAX_FILE_PATH];
    /* NOTE(review): sd is unused while the security-descriptor setup below
     * stays commented out */
    PSECURITY_DESCRIPTOR sd = NULL;
    DWORD status;

    /* In dom0 listen only on "local" socket */
    if (d->remote_name && d->remote_domid != 0) {
        StringCbPrintfW(pipe_name, sizeof(pipe_name), QDB_DAEMON_PATH_PATTERN, d->remote_name);
    } else {
        StringCbPrintfW(pipe_name, sizeof(pipe_name), QDB_DAEMON_LOCAL_PATH);
    }
    /*
    if (!ConvertStringSecurityDescriptorToSecurityDescriptorW(
            //TEXT("S:(ML;;NW;;;LW)D:(A;;FA;;;SY)(A;;FA;;;CO)"),
            L"D:(A;;FA;;;SY)(A;;FA;;;CO)",
            SDDL_REVISION_1,
            &sd,
            NULL)) {
        win_perror("ConvertStringSecurityDescriptorToSecurityDescriptor");
        return 0;
    }

    d->sa.lpSecurityDescriptor = sd;
    d->sa.bInheritHandle = FALSE;
    d->sa.nLength = sizeof(d->sa);
    */
    /* NOTE(review): pipe_name is a WCHAR string but the format is narrow
     * "%s" -- confirm LogDebug treats %s as wide, otherwise this logs only
     * the first character */
    LogDebug("pipe: %s", pipe_name);
    status = QpsCreate(pipe_name,
                       4096, // pipe buffers
                       1024 * 1024, // read buffer
                       1000, // write timeout
                       client_connected_callback,
                       NULL,
                       NULL,
                       d, // context
                       NULL,//&d->sa,
                       &d->pipe_server);

    return status == ERROR_SUCCESS;
}
370 | | |
371 | | void close_server_socket(struct db_daemon_data *d) { |
372 | | if (d->pipe_server) |
373 | | QpsDestroy(d->pipe_server); |
374 | | d->pipe_server = NULL; |
375 | | } |
376 | | #endif // WIN32 |
377 | | |
378 | | #ifndef WIN32 |
379 | | |
380 | | static size_t fill_fdsets_for_select(struct db_daemon_data *d, |
381 | 0 | struct pollfd fds[static MAX_CLIENTS + 2]) { |
382 | 0 | struct client *client; |
383 | 0 | size_t total_fds = 2; |
384 | |
|
385 | 0 | fds[0] = (struct pollfd) { |
386 | 0 | .fd = d->socket_fd, |
387 | 0 | .events = POLLIN | POLLHUP, |
388 | 0 | .revents = 0, |
389 | 0 | }; |
390 | 0 | fds[1] = (struct pollfd) { |
391 | 0 | .fd = d->vchan ? libvchan_fd_for_select(d->vchan) : -1, |
392 | 0 | .events = POLLIN | POLLHUP, |
393 | 0 | .revents = 0, |
394 | 0 | }; |
395 | |
|
396 | 0 | client = d->client_list; |
397 | 0 | while (client) { |
398 | 0 | assert(total_fds < MAX_CLIENTS + 2); |
399 | | /* Do not read commands from client, which have some buffered data, |
400 | | * first try to send them all. If client do not handle write buffering |
401 | | * properly, it can cause a deadlock there, but at least qubesdb-daemon |
402 | | * will still handle other requests */ |
403 | 0 | fds[total_fds++] = (struct pollfd) { |
404 | 0 | .fd = client->fd, |
405 | 0 | .events = buffer_datacount(client->write_queue) ? POLLOUT : POLLIN | POLLHUP, |
406 | 0 | .revents = 0, |
407 | 0 | }; |
408 | 0 | client = client->next; |
409 | 0 | } |
410 | 0 | return total_fds; |
411 | 0 | } |
412 | | |
/** POSIX main loop: multiplex the listening socket, the vchan event channel
 * and all connected clients via ppoll() until SIGTERM or a fatal error.
 *
 * SIGTERM is handled race-free: the signal is blocked, sigterm_received is
 * checked, and ppoll() atomically installs the old (unblocked) mask while
 * sleeping -- so a signal arriving between the check and the sleep cannot
 * be lost.
 * @return always 1 (fatal data-processing errors call exit(1) directly)
 */
static int mainloop(struct db_daemon_data *d) {
    struct client *client;
    int ret;
    static struct pollfd fds[MAX_CLIENTS + 2];
    sigset_t sigterm_mask;
    sigset_t oldmask;
    /* 10 s timeout: periodic wakeup even with no fd activity */
    struct timespec ts = { 10, 0 };

    sigemptyset(&sigterm_mask);
    sigaddset(&sigterm_mask, SIGTERM);

    while (1) {
        size_t current_fd = 2;  /* first client slot; 0=socket, 1=vchan */
        size_t const nfds = fill_fdsets_for_select(d, fds);
        assert(nfds >= 2);
        assert(nfds <= MAX_CLIENTS + 2);

        if (sigprocmask(SIG_BLOCK, &sigterm_mask, &oldmask) < 0) {
            perror("sigprocmask");
            break;
        }
        if (sigterm_received) {
            fprintf(stderr, "terminating\n");
            break;
        }
        ret = ppoll(fds, nfds, &ts, &oldmask);
        if (ret < 0) {
            if (errno == EINTR)
                continue;
            perror("ppoll");
            break;
        }
        /* restore signal mask */
        sigprocmask(SIG_SETMASK, &oldmask, NULL);

        if (d->vchan) {
            /* consume the vchan event notification before checking state */
            if (fds[1].revents)
                libvchan_wait(d->vchan);
            if (!libvchan_is_open(d->vchan)) {
                fprintf(stderr, "vchan closed\n");
                if (d->remote_connected) {
                    d->remote_connected = 0;
                    /* it was connected before, try to reconnect */
                    fprintf(stderr, "reconnecting\n");
                    if (!init_vchan(d)) {
                        fprintf(stderr, "vchan reconnection failed\n");
                        break;
                    }
                    if (!d->remote_name) {
                        /* FIXME: consider clearing the database, but needs to
                         * handle watches (DispVM case) */
                        /* request database sync from dom0 */
                        if (!request_full_db_sync(d)) {
                            fprintf(stderr, "FATAL: failed to request DB sync\n");
                            exit(1);
                        }
                        d->multiread_requested = 1;
                    }
                } else {
                    /* it wasn't connected, domain is probably dead */
                    break;
                }
            }
            /* trigger pending data write */
            if (libvchan_buffer_space(d->vchan))
                write_vchan_or_client(d, NULL, NULL, 0);
            while (libvchan_data_ready(d->vchan)) {
                if (!handle_vchan_data(d)) {
                    fprintf(stderr, "FATAL: vchan data processing failed\n");
                    exit(1);
                }
            }
        }

        client = d->client_list;
        while (client) {
            assert(current_fd < MAX_CLIENTS + 2);
            short revents = fds[current_fd++].revents;
            if (revents & POLLOUT) {
                /* just send bufferred data, possibly not all of them */
                write_client_buffered(client, NULL, 0);
            }
            if (revents & (POLLIN | POLLHUP)) {
                if (!handle_client_data(d, client, NULL, 0)) {
                    /* advance before disconnecting: disconnect_client frees
                     * the node */
                    struct client *client_to_remove = client;
                    client = client->next;
                    disconnect_client(d, client_to_remove);
                    continue;
                }
            }
            client = client->next;
        }
        assert(current_fd == nfds);

        /* accept last: a client added now was not polled this round anyway */
        if (fds[0].revents) {
            accept_new_client(d);
        }
    }
    return 1;
}
513 | | |
514 | 0 | static int init_server_socket(struct db_daemon_data *d) { |
515 | 0 | struct sockaddr_un sockname; |
516 | 0 | int s; |
517 | 0 | struct stat stat_buf; |
518 | 0 | mode_t old_umask; |
519 | |
|
520 | 0 | memset(&sockname, 0, sizeof(sockname)); |
521 | 0 | sockname.sun_family = AF_UNIX; |
522 | 0 | if (mkdir("/var/run/qubes", 0775) && errno != EEXIST) { |
523 | 0 | perror("mkdir /var/run/qubes"); |
524 | 0 | return 0; |
525 | 0 | } |
526 | 0 | if (d->remote_name) { |
527 | 0 | if ((unsigned)snprintf(sockname.sun_path, sizeof sockname.sun_path, |
528 | 0 | QDB_DAEMON_PATH_PATTERN, d->remote_name) >= |
529 | 0 | sizeof sockname.sun_path) { |
530 | 0 | perror("snprintf()"); |
531 | 0 | return 0; |
532 | 0 | } |
533 | 0 | if (d->remote_domid == 0) { |
534 | | /* the same daemon as both VM and Admin parts */ |
535 | 0 | unlink(QDB_DAEMON_LOCAL_PATH); |
536 | 0 | if (symlink(sockname.sun_path, QDB_DAEMON_LOCAL_PATH) < 0) { |
537 | 0 | perror("symlink " QDB_DAEMON_LOCAL_PATH); |
538 | 0 | return 0; |
539 | 0 | } |
540 | 0 | } |
541 | 0 | } else { |
542 | 0 | _Static_assert(sizeof QDB_DAEMON_LOCAL_PATH <= sizeof sockname.sun_path, |
543 | 0 | QDB_DAEMON_LOCAL_PATH "too long"); |
544 | 0 | strcpy(sockname.sun_path, QDB_DAEMON_LOCAL_PATH); |
545 | 0 | } |
546 | | |
547 | 0 | if (unlink(sockname.sun_path) && errno != ENOENT) { |
548 | 0 | perror("unlink() failed"); |
549 | 0 | return 0; |
550 | 0 | } |
551 | | |
552 | | /* make socket available for anyone */ |
553 | 0 | old_umask = umask(0); |
554 | |
|
555 | 0 | s = socket(AF_UNIX, SOCK_STREAM, 0); |
556 | 0 | if (bind(s, (struct sockaddr *) &sockname, sizeof(sockname)) == -1) { |
557 | 0 | perror("bind() failed"); |
558 | 0 | close(s); |
559 | 0 | return 0; |
560 | 0 | } |
561 | | // chmod(sockname.sun_path, 0666); |
562 | 0 | if (listen(s, SERVER_SOCKET_BACKLOG) == -1) { |
563 | 0 | perror("listen() failed"); |
564 | 0 | close(s); |
565 | 0 | return 0; |
566 | 0 | } |
567 | 0 | d->socket_fd = s; |
568 | 0 | umask(old_umask); |
569 | 0 | if (stat(sockname.sun_path, &stat_buf) == 0) |
570 | 0 | d->socket_ino = stat_buf.st_ino; |
571 | 0 | return 1; |
572 | 0 | } |
573 | | |
574 | | #endif /* !WIN32 */ |
575 | | |
/** (Re)establish the vchan link to the peer domain.
 * Any existing connection (and its pending output buffer) is dropped first,
 * then the daemon either listens (dom0 backend serving a VM) or connects
 * (VM daemon -> admin domain).  In dom0-serving-dom0 mode no vchan is used.
 * @param d Daemon global data
 * @return 1 on success (d->vchan may still be NULL in dom0-local mode),
 *         0 on failure
 */
int init_vchan(struct db_daemon_data *d) {
    if (d->vchan) {
        /* drop partially-buffered outgoing data together with the old link */
        buffer_free(d->vchan_buffer);
        libvchan_close(d->vchan);
        d->vchan = NULL;
    }
    d->vchan_buffer = buffer_create();
    if (!d->vchan_buffer) {
        fprintf(stderr, "vchan buffer allocation failed\n");
        return 0;
    }
    /* no partially-received command pending on the fresh connection */
    d->vchan_pending_hdr.type = QDB_INVALID_CMD;

    if (d->remote_name) {
        /* dom0 part: listen for connection */
        if (d->remote_domid == 0) {
            /* do not connect from dom0 to dom0 */
            d->vchan = NULL;
            return 1;
        }
#ifndef WIN32
        d->vchan = libvchan_server_init(d->remote_domid, QUBESDB_VCHAN_PORT, 4096, 4096);
#else
        // We give a 5 minute timeout here because xeniface can take some time
        // to load the first time after reboot after pvdrivers installation.
        d->vchan = VchanInitServer(d->remote_domid, QUBESDB_VCHAN_PORT, 4096, 5 * 60 * 1000);
#endif
        if (!d->vchan)
            return 0;
        /* no peer attached yet */
        d->remote_connected = 0;
    } else {
        /* VM part: connect to admin domain */
#ifndef WIN32
        d->vchan = libvchan_client_init(d->remote_domid, QUBESDB_VCHAN_PORT);
#else
        // We give a 5 minute timeout here because xeniface can take some time
        // to load the first time after reboot after pvdrivers installation.
        d->vchan = VchanInitClient(d->remote_domid, QUBESDB_VCHAN_PORT, 5 * 60 * 1000);
#endif
        if (!d->vchan)
            return 0;
        d->remote_connected = 1;
    }
    return 1;
}
621 | | |
622 | | #ifndef WIN32 |
623 | 0 | static int create_pidfile(struct db_daemon_data *d) { |
624 | 0 | char pidfile_name[256]; |
625 | 0 | FILE *pidfile; |
626 | 0 | mode_t old_umask; |
627 | 0 | struct stat stat_buf; |
628 | | |
629 | | /* do not create pidfile for VM daemon - service is managed by systemd */ |
630 | 0 | if (!d->remote_name) |
631 | 0 | return 1; |
632 | 0 | snprintf(pidfile_name, sizeof(pidfile_name), |
633 | 0 | "/var/run/qubes/qubesdb.%s.pid", d->remote_name); |
634 | |
|
635 | 0 | old_umask = umask(0002); |
636 | 0 | pidfile = fopen(pidfile_name, "w"); |
637 | 0 | umask(old_umask); |
638 | 0 | if (!pidfile) { |
639 | 0 | perror("pidfile create"); |
640 | 0 | return 0; |
641 | 0 | } |
642 | 0 | fprintf(pidfile, "%d\n", getpid()); |
643 | 0 | if (fstat(fileno(pidfile), &stat_buf) == 0) |
644 | 0 | d->pidfile_ino = stat_buf.st_ino; |
645 | 0 | fclose(pidfile); |
646 | 0 | return 1; |
647 | 0 | } |
648 | | |
649 | 0 | static void remove_pidfile(struct db_daemon_data *d) { |
650 | 0 | char pidfile_name[256]; |
651 | 0 | struct stat stat_buf; |
652 | | |
653 | | /* no pidfile for VM daemon - service is managed by systemd */ |
654 | 0 | if (!d->remote_name) |
655 | 0 | return; |
656 | 0 | snprintf(pidfile_name, sizeof(pidfile_name), |
657 | 0 | "/var/run/qubes/qubesdb.%s.pid", d->remote_name); |
658 | |
|
659 | 0 | if (stat(pidfile_name, &stat_buf) == 0) { |
660 | | /* remove pidfile only if it's the one created this process */ |
661 | 0 | if (d->pidfile_ino == stat_buf.st_ino) |
662 | 0 | unlink(pidfile_name); |
663 | 0 | } |
664 | 0 | } |
665 | | |
666 | 0 | static void close_server_socket(struct db_daemon_data *d) { |
667 | 0 | struct sockaddr_un sockname; |
668 | 0 | socklen_t addrlen; |
669 | 0 | struct stat stat_buf; |
670 | |
|
671 | 0 | if (d->socket_fd < 0) |
672 | | /* already closed */ |
673 | 0 | return ; |
674 | 0 | addrlen = sizeof(sockname); |
675 | 0 | if (getsockname(d->socket_fd, (struct sockaddr *)&sockname, &addrlen) < 0) |
676 | | /* just do not remove socket when cannot get its path */ |
677 | 0 | return; |
678 | | |
679 | 0 | close(d->socket_fd); |
680 | 0 | if (stat(sockname.sun_path, &stat_buf) == 0) { |
681 | | /* remove the socket only if it's the one created this process */ |
682 | 0 | if (d->socket_ino == stat_buf.st_ino) |
683 | 0 | unlink(sockname.sun_path); |
684 | 0 | } |
685 | 0 | } |
686 | | #endif // !WIN32 |
687 | | |
/* Print command-line help to stderr. */
static void usage(char *argv0) {
    fprintf(stderr, "Usage: %s <remote-domid> [<remote-name>]\n", argv0);
    fputs("  Give <remote-name> only in dom0\n", stderr);
}
692 | | |
693 | | #ifdef WIN32 |
694 | | DWORD WINAPI service_thread(PVOID param) { |
695 | | PSERVICE_WORKER_CONTEXT ctx = param; |
696 | | struct db_daemon_data *d = ctx->UserContext; |
697 | | |
698 | | d->service_stop_event = ctx->StopEvent; |
699 | | |
700 | | return mainloop(d) ? NO_ERROR : ERROR_UNIDENTIFIED_ERROR; |
701 | | } |
702 | | |
703 | | static void vchan_logger(IN int logLevel, IN const CHAR *function, IN const WCHAR *format, IN va_list args) |
704 | | { |
705 | | WCHAR buf[1024]; |
706 | | |
707 | | StringCbVPrintfW(buf, sizeof(buf), format, args); |
708 | | _LogFormat(logLevel, FALSE, function, buf); |
709 | | } |
710 | | |
711 | | #endif |
712 | | |
/* Entry point.  Under fuzzing builds the fuzzer supplies main() and this
 * becomes fuzz_main().
 *
 * argv: <remote-domid> [<remote-name>]; remote-name is given only in dom0.
 * On POSIX without systemd notification, forks: the parent waits on a pipe
 * for the "ready" handshake and exits, the child becomes the daemon with
 * stdout/stderr redirected to a per-domain logfile. */
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
int main(int argc, char **argv) {
#else
int fuzz_main(int argc, char **argv) {
#endif
    struct db_daemon_data d;
#ifndef WIN32
    int ready_pipe[2] = {0, 0};
#endif
    int ret;

    /* NOTE(review): argc == 4 is accepted but argv[3] is never read in this
     * file -- presumably legacy or consumed elsewhere; confirm */
    if (argc != 2 && argc != 3 && argc != 4) {
        usage(argv[0]);
        exit(1);
    }

#ifndef WIN32
    memset(&d, 0, sizeof(d));
#else
    RtlSecureZeroMemory(&d, sizeof(d));
#endif

    /* NOTE(review): atoi() silently yields 0 for non-numeric input; strtol
     * with validation would reject malformed domids */
    d.remote_domid = atoi(argv[1]);
    if (argc >= 3 && strlen(argv[2]) > 0)
        d.remote_name = argv[2];
    else
        d.remote_name = NULL;

    /* if not running under SystemD, fork and use pipe() to notify parent about
     * sucessful start */
    /* FIXME: OS dependent code */
#ifndef WIN32
#ifdef HAVE_SYSTEMD
    if (!getenv("NOTIFY_SOCKET")) {
#else
    if (1) {
#endif
        char buf[6];
        char log_path[MAX_FILE_PATH];
        int log_fd;
        mode_t old_umask;

        if (pipe(ready_pipe) < 0) {
            perror("pipe");
            exit(1);
        }
        switch (fork()) {
            case -1:
                perror("fork");
                exit(1);
            case 0:
                /* child: becomes the daemon; redirect stdout/stderr to the
                 * per-domain logfile and detach into a new session */
                close(ready_pipe[0]);
                snprintf(log_path, sizeof(log_path), "/var/log/qubes/qubesdb.%s.log", d.remote_name ? d.remote_name : "dom0");

                close(0);
                old_umask = umask(0);
                log_fd = open(log_path, O_WRONLY | O_CREAT | O_APPEND, 0664);
                umask(old_umask);
                if (log_fd < 0) {
                    perror("open logfile");
                    exit(1);
                }
                dup2(log_fd, 1);
                dup2(log_fd, 2);
                close(log_fd);

                setsid();

                break;
            default:
                /* parent: wait for the child's "ready" handshake, then exit */
                close(ready_pipe[1]);
                /* NOTE(review): read() returns ssize_t but strlen() returns
                 * size_t; on read error (-1) the signed value converts to a
                 * huge unsigned one, making this condition FALSE -- a failed
                 * read is treated as successful startup. Confirm and fix
                 * (e.g. compare against (ssize_t)strlen("ready")). */
                if (read(ready_pipe[0], buf, sizeof(buf)) < strlen("ready")) {
                    fprintf(stderr, "startup failed\n");
                    exit(1);
                }
                exit(0);
        }
    }

    /* setup graceful shutdown handling */
    signal(SIGTERM, sigterm_handler);
#endif

#ifndef WIN32
    d.db = qubesdb_init(write_client_buffered);
#else
    libvchan_register_logger(vchan_logger);
    d.db = qubesdb_init(send_watch_notify);
    InitializeSRWLock(&d.lock);
#endif
    if (!d.db) {
        fprintf(stderr, "FATAL: database initialization failed\n");
        exit(1);
    }

    if (!init_server_socket(&d)) {
        fprintf(stderr, "FATAL: server socket initialization failed\n");
        exit(1);
    }

#ifdef WIN32
    d.db->pipe_server = d.pipe_server;
    /* For Windows, vchan is initialized later, after the service starts
       and reports to the OS. Otherwise it can time-out after the first
       reboot after installation and OS will kill the service.

       start the service loop, service_thread runs mainloop()
     */
    ret = SvcMainLoop(QDB_DAEMON_SERVICE_NAME,
                      0, // not interested in any control codes
                      service_thread, // worker thread
                      &d, // worker thread context
                      NULL, // notification handler
                      NULL // notification context
                      );
#else /* WIN32 */
    if (!init_vchan(&d)) {
        fprintf(stderr, "FATAL: vchan initialization failed\n");
        exit(1);
    }

    if (!d.remote_name) {
        /* request database sync from dom0 */
        if (!request_full_db_sync(&d)) {
            fprintf(stderr, "FATAL: failed to request DB sync\n");
            exit(1);
        }
        d.multiread_requested = 1;
        /* wait for complete response */
        while (d.multiread_requested) {
            if (!handle_vchan_data(&d)) {
                fprintf(stderr, "FATAL: vchan error\n");
                exit(1);
            }
        }
    }

    /* now ready for serving requests, notify parent */
    /* FIXME: OS dependent code */
#ifdef HAVE_SYSTEMD
    if (getenv("NOTIFY_SOCKET")) {
        sd_notify(1, "READY=1");
    } else
#endif /* HAVE_SYSTEMD */
    {
        if (write(ready_pipe[1], "ready", strlen("ready")) != strlen("ready"))
            perror("failed to notify parent");
        close(ready_pipe[1]);
    }

    create_pidfile(&d);

    ret = !mainloop(&d);
#endif /* !WIN32 */

    close_server_socket(&d);

#ifndef WIN32
    remove_pidfile(&d);
#endif

    return ret;
}