/src/samba/source3/rpc_client/local_np.c
Line | Count | Source |
1 | | /* |
2 | | * Unix SMB/CIFS implementation. |
3 | | * |
4 | | * This program is free software; you can redistribute it and/or modify |
5 | | * it under the terms of the GNU General Public License as published by |
6 | | * the Free Software Foundation; either version 3 of the License, or |
7 | | * (at your option) any later version. |
8 | | * |
9 | | * This program is distributed in the hope that it will be useful, |
10 | | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | | * GNU General Public License for more details. |
13 | | * |
14 | | * You should have received a copy of the GNU General Public License |
15 | | * along with this program; if not, see <http://www.gnu.org/licenses/>. |
16 | | */ |
17 | | |
18 | | #include "source3/include/includes.h" |
19 | | #include <spawn.h> |
20 | | #include "local_np.h" |
21 | | #include "lib/async_req/async_sock.h" |
22 | | #include "librpc/gen_ndr/ndr_named_pipe_auth.h" |
23 | | #include "libcli/named_pipe_auth/npa_tstream.h" |
24 | | #include "libcli/named_pipe_auth/tstream_u32_read.h" |
25 | | #include "lib/util/tevent_unix.h" |
26 | | #include "auth/auth_util.h" |
27 | | #include "libcli/security/dom_sid.h" |
28 | | #include "libcli/security/security_token.h" |
29 | | #include "nsswitch/winbind_client.h" |
30 | | |
31 | | /** |
32 | | * @file local_np.c |
33 | | * |
34 | | * Connect to a local named pipe by connecting to |
35 | | * samba-dcerpcd. Start samba-dcerpcd if it isn't |
36 | | * already running. |
37 | | */ |
38 | | |
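A minimal synchronous caller, sketched for illustration (the helper name example_open_lsarpc and the "lsarpc" target are assumptions; the caller must already hold an auth_session_info, e.g. the SMB session's security context):

static int example_open_lsarpc(TALLOC_CTX *mem_ctx,
			       const struct auth_session_info *session_info,
			       struct tstream_context **pstream)
{
	/*
	 * "lsarpc" is the bare pipe name: local_np_connect() lowercases
	 * it and resolves it under <socket_dir>/np/.  Passing NULL for
	 * the client/server names and addresses makes the function fill
	 * in the local hostname and wildcard inet addresses itself.
	 */
	return local_np_connect("lsarpc",
				NCACN_NP,
				NULL,		/* remote_client_name */
				NULL,		/* remote_client_addr */
				NULL,		/* local_server_name */
				NULL,		/* local_server_addr */
				session_info,
				false,		/* need_idle_server */
				mem_ctx,
				pstream);
}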
39 | | extern bool override_logfile; |
40 | | |
41 | | struct np_sock_connect_state { |
42 | | struct tevent_context *ev; |
43 | | struct samba_sockaddr addr; |
44 | | const struct named_pipe_auth_req *npa_req; |
45 | | struct named_pipe_auth_rep *npa_rep; |
46 | | |
47 | | DATA_BLOB npa_blob; |
48 | | struct iovec iov; |
49 | | |
50 | | int sock; |
51 | | struct tevent_req *subreq; |
52 | | struct tstream_context *transport; |
53 | | struct tstream_context *npa_stream; |
54 | | }; |
55 | | |
56 | | static void np_sock_connect_cleanup( |
57 | | struct tevent_req *req, enum tevent_req_state req_state); |
58 | | static void np_sock_connect_before(void *private_data); |
59 | | static void np_sock_connect_after(void *private_data); |
60 | | static void np_sock_connect_connected(struct tevent_req *subreq); |
61 | | static void np_sock_connect_written(struct tevent_req *subreq); |
62 | | static void np_sock_connect_read_done(struct tevent_req *subreq); |
63 | | |
64 | | static struct tevent_req *np_sock_connect_send( |
65 | | TALLOC_CTX *mem_ctx, |
66 | | struct tevent_context *ev, |
67 | | const char *sockpath, |
68 | | const struct named_pipe_auth_req *npa_req) |
69 | 0 | { |
70 | 0 | struct tevent_req *req = NULL; |
71 | 0 | struct np_sock_connect_state *state = NULL; |
72 | 0 | size_t len; |
73 | 0 | int ret; |
74 | 0 | bool ok; |
75 | |
76 | 0 | req = tevent_req_create(mem_ctx, &state, struct np_sock_connect_state); |
77 | 0 | if (req == NULL) { |
78 | 0 | return NULL; |
79 | 0 | } |
80 | 0 | state->ev = ev; |
81 | 0 | state->npa_req = npa_req; |
82 | 0 | state->sock = -1; |
83 | 0 | state->addr.u.un.sun_family = AF_UNIX; |
84 | |
85 | 0 | state->npa_rep = talloc_zero(state, struct named_pipe_auth_rep); |
86 | 0 | if (tevent_req_nomem(state->npa_rep, req)) { |
87 | 0 | return tevent_req_post(req, ev); |
88 | 0 | } |
89 | | |
90 | 0 | tevent_req_set_cleanup_fn(req, np_sock_connect_cleanup); |
91 | |
92 | 0 | state->addr.sa_socklen = sizeof(struct sockaddr_un); |
93 | 0 | len = strlcpy(state->addr.u.un.sun_path, |
94 | 0 | sockpath, |
95 | 0 | sizeof(state->addr.u.un.sun_path)); |
96 | 0 | if (len >= sizeof(state->addr.u.un.sun_path)) { |
97 | 0 | tevent_req_error(req, ENAMETOOLONG); |
98 | 0 | return tevent_req_post(req, ev); |
99 | 0 | } |
100 | | |
101 | 0 | state->sock = socket(AF_UNIX, SOCK_STREAM, 0); |
102 | 0 | if (state->sock == -1) { |
103 | 0 | tevent_req_error(req, errno); |
104 | 0 | return tevent_req_post(req, ev); |
105 | 0 | } |
106 | | |
107 | 0 | ret = set_blocking(state->sock, true); |
108 | 0 | if (ret == -1) { |
109 | 0 | tevent_req_error(req, errno); |
110 | 0 | return tevent_req_post(req, ev); |
111 | 0 | } |
112 | 0 | ok = set_close_on_exec(state->sock); |
113 | 0 | if (!ok) { |
114 | 0 | tevent_req_error(req, errno); |
115 | 0 | return tevent_req_post(req, ev); |
116 | 0 | } |
117 | | |
118 | 0 | state->subreq = async_connect_send( |
119 | 0 | state, |
120 | 0 | ev, |
121 | 0 | state->sock, |
122 | 0 | &state->addr.u.sa, |
123 | 0 | state->addr.sa_socklen, |
124 | 0 | np_sock_connect_before, |
125 | 0 | np_sock_connect_after, |
126 | 0 | NULL); |
127 | 0 | if (tevent_req_nomem(state->subreq, req)) { |
128 | 0 | return tevent_req_post(req, ev); |
129 | 0 | } |
130 | 0 | tevent_req_set_callback(state->subreq, np_sock_connect_connected, req); |
131 | |
132 | 0 | return req; |
133 | 0 | } |
134 | | |
135 | | static void np_sock_connect_cleanup( |
136 | | struct tevent_req *req, enum tevent_req_state req_state) |
137 | 0 | { |
138 | 0 | struct np_sock_connect_state *state = tevent_req_data( |
139 | 0 | req, struct np_sock_connect_state); |
140 | |
141 | 0 | TALLOC_FREE(state->subreq); |
142 | 0 | TALLOC_FREE(state->transport); |
143 | |
144 | 0 | if (state->sock != -1) { |
145 | 0 | close(state->sock); |
146 | 0 | state->sock = -1; |
147 | 0 | } |
148 | 0 | } |
149 | | |
150 | | static void np_sock_connect_before(void *private_data) |
151 | 0 | { |
152 | 0 | become_root(); |
153 | 0 | } |
154 | | |
155 | | static void np_sock_connect_after(void *private_data) |
156 | 0 | { |
157 | 0 | unbecome_root(); |
158 | 0 | } |
159 | | |
160 | | static void np_sock_connect_connected(struct tevent_req *subreq) |
161 | 0 | { |
162 | 0 | struct tevent_req *req = tevent_req_callback_data( |
163 | 0 | subreq, struct tevent_req); |
164 | 0 | struct np_sock_connect_state *state = tevent_req_data( |
165 | 0 | req, struct np_sock_connect_state); |
166 | 0 | enum ndr_err_code ndr_err; |
167 | 0 | int ret, err; |
168 | |
169 | 0 | SMB_ASSERT(subreq == state->subreq); |
170 | | |
171 | 0 | ret = async_connect_recv(subreq, &err); |
172 | 0 | TALLOC_FREE(subreq); |
173 | 0 | state->subreq = NULL; |
174 | 0 | if (ret == -1) { |
175 | 0 | DBG_DEBUG("async_connect_recv returned %s\n", strerror(err)); |
176 | 0 | tevent_req_error(req, err); |
177 | 0 | return; |
178 | 0 | } |
179 | | |
180 | | /* |
181 | | * As a quick workaround for bug 15310 we have done the |
182 | | * connect in blocking mode (see np_sock_connect_send()). The |
183 | 0 | * rest of our code expects a nonblocking socket, so switch |
184 | 0 | * back to nonblocking mode once the connect has succeeded. |
185 | | */ |
186 | 0 | ret = set_blocking(state->sock, false); |
187 | 0 | if (ret == -1) { |
188 | 0 | tevent_req_error(req, errno); |
189 | 0 | return; |
190 | 0 | } |
191 | | |
192 | 0 | ret = tstream_bsd_existing_socket( |
193 | 0 | state, state->sock, &state->transport); |
194 | 0 | if (ret == -1) { |
195 | 0 | err = errno; |
196 | 0 | DBG_DEBUG("tstream_bsd_existing_socket failed: %s\n", |
197 | 0 | strerror(err)); |
198 | 0 | tevent_req_error(req, err); |
199 | 0 | return; |
200 | 0 | } |
201 | 0 | state->sock = -1; |
202 | |
203 | 0 | ndr_err = ndr_push_struct_blob( |
204 | 0 | &state->npa_blob, |
205 | 0 | state, |
206 | 0 | state->npa_req, |
207 | 0 | (ndr_push_flags_fn_t)ndr_push_named_pipe_auth_req); |
208 | 0 | if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) { |
209 | 0 | DBG_DEBUG("ndr_push_struct_blob failed: %s\n", |
210 | 0 | ndr_errstr(ndr_err)); |
211 | 0 | tevent_req_error(req, ndr_map_error2errno(ndr_err)); |
212 | 0 | return; |
213 | 0 | } |
214 | 0 | state->iov = (struct iovec) { |
215 | 0 | .iov_base = state->npa_blob.data, |
216 | 0 | .iov_len = state->npa_blob.length, |
217 | 0 | }; |
218 | |
219 | 0 | subreq = tstream_writev_send( |
220 | 0 | state, state->ev, state->transport, &state->iov, 1); |
221 | 0 | if (tevent_req_nomem(subreq, req)) { |
222 | 0 | return; |
223 | 0 | } |
224 | 0 | tevent_req_set_callback(subreq, np_sock_connect_written, req); |
225 | 0 | } |
226 | | |
227 | | static void np_sock_connect_written(struct tevent_req *subreq) |
228 | 0 | { |
229 | 0 | struct tevent_req *req = tevent_req_callback_data( |
230 | 0 | subreq, struct tevent_req); |
231 | 0 | struct np_sock_connect_state *state = tevent_req_data( |
232 | 0 | req, struct np_sock_connect_state); |
233 | 0 | int ret, err; |
234 | |
235 | 0 | ret = tstream_writev_recv(subreq, &err); |
236 | 0 | TALLOC_FREE(subreq); |
237 | 0 | if (ret == -1) { |
238 | 0 | DBG_DEBUG("tstream_writev_recv returned %s\n", strerror(err)); |
239 | 0 | tevent_req_error(req, err); |
240 | 0 | return; |
241 | 0 | } |
242 | | |
243 | 0 | subreq = tstream_u32_read_send( |
244 | 0 | state, state->ev, 0x00FFFFFF, state->transport); |
245 | 0 | if (tevent_req_nomem(subreq, req)) { |
246 | 0 | return; |
247 | 0 | } |
248 | 0 | tevent_req_set_callback(subreq, np_sock_connect_read_done, req); |
249 | 0 | } |
250 | | |
251 | | static void np_sock_connect_read_done(struct tevent_req *subreq) |
252 | 0 | { |
253 | 0 | struct tevent_req *req = tevent_req_callback_data( |
254 | 0 | subreq, struct tevent_req); |
255 | 0 | struct np_sock_connect_state *state = tevent_req_data( |
256 | 0 | req, struct np_sock_connect_state); |
257 | 0 | DATA_BLOB in; |
258 | 0 | int ret; |
259 | 0 | enum ndr_err_code ndr_err; |
260 | |
261 | 0 | ret = tstream_u32_read_recv(subreq, state, &in.data, &in.length); |
262 | 0 | TALLOC_FREE(subreq); |
263 | 0 | if (tevent_req_error(req, ret)) { |
264 | 0 | return; |
265 | 0 | } |
266 | | |
267 | 0 | ndr_err = ndr_pull_struct_blob_all( |
268 | 0 | &in, |
269 | 0 | state->npa_rep, |
270 | 0 | state->npa_rep, |
271 | 0 | (ndr_pull_flags_fn_t)ndr_pull_named_pipe_auth_rep); |
272 | 0 | if (!NDR_ERR_CODE_IS_SUCCESS(ndr_err)) { |
273 | 0 | DBG_DEBUG("ndr_pull_named_pipe_auth_rep failed: %s\n", |
274 | 0 | ndr_errstr(ndr_err)); |
275 | 0 | tevent_req_error(req, ndr_map_error2errno(ndr_err)); |
276 | 0 | return; |
277 | 0 | } |
278 | 0 | if (state->npa_rep->level != 8) { |
279 | 0 | DBG_DEBUG("npa level = %" PRIu32 ", expected 8\n", |
280 | 0 | state->npa_rep->level); |
281 | 0 | tevent_req_error(req, EIO); |
282 | 0 | return; |
283 | 0 | } |
284 | | |
285 | 0 | ret = tstream_npa_existing_stream(state, |
286 | 0 | &state->transport, |
287 | 0 | state->npa_rep->info.info8.file_type, |
288 | 0 | &state->npa_stream); |
289 | 0 | if (ret == -1) { |
290 | 0 | ret = errno; |
291 | 0 | DBG_DEBUG("tstream_npa_existing_stream failed: %s\n", |
292 | 0 | strerror(ret)); |
293 | 0 | tevent_req_error(req, ret); |
294 | 0 | return; |
295 | 0 | } |
296 | | |
297 | 0 | tevent_req_done(req); |
298 | 0 | } |
299 | | |
300 | | static int np_sock_connect_recv( |
301 | | struct tevent_req *req, |
302 | | TALLOC_CTX *mem_ctx, |
303 | | struct tstream_context **stream) |
304 | 0 | { |
305 | 0 | struct np_sock_connect_state *state = tevent_req_data( |
306 | 0 | req, struct np_sock_connect_state); |
307 | 0 | int err; |
308 | |
309 | 0 | if (tevent_req_is_unix_error(req, &err)) { |
310 | 0 | tevent_req_received(req); |
311 | 0 | return err; |
312 | 0 | } |
313 | 0 | *stream = talloc_move(mem_ctx, &state->npa_stream); |
314 | 0 | tevent_req_received(req); |
315 | 0 | return 0; |
316 | 0 | } |
317 | | |
318 | | struct start_rpc_host_state { |
319 | | int ready_fd; |
320 | | struct tevent_req *read_ready_req; |
321 | | }; |
322 | | |
323 | | static void start_rpc_host_cleanup( |
324 | | struct tevent_req *req, enum tevent_req_state req_state); |
325 | | static void start_rpc_host_ready(struct tevent_req *subreq); |
326 | | |
327 | | /* |
328 | | * Start samba-dcerpcd and wait for it to report ready. |
329 | | */ |
330 | | static struct tevent_req *start_rpc_host_send( |
331 | | TALLOC_CTX *mem_ctx, struct tevent_context *ev) |
332 | 0 | { |
333 | 0 | struct tevent_req *req = NULL, *subreq = NULL; |
334 | 0 | struct start_rpc_host_state *state = NULL; |
335 | 0 | int ret; |
336 | 0 | int ready_fds[2] = { -1, -1 }; |
337 | 0 | char **argv = NULL; |
338 | 0 | pid_t pid; |
339 | 0 | bool ok; |
340 | |
341 | 0 | req = tevent_req_create( |
342 | 0 | mem_ctx, &state, struct start_rpc_host_state); |
343 | 0 | if (req == NULL) { |
344 | 0 | return NULL; |
345 | 0 | } |
346 | | |
347 | 0 | ret = pipe(ready_fds); |
348 | 0 | if (ret == -1) { |
349 | 0 | ret = errno; |
350 | 0 | DBG_DEBUG("pipe() failed: %s\n", strerror(ret)); |
351 | 0 | goto fail; |
352 | 0 | } |
353 | | |
354 | 0 | ok = smb_set_close_on_exec(ready_fds[0]); |
355 | 0 | if (!ok) { |
356 | 0 | ret = errno; |
357 | 0 | DBG_DEBUG("smb_set_close_on_exec failed: %s\n", |
358 | 0 | strerror(ret)); |
359 | 0 | goto fail; |
360 | 0 | } |
361 | | |
362 | 0 | argv = str_list_make_empty(mem_ctx); |
363 | 0 | str_list_add_printf( |
364 | 0 | &argv, "%s/samba-dcerpcd", get_dyn_SAMBA_LIBEXECDIR()); |
365 | 0 | if (!is_default_dyn_CONFIGFILE()) { |
366 | 0 | str_list_add_printf( |
367 | 0 | &argv, "--configfile=%s", get_dyn_CONFIGFILE()); |
368 | 0 | } |
369 | 0 | str_list_add_printf(&argv, "--libexec-rpcds"); |
370 | 0 | str_list_add_printf(&argv, "--ready-signal-fd=%d", ready_fds[1]); |
371 | 0 | str_list_add_printf(&argv, "--np-helper"); |
372 | 0 | str_list_add_printf( |
373 | 0 | &argv, "--debuglevel=%d", debuglevel_get_class(DBGC_RPC_SRV)); |
374 | 0 | if (!is_default_dyn_LOGFILEBASE()) { |
375 | 0 | str_list_add_printf( |
376 | 0 | &argv, "--log-basename=%s", get_dyn_LOGFILEBASE()); |
377 | 0 | } |
378 | 0 | if (argv == NULL) { |
379 | 0 | errno = ENOMEM; |
380 | 0 | goto fail; |
381 | 0 | } |
382 | | |
383 | 0 | become_root(); |
384 | 0 | ret = posix_spawn(&pid, argv[0], NULL, NULL, argv, environ); |
385 | 0 | unbecome_root(); |
386 | 0 | if (ret != 0) { |
387 | 0 | DBG_DEBUG("posix_spawn() failed: %s\n", strerror(ret)); |
388 | 0 | goto fail; |
389 | 0 | } |
390 | | |
391 | 0 | state->ready_fd = ready_fds[0]; |
392 | 0 | ready_fds[0] = -1; |
393 | 0 | tevent_req_set_cleanup_fn(req, start_rpc_host_cleanup); |
394 | |
395 | 0 | close(ready_fds[1]); |
396 | 0 | ready_fds[1] = -1; |
397 | |
398 | 0 | subreq = read_packet_send(state, ev, state->ready_fd, 1, NULL, NULL); |
399 | 0 | if (tevent_req_nomem(subreq, req)) { |
400 | 0 | return tevent_req_post(req, ev); |
401 | 0 | } |
402 | 0 | tevent_req_set_callback(subreq, start_rpc_host_ready, req); |
403 | 0 | return req; |
404 | | |
405 | 0 | fail: |
406 | 0 | if (ready_fds[0] != -1) { |
407 | 0 | close(ready_fds[0]); |
408 | 0 | ready_fds[0] = -1; |
409 | 0 | } |
410 | 0 | if (ready_fds[1] != -1) { |
411 | 0 | close(ready_fds[1]); |
412 | 0 | ready_fds[1] = -1; |
413 | 0 | } |
414 | 0 | tevent_req_error(req, ret); |
415 | 0 | return tevent_req_post(req, ev); |
416 | 0 | } |
417 | | |
418 | | static void start_rpc_host_cleanup( |
419 | | struct tevent_req *req, enum tevent_req_state req_state) |
420 | 0 | { |
421 | 0 | struct start_rpc_host_state *state = tevent_req_data( |
422 | 0 | req, struct start_rpc_host_state); |
423 | |
424 | 0 | if (state->ready_fd != -1) { |
425 | 0 | close(state->ready_fd); |
426 | 0 | state->ready_fd = -1; |
427 | 0 | } |
428 | 0 | } |
429 | | |
430 | | static void start_rpc_host_ready(struct tevent_req *subreq) |
431 | 0 | { |
432 | 0 | struct tevent_req *req = tevent_req_callback_data( |
433 | 0 | subreq, struct tevent_req); |
434 | 0 | struct start_rpc_host_state *state = tevent_req_data( |
435 | 0 | req, struct start_rpc_host_state); |
436 | 0 | uint8_t *buf; |
437 | 0 | int err; |
438 | 0 | ssize_t nread; |
439 | |
440 | 0 | nread = read_packet_recv(subreq, state, &buf, &err); |
441 | 0 | TALLOC_FREE(subreq); |
442 | 0 | if (nread == -1) { |
443 | 0 | tevent_req_error(req, err); |
444 | 0 | return; |
445 | 0 | } |
446 | | |
447 | 0 | close(state->ready_fd); |
448 | 0 | state->ready_fd = -1; |
449 | |
|
450 | 0 | tevent_req_done(req); |
451 | 0 | } |
452 | | |
453 | | static int start_rpc_host_recv(struct tevent_req *req) |
454 | 0 | { |
455 | 0 | return tevent_req_simple_recv_unix(req); |
456 | 0 | } |
457 | | |
458 | | struct local_np_connect_state { |
459 | | struct tevent_context *ev; |
460 | | const char *socketpath; |
461 | | struct named_pipe_auth_req *npa_req; |
462 | | struct tstream_context *npa_stream; |
463 | | }; |
464 | | |
465 | | static void local_np_connect_connected(struct tevent_req *subreq); |
466 | | static void local_np_connect_started(struct tevent_req *subreq); |
467 | | static void local_np_connect_retried(struct tevent_req *subreq); |
468 | | |
469 | | /** |
470 | | * @brief Async connect to a local named pipe RPC interface |
471 | | * |
472 | | * Start "samba-dcerpcd" on demand if it is not already running |
473 | | * |
474 | | * @param[in] mem_ctx The memory context to use. |
475 | | * @param[in] ev The tevent context to use. |
476 | | * |
477 | | * @param[in] pipename The raw pipename to connect to without path |
478 | | * @param[in] remote_client_name The client name to transmit |
479 | | * @param[in] remote_client_addr The client addr to transmit |
480 | | * @param[in] local_server_name The server name to transmit |
481 | | * @param[in] local_server_addr The server addr to transmit |
482 | | * @param[in] session_info The authorization info to use |
483 | | * @param[in] need_idle_server Does this need to be an exclusive server? |
484 | | * @return The tevent_req that was started |
485 | | */ |
486 | | |
487 | | struct tevent_req *local_np_connect_send( |
488 | | TALLOC_CTX *mem_ctx, |
489 | | struct tevent_context *ev, |
490 | | const char *pipename, |
491 | | enum dcerpc_transport_t transport, |
492 | | const char *remote_client_name, |
493 | | const struct tsocket_address *remote_client_addr, |
494 | | const char *local_server_name, |
495 | | const struct tsocket_address *local_server_addr, |
496 | | const struct auth_session_info *session_info, |
497 | | bool need_idle_server) |
498 | 0 | { |
499 | 0 | struct tevent_req *req = NULL, *subreq = NULL; |
500 | 0 | struct local_np_connect_state *state = NULL; |
501 | 0 | struct named_pipe_auth_req_info8 *i8 = NULL; |
502 | 0 | const char *socket_dir = NULL; |
503 | 0 | char *lower_case_pipename = NULL; |
504 | 0 | struct dom_sid npa_sid = global_sid_Samba_NPA_Flags; |
505 | 0 | uint32_t npa_flags = 0; |
506 | 0 | struct security_token *token = NULL; |
507 | 0 | NTSTATUS status; |
508 | 0 | size_t num_npa_sids; |
509 | 0 | bool ok; |
510 | |
511 | 0 | req = tevent_req_create( |
512 | 0 | mem_ctx, &state, struct local_np_connect_state); |
513 | 0 | if (req == NULL) { |
514 | 0 | return NULL; |
515 | 0 | } |
516 | 0 | state->ev = ev; |
517 | |
518 | 0 | num_npa_sids = |
519 | 0 | security_token_count_flag_sids(session_info->security_token, |
520 | 0 | &npa_sid, |
521 | 0 | 1, |
522 | 0 | NULL); |
523 | 0 | if (num_npa_sids != 0) { |
524 | 0 | DBG_ERR("ERROR: %zu NPA Flags SIDs have already been " |
525 | 0 | "detected in the security token!\n", |
526 | 0 | num_npa_sids); |
527 | 0 | tevent_req_error(req, EACCES); |
528 | 0 | return tevent_req_post(req, ev); |
529 | 0 | } |
530 | | |
531 | 0 | socket_dir = lp_parm_const_string( |
532 | 0 | GLOBAL_SECTION_SNUM, "external_rpc_pipe", "socket_dir", |
533 | 0 | lp_ncalrpc_dir()); |
534 | 0 | if (socket_dir == NULL) { |
535 | 0 | DBG_DEBUG("external_rpc_pipe:socket_dir not set\n"); |
536 | 0 | tevent_req_error(req, EINVAL); |
537 | 0 | return tevent_req_post(req, ev); |
538 | 0 | } |
539 | | |
540 | 0 | lower_case_pipename = strlower_talloc(state, pipename); |
541 | 0 | if (tevent_req_nomem(lower_case_pipename, req)) { |
542 | 0 | return tevent_req_post(req, ev); |
543 | 0 | } |
544 | | |
545 | | /* |
546 | | * Ensure we cannot process a path that escapes |
547 | | * the socket_dir. |
548 | | */ |
549 | 0 | if (ISDOTDOT(lower_case_pipename) || |
550 | 0 | (strchr(lower_case_pipename, '/')!=NULL)) |
551 | 0 | { |
552 | 0 | DBG_DEBUG("attempt to connect to invalid pipe pathname %s\n", |
553 | 0 | lower_case_pipename); |
554 | 0 | tevent_req_error(req, ENOENT); |
555 | 0 | return tevent_req_post(req, ev); |
556 | 0 | } |
557 | | |
558 | 0 | state->socketpath = talloc_asprintf( |
559 | 0 | state, "%s/np/%s", socket_dir, lower_case_pipename); |
560 | 0 | if (tevent_req_nomem(state->socketpath, req)) { |
561 | 0 | return tevent_req_post(req, ev); |
562 | 0 | } |
563 | 0 | TALLOC_FREE(lower_case_pipename); |
564 | |
565 | 0 | state->npa_req = talloc_zero(state, struct named_pipe_auth_req); |
566 | 0 | if (tevent_req_nomem(state->npa_req, req)) { |
567 | 0 | return tevent_req_post(req, ev); |
568 | 0 | } |
569 | 0 | state->npa_req->level = 8; |
570 | |
571 | 0 | i8 = &state->npa_req->info.info8; |
572 | |
573 | 0 | i8->transport = transport; |
574 | | |
575 | | /* we don't have "int" in IDL, make sure we don't overflow */ |
576 | 0 | SMB_ASSERT(i8->transport == transport); |
577 | | |
578 | 0 | if (remote_client_name == NULL) { |
579 | 0 | remote_client_name = get_myname(state->npa_req); |
580 | 0 | if (remote_client_name == NULL) { |
581 | 0 | tevent_req_error(req, errno); |
582 | 0 | return tevent_req_post(req, ev); |
583 | 0 | } |
584 | 0 | } |
585 | 0 | i8->remote_client_name = remote_client_name; |
586 | |
587 | 0 | if (remote_client_addr == NULL) { |
588 | 0 | struct tsocket_address *addr = NULL; |
589 | 0 | int ret = tsocket_address_inet_from_strings( |
590 | 0 | state->npa_req, "ip", NULL, 0, &addr); |
591 | 0 | if (ret != 0) { |
592 | 0 | tevent_req_error(req, errno); |
593 | 0 | return tevent_req_post(req, ev); |
594 | 0 | } |
595 | 0 | remote_client_addr = addr; |
596 | 0 | } |
597 | 0 | i8->remote_client_addr = |
598 | 0 | tsocket_address_inet_addr_string(remote_client_addr, |
599 | 0 | state->npa_req); |
600 | 0 | if (i8->remote_client_addr == NULL) { |
601 | 0 | tevent_req_error(req, errno); |
602 | 0 | return tevent_req_post(req, ev); |
603 | 0 | } |
604 | 0 | i8->remote_client_port = tsocket_address_inet_port(remote_client_addr); |
605 | |
606 | 0 | if (local_server_name == NULL) { |
607 | 0 | local_server_name = remote_client_name; |
608 | 0 | } |
609 | 0 | i8->local_server_name = local_server_name; |
610 | |
611 | 0 | if (local_server_addr == NULL) { |
612 | 0 | struct tsocket_address *addr = NULL; |
613 | 0 | int ret = tsocket_address_inet_from_strings( |
614 | 0 | state->npa_req, "ip", NULL, 0, &addr); |
615 | 0 | if (ret != 0) { |
616 | 0 | tevent_req_error(req, errno); |
617 | 0 | return tevent_req_post(req, ev); |
618 | 0 | } |
619 | 0 | local_server_addr = addr; |
620 | 0 | } |
621 | 0 | i8->local_server_addr = |
622 | 0 | tsocket_address_inet_addr_string(local_server_addr, |
623 | 0 | state->npa_req); |
624 | 0 | if (i8->local_server_addr == NULL) { |
625 | 0 | tevent_req_error(req, errno); |
626 | 0 | return tevent_req_post(req, ev); |
627 | 0 | } |
628 | 0 | i8->local_server_port = tsocket_address_inet_port(local_server_addr); |
629 | |
|
630 | 0 | i8->session_info = talloc_zero(state->npa_req, |
631 | 0 | struct auth_session_info_transport); |
632 | 0 | if (tevent_req_nomem(i8->session_info, req)) { |
633 | 0 | return tevent_req_post(req, ev); |
634 | 0 | } |
635 | | |
636 | 0 | i8->session_info->session_info = |
637 | 0 | copy_session_info(i8->session_info, session_info); |
638 | 0 | if (tevent_req_nomem(i8->session_info->session_info, req)) { |
639 | 0 | return tevent_req_post(req, ev); |
640 | 0 | } |
641 | | |
642 | 0 | if (need_idle_server) { |
643 | 0 | npa_flags |= SAMBA_NPA_FLAGS_NEED_IDLE; |
644 | 0 | } |
645 | |
646 | 0 | ok = winbind_env_set(); |
647 | 0 | if (ok) { |
648 | 0 | npa_flags |= SAMBA_NPA_FLAGS_WINBIND_OFF; |
649 | 0 | } |
650 | |
651 | 0 | ok = sid_append_rid(&npa_sid, npa_flags); |
652 | 0 | if (!ok) { |
653 | 0 | tevent_req_error(req, EINVAL); |
654 | 0 | return tevent_req_post(req, ev); |
655 | 0 | } |
656 | | |
657 | 0 | token = i8->session_info->session_info->security_token; |
658 | |
659 | 0 | status = add_sid_to_array_unique(token, |
660 | 0 | &npa_sid, |
661 | 0 | &token->sids, |
662 | 0 | &token->num_sids); |
663 | 0 | if (!NT_STATUS_IS_OK(status)) { |
664 | 0 | tevent_req_oom(req); |
665 | 0 | return tevent_req_post(req, ev); |
666 | 0 | } |
667 | | |
668 | 0 | subreq = np_sock_connect_send( |
669 | 0 | state, state->ev, state->socketpath, state->npa_req); |
670 | 0 | if (tevent_req_nomem(subreq, req)) { |
671 | 0 | return tevent_req_post(req, ev); |
672 | 0 | } |
673 | 0 | tevent_req_set_callback(subreq, local_np_connect_connected, req); |
674 | |
675 | 0 | return req; |
676 | 0 | } |
677 | | |
678 | | static void local_np_connect_connected(struct tevent_req *subreq) |
679 | 0 | { |
680 | 0 | struct tevent_req *req = tevent_req_callback_data( |
681 | 0 | subreq, struct tevent_req); |
682 | 0 | struct local_np_connect_state *state = tevent_req_data( |
683 | 0 | req, struct local_np_connect_state); |
684 | 0 | int ret; |
685 | |
686 | 0 | ret = np_sock_connect_recv(subreq, state, &state->npa_stream); |
687 | 0 | TALLOC_FREE(subreq); |
688 | |
689 | 0 | if (ret == 0) { |
690 | 0 | tevent_req_done(req); |
691 | 0 | return; |
692 | 0 | } |
693 | | |
694 | 0 | DBG_DEBUG("np_sock_connect failed: %s\n", strerror(ret)); |
695 | |
696 | 0 | if (!lp_rpc_start_on_demand_helpers()) { |
697 | | /* |
698 | | * samba-dcerpcd should already be started in |
699 | | * daemon/standalone mode when "rpc start on demand |
700 | | * helpers = false". We are prohibited from starting |
701 | | * on demand as a named-pipe helper. |
702 | | */ |
703 | 0 | DBG_ERR("Can't connect to a running samba-dcerpcd. smb.conf " |
704 | 0 | "config prohibits starting as named pipe helper as " |
705 | 0 | "the [global] section contains " |
706 | 0 | "\"rpc start on demand helpers = false\".\n"); |
707 | 0 | tevent_req_error(req, ret); |
708 | 0 | return; |
709 | 0 | } |
710 | | |
711 | | /* |
712 | | * samba-dcerpcd isn't running. We need to start it. |
713 | | * Note that if it doesn't start, we treat this as a fatal |
714 | | * error for connecting to the named pipe and don't |
715 | | * keep trying to restart for this connection. |
716 | | */ |
717 | 0 | subreq = start_rpc_host_send(state, state->ev); |
718 | 0 | if (tevent_req_nomem(subreq, req)) { |
719 | 0 | return; |
720 | 0 | } |
721 | 0 | tevent_req_set_callback(subreq, local_np_connect_started, req); |
722 | 0 | } |
723 | | |
724 | | static void local_np_connect_started(struct tevent_req *subreq) |
725 | 0 | { |
726 | 0 | struct tevent_req *req = tevent_req_callback_data( |
727 | 0 | subreq, struct tevent_req); |
728 | 0 | struct local_np_connect_state *state = tevent_req_data( |
729 | 0 | req, struct local_np_connect_state); |
730 | 0 | int ret; |
731 | |
732 | 0 | ret = start_rpc_host_recv(subreq); |
733 | 0 | TALLOC_FREE(subreq); |
734 | 0 | if (tevent_req_error(req, ret)) { |
735 | 0 | DBG_DEBUG("start_rpc_host_recv failed: %s\n", |
736 | 0 | strerror(ret)); |
737 | 0 | return; |
738 | 0 | } |
739 | | |
740 | 0 | subreq = np_sock_connect_send( |
741 | 0 | state, state->ev, state->socketpath, state->npa_req); |
742 | 0 | if (tevent_req_nomem(subreq, req)) { |
743 | 0 | return; |
744 | 0 | } |
745 | 0 | tevent_req_set_callback(subreq, local_np_connect_retried, req); |
746 | 0 | } |
747 | | |
748 | | static void local_np_connect_retried(struct tevent_req *subreq) |
749 | 0 | { |
750 | 0 | struct tevent_req *req = tevent_req_callback_data( |
751 | 0 | subreq, struct tevent_req); |
752 | 0 | struct local_np_connect_state *state = tevent_req_data( |
753 | 0 | req, struct local_np_connect_state); |
754 | 0 | int ret; |
755 | |
756 | 0 | ret = np_sock_connect_recv(subreq, state, &state->npa_stream); |
757 | 0 | TALLOC_FREE(subreq); |
758 | 0 | if (tevent_req_error(req, ret)) { |
759 | 0 | return; |
760 | 0 | } |
761 | 0 | tevent_req_done(req); |
762 | 0 | } |
763 | | |
764 | | /** |
765 | | * @brief Receive handle to a local named pipe RPC interface |
766 | | * |
767 | | * @param[in] req The tevent_req that started the operation |
768 | | * @param[in] ev The tevent context to use. |
769 | | * @param[in] mem_ctx The memory context to put pstream on |
770 | | * @param[out] pstream The established connection to the RPC server |
771 | | * |
772 | | * @return 0/errno |
773 | | */ |
774 | | |
775 | | int local_np_connect_recv( |
776 | | struct tevent_req *req, |
777 | | TALLOC_CTX *mem_ctx, |
778 | | struct tstream_context **pstream) |
779 | 0 | { |
780 | 0 | struct local_np_connect_state *state = tevent_req_data( |
781 | 0 | req, struct local_np_connect_state); |
782 | 0 | int err; |
783 | |
784 | 0 | if (tevent_req_is_unix_error(req, &err)) { |
785 | 0 | tevent_req_received(req); |
786 | 0 | return err; |
787 | 0 | } |
788 | | |
789 | 0 | *pstream = talloc_move(mem_ctx, &state->npa_stream); |
790 | 0 | return 0; |
791 | 0 | } |
792 | | |
793 | | /** |
794 | | * @brief Sync connect to a local named pipe RPC interface |
795 | | * |
796 | | * Start "samba-dcerpcd" on demand if it is not already running |
797 | | * |
798 | | * @param[in] pipename The raw pipename to connect to without path |
799 | | * @param[in] remote_client_name The client name to transmit |
800 | | * @param[in] remote_client_addr The client addr to transmit |
801 | | * @param[in] local_server_name The server name to transmit |
802 | | * @param[in] local_server_addr The server addr to transmit |
803 | | * @param[in] session_info The authorization info to use |
804 | | * @param[in] need_idle_server Does this need to be an exclusive server? |
805 | | * @param[in] mem_ctx The memory context to use. |
806 | | * @param[out] pstream The established connection to the RPC server |
807 | | * @return 0/errno |
808 | | */ |
809 | | |
810 | | int local_np_connect( |
811 | | const char *pipename, |
812 | | enum dcerpc_transport_t transport, |
813 | | const char *remote_client_name, |
814 | | const struct tsocket_address *remote_client_addr, |
815 | | const char *local_server_name, |
816 | | const struct tsocket_address *local_server_addr, |
817 | | const struct auth_session_info *session_info, |
818 | | bool need_idle_server, |
819 | | TALLOC_CTX *mem_ctx, |
820 | | struct tstream_context **pstream) |
821 | 0 | { |
822 | 0 | struct tevent_context *ev = NULL; |
823 | 0 | struct tevent_req *req = NULL; |
824 | 0 | int ret = ENOMEM; |
825 | |
826 | 0 | ev = samba_tevent_context_init(mem_ctx); |
827 | 0 | if (ev == NULL) { |
828 | 0 | goto fail; |
829 | 0 | } |
830 | 0 | req = local_np_connect_send( |
831 | 0 | ev, |
832 | 0 | ev, |
833 | 0 | pipename, |
834 | 0 | transport, |
835 | 0 | remote_client_name, |
836 | 0 | remote_client_addr, |
837 | 0 | local_server_name, |
838 | 0 | local_server_addr, |
839 | 0 | session_info, |
840 | 0 | need_idle_server); |
841 | 0 | if (req == NULL) { |
842 | 0 | goto fail; |
843 | 0 | } |
844 | 0 | if (!tevent_req_poll_unix(req, ev, &ret)) { |
845 | 0 | goto fail; |
846 | 0 | } |
847 | 0 | ret = local_np_connect_recv(req, mem_ctx, pstream); |
848 | 0 | fail: |
849 | 0 | TALLOC_FREE(req); |
850 | 0 | TALLOC_FREE(ev); |
851 | 0 | return ret; |
852 | 0 | } |