Line data Source code
1 : #include "fd_quic_sandbox.h"
2 : #include "../fd_quic_private.h"
3 : #include "../templ/fd_quic_parse_util.h"
4 :
5 : /* fd_quic_sandbox_capture_pkt captures a single outgoing packet sent by
6 : fd_quic. */
7 :
8 : static void
9 : fd_quic_sandbox_capture_pkt( fd_quic_sandbox_t * sandbox,
10 0 : fd_aio_pkt_info_t const * pkt ) {
11 :
12 0 : ulong seq = sandbox->pkt_seq_w;
13 0 : fd_frag_meta_t * mcache = sandbox->pkt_mcache;
14 0 : void * dcache = sandbox->pkt_dcache;
15 0 : ulong mtu = sandbox->pkt_mtu;
16 0 : ulong chunk = sandbox->pkt_chunk;
17 0 : ulong chunk0 = fd_dcache_compact_chunk0( sandbox, dcache );
18 0 : ulong wmark = fd_dcache_compact_wmark ( sandbox, dcache, mtu );
19 0 : ulong depth = fd_mcache_depth( mcache );
20 0 : ulong sz = pkt->buf_sz;
21 0 : uchar * data = fd_chunk_to_laddr( sandbox, chunk );
22 0 : ulong ctl = fd_frag_meta_ctl( /* orig */ 0, /* som */ 1, /* eom */ 1, /* err */ 0 );
23 0 : long ts = sandbox->wallclock;
24 :
25 0 : fd_memcpy( data, pkt->buf, sz );
26 0 : ulong tscomp = fd_frag_meta_ts_comp( ts );
27 0 : fd_mcache_publish( mcache, depth, seq, 0UL, chunk, sz, ctl, tscomp, tscomp );
28 :
29 0 : sandbox->pkt_seq_w = fd_seq_inc( seq, 1UL );
30 0 : sandbox->pkt_chunk = fd_dcache_compact_next( chunk, pkt->buf_sz, chunk0, wmark );
31 0 : }
32 :
33 : /* fd_quic_sandbox_aio_send implements fd_aio_send_func_t. Called by
34 : the sandbox fd_quic to capture response packets into the sandbox
35 : capture ring. */
36 :
37 : static int
38 : fd_quic_sandbox_aio_send( void * ctx,
39 : fd_aio_pkt_info_t const * batch,
40 : ulong batch_cnt,
41 : ulong * opt_batch_idx,
42 0 : int flush ) {
43 :
44 0 : fd_quic_sandbox_t * sandbox = (fd_quic_sandbox_t *)ctx;
45 :
46 0 : for( ulong j=0UL; j<batch_cnt; j++ ) {
47 0 : fd_quic_sandbox_capture_pkt( sandbox, batch + j );
48 0 : }
49 :
50 0 : ulong _batch_idx[1];
51 0 : opt_batch_idx = opt_batch_idx ? opt_batch_idx : _batch_idx;
52 0 : *opt_batch_idx = batch_cnt;
53 :
54 0 : (void)flush;
55 0 : return FD_AIO_SUCCESS;
56 0 : }
57 :
58 : fd_frag_meta_t const *
59 0 : fd_quic_sandbox_next_packet( fd_quic_sandbox_t * sandbox ) {
60 0 : fd_frag_meta_t * mcache = sandbox->pkt_mcache;
61 :
62 0 : ulong depth = fd_mcache_depth( mcache );
63 0 : ulong seq = sandbox->pkt_seq_r;
64 0 : ulong mline = fd_mcache_line_idx( seq, depth );
65 :
66 0 : fd_frag_meta_t * frag = mcache + mline;
67 0 : if( FD_UNLIKELY( fd_seq_lt( frag->seq, seq ) ) ) return NULL;
68 0 : if( FD_UNLIKELY( fd_seq_gt( frag->seq, seq ) ) ) {
69 : /* Occurs if the fd_quic published 'depth' packets in succession
70 : without any reads via this function. */
71 0 : FD_LOG_WARNING(( "overrun detected, some captured packets were lost" ));
72 0 : seq = frag->seq;
73 0 : }
74 :
75 0 : sandbox->pkt_seq_r = fd_seq_inc( seq, 1UL );
76 :
77 0 : return frag;
78 0 : }
79 :
/* Deterministic test-only Ed25519 keypair identifying the sandboxed
   fd_quic instance ("self").  Layout is 32 byte private key followed
   by the 32 byte public key.  The private key is the constant byte
   0x41 repeated -- deliberately non-secret, for reproducible tests. */

uchar const fd_quic_sandbox_self_ed25519_keypair[64] =
  { /* private key */
    0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41,
    0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41,
    0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41,
    0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41, 0x41,
    /* public key */
    0xdb, 0x99, 0x5f, 0xe2, 0x51, 0x69, 0xd1, 0x41,
    0xca, 0xb9, 0xbb, 0xba, 0x92, 0xba, 0xa0, 0x1f,
    0x9f, 0x2e, 0x1e, 0xce, 0x7d, 0xf4, 0xcb, 0x2a,
    0xc0, 0x51, 0x90, 0xf3, 0x7f, 0xcc, 0x1f, 0x9d };

/* Deterministic test-only Ed25519 keypair for the simulated remote
   peer.  Same layout as above; private key is 0x42 repeated. */

uchar const fd_quic_sandbox_peer_ed25519_keypair[64] =
  { /* private key */
    0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42,
    0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42,
    0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42,
    0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42, 0x42,
    /* public key */
    0x21, 0x52, 0xf8, 0xd1, 0x9b, 0x79, 0x1d, 0x24,
    0x45, 0x32, 0x42, 0xe1, 0x5f, 0x2e, 0xab, 0x6c,
    0xb7, 0xcf, 0xfa, 0x7b, 0x6a, 0x5e, 0xd3, 0x00,
    0x97, 0x96, 0x0e, 0x06, 0x98, 0x81, 0xdb, 0x12 };

/* Fixed AES-128 key for deterministic packet protection in tests
   (constant byte 0x43). */

uchar const fd_quic_sandbox_aes128_key[16] =
  { 0x43, 0x43, 0x43, 0x43, 0x43, 0x43, 0x43, 0x43,
    0x43, 0x43, 0x43, 0x43, 0x43, 0x43, 0x43, 0x43 };

/* All-zero AES-GCM IV paired with the key above. */

uchar const fd_quic_sandbox_aes128_iv[12] =
  { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x00, 0x00, 0x00 };
112 :
113 : ulong
114 0 : fd_quic_sandbox_align( void ) {
115 0 : return fd_ulong_max( fd_ulong_max( fd_ulong_max( fd_ulong_max(
116 0 : alignof(fd_quic_sandbox_t),
117 0 : fd_quic_align() ),
118 0 : fd_mcache_align() ),
119 0 : fd_dcache_align() ),
120 0 : FD_CHUNK_ALIGN );
121 0 : }
122 :
123 : ulong
124 : fd_quic_sandbox_footprint( fd_quic_limits_t const * quic_limits,
125 : ulong pkt_cnt,
126 0 : ulong mtu ) {
127 :
128 0 : ulong root_align = fd_quic_sandbox_align();
129 0 : ulong quic_fp = fd_quic_footprint( quic_limits );
130 0 : ulong mcache_fp = fd_mcache_footprint( pkt_cnt, 0UL );
131 0 : ulong dcache_fp = fd_dcache_footprint( fd_dcache_req_data_sz( mtu, pkt_cnt, 1UL, 1 ), 0UL );
132 :
133 0 : if( FD_UNLIKELY( !quic_fp ) ) return 0UL;
134 0 : if( FD_UNLIKELY( !mcache_fp ) ) return 0UL;
135 0 : if( FD_UNLIKELY( !dcache_fp ) ) return 0UL;
136 :
137 0 : ulong l = FD_LAYOUT_INIT;
138 0 : l = FD_LAYOUT_APPEND( l, root_align, sizeof(fd_quic_sandbox_t) );
139 0 : l = FD_LAYOUT_APPEND( l, fd_quic_align(), quic_fp );
140 0 : l = FD_LAYOUT_APPEND( l, fd_mcache_align(), mcache_fp );
141 0 : l = FD_LAYOUT_APPEND( l, fd_dcache_align(), dcache_fp );
142 0 : return FD_LAYOUT_FINI( l, root_align );
143 0 : }
144 :
/* fd_quic_sandbox_new formats a memory region as a sandbox.  mem must
   be aligned to fd_quic_sandbox_align() and have at least
   fd_quic_sandbox_footprint( quic_limits, pkt_cnt, mtu ) bytes.
   Returns the new sandbox on success or NULL on bad params (logs
   warning).  The region layout is: sandbox struct, quic instance,
   capture mcache, capture dcache (must match the footprint fn). */

fd_quic_sandbox_t *
fd_quic_sandbox_new( void *                   mem,
                     fd_quic_limits_t const * quic_limits,
                     ulong                    pkt_cnt,
                     ulong                    mtu ) {

  if( FD_UNLIKELY( !mem ) ) {
    FD_LOG_WARNING(( "NULL mem" ));
    return NULL;
  }

  if( FD_UNLIKELY( !fd_ulong_is_aligned( (ulong)mem, fd_quic_sandbox_align() ) ) ) {
    FD_LOG_WARNING(( "misaligned mem" ));
    return NULL;
  }

  /* Validates quic_limits / pkt_cnt / mtu as a side effect */
  ulong fp = fd_quic_sandbox_footprint( quic_limits, pkt_cnt, mtu );
  if( FD_UNLIKELY( !fp ) ) {
    FD_LOG_WARNING(( "invalid params" ));
    return NULL;
  }

  ulong root_align     = fd_quic_sandbox_align();
  ulong quic_fp        = fd_quic_footprint( quic_limits );
  ulong mcache_fp      = fd_mcache_footprint( pkt_cnt, 0UL );
  ulong dcache_data_sz = fd_dcache_req_data_sz( mtu, pkt_cnt, 1UL, 1 );
  ulong dcache_fp      = fd_dcache_footprint( dcache_data_sz, 0UL );

  /* Carve the region into sub-regions; order and alignment must match
     fd_quic_sandbox_footprint exactly */
  FD_SCRATCH_ALLOC_INIT( l, mem );
  fd_quic_sandbox_t * sandbox = FD_SCRATCH_ALLOC_APPEND( l, root_align,        sizeof(fd_quic_sandbox_t) );
  void * quic_mem   = FD_SCRATCH_ALLOC_APPEND( l, fd_quic_align(),   quic_fp   );
  void * mcache_mem = FD_SCRATCH_ALLOC_APPEND( l, fd_mcache_align(), mcache_fp );
  void * dcache_mem = FD_SCRATCH_ALLOC_APPEND( l, fd_dcache_align(), dcache_fp );
  FD_SCRATCH_ALLOC_FINI( l, root_align );

  ulong seq0 = 0UL; /* the first packet in the capture always has sequence number 0 */

  /* Designated initializer zeroes all unmentioned fields (incl.
     pkt_seq_w, pkt_chunk, wallclock, magic) */
  *sandbox = (fd_quic_sandbox_t) {
    .quic       = fd_quic_join  ( fd_quic_new  ( quic_mem,   quic_limits ) ),
    .pkt_mcache = fd_mcache_join( fd_mcache_new( mcache_mem, pkt_cnt, 0UL, seq0 ) ),
    .pkt_dcache = fd_dcache_join( fd_dcache_new( dcache_mem, dcache_data_sz, 0UL ) ),
    .pkt_seq_r  = seq0,
    .pkt_mtu    = mtu
  };
  /* The quic's shm log region lives at a layout offset inside the quic
     region; join the sandbox's RX end to it */
  void * shmlog = (void *)( (ulong)sandbox->quic + sandbox->quic->layout.log_off );
  if( FD_UNLIKELY( !fd_quic_log_rx_join( sandbox->log_rx, shmlog ) ) ) {
    FD_LOG_CRIT(( "Failed to join the log of a newly created quic" ));
  }

  /* Publish the magic last so concurrent observers never see a
     half-initialized sandbox */
  FD_COMPILER_MFENCE();
  sandbox->magic = FD_QUIC_SANDBOX_MAGIC;
  FD_COMPILER_MFENCE();

  return sandbox;
}
200 :
/* fd_quic_sandbox_init (re-)initializes the sandboxed quic for a test
   run: configures it for the given role (FD_QUIC_ROLE_CLIENT or
   FD_QUIC_ROLE_SERVER), wires up packet capture, calls fd_quic_init,
   and resets clocks / capture ring read-write state.  Returns sandbox
   on success, NULL if fd_quic_init fails. */

fd_quic_sandbox_t *
fd_quic_sandbox_init( fd_quic_sandbox_t * sandbox,
                      int                 role ) {

  fd_quic_t *        quic     = sandbox->quic;
  fd_quic_config_t * quic_cfg = &quic->config;

  quic_cfg->role                       = role;
  quic_cfg->idle_timeout               = FD_QUIC_SANDBOX_IDLE_TIMEOUT;
  quic_cfg->initial_rx_max_stream_data = 512UL; /* arbitrary */
  /* identity = public half (bytes 32..63) of the self keypair */
  memcpy( quic_cfg->identity_public_key, fd_quic_sandbox_self_ed25519_keypair + 32, 32 );
  /* start each test run from zeroed metrics */
  memset( &quic->metrics, 0, sizeof(fd_quic_metrics_t) );

  /* Route all of quic's outgoing packets into the capture ring.
     NOTE(review): aio_tx is a stack temporary -- assumes
     fd_quic_set_aio_net_tx copies it by value; confirm. */
  fd_aio_t aio_tx = {
    .send_func = fd_quic_sandbox_aio_send,
    .ctx       = sandbox
  };
  fd_quic_set_aio_net_tx( quic, &aio_tx );

  if( FD_UNLIKELY( !fd_quic_init( quic ) ) ) {
    FD_LOG_WARNING(( "fd_quic_init failed" ));
    return NULL;
  }

  /* Verify that the conn state counts start correct: all conns begin
     in INVALID (free), none in any live state */
  FD_TEST( quic->metrics.conn_state_cnt[ FD_QUIC_CONN_STATE_INVALID            ] == quic->limits.conn_cnt );
  FD_TEST( quic->metrics.conn_state_cnt[ FD_QUIC_CONN_STATE_HANDSHAKE          ] == 0 );
  FD_TEST( quic->metrics.conn_state_cnt[ FD_QUIC_CONN_STATE_HANDSHAKE_COMPLETE ] == 0 );
  FD_TEST( quic->metrics.conn_state_cnt[ FD_QUIC_CONN_STATE_ACTIVE             ] == 0 );
  FD_TEST( quic->metrics.conn_state_cnt[ FD_QUIC_CONN_STATE_PEER_CLOSE         ] == 0 );
  FD_TEST( quic->metrics.conn_state_cnt[ FD_QUIC_CONN_STATE_ABORT              ] == 0 );
  FD_TEST( quic->metrics.conn_state_cnt[ FD_QUIC_CONN_STATE_CLOSE_PENDING      ] == 0 );
  FD_TEST( quic->metrics.conn_state_cnt[ FD_QUIC_CONN_STATE_DEAD               ] == 0 );

  /* Start the mock clock at 1 (0 can read as "unset") and rewind the
     capture ring to empty */
  fd_quic_state_t * state = fd_quic_get_state( quic );
  state->now               = 1L;
  sandbox->wallclock       = 1L;
  sandbox->pkt_seq_r       = 0UL;
  sandbox->pkt_seq_w       = 0UL;
  sandbox->pkt_mcache[0].seq = ULONG_MAX; /* mark first entry as unpublished */
  sandbox->pkt_chunk       = fd_dcache_compact_chunk0( sandbox, sandbox->pkt_dcache );

  /* skip ahead the log seq no (prime offset so tests don't
     accidentally rely on log seq starting at zero) */
  fd_quic_log_tx_t * log_tx = state->log_tx;
  log_tx->seq += 4093; /* prime */

  return sandbox;
}
249 :
250 : void *
251 0 : fd_quic_sandbox_delete( fd_quic_sandbox_t * mem ) {
252 :
253 0 : if( FD_UNLIKELY( !mem ) ) {
254 0 : FD_LOG_WARNING(( "NULL mem" ));
255 0 : return NULL;
256 0 : }
257 :
258 0 : fd_quic_sandbox_t * sandbox = (fd_quic_sandbox_t *)mem;
259 0 : if( FD_UNLIKELY( sandbox->magic != FD_QUIC_SANDBOX_MAGIC ) ) {
260 0 : FD_LOG_WARNING(( "invalid magic" ));
261 0 : return NULL;
262 0 : }
263 :
264 0 : FD_COMPILER_MFENCE();
265 0 : sandbox->magic = 0UL;
266 0 : FD_COMPILER_MFENCE();
267 :
268 0 : fd_quic_delete ( fd_quic_leave ( sandbox->quic ) );
269 0 : fd_mcache_delete( fd_mcache_leave( sandbox->pkt_mcache ) );
270 0 : fd_dcache_delete( fd_dcache_leave( sandbox->pkt_dcache ) );
271 :
272 0 : return mem;
273 0 : }
274 :
/* fd_quic_sandbox_new_conn_established creates a conn object on the
   sandboxed quic and force-marks it as if the handshake had already
   completed, so tests can inject application-level frames directly.
   Conn IDs are drawn from rng.  Returns the conn, or NULL if
   fd_quic_conn_create fails (e.g. conn table full). */

fd_quic_conn_t *
fd_quic_sandbox_new_conn_established( fd_quic_sandbox_t * sandbox,
                                      fd_rng_t *          rng ) {

  fd_quic_t * quic = sandbox->quic;

  /* fd_quic_t conn IDs are always 8 bytes */
  ulong our_conn_id_u64 = fd_rng_ulong( rng );

  /* the peer may choose a conn ID size 1 to 16 bytes
     For now, pick 8 bytes too */
  ulong peer_conn_id_u64 = fd_rng_ulong( rng );
  fd_quic_conn_id_t peer_conn_id = fd_quic_conn_id_new( &peer_conn_id_u64, 8UL );

  fd_quic_conn_t * conn = fd_quic_conn_create(
      /* quic         */ quic,
      /* our_conn_id  */ our_conn_id_u64,
      /* peer_conn_id */ &peer_conn_id,
      /* dst_ip_addr  */ FD_QUIC_SANDBOX_PEER_IP4,
      /* dst_udp_addr */ FD_QUIC_SANDBOX_PEER_PORT,
      /* src_ip_addr  */ FD_QUIC_SANDBOX_SELF_IP4,
      /* src_udp_addr */ FD_QUIC_SANDBOX_SELF_PORT,
      /* server       */ quic->config.role == FD_QUIC_ROLE_SERVER );
  if( FD_UNLIKELY( !conn ) ) {
    FD_LOG_WARNING(( "fd_quic_conn_create failed" ));
    return NULL;
  }

  conn->state       = FD_QUIC_CONN_STATE_ACTIVE;
  conn->established = 1;

  /* Mock a completed handshake: only app-data keys are available,
     as after a real 1-RTT handshake */
  conn->handshake_complete = 1;
  conn->keys_avail         = 1U<<fd_quic_enc_level_appdata_id;

  conn->idle_timeout_ns = FD_QUIC_SANDBOX_IDLE_TIMEOUT;
  conn->last_activity   = sandbox->wallclock;

  /* Reset flow control limits so tests control them explicitly */
  conn->tx_max_data                    = 0UL;
  conn->tx_tot_data                    = 0UL;
  conn->srx->rx_max_data               = 0UL;
  conn->srx->rx_tot_data               = 0UL;
  conn->srx->rx_max_data_ackd          = 0UL;
  conn->tx_initial_max_stream_data_uni = 0UL;

  /* Register the conn with the service timer wheel so timeouts fire */
  fd_quic_state_t * state = fd_quic_get_state( quic );
  fd_quic_svc_timers_schedule( state->svc_timers, conn, sandbox->wallclock );

  /* TODO set a realistic packet number */

  return conn;
}
328 :
329 : void
330 : fd_quic_sandbox_send_frame( fd_quic_sandbox_t * sandbox,
331 : fd_quic_conn_t * conn,
332 : fd_quic_pkt_t * pkt_meta,
333 : uchar const * frame_ptr,
334 0 : ulong frame_sz ) {
335 :
336 : /* TODO consider crafting a real app packet instead of bypassing
337 : packet processing checks */
338 :
339 0 : fd_quic_t * quic = sandbox->quic;
340 :
341 : /* set pkt_type to FD_QUIC_PKT_TYPE_ONE_RTT as it allows all
342 : * frame types */
343 0 : uint pkt_type = FD_QUIC_PKT_TYPE_ONE_RTT;
344 :
345 0 : ulong rc = fd_quic_handle_v1_frame( quic, conn, pkt_meta, pkt_type, frame_ptr, frame_sz );
346 0 : if( FD_UNLIKELY( rc==FD_QUIC_PARSE_FAIL ) ) return;
347 0 : if( FD_UNLIKELY( rc==0UL || rc>frame_sz ) ) {
348 0 : FD_LOG_CRIT(( "Invalid fd_quic_handle_v1_frame return value (rc=%#lx frame_sz=%#lx)", rc, frame_sz ));
349 0 : }
350 :
351 0 : }
352 :
/* fd_quic_sandbox_send_lone_frame delivers a single frame to the
   sandboxed quic as if it arrived alone in a 1-RTT packet from the
   peer: fabricates IP4/UDP/packet metadata, dispatches the frame,
   then triggers the normal delayed-ACK path for the mock packet. */

void
fd_quic_sandbox_send_lone_frame( fd_quic_sandbox_t * sandbox,
                                 fd_quic_conn_t *    conn,
                                 uchar const *       frame,
                                 ulong               frame_sz ) {

  FD_TEST( frame_sz <= sandbox->pkt_mtu );

  /* Allocate the next expected packet number.
     NOTE(review): index 2 presumably selects the app-data packet
     number space (matches enc_level appdata below) -- confirm against
     exp_pkt_number's declaration. */
  ulong pkt_num = conn->exp_pkt_number[2]++;

  ulong quic_pkt_sz = frame_sz; /* TODO mock some QUIC packetization overhead */

  /* Mock headers: 20 byte IP4 header + 8 byte UDP header, peer->self */
  fd_quic_pkt_t pkt_meta = {
    .ip4 = {{
      .verihl       = FD_IP4_VERIHL(4,5),
      .net_tot_len  = (ushort)( 20 + 8 + quic_pkt_sz ),
      .net_frag_off = 0x4000u, /* don't fragment */
      .ttl          = 64,
      .protocol     = FD_IP4_HDR_PROTOCOL_UDP,
    }},
    .udp = {{
      .net_sport = FD_QUIC_SANDBOX_PEER_PORT,
      .net_dport = FD_QUIC_SANDBOX_SELF_PORT,
      .net_len   = (ushort)( 8 + quic_pkt_sz ),
    }},
    .pkt_number = pkt_num,
    .rcv_time   = sandbox->wallclock,
    .enc_level  = fd_quic_enc_level_appdata_id,
  };

  fd_quic_sandbox_send_frame( sandbox, conn, &pkt_meta, frame, frame_sz );

  /* Schedule an ACK for the mock packet, as real RX processing would */
  fd_quic_lazy_ack_pkt( sandbox->quic, conn, &pkt_meta );

  /* Synchronize log seq[0] from tx to rx */
  fd_quic_log_tx_seq_update( fd_quic_get_state( sandbox->quic )->log_tx );
}
390 :
/* fd_quic_sandbox_send_ping_pkt builds, encrypts, and delivers a real
   1-RTT packet containing a single PING frame (plus padding) through
   the full packet processing path (header+payload protection intact),
   unlike the send_frame helpers which bypass decryption. */

void
fd_quic_sandbox_send_ping_pkt( fd_quic_sandbox_t * sandbox,
                               fd_quic_conn_t *    conn,
                               ulong               pktnum ) {

  /* Plaintext layout: [0]    short-header first byte
                       [1,9)  8 byte dest conn ID
                       [9,13) 4 byte packet number (big endian)
                       [13]   PING frame (0x01)
                       [14,32) zero padding */
  uchar pkt_buf[ 256 ];
  pkt_buf[0] = fd_quic_one_rtt_h0( /* spin */       0,
                                   /* key_phase */  !!conn->key_phase,
                                   /* pktnum_len-1 */ 3 );
  memcpy( pkt_buf+1, &conn->our_conn_id, FD_QUIC_CONN_ID_SZ );
  /* encode the low 32 bits of the packet number big-endian */
  uint pktnum_comp = fd_uint_bswap( (uint)( pktnum & UINT_MAX ) );
  memcpy( pkt_buf+9, &pktnum_comp, 4 );
  pkt_buf[13] = 0x01; /* PING frame */
  memset( pkt_buf+14, 0, 18UL );

  /* Encrypt in place with the peer's app-data keys: 13 byte header +
     19 byte payload + 16 byte AEAD tag = 48 byte ciphertext */
  fd_quic_crypto_keys_t * keys = &conn->keys[fd_quic_enc_level_appdata_id][0];
  ulong out_sz = 48UL;
  int crypt_res = fd_quic_crypto_encrypt( pkt_buf, &out_sz, pkt_buf, 13UL, pkt_buf+13, 19UL, keys, keys, pktnum );
  FD_TEST( crypt_res==FD_QUIC_SUCCESS );

  /* Mock IP4/UDP metadata for a packet arriving from the peer */
  fd_quic_pkt_t pkt = {
    .ip4 = {{
      .verihl       = FD_IP4_VERIHL(4,5),
      .net_tot_len  = 28,
      .net_frag_off = 0x4000u, /* don't fragment */
      .ttl          = 64,
      .protocol     = FD_IP4_HDR_PROTOCOL_UDP,
    }},
    .udp = {{
      .net_sport = FD_QUIC_SANDBOX_PEER_PORT,
      .net_dport = FD_QUIC_SANDBOX_SELF_PORT,
      .net_len   = 8,
    }},
    .pkt_number = pktnum,
    .rcv_time   = sandbox->wallclock,
    .enc_level  = fd_quic_enc_level_appdata_id,
  };

  /* Deliver through the normal v1 packet processing path */
  fd_quic_process_quic_packet_v1( sandbox->quic, &pkt, pkt_buf, out_sz );
}
|