Coverage Report

Created: 2025-10-29 07:05

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/ztunnel/src/admin.rs
Line
Count
Source
1
// Copyright Istio Authors
2
//
3
// Licensed under the Apache License, Version 2.0 (the "License");
4
// you may not use this file except in compliance with the License.
5
// You may obtain a copy of the License at
6
//
7
//     http://www.apache.org/licenses/LICENSE-2.0
8
//
9
// Unless required by applicable law or agreed to in writing, software
10
// distributed under the License is distributed on an "AS IS" BASIS,
11
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
// See the License for the specific language governing permissions and
13
// limitations under the License.
14
15
use crate::config::Config;
16
use crate::hyper_util::{Server, empty_response, plaintext_response};
17
use crate::identity::SecretManager;
18
use crate::state::DemandProxyState;
19
use crate::tls::Certificate;
20
use crate::version::BuildInfo;
21
use crate::xds::LocalConfig;
22
use crate::{signal, telemetry};
23
24
use base64::engine::general_purpose::STANDARD;
25
use bytes::Bytes;
26
use http_body_util::Full;
27
use hyper::body::Incoming;
28
use hyper::{Request, Response, header::CONTENT_TYPE, header::HeaderValue};
29
use std::borrow::Borrow;
30
use std::collections::HashMap;
31
32
use std::str::FromStr;
33
use std::sync::Arc;
34
use std::time::SystemTime;
35
use std::{net::SocketAddr, time::Duration};
36
37
use crate::drain::DrainWatcher;
38
use tokio::time;
39
use tracing::{error, info, warn};
40
use tracing_subscriber::filter;
41
42
pub trait AdminHandler: Sync + Send {
43
    fn key(&self) -> &'static str;
44
    // sadly can't use async trait because no Sync
45
    // see: https://github.com/dtolnay/async-trait/issues/248, https://github.com/dtolnay/async-trait/issues/142
46
    // we can't use FutureExt::shared because our result is not clonable
47
    fn handle(&self) -> anyhow::Result<serde_json::Value>;
48
}
49
50
struct State {
51
    proxy_state: DemandProxyState,
52
    config: Arc<Config>,
53
    shutdown_trigger: signal::ShutdownTrigger,
54
    cert_manager: Arc<SecretManager>,
55
    handlers: Vec<Arc<dyn AdminHandler>>,
56
}
57
58
pub struct Service {
59
    s: Server<State>,
60
}
61
62
#[derive(serde::Serialize, Clone)]
63
#[serde(rename_all = "camelCase")]
64
pub struct ConfigDump {
65
    #[serde(flatten)]
66
    proxy_state: DemandProxyState,
67
    static_config: LocalConfig,
68
    version: BuildInfo,
69
    config: Arc<Config>,
70
    certificates: Vec<CertsDump>,
71
}
72
73
#[derive(serde::Serialize, Debug, Clone, Default)]
74
#[serde(rename_all = "camelCase")]
75
pub struct CertDump {
76
    // Not available via Envoy, but still useful.
77
    pem: String,
78
    serial_number: String,
79
    valid_from: String,
80
    expiration_time: String,
81
}
82
83
#[derive(serde::Serialize, Debug, Clone, Default)]
84
#[serde(rename_all = "camelCase")]
85
pub struct CertsDump {
86
    identity: String,
87
    state: String,
88
    cert_chain: Vec<CertDump>,
89
    root_certs: Vec<CertDump>,
90
}
91
92
impl Service {
93
0
    pub async fn new(
94
0
        config: Arc<Config>,
95
0
        proxy_state: DemandProxyState,
96
0
        shutdown_trigger: signal::ShutdownTrigger,
97
0
        drain_rx: DrainWatcher,
98
0
        cert_manager: Arc<SecretManager>,
99
0
    ) -> anyhow::Result<Self> {
100
0
        Server::<State>::bind(
101
0
            "admin",
102
0
            config.admin_addr,
103
0
            drain_rx,
104
0
            State {
105
0
                config,
106
0
                proxy_state,
107
0
                shutdown_trigger,
108
0
                cert_manager,
109
0
                handlers: vec![],
110
0
            },
111
0
        )
112
0
        .await
113
0
        .map(|s| Service { s })
114
0
    }
115
116
0
    pub fn address(&self) -> SocketAddr {
117
0
        self.s.address()
118
0
    }
119
120
0
    pub fn add_handler(&mut self, handler: Arc<dyn AdminHandler>) {
121
0
        self.s.state_mut().handlers.push(handler);
122
0
    }
123
124
0
    pub fn spawn(self) {
125
0
        self.s.spawn(|state, req| async move {
126
0
            match req.uri().path() {
127
                #[cfg(target_os = "linux")]
128
0
                "/debug/pprof/profile" => handle_pprof(req).await,
129
                #[cfg(target_os = "linux")]
130
0
                "/debug/pprof/heap" => handle_jemalloc_pprof_heapgen(req).await,
131
0
                "/quitquitquit" => Ok(handle_server_shutdown(
132
0
                    state.shutdown_trigger.clone(),
133
0
                    req,
134
0
                    state.config.self_termination_deadline,
135
0
                )
136
0
                .await),
137
0
                "/config_dump" => {
138
0
                    handle_config_dump(
139
0
                        &state.handlers,
140
                        ConfigDump {
141
0
                            proxy_state: state.proxy_state.clone(),
142
0
                            static_config: Default::default(),
143
0
                            version: BuildInfo::new(),
144
0
                            config: state.config.clone(),
145
0
                            certificates: dump_certs(state.cert_manager.borrow()).await,
146
                        },
147
                    )
148
0
                    .await
149
                }
150
0
                "/logging" => Ok(handle_logging(req).await),
151
0
                "/" => Ok(handle_dashboard(req).await),
152
0
                _ => Ok(empty_response(hyper::StatusCode::NOT_FOUND)),
153
            }
154
0
        })
155
0
    }
156
}
157
158
0
async fn handle_dashboard(_req: Request<Incoming>) -> Response<Full<Bytes>> {
159
0
    let apis = &[
160
0
        (
161
0
            "debug/pprof/profile",
162
0
            "build profile using the pprof profiler (if supported)",
163
0
        ),
164
0
        (
165
0
            "debug/pprof/heap",
166
0
            "collect heap profiling data (if supported, requires jmalloc)",
167
0
        ),
168
0
        ("quitquitquit", "shut down the server"),
169
0
        ("config_dump", "dump the current Ztunnel configuration"),
170
0
        ("logging", "query/changing logging levels"),
171
0
    ];
172
173
0
    let mut api_rows = String::new();
174
175
0
    for (index, (path, description)) in apis.iter().copied().enumerate() {
176
0
        api_rows.push_str(&format!(
177
0
            "<tr class=\"{row_class}\"><td class=\"home-data\"><a href=\"{path}\">{path}</a></td><td class=\"home-data\">{description}</td></tr>\n",
178
0
            row_class = if index % 2 == 1 { "gray" } else { "vert-space" },
179
            path = path,
180
            description = description
181
        ));
182
    }
183
184
0
    let html_str = include_str!("./assets/dashboard.html");
185
0
    let html_str = html_str.replace("<!--API_ROWS_PLACEHOLDER-->", &api_rows);
186
187
0
    let mut response = plaintext_response(hyper::StatusCode::OK, html_str);
188
0
    response.headers_mut().insert(
189
0
        CONTENT_TYPE,
190
0
        HeaderValue::from_static("text/html; charset=utf-8"),
191
    );
192
193
0
    response
194
0
}
195
196
0
fn rfc3339(t: SystemTime) -> String {
197
    use chrono::prelude::{DateTime, Utc};
198
0
    let dt: DateTime<Utc> = t.into();
199
0
    dt.to_rfc3339_opts(chrono::SecondsFormat::Secs, true)
200
0
}
201
202
0
fn dump_cert(cert: &Certificate) -> CertDump {
203
0
    CertDump {
204
0
        pem: base64_encode(cert.as_pem()),
205
0
        serial_number: cert.serial(),
206
0
        valid_from: rfc3339(cert.expiration().not_before),
207
0
        expiration_time: rfc3339(cert.expiration().not_after),
208
0
    }
209
0
}
210
211
0
async fn dump_certs(cert_manager: &SecretManager) -> Vec<CertsDump> {
212
0
    let mut dump = cert_manager
213
0
        .collect_certs(|id, certs| {
214
0
            let mut dump = CertsDump {
215
0
                identity: id.to_string(),
216
0
                ..Default::default()
217
0
            };
218
            use crate::identity::CertState::*;
219
0
            match certs {
220
0
                Initializing(_) => dump.state = "Initializing".to_string(),
221
0
                Unavailable(err) => dump.state = format!("Unavailable: {err}"),
222
0
                Available(certs) => {
223
0
                    dump.state = "Available".to_string();
224
0
                    dump.cert_chain = certs
225
0
                        .cert_and_intermediates()
226
0
                        .iter()
227
0
                        .map(dump_cert)
228
0
                        .collect();
229
0
                    dump.root_certs = certs.roots.iter().map(dump_cert).collect();
230
0
                }
231
            };
232
0
            dump
233
0
        })
234
0
        .await;
235
    // Sort for determinism.
236
0
    dump.sort_by(|a, b| a.identity.cmp(&b.identity));
237
0
    dump
238
0
}
239
240
#[cfg(target_os = "linux")]
241
0
async fn handle_pprof(_req: Request<Incoming>) -> anyhow::Result<Response<Full<Bytes>>> {
242
    use pprof::protos::Message;
243
0
    let guard = pprof::ProfilerGuardBuilder::default()
244
0
        .frequency(1000)
245
        // .blocklist(&["libc", "libgcc", "pthread", "vdso"])
246
0
        .build()?;
247
248
0
    tokio::time::sleep(Duration::from_secs(10)).await;
249
0
    let report = guard.report().build()?;
250
0
    let profile = report.pprof()?;
251
252
0
    let body = profile.write_to_bytes()?;
253
254
0
    Ok(Response::builder()
255
0
        .status(hyper::StatusCode::OK)
256
0
        .body(body.into())
257
0
        .expect("builder with known status code should not fail"))
258
0
}
259
260
0
async fn handle_server_shutdown(
261
0
    shutdown_trigger: signal::ShutdownTrigger,
262
0
    _req: Request<Incoming>,
263
0
    self_term_wait: Duration,
264
0
) -> Response<Full<Bytes>> {
265
0
    match *_req.method() {
266
        hyper::Method::POST => {
267
0
            match time::timeout(self_term_wait, shutdown_trigger.shutdown_now()).await {
268
0
                Ok(()) => info!("Shutdown completed gracefully"),
269
0
                Err(_) => warn!(
270
0
                    "Graceful shutdown did not complete in {:?}, terminating now",
271
                    self_term_wait
272
                ),
273
            }
274
0
            plaintext_response(hyper::StatusCode::OK, "shutdown now\n".into())
275
        }
276
0
        _ => empty_response(hyper::StatusCode::METHOD_NOT_ALLOWED),
277
    }
278
0
}
279
280
0
async fn handle_config_dump(
281
0
    handlers: &[Arc<dyn AdminHandler>],
282
0
    mut dump: ConfigDump,
283
0
) -> anyhow::Result<Response<Full<Bytes>>> {
284
0
    if let Some(cfg) = dump.config.local_xds_config.clone() {
285
0
        match cfg.read_to_string().await {
286
0
            Ok(data) => match serde_yaml::from_str(&data) {
287
0
                Ok(c) => dump.static_config = c,
288
0
                Err(e) => error!(
289
0
                    "Failed to load static workloads from local XDS {:?}:{:?}",
290
0
                    dump.config.local_xds_config, e
291
                ),
292
            },
293
0
            Err(e) => error!(
294
0
                "Failed to read local XDS config {:?}:{:?}",
295
0
                dump.config.local_xds_config, e
296
            ),
297
        }
298
0
    }
299
300
0
    let serde_json::Value::Object(mut kv) = serde_json::to_value(&dump)? else {
301
0
        anyhow::bail!("config dump is not a key-value pair")
302
    };
303
304
0
    for h in handlers {
305
0
        let x = h.handle()?;
306
0
        kv.insert(h.key().to_string(), x);
307
    }
308
0
    let body = serde_json::to_string_pretty(&kv)?;
309
0
    Ok(Response::builder()
310
0
        .status(hyper::StatusCode::OK)
311
0
        .header(hyper::header::CONTENT_TYPE, "application/json")
312
0
        .body(body.into())
313
0
        .expect("builder with known status code should not fail"))
314
0
}
315
316
//mirror envoy's behavior: https://www.envoyproxy.io/docs/envoy/latest/operations/admin#post--logging
317
//NOTE: multiple query parameters is not supported, for example
318
//curl -X POST http://127.0.0.1:15000/logging?"tap=debug&router=debug"
319
static HELP_STRING: &str = "
320
usage: POST /logging\t\t\t\t\t\t(To list current level)
321
usage: POST /logging?level=<level>\t\t\t\t(To change global levels)
322
usage: POST /logging?level={mod1}:{level1},{mod2}:{level2}\t(To change specific mods' logging level)
323
324
hint: loglevel:\terror|warn|info|debug|trace|off
325
hint: mod_name:\tthe module name, i.e. ztunnel::proxy
326
";
327
0
async fn handle_logging(req: Request<Incoming>) -> Response<Full<Bytes>> {
328
0
    match *req.method() {
329
        hyper::Method::POST => {
330
0
            let qp: HashMap<String, String> = req
331
0
                .uri()
332
0
                .query()
333
0
                .map(|v| {
334
0
                    url::form_urlencoded::parse(v.as_bytes())
335
0
                        .into_owned()
336
0
                        .collect()
337
0
                })
338
0
                .unwrap_or_default();
339
0
            let level = qp.get("level").cloned();
340
0
            let reset = qp.get("reset").cloned();
341
0
            if level.is_some() || reset.is_some() {
342
0
                change_log_level(reset.is_some(), &level.unwrap_or_default())
343
            } else {
344
0
                list_loggers()
345
            }
346
        }
347
0
        _ => plaintext_response(
348
            hyper::StatusCode::METHOD_NOT_ALLOWED,
349
0
            format!("Invalid HTTP method\n {HELP_STRING}"),
350
        ),
351
    }
352
0
}
353
354
0
fn list_loggers() -> Response<Full<Bytes>> {
355
0
    match telemetry::get_current_loglevel() {
356
0
        Ok(loglevel) => plaintext_response(
357
            hyper::StatusCode::OK,
358
0
            format!("current log level is {loglevel}\n"),
359
        ),
360
0
        Err(err) => plaintext_response(
361
            hyper::StatusCode::INTERNAL_SERVER_ERROR,
362
0
            format!("failed to get the log level: {err}\n {HELP_STRING}"),
363
        ),
364
    }
365
0
}
366
367
0
fn validate_log_level(level: &str) -> anyhow::Result<()> {
368
0
    for clause in level.split(',') {
369
        // We support 2 forms, compared to the underlying library
370
        // <level>: supported, sets the default
371
        // <scope>:<level>: supported, sets a scope's level
372
        // <scope>: sets the scope to 'trace' level. NOT SUPPORTED.
373
0
        match clause {
374
0
            "off" | "error" | "warn" | "info" | "debug" | "trace" => continue,
375
0
            s if s.contains('=') => {
376
0
                filter::Targets::from_str(s)?;
377
            }
378
0
            s => anyhow::bail!("level {s} is invalid"),
379
        }
380
    }
381
0
    Ok(())
382
0
}
383
384
0
fn change_log_level(reset: bool, level: &str) -> Response<Full<Bytes>> {
385
0
    if !reset && level.is_empty() {
386
0
        return list_loggers();
387
0
    }
388
0
    if !level.is_empty()
389
0
        && let Err(_e) = validate_log_level(level)
390
    {
391
        // Invalid level provided
392
0
        return plaintext_response(
393
            hyper::StatusCode::BAD_REQUEST,
394
0
            format!("Invalid level provided: {level}\n{HELP_STRING}"),
395
        );
396
0
    };
397
0
    match telemetry::set_level(reset, level) {
398
0
        Ok(_) => list_loggers(),
399
0
        Err(e) => plaintext_response(
400
            hyper::StatusCode::BAD_REQUEST,
401
0
            format!("Failed to set new level: {e}\n{HELP_STRING}"),
402
        ),
403
    }
404
0
}
405
406
#[cfg(all(feature = "jemalloc", target_os = "linux"))]
407
async fn handle_jemalloc_pprof_heapgen(
408
    _req: Request<Incoming>,
409
) -> anyhow::Result<Response<Full<Bytes>>> {
410
    let Some(prof_ctrl) = jemalloc_pprof::PROF_CTL.as_ref() else {
411
        return Ok(Response::builder()
412
            .status(hyper::StatusCode::INTERNAL_SERVER_ERROR)
413
            .body("jemalloc profiling is not enabled".into())
414
            .expect("builder with known status code should not fail"));
415
    };
416
    let mut prof_ctl = prof_ctrl.lock().await;
417
    if !prof_ctl.activated() {
418
        return Ok(Response::builder()
419
            .status(hyper::StatusCode::INTERNAL_SERVER_ERROR)
420
            .body("jemalloc not enabled".into())
421
            .expect("builder with known status code should not fail"));
422
    }
423
    let pprof = prof_ctl.dump_pprof()?;
424
    Ok(Response::builder()
425
        .status(hyper::StatusCode::OK)
426
        .body(Bytes::from(pprof).into())
427
        .expect("builder with known status code should not fail"))
428
}
429
430
#[cfg(not(feature = "jemalloc"))]
431
0
async fn handle_jemalloc_pprof_heapgen(
432
0
    _req: Request<Incoming>,
433
0
) -> anyhow::Result<Response<Full<Bytes>>> {
434
0
    Ok(Response::builder()
435
0
        .status(hyper::StatusCode::INTERNAL_SERVER_ERROR)
436
0
        .body("jemalloc not enabled".into())
437
0
        .expect("builder with known status code should not fail"))
438
0
}
439
440
0
fn base64_encode(data: String) -> String {
441
    use base64::Engine;
442
0
    STANDARD.encode(data)
443
0
}
444
445
#[cfg(test)]
446
mod tests {
447
    use super::ConfigDump;
448
    use super::change_log_level;
449
    use super::dump_certs;
450
    use super::handle_config_dump;
451
    use crate::admin::HELP_STRING;
452
    use crate::config::ProxyConfig;
453
    use crate::config::construct_config;
454
    use crate::identity;
455
    use crate::strng;
456
    use crate::test_helpers::{get_response_str, helpers, new_proxy_state};
457
    use crate::xds::istio::security::Address as XdsAddress;
458
    use crate::xds::istio::security::Authorization as XdsAuthorization;
459
    use crate::xds::istio::security::Clause as XdsClause;
460
    use crate::xds::istio::security::Match as XdsMatch;
461
    use crate::xds::istio::security::Rule as XdsRule;
462
    use crate::xds::istio::security::ServiceAccountMatch as XdsServiceAccountMatch;
463
    use crate::xds::istio::security::StringMatch as XdsStringMatch;
464
    use crate::xds::istio::security::string_match::MatchType as XdsMatchType;
465
    use crate::xds::istio::workload::GatewayAddress as XdsGatewayAddress;
466
    use crate::xds::istio::workload::LoadBalancing as XdsLoadBalancing;
467
    use crate::xds::istio::workload::Locality as XdsLocality;
468
    use crate::xds::istio::workload::NetworkAddress as XdsNetworkAddress;
469
    use crate::xds::istio::workload::Port as XdsPort;
470
    use crate::xds::istio::workload::PortList as XdsPortList;
471
    use crate::xds::istio::workload::Service as XdsService;
472
    use crate::xds::istio::workload::Workload as XdsWorkload;
473
    use crate::xds::istio::workload::WorkloadType as XdsWorkloadType;
474
    use crate::xds::istio::workload::gateway_address::Destination as XdsDestination;
475
    use bytes::Bytes;
476
    use http_body_util::BodyExt;
477
    use std::collections::HashMap;
478
    use std::sync::Arc;
479
    use std::time::Duration;
480
481
    fn diff_json<'a>(a: &'a serde_json::Value, b: &'a serde_json::Value) -> String {
482
        let mut ret = String::new();
483
        let a = serde_json::to_string_pretty(a).unwrap();
484
        let b = serde_json::to_string_pretty(b).unwrap();
485
        for diff in diff::lines(&a, &b) {
486
            use diff::Result::*;
487
            use std::fmt::Write;
488
            match diff {
489
                Left(l) => writeln!(ret, " - {l}"),
490
                Right(r) => writeln!(ret, " + {r}"),
491
                Both(s, _) => writeln!(ret, "{s}"),
492
            }
493
            .unwrap();
494
        }
495
        ret
496
    }
497
498
    // Not really much to test, mostly to make sure things format as expected.
499
    #[tokio::test(start_paused = true)]
500
    async fn test_dump_certs() {
501
        fn identity(s: impl AsRef<str>) -> identity::Identity {
502
            use std::str::FromStr;
503
            identity::Identity::from_str(s.as_ref()).unwrap()
504
        }
505
506
        let manager = identity::mock::new_secret_manager_cfg(identity::mock::SecretManagerConfig {
507
            cert_lifetime: Duration::from_secs(7 * 60 * 60),
508
            fetch_latency: Duration::from_secs(1),
509
            epoch: Some(
510
                // Arbitrary point in time used to ensure deterministic certificate generation.
511
                chrono::DateTime::parse_from_rfc3339("2023-03-11T05:57:26Z")
512
                    .unwrap()
513
                    .into(),
514
            ),
515
        });
516
        for i in 0..2 {
517
            manager
518
                .fetch_certificate(&identity::Identity::Spiffe {
519
                    trust_domain: "trust_domain".into(),
520
                    namespace: "namespace".into(),
521
                    service_account: strng::format!("sa-{i}"),
522
                })
523
                .await
524
                .unwrap();
525
            // Make sure certificates are a significant amount of time apart, for better
526
            // readability.
527
            tokio::time::sleep(Duration::from_secs(60 * 60 - 1)).await;
528
        }
529
530
        manager
531
            .fetch_certificate(&identity("spiffe://error/ns/forgotten/sa/sa-failed"))
532
            .await
533
            .unwrap_err();
534
535
        // Start a fetch asynchronously and proceed enough to have it pending, but not finish.
536
        let pending_manager = manager.clone();
537
        let pending_fetch = tokio::task::spawn(async move {
538
            pending_manager
539
                .fetch_certificate(&identity("spiffe://test/ns/test/sa/sa-pending"))
540
                .await
541
        });
542
        tokio::time::sleep(Duration::from_nanos(1)).await;
543
544
        let got = serde_json::to_value(dump_certs(&manager).await).unwrap();
545
        let want = serde_json::json!([
546
          {
547
            "certChain": [],
548
            "rootCerts": [],
549
            "identity": "spiffe://error/ns/forgotten/sa/sa-failed",
550
            "state": "Unavailable: the identity is no longer needed"
551
          },
552
          {
553
            "certChain": [],
554
            "rootCerts": [],
555
            "identity": "spiffe://test/ns/test/sa/sa-pending",
556
            "state": "Initializing"
557
          },
558
          {
559
            "certChain": [
560
              {
561
                "expirationTime": "2023-03-11T12:57:26Z",
562
                "pem": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNXekNDQVVPZ0F3SUJBZ0lVTDVaZ0toTEI1YUt3YXRuZE1sR25CZWZ3Qkxnd0RRWUpLb1pJaHZjTgpBUUVMQlFBd0dERVdNQlFHQTFVRUNnd05ZMngxYzNSbGNpNXNiMk5oYkRBZUZ3MHlNekF6TVRFd05UVTMKTWpaYUZ3MHlNekF6TVRFeE1qVTNNalphTUJneEZqQVVCZ05WQkFvTURXTnNkWE4wWlhJdWJHOWpZV3d3CldUQVRCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFSYXIyQm1JWUFndkptT3JTcENlRlE3OUpQeQo4Y3c0K3pFRThmcXI1N2svdW1NcDVqWFpFR0JwZWRCSVkrcWZtSlBYRWlyYTlFOTJkU21rZks1QUtNV3gKbzJnd1pqQTFCZ05WSFJFRUxqQXNoaXB6Y0dsbVptVTZMeTkwY25WemRGOWtiMjFoYVc0dmJuTXZibUZ0ClpYTndZV05sTDNOaEwzTmhMVEF3RGdZRFZSMFBBUUgvQkFRREFnV2dNQjBHQTFVZEpRUVdNQlFHQ0NzRwpBUVVGQndNQkJnZ3JCZ0VGQlFjREFqQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFsSW4xek1jTXdjbi8KUEFoN1JvRGI2dnFzZUx6T1RyU1NWMW5qNWt6aGNMdUU0YUNMNFNWbk54SytYTnJUVXdoU3dOdGVZbXFuCnVKTG5DUVVzdS9nVjVWZUt3OGRlNDErWjYvUVhjSzMwNHZXMVl5d2NMcVNWZWd5QkcvT0NzUndvRjIzSwpVMkg1ZXdKV1RSQi9YWGl2TERkMEZsOGIwTkNCN2ZtcmRsRDlZMXlaU1g2aXJwTk1QT1Y5L1B1ckllUUkKR2hvK2dsYjlIME96Tjc5Z2JudldGbEw0RzZVaTlLbzNmeGZhUWpVVVRWbFdpMlh4VlE0MGR6VHV2cG11Ci9qRVh4M0pOQ01zRU5hb3dNYnFTZTlqck9zd0UwMy80ejJCZjBTbkRkdGRwalloN0xZZkRqWkxldTIweAp6VzlNTFM3NU1qdG4vYjV4bHlXeGFyMWh5MnAxS1E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==",
563
                "serialNumber": "271676055104741785552467469040731750696653685944",
564
                "validFrom": "2023-03-11T05:57:26Z"
565
              },
566
            ],
567
            "rootCerts": [
568
              {
569
                "expirationTime": "2299-01-17T23:35:46Z",
570
                "pem": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJRENDQWdpZ0F3SUJBZ0lVUmxsdFV1bTJRbTE1dFQ5end1MmtwaDR2ZWRjd0RRWUpLb1pJaHZjTgpBUUVMQlFBd0dERVdNQlFHQTFVRUNnd05ZMngxYzNSbGNpNXNiMk5oYkRBZ0Z3MHlOVEEwTURNeU16TTEKTkRaYUdBOHlNams1TURFeE56SXpNelUwTmxvd0dERVdNQlFHQTFVRUNnd05ZMngxYzNSbGNpNXNiMk5oCmJEQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUxxVHVwVXlMK2pvd3FOZQpMQUxFbnlXYS9VNmgyaktCYzFYWUFtekR1MDN4S0VhM3JhU1ZzU05BYjFnN1hybmgxaTViNEg0enBtY3gKdStsZURlMDh4OEdOOFJRVjBoUlE0bkkvb0lseHhmc2NOWDZoNGwyVlRRSGNLcnFaYUFRQ2NDTVJuc2EzCk9tUFNPQmRPdTR2ZkFxeVVxMS9ici82TEczRWFQMDYxQ09lMzVWUTFhbkZJYXQrVWJ6bEcrZmpGbXZXbwpxZFdFMVFaekV4UWdXV3VKNjh6RjJBN25MTXVxc0k5cG8wR2FKcHhwajZnc0tIZ3NRZ1JoYWR4UlR3ejAKc0hrVE0rS216SkY0aTJ1NDJ3VHc5YWpzME5NZmQ5WjdBbWlvRXpnS0J3bURBdGQra04zUFdyby8vaHAxClRtOUVqTVFac2s3QmV6NVVyUDA4Y09yTXNOTUNBd0VBQWFOZ01GNHdIUVlEVlIwT0JCWUVGRzlmWGRqQgo0THN2RUpxWUxZNllQc2xWMWxXVU1COEdBMVVkSXdRWU1CYUFGRzlmWGRqQjRMc3ZFSnFZTFk2WVBzbFYKMWxXVU1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0N3WURWUjBQQkFRREFnSUVNQTBHQ1NxR1NJYjNEUUVCCkN3VUFBNElCQVFDaXVMUzljZkNjRDNDblNGbUpOays5MkNhRXEyUmxTMXF1dmdTa3Z5ckhZNTV4cUxrYQpCbUVDU3VCT2FCT3lHNlZMaFlPMy9OeDBwRERJbUJYak1GZTRJRVJER3QvQTA0am41S2RFTGRiK1laOWUKdUZvY09xdWpucnFVYkxXT2Zra21rd3E5TDFWNjNsKzAxdGRFUlhYa0ZuWHM4QTFhUnh6U2RCSVUrZEtKCmpyRHNtUzdnK1B5dWNEZzJ2WWtTcExoMTdhTm1RdndrOWRPMlpvVHdMcW1JSEZYcHhlNW1PdmlyRVE1RQpYL1JzRW9IY0hURTNGUk0xaDBVdUI1SjN4ekVoOXpHUFRwNWljS2d1TC9vUElmUXVJdWhaRCtWNWg3ZzcKS3k1RHlNVWNLT0l1T0c2SStLdDJYaWpHMld5UHRwWEJBTXJoU2ZaM2ViQWd0WjZJdjZxdgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==",
571
                "serialNumber": "401623643733315109898464329860171355725264550359",
572
                "validFrom": "2025-04-03T23:35:46Z"
573
              }
574
            ],
575
            "identity": "spiffe://trust_domain/ns/namespace/sa/sa-0",
576
            "state": "Available"
577
          },
578
          {
579
            "certChain": [
580
              {
581
                "expirationTime": "2023-03-11T13:57:26Z",
582
                "pem": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNXekNDQVVPZ0F3SUJBZ0lVSlVGNVVGbU52OVhYQlFWaDFDbFk0VFNLRng4d0RRWUpLb1pJaHZjTgpBUUVMQlFBd0dERVdNQlFHQTFVRUNnd05ZMngxYzNSbGNpNXNiMk5oYkRBZUZ3MHlNekF6TVRFd05qVTMKTWpaYUZ3MHlNekF6TVRFeE16VTNNalphTUJneEZqQVVCZ05WQkFvTURXTnNkWE4wWlhJdWJHOWpZV3d3CldUQVRCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFSYXIyQm1JWUFndkptT3JTcENlRlE3OUpQeQo4Y3c0K3pFRThmcXI1N2svdW1NcDVqWFpFR0JwZWRCSVkrcWZtSlBYRWlyYTlFOTJkU21rZks1QUtNV3gKbzJnd1pqQTFCZ05WSFJFRUxqQXNoaXB6Y0dsbVptVTZMeTkwY25WemRGOWtiMjFoYVc0dmJuTXZibUZ0ClpYTndZV05sTDNOaEwzTmhMVEV3RGdZRFZSMFBBUUgvQkFRREFnV2dNQjBHQTFVZEpRUVdNQlFHQ0NzRwpBUVVGQndNQkJnZ3JCZ0VGQlFjREFqQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFtZ2g1WENwMGp6OWEKS3NvTzZBUlBVWmlKbnhDY2xobHlleUJpbkE1cEFkY0F4V2hNN2xMdklxZXNCT3hpRFdhbFR0Z2QzV29OClJGak1VMUNOa0RmQWRoZDhLSTVoaCtpS0Z3eitYK3JIMThSM0c4SDAyQTZWMnpuYVdGald0a1dvc3c4eQpySHlIYjJBaThXakRVV1dwQ21KL0M3ZUJuVEl3OHMrM2ZMZ2o4Rm5rOVZwcjdSNEovc3ppcGVoczZyRHMKQ1pCQzFKVVA0cXovUis1L3VPWHE3cnBHY05SQVlibXVZNllKbXRWVUxKRXl3THFtUjJCckVvKzFZN0VkCkpxRWFPSUdFTEVrdENNazBvZUhkRmZoWWlqZXdmRXJVbVJFSzM2Yy8xY01XMk44MFlkVUMzd1UyWHlZdwpqWUswdkxWeng3U1Q4TmcwL0xlYUdJWGtrQW1PQ3c9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==",
583
                "serialNumber": "212692774886610945930036647276614034927450199839",
584
                "validFrom": "2023-03-11T06:57:26Z"
585
              },
586
            ],
587
            "rootCerts": [
588
              {
589
                "expirationTime": "2299-01-17T23:35:46Z",
590
                "pem": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJRENDQWdpZ0F3SUJBZ0lVUmxsdFV1bTJRbTE1dFQ5end1MmtwaDR2ZWRjd0RRWUpLb1pJaHZjTgpBUUVMQlFBd0dERVdNQlFHQTFVRUNnd05ZMngxYzNSbGNpNXNiMk5oYkRBZ0Z3MHlOVEEwTURNeU16TTEKTkRaYUdBOHlNams1TURFeE56SXpNelUwTmxvd0dERVdNQlFHQTFVRUNnd05ZMngxYzNSbGNpNXNiMk5oCmJEQ0NBU0l3RFFZSktvWklodmNOQVFFQkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUxxVHVwVXlMK2pvd3FOZQpMQUxFbnlXYS9VNmgyaktCYzFYWUFtekR1MDN4S0VhM3JhU1ZzU05BYjFnN1hybmgxaTViNEg0enBtY3gKdStsZURlMDh4OEdOOFJRVjBoUlE0bkkvb0lseHhmc2NOWDZoNGwyVlRRSGNLcnFaYUFRQ2NDTVJuc2EzCk9tUFNPQmRPdTR2ZkFxeVVxMS9ici82TEczRWFQMDYxQ09lMzVWUTFhbkZJYXQrVWJ6bEcrZmpGbXZXbwpxZFdFMVFaekV4UWdXV3VKNjh6RjJBN25MTXVxc0k5cG8wR2FKcHhwajZnc0tIZ3NRZ1JoYWR4UlR3ejAKc0hrVE0rS216SkY0aTJ1NDJ3VHc5YWpzME5NZmQ5WjdBbWlvRXpnS0J3bURBdGQra04zUFdyby8vaHAxClRtOUVqTVFac2s3QmV6NVVyUDA4Y09yTXNOTUNBd0VBQWFOZ01GNHdIUVlEVlIwT0JCWUVGRzlmWGRqQgo0THN2RUpxWUxZNllQc2xWMWxXVU1COEdBMVVkSXdRWU1CYUFGRzlmWGRqQjRMc3ZFSnFZTFk2WVBzbFYKMWxXVU1BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0N3WURWUjBQQkFRREFnSUVNQTBHQ1NxR1NJYjNEUUVCCkN3VUFBNElCQVFDaXVMUzljZkNjRDNDblNGbUpOays5MkNhRXEyUmxTMXF1dmdTa3Z5ckhZNTV4cUxrYQpCbUVDU3VCT2FCT3lHNlZMaFlPMy9OeDBwRERJbUJYak1GZTRJRVJER3QvQTA0am41S2RFTGRiK1laOWUKdUZvY09xdWpucnFVYkxXT2Zra21rd3E5TDFWNjNsKzAxdGRFUlhYa0ZuWHM4QTFhUnh6U2RCSVUrZEtKCmpyRHNtUzdnK1B5dWNEZzJ2WWtTcExoMTdhTm1RdndrOWRPMlpvVHdMcW1JSEZYcHhlNW1PdmlyRVE1RQpYL1JzRW9IY0hURTNGUk0xaDBVdUI1SjN4ekVoOXpHUFRwNWljS2d1TC9vUElmUXVJdWhaRCtWNWg3ZzcKS3k1RHlNVWNLT0l1T0c2SStLdDJYaWpHMld5UHRwWEJBTXJoU2ZaM2ViQWd0WjZJdjZxdgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==",
591
                "serialNumber": "401623643733315109898464329860171355725264550359",
592
                "validFrom": "2025-04-03T23:35:46Z"
593
              }
594
            ],
595
            "identity": "spiffe://trust_domain/ns/namespace/sa/sa-1",
596
            "state": "Available"
597
          }
598
        ]);
599
        assert_eq!(
600
            got,
601
            want,
602
            "Certificate lists do not match (-want, +got):\n{}",
603
            diff_json(&want, &got)
604
        );
605
        pending_fetch.await.unwrap().unwrap();
606
    }
607
608
    #[tokio::test(start_paused = true)]
609
    async fn test_dump_config() {
610
        let manager = identity::mock::new_secret_manager_cfg(identity::mock::SecretManagerConfig {
611
            cert_lifetime: Duration::from_secs(7 * 60 * 60),
612
            fetch_latency: Duration::from_secs(1),
613
            epoch: Some(
614
                // Arbitrary point in time used to ensure deterministic certificate generation.
615
                chrono::DateTime::parse_from_rfc3339("2023-03-11T05:57:26Z")
616
                    .unwrap()
617
                    .into(),
618
            ),
619
        });
620
621
        let wl = XdsWorkload {
622
            addresses: vec![Bytes::copy_from_slice(&[127, 0, 0, 2])],
623
            hostname: "".to_string(),
624
            waypoint: Some(XdsGatewayAddress {
625
                destination: Some(XdsDestination::Address(XdsNetworkAddress {
626
                    network: "defaultnw".to_string(),
627
                    address: [127, 0, 0, 10].to_vec(),
628
                })),
629
                hbone_mtls_port: 15008,
630
            }),
631
            network_gateway: Some(XdsGatewayAddress {
632
                destination: Some(XdsDestination::Address(XdsNetworkAddress {
633
                    network: "defaultnw".to_string(),
634
                    address: [127, 0, 0, 11].to_vec(),
635
                })),
636
                hbone_mtls_port: 15008,
637
            }),
638
            tunnel_protocol: Default::default(),
639
            network_mode: Default::default(),
640
            uid: "uid".to_string(),
641
            name: "name".to_string(),
642
            namespace: "namespace".to_string(),
643
            trust_domain: "cluster.local".to_string(),
644
            service_account: "default".to_string(),
645
            network: "defaultnw".to_string(),
646
            workload_name: "workload_name".to_string(),
647
            canonical_name: "canonical_name".to_string(),
648
            canonical_revision: "canonical_revision".to_string(),
649
            node: "node".to_string(),
650
            status: Default::default(),
651
            cluster_id: "Kubernetes".to_string(),
652
            authorization_policies: Vec::new(),
653
            native_tunnel: false,
654
            application_tunnel: None,
655
            workload_type: XdsWorkloadType::Deployment.into(),
656
            services: HashMap::from([(
657
                "ns/svc1.ns.svc.cluster.local".to_string(),
658
                XdsPortList {
659
                    ports: vec![XdsPort {
660
                        service_port: 80,
661
                        target_port: 8080,
662
                    }],
663
                },
664
            )]),
665
            locality: Some(XdsLocality {
666
                region: "region".to_string(),
667
                zone: "zone".to_string(),
668
                subzone: "subezone".to_string(),
669
            }),
670
            extensions: Default::default(),
671
            capacity: Default::default(),
672
            // ..Default::default() // intentionally don't default. we want all fields populated
673
        };
674
675
        let svc = XdsService {
676
            name: "svc1".to_string(),
677
            namespace: "ns".to_string(),
678
            hostname: "svc1.ns.svc.cluster.local".to_string(),
679
            addresses: vec![XdsNetworkAddress {
680
                network: "defaultnw".to_string(),
681
                address: [127, 0, 1, 1].to_vec(),
682
            }],
683
            ports: vec![XdsPort {
684
                service_port: 80,
685
                target_port: 80,
686
            }],
687
            subject_alt_names: vec!["SAN1".to_string(), "SAN2".to_string()],
688
            waypoint: None,
689
            load_balancing: Some(XdsLoadBalancing {
690
                routing_preference: vec![1, 2],
691
                mode: 1,
692
                health_policy: 1,
693
            }), // ..Default::default() // intentionally don't default. we want all fields populated
694
            ip_families: 0,
695
            extensions: Default::default(),
696
        };
697
698
        let auth = XdsAuthorization {
699
            name: "svc1".to_string(),
700
            namespace: "ns".to_string(),
701
            scope: 0,
702
            action: 0,
703
            rules: vec![XdsRule {
704
                clauses: vec![XdsClause {
705
                    matches: vec![XdsMatch {
706
                        destination_ports: vec![80],
707
                        not_destination_ports: vec![8080],
708
                        source_ips: vec![XdsAddress {
709
                            address: Bytes::copy_from_slice(&[127, 0, 0, 2]),
710
                            length: 32,
711
                        }],
712
                        not_source_ips: vec![XdsAddress {
713
                            address: Bytes::copy_from_slice(&[127, 0, 0, 1]),
714
                            length: 32,
715
                        }],
716
                        destination_ips: vec![XdsAddress {
717
                            address: Bytes::copy_from_slice(&[127, 0, 0, 3]),
718
                            length: 32,
719
                        }],
720
                        not_destination_ips: vec![XdsAddress {
721
                            address: Bytes::copy_from_slice(&[127, 0, 0, 4]),
722
                            length: 32,
723
                        }],
724
                        namespaces: vec![XdsStringMatch {
725
                            match_type: Some(XdsMatchType::Exact("ns".to_string())),
726
                        }],
727
                        not_namespaces: vec![XdsStringMatch {
728
                            match_type: Some(XdsMatchType::Exact("not-ns".to_string())),
729
                        }],
730
                        service_accounts: vec![XdsServiceAccountMatch {
731
                            namespace: "ns".into(),
732
                            service_account: "sa".into(),
733
                        }],
734
                        not_service_accounts: vec![XdsServiceAccountMatch {
735
                            namespace: "ns".into(),
736
                            service_account: "sa".into(),
737
                        }],
738
                        principals: vec![XdsStringMatch {
739
                            match_type: Some(XdsMatchType::Exact(
740
                                "spiffe://cluster.local/ns/ns/sa/sa".to_string(),
741
                            )),
742
                        }],
743
                        not_principals: vec![XdsStringMatch {
744
                            match_type: Some(XdsMatchType::Exact(
745
                                "spiffe://cluster.local/ns/ns/sa/not-sa".to_string(),
746
                            )),
747
                        }],
748
                    }],
749
                }],
750
            }],
751
            // ..Default::default() // intentionally don't default. we want all fields populated
752
        };
753
754
        let proxy_state = new_proxy_state(&[wl], &[svc], &[auth]);
755
756
        let default_config = construct_config(ProxyConfig::default())
757
            .expect("could not build Config without ProxyConfig");
758
759
        let dump = ConfigDump {
760
            proxy_state,
761
            static_config: Default::default(),
762
            version: Default::default(),
763
            config: Arc::new(default_config),
764
            certificates: dump_certs(&manager).await,
765
        };
766
767
        // if for some reason we can't serialize the config dump, this will fail.
768
        //
769
        // this could happen for a variety of reasons; for example some types
770
        // may need custom serialize/deserialize to be keys in a map, like NetworkAddress
771
        let resp = handle_config_dump(&[], dump).await.unwrap();
772
773
        let resp_bytes = resp
774
            .body()
775
            .clone()
776
            .frame()
777
            .await
778
            .unwrap()
779
            .unwrap()
780
            .into_data()
781
            .unwrap();
782
        let resp_str = String::from(std::str::from_utf8(&resp_bytes).unwrap());
783
784
        // quick sanity check that our workload is there.
785
        // avoid stronger checks since serialization is not deterministic, and
786
        // most of the value of this test is ensuring that we can serialize
787
        // the config dump at all from our internal types
788
        assert!(resp_str.contains("127.0.0.2"), "{resp_str}");
789
        // Check a waypoint
790
        assert!(resp_str.contains(
791
            r#"waypoint": {
792
        "destination": "defaultnw/127.0.0.10",
793
        "hboneMtlsPort": 15008
794
      }"#
795
        ));
796
    }
797
798
    // each of these tests assert that we can change the log level and the
799
    // appropriate response string is returned.
800
    //
801
    // Note: tests need to be combined into one test function to be sure that
802
    // individual tests don't affect each other by asynchronously changing
803
    // the log level before the matching assert is called.
804
    #[tokio::test(start_paused = true)]
805
    async fn test_change_log_level() {
806
        helpers::initialize_telemetry();
807
808
        // no changes
809
        let resp = change_log_level(false, "");
810
        let resp_str = get_response_str(resp).await;
811
        assert_eq!(
812
            resp_str,
813
            "current log level is hickory_server::server=off,info\n"
814
        );
815
816
        let resp = change_log_level(true, "");
817
        let resp_str = get_response_str(resp).await;
818
        assert_eq!(
819
            resp_str,
820
            "current log level is hickory_server::server=off,info\n"
821
        );
822
823
        let resp = change_log_level(true, "invalid_level");
824
        let resp_str = get_response_str(resp).await;
825
        assert!(
826
            resp_str.contains(HELP_STRING),
827
            "got {resp_str} want {HELP_STRING}"
828
        );
829
830
        let resp = change_log_level(true, "debug");
831
        let resp_str = get_response_str(resp).await;
832
        assert_eq!(
833
            resp_str,
834
            "current log level is hickory_server::server=off,debug\n"
835
        );
836
837
        let resp = change_log_level(true, "access=debug,info");
838
        let resp_str = get_response_str(resp).await;
839
        assert_eq!(
840
            resp_str,
841
            "current log level is hickory_server::server=off,access=debug,info\n"
842
        );
843
844
        let resp = change_log_level(true, "warn");
845
        let resp_str = get_response_str(resp).await;
846
        assert_eq!(
847
            resp_str,
848
            "current log level is hickory_server::server=off,warn\n"
849
        );
850
851
        let resp = change_log_level(true, "error");
852
        let resp_str = get_response_str(resp).await;
853
        assert_eq!(
854
            resp_str,
855
            "current log level is hickory_server::server=off,error\n"
856
        );
857
858
        let resp = change_log_level(true, "trace");
859
        let resp_str = get_response_str(resp).await;
860
        assert!(resp_str.contains("current log level is hickory_server::server=off,trace\n"));
861
862
        let resp = change_log_level(true, "info");
863
        let resp_str = get_response_str(resp).await;
864
        assert!(resp_str.contains("current log level is hickory_server::server=off,info\n"));
865
866
        let resp = change_log_level(true, "off");
867
        let resp_str = get_response_str(resp).await;
868
        assert!(resp_str.contains("current log level is hickory_server::server=off,off\n"));
869
    }
870
}