// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddy
import (
"bytes"
"context"
"crypto"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"errors"
"expvar"
"fmt"
"hash"
"io"
"net"
"net/http"
"net/http/pprof"
"net/url"
"os"
"path"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/caddyserver/certmagic"
"github.com/cespare/xxhash/v2"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
func init() {
// The hard-coded default `DefaultAdminListen` can be overridden
// by setting the `CADDY_ADMIN` environment variable.
// The environment variable may be used by packagers to change
// the default admin address to something more appropriate for
// that platform. See #5317 for discussion.
if env, exists := os.LookupEnv("CADDY_ADMIN"); exists {
DefaultAdminListen = env
}
}
// AdminConfig configures Caddy's API endpoint, which is used
// to manage Caddy while it is running.
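//
// For example, an admin config that keeps the default listener but enables
// origin enforcement might look like this (values are illustrative):
//
//	{
//		"listen": "localhost:2019",
//		"enforce_origin": true,
//		"origins": ["localhost:2019"]
//	}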
type AdminConfig struct {
// If true, the admin endpoint will be completely disabled.
// Note that this makes any runtime changes to the config
// impossible, since the interface to do so is through the
// admin endpoint.
Disabled bool `json:"disabled,omitempty"`
// The address to which the admin endpoint's listener should
// bind itself. Can be any single network address that can be
// parsed by Caddy. Accepts placeholders.
// Default: the value of the `CADDY_ADMIN` environment variable,
// or `localhost:2019` otherwise.
//
// Remember: When changing this value through a config reload,
// be sure to use the `--address` CLI flag to specify the current
// admin address if the currently-running admin endpoint is not
// the default address.
Listen string `json:"listen,omitempty"`
// If true, CORS headers will be emitted, and requests to the
// API will be rejected if their `Host` and `Origin` headers
// do not match the expected value(s). Use `origins` to
// customize which origins/hosts are allowed. If `origins` is
// not set, the listen address is the only value allowed by
// default. Enforced only on local (plaintext) endpoint.
EnforceOrigin bool `json:"enforce_origin,omitempty"`
// The list of allowed origins/hosts for API requests. Only needed
// if accessing the admin endpoint from a host different from the
// socket's network interface or if `enforce_origin` is true. If not
// set, the listener address will be the default value. If set but
// empty, no origins will be allowed. Enforced only on local
// (plaintext) endpoint.
Origins []string `json:"origins,omitempty"`
// Options pertaining to configuration management.
Config *ConfigSettings `json:"config,omitempty"`
// Options that establish this server's identity. Identity refers to
// credentials which can be used to uniquely identify and authenticate
// this server instance. This is required if remote administration is
// enabled (but does not require remote administration to be enabled).
// Default: no identity management.
Identity *IdentityConfig `json:"identity,omitempty"`
// Options pertaining to remote administration. By default, remote
// administration is disabled. If enabled, identity management must
// also be configured, as that is how the endpoint is secured.
// See the neighboring "identity" object.
//
// EXPERIMENTAL: This feature is subject to change.
Remote *RemoteAdmin `json:"remote,omitempty"`
// Holds onto the routers so that we can later provision them
// if they require provisioning.
routers []AdminRouter
}
// ConfigSettings configures the management of configuration.
type ConfigSettings struct {
// Whether to keep a copy of the active config on disk. Default is true.
// Note that "pulled" dynamic configs (using the neighboring "load" module)
// are not persisted; only configs that are pushed to Caddy get persisted.
Persist *bool `json:"persist,omitempty"`
// Loads a new configuration. This is helpful if your configs are
// managed elsewhere and you want Caddy to pull its config dynamically
// when it starts. The pulled config completely replaces the current
// one, just like any other config load. It is an error if a pulled
// config is configured to pull another config without a load_delay,
// as this creates a tight loop.
//
// EXPERIMENTAL: Subject to change.
LoadRaw json.RawMessage `json:"load,omitempty" caddy:"namespace=caddy.config_loaders inline_key=module"`
// The duration after which to load config. If set, config will be pulled
// from the config loader after this duration. A delay is required if a
// dynamically-loaded config is configured to load yet another config. To
// load configs on a regular interval, ensure this value is set the same
// on all loaded configs; it can also be variable if needed, and to stop
// the loop, simply remove dynamic config loading from the next-loaded
// config.
//
// EXPERIMENTAL: Subject to change.
LoadDelay Duration `json:"load_delay,omitempty"`
}
// IdentityConfig configures management of this server's identity. An identity
// consists of credentials that uniquely verify this instance; for example,
// TLS certificates (public + private key pairs).
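//
// For example, an identity config might look like this (the identifier is an
// illustrative placeholder; the "acme" issuer shown is also what is used by
// default when no issuers are configured):
//
//	{
//		"identifiers": ["caddy.example.com"],
//		"issuers": [{"module": "acme"}]
//	}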
type IdentityConfig struct {
// List of names or IP addresses which refer to this server.
// Certificates will be obtained for these identifiers so
// secure TLS connections can be made using them.
Identifiers []string `json:"identifiers,omitempty"`
// Issuers that can provide this admin endpoint its identity
// certificate(s). Default: ACME issuers configured for
// ZeroSSL and Let's Encrypt. Be sure to change this if you
// require credentials for private identifiers.
IssuersRaw []json.RawMessage `json:"issuers,omitempty" caddy:"namespace=tls.issuance inline_key=module"`
issuers []certmagic.Issuer
}
// RemoteAdmin enables and configures remote administration. If enabled,
// a secure listener enforcing mutual TLS authentication will be started
// on a different port from the standard plaintext admin server.
//
// This endpoint is secured using identity management, which must be
// configured separately (because identity management does not depend
// on remote administration). See the admin/identity config struct.
//
// EXPERIMENTAL: Subject to change.
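//
// For example, a remote admin config that grants one client certificate
// read-only access to the config might look like this (values are
// illustrative; the public key is a truncated placeholder for
// base64-encoded DER):
//
//	{
//		"listen": ":2021",
//		"access_control": [{
//			"public_keys": ["MIIB..."],
//			"permissions": [{"paths": ["/config/"], "methods": ["GET"]}]
//		}]
//	}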
type RemoteAdmin struct {
// The address on which to start the secure listener. Accepts placeholders.
// Default: :2021
Listen string `json:"listen,omitempty"`
// List of access controls for this secure admin endpoint.
// This configures TLS mutual authentication (i.e. authorized
// client certificates), but also application-layer permissions
// like which paths and methods each identity is authorized for.
AccessControl []*AdminAccess `json:"access_control,omitempty"`
}
// AdminAccess specifies what permissions an identity or group
// of identities are granted.
type AdminAccess struct {
// Base64-encoded DER certificates containing public keys to accept.
// (The contents of PEM certificate blocks are base64-encoded DER.)
// Any of these public keys can appear in any part of a verified chain.
PublicKeys []string `json:"public_keys,omitempty"`
// Limits what the associated identities are allowed to do.
// If unspecified, all permissions are granted.
Permissions []AdminPermissions `json:"permissions,omitempty"`
publicKeys []crypto.PublicKey
}
// AdminPermissions specifies what kinds of requests are allowed
// to be made to the admin endpoint.
type AdminPermissions struct {
// The API paths allowed. Paths are simple prefix matches.
// Any subpath of the specified paths will be allowed.
Paths []string `json:"paths,omitempty"`
// The HTTP methods allowed for the given paths.
Methods []string `json:"methods,omitempty"`
}
// newAdminHandler reads the admin config and returns an http.Handler suitable
// for use in an admin endpoint server, which will be listening on addr.
func (admin *AdminConfig) newAdminHandler(addr NetworkAddress, remote bool) adminHandler {
muxWrap := adminHandler{mux: http.NewServeMux()}
// secure the local or remote endpoint respectively
if remote {
muxWrap.remoteControl = admin.Remote
} else {
muxWrap.enforceHost = !addr.isWildcardInterface()
muxWrap.allowedOrigins = admin.allowedOrigins(addr)
muxWrap.enforceOrigin = admin.EnforceOrigin
}
addRouteWithMetrics := func(pattern string, handlerLabel string, h http.Handler) {
labels := prometheus.Labels{"path": pattern, "handler": handlerLabel}
h = instrumentHandlerCounter(
adminMetrics.requestCount.MustCurryWith(labels),
h,
)
muxWrap.mux.Handle(pattern, h)
}
// addRoute just calls muxWrap.mux.Handle after
// wrapping the handler with error handling
addRoute := func(pattern string, handlerLabel string, h AdminHandler) {
wrapper := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
err := h.ServeHTTP(w, r)
if err != nil {
labels := prometheus.Labels{
"path": pattern,
"handler": handlerLabel,
"method": strings.ToUpper(r.Method),
}
adminMetrics.requestErrors.With(labels).Inc()
}
muxWrap.handleError(w, r, err)
})
addRouteWithMetrics(pattern, handlerLabel, wrapper)
}
const handlerLabel = "admin"
// register standard config control endpoints
addRoute("/"+rawConfigKey+"/", handlerLabel, AdminHandlerFunc(handleConfig))
addRoute("/id/", handlerLabel, AdminHandlerFunc(handleConfigID))
addRoute("/stop", handlerLabel, AdminHandlerFunc(handleStop))
// register debugging endpoints
addRouteWithMetrics("/debug/pprof/", handlerLabel, http.HandlerFunc(pprof.Index))
addRouteWithMetrics("/debug/pprof/cmdline", handlerLabel, http.HandlerFunc(pprof.Cmdline))
addRouteWithMetrics("/debug/pprof/profile", handlerLabel, http.HandlerFunc(pprof.Profile))
addRouteWithMetrics("/debug/pprof/symbol", handlerLabel, http.HandlerFunc(pprof.Symbol))
addRouteWithMetrics("/debug/pprof/trace", handlerLabel, http.HandlerFunc(pprof.Trace))
addRouteWithMetrics("/debug/vars", handlerLabel, expvar.Handler())
// register third-party module endpoints
for _, m := range GetModules("admin.api") {
router := m.New().(AdminRouter)
handlerLabel := m.ID.Name()
for _, route := range router.Routes() {
addRoute(route.Pattern, handlerLabel, route.Handler)
}
admin.routers = append(admin.routers, router)
}
return muxWrap
}
// provisionAdminRouters provisions all the router modules
// in the admin.api namespace that need provisioning.
func (admin *AdminConfig) provisionAdminRouters(ctx Context) error {
for _, router := range admin.routers {
provisioner, ok := router.(Provisioner)
if !ok {
continue
}
err := provisioner.Provision(ctx)
if err != nil {
return err
}
}
// We no longer need the routers once provisioned, so allow them to be GC'd
admin.routers = nil
return nil
}
// allowedOrigins returns a list of origins that are allowed.
// If admin.Origins is nil (null), the provided listen address
// will be used as the default origin. If admin.Origins is
// empty, no origins will be allowed, effectively bricking the
// endpoint for non-unix-socket endpoints, but whatever.
func (admin AdminConfig) allowedOrigins(addr NetworkAddress) []*url.URL {
uniqueOrigins := make(map[string]struct{})
for _, o := range admin.Origins {
uniqueOrigins[o] = struct{}{}
}
if admin.Origins == nil {
if addr.isLoopback() {
if addr.IsUnixNetwork() {
// RFC 2616, Section 14.26:
// "A client MUST include a Host header field in all HTTP/1.1 request
// messages. If the requested URI does not include an Internet host
// name for the service being requested, then the Host header field MUST
// be given with an empty value."
//
// UPDATE July 2023: Go broke this by patching a minor security bug in 1.20.6.
// Understandable, but frustrating. See:
// https://github.com/golang/go/issues/60374
// See also the discussion here:
// https://github.com/golang/go/issues/61431
//
// We can no longer conform to RFC 2616 Section 14.26 from either Go or curl
// in purity. (Curl allowed no host between 7.40 and 7.50, but now requires a
// bogus host; see https://superuser.com/a/925610.) If we disable Host/Origin
// security checks, the infosec community assures me that it is secure to do
// so, because:
// 1) Browsers do not allow access to unix sockets
// 2) DNS is irrelevant to unix sockets
//
// I am not quite ready to trust either of those external factors, so instead
// of disabling Host/Origin checks, we now allow specific Host values when
// accessing the admin endpoint over unix sockets. I definitely don't trust
// DNS (e.g. I don't trust 'localhost' to always resolve to the local host),
// and IP shouldn't even be used, but if it is for some reason, I think we can
// at least be reasonably assured that 127.0.0.1 and ::1 route to the local
// machine, meaning that a hypothetical browser origin would have to be on the
// local machine as well.
uniqueOrigins[""] = struct{}{}
uniqueOrigins["127.0.0.1"] = struct{}{}
uniqueOrigins["::1"] = struct{}{}
} else {
uniqueOrigins[net.JoinHostPort("localhost", addr.port())] = struct{}{}
uniqueOrigins[net.JoinHostPort("::1", addr.port())] = struct{}{}
uniqueOrigins[net.JoinHostPort("127.0.0.1", addr.port())] = struct{}{}
}
}
if !addr.IsUnixNetwork() {
uniqueOrigins[addr.JoinHostPort(0)] = struct{}{}
}
}
allowed := make([]*url.URL, 0, len(uniqueOrigins))
for originStr := range uniqueOrigins {
var origin *url.URL
if strings.Contains(originStr, "://") {
var err error
origin, err = url.Parse(originStr)
if err != nil {
continue
}
origin.Path = ""
origin.RawPath = ""
origin.Fragment = ""
origin.RawFragment = ""
origin.RawQuery = ""
} else {
origin = &url.URL{Host: originStr}
}
allowed = append(allowed, origin)
}
return allowed
}
// replaceLocalAdminServer replaces the running local admin server
// according to the relevant configuration in cfg. If no configuration
// for the admin endpoint exists in cfg, a default one is used, so
// that there is always an admin server (unless it is explicitly
// configured to be disabled).
func replaceLocalAdminServer(cfg *Config) error {
// always* be sure to close down the old admin endpoint
// as gracefully as possible, even if the new one is
// disabled -- careful to use reference to the current
// (old) admin endpoint since it will be different
// when the function returns
// (* except if the new one fails to start)
oldAdminServer := localAdminServer
var err error
defer func() {
// do the shutdown asynchronously so that any
// current API request gets a response; this
// goroutine may last a few seconds
if oldAdminServer != nil && err == nil {
go func(oldAdminServer *http.Server) {
err := stopAdminServer(oldAdminServer)
if err != nil {
Log().Named("admin").Error("stopping current admin endpoint", zap.Error(err))
}
}(oldAdminServer)
}
}()
// set a default if admin wasn't otherwise configured
if cfg.Admin == nil {
cfg.Admin = &AdminConfig{
Listen: DefaultAdminListen,
}
}
// if new admin endpoint is to be disabled, we're done
if cfg.Admin.Disabled {
Log().Named("admin").Warn("admin endpoint disabled")
return nil
}
// extract a singular listener address
addr, err := parseAdminListenAddr(cfg.Admin.Listen, DefaultAdminListen)
if err != nil {
return err
}
handler := cfg.Admin.newAdminHandler(addr, false)
ln, err := addr.Listen(context.TODO(), 0, net.ListenConfig{})
if err != nil {
return err
}
serverMu.Lock()
localAdminServer = &http.Server{
Addr: addr.String(), // for logging purposes only
Handler: handler,
ReadTimeout: 10 * time.Second,
ReadHeaderTimeout: 5 * time.Second,
IdleTimeout: 60 * time.Second,
MaxHeaderBytes: 1024 * 64,
}
serverMu.Unlock()
adminLogger := Log().Named("admin")
go func() {
serverMu.Lock()
server := localAdminServer
serverMu.Unlock()
if err := server.Serve(ln.(net.Listener)); !errors.Is(err, http.ErrServerClosed) {
adminLogger.Error("admin server shutdown for unknown reason", zap.Error(err))
}
}()
adminLogger.Info("admin endpoint started",
zap.String("address", addr.String()),
zap.Bool("enforce_origin", cfg.Admin.EnforceOrigin),
zap.Array("origins", loggableURLArray(handler.allowedOrigins)))
if !handler.enforceHost {
adminLogger.Warn("admin endpoint on open interface; host checking disabled",
zap.String("address", addr.String()))
}
return nil
}
// manageIdentity sets up automated identity management for this server.
func manageIdentity(ctx Context, cfg *Config) error {
if cfg == nil || cfg.Admin == nil || cfg.Admin.Identity == nil {
return nil
}
// set default issuers; this is pretty hacky because we can't
// import the caddytls package -- but it works
if cfg.Admin.Identity.IssuersRaw == nil {
cfg.Admin.Identity.IssuersRaw = []json.RawMessage{
json.RawMessage(`{"module": "acme"}`),
}
}
// load and provision issuer modules
if cfg.Admin.Identity.IssuersRaw != nil {
val, err := ctx.LoadModule(cfg.Admin.Identity, "IssuersRaw")
if err != nil {
return fmt.Errorf("loading identity issuer modules: %s", err)
}
for _, issVal := range val.([]any) {
cfg.Admin.Identity.issuers = append(cfg.Admin.Identity.issuers, issVal.(certmagic.Issuer))
}
}
// we'll make a new cache when we make the CertMagic config, so stop any previous cache
if identityCertCache != nil {
identityCertCache.Stop()
}
logger := Log().Named("admin.identity")
cmCfg := cfg.Admin.Identity.certmagicConfig(logger, true)
// issuers have circular dependencies with the configs because,
// as explained in the caddytls package, they need access to the
// correct storage and cache to solve ACME challenges
for _, issuer := range cfg.Admin.Identity.issuers {
// avoid import cycle with caddytls package, so manually duplicate the interface here, yuck
if annoying, ok := issuer.(interface{ SetConfig(cfg *certmagic.Config) }); ok {
annoying.SetConfig(cmCfg)
}
}
// obtain and renew server identity certificate(s)
return cmCfg.ManageAsync(ctx, cfg.Admin.Identity.Identifiers)
}
// replaceRemoteAdminServer replaces the running remote admin server
// according to the relevant configuration in cfg. It stops any previous
// remote admin server and only starts a new one if configured.
func replaceRemoteAdminServer(ctx Context, cfg *Config) error {
if cfg == nil {
return nil
}
remoteLogger := Log().Named("admin.remote")
oldAdminServer := remoteAdminServer
defer func() {
if oldAdminServer != nil {
go func(oldAdminServer *http.Server) {
err := stopAdminServer(oldAdminServer)
if err != nil {
Log().Named("admin").Error("stopping current secure admin endpoint", zap.Error(err))
}
}(oldAdminServer)
}
}()
if cfg.Admin == nil || cfg.Admin.Remote == nil {
return nil
}
addr, err := parseAdminListenAddr(cfg.Admin.Remote.Listen, DefaultRemoteAdminListen)
if err != nil {
return err
}
// make the HTTP handler but disable Host/Origin enforcement
// because we are using TLS authentication instead
handler := cfg.Admin.newAdminHandler(addr, true)
// create client certificate pool for TLS mutual auth, and extract public keys
// so that we can enforce access controls at the application layer
clientCertPool := x509.NewCertPool()
for i, accessControl := range cfg.Admin.Remote.AccessControl {
for j, certBase64 := range accessControl.PublicKeys {
cert, err := decodeBase64DERCert(certBase64)
if err != nil {
return fmt.Errorf("access control %d public key %d: parsing base64 certificate DER: %v", i, j, err)
}
accessControl.publicKeys = append(accessControl.publicKeys, cert.PublicKey)
clientCertPool.AddCert(cert)
}
}
// create TLS config that will enforce mutual authentication
if identityCertCache == nil {
return fmt.Errorf("cannot enable remote admin without a certificate cache; configure identity management to initialize a certificate cache")
}
cmCfg := cfg.Admin.Identity.certmagicConfig(remoteLogger, false)
tlsConfig := cmCfg.TLSConfig()
tlsConfig.NextProtos = nil // this server does not solve ACME challenges
tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert
tlsConfig.ClientCAs = clientCertPool
// convert logger to stdlib so it can be used by HTTP server
serverLogger, err := zap.NewStdLogAt(remoteLogger, zap.DebugLevel)
if err != nil {
return err
}
serverMu.Lock()
// create secure HTTP server
remoteAdminServer = &http.Server{
Addr: addr.String(), // for logging purposes only
Handler: handler,
TLSConfig: tlsConfig,
ReadTimeout: 10 * time.Second,
ReadHeaderTimeout: 5 * time.Second,
IdleTimeout: 60 * time.Second,
MaxHeaderBytes: 1024 * 64,
ErrorLog: serverLogger,
}
serverMu.Unlock()
// start listener
lnAny, err := addr.Listen(ctx, 0, net.ListenConfig{})
if err != nil {
return err
}
ln := lnAny.(net.Listener)
ln = tls.NewListener(ln, tlsConfig)
go func() {
serverMu.Lock()
server := remoteAdminServer
serverMu.Unlock()
if err := server.Serve(ln); !errors.Is(err, http.ErrServerClosed) {
remoteLogger.Error("admin remote server shutdown for unknown reason", zap.Error(err))
}
}()
remoteLogger.Info("secure admin remote control endpoint started",
zap.String("address", addr.String()))
return nil
}
func (ident *IdentityConfig) certmagicConfig(logger *zap.Logger, makeCache bool) *certmagic.Config {
var cmCfg *certmagic.Config
if ident == nil {
// user might not have configured identity; that's OK, we can still make a
// certmagic config, although it'll be mostly useless for remote management
ident = new(IdentityConfig)
}
template := certmagic.Config{
Storage: DefaultStorage, // do not act as part of a cluster (this is for the server's local identity)
Logger: logger,
Issuers: ident.issuers,
}
if makeCache {
identityCertCache = certmagic.NewCache(certmagic.CacheOptions{
GetConfigForCert: func(certmagic.Certificate) (*certmagic.Config, error) {
return cmCfg, nil
},
Logger: logger.Named("cache"),
})
}
cmCfg = certmagic.New(identityCertCache, template)
return cmCfg
}
// IdentityCredentials returns this instance's configured, managed identity credentials
// that can be used in TLS client authentication.
func (ctx Context) IdentityCredentials(logger *zap.Logger) ([]tls.Certificate, error) {
if ctx.cfg == nil || ctx.cfg.Admin == nil || ctx.cfg.Admin.Identity == nil {
return nil, fmt.Errorf("no server identity configured")
}
ident := ctx.cfg.Admin.Identity
if len(ident.Identifiers) == 0 {
return nil, fmt.Errorf("no identifiers configured")
}
if logger == nil {
logger = Log()
}
magic := ident.certmagicConfig(logger, false)
return magic.ClientCredentials(ctx, ident.Identifiers)
}
// enforceAccessControls enforces application-layer access controls for r based on remote.
// It expects that the TLS server has already established at least one verified chain of
// trust, and then looks for a matching, authorized public key that is allowed to access
// the defined path(s) using the defined method(s).
func (remote RemoteAdmin) enforceAccessControls(r *http.Request) error {
for _, chain := range r.TLS.VerifiedChains {
for _, peerCert := range chain {
for _, adminAccess := range remote.AccessControl {
for _, allowedKey := range adminAccess.publicKeys {
// see if we found a matching public key; the TLS server already verified the chain
// so we know the client possesses the associated private key; this handy interface
// doesn't appear to be defined anywhere in the std lib, but was implemented here:
// https://github.com/golang/go/commit/b5f2c0f50297fa5cd14af668ddd7fd923626cf8c
comparer, ok := peerCert.PublicKey.(interface{ Equal(crypto.PublicKey) bool })
if !ok || !comparer.Equal(allowedKey) {
continue
}
// key recognized; make sure its HTTP request is permitted
for _, accessPerm := range adminAccess.Permissions {
// verify method
methodFound := accessPerm.Methods == nil
for _, method := range accessPerm.Methods {
if method == r.Method {
methodFound = true
break
}
}
if !methodFound {
return APIError{
HTTPStatus: http.StatusForbidden,
Message: "not authorized to use this method",
}
}
// verify path
pathFound := accessPerm.Paths == nil
for _, allowedPath := range accessPerm.Paths {
if strings.HasPrefix(r.URL.Path, allowedPath) {
pathFound = true
break
}
}
if !pathFound {
return APIError{
HTTPStatus: http.StatusForbidden,
Message: "not authorized to access this path",
}
}
}
// public key authorized, method and path allowed
return nil
}
}
}
}
// in theory, this should never happen; with an unverified chain, the TLS server
// should not accept the connection in the first place, and the acceptable cert
// pool is configured using the same list of public keys we verify against
return APIError{
HTTPStatus: http.StatusUnauthorized,
Message: "client identity not authorized",
}
}
func stopAdminServer(srv *http.Server) error {
if srv == nil {
return fmt.Errorf("no admin server")
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
err := srv.Shutdown(ctx)
if err != nil {
return fmt.Errorf("shutting down admin server: %v", err)
}
Log().Named("admin").Info("stopped previous server", zap.String("address", srv.Addr))
return nil
}
// AdminRouter is a type which can return routes for the admin API.
type AdminRouter interface {
Routes() []AdminRoute
}
// AdminRoute represents a route for the admin endpoint.
type AdminRoute struct {
Pattern string
Handler AdminHandler
}
type adminHandler struct {
mux *http.ServeMux
// security for local/plaintext endpoint
enforceOrigin bool
enforceHost bool
allowedOrigins []*url.URL
// security for remote/encrypted endpoint
remoteControl *RemoteAdmin
}
// ServeHTTP is the external entry point for API requests.
// It will only be called once per request.
func (h adminHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ip, port, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
ip = r.RemoteAddr
port = ""
}
log := Log().Named("admin.api").With(
zap.String("method", r.Method),
zap.String("host", r.Host),
zap.String("uri", r.RequestURI),
zap.String("remote_ip", ip),
zap.String("remote_port", port),
zap.Reflect("headers", r.Header),
)
if r.TLS != nil {
log = log.With(
zap.Bool("secure", true),
zap.Int("verified_chains", len(r.TLS.VerifiedChains)),
)
}
if r.RequestURI == "/metrics" {
log.Debug("received request")
} else {
log.Info("received request")
}
h.serveHTTP(w, r)
}
// serveHTTP is the internal entry point for API requests. It may
// be called more than once per request, for example if a request
// is rewritten (i.e. internal redirect).
func (h adminHandler) serveHTTP(w http.ResponseWriter, r *http.Request) {
if h.remoteControl != nil {
// enforce access controls on secure endpoint
if err := h.remoteControl.enforceAccessControls(r); err != nil {
h.handleError(w, r, err)
return
}
}
if strings.Contains(r.Header.Get("Upgrade"), "websocket") {
// I've never been able demonstrate a vulnerability myself, but apparently
// WebSocket connections originating from browsers aren't subject to CORS
// restrictions, so we'll just be on the safe side
h.handleError(w, r, fmt.Errorf("websocket connections aren't allowed"))
return
}
if h.enforceHost {
// DNS rebinding mitigation
err := h.checkHost(r)
if err != nil {
h.handleError(w, r, err)
return
}
}
if h.enforceOrigin {
// cross-site mitigation
origin, err := h.checkOrigin(r)
if err != nil {
h.handleError(w, r, err)
return
}
if r.Method == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Methods", "OPTIONS, GET, POST, PUT, PATCH, DELETE")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Cache-Control")
w.Header().Set("Access-Control-Allow-Credentials", "true")
}
w.Header().Set("Access-Control-Allow-Origin", origin)
}
h.mux.ServeHTTP(w, r)
}
func (h adminHandler) handleError(w http.ResponseWriter, r *http.Request, err error) {
if err == nil {
return
}
if err == errInternalRedir {
h.serveHTTP(w, r)
return
}
apiErr, ok := err.(APIError)
if !ok {
apiErr = APIError{
HTTPStatus: http.StatusInternalServerError,
Err: err,
}
}
if apiErr.HTTPStatus == 0 {
apiErr.HTTPStatus = http.StatusInternalServerError
}
if apiErr.Message == "" && apiErr.Err != nil {
apiErr.Message = apiErr.Err.Error()
}
Log().Named("admin.api").Error("request error",
zap.Error(err),
zap.Int("status_code", apiErr.HTTPStatus),
)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(apiErr.HTTPStatus)
encErr := json.NewEncoder(w).Encode(apiErr)
if encErr != nil {
Log().Named("admin.api").Error("failed to encode error response", zap.Error(encErr))
}
}
// checkHost returns an error unless the request's Host header matches
// a trustworthy/expected value. This helps to mitigate DNS
// rebinding attacks.
func (h adminHandler) checkHost(r *http.Request) error {
var allowed bool
for _, allowedOrigin := range h.allowedOrigins {
if r.Host == allowedOrigin.Host {
allowed = true
break
}
}
if !allowed {
return APIError{
HTTPStatus: http.StatusForbidden,
Err: fmt.Errorf("host not allowed: %s", r.Host),
}
}
return nil
}
// checkOrigin ensures that the Origin header, if
// set, matches the intended target; prevents arbitrary
// sites from issuing requests to our listener. It
// returns the origin that was obtained from r.
func (h adminHandler) checkOrigin(r *http.Request) (string, error) {
originStr, origin := h.getOrigin(r)
if origin == nil {
return "", APIError{
HTTPStatus: http.StatusForbidden,
Err: fmt.Errorf("required Origin header is missing or invalid"),
}
}
if !h.originAllowed(origin) {
return "", APIError{
HTTPStatus: http.StatusForbidden,
Err: fmt.Errorf("client is not allowed to access from origin '%s'", originStr),
}
}
return origin.String(), nil
}
func (h adminHandler) getOrigin(r *http.Request) (string, *url.URL) {
origin := r.Header.Get("Origin")
if origin == "" {
origin = r.Header.Get("Referer")
}
originURL, err := url.Parse(origin)
if err != nil {
return origin, nil
}
originURL.Path = ""
originURL.RawPath = ""
originURL.Fragment = ""
originURL.RawFragment = ""
originURL.RawQuery = ""
return origin, originURL
}
func (h adminHandler) originAllowed(origin *url.URL) bool {
for _, allowedOrigin := range h.allowedOrigins {
if allowedOrigin.Scheme != "" && origin.Scheme != allowedOrigin.Scheme {
continue
}
if origin.Host == allowedOrigin.Host {
return true
}
}
return false
}
// etagHasher returns the hasher used on the config to both
// produce and verify ETags.
func etagHasher() hash.Hash { return xxhash.New() }
// makeEtag returns an Etag header value (including quotes) for
// the given config path and hash of contents at that path.
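// For example, a GET of /config/apps might yield an ETag like
// `"/config/apps 1a2b3c4d5e6f7a8b"` (the hash value here is illustrative).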
func makeEtag(path string, hash hash.Hash) string {
return fmt.Sprintf(`"%s %x"`, path, hash.Sum(nil))
}
// This buffer pool is used to keep buffers for
// reading the config during ETag header generation
var bufferPool = sync.Pool{
New: func() any {
return new(bytes.Buffer)
},
}
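// handleConfig serves the /config/ endpoint: GET writes the (sub)config at
// the request path as JSON with an ETag header, while POST, PUT, PATCH, and
// DELETE mutate the config at the request path via changeConfig. Sending
// Cache-Control: must-revalidate forces a reload even if the config is unchanged.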
func handleConfig(w http.ResponseWriter, r *http.Request) error {
switch r.Method {
case http.MethodGet:
w.Header().Set("Content-Type", "application/json")
hash := etagHasher()
// Read the config into a buffer instead of writing directly to
// the response writer, as we want to set the ETag as the header,
// not the trailer.
buf := bufferPool.Get().(*bytes.Buffer)
buf.Reset()
defer bufferPool.Put(buf)
configWriter := io.MultiWriter(buf, hash)
err := readConfig(r.URL.Path, configWriter)
if err != nil {
return APIError{HTTPStatus: http.StatusBadRequest, Err: err}
}
// we could consider setting up a sync.Pool for the summed
// hashes to reduce GC pressure.
w.Header().Set("Etag", makeEtag(r.URL.Path, hash))
_, err = w.Write(buf.Bytes())
if err != nil {
return APIError{HTTPStatus: http.StatusInternalServerError, Err: err}
}
return nil
case http.MethodPost,
http.MethodPut,
http.MethodPatch,
http.MethodDelete:
// DELETE does not use a body, but the others do
var body []byte
if r.Method != http.MethodDelete {
if ct := r.Header.Get("Content-Type"); !strings.Contains(ct, "/json") {
return APIError{
HTTPStatus: http.StatusBadRequest,
Err: fmt.Errorf("unacceptable content-type: %v; 'application/json' required", ct),
}
}
buf := bufPool.Get().(*bytes.Buffer)
buf.Reset()
defer bufPool.Put(buf)
_, err := io.Copy(buf, r.Body)
if err != nil {
return APIError{
HTTPStatus: http.StatusBadRequest,
Err: fmt.Errorf("reading request body: %v", err),
}
}
body = buf.Bytes()
}
forceReload := r.Header.Get("Cache-Control") == "must-revalidate"
err := changeConfig(r.Method, r.URL.Path, body, r.Header.Get("If-Match"), forceReload)
if err != nil && !errors.Is(err, errSameConfig) {
return err
}
default:
return APIError{
HTTPStatus: http.StatusMethodNotAllowed,
Err: fmt.Errorf("method %s not allowed", r.Method),
}
}
return nil
}
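// handleConfigID handles requests under the /id/ endpoint: it replaces the
// /id/<id> prefix with the config path indexed for that @id value, then
// issues an internal redirect so the request is re-handled at the expanded
// path. For example (hypothetical ID), /id/my_srv/listen is rewritten to the
// tagged object's full /config/... path with /listen appended.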
func handleConfigID(w http.ResponseWriter, r *http.Request) error {
idPath := r.URL.Path
parts := strings.Split(idPath, "/")
if len(parts) < 3 || parts[2] == "" {
return APIError{
HTTPStatus: http.StatusBadRequest,
Err: fmt.Errorf("request path is missing object ID"),
}
}
if parts[0] != "" || parts[1] != "id" {
return APIError{
HTTPStatus: http.StatusBadRequest,
Err: fmt.Errorf("malformed object path"),
}
}
id := parts[2]
// map the ID to the expanded path
rawCfgMu.RLock()
expanded, ok := rawCfgIndex[id]
rawCfgMu.RUnlock()
if !ok {
return APIError{
HTTPStatus: http.StatusNotFound,
Err: fmt.Errorf("unknown object ID '%s'", id),
}
}
// piece the full URL path back together
parts = append([]string{expanded}, parts[3:]...)
r.URL.Path = path.Join(parts...)
return errInternalRedir
}
func handleStop(w http.ResponseWriter, r *http.Request) error {
if r.Method != http.MethodPost {
return APIError{
HTTPStatus: http.StatusMethodNotAllowed,
Err: fmt.Errorf("method not allowed"),
}
}
exitProcess(context.Background(), Log().Named("admin.api"))
return nil
}
// unsyncedConfigAccess traverses into the current config and performs
// the operation at path according to method, using body and out as
// needed. This is a low-level, unsynchronized function; most callers
// will want to use changeConfig or readConfig instead. This requires a
// read or write lock on rawCfgMu, depending on method (GET needs
// only a read lock; all others need a write lock).
func unsyncedConfigAccess(method, path string, body []byte, out io.Writer) error {
var err error
var val any
// if there is a request body, decode it into the
// variable that will be set in the config according
// to method and path
if len(body) > 0 {
err = json.Unmarshal(body, &val)
if err != nil {
return fmt.Errorf("decoding request body: %v", err)
}
}
enc := json.NewEncoder(out)
cleanPath := strings.Trim(path, "/")
if cleanPath == "" {
return fmt.Errorf("no traversable path")
}
parts := strings.Split(cleanPath, "/")
if len(parts) == 0 {
return fmt.Errorf("path missing")
}
// A path that ends with "..." implies:
// 1) the part before it is an array
// 2) the payload is an array
// and means that the user wants to expand the elements
// in the payload array and append each one into the
// destination array, like so:
// array = append(array, elems...)
// This special case is handled below.
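// For example (paths and values are illustrative): POST /config/foo/... with
// body [1, 2] turns {"foo": [0]} into {"foo": [0, 1, 2]}, whereas POST
// /config/foo with the same body appends the payload as a single element,
// yielding {"foo": [0, [1, 2]]}.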
ellipses := parts[len(parts)-1] == "..."
if ellipses {
parts = parts[:len(parts)-1]
}
var ptr any = rawCfg
traverseLoop:
for i, part := range parts {
switch v := ptr.(type) {
case map[string]any:
// if the next part enters a slice, and the slice is our destination,
// handle it specially (because appending to the slice copies the slice
// header, which does not replace the original one like we want)
if arr, ok := v[part].([]any); ok && i == len(parts)-2 {
var idx int
if method != http.MethodPost {
idxStr := parts[len(parts)-1]
idx, err = strconv.Atoi(idxStr)
if err != nil {
return fmt.Errorf("[%s] invalid array index '%s': %v",
path, idxStr, err)
}
if idx < 0 || idx >= len(arr) {
return fmt.Errorf("[%s] array index out of bounds: %s", path, idxStr)
}
}
switch method {
case http.MethodGet:
err = enc.Encode(arr[idx])
if err != nil {
return fmt.Errorf("encoding config: %v", err)
}
case http.MethodPost:
if ellipses {
valArray, ok := val.([]any)
if !ok {
return fmt.Errorf("final element is not an array")
}
v[part] = append(arr, valArray...)
} else {
v[part] = append(arr, val)
}
case http.MethodPut:
// avoid creation of new slice and a second copy (see
// https://github.com/golang/go/wiki/SliceTricks#insert)
arr = append(arr, nil)
copy(arr[idx+1:], arr[idx:])
arr[idx] = val
v[part] = arr
case http.MethodPatch:
arr[idx] = val
case http.MethodDelete:
v[part] = append(arr[:idx], arr[idx+1:]...)
default:
return fmt.Errorf("unrecognized method %s", method)
}
break traverseLoop
}
if i == len(parts)-1 {
switch method {
case http.MethodGet:
err = enc.Encode(v[part])
if err != nil {
return fmt.Errorf("encoding config: %v", err)
}
case http.MethodPost:
// if the part is an existing list, POST appends to
// it, otherwise it just sets or creates the value
if arr, ok := v[part].([]any); ok {
if ellipses {
valArray, ok := val.([]any)
if !ok {
return fmt.Errorf("final element is not an array")
}
v[part] = append(arr, valArray...)
} else {
v[part] = append(arr, val)
}
} else {
v[part] = val
}
case http.MethodPut:
if _, ok := v[part]; ok {
return APIError{
HTTPStatus: http.StatusConflict,
Err: fmt.Errorf("[%s] key already exists: %s", path, part),
}
}
v[part] = val
case http.MethodPatch:
if _, ok := v[part]; !ok {
return APIError{
HTTPStatus: http.StatusNotFound,
Err: fmt.Errorf("[%s] key does not exist: %s", path, part),
}
}
v[part] = val
case http.MethodDelete:
if _, ok := v[part]; !ok {
return APIError{
HTTPStatus: http.StatusNotFound,
Err: fmt.Errorf("[%s] key does not exist: %s", path, part),
}
}
delete(v, part)
default:
return fmt.Errorf("unrecognized method %s", method)
}
} else {
// if we are "PUTting" a new resource, the key(s) in its path
// might not exist yet; that's OK but we need to make them as
// we go, while we still have a pointer from the level above
if v[part] == nil && method == http.MethodPut {
v[part] = make(map[string]any)
}
ptr = v[part]
}
case []any:
partInt, err := strconv.Atoi(part)
if err != nil {
return fmt.Errorf("[/%s] invalid array index '%s': %v",
strings.Join(parts[:i+1], "/"), part, err)
}
if partInt < 0 || partInt >= len(v) {
return fmt.Errorf("[/%s] array index out of bounds: %s",
strings.Join(parts[:i+1], "/"), part)
}
ptr = v[partInt]
default:
return fmt.Errorf("invalid traversal path at: %s", strings.Join(parts[:i+1], "/"))
}
}
return nil
}
// RemoveMetaFields removes meta fields like "@id" from a JSON message
// by using a simple regular expression. (An alternate way to do this
// would be to delete them from the raw, map[string]any
// representation as they are indexed, then iterate the index we made
// and add them back after encoding as JSON, but this is simpler.)
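// For example (illustrative input), `{"@id":"my_srv","listen":[":443"]}`
// becomes `{"listen":[":443"]}`.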
func RemoveMetaFields(rawJSON []byte) []byte {
return idRegexp.ReplaceAllFunc(rawJSON, func(in []byte) []byte {
// matches with a comma on both sides (when "@id" property is
// not the first or last in the object) need to keep exactly
// one comma for correct JSON syntax
comma := []byte{','}
if bytes.HasPrefix(in, comma) && bytes.HasSuffix(in, comma) {
return comma
}
return []byte{}
})
}
// AdminHandler is like http.Handler except ServeHTTP may return an error.
//
// If any handler encounters an error, it should be returned for proper
// handling.
type AdminHandler interface {
ServeHTTP(http.ResponseWriter, *http.Request) error
}
// AdminHandlerFunc is a convenience type like http.HandlerFunc.
type AdminHandlerFunc func(http.ResponseWriter, *http.Request) error
// ServeHTTP implements the Handler interface.
func (f AdminHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) error {
return f(w, r)
}
// APIError is a structured error that every API
// handler should return for consistency in logging
// and client responses. If Message is unset, then
// Err.Error() will be serialized in its place.
type APIError struct {
HTTPStatus int `json:"-"`
Err error `json:"-"`
Message string `json:"error"`
}
func (e APIError) Error() string {
if e.Err != nil {
return e.Err.Error()
}
return e.Message
}
// parseAdminListenAddr extracts a singular listen address from either addr
// or defaultAddr, returning the network and the address of the listener.
func parseAdminListenAddr(addr string, defaultAddr string) (NetworkAddress, error) {
input, err := NewReplacer().ReplaceOrErr(addr, true, true)
if err != nil {
return NetworkAddress{}, fmt.Errorf("replacing listen address: %v", err)
}
if input == "" {
input = defaultAddr
}
listenAddr, err := ParseNetworkAddress(input)
if err != nil {
return NetworkAddress{}, fmt.Errorf("parsing listener address: %v", err)
}
if listenAddr.PortRangeSize() != 1 {
return NetworkAddress{}, fmt.Errorf("must be exactly one listener address; cannot listen on: %s", listenAddr)
}
return listenAddr, nil
}
// decodeBase64DERCert base64-decodes, then DER-decodes, certStr.
func decodeBase64DERCert(certStr string) (*x509.Certificate, error) {
derBytes, err := base64.StdEncoding.DecodeString(certStr)
if err != nil {
return nil, err
}
return x509.ParseCertificate(derBytes)
}
type loggableURLArray []*url.URL
func (ua loggableURLArray) MarshalLogArray(enc zapcore.ArrayEncoder) error {
if ua == nil {
return nil
}
for _, u := range ua {
enc.AppendString(u.String())
}
return nil
}
var (
// DefaultAdminListen is the address for the local admin
// listener, if none is specified at startup.
DefaultAdminListen = "localhost:2019"
// DefaultRemoteAdminListen is the address for the remote
// (TLS-authenticated) admin listener, if enabled and not
// specified otherwise.
DefaultRemoteAdminListen = ":2021"
)
// PIDFile writes this process's PID to the file at filename. The file
// will get deleted before the process gracefully exits.
func PIDFile(filename string) error {
pid := []byte(strconv.Itoa(os.Getpid()) + "\n")
err := os.WriteFile(filename, pid, 0o600)
if err != nil {
return err
}
pidfile = filename
return nil
}
// idRegexp is used to match ID fields and their associated values
// in the config. It also matches adjacent commas so that syntax
// can be preserved no matter where in the object the field appears.
// It supports string and most numeric values.
var idRegexp = regexp.MustCompile(`(?m),?\s*"` + idKey + `"\s*:\s*(-?[0-9]+(\.[0-9]+)?|(?U)".*")\s*,?`)
// pidfile is the name of the pidfile, if any.
var pidfile string
// errInternalRedir indicates an internal redirect
// and is useful when admin API handlers rewrite
// the request; in that case, authentication and
// authorization need to happen again for the
// rewritten request.
var errInternalRedir = fmt.Errorf("internal redirect; re-authorization required")
const (
rawConfigKey = "config"
idKey = "@id"
)
var bufPool = sync.Pool{
New: func() any {
return new(bytes.Buffer)
},
}
// keep a reference to admin endpoint singletons while they're active
var (
serverMu sync.Mutex
localAdminServer, remoteAdminServer *http.Server
identityCertCache *certmagic.Cache
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddy
import (
"bytes"
"context"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/fs"
"log"
"net/http"
"os"
"path"
"path/filepath"
"runtime/debug"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/caddyserver/certmagic"
"github.com/google/uuid"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2/internal/filesystems"
"github.com/caddyserver/caddy/v2/notify"
)
// Config is the top (or beginning) of the Caddy configuration structure.
// Caddy config is expressed natively as a JSON document. If you prefer
// not to work with JSON directly, there are [many config adapters](/docs/config-adapters)
// available that can convert various inputs into Caddy JSON.
//
// Many parts of this config are extensible through the use of Caddy modules.
// Fields which have a json.RawMessage type and which appear as dots (•••) in
// the online docs can be fulfilled by modules in a certain module
// namespace. The docs show which modules can be used in a given place.
//
// Whenever a module is used, its name must be given either inline as part of
// the module, or as the key to the module's value. The docs will make it clear
// which to use.
//
// Generally, all config settings are optional, as it is Caddy convention to
// have good, documented default values. If a parameter is required, the docs
// should say so.
//
// Go programs which are directly building a Config struct value should take
// care to populate the JSON-encodable fields of the struct (i.e. the fields
// with `json` struct tags) if employing the module lifecycle (e.g. Provision
// method calls).
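//
// A minimal example (illustrative; the admin listener shown is already the
// default) might look like:
//
//	{
//		"admin": {"listen": "localhost:2019"},
//		"apps": {}
//	}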
type Config struct {
Admin *AdminConfig `json:"admin,omitempty"`
Logging *Logging `json:"logging,omitempty"`
// StorageRaw is a storage module that defines how/where Caddy
// stores assets (such as TLS certificates). The default storage
// module is `caddy.storage.file_system` (the local file system),
// and the default path
// [depends on the OS and environment](/docs/conventions#data-directory).
StorageRaw json.RawMessage `json:"storage,omitempty" caddy:"namespace=caddy.storage inline_key=module"`
// AppsRaw are the apps that Caddy will load and run. The
// app module name is the key, and the app's config is the
// associated value.
AppsRaw ModuleMap `json:"apps,omitempty" caddy:"namespace="`
apps map[string]App
storage certmagic.Storage
cancelFunc context.CancelFunc
// filesystems is a map of filesystems that will later be loaded from and added to.
filesystems FileSystems
}
// App is a thing that Caddy runs.
type App interface {
Start() error
Stop() error
}
// Run runs the given config, replacing any existing config.
func Run(cfg *Config) error {
cfgJSON, err := json.Marshal(cfg)
if err != nil {
return err
}
return Load(cfgJSON, true)
}
// Load loads the given config JSON and runs it only
// if it is different from the current config or
// forceReload is true.
func Load(cfgJSON []byte, forceReload bool) error {
if err := notify.Reloading(); err != nil {
Log().Error("unable to notify service manager of reloading state", zap.Error(err))
}
// after reload, notify system of success or, if
// failure, update with status (error message)
var err error
defer func() {
if err != nil {
if notifyErr := notify.Error(err, 0); notifyErr != nil {
Log().Error("unable to notify to service manager of reload error",
zap.Error(notifyErr),
zap.String("reload_err", err.Error()))
}
return
}
if err := notify.Ready(); err != nil {
Log().Error("unable to notify to service manager of ready state", zap.Error(err))
}
}()
err = changeConfig(http.MethodPost, "/"+rawConfigKey, cfgJSON, "", forceReload)
if errors.Is(err, errSameConfig) {
err = nil // not really an error
}
return err
}
// changeConfig changes the current config (rawCfg) according to the
// method, traversed via the given path, and uses the given input as
// the new value (if applicable; i.e. "DELETE" doesn't have an input).
// If the resulting config is the same as the previous, no reload will
// occur unless forceReload is true. If the config is unchanged and not
// forcefully reloaded, then errSameConfig is returned. This function is
// safe for concurrent use.
// The ifMatchHeader can optionally be given a string of the format:
//
// "<path> <hash>"
//
// where <path> is the absolute path in the config and <hash> is the expected hash of
// the config at that path. If the hash in the ifMatchHeader doesn't match
// the hash of the config, then an APIError with status 412 will be returned.
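//
// For example (hash value illustrative):
//
// If-Match: "/config/apps 1a2b3c4d5e6f7a8b"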
func changeConfig(method, path string, input []byte, ifMatchHeader string, forceReload bool) error {
switch method {
case http.MethodGet,
http.MethodHead,
http.MethodOptions,
http.MethodConnect,
http.MethodTrace:
return fmt.Errorf("method not allowed")
}
rawCfgMu.Lock()
defer rawCfgMu.Unlock()
if ifMatchHeader != "" {
// expect the first and last character to be quotes
if len(ifMatchHeader) < 2 || ifMatchHeader[0] != '"' || ifMatchHeader[len(ifMatchHeader)-1] != '"' {
return APIError{
HTTPStatus: http.StatusBadRequest,
Err: fmt.Errorf("malformed If-Match header; expect quoted string"),
}
}
// read out the parts
parts := strings.Fields(ifMatchHeader[1 : len(ifMatchHeader)-1])
if len(parts) != 2 {
return APIError{
HTTPStatus: http.StatusBadRequest,
Err: fmt.Errorf("malformed If-Match header; expect format \"<path> <hash>\""),
}
}
// get the current hash of the config
// at the given path
hash := etagHasher()
err := unsyncedConfigAccess(http.MethodGet, parts[0], nil, hash)
if err != nil {
return err
}
if hex.EncodeToString(hash.Sum(nil)) != parts[1] {
return APIError{
HTTPStatus: http.StatusPreconditionFailed,
Err: fmt.Errorf("If-Match header did not match current config hash"),
}
}
}
err := unsyncedConfigAccess(method, path, input, nil)
if err != nil {
return err
}
// the mutation is complete, so encode the entire config as JSON
newCfg, err := json.Marshal(rawCfg[rawConfigKey])
if err != nil {
return APIError{
HTTPStatus: http.StatusBadRequest,
Err: fmt.Errorf("encoding new config: %v", err),
}
}
// if nothing changed, no need to do a whole reload unless the client forces it
if !forceReload && bytes.Equal(rawCfgJSON, newCfg) {
Log().Info("config is unchanged")
return errSameConfig
}
// find any IDs in this config and index them
idx := make(map[string]string)
err = indexConfigObjects(rawCfg[rawConfigKey], "/"+rawConfigKey, idx)
if err != nil {
return APIError{
HTTPStatus: http.StatusInternalServerError,
Err: fmt.Errorf("indexing config: %v", err),
}
}
// load this new config; if it fails, we need to revert to
// our old representation of caddy's actual config
err = unsyncedDecodeAndRun(newCfg, true)
if err != nil {
if len(rawCfgJSON) > 0 {
// restore old config state to keep it consistent
// with what caddy is still running; we need to
// unmarshal it again because it's likely that
// pointers deep in our rawCfg map were modified
var oldCfg any
err2 := json.Unmarshal(rawCfgJSON, &oldCfg)
if err2 != nil {
err = fmt.Errorf("%v; additionally, restoring old config: %v", err, err2)
}
rawCfg[rawConfigKey] = oldCfg
}
return fmt.Errorf("loading new config: %v", err)
}
// success, so update our stored copy of the encoded
// config to keep it consistent with what caddy is now
// running (storing an encoded copy is not strictly
// necessary, but avoids an extra json.Marshal for
// each config change)
rawCfgJSON = newCfg
rawCfgIndex = idx
return nil
}
// readConfig traverses the current config to path
// and writes its JSON encoding to out.
func readConfig(path string, out io.Writer) error {
rawCfgMu.RLock()
defer rawCfgMu.RUnlock()
return unsyncedConfigAccess(http.MethodGet, path, nil, out)
}
// indexConfigObjects recursively searches ptr for object fields named
// "@id" and maps that ID value to the full configPath in the index.
// This function is NOT safe for concurrent access; obtain a write lock
// on rawCfgMu.
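// For example (illustrative), a config containing {"apps":{"http":{"@id":"my_http"}}}
// produces the index entry "my_http" -> "/config/apps/http".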
func indexConfigObjects(ptr any, configPath string, index map[string]string) error {
switch val := ptr.(type) {
case map[string]any:
for k, v := range val {
if k == idKey {
switch idVal := v.(type) {
case string:
index[idVal] = configPath
case float64: // all JSON numbers decode as float64
index[fmt.Sprintf("%v", idVal)] = configPath
default:
return fmt.Errorf("%s: %s field must be a string or number", configPath, idKey)
}
continue
}
// traverse this object property recursively
err := indexConfigObjects(val[k], path.Join(configPath, k), index)
if err != nil {
return err
}
}
case []any:
// traverse each element of the array recursively
for i := range val {
err := indexConfigObjects(val[i], path.Join(configPath, strconv.Itoa(i)), index)
if err != nil {
return err
}
}
}
return nil
}
// unsyncedDecodeAndRun removes any meta fields (like @id tags)
// from cfgJSON, decodes the result into a *Config, and runs
// it as the new config, replacing any other current config.
// It does NOT update the raw config state, as this is a
// lower-level function; most callers will want to use Load
// instead. A write lock on rawCfgMu is required! If
// allowPersist is false, it will not be persisted to disk,
// even if it is configured to.
func unsyncedDecodeAndRun(cfgJSON []byte, allowPersist bool) error {
// remove any @id fields from the JSON, which would cause
// loading to break since the field wouldn't be recognized
strippedCfgJSON := RemoveMetaFields(cfgJSON)
var newCfg *Config
err := StrictUnmarshalJSON(strippedCfgJSON, &newCfg)
if err != nil {
return err
}
// prevent recursive config loads; that is a user error, and
// although frequent config loads should be safe, we cannot
// guarantee that in the presence of third party plugins, nor
// do we want this error to go unnoticed (we assume it was a
// pulled config if we're not allowed to persist it)
if !allowPersist &&
newCfg != nil &&
newCfg.Admin != nil &&
newCfg.Admin.Config != nil &&
newCfg.Admin.Config.LoadRaw != nil &&
newCfg.Admin.Config.LoadDelay <= 0 {
return fmt.Errorf("recursive config loading detected: pulled configs cannot pull other configs without positive load_delay")
}
// run the new config and start all its apps
ctx, err := run(newCfg, true)
if err != nil {
return err
}
// swap old context (including its config) with the new one
currentCtxMu.Lock()
oldCtx := currentCtx
currentCtx = ctx
currentCtxMu.Unlock()
// Stop, Cleanup each old app
unsyncedStop(oldCtx)
// autosave a non-nil config, if not disabled
if allowPersist &&
newCfg != nil &&
(newCfg.Admin == nil ||
newCfg.Admin.Config == nil ||
newCfg.Admin.Config.Persist == nil ||
*newCfg.Admin.Config.Persist) {
dir := filepath.Dir(ConfigAutosavePath)
err := os.MkdirAll(dir, 0o700)
if err != nil {
Log().Error("unable to create folder for config autosave",
zap.String("dir", dir),
zap.Error(err))
} else {
err := os.WriteFile(ConfigAutosavePath, cfgJSON, 0o600)
if err == nil {
Log().Info("autosaved config (load with --resume flag)", zap.String("file", ConfigAutosavePath))
} else {
Log().Error("unable to autosave config",
zap.String("file", ConfigAutosavePath),
zap.Error(err))
}
}
}
return nil
}
// run runs newCfg and starts all its apps if
// start is true. If any errors happen, cleanup
// is performed if any modules were provisioned;
// apps that were started already will be stopped,
// so this function should not leak resources if
// an error is returned. However, if no error is
// returned and start == false, you should cancel
// the config if you are not going to start it,
// so that each provisioned module will be
// cleaned up.
//
// This is a low-level function; most callers
// will want to use Run instead, which also
// updates the config's raw state.
func run(newCfg *Config, start bool) (Context, error) {
ctx, err := provisionContext(newCfg, start)
if err != nil {
return ctx, err
}
if !start {
return ctx, nil
}
// Provision any admin routers which may need to access
// some of the other apps at runtime
err = ctx.cfg.Admin.provisionAdminRouters(ctx)
if err != nil {
return ctx, err
}
// Start
err = func() error {
started := make([]string, 0, len(ctx.cfg.apps))
for name, a := range ctx.cfg.apps {
err := a.Start()
if err != nil {
// an app failed to start, so we need to stop
// all other apps that were already started
for _, otherAppName := range started {
err2 := ctx.cfg.apps[otherAppName].Stop()
if err2 != nil {
err = fmt.Errorf("%v; additionally, aborting app %s: %v",
err, otherAppName, err2)
}
}
return fmt.Errorf("%s app module: start: %v", name, err)
}
started = append(started, name)
}
return nil
}()
if err != nil {
return ctx, err
}
// now that the user's config is running, finish setting up anything else,
// such as remote admin endpoint, config loader, etc.
return ctx, finishSettingUp(ctx, ctx.cfg)
}
// provisionContext creates a new context from the given configuration and provisions
// storage and apps.
// If `newCfg` is nil, a new empty configuration will be created.
// If `replaceAdminServer` is true, any currently active admin server will be replaced
// with a new admin server based on the provided configuration.
func provisionContext(newCfg *Config, replaceAdminServer bool) (Context, error) {
// because we will need to roll back any state
// modifications if this function errors, we
// keep a single error value and scope all
// sub-operations to their own functions to
// ensure this error value does not get
// overridden or missed when it should have
// been set by a short assignment
var err error
if newCfg == nil {
newCfg = new(Config)
}
// create a context within which to load
// modules - essentially our new config's
// execution environment; be sure that
// cleanup occurs when we return if there
// was an error; if no error, it will get
// cleaned up on next config cycle
ctx, cancel := NewContext(Context{Context: context.Background(), cfg: newCfg})
defer func() {
if err != nil {
// if there were any errors during startup,
// we should cancel the new context we created
// since the associated config won't be used;
// this will cause all modules that were newly
// provisioned to clean themselves up
cancel()
// also undo any other state changes we made
if currentCtx.cfg != nil {
certmagic.Default.Storage = currentCtx.cfg.storage
}
}
}()
newCfg.cancelFunc = cancel // clean up later
// set up logging before anything bad happens
if newCfg.Logging == nil {
newCfg.Logging = new(Logging)
}
err = newCfg.Logging.openLogs(ctx)
if err != nil {
return ctx, err
}
// start the admin endpoint (and stop any prior one)
if replaceAdminServer {
err = replaceLocalAdminServer(newCfg)
if err != nil {
return ctx, fmt.Errorf("starting caddy administration endpoint: %v", err)
}
}
// create the new filesystem map
newCfg.filesystems = &filesystems.FilesystemMap{}
// prepare the new config for use
newCfg.apps = make(map[string]App)
// set up global storage and make it CertMagic's default storage, too
err = func() error {
if newCfg.StorageRaw != nil {
val, err := ctx.LoadModule(newCfg, "StorageRaw")
if err != nil {
return fmt.Errorf("loading storage module: %v", err)
}
stor, err := val.(StorageConverter).CertMagicStorage()
if err != nil {
return fmt.Errorf("creating storage value: %v", err)
}
newCfg.storage = stor
}
if newCfg.storage == nil {
newCfg.storage = DefaultStorage
}
certmagic.Default.Storage = newCfg.storage
return nil
}()
if err != nil {
return ctx, err
}
// Load and Provision each app and their submodules
err = func() error {
for appName := range newCfg.AppsRaw {
if _, err := ctx.App(appName); err != nil {
return err
}
}
return nil
}()
return ctx, err
}
// ProvisionContext creates a new context from the configuration and provisions storage
// and app modules.
// The function is intended for testing and advanced use cases only; typically, `Run` should be
// used to ensure a fully functional Caddy instance.
// EXPERIMENTAL: While this is public the interface and implementation details of this function may change.
func ProvisionContext(newCfg *Config) (Context, error) {
return provisionContext(newCfg, false)
}
// finishSettingUp should be run after all apps have successfully started.
func finishSettingUp(ctx Context, cfg *Config) error {
// establish this server's identity (only after apps are loaded
// so that cert management of this endpoint doesn't prevent user's
// servers from starting which likely also use HTTP/HTTPS ports;
// but before remote management which may depend on these creds)
err := manageIdentity(ctx, cfg)
if err != nil {
return fmt.Errorf("provisioning remote admin endpoint: %v", err)
}
// replace any remote admin endpoint
err = replaceRemoteAdminServer(ctx, cfg)
if err != nil {
return fmt.Errorf("provisioning remote admin endpoint: %v", err)
}
// if dynamic config is requested, set that up and run it
if cfg != nil && cfg.Admin != nil && cfg.Admin.Config != nil && cfg.Admin.Config.LoadRaw != nil {
val, err := ctx.LoadModule(cfg.Admin.Config, "LoadRaw")
if err != nil {
return fmt.Errorf("loading config loader module: %s", err)
}
logger := Log().Named("config_loader").With(
zap.String("module", val.(Module).CaddyModule().ID.Name()),
zap.Int("load_delay", int(cfg.Admin.Config.LoadDelay)))
runLoadedConfig := func(config []byte) error {
logger.Info("applying dynamically-loaded config")
err := changeConfig(http.MethodPost, "/"+rawConfigKey, config, "", false)
if errors.Is(err, errSameConfig) {
return err
}
if err != nil {
logger.Error("failed to run dynamically-loaded config", zap.Error(err))
return err
}
logger.Info("successfully applied dynamically-loaded config")
return nil
}
if cfg.Admin.Config.LoadDelay > 0 {
go func() {
// the loop is here to iterate ONLY if there is an error, a no-op config load,
// or an unchanged config, in which case we simply wait the delay and try again
for {
timer := time.NewTimer(time.Duration(cfg.Admin.Config.LoadDelay))
select {
case <-timer.C:
loadedConfig, err := val.(ConfigLoader).LoadConfig(ctx)
if err != nil {
logger.Error("failed loading dynamic config; will retry", zap.Error(err))
continue
}
if loadedConfig == nil {
logger.Info("dynamically-loaded config was nil; will retry")
continue
}
err = runLoadedConfig(loadedConfig)
if errors.Is(err, errSameConfig) {
logger.Info("dynamically-loaded config was unchanged; will retry")
continue
}
case <-ctx.Done():
if !timer.Stop() {
<-timer.C
}
logger.Info("stopping dynamic config loading")
}
break
}
}()
} else {
// if no LoadDelay is provided, load the config synchronously
loadedConfig, err := val.(ConfigLoader).LoadConfig(ctx)
if err != nil {
return fmt.Errorf("loading dynamic config from %T: %v", val, err)
}
// do this in a goroutine so current config can finish being loaded; otherwise deadlock
go func() { _ = runLoadedConfig(loadedConfig) }()
}
}
return nil
}
// ConfigLoader is a type that can load a Caddy config. If
// the return value is non-nil, it must be valid Caddy JSON;
// if the return value is nil, or if the error is non-nil,
// it is considered a no-op load and may be retried later.
type ConfigLoader interface {
LoadConfig(Context) ([]byte, error)
}
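// For illustration, a minimal sketch of a ConfigLoader that reads a config
// from a local file (the fileLoader type and its path field are hypothetical
// and not part of Caddy; a real loader must also be a registered module):
//
//	type fileLoader struct{ path string }
//
//	func (fl fileLoader) LoadConfig(_ Context) ([]byte, error) {
//		cfg, err := os.ReadFile(fl.path)
//		if err != nil {
//			return nil, err // treated as a no-op load; may be retried later
//		}
//		return cfg, nil // must be valid Caddy JSON
//	}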
// Stop stops running the current configuration.
// It is the antithesis of Run(). This function
// will log any errors that occur during the
// stopping of individual apps and continue to
// stop the others. Stop should only be called
// if not replacing with a new config.
func Stop() error {
currentCtxMu.RLock()
ctx := currentCtx
currentCtxMu.RUnlock()
rawCfgMu.Lock()
unsyncedStop(ctx)
currentCtxMu.Lock()
currentCtx = Context{}
currentCtxMu.Unlock()
rawCfgJSON = nil
rawCfgIndex = nil
rawCfg[rawConfigKey] = nil
rawCfgMu.Unlock()
return nil
}
// unsyncedStop stops ctx from running, but has
// no locking around ctx. It is a no-op if ctx has a
// nil cfg. If any app returns an error when stopping,
// it is logged and the function continues stopping
// the next app. This function assumes all apps in
// ctx were successfully started first.
//
// A lock on rawCfgMu is required even though this
// function does not access rawCfg, because that lock
// synchronizes the stop/start of apps.
func unsyncedStop(ctx Context) {
if ctx.cfg == nil {
return
}
// stop each app
for name, a := range ctx.cfg.apps {
err := a.Stop()
if err != nil {
log.Printf("[ERROR] stop %s: %v", name, err)
}
}
// clean up all modules
ctx.cfg.cancelFunc()
}
// Validate loads, provisions, and validates
// cfg, but does not start running it.
func Validate(cfg *Config) error {
_, err := run(cfg, false)
if err == nil {
cfg.cancelFunc() // call Cleanup on all modules
}
return err
}
// exitProcess exits the process as gracefully as possible,
// but it always exits, even if there are errors doing so.
// It stops all apps, cleans up external locks, removes any
// PID file, and shuts down admin endpoint(s) in a goroutine.
// Errors are logged along the way, and an appropriate exit
// code is emitted.
func exitProcess(ctx context.Context, logger *zap.Logger) {
// let the rest of the program know we're quitting
atomic.StoreInt32(exiting, 1)
// give the OS or service/process manager our 2 weeks' notice: we quit
if err := notify.Stopping(); err != nil {
Log().Error("unable to notify service manager of stopping state", zap.Error(err))
}
if logger == nil {
logger = Log()
}
logger.Warn("exiting; byeee!! 👋")
exitCode := ExitCodeSuccess
lastContext := ActiveContext()
// stop all apps
if err := Stop(); err != nil {
logger.Error("failed to stop apps", zap.Error(err))
exitCode = ExitCodeFailedQuit
}
// clean up certmagic locks
certmagic.CleanUpOwnLocks(ctx, logger)
// remove pidfile
if pidfile != "" {
err := os.Remove(pidfile)
if err != nil {
logger.Error("cleaning up PID file:",
zap.String("pidfile", pidfile),
zap.Error(err))
exitCode = ExitCodeFailedQuit
}
}
// execute any process-exit callbacks
for _, exitFunc := range lastContext.exitFuncs {
exitFunc(ctx)
}
exitFuncsMu.Lock()
for _, exitFunc := range exitFuncs {
exitFunc(ctx)
}
exitFuncsMu.Unlock()
// shut down admin endpoint(s) in a goroutine so that
// if this function was called from an admin handler,
// it has a chance to finish responding to the API
// request and return gracefully
go func() {
defer func() {
logger = logger.With(zap.Int("exit_code", exitCode))
if exitCode == ExitCodeSuccess {
logger.Info("shutdown complete")
} else {
logger.Error("unclean shutdown")
}
os.Exit(exitCode)
}()
if remoteAdminServer != nil {
err := stopAdminServer(remoteAdminServer)
if err != nil {
exitCode = ExitCodeFailedQuit
logger.Error("failed to stop remote admin server gracefully", zap.Error(err))
}
}
if localAdminServer != nil {
err := stopAdminServer(localAdminServer)
if err != nil {
exitCode = ExitCodeFailedQuit
logger.Error("failed to stop local admin server gracefully", zap.Error(err))
}
}
}()
}
var exiting = new(int32) // accessed atomically
// Exiting returns true if the process is exiting.
// EXPERIMENTAL API: subject to change or removal.
func Exiting() bool { return atomic.LoadInt32(exiting) == 1 }
// OnExit registers a callback to invoke during process exit.
// This registration is PROCESS-GLOBAL, meaning that each
// function should only be registered once forever, NOT once
// per config load (etc).
//
// EXPERIMENTAL API: subject to change or removal.
func OnExit(f func(context.Context)) {
exitFuncsMu.Lock()
exitFuncs = append(exitFuncs, f)
exitFuncsMu.Unlock()
}
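// For example, a plugin might register a one-time hook at init
// (illustrative sketch only; the log message is hypothetical):
//
//	func init() {
//		caddy.OnExit(func(ctx context.Context) {
//			caddy.Log().Info("flushing state before process exit")
//		})
//	}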
var (
exitFuncs []func(context.Context)
exitFuncsMu sync.Mutex
)
// Duration can be an integer or a string. An integer is
// interpreted as nanoseconds. If a string, it is a Go
// time.Duration value such as `300ms`, `1.5h`, or `2h45m`;
// valid units are `ns`, `us`/`µs`, `ms`, `s`, `m`, `h`, and `d`.
type Duration time.Duration
// UnmarshalJSON satisfies json.Unmarshaler.
func (d *Duration) UnmarshalJSON(b []byte) error {
if len(b) == 0 {
return io.EOF
}
var dur time.Duration
var err error
if b[0] == byte('"') && b[len(b)-1] == byte('"') {
dur, err = ParseDuration(strings.Trim(string(b), `"`))
} else {
err = json.Unmarshal(b, &dur)
}
*d = Duration(dur)
return err
}
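// For example, both of these JSON values decode to the same 36-hour Duration
// (a sketch; the variable name is illustrative only):
//
//	var d Duration
//	_ = json.Unmarshal([]byte(`"1d12h"`), &d)          // string form; "d" means 24h
//	_ = json.Unmarshal([]byte(`129600000000000`), &d)  // integer form, in nanoseconds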
// ParseDuration parses a duration string, adding
// support for the "d" unit meaning number of days,
// where a day is assumed to be 24h. The maximum
// input string length is 1024.
func ParseDuration(s string) (time.Duration, error) {
if len(s) > 1024 {
return 0, fmt.Errorf("parsing duration: input string too long")
}
var inNumber bool
var numStart int
for i := 0; i < len(s); i++ {
ch := s[i]
if ch == 'd' {
daysStr := s[numStart:i]
days, err := strconv.ParseFloat(daysStr, 64)
if err != nil {
return 0, err
}
hours := days * 24.0
hoursStr := strconv.FormatFloat(hours, 'f', -1, 64)
s = s[:numStart] + hoursStr + "h" + s[i+1:]
i--
continue
}
if !inNumber {
numStart = i
}
inNumber = (ch >= '0' && ch <= '9') || ch == '.' || ch == '-' || ch == '+'
}
return time.ParseDuration(s)
}
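// For illustration, a couple of hypothetical inputs and their results:
//
//	sixtyHours, _ := ParseDuration("2d12h")    // 60h0m0s
//	thirtySixHours, _ := ParseDuration("1.5d") // 36h0m0s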
// InstanceID returns the UUID for this instance, and generates one if it
// does not already exist. The UUID is stored in the local data directory,
// regardless of storage configuration, since each instance is intended to
// have its own unique ID.
func InstanceID() (uuid.UUID, error) {
appDataDir := AppDataDir()
uuidFilePath := filepath.Join(appDataDir, "instance.uuid")
uuidFileBytes, err := os.ReadFile(uuidFilePath)
if errors.Is(err, fs.ErrNotExist) {
uuid, err := uuid.NewRandom()
if err != nil {
return uuid, err
}
err = os.MkdirAll(appDataDir, 0o700)
if err != nil {
return uuid, err
}
err = os.WriteFile(uuidFilePath, []byte(uuid.String()), 0o600)
return uuid, err
} else if err != nil {
return [16]byte{}, err
}
return uuid.ParseBytes(uuidFileBytes)
}
// CustomVersion is an optional string that overrides Caddy's
// reported version. It can be helpful when downstream packagers
// need to manually set Caddy's version. If no other version
// information is available, the short form version (see
// Version()) will be set to CustomVersion, and the full version
// will include CustomVersion at the beginning.
//
// Set this variable during `go build` with `-ldflags`:
//
// -ldflags '-X github.com/caddyserver/caddy/v2.CustomVersion=v2.6.2'
//
// for example.
var CustomVersion string
// Version returns the Caddy version in a simple/short form, and
// a full version string. The short form will not have spaces and
// is intended for User-Agent strings and similar, but it may
// omit valuable information. Note that Caddy must be compiled
// in a special way to properly embed complete version information.
// First this function tries to get the version from the embedded
// build info provided by go.mod dependencies; then it tries to
// get info from embedded VCS information, which requires having
// built Caddy from a git repository. If no version is available,
// this function returns "(devel)" because Go uses that, but for
// the simple form we change it to "unknown". If still no version
// is available (e.g. no VCS repo), then it will use CustomVersion;
// CustomVersion is always prepended to the full version string.
//
// See relevant Go issues: https://github.com/golang/go/issues/29228
// and https://github.com/golang/go/issues/50603.
//
// This function is experimental and subject to change or removal.
func Version() (simple, full string) {
// the currently-recommended way to build Caddy involves
// building it as a dependency so we can extract version
// information from go.mod tooling; once the upstream
// Go issues are fixed, we should just be able to use
// bi.Main... hopefully.
var module *debug.Module
bi, ok := debug.ReadBuildInfo()
if !ok {
if CustomVersion != "" {
full = CustomVersion
simple = CustomVersion
return
}
full = "unknown"
simple = "unknown"
return
}
// find the Caddy module in the dependency list
for _, dep := range bi.Deps {
if dep.Path == ImportPath {
module = dep
break
}
}
if module != nil {
simple, full = module.Version, module.Version
if module.Sum != "" {
full += " " + module.Sum
}
if module.Replace != nil {
full += " => " + module.Replace.Path
if module.Replace.Version != "" {
simple = module.Replace.Version + "_custom"
full += "@" + module.Replace.Version
}
if module.Replace.Sum != "" {
full += " " + module.Replace.Sum
}
}
}
if full == "" {
var vcsRevision string
var vcsTime time.Time
var vcsModified bool
for _, setting := range bi.Settings {
switch setting.Key {
case "vcs.revision":
vcsRevision = setting.Value
case "vcs.time":
vcsTime, _ = time.Parse(time.RFC3339, setting.Value)
case "vcs.modified":
vcsModified, _ = strconv.ParseBool(setting.Value)
}
}
if vcsRevision != "" {
var modified string
if vcsModified {
modified = "+modified"
}
full = fmt.Sprintf("%s%s (%s)", vcsRevision, modified, vcsTime.Format(time.RFC822))
simple = vcsRevision
// use short checksum for simple, if hex-only
if _, err := hex.DecodeString(simple); err == nil {
simple = simple[:8]
}
// append date to simple since it can be convenient
// to know the commit date as part of the version
if !vcsTime.IsZero() {
simple += "-" + vcsTime.Format("20060102")
}
}
}
if full == "" {
if CustomVersion != "" {
full = CustomVersion
} else {
full = "unknown"
}
} else if CustomVersion != "" {
full = CustomVersion + " " + full
}
if simple == "" || simple == "(devel)" {
if CustomVersion != "" {
simple = CustomVersion
} else {
simple = "unknown"
}
}
return
}
// ActiveContext returns the currently-active context.
// This function is experimental and might be changed
// or removed in the future.
func ActiveContext() Context {
currentCtxMu.RLock()
defer currentCtxMu.RUnlock()
return currentCtx
}
// CtxKey is a value type for use with context.WithValue.
type CtxKey string
// This group of variables pertains to the current configuration.
var (
// currentCtx is the root context for the currently-running
// configuration, which can be accessed through this value.
// If the Config contained in this value is not nil, then
// a config is currently active/running.
currentCtx Context
currentCtxMu sync.RWMutex
// rawCfg is the current, generic-decoded configuration;
// we initialize it as a map with one field ("config")
// to maintain parity with the API endpoint and to avoid
// the special case of having to access/mutate the variable
// directly without traversing into it.
rawCfg = map[string]any{
rawConfigKey: nil,
}
// rawCfgJSON is the JSON-encoded form of rawCfg. Keeping
// this around avoids an extra Marshal call during changes.
rawCfgJSON []byte
// rawCfgIndex is the map of user-assigned ID to expanded
// path, for converting /id/ paths to /config/ paths.
rawCfgIndex map[string]string
// rawCfgMu protects all the rawCfg fields and also
// essentially synchronizes config changes/reloads.
rawCfgMu sync.RWMutex
)
// errSameConfig is returned if the new config is the same
// as the old one. This isn't usually an actual, actionable
// error; it's mostly a sentinel value.
var errSameConfig = errors.New("config is unchanged")
// ImportPath is the package import path for Caddy core.
// This identifier may be removed in the future.
const ImportPath = "github.com/caddyserver/caddy/v2"
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyfile
import (
"bytes"
"encoding/json"
"fmt"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
)
// Adapter adapts Caddyfile to Caddy JSON.
type Adapter struct {
ServerType ServerType
}
// Adapt converts the Caddyfile config in body to Caddy JSON.
func (a Adapter) Adapt(body []byte, options map[string]any) ([]byte, []caddyconfig.Warning, error) {
if a.ServerType == nil {
return nil, nil, fmt.Errorf("no server type")
}
if options == nil {
options = make(map[string]any)
}
filename, _ := options["filename"].(string)
if filename == "" {
filename = "Caddyfile"
}
serverBlocks, err := Parse(filename, body)
if err != nil {
return nil, nil, err
}
cfg, warnings, err := a.ServerType.Setup(serverBlocks, options)
if err != nil {
return nil, warnings, err
}
// lint check: see if input was properly formatted; sometimes messy files parse
// successfully but result in logical errors (the Caddyfile is a bad format, I'm sorry)
if warning, different := FormattingDifference(filename, body); different {
warnings = append(warnings, warning)
}
result, err := json.Marshal(cfg)
return result, warnings, err
}
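// For example, given some ServerType implementation st, adapting a Caddyfile
// might look like this (illustrative sketch; st and caddyfileBytes are
// assumed to exist):
//
//	adapter := Adapter{ServerType: st}
//	cfgJSON, warnings, err := adapter.Adapt(caddyfileBytes, map[string]any{
//		"filename": "Caddyfile",
//	})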
// FormattingDifference returns a warning and true if the formatted version
// is any different from the input; empty warning and false otherwise.
// TODO: also perform this check on imported files
func FormattingDifference(filename string, body []byte) (caddyconfig.Warning, bool) {
// replace windows-style newlines to normalize comparison
normalizedBody := bytes.ReplaceAll(body, []byte("\r\n"), []byte("\n"))
formatted := Format(normalizedBody)
if bytes.Equal(formatted, normalizedBody) {
return caddyconfig.Warning{}, false
}
// find where the difference is
line := 1
for i, ch := range normalizedBody {
if i >= len(formatted) || ch != formatted[i] {
break
}
if ch == '\n' {
line++
}
}
return caddyconfig.Warning{
File: filename,
Line: line,
Message: "Caddyfile input is not formatted; run 'caddy fmt --overwrite' to fix inconsistencies",
}, true
}
// Unmarshaler is a type that can unmarshal Caddyfile tokens to
// set itself up for a JSON encoding. The goal of an unmarshaler
// is not to set itself up for actual use, but to set itself up for
// being marshaled into JSON. Caddyfile-unmarshaled values will not
// be used directly; they will be encoded as JSON and then used from
// that. Implementations _may_ be able to support multiple segments
// (instances of their directive or batch of tokens); typically this
// means wrapping parsing logic in a loop: `for d.Next() { ... }`.
// More commonly, only a single segment is supported, so a simple
// `d.Next()` at the start should be used to consume the module
// identifier token (directive name, etc).
type Unmarshaler interface {
UnmarshalCaddyfile(d *Dispenser) error
}
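// For illustration, a minimal sketch of an Unmarshaler for a directive that
// takes exactly one argument (the Gizmo type is hypothetical, not a real
// Caddy module):
//
//	type Gizmo struct{ Name string }
//
//	func (g *Gizmo) UnmarshalCaddyfile(d *Dispenser) error {
//		d.Next() // consume the directive name
//		if !d.Args(&g.Name) {
//			return d.ArgErr()
//		}
//		if d.NextArg() {
//			return d.ArgErr() // no extra arguments allowed
//		}
//		return nil
//	}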
// ServerType is a type that can evaluate a Caddyfile and set up a caddy config.
type ServerType interface {
// Setup takes the server blocks which contain tokens,
// as well as options (e.g. CLI flags) and creates a
// Caddy config, along with any warnings or an error.
Setup([]ServerBlock, map[string]any) (*caddy.Config, []caddyconfig.Warning, error)
}
// UnmarshalModule instantiates a module with the given ID and invokes
// UnmarshalCaddyfile on the new value using the immediate next segment
// of d as input. In other words, d's next token should be the first
// token of the module's Caddyfile input.
//
// This function is used when the next segment of Caddyfile tokens
// belongs to another Caddy module. The returned value is often
// type-asserted to the module's associated type for practical use
// when setting up a config.
func UnmarshalModule(d *Dispenser, moduleID string) (Unmarshaler, error) {
mod, err := caddy.GetModule(moduleID)
if err != nil {
return nil, d.Errf("getting module named '%s': %v", moduleID, err)
}
inst := mod.New()
unm, ok := inst.(Unmarshaler)
if !ok {
return nil, d.Errf("module %s is not a Caddyfile unmarshaler; is %T", mod.ID, inst)
}
err = unm.UnmarshalCaddyfile(d.NewFromNextSegment())
if err != nil {
return nil, err
}
return unm, nil
}
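// For example, a parent unmarshaler might hand the next segment of tokens to
// a guest module and then assert its concrete type (illustrative sketch; the
// module ID and Gizmo type are hypothetical):
//
//	unm, err := UnmarshalModule(d, "http.handlers.gizmo")
//	if err != nil {
//		return err
//	}
//	gizmo, ok := unm.(*Gizmo)
//	if !ok {
//		return d.Errf("unexpected module type %T", unm)
//	}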
// Interface guard
var _ caddyconfig.Adapter = (*Adapter)(nil)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyfile
import (
"errors"
"fmt"
"io"
"log"
"strconv"
"strings"
)
// Dispenser is a type that dispenses tokens, similarly to a lexer,
// except that it can do so with some notion of structure. An empty
// Dispenser is invalid; call NewDispenser to make a proper instance.
type Dispenser struct {
tokens []Token
cursor int
nesting int
// A map of arbitrary context data that can be used
// to pass through some information to unmarshalers.
context map[string]any
}
// NewDispenser returns a Dispenser filled with the given tokens.
func NewDispenser(tokens []Token) *Dispenser {
return &Dispenser{
tokens: tokens,
cursor: -1,
}
}
// NewTestDispenser parses input into tokens and creates a new
// Dispenser for test purposes only; any errors are fatal.
func NewTestDispenser(input string) *Dispenser {
tokens, err := allTokens("Testfile", []byte(input))
if err != nil && err != io.EOF {
log.Fatalf("getting all tokens from input: %v", err)
}
return NewDispenser(tokens)
}
// Next loads the next token. Returns true if a token
// was loaded; false otherwise. If false, all tokens
// have been consumed.
func (d *Dispenser) Next() bool {
if d.cursor < len(d.tokens)-1 {
d.cursor++
return true
}
return false
}
// Prev moves to the previous token. It does the inverse
// of Next(), except this function may decrement the cursor
// to -1 so that the next call to Next() points to the
// first token; this allows dispensing to "start over". This
// method returns true if the cursor ends up pointing to a
// valid token.
func (d *Dispenser) Prev() bool {
if d.cursor > -1 {
d.cursor--
return d.cursor > -1
}
return false
}
// NextArg loads the next token if it is on the same
// line and if it is not a block opening (open curly
// brace). Returns true if an argument token was
// loaded; false otherwise. If false, all tokens on
// the line have been consumed except for potentially
// a block opening. It handles imported tokens
// correctly.
func (d *Dispenser) NextArg() bool {
if !d.nextOnSameLine() {
return false
}
if d.Val() == "{" {
// roll back; a block opening is not an argument
d.cursor--
return false
}
return true
}
// nextOnSameLine advances the cursor if the next
// token is on the same line of the same file.
func (d *Dispenser) nextOnSameLine() bool {
if d.cursor < 0 {
d.cursor++
return true
}
if d.cursor >= len(d.tokens)-1 {
return false
}
curr := d.tokens[d.cursor]
next := d.tokens[d.cursor+1]
if !isNextOnNewLine(curr, next) {
d.cursor++
return true
}
return false
}
// NextLine loads the next token only if it is not on the same
// line as the current token, and returns true if a token was
// loaded; false otherwise. If false, there is not another token
// or it is on the same line. It handles imported tokens correctly.
func (d *Dispenser) NextLine() bool {
if d.cursor < 0 {
d.cursor++
return true
}
if d.cursor >= len(d.tokens)-1 {
return false
}
curr := d.tokens[d.cursor]
next := d.tokens[d.cursor+1]
if isNextOnNewLine(curr, next) {
d.cursor++
return true
}
return false
}
// NextBlock can be used as the condition of a for loop
// to load the next token as long as it opens a block or
// is already in a block nested more than initialNestingLevel.
// In other words, a loop over NextBlock() will iterate
// all tokens in the block assuming the next token is an
// open curly brace, until the matching closing brace.
// The open and closing brace tokens for the outer-most
// block will be consumed internally and omitted from
// the iteration.
//
// Proper use of this method looks like this:
//
// for nesting := d.Nesting(); d.NextBlock(nesting); {
// }
//
// However, in simple cases where it is known that the
// Dispenser is new and has not already traversed state
// by a loop over NextBlock(), this will do:
//
// for d.NextBlock(0) {
// }
//
// As with other token parsing logic, a loop over
// NextBlock() should be contained within a loop over
// Next(), as it is usually prudent to skip the initial
// token.
func (d *Dispenser) NextBlock(initialNestingLevel int) bool {
if d.nesting > initialNestingLevel {
if !d.Next() {
return false // should be EOF error
}
if d.Val() == "}" && !d.nextOnSameLine() {
d.nesting--
} else if d.Val() == "{" && !d.nextOnSameLine() {
d.nesting++
}
return d.nesting > initialNestingLevel
}
if !d.nextOnSameLine() { // block must open on same line
return false
}
if d.Val() != "{" {
d.cursor-- // roll back if not opening brace
return false
}
d.Next() // consume open curly brace
if d.Val() == "}" {
return false // open and then closed right away
}
d.nesting++
return true
}
// Nesting returns the current nesting level. Necessary
// when using NextBlock().
func (d *Dispenser) Nesting() int {
return d.nesting
}
// Val gets the text of the current token. If there is no token
// loaded, it returns empty string.
func (d *Dispenser) Val() string {
if d.cursor < 0 || d.cursor >= len(d.tokens) {
return ""
}
return d.tokens[d.cursor].Text
}
// ValRaw gets the raw text of the current token (including quotes).
// If the token was a heredoc, then the delimiter is not included,
// because that is not relevant to any unmarshaling logic at this time.
// If there is no token loaded, it returns empty string.
func (d *Dispenser) ValRaw() string {
if d.cursor < 0 || d.cursor >= len(d.tokens) {
return ""
}
quote := d.tokens[d.cursor].wasQuoted
if quote > 0 && quote != '<' {
// string literal
return string(quote) + d.tokens[d.cursor].Text + string(quote)
}
return d.tokens[d.cursor].Text
}
// ScalarVal gets the value of the current token, converted to the closest
// scalar type. If there is no token loaded, it returns nil.
func (d *Dispenser) ScalarVal() any {
if d.cursor < 0 || d.cursor >= len(d.tokens) {
return nil
}
quote := d.tokens[d.cursor].wasQuoted
text := d.tokens[d.cursor].Text
if quote > 0 {
return text // string literal
}
if num, err := strconv.Atoi(text); err == nil {
return num
}
if num, err := strconv.ParseFloat(text, 64); err == nil {
return num
}
if boolean, err := strconv.ParseBool(text); err == nil {
return boolean
}
return text
}
// Line gets the line number of the current token.
// If there is no token loaded, it returns 0.
func (d *Dispenser) Line() int {
if d.cursor < 0 || d.cursor >= len(d.tokens) {
return 0
}
return d.tokens[d.cursor].Line
}
// File gets the filename where the current token originated.
func (d *Dispenser) File() string {
if d.cursor < 0 || d.cursor >= len(d.tokens) {
return ""
}
return d.tokens[d.cursor].File
}
// Args is a convenience function that loads the next arguments
// (tokens on the same line) into an arbitrary number of strings
// pointed to in targets. If there are not enough argument tokens
// available to fill targets, false is returned and the remaining
// targets are left unchanged. If all the targets are filled,
// then true is returned.
func (d *Dispenser) Args(targets ...*string) bool {
for i := 0; i < len(targets); i++ {
if !d.NextArg() {
return false
}
*targets[i] = d.Val()
}
return true
}
// AllArgs is like Args, but if there are more argument tokens
// available than there are targets, false is returned. The
// number of available argument tokens must match the number of
// targets exactly to return true.
func (d *Dispenser) AllArgs(targets ...*string) bool {
if !d.Args(targets...) {
return false
}
if d.NextArg() {
d.Prev()
return false
}
return true
}
// CountRemainingArgs counts the number of remaining arguments
// (tokens on the same line) without consuming the tokens.
func (d *Dispenser) CountRemainingArgs() int {
count := 0
for d.NextArg() {
count++
}
for i := 0; i < count; i++ {
d.Prev()
}
return count
}
// RemainingArgs loads any more arguments (tokens on the same line)
// into a slice and returns them. Open curly brace tokens also indicate
// the end of arguments, and the curly brace is not included in
// the return value nor is it loaded.
func (d *Dispenser) RemainingArgs() []string {
var args []string
for d.NextArg() {
args = append(args, d.Val())
}
return args
}
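// For example, parsing a hypothetical directive of the form
// "redir <from> <to> [permanent]" might look like this (sketch only):
//
//	var from, to string
//	if !d.Args(&from, &to) {
//		return d.ArgErr()
//	}
//	extras := d.RemainingArgs() // e.g. ["permanent"], possibly empty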
// RemainingArgsRaw loads any more arguments (tokens on the same line,
// retaining quotes) into a slice and returns them. Open curly brace
// tokens also indicate the end of arguments, and the curly brace is
// not included in the return value nor is it loaded.
func (d *Dispenser) RemainingArgsRaw() []string {
var args []string
for d.NextArg() {
args = append(args, d.ValRaw())
}
return args
}
// NewFromNextSegment returns a new dispenser with a copy of
// the tokens from the current token until the end of the
// "directive" whether that be to the end of the line or
// the end of a block that starts at the end of the line;
// in other words, until the end of the segment.
func (d *Dispenser) NewFromNextSegment() *Dispenser {
return NewDispenser(d.NextSegment())
}
// NextSegment returns a copy of the tokens from the current
// token until the end of the line or block that starts at
// the end of the line.
func (d *Dispenser) NextSegment() Segment {
tkns := Segment{d.Token()}
for d.NextArg() {
tkns = append(tkns, d.Token())
}
var openedBlock bool
for nesting := d.Nesting(); d.NextBlock(nesting); {
if !openedBlock {
// because NextBlock() consumes the initial open
// curly brace, we rewind here to append it, since
// our case is special in that we want the new
// dispenser to have all the tokens including
// surrounding curly braces
d.Prev()
tkns = append(tkns, d.Token())
d.Next()
openedBlock = true
}
tkns = append(tkns, d.Token())
}
if openedBlock {
// include closing brace
tkns = append(tkns, d.Token())
// do not consume the closing curly brace; the
// next iteration of the enclosing loop will
// call Next() and consume it
}
return tkns
}
// Token returns the current token.
func (d *Dispenser) Token() Token {
if d.cursor < 0 || d.cursor >= len(d.tokens) {
return Token{}
}
return d.tokens[d.cursor]
}
// Reset sets d's cursor to the beginning, as
// if this was a new and unused dispenser.
func (d *Dispenser) Reset() {
d.cursor = -1
d.nesting = 0
}
// ArgErr returns an argument error, meaning that another
// argument was expected but not found. In other words,
// a line break or open curly brace was encountered instead of
// an argument.
func (d *Dispenser) ArgErr() error {
if d.Val() == "{" {
return d.Err("unexpected token '{', expecting argument")
}
return d.Errf("wrong argument count or unexpected line ending after '%s'", d.Val())
}
// SyntaxErr creates a generic syntax error which explains what was
// found and what was expected.
func (d *Dispenser) SyntaxErr(expected string) error {
msg := fmt.Sprintf("syntax error: unexpected token '%s', expecting '%s', at %s:%d import chain: ['%s']", d.Val(), expected, d.File(), d.Line(), strings.Join(d.Token().imports, "','"))
return errors.New(msg)
}
// EOFErr returns an error indicating that the dispenser reached
// the end of the input when searching for the next token.
func (d *Dispenser) EOFErr() error {
return d.Errf("unexpected EOF")
}
// Err generates a custom parse-time error with a message of msg.
func (d *Dispenser) Err(msg string) error {
return d.Errf(msg)
}
// Errf is like Err, but for formatted error messages
func (d *Dispenser) Errf(format string, args ...any) error {
return d.WrapErr(fmt.Errorf(format, args...))
}
// WrapErr takes an existing error and adds the Caddyfile file and line number.
func (d *Dispenser) WrapErr(err error) error {
if len(d.Token().imports) > 0 {
return fmt.Errorf("%w, at %s:%d import chain ['%s']", err, d.File(), d.Line(), strings.Join(d.Token().imports, "','"))
}
return fmt.Errorf("%w, at %s:%d", err, d.File(), d.Line())
}
// Delete deletes the current token and returns the updated slice
// of tokens. The cursor is not advanced to the next token.
// Because deletion modifies the underlying slice, this method
// should only be called if you have access to the original slice
// of tokens and/or are using the slice of tokens outside this
// Dispenser instance. If you do not re-assign the slice with the
// return value of this method, inconsistencies in the token
// array will become apparent (or worse, hide from you like they
// did me for 3 and a half freaking hours late one night).
func (d *Dispenser) Delete() []Token {
if d.cursor >= 0 && d.cursor <= len(d.tokens)-1 {
d.tokens = append(d.tokens[:d.cursor], d.tokens[d.cursor+1:]...)
d.cursor--
}
return d.tokens
}
// DeleteN is the same as Delete, but can delete many tokens at once.
// If there aren't N tokens available to delete, none are deleted.
func (d *Dispenser) DeleteN(amount int) []Token {
if amount > 0 && d.cursor >= (amount-1) && d.cursor <= len(d.tokens)-1 {
d.tokens = append(d.tokens[:d.cursor-(amount-1)], d.tokens[d.cursor+1:]...)
d.cursor -= amount
}
return d.tokens
}
// SetContext sets a key-value pair in the context map.
func (d *Dispenser) SetContext(key string, value any) {
if d.context == nil {
d.context = make(map[string]any)
}
d.context[key] = value
}
// GetContext gets the value of a key in the context map.
func (d *Dispenser) GetContext(key string) any {
if d.context == nil {
return nil
}
return d.context[key]
}
// GetContextString gets the value of a key in the context map
// as a string, or an empty string if the key does not exist.
func (d *Dispenser) GetContextString(key string) string {
if d.context == nil {
return ""
}
if val, ok := d.context[key].(string); ok {
return val
}
return ""
}
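// For example, a caller can pass a value through to downstream unmarshalers
// (the key and value here are hypothetical):
//
//	d.SetContext("parent_block", "example.com")
//	host := d.GetContextString("parent_block") // "example.com"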
// isNewLine determines whether the current token is on a different
// line (higher line number) than the previous token. It handles imported
// tokens correctly. If there isn't a previous token, it returns true.
func (d *Dispenser) isNewLine() bool {
if d.cursor < 1 {
return true
}
if d.cursor > len(d.tokens)-1 {
return false
}
prev := d.tokens[d.cursor-1]
curr := d.tokens[d.cursor]
return isNextOnNewLine(prev, curr)
}
// isNextOnNewLine determines whether the next token is on a different
// line (higher line number) than the current token. It handles imported
// tokens correctly. If there isn't a next token, it returns true.
func (d *Dispenser) isNextOnNewLine() bool {
if d.cursor < 0 {
return false
}
if d.cursor >= len(d.tokens)-1 {
return true
}
curr := d.tokens[d.cursor]
next := d.tokens[d.cursor+1]
return isNextOnNewLine(curr, next)
}
const MatcherNameCtxKey = "matcher_name"
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyfile
import (
"bytes"
"io"
"slices"
"unicode"
)
// Format formats the input Caddyfile to a standard, nice-looking
// appearance. It works by reading each rune of the input and taking
// control over all the bracing and whitespace that is written; otherwise,
// words, comments, placeholders, and escaped characters are all treated
// literally and written as they appear in the input.
func Format(input []byte) []byte {
input = bytes.TrimSpace(input)
out := new(bytes.Buffer)
rdr := bytes.NewReader(input)
type heredocState int
const (
heredocClosed heredocState = 0
heredocOpening heredocState = 1
heredocOpened heredocState = 2
)
var (
last rune // the last character that was written to the result
space = true // whether current/previous character was whitespace (beginning of input counts as space)
beginningOfLine = true // whether we are at beginning of line
openBrace bool // whether current word/token is or started with open curly brace
openBraceWritten bool // if openBrace, whether that brace was written or not
openBraceSpace bool // whether there was a non-newline space before open brace
newLines int // count of newlines consumed
comment bool // whether we're in a comment
quoted bool // whether we're in a quoted segment
escaped bool // whether current char is escaped
heredoc heredocState // whether we're in a heredoc
heredocEscaped bool // whether heredoc is escaped
heredocMarker []rune
heredocClosingMarker []rune
nesting int // indentation level
)
write := func(ch rune) {
out.WriteRune(ch)
last = ch
}
indent := func() {
for tabs := nesting; tabs > 0; tabs-- {
write('\t')
}
}
nextLine := func() {
write('\n')
beginningOfLine = true
}
for {
ch, _, err := rdr.ReadRune()
if err != nil {
if err == io.EOF {
break
}
panic(err)
}
// detect whether we have the start of a heredoc
if !quoted && !(heredoc != heredocClosed || heredocEscaped) &&
space && last == '<' && ch == '<' {
write(ch)
heredoc = heredocOpening
space = false
continue
}
if heredoc == heredocOpening {
if ch == '\n' {
if len(heredocMarker) > 0 && heredocMarkerRegexp.MatchString(string(heredocMarker)) {
heredoc = heredocOpened
} else {
heredocMarker = nil
heredoc = heredocClosed
nextLine()
continue
}
write(ch)
continue
}
if unicode.IsSpace(ch) {
// a space means it's just a regular token and not a heredoc
heredocMarker = nil
heredoc = heredocClosed
} else {
heredocMarker = append(heredocMarker, ch)
write(ch)
continue
}
}
// if we're in a heredoc, all characters are read&write as-is
if heredoc == heredocOpened {
heredocClosingMarker = append(heredocClosingMarker, ch)
if len(heredocClosingMarker) > len(heredocMarker)+1 { // We assert that the heredocClosingMarker is followed by a unicode.Space
heredocClosingMarker = heredocClosingMarker[1:]
}
// check if we're done
if unicode.IsSpace(ch) && slices.Equal(heredocClosingMarker[:len(heredocClosingMarker)-1], heredocMarker) {
heredocMarker = nil
heredocClosingMarker = nil
heredoc = heredocClosed
} else {
write(ch)
if ch == '\n' {
heredocClosingMarker = heredocClosingMarker[:0]
}
continue
}
}
if last == '<' && space {
space = false
}
if comment {
if ch == '\n' {
comment = false
space = true
nextLine()
continue
} else {
write(ch)
continue
}
}
if !escaped && ch == '\\' {
if space {
write(' ')
space = false
}
write(ch)
escaped = true
continue
}
if escaped {
if ch == '<' {
heredocEscaped = true
}
write(ch)
escaped = false
continue
}
if quoted {
if ch == '"' {
quoted = false
}
write(ch)
continue
}
if space && ch == '"' {
quoted = true
}
if unicode.IsSpace(ch) {
space = true
heredocEscaped = false
if ch == '\n' {
newLines++
}
continue
}
spacePrior := space
space = false
//////////////////////////////////////////////////////////
// I find it helpful to think of the formatting loop in two
// main sections; by the time we reach this point, we
// know we are in a "regular" part of the file: we know
// the character is not a space, not in a literal segment
// like a comment or quoted, it's not escaped, etc.
//////////////////////////////////////////////////////////
if ch == '#' {
comment = true
}
if openBrace && spacePrior && !openBraceWritten {
if nesting == 0 && last == '}' {
nextLine()
nextLine()
}
openBrace = false
if beginningOfLine {
indent()
} else if !openBraceSpace {
write(' ')
}
write('{')
openBraceWritten = true
nextLine()
newLines = 0
// prevent infinite nesting from ridiculous inputs (issue #4169)
if nesting < 10 {
nesting++
}
}
switch {
case ch == '{':
openBrace = true
openBraceWritten = false
openBraceSpace = spacePrior && !beginningOfLine
if openBraceSpace {
write(' ')
}
continue
case ch == '}' && (spacePrior || !openBrace):
if last != '\n' {
nextLine()
}
if nesting > 0 {
nesting--
}
indent()
write('}')
newLines = 0
continue
}
if newLines > 2 {
newLines = 2
}
for i := 0; i < newLines; i++ {
nextLine()
}
newLines = 0
if beginningOfLine {
indent()
}
if nesting == 0 && last == '}' && beginningOfLine {
nextLine()
nextLine()
}
if !beginningOfLine && spacePrior {
write(' ')
}
if openBrace && !openBraceWritten {
write('{')
openBraceWritten = true
}
if spacePrior && ch == '<' {
space = true
}
write(ch)
beginningOfLine = false
}
// the Caddyfile does not need any leading or trailing spaces, but...
trimmedResult := bytes.TrimSpace(out.Bytes())
// ...Caddyfiles should, however, end with a newline because
// newlines are significant to the syntax of the file
return append(trimmedResult, '\n')
}
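// For illustration, a small messy input and the output this function is
// expected to produce (a sketch, not a test vector):
//
//	in := []byte("example.com{\nrespond \"hi\"\n  }")
//	out := Format(in) // "example.com {\n\trespond \"hi\"\n}\n"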
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build gofuzz
package caddyfile
import "bytes"
func FuzzFormat(input []byte) int {
formatted := Format(input)
if bytes.Equal(formatted, Format(formatted)) {
return 1
}
return 0
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyfile
import (
"regexp"
"strconv"
"strings"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
)
// parseVariadic determines if the token is a variadic placeholder,
// and if so, determines the index range (start/end) of args to use.
// Returns a boolean signaling whether a variadic placeholder was found,
// and the start and end indices.
func parseVariadic(token Token, argCount int) (bool, int, int) {
if !strings.HasPrefix(token.Text, "{args[") {
return false, 0, 0
}
if !strings.HasSuffix(token.Text, "]}") {
return false, 0, 0
}
argRange := strings.TrimSuffix(strings.TrimPrefix(token.Text, "{args["), "]}")
if argRange == "" {
caddy.Log().Named("caddyfile").Warn(
"Placeholder "+token.Text+" cannot have an empty index",
zap.String("file", token.File+":"+strconv.Itoa(token.Line)), zap.Strings("import_chain", token.imports))
return false, 0, 0
}
start, end, found := strings.Cut(argRange, ":")
// If no ":" delimiter is found, this is not a variadic.
// The replacer will pick this up.
if !found {
return false, 0, 0
}
// A valid token may contain several placeholders, and
// they may be separated by ":". It's not variadic.
// https://github.com/caddyserver/caddy/issues/5716
if strings.Contains(start, "}") || strings.Contains(end, "{") {
return false, 0, 0
}
var (
startIndex = 0
endIndex = argCount
err error
)
if start != "" {
startIndex, err = strconv.Atoi(start)
if err != nil {
caddy.Log().Named("caddyfile").Warn(
"Variadic placeholder "+token.Text+" has an invalid start index",
zap.String("file", token.File+":"+strconv.Itoa(token.Line)), zap.Strings("import_chain", token.imports))
return false, 0, 0
}
}
if end != "" {
endIndex, err = strconv.Atoi(end)
if err != nil {
caddy.Log().Named("caddyfile").Warn(
"Variadic placeholder "+token.Text+" has an invalid end index",
zap.String("file", token.File+":"+strconv.Itoa(token.Line)), zap.Strings("import_chain", token.imports))
return false, 0, 0
}
}
// bound check
if startIndex < 0 || startIndex > endIndex || endIndex > argCount {
caddy.Log().Named("caddyfile").Warn(
"Variadic placeholder "+token.Text+" indices are out of bounds, only "+strconv.Itoa(argCount)+" argument(s) exist",
zap.String("file", token.File+":"+strconv.Itoa(token.Line)), zap.Strings("import_chain", token.imports))
return false, 0, 0
}
return true, startIndex, endIndex
}
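// For illustration, with 3 available arguments (a sketch; tok is a token
// whose Text is shown in each comment):
//
//	ok, start, end := parseVariadic(tok, 3)
//	// "{args[:]}"  -> ok=true, start=0, end=3 (all three arguments)
//	// "{args[1:]}" -> ok=true, start=1, end=3 (arguments 1 and 2)
//	// "{args[0]}"  -> ok=false (not variadic; left to the args replacer)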
// makeArgsReplacer prepares a Replacer which can replace
// non-variadic args placeholders in imported tokens.
func makeArgsReplacer(args []string) *caddy.Replacer {
repl := caddy.NewEmptyReplacer()
repl.Map(func(key string) (any, bool) {
// TODO: Remove the deprecated {args.*} placeholder
// support at some point in the future
if matches := argsRegexpIndexDeprecated.FindStringSubmatch(key); len(matches) > 0 {
// What's matched may be a substring of the key
if matches[0] != key {
return nil, false
}
value, err := strconv.Atoi(matches[1])
if err != nil {
caddy.Log().Named("caddyfile").Warn(
"Placeholder {args." + matches[1] + "} has an invalid index")
return nil, false
}
if value >= len(args) {
caddy.Log().Named("caddyfile").Warn(
"Placeholder {args." + matches[1] + "} index is out of bounds, only " + strconv.Itoa(len(args)) + " argument(s) exist")
return nil, false
}
caddy.Log().Named("caddyfile").Warn(
"Placeholder {args." + matches[1] + "} deprecated, use {args[" + matches[1] + "]} instead")
return args[value], true
}
// Handle args[*] form
if matches := argsRegexpIndex.FindStringSubmatch(key); len(matches) > 0 {
// What's matched may be a substring of the key
if matches[0] != key {
return nil, false
}
if strings.Contains(matches[1], ":") {
caddy.Log().Named("caddyfile").Warn(
"Variadic placeholder {args[" + matches[1] + "]} must be a token on its own")
return nil, false
}
value, err := strconv.Atoi(matches[1])
if err != nil {
caddy.Log().Named("caddyfile").Warn(
"Placeholder {args[" + matches[1] + "]} has an invalid index")
return nil, false
}
if value >= len(args) {
caddy.Log().Named("caddyfile").Warn(
"Placeholder {args[" + matches[1] + "]} index is out of bounds, only " + strconv.Itoa(len(args)) + " argument(s) exist")
return nil, false
}
return args[value], true
}
// Not an args placeholder, ignore
return nil, false
})
return repl
}
var (
argsRegexpIndexDeprecated = regexp.MustCompile(`args\.(.+)`)
argsRegexpIndex = regexp.MustCompile(`args\[(.+)]`)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyfile
import (
"fmt"
)
type adjacency map[string][]string
type importGraph struct {
nodes map[string]struct{}
edges adjacency
}
func (i *importGraph) addNode(name string) {
if i.nodes == nil {
i.nodes = make(map[string]struct{})
}
if _, exists := i.nodes[name]; exists {
return
}
i.nodes[name] = struct{}{}
}
func (i *importGraph) addNodes(names []string) {
for _, name := range names {
i.addNode(name)
}
}
func (i *importGraph) removeNode(name string) {
delete(i.nodes, name)
}
func (i *importGraph) removeNodes(names []string) {
for _, name := range names {
i.removeNode(name)
}
}
func (i *importGraph) addEdge(from, to string) error {
if !i.exists(from) || !i.exists(to) {
return fmt.Errorf("one of the nodes does not exist")
}
if i.willCycle(to, from) {
return fmt.Errorf("a cycle of imports exists between %s and %s", from, to)
}
if i.areConnected(from, to) {
// if connected, there's nothing to do
return nil
}
if i.nodes == nil {
i.nodes = make(map[string]struct{})
}
if i.edges == nil {
i.edges = make(adjacency)
}
i.edges[from] = append(i.edges[from], to)
return nil
}
func (i *importGraph) addEdges(from string, tos []string) error {
for _, to := range tos {
err := i.addEdge(from, to)
if err != nil {
return err
}
}
return nil
}
func (i *importGraph) areConnected(from, to string) bool {
al, ok := i.edges[from]
if !ok {
return false
}
for _, v := range al {
if v == to {
return true
}
}
return false
}
func (i *importGraph) willCycle(from, to string) bool {
collector := make(map[string]bool)
var visit func(string)
visit = func(start string) {
if !collector[start] {
collector[start] = true
for _, v := range i.edges[start] {
visit(v)
}
}
}
for _, v := range i.edges[from] {
visit(v)
}
for k := range collector {
if to == k {
return true
}
}
return false
}
func (i *importGraph) exists(key string) bool {
_, exists := i.nodes[key]
return exists
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyfile
import (
"bufio"
"bytes"
"fmt"
"io"
"regexp"
"strings"
"unicode"
)
type (
// lexer is a utility which can get values, token by
// token, from a Reader. A token is a word, and tokens
// are separated by whitespace. A word can be enclosed
// in quotes if it contains whitespace.
lexer struct {
reader *bufio.Reader
token Token
line int
skippedLines int
}
// Token represents a single parsable unit.
Token struct {
File string
imports []string
Line int
Text string
wasQuoted rune // enclosing quote character, if any
heredocMarker string
snippetName string
}
)
// Tokenize takes bytes as input and lexes it into
// a list of tokens that can be parsed as a Caddyfile.
// Also takes a filename to fill the token's File as
// the source of the tokens, which is important to
// determine relative paths for `import` directives.
func Tokenize(input []byte, filename string) ([]Token, error) {
l := lexer{}
if err := l.load(bytes.NewReader(input)); err != nil {
return nil, err
}
var tokens []Token
for {
found, err := l.next()
if err != nil {
return nil, err
}
if !found {
break
}
l.token.File = filename
tokens = append(tokens, l.token)
}
return tokens, nil
}
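// For example (illustrative sketch only):
//
//	tokens, err := Tokenize([]byte("example.com {\n\troot * /srv\n}\n"), "Caddyfile")
//	if err != nil {
//		return err
//	}
//	d := NewDispenser(tokens)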
// load prepares the lexer to scan an input for tokens.
// It discards any leading byte order mark.
func (l *lexer) load(input io.Reader) error {
l.reader = bufio.NewReader(input)
l.line = 1
// discard byte order mark, if present
firstCh, _, err := l.reader.ReadRune()
if err != nil {
return err
}
if firstCh != 0xFEFF {
err := l.reader.UnreadRune()
if err != nil {
return err
}
}
return nil
}
// next loads the next token into the lexer.
// A token is delimited by whitespace, unless
// the token starts with a quote character (")
// in which case the token goes until the closing
// quote (the enclosing quotes are not included).
// Inside quoted strings, quotes may be escaped
// with a preceding \ character. No other chars
// may be escaped. The rest of the line is skipped
// if a "#" character is read in. Returns true if
// a token was loaded; false otherwise.
func (l *lexer) next() (bool, error) {
var val []rune
var comment, quoted, btQuoted, inHeredoc, heredocEscaped, escaped bool
var heredocMarker string
makeToken := func(quoted rune) bool {
l.token.Text = string(val)
l.token.wasQuoted = quoted
l.token.heredocMarker = heredocMarker
return true
}
for {
// Read a character in; if err then if we had
// read some characters, make a token. If we
// reached EOF, then no more tokens to read.
// If no EOF, then we had a problem.
ch, _, err := l.reader.ReadRune()
if err != nil {
if len(val) > 0 {
if inHeredoc {
return false, fmt.Errorf("incomplete heredoc <<%s on line #%d, expected ending marker %s", heredocMarker, l.line+l.skippedLines, heredocMarker)
}
return makeToken(0), nil
}
if err == io.EOF {
return false, nil
}
return false, err
}
// detect whether we have the start of a heredoc
if !(quoted || btQuoted) && !(inHeredoc || heredocEscaped) &&
len(val) > 1 && string(val[:2]) == "<<" {
// a space means it's just a regular token and not a heredoc
if ch == ' ' {
return makeToken(0), nil
}
// skip CR, we only care about LF
if ch == '\r' {
continue
}
// after hitting a newline, we know that the heredoc marker
// is the characters after the two << and the newline.
// we reset the val because the heredoc is syntax we don't
// want to keep.
if ch == '\n' {
if len(val) == 2 {
return false, fmt.Errorf("missing opening heredoc marker on line #%d; must contain only alpha-numeric characters, dashes and underscores; got empty string", l.line)
}
// check if there's too many <
if string(val[:3]) == "<<<" {
return false, fmt.Errorf("too many '<' for heredoc on line #%d; only use two, for example <<END", l.line)
}
heredocMarker = string(val[2:])
if !heredocMarkerRegexp.Match([]byte(heredocMarker)) {
return false, fmt.Errorf("heredoc marker on line #%d must contain only alpha-numeric characters, dashes and underscores; got '%s'", l.line, heredocMarker)
}
inHeredoc = true
l.skippedLines++
val = nil
continue
}
val = append(val, ch)
continue
}
// if we're in a heredoc, all characters are read as-is
if inHeredoc {
val = append(val, ch)
if ch == '\n' {
l.skippedLines++
}
// check if we're done, i.e. that the last few characters are the marker
if len(val) >= len(heredocMarker) && heredocMarker == string(val[len(val)-len(heredocMarker):]) {
// set the final value
val, err = l.finalizeHeredoc(val, heredocMarker)
if err != nil {
return false, err
}
// set the line counter, and make the token
l.line += l.skippedLines
l.skippedLines = 0
return makeToken('<'), nil
}
// stay in the heredoc until we find the ending marker
continue
}
// track whether we found an escape '\' for the next
// iteration to be contextually aware
if !escaped && !btQuoted && ch == '\\' {
escaped = true
continue
}
if quoted || btQuoted {
if quoted && escaped {
// all is literal in quoted area,
// so only escape quotes
if ch != '"' {
val = append(val, '\\')
}
escaped = false
} else {
if (quoted && ch == '"') || (btQuoted && ch == '`') {
return makeToken(ch), nil
}
}
// allow quoted text to continue on multiple lines
if ch == '\n' {
l.line += 1 + l.skippedLines
l.skippedLines = 0
}
// collect this character as part of the quoted token
val = append(val, ch)
continue
}
if unicode.IsSpace(ch) {
// ignore CR altogether, we only actually care about LF (\n)
if ch == '\r' {
continue
}
// end of the line
if ch == '\n' {
// newlines can be escaped to chain arguments
// onto multiple lines; else, increment the line count
if escaped {
l.skippedLines++
escaped = false
} else {
l.line += 1 + l.skippedLines
l.skippedLines = 0
}
// comments (#) are single-line only
comment = false
}
// any kind of space means we're at the end of this token
if len(val) > 0 {
return makeToken(0), nil
}
continue
}
// comments must be at the start of a token,
// in other words, preceded by space or newline
if ch == '#' && len(val) == 0 {
comment = true
}
if comment {
continue
}
if len(val) == 0 {
l.token = Token{Line: l.line}
if ch == '"' {
quoted = true
continue
}
if ch == '`' {
btQuoted = true
continue
}
}
if escaped {
// allow escaping the first < to skip the heredoc syntax
if ch == '<' {
heredocEscaped = true
} else {
val = append(val, '\\')
}
escaped = false
}
val = append(val, ch)
}
}
// finalizeHeredoc takes the runes read as the heredoc text and the marker,
// and processes the text to strip leading whitespace, returning the final
// value without the leading whitespace.
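//
// For example (illustrative, not from the original source), a heredoc
// written as
//
//	<<END
//		line one
//		line two
//		END
//
// has the whitespace preceding the closing END stripped from every
// line, yielding "line one\nline two" (the trailing newline is removed).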
func (l *lexer) finalizeHeredoc(val []rune, marker string) ([]rune, error) {
stringVal := string(val)
// find the last newline of the heredoc, which is where the contents end
lastNewline := strings.LastIndex(stringVal, "\n")
// collapse the content, then split into separate lines
lines := strings.Split(stringVal[:lastNewline+1], "\n")
// figure out how much whitespace we need to strip from the front of every line
// by getting the string that precedes the marker, on the last line
paddingToStrip := stringVal[lastNewline+1 : len(stringVal)-len(marker)]
// iterate over each line and strip the whitespace from the front
var out string
for lineNum, lineText := range lines[:len(lines)-1] {
if lineText == "" || lineText == "\r" {
out += "\n"
continue
}
// find an exact match for the padding
index := strings.Index(lineText, paddingToStrip)
// if the padding doesn't match exactly at the start then we can't safely strip
if index != 0 {
return nil, fmt.Errorf("mismatched leading whitespace in heredoc <<%s on line #%d [%s], expected whitespace [%s] to match the closing marker", marker, l.line+lineNum+1, lineText, paddingToStrip)
}
// strip, then append the line, with the newline, to the output.
// also removes all "\r" because Windows.
out += strings.ReplaceAll(lineText[len(paddingToStrip):]+"\n", "\r", "")
}
// Remove the trailing newline from the loop
if len(out) > 0 && out[len(out)-1] == '\n' {
out = out[:len(out)-1]
}
// return the final value
return []rune(out), nil
}
// Quoted returns true if the token was enclosed in quotes
// (i.e. double quotes, backticks, or heredoc).
func (t Token) Quoted() bool {
return t.wasQuoted > 0
}
// NumLineBreaks counts how many line breaks are in the token text.
func (t Token) NumLineBreaks() int {
lineBreaks := strings.Count(t.Text, "\n")
if t.wasQuoted == '<' {
// heredocs have an extra linebreak because the opening
// delimiter is on its own line and is not included in the
// token Text itself, and the trailing newline is removed.
lineBreaks += 2
}
return lineBreaks
}
// Clone returns a deep copy of the token.
func (t Token) Clone() Token {
return Token{
File: t.File,
imports: append([]string{}, t.imports...),
Line: t.Line,
Text: t.Text,
wasQuoted: t.wasQuoted,
heredocMarker: t.heredocMarker,
snippetName: t.snippetName,
}
}
var heredocMarkerRegexp = regexp.MustCompile("^[A-Za-z0-9_-]+$")
// isNextOnNewLine tests whether t2 is on a different line from t1
func isNextOnNewLine(t1, t2 Token) bool {
// If the second token is from a different file,
// we can assume it's from a different line
if t1.File != t2.File {
return true
}
// If the second token is from a different import chain,
// we can assume it's from a different line
if len(t1.imports) != len(t2.imports) {
return true
}
for i, im := range t1.imports {
if im != t2.imports[i] {
return true
}
}
// If the first token (incl line breaks) ends
// on a line earlier than the next token,
// then the second token is on a new line
return t1.Line+t1.NumLineBreaks() < t2.Line
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build gofuzz
package caddyfile
func FuzzTokenize(input []byte) int {
tokens, err := Tokenize(input, "Caddyfile")
if err != nil {
return 0
}
if len(tokens) == 0 {
return -1
}
return 1
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyfile
import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
)
// Parse parses the input just enough to group tokens, in
// order, by server block. No further parsing is performed.
// Server blocks are returned in the order in which they appear.
// Directive names are not validated at this stage.
//
// Environment variables in {$ENVIRONMENT_VARIABLE} notation
// will be replaced before parsing begins.
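//
// A minimal usage sketch (illustrative, not part of the original source):
//
//	input, _ := os.ReadFile("Caddyfile")
//	blocks, err := caddyfile.Parse("Caddyfile", input)
//	if err != nil {
//		// handle error
//	}
//	for _, sb := range blocks {
//		fmt.Println(sb.GetKeysText(), len(sb.Segments))
//	}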
func Parse(filename string, input []byte) ([]ServerBlock, error) {
// unfortunately, we must copy the input because parsing must
// remain a read-only operation, but we have to expand environment
// variables before we parse, which changes the underlying array (#4422)
inputCopy := make([]byte, len(input))
copy(inputCopy, input)
tokens, err := allTokens(filename, inputCopy)
if err != nil {
return nil, err
}
p := parser{
Dispenser: NewDispenser(tokens),
importGraph: importGraph{
nodes: make(map[string]struct{}),
edges: make(adjacency),
},
}
return p.parseAll()
}
// allTokens lexes the entire input, but does not parse it.
// It returns all the tokens from the input, unstructured
// and in order. It may mutate input as it expands env vars.
func allTokens(filename string, input []byte) ([]Token, error) {
return Tokenize(replaceEnvVars(input), filename)
}
// replaceEnvVars replaces all occurrences of environment variables.
// It mutates the underlying array and returns the updated slice.
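// For example (illustrative, not from the original source), with
// PORT=9000 set in the environment:
//
//	{$PORT}      -> "9000"
//	{$PORT:8080} -> "9000"
//	{$HOST:zero} -> "zero" (HOST unset, so the default after ':' is used)
//	{$MISSING}   -> ""     (unset with no default yields an empty string)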
func replaceEnvVars(input []byte) []byte {
var offset int
for {
begin := bytes.Index(input[offset:], spanOpen)
if begin < 0 {
break
}
begin += offset // make beginning relative to input, not offset
end := bytes.Index(input[begin+len(spanOpen):], spanClose)
if end < 0 {
break
}
end += begin + len(spanOpen) // make end relative to input, not begin
// get the name; if there is no name, skip it
envString := input[begin+len(spanOpen) : end]
if len(envString) == 0 {
offset = end + len(spanClose)
continue
}
// split the string into a key and an optional default
envParts := strings.SplitN(string(envString), envVarDefaultDelimiter, 2)
// do a lookup for the env var, replace with the default if not found
envVarValue, found := os.LookupEnv(envParts[0])
if !found && len(envParts) == 2 {
envVarValue = envParts[1]
}
// get the value of the environment variable
// note that this causes one-level deep chaining
envVarBytes := []byte(envVarValue)
// splice in the value
input = append(input[:begin],
append(envVarBytes, input[end+len(spanClose):]...)...)
// continue at the end of the replacement
offset = begin + len(envVarBytes)
}
return input
}
type parser struct {
*Dispenser
block ServerBlock // current server block being parsed
eof bool // if we encounter a valid EOF in a hard place
definedSnippets map[string][]Token
nesting int
importGraph importGraph
}
func (p *parser) parseAll() ([]ServerBlock, error) {
var blocks []ServerBlock
for p.Next() {
err := p.parseOne()
if err != nil {
return blocks, err
}
if len(p.block.Keys) > 0 || len(p.block.Segments) > 0 {
blocks = append(blocks, p.block)
}
if p.nesting > 0 {
return blocks, p.EOFErr()
}
}
return blocks, nil
}
func (p *parser) parseOne() error {
p.block = ServerBlock{}
return p.begin()
}
func (p *parser) begin() error {
if len(p.tokens) == 0 {
return nil
}
err := p.addresses()
if err != nil {
return err
}
if p.eof {
// this happens if the Caddyfile consists of only
// a line of addresses and nothing else
return nil
}
if ok, name := p.isNamedRoute(); ok {
// we just need a dummy leading token to ease parsing later
nameToken := p.Token()
nameToken.Text = name
// named routes only have one key, the route name
p.block.Keys = []Token{nameToken}
p.block.IsNamedRoute = true
// get all the tokens from the block, including the braces
tokens, err := p.blockTokens(true)
if err != nil {
return err
}
tokens = append([]Token{nameToken}, tokens...)
p.block.Segments = []Segment{tokens}
return nil
}
if ok, name := p.isSnippet(); ok {
if p.definedSnippets == nil {
p.definedSnippets = map[string][]Token{}
}
if _, found := p.definedSnippets[name]; found {
return p.Errf("redeclaration of previously declared snippet %s", name)
}
// consume all tokens until the matching close brace
tokens, err := p.blockTokens(false)
if err != nil {
return err
}
// Just as we need to track which file the token comes from, we need to
// keep track of which snippet the token comes from. This is helpful
// in tracking import cycles across files/snippets by namespacing them.
// Without this, we end up with false-positives in cycle-detection.
for k, v := range tokens {
v.snippetName = name
tokens[k] = v
}
p.definedSnippets[name] = tokens
// empty block keys so we don't save this block as a real server.
p.block.Keys = nil
return nil
}
return p.blockContents()
}
func (p *parser) addresses() error {
var expectingAnother bool
for {
value := p.Val()
token := p.Token()
// Reject request matchers if trying to define them globally
if strings.HasPrefix(value, "@") {
return p.Errf("request matchers may not be defined globally, they must be in a site block; found %s", value)
}
// Special case: import directive replaces tokens during parse-time
if value == "import" && p.isNewLine() {
err := p.doImport(0)
if err != nil {
return err
}
continue
}
// Open brace definitely indicates end of addresses
if value == "{" {
if expectingAnother {
return p.Errf("Expected another address but had '%s' - check for extra comma", value)
}
// Mark this server block as being defined with braces.
// This is used to provide a better error message when
// the user may have tried to define two server blocks
// without having used braces, which are required in
// that case.
p.block.HasBraces = true
break
}
// Users commonly forget to place a space between the address and the '{'
if strings.HasSuffix(value, "{") {
return p.Errf("Site addresses cannot end with a curly brace: '%s' - put a space between the token and the brace", value)
}
if value != "" { // empty token possible if user typed ""
// Trailing comma indicates another address will follow, which
// may possibly be on the next line
if value[len(value)-1] == ',' {
value = value[:len(value)-1]
expectingAnother = true
} else {
expectingAnother = false // but we may still see another one on this line
}
// If there's a comma here, it's probably because they didn't use a space
// between their two domains, e.g. "foo.com,bar.com", which would not be
// parsed as two separate site addresses.
if strings.Contains(value, ",") {
return p.Errf("Site addresses cannot contain a comma ',': '%s' - put a space after the comma to separate site addresses", value)
}
token.Text = value
p.block.Keys = append(p.block.Keys, token)
}
// Advance token and possibly break out of loop or return error
hasNext := p.Next()
if expectingAnother && !hasNext {
return p.EOFErr()
}
if !hasNext {
p.eof = true
break // EOF
}
if !expectingAnother && p.isNewLine() {
break
}
}
return nil
}
func (p *parser) blockContents() error {
errOpenCurlyBrace := p.openCurlyBrace()
if errOpenCurlyBrace != nil {
// single-server configs don't need curly braces
p.cursor--
}
err := p.directives()
if err != nil {
return err
}
// only look for close curly brace if there was an opening
if errOpenCurlyBrace == nil {
err = p.closeCurlyBrace()
if err != nil {
return err
}
}
return nil
}
// directives parses through all the lines for directives;
// it expects the next token to be the first
// directive. It goes until EOF or a closing curly brace
// which ends the server block.
func (p *parser) directives() error {
for p.Next() {
// end of server block
if p.Val() == "}" {
// p.nesting has already been decremented
break
}
// special case: import directive replaces tokens during parse-time
if p.Val() == "import" {
err := p.doImport(1)
if err != nil {
return err
}
p.cursor-- // cursor is advanced when we continue, so roll back one more
continue
}
// normal case: parse a directive as a new segment
// (a "segment" is a line which starts with a directive
// and which ends at the end of the line or at the end of
// the block that is opened at the end of the line)
if err := p.directive(); err != nil {
return err
}
}
return nil
}
// doImport swaps out the import directive and its argument
// (a total of 2 tokens) with the tokens in the specified file
// or globbing pattern. When the function returns, the cursor
// is on the token before where the import directive was. In
// other words, call Next() to access the first token that was
// imported.
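//
// For illustration (not from the original source), an import such as
//
//	import mysnippet foo bar {
//		extra tokens
//	}
//
// splices in the tokens of the snippet named mysnippet (or of a
// matching file/glob), substituting its argument placeholders (handled
// by makeArgsReplacer and parseVariadic, defined elsewhere) with "foo"
// and "bar", replacing `{block}` with the brace-enclosed tokens, and
// `{blocks.<key>}` with the tokens mapped to <key> within that block.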
func (p *parser) doImport(nesting int) error {
// syntax checks
if !p.NextArg() {
return p.ArgErr()
}
importPattern := p.Val()
if importPattern == "" {
return p.Err("Import requires a non-empty filepath")
}
// grab remaining args as placeholder replacements
args := p.RemainingArgs()
// set up a replacer for non-variadic args replacement
repl := makeArgsReplacer(args)
// grab all the tokens (if it exists) from within a block that follows the import
var blockTokens []Token
for currentNesting := p.Nesting(); p.NextBlock(currentNesting); {
blockTokens = append(blockTokens, p.Token())
}
// initialize with size 1
blockMapping := make(map[string][]Token, 1)
if len(blockTokens) > 0 {
// use these tokens to create a new dispenser, and then use it to parse each block
bd := NewDispenser(blockTokens)
for bd.Next() {
// see if we can grab a key
var currentMappingKey string
if bd.Val() == "{" {
return p.Err("anonymous blocks are not supported")
}
currentMappingKey = bd.Val()
currentMappingTokens := []Token{}
// read all args until end of line / {
if bd.NextArg() {
currentMappingTokens = append(currentMappingTokens, bd.Token())
for bd.NextArg() {
currentMappingTokens = append(currentMappingTokens, bd.Token())
}
// TODO(elee1766): we don't enter another mapping here because it's annoying to extract the { and } properly.
// maybe someone can do that in the future
} else {
// attempt to enter a block and add tokens to the currentMappingTokens
for mappingNesting := bd.Nesting(); bd.NextBlock(mappingNesting); {
currentMappingTokens = append(currentMappingTokens, bd.Token())
}
}
blockMapping[currentMappingKey] = currentMappingTokens
}
}
// splice out the import directive and its arguments
// (2 tokens, plus the length of args)
tokensBefore := p.tokens[:p.cursor-1-len(args)-len(blockTokens)]
tokensAfter := p.tokens[p.cursor+1:]
var importedTokens []Token
var nodes []string
// first check snippets. That is a simple, non-recursive replacement
if p.definedSnippets != nil && p.definedSnippets[importPattern] != nil {
importedTokens = p.definedSnippets[importPattern]
if len(importedTokens) > 0 {
// just grab the first one
nodes = append(nodes, fmt.Sprintf("%s:%s", importedTokens[0].File, importedTokens[0].snippetName))
}
} else {
// make path relative to the file of the _token_ being processed rather
// than current working directory (issue #867) and then use glob to get
// list of matching filenames
absFile, err := filepath.Abs(p.Dispenser.File())
if err != nil {
return p.Errf("Failed to get absolute path of file: %s: %v", p.Dispenser.File(), err)
}
var matches []string
var globPattern string
if !filepath.IsAbs(importPattern) {
globPattern = filepath.Join(filepath.Dir(absFile), importPattern)
} else {
globPattern = importPattern
}
if strings.Count(globPattern, "*") > 1 || strings.Count(globPattern, "?") > 1 ||
(strings.Contains(globPattern, "[") && strings.Contains(globPattern, "]")) {
// See issue #2096 - a pattern with many glob expansions can hang for too long
return p.Errf("Glob pattern may only contain one wildcard (*), but has others: %s", globPattern)
}
matches, err = filepath.Glob(globPattern)
if err != nil {
return p.Errf("Failed to use import pattern %s: %v", importPattern, err)
}
if len(matches) == 0 {
if strings.ContainsAny(globPattern, "*?[]") {
caddy.Log().Warn("No files matching import glob pattern", zap.String("pattern", importPattern))
} else {
return p.Errf("File to import not found: %s", importPattern)
}
} else {
// See issue #5295 - should skip any files that start with a . when iterating over them.
sep := string(filepath.Separator)
segGlobPattern := strings.Split(globPattern, sep)
if strings.HasPrefix(segGlobPattern[len(segGlobPattern)-1], "*") {
var tmpMatches []string
for _, m := range matches {
seg := strings.Split(m, sep)
if !strings.HasPrefix(seg[len(seg)-1], ".") {
tmpMatches = append(tmpMatches, m)
}
}
matches = tmpMatches
}
}
// collect all the imported tokens
for _, importFile := range matches {
newTokens, err := p.doSingleImport(importFile)
if err != nil {
return err
}
importedTokens = append(importedTokens, newTokens...)
}
nodes = matches
}
nodeName := p.File()
if p.Token().snippetName != "" {
nodeName += fmt.Sprintf(":%s", p.Token().snippetName)
}
p.importGraph.addNode(nodeName)
p.importGraph.addNodes(nodes)
if err := p.importGraph.addEdges(nodeName, nodes); err != nil {
p.importGraph.removeNodes(nodes)
return err
}
// copy the tokens so we don't overwrite p.definedSnippets
tokensCopy := make([]Token, 0, len(importedTokens))
var (
maybeSnippet bool
maybeSnippetId bool
index int
)
// run the argument replacer on the tokens
// (note: Go's for-range over a slice yields a copy of each element,
// and append likewise copies values)
for i, token := range importedTokens {
// update the token's imports to refer to import directive filename, line number and snippet name if there is one
if token.snippetName != "" {
token.imports = append(token.imports, fmt.Sprintf("%s:%d (import %s)", p.File(), p.Line(), token.snippetName))
} else {
token.imports = append(token.imports, fmt.Sprintf("%s:%d (import)", p.File(), p.Line()))
}
// naive way of determining snippets, since a snippet definition can only follow the name + block
// format; we don't check for nesting correctness or other errors here, as that's the parser's job.
if !maybeSnippet && nesting == 0 {
// first of the line
if i == 0 || isNextOnNewLine(tokensCopy[i-1], token) {
index = 0
} else {
index++
}
if index == 0 && len(token.Text) >= 3 && strings.HasPrefix(token.Text, "(") && strings.HasSuffix(token.Text, ")") {
maybeSnippetId = true
}
}
switch token.Text {
case "{":
nesting++
if index == 1 && maybeSnippetId && nesting == 1 {
maybeSnippet = true
maybeSnippetId = false
}
case "}":
nesting--
if nesting == 0 && maybeSnippet {
maybeSnippet = false
}
}
// if it is {block}, we substitute with all tokens in the block
// if it is {blocks.*}, we substitute with the tokens in the mapping for the *
var skip bool
var tokensToAdd []Token
switch {
case token.Text == "{block}":
tokensToAdd = blockTokens
case strings.HasPrefix(token.Text, "{blocks.") && strings.HasSuffix(token.Text, "}"):
// {blocks.foo.bar} will be extracted to key `foo.bar`
blockKey := strings.TrimPrefix(strings.TrimSuffix(token.Text, "}"), "{blocks.")
val, ok := blockMapping[blockKey]
if ok {
tokensToAdd = val
}
default:
skip = true
}
if !skip {
if len(tokensToAdd) == 0 {
// if there is no content in the snippet block, don't do any replacement
// this allows snippets which contained {block}/{blocks.*} before this change to continue functioning as normal
tokensCopy = append(tokensCopy, token)
} else {
tokensCopy = append(tokensCopy, tokensToAdd...)
}
continue
}
if maybeSnippet {
tokensCopy = append(tokensCopy, token)
continue
}
foundVariadic, startIndex, endIndex := parseVariadic(token, len(args))
if foundVariadic {
for _, arg := range args[startIndex:endIndex] {
token.Text = arg
tokensCopy = append(tokensCopy, token)
}
} else {
token.Text = repl.ReplaceKnown(token.Text, "")
tokensCopy = append(tokensCopy, token)
}
}
// splice the imported tokens in the place of the import statement
// and rewind cursor so Next() will land on first imported token
p.tokens = append(tokensBefore, append(tokensCopy, tokensAfter...)...)
p.cursor -= len(args) + len(blockTokens) + 1
return nil
}
// doSingleImport lexes the individual file at importFile and returns
// its tokens or an error, if any.
func (p *parser) doSingleImport(importFile string) ([]Token, error) {
file, err := os.Open(importFile)
if err != nil {
return nil, p.Errf("Could not import %s: %v", importFile, err)
}
defer file.Close()
if info, err := file.Stat(); err != nil {
return nil, p.Errf("Could not import %s: %v", importFile, err)
} else if info.IsDir() {
return nil, p.Errf("Could not import %s: is a directory", importFile)
}
input, err := io.ReadAll(file)
if err != nil {
return nil, p.Errf("Could not read imported file %s: %v", importFile, err)
}
// only warn in case of empty files
if len(input) == 0 || len(strings.TrimSpace(string(input))) == 0 {
caddy.Log().Warn("Import file is empty", zap.String("file", importFile))
return []Token{}, nil
}
importedTokens, err := allTokens(importFile, input)
if err != nil {
return nil, p.Errf("Could not read tokens while importing %s: %v", importFile, err)
}
// Tack the file path onto these tokens so errors show the imported file's name
// (we use full, absolute path to avoid bugs: issue #1892)
filename, err := filepath.Abs(importFile)
if err != nil {
return nil, p.Errf("Failed to get absolute path of file: %s: %v", importFile, err)
}
for i := 0; i < len(importedTokens); i++ {
importedTokens[i].File = filename
}
return importedTokens, nil
}
// directive collects tokens until the directive's scope
// closes (either end of line or end of curly brace block).
// It expects the currently-loaded token to be a directive
// (or } that ends a server block). The collected tokens
// are loaded into the current server block for later use
// by directive setup functions.
func (p *parser) directive() error {
// a segment is a list of tokens associated with this directive
var segment Segment
// the directive itself is appended as a relevant token
segment = append(segment, p.Token())
for p.Next() {
if p.Val() == "{" {
p.nesting++
if !p.isNextOnNewLine() && p.Token().wasQuoted == 0 {
return p.Err("Unexpected next token after '{' on same line")
}
if p.isNewLine() {
return p.Err("Unexpected '{' on a new line; did you mean to place the '{' on the previous line?")
}
} else if p.Val() == "{}" {
if p.isNextOnNewLine() && p.Token().wasQuoted == 0 {
return p.Err("Unexpected '{}' at end of line")
}
} else if p.isNewLine() && p.nesting == 0 {
p.cursor-- // read too far
break
} else if p.Val() == "}" && p.nesting > 0 {
p.nesting--
} else if p.Val() == "}" && p.nesting == 0 {
return p.Err("Unexpected '}' because no matching opening brace")
} else if p.Val() == "import" && p.isNewLine() {
if err := p.doImport(1); err != nil {
return err
}
p.cursor-- // cursor is advanced when we continue, so roll back one more
continue
}
segment = append(segment, p.Token())
}
p.block.Segments = append(p.block.Segments, segment)
if p.nesting > 0 {
return p.EOFErr()
}
return nil
}
// openCurlyBrace expects the current token to be an
// opening curly brace. This acts like an assertion
// because it returns an error if the token is not
// an opening curly brace. It does NOT advance the token.
func (p *parser) openCurlyBrace() error {
if p.Val() != "{" {
return p.SyntaxErr("{")
}
return nil
}
// closeCurlyBrace expects the current token to be
// a closing curly brace. This acts like an assertion
// because it returns an error if the token is not
// a closing curly brace. It does NOT advance the token.
func (p *parser) closeCurlyBrace() error {
if p.Val() != "}" {
return p.SyntaxErr("}")
}
return nil
}
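// For illustration (not from the original source), the two special
// block forms recognized by isNamedRoute and isSnippet below look like:
//
//	&(myroute) {
//		respond "OK"
//	}
//
//	(mysnippet) {
//		header X-Foo bar
//	}
//
// where "myroute" and "mysnippet" are arbitrary example names.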
func (p *parser) isNamedRoute() (bool, string) {
keys := p.block.Keys
// A named route block is a single key with parens, prefixed with &.
if len(keys) == 1 && strings.HasPrefix(keys[0].Text, "&(") && strings.HasSuffix(keys[0].Text, ")") {
return true, strings.TrimSuffix(keys[0].Text[2:], ")")
}
return false, ""
}
func (p *parser) isSnippet() (bool, string) {
keys := p.block.Keys
// A snippet block is a single key with parens. Nothing else qualifies.
if len(keys) == 1 && strings.HasPrefix(keys[0].Text, "(") && strings.HasSuffix(keys[0].Text, ")") {
return true, strings.TrimSuffix(keys[0].Text[1:], ")")
}
return false, ""
}
// read and store everything in a block for later replay.
func (p *parser) blockTokens(retainCurlies bool) ([]Token, error) {
// block must have curlies.
err := p.openCurlyBrace()
if err != nil {
return nil, err
}
nesting := 1 // count our own nesting
tokens := []Token{}
if retainCurlies {
tokens = append(tokens, p.Token())
}
for p.Next() {
if p.Val() == "}" {
nesting--
if nesting == 0 {
if retainCurlies {
tokens = append(tokens, p.Token())
}
break
}
}
if p.Val() == "{" {
nesting++
}
tokens = append(tokens, p.tokens[p.cursor])
}
// make sure we're matched up
if nesting != 0 {
return nil, p.SyntaxErr("}")
}
return tokens, nil
}
// ServerBlock associates any number of keys from the
// head of the server block with tokens, which are
// grouped by segments.
type ServerBlock struct {
HasBraces bool
Keys []Token
Segments []Segment
IsNamedRoute bool
}
func (sb ServerBlock) GetKeysText() []string {
res := []string{}
for _, k := range sb.Keys {
res = append(res, k.Text)
}
return res
}
// DispenseDirective returns a dispenser that contains
// all the tokens in the server block.
func (sb ServerBlock) DispenseDirective(dir string) *Dispenser {
var tokens []Token
for _, seg := range sb.Segments {
if len(seg) > 0 && seg[0].Text == dir {
tokens = append(tokens, seg...)
}
}
return NewDispenser(tokens)
}
// Segment is a list of tokens which begins with a directive
// and ends at the end of the directive (either at the end of
// the line, or at the end of a block it opens).
type Segment []Token
// Directive returns the directive name for the segment.
// The directive name is the text of the first token.
func (s Segment) Directive() string {
if len(s) > 0 {
return s[0].Text
}
return ""
}
// spanOpen and spanClose are used to bound spans that
// contain the name of an environment variable.
var (
spanOpen, spanClose = []byte{'{', '$'}, []byte{'}'}
envVarDefaultDelimiter = ":"
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyconfig
import (
"encoding/json"
"fmt"
"github.com/caddyserver/caddy/v2"
)
// Adapter is a type which can adapt a configuration to Caddy JSON.
// It returns the results and any warnings, or an error.
type Adapter interface {
Adapt(body []byte, options map[string]any) ([]byte, []Warning, error)
}
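// A minimal sketch of implementing and registering a custom adapter,
// as it might appear in another package (illustrative; the type and
// adapter name below are not part of Caddy):
//
//	type passthroughAdapter struct{}
//
//	func (passthroughAdapter) Adapt(body []byte, _ map[string]any) ([]byte, []caddyconfig.Warning, error) {
//		// assume body is already Caddy JSON and return it unchanged
//		return body, nil, nil
//	}
//
//	func init() {
//		caddyconfig.RegisterAdapter("passthrough", passthroughAdapter{})
//	}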
// Warning represents a warning or notice related to conversion.
type Warning struct {
File string `json:"file,omitempty"`
Line int `json:"line,omitempty"`
Directive string `json:"directive,omitempty"`
Message string `json:"message,omitempty"`
}
func (w Warning) String() string {
var directive string
if w.Directive != "" {
directive = fmt.Sprintf(" (%s)", w.Directive)
}
return fmt.Sprintf("%s:%d%s: %s", w.File, w.Line, directive, w.Message)
}
// JSON encodes val as JSON, returning it as a json.RawMessage. Any
// marshaling errors (which are highly unlikely with correct code)
// are converted to warnings. This is convenient when filling config
// structs that require a json.RawMessage, without having to worry
// about errors.
func JSON(val any, warnings *[]Warning) json.RawMessage {
b, err := json.Marshal(val)
if err != nil {
if warnings != nil {
*warnings = append(*warnings, Warning{Message: err.Error()})
}
return nil
}
return b
}
// JSONModuleObject is like JSON(), except it marshals val into a JSON object
// with an added key named fieldName with the value fieldVal. This is useful
// for encoding module values where the module name has to be described within
// the object by a certain key; for example, `"handler": "file_server"` for a
// file server HTTP handler (fieldName="handler" and fieldVal="file_server").
// The val parameter must encode into a map[string]any (i.e. it must be
// a struct or map). Any errors are converted into warnings.
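//
// A usage sketch (illustrative, not part of the original source;
// fileServerHandler is a hypothetical module value):
//
//	var warnings []Warning
//	raw := JSONModuleObject(fileServerHandler, "handler", "file_server", &warnings)
//	// raw now contains the handler's JSON with "handler":"file_server" added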
func JSONModuleObject(val any, fieldName, fieldVal string, warnings *[]Warning) json.RawMessage {
// encode to a JSON object first
enc, err := json.Marshal(val)
if err != nil {
if warnings != nil {
*warnings = append(*warnings, Warning{Message: err.Error()})
}
return nil
}
// then decode the object
var tmp map[string]any
err = json.Unmarshal(enc, &tmp)
if err != nil {
if warnings != nil {
*warnings = append(*warnings, Warning{Message: err.Error()})
}
return nil
}
// so we can easily add the module's field with its appointed value
tmp[fieldName] = fieldVal
// then re-marshal as JSON
result, err := json.Marshal(tmp)
if err != nil {
if warnings != nil {
*warnings = append(*warnings, Warning{Message: err.Error()})
}
return nil
}
return result
}
// RegisterAdapter registers a config adapter with the given name.
// This should usually be done at init-time. It panics if the
// adapter cannot be registered successfully.
func RegisterAdapter(name string, adapter Adapter) {
if _, ok := configAdapters[name]; ok {
panic(fmt.Errorf("%s: already registered", name))
}
configAdapters[name] = adapter
caddy.RegisterModule(adapterModule{name, adapter})
}
// GetAdapter returns the adapter with the given name,
// or nil if one with that name is not registered.
func GetAdapter(name string) Adapter {
return configAdapters[name]
}
// adapterModule is a wrapper type that can turn any config
// adapter into a Caddy module, which has the benefit of being
// counted with other modules, even though they do not
// technically extend the Caddy configuration structure.
// See caddyserver/caddy#3132.
type adapterModule struct {
name string
Adapter
}
func (am adapterModule) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: caddy.ModuleID("caddy.adapters." + am.name),
New: func() caddy.Module { return am },
}
}
var configAdapters = make(map[string]Adapter)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httpcaddyfile
import (
"fmt"
"net"
"net/netip"
"reflect"
"sort"
"strconv"
"strings"
"unicode"
"github.com/caddyserver/certmagic"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)
// mapAddressToServerBlocks returns a map of listener address to list of server
// blocks that will be served on that address. To do this, each server block is
// expanded so that each one is considered individually, although keys of a
// server block that share the same address stay grouped together so the config
// isn't repeated unnecessarily. For example, this Caddyfile:
//
// example.com {
// bind 127.0.0.1
// }
// www.example.com, example.net/path, localhost:9999 {
// bind 127.0.0.1 1.2.3.4
// }
//
// has two server blocks to start with. But expressed in this Caddyfile are
// actually 4 listener addresses: 127.0.0.1:443, 1.2.3.4:443, 127.0.0.1:9999,
// and 1.2.3.4:9999. This is because the bind directive is applied to each
// key of its server block (specifying the host part), and each key may have
// a different port. And we definitely need to be sure that a site which is
// bound to be served on a specific interface is not served on others just
// because that is more convenient: it would be a potential security risk
// if the difference between interfaces means private vs. public.
//
// So what this function does for the example above is iterate each server
// block, and for each server block, iterate its keys. For the first, it
// finds one key (example.com) and determines its listener address
// (127.0.0.1:443 - because of 'bind' and automatic HTTPS). It then adds
// the listener address to the map value returned by this function, with
// the first server block as one of its associations.
//
// It then iterates each key on the second server block and associates them
// with one or more listener addresses. Indeed, each key in this block has
// two listener addresses because of the 'bind' directive. Once we know
// which addresses serve which keys, we can create a new server block for
// each address containing the contents of the server block and only those
// specific keys of the server block which use that address.
//
// It is possible and even likely that some keys in the returned map have
// the exact same list of server blocks (i.e. they are identical). This
// happens when multiple hosts are declared with a 'bind' directive and
// the resulting listener addresses are not shared by any other server
// block (or the other server blocks are exactly identical in their token
// contents). This happens with our example above because 1.2.3.4:443
// and 1.2.3.4:9999 are used exclusively with the second server block. This
// repetition may be undesirable, so call consolidateAddrMappings() to map
// multiple addresses to the same lists of server blocks (a many:many mapping).
// (Doing this is essentially a map-reduce technique.)
func (st *ServerType) mapAddressToServerBlocks(originalServerBlocks []serverBlock,
options map[string]any,
) (map[string][]serverBlock, error) {
sbmap := make(map[string][]serverBlock)
for i, sblock := range originalServerBlocks {
// within a server block, we need to map all the listener addresses
// implied by the server block to the keys of the server block which
// will be served by them; this has the effect of treating each
// key of a server block as its own, but without having to repeat its
// contents in cases where multiple keys really can be served together
addrToKeys := make(map[string][]caddyfile.Token)
for j, key := range sblock.block.Keys {
// a key can have multiple listener addresses if there are multiple
// arguments to the 'bind' directive (although they will all have
// the same port, since the port is defined by the key or is implicit
// through automatic HTTPS)
addrs, err := st.listenerAddrsForServerBlockKey(sblock, key.Text, options)
if err != nil {
return nil, fmt.Errorf("server block %d, key %d (%s): determining listener address: %v", i, j, key.Text, err)
}
// associate this key with each listener address it is served on
for _, addr := range addrs {
addrToKeys[addr] = append(addrToKeys[addr], key)
}
}
// make a slice of the map keys so we can iterate in sorted order
addrs := make([]string, 0, len(addrToKeys))
for k := range addrToKeys {
addrs = append(addrs, k)
}
sort.Strings(addrs)
// now that we know which addresses serve which keys of this
// server block, we iterate that mapping and create a list of
// new server blocks for each address where the keys of the
// server block are only the ones which use the address; but
// the contents (tokens) are of course the same
for _, addr := range addrs {
keys := addrToKeys[addr]
// parse keys so that we only have to do it once
parsedKeys := make([]Address, 0, len(keys))
for _, key := range keys {
addr, err := ParseAddress(key.Text)
if err != nil {
return nil, fmt.Errorf("parsing key '%s': %v", key.Text, err)
}
parsedKeys = append(parsedKeys, addr.Normalize())
}
sbmap[addr] = append(sbmap[addr], serverBlock{
block: caddyfile.ServerBlock{
Keys: keys,
Segments: sblock.block.Segments,
},
pile: sblock.pile,
keys: parsedKeys,
})
}
}
return sbmap, nil
}
// consolidateAddrMappings eliminates repetition of identical server blocks in a mapping of
// single listener addresses to lists of server blocks. Since multiple addresses may serve
// identical sites (server block contents), this function turns a 1:many mapping into a
// many:many mapping. Server block contents (tokens) must be exactly identical so that
// reflect.DeepEqual returns true in order for the addresses to be combined. Identical
// entries are deleted from the addrToServerBlocks map. Essentially, each pairing (each
// association from multiple addresses to multiple server blocks; i.e. each element of
// the returned slice) becomes a server definition in the output JSON.
func (st *ServerType) consolidateAddrMappings(addrToServerBlocks map[string][]serverBlock) []sbAddrAssociation {
sbaddrs := make([]sbAddrAssociation, 0, len(addrToServerBlocks))
for addr, sblocks := range addrToServerBlocks {
// we start with knowing that at least this address
// maps to these server blocks
a := sbAddrAssociation{
addresses: []string{addr},
serverBlocks: sblocks,
}
// now find other addresses that map to identical
// server blocks and add them to our list of
// addresses, while removing them from the map
for otherAddr, otherSblocks := range addrToServerBlocks {
if addr == otherAddr {
continue
}
if reflect.DeepEqual(sblocks, otherSblocks) {
a.addresses = append(a.addresses, otherAddr)
delete(addrToServerBlocks, otherAddr)
}
}
sort.Strings(a.addresses)
sbaddrs = append(sbaddrs, a)
}
// sort them by their first address (we know there will always be at least one)
// to avoid problems with non-deterministic ordering (makes tests flaky)
sort.Slice(sbaddrs, func(i, j int) bool {
return sbaddrs[i].addresses[0] < sbaddrs[j].addresses[0]
})
return sbaddrs
}
// listenerAddrsForServerBlockKey essentially converts the Caddyfile
// site addresses to Caddy listener addresses for each server block.
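//
// For example (illustrative, not from the original source), the key
// "example.com" with `bind 127.0.0.1` resolves to a single listener on
// 127.0.0.1 at the HTTPS port (443 unless overridden by the https_port
// option), while a key like "http://example.com:8080" resolves to a
// listener on port 8080 for each configured bind host.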
func (st *ServerType) listenerAddrsForServerBlockKey(sblock serverBlock, key string,
options map[string]any,
) ([]string, error) {
addr, err := ParseAddress(key)
if err != nil {
return nil, fmt.Errorf("parsing key: %v", err)
}
addr = addr.Normalize()
switch addr.Scheme {
case "wss":
return nil, fmt.Errorf("the scheme wss:// is only supported in browsers; use https:// instead")
case "ws":
return nil, fmt.Errorf("the scheme ws:// is only supported in browsers; use http:// instead")
case "https", "http", "":
// these are the valid schemes; nothing else to do
default:
return nil, fmt.Errorf("unsupported URL scheme %s://", addr.Scheme)
}
// figure out the HTTP and HTTPS ports; either
// use defaults, or override with user config
httpPort, httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPPort), strconv.Itoa(caddyhttp.DefaultHTTPSPort)
if hport, ok := options["http_port"]; ok {
httpPort = strconv.Itoa(hport.(int))
}
if hsport, ok := options["https_port"]; ok {
httpsPort = strconv.Itoa(hsport.(int))
}
// default port is the HTTPS port
lnPort := httpsPort
if addr.Port != "" {
// port explicitly defined
lnPort = addr.Port
} else if addr.Scheme == "http" {
// port inferred from scheme
lnPort = httpPort
}
// error if scheme and port combination violate convention
if (addr.Scheme == "http" && lnPort == httpsPort) || (addr.Scheme == "https" && lnPort == httpPort) {
return nil, fmt.Errorf("[%s] scheme and port violate convention", key)
}
// the bind directive specifies hosts (and potentially network), but is optional
lnHosts := make([]string, 0, len(sblock.pile["bind"]))
for _, cfgVal := range sblock.pile["bind"] {
lnHosts = append(lnHosts, cfgVal.Value.([]string)...)
}
if len(lnHosts) == 0 {
if defaultBind, ok := options["default_bind"].([]string); ok {
lnHosts = defaultBind
} else {
lnHosts = []string{""}
}
}
// use a map to prevent duplication
listeners := make(map[string]struct{})
for _, lnHost := range lnHosts {
// normally we would simply append the port,
// but if lnHost is IPv6, we need to ensure it
// is enclosed in [ ]; net.JoinHostPort does
// this for us, but lnHost might also have a
// network type in front (e.g. "tcp/") leading
// to "[tcp/::1]" which causes parsing failures
// later; what we need is "tcp/[::1]", so we have
// to split the network and host, then re-combine
network, host, ok := strings.Cut(lnHost, "/")
if !ok {
host = network
network = ""
}
host = strings.Trim(host, "[]") // IPv6
networkAddr := caddy.JoinNetworkAddress(network, host, lnPort)
addr, err := caddy.ParseNetworkAddress(networkAddr)
if err != nil {
return nil, fmt.Errorf("parsing network address: %v", err)
}
listeners[addr.String()] = struct{}{}
}
// now turn map into list
listenersList := make([]string, 0, len(listeners))
for lnStr := range listeners {
listenersList = append(listenersList, lnStr)
}
sort.Strings(listenersList)
return listenersList, nil
}
// Address represents a site address. It contains
// the original input value, and the component
// parts of an address. The component parts may be
// updated to the correct values as setup proceeds,
// but the original value should never be changed.
//
// The Host field must be in a normalized form.
type Address struct {
Original, Scheme, Host, Port, Path string
}
// ParseAddress parses an address string into a structured format with separate
// scheme, host, port, and path portions, as well as the original input string.
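//
// For example (illustrative, not from the original source):
//
//	ParseAddress("https://example.com:8080/foo")
//	// -> Address{Original: "https://example.com:8080/foo", Scheme: "https",
//	//            Host: "example.com", Port: "8080", Path: "/foo"}
//
//	ParseAddress("localhost")
//	// -> Address{Original: "localhost", Host: "localhost"}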
func ParseAddress(str string) (Address, error) {
const maxLen = 4096
if len(str) > maxLen {
str = str[:maxLen]
}
remaining := strings.TrimSpace(str)
a := Address{Original: remaining}
// extract scheme
splitScheme := strings.SplitN(remaining, "://", 2)
switch len(splitScheme) {
case 0:
return a, nil
case 1:
remaining = splitScheme[0]
case 2:
a.Scheme = splitScheme[0]
remaining = splitScheme[1]
}
// extract host and port
hostSplit := strings.SplitN(remaining, "/", 2)
if len(hostSplit) > 0 {
host, port, err := net.SplitHostPort(hostSplit[0])
if err != nil {
host, port, err = net.SplitHostPort(hostSplit[0] + ":")
if err != nil {
host = hostSplit[0]
}
}
a.Host = host
a.Port = port
}
if len(hostSplit) == 2 {
// all that remains is the path
a.Path = "/" + hostSplit[1]
}
// make sure port is valid
if a.Port != "" {
if portNum, err := strconv.Atoi(a.Port); err != nil {
return Address{}, fmt.Errorf("invalid port '%s': %v", a.Port, err)
} else if portNum < 0 || portNum > 65535 {
return Address{}, fmt.Errorf("port %d is out of range", portNum)
}
}
return a, nil
}
// String returns a human-readable form of a. It will
// be a cleaned-up and filled-out URL string.
func (a Address) String() string {
if a.Host == "" && a.Port == "" {
return ""
}
scheme := a.Scheme
if scheme == "" {
if a.Port == strconv.Itoa(certmagic.HTTPSPort) {
scheme = "https"
} else {
scheme = "http"
}
}
s := scheme
if s != "" {
s += "://"
}
if a.Port != "" &&
((scheme == "https" && a.Port != strconv.Itoa(caddyhttp.DefaultHTTPSPort)) ||
(scheme == "http" && a.Port != strconv.Itoa(caddyhttp.DefaultHTTPPort))) {
s += net.JoinHostPort(a.Host, a.Port)
} else {
s += a.Host
}
if a.Path != "" {
s += a.Path
}
return s
}
// Normalize returns a normalized version of a.
func (a Address) Normalize() Address {
path := a.Path
// ensure host is normalized if it's an IP address
host := strings.TrimSpace(a.Host)
if ip, err := netip.ParseAddr(host); err == nil {
if ip.Is6() && !ip.Is4() && !ip.Is4In6() {
host = ip.String()
}
}
return Address{
Original: a.Original,
Scheme: lowerExceptPlaceholders(a.Scheme),
Host: lowerExceptPlaceholders(host),
Port: a.Port,
Path: path,
}
}
// lowerExceptPlaceholders lowercases s except within
// placeholders (substrings in non-escaped '{ }' spans).
// See https://github.com/caddyserver/caddy/issues/3264
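//
// For example (illustrative, not from the original source),
// "EXAMPLE.com/{path.To.Var}" becomes "example.com/{path.To.Var}":
// the text outside the braces is lowercased while the placeholder
// contents are preserved.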
func lowerExceptPlaceholders(s string) string {
var sb strings.Builder
var escaped, inPlaceholder bool
for _, ch := range s {
if ch == '\\' && !escaped {
escaped = true
sb.WriteRune(ch)
continue
}
if ch == '{' && !escaped {
inPlaceholder = true
}
if ch == '}' && inPlaceholder && !escaped {
inPlaceholder = false
}
if inPlaceholder {
sb.WriteRune(ch)
} else {
sb.WriteRune(unicode.ToLower(ch))
}
escaped = false
}
return sb.String()
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build gofuzz
package httpcaddyfile
func FuzzParseAddress(data []byte) int {
addr, err := ParseAddress(string(data))
if err != nil {
if addr == (Address{}) {
return 1
}
return 0
}
return 1
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httpcaddyfile
import (
"fmt"
"html"
"net/http"
"reflect"
"strconv"
"strings"
"time"
"github.com/caddyserver/certmagic"
"github.com/mholt/acmez/v2/acme"
"go.uber.org/zap/zapcore"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
"github.com/caddyserver/caddy/v2/modules/caddytls"
)
func init() {
RegisterDirective("bind", parseBind)
RegisterDirective("tls", parseTLS)
RegisterHandlerDirective("fs", parseFilesystem)
RegisterDirective("root", parseRoot)
RegisterHandlerDirective("vars", parseVars)
RegisterHandlerDirective("redir", parseRedir)
RegisterHandlerDirective("respond", parseRespond)
RegisterHandlerDirective("abort", parseAbort)
RegisterHandlerDirective("error", parseError)
RegisterHandlerDirective("route", parseRoute)
RegisterHandlerDirective("handle", parseHandle)
RegisterDirective("handle_errors", parseHandleErrors)
RegisterHandlerDirective("invoke", parseInvoke)
RegisterDirective("log", parseLog)
RegisterHandlerDirective("skip_log", parseLogSkip)
RegisterHandlerDirective("log_skip", parseLogSkip)
RegisterHandlerDirective("log_name", parseLogName)
}
// parseBind parses the bind directive. Syntax:
//
// bind <addresses...>
func parseBind(h Helper) ([]ConfigValue, error) {
h.Next() // consume directive name
return []ConfigValue{{Class: "bind", Value: h.RemainingArgs()}}, nil
}
// parseTLS parses the tls directive. Syntax:
//
// tls [<email>|internal]|[<cert_file> <key_file>] {
// protocols <min> [<max>]
// ciphers <cipher_suites...>
// curves <curves...>
// client_auth {
// mode [request|require|verify_if_given|require_and_verify]
// trust_pool <module_name> [...]
// trusted_leaf_cert <base64_der>
// trusted_leaf_cert_file <filename>
// }
// alpn <values...>
// load <paths...>
// ca <acme_ca_endpoint>
// ca_root <pem_file>
// key_type [ed25519|p256|p384|rsa2048|rsa4096]
// dns <provider_name> [...]
// propagation_delay <duration>
// propagation_timeout <duration>
// resolvers <dns_servers...>
// dns_ttl <duration>
// dns_challenge_override_domain <domain>
// on_demand
// reuse_private_keys
// eab <key_id> <mac_key>
// issuer <module_name> [...]
// get_certificate <module_name> [...]
// insecure_secrets_log <log_file>
// }
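//
// For illustration (not from the original source), common forms include:
//
//	tls internal
//	tls admin@example.com
//	tls cert.pem key.pem
//	tls {
//		dns <provider_name>
//		propagation_timeout 2m
//	}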
func parseTLS(h Helper) ([]ConfigValue, error) {
h.Next() // consume directive name
cp := new(caddytls.ConnectionPolicy)
var fileLoader caddytls.FileLoader
var folderLoader caddytls.FolderLoader
var certSelector caddytls.CustomCertSelectionPolicy
var acmeIssuer *caddytls.ACMEIssuer
var keyType string
var internalIssuer *caddytls.InternalIssuer
var issuers []certmagic.Issuer
var certManagers []certmagic.Manager
var onDemand bool
var reusePrivateKeys bool
firstLine := h.RemainingArgs()
switch len(firstLine) {
case 0:
case 1:
if firstLine[0] == "internal" {
internalIssuer = new(caddytls.InternalIssuer)
} else if !strings.Contains(firstLine[0], "@") {
return nil, h.Err("single argument must either be 'internal' or an email address")
} else {
acmeIssuer = &caddytls.ACMEIssuer{
Email: firstLine[0],
}
}
case 2:
// file certificate loader
certFilename := firstLine[0]
keyFilename := firstLine[1]
// tag this certificate so if multiple certs match, specifically
// this one that the user has provided will be used, see #2588:
// https://github.com/caddyserver/caddy/issues/2588 ... but we
// must be careful about how we do this; being careless will
// lead to failed handshakes
//
// we need to remember which cert files we've seen, since we
// must load each cert only once; otherwise, they each get a
// different tag... since a cert loaded twice has the same
// bytes, it will overwrite the first one in the cache, and
// only the last cert (and its tag) will survive, so any conn
// policy that is looking for any tag other than the last one
// to be loaded won't find it, and TLS handshakes will fail
// (see end of issue #3004)
//
// tlsCertTags maps certificate filenames to their tag.
// This is used to remember which tag is used for each
// certificate file, since we need to avoid loading
// the same certificate file more than once, which would
// overwrite previous tags
tlsCertTags, ok := h.State["tlsCertTags"].(map[string]string)
if !ok {
tlsCertTags = make(map[string]string)
h.State["tlsCertTags"] = tlsCertTags
}
tag, ok := tlsCertTags[certFilename]
if !ok {
// haven't seen this cert file yet, let's give it a tag
// and add a loader for it
tag = fmt.Sprintf("cert%d", len(tlsCertTags))
fileLoader = append(fileLoader, caddytls.CertKeyFilePair{
Certificate: certFilename,
Key: keyFilename,
Tags: []string{tag},
})
// remember this for next time we see this cert file
tlsCertTags[certFilename] = tag
}
certSelector.AnyTag = append(certSelector.AnyTag, tag)
default:
return nil, h.ArgErr()
}
var hasBlock bool
for h.NextBlock(0) {
hasBlock = true
switch h.Val() {
case "protocols":
args := h.RemainingArgs()
if len(args) == 0 {
return nil, h.Errf("protocols requires one or two arguments")
}
if len(args) > 0 {
if _, ok := caddytls.SupportedProtocols[args[0]]; !ok {
return nil, h.Errf("wrong protocol name or protocol not supported: '%s'", args[0])
}
cp.ProtocolMin = args[0]
}
if len(args) > 1 {
if _, ok := caddytls.SupportedProtocols[args[1]]; !ok {
return nil, h.Errf("wrong protocol name or protocol not supported: '%s'", args[1])
}
cp.ProtocolMax = args[1]
}
case "ciphers":
for h.NextArg() {
if !caddytls.CipherSuiteNameSupported(h.Val()) {
return nil, h.Errf("wrong cipher suite name or cipher suite not supported: '%s'", h.Val())
}
cp.CipherSuites = append(cp.CipherSuites, h.Val())
}
case "curves":
for h.NextArg() {
if _, ok := caddytls.SupportedCurves[h.Val()]; !ok {
return nil, h.Errf("Wrong curve name or curve not supported: '%s'", h.Val())
}
cp.Curves = append(cp.Curves, h.Val())
}
case "client_auth":
cp.ClientAuthentication = &caddytls.ClientAuthentication{}
if err := cp.ClientAuthentication.UnmarshalCaddyfile(h.NewFromNextSegment()); err != nil {
return nil, err
}
case "alpn":
args := h.RemainingArgs()
if len(args) == 0 {
return nil, h.ArgErr()
}
cp.ALPN = args
case "load":
folderLoader = append(folderLoader, h.RemainingArgs()...)
case "ca":
arg := h.RemainingArgs()
if len(arg) != 1 {
return nil, h.ArgErr()
}
if acmeIssuer == nil {
acmeIssuer = new(caddytls.ACMEIssuer)
}
acmeIssuer.CA = arg[0]
case "key_type":
arg := h.RemainingArgs()
if len(arg) != 1 {
return nil, h.ArgErr()
}
keyType = arg[0]
case "eab":
arg := h.RemainingArgs()
if len(arg) != 2 {
return nil, h.ArgErr()
}
if acmeIssuer == nil {
acmeIssuer = new(caddytls.ACMEIssuer)
}
acmeIssuer.ExternalAccount = &acme.EAB{
KeyID: arg[0],
MACKey: arg[1],
}
case "issuer":
if !h.NextArg() {
return nil, h.ArgErr()
}
modName := h.Val()
modID := "tls.issuance." + modName
unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID)
if err != nil {
return nil, err
}
issuer, ok := unm.(certmagic.Issuer)
if !ok {
return nil, h.Errf("module %s (%T) is not a certmagic.Issuer", modID, unm)
}
issuers = append(issuers, issuer)
case "get_certificate":
if !h.NextArg() {
return nil, h.ArgErr()
}
modName := h.Val()
modID := "tls.get_certificate." + modName
unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID)
if err != nil {
return nil, err
}
certManager, ok := unm.(certmagic.Manager)
if !ok {
return nil, h.Errf("module %s (%T) is not a certmagic.CertificateManager", modID, unm)
}
certManagers = append(certManagers, certManager)
case "dns":
if !h.NextArg() {
return nil, h.ArgErr()
}
provName := h.Val()
if acmeIssuer == nil {
acmeIssuer = new(caddytls.ACMEIssuer)
}
if acmeIssuer.Challenges == nil {
acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
}
if acmeIssuer.Challenges.DNS == nil {
acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
}
modID := "dns.providers." + provName
unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID)
if err != nil {
return nil, err
}
acmeIssuer.Challenges.DNS.ProviderRaw = caddyconfig.JSONModuleObject(unm, "name", provName, h.warnings)
case "resolvers":
args := h.RemainingArgs()
if len(args) == 0 {
return nil, h.ArgErr()
}
if acmeIssuer == nil {
acmeIssuer = new(caddytls.ACMEIssuer)
}
if acmeIssuer.Challenges == nil {
acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
}
if acmeIssuer.Challenges.DNS == nil {
acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
}
acmeIssuer.Challenges.DNS.Resolvers = args
case "propagation_delay":
arg := h.RemainingArgs()
if len(arg) != 1 {
return nil, h.ArgErr()
}
delayStr := arg[0]
delay, err := caddy.ParseDuration(delayStr)
if err != nil {
return nil, h.Errf("invalid propagation_delay duration %s: %v", delayStr, err)
}
if acmeIssuer == nil {
acmeIssuer = new(caddytls.ACMEIssuer)
}
if acmeIssuer.Challenges == nil {
acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
}
if acmeIssuer.Challenges.DNS == nil {
acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
}
acmeIssuer.Challenges.DNS.PropagationDelay = caddy.Duration(delay)
case "propagation_timeout":
arg := h.RemainingArgs()
if len(arg) != 1 {
return nil, h.ArgErr()
}
timeoutStr := arg[0]
var timeout time.Duration
if timeoutStr == "-1" {
timeout = time.Duration(-1)
} else {
var err error
timeout, err = caddy.ParseDuration(timeoutStr)
if err != nil {
return nil, h.Errf("invalid propagation_timeout duration %s: %v", timeoutStr, err)
}
}
if acmeIssuer == nil {
acmeIssuer = new(caddytls.ACMEIssuer)
}
if acmeIssuer.Challenges == nil {
acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
}
if acmeIssuer.Challenges.DNS == nil {
acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
}
acmeIssuer.Challenges.DNS.PropagationTimeout = caddy.Duration(timeout)
case "dns_ttl":
arg := h.RemainingArgs()
if len(arg) != 1 {
return nil, h.ArgErr()
}
ttlStr := arg[0]
ttl, err := caddy.ParseDuration(ttlStr)
if err != nil {
return nil, h.Errf("invalid dns_ttl duration %s: %v", ttlStr, err)
}
if acmeIssuer == nil {
acmeIssuer = new(caddytls.ACMEIssuer)
}
if acmeIssuer.Challenges == nil {
acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
}
if acmeIssuer.Challenges.DNS == nil {
acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
}
acmeIssuer.Challenges.DNS.TTL = caddy.Duration(ttl)
case "dns_challenge_override_domain":
arg := h.RemainingArgs()
if len(arg) != 1 {
return nil, h.ArgErr()
}
if acmeIssuer == nil {
acmeIssuer = new(caddytls.ACMEIssuer)
}
if acmeIssuer.Challenges == nil {
acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
}
if acmeIssuer.Challenges.DNS == nil {
acmeIssuer.Challenges.DNS = new(caddytls.DNSChallengeConfig)
}
acmeIssuer.Challenges.DNS.OverrideDomain = arg[0]
case "ca_root":
arg := h.RemainingArgs()
if len(arg) != 1 {
return nil, h.ArgErr()
}
if acmeIssuer == nil {
acmeIssuer = new(caddytls.ACMEIssuer)
}
acmeIssuer.TrustedRootsPEMFiles = append(acmeIssuer.TrustedRootsPEMFiles, arg[0])
case "on_demand":
if h.NextArg() {
return nil, h.ArgErr()
}
onDemand = true
case "reuse_private_keys":
if h.NextArg() {
return nil, h.ArgErr()
}
reusePrivateKeys = true
case "insecure_secrets_log":
if !h.NextArg() {
return nil, h.ArgErr()
}
cp.InsecureSecretsLog = h.Val()
default:
return nil, h.Errf("unknown subdirective: %s", h.Val())
}
}
// a naked tls directive is not allowed
if len(firstLine) == 0 && !hasBlock {
return nil, h.ArgErr()
}
// begin building the final config values
configVals := []ConfigValue{}
// certificate loaders
if len(fileLoader) > 0 {
configVals = append(configVals, ConfigValue{
Class: "tls.cert_loader",
Value: fileLoader,
})
}
if len(folderLoader) > 0 {
configVals = append(configVals, ConfigValue{
Class: "tls.cert_loader",
Value: folderLoader,
})
}
// some tls subdirectives are shortcuts that implicitly configure issuers, and the
// user can also configure issuers explicitly using the issuer subdirective; the
// logic to support both would likely be complex, or at least unintuitive
if len(issuers) > 0 && (acmeIssuer != nil || internalIssuer != nil) {
return nil, h.Err("cannot mix issuer subdirective (explicit issuers) with other issuer-specific subdirectives (implicit issuers)")
}
if acmeIssuer != nil && internalIssuer != nil {
return nil, h.Err("cannot create both ACME and internal certificate issuers")
}
// now we should either have: explicitly-created issuers, or an implicitly-created
// ACME or internal issuer, or no issuers at all
switch {
case len(issuers) > 0:
for _, issuer := range issuers {
configVals = append(configVals, ConfigValue{
Class: "tls.cert_issuer",
Value: issuer,
})
}
case acmeIssuer != nil:
// implicit ACME issuers (from various subdirectives) - use defaults; there might be more than one
defaultIssuers := caddytls.DefaultIssuers(acmeIssuer.Email)
// if an ACME CA endpoint was set, the user expects to use that specific one,
// not any others that may be defaults, so replace all defaults with that ACME CA
if acmeIssuer.CA != "" {
defaultIssuers = []certmagic.Issuer{acmeIssuer}
}
for _, issuer := range defaultIssuers {
// apply settings from the implicitly-configured ACMEIssuer to any
// default ACMEIssuers, but preserve each default issuer's CA endpoint,
// because, for example, if you configure the DNS challenge, it should
// apply to any of the default ACMEIssuers, but you don't want to trample
// out their unique CA endpoints
if iss, ok := issuer.(*caddytls.ACMEIssuer); ok && iss != nil {
acmeCopy := *acmeIssuer
acmeCopy.CA = iss.CA
issuer = &acmeCopy
}
configVals = append(configVals, ConfigValue{
Class: "tls.cert_issuer",
Value: issuer,
})
}
case internalIssuer != nil:
configVals = append(configVals, ConfigValue{
Class: "tls.cert_issuer",
Value: internalIssuer,
})
}
// certificate key type
if keyType != "" {
configVals = append(configVals, ConfigValue{
Class: "tls.key_type",
Value: keyType,
})
}
// on-demand TLS
if onDemand {
configVals = append(configVals, ConfigValue{
Class: "tls.on_demand",
Value: true,
})
}
for _, certManager := range certManagers {
configVals = append(configVals, ConfigValue{
Class: "tls.cert_manager",
Value: certManager,
})
}
// reuse private keys for TLS certificates
if reusePrivateKeys {
configVals = append(configVals, ConfigValue{
Class: "tls.reuse_private_keys",
Value: true,
})
}
// custom certificate selection
if len(certSelector.AnyTag) > 0 {
cp.CertSelection = &certSelector
}
// connection policy -- always add one, to ensure that TLS
// is enabled, because this directive was used (this is
// needed, for instance, when a site block has a key of
// just ":5000" - i.e. no hostname, and only on-demand TLS
// is enabled)
configVals = append(configVals, ConfigValue{
Class: "tls.connection_policy",
Value: cp,
})
return configVals, nil
}
// parseRoot parses the root directive. Syntax:
//
// root [<matcher>] <path>
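//
// For example, a site block might set its root like this (the path is
// purely illustrative):
//
//	root * /var/www/html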
func parseRoot(h Helper) ([]ConfigValue, error) {
h.Next() // consume directive name
// count the tokens to determine what to do
argsCount := h.CountRemainingArgs()
if argsCount == 0 {
return nil, h.Errf("too few arguments; must have at least a root path")
}
if argsCount > 2 {
return nil, h.Errf("too many arguments; should only be a matcher and a path")
}
// with only one arg, assume it's a root path with no matcher token
if argsCount == 1 {
if !h.NextArg() {
return nil, h.ArgErr()
}
return h.NewRoute(nil, caddyhttp.VarsMiddleware{"root": h.Val()}), nil
}
// parse the matcher token into a matcher set
userMatcherSet, err := h.ExtractMatcherSet()
if err != nil {
return nil, err
}
h.Next() // consume directive name again, matcher parsing does a reset
// advance to the root path
if !h.NextArg() {
return nil, h.ArgErr()
}
// make the route with the matcher
return h.NewRoute(userMatcherSet, caddyhttp.VarsMiddleware{"root": h.Val()}), nil
}
// parseFilesystem parses the fs directive. Syntax:
//
// fs <filesystem>
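//
// For example, assuming a filesystem named "my_fs" was declared elsewhere
// (e.g. via the global `filesystem` option; the name here is hypothetical):
//
//	fs my_fs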
func parseFilesystem(h Helper) (caddyhttp.MiddlewareHandler, error) {
h.Next() // consume directive name
if !h.NextArg() {
return nil, h.ArgErr()
}
if h.NextArg() {
return nil, h.ArgErr()
}
return caddyhttp.VarsMiddleware{"fs": h.Val()}, nil
}
// parseVars parses the vars directive. See its UnmarshalCaddyfile method for syntax.
func parseVars(h Helper) (caddyhttp.MiddlewareHandler, error) {
v := new(caddyhttp.VarsMiddleware)
err := v.UnmarshalCaddyfile(h.Dispenser)
return v, err
}
// parseRedir parses the redir directive. Syntax:
//
// redir [<matcher>] <to> [<code>]
//
// <code> can be "permanent" for 301, "temporary" for 302 (default),
// a placeholder, or any number in the 3xx range or 401. The special
// code "html" can be used to redirect only browser clients (will
// respond with HTTP 200 and no Location header; redirect is performed
// with JS and a meta tag).
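//
// For example (the target is illustrative):
//
//	redir https://example.com{uri} permanent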
func parseRedir(h Helper) (caddyhttp.MiddlewareHandler, error) {
h.Next() // consume directive name
if !h.NextArg() {
return nil, h.ArgErr()
}
to := h.Val()
var code string
if h.NextArg() {
code = h.Val()
}
var body string
var hdr http.Header
switch code {
case "permanent":
code = "301"
case "temporary", "":
code = "302"
case "html":
// Script tag comes first since that will better imitate a redirect in the browser's
// history, but the meta tag is a fallback for most non-JS clients.
const metaRedir = `<!DOCTYPE html>
<html>
<head>
<title>Redirecting...</title>
<script>window.location.replace("%s");</script>
<meta http-equiv="refresh" content="0; URL='%s'">
</head>
<body>Redirecting to <a href="%s">%s</a>...</body>
</html>
`
safeTo := html.EscapeString(to)
body = fmt.Sprintf(metaRedir, safeTo, safeTo, safeTo, safeTo)
hdr = http.Header{"Content-Type": []string{"text/html; charset=utf-8"}}
code = "200" // don't redirect non-browser clients
default:
// Allow placeholders for the code
if strings.HasPrefix(code, "{") {
break
}
// Try to validate as an integer otherwise
codeInt, err := strconv.Atoi(code)
if err != nil {
return nil, h.Errf("Not a supported redir code type or not valid integer: '%s'", code)
}
// Sometimes, a 401 with Location header is desirable because
// requests made with XHR will "eat" the 3xx redirect; so if
// the intent was to redirect to an auth page, a 3xx won't
// work. Responding with 401 allows JS code to read the
// Location header and do a window.location redirect manually.
// see https://stackoverflow.com/a/2573589/846934
// see https://github.com/oauth2-proxy/oauth2-proxy/issues/1522
if codeInt < 300 || (codeInt > 399 && codeInt != 401) {
return nil, h.Errf("Redir code not in the 3xx range or 401: '%v'", codeInt)
}
}
// don't redirect non-browser clients
if code != "200" {
hdr = http.Header{"Location": []string{to}}
}
return caddyhttp.StaticResponse{
StatusCode: caddyhttp.WeakString(code),
Headers: hdr,
Body: body,
}, nil
}
// parseRespond parses the respond directive.
func parseRespond(h Helper) (caddyhttp.MiddlewareHandler, error) {
sr := new(caddyhttp.StaticResponse)
err := sr.UnmarshalCaddyfile(h.Dispenser)
return sr, err
}
// parseAbort parses the abort directive.
func parseAbort(h Helper) (caddyhttp.MiddlewareHandler, error) {
h.Next() // consume directive
for h.Next() || h.NextBlock(0) {
return nil, h.ArgErr()
}
return &caddyhttp.StaticResponse{Abort: true}, nil
}
// parseError parses the error directive.
func parseError(h Helper) (caddyhttp.MiddlewareHandler, error) {
se := new(caddyhttp.StaticError)
err := se.UnmarshalCaddyfile(h.Dispenser)
return se, err
}
// parseRoute parses the route directive.
func parseRoute(h Helper) (caddyhttp.MiddlewareHandler, error) {
allResults, err := parseSegmentAsConfig(h)
if err != nil {
return nil, err
}
for _, result := range allResults {
switch result.Value.(type) {
case caddyhttp.Route, caddyhttp.Subroute:
default:
return nil, h.Errf("%s directive returned something other than an HTTP route or subroute: %#v (only handler directives can be used in routes)", result.directive, result.Value)
}
}
return buildSubroute(allResults, h.groupCounter, false)
}
func parseHandle(h Helper) (caddyhttp.MiddlewareHandler, error) {
return ParseSegmentAsSubroute(h)
}
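// parseHandleErrors parses the handle_errors directive. Optional status
// arguments (single codes like "404" or class wildcards like "5xx") are
// compiled into an expression matcher on {http.error.status_code} that is
// applied to every route of the resulting error subroute. For example
// (the response body is illustrative):
//
//	handle_errors 404 5xx {
//		respond "something went wrong"
//	}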
func parseHandleErrors(h Helper) ([]ConfigValue, error) {
h.Next() // consume directive name
expression := ""
args := h.RemainingArgs()
if len(args) > 0 {
codes := []string{}
for _, val := range args {
if len(val) != 3 {
return nil, h.Errf("bad status value '%s'", val)
}
if strings.HasSuffix(val, "xx") {
val = val[:1]
_, err := strconv.Atoi(val)
if err != nil {
return nil, h.Errf("bad status value '%s': %v", val, err)
}
if expression != "" {
expression += " || "
}
expression += fmt.Sprintf("{http.error.status_code} >= %s00 && {http.error.status_code} <= %s99", val, val)
continue
}
_, err := strconv.Atoi(val)
if err != nil {
return nil, h.Errf("bad status value '%s': %v", val, err)
}
codes = append(codes, val)
}
if len(codes) > 0 {
if expression != "" {
expression += " || "
}
expression += "{http.error.status_code} in [" + strings.Join(codes, ", ") + "]"
}
// Reset cursor position to get ready for ParseSegmentAsSubroute
h.Reset()
h.Next()
h.RemainingArgs()
h.Prev()
} else {
// If no arguments present reset the cursor position to get ready for ParseSegmentAsSubroute
h.Prev()
}
handler, err := ParseSegmentAsSubroute(h)
if err != nil {
return nil, err
}
subroute, ok := handler.(*caddyhttp.Subroute)
if !ok {
return nil, h.Errf("segment was not parsed as a subroute")
}
if expression != "" {
statusMatcher := caddy.ModuleMap{
"expression": h.JSON(caddyhttp.MatchExpression{Expr: expression}),
}
for i := range subroute.Routes {
subroute.Routes[i].MatcherSetsRaw = []caddy.ModuleMap{statusMatcher}
}
}
return []ConfigValue{
{
Class: "error_route",
Value: subroute,
},
}, nil
}
// parseInvoke parses the invoke directive.
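// Its single argument names the route to invoke; the named route is expected
// to be defined at the top level of the Caddyfile (e.g. with the `&(name)`
// block syntax). For example, assuming a named route "app" exists:
//
//	invoke app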
func parseInvoke(h Helper) (caddyhttp.MiddlewareHandler, error) {
h.Next() // consume directive
if !h.NextArg() {
return nil, h.ArgErr()
}
for h.Next() || h.NextBlock(0) {
return nil, h.ArgErr()
}
// remember that we're invoking this name
// to populate the server with these named routes
if h.State[namedRouteKey] == nil {
h.State[namedRouteKey] = map[string]struct{}{}
}
h.State[namedRouteKey].(map[string]struct{})[h.Val()] = struct{}{}
// return the handler
return &caddyhttp.Invoke{Name: h.Val()}, nil
}
// parseLog parses the log directive. Syntax:
//
// log <logger_name> {
// hostnames <hostnames...>
// output <writer_module> ...
// core <core_module> ...
// format <encoder_module> ...
// level <level>
// }
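//
// For example (the output path and level are illustrative):
//
//	log {
//		output file /var/log/caddy/access.log
//		format json
//		level INFO
//	}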
func parseLog(h Helper) ([]ConfigValue, error) {
return parseLogHelper(h, nil)
}
// parseLogHelper is used both for the parseLog directive within Server Blocks,
// as well as the global "log" option for configuring loggers at the global
// level. The globalLogNames parameter is non-nil only when parsing the global
// option, and is used to distinguish any differing logic between the two.
func parseLogHelper(h Helper, globalLogNames map[string]struct{}) ([]ConfigValue, error) {
h.Next() // consume option name
// When the globalLogNames parameter is passed in, we make
// modifications to the parsing behavior.
parseAsGlobalOption := globalLogNames != nil
var configValues []ConfigValue
// The logic below expects that a name is always present when a
// global option is being parsed; for access logs, an optional
// name override is supported instead.
var logName string
if parseAsGlobalOption {
if h.NextArg() {
logName = h.Val()
// Only a single argument is supported.
if h.NextArg() {
return nil, h.ArgErr()
}
} else {
// If there is no log name specified, we
// reference the default logger. See the
// setupNewDefault function in the logging
// package for where this is configured.
logName = caddy.DefaultLoggerName
}
// Verify this name is unused.
_, used := globalLogNames[logName]
if used {
return nil, h.Err("duplicate global log option for: " + logName)
}
globalLogNames[logName] = struct{}{}
} else {
// An optional override of the logger name can be provided;
// otherwise a default will be used, like "log0", "log1", etc.
if h.NextArg() {
logName = h.Val()
// Only a single argument is supported.
if h.NextArg() {
return nil, h.ArgErr()
}
}
}
cl := new(caddy.CustomLog)
// allow overriding the current site block's hostnames for this logger;
// this is useful for setting up loggers per subdomain in a site block
// with a wildcard domain
customHostnames := []string{}
noHostname := false
for h.NextBlock(0) {
switch h.Val() {
case "hostnames":
if parseAsGlobalOption {
return nil, h.Err("hostnames is not allowed in the log global options")
}
args := h.RemainingArgs()
if len(args) == 0 {
return nil, h.ArgErr()
}
customHostnames = append(customHostnames, args...)
case "output":
if !h.NextArg() {
return nil, h.ArgErr()
}
moduleName := h.Val()
// can't use the usual caddyfile.Unmarshaler flow with the
// standard writers because they are in the caddy package
// (because they are the default) and implementing that
// interface there would unfortunately create a circular import
var wo caddy.WriterOpener
switch moduleName {
case "stdout":
wo = caddy.StdoutWriter{}
case "stderr":
wo = caddy.StderrWriter{}
case "discard":
wo = caddy.DiscardWriter{}
default:
modID := "caddy.logging.writers." + moduleName
unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID)
if err != nil {
return nil, err
}
var ok bool
wo, ok = unm.(caddy.WriterOpener)
if !ok {
return nil, h.Errf("module %s (%T) is not a WriterOpener", modID, unm)
}
}
cl.WriterRaw = caddyconfig.JSONModuleObject(wo, "output", moduleName, h.warnings)
case "core":
if !h.NextArg() {
return nil, h.ArgErr()
}
moduleName := h.Val()
moduleID := "caddy.logging.cores." + moduleName
unm, err := caddyfile.UnmarshalModule(h.Dispenser, moduleID)
if err != nil {
return nil, err
}
core, ok := unm.(zapcore.Core)
if !ok {
return nil, h.Errf("module %s (%T) is not a zapcore.Core", moduleID, unm)
}
cl.CoreRaw = caddyconfig.JSONModuleObject(core, "module", moduleName, h.warnings)
case "format":
if !h.NextArg() {
return nil, h.ArgErr()
}
moduleName := h.Val()
moduleID := "caddy.logging.encoders." + moduleName
unm, err := caddyfile.UnmarshalModule(h.Dispenser, moduleID)
if err != nil {
return nil, err
}
enc, ok := unm.(zapcore.Encoder)
if !ok {
return nil, h.Errf("module %s (%T) is not a zapcore.Encoder", moduleID, unm)
}
cl.EncoderRaw = caddyconfig.JSONModuleObject(enc, "format", moduleName, h.warnings)
case "level":
if !h.NextArg() {
return nil, h.ArgErr()
}
cl.Level = h.Val()
if h.NextArg() {
return nil, h.ArgErr()
}
case "include":
if !parseAsGlobalOption {
return nil, h.Err("include is not allowed in the log directive")
}
for h.NextArg() {
cl.Include = append(cl.Include, h.Val())
}
case "exclude":
if !parseAsGlobalOption {
return nil, h.Err("exclude is not allowed in the log directive")
}
for h.NextArg() {
cl.Exclude = append(cl.Exclude, h.Val())
}
case "no_hostname":
if h.NextArg() {
return nil, h.ArgErr()
}
noHostname = true
default:
return nil, h.Errf("unrecognized subdirective: %s", h.Val())
}
}
var val namedCustomLog
val.hostnames = customHostnames
val.noHostname = noHostname
isEmptyConfig := reflect.DeepEqual(cl, new(caddy.CustomLog))
// Skip handling of empty logging configs
if parseAsGlobalOption {
// Use indicated name for global log options
val.name = logName
} else {
if logName != "" {
val.name = logName
} else if !isEmptyConfig {
// Construct a log name for server log streams
logCounter, ok := h.State["logCounter"].(int)
if !ok {
logCounter = 0
}
val.name = fmt.Sprintf("log%d", logCounter)
logCounter++
h.State["logCounter"] = logCounter
}
if val.name != "" {
cl.Include = []string{"http.log.access." + val.name}
}
}
if !isEmptyConfig {
val.log = cl
}
configValues = append(configValues, ConfigValue{
Class: "custom_log",
Value: val,
})
return configValues, nil
}
// parseLogSkip parses the log_skip directive. Syntax:
//
// log_skip [<matcher>]
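//
// For example, to skip access logging for a health-check endpoint
// (the matcher name and path are illustrative):
//
//	@health path /healthz
//	log_skip @health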
func parseLogSkip(h Helper) (caddyhttp.MiddlewareHandler, error) {
h.Next() // consume directive name
// "skip_log" is deprecated, replaced by "log_skip"
if h.Val() == "skip_log" {
caddy.Log().Named("config.adapter.caddyfile").Warn("the 'skip_log' directive is deprecated, please use 'log_skip' instead!")
}
if h.NextArg() {
return nil, h.ArgErr()
}
return caddyhttp.VarsMiddleware{"log_skip": true}, nil
}
// parseLogName parses the log_name directive. Syntax:
//
// log_name <names...>
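//
// For example, to associate matching requests with an access logger
// named "static" (the name is illustrative):
//
//	log_name static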
func parseLogName(h Helper) (caddyhttp.MiddlewareHandler, error) {
h.Next() // consume directive name
return caddyhttp.VarsMiddleware{
caddyhttp.AccessLoggerNameVarKey: h.RemainingArgs(),
}, nil
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httpcaddyfile
import (
"encoding/json"
"net"
"sort"
"strconv"
"strings"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)
// defaultDirectiveOrder specifies the default order
// to apply directives in HTTP routes. This must only
// consist of directives that are included in Caddy's
// standard distribution.
//
// e.g. The 'root' directive goes near the start in
// case rewrites or redirects depend on existence of
// files, i.e. the file matcher, which must know the
// root first.
//
// e.g. The 'header' directive goes before 'redir' so
// that headers can be manipulated before doing redirects.
//
// e.g. The 'respond' directive is near the end because it
// writes a response and terminates the middleware chain.
var defaultDirectiveOrder = []string{
"tracing",
// set variables that may be used by other directives
"map",
"vars",
"fs",
"root",
"log_append",
"skip_log", // TODO: deprecated, renamed to log_skip
"log_skip",
"log_name",
"header",
"copy_response_headers", // only in reverse_proxy's handle_response
"request_body",
"redir",
// incoming request manipulation
"method",
"rewrite",
"uri",
"try_files",
// middleware handlers; some wrap responses
"basicauth", // TODO: deprecated, renamed to basic_auth
"basic_auth",
"forward_auth",
"request_header",
"encode",
"push",
"intercept",
"templates",
// special routing & dispatching directives
"invoke",
"handle",
"handle_path",
"route",
// handlers that typically respond to requests
"abort",
"error",
"copy_response", // only in reverse_proxy's handle_response
"respond",
"metrics",
"reverse_proxy",
"php_fastcgi",
"file_server",
"acme_server",
}
// directiveOrder specifies the order to apply directives
// in HTTP routes, after being modified by either the
// plugins or by the user via the "order" global option.
var directiveOrder = defaultDirectiveOrder
// directiveIsOrdered returns true if dir is
// a known, ordered (sorted) directive.
func directiveIsOrdered(dir string) bool {
for _, d := range directiveOrder {
if d == dir {
return true
}
}
return false
}
// RegisterDirective registers a unique directive dir with an
// associated unmarshaling (setup) function. When directive dir
// is encountered in a Caddyfile, setupFunc will be called to
// unmarshal its tokens.
func RegisterDirective(dir string, setupFunc UnmarshalFunc) {
if _, ok := registeredDirectives[dir]; ok {
panic("directive " + dir + " already registered")
}
registeredDirectives[dir] = setupFunc
}
// RegisterHandlerDirective is like RegisterDirective, but for
// directives which specifically output only an HTTP handler.
// Directives registered with this function will always have
// an optional matcher token as the first argument.
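//
// For example, a plugin would typically register its handler directive
// from an init function (the directive name and setup function here are
// hypothetical):
//
//	func init() {
//		RegisterHandlerDirective("visitor_ip", parseVisitorIP)
//	}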
func RegisterHandlerDirective(dir string, setupFunc UnmarshalHandlerFunc) {
RegisterDirective(dir, func(h Helper) ([]ConfigValue, error) {
if !h.Next() {
return nil, h.ArgErr()
}
matcherSet, err := h.ExtractMatcherSet()
if err != nil {
return nil, err
}
val, err := setupFunc(h)
if err != nil {
return nil, err
}
return h.NewRoute(matcherSet, val), nil
})
}
// RegisterDirectiveOrder registers the default order for a
// directive from a plugin.
//
// This is useful when a plugin has a well-understood place
// it should run in the middleware pipeline, and it allows
// users to avoid having to define the order themselves.
//
// The directive dir may be placed in the position relative
// to ('before' or 'after') a directive included in Caddy's
// standard distribution. It cannot be relative to another
// plugin's directive.
//
// EXPERIMENTAL: This API may change or be removed.
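//
// For example (the directive name is hypothetical):
//
//	RegisterDirectiveOrder("visitor_ip", After, "header")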
func RegisterDirectiveOrder(dir string, position Positional, standardDir string) {
// check if directive was already ordered
if directiveIsOrdered(dir) {
panic("directive '" + dir + "' already ordered")
}
if position != Before && position != After {
panic("the 2nd argument must be either 'before' or 'after', got '" + position + "'")
}
// check if directive exists in standard distribution, since
// we can't allow plugins to depend on one another; we can't
// guarantee the order that plugins are loaded in.
foundStandardDir := false
for _, d := range defaultDirectiveOrder {
if d == standardDir {
foundStandardDir = true
}
}
if !foundStandardDir {
panic("the 3rd argument '" + standardDir + "' must be a directive that exists in the standard distribution of Caddy")
}
// insert directive into proper position
newOrder := directiveOrder
for i, d := range newOrder {
if d != standardDir {
continue
}
if position == Before {
newOrder = append(newOrder[:i], append([]string{dir}, newOrder[i:]...)...)
} else if position == After {
newOrder = append(newOrder[:i+1], append([]string{dir}, newOrder[i+1:]...)...)
}
break
}
directiveOrder = newOrder
}
// RegisterGlobalOption registers a unique global option opt with
// an associated unmarshaling (setup) function. When the global
// option opt is encountered in a Caddyfile, setupFunc will be
// called to unmarshal its tokens.
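//
// For example (the option name and unmarshal function are hypothetical):
//
//	func init() {
//		RegisterGlobalOption("my_option", parseMyOption)
//	}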
func RegisterGlobalOption(opt string, setupFunc UnmarshalGlobalFunc) {
if _, ok := registeredGlobalOptions[opt]; ok {
panic("global option " + opt + " already registered")
}
registeredGlobalOptions[opt] = setupFunc
}
// Helper is a type which helps setup a value from
// Caddyfile tokens.
type Helper struct {
*caddyfile.Dispenser
// State stores intermediate variables during caddyfile adaptation.
State map[string]any
options map[string]any
warnings *[]caddyconfig.Warning
matcherDefs map[string]caddy.ModuleMap
parentBlock caddyfile.ServerBlock
groupCounter counter
}
// Option gets the option keyed by name.
func (h Helper) Option(name string) any {
return h.options[name]
}
// Caddyfiles returns the list of config files from
// which tokens in the current server block were loaded.
func (h Helper) Caddyfiles() []string {
// first obtain set of names of files involved
// in this server block, without duplicates
files := make(map[string]struct{})
for _, segment := range h.parentBlock.Segments {
for _, token := range segment {
files[token.File] = struct{}{}
}
}
// then convert the set into a slice
filesSlice := make([]string, 0, len(files))
for file := range files {
filesSlice = append(filesSlice, file)
}
sort.Strings(filesSlice)
return filesSlice
}
// JSON converts val into JSON. Any errors are added to warnings.
func (h Helper) JSON(val any) json.RawMessage {
return caddyconfig.JSON(val, h.warnings)
}
// MatcherToken assumes the next argument token is (possibly) a matcher,
// and if so, returns the matcher set along with a true value. If the next
// token is not a matcher, nil and false are returned. Note that a true
// value may be returned with a nil matcher set if it is a catch-all.
func (h Helper) MatcherToken() (caddy.ModuleMap, bool, error) {
if !h.NextArg() {
return nil, false, nil
}
return matcherSetFromMatcherToken(h.Dispenser.Token(), h.matcherDefs, h.warnings)
}
// ExtractMatcherSet is like MatcherToken, except this is a higher-level
// method that returns the matcher set described by the matcher token,
// or nil if there is none, and deletes the matcher token from the
// dispenser and resets it as if this look-ahead never happened. Useful
// when wrapping a route (one or more handlers) in a user-defined matcher.
func (h Helper) ExtractMatcherSet() (caddy.ModuleMap, error) {
matcherSet, hasMatcher, err := h.MatcherToken()
if err != nil {
return nil, err
}
if hasMatcher {
// strip matcher token; we don't need to
// use the return value here because a
// new dispenser should have been made
// solely for this directive's tokens,
// with no other uses of same slice
h.Dispenser.Delete()
}
h.Dispenser.Reset() // pretend this lookahead never happened
return matcherSet, nil
}
// NewRoute returns config values relevant to creating a new HTTP route.
func (h Helper) NewRoute(matcherSet caddy.ModuleMap,
handler caddyhttp.MiddlewareHandler,
) []ConfigValue {
mod, err := caddy.GetModule(caddy.GetModuleID(handler))
if err != nil {
*h.warnings = append(*h.warnings, caddyconfig.Warning{
File: h.File(),
Line: h.Line(),
Message: err.Error(),
})
return nil
}
var matcherSetsRaw []caddy.ModuleMap
if matcherSet != nil {
matcherSetsRaw = append(matcherSetsRaw, matcherSet)
}
return []ConfigValue{
{
Class: "route",
Value: caddyhttp.Route{
MatcherSetsRaw: matcherSetsRaw,
HandlersRaw: []json.RawMessage{caddyconfig.JSONModuleObject(handler, "handler", mod.ID.Name(), h.warnings)},
},
},
}
}
// GroupRoutes adds the routes (caddyhttp.Route type) in vals to the
// same group, if there is more than one route in vals.
func (h Helper) GroupRoutes(vals []ConfigValue) {
// ensure there's at least two routes; group of one is pointless
var count int
for _, v := range vals {
if _, ok := v.Value.(caddyhttp.Route); ok {
count++
if count > 1 {
break
}
}
}
if count < 2 {
return
}
// now that we know the group will have some effect, do it
groupName := h.groupCounter.nextGroup()
for i := range vals {
if route, ok := vals[i].Value.(caddyhttp.Route); ok {
route.Group = groupName
vals[i].Value = route
}
}
}
// WithDispenser returns a new instance based on d. All other Helper
// fields are copied, so typically maps are shared with this new instance.
func (h Helper) WithDispenser(d *caddyfile.Dispenser) Helper {
h.Dispenser = d
return h
}
// ParseSegmentAsSubroute parses the segment such that its subdirectives
// are themselves treated as directives, from which a subroute is built
// and returned.
func ParseSegmentAsSubroute(h Helper) (caddyhttp.MiddlewareHandler, error) {
allResults, err := parseSegmentAsConfig(h)
if err != nil {
return nil, err
}
return buildSubroute(allResults, h.groupCounter, true)
}
// parseSegmentAsConfig parses the segment such that its subdirectives
// are themselves treated as directives, including named matcher definitions,
// and the raw Config structs are returned.
func parseSegmentAsConfig(h Helper) ([]ConfigValue, error) {
var allResults []ConfigValue
for h.Next() {
// don't allow non-matcher args on the first line
if h.NextArg() {
return nil, h.ArgErr()
}
// slice the linear list of tokens into top-level segments
var segments []caddyfile.Segment
for nesting := h.Nesting(); h.NextBlock(nesting); {
segments = append(segments, h.NextSegment())
}
// copy existing matcher definitions so we can augment
// new ones that are defined only in this scope
matcherDefs := make(map[string]caddy.ModuleMap, len(h.matcherDefs))
for key, val := range h.matcherDefs {
matcherDefs[key] = val
}
// find and extract any embedded matcher definitions in this scope
for i := 0; i < len(segments); i++ {
seg := segments[i]
if strings.HasPrefix(seg.Directive(), matcherPrefix) {
// parse, then add the matcher to matcherDefs
err := parseMatcherDefinitions(caddyfile.NewDispenser(seg), matcherDefs)
if err != nil {
return nil, err
}
// remove the matcher segment (consumed), then step back the loop
segments = append(segments[:i], segments[i+1:]...)
i--
}
}
// with matchers ready to go, evaluate each directive's segment
for _, seg := range segments {
dir := seg.Directive()
dirFunc, ok := registeredDirectives[dir]
if !ok {
return nil, h.Errf("unrecognized directive: %s - are you sure your Caddyfile structure (nesting and braces) is correct?", dir)
}
subHelper := h
subHelper.Dispenser = caddyfile.NewDispenser(seg)
subHelper.matcherDefs = matcherDefs
results, err := dirFunc(subHelper)
if err != nil {
return nil, h.Errf("parsing caddyfile tokens for '%s': %v", dir, err)
}
dir = normalizeDirectiveName(dir)
for _, result := range results {
result.directive = dir
allResults = append(allResults, result)
}
}
}
return allResults, nil
}
// ConfigValue represents a value to be added to the final
// configuration, or a value to be consulted when building
// the final configuration.
type ConfigValue struct {
// The kind of value this is. As the config is
// being built, the adapter will look in the
// "pile" for values belonging to a certain
// class when it is setting up a certain part
// of the config. The associated value will be
// type-asserted and placed accordingly.
Class string
// The value to be used when building the config.
// Generally its type is associated with the
// name of the Class.
Value any
directive string
}
func sortRoutes(routes []ConfigValue) {
dirPositions := make(map[string]int)
for i, dir := range directiveOrder {
dirPositions[dir] = i
}
sort.SliceStable(routes, func(i, j int) bool {
// if the directives are different, just use the established directive order
iDir, jDir := routes[i].directive, routes[j].directive
if iDir != jDir {
return dirPositions[iDir] < dirPositions[jDir]
}
// directives are the same; sub-sort by path matcher length if there's
// only one matcher set and one path (this is a very common case and
// usually -- but not always -- helpful/expected, oh well; user can
// always take manual control of order using handler or route blocks)
iRoute, ok := routes[i].Value.(caddyhttp.Route)
if !ok {
return false
}
jRoute, ok := routes[j].Value.(caddyhttp.Route)
if !ok {
return false
}
// decode the path matchers if there is just one matcher set
var iPM, jPM caddyhttp.MatchPath
if len(iRoute.MatcherSetsRaw) == 1 {
_ = json.Unmarshal(iRoute.MatcherSetsRaw[0]["path"], &iPM)
}
if len(jRoute.MatcherSetsRaw) == 1 {
_ = json.Unmarshal(jRoute.MatcherSetsRaw[0]["path"], &jPM)
}
// if there is only one path in the path matcher, sort by longer path
// (more specific) first; missing path matchers or multi-matchers are
// treated as zero-length paths
var iPathLen, jPathLen int
if len(iPM) == 1 {
iPathLen = len(iPM[0])
}
if len(jPM) == 1 {
jPathLen = len(jPM[0])
}
sortByPath := func() bool {
// we can only confidently compare path lengths if both
// directives have a single path to match (issue #5037)
if iPathLen > 0 && jPathLen > 0 {
// if both paths are the same except for a trailing wildcard,
// sort by the shorter path first (which is more specific)
if strings.TrimSuffix(iPM[0], "*") == strings.TrimSuffix(jPM[0], "*") {
return iPathLen < jPathLen
}
// sort most-specific (longest) path first
return iPathLen > jPathLen
}
// if both directives don't have a single path to compare,
// sort whichever one has a matcher first; if both have
// a matcher, sort equally (stable sort preserves order)
return len(iRoute.MatcherSetsRaw) > 0 && len(jRoute.MatcherSetsRaw) == 0
}()
// some directives involve setting values which can overwrite
// each other, so it makes most sense to reverse the order so
// that the least-specific matcher is first, allowing the last
// matching one to win
if iDir == "vars" {
return !sortByPath
}
// everything else is most-specific matcher first
return sortByPath
})
}
// serverBlock pairs a Caddyfile server block with
// a "pile" of config values, keyed by class name,
// as well as its parsed keys for convenience.
type serverBlock struct {
block caddyfile.ServerBlock
pile map[string][]ConfigValue // config values obtained from directives
keys []Address
}
// hostsFromKeys returns a list of all the non-empty hostnames found in
// the keys of the server block sb. If loggerMode is false, a key with
// an empty hostname portion causes an empty slice to be returned, since
// that server block is interpreted to effectively match all hosts. An empty
// string is never added to the slice.
//
// If loggerMode is true, then the non-standard ports of keys will be
// joined to the hostnames. This is to effectively match the Host
// header of requests that come in for that key.
//
// The resulting slice is not sorted but will never have duplicates.
func (sb serverBlock) hostsFromKeys(loggerMode bool) []string {
// ensure each entry in our list is unique
hostMap := make(map[string]struct{})
for _, addr := range sb.keys {
if addr.Host == "" {
if !loggerMode {
// server block contains a key like ":443", i.e. the host portion
// is empty / catch-all, which means to match all hosts
return []string{}
}
// never append an empty string
continue
}
if loggerMode &&
addr.Port != "" &&
addr.Port != strconv.Itoa(caddyhttp.DefaultHTTPPort) &&
addr.Port != strconv.Itoa(caddyhttp.DefaultHTTPSPort) {
hostMap[net.JoinHostPort(addr.Host, addr.Port)] = struct{}{}
} else {
hostMap[addr.Host] = struct{}{}
}
}
// convert map to slice
sblockHosts := make([]string, 0, len(hostMap))
for host := range hostMap {
sblockHosts = append(sblockHosts, host)
}
return sblockHosts
}
func (sb serverBlock) hostsFromKeysNotHTTP(httpPort string) []string {
// ensure each entry in our list is unique
hostMap := make(map[string]struct{})
for _, addr := range sb.keys {
if addr.Host == "" {
continue
}
if addr.Scheme != "http" && addr.Port != httpPort {
hostMap[addr.Host] = struct{}{}
}
}
// convert map to slice
sblockHosts := make([]string, 0, len(hostMap))
for host := range hostMap {
sblockHosts = append(sblockHosts, host)
}
return sblockHosts
}
// hasHostCatchAllKey returns true if sb has a key that
// omits a host portion, i.e. it "catches all" hosts.
func (sb serverBlock) hasHostCatchAllKey() bool {
for _, addr := range sb.keys {
if addr.Host == "" {
return true
}
}
return false
}
// isAllHTTP returns true if all sb keys explicitly specify
// the http:// scheme
func (sb serverBlock) isAllHTTP() bool {
for _, addr := range sb.keys {
if addr.Scheme != "http" {
return false
}
}
return true
}
// Positional are the supported modes for ordering directives.
type Positional string
const (
Before Positional = "before"
After Positional = "after"
First Positional = "first"
Last Positional = "last"
)
type (
// UnmarshalFunc is a function which can unmarshal Caddyfile
// tokens into zero or more config values using a Helper type.
// These are passed in a call to RegisterDirective.
UnmarshalFunc func(h Helper) ([]ConfigValue, error)
// UnmarshalHandlerFunc is like UnmarshalFunc, except the
// output of the unmarshaling is an HTTP handler. This
// function does not need to deal with HTTP request matching
// which is abstracted away. Since writing HTTP handlers
// with Caddyfile support is very common, this is a more
// convenient way to add a handler to the chain since a lot
// of the details common to HTTP handlers are taken care of
// for you. These are passed to a call to
// RegisterHandlerDirective.
UnmarshalHandlerFunc func(h Helper) (caddyhttp.MiddlewareHandler, error)
// UnmarshalGlobalFunc is a function which can unmarshal Caddyfile
// tokens from a global option. It is passed the tokens to parse and
// existing value from the previous instance of this global option
// (if any). It returns the value to associate with this global option.
UnmarshalGlobalFunc func(d *caddyfile.Dispenser, existingVal any) (any, error)
)
var registeredDirectives = make(map[string]UnmarshalFunc)
var registeredGlobalOptions = make(map[string]UnmarshalGlobalFunc)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httpcaddyfile
import (
"encoding/json"
"fmt"
"net"
"reflect"
"slices"
"sort"
"strconv"
"strings"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
"github.com/caddyserver/caddy/v2/modules/caddypki"
"github.com/caddyserver/caddy/v2/modules/caddytls"
)
func init() {
caddyconfig.RegisterAdapter("caddyfile", caddyfile.Adapter{ServerType: ServerType{}})
}
// App represents the configuration for a non-standard
// Caddy app module (e.g. third-party plugin) which was
// parsed from a global options block.
type App struct {
// The JSON key for the app being configured
Name string
// The raw app config as JSON
Value json.RawMessage
}
// ServerType can set up a config from an HTTP Caddyfile.
type ServerType struct{}
// Setup makes a config from the tokens.
func (st ServerType) Setup(
inputServerBlocks []caddyfile.ServerBlock,
options map[string]any,
) (*caddy.Config, []caddyconfig.Warning, error) {
var warnings []caddyconfig.Warning
gc := counter{new(int)}
state := make(map[string]any)
// load all the server blocks and associate them with a "pile" of config values
originalServerBlocks := make([]serverBlock, 0, len(inputServerBlocks))
for _, sblock := range inputServerBlocks {
for j, k := range sblock.Keys {
if j == 0 && strings.HasPrefix(k.Text, "@") {
return nil, warnings, fmt.Errorf("%s:%d: cannot define a matcher outside of a site block: '%s'", k.File, k.Line, k.Text)
}
if _, ok := registeredDirectives[k.Text]; ok {
return nil, warnings, fmt.Errorf("%s:%d: parsed '%s' as a site address, but it is a known directive; directives must appear in a site block", k.File, k.Line, k.Text)
}
}
originalServerBlocks = append(originalServerBlocks, serverBlock{
block: sblock,
pile: make(map[string][]ConfigValue),
})
}
// apply any global options
var err error
originalServerBlocks, err = st.evaluateGlobalOptionsBlock(originalServerBlocks, options)
if err != nil {
return nil, warnings, err
}
// this will replace both static and user-defined placeholder shorthands
// with actual identifiers used by Caddy
replacer := NewShorthandReplacer()
originalServerBlocks, err = st.extractNamedRoutes(originalServerBlocks, options, &warnings, replacer)
if err != nil {
return nil, warnings, err
}
for _, sb := range originalServerBlocks {
for i := range sb.block.Segments {
replacer.ApplyToSegment(&sb.block.Segments[i])
}
if len(sb.block.Keys) == 0 {
return nil, warnings, fmt.Errorf("server block without any key is global configuration, and if used, it must be first")
}
// extract matcher definitions
matcherDefs := make(map[string]caddy.ModuleMap)
for _, segment := range sb.block.Segments {
if dir := segment.Directive(); strings.HasPrefix(dir, matcherPrefix) {
d := sb.block.DispenseDirective(dir)
err := parseMatcherDefinitions(d, matcherDefs)
if err != nil {
return nil, warnings, err
}
}
}
// evaluate each directive ("segment") in this block
for _, segment := range sb.block.Segments {
dir := segment.Directive()
if strings.HasPrefix(dir, matcherPrefix) {
// matcher definitions were pre-processed
continue
}
dirFunc, ok := registeredDirectives[dir]
if !ok {
tkn := segment[0]
message := "%s:%d: unrecognized directive: %s"
if !sb.block.HasBraces {
message += "\nDid you mean to define a second site? If so, you must use curly braces around each site to separate their configurations."
}
return nil, warnings, fmt.Errorf(message, tkn.File, tkn.Line, dir)
}
h := Helper{
Dispenser: caddyfile.NewDispenser(segment),
options: options,
warnings: &warnings,
matcherDefs: matcherDefs,
parentBlock: sb.block,
groupCounter: gc,
State: state,
}
results, err := dirFunc(h)
if err != nil {
return nil, warnings, fmt.Errorf("parsing caddyfile tokens for '%s': %v", dir, err)
}
dir = normalizeDirectiveName(dir)
for _, result := range results {
result.directive = dir
sb.pile[result.Class] = append(sb.pile[result.Class], result)
}
// specially handle named routes that were pulled out from
// the invoke directive, which could be nested anywhere within
// some subroutes in this directive; we add them to the pile
// for this server block
if state[namedRouteKey] != nil {
for name := range state[namedRouteKey].(map[string]struct{}) {
result := ConfigValue{Class: namedRouteKey, Value: name}
sb.pile[result.Class] = append(sb.pile[result.Class], result)
}
state[namedRouteKey] = nil
}
}
}
// map
sbmap, err := st.mapAddressToServerBlocks(originalServerBlocks, options)
if err != nil {
return nil, warnings, err
}
// reduce
pairings := st.consolidateAddrMappings(sbmap)
// each pairing of listener addresses to list of server
// blocks is basically a server definition
servers, err := st.serversFromPairings(pairings, options, &warnings, gc)
if err != nil {
return nil, warnings, err
}
// now that each server is configured, make the HTTP app
httpApp := caddyhttp.App{
HTTPPort: tryInt(options["http_port"], &warnings),
HTTPSPort: tryInt(options["https_port"], &warnings),
GracePeriod: tryDuration(options["grace_period"], &warnings),
ShutdownDelay: tryDuration(options["shutdown_delay"], &warnings),
Servers: servers,
}
// then make the TLS app
tlsApp, warnings, err := st.buildTLSApp(pairings, options, warnings)
if err != nil {
return nil, warnings, err
}
// then make the PKI app
pkiApp, warnings, err := st.buildPKIApp(pairings, options, warnings)
if err != nil {
return nil, warnings, err
}
// extract any custom logs, and enforce configured levels
var customLogs []namedCustomLog
var hasDefaultLog bool
addCustomLog := func(ncl namedCustomLog) {
if ncl.name == "" {
return
}
if ncl.name == caddy.DefaultLoggerName {
hasDefaultLog = true
}
if _, ok := options["debug"]; ok && ncl.log != nil && ncl.log.Level == "" {
ncl.log.Level = zap.DebugLevel.CapitalString()
}
customLogs = append(customLogs, ncl)
}
// Apply global log options, when set
if options["log"] != nil {
for _, logValue := range options["log"].([]ConfigValue) {
addCustomLog(logValue.Value.(namedCustomLog))
}
}
if !hasDefaultLog {
// if the default log was not customized, ensure we
// configure it with any applicable options
if _, ok := options["debug"]; ok {
customLogs = append(customLogs, namedCustomLog{
name: caddy.DefaultLoggerName,
log: &caddy.CustomLog{
BaseLog: caddy.BaseLog{Level: zap.DebugLevel.CapitalString()},
},
})
}
}
// Apply server-specific log options
for _, p := range pairings {
for _, sb := range p.serverBlocks {
for _, clVal := range sb.pile["custom_log"] {
addCustomLog(clVal.Value.(namedCustomLog))
}
}
}
// annnd the top-level config, then we're done!
cfg := &caddy.Config{AppsRaw: make(caddy.ModuleMap)}
// loop through the configured options, and if any of
// them are an httpcaddyfile App, then we insert them
// into the config as raw Caddy apps
for _, opt := range options {
if app, ok := opt.(App); ok {
cfg.AppsRaw[app.Name] = app.Value
}
}
// insert the standard Caddy apps into the config
if len(httpApp.Servers) > 0 {
cfg.AppsRaw["http"] = caddyconfig.JSON(httpApp, &warnings)
}
if !reflect.DeepEqual(tlsApp, &caddytls.TLS{CertificatesRaw: make(caddy.ModuleMap)}) {
cfg.AppsRaw["tls"] = caddyconfig.JSON(tlsApp, &warnings)
}
if !reflect.DeepEqual(pkiApp, &caddypki.PKI{CAs: make(map[string]*caddypki.CA)}) {
cfg.AppsRaw["pki"] = caddyconfig.JSON(pkiApp, &warnings)
}
if filesystems, ok := options["filesystem"].(caddy.Module); ok {
cfg.AppsRaw["caddy.filesystems"] = caddyconfig.JSON(
filesystems,
&warnings)
}
if storageCvtr, ok := options["storage"].(caddy.StorageConverter); ok {
cfg.StorageRaw = caddyconfig.JSONModuleObject(storageCvtr,
"module",
storageCvtr.(caddy.Module).CaddyModule().ID.Name(),
&warnings)
}
if adminConfig, ok := options["admin"].(*caddy.AdminConfig); ok && adminConfig != nil {
cfg.Admin = adminConfig
}
if pc, ok := options["persist_config"].(string); ok && pc == "off" {
if cfg.Admin == nil {
cfg.Admin = new(caddy.AdminConfig)
}
if cfg.Admin.Config == nil {
cfg.Admin.Config = new(caddy.ConfigSettings)
}
cfg.Admin.Config.Persist = new(bool)
}
if len(customLogs) > 0 {
if cfg.Logging == nil {
cfg.Logging = &caddy.Logging{
Logs: make(map[string]*caddy.CustomLog),
}
}
// Add the default log first if defined, so that it doesn't
// accidentally get re-created below due to the Exclude logic
for _, ncl := range customLogs {
if ncl.name == caddy.DefaultLoggerName && ncl.log != nil {
cfg.Logging.Logs[caddy.DefaultLoggerName] = ncl.log
break
}
}
// Add the rest of the custom logs
for _, ncl := range customLogs {
if ncl.log == nil || ncl.name == caddy.DefaultLoggerName {
continue
}
if ncl.name != "" {
cfg.Logging.Logs[ncl.name] = ncl.log
}
// most users seem to prefer not writing access logs
// to the default log when they are directed to a
// file or have any other special customization
if ncl.name != caddy.DefaultLoggerName && len(ncl.log.Include) > 0 {
defaultLog, ok := cfg.Logging.Logs[caddy.DefaultLoggerName]
if !ok {
defaultLog = new(caddy.CustomLog)
cfg.Logging.Logs[caddy.DefaultLoggerName] = defaultLog
}
defaultLog.Exclude = append(defaultLog.Exclude, ncl.log.Include...)
// avoid duplicates by sorting + compacting
sort.Strings(defaultLog.Exclude)
defaultLog.Exclude = slices.Compact[[]string, string](defaultLog.Exclude)
}
}
// we may have not actually added anything, so remove if empty
if len(cfg.Logging.Logs) == 0 {
cfg.Logging = nil
}
}
return cfg, warnings, nil
}
// evaluateGlobalOptionsBlock evaluates the global options block,
// which is expected to be the first server block if it has zero
// keys. It returns the updated list of server blocks with the
// global options block removed, and updates options accordingly.
func (ServerType) evaluateGlobalOptionsBlock(serverBlocks []serverBlock, options map[string]any) ([]serverBlock, error) {
if len(serverBlocks) == 0 || len(serverBlocks[0].block.Keys) > 0 {
return serverBlocks, nil
}
for _, segment := range serverBlocks[0].block.Segments {
opt := segment.Directive()
var val any
var err error
disp := caddyfile.NewDispenser(segment)
optFunc, ok := registeredGlobalOptions[opt]
if !ok {
tkn := segment[0]
return nil, fmt.Errorf("%s:%d: unrecognized global option: %s", tkn.File, tkn.Line, opt)
}
val, err = optFunc(disp, options[opt])
if err != nil {
return nil, fmt.Errorf("parsing caddyfile tokens for '%s': %v", opt, err)
}
// As a special case, fold multiple "servers" options together
// in an array instead of overwriting a possible existing value
if opt == "servers" {
existingOpts, ok := options[opt].([]serverOptions)
if !ok {
existingOpts = []serverOptions{}
}
serverOpts, ok := val.(serverOptions)
if !ok {
return nil, fmt.Errorf("unexpected type from 'servers' global options: %T", val)
}
options[opt] = append(existingOpts, serverOpts)
continue
}
// Additionally, fold multiple "log" options together into an
// array so that multiple loggers can be configured.
if opt == "log" {
existingOpts, ok := options[opt].([]ConfigValue)
if !ok {
existingOpts = []ConfigValue{}
}
logOpts, ok := val.([]ConfigValue)
if !ok {
return nil, fmt.Errorf("unexpected type from 'log' global options: %T", val)
}
options[opt] = append(existingOpts, logOpts...)
continue
}
options[opt] = val
}
// If we got "servers" options, we'll sort them by their listener address
if serverOpts, ok := options["servers"].([]serverOptions); ok {
sort.Slice(serverOpts, func(i, j int) bool {
return len(serverOpts[i].ListenerAddress) > len(serverOpts[j].ListenerAddress)
})
// Reject the config if there are duplicate listener addresses
seen := make(map[string]bool)
for _, entry := range serverOpts {
if _, alreadySeen := seen[entry.ListenerAddress]; alreadySeen {
return nil, fmt.Errorf("cannot have 'servers' global options with duplicate listener addresses: %s", entry.ListenerAddress)
}
seen[entry.ListenerAddress] = true
}
}
return serverBlocks[1:], nil
}
// extractNamedRoutes pulls out any named route server blocks
// so they don't get parsed as sites, and stores them in options
// for later.
func (ServerType) extractNamedRoutes(
serverBlocks []serverBlock,
options map[string]any,
warnings *[]caddyconfig.Warning,
replacer ShorthandReplacer,
) ([]serverBlock, error) {
namedRoutes := map[string]*caddyhttp.Route{}
gc := counter{new(int)}
state := make(map[string]any)
// copy the server blocks so we can
// splice out the named route ones
filtered := append([]serverBlock{}, serverBlocks...)
index := -1
for _, sb := range serverBlocks {
index++
if !sb.block.IsNamedRoute {
continue
}
// splice out this block, because we know it's not a real server
filtered = append(filtered[:index], filtered[index+1:]...)
index--
if len(sb.block.Segments) == 0 {
continue
}
wholeSegment := caddyfile.Segment{}
for i := range sb.block.Segments {
// replace user-defined placeholder shorthands in extracted named routes
replacer.ApplyToSegment(&sb.block.Segments[i])
// zip up all the segments since ParseSegmentAsSubroute
// was designed to take a single directive plus its associated tokens
wholeSegment = append(wholeSegment, sb.block.Segments[i]...)
}
h := Helper{
Dispenser: caddyfile.NewDispenser(wholeSegment),
options: options,
warnings: warnings,
matcherDefs: nil,
parentBlock: sb.block,
groupCounter: gc,
State: state,
}
handler, err := ParseSegmentAsSubroute(h)
if err != nil {
return nil, err
}
subroute := handler.(*caddyhttp.Subroute)
route := caddyhttp.Route{}
if len(subroute.Routes) == 1 && len(subroute.Routes[0].MatcherSetsRaw) == 0 {
// if there's only one route with no matcher, then we can simplify
route.HandlersRaw = append(route.HandlersRaw, subroute.Routes[0].HandlersRaw[0])
} else {
// otherwise we need the whole subroute
route.HandlersRaw = []json.RawMessage{caddyconfig.JSONModuleObject(handler, "handler", subroute.CaddyModule().ID.Name(), h.warnings)}
}
namedRoutes[sb.block.GetKeysText()[0]] = &route
}
options["named_routes"] = namedRoutes
return filtered, nil
}
// serversFromPairings creates the servers for each pairing of addresses
// to server blocks. Each pairing is essentially a server definition.
func (st *ServerType) serversFromPairings(
pairings []sbAddrAssociation,
options map[string]any,
warnings *[]caddyconfig.Warning,
groupCounter counter,
) (map[string]*caddyhttp.Server, error) {
servers := make(map[string]*caddyhttp.Server)
defaultSNI := tryString(options["default_sni"], warnings)
fallbackSNI := tryString(options["fallback_sni"], warnings)
httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort)
if hp, ok := options["http_port"].(int); ok {
httpPort = strconv.Itoa(hp)
}
httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPSPort)
if hsp, ok := options["https_port"].(int); ok {
httpsPort = strconv.Itoa(hsp)
}
autoHTTPS := "on"
if ah, ok := options["auto_https"].(string); ok {
autoHTTPS = ah
}
for i, p := range pairings {
// detect ambiguous site definitions: server blocks which
// have the same host bound to the same interface (listener
// address), otherwise their routes will improperly be added
// to the same server (see issue #4635)
for j, sblock1 := range p.serverBlocks {
for _, key := range sblock1.block.GetKeysText() {
for k, sblock2 := range p.serverBlocks {
if k == j {
continue
}
if sliceContains(sblock2.block.GetKeysText(), key) {
return nil, fmt.Errorf("ambiguous site definition: %s", key)
}
}
}
}
srv := &caddyhttp.Server{
Listen: p.addresses,
}
// handle the auto_https global option
if autoHTTPS != "on" {
srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
switch autoHTTPS {
case "off":
srv.AutoHTTPS.Disabled = true
case "disable_redirects":
srv.AutoHTTPS.DisableRedir = true
case "disable_certs":
srv.AutoHTTPS.DisableCerts = true
case "ignore_loaded_certs":
srv.AutoHTTPS.IgnoreLoadedCerts = true
}
}
// Using paths in site addresses is deprecated
// See ParseAddress() where parsing should later reject paths
// See https://github.com/caddyserver/caddy/pull/4728 for a full explanation
for _, sblock := range p.serverBlocks {
for _, addr := range sblock.keys {
if addr.Path != "" {
caddy.Log().Named("caddyfile").Warn("Using a path in a site address is deprecated; please use the 'handle' directive instead", zap.String("address", addr.String()))
}
}
}
// sort server blocks by their keys; this is important because
// only the first matching site should be evaluated, and we should
// attempt to match most specific site first (host and path), in
// case their matchers overlap; we do this somewhat naively by
// descending sort by length of host then path
sort.SliceStable(p.serverBlocks, func(i, j int) bool {
// TODO: we could pre-process the specificities for efficiency,
// but I don't expect many blocks will have THAT many keys...
var iLongestPath, jLongestPath string
var iLongestHost, jLongestHost string
var iWildcardHost, jWildcardHost bool
for _, addr := range p.serverBlocks[i].keys {
if strings.Contains(addr.Host, "*") || addr.Host == "" {
iWildcardHost = true
}
if specificity(addr.Host) > specificity(iLongestHost) {
iLongestHost = addr.Host
}
if specificity(addr.Path) > specificity(iLongestPath) {
iLongestPath = addr.Path
}
}
for _, addr := range p.serverBlocks[j].keys {
if strings.Contains(addr.Host, "*") || addr.Host == "" {
jWildcardHost = true
}
if specificity(addr.Host) > specificity(jLongestHost) {
jLongestHost = addr.Host
}
if specificity(addr.Path) > specificity(jLongestPath) {
jLongestPath = addr.Path
}
}
// catch-all blocks (blocks with no hostname) should always go
// last, even after blocks with wildcard hosts
if specificity(iLongestHost) == 0 {
return false
}
if specificity(jLongestHost) == 0 {
return true
}
if iWildcardHost != jWildcardHost {
// site blocks that have a key with a wildcard in the hostname
// must always be less specific than blocks without one; see
// https://github.com/caddyserver/caddy/issues/3410
return jWildcardHost && !iWildcardHost
}
if specificity(iLongestHost) == specificity(jLongestHost) {
return len(iLongestPath) > len(jLongestPath)
}
return specificity(iLongestHost) > specificity(jLongestHost)
})
var hasCatchAllTLSConnPolicy, addressQualifiesForTLS bool
autoHTTPSWillAddConnPolicy := autoHTTPS != "off"
// if needed, the ServerLogConfig is initialized beforehand so
// that all server blocks can populate it with data, even those
// that do not have a log directive of their own
for _, sblock := range p.serverBlocks {
if len(sblock.pile["custom_log"]) != 0 {
srv.Logs = new(caddyhttp.ServerLogConfig)
break
}
}
// add named routes to the server if 'invoke' was used inside of it
configuredNamedRoutes := options["named_routes"].(map[string]*caddyhttp.Route)
for _, sblock := range p.serverBlocks {
if len(sblock.pile[namedRouteKey]) == 0 {
continue
}
for _, value := range sblock.pile[namedRouteKey] {
if srv.NamedRoutes == nil {
srv.NamedRoutes = map[string]*caddyhttp.Route{}
}
name := value.Value.(string)
if configuredNamedRoutes[name] == nil {
return nil, fmt.Errorf("cannot invoke named route '%s', which was not defined", name)
}
srv.NamedRoutes[name] = configuredNamedRoutes[name]
}
}
// create a subroute for each site in the server block
for _, sblock := range p.serverBlocks {
matcherSetsEnc, err := st.compileEncodedMatcherSets(sblock)
if err != nil {
return nil, fmt.Errorf("server block %v: compiling matcher sets: %v", sblock.block.Keys, err)
}
hosts := sblock.hostsFromKeys(false)
// emit warnings if the user used unspecified IP addresses; they probably want the bind directive
for _, h := range hosts {
if h == "0.0.0.0" || h == "::" {
caddy.Log().Named("caddyfile").Warn("Site block has an unspecified IP address which only matches requests having that Host header; you probably want the 'bind' directive to configure the socket", zap.String("address", h))
}
}
// tls: connection policies
if cpVals, ok := sblock.pile["tls.connection_policy"]; ok {
// tls connection policies
for _, cpVal := range cpVals {
cp := cpVal.Value.(*caddytls.ConnectionPolicy)
// make sure the policy covers all hostnames from the block
for _, h := range hosts {
if h == defaultSNI {
hosts = append(hosts, "")
cp.DefaultSNI = defaultSNI
break
}
if h == fallbackSNI {
hosts = append(hosts, "")
cp.FallbackSNI = fallbackSNI
break
}
}
if len(hosts) > 0 {
slices.Sort(hosts) // for deterministic JSON output
cp.MatchersRaw = caddy.ModuleMap{
"sni": caddyconfig.JSON(hosts, warnings), // make sure to match all hosts, not just auto-HTTPS-qualified ones
}
} else {
cp.DefaultSNI = defaultSNI
cp.FallbackSNI = fallbackSNI
}
// only append this policy if it actually changes something
if !cp.SettingsEmpty() {
srv.TLSConnPolicies = append(srv.TLSConnPolicies, cp)
hasCatchAllTLSConnPolicy = len(hosts) == 0
}
}
}
for _, addr := range sblock.keys {
// if the server only uses the HTTP port, auto-HTTPS will not apply,
// so this exclusion is only needed when other ports are in use
if listenersUseAnyPortOtherThan(srv.Listen, httpPort) {
// exclude any hosts that were defined explicitly with "http://"
// in the key from automated cert management (issue #2998)
if addr.Scheme == "http" && addr.Host != "" {
if srv.AutoHTTPS == nil {
srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
}
if !sliceContains(srv.AutoHTTPS.Skip, addr.Host) {
srv.AutoHTTPS.Skip = append(srv.AutoHTTPS.Skip, addr.Host)
}
}
}
// If TLS is specified as a directive, it will also result in one or more connection policies being created.
// Thus, a catch-all address with a non-standard port, e.g. :8443, can have TLS enabled without
// specifying the "https://" prefix.
// The second part of the condition allows creating a TLS conn policy even though `auto_https` has been
// disabled, ensuring compatibility with the behavior described in the link below:
// https://caddy.community/t/making-sense-of-auto-https-and-why-disabling-it-still-serves-https-instead-of-http/9761
createdTLSConnPolicies, ok := sblock.pile["tls.connection_policy"]
hasTLSEnabled := (ok && len(createdTLSConnPolicies) > 0) ||
(addr.Host != "" && srv.AutoHTTPS != nil && !sliceContains(srv.AutoHTTPS.Skip, addr.Host))
// we'll need to remember if the address qualifies for auto-HTTPS, so we
// can add a TLS conn policy if necessary
if addr.Scheme == "https" ||
(addr.Scheme != "http" && addr.Port != httpPort && hasTLSEnabled) {
addressQualifiesForTLS = true
}
// predict whether auto-HTTPS will add the conn policy for us; if so, we
// may not need to add one for this server
autoHTTPSWillAddConnPolicy = autoHTTPSWillAddConnPolicy &&
(addr.Port == httpsPort || (addr.Port != httpPort && addr.Host != ""))
}
// Look for any config values that provide listener wrappers on the server block
for _, listenerConfig := range sblock.pile["listener_wrapper"] {
listenerWrapper, ok := listenerConfig.Value.(caddy.ListenerWrapper)
if !ok {
return nil, fmt.Errorf("config for a listener wrapper did not provide a value that implements caddy.ListenerWrapper")
}
jsonListenerWrapper := caddyconfig.JSONModuleObject(
listenerWrapper,
"wrapper",
listenerWrapper.(caddy.Module).CaddyModule().ID.Name(),
warnings)
srv.ListenerWrappersRaw = append(srv.ListenerWrappersRaw, jsonListenerWrapper)
}
// set up each handler directive, making sure to honor directive order
dirRoutes := sblock.pile["route"]
siteSubroute, err := buildSubroute(dirRoutes, groupCounter, true)
if err != nil {
return nil, err
}
// add the site block's route(s) to the server
srv.Routes = appendSubrouteToRouteList(srv.Routes, siteSubroute, matcherSetsEnc, p, warnings)
// if error routes are defined, add those too
if errorSubrouteVals, ok := sblock.pile["error_route"]; ok {
if srv.Errors == nil {
srv.Errors = new(caddyhttp.HTTPErrorConfig)
}
sort.SliceStable(errorSubrouteVals, func(i, j int) bool {
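// error subroutes that have matchers should sort ahead of
// catch-all (matcher-less) error subroutes so that the more
// specific handlers get a chance to match first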
sri, srj := errorSubrouteVals[i].Value.(*caddyhttp.Subroute), errorSubrouteVals[j].Value.(*caddyhttp.Subroute)
if len(sri.Routes[0].MatcherSetsRaw) == 0 && len(srj.Routes[0].MatcherSetsRaw) != 0 {
return false
}
return true
})
errorsSubroute := &caddyhttp.Subroute{}
for _, val := range errorSubrouteVals {
sr := val.Value.(*caddyhttp.Subroute)
errorsSubroute.Routes = append(errorsSubroute.Routes, sr.Routes...)
}
srv.Errors.Routes = appendSubrouteToRouteList(srv.Errors.Routes, errorsSubroute, matcherSetsEnc, p, warnings)
}
// add log associations
// see https://github.com/caddyserver/caddy/issues/3310
sblockLogHosts := sblock.hostsFromKeys(true)
for _, cval := range sblock.pile["custom_log"] {
ncl := cval.Value.(namedCustomLog)
// if `no_hostname` is set, then this logger will not
// be associated with any of the site block's hostnames,
// and only be usable via the `log_name` directive
// or the `access_logger_names` variable
if ncl.noHostname {
continue
}
if sblock.hasHostCatchAllKey() && len(ncl.hostnames) == 0 {
// requests for hosts that can't be listed individually should use
// this log because it's a catch-all-hosts server block
srv.Logs.DefaultLoggerName = ncl.name
} else if len(ncl.hostnames) > 0 {
// if the logger overrides the hostnames, map that to the logger name
for _, h := range ncl.hostnames {
if srv.Logs.LoggerNames == nil {
srv.Logs.LoggerNames = make(map[string]caddyhttp.StringArray)
}
srv.Logs.LoggerNames[h] = append(srv.Logs.LoggerNames[h], ncl.name)
}
} else {
// otherwise, map each host to the logger name
for _, h := range sblockLogHosts {
// strip the port from the host, if any
host, _, err := net.SplitHostPort(h)
if err != nil {
host = h
}
if srv.Logs.LoggerNames == nil {
srv.Logs.LoggerNames = make(map[string]caddyhttp.StringArray)
}
srv.Logs.LoggerNames[host] = append(srv.Logs.LoggerNames[host], ncl.name)
}
}
}
if srv.Logs != nil && len(sblock.pile["custom_log"]) == 0 {
// server has access logs enabled, but this server block does not
// enable access logs; therefore, all hosts of this server block
// should not be access-logged
if len(hosts) == 0 {
// if the server block has a catch-all-hosts key, then we should
// not log requests to any host unless it appears in the map
srv.Logs.SkipUnmappedHosts = true
}
srv.Logs.SkipHosts = append(srv.Logs.SkipHosts, sblockLogHosts...)
}
}
// sort for deterministic JSON output
if srv.Logs != nil {
slices.Sort(srv.Logs.SkipHosts)
}
// a server cannot (natively) serve both HTTP and HTTPS at the
// same time, so make sure the configuration isn't in conflict
err := detectConflictingSchemes(srv, p.serverBlocks, options)
if err != nil {
return nil, err
}
// a catch-all TLS conn policy is necessary to ensure TLS can
// be offered to all hostnames of the server; even though only
// one policy is needed to enable TLS for the server, that
// policy might apply to only certain TLS handshakes; but when
// using the Caddyfile, the user would expect all handshakes to at
// least have a matching connection policy, so here we append a
// catch-all/default policy if there isn't one already (it's
// important that it goes at the end) - see issue #3004:
// https://github.com/caddyserver/caddy/issues/3004
// TODO: maybe a smarter way to handle this might be to just make the
// auto-HTTPS logic at provision-time detect if there is any connection
// policy missing for any HTTPS-enabled hosts, if so, add it... maybe?
if addressQualifiesForTLS &&
!hasCatchAllTLSConnPolicy &&
(len(srv.TLSConnPolicies) > 0 || !autoHTTPSWillAddConnPolicy || defaultSNI != "" || fallbackSNI != "") {
srv.TLSConnPolicies = append(srv.TLSConnPolicies, &caddytls.ConnectionPolicy{DefaultSNI: defaultSNI, FallbackSNI: fallbackSNI})
}
// tidy things up a bit
srv.TLSConnPolicies, err = consolidateConnPolicies(srv.TLSConnPolicies)
if err != nil {
return nil, fmt.Errorf("consolidating TLS connection policies for server %d: %v", i, err)
}
srv.Routes = consolidateRoutes(srv.Routes)
servers[fmt.Sprintf("srv%d", i)] = srv
}
err := applyServerOptions(servers, options, warnings)
if err != nil {
return nil, fmt.Errorf("applying global server options: %v", err)
}
return servers, nil
}
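// detectConflictingSchemes returns an error if the site addresses grouped
// onto this server would require it to serve both HTTP and HTTPS, which a
// single server cannot natively do.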
func detectConflictingSchemes(srv *caddyhttp.Server, serverBlocks []serverBlock, options map[string]any) error {
httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort)
if hp, ok := options["http_port"].(int); ok {
httpPort = strconv.Itoa(hp)
}
httpsPort := strconv.Itoa(caddyhttp.DefaultHTTPSPort)
if hsp, ok := options["https_port"].(int); ok {
httpsPort = strconv.Itoa(hsp)
}
var httpOrHTTPS string
checkAndSetHTTP := func(addr Address) error {
if httpOrHTTPS == "HTTPS" {
errMsg := fmt.Errorf("server listening on %v is configured for HTTPS and cannot natively multiplex HTTP and HTTPS: %s",
srv.Listen, addr.Original)
if addr.Scheme == "" && addr.Host == "" {
errMsg = fmt.Errorf("%s (try specifying https:// in the address)", errMsg)
}
return errMsg
}
if len(srv.TLSConnPolicies) > 0 {
// any connection policy created for an HTTP server
// is a logical conflict, as it would enable HTTPS
return fmt.Errorf("server listening on %v is HTTP, but attempts to configure TLS connection policies", srv.Listen)
}
httpOrHTTPS = "HTTP"
return nil
}
checkAndSetHTTPS := func(addr Address) error {
if httpOrHTTPS == "HTTP" {
return fmt.Errorf("server listening on %v is configured for HTTP and cannot natively multiplex HTTP and HTTPS: %s",
srv.Listen, addr.Original)
}
httpOrHTTPS = "HTTPS"
return nil
}
for _, sblock := range serverBlocks {
for _, addr := range sblock.keys {
if addr.Scheme == "http" || addr.Port == httpPort {
if err := checkAndSetHTTP(addr); err != nil {
return err
}
} else if addr.Scheme == "https" || addr.Port == httpsPort || len(srv.TLSConnPolicies) > 0 {
if err := checkAndSetHTTPS(addr); err != nil {
return err
}
} else if addr.Host == "" {
if err := checkAndSetHTTP(addr); err != nil {
return err
}
}
}
}
return nil
}
// consolidateConnPolicies sorts any catch-all policy to the end, removes empty TLS connection
// policies, and combines equivalent ones for a cleaner overall output.
func consolidateConnPolicies(cps caddytls.ConnectionPolicies) (caddytls.ConnectionPolicies, error) {
// catch-all policies (those without any matcher) should be at the
// end, otherwise they nullify any more specific policies
sort.SliceStable(cps, func(i, j int) bool {
return cps[j].MatchersRaw == nil && cps[i].MatchersRaw != nil
})
for i := 0; i < len(cps); i++ {
// compare it to the others
for j := 0; j < len(cps); j++ {
if j == i {
continue
}
// if they're exactly equal in every way, just keep one of them
if reflect.DeepEqual(cps[i], cps[j]) {
cps = append(cps[:j], cps[j+1:]...)
i--
break
}
// if they have the same matcher, try to reconcile each field: either they must
// be identical, or we have to be able to combine them safely
if reflect.DeepEqual(cps[i].MatchersRaw, cps[j].MatchersRaw) {
if len(cps[i].ALPN) > 0 &&
len(cps[j].ALPN) > 0 &&
!reflect.DeepEqual(cps[i].ALPN, cps[j].ALPN) {
return nil, fmt.Errorf("two policies with same match criteria have conflicting ALPN: %v vs. %v",
cps[i].ALPN, cps[j].ALPN)
}
if len(cps[i].CipherSuites) > 0 &&
len(cps[j].CipherSuites) > 0 &&
!reflect.DeepEqual(cps[i].CipherSuites, cps[j].CipherSuites) {
return nil, fmt.Errorf("two policies with same match criteria have conflicting cipher suites: %v vs. %v",
cps[i].CipherSuites, cps[j].CipherSuites)
}
if cps[i].ClientAuthentication != nil &&
cps[j].ClientAuthentication != nil &&
!reflect.DeepEqual(cps[i].ClientAuthentication, cps[j].ClientAuthentication) {
return nil, fmt.Errorf("two policies with same match criteria have conflicting client auth configuration: %+v vs. %+v",
cps[i].ClientAuthentication, cps[j].ClientAuthentication)
}
if len(cps[i].Curves) > 0 &&
len(cps[j].Curves) > 0 &&
!reflect.DeepEqual(cps[i].Curves, cps[j].Curves) {
return nil, fmt.Errorf("two policies with same match criteria have conflicting curves: %v vs. %v",
cps[i].Curves, cps[j].Curves)
}
if cps[i].DefaultSNI != "" &&
cps[j].DefaultSNI != "" &&
cps[i].DefaultSNI != cps[j].DefaultSNI {
return nil, fmt.Errorf("two policies with same match criteria have conflicting default SNI: %s vs. %s",
cps[i].DefaultSNI, cps[j].DefaultSNI)
}
if cps[i].ProtocolMin != "" &&
cps[j].ProtocolMin != "" &&
cps[i].ProtocolMin != cps[j].ProtocolMin {
return nil, fmt.Errorf("two policies with same match criteria have conflicting min protocol: %s vs. %s",
cps[i].ProtocolMin, cps[j].ProtocolMin)
}
if cps[i].ProtocolMax != "" &&
cps[j].ProtocolMax != "" &&
cps[i].ProtocolMax != cps[j].ProtocolMax {
return nil, fmt.Errorf("two policies with same match criteria have conflicting max protocol: %s vs. %s",
cps[i].ProtocolMax, cps[j].ProtocolMax)
}
if cps[i].CertSelection != nil && cps[j].CertSelection != nil {
// merging fields other than AnyTag is not implemented
if !reflect.DeepEqual(cps[i].CertSelection.SerialNumber, cps[j].CertSelection.SerialNumber) ||
!reflect.DeepEqual(cps[i].CertSelection.SubjectOrganization, cps[j].CertSelection.SubjectOrganization) ||
cps[i].CertSelection.PublicKeyAlgorithm != cps[j].CertSelection.PublicKeyAlgorithm ||
!reflect.DeepEqual(cps[i].CertSelection.AllTags, cps[j].CertSelection.AllTags) {
return nil, fmt.Errorf("two policies with same match criteria have conflicting cert selections: %+v vs. %+v",
cps[i].CertSelection, cps[j].CertSelection)
}
}
// by now we've decided that we can merge the two -- we'll keep i and drop j
if len(cps[i].ALPN) == 0 && len(cps[j].ALPN) > 0 {
cps[i].ALPN = cps[j].ALPN
}
if len(cps[i].CipherSuites) == 0 && len(cps[j].CipherSuites) > 0 {
cps[i].CipherSuites = cps[j].CipherSuites
}
if cps[i].ClientAuthentication == nil && cps[j].ClientAuthentication != nil {
cps[i].ClientAuthentication = cps[j].ClientAuthentication
}
if len(cps[i].Curves) == 0 && len(cps[j].Curves) > 0 {
cps[i].Curves = cps[j].Curves
}
if cps[i].DefaultSNI == "" && cps[j].DefaultSNI != "" {
cps[i].DefaultSNI = cps[j].DefaultSNI
}
if cps[i].ProtocolMin == "" && cps[j].ProtocolMin != "" {
cps[i].ProtocolMin = cps[j].ProtocolMin
}
if cps[i].ProtocolMax == "" && cps[j].ProtocolMax != "" {
cps[i].ProtocolMax = cps[j].ProtocolMax
}
if cps[i].CertSelection == nil && cps[j].CertSelection != nil {
// if j is the only one with a policy, move it over to i
cps[i].CertSelection = cps[j].CertSelection
} else if cps[i].CertSelection != nil && cps[j].CertSelection != nil {
// if both have one, then combine AnyTag
for _, tag := range cps[j].CertSelection.AnyTag {
if !sliceContains(cps[i].CertSelection.AnyTag, tag) {
cps[i].CertSelection.AnyTag = append(cps[i].CertSelection.AnyTag, tag)
}
}
}
cps = append(cps[:j], cps[j+1:]...)
i--
break
}
}
}
return cps, nil
}
// appendSubrouteToRouteList appends the routes in subroute
// to the routeList, optionally qualified by matchers.
func appendSubrouteToRouteList(routeList caddyhttp.RouteList,
subroute *caddyhttp.Subroute,
matcherSetsEnc []caddy.ModuleMap,
p sbAddrAssociation,
warnings *[]caddyconfig.Warning,
) caddyhttp.RouteList {
// nothing to do if... there's nothing to do
if len(matcherSetsEnc) == 0 && len(subroute.Routes) == 0 && subroute.Errors == nil {
return routeList
}
// No need to wrap the handlers in a subroute if this is the only server block
// and there is no matcher for it (doing so would produce unnecessarily nested
// JSON), *unless* there is a host matcher within this site block; if so, then
// we still need to wrap in a subroute because otherwise the host matcher from
// the inside of the site block would be a top-level host matcher, which is
// subject to auto-HTTPS (cert management), and using a host matcher within
// a site block is a valid, common pattern for excluding domains from cert
// management, leading to unexpected behavior; see issue #5124.
wrapInSubroute := true
if len(matcherSetsEnc) == 0 && len(p.serverBlocks) == 1 {
var hasHostMatcher bool
outer:
for _, route := range subroute.Routes {
for _, ms := range route.MatcherSetsRaw {
for matcherName := range ms {
if matcherName == "host" {
hasHostMatcher = true
break outer
}
}
}
}
wrapInSubroute = hasHostMatcher
}
if wrapInSubroute {
route := caddyhttp.Route{
// the semantics of a site block in the Caddyfile dictate
// that only the first matching one is evaluated, since
// site blocks do not cascade nor inherit
Terminal: true,
}
if len(matcherSetsEnc) > 0 {
route.MatcherSetsRaw = matcherSetsEnc
}
if len(subroute.Routes) > 0 || subroute.Errors != nil {
route.HandlersRaw = []json.RawMessage{
caddyconfig.JSONModuleObject(subroute, "handler", "subroute", warnings),
}
}
if len(route.MatcherSetsRaw) > 0 || len(route.HandlersRaw) > 0 {
routeList = append(routeList, route)
}
} else {
routeList = append(routeList, subroute.Routes...)
}
return routeList
}
// buildSubroute turns the config values, which are expected to be routes,
// into a clean and orderly subroute that has all the routes within it.
func buildSubroute(routes []ConfigValue, groupCounter counter, needsSorting bool) (*caddyhttp.Subroute, error) {
if needsSorting {
for _, val := range routes {
if !directiveIsOrdered(val.directive) {
return nil, fmt.Errorf("directive '%s' is not an ordered HTTP handler, so it cannot be used here - try placing within a route block or using the order global option", val.directive)
}
}
sortRoutes(routes)
}
subroute := new(caddyhttp.Subroute)
// some directives are mutually exclusive (only first matching
// instance should be evaluated); this is done by putting their
// routes in the same group
mutuallyExclusiveDirs := map[string]*struct {
count int
groupName string
}{
// as a special case, group rewrite directives so that they are mutually exclusive;
// this means that only the first matching rewrite will be evaluated, and that's
// probably a good thing, since there should never be a need to do more than one
// rewrite (I think?), and cascading rewrites smell bad... imagine these rewrites:
// rewrite /docs/json/* /docs/json/index.html
// rewrite /docs/* /docs/index.html
// (We use this on the Caddy website, or at least we did once.) The first rewrite's
// result is also matched by the second rewrite, making the first rewrite pointless.
// See issue #2959.
"rewrite": {},
// handle blocks are also mutually exclusive by definition
"handle": {},
// root just sets a variable, so if it was not mutually exclusive, intersecting
// root directives would overwrite previously-matched ones; they should not cascade
"root": {},
}
// we need to deterministically loop over each of these directives
// in order to keep the group numbers consistent
keys := make([]string, 0, len(mutuallyExclusiveDirs))
for k := range mutuallyExclusiveDirs {
keys = append(keys, k)
}
sort.Strings(keys)
for _, meDir := range keys {
info := mutuallyExclusiveDirs[meDir]
// see how many instances of the directive there are
for _, r := range routes {
if r.directive == meDir {
info.count++
if info.count > 1 {
break
}
}
}
// if there is more than one, put them in a group
// (special case: "rewrite" directive must always be in
// its own group--even if there is only one--because we
// do not want a rewrite to be consolidated into other
// adjacent routes that happen to have the same matcher,
// see caddyserver/caddy#3108 - because the implied
// intent of rewrite is to do an internal redirect,
// we can't assume that the request will continue to
// match the same matcher; anyway, giving a route a
// unique group name should keep it from consolidating)
if info.count > 1 || meDir == "rewrite" {
info.groupName = groupCounter.nextGroup()
}
}
// add all the routes piled in from directives
for _, r := range routes {
// put this route into a group if it is mutually exclusive
if info, ok := mutuallyExclusiveDirs[r.directive]; ok {
route := r.Value.(caddyhttp.Route)
route.Group = info.groupName
r.Value = route
}
switch route := r.Value.(type) {
case caddyhttp.Subroute:
// if a route-class config value is actually a Subroute handler
// with nothing but a list of routes, then it is the intention
// of the directive to keep these handlers together and in this
// same order, but not necessarily in a subroute (if it wanted
// to keep them in a subroute, the directive would have returned
// a route with a Subroute as its handler); this is useful to
// keep multiple handlers/routes together and in the same order
// so that the sorting procedure we did above doesn't reorder them
if route.Errors != nil {
// if error handlers are also set, this is confusing; it's
// probably supposed to be wrapped in a Route and encoded
// as a regular handler route... programmer error.
panic("found subroute with more than just routes; perhaps it should have been wrapped in a route?")
}
subroute.Routes = append(subroute.Routes, route.Routes...)
case caddyhttp.Route:
subroute.Routes = append(subroute.Routes, route)
}
}
subroute.Routes = consolidateRoutes(subroute.Routes)
return subroute, nil
}
// normalizeDirectiveName ensures directives that should be sorted
// at the same level are named the same before sorting happens.
func normalizeDirectiveName(directive string) string {
// As a special case, we want "handle_path" to be sorted
// at the same level as "handle", so we force them to use
// the same directive name after their parsing is complete.
// See https://github.com/caddyserver/caddy/issues/3675#issuecomment-678042377
if directive == "handle_path" {
directive = "handle"
}
return directive
}
// consolidateRoutes combines routes with the same properties
// (same matchers, same Terminal and Group settings) for a
// cleaner overall output.
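// For example, two adjacent routes that share the same matcher sets,
// Terminal flag, and Group become a single route whose handlers run
// in their original order.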
func consolidateRoutes(routes caddyhttp.RouteList) caddyhttp.RouteList {
for i := 0; i < len(routes)-1; i++ {
if reflect.DeepEqual(routes[i].MatcherSetsRaw, routes[i+1].MatcherSetsRaw) &&
routes[i].Terminal == routes[i+1].Terminal &&
routes[i].Group == routes[i+1].Group {
// keep the handlers in the same order, then splice out repetitive route
routes[i].HandlersRaw = append(routes[i].HandlersRaw, routes[i+1].HandlersRaw...)
routes = append(routes[:i+1], routes[i+2:]...)
i--
}
}
return routes
}
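// matcherSetFromMatcherToken interprets a single matcher token on a
// directive line. A sketch of the token forms it recognizes, based on
// the logic below (names are illustrative):
//
// *           wildcard: match all requests, i.e. no matcher at all
// /some/path  shorthand for a single path matcher
// @name       reference to a named matcher defined elsewhere
//
// Any other token is not a matcher token, and (nil, false, nil) is returned.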
func matcherSetFromMatcherToken(
tkn caddyfile.Token,
matcherDefs map[string]caddy.ModuleMap,
warnings *[]caddyconfig.Warning,
) (caddy.ModuleMap, bool, error) {
// matcher tokens can be wildcards, simple path matchers,
// or refer to a pre-defined matcher by some name
if tkn.Text == "*" {
// match all requests == no matchers, so nothing to do
return nil, true, nil
}
// convenient way to specify a single path match
if strings.HasPrefix(tkn.Text, "/") {
return caddy.ModuleMap{
"path": caddyconfig.JSON(caddyhttp.MatchPath{tkn.Text}, warnings),
}, true, nil
}
// pre-defined matcher
if strings.HasPrefix(tkn.Text, matcherPrefix) {
m, ok := matcherDefs[tkn.Text]
if !ok {
return nil, false, fmt.Errorf("unrecognized matcher name: %+v", tkn.Text)
}
return m, true, nil
}
return nil, false, nil
}
func (st *ServerType) compileEncodedMatcherSets(sblock serverBlock) ([]caddy.ModuleMap, error) {
type hostPathPair struct {
hostm caddyhttp.MatchHost
pathm caddyhttp.MatchPath
}
// keep routes with common host and path matchers together
var matcherPairs []*hostPathPair
var catchAllHosts bool
for _, addr := range sblock.keys {
// choose a matcher pair that should be shared by this
// server block; if none exists yet, create one
var chosenMatcherPair *hostPathPair
for _, mp := range matcherPairs {
if (len(mp.pathm) == 0 && addr.Path == "") ||
(len(mp.pathm) == 1 && mp.pathm[0] == addr.Path) {
chosenMatcherPair = mp
break
}
}
if chosenMatcherPair == nil {
chosenMatcherPair = new(hostPathPair)
if addr.Path != "" {
chosenMatcherPair.pathm = []string{addr.Path}
}
matcherPairs = append(matcherPairs, chosenMatcherPair)
}
// if one of the keys has no host (i.e. is a catch-all for
// any hostname), then we need to null out the host matcher
// entirely so that it matches all hosts
if addr.Host == "" && !catchAllHosts {
chosenMatcherPair.hostm = nil
catchAllHosts = true
}
if catchAllHosts {
continue
}
// add this server block's keys to the matcher
// pair if it doesn't already exist
if addr.Host != "" {
var found bool
for _, h := range chosenMatcherPair.hostm {
if h == addr.Host {
found = true
break
}
}
if !found {
chosenMatcherPair.hostm = append(chosenMatcherPair.hostm, addr.Host)
}
}
}
// iterate each pairing of host and path matchers and
// put them into a map for JSON encoding
var matcherSets []map[string]caddyhttp.RequestMatcher
for _, mp := range matcherPairs {
matcherSet := make(map[string]caddyhttp.RequestMatcher)
if len(mp.hostm) > 0 {
matcherSet["host"] = mp.hostm
}
if len(mp.pathm) > 0 {
matcherSet["path"] = mp.pathm
}
if len(matcherSet) > 0 {
matcherSets = append(matcherSets, matcherSet)
}
}
// finally, encode each of the matcher sets
matcherSetsEnc := make([]caddy.ModuleMap, 0, len(matcherSets))
for _, ms := range matcherSets {
msEncoded, err := encodeMatcherSet(ms)
if err != nil {
return nil, fmt.Errorf("server block %v: %v", sblock.block.Keys, err)
}
matcherSetsEnc = append(matcherSetsEnc, msEncoded)
}
return matcherSetsEnc, nil
}
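// parseMatcherDefinitions parses a named matcher definition, starting at
// its name token. A sketch of the forms handled below (matcher names and
// arguments are illustrative):
//
// @name "<CEL expression>"
// @name <matcher> <args...>
// @name {
// <matcher> <args...>
// ...
// }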
func parseMatcherDefinitions(d *caddyfile.Dispenser, matchers map[string]caddy.ModuleMap) error {
d.Next() // advance to the first token
// this is the "name" for "named matchers"
definitionName := d.Val()
if _, ok := matchers[definitionName]; ok {
return fmt.Errorf("matcher is defined more than once: %s", definitionName)
}
matchers[definitionName] = make(caddy.ModuleMap)
// given a matcher name and the tokens following it, parse
// the tokens as a matcher module and record it
makeMatcher := func(matcherName string, tokens []caddyfile.Token) error {
// create a new dispenser from the tokens
dispenser := caddyfile.NewDispenser(tokens)
// set the matcher name (without @) in the dispenser context so
// that matcher modules can access it to use it as their name
// (e.g. regexp matchers which use the name for capture groups)
dispenser.SetContext(caddyfile.MatcherNameCtxKey, definitionName[1:])
mod, err := caddy.GetModule("http.matchers." + matcherName)
if err != nil {
return fmt.Errorf("getting matcher module '%s': %v", matcherName, err)
}
unm, ok := mod.New().(caddyfile.Unmarshaler)
if !ok {
return fmt.Errorf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
}
err = unm.UnmarshalCaddyfile(dispenser)
if err != nil {
return err
}
rm, ok := unm.(caddyhttp.RequestMatcher)
if !ok {
return fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
}
matchers[definitionName][matcherName] = caddyconfig.JSON(rm, nil)
return nil
}
// if the next token is quoted, we can assume it's not a matcher name
// and that it's probably an 'expression' matcher
if d.NextArg() {
if d.Token().Quoted() {
// since it was missing the matcher name, we insert a token
// in front of the expression token itself; we use Clone() to
// make the new token to keep the same the import location as
// the next token, if this is within a snippet or imported file.
// see https://github.com/caddyserver/caddy/issues/6287
expressionToken := d.Token().Clone()
expressionToken.Text = "expression"
err := makeMatcher("expression", []caddyfile.Token{expressionToken, d.Token()})
if err != nil {
return err
}
return nil
}
// if it wasn't quoted, then we need to rewind after calling
// d.NextArg() so the below properly grabs the matcher name
d.Prev()
}
// in case there are multiple instances of the same matcher, concatenate
// their tokens (we expect that UnmarshalCaddyfile should be able to
// handle more than one segment); otherwise, we'd overwrite other
// instances of the matcher in this set
tokensByMatcherName := make(map[string][]caddyfile.Token)
for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
matcherName := d.Val()
tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...)
}
for matcherName, tokens := range tokensByMatcherName {
err := makeMatcher(matcherName, tokens)
if err != nil {
return err
}
}
return nil
}
func encodeMatcherSet(matchers map[string]caddyhttp.RequestMatcher) (caddy.ModuleMap, error) {
msEncoded := make(caddy.ModuleMap)
for matcherName, val := range matchers {
jsonBytes, err := json.Marshal(val)
if err != nil {
return nil, fmt.Errorf("marshaling matcher set %#v: %v", matchers, err)
}
msEncoded[matcherName] = jsonBytes
}
return msEncoded, nil
}
// WasReplacedPlaceholderShorthand checks if a token string was
// likely a replaced shorthand of the known Caddyfile placeholder
// replacement outputs. Useful to prevent some user-defined map
// output destinations from overlapping with one of the
// predefined shorthands.
func WasReplacedPlaceholderShorthand(token string) string {
prev := ""
for i, item := range placeholderShorthands() {
// only look at every 2nd item, which is the replacement
if i%2 == 0 {
prev = item
continue
}
if strings.Trim(token, "{}") == strings.Trim(item, "{}") {
// we return the original shorthand so it
// can be used for an error message
return prev
}
}
return ""
}
// tryInt tries to convert val to an integer. If it fails,
// it downgrades the error to a warning and returns 0.
func tryInt(val any, warnings *[]caddyconfig.Warning) int {
intVal, ok := val.(int)
if val != nil && !ok && warnings != nil {
*warnings = append(*warnings, caddyconfig.Warning{Message: "not an integer type"})
}
return intVal
}
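// tryString is like tryInt, but for strings: a failed type assertion is
// downgraded to a warning and the zero value is returned.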
func tryString(val any, warnings *[]caddyconfig.Warning) string {
stringVal, ok := val.(string)
if val != nil && !ok && warnings != nil {
*warnings = append(*warnings, caddyconfig.Warning{Message: "not a string type"})
}
return stringVal
}
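// tryDuration is like tryInt, but for caddy.Duration values: a failed type
// assertion is downgraded to a warning and the zero value is returned.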
func tryDuration(val any, warnings *[]caddyconfig.Warning) caddy.Duration {
durationVal, ok := val.(caddy.Duration)
if val != nil && !ok && warnings != nil {
*warnings = append(*warnings, caddyconfig.Warning{Message: "not a duration type"})
}
return durationVal
}
// sliceContains returns true if needle is in haystack.
func sliceContains(haystack []string, needle string) bool {
for _, s := range haystack {
if s == needle {
return true
}
}
return false
}
// listenersUseAnyPortOtherThan returns true if there are any
// listeners in addresses that use a port which is not otherPort.
// Mostly borrowed from an unexported method in the caddyhttp package.
func listenersUseAnyPortOtherThan(addresses []string, otherPort string) bool {
otherPortInt, err := strconv.Atoi(otherPort)
if err != nil {
return false
}
for _, lnAddr := range addresses {
laddrs, err := caddy.ParseNetworkAddress(lnAddr)
if err != nil {
continue
}
if uint(otherPortInt) > laddrs.EndPort || uint(otherPortInt) < laddrs.StartPort {
return true
}
}
return false
}
// specificity returns len(s) minus any wildcards (*) and
// placeholders ({...}). Basically, it's a length count
// that penalizes the use of wildcards and placeholders.
// This is useful for comparing hostnames and paths.
// However, wildcards in paths are not a sure answer to
// the question of specificity. For example,
// '*.example.com' is clearly less specific than
// 'a.example.com', but is '/a' more or less specific
// than '/a*'?
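// For illustration: specificity("a.example.com") == 13,
// specificity("*.example.com") == 12, and specificity("/foo/{bar}") == 5.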
func specificity(s string) int {
l := len(s) - strings.Count(s, "*")
for len(s) > 0 {
start := strings.Index(s, "{")
if start < 0 {
return l
}
end := strings.Index(s[start:], "}") + start + 1
if end <= start {
return l
}
l -= end - start
s = s[end:]
}
return l
}
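// counter generates unique, monotonically increasing group names
// ("group0", "group1", ...); the pointer lets all copies of a counter
// advance the same underlying number.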
type counter struct {
n *int
}
func (c counter) nextGroup() string {
name := fmt.Sprintf("group%d", *c.n)
*c.n++
return name
}
type namedCustomLog struct {
name string
hostnames []string
log *caddy.CustomLog
noHostname bool
}
// sbAddrAssociation is a mapping from a list of
// addresses to a list of server blocks that are
// served on those addresses.
type sbAddrAssociation struct {
addresses []string
serverBlocks []serverBlock
}
const (
matcherPrefix = "@"
namedRouteKey = "named_route"
)
// Interface guard
var _ caddyfile.ServerType = (*ServerType)(nil)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httpcaddyfile
import (
"strconv"
"github.com/caddyserver/certmagic"
"github.com/mholt/acmez/v2/acme"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddytls"
)
func init() {
RegisterGlobalOption("debug", parseOptTrue)
RegisterGlobalOption("http_port", parseOptHTTPPort)
RegisterGlobalOption("https_port", parseOptHTTPSPort)
RegisterGlobalOption("default_bind", parseOptStringList)
RegisterGlobalOption("grace_period", parseOptDuration)
RegisterGlobalOption("shutdown_delay", parseOptDuration)
RegisterGlobalOption("default_sni", parseOptSingleString)
RegisterGlobalOption("fallback_sni", parseOptSingleString)
RegisterGlobalOption("order", parseOptOrder)
RegisterGlobalOption("storage", parseOptStorage)
RegisterGlobalOption("storage_clean_interval", parseOptDuration)
RegisterGlobalOption("renew_interval", parseOptDuration)
RegisterGlobalOption("ocsp_interval", parseOptDuration)
RegisterGlobalOption("acme_ca", parseOptSingleString)
RegisterGlobalOption("acme_ca_root", parseOptSingleString)
RegisterGlobalOption("acme_dns", parseOptACMEDNS)
RegisterGlobalOption("acme_eab", parseOptACMEEAB)
RegisterGlobalOption("cert_issuer", parseOptCertIssuer)
RegisterGlobalOption("skip_install_trust", parseOptTrue)
RegisterGlobalOption("email", parseOptSingleString)
RegisterGlobalOption("admin", parseOptAdmin)
RegisterGlobalOption("on_demand_tls", parseOptOnDemand)
RegisterGlobalOption("local_certs", parseOptTrue)
RegisterGlobalOption("key_type", parseOptSingleString)
RegisterGlobalOption("auto_https", parseOptAutoHTTPS)
RegisterGlobalOption("servers", parseServerOptions)
RegisterGlobalOption("ocsp_stapling", parseOCSPStaplingOptions)
RegisterGlobalOption("cert_lifetime", parseOptDuration)
RegisterGlobalOption("log", parseLogOptions)
RegisterGlobalOption("preferred_chains", parseOptPreferredChains)
RegisterGlobalOption("persist_config", parseOptPersistConfig)
}
func parseOptTrue(d *caddyfile.Dispenser, _ any) (any, error) { return true, nil }
func parseOptHTTPPort(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume option name
var httpPort int
var httpPortStr string
if !d.AllArgs(&httpPortStr) {
return 0, d.ArgErr()
}
var err error
httpPort, err = strconv.Atoi(httpPortStr)
if err != nil {
return 0, d.Errf("converting port '%s' to integer value: %v", httpPortStr, err)
}
return httpPort, nil
}
func parseOptHTTPSPort(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume option name
var httpsPort int
var httpsPortStr string
if !d.AllArgs(&httpsPortStr) {
return 0, d.ArgErr()
}
var err error
httpsPort, err = strconv.Atoi(httpsPortStr)
if err != nil {
return 0, d.Errf("converting port '%s' to integer value: %v", httpsPortStr, err)
}
return httpsPort, nil
}
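// parseOptOrder parses the order global option, which adjusts the standard
// ordering of HTTP handler directives. A sketch of the accepted syntax,
// based on the parsing below (directive names are placeholders):
//
// order <directive> first|last
// order <directive> before|after <other_directive>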
func parseOptOrder(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume option name
// get directive name
if !d.Next() {
return nil, d.ArgErr()
}
dirName := d.Val()
if _, ok := registeredDirectives[dirName]; !ok {
return nil, d.Errf("%s is not a registered directive", dirName)
}
// get positional token
if !d.Next() {
return nil, d.ArgErr()
}
pos := Positional(d.Val())
newOrder := directiveOrder
// if directive exists, first remove it
for i, d := range newOrder {
if d == dirName {
newOrder = append(newOrder[:i], newOrder[i+1:]...)
break
}
}
// act on the positional
switch pos {
case First:
newOrder = append([]string{dirName}, newOrder...)
if d.NextArg() {
return nil, d.ArgErr()
}
directiveOrder = newOrder
return newOrder, nil
case Last:
newOrder = append(newOrder, dirName)
if d.NextArg() {
return nil, d.ArgErr()
}
directiveOrder = newOrder
return newOrder, nil
case Before:
case After:
default:
return nil, d.Errf("unknown positional '%s'", pos)
}
// get name of other directive
if !d.NextArg() {
return nil, d.ArgErr()
}
otherDir := d.Val()
if d.NextArg() {
return nil, d.ArgErr()
}
// insert directive into proper position
for i, d := range newOrder {
if d == otherDir {
if pos == Before {
newOrder = append(newOrder[:i], append([]string{dirName}, newOrder[i:]...)...)
} else if pos == After {
newOrder = append(newOrder[:i+1], append([]string{dirName}, newOrder[i+1:]...)...)
}
break
}
}
directiveOrder = newOrder
return newOrder, nil
}
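// parseOptStorage parses the storage global option. A sketch of the
// accepted syntax, based on the parsing below (the available options
// depend on the storage module in use):
//
// storage <module_name> {
// <module options...>
// }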
func parseOptStorage(d *caddyfile.Dispenser, _ any) (any, error) {
if !d.Next() { // consume option name
return nil, d.ArgErr()
}
if !d.Next() { // get storage module name
return nil, d.ArgErr()
}
modID := "caddy.storage." + d.Val()
unm, err := caddyfile.UnmarshalModule(d, modID)
if err != nil {
return nil, err
}
storage, ok := unm.(caddy.StorageConverter)
if !ok {
return nil, d.Errf("module %s is not a caddy.StorageConverter", modID)
}
return storage, nil
}
func parseOptDuration(d *caddyfile.Dispenser, _ any) (any, error) {
if !d.Next() { // consume option name
return nil, d.ArgErr()
}
if !d.Next() { // get duration value
return nil, d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return nil, err
}
return caddy.Duration(dur), nil
}
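// parseOptACMEDNS parses the acme_dns global option. A sketch of the
// accepted syntax, based on the parsing below (the available options
// depend on the DNS provider module in use):
//
// acme_dns <provider_name> [<provider options...>]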
func parseOptACMEDNS(d *caddyfile.Dispenser, _ any) (any, error) {
if !d.Next() { // consume option name
return nil, d.ArgErr()
}
if !d.Next() { // get DNS module name
return nil, d.ArgErr()
}
modID := "dns.providers." + d.Val()
unm, err := caddyfile.UnmarshalModule(d, modID)
if err != nil {
return nil, err
}
prov, ok := unm.(certmagic.DNSProvider)
if !ok {
return nil, d.Errf("module %s (%T) is not a certmagic.DNSProvider", modID, unm)
}
return prov, nil
}
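// parseOptACMEEAB parses the acme_eab global option. A sketch of the
// accepted syntax, based on the parsing below:
//
// acme_eab {
// key_id <key_id>
// mac_key <mac_key>
// }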
func parseOptACMEEAB(d *caddyfile.Dispenser, _ any) (any, error) {
eab := new(acme.EAB)
d.Next() // consume option name
if d.NextArg() {
return nil, d.ArgErr()
}
for d.NextBlock(0) {
switch d.Val() {
case "key_id":
if !d.NextArg() {
return nil, d.ArgErr()
}
eab.KeyID = d.Val()
case "mac_key":
if !d.NextArg() {
return nil, d.ArgErr()
}
eab.MACKey = d.Val()
default:
return nil, d.Errf("unrecognized parameter '%s'", d.Val())
}
}
return eab, nil
}
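// parseOptCertIssuer parses the cert_issuer global option, appending to
// any issuers accumulated from earlier occurrences. A sketch of the
// accepted syntax, based on the parsing below (the available options
// depend on the issuer module in use):
//
// cert_issuer <issuer_name> [<issuer options...>]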
func parseOptCertIssuer(d *caddyfile.Dispenser, existing any) (any, error) {
d.Next() // consume option name
var issuers []certmagic.Issuer
if existing != nil {
issuers = existing.([]certmagic.Issuer)
}
// get issuer module name
if !d.Next() {
return nil, d.ArgErr()
}
modID := "tls.issuance." + d.Val()
unm, err := caddyfile.UnmarshalModule(d, modID)
if err != nil {
return nil, err
}
iss, ok := unm.(certmagic.Issuer)
if !ok {
return nil, d.Errf("module %s (%T) is not a certmagic.Issuer", modID, unm)
}
issuers = append(issuers, iss)
return issuers, nil
}
func parseOptSingleString(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume option name
if !d.Next() {
return "", d.ArgErr()
}
val := d.Val()
if d.Next() {
return "", d.ArgErr()
}
return val, nil
}
func parseOptStringList(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume option name
val := d.RemainingArgs()
if len(val) == 0 {
return "", d.ArgErr()
}
return val, nil
}
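// parseOptAdmin parses the admin global option. A sketch of the accepted
// syntax, based on the parsing below:
//
// admin off|<listen_address> {
// enforce_origin
// origins <origins...>
// }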
func parseOptAdmin(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume option name
adminCfg := new(caddy.AdminConfig)
if d.NextArg() {
listenAddress := d.Val()
if listenAddress == "off" {
adminCfg.Disabled = true
if d.Next() { // do not accept any remaining options, including a block
return nil, d.Err("no more options are allowed after turning off the admin config")
}
} else {
adminCfg.Listen = listenAddress
if d.NextArg() { // At most 1 arg is allowed
return nil, d.ArgErr()
}
}
}
for d.NextBlock(0) {
switch d.Val() {
case "enforce_origin":
adminCfg.EnforceOrigin = true
case "origins":
adminCfg.Origins = d.RemainingArgs()
default:
return nil, d.Errf("unrecognized parameter '%s'", d.Val())
}
}
if adminCfg.Listen == "" && !adminCfg.Disabled {
adminCfg.Listen = caddy.DefaultAdminListen
}
return adminCfg, nil
}
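// parseOptOnDemand parses the on_demand_tls global option. A sketch of the
// accepted syntax, based on the parsing below ('ask' and 'permission' are
// two ways of configuring the same permission module and cannot both be
// used):
//
// on_demand_tls {
// ask <endpoint>
// permission <module_name> [<module options...>]
// interval <duration>
// burst <n>
// }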
func parseOptOnDemand(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume option name
if d.NextArg() {
return nil, d.ArgErr()
}
var ond *caddytls.OnDemandConfig
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "ask":
if !d.NextArg() {
return nil, d.ArgErr()
}
if ond == nil {
ond = new(caddytls.OnDemandConfig)
}
if ond.PermissionRaw != nil {
return nil, d.Err("on-demand TLS permission module (or 'ask') already specified")
}
perm := caddytls.PermissionByHTTP{Endpoint: d.Val()}
ond.PermissionRaw = caddyconfig.JSONModuleObject(perm, "module", "http", nil)
case "permission":
if !d.NextArg() {
return nil, d.ArgErr()
}
if ond == nil {
ond = new(caddytls.OnDemandConfig)
}
if ond.PermissionRaw != nil {
return nil, d.Err("on-demand TLS permission module (or 'ask') already specified")
}
modName := d.Val()
modID := "tls.permission." + modName
unm, err := caddyfile.UnmarshalModule(d, modID)
if err != nil {
return nil, err
}
perm, ok := unm.(caddytls.OnDemandPermission)
if !ok {
return nil, d.Errf("module %s (%T) is not an on-demand TLS permission module", modID, unm)
}
ond.PermissionRaw = caddyconfig.JSONModuleObject(perm, "module", modName, nil)
case "interval":
if !d.NextArg() {
return nil, d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return nil, err
}
if ond == nil {
ond = new(caddytls.OnDemandConfig)
}
if ond.RateLimit == nil {
ond.RateLimit = new(caddytls.RateLimit)
}
ond.RateLimit.Interval = caddy.Duration(dur)
case "burst":
if !d.NextArg() {
return nil, d.ArgErr()
}
burst, err := strconv.Atoi(d.Val())
if err != nil {
return nil, err
}
if ond == nil {
ond = new(caddytls.OnDemandConfig)
}
if ond.RateLimit == nil {
ond.RateLimit = new(caddytls.RateLimit)
}
ond.RateLimit.Burst = burst
default:
return nil, d.Errf("unrecognized parameter '%s'", d.Val())
}
}
if ond == nil {
return nil, d.Err("expected at least one config parameter for on_demand_tls")
}
return ond, nil
}
func parseOptPersistConfig(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume option name
if !d.Next() {
return "", d.ArgErr()
}
val := d.Val()
if d.Next() {
return "", d.ArgErr()
}
if val != "off" {
return "", d.Errf("persist_config must be 'off'")
}
return val, nil
}
func parseOptAutoHTTPS(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume option name
if !d.Next() {
return "", d.ArgErr()
}
val := d.Val()
if d.Next() {
return "", d.ArgErr()
}
if val != "off" && val != "disable_redirects" && val != "disable_certs" && val != "ignore_loaded_certs" {
return "", d.Errf("auto_https must be one of 'off', 'disable_redirects', 'disable_certs', or 'ignore_loaded_certs'")
}
return val, nil
}
func parseServerOptions(d *caddyfile.Dispenser, _ any) (any, error) {
return unmarshalCaddyfileServerOptions(d)
}
func parseOCSPStaplingOptions(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next() // consume option name
var val string
if !d.AllArgs(&val) {
return nil, d.ArgErr()
}
if val != "off" {
return nil, d.Errf("invalid argument '%s'", val)
}
return certmagic.OCSPConfig{
DisableStapling: val == "off",
}, nil
}
// parseLogOptions parses the global log option. Syntax:
//
// log [name] {
// output <writer_module> ...
// format <encoder_module> ...
// level <level>
// include <namespaces...>
// exclude <namespaces...>
// }
//
// When the name argument is unspecified, this directive modifies the default
// logger.
func parseLogOptions(d *caddyfile.Dispenser, existingVal any) (any, error) {
currentNames := make(map[string]struct{})
if existingVal != nil {
innerVals, ok := existingVal.([]ConfigValue)
if !ok {
return nil, d.Errf("existing log values of unexpected type: %T", existingVal)
}
for _, rawVal := range innerVals {
val, ok := rawVal.Value.(namedCustomLog)
if !ok {
return nil, d.Errf("existing log value of unexpected type: %T", existingVal)
}
currentNames[val.name] = struct{}{}
}
}
var warnings []caddyconfig.Warning
// Call out to the same parser that handles server-specific log configuration.
configValues, err := parseLogHelper(
Helper{
Dispenser: d,
warnings: &warnings,
},
currentNames,
)
if err != nil {
return nil, err
}
if len(warnings) > 0 {
return nil, d.Errf("warnings found in parsing global log options: %+v", warnings)
}
return configValues, nil
}
func parseOptPreferredChains(d *caddyfile.Dispenser, _ any) (any, error) {
d.Next()
return caddytls.ParseCaddyfilePreferredChainsOptions(d)
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httpcaddyfile
import (
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddypki"
)
func init() {
RegisterGlobalOption("pki", parsePKIApp)
}
// parsePKIApp parses the global pki option. Syntax:
//
// pki {
// ca [<id>] {
// name <name>
// root_cn <name>
// intermediate_cn <name>
// intermediate_lifetime <duration>
// root {
// cert <path>
// key <path>
// format <format>
// }
// intermediate {
// cert <path>
// key <path>
// format <format>
// }
// }
// }
//
// When the CA ID is unspecified, 'local' is assumed.
func parsePKIApp(d *caddyfile.Dispenser, existingVal any) (any, error) {
d.Next() // consume app name
pki := &caddypki.PKI{
CAs: make(map[string]*caddypki.CA),
}
for d.NextBlock(0) {
switch d.Val() {
case "ca":
pkiCa := new(caddypki.CA)
if d.NextArg() {
pkiCa.ID = d.Val()
if d.NextArg() {
return nil, d.ArgErr()
}
}
if pkiCa.ID == "" {
pkiCa.ID = caddypki.DefaultCAID
}
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "name":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.Name = d.Val()
case "root_cn":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.RootCommonName = d.Val()
case "intermediate_cn":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.IntermediateCommonName = d.Val()
case "intermediate_lifetime":
if !d.NextArg() {
return nil, d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return nil, err
}
pkiCa.IntermediateLifetime = caddy.Duration(dur)
case "root":
if pkiCa.Root == nil {
pkiCa.Root = new(caddypki.KeyPair)
}
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "cert":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.Root.Certificate = d.Val()
case "key":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.Root.PrivateKey = d.Val()
case "format":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.Root.Format = d.Val()
default:
return nil, d.Errf("unrecognized pki ca root option '%s'", d.Val())
}
}
case "intermediate":
if pkiCa.Intermediate == nil {
pkiCa.Intermediate = new(caddypki.KeyPair)
}
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "cert":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.Intermediate.Certificate = d.Val()
case "key":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.Intermediate.PrivateKey = d.Val()
case "format":
if !d.NextArg() {
return nil, d.ArgErr()
}
pkiCa.Intermediate.Format = d.Val()
default:
return nil, d.Errf("unrecognized pki ca intermediate option '%s'", d.Val())
}
}
default:
return nil, d.Errf("unrecognized pki ca option '%s'", d.Val())
}
}
pki.CAs[pkiCa.ID] = pkiCa
default:
return nil, d.Errf("unrecognized pki option '%s'", d.Val())
}
}
return pki, nil
}
func (st ServerType) buildPKIApp(
pairings []sbAddrAssociation,
options map[string]any,
warnings []caddyconfig.Warning,
) (*caddypki.PKI, []caddyconfig.Warning, error) {
skipInstallTrust := false
if _, ok := options["skip_install_trust"]; ok {
skipInstallTrust = true
}
falseBool := false
// Load the PKI app configured via global options
var pkiApp *caddypki.PKI
unwrappedPki, ok := options["pki"].(*caddypki.PKI)
if ok {
pkiApp = unwrappedPki
} else {
pkiApp = &caddypki.PKI{CAs: make(map[string]*caddypki.CA)}
}
for _, ca := range pkiApp.CAs {
if skipInstallTrust {
ca.InstallTrust = &falseBool
}
pkiApp.CAs[ca.ID] = ca
}
// Add in the CAs configured via directives
for _, p := range pairings {
for _, sblock := range p.serverBlocks {
// find all the CAs that were defined and add them to the app config
// i.e. from any "acme_server" directives
for _, caCfgValue := range sblock.pile["pki.ca"] {
ca := caCfgValue.Value.(*caddypki.CA)
if skipInstallTrust {
ca.InstallTrust = &falseBool
}
// the CA might already exist from global options, so
// don't overwrite it in that case
if _, ok := pkiApp.CAs[ca.ID]; !ok {
pkiApp.CAs[ca.ID] = ca
}
}
}
}
// if no CAs were defined in any of the servers, and we were
// requested to not install trust, then add an entry for the
// default/local CA so that setting can be applied to it
if len(pkiApp.CAs) == 0 && skipInstallTrust {
ca := new(caddypki.CA)
ca.ID = caddypki.DefaultCAID
ca.InstallTrust = &falseBool
pkiApp.CAs[ca.ID] = ca
}
return pkiApp, warnings, nil
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httpcaddyfile
import (
"encoding/json"
"fmt"
"github.com/dustin/go-humanize"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)
// serverOptions collects server config overrides parsed from Caddyfile global options
type serverOptions struct {
// If set, will only apply these options to servers that contain a
// listener address that matches exactly. If empty, will apply to all
// servers that were not already matched by another serverOptions.
ListenerAddress string
// These will all map 1:1 to the caddyhttp.Server struct
Name string
ListenerWrappersRaw []json.RawMessage
ReadTimeout caddy.Duration
ReadHeaderTimeout caddy.Duration
WriteTimeout caddy.Duration
IdleTimeout caddy.Duration
KeepAliveInterval caddy.Duration
MaxHeaderBytes int
EnableFullDuplex bool
Protocols []string
StrictSNIHost *bool
TrustedProxiesRaw json.RawMessage
TrustedProxiesStrict int
ClientIPHeaders []string
ShouldLogCredentials bool
Metrics *caddyhttp.Metrics
Trace bool // TODO: EXPERIMENTAL
}
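// unmarshalCaddyfileServerOptions parses a single servers global option
// block. A sketch of the accepted syntax, based on the parsing below
// (sub-option arguments vary by module where noted):
//
// servers [<listener_address>] {
// name <name>
// listener_wrappers {
// <wrapper_module> ...
// }
// timeouts {
// read_body <duration>
// read_header <duration>
// write <duration>
// idle <duration>
// }
// keepalive_interval <duration>
// max_header_size <size>
// enable_full_duplex
// log_credentials
// protocols <h1|h2|h2c|h3...>
// strict_sni_host [on|insecure_off]
// trusted_proxies <ip_source_module> ...
// trusted_proxies_strict
// client_ip_headers <headers...>
// metrics
// trace
// }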
func unmarshalCaddyfileServerOptions(d *caddyfile.Dispenser) (any, error) {
d.Next() // consume option name
serverOpts := serverOptions{}
if d.NextArg() {
serverOpts.ListenerAddress = d.Val()
if d.NextArg() {
return nil, d.ArgErr()
}
}
for d.NextBlock(0) {
switch d.Val() {
case "name":
if serverOpts.ListenerAddress == "" {
return nil, d.Errf("cannot set a name for a server without a listener address")
}
if !d.NextArg() {
return nil, d.ArgErr()
}
serverOpts.Name = d.Val()
case "listener_wrappers":
for nesting := d.Nesting(); d.NextBlock(nesting); {
modID := "caddy.listeners." + d.Val()
unm, err := caddyfile.UnmarshalModule(d, modID)
if err != nil {
return nil, err
}
listenerWrapper, ok := unm.(caddy.ListenerWrapper)
if !ok {
return nil, fmt.Errorf("module %s (%T) is not a listener wrapper", modID, unm)
}
jsonListenerWrapper := caddyconfig.JSONModuleObject(
listenerWrapper,
"wrapper",
listenerWrapper.(caddy.Module).CaddyModule().ID.Name(),
nil,
)
serverOpts.ListenerWrappersRaw = append(serverOpts.ListenerWrappersRaw, jsonListenerWrapper)
}
case "timeouts":
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "read_body":
if !d.NextArg() {
return nil, d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return nil, d.Errf("parsing read_body timeout duration: %v", err)
}
serverOpts.ReadTimeout = caddy.Duration(dur)
case "read_header":
if !d.NextArg() {
return nil, d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return nil, d.Errf("parsing read_header timeout duration: %v", err)
}
serverOpts.ReadHeaderTimeout = caddy.Duration(dur)
case "write":
if !d.NextArg() {
return nil, d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return nil, d.Errf("parsing write timeout duration: %v", err)
}
serverOpts.WriteTimeout = caddy.Duration(dur)
case "idle":
if !d.NextArg() {
return nil, d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return nil, d.Errf("parsing idle timeout duration: %v", err)
}
serverOpts.IdleTimeout = caddy.Duration(dur)
default:
return nil, d.Errf("unrecognized timeouts option '%s'", d.Val())
}
}
case "keepalive_interval":
if !d.NextArg() {
return nil, d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return nil, d.Errf("parsing keepalive interval duration: %v", err)
}
serverOpts.KeepAliveInterval = caddy.Duration(dur)
case "max_header_size":
var sizeStr string
if !d.AllArgs(&sizeStr) {
return nil, d.ArgErr()
}
size, err := humanize.ParseBytes(sizeStr)
if err != nil {
return nil, d.Errf("parsing max_header_size: %v", err)
}
serverOpts.MaxHeaderBytes = int(size)
case "enable_full_duplex":
if d.NextArg() {
return nil, d.ArgErr()
}
serverOpts.EnableFullDuplex = true
case "log_credentials":
if d.NextArg() {
return nil, d.ArgErr()
}
serverOpts.ShouldLogCredentials = true
case "protocols":
protos := d.RemainingArgs()
for _, proto := range protos {
if proto != "h1" && proto != "h2" && proto != "h2c" && proto != "h3" {
return nil, d.Errf("unknown protocol '%s': expected h1, h2, h2c, or h3", proto)
}
if sliceContains(serverOpts.Protocols, proto) {
return nil, d.Errf("protocol %s specified more than once", proto)
}
serverOpts.Protocols = append(serverOpts.Protocols, proto)
}
if nesting := d.Nesting(); d.NextBlock(nesting) {
return nil, d.ArgErr()
}
case "strict_sni_host":
if d.NextArg() && d.Val() != "insecure_off" && d.Val() != "on" {
return nil, d.Errf("strict_sni_host only supports 'on' or 'insecure_off', got '%s'", d.Val())
}
boolVal := true
if d.Val() == "insecure_off" {
boolVal = false
}
serverOpts.StrictSNIHost = &boolVal
case "trusted_proxies":
if !d.NextArg() {
return nil, d.Err("trusted_proxies expects an IP range source module name as its first argument")
}
modID := "http.ip_sources." + d.Val()
unm, err := caddyfile.UnmarshalModule(d, modID)
if err != nil {
return nil, err
}
source, ok := unm.(caddyhttp.IPRangeSource)
if !ok {
return nil, fmt.Errorf("module %s (%T) is not an IP range source", modID, unm)
}
jsonSource := caddyconfig.JSONModuleObject(
source,
"source",
source.(caddy.Module).CaddyModule().ID.Name(),
nil,
)
serverOpts.TrustedProxiesRaw = jsonSource
case "trusted_proxies_strict":
if d.NextArg() {
return nil, d.ArgErr()
}
serverOpts.TrustedProxiesStrict = 1
case "client_ip_headers":
headers := d.RemainingArgs()
for _, header := range headers {
if sliceContains(serverOpts.ClientIPHeaders, header) {
return nil, d.Errf("client IP header %s specified more than once", header)
}
serverOpts.ClientIPHeaders = append(serverOpts.ClientIPHeaders, header)
}
if nesting := d.Nesting(); d.NextBlock(nesting) {
return nil, d.ArgErr()
}
case "metrics":
if d.NextArg() {
return nil, d.ArgErr()
}
if nesting := d.Nesting(); d.NextBlock(nesting) {
return nil, d.ArgErr()
}
serverOpts.Metrics = new(caddyhttp.Metrics)
case "trace":
if d.NextArg() {
return nil, d.ArgErr()
}
serverOpts.Trace = true
default:
return nil, d.Errf("unrecognized servers option '%s'", d.Val())
}
}
return serverOpts, nil
}
// applyServerOptions sets the server options on the appropriate servers
func applyServerOptions(
servers map[string]*caddyhttp.Server,
options map[string]any,
_ *[]caddyconfig.Warning,
) error {
serverOpts, ok := options["servers"].([]serverOptions)
if !ok {
return nil
}
// check for duplicate names, which would clobber the config
existingNames := map[string]bool{}
for _, opts := range serverOpts {
if opts.Name == "" {
continue
}
if existingNames[opts.Name] {
return fmt.Errorf("cannot use duplicate server name '%s'", opts.Name)
}
existingNames[opts.Name] = true
}
// collect the server name overrides
nameReplacements := map[string]string{}
for key, server := range servers {
// find the options that apply to this server
opts := func() *serverOptions {
for _, entry := range serverOpts {
if entry.ListenerAddress == "" {
return &entry
}
for _, listener := range server.Listen {
if entry.ListenerAddress == listener {
return &entry
}
}
}
return nil
}()
// if none apply, then move to the next server
if opts == nil {
continue
}
// set all the options
server.ListenerWrappersRaw = opts.ListenerWrappersRaw
server.ReadTimeout = opts.ReadTimeout
server.ReadHeaderTimeout = opts.ReadHeaderTimeout
server.WriteTimeout = opts.WriteTimeout
server.IdleTimeout = opts.IdleTimeout
server.KeepAliveInterval = opts.KeepAliveInterval
server.MaxHeaderBytes = opts.MaxHeaderBytes
server.EnableFullDuplex = opts.EnableFullDuplex
server.Protocols = opts.Protocols
server.StrictSNIHost = opts.StrictSNIHost
server.TrustedProxiesRaw = opts.TrustedProxiesRaw
server.ClientIPHeaders = opts.ClientIPHeaders
server.TrustedProxiesStrict = opts.TrustedProxiesStrict
server.Metrics = opts.Metrics
if opts.ShouldLogCredentials {
if server.Logs == nil {
server.Logs = new(caddyhttp.ServerLogConfig)
}
server.Logs.ShouldLogCredentials = opts.ShouldLogCredentials
}
if opts.Trace {
// TODO: THIS IS EXPERIMENTAL (MAY 2024)
if server.Logs == nil {
server.Logs = new(caddyhttp.ServerLogConfig)
}
server.Logs.Trace = opts.Trace
}
if opts.Name != "" {
nameReplacements[key] = opts.Name
}
}
// rename the servers if marked to do so
for old, new := range nameReplacements {
servers[new] = servers[old]
delete(servers, old)
}
return nil
}
package httpcaddyfile
import (
"regexp"
"strings"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
type ComplexShorthandReplacer struct {
search *regexp.Regexp
replace string
}
type ShorthandReplacer struct {
complex []ComplexShorthandReplacer
simple *strings.Replacer
}
func NewShorthandReplacer() ShorthandReplacer {
// replace shorthand placeholders (which are convenient
// when writing a Caddyfile) with their actual placeholder
// identifiers or variable names
replacer := strings.NewReplacer(placeholderShorthands()...)
// these are placeholders that allow a user-defined final
// parameter, but we still want to provide a shorthand
// for them, so we use regular expressions to do the replacement
regexpReplacements := []ComplexShorthandReplacer{
{regexp.MustCompile(`{header\.([\w-]*)}`), "{http.request.header.$1}"},
{regexp.MustCompile(`{cookie\.([\w-]*)}`), "{http.request.cookie.$1}"},
{regexp.MustCompile(`{labels\.([\w-]*)}`), "{http.request.host.labels.$1}"},
{regexp.MustCompile(`{path\.([\w-]*)}`), "{http.request.uri.path.$1}"},
{regexp.MustCompile(`{file\.([\w-]*)}`), "{http.request.uri.path.file.$1}"},
{regexp.MustCompile(`{query\.([\w-]*)}`), "{http.request.uri.query.$1}"},
{regexp.MustCompile(`{re\.([\w-\.]*)}`), "{http.regexp.$1}"},
{regexp.MustCompile(`{vars\.([\w-]*)}`), "{http.vars.$1}"},
{regexp.MustCompile(`{rp\.([\w-\.]*)}`), "{http.reverse_proxy.$1}"},
{regexp.MustCompile(`{resp\.([\w-\.]*)}`), "{http.intercept.$1}"},
{regexp.MustCompile(`{err\.([\w-\.]*)}`), "{http.error.$1}"},
{regexp.MustCompile(`{file_match\.([\w-]*)}`), "{http.matchers.file.$1}"},
}
return ShorthandReplacer{
complex: regexpReplacements,
simple: replacer,
}
}
// placeholderShorthands returns a slice of old-new string pairs,
// where the left of the pair is a placeholder shorthand that may
// be used in the Caddyfile, and the right is the replacement.
func placeholderShorthands() []string {
return []string{
"{dir}", "{http.request.uri.path.dir}",
"{file}", "{http.request.uri.path.file}",
"{host}", "{http.request.host}",
"{hostport}", "{http.request.hostport}",
"{port}", "{http.request.port}",
"{method}", "{http.request.method}",
"{path}", "{http.request.uri.path}",
"{query}", "{http.request.uri.query}",
"{remote}", "{http.request.remote}",
"{remote_host}", "{http.request.remote.host}",
"{remote_port}", "{http.request.remote.port}",
"{scheme}", "{http.request.scheme}",
"{uri}", "{http.request.uri}",
"{uuid}", "{http.request.uuid}",
"{tls_cipher}", "{http.request.tls.cipher_suite}",
"{tls_version}", "{http.request.tls.version}",
"{tls_client_fingerprint}", "{http.request.tls.client.fingerprint}",
"{tls_client_issuer}", "{http.request.tls.client.issuer}",
"{tls_client_serial}", "{http.request.tls.client.serial}",
"{tls_client_subject}", "{http.request.tls.client.subject}",
"{tls_client_certificate_pem}", "{http.request.tls.client.certificate_pem}",
"{tls_client_certificate_der_base64}", "{http.request.tls.client.certificate_der_base64}",
"{upstream_hostport}", "{http.reverse_proxy.upstream.hostport}",
"{client_ip}", "{http.vars.client_ip}",
}
}
// ApplyToSegment replaces each shorthand placeholder in the segment with its full placeholder, which Caddy understands.
func (s ShorthandReplacer) ApplyToSegment(segment *caddyfile.Segment) {
if segment != nil {
for i := 0; i < len(*segment); i++ {
// simple string replacements
(*segment)[i].Text = s.simple.Replace((*segment)[i].Text)
// complex regexp replacements
for _, r := range s.complex {
(*segment)[i].Text = r.search.ReplaceAllString((*segment)[i].Text, r.replace)
}
}
}
}
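// Illustrative sketch, not part of the original source: applying the replacer
// to a segment rewrites shorthand tokens in place. The token text below is
// hypothetical; only the replacement behavior is demonstrated.
func exampleShorthandReplacement() {
	sr := NewShorthandReplacer()
	seg := caddyfile.Segment{
		{Text: "respond"},
		{Text: "{host} asked for {path} (re: {re.myre.1})"},
	}
	sr.ApplyToSegment(&seg)
	// seg[1].Text is now:
	// "{http.request.host} asked for {http.request.uri.path} (re: {http.regexp.myre.1})"
}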
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httpcaddyfile
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"sort"
"strconv"
"strings"
"github.com/caddyserver/certmagic"
"github.com/mholt/acmez/v2/acme"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
"github.com/caddyserver/caddy/v2/modules/caddytls"
)
func (st ServerType) buildTLSApp(
pairings []sbAddrAssociation,
options map[string]any,
warnings []caddyconfig.Warning,
) (*caddytls.TLS, []caddyconfig.Warning, error) {
tlsApp := &caddytls.TLS{CertificatesRaw: make(caddy.ModuleMap)}
var certLoaders []caddytls.CertificateLoader
httpPort := strconv.Itoa(caddyhttp.DefaultHTTPPort)
if hp, ok := options["http_port"].(int); ok {
httpPort = strconv.Itoa(hp)
}
autoHTTPS := "on"
if ah, ok := options["auto_https"].(string); ok {
autoHTTPS = ah
}
// find all hosts that share a server block with a hostless
// key, so that they don't get forgotten/omitted by auto-HTTPS
// (since they won't appear in route matchers)
httpsHostsSharedWithHostlessKey := make(map[string]struct{})
if autoHTTPS != "off" {
for _, pair := range pairings {
for _, sb := range pair.serverBlocks {
for _, addr := range sb.keys {
if addr.Host == "" {
// this server block has a hostless key, now
// go through and add all the hosts to the set
for _, otherAddr := range sb.keys {
if otherAddr.Original == addr.Original {
continue
}
if otherAddr.Host != "" && otherAddr.Scheme != "http" && otherAddr.Port != httpPort {
httpsHostsSharedWithHostlessKey[otherAddr.Host] = struct{}{}
}
}
break
}
}
}
}
}
// a catch-all automation policy is used as a "default" for all subjects that
// don't have custom configuration explicitly associated with them; it is
// only added if the global settings or defaults are non-empty
catchAllAP, err := newBaseAutomationPolicy(options, warnings, false)
if err != nil {
return nil, warnings, err
}
if catchAllAP != nil {
if tlsApp.Automation == nil {
tlsApp.Automation = new(caddytls.AutomationConfig)
}
tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, catchAllAP)
}
for _, p := range pairings {
// avoid setting up TLS automation policies for a server that is HTTP-only
if !listenersUseAnyPortOtherThan(p.addresses, httpPort) {
continue
}
for _, sblock := range p.serverBlocks {
// check the scheme of all the site addresses,
// skip building AP if they all had http://
if sblock.isAllHTTP() {
continue
}
// get values that populate an automation policy for this block
ap, err := newBaseAutomationPolicy(options, warnings, true)
if err != nil {
return nil, warnings, err
}
sblockHosts := sblock.hostsFromKeys(false)
if len(sblockHosts) == 0 && catchAllAP != nil {
ap = catchAllAP
}
// on-demand tls
if _, ok := sblock.pile["tls.on_demand"]; ok {
ap.OnDemand = true
}
// reuse private keys tls
if _, ok := sblock.pile["tls.reuse_private_keys"]; ok {
ap.ReusePrivateKeys = true
}
if keyTypeVals, ok := sblock.pile["tls.key_type"]; ok {
ap.KeyType = keyTypeVals[0].Value.(string)
}
// certificate issuers
if issuerVals, ok := sblock.pile["tls.cert_issuer"]; ok {
var issuers []certmagic.Issuer
for _, issuerVal := range issuerVals {
issuers = append(issuers, issuerVal.Value.(certmagic.Issuer))
}
if ap == catchAllAP && !reflect.DeepEqual(ap.Issuers, issuers) {
// this more correctly implements an error check that was removed
// below; try it with this config:
//
// :443 {
// bind 127.0.0.1
// }
//
// :443 {
// bind ::1
// tls {
// issuer acme
// }
// }
return nil, warnings, fmt.Errorf("automation policy from site block is also default/catch-all policy because of key without hostname, and the two are in conflict: %#v != %#v", ap.Issuers, issuers)
}
ap.Issuers = issuers
}
// certificate managers
if certManagerVals, ok := sblock.pile["tls.cert_manager"]; ok {
for _, certManager := range certManagerVals {
certGetterName := certManager.Value.(caddy.Module).CaddyModule().ID.Name()
ap.ManagersRaw = append(ap.ManagersRaw, caddyconfig.JSONModuleObject(certManager.Value, "via", certGetterName, &warnings))
}
}
// custom bind host
for _, cfgVal := range sblock.pile["bind"] {
for _, iss := range ap.Issuers {
// if an issuer was already configured and it is NOT an ACME issuer,
// skip, since we intend to adjust only ACME issuers; ensure we
// include any issuer that embeds/wraps an underlying ACME issuer
var acmeIssuer *caddytls.ACMEIssuer
if acmeWrapper, ok := iss.(acmeCapable); ok {
acmeIssuer = acmeWrapper.GetACMEIssuer()
}
if acmeIssuer == nil {
continue
}
// proceed to configure the ACME issuer's bind host, without
// overwriting any existing settings
if acmeIssuer.Challenges == nil {
acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
}
if acmeIssuer.Challenges.BindHost == "" {
// only binding to one host is supported
var bindHost string
if bindHosts, ok := cfgVal.Value.([]string); ok && len(bindHosts) > 0 {
bindHost = bindHosts[0]
}
acmeIssuer.Challenges.BindHost = bindHost
}
}
}
// we used to ensure this block is allowed to create an automation policy;
// doing so was forbidden if it has a key with no host (i.e. ":443")
// and if there is a different server block that also has a key with no
// host -- since a key with no host matches any host, we need its
// associated automation policy to have an empty Subjects list, i.e. no
// host filter, which is indistinguishable between the two server blocks
// because automation is not done in the context of a particular server...
// this is an example of a poor mapping from Caddyfile to JSON but that's
// the least-leaky abstraction I could figure out -- however, this check
// was preventing certain listeners, like those provided by plugins, from
// being used as desired (see the Tailscale listener plugin), so I removed
// the check: and I think since I originally wrote the check I added a new
// check above which *properly* detects this ambiguity without breaking the
// listener plugin; see the check above with a commented example config
if len(sblockHosts) == 0 && catchAllAP == nil {
// this server block has a key with no hosts, but there is not yet
// a catch-all automation policy (probably because no global options
// were set), so this one becomes it
catchAllAP = ap
}
// associate our new automation policy with this server block's hosts
ap.SubjectsRaw = sblock.hostsFromKeysNotHTTP(httpPort)
sort.Strings(ap.SubjectsRaw) // solely for deterministic test results
// if a combination of public and internal names were given
// for this same server block and no issuer was specified, we
// need to separate them out in the automation policies so
// that the internal names can use the internal issuer and
// the other names can use the default/public/ACME issuer
var ap2 *caddytls.AutomationPolicy
if len(ap.Issuers) == 0 {
var internal, external []string
for _, s := range ap.SubjectsRaw {
// do not create Issuers for Tailscale domains; they will be given a Manager instead
if isTailscaleDomain(s) {
continue
}
if !certmagic.SubjectQualifiesForCert(s) {
return nil, warnings, fmt.Errorf("subject does not qualify for certificate: '%s'", s)
}
// we don't use certmagic.SubjectQualifiesForPublicCert() because of one nuance:
// names like *.*.tld that may not qualify for a public certificate are actually
// fine when used with OnDemand, since OnDemand (currently) does not obtain
// wildcards (if it ever does, there will be a separate config option to enable
// it that we would need to check here) since the hostname is known at handshake;
// and it is unexpected to switch to internal issuer when the user wants to get
// regular certificates on-demand for a class of certs like *.*.tld.
if subjectQualifiesForPublicCert(ap, s) {
external = append(external, s)
} else {
internal = append(internal, s)
}
}
if len(external) > 0 && len(internal) > 0 {
ap.SubjectsRaw = external
apCopy := *ap
ap2 = &apCopy
ap2.SubjectsRaw = internal
ap2.IssuersRaw = []json.RawMessage{caddyconfig.JSONModuleObject(caddytls.InternalIssuer{}, "module", "internal", &warnings)}
}
}
if tlsApp.Automation == nil {
tlsApp.Automation = new(caddytls.AutomationConfig)
}
tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, ap)
if ap2 != nil {
tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, ap2)
}
// certificate loaders
if clVals, ok := sblock.pile["tls.cert_loader"]; ok {
for _, clVal := range clVals {
certLoaders = append(certLoaders, clVal.Value.(caddytls.CertificateLoader))
}
}
}
}
// group certificate loaders by module name, then add to config
if len(certLoaders) > 0 {
loadersByName := make(map[string]caddytls.CertificateLoader)
for _, cl := range certLoaders {
name := caddy.GetModuleName(cl)
// ugh... technically, we may have multiple FileLoader and FolderLoader
// modules (because the tls directive returns one per occurrence), but
// the config structure expects only one instance of each kind of loader
// module, so we have to combine them... instead of enumerating each
// possible cert loader module in a type switch, we can use reflection,
// which works on any cert loaders that are slice types
if reflect.TypeOf(cl).Kind() == reflect.Slice {
combined := reflect.ValueOf(loadersByName[name])
if !combined.IsValid() {
combined = reflect.New(reflect.TypeOf(cl)).Elem()
}
clVal := reflect.ValueOf(cl)
for i := 0; i < clVal.Len(); i++ {
combined = reflect.Append(combined, clVal.Index(i))
}
loadersByName[name] = combined.Interface().(caddytls.CertificateLoader)
}
}
for certLoaderName, loaders := range loadersByName {
tlsApp.CertificatesRaw[certLoaderName] = caddyconfig.JSON(loaders, &warnings)
}
}
// set any of the on-demand options, for if/when on-demand TLS is enabled
if onDemand, ok := options["on_demand_tls"].(*caddytls.OnDemandConfig); ok {
if tlsApp.Automation == nil {
tlsApp.Automation = new(caddytls.AutomationConfig)
}
tlsApp.Automation.OnDemand = onDemand
}
// set the storage clean interval if configured
if storageCleanInterval, ok := options["storage_clean_interval"].(caddy.Duration); ok {
if tlsApp.Automation == nil {
tlsApp.Automation = new(caddytls.AutomationConfig)
}
tlsApp.Automation.StorageCleanInterval = storageCleanInterval
}
// set the expired certificates renew interval if configured
if renewCheckInterval, ok := options["renew_interval"].(caddy.Duration); ok {
if tlsApp.Automation == nil {
tlsApp.Automation = new(caddytls.AutomationConfig)
}
tlsApp.Automation.RenewCheckInterval = renewCheckInterval
}
// set the OCSP check interval if configured
if ocspCheckInterval, ok := options["ocsp_interval"].(caddy.Duration); ok {
if tlsApp.Automation == nil {
tlsApp.Automation = new(caddytls.AutomationConfig)
}
tlsApp.Automation.OCSPCheckInterval = ocspCheckInterval
}
// set whether OCSP stapling should be disabled for manually-managed certificates
if ocspConfig, ok := options["ocsp_stapling"].(certmagic.OCSPConfig); ok {
tlsApp.DisableOCSPStapling = ocspConfig.DisableStapling
}
// if any hostnames appear on the same server block as a key with
// no host, they will not be used with route matchers because the
// hostless key matches all hosts, therefore, it wouldn't be
// considered for auto-HTTPS, so we need to make sure those hosts
// are manually considered for managed certificates; we also need
// to make sure that any of these names which are internal-only
// get internal certificates by default rather than ACME
var al caddytls.AutomateLoader
internalAP := &caddytls.AutomationPolicy{
IssuersRaw: []json.RawMessage{json.RawMessage(`{"module":"internal"}`)},
}
if autoHTTPS != "off" && autoHTTPS != "disable_certs" {
for h := range httpsHostsSharedWithHostlessKey {
al = append(al, h)
if !certmagic.SubjectQualifiesForPublicCert(h) {
internalAP.SubjectsRaw = append(internalAP.SubjectsRaw, h)
}
}
}
if len(al) > 0 {
tlsApp.CertificatesRaw["automate"] = caddyconfig.JSON(al, &warnings)
}
if len(internalAP.SubjectsRaw) > 0 {
if tlsApp.Automation == nil {
tlsApp.Automation = new(caddytls.AutomationConfig)
}
tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, internalAP)
}
// if there are any global options set for issuers (ACME ones in particular), make sure they
// take effect in every automation policy that does not have any issuers
if tlsApp.Automation != nil {
globalEmail := options["email"]
globalACMECA := options["acme_ca"]
globalACMECARoot := options["acme_ca_root"]
globalACMEDNS := options["acme_dns"]
globalACMEEAB := options["acme_eab"]
globalPreferredChains := options["preferred_chains"]
hasGlobalACMEDefaults := globalEmail != nil || globalACMECA != nil || globalACMECARoot != nil || globalACMEDNS != nil || globalACMEEAB != nil || globalPreferredChains != nil
if hasGlobalACMEDefaults {
for i := 0; i < len(tlsApp.Automation.Policies); i++ {
ap := tlsApp.Automation.Policies[i]
if len(ap.Issuers) == 0 && automationPolicyHasAllPublicNames(ap) {
// for public names, create default issuers which will later be filled in with configured global defaults
// (internal names will implicitly use the internal issuer at auto-https time)
emailStr, _ := globalEmail.(string)
ap.Issuers = caddytls.DefaultIssuers(emailStr)
// if a specific endpoint is configured, can't use multiple default issuers
if globalACMECA != nil {
ap.Issuers = []certmagic.Issuer{new(caddytls.ACMEIssuer)}
}
}
}
}
}
// finalize and verify policies; do cleanup
if tlsApp.Automation != nil {
for i, ap := range tlsApp.Automation.Policies {
// ensure all issuers have global defaults filled in
for j, issuer := range ap.Issuers {
err := fillInGlobalACMEDefaults(issuer, options)
if err != nil {
return nil, warnings, fmt.Errorf("filling in global issuer defaults for AP %d, issuer %d: %v", i, j, err)
}
}
// encode all issuer values we created, so they will be rendered in the output
if len(ap.Issuers) > 0 && ap.IssuersRaw == nil {
for _, iss := range ap.Issuers {
issuerName := iss.(caddy.Module).CaddyModule().ID.Name()
ap.IssuersRaw = append(ap.IssuersRaw, caddyconfig.JSONModuleObject(iss, "module", issuerName, &warnings))
}
}
}
// consolidate automation policies that are the exact same
tlsApp.Automation.Policies = consolidateAutomationPolicies(tlsApp.Automation.Policies)
// ensure automation policies don't overlap subjects (this should be
// an error at provision-time as well, but catch it in the adapt phase
// for convenience)
automationHostSet := make(map[string]struct{})
for _, ap := range tlsApp.Automation.Policies {
for _, s := range ap.SubjectsRaw {
if _, ok := automationHostSet[s]; ok {
return nil, warnings, fmt.Errorf("hostname appears in more than one automation policy, making certificate management ambiguous: %s", s)
}
automationHostSet[s] = struct{}{}
}
}
// if nothing remains, remove any excess values to clean up the resulting config
if len(tlsApp.Automation.Policies) == 0 {
tlsApp.Automation.Policies = nil
}
if reflect.DeepEqual(tlsApp.Automation, new(caddytls.AutomationConfig)) {
tlsApp.Automation = nil
}
}
return tlsApp, warnings, nil
}
type acmeCapable interface{ GetACMEIssuer() *caddytls.ACMEIssuer }
func fillInGlobalACMEDefaults(issuer certmagic.Issuer, options map[string]any) error {
acmeWrapper, ok := issuer.(acmeCapable)
if !ok {
return nil
}
acmeIssuer := acmeWrapper.GetACMEIssuer()
if acmeIssuer == nil {
return nil
}
globalEmail := options["email"]
globalACMECA := options["acme_ca"]
globalACMECARoot := options["acme_ca_root"]
globalACMEDNS := options["acme_dns"]
globalACMEEAB := options["acme_eab"]
globalPreferredChains := options["preferred_chains"]
globalCertLifetime := options["cert_lifetime"]
globalHTTPPort, globalHTTPSPort := options["http_port"], options["https_port"]
if globalEmail != nil && acmeIssuer.Email == "" {
acmeIssuer.Email = globalEmail.(string)
}
if globalACMECA != nil && acmeIssuer.CA == "" {
acmeIssuer.CA = globalACMECA.(string)
}
if globalACMECARoot != nil && !sliceContains(acmeIssuer.TrustedRootsPEMFiles, globalACMECARoot.(string)) {
acmeIssuer.TrustedRootsPEMFiles = append(acmeIssuer.TrustedRootsPEMFiles, globalACMECARoot.(string))
}
if globalACMEDNS != nil && (acmeIssuer.Challenges == nil || acmeIssuer.Challenges.DNS == nil) {
acmeIssuer.Challenges = &caddytls.ChallengesConfig{
DNS: &caddytls.DNSChallengeConfig{
ProviderRaw: caddyconfig.JSONModuleObject(globalACMEDNS, "name", globalACMEDNS.(caddy.Module).CaddyModule().ID.Name(), nil),
},
}
}
if globalACMEEAB != nil && acmeIssuer.ExternalAccount == nil {
acmeIssuer.ExternalAccount = globalACMEEAB.(*acme.EAB)
}
if globalPreferredChains != nil && acmeIssuer.PreferredChains == nil {
acmeIssuer.PreferredChains = globalPreferredChains.(*caddytls.ChainPreference)
}
if globalHTTPPort != nil && (acmeIssuer.Challenges == nil || acmeIssuer.Challenges.HTTP == nil || acmeIssuer.Challenges.HTTP.AlternatePort == 0) {
if acmeIssuer.Challenges == nil {
acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
}
if acmeIssuer.Challenges.HTTP == nil {
acmeIssuer.Challenges.HTTP = new(caddytls.HTTPChallengeConfig)
}
acmeIssuer.Challenges.HTTP.AlternatePort = globalHTTPPort.(int)
}
if globalHTTPSPort != nil && (acmeIssuer.Challenges == nil || acmeIssuer.Challenges.TLSALPN == nil || acmeIssuer.Challenges.TLSALPN.AlternatePort == 0) {
if acmeIssuer.Challenges == nil {
acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
}
if acmeIssuer.Challenges.TLSALPN == nil {
acmeIssuer.Challenges.TLSALPN = new(caddytls.TLSALPNChallengeConfig)
}
acmeIssuer.Challenges.TLSALPN.AlternatePort = globalHTTPSPort.(int)
}
if globalCertLifetime != nil && acmeIssuer.CertificateLifetime == 0 {
acmeIssuer.CertificateLifetime = globalCertLifetime.(caddy.Duration)
}
return nil
}
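// Illustrative sketch, not part of the original source: global options such as
// "email" and "acme_ca" are applied only where the issuer does not already
// have an explicit value. The CA endpoint below is hypothetical.
func exampleFillInGlobalACMEDefaults() error {
	issuer := new(caddytls.ACMEIssuer)
	options := map[string]any{
		"email":   "admin@example.com",
		"acme_ca": "https://acme.example.com/directory",
	}
	if err := fillInGlobalACMEDefaults(issuer, options); err != nil {
		return err
	}
	// issuer.Email and issuer.CA now carry the global defaults; fields that
	// were already set would have been left alone.
	return nil
}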
// newBaseAutomationPolicy returns a new TLS automation policy that gets
// its values from the global options map. It should be used as the base
// for any other automation policies. A nil policy (and no error) will be
// returned if there are no default/global options. However, if always is
// true, a non-nil value will always be returned (unless there is an error).
func newBaseAutomationPolicy(
options map[string]any,
_ []caddyconfig.Warning,
always bool,
) (*caddytls.AutomationPolicy, error) {
issuers, hasIssuers := options["cert_issuer"]
_, hasLocalCerts := options["local_certs"]
keyType, hasKeyType := options["key_type"]
ocspStapling, hasOCSPStapling := options["ocsp_stapling"]
hasGlobalAutomationOpts := hasIssuers || hasLocalCerts || hasKeyType || hasOCSPStapling
// if there are no global options related to automation policies
// set, then we can just return right away
if !hasGlobalAutomationOpts {
if always {
return new(caddytls.AutomationPolicy), nil
}
return nil, nil
}
ap := new(caddytls.AutomationPolicy)
if hasKeyType {
ap.KeyType = keyType.(string)
}
if hasIssuers && hasLocalCerts {
return nil, fmt.Errorf("global options are ambiguous: local_certs is confusing when combined with cert_issuer, because local_certs is also a specific kind of issuer")
}
if hasIssuers {
ap.Issuers = issuers.([]certmagic.Issuer)
} else if hasLocalCerts {
ap.Issuers = []certmagic.Issuer{new(caddytls.InternalIssuer)}
}
if hasOCSPStapling {
ocspConfig := ocspStapling.(certmagic.OCSPConfig)
ap.DisableOCSPStapling = ocspConfig.DisableStapling
ap.OCSPOverrides = ocspConfig.ResponderOverrides
}
return ap, nil
}
// consolidateAutomationPolicies combines automation policies that are the same,
// for a cleaner overall output.
func consolidateAutomationPolicies(aps []*caddytls.AutomationPolicy) []*caddytls.AutomationPolicy {
// sort from most specific to least specific; we depend on this ordering
sort.SliceStable(aps, func(i, j int) bool {
if automationPolicyIsSubset(aps[i], aps[j]) {
return true
}
if automationPolicyIsSubset(aps[j], aps[i]) {
return false
}
return len(aps[i].SubjectsRaw) > len(aps[j].SubjectsRaw)
})
emptyAPCount := 0
origLenAPs := len(aps)
// compute the number of empty policies (disregarding subjects) - see #4128
emptyAP := new(caddytls.AutomationPolicy)
for i := 0; i < len(aps); i++ {
emptyAP.SubjectsRaw = aps[i].SubjectsRaw
if reflect.DeepEqual(aps[i], emptyAP) {
emptyAPCount++
if !automationPolicyHasAllPublicNames(aps[i]) {
// if this automation policy has internal names, we might as well remove it
// so auto-https can implicitly use the internal issuer
aps = append(aps[:i], aps[i+1:]...)
i--
}
}
}
// If all policies are empty, we can return nil, as there is no need to set any policy
if emptyAPCount == origLenAPs {
return nil
}
// remove or combine duplicate policies
outer:
for i := 0; i < len(aps); i++ {
// compare only with next policies; we sorted by specificity so we must not delete earlier policies
for j := i + 1; j < len(aps); j++ {
// if they're exactly equal in every way, just keep one of them
if reflect.DeepEqual(aps[i], aps[j]) {
aps = append(aps[:j], aps[j+1:]...)
// must re-evaluate current i against next j; can't skip it!
// even if i decrements to -1, will be incremented to 0 immediately
i--
continue outer
}
// if the policy is the same, we can keep just one, but we have
// to be careful which one we keep; if only one has any hostnames
// defined, then we need to keep the one without any hostnames,
// otherwise the one without any subjects (a catch-all) would be
// eaten up by the one with subjects; and if both have subjects, we
// need to combine their lists
if reflect.DeepEqual(aps[i].IssuersRaw, aps[j].IssuersRaw) &&
reflect.DeepEqual(aps[i].ManagersRaw, aps[j].ManagersRaw) &&
bytes.Equal(aps[i].StorageRaw, aps[j].StorageRaw) &&
aps[i].MustStaple == aps[j].MustStaple &&
aps[i].KeyType == aps[j].KeyType &&
aps[i].OnDemand == aps[j].OnDemand &&
aps[i].ReusePrivateKeys == aps[j].ReusePrivateKeys &&
aps[i].RenewalWindowRatio == aps[j].RenewalWindowRatio {
if len(aps[i].SubjectsRaw) > 0 && len(aps[j].SubjectsRaw) == 0 {
// later policy (at j) has no subjects ("catch-all"), so we can
// remove the identical-but-more-specific policy that comes first
// AS LONG AS it is not shadowed by another policy before it; e.g.
// if policy i is for example.com, policy i+1 is '*.com', and policy
// j is catch-all, we cannot remove policy i because that would
// cause example.com to be served by the less specific policy for
// '*.com', which might be different (yes we've seen this happen)
if automationPolicyShadows(i, aps) >= j {
aps = append(aps[:i], aps[i+1:]...)
i--
continue outer
}
} else {
// avoid repeated subjects
for _, subj := range aps[j].SubjectsRaw {
if !sliceContains(aps[i].SubjectsRaw, subj) {
aps[i].SubjectsRaw = append(aps[i].SubjectsRaw, subj)
}
}
aps = append(aps[:j], aps[j+1:]...)
j--
}
}
}
}
return aps
}
// automationPolicyIsSubset returns true if a's subjects are a subset
// of b's subjects.
func automationPolicyIsSubset(a, b *caddytls.AutomationPolicy) bool {
if len(b.SubjectsRaw) == 0 {
return true
}
if len(a.SubjectsRaw) == 0 {
return false
}
for _, aSubj := range a.SubjectsRaw {
var inSuperset bool
for _, bSubj := range b.SubjectsRaw {
if certmagic.MatchWildcard(aSubj, bSubj) {
inSuperset = true
break
}
}
if !inSuperset {
return false
}
}
return true
}
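// Illustrative sketch, not part of the original source: a policy for
// "foo.example.com" is a subset of one for "*.example.com", and any policy is
// a subset of a catch-all policy with no subjects.
func exampleAutomationPolicySubset() {
	specific := &caddytls.AutomationPolicy{SubjectsRaw: []string{"foo.example.com"}}
	wildcard := &caddytls.AutomationPolicy{SubjectsRaw: []string{"*.example.com"}}
	catchAll := &caddytls.AutomationPolicy{}
	_ = automationPolicyIsSubset(specific, wildcard) // true
	_ = automationPolicyIsSubset(wildcard, specific) // false
	_ = automationPolicyIsSubset(specific, catchAll) // true
}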
// automationPolicyShadows returns the index of a policy that aps[i] shadows;
// in other words, for all policies after position i, if that policy covers
// the same subjects but is less specific, that policy's position is returned,
// or -1 if no shadowing is found. For example, if policy i is for
// "foo.example.com" and policy i+2 is for "*.example.com", then i+2 will be
// returned, since that policy is shadowed by i, which is in front.
func automationPolicyShadows(i int, aps []*caddytls.AutomationPolicy) int {
for j := i + 1; j < len(aps); j++ {
if automationPolicyIsSubset(aps[i], aps[j]) {
return j
}
}
return -1
}
// subjectQualifiesForPublicCert is like certmagic.SubjectQualifiesForPublicCert() except
// that this allows domains with multiple wildcard levels like '*.*.example.com' to qualify
// if the automation policy has OnDemand enabled (i.e. this function is more lenient).
//
// IP subjects are considered as non-qualifying for public certs. Technically, there are
// now public ACME CAs as well as non-ACME CAs that issue IP certificates. But this function
// is used solely for implicit automation (defaults), where it gets really complicated to
// keep track of which issuers support IP certificates in which circumstances. Currently,
// issuers that support IP certificates are very few, and all require some sort of config
// from the user anyway (such as an account credential). Since we cannot implicitly and
// automatically get public IP certs without configuration from the user, we treat IPs as
// not qualifying for public certificates. Users should expressly configure an issuer
// that supports IP certs for that purpose.
func subjectQualifiesForPublicCert(ap *caddytls.AutomationPolicy, subj string) bool {
return !certmagic.SubjectIsIP(subj) &&
!certmagic.SubjectIsInternal(subj) &&
(strings.Count(subj, "*.") < 2 || ap.OnDemand)
}
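// Illustrative sketch, not part of the original source: with OnDemand enabled,
// a multi-level wildcard is treated as public, while IP addresses and internal
// names still are not.
func exampleSubjectQualification() {
	onDemand := &caddytls.AutomationPolicy{OnDemand: true}
	_ = subjectQualifiesForPublicCert(onDemand, "*.*.example.com") // true: OnDemand tolerates multi-level wildcards
	_ = subjectQualifiesForPublicCert(onDemand, "10.0.0.1")        // false: IPs never implicitly qualify
	_ = subjectQualifiesForPublicCert(onDemand, "localhost")       // false: internal name
}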
// automationPolicyHasAllPublicNames returns true if all the names on the policy
// qualify for public certs and none of them are Tailscale domains.
func automationPolicyHasAllPublicNames(ap *caddytls.AutomationPolicy) bool {
for _, subj := range ap.SubjectsRaw {
if !subjectQualifiesForPublicCert(ap, subj) || isTailscaleDomain(subj) {
return false
}
}
return true
}
func isTailscaleDomain(name string) bool {
return strings.HasSuffix(strings.ToLower(name), ".ts.net")
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyconfig
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io"
"net/http"
"os"
"time"
"github.com/caddyserver/caddy/v2"
)
func init() {
caddy.RegisterModule(HTTPLoader{})
}
// HTTPLoader can load Caddy configs over HTTP(S).
//
// If the response is not a JSON config, a config adapter must be specified
// either in the loader config (`adapter`), or in the Content-Type HTTP header
// returned in the HTTP response from the server. The Content-Type header is
// read just like the admin API's `/load` endpoint. If you don't have control
// over the HTTP server (but can still trust its response), you can override
// the Content-Type header by setting the `adapter` property in this config.
type HTTPLoader struct {
// The method for the request. Default: GET
Method string `json:"method,omitempty"`
// The URL of the request.
URL string `json:"url,omitempty"`
// HTTP headers to add to the request.
Headers http.Header `json:"header,omitempty"`
// Maximum time allowed for a complete connection and request.
Timeout caddy.Duration `json:"timeout,omitempty"`
// The name of the config adapter to use, if any. Only needed
// if the HTTP response is not a JSON config and if the server's
// Content-Type header is missing or incorrect.
Adapter string `json:"adapter,omitempty"`
TLS *struct {
// Present this instance's managed remote identity credentials to the server.
UseServerIdentity bool `json:"use_server_identity,omitempty"`
// PEM-encoded client certificate filename to present to the server.
ClientCertificateFile string `json:"client_certificate_file,omitempty"`
// PEM-encoded key to use with the client certificate.
ClientCertificateKeyFile string `json:"client_certificate_key_file,omitempty"`
// List of PEM-encoded CA certificate files to load into the trust
// store used to verify the server's certificate.
RootCAPEMFiles []string `json:"root_ca_pem_files,omitempty"`
} `json:"tls,omitempty"`
}
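// Illustrative sketch, not part of the original source: an HTTPLoader that
// fetches a Caddyfile from a hypothetical internal URL and forces the
// "caddyfile" adapter regardless of the server's Content-Type header.
var exampleHTTPLoader = HTTPLoader{
	Method:  http.MethodGet,
	URL:     "https://config.internal.example/Caddyfile",
	Adapter: "caddyfile",
	Timeout: caddy.Duration(30 * time.Second),
}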
// CaddyModule returns the Caddy module information.
func (HTTPLoader) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "caddy.config_loaders.http",
New: func() caddy.Module { return new(HTTPLoader) },
}
}
// LoadConfig loads a Caddy config.
func (hl HTTPLoader) LoadConfig(ctx caddy.Context) ([]byte, error) {
repl := caddy.NewReplacer()
client, err := hl.makeClient(ctx)
if err != nil {
return nil, err
}
method := repl.ReplaceAll(hl.Method, "")
if method == "" {
method = http.MethodGet
}
url := repl.ReplaceAll(hl.URL, "")
req, err := http.NewRequest(method, url, nil)
if err != nil {
return nil, err
}
for key, vals := range hl.Headers {
for _, val := range vals {
req.Header.Add(repl.ReplaceAll(key, ""), repl.ReplaceKnown(val, ""))
}
}
resp, err := doHttpCallWithRetries(ctx, client, req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode >= 400 {
return nil, fmt.Errorf("server responded with HTTP %d", resp.StatusCode)
}
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
// adapt the config based on either manually-configured adapter or server's response header
ct := resp.Header.Get("Content-Type")
if hl.Adapter != "" {
ct = "text/" + hl.Adapter
}
result, warnings, err := adaptByContentType(ct, body)
if err != nil {
return nil, err
}
for _, warn := range warnings {
ctx.Logger().Warn(warn.String())
}
return result, nil
}
func attemptHttpCall(client *http.Client, request *http.Request) (*http.Response, error) {
resp, err := client.Do(request)
if err != nil {
return nil, fmt.Errorf("problem calling http loader url: %v", err)
} else if resp.StatusCode < 200 || resp.StatusCode > 499 {
resp.Body.Close()
return nil, fmt.Errorf("bad response status code from http loader url: %v", resp.StatusCode)
}
return resp, nil
}
func doHttpCallWithRetries(ctx caddy.Context, client *http.Client, request *http.Request) (*http.Response, error) {
var resp *http.Response
var err error
const maxAttempts = 10
for i := 0; i < maxAttempts; i++ {
resp, err = attemptHttpCall(client, request)
if err != nil && i < maxAttempts-1 {
select {
case <-time.After(time.Millisecond * 500):
case <-ctx.Done():
return resp, ctx.Err()
}
} else {
break
}
}
return resp, err
}
func (hl HTTPLoader) makeClient(ctx caddy.Context) (*http.Client, error) {
client := &http.Client{
Timeout: time.Duration(hl.Timeout),
}
if hl.TLS != nil {
var tlsConfig *tls.Config
// client authentication
if hl.TLS.UseServerIdentity {
certs, err := ctx.IdentityCredentials(ctx.Logger())
if err != nil {
return nil, fmt.Errorf("getting server identity credentials: %v", err)
}
// See https://github.com/securego/gosec/issues/1054#issuecomment-2072235199
//nolint:gosec
tlsConfig = &tls.Config{Certificates: certs}
} else if hl.TLS.ClientCertificateFile != "" && hl.TLS.ClientCertificateKeyFile != "" {
cert, err := tls.LoadX509KeyPair(hl.TLS.ClientCertificateFile, hl.TLS.ClientCertificateKeyFile)
if err != nil {
return nil, err
}
//nolint:gosec
tlsConfig = &tls.Config{Certificates: []tls.Certificate{cert}}
}
// trusted server certs
if len(hl.TLS.RootCAPEMFiles) > 0 {
rootPool := x509.NewCertPool()
for _, pemFile := range hl.TLS.RootCAPEMFiles {
pemData, err := os.ReadFile(pemFile)
if err != nil {
return nil, fmt.Errorf("failed reading ca cert: %v", err)
}
rootPool.AppendCertsFromPEM(pemData)
}
if tlsConfig == nil {
tlsConfig = new(tls.Config)
}
tlsConfig.RootCAs = rootPool
}
client.Transport = &http.Transport{TLSClientConfig: tlsConfig}
}
return client, nil
}
var _ caddy.ConfigLoader = (*HTTPLoader)(nil)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyconfig
import (
"bytes"
"encoding/json"
"fmt"
"io"
"mime"
"net/http"
"strings"
"sync"
"github.com/caddyserver/caddy/v2"
)
func init() {
caddy.RegisterModule(adminLoad{})
}
// adminLoad is a module that provides the /load endpoint
// for the Caddy admin API. The only reason it's not baked
// into the caddy package directly is because of the import
// of the caddyconfig package for its GetAdapter function.
// If the caddy package depends on the caddyconfig package,
// then the caddyconfig package will not be able to import
// the caddy package, and it can more easily cause backward
// edges in the dependency tree (i.e. import cycle).
// Fortunately, the admin API has first-class support for
// adding endpoints from modules.
type adminLoad struct{}
// CaddyModule returns the Caddy module information.
func (adminLoad) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "admin.api.load",
New: func() caddy.Module { return new(adminLoad) },
}
}
// Routes returns a route for the /load endpoint.
func (al adminLoad) Routes() []caddy.AdminRoute {
return []caddy.AdminRoute{
{
Pattern: "/load",
Handler: caddy.AdminHandlerFunc(al.handleLoad),
},
{
Pattern: "/adapt",
Handler: caddy.AdminHandlerFunc(al.handleAdapt),
},
}
}
// handleLoad replaces the entire current configuration with
// a new one provided in the request body. It supports config
// adapters through the use of the Content-Type header. A
// config that is identical to the currently-running config
// will be a no-op unless Cache-Control: must-revalidate is set.
func (adminLoad) handleLoad(w http.ResponseWriter, r *http.Request) error {
if r.Method != http.MethodPost {
return caddy.APIError{
HTTPStatus: http.StatusMethodNotAllowed,
Err: fmt.Errorf("method not allowed"),
}
}
buf := bufPool.Get().(*bytes.Buffer)
buf.Reset()
defer bufPool.Put(buf)
_, err := io.Copy(buf, r.Body)
if err != nil {
return caddy.APIError{
HTTPStatus: http.StatusBadRequest,
Err: fmt.Errorf("reading request body: %v", err),
}
}
body := buf.Bytes()
// if the config is formatted other than Caddy's native
// JSON, we need to adapt it before loading it
if ctHeader := r.Header.Get("Content-Type"); ctHeader != "" {
result, warnings, err := adaptByContentType(ctHeader, body)
if err != nil {
return caddy.APIError{
HTTPStatus: http.StatusBadRequest,
Err: err,
}
}
if len(warnings) > 0 {
respBody, err := json.Marshal(warnings)
if err != nil {
caddy.Log().Named("admin.api.load").Error(err.Error())
}
_, _ = w.Write(respBody)
}
body = result
}
forceReload := r.Header.Get("Cache-Control") == "must-revalidate"
err = caddy.Load(body, forceReload)
if err != nil {
return caddy.APIError{
HTTPStatus: http.StatusBadRequest,
Err: fmt.Errorf("loading config: %v", err),
}
}
caddy.Log().Named("admin.api").Info("load complete")
return nil
}
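// Illustrative sketch, not part of the original source: posting a Caddyfile to
// the /load endpoint with a matching Content-Type so it is adapted before it
// is applied. The admin address below is the default and is an assumption.
func exampleLoadRequest() error {
	body := strings.NewReader("localhost\nrespond \"hi\"")
	req, err := http.NewRequest(http.MethodPost, "http://localhost:2019/load", body)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "text/caddyfile")
	// Setting Cache-Control: must-revalidate would force a reload even if the
	// config is identical to the currently-running one.
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	_, err = io.Copy(io.Discard, resp.Body)
	return err
}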
// handleAdapt adapts the given Caddy config to JSON and responds with the result.
func (adminLoad) handleAdapt(w http.ResponseWriter, r *http.Request) error {
if r.Method != http.MethodPost {
return caddy.APIError{
HTTPStatus: http.StatusMethodNotAllowed,
Err: fmt.Errorf("method not allowed"),
}
}
buf := bufPool.Get().(*bytes.Buffer)
buf.Reset()
defer bufPool.Put(buf)
_, err := io.Copy(buf, r.Body)
if err != nil {
return caddy.APIError{
HTTPStatus: http.StatusBadRequest,
Err: fmt.Errorf("reading request body: %v", err),
}
}
result, warnings, err := adaptByContentType(r.Header.Get("Content-Type"), buf.Bytes())
if err != nil {
return caddy.APIError{
HTTPStatus: http.StatusBadRequest,
Err: err,
}
}
out := struct {
Warnings []Warning `json:"warnings,omitempty"`
Result json.RawMessage `json:"result"`
}{
Warnings: warnings,
Result: result,
}
w.Header().Set("Content-Type", "application/json")
return json.NewEncoder(w).Encode(out)
}
// adaptByContentType adapts body to Caddy JSON using the adapter specified by contentType.
// If contentType is empty or ends with "/json", the body is returned unchanged (no adaptation is performed).
func adaptByContentType(contentType string, body []byte) ([]byte, []Warning, error) {
// assume JSON as the default
if contentType == "" {
return body, nil, nil
}
ct, _, err := mime.ParseMediaType(contentType)
if err != nil {
return nil, nil, caddy.APIError{
HTTPStatus: http.StatusBadRequest,
Err: fmt.Errorf("invalid Content-Type: %v", err),
}
}
// if already JSON, no need to adapt
if strings.HasSuffix(ct, "/json") {
return body, nil, nil
}
// adapter name should be suffix of MIME type
_, adapterName, slashFound := strings.Cut(ct, "/")
if !slashFound {
return nil, nil, fmt.Errorf("malformed Content-Type")
}
cfgAdapter := GetAdapter(adapterName)
if cfgAdapter == nil {
return nil, nil, fmt.Errorf("unrecognized config adapter '%s'", adapterName)
}
result, warnings, err := cfgAdapter.Adapt(body, nil)
if err != nil {
return nil, nil, fmt.Errorf("adapting config using %s adapter: %v", adapterName, err)
}
return result, warnings, nil
}
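// Illustrative sketch, not part of the original source: the adapter name is
// the MIME subtype, so "text/caddyfile" selects the registered "caddyfile"
// adapter, while "application/json" (or an empty Content-Type) is passed
// through unchanged.
func exampleAdaptByContentType() ([]byte, error) {
	cfgJSON, warnings, err := adaptByContentType("text/caddyfile", []byte("localhost\nrespond \"hi\""))
	if err != nil {
		return nil, err // e.g. if no "caddyfile" adapter is registered in this build
	}
	_ = warnings // non-fatal adapter warnings, if any
	return cfgJSON, nil
}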
var bufPool = sync.Pool{
New: func() any {
return new(bytes.Buffer)
},
}
package caddycmd
import (
"fmt"
"github.com/spf13/cobra"
"github.com/caddyserver/caddy/v2"
)
var rootCmd = &cobra.Command{
Use: "caddy",
Long: `Caddy is an extensible server platform written in Go.
At its core, Caddy merely manages configuration. Modules are plugged
in statically at compile-time to provide useful functionality. Caddy's
standard distribution includes common modules to serve HTTP, TLS,
and PKI applications, including the automation of certificates.
To run Caddy, use:
- 'caddy run' to run Caddy in the foreground (recommended).
- 'caddy start' to start Caddy in the background; only do this
if you will be keeping the terminal window open until you run
'caddy stop' to close the server.
When Caddy is started, it opens a locally-bound administrative socket
to which configuration can be POSTed via a restful HTTP API (see
https://caddyserver.com/docs/api).
Caddy's native configuration format is JSON. However, config adapters
can be used to convert other config formats to JSON when Caddy receives
its configuration. The Caddyfile is a built-in config adapter that is
popular for hand-written configurations due to its straightforward
syntax (see https://caddyserver.com/docs/caddyfile). Many third-party
adapters are available (see https://caddyserver.com/docs/config-adapters).
Use 'caddy adapt' to see how a config translates to JSON.
For convenience, the CLI can act as an HTTP client to give Caddy its
initial configuration for you. If a file named Caddyfile is in the
current working directory, it will do this automatically. Otherwise,
you can use the --config flag to specify the path to a config file.
Some special-purpose subcommands build and load a configuration file
for you directly from command line input; for example:
- caddy file-server
- caddy reverse-proxy
- caddy respond
These commands disable the administration endpoint because their
configuration is specified solely on the command line.
In general, the most common way to run Caddy is simply:
$ caddy run
Or, with a configuration file:
$ caddy run --config caddy.json
If running interactively in a terminal, running Caddy in the
background may be more convenient:
$ caddy start
...
$ caddy stop
This allows you to run other commands while Caddy stays running.
Be sure to stop Caddy before you close the terminal!
Depending on the system, Caddy may need permission to bind to low
ports. One way to do this on Linux is to use setcap:
$ sudo setcap cap_net_bind_service=+ep $(which caddy)
Remember to run that command again after replacing the binary.
See the Caddy website for tutorials, configuration structure,
syntax, and module documentation: https://caddyserver.com/docs/
Custom Caddy builds are available on the Caddy download page at:
https://caddyserver.com/download
The xcaddy command can be used to build Caddy from source with or
without additional plugins: https://github.com/caddyserver/xcaddy
Where possible, Caddy should be installed using officially-supported
package installers: https://caddyserver.com/docs/install
Instructions for running Caddy in production are also available:
https://caddyserver.com/docs/running
`,
Example: ` $ caddy run
$ caddy run --config caddy.json
$ caddy reload --config caddy.json
$ caddy stop`,
// kind of annoying to have all the help text printed out if
// caddy has an error provisioning its modules, for instance...
SilenceUsage: true,
Version: onlyVersionText(),
}
const fullDocsFooter = `Full documentation is available at:
https://caddyserver.com/docs/command-line`
func init() {
rootCmd.SetVersionTemplate("{{.Version}}\n")
rootCmd.SetHelpTemplate(rootCmd.HelpTemplate() + "\n" + fullDocsFooter + "\n")
}
func onlyVersionText() string {
_, f := caddy.Version()
return f
}
func caddyCmdToCobra(caddyCmd Command) *cobra.Command {
cmd := &cobra.Command{
Use: caddyCmd.Name + " " + caddyCmd.Usage,
Short: caddyCmd.Short,
Long: caddyCmd.Long,
}
if caddyCmd.CobraFunc != nil {
caddyCmd.CobraFunc(cmd)
} else {
cmd.RunE = WrapCommandFuncForCobra(caddyCmd.Func)
cmd.Flags().AddGoFlagSet(caddyCmd.Flags)
}
return cmd
}
// WrapCommandFuncForCobra wraps a Caddy CommandFunc for use
// in a cobra command's RunE field.
func WrapCommandFuncForCobra(f CommandFunc) func(cmd *cobra.Command, _ []string) error {
return func(cmd *cobra.Command, _ []string) error {
status, err := f(Flags{cmd.Flags()})
if status > 1 {
cmd.SilenceErrors = true
return &exitError{ExitCode: status, Err: err}
}
return err
}
}
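// Illustrative sketch, not part of the original source: a hypothetical
// subcommand wired up through the same wrapper used above.
var exampleHelloCmd = &cobra.Command{
	Use:   "hello",
	Short: "Prints a greeting (hypothetical example)",
	RunE: WrapCommandFuncForCobra(func(_ Flags) (int, error) {
		fmt.Println("hello from a hypothetical subcommand")
		return caddy.ExitCodeSuccess, nil
	}),
}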
// exitError carries the exit code from CommandFunc to Main()
type exitError struct {
ExitCode int
Err error
}
func (e *exitError) Error() string {
if e.Err == nil {
return fmt.Sprintf("exiting with status %d", e.ExitCode)
}
return e.Err.Error()
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddycmd
import (
"bytes"
"context"
"crypto/rand"
"encoding/json"
"errors"
"fmt"
"io"
"io/fs"
"log"
"net"
"net/http"
"os"
"os/exec"
"runtime"
"runtime/debug"
"strings"
"github.com/aryann/difflib"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/internal"
)
func cmdStart(fl Flags) (int, error) {
configFlag := fl.String("config")
configAdapterFlag := fl.String("adapter")
pidfileFlag := fl.String("pidfile")
watchFlag := fl.Bool("watch")
var err error
var envfileFlag []string
envfileFlag, err = fl.GetStringSlice("envfile")
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("reading envfile flag: %v", err)
}
// open a listener to which the child process will connect when
// it is ready to confirm that it has successfully started
ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("opening listener for success confirmation: %v", err)
}
defer ln.Close()
// craft the command with a pingback address and with a
// pipe for its stdin, so we can tell it our confirmation
// code that we expect so that some random port scan at
// the most unfortunate time won't fool us into thinking
// the child succeeded (i.e. the alternative is to just
// wait for any connection on our listener, but better to
// ensure it's the process we're expecting - we can be
// sure by giving it some random bytes and having it echo
// them back to us)
cmd := exec.Command(os.Args[0], "run", "--pingback", ln.Addr().String())
if configFlag != "" {
cmd.Args = append(cmd.Args, "--config", configFlag)
}
for _, envfile := range envfileFlag {
cmd.Args = append(cmd.Args, "--envfile", envfile)
}
if configAdapterFlag != "" {
cmd.Args = append(cmd.Args, "--adapter", configAdapterFlag)
}
if watchFlag {
cmd.Args = append(cmd.Args, "--watch")
}
if pidfileFlag != "" {
cmd.Args = append(cmd.Args, "--pidfile", pidfileFlag)
}
stdinPipe, err := cmd.StdinPipe()
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("creating stdin pipe: %v", err)
}
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
// generate the random bytes we'll send to the child process
expect := make([]byte, 32)
_, err = rand.Read(expect)
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("generating random confirmation bytes: %v", err)
}
// begin writing the confirmation bytes to the child's
// stdin; use a goroutine since the child hasn't been
// started yet, and writing synchronously would result
// in a deadlock
go func() {
_, _ = stdinPipe.Write(expect)
stdinPipe.Close()
}()
// start the process
err = cmd.Start()
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("starting caddy process: %v", err)
}
// there are two ways we know we're done: either
// the process will connect to our listener, or
// it will exit with an error
success, exit := make(chan struct{}), make(chan error)
// in one goroutine, we await the success of the child process
go func() {
for {
conn, err := ln.Accept()
if err != nil {
if !errors.Is(err, net.ErrClosed) {
log.Println(err)
}
break
}
err = handlePingbackConn(conn, expect)
if err == nil {
close(success)
break
}
log.Println(err)
}
}()
// in another goroutine, we await the failure of the child process
go func() {
err := cmd.Wait() // don't send on this line! Wait blocks, but send starts before it unblocks
exit <- err // sending on separate line ensures select won't trigger until after Wait unblocks
}()
// when one of the goroutines unblocks, we're done and can exit
select {
case <-success:
fmt.Printf("Successfully started Caddy (pid=%d) - Caddy is running in the background\n", cmd.Process.Pid)
case err := <-exit:
return caddy.ExitCodeFailedStartup,
fmt.Errorf("caddy process exited with error: %v", err)
}
return caddy.ExitCodeSuccess, nil
}
func cmdRun(fl Flags) (int, error) {
caddy.TrapSignals()
configFlag := fl.String("config")
configAdapterFlag := fl.String("adapter")
resumeFlag := fl.Bool("resume")
printEnvFlag := fl.Bool("environ")
watchFlag := fl.Bool("watch")
pidfileFlag := fl.String("pidfile")
pingbackFlag := fl.String("pingback")
// load all additional envs as soon as possible
err := handleEnvFileFlag(fl)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
// if we are supposed to print the environment, do that first
if printEnvFlag {
printEnvironment()
}
// load the config, depending on flags
var config []byte
if resumeFlag {
config, err = os.ReadFile(caddy.ConfigAutosavePath)
if errors.Is(err, fs.ErrNotExist) {
// not a bad error; just can't resume if autosave file doesn't exist
caddy.Log().Info("no autosave file exists", zap.String("autosave_file", caddy.ConfigAutosavePath))
resumeFlag = false
} else if err != nil {
return caddy.ExitCodeFailedStartup, err
} else {
if configFlag == "" {
caddy.Log().Info("resuming from last configuration",
zap.String("autosave_file", caddy.ConfigAutosavePath))
} else {
// if they also specified a config file, user should be aware that we're not
// using it (doing so could lead to data/config loss by overwriting!)
caddy.Log().Warn("--config and --resume flags were used together; ignoring --config and resuming from last configuration",
zap.String("autosave_file", caddy.ConfigAutosavePath))
}
}
}
// we don't use 'else' here since this value might have been changed in 'if' block; i.e. not mutually exclusive
var configFile string
if !resumeFlag {
config, configFile, err = LoadConfig(configFlag, configAdapterFlag)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
}
// create pidfile now, in case loading config takes a while (issue #5477)
if pidfileFlag != "" {
err := caddy.PIDFile(pidfileFlag)
if err != nil {
caddy.Log().Error("unable to write PID file",
zap.String("pidfile", pidfileFlag),
zap.Error(err))
}
}
// run the initial config
err = caddy.Load(config, true)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("loading initial config: %v", err)
}
caddy.Log().Info("serving initial configuration")
// if we are to report to another process the successful start
// of the server, do so now by echoing back contents of stdin
if pingbackFlag != "" {
confirmationBytes, err := io.ReadAll(os.Stdin)
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("reading confirmation bytes from stdin: %v", err)
}
conn, err := net.Dial("tcp", pingbackFlag)
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("dialing confirmation address: %v", err)
}
defer conn.Close()
_, err = conn.Write(confirmationBytes)
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("writing confirmation bytes to %s: %v", pingbackFlag, err)
}
}
// if enabled, reload config file automatically on changes
// (this better only be used in dev!)
if watchFlag {
go watchConfigFile(configFile, configAdapterFlag)
}
// warn if the environment does not provide enough information about the disk
hasXDG := os.Getenv("XDG_DATA_HOME") != "" &&
os.Getenv("XDG_CONFIG_HOME") != "" &&
os.Getenv("XDG_CACHE_HOME") != ""
switch runtime.GOOS {
case "windows":
if os.Getenv("HOME") == "" && os.Getenv("USERPROFILE") == "" && !hasXDG {
caddy.Log().Warn("neither HOME nor USERPROFILE environment variables are set - please fix; some assets might be stored in ./caddy")
}
case "plan9":
if os.Getenv("home") == "" && !hasXDG {
caddy.Log().Warn("$home environment variable is empty - please fix; some assets might be stored in ./caddy")
}
default:
if os.Getenv("HOME") == "" && !hasXDG {
caddy.Log().Warn("$HOME environment variable is empty - please fix; some assets might be stored in ./caddy")
}
}
select {}
}
func cmdStop(fl Flags) (int, error) {
addressFlag := fl.String("address")
configFlag := fl.String("config")
configAdapterFlag := fl.String("adapter")
adminAddr, err := DetermineAdminAPIAddress(addressFlag, nil, configFlag, configAdapterFlag)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("couldn't determine admin API address: %v", err)
}
resp, err := AdminAPIRequest(adminAddr, http.MethodPost, "/stop", nil, nil)
if err != nil {
caddy.Log().Warn("failed using API to stop instance", zap.Error(err))
return caddy.ExitCodeFailedStartup, err
}
defer resp.Body.Close()
return caddy.ExitCodeSuccess, nil
}
func cmdReload(fl Flags) (int, error) {
configFlag := fl.String("config")
configAdapterFlag := fl.String("adapter")
addressFlag := fl.String("address")
forceFlag := fl.Bool("force")
// get the config in caddy's native format
config, configFile, err := LoadConfig(configFlag, configAdapterFlag)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
if configFile == "" {
return caddy.ExitCodeFailedStartup, fmt.Errorf("no config file to load")
}
adminAddr, err := DetermineAdminAPIAddress(addressFlag, config, configFlag, configAdapterFlag)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("couldn't determine admin API address: %v", err)
}
// optionally force a config reload
headers := make(http.Header)
if forceFlag {
headers.Set("Cache-Control", "must-revalidate")
}
resp, err := AdminAPIRequest(adminAddr, http.MethodPost, "/load", headers, bytes.NewReader(config))
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("sending configuration to instance: %v", err)
}
defer resp.Body.Close()
return caddy.ExitCodeSuccess, nil
}
func cmdVersion(_ Flags) (int, error) {
_, full := caddy.Version()
fmt.Println(full)
return caddy.ExitCodeSuccess, nil
}
func cmdBuildInfo(_ Flags) (int, error) {
bi, ok := debug.ReadBuildInfo()
if !ok {
return caddy.ExitCodeFailedStartup, fmt.Errorf("no build information")
}
fmt.Println(bi)
return caddy.ExitCodeSuccess, nil
}
func cmdListModules(fl Flags) (int, error) {
packages := fl.Bool("packages")
versions := fl.Bool("versions")
skipStandard := fl.Bool("skip-standard")
printModuleInfo := func(mi moduleInfo) {
fmt.Print(mi.caddyModuleID)
if versions && mi.goModule != nil {
fmt.Print(" " + mi.goModule.Version)
}
if packages && mi.goModule != nil {
fmt.Print(" " + mi.goModule.Path)
if mi.goModule.Replace != nil {
fmt.Print(" => " + mi.goModule.Replace.Path)
}
}
if mi.err != nil {
fmt.Printf(" [%v]", mi.err)
}
fmt.Println()
}
// organize modules by whether they come with the standard distribution
standard, nonstandard, unknown, err := getModules()
if err != nil {
// oh well, just print the module IDs and exit
for _, m := range caddy.Modules() {
fmt.Println(m)
}
return caddy.ExitCodeSuccess, nil
}
// Standard modules (always shipped with Caddy)
if !skipStandard {
if len(standard) > 0 {
for _, mod := range standard {
printModuleInfo(mod)
}
}
fmt.Printf("\n Standard modules: %d\n", len(standard))
}
// Non-standard modules (third party plugins)
if len(nonstandard) > 0 {
if len(standard) > 0 && !skipStandard {
fmt.Println()
}
for _, mod := range nonstandard {
printModuleInfo(mod)
}
}
fmt.Printf("\n Non-standard modules: %d\n", len(nonstandard))
// Unknown modules (couldn't get Caddy module info)
if len(unknown) > 0 {
if (len(standard) > 0 && !skipStandard) || len(nonstandard) > 0 {
fmt.Println()
}
for _, mod := range unknown {
printModuleInfo(mod)
}
}
fmt.Printf("\n Unknown modules: %d\n", len(unknown))
return caddy.ExitCodeSuccess, nil
}
func cmdEnviron(fl Flags) (int, error) {
// load all additional envs as soon as possible
err := handleEnvFileFlag(fl)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
printEnvironment()
return caddy.ExitCodeSuccess, nil
}
func cmdAdaptConfig(fl Flags) (int, error) {
inputFlag := fl.String("config")
adapterFlag := fl.String("adapter")
prettyFlag := fl.Bool("pretty")
validateFlag := fl.Bool("validate")
var err error
inputFlag, err = configFileWithRespectToDefault(caddy.Log(), inputFlag)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
// load all additional envs as soon as possible
err = handleEnvFileFlag(fl)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
if adapterFlag == "" {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("adapter name is required (use --adapter flag or leave unspecified for default)")
}
cfgAdapter := caddyconfig.GetAdapter(adapterFlag)
if cfgAdapter == nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("unrecognized config adapter: %s", adapterFlag)
}
input, err := os.ReadFile(inputFlag)
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("reading input file: %v", err)
}
opts := map[string]any{"filename": inputFlag}
adaptedConfig, warnings, err := cfgAdapter.Adapt(input, opts)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
if prettyFlag {
var prettyBuf bytes.Buffer
err = json.Indent(&prettyBuf, adaptedConfig, "", "\t")
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
adaptedConfig = prettyBuf.Bytes()
}
// print result to stdout
fmt.Println(string(adaptedConfig))
// print warnings to stderr
for _, warn := range warnings {
msg := warn.Message
if warn.Directive != "" {
msg = fmt.Sprintf("%s: %s", warn.Directive, warn.Message)
}
caddy.Log().Named(adapterFlag).Warn(msg,
zap.String("file", warn.File),
zap.Int("line", warn.Line))
}
// validate output if requested
if validateFlag {
var cfg *caddy.Config
err = caddy.StrictUnmarshalJSON(adaptedConfig, &cfg)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("decoding config: %v", err)
}
err = caddy.Validate(cfg)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("validation: %v", err)
}
}
return caddy.ExitCodeSuccess, nil
}
func cmdValidateConfig(fl Flags) (int, error) {
configFlag := fl.String("config")
adapterFlag := fl.String("adapter")
// load all additional envs as soon as possible
err := handleEnvFileFlag(fl)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
// use default config and ensure a config file is specified
configFlag, err = configFileWithRespectToDefault(caddy.Log(), configFlag)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
if configFlag == "" {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("input file required when there is no Caddyfile in current directory (use --config flag)")
}
input, _, err := LoadConfig(configFlag, adapterFlag)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
input = caddy.RemoveMetaFields(input)
var cfg *caddy.Config
err = caddy.StrictUnmarshalJSON(input, &cfg)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("decoding config: %v", err)
}
err = caddy.Validate(cfg)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
fmt.Println("Valid configuration")
return caddy.ExitCodeSuccess, nil
}
func cmdFmt(fl Flags) (int, error) {
configFile := fl.Arg(0)
if configFile == "" {
configFile = "Caddyfile"
}
// as a special case, read from stdin if the file name is "-"
if configFile == "-" {
input, err := io.ReadAll(os.Stdin)
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("reading stdin: %v", err)
}
fmt.Print(string(caddyfile.Format(input)))
return caddy.ExitCodeSuccess, nil
}
input, err := os.ReadFile(configFile)
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("reading input file: %v", err)
}
output := caddyfile.Format(input)
if fl.Bool("overwrite") {
if err := os.WriteFile(configFile, output, 0o600); err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("overwriting formatted file: %v", err)
}
return caddy.ExitCodeSuccess, nil
}
if fl.Bool("diff") {
diff := difflib.Diff(
strings.Split(string(input), "\n"),
strings.Split(string(output), "\n"))
for _, d := range diff {
switch d.Delta {
case difflib.Common:
fmt.Printf(" %s\n", d.Payload)
case difflib.LeftOnly:
fmt.Printf("- %s\n", d.Payload)
case difflib.RightOnly:
fmt.Printf("+ %s\n", d.Payload)
}
}
} else {
fmt.Print(string(output))
}
if warning, diff := caddyfile.FormattingDifference(configFile, input); diff {
return caddy.ExitCodeFailedStartup, fmt.Errorf(`%s:%d: Caddyfile input is not formatted; Tip: use '--overwrite' to update your Caddyfile in-place instead of previewing it. Consult '--help' for more options`,
warning.File,
warning.Line,
)
}
return caddy.ExitCodeSuccess, nil
}
// handleEnvFileFlag loads the environment variables from the given --envfile
// flag if specified. This should be called as early as possible in the command function.
func handleEnvFileFlag(fl Flags) error {
var err error
var envfileFlag []string
envfileFlag, err = fl.GetStringSlice("envfile")
if err != nil {
return fmt.Errorf("reading envfile flag: %v", err)
}
for _, envfile := range envfileFlag {
if err := loadEnvFromFile(envfile); err != nil {
return fmt.Errorf("loading additional environment variables: %v", err)
}
}
return nil
}
// AdminAPIRequest makes an API request according to the CLI flags given,
// with the given HTTP method and request URI. If body is non-nil, it will
// be assumed to be Content-Type application/json. The caller should close
// the response body. Should only be used by Caddy CLI commands which
// need to interact with a running instance of Caddy via the admin API.
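// For example, a caller that fetches the running config from a local
// instance might look like this (a sketch only, not taken from an actual
// command; the listen address is assumed to be the default):
//
//	resp, err := AdminAPIRequest("localhost:2019", http.MethodGet, "/config/", nil, nil)
//	if err != nil {
//		return err
//	}
//	defer resp.Body.Close()
//	_, _ = io.Copy(os.Stdout, resp.Body)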
func AdminAPIRequest(adminAddr, method, uri string, headers http.Header, body io.Reader) (*http.Response, error) {
parsedAddr, err := caddy.ParseNetworkAddress(adminAddr)
if err != nil || parsedAddr.PortRangeSize() > 1 {
return nil, fmt.Errorf("invalid admin address %s: %v", adminAddr, err)
}
origin := "http://" + parsedAddr.JoinHostPort(0)
if parsedAddr.IsUnixNetwork() {
origin = "http://127.0.0.1" // bogus host is a hack so that http.NewRequest() is happy
// the unix address at this point might still contain the optional
// unix socket permissions, which are part of the address/host.
// those need to be removed first, as they aren't part of the
// resulting unix file path
addr, _, err := internal.SplitUnixSocketPermissionsBits(parsedAddr.Host)
if err != nil {
return nil, err
}
parsedAddr.Host = addr
}
// form the request
req, err := http.NewRequest(method, origin+uri, body)
if err != nil {
return nil, fmt.Errorf("making request: %v", err)
}
if parsedAddr.IsUnixNetwork() {
// We used to conform to RFC 2616 Section 14.26 which requires
// an empty host header when there is no host, as is the case
// with unix sockets. However, Go required a Host value so we
// used a hack of a space character as the host (it would see
// the Host was non-empty, then trim the space later). As of
// Go 1.20.6 (July 2023), this hack no longer works. See:
// https://github.com/golang/go/issues/60374
// See also the discussion here:
// https://github.com/golang/go/issues/61431
//
// After that, we now require a Host value of either 127.0.0.1
// or ::1 if one is set. Above I choose to use 127.0.0.1. Even
// though the value should be completely irrelevant (it could be
// "srldkjfsd"), if for some reason the Host *is* used, at least
// we can have some reasonable assurance it will stay on the local
// machine and that browsers, if they ever allow access to unix
// sockets, can still enforce CORS, ensuring it is still coming
// from the local machine.
} else {
req.Header.Set("Origin", origin)
}
if body != nil {
req.Header.Set("Content-Type", "application/json")
}
for k, v := range headers {
req.Header[k] = v
}
// make an HTTP client that dials our network type, since admin
// endpoints aren't always TCP, which is what the default transport
// expects; reuse is not of particular concern here
client := http.Client{
Transport: &http.Transport{
DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
return net.Dial(parsedAddr.Network, parsedAddr.JoinHostPort(0))
},
},
}
resp, err := client.Do(req)
if err != nil {
return nil, fmt.Errorf("performing request: %v", err)
}
// if it didn't work, let the user know
if resp.StatusCode >= 400 {
respBody, err := io.ReadAll(io.LimitReader(resp.Body, 1024*10))
if err != nil {
return nil, fmt.Errorf("HTTP %d: reading error message: %v", resp.StatusCode, err)
}
return nil, fmt.Errorf("caddy responded with error: HTTP %d: %s", resp.StatusCode, respBody)
}
return resp, nil
}
// DetermineAdminAPIAddress determines which admin API endpoint address should
// be used based on the inputs. By priority: if `address` is specified, then
// it is returned; if `config` is specified, then that config will be used for
// finding the admin address; if `configFile` (and `configAdapter`) are specified,
// then that config will be loaded to find the admin address; otherwise, the
// default admin listen address will be returned.
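// For example, with no --address flag and an adjacent Caddyfile (a sketch,
// assuming the caddyfile adapter is plugged in):
//
//	addr, err := DetermineAdminAPIAddress("", nil, "Caddyfile", "caddyfile")
//	// addr is the config's admin.listen value if one is set,
//	// otherwise caddy.DefaultAdminListen; err reports load/parse failures.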
func DetermineAdminAPIAddress(address string, config []byte, configFile, configAdapter string) (string, error) {
// Prefer the address if specified and non-empty
if address != "" {
return address, nil
}
// Try to load the config from file if specified, with the given adapter name
if configFile != "" {
var loadedConfigFile string
var err error
// use the provided loaded config if non-empty
// otherwise, load it from the specified file/adapter
loadedConfig := config
if len(loadedConfig) == 0 {
// get the config in caddy's native format
loadedConfig, loadedConfigFile, err = LoadConfig(configFile, configAdapter)
if err != nil {
return "", err
}
if loadedConfigFile == "" {
return "", fmt.Errorf("no config file to load; either use --config flag or ensure Caddyfile exists in current directory")
}
}
// get the address of the admin listener from the config
if len(loadedConfig) > 0 {
var tmpStruct struct {
Admin caddy.AdminConfig `json:"admin"`
}
err := json.Unmarshal(loadedConfig, &tmpStruct)
if err != nil {
return "", fmt.Errorf("unmarshaling admin listener address from config: %v", err)
}
if tmpStruct.Admin.Listen != "" {
return tmpStruct.Admin.Listen, nil
}
}
}
// Fallback to the default listen address otherwise
return caddy.DefaultAdminListen, nil
}
// configFileWithRespectToDefault returns the filename to use for loading the config, based
// on whether a config file is already specified and a supported default config file exists.
func configFileWithRespectToDefault(logger *zap.Logger, configFile string) (string, error) {
const defaultCaddyfile = "Caddyfile"
// if no input file was specified, try a default Caddyfile if the Caddyfile adapter is plugged in
if configFile == "" && caddyconfig.GetAdapter("caddyfile") != nil {
_, err := os.Stat(defaultCaddyfile)
if err == nil {
// default Caddyfile exists
if logger != nil {
logger.Info("using adjacent Caddyfile")
}
return defaultCaddyfile, nil
}
if !errors.Is(err, fs.ErrNotExist) {
// problem checking
return configFile, fmt.Errorf("checking if default Caddyfile exists: %v", err)
}
}
// default config file does not exist or is irrelevant
return configFile, nil
}
type moduleInfo struct {
caddyModuleID string
goModule *debug.Module
err error
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddycmd
import (
"flag"
"fmt"
"os"
"regexp"
"strings"
"github.com/spf13/cobra"
"github.com/spf13/cobra/doc"
"github.com/caddyserver/caddy/v2"
)
// Command represents a subcommand. Name, Short, and
// either Func or CobraFunc are required.
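// For instance, a minimal Command using Func might look like this
// (illustrative only; "greet" is not a real Caddy command):
//
//	Command{
//		Name:  "greet",
//		Short: "Prints a greeting",
//		Func: func(fl Flags) (int, error) {
//			fmt.Println("hello")
//			return caddy.ExitCodeSuccess, nil
//		},
//	}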
type Command struct {
// The name of the subcommand. Must conform to the
// format described by the RegisterCommand() godoc.
// Required.
Name string
// Usage is a brief message describing the syntax of
// the subcommand's flags and args. Use [] to indicate
// optional parameters and <> to enclose literal values
// intended to be replaced by the user. Do not prefix
// the string with "caddy" or the name of the command
// since these will be prepended for you; only include
// the actual parameters for this command.
Usage string
// Short is a one-line message explaining what the
// command does. Should not end with punctuation.
// Required.
Short string
// Long is the full help text shown to the user.
// Will be trimmed of whitespace on both ends before
// being printed.
Long string
// Flags is the flagset for command.
// This is ignored if CobraFunc is set.
Flags *flag.FlagSet
// Func is a function that executes a subcommand using
// the parsed flags. It returns an exit code and any
// associated error.
// Required if CobraFunc is not set.
Func CommandFunc
// CobraFunc allows further configuration of the command
// via cobra's APIs. If this is set, then Func and Flags
// are ignored, with the assumption that they are set in
// this function. A caddycmd.WrapCommandFuncForCobra helper
// exists to simplify porting CommandFunc to Cobra's RunE.
CobraFunc func(*cobra.Command)
}
// CommandFunc is a command's function. It runs the
// command and returns the proper exit code along with
// any error that occurred.
type CommandFunc func(Flags) (int, error)
// Commands returns the map of commands registered
// via RegisterCommand.
func Commands() map[string]Command {
return commands
}
var commands = make(map[string]Command)
func init() {
RegisterCommand(Command{
Name: "start",
Usage: "[--config <path> [--adapter <name>]] [--envfile <path>] [--watch] [--pidfile <file>]",
Short: "Starts the Caddy process in the background and then returns",
Long: `
Starts the Caddy process, optionally bootstrapped with an initial config file.
This command unblocks after the server starts running or fails to run.
If --envfile is specified, an environment file with environment variables
in the KEY=VALUE format will be loaded into the Caddy process.
On Windows, the spawned child process will remain attached to the terminal, so
closing the window will forcefully stop Caddy; to avoid forgetting this, try
using 'caddy run' instead to keep it in the foreground.
`,
CobraFunc: func(cmd *cobra.Command) {
cmd.Flags().StringP("config", "c", "", "Configuration file")
cmd.Flags().StringP("adapter", "a", "", "Name of config adapter to apply")
cmd.Flags().StringSliceP("envfile", "", []string{}, "Environment file(s) to load")
cmd.Flags().BoolP("watch", "w", false, "Reload changed config file automatically")
cmd.Flags().StringP("pidfile", "", "", "Path of file to which to write process ID")
cmd.RunE = WrapCommandFuncForCobra(cmdStart)
},
})
RegisterCommand(Command{
Name: "run",
Usage: "[--config <path> [--adapter <name>]] [--envfile <path>] [--environ] [--resume] [--watch] [--pidfile <file>]",
Short: `Starts the Caddy process and blocks indefinitely`,
Long: `
Starts the Caddy process, optionally bootstrapped with an initial config file,
and blocks indefinitely until the server is stopped; i.e. runs Caddy in
"daemon" mode (foreground).
If a config file is specified, it will be applied immediately after the process
is running. If the config file is not in Caddy's native JSON format, you can
specify an adapter with --adapter to adapt the given config file to
Caddy's native format. The config adapter must be a registered module. Any
warnings will be printed to the log, but beware that any adaptation without
errors will immediately be used. If you want to review the results of the
adaptation first, use the 'adapt' subcommand.
As a special case, if the current working directory has a file called
"Caddyfile" and the caddyfile config adapter is plugged in (default), then
that file will be loaded and used to configure Caddy, even without any command
line flags.
If --envfile is specified, an environment file with environment variables
in the KEY=VALUE format will be loaded into the Caddy process.
If --environ is specified, the environment as seen by the Caddy process will
be printed before starting. This is the same as the environ command but does
not quit after printing, and can be useful for troubleshooting.
The --resume flag will override the --config flag if there is a config auto-
save file. It is not an error if --resume is used and no autosave file exists.
If --watch is specified, the config file will be loaded automatically after
changes. ⚠️ This can make unintentional config changes easier; only use this
option in a local development environment.
`,
CobraFunc: func(cmd *cobra.Command) {
cmd.Flags().StringP("config", "c", "", "Configuration file")
cmd.Flags().StringP("adapter", "a", "", "Name of config adapter to apply")
cmd.Flags().StringSliceP("envfile", "", []string{}, "Environment file(s) to load")
cmd.Flags().BoolP("environ", "e", false, "Print environment")
cmd.Flags().BoolP("resume", "r", false, "Use saved config, if any (and prefer over --config file)")
cmd.Flags().BoolP("watch", "w", false, "Watch config file for changes and reload it automatically")
cmd.Flags().StringP("pidfile", "", "", "Path of file to which to write process ID")
cmd.Flags().StringP("pingback", "", "", "Echo confirmation bytes to this address on success")
cmd.RunE = WrapCommandFuncForCobra(cmdRun)
},
})
RegisterCommand(Command{
Name: "stop",
Usage: "[--config <path> [--adapter <name>]] [--address <interface>]",
Short: "Gracefully stops a started Caddy process",
Long: `
Stops the background Caddy process as gracefully as possible.
It requires that the admin API is enabled and accessible, since it will
use the API's /stop endpoint. The address of this request can be customized
using the --address flag, or from the given --config, if not the default.
`,
CobraFunc: func(cmd *cobra.Command) {
cmd.Flags().StringP("config", "c", "", "Configuration file to use to parse the admin address, if --address is not used")
cmd.Flags().StringP("adapter", "a", "", "Name of config adapter to apply (when --config is used)")
cmd.Flags().StringP("address", "", "", "The address to use to reach the admin API endpoint, if not the default")
cmd.RunE = WrapCommandFuncForCobra(cmdStop)
},
})
RegisterCommand(Command{
Name: "reload",
Usage: "--config <path> [--adapter <name>] [--address <interface>]",
Short: "Changes the config of the running Caddy instance",
Long: `
Gives the running Caddy instance a new configuration. This has the same effect
as POSTing a document to the /load API endpoint, but is convenient for simple
workflows revolving around config files.
Since the admin endpoint is configurable, the endpoint configuration is loaded
from the --address flag if specified; otherwise it is loaded from the given
config file; otherwise the default is assumed.
`,
CobraFunc: func(cmd *cobra.Command) {
cmd.Flags().StringP("config", "c", "", "Configuration file (required)")
cmd.Flags().StringP("adapter", "a", "", "Name of config adapter to apply")
cmd.Flags().StringP("address", "", "", "Address of the administration listener, if different from config")
cmd.Flags().BoolP("force", "f", false, "Force config reload, even if it is the same")
cmd.RunE = WrapCommandFuncForCobra(cmdReload)
},
})
RegisterCommand(Command{
Name: "version",
Short: "Prints the version",
Long: `
Prints the version of this Caddy binary.
Version information must be embedded into the binary at compile-time in
order for Caddy to display anything useful with this command. If Caddy
is built from within a version control repository, the Go command will
embed the revision hash if available. However, if Caddy is built in the
way specified by our online documentation (or by using xcaddy), more
detailed version information is printed as given by Go modules.
For more details about the full version string, see the Go module
documentation: https://go.dev/doc/modules/version-numbers
`,
Func: cmdVersion,
})
RegisterCommand(Command{
Name: "list-modules",
Usage: "[--packages] [--versions] [--skip-standard]",
Short: "Lists the installed Caddy modules",
CobraFunc: func(cmd *cobra.Command) {
cmd.Flags().BoolP("packages", "", false, "Print package paths")
cmd.Flags().BoolP("versions", "", false, "Print version information")
cmd.Flags().BoolP("skip-standard", "s", false, "Skip printing standard modules")
cmd.RunE = WrapCommandFuncForCobra(cmdListModules)
},
})
RegisterCommand(Command{
Name: "build-info",
Short: "Prints information about this build",
Func: cmdBuildInfo,
})
RegisterCommand(Command{
Name: "environ",
Usage: "[--envfile <path>]",
Short: "Prints the environment",
Long: `
Prints the environment as seen by this Caddy process.
The environment includes variables set in the system. If your Caddy
configuration uses environment variables (e.g. "{env.VARIABLE}") then
this command can be useful for verifying that the variables will have
the values you expect in your config.
If --envfile is specified, an environment file with environment variables
in the KEY=VALUE format will be loaded into the Caddy process.
Note that environments may be different depending on how you run Caddy.
Environments for Caddy instances started by service managers such as
systemd are often different than the environment inherited from your
shell or terminal.
You can also print the environment at the same time you use "caddy run"
by adding the "--environ" flag.
Environments may contain sensitive data.
`,
CobraFunc: func(cmd *cobra.Command) {
cmd.Flags().StringSliceP("envfile", "", []string{}, "Environment file(s) to load")
cmd.RunE = WrapCommandFuncForCobra(cmdEnviron)
},
})
RegisterCommand(Command{
Name: "adapt",
Usage: "--config <path> [--adapter <name>] [--pretty] [--validate] [--envfile <path>]",
Short: "Adapts a configuration to Caddy's native JSON",
Long: `
Adapts a configuration to Caddy's native JSON format and writes the
output to stdout, along with any warnings to stderr.
If --pretty is specified, the output will be formatted with indentation
for human readability.
If --validate is used, the adapted config will be checked for validity.
If the config is invalid, an error will be printed to stderr and a non-
zero exit status will be returned.
If --envfile is specified, an environment file with environment variables
in the KEY=VALUE format will be loaded into the Caddy process.
`,
CobraFunc: func(cmd *cobra.Command) {
cmd.Flags().StringP("config", "c", "", "Configuration file to adapt (required)")
cmd.Flags().StringP("adapter", "a", "caddyfile", "Name of config adapter")
cmd.Flags().BoolP("pretty", "p", false, "Format the output for human readability")
cmd.Flags().BoolP("validate", "", false, "Validate the output")
cmd.Flags().StringSliceP("envfile", "", []string{}, "Environment file(s) to load")
cmd.RunE = WrapCommandFuncForCobra(cmdAdaptConfig)
},
})
RegisterCommand(Command{
Name: "validate",
Usage: "--config <path> [--adapter <name>] [--envfile <path>]",
Short: "Tests whether a configuration file is valid",
Long: `
Loads and provisions the provided config, but does not start running it.
This reveals any errors with the configuration through the loading and
provisioning stages.
If --envfile is specified, an environment file with environment variables
in the KEY=VALUE format will be loaded into the Caddy process.
`,
CobraFunc: func(cmd *cobra.Command) {
cmd.Flags().StringP("config", "c", "", "Input configuration file")
cmd.Flags().StringP("adapter", "a", "", "Name of config adapter")
cmd.Flags().StringSliceP("envfile", "", []string{}, "Environment file(s) to load")
cmd.RunE = WrapCommandFuncForCobra(cmdValidateConfig)
},
})
RegisterCommand(Command{
Name: "storage",
Short: "Commands for working with Caddy's storage (EXPERIMENTAL)",
Long: `
Allows exporting and importing Caddy's storage contents. The two commands can be
combined in a pipeline to transfer directly from one storage to another:
$ caddy storage export --config Caddyfile.old --output - |
> caddy storage import --config Caddyfile.new --input -
The - argument refers to stdout and stdin, respectively.
NOTE: When importing to or exporting from file_system storage (the default), the command
should be run as the user that owns the associated root path.
EXPERIMENTAL: May be changed or removed.
`,
CobraFunc: func(cmd *cobra.Command) {
exportCmd := &cobra.Command{
Use: "export --config <path> --output <path>",
Short: "Exports storage assets as a tarball",
Long: `
The contents of the configured storage module (TLS certificates, etc)
are exported via a tarball.
--output is required, - can be given for stdout.
`,
RunE: WrapCommandFuncForCobra(cmdExportStorage),
}
exportCmd.Flags().StringP("config", "c", "", "Input configuration file (required)")
exportCmd.Flags().StringP("output", "o", "", "Output path")
cmd.AddCommand(exportCmd)
importCmd := &cobra.Command{
Use: "import --config <path> --input <path>",
Short: "Imports storage assets from a tarball",
Long: `
Imports storage assets to the configured storage module. The import file must be
a tar archive.
--input is required, - can be given for stdin.
`,
RunE: WrapCommandFuncForCobra(cmdImportStorage),
}
importCmd.Flags().StringP("config", "c", "", "Configuration file to load (required)")
importCmd.Flags().StringP("input", "i", "", "Tar of assets to load (required)")
cmd.AddCommand(importCmd)
},
})
RegisterCommand(Command{
Name: "fmt",
Usage: "[--overwrite] [--diff] [<path>]",
Short: "Formats a Caddyfile",
Long: `
Formats the Caddyfile by adding proper indentation and spaces to improve
human readability. It prints the result to stdout.
If --overwrite is specified, the output will be written to the config file
directly instead of printing it.
If --diff is specified, the output will be compared against the input, and
lines will be prefixed with '-' and '+' where they differ. Note that
unchanged lines are prefixed with two spaces for alignment, and that this
is not a valid patch format.
If you wish to use stdin instead of a regular file, use - as the path.
When reading from stdin, the --overwrite flag has no effect: the result
is always printed to stdout.
`,
CobraFunc: func(cmd *cobra.Command) {
cmd.Flags().BoolP("overwrite", "w", false, "Overwrite the input file with the results")
cmd.Flags().BoolP("diff", "d", false, "Print the differences between the input file and the formatted output")
cmd.RunE = WrapCommandFuncForCobra(cmdFmt)
},
})
RegisterCommand(Command{
Name: "upgrade",
Short: "Upgrade Caddy (EXPERIMENTAL)",
Long: `
Downloads an updated Caddy binary with the same modules/plugins at the
latest versions. EXPERIMENTAL: May be changed or removed.
`,
CobraFunc: func(cmd *cobra.Command) {
cmd.Flags().BoolP("keep-backup", "k", false, "Keep the backed up binary, instead of deleting it")
cmd.RunE = WrapCommandFuncForCobra(cmdUpgrade)
},
})
RegisterCommand(Command{
Name: "add-package",
Usage: "<packages...>",
Short: "Adds Caddy packages (EXPERIMENTAL)",
Long: `
Downloads an updated Caddy binary with the specified packages (module/plugin)
added. Retains existing packages. Returns an error if any of the packages are
already included. EXPERIMENTAL: May be changed or removed.
`,
CobraFunc: func(cmd *cobra.Command) {
cmd.Flags().BoolP("keep-backup", "k", false, "Keep the backed up binary, instead of deleting it")
cmd.RunE = WrapCommandFuncForCobra(cmdAddPackage)
},
})
RegisterCommand(Command{
Name: "remove-package",
Func: cmdRemovePackage,
Usage: "<packages...>",
Short: "Removes Caddy packages (EXPERIMENTAL)",
Long: `
Downloads an updated Caddy binary without the specified packages (module/plugin).
Returns an error if any of the packages are not included.
EXPERIMENTAL: May be changed or removed.
`,
CobraFunc: func(cmd *cobra.Command) {
cmd.Flags().BoolP("keep-backup", "k", false, "Keep the backed up binary, instead of deleting it")
cmd.RunE = WrapCommandFuncForCobra(cmdRemovePackage)
},
})
RegisterCommand(Command{
Name: "manpage",
Usage: "--directory <path>",
Short: "Generates the manual pages for Caddy commands",
Long: `
Generates the manual pages for Caddy commands into the designated directory,
tagged as section 8 (System Administration).
The manual page files are generated into the directory specified by the
argument of --directory. If the directory does not exist, it will be created.
`,
CobraFunc: func(cmd *cobra.Command) {
cmd.Flags().StringP("directory", "o", "", "The output directory where the manpages are generated")
cmd.RunE = WrapCommandFuncForCobra(func(fl Flags) (int, error) {
dir := strings.TrimSpace(fl.String("directory"))
if dir == "" {
return caddy.ExitCodeFailedQuit, fmt.Errorf("designated output directory is required (use --directory)")
}
if err := os.MkdirAll(dir, 0o755); err != nil {
return caddy.ExitCodeFailedQuit, err
}
if err := doc.GenManTree(rootCmd, &doc.GenManHeader{
Title: "Caddy",
Section: "8", // https://en.wikipedia.org/wiki/Man_page#Manual_sections
}, dir); err != nil {
return caddy.ExitCodeFailedQuit, err
}
return caddy.ExitCodeSuccess, nil
})
},
})
// source: https://github.com/spf13/cobra/blob/main/shell_completions.md
rootCmd.AddCommand(&cobra.Command{
Use: "completion [bash|zsh|fish|powershell]",
Short: "Generate completion script",
Long: fmt.Sprintf(`To load completions:
Bash:
$ source <(%[1]s completion bash)
# To load completions for each session, execute once:
# Linux:
$ %[1]s completion bash > /etc/bash_completion.d/%[1]s
# macOS:
$ %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s
Zsh:
# If shell completion is not already enabled in your environment,
# you will need to enable it. You can execute the following once:
$ echo "autoload -U compinit; compinit" >> ~/.zshrc
# To load completions for each session, execute once:
$ %[1]s completion zsh > "${fpath[1]}/_%[1]s"
# You will need to start a new shell for this setup to take effect.
fish:
$ %[1]s completion fish | source
# To load completions for each session, execute once:
$ %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish
PowerShell:
PS> %[1]s completion powershell | Out-String | Invoke-Expression
# To load completions for every new session, run:
PS> %[1]s completion powershell > %[1]s.ps1
# and source this file from your PowerShell profile.
`, rootCmd.Root().Name()),
DisableFlagsInUseLine: true,
ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
RunE: func(cmd *cobra.Command, args []string) error {
switch args[0] {
case "bash":
return cmd.Root().GenBashCompletion(os.Stdout)
case "zsh":
return cmd.Root().GenZshCompletion(os.Stdout)
case "fish":
return cmd.Root().GenFishCompletion(os.Stdout, true)
case "powershell":
return cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout)
default:
return fmt.Errorf("unrecognized shell: %s", args[0])
}
},
})
}
// RegisterCommand registers the command cmd.
// cmd.Name must be unique and conform to the
// following format:
//
// - lowercase
// - alphanumeric and hyphen characters only
// - cannot start or end with a hyphen
// - hyphen cannot be adjacent to another hyphen
//
// This function panics if the name is already registered,
// if the name does not meet the described format, or if
// any of the fields are missing from cmd.
//
// This function should be used in init().
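// A hypothetical plugin might register a command like this (illustrative
// only; "hello" is not an actual Caddy command):
//
//	func init() {
//		RegisterCommand(Command{
//			Name:  "hello",
//			Usage: "[--name <name>]",
//			Short: "Prints a friendly greeting",
//			CobraFunc: func(cmd *cobra.Command) {
//				cmd.Flags().StringP("name", "n", "world", "Who to greet")
//				cmd.RunE = WrapCommandFuncForCobra(func(fl Flags) (int, error) {
//					fmt.Println("Hello, " + fl.String("name") + "!")
//					return caddy.ExitCodeSuccess, nil
//				})
//			},
//		})
//	}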
func RegisterCommand(cmd Command) {
if cmd.Name == "" {
panic("command name is required")
}
if cmd.Func == nil && cmd.CobraFunc == nil {
panic("command function missing")
}
if cmd.Short == "" {
panic("command short string is required")
}
if _, exists := commands[cmd.Name]; exists {
panic("command already registered: " + cmd.Name)
}
if !commandNameRegex.MatchString(cmd.Name) {
panic("invalid command name")
}
rootCmd.AddCommand(caddyCmdToCobra(cmd))
}
var commandNameRegex = regexp.MustCompile(`^[a-z0-9]$|^([a-z0-9]+-?[a-z0-9]*)+[a-z0-9]$`)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddycmd
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"io/fs"
"log"
"net"
"os"
"path/filepath"
"runtime"
"runtime/debug"
"strconv"
"strings"
"time"
"github.com/caddyserver/certmagic"
"github.com/spf13/pflag"
"go.uber.org/automaxprocs/maxprocs"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
)
func init() {
// set a fitting User-Agent for ACME requests
version, _ := caddy.Version()
cleanModVersion := strings.TrimPrefix(version, "v")
ua := "Caddy/" + cleanModVersion
if uaEnv, ok := os.LookupEnv("USERAGENT"); ok {
ua = uaEnv + " " + ua
}
certmagic.UserAgent = ua
// by using Caddy, user indicates agreement to CA terms
// (very important, as Caddy is often non-interactive
// and thus ACME account creation will fail!)
certmagic.DefaultACME.Agreed = true
}
// Main implements the main function of the caddy command.
// Call this if Caddy is to be the main() of your program.
func Main() {
if len(os.Args) == 0 {
fmt.Printf("[FATAL] no arguments provided by OS; args[0] must be command\n")
os.Exit(caddy.ExitCodeFailedStartup)
}
undo, err := maxprocs.Set()
defer undo()
if err != nil {
caddy.Log().Warn("failed to set GOMAXPROCS", zap.Error(err))
}
if err := rootCmd.Execute(); err != nil {
var exitError *exitError
if errors.As(err, &exitError) {
os.Exit(exitError.ExitCode)
}
os.Exit(1)
}
}
// handlePingbackConn reads from conn and ensures it matches
// the bytes in expect, or returns an error if it doesn't.
func handlePingbackConn(conn net.Conn, expect []byte) error {
defer conn.Close()
confirmationBytes, err := io.ReadAll(io.LimitReader(conn, 32))
if err != nil {
return err
}
if !bytes.Equal(confirmationBytes, expect) {
return fmt.Errorf("wrong confirmation: %x", confirmationBytes)
}
return nil
}
// LoadConfig loads the config from configFile and adapts it
// using adapterName. If adapterName is specified, configFile
// must be also. If no configFile is specified, it tries
// loading a default config file. The lack of a config file is
// not treated as an error, but false will be returned if
// there is no config available. It prints any warnings to stderr,
// and returns the resulting JSON config bytes along with
// the name of the loaded config file (if any).
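// For example (a sketch, assuming a Caddyfile exists in the working
// directory and the caddyfile adapter is plugged in):
//
//	cfgJSON, cfgFile, err := LoadConfig("Caddyfile", "caddyfile")
//	// cfgJSON holds the adapted JSON config and cfgFile is "Caddyfile".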
func LoadConfig(configFile, adapterName string) ([]byte, string, error) {
return loadConfigWithLogger(caddy.Log(), configFile, adapterName)
}
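// isCaddyfile reports whether the given config file should be treated as a
// Caddyfile, based on its name and the (possibly empty) adapter name. For
// example, "Caddyfile", "Caddyfile.dev", and "site.caddyfile" are treated as
// Caddyfiles when no adapter is specified, while "config.json" is not.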
func isCaddyfile(configFile, adapterName string) (bool, error) {
if adapterName == "caddyfile" {
return true, nil
}
// as a special case, if a config file starts with "caddyfile" or
// has a ".caddyfile" extension, and no adapter is specified, and
// no adapter module name matches the extension, assume
// caddyfile adapter for convenience
baseConfig := strings.ToLower(filepath.Base(configFile))
baseConfigExt := filepath.Ext(baseConfig)
startsOrEndsInCaddyfile := strings.HasPrefix(baseConfig, "caddyfile") || strings.HasSuffix(baseConfig, ".caddyfile")
if baseConfigExt == ".json" {
return false, nil
}
// If no adapter is specified and the file name starts with "caddyfile"
// or ends with ".caddyfile" (it cannot be a JSON file at this point,
// since that case returned above), assume the caddyfile adapter.
if adapterName == "" && startsOrEndsInCaddyfile {
return true, nil
}
// otherwise, either an adapter other than "caddyfile" was explicitly
// specified, or the file name neither starts with "caddyfile" nor ends
// with ".caddyfile", so do not assume the caddyfile adapter
return false, nil
}
func loadConfigWithLogger(logger *zap.Logger, configFile, adapterName string) ([]byte, string, error) {
// if no logger is provided, use a nop logger
// just so we don't have to check for nil
if logger == nil {
logger = zap.NewNop()
}
// specifying an adapter without a config file is ambiguous
if adapterName != "" && configFile == "" {
return nil, "", fmt.Errorf("cannot adapt config without config file (use --config)")
}
// load initial config and adapter
var config []byte
var cfgAdapter caddyconfig.Adapter
var err error
if configFile != "" {
if configFile == "-" {
config, err = io.ReadAll(os.Stdin)
if err != nil {
return nil, "", fmt.Errorf("reading config from stdin: %v", err)
}
logger.Info("using config from stdin")
} else {
config, err = os.ReadFile(configFile)
if err != nil {
return nil, "", fmt.Errorf("reading config from file: %v", err)
}
logger.Info("using config from file", zap.String("file", configFile))
}
} else if adapterName == "" {
// if the Caddyfile adapter is plugged in, we can try using an
// adjacent Caddyfile by default
cfgAdapter = caddyconfig.GetAdapter("caddyfile")
if cfgAdapter != nil {
config, err = os.ReadFile("Caddyfile")
if errors.Is(err, fs.ErrNotExist) {
// okay, no default Caddyfile; pretend like this never happened
cfgAdapter = nil
} else if err != nil {
// default Caddyfile exists, but error reading it
return nil, "", fmt.Errorf("reading default Caddyfile: %v", err)
} else {
// success reading default Caddyfile
configFile = "Caddyfile"
logger.Info("using adjacent Caddyfile")
}
}
}
if yes, err := isCaddyfile(configFile, adapterName); yes {
adapterName = "caddyfile"
} else if err != nil {
return nil, "", err
}
// load config adapter
if adapterName != "" {
cfgAdapter = caddyconfig.GetAdapter(adapterName)
if cfgAdapter == nil {
return nil, "", fmt.Errorf("unrecognized config adapter: %s", adapterName)
}
}
// adapt config
if cfgAdapter != nil {
adaptedConfig, warnings, err := cfgAdapter.Adapt(config, map[string]any{
"filename": configFile,
})
if err != nil {
return nil, "", fmt.Errorf("adapting config using %s: %v", adapterName, err)
}
logger.Info("adapted config to JSON", zap.String("adapter", adapterName))
for _, warn := range warnings {
msg := warn.Message
if warn.Directive != "" {
msg = fmt.Sprintf("%s: %s", warn.Directive, warn.Message)
}
logger.Warn(msg,
zap.String("adapter", adapterName),
zap.String("file", warn.File),
zap.Int("line", warn.Line))
}
config = adaptedConfig
} else if len(config) != 0 {
// validate that the config is at least valid JSON
err = json.Unmarshal(config, new(any))
if err != nil {
return nil, "", fmt.Errorf("config is not valid JSON: %v; did you mean to use a config adapter (the --adapter flag)?", err)
}
}
return config, configFile, nil
}
// watchConfigFile watches the config file at filename for changes
// and reloads the config if the file was updated. This function
// blocks indefinitely; it only quits if the poller encounters an error
// loading the config. The filename passed in must be the actual
// config file used, not one to be discovered.
// Every second, the config file is loaded and adapted again, and the
// resulting bytes are compared to those of the previously loaded config;
// if they differ, the new config is applied.
func watchConfigFile(filename, adapterName string) {
defer func() {
if err := recover(); err != nil {
log.Printf("[PANIC] watching config file: %v\n%s", err, debug.Stack())
}
}()
// make our logger; since config reloads can change the
// default logger, we need to get it dynamically each time
logger := func() *zap.Logger {
return caddy.Log().
Named("watcher").
With(zap.String("config_file", filename))
}
// get current config
lastCfg, _, err := loadConfigWithLogger(nil, filename, adapterName)
if err != nil {
logger().Error("unable to load latest config", zap.Error(err))
return
}
logger().Info("watching config file for changes")
// begin poller
//nolint:staticcheck
for range time.Tick(1 * time.Second) {
// get current config
newCfg, _, err := loadConfigWithLogger(nil, filename, adapterName)
if err != nil {
logger().Error("unable to load latest config", zap.Error(err))
return
}
// if it hasn't changed, nothing to do
if bytes.Equal(lastCfg, newCfg) {
continue
}
logger().Info("config file changed; reloading")
// remember the current config
lastCfg = newCfg
// apply the updated config
err = caddy.Load(lastCfg, false)
if err != nil {
logger().Error("applying latest config", zap.Error(err))
continue
}
}
}
// Flags wraps a FlagSet so that typed values
// from flags can be easily retrieved.
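// For example, a CommandFunc might read its flags like this (an
// illustrative sketch; the flag names are hypothetical):
//
//	var myCommand CommandFunc = func(fl Flags) (int, error) {
//		if fl.Bool("verbose") {
//			fmt.Println("config file:", fl.String("config"))
//		}
//		return caddy.ExitCodeSuccess, nil
//	}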
type Flags struct {
*pflag.FlagSet
}
// String returns the string representation of the
// flag given by name. It panics if the flag is not
// in the flag set.
func (f Flags) String(name string) string {
return f.FlagSet.Lookup(name).Value.String()
}
// Bool returns the boolean representation of the
// flag given by name. It returns false if the flag
// is not a boolean type. It panics if the flag is
// not in the flag set.
func (f Flags) Bool(name string) bool {
val, _ := strconv.ParseBool(f.String(name))
return val
}
// Int returns the integer representation of the
// flag given by name. It returns 0 if the flag
// is not an integer type. It panics if the flag is
// not in the flag set.
func (f Flags) Int(name string) int {
val, _ := strconv.ParseInt(f.String(name), 0, strconv.IntSize)
return int(val)
}
// Float64 returns the float64 representation of the
// flag given by name. It returns 0 if the flag
// is not a float64 type. It panics if the flag is
// not in the flag set.
func (f Flags) Float64(name string) float64 {
val, _ := strconv.ParseFloat(f.String(name), 64)
return val
}
// Duration returns the duration representation of the
// flag given by name. It returns 0 if the flag
// is not a duration type. It panics if the flag is
// not in the flag set.
func (f Flags) Duration(name string) time.Duration {
val, _ := caddy.ParseDuration(f.String(name))
return val
}
func loadEnvFromFile(envFile string) error {
file, err := os.Open(envFile)
if err != nil {
return fmt.Errorf("reading environment file: %v", err)
}
defer file.Close()
envMap, err := parseEnvFile(file)
if err != nil {
return fmt.Errorf("parsing environment file: %v", err)
}
for k, v := range envMap {
// do not overwrite existing environment variables
_, exists := os.LookupEnv(k)
if !exists {
if err := os.Setenv(k, v); err != nil {
return fmt.Errorf("setting environment variables: %v", err)
}
}
}
// Update the storage paths to ensure they have the proper
// value after loading a specified env file.
caddy.ConfigAutosavePath = filepath.Join(caddy.AppConfigDir(), "autosave.json")
caddy.DefaultStorage = &certmagic.FileStorage{Path: caddy.AppDataDir()}
return nil
}
// parseEnvFile parses an env file from KEY=VALUE format.
// It's pretty naive. Limited value quotation is supported,
// but variable and command expansions are not supported.
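// For example, a hypothetical file containing:
//
//	# comments and blank lines are skipped
//	export API_KEY=abc123   # trailing comments are stripped
//	GREETING="hello
//	world"
//
// would yield API_KEY=abc123 and a GREETING value spanning two lines.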
func parseEnvFile(envInput io.Reader) (map[string]string, error) {
envMap := make(map[string]string)
scanner := bufio.NewScanner(envInput)
var lineNumber int
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
lineNumber++
// skip empty lines and lines starting with comment
if line == "" || strings.HasPrefix(line, "#") {
continue
}
// split line into key and value
before, after, isCut := strings.Cut(line, "=")
if !isCut {
return nil, fmt.Errorf("can't parse line %d; line should be in KEY=VALUE format", lineNumber)
}
key, val := before, after
// sometimes keys are prefixed by "export " so file can be sourced in bash; ignore it here
key = strings.TrimPrefix(key, "export ")
// validate key and value
if key == "" {
return nil, fmt.Errorf("missing or empty key on line %d", lineNumber)
}
if strings.Contains(key, " ") {
return nil, fmt.Errorf("invalid key on line %d: contains whitespace: %s", lineNumber, key)
}
if strings.HasPrefix(val, " ") || strings.HasPrefix(val, "\t") {
return nil, fmt.Errorf("invalid value on line %d: whitespace before value: '%s'", lineNumber, val)
}
// remove any trailing comment after value
if commentStart, _, found := strings.Cut(val, "#"); found {
val = strings.TrimRight(commentStart, " \t")
}
// quoted value: support newlines
if strings.HasPrefix(val, `"`) || strings.HasPrefix(val, "'") {
quote := string(val[0])
for !(strings.HasSuffix(line, quote) && !strings.HasSuffix(line, `\`+quote)) {
val = strings.ReplaceAll(val, `\`+quote, quote)
if !scanner.Scan() {
break
}
lineNumber++
line = strings.ReplaceAll(scanner.Text(), `\`+quote, quote)
val += "\n" + line
}
val = strings.TrimPrefix(val, quote)
val = strings.TrimSuffix(val, quote)
}
envMap[key] = val
}
if err := scanner.Err(); err != nil {
return nil, err
}
return envMap, nil
}
func printEnvironment() {
_, version := caddy.Version()
fmt.Printf("caddy.HomeDir=%s\n", caddy.HomeDir())
fmt.Printf("caddy.AppDataDir=%s\n", caddy.AppDataDir())
fmt.Printf("caddy.AppConfigDir=%s\n", caddy.AppConfigDir())
fmt.Printf("caddy.ConfigAutosavePath=%s\n", caddy.ConfigAutosavePath)
fmt.Printf("caddy.Version=%s\n", version)
fmt.Printf("runtime.GOOS=%s\n", runtime.GOOS)
fmt.Printf("runtime.GOARCH=%s\n", runtime.GOARCH)
fmt.Printf("runtime.Compiler=%s\n", runtime.Compiler)
fmt.Printf("runtime.NumCPU=%d\n", runtime.NumCPU())
fmt.Printf("runtime.GOMAXPROCS=%d\n", runtime.GOMAXPROCS(0))
fmt.Printf("runtime.Version=%s\n", runtime.Version())
cwd, err := os.Getwd()
if err != nil {
cwd = fmt.Sprintf("<error: %v>", err)
}
fmt.Printf("os.Getwd=%s\n\n", cwd)
for _, v := range os.Environ() {
fmt.Println(v)
}
}
// StringSlice is a flag.Value that enables repeated use of a string flag.
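// For example (an illustrative sketch using the standard flag package):
//
//	var tags StringSlice
//	flag.Var(&tags, "tag", "may be repeated")
//	// running with `-tag a -tag b` yields StringSlice{"a", "b"}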
type StringSlice []string
func (ss StringSlice) String() string { return "[" + strings.Join(ss, ", ") + "]" }
func (ss *StringSlice) Set(value string) error {
*ss = append(*ss, value)
return nil
}
// Interface guard
var _ flag.Value = (*StringSlice)(nil)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddycmd
import (
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"reflect"
"runtime"
"runtime/debug"
"strings"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
)
func cmdUpgrade(fl Flags) (int, error) {
_, nonstandard, _, err := getModules()
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("unable to enumerate installed plugins: %v", err)
}
pluginPkgs, err := getPluginPackages(nonstandard)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
return upgradeBuild(pluginPkgs, fl)
}
func cmdAddPackage(fl Flags) (int, error) {
if len(fl.Args()) == 0 {
return caddy.ExitCodeFailedStartup, fmt.Errorf("at least one package name must be specified")
}
_, nonstandard, _, err := getModules()
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("unable to enumerate installed plugins: %v", err)
}
pluginPkgs, err := getPluginPackages(nonstandard)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
for _, arg := range fl.Args() {
if _, ok := pluginPkgs[arg]; ok {
return caddy.ExitCodeFailedStartup, fmt.Errorf("package is already added")
}
pluginPkgs[arg] = struct{}{}
}
return upgradeBuild(pluginPkgs, fl)
}
func cmdRemovePackage(fl Flags) (int, error) {
if len(fl.Args()) == 0 {
return caddy.ExitCodeFailedStartup, fmt.Errorf("at least one package name must be specified")
}
_, nonstandard, _, err := getModules()
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("unable to enumerate installed plugins: %v", err)
}
pluginPkgs, err := getPluginPackages(nonstandard)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
for _, arg := range fl.Args() {
if _, ok := pluginPkgs[arg]; !ok {
// package does not exist
return caddy.ExitCodeFailedStartup, fmt.Errorf("package is not added")
}
delete(pluginPkgs, arg)
}
return upgradeBuild(pluginPkgs, fl)
}
func upgradeBuild(pluginPkgs map[string]struct{}, fl Flags) (int, error) {
l := caddy.Log()
thisExecPath, err := os.Executable()
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("determining current executable path: %v", err)
}
thisExecStat, err := os.Stat(thisExecPath)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("retrieving current executable permission bits: %v", err)
}
if thisExecStat.Mode()&os.ModeSymlink == os.ModeSymlink {
symSource := thisExecPath
// we are a symlink; resolve it
thisExecPath, err = filepath.EvalSymlinks(thisExecPath)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("resolving current executable symlink: %v", err)
}
l.Info("this executable is a symlink", zap.String("source", symSource), zap.String("target", thisExecPath))
}
l.Info("this executable will be replaced", zap.String("path", thisExecPath))
// build the request URL to download this custom build
qs := url.Values{
"os": {runtime.GOOS},
"arch": {runtime.GOARCH},
}
for pkg := range pluginPkgs {
qs.Add("p", pkg)
}
// initiate the build
resp, err := downloadBuild(qs)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("download failed: %v", err)
}
defer resp.Body.Close()
// back up the current binary so that we can restore it if something goes wrong
backupExecPath := thisExecPath + ".tmp"
l.Info("build acquired; backing up current executable",
zap.String("current_path", thisExecPath),
zap.String("backup_path", backupExecPath))
err = os.Rename(thisExecPath, backupExecPath)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("backing up current binary: %v", err)
}
defer func() {
if err != nil {
err2 := os.Rename(backupExecPath, thisExecPath)
if err2 != nil {
l.Error("restoring original executable failed; will need to be restored manually",
zap.String("backup_path", backupExecPath),
zap.String("original_path", thisExecPath),
zap.Error(err2))
}
}
}()
// write the downloaded build over the original executable's path; the
// destination file is synced and closed before we execute it
err = writeCaddyBinary(thisExecPath, &resp.Body, thisExecStat)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
l.Info("download successful; displaying new binary details", zap.String("location", thisExecPath))
// use the new binary to print out version and module info
fmt.Print("\nModule versions:\n\n")
if err = listModules(thisExecPath); err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("download succeeded, but unable to execute 'caddy list-modules': %v", err)
}
fmt.Println("\nVersion:")
if err = showVersion(thisExecPath); err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("download succeeded, but unable to execute 'caddy version': %v", err)
}
fmt.Println()
// clean up the backup file
if !fl.Bool("keep-backup") {
if err = removeCaddyBinary(backupExecPath); err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("download succeeded, but unable to clean up backup binary: %v", err)
}
} else {
l.Info("skipped cleaning up the backup file", zap.String("backup_path", backupExecPath))
}
l.Info("upgrade successful; please restart any running Caddy instances", zap.String("executable", thisExecPath))
return caddy.ExitCodeSuccess, nil
}
func getModules() (standard, nonstandard, unknown []moduleInfo, err error) {
bi, ok := debug.ReadBuildInfo()
if !ok {
err = fmt.Errorf("no build info")
return
}
for _, modID := range caddy.Modules() {
modInfo, err := caddy.GetModule(modID)
if err != nil {
// that's weird, shouldn't happen
unknown = append(unknown, moduleInfo{caddyModuleID: modID, err: err})
continue
}
// to get the Caddy plugin's version info, we need to know
// the package that the Caddy module's value comes from; we
// can use reflection but we need a non-pointer value (I'm
// not sure why), and since New() should return a pointer
// value, we need to dereference it first
iface := any(modInfo.New())
if rv := reflect.ValueOf(iface); rv.Kind() == reflect.Ptr {
iface = reflect.New(reflect.TypeOf(iface).Elem()).Elem().Interface()
}
modPkgPath := reflect.TypeOf(iface).PkgPath()
// now we find the Go module that the Caddy module's package
// belongs to; we assume the Caddy module package path will
// be prefixed by its Go module path, and we will choose the
// longest matching prefix in case there are nested modules
var matched *debug.Module
for _, dep := range bi.Deps {
if strings.HasPrefix(modPkgPath, dep.Path) {
if matched == nil || len(dep.Path) > len(matched.Path) {
matched = dep
}
}
}
caddyModGoMod := moduleInfo{caddyModuleID: modID, goModule: matched}
if strings.HasPrefix(modPkgPath, caddy.ImportPath) {
standard = append(standard, caddyModGoMod)
} else {
nonstandard = append(nonstandard, caddyModGoMod)
}
}
return
}
func listModules(path string) error {
cmd := exec.Command(path, "list-modules", "--versions", "--skip-standard")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}
func showVersion(path string) error {
cmd := exec.Command(path, "version")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}
func downloadBuild(qs url.Values) (*http.Response, error) {
l := caddy.Log()
l.Info("requesting build",
zap.String("os", qs.Get("os")),
zap.String("arch", qs.Get("arch")),
zap.Strings("packages", qs["p"]))
resp, err := http.Get(fmt.Sprintf("%s?%s", downloadPath, qs.Encode()))
if err != nil {
return nil, fmt.Errorf("secure request failed: %v", err)
}
if resp.StatusCode >= 400 {
var details struct {
StatusCode int `json:"status_code"`
Error struct {
Message string `json:"message"`
ID string `json:"id"`
} `json:"error"`
}
err2 := json.NewDecoder(resp.Body).Decode(&details)
if err2 != nil {
return nil, fmt.Errorf("download and error decoding failed: HTTP %d: %v", resp.StatusCode, err2)
}
return nil, fmt.Errorf("download failed: HTTP %d: %s (id=%s)", resp.StatusCode, details.Error.Message, details.Error.ID)
}
return resp, nil
}
func getPluginPackages(modules []moduleInfo) (map[string]struct{}, error) {
pluginPkgs := make(map[string]struct{})
for _, mod := range modules {
if mod.goModule.Replace != nil {
return nil, fmt.Errorf("cannot auto-upgrade when Go module has been replaced: %s => %s",
mod.goModule.Path, mod.goModule.Replace.Path)
}
pluginPkgs[mod.goModule.Path] = struct{}{}
}
return pluginPkgs, nil
}
func writeCaddyBinary(path string, body *io.ReadCloser, fileInfo os.FileInfo) error {
l := caddy.Log()
destFile, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, fileInfo.Mode())
if err != nil {
return fmt.Errorf("unable to open destination file: %v", err)
}
defer destFile.Close()
l.Info("downloading binary", zap.String("destination", path))
_, err = io.Copy(destFile, *body)
if err != nil {
return fmt.Errorf("unable to download file: %v", err)
}
err = destFile.Sync()
if err != nil {
return fmt.Errorf("syncing downloaded file to device: %v", err)
}
return nil
}
const downloadPath = "https://caddyserver.com/api/download"
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows
package caddycmd
import (
"os"
)
// removeCaddyBinary removes the Caddy binary at the given path.
//
// On non-Windows platforms, this simply calls os.Remove, since those
// systems generally allow a running process to delete its own executable.
func removeCaddyBinary(path string) error {
return os.Remove(path)
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddycmd
import (
"archive/tar"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"github.com/caddyserver/certmagic"
"github.com/caddyserver/caddy/v2"
)
type storVal struct {
StorageRaw json.RawMessage `json:"storage,omitempty" caddy:"namespace=caddy.storage inline_key=module"`
}
// determineStorage returns the top-level storage module from the given config.
// It may return nil even when there is no error.
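// For illustration, a config whose top-level storage block looks roughly
// like the following sketch (the exact fields depend on the storage
// module in use) would be detected here:
//
//	{
//		"storage": {
//			"module": "file_system",
//			"root": "/var/lib/caddy"
//		}
//	}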
func determineStorage(configFile string, configAdapter string) (*storVal, error) {
cfg, _, err := LoadConfig(configFile, configAdapter)
if err != nil {
return nil, err
}
// storage defaults to FileStorage if not explicitly
// defined in the config, so the config can be valid
// JSON and yet unmarshaling here can still fail.
if !json.Valid(cfg) {
return nil, &json.SyntaxError{}
}
var tmpStruct storVal
err = json.Unmarshal(cfg, &tmpStruct)
if err != nil {
// default case, ignore the error
var jsonError *json.SyntaxError
if errors.As(err, &jsonError) {
return nil, nil
}
return nil, err
}
return &tmpStruct, nil
}
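// cmdImportStorage reads a tar archive from --input ("-" means stdin)
// and writes each entry into the storage configured by --config, using
// the entry name as the storage key. A hedged usage sketch, assuming
// this function backs a `caddy storage import` subcommand:
//
//	caddy storage import --config /path/to/config.json --input backup.tar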
func cmdImportStorage(fl Flags) (int, error) {
importStorageCmdConfigFlag := fl.String("config")
importStorageCmdImportFile := fl.String("input")
if importStorageCmdConfigFlag == "" {
return caddy.ExitCodeFailedStartup, errors.New("--config is required")
}
if importStorageCmdImportFile == "" {
return caddy.ExitCodeFailedStartup, errors.New("--input is required")
}
// extract storage from config if possible
storageCfg, err := determineStorage(importStorageCmdConfigFlag, "")
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
// load specified storage or fallback to default
var stor certmagic.Storage
ctx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
defer cancel()
if storageCfg != nil && storageCfg.StorageRaw != nil {
val, err := ctx.LoadModule(storageCfg, "StorageRaw")
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
stor, err = val.(caddy.StorageConverter).CertMagicStorage()
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
} else {
stor = caddy.DefaultStorage
}
// setup input
var f *os.File
if importStorageCmdImportFile == "-" {
f = os.Stdin
} else {
f, err = os.Open(importStorageCmdImportFile)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("opening input file: %v", err)
}
defer f.Close()
}
// store each archive element
tr := tar.NewReader(f)
for {
hdr, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return caddy.ExitCodeFailedQuit, fmt.Errorf("reading archive: %v", err)
}
b, err := io.ReadAll(tr)
if err != nil {
return caddy.ExitCodeFailedQuit, fmt.Errorf("reading archive: %v", err)
}
err = stor.Store(ctx, hdr.Name, b)
if err != nil {
return caddy.ExitCodeFailedQuit, fmt.Errorf("reading archive: %v", err)
}
}
fmt.Println("Successfully imported storage")
return caddy.ExitCodeSuccess, nil
}
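// cmdExportStorage enumerates every key in the storage configured by
// --config and writes the terminal keys (those holding actual values)
// to a tar archive at --output ("-" means stdout).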
func cmdExportStorage(fl Flags) (int, error) {
exportStorageCmdConfigFlag := fl.String("config")
exportStorageCmdOutputFlag := fl.String("output")
if exportStorageCmdConfigFlag == "" {
return caddy.ExitCodeFailedStartup, errors.New("--config is required")
}
if exportStorageCmdOutputFlag == "" {
return caddy.ExitCodeFailedStartup, errors.New("--output is required")
}
// extract storage from config if possible
storageCfg, err := determineStorage(exportStorageCmdConfigFlag, "")
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
// load specified storage or fallback to default
var stor certmagic.Storage
ctx, cancel := caddy.NewContext(caddy.Context{Context: context.Background()})
defer cancel()
if storageCfg != nil && storageCfg.StorageRaw != nil {
val, err := ctx.LoadModule(storageCfg, "StorageRaw")
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
stor, err = val.(caddy.StorageConverter).CertMagicStorage()
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
} else {
stor = caddy.DefaultStorage
}
// enumerate all keys
keys, err := stor.List(ctx, "", true)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
// setup output
var f *os.File
if exportStorageCmdOutputFlag == "-" {
f = os.Stdout
} else {
f, err = os.Create(exportStorageCmdOutputFlag)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("opening output file: %v", err)
}
defer f.Close()
}
// keys with IsTerminal set hold the values we
// care about; write those out
tw := tar.NewWriter(f)
for _, k := range keys {
info, err := stor.Stat(ctx, k)
if err != nil {
return caddy.ExitCodeFailedQuit, err
}
if info.IsTerminal {
v, err := stor.Load(ctx, k)
if err != nil {
return caddy.ExitCodeFailedQuit, err
}
hdr := &tar.Header{
Name: k,
Mode: 0o600,
Size: int64(len(v)),
ModTime: info.Modified,
}
if err = tw.WriteHeader(hdr); err != nil {
return caddy.ExitCodeFailedQuit, fmt.Errorf("writing archive: %v", err)
}
if _, err = tw.Write(v); err != nil {
return caddy.ExitCodeFailedQuit, fmt.Errorf("writing archive: %v", err)
}
}
}
if err = tw.Close(); err != nil {
return caddy.ExitCodeFailedQuit, fmt.Errorf("writing archive: %v", err)
}
return caddy.ExitCodeSuccess, nil
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddy
import (
"context"
"encoding/json"
"fmt"
"log"
"log/slog"
"reflect"
"github.com/caddyserver/certmagic"
"go.uber.org/zap"
"go.uber.org/zap/exp/zapslog"
"github.com/caddyserver/caddy/v2/internal/filesystems"
)
// Context is a type which defines the lifetime of modules that
// are loaded and provides access to the parent configuration
// that spawned the modules which are loaded. It should be used
// with care and wrapped with derivation functions from the
// standard context package only if you don't need the Caddy
// specific features. These contexts are canceled when the
// lifetime of the modules loaded from it is over.
//
// Use NewContext() to get a valid value (but most modules will
// not actually need to do this).
type Context struct {
context.Context
moduleInstances map[string][]Module
cfg *Config
ancestry []Module
cleanupFuncs []func() // invoked at every config unload
exitFuncs []func(context.Context) // invoked at config unload ONLY IF the process is exiting (EXPERIMENTAL)
}
// NewContext provides a new context derived from the given
// context ctx. Normally, you will not need to call this
// function unless you are loading modules which have a
// different lifespan than the ones for the context the
// module was provisioned with. Be sure to call the cancel
// func when the context is to be cleaned up so that
// modules which are loaded will be properly unloaded.
// See standard library context package's documentation.
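// A minimal usage sketch (mirroring how the CLI commands construct a
// context; from another package the names would be caddy-qualified):
//
//	ctx, cancel := NewContext(Context{Context: context.Background()})
//	defer cancel() // unloads and cleans up modules loaded from ctx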
func NewContext(ctx Context) (Context, context.CancelFunc) {
newCtx := Context{moduleInstances: make(map[string][]Module), cfg: ctx.cfg}
c, cancel := context.WithCancel(ctx.Context)
wrappedCancel := func() {
cancel()
for _, f := range ctx.cleanupFuncs {
f()
}
for modName, modInstances := range newCtx.moduleInstances {
for _, inst := range modInstances {
if cu, ok := inst.(CleanerUpper); ok {
err := cu.Cleanup()
if err != nil {
log.Printf("[ERROR] %s (%p): cleanup: %v", modName, inst, err)
}
}
}
}
}
newCtx.Context = c
return newCtx, wrappedCancel
}
// OnCancel executes f when ctx is canceled.
func (ctx *Context) OnCancel(f func()) {
ctx.cleanupFuncs = append(ctx.cleanupFuncs, f)
}
// Filesystems returns a ref to the FilesystemMap.
// EXPERIMENTAL: This API is subject to change.
func (ctx *Context) Filesystems() FileSystems {
// if no config is loaded, we use a default FilesystemMap, which includes the OS filesystem
if ctx.cfg == nil {
return &filesystems.FilesystemMap{}
}
return ctx.cfg.filesystems
}
// OnExit executes f when the process exits gracefully.
// The function is only executed if the process is gracefully
// shut down while this context is active.
//
// EXPERIMENTAL API: subject to change or removal.
func (ctx *Context) OnExit(f func(context.Context)) {
ctx.exitFuncs = append(ctx.exitFuncs, f)
}
// LoadModule loads the Caddy module(s) from the specified field of the parent struct
// pointer and returns the loaded module(s). The struct pointer and its field name as
// a string are necessary so that reflection can be used to read the struct tag on the
// field to get the module namespace and inline module name key (if specified).
//
// The field can be any one of the supported raw module types: json.RawMessage,
// []json.RawMessage, map[string]json.RawMessage, or []map[string]json.RawMessage.
// ModuleMap may be used in place of map[string]json.RawMessage. The return value's
// underlying type mirrors the input field's type:
//
// json.RawMessage => any
// []json.RawMessage => []any
// [][]json.RawMessage => [][]any
// map[string]json.RawMessage => map[string]any
// []map[string]json.RawMessage => []map[string]any
//
// The field must have a "caddy" struct tag in this format:
//
// caddy:"key1=val1 key2=val2"
//
// To load modules, a "namespace" key is required. For example, to load modules
// in the "http.handlers" namespace, you'd put: `namespace=http.handlers` in the
// Caddy struct tag.
//
// The module name must also be available. If the field type is a map or slice of maps,
// then key is assumed to be the module name if an "inline_key" is NOT specified in the
// caddy struct tag. In this case, the module name does NOT need to be specified in-line
// with the module itself.
//
// If not a map, or if inline_key is non-empty, then the module name must be embedded
// into the values, which must be objects; then there must be a key in those objects
// where its associated value is the module name. This is called the "inline key",
// meaning the key containing the module's name that is defined inline with the module
// itself. You must specify the inline key in a struct tag, along with the namespace:
//
// caddy:"namespace=http.handlers inline_key=handler"
//
// This will look for a key/value pair like `"handler": "..."` in the json.RawMessage
// in order to know the module name.
//
// To make use of the loaded module(s) (the return value), you will probably want
// to type-assert each 'any' value(s) to the types that are useful to you
// and store them on the same struct. Storing them on the same struct makes for
// easy garbage collection when your host module is no longer needed.
//
// Loaded modules have already been provisioned and validated. Upon returning
// successfully, this method clears the json.RawMessage(s) in the field since
// the raw JSON is no longer needed, and this allows the GC to free up memory.
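// As an illustrative sketch (Gizmo, Gadgeter, and the "gizmo.gadgets"
// namespace are hypothetical), a host module might declare a raw field
// and load it during provisioning:
//
//	type Gizmo struct {
//		GadgetRaw json.RawMessage `json:"gadget,omitempty" caddy:"namespace=gizmo.gadgets inline_key=gadget"`
//
//		gadget Gadgeter // the loaded module, type-asserted
//	}
//
//	func (g *Gizmo) Provision(ctx Context) error {
//		mod, err := ctx.LoadModule(g, "GadgetRaw")
//		if err != nil {
//			return fmt.Errorf("loading gadget module: %v", err)
//		}
//		g.gadget = mod.(Gadgeter)
//		return nil
//	}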
func (ctx Context) LoadModule(structPointer any, fieldName string) (any, error) {
val := reflect.ValueOf(structPointer).Elem().FieldByName(fieldName)
typ := val.Type()
field, ok := reflect.TypeOf(structPointer).Elem().FieldByName(fieldName)
if !ok {
panic(fmt.Sprintf("field %s does not exist in %#v", fieldName, structPointer))
}
opts, err := ParseStructTag(field.Tag.Get("caddy"))
if err != nil {
panic(fmt.Sprintf("malformed tag on field %s: %v", fieldName, err))
}
moduleNamespace, ok := opts["namespace"]
if !ok {
panic(fmt.Sprintf("missing 'namespace' key in struct tag on field %s", fieldName))
}
inlineModuleKey := opts["inline_key"]
var result any
switch val.Kind() {
case reflect.Slice:
if isJSONRawMessage(typ) {
// val is `json.RawMessage` ([]uint8 under the hood)
if inlineModuleKey == "" {
panic("unable to determine module name without inline_key when type is not a ModuleMap")
}
val, err := ctx.loadModuleInline(inlineModuleKey, moduleNamespace, val.Interface().(json.RawMessage))
if err != nil {
return nil, err
}
result = val
} else if isJSONRawMessage(typ.Elem()) {
// val is `[]json.RawMessage`
if inlineModuleKey == "" {
panic("unable to determine module name without inline_key because type is not a ModuleMap")
}
var all []any
for i := 0; i < val.Len(); i++ {
val, err := ctx.loadModuleInline(inlineModuleKey, moduleNamespace, val.Index(i).Interface().(json.RawMessage))
if err != nil {
return nil, fmt.Errorf("position %d: %v", i, err)
}
all = append(all, val)
}
result = all
} else if typ.Elem().Kind() == reflect.Slice && isJSONRawMessage(typ.Elem().Elem()) {
// val is `[][]json.RawMessage`
if inlineModuleKey == "" {
panic("unable to determine module name without inline_key because type is not a ModuleMap")
}
var all [][]any
for i := 0; i < val.Len(); i++ {
innerVal := val.Index(i)
var allInner []any
for j := 0; j < innerVal.Len(); j++ {
innerInnerVal, err := ctx.loadModuleInline(inlineModuleKey, moduleNamespace, innerVal.Index(j).Interface().(json.RawMessage))
if err != nil {
return nil, fmt.Errorf("position %d: %v", j, err)
}
allInner = append(allInner, innerInnerVal)
}
all = append(all, allInner)
}
result = all
} else if isModuleMapType(typ.Elem()) {
// val is `[]map[string]json.RawMessage`
var all []map[string]any
for i := 0; i < val.Len(); i++ {
thisSet, err := ctx.loadModulesFromSomeMap(moduleNamespace, inlineModuleKey, val.Index(i))
if err != nil {
return nil, err
}
all = append(all, thisSet)
}
result = all
}
case reflect.Map:
// val is a ModuleMap or some other kind of map
result, err = ctx.loadModulesFromSomeMap(moduleNamespace, inlineModuleKey, val)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("unrecognized type for module: %s", typ)
}
// we're done with the raw bytes; allow GC to deallocate
val.Set(reflect.Zero(typ))
return result, nil
}
// loadModulesFromSomeMap loads modules from val, which must be a type of map[string]any.
// Depending on inlineModuleKey, it will be interpreted as either a ModuleMap (key is the module
// name) or as a regular map (key is not the module name, and module name is defined inline).
func (ctx Context) loadModulesFromSomeMap(namespace, inlineModuleKey string, val reflect.Value) (map[string]any, error) {
// if no inline_key is specified, then val must be a ModuleMap,
// where the key is the module name
if inlineModuleKey == "" {
if !isModuleMapType(val.Type()) {
panic(fmt.Sprintf("expected ModuleMap because inline_key is empty; but we do not recognize this type: %s", val.Type()))
}
return ctx.loadModuleMap(namespace, val)
}
// otherwise, val is a map with modules, but the module name is
// inline with each value (the key means something else)
return ctx.loadModulesFromRegularMap(namespace, inlineModuleKey, val)
}
// loadModulesFromRegularMap loads modules from val, where val is a map[string]json.RawMessage.
// Map keys are NOT interpreted as module names, so module names are still expected to appear
// inline with the objects.
func (ctx Context) loadModulesFromRegularMap(namespace, inlineModuleKey string, val reflect.Value) (map[string]any, error) {
mods := make(map[string]any)
iter := val.MapRange()
for iter.Next() {
k := iter.Key()
v := iter.Value()
mod, err := ctx.loadModuleInline(inlineModuleKey, namespace, v.Interface().(json.RawMessage))
if err != nil {
return nil, fmt.Errorf("key %s: %v", k, err)
}
mods[k.String()] = mod
}
return mods, nil
}
// loadModuleMap loads modules from a ModuleMap, i.e. map[string]any, where the key is the
// module name. With a module map, module names do not need to be defined inline with their values.
func (ctx Context) loadModuleMap(namespace string, val reflect.Value) (map[string]any, error) {
all := make(map[string]any)
iter := val.MapRange()
for iter.Next() {
k := iter.Key().Interface().(string)
v := iter.Value().Interface().(json.RawMessage)
moduleName := namespace + "." + k
if namespace == "" {
moduleName = k
}
val, err := ctx.LoadModuleByID(moduleName, v)
if err != nil {
return nil, fmt.Errorf("module name '%s': %v", k, err)
}
all[k] = val
}
return all, nil
}
// LoadModuleByID decodes rawMsg into a new instance of mod and
// returns the value. If mod.New is nil, an error is returned.
// If the module implements Validator or Provisioner interfaces,
// those methods are invoked to ensure the module is fully
// configured and valid before being used.
//
// This is a lower-level method and will usually not be called
// directly by most modules. However, this method is useful when
// dynamically loading/unloading modules in their own context,
// like from embedded scripts, etc.
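// A hedged sketch of direct usage (the module ID and JSON are merely
// illustrative; any registered module ID with a matching config works):
//
//	val, err := ctx.LoadModuleByID("http.handlers.static_response",
//		json.RawMessage(`{"body": "hello, world"}`))
//	if err != nil {
//		return err
//	}
//	// val is already provisioned and validated; assert it to the type you need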
func (ctx Context) LoadModuleByID(id string, rawMsg json.RawMessage) (any, error) {
modulesMu.RLock()
modInfo, ok := modules[id]
modulesMu.RUnlock()
if !ok {
return nil, fmt.Errorf("unknown module: %s", id)
}
if modInfo.New == nil {
return nil, fmt.Errorf("module '%s' has no constructor", modInfo.ID)
}
val := modInfo.New()
// value must be a pointer for unmarshaling into concrete type, even if
// the module's concrete type is a slice or map; New() *should* return
// a pointer, otherwise unmarshaling errors or panics will occur
if rv := reflect.ValueOf(val); rv.Kind() != reflect.Ptr {
log.Printf("[WARNING] ModuleInfo.New() for module '%s' did not return a pointer,"+
" so we are using reflection to make a pointer instead; please fix this by"+
" using new(Type) or &Type notation in your module's New() function.", id)
val = reflect.New(rv.Type()).Elem().Addr().Interface().(Module)
}
// fill in its config only if there is a config to fill in
if len(rawMsg) > 0 {
err := StrictUnmarshalJSON(rawMsg, &val)
if err != nil {
return nil, fmt.Errorf("decoding module config: %s: %v", modInfo, err)
}
}
if val == nil {
// returned module values are almost always type-asserted
// before being used, so a nil value would panic; and there
// is no good reason to explicitly declare null modules in
// a config; it might be because the user is trying to achieve
// a result the developer isn't expecting, which is a smell
return nil, fmt.Errorf("module value cannot be null")
}
ctx.ancestry = append(ctx.ancestry, val)
if prov, ok := val.(Provisioner); ok {
err := prov.Provision(ctx)
if err != nil {
// incomplete provisioning could have left state
// dangling, so make sure it gets cleaned up
if cleanerUpper, ok := val.(CleanerUpper); ok {
err2 := cleanerUpper.Cleanup()
if err2 != nil {
err = fmt.Errorf("%v; additionally, cleanup: %v", err, err2)
}
}
return nil, fmt.Errorf("provision %s: %v", modInfo, err)
}
}
if validator, ok := val.(Validator); ok {
err := validator.Validate()
if err != nil {
// since the module was already provisioned, make sure we clean up
if cleanerUpper, ok := val.(CleanerUpper); ok {
err2 := cleanerUpper.Cleanup()
if err2 != nil {
err = fmt.Errorf("%v; additionally, cleanup: %v", err, err2)
}
}
return nil, fmt.Errorf("%s: invalid configuration: %v", modInfo, err)
}
}
ctx.moduleInstances[id] = append(ctx.moduleInstances[id], val)
return val, nil
}
// loadModuleInline loads a module from a JSON raw message which decodes to
// a map[string]any, where one of the object keys is moduleNameKey
// and the corresponding value is the module name (as a string) which can
// be found in the given scope. In other words, the module name is declared
// in-line with the module itself.
//
// This allows modules to be decoded into their concrete types and used when
// their names cannot be the unique key in a map, such as when there are
// multiple instances in the map or it appears in an array (where there are
// no custom keys). In other words, the key containing the module name is
// treated special/separate from all the other keys in the object.
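// For example, with moduleNameKey "handler" and moduleScope "http.handlers",
// a raw message such as the following (field values are illustrative) would
// load the module "http.handlers.static_response":
//
//	{"handler": "static_response", "body": "hello"}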
func (ctx Context) loadModuleInline(moduleNameKey, moduleScope string, raw json.RawMessage) (any, error) {
moduleName, raw, err := getModuleNameInline(moduleNameKey, raw)
if err != nil {
return nil, err
}
val, err := ctx.LoadModuleByID(moduleScope+"."+moduleName, raw)
if err != nil {
return nil, fmt.Errorf("loading module '%s': %v", moduleName, err)
}
return val, nil
}
// App returns the configured app named name. If that app has
// not yet been loaded and provisioned, it will be immediately
// loaded and provisioned. If no app with that name is
// configured, a new empty one will be instantiated instead.
// (The app module must still be registered.) This must not be
// called during the Provision/Validate phase to reference a
// module's own host app (since the parent app module is still
// in the process of being provisioned, it is not yet ready).
//
// We return any type instead of the App type because it is NOT
// intended for the caller of this method to be the one to start
// or stop App modules. The caller is expected to assert to the
// concrete type.
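// A minimal usage sketch (the app name and asserted type are illustrative;
// callers assert the returned value to the concrete app type they expect):
//
//	appIface, err := ctx.App("tls")
//	if err != nil {
//		return err
//	}
//	tlsApp := appIface.(*caddytls.TLS)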
func (ctx Context) App(name string) (any, error) {
if app, ok := ctx.cfg.apps[name]; ok {
return app, nil
}
appRaw := ctx.cfg.AppsRaw[name]
modVal, err := ctx.LoadModuleByID(name, appRaw)
if err != nil {
return nil, fmt.Errorf("loading %s app module: %v", name, err)
}
if appRaw != nil {
ctx.cfg.AppsRaw[name] = nil // allow GC to deallocate
}
ctx.cfg.apps[name] = modVal.(App)
return modVal, nil
}
// AppIfConfigured is like App, but it returns an error if the
// app has not been configured. This is useful when the app is
// required and its absence is a configuration error; or when
// the app is optional and you don't want to instantiate a
// new one that hasn't been explicitly configured. If the app
// is not in the configuration, the error wraps ErrNotConfigured.
func (ctx Context) AppIfConfigured(name string) (any, error) {
if ctx.cfg == nil {
return nil, fmt.Errorf("app module %s: %w", name, ErrNotConfigured)
}
if app, ok := ctx.cfg.apps[name]; ok {
return app, nil
}
appRaw := ctx.cfg.AppsRaw[name]
if appRaw == nil {
return nil, fmt.Errorf("app module %s: %w", name, ErrNotConfigured)
}
return ctx.App(name)
}
// ErrNotConfigured indicates a module is not configured.
var ErrNotConfigured = fmt.Errorf("module not configured")
// Storage returns the configured Caddy storage implementation.
func (ctx Context) Storage() certmagic.Storage {
return ctx.cfg.storage
}
// Logger returns a logger that is intended for use by the most
// recent module associated with the context. Callers should not
// pass in any arguments unless they want to associate with a
// different module; it panics if more than 1 value is passed in.
//
// Originally, this method's signature was `Logger(mod Module)`,
// requiring that an instance of a Caddy module be passed in.
// However, that is no longer necessary, as the closest module
// most recently associated with the context will be automatically
// assumed. To prevent a sudden breaking change, this method's
// signature has been changed to be variadic, but we may remove
// the parameter altogether in the future. Callers should not
// pass in any argument. If there is valid need to specify a
// different module, please open an issue to discuss.
//
// PARTIALLY DEPRECATED: The Logger(module) form is deprecated and
// may be removed in the future. Do not pass in any arguments.
func (ctx Context) Logger(module ...Module) *zap.Logger {
if len(module) > 1 {
panic("more than 1 module passed in")
}
if ctx.cfg == nil {
// often the case in tests; just use a dev logger
l, err := zap.NewDevelopment()
if err != nil {
panic("config missing, unable to create dev logger: " + err.Error())
}
return l
}
mod := ctx.Module()
if len(module) > 0 {
mod = module[0]
}
if mod == nil {
return Log()
}
return ctx.cfg.Logging.Logger(mod)
}
// Slogger returns a slog logger that is intended for use by
// the most recent module associated with the context.
func (ctx Context) Slogger() *slog.Logger {
if ctx.cfg == nil {
// often the case in tests; just use a dev logger
l, err := zap.NewDevelopment()
if err != nil {
panic("config missing, unable to create dev logger: " + err.Error())
}
return slog.New(zapslog.NewHandler(l.Core(), nil))
}
mod := ctx.Module()
if mod == nil {
return slog.New(zapslog.NewHandler(Log().Core(), nil))
}
return slog.New(zapslog.NewHandler(
ctx.cfg.Logging.Logger(mod).Core(),
&zapslog.HandlerOptions{
LoggerName: string(mod.CaddyModule().ID),
},
))
}
// Modules returns the lineage of modules that this context provisioned,
// with the most recent/current module being last in the list.
func (ctx Context) Modules() []Module {
mods := make([]Module, len(ctx.ancestry))
copy(mods, ctx.ancestry)
return mods
}
// Module returns the current module, or the most recent one
// provisioned by the context.
func (ctx Context) Module() Module {
if len(ctx.ancestry) == 0 {
return nil
}
return ctx.ancestry[len(ctx.ancestry)-1]
}
// WithValue returns a new context with the given key-value pair.
func (ctx *Context) WithValue(key, value any) Context {
return Context{
Context: context.WithValue(ctx.Context, key, value),
moduleInstances: ctx.moduleInstances,
cfg: ctx.cfg,
ancestry: ctx.ancestry,
cleanupFuncs: ctx.cleanupFuncs,
exitFuncs: ctx.exitFuncs,
}
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build gofuzz
package caddy
func FuzzParseDuration(data []byte) int {
_, err := ParseDuration(string(data))
if err != nil {
return 0
}
return 1
}
package filesystems
import (
"io/fs"
"strings"
"sync"
)
const (
DefaultFilesystemKey = "default"
)
var DefaultFilesystem = &wrapperFs{key: DefaultFilesystemKey, FS: OsFS{}}
// wrapperFs exists so we can easily add functionality to it down the line
type wrapperFs struct {
key string
fs.FS
}
// FilesystemMap stores a map of filesystems.
// The empty key is treated as the default key,
// and a default filesystem, backed by the OS, is always available.
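// A short usage sketch (the key and filesystem value are illustrative):
//
//	var fsMap FilesystemMap
//	fsMap.Register("mem", myInMemoryFS) // myInMemoryFS is any fs.FS
//	if f, ok := fsMap.Get("mem"); ok {
//		// use f
//	}
//	_ = fsMap.Default() // never nil; falls back to the OS filesystem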
type FilesystemMap struct {
m sync.Map
}
// key normalizes k, mapping the empty string to the default key.
// Note that the first invocation must not happen in a racy context.
func (f *FilesystemMap) key(k string) string {
if k == "" {
k = DefaultFilesystemKey
}
return k
}
// Register adds the filesystem under the given key so it can be retrieved later.
// A call with a nil fs unregisters the key instead, ensuring that Default() never returns nil.
func (f *FilesystemMap) Register(k string, v fs.FS) {
k = f.key(k)
if v == nil {
f.Unregister(k)
return
}
f.m.Store(k, &wrapperFs{key: k, FS: v})
}
// Unregister removes the filesystem with the given key from the filesystem map.
// If the key is the default key, the default is reset to the OS filesystem instead of being deleted.
// Modules should call this on cleanup to be safe.
func (f *FilesystemMap) Unregister(k string) {
k = f.key(k)
if k == DefaultFilesystemKey {
f.m.Store(k, DefaultFilesystem)
} else {
f.m.Delete(k)
}
}
// Get will get a filesystem with a given key
func (f *FilesystemMap) Get(k string) (v fs.FS, ok bool) {
k = f.key(k)
c, ok := f.m.Load(strings.TrimSpace(k))
if !ok {
if k == DefaultFilesystemKey {
f.m.Store(k, DefaultFilesystem)
return DefaultFilesystem, true
}
return nil, ok
}
return c.(fs.FS), true
}
// Default will get the default filesystem in the filesystem map
func (f *FilesystemMap) Default() fs.FS {
val, _ := f.Get(DefaultFilesystemKey)
return val
}
package filesystems
import (
"io/fs"
"os"
"path/filepath"
)
// OsFS is a simple fs.FS implementation that uses the local
// file system. (We do not use os.DirFS because we do our own
// rooting or path prefixing without being constrained to a single
// root folder. The standard os.DirFS implementation is problematic
// since roots can be dynamic in our application.)
//
// OsFS also implements fs.StatFS, fs.GlobFS, fs.ReadDirFS, and fs.ReadFileFS.
type OsFS struct{}
func (OsFS) Open(name string) (fs.File, error) { return os.Open(name) }
func (OsFS) Stat(name string) (fs.FileInfo, error) { return os.Stat(name) }
func (OsFS) Glob(pattern string) ([]string, error) { return filepath.Glob(pattern) }
func (OsFS) ReadDir(name string) ([]fs.DirEntry, error) { return os.ReadDir(name) }
func (OsFS) ReadFile(name string) ([]byte, error) { return os.ReadFile(name) }
var (
_ fs.StatFS = (*OsFS)(nil)
_ fs.GlobFS = (*OsFS)(nil)
_ fs.ReadDirFS = (*OsFS)(nil)
_ fs.ReadFileFS = (*OsFS)(nil)
)
package metrics
import (
"net/http"
"strconv"
)
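// SanitizeCode sanitizes an HTTP status code for use as a metric label;
// 0 is treated as 200, and all other codes are stringified as-is.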
func SanitizeCode(s int) string {
switch s {
case 0, 200:
return "200"
default:
return strconv.Itoa(s)
}
}
// Only support the list of "regular" HTTP methods, see
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods
var methodMap = map[string]string{
"GET": http.MethodGet, "get": http.MethodGet,
"HEAD": http.MethodHead, "head": http.MethodHead,
"PUT": http.MethodPut, "put": http.MethodPut,
"POST": http.MethodPost, "post": http.MethodPost,
"DELETE": http.MethodDelete, "delete": http.MethodDelete,
"CONNECT": http.MethodConnect, "connect": http.MethodConnect,
"OPTIONS": http.MethodOptions, "options": http.MethodOptions,
"TRACE": http.MethodTrace, "trace": http.MethodTrace,
"PATCH": http.MethodPatch, "patch": http.MethodPatch,
}
// SanitizeMethod sanitizes the method for use as a metric label. This helps
// prevent high cardinality on the method label. The name is always upper case.
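// For example:
//
//	SanitizeMethod("get")  // "GET"
//	SanitizeMethod("POST") // "POST"
//	SanitizeMethod("brew") // "OTHER"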
func SanitizeMethod(m string) string {
if m, ok := methodMap[m]; ok {
return m
}
return "OTHER"
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"fmt"
"io/fs"
"strconv"
"strings"
)
// SplitUnixSocketPermissionsBits takes a unix socket address in the
// unusual "path|bits" format (e.g. /run/caddy.sock|0222) and tries
// to split it into socket path (host) and permissions bits (port).
// Colons (":") can't be used as separator, as socket paths on Windows
// may include a drive letter (e.g. `unix/c:\absolute\path.sock`).
// Permission bits default to 0200 if none are specified.
// An error is returned if the owner bits do not include
// write permission (e.g. `0422` or `022`).
// Symbolic permission representation (e.g. `u=w,g=w,o=w`)
// is not supported and currently results in an error.
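// For example (results follow from the rules above):
//
//	SplitUnixSocketPermissionsBits("/run/caddy.sock|0222") // "/run/caddy.sock", 0o222, nil
//	SplitUnixSocketPermissionsBits("/run/caddy.sock")      // "/run/caddy.sock", 0o200, nil
//	SplitUnixSocketPermissionsBits("/run/caddy.sock|0444") // error: owner lacks write permission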
func SplitUnixSocketPermissionsBits(addr string) (path string, fileMode fs.FileMode, err error) {
addrSplit := strings.SplitN(addr, "|", 2)
if len(addrSplit) == 2 {
// parse octal permission bit string as uint32
fileModeUInt64, err := strconv.ParseUint(addrSplit[1], 8, 32)
if err != nil {
return "", 0, fmt.Errorf("could not parse octal permission bits in %s: %v", addr, err)
}
fileMode = fs.FileMode(fileModeUInt64)
// FileMode.String() returns a string like `-rwxr-xr--` for `u=rwx,g=rx,o=r` (`0754`)
if fileMode.String()[2] != 'w' {
return "", 0, fmt.Errorf("owner of the socket requires '-w-' (write, octal: '2') permissions at least; got '%s' in %s", fileMode.String()[1:4], addr)
}
return addrSplit[0], fileMode, nil
}
// default to 0200 (symbolic: `u=w,g=,o=`)
// if no permission bits are specified
return addr, 0o200, nil
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Even though the filename ends in _unix.go, we still have to specify the
// build constraint here, because the filename convention only works for
// literal GOOS values, and "unix" is a shortcut unique to build tags.
//go:build unix && !solaris
package caddy
import (
"context"
"errors"
"io"
"io/fs"
"net"
"os"
"sync/atomic"
"syscall"
"go.uber.org/zap"
"golang.org/x/sys/unix"
)
// reuseUnixSocket copies and reuses the unix domain socket (UDS) if we already
// have it open; if not, unlink it so we can have it. No-op if not a unix network.
func reuseUnixSocket(network, addr string) (any, error) {
if !IsUnixNetwork(network) {
return nil, nil
}
socketKey := listenerKey(network, addr)
socket, exists := unixSockets[socketKey]
if exists {
// make copy of file descriptor
socketFile, err := socket.File() // does dup() deep down
if err != nil {
return nil, err
}
// use copied fd to make new Listener or PacketConn, then replace
// it in the map so that future copies always come from the most
// recent fd (as the previous ones will be closed, and we'd get
// "use of closed network connection" errors) -- note that we
// preserve the *pointer* to the counter (not just the value) so
// that all socket wrappers will refer to the same value
switch unixSocket := socket.(type) {
case *unixListener:
ln, err := net.FileListener(socketFile)
if err != nil {
return nil, err
}
atomic.AddInt32(unixSocket.count, 1)
unixSockets[socketKey] = &unixListener{ln.(*net.UnixListener), socketKey, unixSocket.count}
case *unixConn:
pc, err := net.FilePacketConn(socketFile)
if err != nil {
return nil, err
}
atomic.AddInt32(unixSocket.count, 1)
unixSockets[socketKey] = &unixConn{pc.(*net.UnixConn), addr, socketKey, unixSocket.count}
}
return unixSockets[socketKey], nil
}
// it's quite common for programs to leave their socket file behind after they
// close, so the typical pattern is to unlink it before binding to it -- this is
// often crucial if the last program using it was killed forcefully without a
// chance to clean up the socket. There is still a race, as the comment in
// net.UnixListener.close() explains, which we accept here.
if err := syscall.Unlink(addr); err != nil && !errors.Is(err, fs.ErrNotExist) {
return nil, err
}
return nil, nil
}
func listenReusable(ctx context.Context, lnKey string, network, address string, config net.ListenConfig) (any, error) {
// wrap any Control function set by the user so we can also add our reusePort control without clobbering theirs
oldControl := config.Control
config.Control = func(network, address string, c syscall.RawConn) error {
if oldControl != nil {
if err := oldControl(network, address, c); err != nil {
return err
}
}
return reusePort(network, address, c)
}
// even though SO_REUSEPORT lets us bind the socket multiple times,
// we still put it in the listenerPool so we can count how many
// configs are using this socket; necessary to ensure we can know
// whether to enforce shutdown delays, for example (see #5393).
var ln io.Closer
var err error
switch network {
case "udp", "udp4", "udp6", "unixgram":
ln, err = config.ListenPacket(ctx, network, address)
default:
ln, err = config.Listen(ctx, network, address)
}
if err == nil {
listenerPool.LoadOrStore(lnKey, nil)
}
// if new listener is a unix socket, make sure we can reuse it later
// (we do our own "unlink on close" -- not required, but more tidy)
one := int32(1)
if unix, ok := ln.(*net.UnixListener); ok {
unix.SetUnlinkOnClose(false)
ln = &unixListener{unix, lnKey, &one}
unixSockets[lnKey] = ln.(*unixListener)
}
// TODO: Not 100% sure this is necessary, but we do this for net.UnixListener in listen_unix.go, so...
if unix, ok := ln.(*net.UnixConn); ok {
ln = &unixConn{unix, address, lnKey, &one}
unixSockets[lnKey] = ln.(*unixConn)
}
// lightly wrap the listener so that when it is closed,
// we can decrement the usage pool counter
switch specificLn := ln.(type) {
case net.Listener:
return deleteListener{specificLn, lnKey}, err
case net.PacketConn:
return deletePacketConn{specificLn, lnKey}, err
}
// other listener types are returned directly
return ln, err
}
// reusePort sets SO_REUSEPORT. Ineffective for unix sockets.
func reusePort(network, address string, conn syscall.RawConn) error {
if IsUnixNetwork(network) {
return nil
}
return conn.Control(func(descriptor uintptr) {
if err := unix.SetsockoptInt(int(descriptor), unix.SOL_SOCKET, unixSOREUSEPORT, 1); err != nil {
Log().Error("setting SO_REUSEPORT",
zap.String("network", network),
zap.String("address", address),
zap.Uintptr("descriptor", descriptor),
zap.Error(err))
}
})
}
type unixListener struct {
*net.UnixListener
mapKey string
count *int32 // accessed atomically
}
func (uln *unixListener) Close() error {
newCount := atomic.AddInt32(uln.count, -1)
if newCount == 0 {
defer func() {
addr := uln.Addr().String()
unixSocketsMu.Lock()
delete(unixSockets, uln.mapKey)
unixSocketsMu.Unlock()
_ = syscall.Unlink(addr)
}()
}
return uln.UnixListener.Close()
}
type unixConn struct {
*net.UnixConn
filename string
mapKey string
count *int32 // accessed atomically
}
func (uc *unixConn) Close() error {
newCount := atomic.AddInt32(uc.count, -1)
if newCount == 0 {
defer func() {
unixSocketsMu.Lock()
delete(unixSockets, uc.mapKey)
unixSocketsMu.Unlock()
_ = syscall.Unlink(uc.filename)
}()
}
return uc.UnixConn.Close()
}
func (uc *unixConn) Unwrap() net.PacketConn {
return uc.UnixConn
}
// unixSockets keeps track of the currently-active unix sockets
// so we can transfer their FDs gracefully during reloads.
var unixSockets = make(map[string]interface {
File() (*os.File, error)
})
// deleteListener is a type that simply deletes itself
// from the listenerPool when it closes. It is used
// solely for the purpose of reference counting (i.e.
// counting how many configs are using a given socket).
type deleteListener struct {
net.Listener
lnKey string
}
func (dl deleteListener) Close() error {
_, _ = listenerPool.Delete(dl.lnKey)
return dl.Listener.Close()
}
// deletePacketConn is like deleteListener, but
// for net.PacketConns.
type deletePacketConn struct {
net.PacketConn
lnKey string
}
func (dl deletePacketConn) Close() error {
_, _ = listenerPool.Delete(dl.lnKey)
return dl.PacketConn.Close()
}
func (dl deletePacketConn) Unwrap() net.PacketConn {
return dl.PacketConn
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddy
import (
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"io/fs"
"net"
"net/netip"
"os"
"strconv"
"strings"
"sync"
"sync/atomic"
"github.com/quic-go/quic-go"
"github.com/quic-go/quic-go/http3"
"go.uber.org/zap"
"golang.org/x/time/rate"
"github.com/caddyserver/caddy/v2/internal"
)
// NetworkAddress represents one or more network addresses.
// It contains the individual components for a parsed network
// address of the form accepted by ParseNetworkAddress().
type NetworkAddress struct {
// Should be a network value accepted by Go's net package or
// by a plugin providing a listener for that network type.
Network string
// The "main" part of the network address is the host, which
// often takes the form of a hostname, DNS name, IP address,
// or socket path.
Host string
// For addresses that contain a port, ranges are given by
// [StartPort, EndPort]; i.e. for a single port, StartPort
// and EndPort are the same. For no port, they are 0.
StartPort uint
EndPort uint
}
// ListenAll calls Listen() for all addresses represented by this struct, i.e. all ports in the range.
// (If the address doesn't use ports or has 1 port only, then only 1 listener will be created.)
// It returns an error if any listener failed to bind, and closes any listeners opened up to that point.
func (na NetworkAddress) ListenAll(ctx context.Context, config net.ListenConfig) ([]any, error) {
var listeners []any
var err error
// if one of the addresses has a failure, we need to close
// any that did open a socket to avoid leaking resources
defer func() {
if err == nil {
return
}
for _, ln := range listeners {
if cl, ok := ln.(io.Closer); ok {
cl.Close()
}
}
}()
// an address can contain a port range, which represents multiple addresses;
// some addresses don't use ports at all and have a port range size of 1;
// whatever the case, iterate each address represented and bind a socket
for portOffset := uint(0); portOffset < na.PortRangeSize(); portOffset++ {
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
// create (or reuse) the listener ourselves
var ln any
ln, err = na.Listen(ctx, portOffset, config)
if err != nil {
return nil, err
}
listeners = append(listeners, ln)
}
return listeners, nil
}
// Listen is similar to net.Listen, with a few differences:
//
// Listen announces on the network address using the port calculated by adding
// portOffset to the start port. (For network types that do not use ports, the
// portOffset is ignored.)
//
// The provided ListenConfig is used to create the listener. Its Control function,
// if set, may be wrapped by an internally-used Control function. The provided
// context may be used to cancel long operations early. The context is not used
// to close the listener after it has been created.
//
// Caddy's listeners can overlap each other: multiple listeners may be created on
// the same socket at the same time. This is useful because during config changes,
// the new config is started while the old config is still running. How this is
// accomplished varies by platform and network type. For example, on Unix, SO_REUSEPORT
// is set except on Unix sockets, for which the file descriptor is duplicated and
// reused; on Windows, the close logic is virtualized using timeouts. Like normal
// listeners, be sure to Close() them when you are done.
//
// This method returns any type, as the implementations of listeners for various
// network types are not interchangeable. The type of listener returned is switched
// on the network type. Stream-based networks ("tcp", "unix", "unixpacket", etc.)
// return a net.Listener; datagram-based networks ("udp", "unixgram", etc.) return
// a net.PacketConn; and so forth. The actual concrete types are not guaranteed to
// be standard, exported types (wrapping is necessary to provide graceful reloads).
//
// Unix sockets will be unlinked before being created, to ensure we can bind to
// it even if the previous program using it exited uncleanly; it will also be
// unlinked upon a graceful exit (or when a new config does not use that socket).
func (na NetworkAddress) Listen(ctx context.Context, portOffset uint, config net.ListenConfig) (any, error) {
if na.IsUnixNetwork() {
unixSocketsMu.Lock()
defer unixSocketsMu.Unlock()
}
// check to see if plugin provides listener
if ln, err := getListenerFromPlugin(ctx, na.Network, na.JoinHostPort(portOffset), config); ln != nil || err != nil {
return ln, err
}
// create (or reuse) the listener ourselves
return na.listen(ctx, portOffset, config)
}
func (na NetworkAddress) listen(ctx context.Context, portOffset uint, config net.ListenConfig) (any, error) {
var (
ln any
err error
address string
unixFileMode fs.FileMode
isAbstractUnixSocket bool
)
// split unix socket addr early so lnKey
// is independent of permissions bits
if na.IsUnixNetwork() {
var err error
address, unixFileMode, err = internal.SplitUnixSocketPermissionsBits(na.Host)
if err != nil {
return nil, err
}
isAbstractUnixSocket = strings.HasPrefix(address, "@")
} else {
address = na.JoinHostPort(portOffset)
}
// if this is a unix socket, see if we already have it open,
// force socket permissions on it and return early
if socket, err := reuseUnixSocket(na.Network, address); socket != nil || err != nil {
if !isAbstractUnixSocket {
if err := os.Chmod(address, unixFileMode); err != nil {
return nil, fmt.Errorf("unable to set permissions (%s) on %s: %v", unixFileMode, address, err)
}
}
return socket, err
}
lnKey := listenerKey(na.Network, address)
if strings.HasPrefix(na.Network, "ip") {
ln, err = config.ListenPacket(ctx, na.Network, address)
} else {
ln, err = listenReusable(ctx, lnKey, na.Network, address, config)
}
if err != nil {
return nil, err
}
if ln == nil {
return nil, fmt.Errorf("unsupported network type: %s", na.Network)
}
if IsUnixNetwork(na.Network) {
if !isAbstractUnixSocket {
if err := os.Chmod(address, unixFileMode); err != nil {
return nil, fmt.Errorf("unable to set permissions (%s) on %s: %v", unixFileMode, address, err)
}
}
}
return ln, nil
}
// IsUnixNetwork returns true if na.Network is
// unix, unixgram, or unixpacket.
func (na NetworkAddress) IsUnixNetwork() bool {
return IsUnixNetwork(na.Network)
}
// JoinHostPort is like net.JoinHostPort, but where the port
// is StartPort + offset.
func (na NetworkAddress) JoinHostPort(offset uint) string {
if na.IsUnixNetwork() {
return na.Host
}
return net.JoinHostPort(na.Host, strconv.Itoa(int(na.StartPort+offset)))
}
// Expand returns one NetworkAddress for each port in the port range.
func (na NetworkAddress) Expand() []NetworkAddress {
size := na.PortRangeSize()
addrs := make([]NetworkAddress, size)
for portOffset := uint(0); portOffset < size; portOffset++ {
addrs[portOffset] = na.At(portOffset)
}
return addrs
}
// At returns a NetworkAddress with a port range of just 1
// at the given port offset; i.e. a NetworkAddress that
// represents precisely 1 address only.
func (na NetworkAddress) At(portOffset uint) NetworkAddress {
na2 := na
na2.StartPort, na2.EndPort = na.StartPort+portOffset, na.StartPort+portOffset
return na2
}
// PortRangeSize returns how many ports are in
// pa's port range. Port ranges are inclusive,
// so the size is the difference of start and
// end ports plus one.
func (na NetworkAddress) PortRangeSize() uint {
if na.EndPort < na.StartPort {
return 0
}
return (na.EndPort - na.StartPort) + 1
}
func (na NetworkAddress) isLoopback() bool {
if na.IsUnixNetwork() {
return true
}
if na.Host == "localhost" {
return true
}
if ip, err := netip.ParseAddr(na.Host); err == nil {
return ip.IsLoopback()
}
return false
}
func (na NetworkAddress) isWildcardInterface() bool {
if na.Host == "" {
return true
}
if ip, err := netip.ParseAddr(na.Host); err == nil {
return ip.IsUnspecified()
}
return false
}
func (na NetworkAddress) port() string {
if na.StartPort == na.EndPort {
return strconv.FormatUint(uint64(na.StartPort), 10)
}
return fmt.Sprintf("%d-%d", na.StartPort, na.EndPort)
}
// String reconstructs the address string for human display.
// The output can be parsed by ParseNetworkAddress(). If the
// address is a unix socket, any non-zero port will be dropped.
func (na NetworkAddress) String() string {
if na.Network == "tcp" && (na.Host != "" || na.port() != "") {
na.Network = "" // omit default network value for brevity
}
return JoinNetworkAddress(na.Network, na.Host, na.port())
}
// IsUnixNetwork returns true if netw is a unix network.
func IsUnixNetwork(netw string) bool {
return strings.HasPrefix(netw, "unix")
}
// ParseNetworkAddress parses addr into its individual
// components. The input string is expected to be of
// the form "network/host:port-range" where any part is
// optional. The default network, if unspecified, is tcp.
// Port ranges are inclusive.
//
// Network addresses are distinct from URLs and do not
// use URL syntax.
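// For example (illustrative inputs and the components they parse into):
//
//	"localhost:2019"          => Network "tcp",  Host "localhost", ports 2019-2019
//	"udp/:8053"               => Network "udp",  Host "",          ports 8053-8053
//	"tcp/127.0.0.1:8080-8085" => Network "tcp",  Host "127.0.0.1", ports 8080-8085
//	"unix//run/caddy.sock"    => Network "unix", Host "/run/caddy.sock"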
func ParseNetworkAddress(addr string) (NetworkAddress, error) {
return ParseNetworkAddressWithDefaults(addr, "tcp", 0)
}
// ParseNetworkAddressWithDefaults is like ParseNetworkAddress but allows
// the default network and port to be specified.
func ParseNetworkAddressWithDefaults(addr, defaultNetwork string, defaultPort uint) (NetworkAddress, error) {
var host, port string
network, host, port, err := SplitNetworkAddress(addr)
if err != nil {
return NetworkAddress{}, err
}
if network == "" {
network = defaultNetwork
}
if IsUnixNetwork(network) {
_, _, err := internal.SplitUnixSocketPermissionsBits(host)
return NetworkAddress{
Network: network,
Host: host,
}, err
}
var start, end uint64
if port == "" {
start = uint64(defaultPort)
end = uint64(defaultPort)
} else {
before, after, found := strings.Cut(port, "-")
if !found {
after = before
}
start, err = strconv.ParseUint(before, 10, 16)
if err != nil {
return NetworkAddress{}, fmt.Errorf("invalid start port: %v", err)
}
end, err = strconv.ParseUint(after, 10, 16)
if err != nil {
return NetworkAddress{}, fmt.Errorf("invalid end port: %v", err)
}
if end < start {
return NetworkAddress{}, fmt.Errorf("end port must not be less than start port")
}
if (end - start) > maxPortSpan {
return NetworkAddress{}, fmt.Errorf("port range exceeds %d ports", maxPortSpan)
}
}
return NetworkAddress{
Network: network,
Host: host,
StartPort: uint(start),
EndPort: uint(end),
}, nil
}
// SplitNetworkAddress splits a into its network, host, and port components.
// Note that port may be a port range (:X-Y), or omitted for unix sockets.
func SplitNetworkAddress(a string) (network, host, port string, err error) {
beforeSlash, afterSlash, slashFound := strings.Cut(a, "/")
if slashFound {
network = strings.ToLower(strings.TrimSpace(beforeSlash))
a = afterSlash
}
if IsUnixNetwork(network) {
host = a
return
}
host, port, err = net.SplitHostPort(a)
if err == nil || a == "" {
return
}
// in general, if there was an error, it was likely "missing port",
// so try adding a bogus port to take advantage of standard library's
// robust parser, then strip the artificial port before returning
// (don't overwrite original error though; might still be relevant)
var err2 error
host, port, err2 = net.SplitHostPort(a + ":0")
if err2 == nil {
err = nil
port = ""
}
return
}
// JoinNetworkAddress combines network, host, and port into a single
// address string of the form accepted by ParseNetworkAddress(). For
// unix sockets, the network should be "unix" (or "unixgram" or
// "unixpacket") and the path to the socket should be given as the
// host parameter.
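// For example:
//
//	JoinNetworkAddress("", "localhost", "2019")       // "localhost:2019"
//	JoinNetworkAddress("udp", "", "8053")             // "udp/:8053"
//	JoinNetworkAddress("unix", "/run/caddy.sock", "") // "unix//run/caddy.sock"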
func JoinNetworkAddress(network, host, port string) string {
var a string
if network != "" {
a = network + "/"
}
if (host != "" && port == "") || IsUnixNetwork(network) {
a += host
} else if port != "" {
a += net.JoinHostPort(host, port)
}
return a
}
// ListenQUIC returns a quic.EarlyListener suitable for use in a Caddy module.
// The network will be transformed into a QUIC-compatible type (if unix, then
// unixgram will be used; otherwise, udp will be used).
//
// NOTE: This API is EXPERIMENTAL and may be changed or removed.
func (na NetworkAddress) ListenQUIC(ctx context.Context, portOffset uint, config net.ListenConfig, tlsConf *tls.Config) (http3.QUICEarlyListener, error) {
lnKey := listenerKey("quic"+na.Network, na.JoinHostPort(portOffset))
sharedEarlyListener, _, err := listenerPool.LoadOrNew(lnKey, func() (Destructor, error) {
lnAny, err := na.Listen(ctx, portOffset, config)
if err != nil {
return nil, err
}
ln := lnAny.(net.PacketConn)
h3ln := ln
for {
// retrieve the underlying socket, so quic-go can optimize.
if unwrapper, ok := h3ln.(interface{ Unwrap() net.PacketConn }); ok {
h3ln = unwrapper.Unwrap()
} else {
break
}
}
sqs := newSharedQUICState(tlsConf)
// http3.ConfigureTLSConfig only uses this field and tls App sets this field as well
//nolint:gosec
quicTlsConfig := &tls.Config{GetConfigForClient: sqs.getConfigForClient}
// Require clients to verify their source address when we're handling more than 1000 handshakes per second.
// TODO: make tunable?
limiter := rate.NewLimiter(1000, 1000)
tr := &quic.Transport{
Conn: h3ln,
VerifySourceAddress: func(addr net.Addr) bool { return !limiter.Allow() },
}
earlyLn, err := tr.ListenEarly(http3.ConfigureTLSConfig(quicTlsConfig), &quic.Config{Allow0RTT: true})
if err != nil {
return nil, err
}
// TODO: figure out when to close the listener and the transport
// using the original net.PacketConn to close them properly
return &sharedQuicListener{EarlyListener: earlyLn, packetConn: ln, sqs: sqs, key: lnKey}, nil
})
if err != nil {
return nil, err
}
sql := sharedEarlyListener.(*sharedQuicListener)
// add current tls.Config to sqs, so GetConfigForClient will always return the latest tls.Config in case of context cancellation
ctx, cancel := sql.sqs.addState(tlsConf)
return &fakeCloseQuicListener{
sharedQuicListener: sql,
context: ctx,
contextCancel: cancel,
}, nil
}
// ListenerUsage returns the current usage count of the given listener address.
func ListenerUsage(network, addr string) int {
count, _ := listenerPool.References(listenerKey(network, addr))
return count
}
// contextAndCancelFunc groups context and its cancelFunc
type contextAndCancelFunc struct {
context.Context
context.CancelFunc
}
// sharedQUICState manages GetConfigForClient
// see issue: https://github.com/caddyserver/caddy/pull/4849
type sharedQUICState struct {
rmu sync.RWMutex
tlsConfs map[*tls.Config]contextAndCancelFunc
activeTlsConf *tls.Config
}
// newSharedQUICState creates a new sharedQUICState
func newSharedQUICState(tlsConfig *tls.Config) *sharedQUICState {
sqtc := &sharedQUICState{
tlsConfs: make(map[*tls.Config]contextAndCancelFunc),
activeTlsConf: tlsConfig,
}
sqtc.addState(tlsConfig)
return sqtc
}
// getConfigForClient is used as tls.Config's GetConfigForClient field
func (sqs *sharedQUICState) getConfigForClient(ch *tls.ClientHelloInfo) (*tls.Config, error) {
sqs.rmu.RLock()
defer sqs.rmu.RUnlock()
return sqs.activeTlsConf.GetConfigForClient(ch)
}
// addState adds the given tls.Config to the map if not present and returns the
// corresponding context and its cancelFunc, so that when canceled, the active
// tls.Config will change
func (sqs *sharedQUICState) addState(tlsConfig *tls.Config) (context.Context, context.CancelFunc) {
sqs.rmu.Lock()
defer sqs.rmu.Unlock()
if cacc, ok := sqs.tlsConfs[tlsConfig]; ok {
return cacc.Context, cacc.CancelFunc
}
ctx, cancel := context.WithCancel(context.Background())
wrappedCancel := func() {
cancel()
sqs.rmu.Lock()
defer sqs.rmu.Unlock()
delete(sqs.tlsConfs, tlsConfig)
if sqs.activeTlsConf == tlsConfig {
// select another tls.Config, if there is none,
// related sharedQuicListener will be destroyed anyway
for tc := range sqs.tlsConfs {
sqs.activeTlsConf = tc
break
}
}
}
sqs.tlsConfs[tlsConfig] = contextAndCancelFunc{ctx, wrappedCancel}
// there should be at most 2 tls.Configs
if len(sqs.tlsConfs) > 2 {
Log().Warn("quic listener tls configs are more than 2", zap.Int("number of configs", len(sqs.tlsConfs)))
}
return ctx, wrappedCancel
}
// sharedQuicListener is like sharedListener, but for quic.EarlyListeners.
type sharedQuicListener struct {
*quic.EarlyListener
packetConn net.PacketConn // we have to hold these because quic-go won't close listeners it didn't create
sqs *sharedQUICState
key string
}
// Destruct closes the underlying QUIC listener and its associated net.PacketConn.
func (sql *sharedQuicListener) Destruct() error {
// close EarlyListener first to stop any operations being done to the net.PacketConn
_ = sql.EarlyListener.Close()
// then close the net.PacketConn
return sql.packetConn.Close()
}
// fakeClosedErr returns an error value that is neither temporary
// nor a timeout, suitable for making the caller think the
// listener is actually closed
func fakeClosedErr(l interface{ Addr() net.Addr }) error {
return &net.OpError{
Op: "accept",
Net: l.Addr().Network(),
Addr: l.Addr(),
Err: errFakeClosed,
}
}
// errFakeClosed is the underlying error value returned by
// fakeCloseListener.Accept() after Close() has been called,
// indicating that it is pretending to be closed so that the
// server using it can terminate, while the underlying
// socket is actually left open.
var errFakeClosed = fmt.Errorf("listener 'closed' 😉")
type fakeCloseQuicListener struct {
closed int32 // accessed atomically; belongs to this struct only
*sharedQuicListener // embedded, so we also become a quic.EarlyListener
context context.Context
contextCancel context.CancelFunc
}
// Accept currently ignores the passed context. A situation where someone
// needs a hot-swappable QUIC-only server (not HTTP/3, which uses
// context.Background here) whose Accept is called with non-empty contexts
// (note that the standard net listeners' Accept doesn't take a context
// argument at all) seems far too rare to justify sacrificing efficiency here.
func (fcql *fakeCloseQuicListener) Accept(_ context.Context) (quic.EarlyConnection, error) {
conn, err := fcql.sharedQuicListener.Accept(fcql.context)
if err == nil {
return conn, nil
}
// if the listener is "closed", return a fake closed error instead
if atomic.LoadInt32(&fcql.closed) == 1 && errors.Is(err, context.Canceled) {
return nil, fakeClosedErr(fcql)
}
return nil, err
}
func (fcql *fakeCloseQuicListener) Close() error {
if atomic.CompareAndSwapInt32(&fcql.closed, 0, 1) {
fcql.contextCancel()
_, _ = listenerPool.Delete(fcql.sharedQuicListener.key)
}
return nil
}
// RegisterNetwork registers a network type with Caddy so that if a listener is
// created for that network type, getListener will be invoked to get the listener.
// This should be called during init() and will panic if the network type is standard
// or reserved, or if it is already registered. EXPERIMENTAL and subject to change.
func RegisterNetwork(network string, getListener ListenerFunc) {
network = strings.TrimSpace(strings.ToLower(network))
if network == "tcp" || network == "tcp4" || network == "tcp6" ||
network == "udp" || network == "udp4" || network == "udp6" ||
network == "unix" || network == "unixpacket" || network == "unixgram" ||
strings.HasPrefix("ip:", network) || strings.HasPrefix("ip4:", network) || strings.HasPrefix("ip6:", network) {
panic("network type " + network + " is reserved")
}
if _, ok := networkTypes[strings.ToLower(network)]; ok {
panic("network type " + network + " is already registered")
}
networkTypes[network] = getListener
}
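// As a sketch of how a plugin might use RegisterNetwork (the "tcp_echo"
// network name and the fallback to a plain TCP listener are hypothetical),
// a custom network type is typically registered from the plugin's init:
//
//	func init() {
//		caddy.RegisterNetwork("tcp_echo", func(ctx context.Context, network, addr string, cfg net.ListenConfig) (any, error) {
//			// for this sketch, simply delegate to a plain TCP listener
//			return cfg.Listen(ctx, "tcp", addr)
//		})
//	}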
var unixSocketsMu sync.Mutex
// getListenerFromPlugin returns a listener on the given network and address
// if a plugin has registered the network name. It may return (nil, nil) if
// no plugin can provide a listener.
func getListenerFromPlugin(ctx context.Context, network, addr string, config net.ListenConfig) (any, error) {
// get listener from plugin if network type is registered
if getListener, ok := networkTypes[network]; ok {
Log().Debug("getting listener from plugin", zap.String("network", network))
return getListener(ctx, network, addr, config)
}
return nil, nil
}
func listenerKey(network, addr string) string {
return network + "/" + addr
}
// ListenerFunc is a function that can return a listener given a network and address.
// The listeners must be capable of overlapping: with Caddy, new configs are loaded
// before old ones are unloaded, so listeners may overlap briefly if the configs
// both need the same listener. EXPERIMENTAL and subject to change.
type ListenerFunc func(ctx context.Context, network, addr string, cfg net.ListenConfig) (any, error)
var networkTypes = map[string]ListenerFunc{}
// ListenerWrapper is a type that wraps a listener
// so it can modify the input listener's methods.
// Modules that implement this interface are found
// in the caddy.listeners namespace. Usually, to
// wrap a listener, you will define your own struct
// type that embeds the input listener, then
// implement your own methods that you want to wrap,
// calling the underlying listener's methods where
// appropriate.
type ListenerWrapper interface {
WrapListener(net.Listener) net.Listener
}
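// A minimal sketch of a listener wrapper (the type names are hypothetical,
// and module registration/provisioning is omitted): embed the input
// listener and override only the methods you need.
//
//	type loggedListener struct {
//		net.Listener
//	}
//
//	func (l loggedListener) Accept() (net.Conn, error) {
//		conn, err := l.Listener.Accept()
//		if err == nil {
//			caddy.Log().Debug("accepted connection", zap.String("remote", conn.RemoteAddr().String()))
//		}
//		return conn, err
//	}
//
//	type LoggedListeners struct{}
//
//	func (LoggedListeners) WrapListener(ln net.Listener) net.Listener {
//		return loggedListener{Listener: ln}
//	}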
// listenerPool stores and allows reuse of active listeners.
var listenerPool = NewUsagePool()
const maxPortSpan = 65535
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build gofuzz
package caddy
func FuzzParseNetworkAddress(data []byte) int {
_, err := ParseNetworkAddress(string(data))
if err != nil {
return 0
}
return 1
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddy
import (
"encoding/json"
"fmt"
"io"
"log"
"os"
"strings"
"sync"
"time"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"golang.org/x/term"
)
func init() {
RegisterModule(StdoutWriter{})
RegisterModule(StderrWriter{})
RegisterModule(DiscardWriter{})
}
// Logging facilitates logging within Caddy. The default log is
// called "default" and you can customize it. You can also define
// additional logs.
//
// By default, all logs at INFO level and higher are written to
// standard error ("stderr" writer) in a human-readable format
// ("console" encoder if stderr is an interactive terminal, "json"
// encoder otherwise).
//
// All defined logs accept all log entries by default, but you
// can filter by level and module/logger names. A logger's name
// is the same as the module's name, but a module may append to
// logger names for more specificity. For example, you can
// filter logs emitted only by HTTP handlers using the name
// "http.handlers", because all HTTP handler module names have
// that prefix.
//
// Caddy logs (except the sink) are zero-allocation, so they are
// very high-performing in terms of memory and CPU time. Enabling
// sampling can further increase throughput on extremely high-load
// servers.
type Logging struct {
// Sink is the destination for all unstructured logs emitted
// from Go's standard library logger. These logs are common
// in dependencies that are not designed specifically for use
// in Caddy. Because it is global and unstructured, the sink
// lacks most advanced features and customizations.
Sink *SinkLog `json:"sink,omitempty"`
// Logs are your logs, keyed by an arbitrary name of your
// choosing. The default log can be customized by defining
// a log called "default". You can further define other logs
// and filter what kinds of entries they accept.
Logs map[string]*CustomLog `json:"logs,omitempty"`
// a list of all keys for open writers; all writers
// that are opened to provision this logging config
// must have their keys added to this list so they
// can be closed when cleaning up
writerKeys []string
}
// openLogs sets up the config and opens all the configured writers.
// It also registers a cleanup function so that the logs are closed
// when ctx is canceled.
func (logging *Logging) openLogs(ctx Context) error {
// make sure to deallocate resources when context is done
ctx.OnCancel(func() {
err := logging.closeLogs()
if err != nil {
Log().Error("closing logs", zap.Error(err))
}
})
// set up the "sink" log first (std lib's default global logger)
if logging.Sink != nil {
err := logging.Sink.provision(ctx, logging)
if err != nil {
return fmt.Errorf("setting up sink log: %v", err)
}
}
// as a special case, set up the default structured Caddy log next
if err := logging.setupNewDefault(ctx); err != nil {
return err
}
// then set up any other custom logs
for name, l := range logging.Logs {
// the default log is already set up
if name == DefaultLoggerName {
continue
}
err := l.provision(ctx, logging)
if err != nil {
return fmt.Errorf("setting up custom log '%s': %v", name, err)
}
// Any other logs that use the discard writer can be deleted
// entirely. This avoids encoding and processing of each
// log entry that would just be thrown away anyway. Notably,
// we do not reach this point for the default log, which MUST
// exist, otherwise core log emissions would panic because
// they use the Log() function directly which expects a non-nil
// logger. Even if we keep logs with a discard writer, they
// have a nop core, and keeping them at all seems unnecessary.
if _, ok := l.writerOpener.(*DiscardWriter); ok {
delete(logging.Logs, name)
continue
}
}
return nil
}
func (logging *Logging) setupNewDefault(ctx Context) error {
if logging.Logs == nil {
logging.Logs = make(map[string]*CustomLog)
}
// extract the user-defined default log, if any
newDefault := new(defaultCustomLog)
if userDefault, ok := logging.Logs[DefaultLoggerName]; ok {
newDefault.CustomLog = userDefault
} else {
// if none, make one with our own default settings
var err error
newDefault, err = newDefaultProductionLog()
if err != nil {
return fmt.Errorf("setting up default Caddy log: %v", err)
}
logging.Logs[DefaultLoggerName] = newDefault.CustomLog
}
// options for the default logger
options, err := newDefault.CustomLog.buildOptions()
if err != nil {
return fmt.Errorf("setting up default log: %v", err)
}
// set up this new log
err = newDefault.CustomLog.provision(ctx, logging)
if err != nil {
return fmt.Errorf("setting up default log: %v", err)
}
newDefault.logger = zap.New(newDefault.CustomLog.core, options...)
// redirect the default caddy logs
defaultLoggerMu.Lock()
oldDefault := defaultLogger
defaultLogger = newDefault
defaultLoggerMu.Unlock()
// if the new writer is different, indicate it in the logs for convenience
var newDefaultLogWriterKey, currentDefaultLogWriterKey string
var newDefaultLogWriterStr, currentDefaultLogWriterStr string
if newDefault.writerOpener != nil {
newDefaultLogWriterKey = newDefault.writerOpener.WriterKey()
newDefaultLogWriterStr = newDefault.writerOpener.String()
}
if oldDefault.writerOpener != nil {
currentDefaultLogWriterKey = oldDefault.writerOpener.WriterKey()
currentDefaultLogWriterStr = oldDefault.writerOpener.String()
}
if newDefaultLogWriterKey != currentDefaultLogWriterKey {
oldDefault.logger.Info("redirected default logger",
zap.String("from", currentDefaultLogWriterStr),
zap.String("to", newDefaultLogWriterStr),
)
}
return nil
}
// closeLogs cleans up resources allocated during openLogs.
// A successful call to openLogs calls this automatically
// when the context is canceled.
func (logging *Logging) closeLogs() error {
for _, key := range logging.writerKeys {
_, err := writers.Delete(key)
if err != nil {
log.Printf("[ERROR] Closing log writer %v: %v", key, err)
}
}
return nil
}
// Logger returns a logger that is ready for the module to use.
func (logging *Logging) Logger(mod Module) *zap.Logger {
modID := string(mod.CaddyModule().ID)
var cores []zapcore.Core
var options []zap.Option
if logging != nil {
for _, l := range logging.Logs {
if l.matchesModule(modID) {
if len(l.Include) == 0 && len(l.Exclude) == 0 {
cores = append(cores, l.core)
continue
}
if len(options) == 0 {
newOptions, err := l.buildOptions()
if err != nil {
Log().Error("building options for logger", zap.String("module", modID), zap.Error(err))
}
options = newOptions
}
cores = append(cores, &filteringCore{Core: l.core, cl: l})
}
}
}
multiCore := zapcore.NewTee(cores...)
return zap.New(multiCore, options...).Named(modID)
}
// openWriter opens a writer using opener, and returns true if
// the writer is new, or false if the writer already exists.
func (logging *Logging) openWriter(opener WriterOpener) (io.WriteCloser, bool, error) {
key := opener.WriterKey()
writer, loaded, err := writers.LoadOrNew(key, func() (Destructor, error) {
w, err := opener.OpenWriter()
return writerDestructor{w}, err
})
if err != nil {
return nil, false, err
}
logging.writerKeys = append(logging.writerKeys, key)
return writer.(io.WriteCloser), !loaded, nil
}
// WriterOpener is a module that can open a log writer.
// It can return a human-readable string representation
// of itself so that operators can understand where
// the logs are going.
type WriterOpener interface {
fmt.Stringer
// WriterKey is a string that uniquely identifies this
// writer configuration. It is not shown to humans.
WriterKey() string
// OpenWriter opens a log for writing. The writer
// should be safe for concurrent use but need not
// be synchronous.
OpenWriter() (io.WriteCloser, error)
}
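// A hypothetical writer-opener sketch (not a real Caddy module; a complete
// implementation would also implement CaddyModule and register itself) that
// sends log output to an in-memory buffer:
//
//	type bufferWriter struct{ buf *bytes.Buffer }
//
//	func (bufferWriter) String() string    { return "buffer" }
//	func (bufferWriter) WriterKey() string { return "buffer" }
//	func (bw bufferWriter) OpenWriter() (io.WriteCloser, error) {
//		return notClosable{bw.buf}, nil
//	}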
// IsWriterStandardStream returns true if the input is a
// writer-opener to a standard stream (stdout, stderr).
func IsWriterStandardStream(wo WriterOpener) bool {
switch wo.(type) {
case StdoutWriter, StderrWriter,
*StdoutWriter, *StderrWriter:
return true
}
return false
}
type writerDestructor struct {
io.WriteCloser
}
func (wdest writerDestructor) Destruct() error {
return wdest.Close()
}
// BaseLog contains the logging parameters common to all logs.
type BaseLog struct {
// The module that writes out log entries for this log.
WriterRaw json.RawMessage `json:"writer,omitempty" caddy:"namespace=caddy.logging.writers inline_key=output"`
// The encoder is how the log entries are formatted or encoded.
EncoderRaw json.RawMessage `json:"encoder,omitempty" caddy:"namespace=caddy.logging.encoders inline_key=format"`
// Tees entries through a zap.Core module which can extract
// log entry metadata and fields for further processing.
CoreRaw json.RawMessage `json:"core,omitempty" caddy:"namespace=caddy.logging.cores inline_key=module"`
// Level is the minimum level to emit, and is inclusive.
// Possible levels: DEBUG, INFO, WARN, ERROR, PANIC, and FATAL
Level string `json:"level,omitempty"`
// Sampling configures log entry sampling. If enabled,
// only some log entries will be emitted. This is useful
// for improving performance on extremely high-pressure
// servers.
Sampling *LogSampling `json:"sampling,omitempty"`
// If true, the log entry will include the caller's
// file name and line number. Default off.
WithCaller bool `json:"with_caller,omitempty"`
// If non-zero, and `with_caller` is true, this many
// stack frames will be skipped when determining the
// caller. Default 0.
WithCallerSkip int `json:"with_caller_skip,omitempty"`
// If not empty, the log entry will include a stack trace
// for all logs at the given level or higher. See `level`
// for possible values. Default off.
WithStacktrace string `json:"with_stacktrace,omitempty"`
writerOpener WriterOpener
writer io.WriteCloser
encoder zapcore.Encoder
levelEnabler zapcore.LevelEnabler
core zapcore.Core
}
func (cl *BaseLog) provisionCommon(ctx Context, logging *Logging) error {
if cl.WriterRaw != nil {
mod, err := ctx.LoadModule(cl, "WriterRaw")
if err != nil {
return fmt.Errorf("loading log writer module: %v", err)
}
cl.writerOpener = mod.(WriterOpener)
}
if cl.writerOpener == nil {
cl.writerOpener = StderrWriter{}
}
var err error
cl.writer, _, err = logging.openWriter(cl.writerOpener)
if err != nil {
return fmt.Errorf("opening log writer using %#v: %v", cl.writerOpener, err)
}
// set up the log level
cl.levelEnabler, err = parseLevel(cl.Level)
if err != nil {
return err
}
if cl.EncoderRaw != nil {
mod, err := ctx.LoadModule(cl, "EncoderRaw")
if err != nil {
return fmt.Errorf("loading log encoder module: %v", err)
}
cl.encoder = mod.(zapcore.Encoder)
// if the encoder module needs the writer to determine
// the correct default to use for a nested encoder, we
// pass it down as a secondary provisioning step
if cfd, ok := mod.(ConfiguresFormatterDefault); ok {
if err := cfd.ConfigureDefaultFormat(cl.writerOpener); err != nil {
return fmt.Errorf("configuring default format for encoder module: %v", err)
}
}
}
if cl.encoder == nil {
cl.encoder = newDefaultProductionLogEncoder(cl.writerOpener)
}
cl.buildCore()
if cl.CoreRaw != nil {
mod, err := ctx.LoadModule(cl, "CoreRaw")
if err != nil {
return fmt.Errorf("loading log core module: %v", err)
}
core := mod.(zapcore.Core)
cl.core = zapcore.NewTee(cl.core, core)
}
return nil
}
func (cl *BaseLog) buildCore() {
// logs which only discard their output don't need
// to perform encoding or any other processing steps
// at all, so just shortcut to a nop core instead
if _, ok := cl.writerOpener.(*DiscardWriter); ok {
cl.core = zapcore.NewNopCore()
return
}
c := zapcore.NewCore(
cl.encoder,
zapcore.AddSync(cl.writer),
cl.levelEnabler,
)
if cl.Sampling != nil {
if cl.Sampling.Interval == 0 {
cl.Sampling.Interval = 1 * time.Second
}
if cl.Sampling.First == 0 {
cl.Sampling.First = 100
}
if cl.Sampling.Thereafter == 0 {
cl.Sampling.Thereafter = 100
}
c = zapcore.NewSamplerWithOptions(c, cl.Sampling.Interval,
cl.Sampling.First, cl.Sampling.Thereafter)
}
cl.core = c
}
func (cl *BaseLog) buildOptions() ([]zap.Option, error) {
var options []zap.Option
if cl.WithCaller {
options = append(options, zap.AddCaller())
if cl.WithCallerSkip != 0 {
options = append(options, zap.AddCallerSkip(cl.WithCallerSkip))
}
}
if cl.WithStacktrace != "" {
levelEnabler, err := parseLevel(cl.WithStacktrace)
if err != nil {
return options, fmt.Errorf("setting up stacktrace: %v", err)
}
options = append(options, zap.AddStacktrace(levelEnabler))
}
return options, nil
}
// SinkLog configures the default Go standard library
// global logger in the log package. This is necessary because
// module dependencies which are not built specifically for
// Caddy will use the standard logger. This is also known as
// the "sink" logger.
type SinkLog struct {
BaseLog
}
func (sll *SinkLog) provision(ctx Context, logging *Logging) error {
if err := sll.provisionCommon(ctx, logging); err != nil {
return err
}
options, err := sll.buildOptions()
if err != nil {
return err
}
logger := zap.New(sll.core, options...)
ctx.cleanupFuncs = append(ctx.cleanupFuncs, zap.RedirectStdLog(logger))
return nil
}
// CustomLog represents a custom logger configuration.
//
// By default, a log will emit all log entries. Some entries
// will be skipped if sampling is enabled. Further, the Include
// and Exclude parameters define which loggers (by name) are
// allowed or rejected from emitting in this log. If both Include
// and Exclude are populated, their values must be mutually
// exclusive, and longer namespaces have priority. If neither
// are populated, all logs are emitted.
type CustomLog struct {
BaseLog
// Include defines the names of loggers to emit in this
// log. For example, to include only logs emitted by the
// admin API, you would include "admin.api".
Include []string `json:"include,omitempty"`
// Exclude defines the names of loggers that should be
// skipped by this log. For example, to exclude only
// HTTP access logs, you would exclude "http.log.access".
Exclude []string `json:"exclude,omitempty"`
}
func (cl *CustomLog) provision(ctx Context, logging *Logging) error {
if err := cl.provisionCommon(ctx, logging); err != nil {
return err
}
// If both Include and Exclude lists are populated, then each item must
// be a superspace or subspace of an item in the other list, because
// populating both lists means that any given item is either a rule
// or an exception to another rule. But if the item is not a super-
// or sub-space of any item in the other list, it is neither a rule
// nor an exception, and is a contradiction. Ensure, too, that the
// sets do not intersect, which is also a contradiction.
if len(cl.Include) > 0 && len(cl.Exclude) > 0 {
// prevent intersections
for _, allow := range cl.Include {
for _, deny := range cl.Exclude {
if allow == deny {
return fmt.Errorf("include and exclude must not intersect, but found %s in both lists", allow)
}
}
}
// ensure namespaces are nested
outer:
for _, allow := range cl.Include {
for _, deny := range cl.Exclude {
if strings.HasPrefix(allow+".", deny+".") ||
strings.HasPrefix(deny+".", allow+".") {
continue outer
}
}
return fmt.Errorf("when both include and exclude are populated, each element must be a superspace or subspace of one in the other list; check '%s' in include", allow)
}
}
return nil
}
func (cl *CustomLog) matchesModule(moduleID string) bool {
return cl.loggerAllowed(moduleID, true)
}
// loggerAllowed returns true if name is allowed to emit
// to cl. isModule should be true if name is the name of
// a module and you want to see if ANY of that module's
// logs would be permitted.
func (cl *CustomLog) loggerAllowed(name string, isModule bool) bool {
// accept all loggers by default
if len(cl.Include) == 0 && len(cl.Exclude) == 0 {
return true
}
// append a dot so that partial names don't match
// (i.e. we don't want "foo.b" to match "foo.bar"); we
// will also have to append a dot when we do HasPrefix
// below to compensate for when namespaces are equal
if name != "" && name != "*" && name != "." {
name += "."
}
var longestAccept, longestReject int
if len(cl.Include) > 0 {
for _, namespace := range cl.Include {
var hasPrefix bool
if isModule {
hasPrefix = strings.HasPrefix(namespace+".", name)
} else {
hasPrefix = strings.HasPrefix(name, namespace+".")
}
if hasPrefix && len(namespace) > longestAccept {
longestAccept = len(namespace)
}
}
// the include list was populated, meaning that
// a match in this list is absolutely required
// if we are to accept the entry
if longestAccept == 0 {
return false
}
}
if len(cl.Exclude) > 0 {
for _, namespace := range cl.Exclude {
// * == all logs emitted by modules
// . == all logs emitted by core
if (namespace == "*" && name != ".") ||
(namespace == "." && name == ".") {
return false
}
if strings.HasPrefix(name, namespace+".") &&
len(namespace) > longestReject {
longestReject = len(namespace)
}
}
// the reject list is populated, so we have to
// reject this entry if its match is better
// than the best from the accept list
if longestReject > longestAccept {
return false
}
}
return (longestAccept > longestReject) ||
(len(cl.Include) == 0 && longestReject == 0)
}
// filteringCore filters log entries based on logger name,
// according to the rules of a CustomLog.
type filteringCore struct {
zapcore.Core
cl *CustomLog
}
// With properly wraps With.
func (fc *filteringCore) With(fields []zapcore.Field) zapcore.Core {
return &filteringCore{
Core: fc.Core.With(fields),
cl: fc.cl,
}
}
// Check only allows the log entry if its logger name
// is allowed from the include/exclude rules of fc.cl.
func (fc *filteringCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
if fc.cl.loggerAllowed(e.LoggerName, false) {
return fc.Core.Check(e, ce)
}
return ce
}
// LogSampling configures log entry sampling.
type LogSampling struct {
// The window over which to conduct sampling.
Interval time.Duration `json:"interval,omitempty"`
// Log this many entries within a given level and
// message for each interval.
First int `json:"first,omitempty"`
// If more entries with the same level and message
// are seen during the same interval, keep one in
// this many entries until the end of the interval.
Thereafter int `json:"thereafter,omitempty"`
}
type (
// StdoutWriter writes logs to standard out.
StdoutWriter struct{}
// StderrWriter writes logs to standard error.
StderrWriter struct{}
// DiscardWriter discards all writes.
DiscardWriter struct{}
)
// CaddyModule returns the Caddy module information.
func (StdoutWriter) CaddyModule() ModuleInfo {
return ModuleInfo{
ID: "caddy.logging.writers.stdout",
New: func() Module { return new(StdoutWriter) },
}
}
// CaddyModule returns the Caddy module information.
func (StderrWriter) CaddyModule() ModuleInfo {
return ModuleInfo{
ID: "caddy.logging.writers.stderr",
New: func() Module { return new(StderrWriter) },
}
}
// CaddyModule returns the Caddy module information.
func (DiscardWriter) CaddyModule() ModuleInfo {
return ModuleInfo{
ID: "caddy.logging.writers.discard",
New: func() Module { return new(DiscardWriter) },
}
}
func (StdoutWriter) String() string { return "stdout" }
func (StderrWriter) String() string { return "stderr" }
func (DiscardWriter) String() string { return "discard" }
// WriterKey returns a unique key representing stdout.
func (StdoutWriter) WriterKey() string { return "std:out" }
// WriterKey returns a unique key representing stderr.
func (StderrWriter) WriterKey() string { return "std:err" }
// WriterKey returns a unique key representing discard.
func (DiscardWriter) WriterKey() string { return "discard" }
// OpenWriter returns os.Stdout that can't be closed.
func (StdoutWriter) OpenWriter() (io.WriteCloser, error) {
return notClosable{os.Stdout}, nil
}
// OpenWriter returns os.Stderr that can't be closed.
func (StderrWriter) OpenWriter() (io.WriteCloser, error) {
return notClosable{os.Stderr}, nil
}
// OpenWriter returns io.Discard that can't be closed.
func (DiscardWriter) OpenWriter() (io.WriteCloser, error) {
return notClosable{io.Discard}, nil
}
// notClosable is an io.WriteCloser that can't be closed.
type notClosable struct{ io.Writer }
func (fc notClosable) Close() error { return nil }
type defaultCustomLog struct {
*CustomLog
logger *zap.Logger
}
// newDefaultProductionLog configures a custom log that is
// intended for use by default if no other log is specified
// in a config. It writes to stderr, uses the console encoder,
// and enables INFO-level logs and higher.
func newDefaultProductionLog() (*defaultCustomLog, error) {
cl := new(CustomLog)
cl.writerOpener = StderrWriter{}
var err error
cl.writer, err = cl.writerOpener.OpenWriter()
if err != nil {
return nil, err
}
cl.encoder = newDefaultProductionLogEncoder(cl.writerOpener)
cl.levelEnabler = zapcore.InfoLevel
cl.buildCore()
logger := zap.New(cl.core)
// capture logs from other libraries which
// may not be using zap logging directly
_ = zap.RedirectStdLog(logger)
return &defaultCustomLog{
CustomLog: cl,
logger: logger,
}, nil
}
func newDefaultProductionLogEncoder(wo WriterOpener) zapcore.Encoder {
encCfg := zap.NewProductionEncoderConfig()
if IsWriterStandardStream(wo) && term.IsTerminal(int(os.Stderr.Fd())) {
// if interactive terminal, make output more human-readable by default
encCfg.EncodeTime = func(ts time.Time, encoder zapcore.PrimitiveArrayEncoder) {
encoder.AppendString(ts.UTC().Format("2006/01/02 15:04:05.000"))
}
if coloringEnabled {
encCfg.EncodeLevel = zapcore.CapitalColorLevelEncoder
}
return zapcore.NewConsoleEncoder(encCfg)
}
return zapcore.NewJSONEncoder(encCfg)
}
func parseLevel(levelInput string) (zapcore.LevelEnabler, error) {
repl := NewReplacer()
level, err := repl.ReplaceOrErr(levelInput, true, true)
if err != nil {
return nil, fmt.Errorf("invalid log level: %v", err)
}
level = strings.ToLower(level)
// set up the log level
switch level {
case "debug":
return zapcore.DebugLevel, nil
case "", "info":
return zapcore.InfoLevel, nil
case "warn":
return zapcore.WarnLevel, nil
case "error":
return zapcore.ErrorLevel, nil
case "panic":
return zapcore.PanicLevel, nil
case "fatal":
return zapcore.FatalLevel, nil
default:
return nil, fmt.Errorf("unrecognized log level: %s", level)
}
}
// Log returns the current default logger.
func Log() *zap.Logger {
defaultLoggerMu.RLock()
defer defaultLoggerMu.RUnlock()
return defaultLogger.logger
}
var (
coloringEnabled = os.Getenv("NO_COLOR") == "" && os.Getenv("TERM") != "xterm-mono"
defaultLogger, _ = newDefaultProductionLog()
defaultLoggerMu sync.RWMutex
)
var writers = NewUsagePool()
// ConfiguresFormatterDefault is an optional interface that
// encoder modules can implement to configure the default
// format of their encoder. This is useful for encoders
// which nest an encoder, that needs to know the writer
// in order to determine the correct default.
type ConfiguresFormatterDefault interface {
ConfigureDefaultFormat(WriterOpener) error
}
const DefaultLoggerName = "default"
// Interface guards
var (
_ io.WriteCloser = (*notClosable)(nil)
_ WriterOpener = (*StdoutWriter)(nil)
_ WriterOpener = (*StderrWriter)(nil)
)
package caddy
import (
"net/http"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/caddyserver/caddy/v2/internal/metrics"
)
// define and register the metrics used in this package.
func init() {
prometheus.MustRegister(collectors.NewBuildInfoCollector())
const ns, sub = "caddy", "admin"
adminMetrics.requestCount = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Subsystem: sub,
Name: "http_requests_total",
Help: "Counter of requests made to the Admin API's HTTP endpoints.",
}, []string{"handler", "path", "code", "method"})
adminMetrics.requestErrors = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Subsystem: sub,
Name: "http_request_errors_total",
Help: "Number of requests resulting in middleware errors.",
}, []string{"handler", "path", "method"})
}
// adminMetrics is a collection of metrics that can be tracked for the admin API.
var adminMetrics = struct {
requestCount *prometheus.CounterVec
requestErrors *prometheus.CounterVec
}{}
// Similar to promhttp.InstrumentHandlerCounter, but upper-cases method names
// instead of lower-casing them.
//
// Unlike promhttp.InstrumentHandlerCounter, this assumes the "code" and "method"
// labels are present, and will panic otherwise.
func instrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w)
next.ServeHTTP(d, r)
counter.With(prometheus.Labels{
"code": metrics.SanitizeCode(d.status),
"method": metrics.SanitizeMethod(r.Method),
}).Inc()
})
}
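// A sketch of intended usage (the "/load" path, mux, and handleLoad are
// hypothetical): curry the static "handler" and "path" labels first, and
// let the instrumented handler fill in "code" and "method" per request.
//
//	mux.Handle("/load", instrumentHandlerCounter(
//		adminMetrics.requestCount.MustCurryWith(prometheus.Labels{
//			"handler": "load",
//			"path":    "/load",
//		}),
//		http.HandlerFunc(handleLoad),
//	))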
func newDelegator(w http.ResponseWriter) *delegator {
return &delegator{
ResponseWriter: w,
}
}
type delegator struct {
http.ResponseWriter
status int
}
func (d *delegator) WriteHeader(code int) {
d.status = code
d.ResponseWriter.WriteHeader(code)
}
// Unwrap returns the underlying ResponseWriter, necessary for
// http.ResponseController to work correctly.
func (d *delegator) Unwrap() http.ResponseWriter {
return d.ResponseWriter
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddy
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"sort"
"strings"
"sync"
)
// Module is a type that is used as a Caddy module. In
// addition to this interface, most modules will implement
// some interface expected by their host module in order
// to be useful. To learn which interface(s) to implement,
// see the documentation for the host module. At a bare
// minimum, this interface, when implemented, only provides
// the module's ID and constructor function.
//
// Modules will often implement additional interfaces
// including Provisioner, Validator, and CleanerUpper.
// If a module implements these interfaces, their
// methods are called during the module's lifespan.
//
// When a module is loaded by a host module, the following
// happens: 1) ModuleInfo.New() is called to get a new
// instance of the module. 2) The module's configuration is
// unmarshaled into that instance. 3) If the module is a
// Provisioner, the Provision() method is called. 4) If the
// module is a Validator, the Validate() method is called.
// 5) The module will probably be type-asserted from
// 'any' to some other, more useful interface expected
// by the host module. For example, HTTP handler modules are
// type-asserted as caddyhttp.MiddlewareHandler values.
// 6) When a module's containing Context is canceled, if it is
// a CleanerUpper, its Cleanup() method is called.
type Module interface {
// This method indicates that the type is a Caddy
// module. The returned ModuleInfo must have both
// a name and a constructor function. This method
// must not have any side-effects.
CaddyModule() ModuleInfo
}
// ModuleInfo represents a registered Caddy module.
type ModuleInfo struct {
// ID is the "full name" of the module. It
// must be unique and properly namespaced.
ID ModuleID
// New returns a pointer to a new, empty
// instance of the module's type. This
// method must not have any side-effects,
// and no other initialization should
// occur within it. Any initialization
// of the returned value should be done
// in a Provision() method (see the
// Provisioner interface).
New func() Module
}
// ModuleID is a string that uniquely identifies a Caddy module. A
// module ID is lightly structured. It consists of dot-separated
// labels which form a simple hierarchy from left to right. The last
// label is the module name, and the labels before that constitute
// the namespace (or scope).
//
// Thus, a module ID has the form: <namespace>.<name>
//
// An ID with no dot has the empty namespace, which is appropriate
// for app modules (these are "top-level" modules that Caddy core
// loads and runs).
//
// Module IDs should be lowercase and use underscores (_) instead of
// spaces.
//
// Examples of valid IDs:
// - http
// - http.handlers.file_server
// - caddy.logging.encoders.json
type ModuleID string
// Namespace returns the namespace (or scope) portion of a module ID,
// which is all but the last label of the ID. If the ID has only one
// label, then the namespace is empty.
func (id ModuleID) Namespace() string {
lastDot := strings.LastIndex(string(id), ".")
if lastDot < 0 {
return ""
}
return string(id)[:lastDot]
}
// Name returns the name (the last label) of a module ID.
func (id ModuleID) Name() string {
if id == "" {
return ""
}
parts := strings.Split(string(id), ".")
return parts[len(parts)-1]
}
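// To illustrate Namespace and Name:
//
//	ModuleID("http.handlers.file_server").Namespace() // "http.handlers"
//	ModuleID("http.handlers.file_server").Name()      // "file_server"
//	ModuleID("http").Namespace()                      // ""
//	ModuleID("http").Name()                           // "http"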
func (mi ModuleInfo) String() string { return string(mi.ID) }
// ModuleMap is a map that can contain multiple modules,
// where the map key is the module's name. (The namespace
// is usually read from an associated field's struct tag.)
// Because the module's name is given as the key in a
// module map, the name does not have to be given in the
// json.RawMessage.
type ModuleMap map[string]json.RawMessage
// RegisterModule registers a module by receiving a
// plain/empty value of the module. For registration to
// be properly recorded, this should be called in the
// init phase of runtime. Typically, the module package
// will do this as a side-effect of being imported.
// This function panics if the module's info is
// incomplete or invalid, or if the module is already
// registered.
func RegisterModule(instance Module) {
mod := instance.CaddyModule()
if mod.ID == "" {
panic("module ID missing")
}
if mod.ID == "caddy" || mod.ID == "admin" {
panic(fmt.Sprintf("module ID '%s' is reserved", mod.ID))
}
if mod.New == nil {
panic("missing ModuleInfo.New")
}
if val := mod.New(); val == nil {
panic("ModuleInfo.New must return a non-nil module instance")
}
modulesMu.Lock()
defer modulesMu.Unlock()
if _, ok := modules[string(mod.ID)]; ok {
panic(fmt.Sprintf("module already registered: %s", mod.ID))
}
modules[string(mod.ID)] = mod
}
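// A minimal sketch of how a plugin typically registers itself (the Gizmo
// type and the "foo.gizmo" ID are hypothetical; a real module's namespace
// must match what its host module expects):
//
//	type Gizmo struct{}
//
//	func (Gizmo) CaddyModule() caddy.ModuleInfo {
//		return caddy.ModuleInfo{
//			ID:  "foo.gizmo",
//			New: func() caddy.Module { return new(Gizmo) },
//		}
//	}
//
//	func init() { caddy.RegisterModule(Gizmo{}) }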
// GetModule returns module information from its ID (full name).
func GetModule(name string) (ModuleInfo, error) {
modulesMu.RLock()
defer modulesMu.RUnlock()
m, ok := modules[name]
if !ok {
return ModuleInfo{}, fmt.Errorf("module not registered: %s", name)
}
return m, nil
}
// GetModuleName returns a module's name (the last label of its ID)
// from an instance of its value. If the value is not a module, an
// empty string will be returned.
func GetModuleName(instance any) string {
var name string
if mod, ok := instance.(Module); ok {
name = mod.CaddyModule().ID.Name()
}
return name
}
// GetModuleID returns a module's ID from an instance of its value.
// If the value is not a module, an empty string will be returned.
func GetModuleID(instance any) string {
var id string
if mod, ok := instance.(Module); ok {
id = string(mod.CaddyModule().ID)
}
return id
}
// GetModules returns all modules in the given scope/namespace.
// For example, a scope of "foo" returns modules named "foo.bar",
// "foo.loo", but not "bar", "foo.bar.loo", etc. An empty scope
// returns top-level modules, for example "foo" or "bar". Partial
// scopes are not matched (i.e. scope "foo.ba" does not match
// name "foo.bar").
//
// Because modules are registered to a map under the hood, the
// returned slice will be sorted to keep it deterministic.
func GetModules(scope string) []ModuleInfo {
modulesMu.RLock()
defer modulesMu.RUnlock()
scopeParts := strings.Split(scope, ".")
// handle the special case of an empty scope, which
// should match only the top-level modules
if scope == "" {
scopeParts = []string{}
}
var mods []ModuleInfo
iterateModules:
for id, m := range modules {
modParts := strings.Split(id, ".")
// match only the next level of nesting
if len(modParts) != len(scopeParts)+1 {
continue
}
// specified parts must be exact matches
for i := range scopeParts {
if modParts[i] != scopeParts[i] {
continue iterateModules
}
}
mods = append(mods, m)
}
// make return value deterministic
sort.Slice(mods, func(i, j int) bool {
return mods[i].ID < mods[j].ID
})
return mods
}
// Modules returns the names of all registered modules
// in ascending lexicographical order.
func Modules() []string {
modulesMu.RLock()
defer modulesMu.RUnlock()
names := make([]string, 0, len(modules))
for name := range modules {
names = append(names, name)
}
sort.Strings(names)
return names
}
// getModuleNameInline loads the string value from raw of moduleNameKey,
// where raw must be a JSON encoding of a map. It returns that value,
// along with the result of removing that key from raw.
func getModuleNameInline(moduleNameKey string, raw json.RawMessage) (string, json.RawMessage, error) {
var tmp map[string]any
err := json.Unmarshal(raw, &tmp)
if err != nil {
return "", nil, err
}
moduleName, ok := tmp[moduleNameKey].(string)
if !ok || moduleName == "" {
return "", nil, fmt.Errorf("module name not specified with key '%s' in %+v", moduleNameKey, tmp)
}
// remove key from the object, otherwise decoding it later
// will yield an error because the struct won't recognize it
// (this is only needed because we strictly enforce that
// all keys are recognized when loading modules)
delete(tmp, moduleNameKey)
result, err := json.Marshal(tmp)
if err != nil {
return "", nil, fmt.Errorf("re-encoding module configuration: %v", err)
}
return moduleName, result, nil
}
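// For example (a sketch; "static_response" is used only as an illustrative
// inline module name):
//
//	name, rest, err := getModuleNameInline("handler", json.RawMessage(`{"handler":"static_response","body":"hi"}`))
//	// err == nil
//	// name == "static_response"
//	// rest == json.RawMessage(`{"body":"hi"}`)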
// Provisioner is implemented by modules which may need to perform
// some additional "setup" steps immediately after being loaded.
// Provisioning should be fast (imperceptible running time). If
// any side-effects result from the execution of this function
// (e.g. creating global state, other allocations that require
// garbage collection, opening files, starting goroutines, etc.),
// be sure to clean up properly by implementing the CleanerUpper
// interface to avoid leaking resources.
type Provisioner interface {
Provision(Context) error
}
// Validator is implemented by modules which can verify that their
// configurations are valid. This method will be called after
// Provision() (if implemented). Validation should always be fast
// (imperceptible running time) and an error must be returned if
// the module's configuration is invalid.
type Validator interface {
Validate() error
}
// CleanerUpper is implemented by modules which may have side-effects
// such as opened files, spawned goroutines, or allocated some sort
// of non-stack state when they were provisioned. This method should
// deallocate/cleanup those resources to prevent memory leaks. Cleanup
// should be fast and efficient. Cleanup should work even if Provision
// returns an error, to allow cleaning up from partial provisionings.
type CleanerUpper interface {
Cleanup() error
}
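// As a sketch, a hypothetical module might implement all three lifecycle
// interfaces like so (Gizmo, its fields, and the validation rule are
// illustrative only):
//
//	func (g *Gizmo) Provision(ctx caddy.Context) error {
//		g.logger = ctx.Logger()
//		return nil
//	}
//
//	func (g *Gizmo) Validate() error {
//		if g.Target == "" {
//			return fmt.Errorf("target is required")
//		}
//		return nil
//	}
//
//	func (g *Gizmo) Cleanup() error {
//		// release anything allocated in Provision
//		return nil
//	}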
// ParseStructTag parses a caddy struct tag into its keys and values.
// It is very simple. The expected syntax is:
// `caddy:"key1=val1 key2=val2 ..."`
func ParseStructTag(tag string) (map[string]string, error) {
results := make(map[string]string)
pairs := strings.Split(tag, " ")
for i, pair := range pairs {
if pair == "" {
continue
}
before, after, isCut := strings.Cut(pair, "=")
if !isCut {
return nil, fmt.Errorf("missing key in '%s' (pair %d)", pair, i)
}
results[before] = after
}
return results, nil
}
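// For example, using a tag value like the ones on BaseLog above:
//
//	tags, err := ParseStructTag("namespace=caddy.logging.writers inline_key=output")
//	// err == nil
//	// tags["namespace"] == "caddy.logging.writers"
//	// tags["inline_key"] == "output"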
// StrictUnmarshalJSON is like json.Unmarshal but returns an error
// if any of the fields are unrecognized. Useful when decoding
// module configurations, where you want to be more sure they're
// correct.
func StrictUnmarshalJSON(data []byte, v any) error {
dec := json.NewDecoder(bytes.NewReader(data))
dec.DisallowUnknownFields()
return dec.Decode(v)
}
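// For example, a sketch with a hypothetical config struct:
//
//	type gizmoConfig struct {
//		Name string `json:"name"`
//	}
//
//	var cfg gizmoConfig
//	err := StrictUnmarshalJSON([]byte(`{"name":"a","unknown":"b"}`), &cfg)
//	// err is non-nil because "unknown" is not a field of gizmoConfig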
// isJSONRawMessage returns true if the type is encoding/json.RawMessage.
func isJSONRawMessage(typ reflect.Type) bool {
return typ.PkgPath() == "encoding/json" && typ.Name() == "RawMessage"
}
// isModuleMapType returns true if the type is map[string]json.RawMessage.
// It assumes that the string key is the module name, but this is not
// always the case. To know for sure, this function must return true, but
// also the struct tag where this type appears must NOT define an inline_key
// attribute, which would mean that the module names appear inline with the
// values, not in the key.
func isModuleMapType(typ reflect.Type) bool {
return typ.Kind() == reflect.Map &&
typ.Key().Kind() == reflect.String &&
isJSONRawMessage(typ.Elem())
}
var (
modules = make(map[string]ModuleInfo)
modulesMu sync.RWMutex
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyevents
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"github.com/google/uuid"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
)
func init() {
caddy.RegisterModule(App{})
}
// App implements a global eventing system within Caddy.
// Modules can emit and subscribe to events, providing
// hooks into deep parts of the code base that aren't
// otherwise accessible. Events provide information about
// what and when things are happening, and this facility
// allows handlers to take action when events occur,
// add information to the event's metadata, and even
// control program flow in some cases.
//
// Events are propagated in a DOM-like fashion. An event
// emitted from module `a.b.c` (the "origin") will first
// invoke handlers listening to `a.b.c`, then `a.b`,
// then `a`, then those listening regardless of origin.
// If a handler returns the special error ErrAborted, then
// propagation immediately stops and the event is marked
// as aborted. Emitters may optionally choose to adjust
// program flow based on an abort.
//
// Modules can subscribe to events by origin and/or name.
// A handler is invoked only if it is subscribed to the
// event by name and origin. Subscriptions should be
// registered during the provisioning phase, before apps
// are started.
//
// Event handlers are fired synchronously as part of the
// regular flow of the program. This allows event handlers
// to control the flow of the program if the origin permits
// it and also allows handlers to convey new information
// back into the origin module before it continues.
// In essence, event handlers are similar to HTTP
// middleware handlers.
//
// Event bindings/subscribers are unordered; i.e.
// event handlers are invoked in an arbitrary order.
// Event handlers should not rely on the logic of other
// handlers to succeed.
//
// The entirety of this app module is EXPERIMENTAL and
// subject to change. Pay attention to release notes.
type App struct {
// Subscriptions bind handlers to one or more events
// either globally or scoped to specific modules or module
// namespaces.
Subscriptions []*Subscription `json:"subscriptions,omitempty"`
// Map of event name to map of module ID/namespace to handlers
subscriptions map[string]map[caddy.ModuleID][]Handler
logger *zap.Logger
started bool
}
// Subscription represents binding of one or more handlers to
// one or more events.
type Subscription struct {
// The name(s) of the event(s) to bind to. Default: all events.
Events []string `json:"events,omitempty"`
// The ID or namespace of the module(s) whose events should be
// listened for. Default: all modules.
//
// Events propagate up, so events emitted by module "a.b.c"
// will also trigger the event for "a.b" and "a". Thus, to
// receive all events from "a.b.c" and "a.b.d", for example,
// one can subscribe to either "a.b" or all of "a" entirely.
Modules []caddy.ModuleID `json:"modules,omitempty"`
// The event handler modules. These implement the actual
// behavior to invoke when an event occurs. At least one
// handler is required.
HandlersRaw []json.RawMessage `json:"handlers,omitempty" caddy:"namespace=events.handlers inline_key=handler"`
// The decoded handlers; Go code that is subscribing to
// an event should set this field directly; HandlersRaw
// is meant for JSON configuration to fill out this field.
Handlers []Handler `json:"-"`
}
// CaddyModule returns the Caddy module information.
func (App) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "events",
New: func() caddy.Module { return new(App) },
}
}
// Provision sets up the app.
func (app *App) Provision(ctx caddy.Context) error {
app.logger = ctx.Logger()
app.subscriptions = make(map[string]map[caddy.ModuleID][]Handler)
for _, sub := range app.Subscriptions {
if sub.HandlersRaw != nil {
handlersIface, err := ctx.LoadModule(sub, "HandlersRaw")
if err != nil {
return fmt.Errorf("loading event subscriber modules: %v", err)
}
for _, h := range handlersIface.([]any) {
sub.Handlers = append(sub.Handlers, h.(Handler))
}
if len(sub.Handlers) == 0 {
// pointless to bind without any handlers
return fmt.Errorf("no handlers defined")
}
}
}
return nil
}
// Start runs the app.
func (app *App) Start() error {
for _, sub := range app.Subscriptions {
if err := app.Subscribe(sub); err != nil {
return err
}
}
app.started = true
return nil
}
// Stop gracefully shuts down the app.
func (app *App) Stop() error {
return nil
}
// Subscribe binds one or more event handlers to one or more events
// according to the subscription s. For now, subscriptions can only
// be created during the provision phase; new bindings cannot be
// created after the events app has started.
func (app *App) Subscribe(s *Subscription) error {
if app.started {
return fmt.Errorf("events already started; new subscriptions closed")
}
// handle special case of catch-alls (omission of event name or module space implies all)
if len(s.Events) == 0 {
s.Events = []string{""}
}
if len(s.Modules) == 0 {
s.Modules = []caddy.ModuleID{""}
}
for _, eventName := range s.Events {
if app.subscriptions[eventName] == nil {
app.subscriptions[eventName] = make(map[caddy.ModuleID][]Handler)
}
for _, originModule := range s.Modules {
app.subscriptions[eventName][originModule] = append(app.subscriptions[eventName][originModule], s.Handlers...)
}
}
return nil
}
// On is syntactic sugar for Subscribe() that binds a single handler
// to a single event from any module. If eventName is the empty string,
// the handler is bound to all events.
func (app *App) On(eventName string, handler Handler) error {
return app.Subscribe(&Subscription{
Events: []string{eventName},
Handlers: []Handler{handler},
})
}
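// A sketch of subscribing from Go code during another module's provisioning
// (printHandler is hypothetical; "cert_obtained" is used only as an example
// event name):
//
//	type printHandler struct{}
//
//	func (printHandler) Handle(_ context.Context, e caddyevents.Event) error {
//		fmt.Println("event fired:", e.Name())
//		return nil
//	}
//
//	// inside some module's Provision(ctx caddy.Context):
//	eventsAppIface, err := ctx.App("events")
//	if err != nil {
//		return err
//	}
//	err = eventsAppIface.(*caddyevents.App).On("cert_obtained", printHandler{})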
// Emit creates and dispatches an event named eventName to all relevant handlers with
// the metadata data. Events are emitted and propagated synchronously. The returned Event
// value will have any additional information from the invoked handlers.
//
// Note that the data map is not copied, for efficiency. After Emit() is called, the
// data passed in should not be changed in other goroutines.
func (app *App) Emit(ctx caddy.Context, eventName string, data map[string]any) Event {
logger := app.logger.With(zap.String("name", eventName))
id, err := uuid.NewRandom()
if err != nil {
logger.Error("failed generating new event ID", zap.Error(err))
}
eventName = strings.ToLower(eventName)
e := Event{
Data: data,
id: id,
ts: time.Now(),
name: eventName,
origin: ctx.Module(),
}
logger = logger.With(
zap.String("id", e.id.String()),
zap.String("origin", e.origin.CaddyModule().String()))
// add event info to replacer, make sure it's in the context
repl, ok := ctx.Context.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
if !ok {
repl = caddy.NewReplacer()
ctx.Context = context.WithValue(ctx.Context, caddy.ReplacerCtxKey, repl)
}
repl.Map(func(key string) (any, bool) {
switch key {
case "event":
return e, true
case "event.id":
return e.id, true
case "event.name":
return e.name, true
case "event.time":
return e.ts, true
case "event.time_unix":
return e.ts.UnixMilli(), true
case "event.module":
return e.origin.CaddyModule().ID, true
case "event.data":
return e.Data, true
}
if strings.HasPrefix(key, "event.data.") {
key = strings.TrimPrefix(key, "event.data.")
if val, ok := e.Data[key]; ok {
return val, true
}
}
return nil, false
})
logger = logger.With(zap.Any("data", e.Data))
logger.Debug("event")
// invoke handlers bound to the event by name and also all events; this for loop
// iterates twice at most: once for the event name, once for "" (all events)
for {
moduleID := e.origin.CaddyModule().ID
// implement propagation up the module tree (i.e. start with "a.b.c" then "a.b" then "a" then "")
for {
if app.subscriptions[eventName] == nil {
break // shortcut if event not bound at all
}
for _, handler := range app.subscriptions[eventName][moduleID] {
select {
case <-ctx.Done():
logger.Error("context canceled; event handling stopped")
return e
default:
}
// this log can be a useful sanity check to ensure your handlers are in fact being invoked
// (see https://github.com/mholt/caddy-events-exec/issues/6)
logger.Debug("invoking subscribed handler",
zap.String("subscribed_to", eventName),
zap.Any("handler", handler))
if err := handler.Handle(ctx, e); err != nil {
aborted := errors.Is(err, ErrAborted)
logger.Error("handler error",
zap.Error(err),
zap.Bool("aborted", aborted))
if aborted {
e.Aborted = err
return e
}
}
}
if moduleID == "" {
break
}
lastDot := strings.LastIndex(string(moduleID), ".")
if lastDot < 0 {
moduleID = "" // include handlers bound to events regardless of module
} else {
moduleID = moduleID[:lastDot]
}
}
// include handlers listening to all events
if eventName == "" {
break
}
eventName = ""
}
return e
}
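// Emitting is sketched below (the event name and data are illustrative):
//
//	e := eventsApp.Emit(ctx, "gizmo_started", map[string]any{"gizmo": "example"})
//	if e.Aborted != nil {
//		// a handler aborted the event; adjust program flow if appropriate
//	}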
// Event represents something that has happened or is happening.
// An Event value is not synchronized, so it should be copied if
// being used in goroutines.
//
// EXPERIMENTAL: As with the rest of this package, events are
// subject to change.
type Event struct {
// If non-nil, the event has been aborted, meaning
// propagation has stopped to other handlers and
// the code should stop what it was doing. Emitters
// may choose to use this as a signal to adjust their
// code path appropriately.
Aborted error
// The data associated with the event. Usually the
// original emitter will be the only one to set or
// change these values, but the field is exported
// so handlers can have full access if needed.
// However, this map is not synchronized, so
// handlers must not use this map directly in new
// goroutines; instead, copy the map to use it in a
// goroutine.
Data map[string]any
id uuid.UUID
ts time.Time
name string
origin caddy.Module
}
func (e Event) ID() uuid.UUID { return e.id }
func (e Event) Timestamp() time.Time { return e.ts }
func (e Event) Name() string { return e.name }
func (e Event) Origin() caddy.Module { return e.origin }
// CloudEvent exports event e as a structure that, when
// serialized as JSON, is compatible with the
// CloudEvents spec.
func (e Event) CloudEvent() CloudEvent {
dataJSON, _ := json.Marshal(e.Data)
return CloudEvent{
ID: e.id.String(),
Source: e.origin.CaddyModule().String(),
SpecVersion: "1.0",
Type: e.name,
Time: e.ts,
DataContentType: "application/json",
Data: dataJSON,
}
}
// CloudEvent is a JSON-serializable structure that
// is compatible with the CloudEvents specification.
// See https://cloudevents.io.
type CloudEvent struct {
ID string `json:"id"`
Source string `json:"source"`
SpecVersion string `json:"specversion"`
Type string `json:"type"`
Time time.Time `json:"time"`
DataContentType string `json:"datacontenttype,omitempty"`
Data json.RawMessage `json:"data,omitempty"`
}
// ErrAborted cancels an event.
var ErrAborted = errors.New("event aborted")
// Handler is a type that can handle events.
type Handler interface {
Handle(context.Context, Event) error
}
// Interface guards
var (
_ caddy.App = (*App)(nil)
_ caddy.Provisioner = (*App)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"context"
"crypto/tls"
"fmt"
"net"
"net/http"
"strconv"
"sync"
"time"
"go.uber.org/zap"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/modules/caddyevents"
"github.com/caddyserver/caddy/v2/modules/caddytls"
)
func init() {
caddy.RegisterModule(App{})
}
// App is a robust, production-ready HTTP server.
//
// HTTPS is enabled by default if host matchers with qualifying names are used
// in any of the routes; certificates are automatically provisioned and renewed.
// Additionally, automatic HTTPS will also enable HTTPS for servers that listen
// only on the HTTPS port but which do not have any TLS connection policies
// defined by adding a good, default TLS connection policy.
//
// In HTTP routes, additional placeholders are available (replace any `*`):
//
// Placeholder | Description
// ------------|---------------
// `{http.request.body}` | The request body (⚠️ inefficient; use only for debugging)
// `{http.request.cookie.*}` | HTTP request cookie
// `{http.request.duration}` | Time up to now spent handling the request (after decoding headers from client)
// `{http.request.duration_ms}` | Same as 'duration', but in milliseconds.
// `{http.request.uuid}` | The request unique identifier
// `{http.request.header.*}` | Specific request header field
// `{http.request.host}` | The host part of the request's Host header
// `{http.request.host.labels.*}` | Request host labels (0-based from right); e.g. for foo.example.com: 0=com, 1=example, 2=foo
// `{http.request.hostport}` | The host and port from the request's Host header
// `{http.request.method}` | The request method
// `{http.request.orig_method}` | The request's original method
// `{http.request.orig_uri}` | The request's original URI
// `{http.request.orig_uri.path}` | The request's original path
// `{http.request.orig_uri.path.*}` | Parts of the original path, split by `/` (0-based from left)
// `{http.request.orig_uri.path.dir}` | The request's original directory
// `{http.request.orig_uri.path.file}` | The request's original filename
// `{http.request.orig_uri.query}` | The request's original query string (without `?`)
// `{http.request.port}` | The port part of the request's Host header
// `{http.request.proto}` | The protocol of the request
// `{http.request.local.host}` | The host (IP) part of the local address the connection arrived on
// `{http.request.local.port}` | The port part of the local address the connection arrived on
// `{http.request.local}` | The local address the connection arrived on
// `{http.request.remote.host}` | The host (IP) part of the remote client's address
// `{http.request.remote.port}` | The port part of the remote client's address
// `{http.request.remote}` | The address of the remote client
// `{http.request.scheme}` | The request scheme, typically `http` or `https`
// `{http.request.tls.version}` | The TLS version name
// `{http.request.tls.cipher_suite}` | The TLS cipher suite
// `{http.request.tls.resumed}` | The TLS connection resumed a previous connection
// `{http.request.tls.proto}` | The negotiated next protocol
// `{http.request.tls.proto_mutual}` | The negotiated next protocol was advertised by the server
// `{http.request.tls.server_name}` | The server name requested by the client, if any
// `{http.request.tls.client.fingerprint}` | The SHA256 checksum of the client certificate
// `{http.request.tls.client.public_key}` | The public key of the client certificate.
// `{http.request.tls.client.public_key_sha256}` | The SHA256 checksum of the client's public key.
// `{http.request.tls.client.certificate_pem}` | The PEM-encoded value of the certificate.
// `{http.request.tls.client.certificate_der_base64}` | The base64-encoded value of the certificate.
// `{http.request.tls.client.issuer}` | The issuer DN of the client certificate
// `{http.request.tls.client.serial}` | The serial number of the client certificate
// `{http.request.tls.client.subject}` | The subject DN of the client certificate
// `{http.request.tls.client.san.dns_names.*}` | SAN DNS names (index optional)
// `{http.request.tls.client.san.emails.*}` | SAN email addresses (index optional)
// `{http.request.tls.client.san.ips.*}` | SAN IP addresses (index optional)
// `{http.request.tls.client.san.uris.*}` | SAN URIs (index optional)
// `{http.request.uri}` | The full request URI
// `{http.request.uri.path}` | The path component of the request URI
// `{http.request.uri.path.*}` | Parts of the path, split by `/` (0-based from left)
// `{http.request.uri.path.dir}` | The directory, excluding leaf filename
// `{http.request.uri.path.file}` | The filename of the path, excluding directory
// `{http.request.uri.query}` | The query string (without `?`)
// `{http.request.uri.query.*}` | Individual query string value
// `{http.response.header.*}` | Specific response header field
// `{http.vars.*}` | Custom variables in the HTTP handler chain
// `{http.shutting_down}` | True if the HTTP app is shutting down
// `{http.time_until_shutdown}` | Time until HTTP server shutdown, if scheduled
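//
// For example, a couple of these placeholders could be echoed back to the
// client by a static_response handler (an illustrative config snippet):
//
//	{
//		"handler": "static_response",
//		"body": "you asked for {http.request.uri.path} from {http.request.remote.host}"
//	}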
type App struct {
// HTTPPort specifies the port to use for HTTP (as opposed to HTTPS),
// which is used when setting up HTTP->HTTPS redirects or ACME HTTP
// challenge solvers. Default: 80.
HTTPPort int `json:"http_port,omitempty"`
// HTTPSPort specifies the port to use for HTTPS, which is used when
// solving the ACME TLS-ALPN challenges, or whenever HTTPS is needed
// but no specific port number is given. Default: 443.
HTTPSPort int `json:"https_port,omitempty"`
// GracePeriod is how long to wait for active connections when shutting
// down the servers. During the grace period, no new connections are
// accepted, idle connections are closed, and active connections will
// be given the full length of time to become idle and close.
// Once the grace period is over, connections will be forcefully closed.
// If zero, the grace period is eternal. Default: 0.
GracePeriod caddy.Duration `json:"grace_period,omitempty"`
// ShutdownDelay is how long to wait before initiating the grace
// period. When this app is stopping (e.g. during a config reload or
// process exit), all servers will be shut down. Normally this immediately
// initiates the grace period. However, if this delay is configured, servers
// will not be shut down until the delay is over. During this time, servers
// continue to function normally and allow new connections. At the end, the
// grace period will begin. This can be useful to allow downstream load
// balancers time to move this instance out of the rotation without hiccups.
//
// When shutdown has been scheduled, placeholders {http.shutting_down} (bool)
// and {http.time_until_shutdown} (duration) may be useful for health checks.
ShutdownDelay caddy.Duration `json:"shutdown_delay,omitempty"`
// Servers is the list of servers, keyed by arbitrary names chosen
// at your discretion for your own convenience; the keys do not
// affect functionality.
Servers map[string]*Server `json:"servers,omitempty"`
ctx caddy.Context
logger *zap.Logger
tlsApp *caddytls.TLS
// used temporarily between phases 1 and 2 of auto HTTPS
allCertDomains []string
}
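// The following is an illustrative sketch of an `apps > http` configuration
// using some of the fields above (the server name "srv0" and the listed
// values are arbitrary):
//
//	{
//		"http_port": 8080,
//		"grace_period": "10s",
//		"shutdown_delay": "30s",
//		"servers": {
//			"srv0": {
//				"listen": [":443"],
//				"routes": []
//			}
//		}
//	}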
// CaddyModule returns the Caddy module information.
func (App) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http",
New: func() caddy.Module { return new(App) },
}
}
// Provision sets up the app.
func (app *App) Provision(ctx caddy.Context) error {
// store some references
tlsAppIface, err := ctx.App("tls")
if err != nil {
return fmt.Errorf("getting tls app: %v", err)
}
app.tlsApp = tlsAppIface.(*caddytls.TLS)
app.ctx = ctx
app.logger = ctx.Logger()
eventsAppIface, err := ctx.App("events")
if err != nil {
return fmt.Errorf("getting events app: %v", err)
}
repl := caddy.NewReplacer()
// this provisions the matchers for each route,
// and prepares auto HTTP->HTTPS redirects, and
// is required before we provision each server
err = app.automaticHTTPSPhase1(ctx, repl)
if err != nil {
return err
}
// prepare each server
oldContext := ctx.Context
for srvName, srv := range app.Servers {
ctx.Context = context.WithValue(oldContext, ServerCtxKey, srv)
srv.name = srvName
srv.tlsApp = app.tlsApp
srv.events = eventsAppIface.(*caddyevents.App)
srv.ctx = ctx
srv.logger = app.logger.Named("log")
srv.errorLogger = app.logger.Named("log.error")
srv.shutdownAtMu = new(sync.RWMutex)
// only enable access logs if configured
if srv.Logs != nil {
srv.accessLogger = app.logger.Named("log.access")
if srv.Logs.Trace {
srv.traceLogger = app.logger.Named("log.trace")
}
}
// the Go standard library does not let us serve only HTTP/2 using
// http.Server; we would probably need to write our own server
if !srv.protocol("h1") && (srv.protocol("h2") || srv.protocol("h2c")) {
return fmt.Errorf("server %s: cannot enable HTTP/2 or H2C without enabling HTTP/1.1; add h1 to protocols or remove h2/h2c", srvName)
}
// if no protocols configured explicitly, enable all except h2c
if len(srv.Protocols) == 0 {
srv.Protocols = []string{"h1", "h2", "h3"}
}
// if not explicitly configured by the user, disallow TLS
// client auth bypass (domain fronting) which could
// otherwise be exploited by sending an unprotected SNI
// value during a TLS handshake, then putting a protected
// domain in the Host header after establishing connection;
// this is a safe default, but we allow users to override
// it for example in the case of running a proxy where
// domain fronting is desired and access is not restricted
// based on hostname
if srv.StrictSNIHost == nil && srv.hasTLSClientAuth() {
app.logger.Warn("enabling strict SNI-Host enforcement because TLS client auth is configured",
zap.String("server_id", srvName))
trueBool := true
srv.StrictSNIHost = &trueBool
}
// set up the trusted proxies source
for srv.TrustedProxiesRaw != nil {
val, err := ctx.LoadModule(srv, "TrustedProxiesRaw")
if err != nil {
return fmt.Errorf("loading trusted proxies modules: %v", err)
}
srv.trustedProxies = val.(IPRangeSource)
}
// set the default client IP header to read from
if srv.ClientIPHeaders == nil {
srv.ClientIPHeaders = []string{"X-Forwarded-For"}
}
// process each listener address
for i := range srv.Listen {
lnOut, err := repl.ReplaceOrErr(srv.Listen[i], true, true)
if err != nil {
return fmt.Errorf("server %s, listener %d: %v", srvName, i, err)
}
srv.Listen[i] = lnOut
}
// set up each listener modifier
if srv.ListenerWrappersRaw != nil {
vals, err := ctx.LoadModule(srv, "ListenerWrappersRaw")
if err != nil {
return fmt.Errorf("loading listener wrapper modules: %v", err)
}
var hasTLSPlaceholder bool
for i, val := range vals.([]any) {
if _, ok := val.(*tlsPlaceholderWrapper); ok {
if i == 0 {
// putting the tls placeholder wrapper first is nonsensical because
// that is the default, implicit setting: without it, all wrappers
// will go after the TLS listener anyway
return fmt.Errorf("it is unnecessary to specify the TLS listener wrapper in the first position because that is the default")
}
if hasTLSPlaceholder {
return fmt.Errorf("TLS listener wrapper can only be specified once")
}
hasTLSPlaceholder = true
}
srv.listenerWrappers = append(srv.listenerWrappers, val.(caddy.ListenerWrapper))
}
// if any wrappers were configured but the TLS placeholder wrapper is
// absent, prepend it so all defined wrappers come after the TLS
// handshake; this simplifies logic when starting the server, since we
// can simply assume the TLS placeholder will always be there
if !hasTLSPlaceholder && len(srv.listenerWrappers) > 0 {
srv.listenerWrappers = append([]caddy.ListenerWrapper{new(tlsPlaceholderWrapper)}, srv.listenerWrappers...)
}
}
// pre-compile the primary handler chain, and be sure to wrap it in our
// route handler so that important security checks are done, etc.
primaryRoute := emptyHandler
if srv.Routes != nil {
err := srv.Routes.ProvisionHandlers(ctx, srv.Metrics)
if err != nil {
return fmt.Errorf("server %s: setting up route handlers: %v", srvName, err)
}
primaryRoute = srv.Routes.Compile(emptyHandler)
}
srv.primaryHandlerChain = srv.wrapPrimaryRoute(primaryRoute)
// pre-compile the error handler chain
if srv.Errors != nil {
err := srv.Errors.Routes.Provision(ctx)
if err != nil {
return fmt.Errorf("server %s: setting up error handling routes: %v", srvName, err)
}
srv.errorHandlerChain = srv.Errors.Routes.Compile(errorEmptyHandler)
}
// provision the named routes (they get compiled at runtime)
for name, route := range srv.NamedRoutes {
err := route.Provision(ctx, srv.Metrics)
if err != nil {
return fmt.Errorf("server %s: setting up named route '%s' handlers: %v", name, srvName, err)
}
}
// prepare the TLS connection policies
err = srv.TLSConnPolicies.Provision(ctx)
if err != nil {
return fmt.Errorf("server %s: setting up TLS connection policies: %v", srvName, err)
}
// if there is no idle timeout, set a sane default; users have complained
// before that aggressive CDNs leave connections open until the server
// closes them, so if we don't close them it leads to resource exhaustion
if srv.IdleTimeout == 0 {
srv.IdleTimeout = defaultIdleTimeout
}
}
ctx.Context = oldContext
return nil
}
// Validate ensures the app's configuration is valid.
func (app *App) Validate() error {
lnAddrs := make(map[string]string)
for srvName, srv := range app.Servers {
// each server must use distinct listener addresses
for _, addr := range srv.Listen {
listenAddr, err := caddy.ParseNetworkAddress(addr)
if err != nil {
return fmt.Errorf("invalid listener address '%s': %v", addr, err)
}
// check that every address in the port range is unique to this server;
// we do not use <= here because PortRangeSize() adds 1 to EndPort for us
for i := uint(0); i < listenAddr.PortRangeSize(); i++ {
addr := caddy.JoinNetworkAddress(listenAddr.Network, listenAddr.Host, strconv.Itoa(int(listenAddr.StartPort+i)))
if sn, ok := lnAddrs[addr]; ok {
return fmt.Errorf("server %s: listener address repeated: %s (already claimed by server '%s')", srvName, addr, sn)
}
lnAddrs[addr] = srvName
}
}
// logger names must not have ports
if srv.Logs != nil {
for host := range srv.Logs.LoggerNames {
if _, _, err := net.SplitHostPort(host); err == nil {
return fmt.Errorf("server %s: logger name must not have a port: %s", srvName, host)
}
}
}
}
return nil
}
// Start runs the app. It finishes automatic HTTPS if enabled,
// including management of certificates.
func (app *App) Start() error {
// get a logger compatible with http.Server
serverLogger, err := zap.NewStdLogAt(app.logger.Named("stdlib"), zap.DebugLevel)
if err != nil {
return fmt.Errorf("failed to set up server logger: %v", err)
}
for srvName, srv := range app.Servers {
srv.server = &http.Server{
ReadTimeout: time.Duration(srv.ReadTimeout),
ReadHeaderTimeout: time.Duration(srv.ReadHeaderTimeout),
WriteTimeout: time.Duration(srv.WriteTimeout),
IdleTimeout: time.Duration(srv.IdleTimeout),
MaxHeaderBytes: srv.MaxHeaderBytes,
Handler: srv,
ErrorLog: serverLogger,
ConnContext: func(ctx context.Context, c net.Conn) context.Context {
return context.WithValue(ctx, ConnCtxKey, c)
},
}
h2server := new(http2.Server)
// disable HTTP/2, which we enabled by default during provisioning
if !srv.protocol("h2") {
srv.server.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler))
for _, cp := range srv.TLSConnPolicies {
// the TLSConfig was already provisioned, so... manually remove it
for i, np := range cp.TLSConfig.NextProtos {
if np == "h2" {
cp.TLSConfig.NextProtos = append(cp.TLSConfig.NextProtos[:i], cp.TLSConfig.NextProtos[i+1:]...)
break
}
}
// remove it from the parent connection policy too, just to keep things tidy
for i, alpn := range cp.ALPN {
if alpn == "h2" {
cp.ALPN = append(cp.ALPN[:i], cp.ALPN[i+1:]...)
break
}
}
}
} else {
//nolint:errcheck
http2.ConfigureServer(srv.server, h2server)
}
// this TLS config is used by the std lib to choose the actual TLS config for connections
// by looking through the connection policies to find the first one that matches
tlsCfg := srv.TLSConnPolicies.TLSConfig(app.ctx)
srv.configureServer(srv.server)
// enable H2C if configured
if srv.protocol("h2c") {
srv.server.Handler = h2c.NewHandler(srv, h2server)
}
for _, lnAddr := range srv.Listen {
listenAddr, err := caddy.ParseNetworkAddress(lnAddr)
if err != nil {
return fmt.Errorf("%s: parsing listen address '%s': %v", srvName, lnAddr, err)
}
srv.addresses = append(srv.addresses, listenAddr)
for portOffset := uint(0); portOffset < listenAddr.PortRangeSize(); portOffset++ {
// create the listener for this socket
hostport := listenAddr.JoinHostPort(portOffset)
lnAny, err := listenAddr.Listen(app.ctx, portOffset, net.ListenConfig{KeepAlive: time.Duration(srv.KeepAliveInterval)})
if err != nil {
return fmt.Errorf("listening on %s: %v", listenAddr.At(portOffset), err)
}
ln := lnAny.(net.Listener)
// wrap listener before TLS (up to the TLS placeholder wrapper)
var lnWrapperIdx int
for i, lnWrapper := range srv.listenerWrappers {
if _, ok := lnWrapper.(*tlsPlaceholderWrapper); ok {
lnWrapperIdx = i + 1 // mark the next wrapper's spot
break
}
ln = lnWrapper.WrapListener(ln)
}
// enable TLS if there is a policy and if this is not the HTTP port
useTLS := len(srv.TLSConnPolicies) > 0 && int(listenAddr.StartPort+portOffset) != app.httpPort()
if useTLS {
// create TLS listener - this enables and terminates TLS
ln = tls.NewListener(ln, tlsCfg)
// enable HTTP/3 if configured
if srv.protocol("h3") {
// Can't serve HTTP/3 on the same socket as HTTP/1 and 2 because it uses
// a different transport mechanism... which is fine, but the OS doesn't
// differentiate between a SOCK_STREAM file and a SOCK_DGRAM file; they
// are still one file on the system. So even though "unixpacket" and
// "unixgram" are different network types just as "tcp" and "udp" are,
// the OS will not let us use the same file as both STREAM and DGRAM.
if len(srv.Protocols) > 1 && listenAddr.IsUnixNetwork() {
app.logger.Warn("HTTP/3 disabled because Unix can't multiplex STREAM and DGRAM on same socket",
zap.String("file", hostport))
for i := range srv.Protocols {
if srv.Protocols[i] == "h3" {
srv.Protocols = append(srv.Protocols[:i], srv.Protocols[i+1:]...)
break
}
}
} else {
app.logger.Info("enabling HTTP/3 listener", zap.String("addr", hostport))
if err := srv.serveHTTP3(listenAddr.At(portOffset), tlsCfg); err != nil {
return err
}
}
}
}
// finish wrapping listener where we left off before TLS
for i := lnWrapperIdx; i < len(srv.listenerWrappers); i++ {
ln = srv.listenerWrappers[i].WrapListener(ln)
}
// when TLS is in use, wrap the listener so that HTTP/2 connections can be tracked and gracefully shut down later
if useTLS {
http2lnWrapper := &http2Listener{
Listener: ln,
server: srv.server,
h2server: h2server,
}
srv.h2listeners = append(srv.h2listeners, http2lnWrapper)
ln = http2lnWrapper
}
// if binding to port 0, the OS chooses a port for us;
// but the user won't know the port unless we print it
if !listenAddr.IsUnixNetwork() && listenAddr.StartPort == 0 && listenAddr.EndPort == 0 {
app.logger.Info("port 0 listener",
zap.String("input_address", lnAddr),
zap.String("actual_address", ln.Addr().String()))
}
app.logger.Debug("starting server loop",
zap.String("address", ln.Addr().String()),
zap.Bool("tls", useTLS),
zap.Bool("http3", srv.h3server != nil))
srv.listeners = append(srv.listeners, ln)
// enable HTTP/1 if configured
if srv.protocol("h1") {
//nolint:errcheck
go srv.server.Serve(ln)
}
}
}
srv.logger.Info("server running",
zap.String("name", srvName),
zap.Strings("protocols", srv.Protocols))
}
// finish automatic HTTPS by finally beginning
// certificate management
err = app.automaticHTTPSPhase2()
if err != nil {
return fmt.Errorf("finalizing automatic HTTPS: %v", err)
}
return nil
}
// Stop gracefully shuts down the HTTP server.
func (app *App) Stop() error {
ctx := context.Background()
// see if any listeners in our config will be closing or if they are continuing
// through a reload; because if any are closing, we will enforce shutdown delay
var delay bool
scheduledTime := time.Now().Add(time.Duration(app.ShutdownDelay))
if app.ShutdownDelay > 0 {
for _, server := range app.Servers {
for _, na := range server.addresses {
for _, addr := range na.Expand() {
if caddy.ListenerUsage(addr.Network, addr.JoinHostPort(0)) < 2 {
app.logger.Debug("listener closing and shutdown delay is configured", zap.String("address", addr.String()))
server.shutdownAtMu.Lock()
server.shutdownAt = scheduledTime
server.shutdownAtMu.Unlock()
delay = true
} else {
app.logger.Debug("shutdown delay configured but listener will remain open", zap.String("address", addr.String()))
}
}
}
}
}
// honor scheduled/delayed shutdown time
if delay {
app.logger.Info("shutdown scheduled",
zap.Duration("delay_duration", time.Duration(app.ShutdownDelay)),
zap.Time("time", scheduledTime))
time.Sleep(time.Duration(app.ShutdownDelay))
}
// enforce grace period if configured
if app.GracePeriod > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, time.Duration(app.GracePeriod))
defer cancel()
app.logger.Info("servers shutting down; grace period initiated", zap.Duration("duration", time.Duration(app.GracePeriod)))
} else {
app.logger.Info("servers shutting down with eternal grace period")
}
// goroutines aren't guaranteed to be scheduled right away,
// so we'll use one WaitGroup to wait for all the goroutines
// to start their server shutdowns, and another to wait for
// them to finish; we'll always block for them to start so
// that when we return the caller can be confident* that the
// old servers are no longer accepting new connections
// (* the scheduler might still pause them right before
// calling Shutdown(), but it's unlikely)
var startedShutdown, finishedShutdown sync.WaitGroup
// these will run in goroutines
stopServer := func(server *Server) {
defer finishedShutdown.Done()
startedShutdown.Done()
if err := server.server.Shutdown(ctx); err != nil {
app.logger.Error("server shutdown",
zap.Error(err),
zap.Strings("addresses", server.Listen))
}
}
stopH3Server := func(server *Server) {
defer finishedShutdown.Done()
startedShutdown.Done()
if server.h3server == nil {
return
}
// Close the h3server first, then close the listeners, unlike the stdlib, for two reasons:
// 1. UDP has only a single socket; once it is closed, no more data can be read or
// written. In contrast, closing TCP listeners does not affect established connections.
// This matters for graceful shutdown once it is implemented upstream.
// 2. The h3server only closes listeners that are registered with it (QUIC listeners).
// If the listeners were closed first, any not yet registered would never be closed,
// since caddy distinguishes the QUIC listener from the underlying datagram socket.
// TODO: CloseGracefully, once implemented upstream (see https://github.com/quic-go/quic-go/issues/2103)
if err := server.h3server.Close(); err != nil {
app.logger.Error("HTTP/3 server shutdown",
zap.Error(err),
zap.Strings("addresses", server.Listen))
}
}
stopH2Listener := func(server *Server) {
defer finishedShutdown.Done()
startedShutdown.Done()
for i, s := range server.h2listeners {
if err := s.Shutdown(ctx); err != nil {
app.logger.Error("http2 listener shutdown",
zap.Error(err),
zap.Int("index", i))
}
}
}
for _, server := range app.Servers {
startedShutdown.Add(3)
finishedShutdown.Add(3)
go stopServer(server)
go stopH3Server(server)
go stopH2Listener(server)
}
// block until all the goroutines have been run by the scheduler;
// this means that they have likely called Shutdown() by now
startedShutdown.Wait()
// if the process is exiting, we need to block here and wait
// for the grace periods to complete, otherwise the process will
// terminate before the servers are finished shutting down; but
// we don't really need to wait for the grace period to finish
// if the process isn't exiting (but note that frequent config
// reloads with long grace periods for a sustained length of time
// may deplete resources)
if caddy.Exiting() {
finishedShutdown.Wait()
}
// run stop callbacks now that the server shutdowns are complete
for name, s := range app.Servers {
for _, stopHook := range s.onStopFuncs {
if err := stopHook(ctx); err != nil {
app.logger.Error("server stop hook", zap.String("server", name), zap.Error(err))
}
}
}
return nil
}
func (app *App) httpPort() int {
if app.HTTPPort == 0 {
return DefaultHTTPPort
}
return app.HTTPPort
}
func (app *App) httpsPort() int {
if app.HTTPSPort == 0 {
return DefaultHTTPSPort
}
return app.HTTPSPort
}
// defaultIdleTimeout is the default HTTP server timeout
// for closing idle connections; useful to avoid resource
// exhaustion behind hungry CDNs, for example (we've had
// several complaints without this).
const defaultIdleTimeout = caddy.Duration(5 * time.Minute)
// Interface guards
var (
_ caddy.App = (*App)(nil)
_ caddy.Provisioner = (*App)(nil)
_ caddy.Validator = (*App)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"fmt"
"net/http"
"strconv"
"strings"
"github.com/caddyserver/certmagic"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/modules/caddytls"
)
// AutoHTTPSConfig is used to disable automatic HTTPS
// or certain aspects of it for a specific server.
// HTTPS is enabled automatically and by default when
// qualifying hostnames are available from the config.
type AutoHTTPSConfig struct {
// If true, automatic HTTPS will be entirely disabled,
// including certificate management and redirects.
Disabled bool `json:"disable,omitempty"`
// If true, only automatic HTTP->HTTPS redirects will
// be disabled, but other auto-HTTPS features will
// remain enabled.
DisableRedir bool `json:"disable_redirects,omitempty"`
// If true, automatic certificate management will be
// disabled, but other auto-HTTPS features will
// remain enabled.
DisableCerts bool `json:"disable_certificates,omitempty"`
// Hosts/domain names listed here will not be included
// in automatic HTTPS (they will not have certificates
// loaded nor redirects applied).
Skip []string `json:"skip,omitempty"`
// Hosts/domain names listed here will still be enabled
// for automatic HTTPS (unless in the Skip list), except
// that certificates will not be provisioned and managed
// for these names.
SkipCerts []string `json:"skip_certificates,omitempty"`
// By default, automatic HTTPS will obtain and renew
// certificates for qualifying hostnames. However, if
// a certificate with a matching SAN is already loaded
// into the cache, certificate management will not be
// enabled. To force automated certificate management
// regardless of loaded certificates, set this to true.
IgnoreLoadedCerts bool `json:"ignore_loaded_certificates,omitempty"`
}
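// As an illustrative sketch, a server could relax automatic HTTPS like this
// (the server name and hostname are arbitrary):
//
//	"srv0": {
//		"listen": [":443"],
//		"automatic_https": {
//			"disable_redirects": true,
//			"skip": ["internal.example.com"]
//		}
//	}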
// Skipped returns true if name is in skipSlice, which
// should be either the Skip or SkipCerts field on ahc.
func (ahc AutoHTTPSConfig) Skipped(name string, skipSlice []string) bool {
for _, n := range skipSlice {
if name == n {
return true
}
}
return false
}
// automaticHTTPSPhase1 provisions all route matchers, determines
// which domain names found in the routes qualify for automatic
// HTTPS, and sets up HTTP->HTTPS redirects. This phase must occur
// at the beginning of provisioning, because it may add routes and
// even servers to the app, which still need to be set up with the
// rest of them during provisioning.
func (app *App) automaticHTTPSPhase1(ctx caddy.Context, repl *caddy.Replacer) error {
logger := app.logger.Named("auto_https")
// this map acts as a set to store the domain names
// for which we will manage certificates automatically
uniqueDomainsForCerts := make(map[string]struct{})
// this maps domain names for automatic HTTP->HTTPS
// redirects to their destination server addresses
// (there might be more than 1 if bind is used; see
// https://github.com/caddyserver/caddy/issues/3443)
redirDomains := make(map[string][]caddy.NetworkAddress)
// the log configuration for an HTTPS enabled server
var logCfg *ServerLogConfig
for srvName, srv := range app.Servers {
// as a prerequisite, provision route matchers; this is
// required for all routes on all servers, and must be
// done before we attempt to do phase 1 of auto HTTPS,
// since we have to access the decoded host matchers
// (the handlers themselves will be provisioned later)
if srv.Routes != nil {
err := srv.Routes.ProvisionMatchers(ctx)
if err != nil {
return fmt.Errorf("server %s: setting up route matchers: %v", srvName, err)
}
}
// prepare for automatic HTTPS
if srv.AutoHTTPS == nil {
srv.AutoHTTPS = new(AutoHTTPSConfig)
}
if srv.AutoHTTPS.Disabled {
logger.Info("automatic HTTPS is completely disabled for server", zap.String("server_name", srvName))
continue
}
// skip if all listeners use the HTTP port
if !srv.listenersUseAnyPortOtherThan(app.httpPort()) {
logger.Warn("server is listening only on the HTTP port, so no automatic HTTPS will be applied to this server",
zap.String("server_name", srvName),
zap.Int("http_port", app.httpPort()),
)
srv.AutoHTTPS.Disabled = true
continue
}
// if all listeners are on the HTTPS port, make sure
// there is at least one TLS connection policy; it
// should be obvious that they want to use TLS without
// needing to specify one empty policy to enable it
if srv.TLSConnPolicies == nil &&
!srv.listenersUseAnyPortOtherThan(app.httpsPort()) {
logger.Info("server is listening only on the HTTPS port but has no TLS connection policies; adding one to enable TLS",
zap.String("server_name", srvName),
zap.Int("https_port", app.httpsPort()),
)
srv.TLSConnPolicies = caddytls.ConnectionPolicies{new(caddytls.ConnectionPolicy)}
}
// find all qualifying domain names (deduplicated) in this server
// (this is where we need the provisioned, decoded request matchers)
serverDomainSet := make(map[string]struct{})
for routeIdx, route := range srv.Routes {
for matcherSetIdx, matcherSet := range route.MatcherSets {
for matcherIdx, m := range matcherSet {
if hm, ok := m.(*MatchHost); ok {
for hostMatcherIdx, d := range *hm {
var err error
d, err = repl.ReplaceOrErr(d, true, false)
if err != nil {
return fmt.Errorf("%s: route %d, matcher set %d, matcher %d, host matcher %d: %v",
srvName, routeIdx, matcherSetIdx, matcherIdx, hostMatcherIdx, err)
}
if !srv.AutoHTTPS.Skipped(d, srv.AutoHTTPS.Skip) {
serverDomainSet[d] = struct{}{}
}
}
}
}
}
}
// nothing more to do here if there are no domains that qualify for
// automatic HTTPS and there are no explicit TLS connection policies:
// if there is at least one domain but no TLS conn policy (F&&T), we'll
// add one below; if there are no domains but at least one TLS conn
// policy (meaning TLS is enabled) (T&&F), it could be a catch-all with
// on-demand TLS -- and in that case we would still need HTTP->HTTPS
// redirects, which we set up below; hence these two conditions
if len(serverDomainSet) == 0 && len(srv.TLSConnPolicies) == 0 {
continue
}
// clone the logger so we can apply it to the HTTP server
// (not sure if necessary to clone it; but probably safer)
// (we choose one log cfg arbitrarily; not sure which is best)
if srv.Logs != nil {
logCfg = srv.Logs.clone()
}
// for all the hostnames we found, filter them so we have
// a deduplicated list of names for which to obtain certs
// (only if cert management not disabled for this server)
if srv.AutoHTTPS.DisableCerts {
logger.Warn("skipping automated certificate management for server because it is disabled", zap.String("server_name", srvName))
} else {
for d := range serverDomainSet {
if certmagic.SubjectQualifiesForCert(d) &&
!srv.AutoHTTPS.Skipped(d, srv.AutoHTTPS.SkipCerts) {
// if a certificate for this name is already loaded,
// don't obtain another one for it, unless we are
// supposed to ignore loaded certificates
if !srv.AutoHTTPS.IgnoreLoadedCerts && app.tlsApp.HasCertificateForSubject(d) {
logger.Info("skipping automatic certificate management because one or more matching certificates are already loaded",
zap.String("domain", d),
zap.String("server_name", srvName),
)
continue
}
// most clients don't accept wildcards like *.tld... we
// can handle that, but as a courtesy, warn the user
if strings.Contains(d, "*") &&
strings.Count(strings.Trim(d, "."), ".") == 1 {
logger.Warn("most clients do not trust second-level wildcard certificates (*.tld)",
zap.String("domain", d))
}
uniqueDomainsForCerts[d] = struct{}{}
}
}
}
// tell the server to use TLS if it is not already doing so
if srv.TLSConnPolicies == nil {
srv.TLSConnPolicies = caddytls.ConnectionPolicies{new(caddytls.ConnectionPolicy)}
}
// nothing left to do if auto redirects are disabled
if srv.AutoHTTPS.DisableRedir {
logger.Info("automatic HTTP->HTTPS redirects are disabled", zap.String("server_name", srvName))
continue
}
logger.Info("enabling automatic HTTP->HTTPS redirects", zap.String("server_name", srvName))
// create HTTP->HTTPS redirects
for _, listenAddr := range srv.Listen {
// figure out the address we will redirect to...
addr, err := caddy.ParseNetworkAddress(listenAddr)
if err != nil {
msg := "%s: invalid listener address: %v"
if strings.Count(listenAddr, ":") > 1 {
msg = msg + ", there are too many colons, so the port is ambiguous. Did you mean to wrap the IPv6 address with [] brackets?"
}
return fmt.Errorf(msg, srvName, listenAddr)
}
// this address might not have a hostname, i.e. might be a
// catch-all address for a particular port; we need to keep
// track if it is, so we can set up redirects for it anyway
// (e.g. the user might have enabled on-demand TLS); we use
// an empty string to indicate a catch-all, which we have to
// treat special later
if len(serverDomainSet) == 0 {
redirDomains[""] = append(redirDomains[""], addr)
continue
}
// ...and associate it with each domain in this server
for d := range serverDomainSet {
// if this domain is used on more than one HTTPS-enabled
// port, we'll have to choose one, so prefer the HTTPS port
if _, ok := redirDomains[d]; !ok ||
addr.StartPort == uint(app.httpsPort()) {
redirDomains[d] = append(redirDomains[d], addr)
}
}
}
}
// we now have a list of all the unique names for which we need certs;
// turn the set into a slice so that phase 2 can use it
app.allCertDomains = make([]string, 0, len(uniqueDomainsForCerts))
var internal, tailscale []string
uniqueDomainsLoop:
for d := range uniqueDomainsForCerts {
if !isTailscaleDomain(d) {
// whether or not there is already an automation policy for this
// name, we should add it to the list to manage a cert for it,
// unless it's a Tailscale domain, because we don't manage those
app.allCertDomains = append(app.allCertDomains, d)
}
// some names we've found might already have automation policies
// explicitly specified for them; we should exclude those from
// our hidden/implicit policy, since applying a name to more than
// one automation policy would be confusing and an error
if app.tlsApp.Automation != nil {
for _, ap := range app.tlsApp.Automation.Policies {
for _, apHost := range ap.Subjects() {
if apHost == d {
// if the automation policy has all internal subjects but no issuers,
// it will default to CertMagic's issuers which are public CAs; use
// our internal issuer instead
if len(ap.Issuers) == 0 && ap.AllInternalSubjects() {
iss := new(caddytls.InternalIssuer)
if err := iss.Provision(ctx); err != nil {
return err
}
ap.Issuers = append(ap.Issuers, iss)
}
continue uniqueDomainsLoop
}
}
}
}
// if no automation policy exists for the name yet, we
// will associate it with an implicit one
if isTailscaleDomain(d) {
tailscale = append(tailscale, d)
} else if !certmagic.SubjectQualifiesForPublicCert(d) {
internal = append(internal, d)
}
}
// ensure there is an automation policy to handle these certs
err := app.createAutomationPolicies(ctx, internal, tailscale)
if err != nil {
return err
}
// we need to reduce the mapping, i.e. group domains by address
// since new routes are appended to servers by their address
domainsByAddr := make(map[string][]string)
for domain, addrs := range redirDomains {
for _, addr := range addrs {
addrStr := addr.String()
domainsByAddr[addrStr] = append(domainsByAddr[addrStr], domain)
}
}
// these keep track of the redirect server address(es)
// and the routes for those servers which actually
// respond with the redirects
redirServerAddrs := make(map[string]struct{})
redirServers := make(map[string][]Route)
var redirRoutes RouteList
for addrStr, domains := range domainsByAddr {
// build the matcher set for this redirect route; (note that we happen
// to bypass Provision and Validate steps for these matcher modules)
matcherSet := MatcherSet{MatchProtocol("http")}
// match on known domain names, unless it's our special case of a
// catch-all which is an empty string (common among catch-all sites
// that enable on-demand TLS for yet-unknown domain names)
if !(len(domains) == 1 && domains[0] == "") {
matcherSet = append(matcherSet, MatchHost(domains))
}
addr, err := caddy.ParseNetworkAddress(addrStr)
if err != nil {
return err
}
redirRoute := app.makeRedirRoute(addr.StartPort, matcherSet)
// use the network/host information from the address,
// but change the port to the HTTP port then rebuild
redirAddr := addr
redirAddr.StartPort = uint(app.httpPort())
redirAddr.EndPort = redirAddr.StartPort
redirAddrStr := redirAddr.String()
redirServers[redirAddrStr] = append(redirServers[redirAddrStr], redirRoute)
}
// on-demand TLS means that hostnames may be used which are not
// explicitly defined in the config, and we still need to redirect
// those; so we can append a single catch-all route (notice there
// is no Host matcher) after the other redirect routes which will
// allow us to handle unexpected/new hostnames... however, it's
// not entirely clear what the redirect destination should be,
// so I'm going to just hard-code the app's HTTPS port and call
// it good for now...
// TODO: This implies that all plaintext requests will be blindly
// redirected to their HTTPS equivalent, even if this server
// doesn't handle that hostname at all; I don't think this is a
// bad thing, and it also obscures the actual hostnames that this
// server is configured to match on, which may be desirable, but
// it's not something that should be relied on. We can change this
// if we want to.
appendCatchAll := func(routes []Route) []Route {
return append(routes, app.makeRedirRoute(uint(app.httpsPort()), MatcherSet{MatchProtocol("http")}))
}
redirServersLoop:
for redirServerAddr, routes := range redirServers {
// for each redirect listener, see if there's already a
// server configured to listen on that exact address; if so,
// insert the redirect route to the end of its route list
// after any other routes with host matchers; otherwise,
// we'll create a new server for all the listener addresses
// that are unused and serve the remaining redirects from it
for _, srv := range app.Servers {
// only look at servers which listen on an address which
// we want to add redirects to
if !srv.hasListenerAddress(redirServerAddr) {
continue
}
// find the index of the route after the last route with a host
// matcher, then insert the redirects there, but before any
// user-defined catch-all routes
// see https://github.com/caddyserver/caddy/issues/3212
insertIndex := srv.findLastRouteWithHostMatcher()
// add the redirects at the insert index, except for when
// we have a catch-all for HTTPS, in which case the user's
// defined catch-all should take precedence. See #4829
if len(uniqueDomainsForCerts) != 0 {
srv.Routes = append(srv.Routes[:insertIndex], append(routes, srv.Routes[insertIndex:]...)...)
}
// append our catch-all route in case the user didn't define their own
srv.Routes = appendCatchAll(srv.Routes)
continue redirServersLoop
}
// no server with this listener address exists;
// save this address and route for custom server
redirServerAddrs[redirServerAddr] = struct{}{}
redirRoutes = append(redirRoutes, routes...)
}
// if there are routes remaining which do not belong
// in any existing server, make our own to serve the
// rest of the redirects
if len(redirServerAddrs) > 0 {
redirServerAddrsList := make([]string, 0, len(redirServerAddrs))
for a := range redirServerAddrs {
redirServerAddrsList = append(redirServerAddrsList, a)
}
app.Servers["remaining_auto_https_redirects"] = &Server{
Listen: redirServerAddrsList,
Routes: appendCatchAll(redirRoutes),
Logs: logCfg,
}
}
logger.Debug("adjusted config",
zap.Reflect("tls", app.tlsApp),
zap.Reflect("http", app))
return nil
}
func (app *App) makeRedirRoute(redirToPort uint, matcherSet MatcherSet) Route {
redirTo := "https://{http.request.host}"
// since this is an external redirect, we should only append an explicit
// port if we know it is not the officially standardized HTTPS port, and,
// notably, also not the port that Caddy thinks is the HTTPS port (the
// configurable HTTPSPort parameter) - we can't change the standard HTTPS
// port externally, so that config parameter is for internal use only;
// we also do not append the port if it happens to be the HTTP port as
// well, obviously (for example, user defines the HTTP port explicitly
// in the list of listen addresses for a server)
if redirToPort != uint(app.httpPort()) &&
redirToPort != uint(app.httpsPort()) &&
redirToPort != DefaultHTTPPort &&
redirToPort != DefaultHTTPSPort {
redirTo += ":" + strconv.Itoa(int(redirToPort))
}
redirTo += "{http.request.uri}"
return Route{
MatcherSets: []MatcherSet{matcherSet},
Handlers: []MiddlewareHandler{
StaticResponse{
StatusCode: WeakString(strconv.Itoa(http.StatusPermanentRedirect)),
Headers: http.Header{
"Location": []string{redirTo},
},
Close: true,
},
},
}
}
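// For a rough idea of what this produces: with an HTTPS-enabled listener on a
// non-standard port such as 8443, the generated redirect route is approximately
// equivalent to this JSON (sketch only):
//
//	{
//		"match": [{"protocol": "http"}],
//		"handle": [{
//			"handler": "static_response",
//			"status_code": 308,
//			"headers": {"Location": ["https://{http.request.host}:8443{http.request.uri}"]},
//			"close": true
//		}]
//	}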
// createAutomationPolicies ensures that automated certificates for this
// app are managed properly. This adds up to two automation policies:
// one for the public names, and one for the internal names. If a catch-all
// automation policy exists, it will be shallow-copied and used as the
// base for the new ones (this is important for preserving behavior the
// user intends to be "defaults").
func (app *App) createAutomationPolicies(ctx caddy.Context, internalNames, tailscaleNames []string) error {
// before we begin, loop through the existing automation policies
// and, for any ACMEIssuers we find, make sure they're filled in
// with default values that might be specified in our HTTP app; also
// look for a base (or "catch-all" / default) automation policy,
// which we're going to essentially require, to make sure it has
// those defaults, too
var basePolicy *caddytls.AutomationPolicy
var foundBasePolicy bool
if app.tlsApp.Automation == nil {
// we will expect this to not be nil from now on
app.tlsApp.Automation = new(caddytls.AutomationConfig)
}
for _, ap := range app.tlsApp.Automation.Policies {
// on-demand policies can have the tailscale manager added implicitly
// if there's no explicit manager configured -- for convenience
if ap.OnDemand && len(ap.Managers) == 0 {
var ts caddytls.Tailscale
if err := ts.Provision(ctx); err != nil {
return err
}
ap.Managers = []certmagic.Manager{ts}
// must reprovision the automation policy so that the underlying
// CertMagic config knows about the updated Managers
if err := ap.Provision(app.tlsApp); err != nil {
return fmt.Errorf("re-provisioning automation policy: %v", err)
}
}
// set up default issuer -- honestly, this is only
// really necessary because the HTTP app is opinionated
// and has settings which could be inferred as new
// defaults for the ACMEIssuer in the TLS app (such as
// what the HTTP and HTTPS ports are)
if ap.Issuers == nil {
var err error
ap.Issuers, err = caddytls.DefaultIssuersProvisioned(ctx)
if err != nil {
return err
}
}
for _, iss := range ap.Issuers {
if acmeIssuer, ok := iss.(acmeCapable); ok {
err := app.fillInACMEIssuer(acmeIssuer.GetACMEIssuer())
if err != nil {
return err
}
}
}
// while we're here, is this the catch-all/base policy?
if !foundBasePolicy && len(ap.SubjectsRaw) == 0 {
basePolicy = ap
foundBasePolicy = true
}
}
if basePolicy == nil {
// no base policy found; we will make one
basePolicy = new(caddytls.AutomationPolicy)
}
// if the basePolicy has an existing ACMEIssuer (particularly to
// include any type that embeds/wraps an ACMEIssuer), let's use it
// (I guess we just use the first one?), otherwise we'll make one
var baseACMEIssuer *caddytls.ACMEIssuer
for _, iss := range basePolicy.Issuers {
if acmeWrapper, ok := iss.(acmeCapable); ok {
baseACMEIssuer = acmeWrapper.GetACMEIssuer()
break
}
}
if baseACMEIssuer == nil {
// note that this happens if basePolicy.Issuers is empty
// OR if it is not empty but does not have an ACMEIssuer
baseACMEIssuer = new(caddytls.ACMEIssuer)
}
// if there was a base policy to begin with, we already
// filled in its issuer's defaults; if there wasn't, we
// still need to do that
if !foundBasePolicy {
err := app.fillInACMEIssuer(baseACMEIssuer)
if err != nil {
return err
}
}
// never overwrite any other issuer that might already be configured
if basePolicy.Issuers == nil {
var err error
basePolicy.Issuers, err = caddytls.DefaultIssuersProvisioned(ctx)
if err != nil {
return err
}
for _, iss := range basePolicy.Issuers {
if acmeIssuer, ok := iss.(acmeCapable); ok {
err := app.fillInACMEIssuer(acmeIssuer.GetACMEIssuer())
if err != nil {
return err
}
}
}
}
if !foundBasePolicy {
// there was no base policy to begin with, so add
// our base/catch-all policy - this will serve the
// public-looking names as well as any other names
// that don't match any other policy
err := app.tlsApp.AddAutomationPolicy(basePolicy)
if err != nil {
return err
}
} else {
// a base policy already existed; we might have
// changed it, so re-provision it
err := basePolicy.Provision(app.tlsApp)
if err != nil {
return err
}
}
// public names will be taken care of by the base (catch-all)
// policy, which we've ensured exists if not already specified;
// internal names, however, need to be handled by an internal
// issuer, which we need to make a new policy for, scoped to
// just those names (yes, this logic is a bit asymmetric, but
// it works, because our assumed/natural default issuer is an
// ACME issuer)
if len(internalNames) > 0 {
internalIssuer := new(caddytls.InternalIssuer)
// shallow-copy the base policy; we want to inherit
// from it, not replace it... this takes two lines to
// overrule compiler optimizations
policyCopy := *basePolicy
newPolicy := &policyCopy
// very important to provision the issuer, since we
// are bypassing the JSON-unmarshaling step
if err := internalIssuer.Provision(ctx); err != nil {
return err
}
// this policy should apply only to the given names
// and should use our issuer -- yes, this overrides
// any issuer that may have been set in the base
// policy, but we do this because these names do not
// already have a policy associated with them, which
// is easy to do; consider the case of a Caddyfile
// that has only "localhost" as a name, but sets the
// default/global ACME CA to the Let's Encrypt staging
// endpoint... they probably don't intend to change the
// fundamental set of names that setting applies to,
// rather they just want to change the CA for the set
// of names that would normally use the production API;
// anyway, that gets into the weeds a bit...
newPolicy.SubjectsRaw = internalNames
newPolicy.Issuers = []certmagic.Issuer{internalIssuer}
err := app.tlsApp.AddAutomationPolicy(newPolicy)
if err != nil {
return err
}
}
// tailscale names go in their own automation policies because
// they require on-demand TLS to be enabled, which we obviously
// can't enable for everything
if len(tailscaleNames) > 0 {
policyCopy := *basePolicy
newPolicy := &policyCopy
var ts caddytls.Tailscale
if err := ts.Provision(ctx); err != nil {
return err
}
newPolicy.SubjectsRaw = tailscaleNames
newPolicy.Issuers = nil
newPolicy.Managers = append(newPolicy.Managers, ts)
err := app.tlsApp.AddAutomationPolicy(newPolicy)
if err != nil {
return err
}
}
// we just changed a lot of stuff, so double-check that it's all good
err := app.tlsApp.Validate()
if err != nil {
return err
}
return nil
}
// fillInACMEIssuer fills in default values into acmeIssuer that
// are defined in app; these values at time of writing are just
// app.HTTPPort and app.HTTPSPort, which are used by ACMEIssuer.
// Sure, we could just use the global/CertMagic defaults, but if
// a user has configured those ports in the HTTP app, it makes
// sense to use them in the TLS app too, even if they forgot (or
// were too lazy, like me) to set it in each automation policy
// that uses it -- this just makes things a little less tedious
// for the user, so they don't have to repeat those ports in
// potentially many places. This function never steps on existing
// config values. If any changes are made, acmeIssuer is
// reprovisioned. acmeIssuer must not be nil.
func (app *App) fillInACMEIssuer(acmeIssuer *caddytls.ACMEIssuer) error {
if app.HTTPPort > 0 || app.HTTPSPort > 0 {
if acmeIssuer.Challenges == nil {
acmeIssuer.Challenges = new(caddytls.ChallengesConfig)
}
}
if app.HTTPPort > 0 {
if acmeIssuer.Challenges.HTTP == nil {
acmeIssuer.Challenges.HTTP = new(caddytls.HTTPChallengeConfig)
}
// don't overwrite existing explicit config
if acmeIssuer.Challenges.HTTP.AlternatePort == 0 {
acmeIssuer.Challenges.HTTP.AlternatePort = app.HTTPPort
}
}
if app.HTTPSPort > 0 {
if acmeIssuer.Challenges.TLSALPN == nil {
acmeIssuer.Challenges.TLSALPN = new(caddytls.TLSALPNChallengeConfig)
}
// don't overwrite existing explicit config
if acmeIssuer.Challenges.TLSALPN.AlternatePort == 0 {
acmeIssuer.Challenges.TLSALPN.AlternatePort = app.HTTPSPort
}
}
// we must provision all ACME issuers, even if nothing
// was changed, because we don't know if they are new
// and haven't been provisioned yet; if an ACME issuer
// never gets provisioned, its Agree field stays false,
// which leads to, um, problems later on
return acmeIssuer.Provision(app.ctx)
}
// automaticHTTPSPhase2 begins certificate management for
// all names in the qualifying domain set for each server.
// This phase must occur after provisioning and at the end
// of app start, after all the servers have been started.
// Doing this last ensures that there won't be any race
// for listeners on the HTTP or HTTPS ports when management
// is async (if CertMagic's solvers bind to those ports
// first, then our servers would fail to bind to them,
// which would be bad, since CertMagic's bindings are
// temporary and don't serve the user's sites!).
func (app *App) automaticHTTPSPhase2() error {
if len(app.allCertDomains) == 0 {
return nil
}
app.logger.Info("enabling automatic TLS certificate management",
zap.Strings("domains", app.allCertDomains),
)
err := app.tlsApp.Manage(app.allCertDomains)
if err != nil {
return fmt.Errorf("managing certificates for %v: %s", app.allCertDomains, err)
}
app.allCertDomains = nil // no longer needed; allow GC to deallocate
return nil
}
func isTailscaleDomain(name string) bool {
return strings.HasSuffix(strings.ToLower(name), ".ts.net")
}
type acmeCapable interface{ GetACMEIssuer() *caddytls.ACMEIssuer }
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"bytes"
"encoding/json"
"io"
"net"
"net/http"
"path"
"path/filepath"
"strconv"
"strings"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
func init() {
caddy.RegisterModule(tlsPlaceholderWrapper{})
}
// RequestMatcher is a type that can match to a request.
// A route matcher MUST NOT modify the request, with the
// only exception being its context.
type RequestMatcher interface {
Match(*http.Request) bool
}
// Handler is like http.Handler except ServeHTTP may return an error.
//
// If any handler encounters an error, it should be returned for proper
// handling. Return values should be propagated down the middleware chain
// by returning it unchanged. Returned errors should not be re-wrapped
// if they are already HandlerError values.
type Handler interface {
ServeHTTP(http.ResponseWriter, *http.Request) error
}
// HandlerFunc is a convenience type like http.HandlerFunc.
type HandlerFunc func(http.ResponseWriter, *http.Request) error
// ServeHTTP implements the Handler interface.
func (f HandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) error {
return f(w, r)
}
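// For instance, a trivial responder can be written inline as a sketch:
//
//	var hello Handler = HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
//		_, err := io.WriteString(w, "hello")
//		return err
//	})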
// Middleware chains one Handler to the next by being passed
// the next Handler in the chain.
type Middleware func(Handler) Handler
// MiddlewareHandler is like Handler except it takes as a third
// argument the next handler in the chain. The next handler will
// never be nil, but may be a no-op handler if this is the last
// handler in the chain. Handlers which act as middleware should
// call the next handler's ServeHTTP method so as to propagate
// the request down the chain properly. Handlers which act as
// responders (content origins) need not invoke the next handler,
// since the last handler in the chain should be the first to
// write the response.
type MiddlewareHandler interface {
ServeHTTP(http.ResponseWriter, *http.Request, Handler) error
}
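// A middleware that sets a response header and then delegates to the rest of
// the chain could look like this sketch (module registration and config are
// omitted; the header name is arbitrary):
//
//	type addHeader struct{}
//
//	func (addHeader) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error {
//		w.Header().Set("X-Example", "1")
//		return next.ServeHTTP(w, r)
//	}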
// emptyHandler is used as a no-op handler.
var emptyHandler Handler = HandlerFunc(func(_ http.ResponseWriter, req *http.Request) error {
SetVar(req.Context(), "unhandled", true)
return nil
})
// errorEmptyHandler is an implicit suffix middleware that, if reached, writes
// the status code of the error stored at the ErrorCtxKey in the request context.
// This is to prevent situations where the error chain does not actually handle
// the error (for instance, it matches only on some errors). See #3053
var errorEmptyHandler Handler = HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
httpError := r.Context().Value(ErrorCtxKey)
if handlerError, ok := httpError.(HandlerError); ok {
w.WriteHeader(handlerError.StatusCode)
} else {
w.WriteHeader(http.StatusInternalServerError)
}
return nil
})
// ResponseHandler pairs a response matcher with custom handling
// logic. Either the status code can be changed to something else
// while using the original response body, or, if a status code
// is not set, it can execute a custom route list; this is useful
// for executing handler routes based on the properties of an HTTP
// response that has not been written out to the client yet.
//
// To use this type, provision it at module load time, then when
// ready to use, match the response against its matcher; if it
// matches (or doesn't have a matcher), change the status code on
// the response if configured; otherwise invoke the routes by
// calling `rh.Routes.Compile(next).ServeHTTP(rw, req)` (or similar).
type ResponseHandler struct {
// The response matcher for this handler. If empty/nil,
// it always matches.
Match *ResponseMatcher `json:"match,omitempty"`
// To write the original response body but with a different
// status code, set this field to the desired status code.
// If set, this takes priority over routes.
StatusCode WeakString `json:"status_code,omitempty"`
// The list of HTTP routes to execute if no status code is
// specified. If evaluated, the original response body
// will not be written.
Routes RouteList `json:"routes,omitempty"`
}
// Provision sets up the routes in rh.
func (rh *ResponseHandler) Provision(ctx caddy.Context) error {
if rh.Routes != nil {
err := rh.Routes.Provision(ctx)
if err != nil {
return err
}
}
return nil
}
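// Illustrative use by a handler that has buffered a response (sketch only;
// how `matched` is computed from rh.Match and how the buffered body is
// replayed are assumed):
//
//	if matched {
//		if rh.StatusCode != "" {
//			w.WriteHeader(rh.StatusCode.Int())
//			// ...then copy the buffered response body to w...
//		} else {
//			return rh.Routes.Compile(next).ServeHTTP(w, r)
//		}
//	}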
// WeakString is a type that unmarshals any JSON value
// as a string literal, with the following exceptions:
//
// 1. actual string values are decoded as strings; and
// 2. null is decoded as empty string;
//
// and provides methods for getting the value as various
// primitive types. However, using this type removes any
// type safety as far as deserializing JSON is concerned.
type WeakString string
// UnmarshalJSON satisfies json.Unmarshaler according to
// this type's documentation.
func (ws *WeakString) UnmarshalJSON(b []byte) error {
if len(b) == 0 {
return io.EOF
}
if b[0] == byte('"') && b[len(b)-1] == byte('"') {
var s string
err := json.Unmarshal(b, &s)
if err != nil {
return err
}
*ws = WeakString(s)
return nil
}
if bytes.Equal(b, []byte("null")) {
return nil
}
*ws = WeakString(b)
return nil
}
// MarshalJSON marshals ws as a boolean if true or false,
// a number if an integer, or a string otherwise.
func (ws WeakString) MarshalJSON() ([]byte, error) {
if ws == "true" {
return []byte("true"), nil
}
if ws == "false" {
return []byte("false"), nil
}
if num, err := strconv.Atoi(string(ws)); err == nil {
return json.Marshal(num)
}
return json.Marshal(string(ws))
}
// Int returns ws as an integer. If ws is not an
// integer, 0 is returned.
func (ws WeakString) Int() int {
num, _ := strconv.Atoi(string(ws))
return num
}
// Float64 returns ws as a float64. If ws is not a
// float value, the zero value is returned.
func (ws WeakString) Float64() float64 {
num, _ := strconv.ParseFloat(string(ws), 64)
return num
}
// Bool returns ws as a boolean. If ws is not a
// boolean, false is returned.
func (ws WeakString) Bool() bool {
return string(ws) == "true"
}
// String returns ws as a string.
func (ws WeakString) String() string {
return string(ws)
}
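// A minimal, illustrative sketch (not from the original sources) showing the
// flexibility WeakString gives config consumers; exampleConfig is a
// hypothetical struct:
//
//	type exampleConfig struct {
//		Port WeakString `json:"port,omitempty"`
//	}
//
//	var cfg exampleConfig
//	_ = json.Unmarshal([]byte(`{"port": 8080}`), &cfg)   // numeric JSON value
//	_ = json.Unmarshal([]byte(`{"port": "8080"}`), &cfg) // string JSON value
//	// in both cases, cfg.Port.Int() == 8080 and cfg.Port.String() == "8080"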
// StatusCodeMatches returns true if a real HTTP status code matches
// the configured status code, which may be either a real HTTP status
// code or an integer representing a class of codes (e.g. 4 for all
// 4xx statuses).
func StatusCodeMatches(actual, configured int) bool {
if actual == configured {
return true
}
if configured < 100 &&
actual >= configured*100 &&
actual < (configured+1)*100 {
return true
}
return false
}
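// For example (values chosen purely for illustration):
//
//	StatusCodeMatches(404, 404) // true: exact match
//	StatusCodeMatches(404, 4)   // true: 4 means the entire 4xx class
//	StatusCodeMatches(404, 5)   // false: not in the 5xx class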
// SanitizedPathJoin performs filepath.Join(root, reqPath) that
// is safe against directory traversal attacks. It uses logic
// similar to that in the Go standard library, specifically
// in the implementation of http.Dir. The root is assumed to
// be a trusted path, but reqPath is not; and the output will
// never be outside of root. The resulting path can be used
// with the local file system. If root is empty, the current
// directory is assumed. If the cleaned request path is deemed
// not local according to lexical processing (i.e. ignoring links),
// it will be rejected as unsafe and only the root will be returned.
func SanitizedPathJoin(root, reqPath string) string {
if root == "" {
root = "."
}
relPath := path.Clean("/" + reqPath)[1:] // clean path and trim the leading /
if relPath != "" && !filepath.IsLocal(relPath) {
// path is unsafe (see https://github.com/golang/go/issues/56336#issuecomment-1416214885)
return root
}
path := filepath.Join(root, filepath.FromSlash(relPath))
// filepath.Join also cleans the path, and cleaning strips
// the trailing slash, so we need to re-add it afterwards.
// if the length is 1, then it's a path to the root,
// and that should return ".", so we don't append the separator.
if strings.HasSuffix(reqPath, "/") && len(reqPath) > 1 {
path += separator
}
return path
}
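// For example (illustrative values, assuming a Unix-style path separator):
//
//	SanitizedPathJoin("/srv", "/foo/../bar") // "/srv/bar"
//	SanitizedPathJoin("/srv", "/../../etc")  // "/srv/etc" (the ".." is cleaned away; the result never escapes /srv)
//	SanitizedPathJoin("/srv", "/foo/")       // "/srv/foo/" (trailing slash preserved)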
// CleanPath cleans path p according to path.Clean(), but only
// merges repeated slashes if collapseSlashes is true, and always
// preserves trailing slashes.
func CleanPath(p string, collapseSlashes bool) string {
if collapseSlashes {
return cleanPath(p)
}
// insert an invalid/impossible URI character between each pair of consecutive
// slashes to preserve empty path segments; then clean the path as usual,
// and then remove the remaining temporary characters.
const tmpCh = 0xff
var sb strings.Builder
for i, ch := range p {
if ch == '/' && i > 0 && p[i-1] == '/' {
sb.WriteByte(tmpCh)
}
sb.WriteRune(ch)
}
halfCleaned := cleanPath(sb.String())
halfCleaned = strings.ReplaceAll(halfCleaned, string([]byte{tmpCh}), "")
return halfCleaned
}
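// For example (illustrative values):
//
//	CleanPath("/a//b/../c/", true)  // "/a/c/"  (slashes collapsed, trailing slash kept)
//	CleanPath("/a//b/../c/", false) // "/a//c/" (the empty path segment is preserved)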
// cleanPath does path.Clean(p) but preserves any trailing slash.
func cleanPath(p string) string {
cleaned := path.Clean(p)
if cleaned != "/" && strings.HasSuffix(p, "/") {
cleaned = cleaned + "/"
}
return cleaned
}
// tlsPlaceholderWrapper is a no-op listener wrapper that marks
// where the TLS listener should be in a chain of listener wrappers.
// It should only be used if another listener wrapper must be placed
// in front of the TLS handshake.
type tlsPlaceholderWrapper struct{}
func (tlsPlaceholderWrapper) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "caddy.listeners.tls",
New: func() caddy.Module { return new(tlsPlaceholderWrapper) },
}
}
func (tlsPlaceholderWrapper) WrapListener(ln net.Listener) net.Listener { return ln }
func (tlsPlaceholderWrapper) UnmarshalCaddyfile(d *caddyfile.Dispenser) error { return nil }
const (
// DefaultHTTPPort is the default port for HTTP.
DefaultHTTPPort = 80
// DefaultHTTPSPort is the default port for HTTPS.
DefaultHTTPSPort = 443
)
const separator = string(filepath.Separator)
// Interface guards
var (
_ caddy.ListenerWrapper = (*tlsPlaceholderWrapper)(nil)
_ caddyfile.Unmarshaler = (*tlsPlaceholderWrapper)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"crypto/x509/pkix"
"encoding/json"
"errors"
"fmt"
"net/http"
"reflect"
"regexp"
"strings"
"time"
"github.com/google/cel-go/cel"
"github.com/google/cel-go/common"
"github.com/google/cel-go/common/ast"
"github.com/google/cel-go/common/operators"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
"github.com/google/cel-go/ext"
"github.com/google/cel-go/interpreter"
"github.com/google/cel-go/interpreter/functions"
"github.com/google/cel-go/parser"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
func init() {
caddy.RegisterModule(MatchExpression{})
}
// MatchExpression matches requests by evaluating a
// [CEL](https://github.com/google/cel-spec) expression.
// This enables complex logic to be expressed using a comfortable,
// familiar syntax. Please refer to
// [the standard definitions of CEL functions and operators](https://github.com/google/cel-spec/blob/master/doc/langdef.md#standard-definitions).
//
// This matcher's JSON interface is actually a string, not a struct.
// The generated docs are not correct because this type has custom
// marshaling logic.
//
// COMPATIBILITY NOTE: This module is still experimental and is not
// subject to Caddy's compatibility guarantee.
type MatchExpression struct {
// The CEL expression to evaluate. Any Caddy placeholders
// will be expanded and situated into proper CEL function
// calls before evaluating.
Expr string `json:"expr,omitempty"`
// Name is an optional name for this matcher.
// This is used to populate the name for regexp
// matchers that appear in the expression.
Name string `json:"name,omitempty"`
expandedExpr string
prg cel.Program
ta types.Adapter
log *zap.Logger
}
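// Illustrative example only (not taken from the original sources). In JSON,
// this matcher is configured as a plain string inside a route's matcher set,
// and placeholders are rewritten into caddyPlaceholder() calls by Provision:
//
//	"match": [{"expression": "{http.request.uri.path} == '/healthz'"}]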
// CaddyModule returns the Caddy module information.
func (MatchExpression) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.expression",
New: func() caddy.Module { return new(MatchExpression) },
}
}
// MarshalJSON marshals m's expression.
func (m MatchExpression) MarshalJSON() ([]byte, error) {
// if the name is empty, then we can marshal just the expression string
if m.Name == "" {
return json.Marshal(m.Expr)
}
// otherwise, we need to marshal the full object, using an
// anonymous struct to avoid infinite recursion
return json.Marshal(struct {
Expr string `json:"expr"`
Name string `json:"name"`
}{
Expr: m.Expr,
Name: m.Name,
})
}
// UnmarshalJSON unmarshals m's expression.
func (m *MatchExpression) UnmarshalJSON(data []byte) error {
// if the data is a string, then it's just the expression
if data[0] == '"' {
return json.Unmarshal(data, &m.Expr)
}
// otherwise, it's a full object, so unmarshal it,
// using a temp map to avoid infinite recursion
var tmpJson map[string]any
err := json.Unmarshal(data, &tmpJson)
if err != nil {
return err
}
// use non-panicking type assertions in case either key is absent
expr, _ := tmpJson["expr"].(string)
name, _ := tmpJson["name"].(string)
*m = MatchExpression{
Expr: expr,
Name: name,
}
return nil
}
// Provision sets up m.
func (m *MatchExpression) Provision(ctx caddy.Context) error {
m.log = ctx.Logger()
// replace placeholders with a function call - this is just some
// light (and possibly naïve) syntactic sugar
m.expandedExpr = placeholderRegexp.ReplaceAllString(m.Expr, placeholderExpansion)
// our type adapter expands CEL's standard type support
m.ta = celTypeAdapter{}
// initialize the CEL libraries from the Matcher implementations which
// have been configured to support CEL.
matcherLibProducers := []CELLibraryProducer{}
for _, info := range caddy.GetModules("http.matchers") {
p, ok := info.New().(CELLibraryProducer)
if ok {
matcherLibProducers = append(matcherLibProducers, p)
}
}
// add the matcher name to the context so that the matcher name
// can be used by regexp matchers being provisioned
ctx = ctx.WithValue(MatcherNameCtxKey, m.Name)
// Assemble the compilation and program options from the different library
// producers into a single cel.Library implementation.
matcherEnvOpts := []cel.EnvOption{}
matcherProgramOpts := []cel.ProgramOption{}
for _, producer := range matcherLibProducers {
l, err := producer.CELLibrary(ctx)
if err != nil {
return fmt.Errorf("error initializing CEL library for %T: %v", producer, err)
}
matcherEnvOpts = append(matcherEnvOpts, l.CompileOptions()...)
matcherProgramOpts = append(matcherProgramOpts, l.ProgramOptions()...)
}
matcherLib := cel.Lib(NewMatcherCELLibrary(matcherEnvOpts, matcherProgramOpts))
// create the CEL environment
env, err := cel.NewEnv(
cel.Function(placeholderFuncName, cel.SingletonBinaryBinding(m.caddyPlaceholderFunc), cel.Overload(
placeholderFuncName+"_httpRequest_string",
[]*cel.Type{httpRequestObjectType, cel.StringType},
cel.AnyType,
)),
cel.Variable("request", httpRequestObjectType),
cel.CustomTypeAdapter(m.ta),
ext.Strings(),
matcherLib,
)
if err != nil {
return fmt.Errorf("setting up CEL environment: %v", err)
}
// parse and type-check the expression
checked, issues := env.Compile(m.expandedExpr)
if issues.Err() != nil {
return fmt.Errorf("compiling CEL program: %s", issues.Err())
}
// request matching is a boolean operation, so we don't really know
// what to do if the expression returns a non-boolean type
if checked.OutputType() != cel.BoolType {
return fmt.Errorf("CEL request matcher expects return type of bool, not %s", checked.OutputType())
}
// compile the "program"
m.prg, err = env.Program(checked, cel.EvalOptions(cel.OptOptimize))
if err != nil {
return fmt.Errorf("compiling CEL program: %s", err)
}
return nil
}
// Match returns true if r matches m.
func (m MatchExpression) Match(r *http.Request) bool {
celReq := celHTTPRequest{r}
out, _, err := m.prg.Eval(celReq)
if err != nil {
m.log.Error("evaluating expression", zap.Error(err))
SetVar(r.Context(), MatcherErrorVarKey, err)
return false
}
if outBool, ok := out.Value().(bool); ok {
return outBool
}
return false
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchExpression) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume matcher name
// if there are multiple args, then we need to keep the raw
// tokens because the user may have used quotes within their
// CEL expression (e.g. strings) and we should retain that
if d.CountRemainingArgs() > 1 {
m.Expr = strings.Join(d.RemainingArgsRaw(), " ")
return nil
}
// there should at least be one arg
if !d.NextArg() {
return d.ArgErr()
}
// if there's only one token, then we can safely grab the
// cleaned token (no quotes) and use that as the expression
// because there's no valid CEL expression that is only a
// quoted string; commonly quotes are used in Caddyfile to
// define the expression
m.Expr = d.Val()
// use the named matcher's name to fill regexp
// matchers' names by default
m.Name = d.GetContextString(caddyfile.MatcherNameCtxKey)
return nil
}
// caddyPlaceholderFunc implements the custom CEL function that accesses the
// Replacer on a request and gets values from it.
func (m MatchExpression) caddyPlaceholderFunc(lhs, rhs ref.Val) ref.Val {
celReq, ok := lhs.(celHTTPRequest)
if !ok {
return types.NewErr(
"invalid request of type '%v' to %s(request, placeholderVarName)",
lhs.Type(),
placeholderFuncName,
)
}
phStr, ok := rhs.(types.String)
if !ok {
return types.NewErr(
"invalid placeholder variable name of type '%v' to %s(request, placeholderVarName)",
rhs.Type(),
placeholderFuncName,
)
}
repl := celReq.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
val, _ := repl.Get(string(phStr))
return m.ta.NativeToValue(val)
}
// httpRequestCELType is the type representation of a native HTTP request.
var httpRequestCELType = cel.ObjectType("http.Request", traits.ReceiverType)
// celHTTPRequest wraps an http.Request with ref.Val interface methods.
//
// This type also implements the interpreter.Activation interface which
// drops allocation costs for CEL expression evaluations by roughly half.
type celHTTPRequest struct{ *http.Request }
func (cr celHTTPRequest) ResolveName(name string) (any, bool) {
if name == "request" {
return cr, true
}
return nil, false
}
func (cr celHTTPRequest) Parent() interpreter.Activation {
return nil
}
func (cr celHTTPRequest) ConvertToNative(typeDesc reflect.Type) (any, error) {
return cr.Request, nil
}
func (celHTTPRequest) ConvertToType(typeVal ref.Type) ref.Val {
panic("not implemented")
}
func (cr celHTTPRequest) Equal(other ref.Val) ref.Val {
if o, ok := other.Value().(celHTTPRequest); ok {
return types.Bool(o.Request == cr.Request)
}
return types.ValOrErr(other, "%v is not comparable type", other)
}
func (celHTTPRequest) Type() ref.Type { return httpRequestCELType }
func (cr celHTTPRequest) Value() any { return cr }
var pkixNameCELType = cel.ObjectType("pkix.Name", traits.ReceiverType)
// celPkixName wraps a pkix.Name with
// methods to satisfy the ref.Val interface.
type celPkixName struct{ *pkix.Name }
func (pn celPkixName) ConvertToNative(typeDesc reflect.Type) (any, error) {
return pn.Name, nil
}
func (pn celPkixName) ConvertToType(typeVal ref.Type) ref.Val {
if typeVal.TypeName() == "string" {
return types.String(pn.Name.String())
}
panic("not implemented")
}
func (pn celPkixName) Equal(other ref.Val) ref.Val {
if o, ok := other.Value().(string); ok {
return types.Bool(pn.Name.String() == o)
}
return types.ValOrErr(other, "%v is not comparable type", other)
}
func (celPkixName) Type() ref.Type { return pkixNameCELType }
func (pn celPkixName) Value() any { return pn }
// celTypeAdapter can adapt our custom types to a CEL value.
type celTypeAdapter struct{}
func (celTypeAdapter) NativeToValue(value any) ref.Val {
switch v := value.(type) {
case celHTTPRequest:
return v
case pkix.Name:
return celPkixName{&v}
case time.Time:
return types.Timestamp{Time: v}
case error:
return types.NewErr(v.Error())
}
return types.DefaultTypeAdapter.NativeToValue(value)
}
// CELLibraryProducer provides CEL libraries that expose a Matcher
// implementation as a first-class function within the CEL expression
// matcher.
type CELLibraryProducer interface {
// CELLibrary creates a cel.Library which makes it possible to use the
// target object within CEL expression matchers.
CELLibrary(caddy.Context) (cel.Library, error)
}
// CELMatcherImpl creates a new cel.Library based on the following pieces of
// data:
//
// - macroName: the function name to be used within CEL. This will be a macro
// and not a function proper.
// - funcName: the function overload name generated by the CEL macro used to
// represent the matcher.
// - matcherDataTypes: the argument types to the macro.
// - fac: a matcherFactory implementation which converts from CEL constant
// values to a Matcher instance.
//
// Note, macro names and function names must not collide with other macros or
// functions exposed within CEL expressions, or an error will be produced
// during the expression matcher plan time.
//
// The existing CELMatcherImpl support methods are configured to support a
// limited set of function signatures. For strong type validation you may need
// to provide a custom macro which does a more detailed analysis of the CEL
// literal provided to the macro as an argument.
func CELMatcherImpl(macroName, funcName string, matcherDataTypes []*cel.Type, fac CELMatcherFactory) (cel.Library, error) {
requestType := cel.ObjectType("http.Request")
var macro parser.Macro
switch len(matcherDataTypes) {
case 1:
matcherDataType := matcherDataTypes[0]
switch matcherDataType.String() {
case "list(string)":
macro = parser.NewGlobalVarArgMacro(macroName, celMatcherStringListMacroExpander(funcName))
case cel.StringType.String():
macro = parser.NewGlobalMacro(macroName, 1, celMatcherStringMacroExpander(funcName))
case CELTypeJSON.String():
macro = parser.NewGlobalMacro(macroName, 1, celMatcherJSONMacroExpander(funcName))
default:
return nil, fmt.Errorf("unsupported matcher data type: %s", matcherDataType)
}
case 2:
if matcherDataTypes[0] == cel.StringType && matcherDataTypes[1] == cel.StringType {
macro = parser.NewGlobalMacro(macroName, 2, celMatcherStringListMacroExpander(funcName))
matcherDataTypes = []*cel.Type{cel.ListType(cel.StringType)}
} else {
return nil, fmt.Errorf("unsupported matcher data type: %s, %s", matcherDataTypes[0], matcherDataTypes[1])
}
case 3:
if matcherDataTypes[0] == cel.StringType && matcherDataTypes[1] == cel.StringType && matcherDataTypes[2] == cel.StringType {
macro = parser.NewGlobalMacro(macroName, 3, celMatcherStringListMacroExpander(funcName))
matcherDataTypes = []*cel.Type{cel.ListType(cel.StringType)}
} else {
return nil, fmt.Errorf("unsupported matcher data type: %s, %s, %s", matcherDataTypes[0], matcherDataTypes[1], matcherDataTypes[2])
}
}
envOptions := []cel.EnvOption{
cel.Macros(macro),
cel.Function(funcName,
cel.Overload(funcName, append([]*cel.Type{requestType}, matcherDataTypes...), cel.BoolType),
cel.SingletonBinaryBinding(CELMatcherRuntimeFunction(funcName, fac))),
}
programOptions := []cel.ProgramOption{
cel.CustomDecorator(CELMatcherDecorator(funcName, fac)),
}
return NewMatcherCELLibrary(envOptions, programOptions), nil
}
// CELMatcherFactory converts a constant CEL value into a RequestMatcher.
type CELMatcherFactory func(data ref.Val) (RequestMatcher, error)
// matcherCELLibrary is a simplistic configurable cel.Library implementation.
type matcherCELLibrary struct {
envOptions []cel.EnvOption
programOptions []cel.ProgramOption
}
// NewMatcherCELLibrary creates a matcherCELLibrary from option sets.
func NewMatcherCELLibrary(envOptions []cel.EnvOption, programOptions []cel.ProgramOption) cel.Library {
return &matcherCELLibrary{
envOptions: envOptions,
programOptions: programOptions,
}
}
func (lib *matcherCELLibrary) CompileOptions() []cel.EnvOption {
return lib.envOptions
}
func (lib *matcherCELLibrary) ProgramOptions() []cel.ProgramOption {
return lib.programOptions
}
// CELMatcherDecorator matches a call overload generated by a CEL macro
// that takes a single argument, and optimizes the implementation to precompile
// the matcher and return a function that references the precompiled and
// provisioned matcher.
func CELMatcherDecorator(funcName string, fac CELMatcherFactory) interpreter.InterpretableDecorator {
return func(i interpreter.Interpretable) (interpreter.Interpretable, error) {
call, ok := i.(interpreter.InterpretableCall)
if !ok {
return i, nil
}
if call.OverloadID() != funcName {
return i, nil
}
callArgs := call.Args()
reqAttr, ok := callArgs[0].(interpreter.InterpretableAttribute)
if !ok {
return nil, errors.New("missing 'request' argument")
}
nsAttr, ok := reqAttr.Attr().(interpreter.NamespacedAttribute)
if !ok {
return nil, errors.New("missing 'request' argument")
}
varNames := nsAttr.CandidateVariableNames()
if len(varNames) != 1 || len(varNames) == 1 && varNames[0] != "request" {
return nil, errors.New("missing 'request' argument")
}
matcherData, ok := callArgs[1].(interpreter.InterpretableConst)
if !ok {
// If the matcher arguments are not constant, then this means
// they contain a Caddy placeholder reference, and the evaluation
// and matcher provisioning should be handled dynamically at runtime.
return i, nil
}
matcher, err := fac(matcherData.Value())
if err != nil {
return nil, err
}
return interpreter.NewCall(
i.ID(), funcName, funcName+"_opt",
[]interpreter.Interpretable{reqAttr},
func(args ...ref.Val) ref.Val {
// The request value, guaranteed to be of type celHTTPRequest
celReq := args[0]
// If needed this call could be changed to convert the value
// to a *http.Request using CEL's ConvertToNative method.
httpReq := celReq.Value().(celHTTPRequest)
return types.Bool(matcher.Match(httpReq.Request))
},
), nil
}
}
// CELMatcherRuntimeFunction creates a function binding for when the input to the matcher
// is dynamically resolved rather than a set of static constant values.
func CELMatcherRuntimeFunction(funcName string, fac CELMatcherFactory) functions.BinaryOp {
return func(celReq, matcherData ref.Val) ref.Val {
matcher, err := fac(matcherData)
if err != nil {
return types.NewErr(err.Error())
}
httpReq := celReq.Value().(celHTTPRequest)
return types.Bool(matcher.Match(httpReq.Request))
}
}
// celMatcherStringListMacroExpander validates that the macro is called
// with a variable number of string arguments (at least one).
//
// The arguments are collected into a single list argument, and the following
// function call is returned: <funcName>(request, [args])
func celMatcherStringListMacroExpander(funcName string) cel.MacroFactory {
return func(eh cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
matchArgs := []ast.Expr{}
if len(args) == 0 {
return nil, eh.NewError(0, "matcher requires at least one argument")
}
for _, arg := range args {
if isCELStringExpr(arg) {
matchArgs = append(matchArgs, arg)
} else {
return nil, eh.NewError(arg.ID(), "matcher arguments must be string constants")
}
}
return eh.NewCall(funcName, eh.NewIdent("request"), eh.NewList(matchArgs...)), nil
}
}
// celMatcherStringMacroExpander validates that the macro is called with a
// single string argument.
//
// The following function call is returned: <funcName>(request, arg)
func celMatcherStringMacroExpander(funcName string) parser.MacroExpander {
return func(eh cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
if len(args) != 1 {
return nil, eh.NewError(0, "matcher requires one argument")
}
if isCELStringExpr(args[0]) {
return eh.NewCall(funcName, eh.NewIdent("request"), args[0]), nil
}
return nil, eh.NewError(args[0].ID(), "matcher argument must be a string literal")
}
}
// celMatcherJSONMacroExpander validates that the macro is called with a
// single map literal argument.
//
// The following function call is returned: <funcName>(request, arg)
func celMatcherJSONMacroExpander(funcName string) parser.MacroExpander {
return func(eh cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
if len(args) != 1 {
return nil, eh.NewError(0, "matcher requires a map literal argument")
}
arg := args[0]
switch arg.Kind() {
case ast.StructKind:
return nil, eh.NewError(arg.ID(),
fmt.Sprintf("matcher input must be a map literal, not a %s", arg.AsStruct().TypeName()))
case ast.MapKind:
mapExpr := arg.AsMap()
for _, entry := range mapExpr.Entries() {
isStringPlaceholder := isCELStringExpr(entry.AsMapEntry().Key())
if !isStringPlaceholder {
return nil, eh.NewError(entry.ID(), "matcher map keys must be string literals")
}
isStringListPlaceholder := isCELStringExpr(entry.AsMapEntry().Value()) ||
isCELStringListLiteral(entry.AsMapEntry().Value())
if !isStringListPlaceholder {
return nil, eh.NewError(entry.AsMapEntry().Value().ID(), "matcher map values must be string or list literals")
}
}
return eh.NewCall(funcName, eh.NewIdent("request"), arg), nil
case ast.UnspecifiedExprKind, ast.CallKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.LiteralKind, ast.SelectKind:
// appeasing the linter :)
}
return nil, eh.NewError(arg.ID(), "matcher requires a map literal argument")
}
}
// CELValueToMapStrList converts a CEL value to a map[string][]string
//
// Earlier validation stages should guarantee that the value has this type
// at compile time, and that the runtime value type is map[string]any.
// The reason for the slight difference in value type is that CEL allows for
// map literals containing heterogeneous values, in this case string and list
// of string.
func CELValueToMapStrList(data ref.Val) (map[string][]string, error) {
mapStrType := reflect.TypeOf(map[string]any{})
mapStrRaw, err := data.ConvertToNative(mapStrType)
if err != nil {
return nil, err
}
mapStrIface := mapStrRaw.(map[string]any)
mapStrListStr := make(map[string][]string, len(mapStrIface))
for k, v := range mapStrIface {
switch val := v.(type) {
case string:
mapStrListStr[k] = []string{val}
case types.String:
mapStrListStr[k] = []string{string(val)}
case []string:
mapStrListStr[k] = val
case []ref.Val:
convVals := make([]string, len(val))
for i, elem := range val {
strVal, ok := elem.(types.String)
if !ok {
return nil, fmt.Errorf("unsupported value type in header match: %T", val)
}
convVals[i] = string(strVal)
}
mapStrListStr[k] = convVals
default:
return nil, fmt.Errorf("unsupported value type in header match: %T", val)
}
}
return mapStrListStr, nil
}
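// For example (illustrative): the CEL map literal
//
//	{'accept': '*/*', 'content-type': ['application/json', 'application/xml']}
//
// converts to
//
//	map[string][]string{"accept": {"*/*"}, "content-type": {"application/json", "application/xml"}}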
// isCELStringExpr indicates whether the expression is a supported string expression
func isCELStringExpr(e ast.Expr) bool {
return isCELStringLiteral(e) || isCELCaddyPlaceholderCall(e) || isCELConcatCall(e)
}
// isCELStringLiteral returns whether the expression is a CEL string literal.
func isCELStringLiteral(e ast.Expr) bool {
switch e.Kind() {
case ast.LiteralKind:
constant := e.AsLiteral()
switch constant.Type() {
case types.StringType:
return true
}
case ast.UnspecifiedExprKind, ast.CallKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.MapKind, ast.SelectKind, ast.StructKind:
// appeasing the linter :)
}
return false
}
// isCELCaddyPlaceholderCall returns whether the expression is a caddy placeholder call.
func isCELCaddyPlaceholderCall(e ast.Expr) bool {
switch e.Kind() {
case ast.CallKind:
call := e.AsCall()
if call.FunctionName() == "caddyPlaceholder" {
return true
}
case ast.UnspecifiedExprKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.LiteralKind, ast.MapKind, ast.SelectKind, ast.StructKind:
// appeasing the linter :)
}
return false
}
// isCELConcatCall tests whether the expression is a concat function (+) with string, placeholder, or
// other concat call arguments.
func isCELConcatCall(e ast.Expr) bool {
switch e.Kind() {
case ast.CallKind:
call := e.AsCall()
if call.Target().Kind() != ast.UnspecifiedExprKind {
return false
}
if call.FunctionName() != operators.Add {
return false
}
for _, arg := range call.Args() {
if !isCELStringExpr(arg) {
return false
}
}
return true
case ast.UnspecifiedExprKind, ast.ComprehensionKind, ast.IdentKind, ast.ListKind, ast.LiteralKind, ast.MapKind, ast.SelectKind, ast.StructKind:
// appeasing the linter :)
}
return false
}
// isCELStringListLiteral returns whether the expression resolves to a list literal
// containing only string constants or a placeholder call.
func isCELStringListLiteral(e ast.Expr) bool {
switch e.Kind() {
case ast.ListKind:
list := e.AsList()
for _, elem := range list.Elements() {
if !isCELStringExpr(elem) {
return false
}
}
return true
case ast.UnspecifiedExprKind, ast.CallKind, ast.ComprehensionKind, ast.IdentKind, ast.LiteralKind, ast.MapKind, ast.SelectKind, ast.StructKind:
// appeasing the linter :)
}
return false
}
// Variables used for replacing Caddy placeholders in CEL
// expressions with a proper CEL function call; this is
// just for syntactic sugar.
var (
placeholderRegexp = regexp.MustCompile(`{([a-zA-Z][\w.-]+)}`)
placeholderExpansion = `caddyPlaceholder(request, "${1}")`
CELTypeJSON = cel.MapType(cel.StringType, cel.DynType)
)
var httpRequestObjectType = cel.ObjectType("http.Request")
// The name of the CEL function which accesses Replacer values.
const placeholderFuncName = "caddyPlaceholder"
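// MatcherNameCtxKey is the key under which the expression matcher's name is
// stored in the provisioning context (see MatchExpression.Provision), so that
// regexp matchers appearing in the expression can default to that name.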
const MatcherNameCtxKey = "matcher_name"
// Interface guards
var (
_ caddy.Provisioner = (*MatchExpression)(nil)
_ RequestMatcher = (*MatchExpression)(nil)
_ caddyfile.Unmarshaler = (*MatchExpression)(nil)
_ json.Marshaler = (*MatchExpression)(nil)
_ json.Unmarshaler = (*MatchExpression)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"errors"
"fmt"
weakrand "math/rand"
"path"
"runtime"
"strings"
"github.com/caddyserver/caddy/v2"
)
// Error is a convenient way for a Handler to populate the
// essential fields of a HandlerError. If err is itself a
// HandlerError, then any essential fields that are not
// set will be populated.
func Error(statusCode int, err error) HandlerError {
const idLen = 9
var he HandlerError
if errors.As(err, &he) {
if he.ID == "" {
he.ID = randString(idLen, true)
}
if he.Trace == "" {
he.Trace = trace()
}
if he.StatusCode == 0 {
he.StatusCode = statusCode
}
return he
}
return HandlerError{
ID: randString(idLen, true),
StatusCode: statusCode,
Err: err,
Trace: trace(),
}
}
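// A minimal, illustrative sketch (not from the original sources) of typical
// use from within a handler in this package; someHandler and doSomething are
// hypothetical:
//
//	func (h someHandler) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error {
//		if err := doSomething(r); err != nil {
//			// attaches an ID, call-site trace, and status for error routes and logs
//			return Error(http.StatusBadGateway, err)
//		}
//		return next.ServeHTTP(w, r)
//	}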
// HandlerError is a serializable representation of
// an error from within an HTTP handler.
type HandlerError struct {
Err error // the original error value and message
StatusCode int // the HTTP status code to associate with this error
ID string // generated; for identifying this error in logs
Trace string // produced from call stack
}
func (e HandlerError) Error() string {
var s string
if e.ID != "" {
s += fmt.Sprintf("{id=%s}", e.ID)
}
if e.Trace != "" {
s += " " + e.Trace
}
if e.StatusCode != 0 {
s += fmt.Sprintf(": HTTP %d", e.StatusCode)
}
if e.Err != nil {
s += ": " + e.Err.Error()
}
return strings.TrimSpace(s)
}
// Unwrap returns the underlying error value. See the `errors` package for info.
func (e HandlerError) Unwrap() error { return e.Err }
// randString returns a string of n random characters.
// It is not even remotely secure OR a proper distribution.
// But it's good enough for some things. It excludes certain
// confusing characters like I, l, 1, 0, O, etc. If sameCase
// is true, then uppercase letters are excluded.
func randString(n int, sameCase bool) string {
if n <= 0 {
return ""
}
dict := []byte("abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRTUVWXY23456789")
if sameCase {
dict = []byte("abcdefghijkmnpqrstuvwxyz0123456789")
}
b := make([]byte, n)
for i := range b {
//nolint:gosec
b[i] = dict[weakrand.Int63()%int64(len(dict))]
}
return string(b)
}
func trace() string {
if pc, file, line, ok := runtime.Caller(2); ok {
filename := path.Base(file)
pkgAndFuncName := path.Base(runtime.FuncForPC(pc).Name())
return fmt.Sprintf("%s (%s:%d)", pkgAndFuncName, filename, line)
}
return ""
}
// ErrorCtxKey is the context key to use when storing
// an error (for use with context.Context).
const ErrorCtxKey = caddy.CtxKey("handler_chain_error")
package caddyhttp
import (
"context"
"crypto/tls"
weakrand "math/rand"
"net"
"net/http"
"sync/atomic"
"time"
"golang.org/x/net/http2"
)
// http2Listener wraps the listener to solve the following problems:
//  1. Serve HTTP/2 natively, without the h2c hack, when the listener handles
//     the TLS connection itself but does not return a *tls.Conn.
//  2. Graceful shutdown. The shutdown logic is copied from the stdlib
//     http.Server; it's an extra maintenance burden, but it could eventually
//     be extracted and reused for h2c graceful shutdown. http2.Server supports
//     graceful shutdown by sending a GOAWAY frame to connected clients, but it
//     doesn't track connection status and requires an explicit call to
//     http2.ConfigureServer.
type http2Listener struct {
cnt uint64
net.Listener
server *http.Server
h2server *http2.Server
}
type connectionStateConn interface {
net.Conn
ConnectionState() tls.ConnectionState
}
func (h *http2Listener) Accept() (net.Conn, error) {
for {
conn, err := h.Listener.Accept()
if err != nil {
return nil, err
}
if csc, ok := conn.(connectionStateConn); ok {
// a raw *tls.Conn returns an empty NegotiatedProtocol here because its
// connection state is only populated after the handshake completes
if csc.ConnectionState().NegotiatedProtocol == http2.NextProtoTLS {
go h.serveHttp2(csc)
continue
}
}
return conn, nil
}
}
func (h *http2Listener) serveHttp2(csc connectionStateConn) {
atomic.AddUint64(&h.cnt, 1)
h.runHook(csc, http.StateNew)
defer func() {
csc.Close()
atomic.AddUint64(&h.cnt, ^uint64(0))
h.runHook(csc, http.StateClosed)
}()
h.h2server.ServeConn(csc, &http2.ServeConnOpts{
Context: h.server.ConnContext(context.Background(), csc),
BaseConfig: h.server,
Handler: h.server.Handler,
})
}
const shutdownPollIntervalMax = 500 * time.Millisecond
func (h *http2Listener) Shutdown(ctx context.Context) error {
pollIntervalBase := time.Millisecond
nextPollInterval := func() time.Duration {
// Add 10% jitter.
//nolint:gosec
interval := pollIntervalBase + time.Duration(weakrand.Intn(int(pollIntervalBase/10)))
// Double and clamp for next time.
pollIntervalBase *= 2
if pollIntervalBase > shutdownPollIntervalMax {
pollIntervalBase = shutdownPollIntervalMax
}
return interval
}
timer := time.NewTimer(nextPollInterval())
defer timer.Stop()
for {
if atomic.LoadUint64(&h.cnt) == 0 {
return nil
}
select {
case <-ctx.Done():
return ctx.Err()
case <-timer.C:
timer.Reset(nextPollInterval())
}
}
}
func (h *http2Listener) runHook(conn net.Conn, state http.ConnState) {
if h.server.ConnState != nil {
h.server.ConnState(conn, state)
}
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"bufio"
"bytes"
"fmt"
"io"
"net"
"net/http"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
func init() {
caddy.RegisterModule(HTTPRedirectListenerWrapper{})
}
// HTTPRedirectListenerWrapper provides HTTP->HTTPS redirects for
// connections that come on the TLS port as an HTTP request,
// by detecting using the first few bytes that it's not a TLS
// handshake, but instead an HTTP request.
//
// This is especially useful when using a non-standard HTTPS port.
// A user may simply type the address in their browser without the
// https:// scheme, which would cause the browser to attempt the
// connection over HTTP, but this would cause a "Client sent an
// HTTP request to an HTTPS server" error response.
//
// This listener wrapper must be placed BEFORE the "tls" listener
// wrapper, for it to work properly.
type HTTPRedirectListenerWrapper struct {
// MaxHeaderBytes is the maximum size to parse from a client's
// HTTP request headers. Default: 1 MB
MaxHeaderBytes int64 `json:"max_header_bytes,omitempty"`
}
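// Illustrative JSON sketch (not from the original sources) showing the
// required ordering in a server's listener_wrappers, with this wrapper
// placed before the "tls" placeholder wrapper:
//
//	"listener_wrappers": [
//		{"wrapper": "http_redirect"},
//		{"wrapper": "tls"}
//	]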
func (HTTPRedirectListenerWrapper) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "caddy.listeners.http_redirect",
New: func() caddy.Module { return new(HTTPRedirectListenerWrapper) },
}
}
func (h *HTTPRedirectListenerWrapper) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
return nil
}
func (h *HTTPRedirectListenerWrapper) WrapListener(l net.Listener) net.Listener {
return &httpRedirectListener{l, h.MaxHeaderBytes}
}
// httpRedirectListener is a listener that checks the first few bytes
// of the request when the server is intended to accept HTTPS requests,
// in order to respond to an HTTP request with a redirect.
type httpRedirectListener struct {
net.Listener
maxHeaderBytes int64
}
// Accept waits for and returns the next connection to the listener,
// wrapping it with a httpRedirectConn.
func (l *httpRedirectListener) Accept() (net.Conn, error) {
c, err := l.Listener.Accept()
if err != nil {
return nil, err
}
maxHeaderBytes := l.maxHeaderBytes
if maxHeaderBytes == 0 {
maxHeaderBytes = 1024 * 1024
}
return &httpRedirectConn{
Conn: c,
limit: maxHeaderBytes,
r: bufio.NewReader(c),
}, nil
}
type httpRedirectConn struct {
net.Conn
once bool
limit int64
r *bufio.Reader
}
// Read peeks at the first few bytes of the connection; if they look like the
// start of a plaintext HTTP request rather than a TLS handshake, it writes an
// HTTP->HTTPS redirect to the same port as the original connection and returns
// an error so the caller closes the connection.
func (c *httpRedirectConn) Read(p []byte) (int, error) {
if c.once {
return c.r.Read(p)
}
// no need to use sync.Once - net.Conn is not read from concurrently.
c.once = true
firstBytes, err := c.r.Peek(5)
if err != nil {
return 0, err
}
// If the request doesn't look like HTTP, then it's probably
// TLS bytes, and we don't need to do anything.
if !firstBytesLookLikeHTTP(firstBytes) {
return c.r.Read(p)
}
// From now on, we can be almost certain the request is HTTP.
// The returned error will be non-nil and callers are expected to
// close the connection.
// Set the read limit, io.MultiReader is needed because
// when resetting, *bufio.Reader discards buffered data.
buffered, _ := c.r.Peek(c.r.Buffered())
mr := io.MultiReader(bytes.NewReader(buffered), c.Conn)
c.r.Reset(io.LimitReader(mr, c.limit))
// Parse the HTTP request, so we can get the Host and URL to redirect to.
req, err := http.ReadRequest(c.r)
if err != nil {
return 0, fmt.Errorf("couldn't read HTTP request")
}
// Build the redirect response, using the same Host and URL,
// but replacing the scheme with https.
headers := make(http.Header)
headers.Add("Location", "https://"+req.Host+req.URL.String())
resp := &http.Response{
Proto: "HTTP/1.0",
Status: "308 Permanent Redirect",
StatusCode: 308,
ProtoMajor: 1,
ProtoMinor: 0,
Header: headers,
}
err = resp.Write(c.Conn)
if err != nil {
return 0, fmt.Errorf("couldn't write HTTP->HTTPS redirect")
}
return 0, fmt.Errorf("redirected HTTP request on HTTPS port")
}
// firstBytesLookLikeHTTP reports whether a TLS record header
// looks like it might've been a misdirected plaintext HTTP request.
func firstBytesLookLikeHTTP(hdr []byte) bool {
switch string(hdr[:5]) {
case "GET /", "HEAD ", "POST ", "PUT /", "OPTIO":
return true
}
return false
}
var (
_ caddy.ListenerWrapper = (*HTTPRedirectListenerWrapper)(nil)
_ caddyfile.Unmarshaler = (*HTTPRedirectListenerWrapper)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"fmt"
"net/http"
"github.com/caddyserver/caddy/v2"
)
func init() {
caddy.RegisterModule(Invoke{})
}
// Invoke implements a handler that compiles and executes a
// named route that was defined on the server.
//
// EXPERIMENTAL: Subject to change or removal.
type Invoke struct {
// Name is the key of the named route to execute
Name string `json:"name,omitempty"`
}
// CaddyModule returns the Caddy module information.
func (Invoke) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.handlers.invoke",
New: func() caddy.Module { return new(Invoke) },
}
}
func (invoke *Invoke) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error {
server := r.Context().Value(ServerCtxKey).(*Server)
if route, ok := server.NamedRoutes[invoke.Name]; ok {
return route.Compile(next).ServeHTTP(w, r)
}
return fmt.Errorf("invoke: route '%s' not found", invoke.Name)
}
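// Illustrative JSON sketch (not from the original sources) of invoking a
// named route from a server's routes; "my-app" is a hypothetical route name:
//
//	"named_routes": {
//		"my-app": {"handle": [{"handler": "static_response", "body": "hello"}]}
//	},
//	"routes": [
//		{"handle": [{"handler": "invoke", "name": "my-app"}]}
//	]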
// Interface guards
var (
_ MiddlewareHandler = (*Invoke)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"errors"
"fmt"
"net"
"net/http"
"net/netip"
"reflect"
"strings"
"github.com/google/cel-go/cel"
"github.com/google/cel-go/common/types/ref"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
// MatchRemoteIP matches requests by the remote IP address,
// i.e. the IP address of the direct connection to Caddy.
type MatchRemoteIP struct {
// The IPs or CIDR ranges to match.
Ranges []string `json:"ranges,omitempty"`
// the cidrs and zones slices should always be kept aligned, with the
// same length and matching indexes, for matching later
cidrs []*netip.Prefix
zones []string
logger *zap.Logger
}
// MatchClientIP matches requests by the client IP address,
// i.e. the resolved address, considering trusted proxies.
type MatchClientIP struct {
// The IPs or CIDR ranges to match.
Ranges []string `json:"ranges,omitempty"`
// the cidrs and zones slices should always be kept aligned, with the
// same length and matching indexes, for matching later
cidrs []*netip.Prefix
zones []string
logger *zap.Logger
}
func init() {
caddy.RegisterModule(MatchRemoteIP{})
caddy.RegisterModule(MatchClientIP{})
}
// CaddyModule returns the Caddy module information.
func (MatchRemoteIP) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.remote_ip",
New: func() caddy.Module { return new(MatchRemoteIP) },
}
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchRemoteIP) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
// iterate to merge multiple matchers into one
for d.Next() {
for d.NextArg() {
if d.Val() == "forwarded" {
return d.Err("the 'forwarded' option is no longer supported; use the 'client_ip' matcher instead")
}
if d.Val() == "private_ranges" {
m.Ranges = append(m.Ranges, PrivateRangesCIDR()...)
continue
}
m.Ranges = append(m.Ranges, d.Val())
}
if d.NextBlock(0) {
return d.Err("malformed remote_ip matcher: blocks are not supported")
}
}
return nil
}
// CELLibrary produces options that expose this matcher for use in CEL
// expression matchers.
//
// Example:
//
// expression remote_ip('192.168.0.0/16', '172.16.0.0/12', '10.0.0.0/8')
func (MatchRemoteIP) CELLibrary(ctx caddy.Context) (cel.Library, error) {
return CELMatcherImpl(
// name of the macro, this is the function name that users see when writing expressions.
"remote_ip",
// name of the function that the macro will be rewritten to call.
"remote_ip_match_request_list",
// internal data type of the MatchRemoteIP value.
[]*cel.Type{cel.ListType(cel.StringType)},
// function to convert a constant list of strings to a MatchRemoteIP instance.
func(data ref.Val) (RequestMatcher, error) {
refStringList := reflect.TypeOf([]string{})
strList, err := data.ConvertToNative(refStringList)
if err != nil {
return nil, err
}
m := MatchRemoteIP{}
for _, input := range strList.([]string) {
if input == "forwarded" {
return nil, errors.New("the 'forwarded' option is no longer supported; use the 'client_ip' matcher instead")
}
m.Ranges = append(m.Ranges, input)
}
err = m.Provision(ctx)
return m, err
},
)
}
// Provision parses m's IP ranges, either from IP or CIDR expressions.
func (m *MatchRemoteIP) Provision(ctx caddy.Context) error {
m.logger = ctx.Logger()
cidrs, zones, err := provisionCidrsZonesFromRanges(m.Ranges)
if err != nil {
return err
}
m.cidrs = cidrs
m.zones = zones
return nil
}
// Match returns true if r matches m.
func (m MatchRemoteIP) Match(r *http.Request) bool {
if r.TLS != nil && !r.TLS.HandshakeComplete {
return false // if the handshake is not finished, we infer 0-RTT, which has not verified the remote IP; it could be spoofed
}
address := r.RemoteAddr
clientIP, zoneID, err := parseIPZoneFromString(address)
if err != nil {
m.logger.Error("getting remote IP", zap.Error(err))
return false
}
matches, zoneFilter := matchIPByCidrZones(clientIP, zoneID, m.cidrs, m.zones)
if !matches && !zoneFilter {
m.logger.Debug("zone ID from remote IP did not match", zap.String("zone", zoneID))
}
return matches
}
// CaddyModule returns the Caddy module information.
func (MatchClientIP) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.client_ip",
New: func() caddy.Module { return new(MatchClientIP) },
}
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchClientIP) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
// iterate to merge multiple matchers into one
for d.Next() {
for d.NextArg() {
if d.Val() == "private_ranges" {
m.Ranges = append(m.Ranges, PrivateRangesCIDR()...)
continue
}
m.Ranges = append(m.Ranges, d.Val())
}
if d.NextBlock(0) {
return d.Err("malformed client_ip matcher: blocks are not supported")
}
}
return nil
}
// CELLibrary produces options that expose this matcher for use in CEL
// expression matchers.
//
// Example:
//
// expression client_ip('192.168.0.0/16', '172.16.0.0/12', '10.0.0.0/8')
func (MatchClientIP) CELLibrary(ctx caddy.Context) (cel.Library, error) {
return CELMatcherImpl(
// name of the macro, this is the function name that users see when writing expressions.
"client_ip",
// name of the function that the macro will be rewritten to call.
"client_ip_match_request_list",
// internal data type of the MatchClientIP value.
[]*cel.Type{cel.ListType(cel.StringType)},
// function to convert a constant list of strings to a MatchClientIP instance.
func(data ref.Val) (RequestMatcher, error) {
refStringList := reflect.TypeOf([]string{})
strList, err := data.ConvertToNative(refStringList)
if err != nil {
return nil, err
}
m := MatchClientIP{
Ranges: strList.([]string),
}
err = m.Provision(ctx)
return m, err
},
)
}
// Provision parses m's IP ranges, either from IP or CIDR expressions.
func (m *MatchClientIP) Provision(ctx caddy.Context) error {
m.logger = ctx.Logger()
cidrs, zones, err := provisionCidrsZonesFromRanges(m.Ranges)
if err != nil {
return err
}
m.cidrs = cidrs
m.zones = zones
return nil
}
// Match returns true if r matches m.
func (m MatchClientIP) Match(r *http.Request) bool {
if r.TLS != nil && !r.TLS.HandshakeComplete {
return false // if the handshake is not finished, we infer 0-RTT, which has not verified the remote IP; it could be spoofed
}
address := GetVar(r.Context(), ClientIPVarKey).(string)
clientIP, zoneID, err := parseIPZoneFromString(address)
if err != nil {
m.logger.Error("getting client IP", zap.Error(err))
return false
}
matches, zoneFilter := matchIPByCidrZones(clientIP, zoneID, m.cidrs, m.zones)
if !matches && !zoneFilter {
m.logger.Debug("zone ID from client IP did not match", zap.String("zone", zoneID))
}
return matches
}
func provisionCidrsZonesFromRanges(ranges []string) ([]*netip.Prefix, []string, error) {
cidrs := []*netip.Prefix{}
zones := []string{}
for _, str := range ranges {
// Exclude the zone_id from the IP
if strings.Contains(str, "%") {
split := strings.Split(str, "%")
str = split[0]
// write zone identifiers in m.zones for matching later
zones = append(zones, split[1])
} else {
zones = append(zones, "")
}
if strings.Contains(str, "/") {
ipNet, err := netip.ParsePrefix(str)
if err != nil {
return nil, nil, fmt.Errorf("parsing CIDR expression '%s': %v", str, err)
}
cidrs = append(cidrs, &ipNet)
} else {
ipAddr, err := netip.ParseAddr(str)
if err != nil {
return nil, nil, fmt.Errorf("invalid IP address: '%s': %v", str, err)
}
ipNew := netip.PrefixFrom(ipAddr, ipAddr.BitLen())
cidrs = append(cidrs, &ipNew)
}
}
return cidrs, zones, nil
}
func parseIPZoneFromString(address string) (netip.Addr, string, error) {
ipStr, _, err := net.SplitHostPort(address)
if err != nil {
ipStr = address // OK; probably didn't have a port
}
// Some IPv6 addresses can contain zone identifiers at the end,
// which are separated with "%"
zoneID := ""
if strings.Contains(ipStr, "%") {
split := strings.Split(ipStr, "%")
ipStr = split[0]
zoneID = split[1]
}
ipAddr, err := netip.ParseAddr(ipStr)
if err != nil {
return netip.IPv4Unspecified(), "", err
}
return ipAddr, zoneID, nil
}
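// matchIPByCidrZones checks clientIP against each prefix in cidrs, where a
// prefix only counts as a match if its corresponding entry in zones is empty
// or equal to zoneID. The first return value reports whether any prefix
// matched; the second is false when some prefix contained the IP but was
// ruled out by its zone filter, which callers use to log zone mismatches.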
func matchIPByCidrZones(clientIP netip.Addr, zoneID string, cidrs []*netip.Prefix, zones []string) (bool, bool) {
zoneFilter := true
for i, ipRange := range cidrs {
if ipRange.Contains(clientIP) {
// Check if there are zone filters assigned and if they match.
if zones[i] == "" || zoneID == zones[i] {
return true, false
}
zoneFilter = false
}
}
return false, zoneFilter
}
// Interface guards
var (
_ RequestMatcher = (*MatchRemoteIP)(nil)
_ caddy.Provisioner = (*MatchRemoteIP)(nil)
_ caddyfile.Unmarshaler = (*MatchRemoteIP)(nil)
_ CELLibraryProducer = (*MatchRemoteIP)(nil)
_ RequestMatcher = (*MatchClientIP)(nil)
_ caddy.Provisioner = (*MatchClientIP)(nil)
_ caddyfile.Unmarshaler = (*MatchClientIP)(nil)
_ CELLibraryProducer = (*MatchClientIP)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"fmt"
"net/http"
"net/netip"
"strings"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
func init() {
caddy.RegisterModule(StaticIPRange{})
}
// IPRangeSource gets a list of IP ranges.
//
// The request is passed as an argument to allow plugin implementations
// to have more flexibility. But, a plugin MUST NOT modify the request.
// The caller will have read the `r.RemoteAddr` before getting IP ranges.
//
// This should be a very fast function -- instant if possible.
// The list of IP ranges should be sourced as soon as possible if loaded
// from an external source (i.e. initially loaded during Provisioning),
// so that it's ready to be used when requests start getting handled.
// A read lock should probably be used to get the cached value if the
// ranges can change at runtime (e.g. periodically refreshed).
// Using a `caddy.UsagePool` may be a good idea to avoid having to refetch
// the values when a config reload occurs, which would waste time.
//
// If the list of IP ranges cannot be sourced, then provisioning SHOULD
// fail. Getting the IP ranges at runtime MUST NOT fail, because it would
// cancel incoming requests. If refreshing the list fails, then the
// previous list of IP ranges should continue to be returned so that the
// server can continue to operate normally.
type IPRangeSource interface {
GetIPRanges(*http.Request) []netip.Prefix
}
// StaticIPRange provides a static range of IP address prefixes (CIDRs).
type StaticIPRange struct {
// A static list of IP ranges (supports CIDR notation).
Ranges []string `json:"ranges,omitempty"`
// Holds the parsed CIDR ranges from Ranges.
ranges []netip.Prefix
}
// CaddyModule returns the Caddy module information.
func (StaticIPRange) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.ip_sources.static",
New: func() caddy.Module { return new(StaticIPRange) },
}
}
func (s *StaticIPRange) Provision(ctx caddy.Context) error {
for _, str := range s.Ranges {
prefix, err := CIDRExpressionToPrefix(str)
if err != nil {
return err
}
s.ranges = append(s.ranges, prefix)
}
return nil
}
func (s *StaticIPRange) GetIPRanges(_ *http.Request) []netip.Prefix {
return s.ranges
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *StaticIPRange) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
if !d.Next() {
return nil
}
for d.NextArg() {
if d.Val() == "private_ranges" {
m.Ranges = append(m.Ranges, PrivateRangesCIDR()...)
continue
}
m.Ranges = append(m.Ranges, d.Val())
}
return nil
}
// CIDRExpressionToPrefix takes a string which could be either a
// CIDR expression or a single IP address, and returns a netip.Prefix.
func CIDRExpressionToPrefix(expr string) (netip.Prefix, error) {
// Having a slash means it should be a CIDR expression
if strings.Contains(expr, "/") {
prefix, err := netip.ParsePrefix(expr)
if err != nil {
return netip.Prefix{}, fmt.Errorf("parsing CIDR expression: '%s': %v", expr, err)
}
return prefix, nil
}
// Otherwise it's likely a single IP address
parsed, err := netip.ParseAddr(expr)
if err != nil {
return netip.Prefix{}, fmt.Errorf("invalid IP address: '%s': %v", expr, err)
}
prefix := netip.PrefixFrom(parsed, parsed.BitLen())
return prefix, nil
}
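// For example (illustrative values):
//
//	CIDRExpressionToPrefix("10.0.0.0/8") // the 10.0.0.0/8 prefix
//	CIDRExpressionToPrefix("127.0.0.1")  // treated as 127.0.0.1/32
//	CIDRExpressionToPrefix("::1")        // treated as ::1/128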
// PrivateRangesCIDR returns a list of private CIDR range
// strings, which can be used as a configuration shortcut.
func PrivateRangesCIDR() []string {
return []string{
"192.168.0.0/16",
"172.16.0.0/12",
"10.0.0.0/8",
"127.0.0.1/8",
"fd00::/8",
"::1",
}
}
// Interface guards
var (
_ caddy.Provisioner = (*StaticIPRange)(nil)
_ caddyfile.Unmarshaler = (*StaticIPRange)(nil)
_ IPRangeSource = (*StaticIPRange)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"encoding/json"
"errors"
"net"
"net/http"
"strings"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"github.com/caddyserver/caddy/v2"
)
// ServerLogConfig describes a server's logging configuration. If
// enabled without customization, all requests to this server are
// logged to the default logger; logger destinations may be
// customized per-request-host.
type ServerLogConfig struct {
// The default logger name for all logs emitted by this server for
// hostnames that are not in the logger_names map.
DefaultLoggerName string `json:"default_logger_name,omitempty"`
// LoggerNames maps request hostnames to one or more custom logger
// names. For example, a mapping of `"example.com": ["example"]` would
// cause access logs from requests with a Host of example.com to be
// emitted by a logger named "http.log.access.example". If there are
// multiple logger names, then the log will be emitted to all of them.
// If a logger name is empty, the default logger is used, i.e.
// the logger "http.log.access".
//
// Keys must be hostnames (without ports), and may contain wildcards
// to match subdomains. The value is an array of logger names.
//
// For backwards compatibility, if the value is a string, it is treated
// as a single-element array.
LoggerNames map[string]StringArray `json:"logger_names,omitempty"`
// By default, all requests to this server will be logged if
// access logging is enabled. This field lists the request
// hosts for which access logging should be disabled.
SkipHosts []string `json:"skip_hosts,omitempty"`
// If true, requests to any host not appearing in the
// logger_names map will not be logged.
SkipUnmappedHosts bool `json:"skip_unmapped_hosts,omitempty"`
// If true, credentials that are otherwise omitted will be logged.
// Credentials are defined by https://fetch.spec.whatwg.org/#credentials,
// and include certain request and response headers, i.e. `Cookie`,
// `Set-Cookie`, `Authorization`, and `Proxy-Authorization`.
ShouldLogCredentials bool `json:"should_log_credentials,omitempty"`
// Log each individual handler that is invoked.
// Requires that the log emit at DEBUG level.
//
// NOTE: This may log the configuration of your
// HTTP handler modules; do not enable this in
// insecure contexts when there is sensitive
// data in the configuration.
//
// EXPERIMENTAL: Subject to change or removal.
Trace bool `json:"trace,omitempty"`
}
// wrapLogger wraps logger in one or more loggers named
// according to user preferences for the given host.
func (slc ServerLogConfig) wrapLogger(logger *zap.Logger, req *http.Request) []*zap.Logger {
// using the `log_name` directive or the `access_logger_names` variable,
// the logger names can be overridden for the current request
if names := GetVar(req.Context(), AccessLoggerNameVarKey); names != nil {
if namesSlice, ok := names.([]any); ok {
loggers := make([]*zap.Logger, 0, len(namesSlice))
for _, loggerName := range namesSlice {
// no name, use the default logger
if loggerName == "" {
loggers = append(loggers, logger)
continue
}
// make a logger with the given name
loggers = append(loggers, logger.Named(loggerName.(string)))
}
return loggers
}
}
// get the hostname from the request, with the port number stripped
host, _, err := net.SplitHostPort(req.Host)
if err != nil {
host = req.Host
}
// get the logger names for this host from the config
hosts := slc.getLoggerHosts(host)
// make a list of named loggers, or the default logger
loggers := make([]*zap.Logger, 0, len(hosts))
for _, loggerName := range hosts {
// no name, use the default logger
if loggerName == "" {
loggers = append(loggers, logger)
continue
}
// make a logger with the given name
loggers = append(loggers, logger.Named(loggerName))
}
return loggers
}
func (slc ServerLogConfig) getLoggerHosts(host string) []string {
// try the exact hostname first
if hosts, ok := slc.LoggerNames[host]; ok {
return hosts
}
// try matching wildcard domains if other non-specific loggers exist
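// for example (illustrative host): for "foo.example.com", after the exact
// lookup above has failed, the lookups tried here are "*.example.com",
// then "*.*.com", then "*.*.*", because each label is replaced with "*"
// cumulatively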
labels := strings.Split(host, ".")
for i := range labels {
if labels[i] == "" {
continue
}
labels[i] = "*"
wildcardHost := strings.Join(labels, ".")
if hosts, ok := slc.LoggerNames[wildcardHost]; ok {
return hosts
}
}
return []string{slc.DefaultLoggerName}
}
func (slc *ServerLogConfig) clone() *ServerLogConfig {
clone := &ServerLogConfig{
DefaultLoggerName: slc.DefaultLoggerName,
LoggerNames: make(map[string]StringArray),
SkipHosts: append([]string{}, slc.SkipHosts...),
SkipUnmappedHosts: slc.SkipUnmappedHosts,
ShouldLogCredentials: slc.ShouldLogCredentials,
}
for k, v := range slc.LoggerNames {
clone.LoggerNames[k] = append([]string{}, v...)
}
return clone
}
// StringArray is a slice of strings, but also accepts
// a single string as a value when JSON unmarshaling,
// converting it to a slice of one string.
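//
// For example (an illustrative value), both `"example"` and `["example"]`
// decode to the one-element StringArray `["example"]`.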
type StringArray []string
// UnmarshalJSON satisfies json.Unmarshaler.
func (sa *StringArray) UnmarshalJSON(b []byte) error {
var jsonObj any
err := json.Unmarshal(b, &jsonObj)
if err != nil {
return err
}
switch obj := jsonObj.(type) {
case string:
*sa = StringArray([]string{obj})
return nil
case []any:
s := make([]string, 0, len(obj))
for _, v := range obj {
value, ok := v.(string)
if !ok {
return errors.New("unsupported type")
}
s = append(s, value)
}
*sa = StringArray(s)
return nil
}
return errors.New("unsupported type")
}
// errLogValues inspects err and returns the status code
// to use, the error log message, and any extra fields.
// If err is a HandlerError, the returned values will
// have richer information.
func errLogValues(err error) (status int, msg string, fields []zapcore.Field) {
var handlerErr HandlerError
if errors.As(err, &handlerErr) {
status = handlerErr.StatusCode
if handlerErr.Err == nil {
msg = err.Error()
} else {
msg = handlerErr.Err.Error()
}
fields = []zapcore.Field{
zap.Int("status", handlerErr.StatusCode),
zap.String("err_id", handlerErr.ID),
zap.String("err_trace", handlerErr.Trace),
}
return
}
status = http.StatusInternalServerError
msg = err.Error()
return
}
// ExtraLogFields is a list of extra fields to log with every request.
type ExtraLogFields struct {
fields []zapcore.Field
}
// Add adds a field to the list of extra fields to log.
func (e *ExtraLogFields) Add(field zap.Field) {
e.fields = append(e.fields, field)
}
// Set sets a field in the list of extra fields to log.
// If the field already exists, it is replaced.
func (e *ExtraLogFields) Set(field zap.Field) {
for i := range e.fields {
if e.fields[i].Key == field.Key {
e.fields[i] = field
return
}
}
e.fields = append(e.fields, field)
}
const (
// Variable name used to indicate that this request
// should be omitted from the access logs
LogSkipVar string = "log_skip"
// For adding additional fields to the access logs
ExtraLogFieldsCtxKey caddy.CtxKey = "extra_log_fields"
// Variable name used to indicate the logger to be used
AccessLoggerNameVarKey string = "access_logger_names"
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"crypto/tls"
"net"
"net/http"
"strings"
"go.uber.org/zap/zapcore"
)
// LoggableHTTPRequest makes an HTTP request loggable with zap.Object().
type LoggableHTTPRequest struct {
*http.Request
ShouldLogCredentials bool
}
// MarshalLogObject satisfies the zapcore.ObjectMarshaler interface.
func (r LoggableHTTPRequest) MarshalLogObject(enc zapcore.ObjectEncoder) error {
ip, port, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
ip = r.RemoteAddr
port = ""
}
enc.AddString("remote_ip", ip)
enc.AddString("remote_port", port)
if ip, ok := GetVar(r.Context(), ClientIPVarKey).(string); ok {
enc.AddString("client_ip", ip)
}
enc.AddString("proto", r.Proto)
enc.AddString("method", r.Method)
enc.AddString("host", r.Host)
enc.AddString("uri", r.RequestURI)
enc.AddObject("headers", LoggableHTTPHeader{
Header: r.Header,
ShouldLogCredentials: r.ShouldLogCredentials,
})
if r.TLS != nil {
enc.AddObject("tls", LoggableTLSConnState(*r.TLS))
}
return nil
}
// LoggableHTTPHeader makes an HTTP header loggable with zap.Object().
// Headers with potentially sensitive information (Cookie, Set-Cookie,
// Authorization, and Proxy-Authorization) are logged with empty values.
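//
// For example (an illustrative request), a header of `Cookie: session=abc123`
// is logged as `"Cookie": ["REDACTED"]` unless ShouldLogCredentials is true.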
type LoggableHTTPHeader struct {
http.Header
ShouldLogCredentials bool
}
// MarshalLogObject satisfies the zapcore.ObjectMarshaler interface.
func (h LoggableHTTPHeader) MarshalLogObject(enc zapcore.ObjectEncoder) error {
if h.Header == nil {
return nil
}
for key, val := range h.Header {
if !h.ShouldLogCredentials {
switch strings.ToLower(key) {
case "cookie", "set-cookie", "authorization", "proxy-authorization":
val = []string{"REDACTED"} // see #5669. I still think ▒▒▒▒ would be cool.
}
}
enc.AddArray(key, LoggableStringArray(val))
}
return nil
}
// LoggableStringArray makes a slice of strings marshalable for logging.
type LoggableStringArray []string
// MarshalLogArray satisfies the zapcore.ArrayMarshaler interface.
func (sa LoggableStringArray) MarshalLogArray(enc zapcore.ArrayEncoder) error {
if sa == nil {
return nil
}
for _, s := range sa {
enc.AppendString(s)
}
return nil
}
// LoggableTLSConnState makes a TLS connection state loggable with zap.Object().
type LoggableTLSConnState tls.ConnectionState
// MarshalLogObject satisfies the zapcore.ObjectMarshaler interface.
func (t LoggableTLSConnState) MarshalLogObject(enc zapcore.ObjectEncoder) error {
enc.AddBool("resumed", t.DidResume)
enc.AddUint16("version", t.Version)
enc.AddUint16("cipher_suite", t.CipherSuite)
enc.AddString("proto", t.NegotiatedProtocol)
enc.AddString("server_name", t.ServerName)
if len(t.PeerCertificates) > 0 {
enc.AddString("client_common_name", t.PeerCertificates[0].Subject.CommonName)
enc.AddString("client_serial", t.PeerCertificates[0].SerialNumber.String())
}
return nil
}
// Interface guards
var (
_ zapcore.ObjectMarshaler = (*LoggableHTTPRequest)(nil)
_ zapcore.ObjectMarshaler = (*LoggableHTTPHeader)(nil)
_ zapcore.ArrayMarshaler = (*LoggableStringArray)(nil)
_ zapcore.ObjectMarshaler = (*LoggableTLSConnState)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"encoding/json"
"errors"
"fmt"
"net"
"net/http"
"net/textproto"
"net/url"
"path"
"reflect"
"regexp"
"runtime"
"slices"
"sort"
"strconv"
"strings"
"github.com/google/cel-go/cel"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"golang.org/x/net/idna"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
type (
// MatchHost matches requests by the Host value (case-insensitive).
//
// When used in a top-level HTTP route,
// [qualifying domain names](/docs/automatic-https#hostname-requirements)
// may trigger [automatic HTTPS](/docs/automatic-https), which automatically
// provisions and renews certificates for you. Before doing this, you
// should ensure that DNS records for these domains are properly configured,
// especially A/AAAA records pointing at your server.
//
// Automatic HTTPS can be
// [customized or disabled](/docs/modules/http#servers/automatic_https).
//
// Wildcards (`*`) may be used to represent exactly one label of the
// hostname, in accordance with RFC 1034 (because host matchers are also
// used for automatic HTTPS which influences TLS certificates). Thus,
// a host of `*` matches hosts like `localhost` or `internal` but not
// `example.com`. To catch all hosts, omit the host matcher entirely.
//
// The wildcard can be useful for matching all subdomains, for example:
// `*.example.com` matches `foo.example.com` but not `foo.bar.example.com`.
//
// Duplicate entries will return an error.
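//
// An illustrative matcher-set entry using this matcher (domain names are
// examples only):
//
// ```json
// {
// "host": ["example.com", "*.example.com"]
// }
// ```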
MatchHost []string
// MatchPath case-insensitively matches requests by the URI's path. Path
// matching is exact, not prefix-based, giving you more control and clarity
// over matching. Wildcards (`*`) may be used:
//
// - At the end only, for a prefix match (`/prefix/*`)
// - At the beginning only, for a suffix match (`*.suffix`)
// - On both sides only, for a substring match (`*/contains/*`)
// - In the middle, for a globular match (`/accounts/*/info`)
//
// Slashes are significant; i.e. `/foo*` matches `/foo`, `/foo/`, `/foo/bar`,
// and `/foobar`; but `/foo/*` does not match `/foo` or `/foobar`. Valid
// paths start with a slash `/`.
//
// Because there are, in general, multiple possible escaped forms of any
// path, path matchers operate in unescaped space; that is, path matchers
// should be written in their unescaped form to prevent ambiguities and
// possible security issues, as all request paths will be normalized to
// their unescaped forms before matcher evaluation.
//
// However, escape sequences in a match pattern are supported; they are
// compared with the request's raw/escaped path for those bytes only.
// In other words, a matcher of `/foo%2Fbar` will match a request path
// of precisely `/foo%2Fbar`, but not `/foo/bar`. It follows that matching
// the literal percent sign (%) in normalized space can be done using the
// escaped form, `%25`.
//
// Even though wildcards (`*`) operate in the normalized space, the special
// escaped wildcard (`%*`), which is not a valid escape sequence, may be
// used in place of a span that should NOT be decoded; that is, `/bands/%*`
// will match `/bands/AC%2fDC` whereas `/bands/*` will not.
//
// This matcher is fast, so it does not support regular expressions or
// capture groups. For slower but more powerful matching, use the
// path_regexp matcher. (Note that due to the special treatment of
// escape sequences in matcher patterns, they may perform slightly slower
// in high-traffic environments.)
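//
// A sketch of a matcher-set entry using this matcher (patterns are examples
// only):
//
// ```json
// {
// "path": ["/foo/*", "*.php", "*/contains/*", "/accounts/*/info"]
// }
// ```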
MatchPath []string
// MatchPathRE matches requests by a regular expression on the URI's path.
// Path matching is performed in the unescaped (decoded) form of the path.
//
// Upon a match, it adds placeholders to the request: `{http.regexp.name.capture_group}`
// where `name` is the regular expression's name, and `capture_group` is either
// the named or positional capture group from the expression itself. If no name
// is given, then the placeholder omits the name: `{http.regexp.capture_group}`
// (potentially leading to collisions).
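//
// For example (an illustrative pattern), the configuration
//
// ```json
// {
// "path_regexp": {"name": "ver", "pattern": "^/v(?P<major>\\d+)"}
// }
// ```
//
// would, upon a match, populate placeholders such as `{http.regexp.ver.1}`
// and `{http.regexp.ver.major}`.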
MatchPathRE struct{ MatchRegexp }
// MatchMethod matches requests by the method.
MatchMethod []string
// MatchQuery matches requests by the URI's query string. It takes a JSON object
// keyed by the query keys, with an array of string values to match for that key.
// Query key matches are exact, but wildcards may be used for value matches. Both
// keys and values may be placeholders.
//
// An example of the structure to match `?key=value&topic=api&query=something` is:
//
// ```json
// {
// "key": ["value"],
// "topic": ["api"],
// "query": ["*"]
// }
// ```
//
// Invalid query strings, including those with invalid escape sequences or illegal characters
// like semicolons, will fail to parse and thus fail to match.
//
// **NOTE:** Notice that query string values are arrays, not singular values. This is
// because repeated keys are valid in query strings, and each one may have a
// different value. This matcher will match for a key if any one of its configured
// values is assigned in the query string. Backend applications relying on query
// strings MUST take into consideration that query string values are arrays and can
// have multiple values.
MatchQuery url.Values
// MatchHeader matches requests by header fields. The key is the field
// name and the array is the list of field values. It performs fast,
// exact string comparisons of the field values. Fast prefix, suffix,
// and substring matches can also be done by suffixing, prefixing, or
// surrounding the value with the wildcard `*` character, respectively.
// If a list is null, the header must not exist. If the list is empty,
// the field must simply exist, regardless of its value.
//
// **NOTE:** Notice that header values are arrays, not singular values. This is
// because repeated fields are valid in headers, and each one may have a
// different value. This matcher will match for a field if any one of its configured
// values matches in the header. Backend applications relying on headers MUST take
// into consideration that header field values are arrays and can have multiple
// values.
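//
// An illustrative matcher-set entry (field names and values are examples
// only): `Via` must simply exist, `X-Internal` must not exist, and
// `Content-Type` must have a value starting with `image/`:
//
// ```json
// {
// "header": {
// "Via": [],
// "X-Internal": null,
// "Content-Type": ["image/*"]
// }
// }
// ```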
MatchHeader http.Header
// MatchHeaderRE matches requests by a regular expression on header fields.
//
// Upon a match, it adds placeholders to the request: `{http.regexp.name.capture_group}`
// where `name` is the regular expression's name, and `capture_group` is either
// the named or positional capture group from the expression itself. If no name
// is given, then the placeholder omits the name: `{http.regexp.capture_group}`
// (potentially leading to collisions).
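//
// A sketch of a matcher-set entry (field and pattern are illustrative):
//
// ```json
// {
// "header_regexp": {
// "User-Agent": {"name": "ua", "pattern": "curl/(\\d+)"}
// }
// }
// ```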
MatchHeaderRE map[string]*MatchRegexp
// MatchProtocol matches requests by protocol. Recognized values are
// "http", "https", and "grpc" for broad protocol matches, or specific
// HTTP versions can be specified like so: "http/1.0", "http/1.1",
// "http/2", "http/3", or minimum versions: "http/2+", etc.
MatchProtocol string
// MatchTLS matches HTTP requests based on the underlying
// TLS connection state. If this matcher is specified but
// the request did not come over TLS, it will never match.
// If this matcher is specified but is empty and the request
// did come in over TLS, it will always match.
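//
// A sketch of a matcher-set entry that requires a completed handshake,
// rejecting 0-RTT early data:
//
// ```json
// {
// "tls": {"handshake_complete": true}
// }
// ```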
MatchTLS struct {
// Matches if the TLS handshake has completed. QUIC 0-RTT early
// data may arrive before the handshake completes. Generally, it
// is unsafe to replay these requests if they are not idempotent;
// additionally, the remote IP of early data packets can more
// easily be spoofed. It is conventional to respond with HTTP 425
// Too Early if the request cannot risk being processed in this
// state.
HandshakeComplete *bool `json:"handshake_complete,omitempty"`
}
// MatchNot matches requests by negating the results of its matcher
// sets. A single "not" matcher takes one or more matcher sets. Each
// matcher set is OR'ed; in other words, if any matcher set returns
// true, the final result of the "not" matcher is false. Individual
// matchers within a set work the same (i.e. different matchers in
// the same set are AND'ed).
//
// NOTE: The generated docs which describe the structure of this
// module are wrong because of how this type unmarshals JSON in a
// custom way. The correct structure is:
//
// ```json
// [
// {},
// {}
// ]
// ```
//
// where each of the array elements is a matcher set, i.e. an
// object keyed by matcher name.
MatchNot struct {
MatcherSetsRaw []caddy.ModuleMap `json:"-" caddy:"namespace=http.matchers"`
MatcherSets []MatcherSet `json:"-"`
}
)
func init() {
caddy.RegisterModule(MatchHost{})
caddy.RegisterModule(MatchPath{})
caddy.RegisterModule(MatchPathRE{})
caddy.RegisterModule(MatchMethod{})
caddy.RegisterModule(MatchQuery{})
caddy.RegisterModule(MatchHeader{})
caddy.RegisterModule(MatchHeaderRE{})
caddy.RegisterModule(new(MatchProtocol))
caddy.RegisterModule(MatchTLS{})
caddy.RegisterModule(MatchNot{})
}
// CaddyModule returns the Caddy module information.
func (MatchHost) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.host",
New: func() caddy.Module { return new(MatchHost) },
}
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchHost) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
// iterate to merge multiple matchers into one
for d.Next() {
*m = append(*m, d.RemainingArgs()...)
if d.NextBlock(0) {
return d.Err("malformed host matcher: blocks are not supported")
}
}
return nil
}
// Provision sets up and validates m, including making it more efficient for large lists.
func (m MatchHost) Provision(_ caddy.Context) error {
// check for duplicates; they are nonsensical and reduce efficiency
// (we could just remove them, but the user should know their config is erroneous)
seen := make(map[string]int, len(m))
for i, host := range m {
asciiHost, err := idna.ToASCII(host)
if err != nil {
return fmt.Errorf("converting hostname '%s' to ASCII: %v", host, err)
}
if asciiHost != host {
m[i] = asciiHost
}
normalizedHost := strings.ToLower(asciiHost)
if firstI, ok := seen[normalizedHost]; ok {
return fmt.Errorf("host at index %d is repeated at index %d: %s", firstI, i, host)
}
seen[normalizedHost] = i
}
if m.large() {
// sort the slice lexicographically, grouping "fuzzy" entries (wildcards and placeholders)
// at the front of the list; this allows us to use binary search for exact matches, which
// we have seen from experience is the most common kind of value in large lists; and any
// other kinds of values (wildcards and placeholders) are grouped in front so the linear
// search should find a match fairly quickly
sort.Slice(m, func(i, j int) bool {
iInexact, jInexact := m.fuzzy(m[i]), m.fuzzy(m[j])
if iInexact && !jInexact {
return true
}
if !iInexact && jInexact {
return false
}
return m[i] < m[j]
})
}
return nil
}
// Match returns true if r matches m.
func (m MatchHost) Match(r *http.Request) bool {
reqHost, _, err := net.SplitHostPort(r.Host)
if err != nil {
// OK; probably didn't have a port
reqHost = r.Host
// make sure we strip the brackets from IPv6 addresses
reqHost = strings.TrimPrefix(reqHost, "[")
reqHost = strings.TrimSuffix(reqHost, "]")
}
if m.large() {
// fast path: locate exact match using binary search (about 100-1000x faster for large lists)
pos := sort.Search(len(m), func(i int) bool {
if m.fuzzy(m[i]) {
return false
}
return m[i] >= reqHost
})
if pos < len(m) && m[pos] == reqHost {
return true
}
}
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
outer:
for _, host := range m {
// fast path: if matcher is large, we already know we don't have an exact
// match, so we're only looking for fuzzy match now, which should be at the
// front of the list; if we have reached a value that is not fuzzy, there
// will be no match and we can short-circuit for efficiency
if m.large() && !m.fuzzy(host) {
break
}
host = repl.ReplaceAll(host, "")
if strings.Contains(host, "*") {
patternParts := strings.Split(host, ".")
incomingParts := strings.Split(reqHost, ".")
if len(patternParts) != len(incomingParts) {
continue
}
for i := range patternParts {
if patternParts[i] == "*" {
continue
}
if !strings.EqualFold(patternParts[i], incomingParts[i]) {
continue outer
}
}
return true
} else if strings.EqualFold(reqHost, host) {
return true
}
}
return false
}
// CELLibrary produces options that expose this matcher for use in CEL
// expression matchers.
//
// Example:
//
// expression host('localhost')
func (MatchHost) CELLibrary(ctx caddy.Context) (cel.Library, error) {
return CELMatcherImpl(
"host",
"host_match_request_list",
[]*cel.Type{cel.ListType(cel.StringType)},
func(data ref.Val) (RequestMatcher, error) {
refStringList := reflect.TypeOf([]string{})
strList, err := data.ConvertToNative(refStringList)
if err != nil {
return nil, err
}
matcher := MatchHost(strList.([]string))
err = matcher.Provision(ctx)
return matcher, err
},
)
}
// fuzzy returns true if the given hostname h is not a specific
// hostname, e.g. has placeholders or wildcards.
func (MatchHost) fuzzy(h string) bool { return strings.ContainsAny(h, "{*") }
// large returns true if m is considered to be large. Optimizing
// the matcher for smaller lists has diminishing returns.
// See related benchmark function in test file to conduct experiments.
func (m MatchHost) large() bool { return len(m) > 100 }
// CaddyModule returns the Caddy module information.
func (MatchPath) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.path",
New: func() caddy.Module { return new(MatchPath) },
}
}
// Provision lower-cases the paths in m to ensure case-insensitive matching.
func (m MatchPath) Provision(_ caddy.Context) error {
for i := range m {
if m[i] == "*" && i > 0 {
// will always match, so just put it first
m[0] = m[i]
break
}
m[i] = strings.ToLower(m[i])
}
return nil
}
// Match returns true if r matches m.
func (m MatchPath) Match(r *http.Request) bool {
// Even though RFC 9110 says that path matching is case-sensitive
// (https://www.rfc-editor.org/rfc/rfc9110.html#section-4.2.3),
// we do case-insensitive matching to mitigate security issues
// related to differences between operating systems, applications,
// etc; if case-sensitive matching is needed, the regex matcher
// can be used instead.
reqPath := strings.ToLower(r.URL.Path)
// See #2917; Windows ignores trailing dots and spaces
// when accessing files (sigh), potentially causing a
// security risk (cry) if PHP files end up being served
// as static files, exposing the source code, instead of
// being matched by *.php to be treated as PHP scripts.
if runtime.GOOS == "windows" { // issue #5613
reqPath = strings.TrimRight(reqPath, ". ")
}
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
for _, matchPattern := range m {
matchPattern = repl.ReplaceAll(matchPattern, "")
// special case: whole path is wildcard; this is unnecessary
// as it matches all requests, which is the same as no matcher
if matchPattern == "*" {
return true
}
// Clean the path, merge doubled slashes, etc.
// This ensures maliciously crafted requests can't bypass
// the path matcher. See #4407. Good security posture
// requires that we should do all we can to reduce any
// funny-looking paths into "normalized" forms such that
// weird variants can't sneak by.
//
// How we clean the path depends on the kind of pattern:
// we either merge slashes or we don't. If the pattern
// has double slashes, we preserve them in the path.
//
// TODO: Despite the fact that the *vast* majority of path
// matchers have only 1 pattern, a possible optimization is
// to remember the cleaned form of the path for future
// iterations; it's just that the way we clean depends on
// the kind of pattern.
mergeSlashes := !strings.Contains(matchPattern, "//")
// if '%' appears in the match pattern, we interpret that to mean
// the intent is to compare that part of the path in raw/escaped
// space; i.e. "%40"=="%40", not "@", and "%2F"=="%2F", not "/"
if strings.Contains(matchPattern, "%") {
reqPathForPattern := CleanPath(r.URL.EscapedPath(), mergeSlashes)
if m.matchPatternWithEscapeSequence(reqPathForPattern, matchPattern) {
return true
}
// doing prefix/suffix/substring matches doesn't make sense
continue
}
reqPathForPattern := CleanPath(reqPath, mergeSlashes)
// for substring, prefix, and suffix matching, only perform those
// special, fast matches if they are the only wildcards in the pattern;
// otherwise we assume a globular match if any * appears in the middle
// special case: first and last characters are wildcard,
// treat it as a fast substring match
if strings.Count(matchPattern, "*") == 2 &&
strings.HasPrefix(matchPattern, "*") &&
strings.HasSuffix(matchPattern, "*") {
if strings.Contains(reqPathForPattern, matchPattern[1:len(matchPattern)-1]) {
return true
}
continue
}
// only perform prefix/suffix match if it is the only wildcard...
// I think that is more correct most of the time
if strings.Count(matchPattern, "*") == 1 {
// special case: first character is a wildcard,
// treat it as a fast suffix match
if strings.HasPrefix(matchPattern, "*") {
if strings.HasSuffix(reqPathForPattern, matchPattern[1:]) {
return true
}
continue
}
// special case: last character is a wildcard,
// treat it as a fast prefix match
if strings.HasSuffix(matchPattern, "*") {
if strings.HasPrefix(reqPathForPattern, matchPattern[:len(matchPattern)-1]) {
return true
}
continue
}
}
// at last, use globular matching, which also is exact matching
// if there are no glob/wildcard chars; we ignore the error here
// because we can't handle it anyway
matches, _ := path.Match(matchPattern, reqPathForPattern)
if matches {
return true
}
}
return false
}
func (MatchPath) matchPatternWithEscapeSequence(escapedPath, matchPath string) bool {
// We would just compare the pattern against r.URL.Path,
// but the pattern contains %, indicating that we should
// compare at least some part of the path in raw/escaped
// space, not normalized space; so we build the string we
// will compare against by adding the normalized parts
// of the path, then switching to the escaped parts where
// the pattern hints to us wherever % is present.
var sb strings.Builder
// iterate the pattern and escaped path in lock-step;
// increment iPattern every time we consume a char from the pattern,
// increment iPath every time we consume a char from the path;
// iPattern and iPath are our cursors/iterator positions for each string
var iPattern, iPath int
for {
if iPattern >= len(matchPath) || iPath >= len(escapedPath) {
break
}
// get the next character from the request path
pathCh := string(escapedPath[iPath])
var escapedPathCh string
// normalize (decode) escape sequences
if pathCh == "%" && len(escapedPath) >= iPath+3 {
// hold onto this in case we find out the intent is to match in escaped space here;
// we lowercase it even though technically the spec says: "For consistency, URI
// producers and normalizers should use uppercase hexadecimal digits for all percent-
// encodings" (RFC 3986 section 2.1) - we lowercased the matcher pattern earlier in
// provisioning so we do the same here to gain case-insensitivity in equivalence;
// besides, this string is never shown visibly
escapedPathCh = strings.ToLower(escapedPath[iPath : iPath+3])
var err error
pathCh, err = url.PathUnescape(escapedPathCh)
if err != nil {
// should be impossible unless EscapedPath() is giving us an invalid sequence!
return false
}
iPath += 2 // escape sequence is 2 bytes longer than normal char
}
// now get the next character from the pattern
normalize := true
switch matchPath[iPattern] {
case '%':
// escape sequence
// if not a wildcard ("%*"), compare literally; consume next two bytes of pattern
if len(matchPath) >= iPattern+3 && matchPath[iPattern+1] != '*' {
sb.WriteString(escapedPathCh)
iPath++
iPattern += 2
break
}
// escaped wildcard sequence; consume next byte only ('*')
iPattern++
normalize = false
fallthrough
case '*':
// wildcard, so consume until next matching character
remaining := escapedPath[iPath:]
until := len(escapedPath) - iPath // go until end of string...
if iPattern < len(matchPath)-1 { // ...unless the * is not at the end
nextCh := matchPath[iPattern+1]
until = strings.IndexByte(remaining, nextCh)
if until == -1 {
// terminating char of wildcard span not found, so definitely no match
return false
}
}
if until == 0 {
// empty span; nothing to add on this iteration
break
}
next := remaining[:until]
if normalize {
var err error
next, err = url.PathUnescape(next)
if err != nil {
return false // should be impossible anyway
}
}
sb.WriteString(next)
iPath += until
default:
sb.WriteString(pathCh)
iPath++
}
iPattern++
}
// we can now treat rawpath globs (%*) as regular globs (*)
matchPath = strings.ReplaceAll(matchPath, "%*", "*")
// ignore error here because we can't handle it anyway
matches, _ := path.Match(matchPath, sb.String())
return matches
}
// CELLibrary produces options that expose this matcher for use in CEL
// expression matchers.
//
// Example:
//
// expression path('*substring*', '*suffix')
func (MatchPath) CELLibrary(ctx caddy.Context) (cel.Library, error) {
return CELMatcherImpl(
// name of the macro, this is the function name that users see when writing expressions.
"path",
// name of the function that the macro will be rewritten to call.
"path_match_request_list",
// internal data type of the MatchPath value.
[]*cel.Type{cel.ListType(cel.StringType)},
// function to convert a constant list of strings to a MatchPath instance.
func(data ref.Val) (RequestMatcher, error) {
refStringList := reflect.TypeOf([]string{})
strList, err := data.ConvertToNative(refStringList)
if err != nil {
return nil, err
}
matcher := MatchPath(strList.([]string))
err = matcher.Provision(ctx)
return matcher, err
},
)
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchPath) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
// iterate to merge multiple matchers into one
for d.Next() {
*m = append(*m, d.RemainingArgs()...)
if d.NextBlock(0) {
return d.Err("malformed path matcher: blocks are not supported")
}
}
return nil
}
// CaddyModule returns the Caddy module information.
func (MatchPathRE) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.path_regexp",
New: func() caddy.Module { return new(MatchPathRE) },
}
}
// Match returns true if r matches m.
func (m MatchPathRE) Match(r *http.Request) bool {
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
// Clean the path, merge doubled slashes, etc.
// This ensures maliciously crafted requests can't bypass
// the path matcher. See #4407
cleanedPath := cleanPath(r.URL.Path)
return m.MatchRegexp.Match(cleanedPath, repl)
}
// CELLibrary produces options that expose this matcher for use in CEL
// expression matchers.
//
// Example:
//
// expression path_regexp('^/bar')
func (MatchPathRE) CELLibrary(ctx caddy.Context) (cel.Library, error) {
unnamedPattern, err := CELMatcherImpl(
"path_regexp",
"path_regexp_request_string",
[]*cel.Type{cel.StringType},
func(data ref.Val) (RequestMatcher, error) {
pattern := data.(types.String)
matcher := MatchPathRE{MatchRegexp{
Name: ctx.Value(MatcherNameCtxKey).(string),
Pattern: string(pattern),
}}
err := matcher.Provision(ctx)
return matcher, err
},
)
if err != nil {
return nil, err
}
namedPattern, err := CELMatcherImpl(
"path_regexp",
"path_regexp_request_string_string",
[]*cel.Type{cel.StringType, cel.StringType},
func(data ref.Val) (RequestMatcher, error) {
refStringList := reflect.TypeOf([]string{})
params, err := data.ConvertToNative(refStringList)
if err != nil {
return nil, err
}
strParams := params.([]string)
name := strParams[0]
if name == "" {
name = ctx.Value(MatcherNameCtxKey).(string)
}
matcher := MatchPathRE{MatchRegexp{
Name: name,
Pattern: strParams[1],
}}
err = matcher.Provision(ctx)
return matcher, err
},
)
if err != nil {
return nil, err
}
envOpts := append(unnamedPattern.CompileOptions(), namedPattern.CompileOptions()...)
prgOpts := append(unnamedPattern.ProgramOptions(), namedPattern.ProgramOptions()...)
return NewMatcherCELLibrary(envOpts, prgOpts), nil
}
// CaddyModule returns the Caddy module information.
func (MatchMethod) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.method",
New: func() caddy.Module { return new(MatchMethod) },
}
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchMethod) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
// iterate to merge multiple matchers into one
for d.Next() {
*m = append(*m, d.RemainingArgs()...)
if d.NextBlock(0) {
return d.Err("malformed method matcher: blocks are not supported")
}
}
return nil
}
// Match returns true if r matches m.
func (m MatchMethod) Match(r *http.Request) bool {
for _, method := range m {
if r.Method == method {
return true
}
}
return false
}
// CELLibrary produces options that expose this matcher for use in CEL
// expression matchers.
//
// Example:
//
// expression method('PUT', 'POST')
func (MatchMethod) CELLibrary(_ caddy.Context) (cel.Library, error) {
return CELMatcherImpl(
"method",
"method_request_list",
[]*cel.Type{cel.ListType(cel.StringType)},
func(data ref.Val) (RequestMatcher, error) {
refStringList := reflect.TypeOf([]string{})
strList, err := data.ConvertToNative(refStringList)
if err != nil {
return nil, err
}
return MatchMethod(strList.([]string)), nil
},
)
}
// CaddyModule returns the Caddy module information.
func (MatchQuery) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.query",
New: func() caddy.Module { return new(MatchQuery) },
}
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchQuery) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
if *m == nil {
*m = make(map[string][]string)
}
// iterate to merge multiple matchers into one
for d.Next() {
for _, query := range d.RemainingArgs() {
if query == "" {
continue
}
before, after, found := strings.Cut(query, "=")
if !found {
return d.Errf("malformed query matcher token: %s; must be in param=val format", d.Val())
}
url.Values(*m).Add(before, after)
}
if d.NextBlock(0) {
return d.Err("malformed query matcher: blocks are not supported")
}
}
return nil
}
// Match returns true if r matches m. An empty m matches an empty query string.
func (m MatchQuery) Match(r *http.Request) bool {
// If no query keys are configured, this only
// matches an empty query string.
if len(m) == 0 {
return len(r.URL.Query()) == 0
}
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
// parse query string just once, for efficiency
parsed, err := url.ParseQuery(r.URL.RawQuery)
if err != nil {
// Illegal query string. Likely bad escape sequence or unescaped literals.
// Note that semicolons in query string have a controversial history. Summaries:
// - https://github.com/golang/go/issues/50034
// - https://github.com/golang/go/issues/25192
// Despite the URL WHATWG spec mandating the use of & separators for query strings,
// every URL parser implementation is different, and Filippo Valsorda rightly wrote:
// "Relying on parser alignment for security is doomed." Overall conclusion is that
// splitting on & and rejecting ; in key=value pairs is safer than accepting raw ;.
// We regard the Go team's decision as sound and thus reject malformed query strings.
return false
}
// Count the number of matched keys, to ensure we AND
// between all configured query keys; all keys must
// match at least one value.
matchedKeys := 0
for param, vals := range m {
param = repl.ReplaceAll(param, "")
paramVal, found := parsed[param]
if !found {
return false
}
for _, v := range vals {
v = repl.ReplaceAll(v, "")
if slices.Contains(paramVal, v) || v == "*" {
matchedKeys++
break
}
}
}
return matchedKeys == len(m)
}
// CELLibrary produces options that expose this matcher for use in CEL
// expression matchers.
//
// Example:
//
// expression query({'sort': 'asc'}) || query({'foo': ['*bar*', 'baz']})
func (MatchQuery) CELLibrary(_ caddy.Context) (cel.Library, error) {
return CELMatcherImpl(
"query",
"query_matcher_request_map",
[]*cel.Type{CELTypeJSON},
func(data ref.Val) (RequestMatcher, error) {
mapStrListStr, err := CELValueToMapStrList(data)
if err != nil {
return nil, err
}
return MatchQuery(url.Values(mapStrListStr)), nil
},
)
}
// CaddyModule returns the Caddy module information.
func (MatchHeader) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.header",
New: func() caddy.Module { return new(MatchHeader) },
}
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchHeader) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
if *m == nil {
*m = make(map[string][]string)
}
// iterate to merge multiple matchers into one
for d.Next() {
var field, val string
if !d.Args(&field) {
return d.Errf("malformed header matcher: expected field")
}
if strings.HasPrefix(field, "!") {
if len(field) == 1 {
return d.Errf("malformed header matcher: must have field name following ! character")
}
field = field[1:]
headers := *m
headers[field] = nil
m = &headers
if d.NextArg() {
return d.Errf("malformed header matcher: null matching headers cannot have a field value")
}
} else {
if !d.NextArg() {
return d.Errf("malformed header matcher: expected both field and value")
}
// If multiple header matchers with the same header field are defined,
// append to the existing list of values for that field (they will be OR'ed)
val = d.Val()
http.Header(*m).Add(field, val)
}
if d.NextBlock(0) {
return d.Err("malformed header matcher: blocks are not supported")
}
}
return nil
}
// Match returns true if r matches m.
func (m MatchHeader) Match(r *http.Request) bool {
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
return matchHeaders(r.Header, http.Header(m), r.Host, repl)
}
// CELLibrary produces options that expose this matcher for use in CEL
// expression matchers.
//
// Example:
//
// expression header({'content-type': 'image/png'})
// expression header({'foo': ['bar', 'baz']}) // match bar or baz
func (MatchHeader) CELLibrary(_ caddy.Context) (cel.Library, error) {
return CELMatcherImpl(
"header",
"header_matcher_request_map",
[]*cel.Type{CELTypeJSON},
func(data ref.Val) (RequestMatcher, error) {
mapStrListStr, err := CELValueToMapStrList(data)
if err != nil {
return nil, err
}
return MatchHeader(http.Header(mapStrListStr)), nil
},
)
}
// getHeaderFieldVals returns the field values for the given fieldName from input.
// The host parameter should be obtained from the http.Request.Host field since
// net/http removes it from the header map.
func getHeaderFieldVals(input http.Header, fieldName, host string) []string {
fieldName = textproto.CanonicalMIMEHeaderKey(fieldName)
if fieldName == "Host" && host != "" {
return []string{host}
}
return input[fieldName]
}
// matchHeaders returns true if input matches the criteria in against without regex.
// The host parameter should be obtained from the http.Request.Host field since
// net/http removes it from the header map.
func matchHeaders(input, against http.Header, host string, repl *caddy.Replacer) bool {
for field, allowedFieldVals := range against {
actualFieldVals := getHeaderFieldVals(input, field, host)
if allowedFieldVals != nil && len(allowedFieldVals) == 0 && actualFieldVals != nil {
// a non-nil but empty list of allowed values means
// match if the header field exists at all
continue
}
if allowedFieldVals == nil && actualFieldVals == nil {
// a nil list means match if the header does not exist at all
continue
}
var match bool
fieldVals:
for _, actualFieldVal := range actualFieldVals {
for _, allowedFieldVal := range allowedFieldVals {
if repl != nil {
allowedFieldVal = repl.ReplaceAll(allowedFieldVal, "")
}
switch {
case allowedFieldVal == "*":
match = true
case strings.HasPrefix(allowedFieldVal, "*") && strings.HasSuffix(allowedFieldVal, "*"):
match = strings.Contains(actualFieldVal, allowedFieldVal[1:len(allowedFieldVal)-1])
case strings.HasPrefix(allowedFieldVal, "*"):
match = strings.HasSuffix(actualFieldVal, allowedFieldVal[1:])
case strings.HasSuffix(allowedFieldVal, "*"):
match = strings.HasPrefix(actualFieldVal, allowedFieldVal[:len(allowedFieldVal)-1])
default:
match = actualFieldVal == allowedFieldVal
}
if match {
break fieldVals
}
}
}
if !match {
return false
}
}
return true
}
// CaddyModule returns the Caddy module information.
func (MatchHeaderRE) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.header_regexp",
New: func() caddy.Module { return new(MatchHeaderRE) },
}
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchHeaderRE) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
if *m == nil {
*m = make(map[string]*MatchRegexp)
}
// iterate to merge multiple matchers into one
for d.Next() {
var first, second, third string
if !d.Args(&first, &second) {
return d.ArgErr()
}
var name, field, val string
if d.Args(&third) {
name = first
field = second
val = third
} else {
field = first
val = second
}
// Default to the named matcher's name, if no regexp name is provided
if name == "" {
name = d.GetContextString(caddyfile.MatcherNameCtxKey)
}
// If there's already a pattern for this field
// then we would end up overwriting the old one
if (*m)[field] != nil {
return d.Errf("header_regexp matcher can only be used once per named matcher, per header field: %s", field)
}
(*m)[field] = &MatchRegexp{Pattern: val, Name: name}
if d.NextBlock(0) {
return d.Err("malformed header_regexp matcher: blocks are not supported")
}
}
return nil
}
// Match returns true if r matches m.
func (m MatchHeaderRE) Match(r *http.Request) bool {
for field, rm := range m {
actualFieldVals := getHeaderFieldVals(r.Header, field, r.Host)
match := false
fieldVal:
for _, actualFieldVal := range actualFieldVals {
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
if rm.Match(actualFieldVal, repl) {
match = true
break fieldVal
}
}
if !match {
return false
}
}
return true
}
// Provision compiles m's regular expressions.
func (m MatchHeaderRE) Provision(ctx caddy.Context) error {
for _, rm := range m {
err := rm.Provision(ctx)
if err != nil {
return err
}
}
return nil
}
// Validate validates m's regular expressions.
func (m MatchHeaderRE) Validate() error {
for _, rm := range m {
err := rm.Validate()
if err != nil {
return err
}
}
return nil
}
// CELLibrary produces options that expose this matcher for use in CEL
// expression matchers.
//
// Example:
//
// expression header_regexp('foo', 'Field', 'fo+')
func (MatchHeaderRE) CELLibrary(ctx caddy.Context) (cel.Library, error) {
unnamedPattern, err := CELMatcherImpl(
"header_regexp",
"header_regexp_request_string_string",
[]*cel.Type{cel.StringType, cel.StringType},
func(data ref.Val) (RequestMatcher, error) {
refStringList := reflect.TypeOf([]string{})
params, err := data.ConvertToNative(refStringList)
if err != nil {
return nil, err
}
strParams := params.([]string)
matcher := MatchHeaderRE{}
matcher[strParams[0]] = &MatchRegexp{
Pattern: strParams[1],
Name: ctx.Value(MatcherNameCtxKey).(string),
}
err = matcher.Provision(ctx)
return matcher, err
},
)
if err != nil {
return nil, err
}
namedPattern, err := CELMatcherImpl(
"header_regexp",
"header_regexp_request_string_string_string",
[]*cel.Type{cel.StringType, cel.StringType, cel.StringType},
func(data ref.Val) (RequestMatcher, error) {
refStringList := reflect.TypeOf([]string{})
params, err := data.ConvertToNative(refStringList)
if err != nil {
return nil, err
}
strParams := params.([]string)
name := strParams[0]
if name == "" {
name = ctx.Value(MatcherNameCtxKey).(string)
}
matcher := MatchHeaderRE{}
matcher[strParams[1]] = &MatchRegexp{
Pattern: strParams[2],
Name: name,
}
err = matcher.Provision(ctx)
return matcher, err
},
)
if err != nil {
return nil, err
}
envOpts := append(unnamedPattern.CompileOptions(), namedPattern.CompileOptions()...)
prgOpts := append(unnamedPattern.ProgramOptions(), namedPattern.ProgramOptions()...)
return NewMatcherCELLibrary(envOpts, prgOpts), nil
}
// CaddyModule returns the Caddy module information.
func (MatchProtocol) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.protocol",
New: func() caddy.Module { return new(MatchProtocol) },
}
}
// Match returns true if r matches m.
func (m MatchProtocol) Match(r *http.Request) bool {
switch string(m) {
case "grpc":
return strings.HasPrefix(r.Header.Get("content-type"), "application/grpc")
case "https":
return r.TLS != nil
case "http":
return r.TLS == nil
case "http/1.0":
return r.ProtoMajor == 1 && r.ProtoMinor == 0
case "http/1.0+":
return r.ProtoAtLeast(1, 0)
case "http/1.1":
return r.ProtoMajor == 1 && r.ProtoMinor == 1
case "http/1.1+":
return r.ProtoAtLeast(1, 1)
case "http/2":
return r.ProtoMajor == 2
case "http/2+":
return r.ProtoAtLeast(2, 0)
case "http/3":
return r.ProtoMajor == 3
case "http/3+":
return r.ProtoAtLeast(3, 0)
}
return false
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchProtocol) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
// iterate to merge multiple matchers into one
for d.Next() {
var proto string
if !d.Args(&proto) {
return d.Err("expected exactly one protocol")
}
*m = MatchProtocol(proto)
}
return nil
}
// CELLibrary produces options that expose this matcher for use in CEL
// expression matchers.
//
// Example:
//
// expression protocol('https')
func (MatchProtocol) CELLibrary(_ caddy.Context) (cel.Library, error) {
return CELMatcherImpl(
"protocol",
"protocol_request_string",
[]*cel.Type{cel.StringType},
func(data ref.Val) (RequestMatcher, error) {
protocolStr, ok := data.(types.String)
if !ok {
return nil, errors.New("protocol argument was not a string")
}
return MatchProtocol(strings.ToLower(string(protocolStr))), nil
},
)
}
// CaddyModule returns the Caddy module information.
func (MatchTLS) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.tls",
New: func() caddy.Module { return new(MatchTLS) },
}
}
// Match returns true if r matches m.
func (m MatchTLS) Match(r *http.Request) bool {
if r.TLS == nil {
return false
}
if m.HandshakeComplete != nil {
if (!*m.HandshakeComplete && r.TLS.HandshakeComplete) ||
(*m.HandshakeComplete && !r.TLS.HandshakeComplete) {
return false
}
}
return true
}
// UnmarshalCaddyfile parses Caddyfile tokens for this matcher. Syntax:
//
// ... tls [early_data]
//
// EXPERIMENTAL SYNTAX: Subject to change.
func (m *MatchTLS) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
// iterate to merge multiple matchers into one
for d.Next() {
if d.NextArg() {
switch d.Val() {
case "early_data":
handshakeNotComplete := false
m.HandshakeComplete = &handshakeNotComplete
}
}
if d.NextArg() {
return d.ArgErr()
}
if d.NextBlock(0) {
return d.Err("malformed tls matcher: blocks are not supported yet")
}
}
return nil
}
// CaddyModule returns the Caddy module information.
func (MatchNot) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.not",
New: func() caddy.Module { return new(MatchNot) },
}
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchNot) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
// iterate to merge multiple matchers into one
for d.Next() {
matcherSet, err := ParseCaddyfileNestedMatcherSet(d)
if err != nil {
return err
}
m.MatcherSetsRaw = append(m.MatcherSetsRaw, matcherSet)
}
return nil
}
// UnmarshalJSON satisfies json.Unmarshaler. It puts the JSON
// bytes directly into m's MatcherSetsRaw field.
func (m *MatchNot) UnmarshalJSON(data []byte) error {
return json.Unmarshal(data, &m.MatcherSetsRaw)
}
// MarshalJSON satisfies json.Marshaler by marshaling
// m's raw matcher sets.
func (m MatchNot) MarshalJSON() ([]byte, error) {
return json.Marshal(m.MatcherSetsRaw)
}
// Provision loads the matcher modules to be negated.
func (m *MatchNot) Provision(ctx caddy.Context) error {
matcherSets, err := ctx.LoadModule(m, "MatcherSetsRaw")
if err != nil {
return fmt.Errorf("loading matcher sets: %v", err)
}
for _, modMap := range matcherSets.([]map[string]any) {
var ms MatcherSet
for _, modIface := range modMap {
ms = append(ms, modIface.(RequestMatcher))
}
m.MatcherSets = append(m.MatcherSets, ms)
}
return nil
}
// Match returns true if r matches m. Since this matcher negates
// the embedded matchers, false is returned if any of its matcher
// sets return true.
func (m MatchNot) Match(r *http.Request) bool {
for _, ms := range m.MatcherSets {
if ms.Match(r) {
return false
}
}
return true
}
// MatchRegexp is an embedable type for matching
// using regular expressions. It adds placeholders
// to the request's replacer.
type MatchRegexp struct {
// A unique name for this regular expression. Optional,
// but useful to prevent overwriting captures from other
// regexp matchers.
Name string `json:"name,omitempty"`
// The regular expression to evaluate, in RE2 syntax,
// which is the same general syntax used by Go, Perl,
// and Python. For details, see
// [Go's regexp package](https://golang.org/pkg/regexp/).
// Captures are accessible via placeholders. Unnamed
// capture groups are exposed as their numeric, 1-based
// index, while named capture groups are available by
// the capture group name.
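//
// For example (an illustrative pattern), `v(?P<major>\d+)` with Name "ver"
// matching the input "v2" sets `{http.regexp.ver.1}`, `{http.regexp.ver.major}`,
// and the unnamed forms `{http.regexp.1}` and `{http.regexp.major}` to "2".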
Pattern string `json:"pattern"`
compiled *regexp.Regexp
}
// Provision compiles the regular expression.
func (mre *MatchRegexp) Provision(caddy.Context) error {
re, err := regexp.Compile(mre.Pattern)
if err != nil {
return fmt.Errorf("compiling matcher regexp %s: %v", mre.Pattern, err)
}
mre.compiled = re
return nil
}
// Validate ensures mre is set up correctly.
func (mre *MatchRegexp) Validate() error {
if mre.Name != "" && !wordRE.MatchString(mre.Name) {
return fmt.Errorf("invalid regexp name (must contain only word characters): %s", mre.Name)
}
return nil
}
// Match returns true if input matches the compiled regular
// expression in mre. It sets values on the replacer repl
// associated with capture groups, using the given scope
// (namespace).
func (mre *MatchRegexp) Match(input string, repl *caddy.Replacer) bool {
matches := mre.compiled.FindStringSubmatch(input)
if matches == nil {
return false
}
// save all capture groups, first by index
for i, match := range matches {
keySuffix := "." + strconv.Itoa(i)
if mre.Name != "" {
repl.Set(regexpPlaceholderPrefix+"."+mre.Name+keySuffix, match)
}
repl.Set(regexpPlaceholderPrefix+keySuffix, match)
}
// then by name
for i, name := range mre.compiled.SubexpNames() {
// skip the first element (the full match), and empty names
if i == 0 || name == "" {
continue
}
keySuffix := "." + name
if mre.Name != "" {
repl.Set(regexpPlaceholderPrefix+"."+mre.Name+keySuffix, matches[i])
}
repl.Set(regexpPlaceholderPrefix+keySuffix, matches[i])
}
return true
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (mre *MatchRegexp) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
// iterate to merge multiple matchers into one
for d.Next() {
// If this is the second iteration of the loop
// then there's more than one path_regexp matcher
// and we would end up overwriting the old one
if mre.Pattern != "" {
return d.Err("regular expression can only be used once per named matcher")
}
args := d.RemainingArgs()
switch len(args) {
case 1:
mre.Pattern = args[0]
case 2:
mre.Name = args[0]
mre.Pattern = args[1]
default:
return d.ArgErr()
}
// Default to the named matcher's name, if no regexp name is provided
if mre.Name == "" {
mre.Name = d.GetContextString(caddyfile.MatcherNameCtxKey)
}
if d.NextBlock(0) {
return d.Err("malformed path_regexp matcher: blocks are not supported")
}
}
return nil
}
// ParseCaddyfileNestedMatcherSet parses the Caddyfile tokens for a nested
// matcher set, and returns its raw module map value.
func ParseCaddyfileNestedMatcherSet(d *caddyfile.Dispenser) (caddy.ModuleMap, error) {
matcherMap := make(map[string]RequestMatcher)
// in case there are multiple instances of the same matcher, concatenate
// their tokens (we expect that UnmarshalCaddyfile should be able to
// handle more than one segment); otherwise, we'd overwrite other
// instances of the matcher in this set
tokensByMatcherName := make(map[string][]caddyfile.Token)
for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
matcherName := d.Val()
tokensByMatcherName[matcherName] = append(tokensByMatcherName[matcherName], d.NextSegment()...)
}
for matcherName, tokens := range tokensByMatcherName {
mod, err := caddy.GetModule("http.matchers." + matcherName)
if err != nil {
return nil, d.Errf("getting matcher module '%s': %v", matcherName, err)
}
unm, ok := mod.New().(caddyfile.Unmarshaler)
if !ok {
return nil, d.Errf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
}
err = unm.UnmarshalCaddyfile(caddyfile.NewDispenser(tokens))
if err != nil {
return nil, err
}
rm, ok := unm.(RequestMatcher)
if !ok {
return nil, fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
}
matcherMap[matcherName] = rm
}
// we should now have a functional matcher, but we also
// need to be able to marshal as JSON, otherwise config
// adaptation will be missing the matchers!
matcherSet := make(caddy.ModuleMap)
for name, matcher := range matcherMap {
jsonBytes, err := json.Marshal(matcher)
if err != nil {
return nil, fmt.Errorf("marshaling %T matcher: %v", matcher, err)
}
matcherSet[name] = jsonBytes
}
return matcherSet, nil
}
var wordRE = regexp.MustCompile(`\w+`)
const regexpPlaceholderPrefix = "http.regexp"
// MatcherErrorVarKey is the key used for the variable that
// holds an optional error emitted from a request matcher,
// to short-circuit the handler chain, since matchers cannot
// return errors via the RequestMatcher interface.
const MatcherErrorVarKey = "matchers.error"
// Interface guards
var (
_ RequestMatcher = (*MatchHost)(nil)
_ caddy.Provisioner = (*MatchHost)(nil)
_ RequestMatcher = (*MatchPath)(nil)
_ RequestMatcher = (*MatchPathRE)(nil)
_ caddy.Provisioner = (*MatchPathRE)(nil)
_ RequestMatcher = (*MatchMethod)(nil)
_ RequestMatcher = (*MatchQuery)(nil)
_ RequestMatcher = (*MatchHeader)(nil)
_ RequestMatcher = (*MatchHeaderRE)(nil)
_ caddy.Provisioner = (*MatchHeaderRE)(nil)
_ RequestMatcher = (*MatchProtocol)(nil)
_ RequestMatcher = (*MatchNot)(nil)
_ caddy.Provisioner = (*MatchNot)(nil)
_ caddy.Provisioner = (*MatchRegexp)(nil)
_ caddyfile.Unmarshaler = (*MatchHost)(nil)
_ caddyfile.Unmarshaler = (*MatchPath)(nil)
_ caddyfile.Unmarshaler = (*MatchPathRE)(nil)
_ caddyfile.Unmarshaler = (*MatchMethod)(nil)
_ caddyfile.Unmarshaler = (*MatchQuery)(nil)
_ caddyfile.Unmarshaler = (*MatchHeader)(nil)
_ caddyfile.Unmarshaler = (*MatchHeaderRE)(nil)
_ caddyfile.Unmarshaler = (*MatchProtocol)(nil)
_ caddyfile.Unmarshaler = (*VarsMatcher)(nil)
_ caddyfile.Unmarshaler = (*MatchVarsRE)(nil)
_ CELLibraryProducer = (*MatchHost)(nil)
_ CELLibraryProducer = (*MatchPath)(nil)
_ CELLibraryProducer = (*MatchPathRE)(nil)
_ CELLibraryProducer = (*MatchMethod)(nil)
_ CELLibraryProducer = (*MatchQuery)(nil)
_ CELLibraryProducer = (*MatchHeader)(nil)
_ CELLibraryProducer = (*MatchHeaderRE)(nil)
_ CELLibraryProducer = (*MatchProtocol)(nil)
// _ CELLibraryProducer = (*VarsMatcher)(nil)
// _ CELLibraryProducer = (*MatchVarsRE)(nil)
_ json.Marshaler = (*MatchNot)(nil)
_ json.Unmarshaler = (*MatchNot)(nil)
)
package caddyhttp
import (
"context"
"errors"
"net/http"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/caddyserver/caddy/v2/internal/metrics"
)
// Metrics configures metrics observations.
// EXPERIMENTAL and subject to change or removal.
type Metrics struct{}
var httpMetrics = struct {
init sync.Once
requestInFlight *prometheus.GaugeVec
requestCount *prometheus.CounterVec
requestErrors *prometheus.CounterVec
requestDuration *prometheus.HistogramVec
requestSize *prometheus.HistogramVec
responseSize *prometheus.HistogramVec
responseDuration *prometheus.HistogramVec
}{
init: sync.Once{},
}
func initHTTPMetrics() {
const ns, sub = "caddy", "http"
basicLabels := []string{"server", "handler"}
httpMetrics.requestInFlight = promauto.NewGaugeVec(prometheus.GaugeOpts{
Namespace: ns,
Subsystem: sub,
Name: "requests_in_flight",
Help: "Number of requests currently handled by this server.",
}, basicLabels)
httpMetrics.requestErrors = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Subsystem: sub,
Name: "request_errors_total",
Help: "Number of requests resulting in middleware errors.",
}, basicLabels)
httpMetrics.requestCount = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Subsystem: sub,
Name: "requests_total",
Help: "Counter of HTTP(S) requests made.",
}, basicLabels)
// TODO: allow these to be customized in the config
durationBuckets := prometheus.DefBuckets
sizeBuckets := prometheus.ExponentialBuckets(256, 4, 8)
httpLabels := []string{"server", "handler", "code", "method"}
httpMetrics.requestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: ns,
Subsystem: sub,
Name: "request_duration_seconds",
Help: "Histogram of round-trip request durations.",
Buckets: durationBuckets,
}, httpLabels)
httpMetrics.requestSize = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: ns,
Subsystem: sub,
Name: "request_size_bytes",
Help: "Total size of the request. Includes body",
Buckets: sizeBuckets,
}, httpLabels)
httpMetrics.responseSize = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: ns,
Subsystem: sub,
Name: "response_size_bytes",
Help: "Size of the returned response.",
Buckets: sizeBuckets,
}, httpLabels)
httpMetrics.responseDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: ns,
Subsystem: sub,
Name: "response_duration_seconds",
Help: "Histogram of times to first byte in response bodies.",
Buckets: durationBuckets,
}, httpLabels)
}
// serverNameFromContext extracts the current server name from the context.
// Returns "UNKNOWN" if none is available (should probably never happen).
func serverNameFromContext(ctx context.Context) string {
srv, ok := ctx.Value(ServerCtxKey).(*Server)
if !ok || srv == nil || srv.name == "" {
return "UNKNOWN"
}
return srv.name
}
type metricsInstrumentedHandler struct {
handler string
mh MiddlewareHandler
}
func newMetricsInstrumentedHandler(handler string, mh MiddlewareHandler) *metricsInstrumentedHandler {
httpMetrics.init.Do(func() {
initHTTPMetrics()
})
return &metricsInstrumentedHandler{handler, mh}
}
func (h *metricsInstrumentedHandler) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error {
server := serverNameFromContext(r.Context())
labels := prometheus.Labels{"server": server, "handler": h.handler}
method := metrics.SanitizeMethod(r.Method)
// the "code" value is set later, but initialized here to eliminate the possibility
// of a panic
statusLabels := prometheus.Labels{"server": server, "handler": h.handler, "method": method, "code": ""}
inFlight := httpMetrics.requestInFlight.With(labels)
inFlight.Inc()
defer inFlight.Dec()
start := time.Now()
// This is a _bit_ of a hack - it depends on the ShouldBufferFunc always
// being called when the headers are written.
// Effectively the same behaviour as promhttp.InstrumentHandlerTimeToWriteHeader.
writeHeaderRecorder := ShouldBufferFunc(func(status int, header http.Header) bool {
statusLabels["code"] = metrics.SanitizeCode(status)
ttfb := time.Since(start).Seconds()
httpMetrics.responseDuration.With(statusLabels).Observe(ttfb)
return false
})
wrec := NewResponseRecorder(w, nil, writeHeaderRecorder)
err := h.mh.ServeHTTP(wrec, r, next)
dur := time.Since(start).Seconds()
httpMetrics.requestCount.With(labels).Inc()
observeRequest := func(status int) {
// If the code hasn't been set yet, and we didn't encounter an error, we're
// probably falling through with an empty handler.
if statusLabels["code"] == "" {
// we still sanitize it, even though it's likely to be 0; a 200 is
// effectively returned on fallthrough, so we want to reflect that
statusLabels["code"] = metrics.SanitizeCode(status)
}
httpMetrics.requestDuration.With(statusLabels).Observe(dur)
httpMetrics.requestSize.With(statusLabels).Observe(float64(computeApproximateRequestSize(r)))
httpMetrics.responseSize.With(statusLabels).Observe(float64(wrec.Size()))
}
if err != nil {
var handlerErr HandlerError
if errors.As(err, &handlerErr) {
observeRequest(handlerErr.StatusCode)
}
httpMetrics.requestErrors.With(labels).Inc()
return err
}
observeRequest(wrec.Status())
return nil
}
// taken from https://github.com/prometheus/client_golang/blob/6007b2b5cae01203111de55f753e76d8dac1f529/prometheus/promhttp/instrument_server.go#L298
func computeApproximateRequestSize(r *http.Request) int {
s := 0
if r.URL != nil {
s += len(r.URL.String())
}
s += len(r.Method)
s += len(r.Proto)
for name, values := range r.Header {
s += len(name)
for _, value := range values {
s += len(value)
}
}
s += len(r.Host)
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
if r.ContentLength != -1 {
s += int(r.ContentLength)
}
return s
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"bytes"
"context"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/rsa"
"crypto/sha256"
"crypto/tls"
"crypto/x509"
"encoding/asn1"
"encoding/base64"
"encoding/pem"
"fmt"
"io"
"net"
"net/http"
"net/netip"
"net/textproto"
"net/url"
"path"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/modules/caddytls"
)
// NewTestReplacer creates a replacer for an http.Request
// for use in tests that are not in this package
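//
// A minimal usage sketch (assuming net/http/httptest for the request):
//
//	req := httptest.NewRequest(http.MethodGet, "https://example.com/foo?bar=1", nil)
//	repl := caddyhttp.NewTestReplacer(req)
//	path := repl.ReplaceAll("{http.request.uri.path}", "") // "/foo"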
func NewTestReplacer(req *http.Request) *caddy.Replacer {
repl := caddy.NewReplacer()
ctx := context.WithValue(req.Context(), caddy.ReplacerCtxKey, repl)
*req = *req.WithContext(ctx)
addHTTPVarsToReplacer(repl, req, nil)
return repl
}
func addHTTPVarsToReplacer(repl *caddy.Replacer, req *http.Request, w http.ResponseWriter) {
SetVar(req.Context(), "start_time", time.Now())
SetVar(req.Context(), "uuid", new(requestID))
httpVars := func(key string) (any, bool) {
if req != nil {
// query string parameters
if strings.HasPrefix(key, reqURIQueryReplPrefix) {
vals := req.URL.Query()[key[len(reqURIQueryReplPrefix):]]
// always return true, since the query param might
// be present only in some requests
return strings.Join(vals, ","), true
}
// request header fields
if strings.HasPrefix(key, reqHeaderReplPrefix) {
field := key[len(reqHeaderReplPrefix):]
vals := req.Header[textproto.CanonicalMIMEHeaderKey(field)]
// always return true, since the header field might
// be present only in some requests
return strings.Join(vals, ","), true
}
// cookies
if strings.HasPrefix(key, reqCookieReplPrefix) {
name := key[len(reqCookieReplPrefix):]
for _, cookie := range req.Cookies() {
if strings.EqualFold(name, cookie.Name) {
// always return true, since the cookie might
// be present only in some requests
return cookie.Value, true
}
}
}
// http.request.tls.*
if strings.HasPrefix(key, reqTLSReplPrefix) {
return getReqTLSReplacement(req, key)
}
switch key {
case "http.request.method":
return req.Method, true
case "http.request.scheme":
if req.TLS != nil {
return "https", true
}
return "http", true
case "http.request.proto":
return req.Proto, true
case "http.request.host":
host, _, err := net.SplitHostPort(req.Host)
if err != nil {
return req.Host, true // OK; there probably was no port
}
return host, true
case "http.request.port":
_, port, _ := net.SplitHostPort(req.Host)
if portNum, err := strconv.Atoi(port); err == nil {
return portNum, true
}
return port, true
case "http.request.hostport":
return req.Host, true
case "http.request.local":
localAddr, _ := req.Context().Value(http.LocalAddrContextKey).(net.Addr)
return localAddr.String(), true
case "http.request.local.host":
localAddr, _ := req.Context().Value(http.LocalAddrContextKey).(net.Addr)
host, _, err := net.SplitHostPort(localAddr.String())
if err != nil {
// localAddr is host:port for tcp and udp sockets and /unix/socket.path
// for unix sockets. net.SplitHostPort only operates on tcp and udp sockets,
// not unix sockets and will fail with the latter.
// We assume when net.SplitHostPort fails, localAddr is a unix socket and thus
// already "split" and save to return.
return localAddr, true
}
return host, true
case "http.request.local.port":
localAddr, _ := req.Context().Value(http.LocalAddrContextKey).(net.Addr)
_, port, _ := net.SplitHostPort(localAddr.String())
if portNum, err := strconv.Atoi(port); err == nil {
return portNum, true
}
return port, true
case "http.request.remote":
if req.TLS != nil && !req.TLS.HandshakeComplete {
// without a complete handshake (QUIC "early data") we can't trust the remote IP address to not be spoofed
return nil, true
}
return req.RemoteAddr, true
case "http.request.remote.host":
if req.TLS != nil && !req.TLS.HandshakeComplete {
// without a complete handshake (QUIC "early data") we can't trust the remote IP address to not be spoofed
return nil, true
}
host, _, err := net.SplitHostPort(req.RemoteAddr)
if err != nil {
// req.RemoteAddr is host:port for tcp and udp sockets and /unix/socket.path
// for unix sockets. net.SplitHostPort only operates on tcp and udp sockets,
// not unix sockets and will fail with the latter.
// We assume when net.SplitHostPort fails, req.RemoteAddr is a unix socket
// and thus already "split" and save to return.
return req.RemoteAddr, true
}
return host, true
case "http.request.remote.port":
_, port, _ := net.SplitHostPort(req.RemoteAddr)
if portNum, err := strconv.Atoi(port); err == nil {
return portNum, true
}
return port, true
// current URI, including any internal rewrites
case "http.request.uri":
return req.URL.RequestURI(), true
case "http.request.uri.path":
return req.URL.Path, true
case "http.request.uri.path.file":
_, file := path.Split(req.URL.Path)
return file, true
case "http.request.uri.path.dir":
dir, _ := path.Split(req.URL.Path)
return dir, true
case "http.request.uri.path.file.base":
return strings.TrimSuffix(path.Base(req.URL.Path), path.Ext(req.URL.Path)), true
case "http.request.uri.path.file.ext":
return path.Ext(req.URL.Path), true
case "http.request.uri.query":
return req.URL.RawQuery, true
case "http.request.duration":
start := GetVar(req.Context(), "start_time").(time.Time)
return time.Since(start), true
case "http.request.duration_ms":
start := GetVar(req.Context(), "start_time").(time.Time)
return time.Since(start).Seconds() * 1e3, true // multiply seconds to preserve decimal (see #4666)
case "http.request.uuid":
// fetch the UUID for this request
id := GetVar(req.Context(), "uuid").(*requestID)
// set it to this request's access log
extra := req.Context().Value(ExtraLogFieldsCtxKey).(*ExtraLogFields)
extra.Set(zap.String("uuid", id.String()))
return id.String(), true
case "http.request.body":
if req.Body == nil {
return "", true
}
// normally net/http will close the body for us, but since we
// are replacing it with a fake one, we have to ensure we close
// the real body ourselves when we're done
defer req.Body.Close()
// read the request body into a buffer (can't pool because we
// don't know its lifetime and would have to make a copy anyway)
buf := new(bytes.Buffer)
_, _ = io.Copy(buf, req.Body) // can't handle error, so just ignore it
req.Body = io.NopCloser(buf) // replace real body with buffered data
return buf.String(), true
// original request, before any internal changes
case "http.request.orig_method":
or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
return or.Method, true
case "http.request.orig_uri":
or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
return or.RequestURI, true
case "http.request.orig_uri.path":
or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
return or.URL.Path, true
case "http.request.orig_uri.path.file":
or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
_, file := path.Split(or.URL.Path)
return file, true
case "http.request.orig_uri.path.dir":
or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
dir, _ := path.Split(or.URL.Path)
return dir, true
case "http.request.orig_uri.query":
or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
return or.URL.RawQuery, true
}
// remote IP range/prefix (e.g. keep top 24 bits of 1.2.3.4 => "1.2.3.0/24")
// syntax: "/V4,V6" where V4 = IPv4 bits, and V6 = IPv6 bits; if no comma, then same bit length used for both
// (EXPERIMENTAL)
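// for example (illustrative): {http.request.remote.host/24,64} keeps a /24
// prefix for IPv4 clients and a /64 prefix for IPv6 clients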
if strings.HasPrefix(key, "http.request.remote.host/") {
host, _, err := net.SplitHostPort(req.RemoteAddr)
if err != nil {
host = req.RemoteAddr // assume there was no port
}
addr, err := netip.ParseAddr(host)
if err != nil {
return host, true // not an IP address
}
// extract the bits from the end of the placeholder (start after "/") then split on ","
bitsBoth := key[strings.Index(key, "/")+1:]
ipv4BitsStr, ipv6BitsStr, cutOK := strings.Cut(bitsBoth, ",")
bitsStr := ipv4BitsStr
if addr.Is6() && cutOK {
bitsStr = ipv6BitsStr
}
// convert to integer then compute prefix
bits, err := strconv.Atoi(bitsStr)
if err != nil {
return "", true
}
prefix, err := addr.Prefix(bits)
if err != nil {
return "", true
}
return prefix.String(), true
}
// hostname labels
if strings.HasPrefix(key, reqHostLabelsReplPrefix) {
idxStr := key[len(reqHostLabelsReplPrefix):]
idx, err := strconv.Atoi(idxStr)
if err != nil || idx < 0 {
return "", false
}
reqHost, _, err := net.SplitHostPort(req.Host)
if err != nil {
reqHost = req.Host // OK; assume there was no port
}
hostLabels := strings.Split(reqHost, ".")
if idx >= len(hostLabels) {
return "", true
}
return hostLabels[len(hostLabels)-idx-1], true
}
// path parts
if strings.HasPrefix(key, reqURIPathReplPrefix) {
idxStr := key[len(reqURIPathReplPrefix):]
idx, err := strconv.Atoi(idxStr)
if err != nil {
return "", false
}
pathParts := strings.Split(req.URL.Path, "/")
if len(pathParts) > 0 && pathParts[0] == "" {
pathParts = pathParts[1:]
}
if idx < 0 {
return "", false
}
if idx >= len(pathParts) {
return "", true
}
return pathParts[idx], true
}
// orig uri path parts
if strings.HasPrefix(key, reqOrigURIPathReplPrefix) {
idxStr := key[len(reqOrigURIPathReplPrefix):]
idx, err := strconv.Atoi(idxStr)
if err != nil {
return "", false
}
or, _ := req.Context().Value(OriginalRequestCtxKey).(http.Request)
pathParts := strings.Split(or.URL.Path, "/")
if len(pathParts) > 0 && pathParts[0] == "" {
pathParts = pathParts[1:]
}
if idx < 0 {
return "", false
}
if idx >= len(pathParts) {
return "", true
}
return pathParts[idx], true
}
// middleware variables
if strings.HasPrefix(key, varsReplPrefix) {
varName := key[len(varsReplPrefix):]
raw := GetVar(req.Context(), varName)
// variables can be dynamic, so always return true
// even if the variable is not set; it is treated as empty in that case
return raw, true
}
}
if w != nil {
// response header fields
if strings.HasPrefix(key, respHeaderReplPrefix) {
field := key[len(respHeaderReplPrefix):]
vals := w.Header()[textproto.CanonicalMIMEHeaderKey(field)]
// always return true, since the header field might
// be present only in some responses
return strings.Join(vals, ","), true
}
}
switch {
case key == "http.shutting_down":
server := req.Context().Value(ServerCtxKey).(*Server)
server.shutdownAtMu.RLock()
defer server.shutdownAtMu.RUnlock()
return !server.shutdownAt.IsZero(), true
case key == "http.time_until_shutdown":
server := req.Context().Value(ServerCtxKey).(*Server)
server.shutdownAtMu.RLock()
defer server.shutdownAtMu.RUnlock()
if server.shutdownAt.IsZero() {
return nil, true
}
return time.Until(server.shutdownAt), true
}
return nil, false
}
repl.Map(httpVars)
}
func getReqTLSReplacement(req *http.Request, key string) (any, bool) {
if req == nil || req.TLS == nil {
return nil, false
}
if len(key) < len(reqTLSReplPrefix) {
return nil, false
}
field := strings.ToLower(key[len(reqTLSReplPrefix):])
if strings.HasPrefix(field, "client.") {
cert := getTLSPeerCert(req.TLS)
if cert == nil {
return nil, false
}
// subject alternative names (SANs)
if strings.HasPrefix(field, "client.san.") {
field = field[len("client.san."):]
var fieldName string
var fieldValue any
switch {
case strings.HasPrefix(field, "dns_names"):
fieldName = "dns_names"
fieldValue = cert.DNSNames
case strings.HasPrefix(field, "emails"):
fieldName = "emails"
fieldValue = cert.EmailAddresses
case strings.HasPrefix(field, "ips"):
fieldName = "ips"
fieldValue = cert.IPAddresses
case strings.HasPrefix(field, "uris"):
fieldName = "uris"
fieldValue = cert.URIs
default:
return nil, false
}
field = field[len(fieldName):]
// if no index was specified, return the whole list
if field == "" {
return fieldValue, true
}
if len(field) < 2 || field[0] != '.' {
return nil, false
}
field = field[1:] // trim '.' between field name and index
// get the numeric index
idx, err := strconv.Atoi(field)
if err != nil || idx < 0 {
return nil, false
}
// access the indexed element and return it
switch v := fieldValue.(type) {
case []string:
if idx >= len(v) {
return nil, true
}
return v[idx], true
case []net.IP:
if idx >= len(v) {
return nil, true
}
return v[idx], true
case []*url.URL:
if idx >= len(v) {
return nil, true
}
return v[idx], true
}
}
switch field {
case "client.fingerprint":
return fmt.Sprintf("%x", sha256.Sum256(cert.Raw)), true
case "client.public_key", "client.public_key_sha256":
if cert.PublicKey == nil {
return nil, true
}
pubKeyBytes, err := marshalPublicKey(cert.PublicKey)
if err != nil {
return nil, true
}
if strings.HasSuffix(field, "_sha256") {
return fmt.Sprintf("%x", sha256.Sum256(pubKeyBytes)), true
}
return fmt.Sprintf("%x", pubKeyBytes), true
case "client.issuer":
return cert.Issuer, true
case "client.serial":
return cert.SerialNumber, true
case "client.subject":
return cert.Subject, true
case "client.certificate_pem":
block := pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}
return pem.EncodeToMemory(&block), true
case "client.certificate_der_base64":
return base64.StdEncoding.EncodeToString(cert.Raw), true
default:
return nil, false
}
}
switch field {
case "version":
return caddytls.ProtocolName(req.TLS.Version), true
case "cipher_suite":
return tls.CipherSuiteName(req.TLS.CipherSuite), true
case "resumed":
return req.TLS.DidResume, true
case "proto":
return req.TLS.NegotiatedProtocol, true
case "proto_mutual":
// req.TLS.NegotiatedProtocolIsMutual is deprecated - it's always true.
return true, true
case "server_name":
return req.TLS.ServerName, true
}
return nil, false
}
// marshalPublicKey returns the byte encoding of pubKey.
func marshalPublicKey(pubKey any) ([]byte, error) {
switch key := pubKey.(type) {
case *rsa.PublicKey:
return asn1.Marshal(key)
case *ecdsa.PublicKey:
e, err := key.ECDH()
if err != nil {
return nil, err
}
return e.Bytes(), nil
case ed25519.PublicKey:
return key, nil
}
return nil, fmt.Errorf("unrecognized public key type: %T", pubKey)
}
// getTLSPeerCert retrieves the first peer certificate from a TLS session.
// Returns nil if no peer cert is in use.
func getTLSPeerCert(cs *tls.ConnectionState) *x509.Certificate {
if len(cs.PeerCertificates) == 0 {
return nil
}
return cs.PeerCertificates[0]
}
type requestID struct {
value string
}
// String lazily generates a UUID string, or returns the cached value if one is already present.
func (rid *requestID) String() string {
if rid.value == "" {
if id, err := uuid.NewRandom(); err == nil {
rid.value = id.String()
}
}
return rid.value
}
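// As an illustration of the placeholder prefixes defined below, placeholders
// such as the following are resolved by the httpVars function in this file
// (the specific field/variable names are hypothetical):
//
//	{http.request.header.User-Agent}
//	{http.request.cookie.session}
//	{http.request.uri.query.page}
//	{http.vars.my_custom_var}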
const (
reqCookieReplPrefix = "http.request.cookie."
reqHeaderReplPrefix = "http.request.header."
reqHostLabelsReplPrefix = "http.request.host.labels."
reqTLSReplPrefix = "http.request.tls."
reqURIPathReplPrefix = "http.request.uri.path."
reqURIQueryReplPrefix = "http.request.uri.query."
respHeaderReplPrefix = "http.response.header."
varsReplPrefix = "http.vars."
reqOrigURIPathReplPrefix = "http.request.orig_uri.path."
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"net/http"
"strconv"
"strings"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
// ResponseMatcher is a type which can determine if an
// HTTP response matches some criteria.
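//
// For example, a matcher requiring any 2xx status and a JSON content
// type could look like this in its JSON form (an illustrative sketch):
//
//	{
//		"status_code": [2],
//		"headers": {"Content-Type": ["application/json"]}
//	}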
type ResponseMatcher struct {
// If set, one of these status codes is required.
// A one-digit status can be used to represent all codes
// in that class (e.g. 3 for all 3xx codes).
StatusCode []int `json:"status_code,omitempty"`
// If set, each specified header field must have one of the
// specified values, using the same logic as the
// [request header matcher](/docs/json/apps/http/servers/routes/match/header/).
Headers http.Header `json:"headers,omitempty"`
}
// Match returns true if the given statusCode and hdr match rm.
func (rm ResponseMatcher) Match(statusCode int, hdr http.Header) bool {
if !rm.matchStatusCode(statusCode) {
return false
}
return matchHeaders(hdr, rm.Headers, "", nil)
}
func (rm ResponseMatcher) matchStatusCode(statusCode int) bool {
if rm.StatusCode == nil {
return true
}
for _, code := range rm.StatusCode {
if StatusCodeMatches(statusCode, code) {
return true
}
}
return false
}
// ParseNamedResponseMatcher parses the tokens of a named response matcher.
//
// @name {
// header <field> [<value>]
// status <code...>
// }
//
// Or, single line syntax:
//
// @name [header <field> [<value>]] | [status <code...>]
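//
// For example (illustrative):
//
//	@errors status 5xx 404
//	@json header Content-Type application/json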
func ParseNamedResponseMatcher(d *caddyfile.Dispenser, matchers map[string]ResponseMatcher) error {
d.Next() // consume matcher name
definitionName := d.Val()
if _, ok := matchers[definitionName]; ok {
return d.Errf("matcher is defined more than once: %s", definitionName)
}
matcher := ResponseMatcher{}
for nesting := d.Nesting(); d.NextArg() || d.NextBlock(nesting); {
switch d.Val() {
case "header":
if matcher.Headers == nil {
matcher.Headers = http.Header{}
}
// reuse the header request matcher's unmarshaler
headerMatcher := MatchHeader(matcher.Headers)
err := headerMatcher.UnmarshalCaddyfile(d.NewFromNextSegment())
if err != nil {
return err
}
matcher.Headers = http.Header(headerMatcher)
case "status":
if matcher.StatusCode == nil {
matcher.StatusCode = []int{}
}
args := d.RemainingArgs()
if len(args) == 0 {
return d.ArgErr()
}
for _, arg := range args {
if len(arg) == 3 && strings.HasSuffix(arg, "xx") {
arg = arg[:1]
}
statusNum, err := strconv.Atoi(arg)
if err != nil {
return d.Errf("bad status value '%s': %v", arg, err)
}
matcher.StatusCode = append(matcher.StatusCode, statusNum)
}
default:
return d.Errf("unrecognized response matcher %s", d.Val())
}
}
matchers[definitionName] = matcher
return nil
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"bufio"
"bytes"
"fmt"
"io"
"net"
"net/http"
)
// ResponseWriterWrapper wraps an underlying ResponseWriter and
// promotes its Pusher method as well. To use this type, embed
// a pointer to it within your own struct type that implements
// the http.ResponseWriter interface, then call methods on the
// embedded value.
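//
// A minimal sketch of the embedding described above (countingWriter is
// a hypothetical type):
//
//	type countingWriter struct {
//		*ResponseWriterWrapper
//		count int
//	}
//
//	func (cw *countingWriter) Write(p []byte) (int, error) {
//		cw.count += len(p)
//		return cw.ResponseWriterWrapper.Write(p)
//	}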
type ResponseWriterWrapper struct {
http.ResponseWriter
}
// Push implements http.Pusher. It simply calls the underlying
// ResponseWriter's Push method if there is one, or returns
// ErrNotImplemented otherwise.
func (rww *ResponseWriterWrapper) Push(target string, opts *http.PushOptions) error {
if pusher, ok := rww.ResponseWriter.(http.Pusher); ok {
return pusher.Push(target, opts)
}
return ErrNotImplemented
}
// ReadFrom implements io.ReaderFrom. It simply calls io.Copy,
// which uses io.ReaderFrom if available.
func (rww *ResponseWriterWrapper) ReadFrom(r io.Reader) (n int64, err error) {
return io.Copy(rww.ResponseWriter, r)
}
// Unwrap returns the underlying ResponseWriter, necessary for
// http.ResponseController to work correctly.
func (rww *ResponseWriterWrapper) Unwrap() http.ResponseWriter {
return rww.ResponseWriter
}
// ErrNotImplemented is returned when an underlying
// ResponseWriter does not implement the required method.
var ErrNotImplemented = fmt.Errorf("method not implemented")
type responseRecorder struct {
*ResponseWriterWrapper
statusCode int
buf *bytes.Buffer
shouldBuffer ShouldBufferFunc
size int
wroteHeader bool
stream bool
readSize *int
}
// NewResponseRecorder returns a new ResponseRecorder that can be
// used instead of a standard http.ResponseWriter. The recorder is
// useful for middlewares which need to buffer a response and
// potentially process its entire body before actually writing the
// response to the underlying writer. Of course, buffering the entire
// body has a memory overhead, but sometimes there is no way to avoid
// buffering the whole response, hence the existence of this type.
// Still, if at all practical, handlers should strive to stream
// responses by wrapping Write and WriteHeader methods instead of
// buffering whole response bodies.
//
// Buffering is actually optional. The shouldBuffer function will
// be called just before the headers are written. If it returns
// true, the headers and body will be buffered by this recorder
// and not written to the underlying writer; if false, the headers
// will be written immediately and the body will be streamed out
// directly to the underlying writer. If shouldBuffer is nil,
// the response will never be buffered and will always be streamed
// directly to the writer.
//
// You can check whether shouldBuffer returned true by calling Buffered().
//
// The provided buffer buf should be obtained from a pool for best
// performance (see the sync.Pool type).
//
// Proper usage of a recorder looks like this:
//
// rec := caddyhttp.NewResponseRecorder(w, buf, shouldBuffer)
// err := next.ServeHTTP(rec, req)
// if err != nil {
// return err
// }
// if !rec.Buffered() {
// return nil
// }
// // process the buffered response here
//
// The header map is not buffered; i.e. the ResponseRecorder's Header()
// method returns the same header map as the underlying ResponseWriter.
// This is a crucial design decision to allow HTTP trailers to be
// flushed properly (https://github.com/caddyserver/caddy/issues/3236).
//
// Once you are ready to write the response, there are two ways you can
// do it. The easier way is to have the recorder do it:
//
// rec.WriteResponse()
//
// This writes the recorded response headers as well as the buffered body.
// Or, you may wish to do it yourself, especially if you manipulated the
// buffered body. First you will need to write the headers with the
// recorded status code, then write the body (this example writes the
// recorder's body buffer, but you might have your own body to write
// instead):
//
// w.WriteHeader(rec.Status())
// io.Copy(w, rec.Buffer())
//
// As a special case, 1xx responses are not buffered nor recorded
// because they are not the final response; they are passed through
// directly to the underlying ResponseWriter.
func NewResponseRecorder(w http.ResponseWriter, buf *bytes.Buffer, shouldBuffer ShouldBufferFunc) ResponseRecorder {
return &responseRecorder{
ResponseWriterWrapper: &ResponseWriterWrapper{ResponseWriter: w},
buf: buf,
shouldBuffer: shouldBuffer,
}
}
// WriteHeader writes the headers with statusCode to the wrapped
// ResponseWriter unless the response is to be buffered instead.
// 1xx responses are never buffered.
func (rr *responseRecorder) WriteHeader(statusCode int) {
if rr.wroteHeader {
return
}
// always save the statusCode, in case HTTP middleware upgrades WebSocket
// connections by manually setting headers and writing status 101
rr.statusCode = statusCode
// 1xx responses aren't final; just informational
if statusCode < 100 || statusCode > 199 {
rr.wroteHeader = true
// decide whether we should buffer the response
if rr.shouldBuffer == nil {
rr.stream = true
} else {
rr.stream = !rr.shouldBuffer(rr.statusCode, rr.ResponseWriterWrapper.Header())
}
}
// if informational or not buffered, immediately write header
if rr.stream || (100 <= statusCode && statusCode <= 199) {
rr.ResponseWriterWrapper.WriteHeader(statusCode)
}
}
func (rr *responseRecorder) Write(data []byte) (int, error) {
rr.WriteHeader(http.StatusOK)
var n int
var err error
if rr.stream {
n, err = rr.ResponseWriterWrapper.Write(data)
} else {
n, err = rr.buf.Write(data)
}
rr.size += n
return n, err
}
func (rr *responseRecorder) ReadFrom(r io.Reader) (int64, error) {
rr.WriteHeader(http.StatusOK)
var n int64
var err error
if rr.stream {
n, err = rr.ResponseWriterWrapper.ReadFrom(r)
} else {
n, err = rr.buf.ReadFrom(r)
}
rr.size += int(n)
return n, err
}
// Status returns the status code that was written, if any.
func (rr *responseRecorder) Status() int {
return rr.statusCode
}
// Size returns the number of bytes written,
// not including the response headers.
func (rr *responseRecorder) Size() int {
return rr.size
}
// Buffer returns the body buffer that rr was created with.
// You should still have your original pointer, though.
func (rr *responseRecorder) Buffer() *bytes.Buffer {
return rr.buf
}
// Buffered returns whether rr has decided to buffer the response.
func (rr *responseRecorder) Buffered() bool {
return !rr.stream
}
func (rr *responseRecorder) WriteResponse() error {
if rr.statusCode == 0 {
// could happen if no handlers actually wrote anything,
// and this prevents a panic; status must be > 0
rr.WriteHeader(http.StatusOK)
}
if rr.stream {
return nil
}
rr.ResponseWriterWrapper.WriteHeader(rr.statusCode)
_, err := io.Copy(rr.ResponseWriterWrapper, rr.buf)
return err
}
// FlushError will suppress actual flushing if the response is buffered. See:
// https://github.com/caddyserver/caddy/issues/6144
func (rr *responseRecorder) FlushError() error {
if rr.stream {
//nolint:bodyclose
return http.NewResponseController(rr.ResponseWriterWrapper).Flush()
}
return nil
}
// Private interface method, so it can only be used within this package.
// TODO: maybe export it later
func (rr *responseRecorder) setReadSize(size *int) {
rr.readSize = size
}
func (rr *responseRecorder) Hijack() (net.Conn, *bufio.ReadWriter, error) {
//nolint:bodyclose
conn, brw, err := http.NewResponseController(rr.ResponseWriterWrapper).Hijack()
if err != nil {
return nil, nil, err
}
// Per the net/http documentation, the returned bufio.Writer is empty, but the bufio.Reader may not be
conn = &hijackedConn{conn, rr}
brw.Writer.Reset(conn)
buffered := brw.Reader.Buffered()
if buffered != 0 {
conn.(*hijackedConn).updateReadSize(buffered)
data, _ := brw.Peek(buffered)
brw.Reader.Reset(io.MultiReader(bytes.NewReader(data), conn))
// peek to make buffered data appear, as Reset will make it 0
_, _ = brw.Peek(buffered)
} else {
brw.Reader.Reset(conn)
}
return conn, brw, nil
}
// used to track the size of hijacked response writers
type hijackedConn struct {
net.Conn
rr *responseRecorder
}
func (hc *hijackedConn) updateReadSize(n int) {
if hc.rr.readSize != nil {
*hc.rr.readSize += n
}
}
func (hc *hijackedConn) Read(p []byte) (int, error) {
n, err := hc.Conn.Read(p)
hc.updateReadSize(n)
return n, err
}
func (hc *hijackedConn) WriteTo(w io.Writer) (int64, error) {
n, err := io.Copy(w, hc.Conn)
hc.updateReadSize(int(n))
return n, err
}
func (hc *hijackedConn) Write(p []byte) (int, error) {
n, err := hc.Conn.Write(p)
hc.rr.size += n
return n, err
}
func (hc *hijackedConn) ReadFrom(r io.Reader) (int64, error) {
n, err := io.Copy(hc.Conn, r)
hc.rr.size += int(n)
return n, err
}
// ResponseRecorder is a http.ResponseWriter that records
// responses instead of writing them to the client. See
// docs for NewResponseRecorder for proper usage.
type ResponseRecorder interface {
http.ResponseWriter
Status() int
Buffer() *bytes.Buffer
Buffered() bool
Size() int
WriteResponse() error
}
// ShouldBufferFunc is a function that returns true if the
// response should be buffered, given the pending HTTP status
// code and response headers.
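//
// For example (illustrative), to buffer only HTML responses:
//
//	shouldBuffer := func(status int, header http.Header) bool {
//		return strings.HasPrefix(header.Get("Content-Type"), "text/html")
//	}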
type ShouldBufferFunc func(status int, header http.Header) bool
// Interface guards
var (
_ http.ResponseWriter = (*ResponseWriterWrapper)(nil)
_ ResponseRecorder = (*responseRecorder)(nil)
// Implementing ReaderFrom can be such a significant
// optimization that it should probably be required!
// see PR #5022 (25%-50% speedup)
_ io.ReaderFrom = (*ResponseWriterWrapper)(nil)
_ io.ReaderFrom = (*responseRecorder)(nil)
_ io.ReaderFrom = (*hijackedConn)(nil)
_ io.WriterTo = (*hijackedConn)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"encoding/json"
"fmt"
"net/http"
"github.com/caddyserver/caddy/v2"
)
// Route consists of a set of rules for matching HTTP requests,
// a list of handlers to execute, and optional flow control
// parameters which customize the handling of HTTP requests
// in a highly flexible and performant manner.
type Route struct {
// Group is an optional name for a group to which this
// route belongs. Grouping a route makes it mutually
// exclusive with others in its group; if a route belongs
// to a group, only the first matching route in that group
// will be executed.
Group string `json:"group,omitempty"`
// The matcher sets which will be used to qualify this
// route for a request (essentially the "if" statement
// of this route). Each matcher set is OR'ed, but matchers
// within a set are AND'ed together.
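//
// For example (illustrative), two matcher sets, where the first
// requires both the host and the path to match:
//
//	"match": [
//		{"host": ["example.com"], "path": ["/api/*"]},
//		{"path": ["/healthz"]}
//	]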
MatcherSetsRaw RawMatcherSets `json:"match,omitempty" caddy:"namespace=http.matchers"`
// The list of handlers for this route. Upon matching a request, they are chained
// together in a middleware fashion: requests flow from the first handler to the last
// (top of the list to the bottom), with the possibility that any handler could stop
// the chain and/or return an error. Responses flow back through the chain (bottom of
// the list to the top) as they are written out to the client.
//
// Not all handlers call the next handler in the chain. For example, the reverse_proxy
// handler always sends a request upstream or returns an error. Thus, configuring
// handlers after reverse_proxy in the same route is illogical, since they would never
// be executed. You will want to put handlers which originate the response at the very
// end of your route(s). The documentation for a module should state whether it invokes
// the next handler, but sometimes it is common sense.
//
// Some handlers manipulate the response. Remember that requests flow down the list, and
// responses flow up the list.
//
// For example, if you wanted to use both `templates` and `encode` handlers, you would
// need to put `templates` after `encode` in your route, because responses flow up.
// Thus, `templates` will be able to parse and execute the plain-text response as a
// template, and then return it up to the `encode` handler which will then compress it
// into a binary format.
//
// If `templates` came before `encode`, then `encode` would write a compressed,
// binary-encoded response to `templates` which would not be able to parse the response
// properly.
//
// The correct order, then, is this:
//
// [
// {"handler": "encode"},
// {"handler": "templates"},
// {"handler": "file_server"}
// ]
//
// The request flows ⬇️ DOWN (`encode` -> `templates` -> `file_server`).
//
// 1. First, `encode` will choose how to `encode` the response and wrap the response.
// 2. Then, `templates` will wrap the response with a buffer.
// 3. Finally, `file_server` will originate the content from a file.
//
// The response flows ⬆️ UP (`file_server` -> `templates` -> `encode`):
//
// 1. First, `file_server` will write the file to the response.
// 2. That write will be buffered and then executed by `templates`.
// 3. Lastly, the write from `templates` will flow into `encode` which will compress the stream.
//
// If you think of routes in this way, it will be easy and even fun to solve the puzzle of writing correct routes.
HandlersRaw []json.RawMessage `json:"handle,omitempty" caddy:"namespace=http.handlers inline_key=handler"`
// If true, no more routes will be executed after this one.
Terminal bool `json:"terminal,omitempty"`
// decoded values
MatcherSets MatcherSets `json:"-"`
Handlers []MiddlewareHandler `json:"-"`
middleware []Middleware
}
// Empty returns true if the route has all zero/default values.
func (r Route) Empty() bool {
return len(r.MatcherSetsRaw) == 0 &&
len(r.MatcherSets) == 0 &&
len(r.HandlersRaw) == 0 &&
len(r.Handlers) == 0 &&
!r.Terminal &&
r.Group == ""
}
func (r Route) String() string {
handlersRaw := "["
for _, hr := range r.HandlersRaw {
handlersRaw += " " + string(hr)
}
handlersRaw += "]"
return fmt.Sprintf(`{Group:"%s" MatcherSetsRaw:%s HandlersRaw:%s Terminal:%t}`,
r.Group, r.MatcherSetsRaw, handlersRaw, r.Terminal)
}
// Provision sets up both the matchers and handlers in the route.
func (r *Route) Provision(ctx caddy.Context, metrics *Metrics) error {
err := r.ProvisionMatchers(ctx)
if err != nil {
return err
}
return r.ProvisionHandlers(ctx, metrics)
}
// ProvisionMatchers sets up all the matchers by loading the
// matcher modules. Only call this method directly if you need
// to set up matchers and handlers separately without having
// to provision a second time; otherwise use Provision instead.
func (r *Route) ProvisionMatchers(ctx caddy.Context) error {
// matchers
matchersIface, err := ctx.LoadModule(r, "MatcherSetsRaw")
if err != nil {
return fmt.Errorf("loading matcher modules: %v", err)
}
err = r.MatcherSets.FromInterface(matchersIface)
if err != nil {
return err
}
return nil
}
// ProvisionHandlers sets up all the handlers by loading the
// handler modules. Only call this method directly if you need
// to set up matchers and handlers separately without having
// to provision a second time; otherwise use Provision instead.
func (r *Route) ProvisionHandlers(ctx caddy.Context, metrics *Metrics) error {
handlersIface, err := ctx.LoadModule(r, "HandlersRaw")
if err != nil {
return fmt.Errorf("loading handler modules: %v", err)
}
for _, handler := range handlersIface.([]any) {
r.Handlers = append(r.Handlers, handler.(MiddlewareHandler))
}
// pre-compile the middleware handler chain
for _, midhandler := range r.Handlers {
r.middleware = append(r.middleware, wrapMiddleware(ctx, midhandler, metrics))
}
return nil
}
// Compile prepares a middleware chain from this route.
// This should only be done once during the request, just
// before the middleware chain is executed.
func (r Route) Compile(next Handler) Handler {
return wrapRoute(r)(next)
}
// RouteList is a list of server routes that can
// create a middleware chain.
type RouteList []Route
// Provision sets up both the matchers and handlers in the routes.
func (routes RouteList) Provision(ctx caddy.Context) error {
err := routes.ProvisionMatchers(ctx)
if err != nil {
return err
}
return routes.ProvisionHandlers(ctx, nil)
}
// ProvisionMatchers sets up all the matchers by loading the
// matcher modules. Only call this method directly if you need
// to set up matchers and handlers separately without having
// to provision a second time; otherwise use Provision instead.
func (routes RouteList) ProvisionMatchers(ctx caddy.Context) error {
for i := range routes {
err := routes[i].ProvisionMatchers(ctx)
if err != nil {
return fmt.Errorf("route %d: %v", i, err)
}
}
return nil
}
// ProvisionHandlers sets up all the handlers by loading the
// handler modules. Only call this method directly if you need
// to set up matchers and handlers separately without having
// to provision a second time; otherwise use Provision instead.
func (routes RouteList) ProvisionHandlers(ctx caddy.Context, metrics *Metrics) error {
for i := range routes {
err := routes[i].ProvisionHandlers(ctx, metrics)
if err != nil {
return fmt.Errorf("route %d: %v", i, err)
}
}
return nil
}
// Compile prepares a middleware chain from the route list.
// This should only be done either once during provisioning
// for top-level routes, or on each request just before the
// middleware chain is executed for subroutes.
func (routes RouteList) Compile(next Handler) Handler {
mid := make([]Middleware, 0, len(routes))
for _, route := range routes {
mid = append(mid, wrapRoute(route))
}
stack := next
for i := len(mid) - 1; i >= 0; i-- {
stack = mid[i](stack)
}
return stack
}
// wrapRoute wraps route with a middleware and handler so that it can
// be chained in and defer evaluation of its matchers to request-time.
// Like wrapMiddleware, it is vital that this wrapping takes place in
// its own stack frame so as to not overwrite the reference to the
// intended route by looping and changing the reference each time.
func wrapRoute(route Route) Middleware {
return func(next Handler) Handler {
return HandlerFunc(func(rw http.ResponseWriter, req *http.Request) error {
// TODO: Update this comment, it seems we've moved the copy into the handler?
// copy the next handler (it's an interface, so it's just
// a very lightweight copy of a pointer); this is important
// because this is a closure to the func below, which
// re-assigns the value as it compiles the middleware stack;
// if we don't make this copy, we'd affect the underlying
// pointer for all future requests (yikes); we could
// alternatively solve this by moving the func below out of
// this closure and into a standalone package-level func,
// but I just thought this made more sense
nextCopy := next
// route must match at least one of the matcher sets
if !route.MatcherSets.AnyMatch(req) {
// allow matchers the opportunity to short circuit
// the request and trigger the error handling chain
err, ok := GetVar(req.Context(), MatcherErrorVarKey).(error)
if ok {
// clear out the error from context, otherwise
// it will cascade to the error routes (#4916)
SetVar(req.Context(), MatcherErrorVarKey, nil)
// return the matcher's error
return err
}
// call the next handler, and skip this one,
// since the matcher didn't match
return nextCopy.ServeHTTP(rw, req)
}
// if route is part of a group, ensure only the
// first matching route in the group is applied
if route.Group != "" {
groups := req.Context().Value(routeGroupCtxKey).(map[string]struct{})
if _, ok := groups[route.Group]; ok {
// this group has already been
// satisfied by a matching route
return nextCopy.ServeHTTP(rw, req)
}
// this matching route satisfies the group
groups[route.Group] = struct{}{}
}
// make terminal routes terminate
if route.Terminal {
if _, ok := req.Context().Value(ErrorCtxKey).(error); ok {
nextCopy = errorEmptyHandler
} else {
nextCopy = emptyHandler
}
}
// compile this route's handler stack
for i := len(route.middleware) - 1; i >= 0; i-- {
nextCopy = route.middleware[i](nextCopy)
}
return nextCopy.ServeHTTP(rw, req)
})
}
}
// wrapMiddleware wraps mh such that it can be correctly
// appended to a list of middleware in preparation for
// compiling into a handler chain. We can't do this inline
// inside a loop, because it relies on a reference to mh
// not changing until the execution of its handler (which
// is deferred by multiple func closures). In other words,
// we need to pull this particular MiddlewareHandler
// pointer into its own stack frame to preserve it so it
// won't be overwritten in future loop iterations.
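//
// In other words, the provisioner builds the chain roughly like this
// (see ProvisionHandlers above):
//
//	for _, mh := range r.Handlers {
//		r.middleware = append(r.middleware, wrapMiddleware(ctx, mh, metrics))
//	}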
func wrapMiddleware(_ caddy.Context, mh MiddlewareHandler, metrics *Metrics) Middleware {
handlerToUse := mh
if metrics != nil {
// wrap the middleware with metrics instrumentation
handlerToUse = newMetricsInstrumentedHandler(caddy.GetModuleName(mh), mh)
}
return func(next Handler) Handler {
// copy the next handler (it's an interface, so it's
// just a very lightweight copy of a pointer); this
// is a safeguard against the handler changing the
// value, which could affect future requests (yikes)
nextCopy := next
return HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
// EXPERIMENTAL: Trace each module that gets invoked
if server, ok := r.Context().Value(ServerCtxKey).(*Server); ok && server != nil {
server.logTrace(handlerToUse)
}
return handlerToUse.ServeHTTP(w, r, nextCopy)
})
}
}
// MatcherSet is a set of matchers which
// must all match in order for the request
// to be matched successfully.
type MatcherSet []RequestMatcher
// Match returns true if the request matches all
// matchers in mset or if there are no matchers.
func (mset MatcherSet) Match(r *http.Request) bool {
for _, m := range mset {
if !m.Match(r) {
return false
}
}
return true
}
// RawMatcherSets is a group of matcher sets
// in their raw, JSON form.
type RawMatcherSets []caddy.ModuleMap
// MatcherSets is a group of matcher sets capable
// of checking whether a request matches any of
// the sets.
type MatcherSets []MatcherSet
// AnyMatch returns true if req matches any of the
// matcher sets in ms or if there are no matchers,
// in which case the request always matches.
func (ms MatcherSets) AnyMatch(req *http.Request) bool {
for _, m := range ms {
if m.Match(req) {
return true
}
}
return len(ms) == 0
}
// FromInterface fills ms from an 'any' value obtained from LoadModule.
func (ms *MatcherSets) FromInterface(matcherSets any) error {
for _, matcherSetIfaces := range matcherSets.([]map[string]any) {
var matcherSet MatcherSet
for _, matcher := range matcherSetIfaces {
reqMatcher, ok := matcher.(RequestMatcher)
if !ok {
return fmt.Errorf("decoded module is not a RequestMatcher: %#v", matcher)
}
matcherSet = append(matcherSet, reqMatcher)
}
*ms = append(*ms, matcherSet)
}
return nil
}
// TODO: Is this used?
func (ms MatcherSets) String() string {
result := "["
for _, matcherSet := range ms {
for _, matcher := range matcherSet {
result += fmt.Sprintf(" %#v", matcher)
}
}
return result + " ]"
}
var routeGroupCtxKey = caddy.CtxKey("route_group")
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"net/netip"
"net/url"
"runtime"
"strings"
"sync"
"time"
"github.com/caddyserver/certmagic"
"github.com/quic-go/quic-go"
"github.com/quic-go/quic-go/http3"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/modules/caddyevents"
"github.com/caddyserver/caddy/v2/modules/caddytls"
)
// Server describes an HTTP server.
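//
// A minimal JSON sketch of one server (the handler module and values
// are illustrative only):
//
//	{
//		"listen": [":443"],
//		"routes": [{
//			"handle": [{"handler": "static_response", "body": "Hello, world!"}]
//		}]
//	}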
type Server struct {
// Socket addresses to which to bind listeners. Accepts
// [network addresses](/docs/conventions#network-addresses)
// that may include port ranges. Listener addresses must
// be unique; they cannot be repeated across all defined
// servers.
Listen []string `json:"listen,omitempty"`
// A list of listener wrapper modules, which can modify the behavior
// of the base listener. They are applied in the given order.
ListenerWrappersRaw []json.RawMessage `json:"listener_wrappers,omitempty" caddy:"namespace=caddy.listeners inline_key=wrapper"`
// How long to allow a read from a client's upload. Setting this
// to a short, non-zero value can mitigate slowloris attacks, but
// may also affect legitimately slow clients.
ReadTimeout caddy.Duration `json:"read_timeout,omitempty"`
// ReadHeaderTimeout is like ReadTimeout but for request headers.
ReadHeaderTimeout caddy.Duration `json:"read_header_timeout,omitempty"`
// WriteTimeout is how long to allow a write to a client. Note
// that setting this to a small value when serving large files
// may negatively affect legitimately slow clients.
WriteTimeout caddy.Duration `json:"write_timeout,omitempty"`
// IdleTimeout is the maximum time to wait for the next request
// when keep-alives are enabled. If zero, a default timeout of
// 5m is applied to help avoid resource exhaustion.
IdleTimeout caddy.Duration `json:"idle_timeout,omitempty"`
// KeepAliveInterval is the interval at which TCP keepalive packets
// are sent to keep the connection alive at the TCP layer when no other
// data is being transmitted. The default is 15s.
KeepAliveInterval caddy.Duration `json:"keepalive_interval,omitempty"`
// MaxHeaderBytes is the maximum size to parse from a client's
// HTTP request headers.
MaxHeaderBytes int `json:"max_header_bytes,omitempty"`
// Enable full-duplex communication for HTTP/1 requests.
// Only has an effect if Caddy was built with Go 1.21 or later.
//
// For HTTP/1 requests, the Go HTTP server by default consumes any
// unread portion of the request body before beginning to write the
// response, preventing handlers from concurrently reading from the
// request and writing the response. Enabling this option disables
// this behavior and permits handlers to continue to read from the
// request while concurrently writing the response.
//
// For HTTP/2 requests, the Go HTTP server always permits concurrent
// reads and responses, so this option has no effect.
//
// Test thoroughly with your HTTP clients, as some older clients may
// not support full-duplex HTTP/1 which can cause them to deadlock.
// See https://github.com/golang/go/issues/57786 for more info.
//
// TODO: This is an EXPERIMENTAL feature. Subject to change or removal.
EnableFullDuplex bool `json:"enable_full_duplex,omitempty"`
// Routes describes how this server will handle requests.
// Routes are executed sequentially. First a route's matchers
// are evaluated, then its grouping. If it matches and has
// not been mutually-excluded by its grouping, then its
// handlers are executed sequentially. The sequence of invoked
// handlers comprises a compiled middleware chain that flows
// from each matching route and its handlers to the next.
//
// By default, all unrouted requests receive a 200 OK response
// to indicate the server is working.
Routes RouteList `json:"routes,omitempty"`
// Errors is how this server will handle errors returned from any
// of the handlers in the primary routes. If the primary handler
// chain returns an error, the error along with its recommended
// status code are bubbled back up to the HTTP server which
// executes a separate error route, specified using this property.
// The error routes work exactly like the normal routes.
Errors *HTTPErrorConfig `json:"errors,omitempty"`
// NamedRoutes describes a mapping of reusable routes that can be
// invoked by their name. This can be used to optimize memory usage
// when the same route is needed for many subroutes, by having
// the handlers and matchers be only provisioned once, but used from
// many places. These routes are not executed unless they are invoked
// from another route.
//
// EXPERIMENTAL: Subject to change or removal.
NamedRoutes map[string]*Route `json:"named_routes,omitempty"`
// How to handle TLS connections. At least one policy is
// required to enable HTTPS on this server if automatic
// HTTPS is disabled or does not apply.
TLSConnPolicies caddytls.ConnectionPolicies `json:"tls_connection_policies,omitempty"`
// AutoHTTPS configures or disables automatic HTTPS within this server.
// HTTPS is enabled automatically and by default when qualifying names
// are present in a Host matcher and/or when the server is listening
// only on the HTTPS port.
AutoHTTPS *AutoHTTPSConfig `json:"automatic_https,omitempty"`
// If true, will require that a request's Host header match
// the value of the ServerName sent by the client's TLS
// ClientHello; often a necessary safeguard when using TLS
// client authentication.
StrictSNIHost *bool `json:"strict_sni_host,omitempty"`
// A module which provides a source of IP ranges, from which
// requests should be trusted. By default, no proxies are
// trusted.
//
// On its own, this configuration will not do anything,
// but it can be used as a default set of ranges for
// handlers or matchers in routes to pick up, instead
// of needing to configure each of them. See the
// `reverse_proxy` handler for example, which uses this
// to trust sensitive incoming `X-Forwarded-*` headers.
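//
// For example (illustrative, using the standard `static` IP source):
//
//	"trusted_proxies": {
//		"source": "static",
//		"ranges": ["10.0.0.0/8", "192.168.0.0/16"]
//	}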
TrustedProxiesRaw json.RawMessage `json:"trusted_proxies,omitempty" caddy:"namespace=http.ip_sources inline_key=source"`
// The headers from which the client IP address could be
// read. These will be considered in order, with the
// first good value being used as the client IP.
// By default, only `X-Forwarded-For` is considered.
//
// This depends on `trusted_proxies` being configured and
// the request being validated as coming from a trusted
// proxy, otherwise the client IP will be set to the direct
// remote IP address.
ClientIPHeaders []string `json:"client_ip_headers,omitempty"`
// If greater than zero, enables strict ClientIPHeaders
// (default X-Forwarded-For) parsing. If enabled, the
// ClientIPHeaders will be parsed from right to left, and
// the first value that is both valid and doesn't match the
// trusted proxy list will be used as client IP. If zero,
// the ClientIPHeaders will be parsed from left to right,
// and the first value that is a valid IP address will be
// used as client IP.
//
// This depends on `trusted_proxies` being configured.
// This option is disabled by default.
TrustedProxiesStrict int `json:"trusted_proxies_strict,omitempty"`
// Enables access logging and configures how access logs are handled
// in this server. To minimally enable access logs, simply set this
// to a non-null, empty struct.
Logs *ServerLogConfig `json:"logs,omitempty"`
// Protocols specifies which HTTP protocols to enable.
// Supported values are:
//
// - `h1` (HTTP/1.1)
// - `h2` (HTTP/2)
// - `h2c` (cleartext HTTP/2)
// - `h3` (HTTP/3)
//
// If enabling `h2` or `h2c`, `h1` must also be enabled;
// this is due to current limitations in the Go standard
// library.
//
// HTTP/2 operates only over TLS (HTTPS). HTTP/3 opens
// a UDP socket to serve QUIC connections.
//
// H2C operates over plain TCP if the client supports it;
// however, because this is not implemented by the Go
// standard library, other server options are not compatible
// and will not be applied to H2C requests. Do not enable this
// only to achieve maximum client compatibility. In practice,
// very few clients implement H2C, and even fewer require it.
// Enabling H2C can be useful for serving/proxying gRPC
// if encryption is not possible or desired.
//
// We recommend for most users to simply let Caddy use the
// default settings.
//
// Default: `[h1 h2 h3]`
Protocols []string `json:"protocols,omitempty"`
// If set, metrics observations will be enabled.
// This setting is EXPERIMENTAL and subject to change.
Metrics *Metrics `json:"metrics,omitempty"`
name string
primaryHandlerChain Handler
errorHandlerChain Handler
listenerWrappers []caddy.ListenerWrapper
listeners []net.Listener
tlsApp *caddytls.TLS
events *caddyevents.App
logger *zap.Logger
accessLogger *zap.Logger
errorLogger *zap.Logger
traceLogger *zap.Logger
ctx caddy.Context
server *http.Server
h3server *http3.Server
h2listeners []*http2Listener
addresses []caddy.NetworkAddress
trustedProxies IPRangeSource
shutdownAt time.Time
shutdownAtMu *sync.RWMutex
// registered callback functions
connStateFuncs []func(net.Conn, http.ConnState)
connContextFuncs []func(ctx context.Context, c net.Conn) context.Context
onShutdownFuncs []func()
onStopFuncs []func(context.Context) error // TODO: Experimental (Nov. 2023)
}
// ServeHTTP is the entry point for all HTTP requests.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// If there are listener wrappers that process tls connections but don't return a *tls.Conn, this field will be nil.
// TODO: Can be removed if https://github.com/golang/go/pull/56110 is ever merged.
if r.TLS == nil {
// not all requests have a conn (like virtual requests) - see #5698
if conn, ok := r.Context().Value(ConnCtxKey).(net.Conn); ok {
if csc, ok := conn.(connectionStateConn); ok {
r.TLS = new(tls.ConnectionState)
*r.TLS = csc.ConnectionState()
}
}
}
w.Header().Set("Server", "Caddy")
// advertise HTTP/3, if enabled
if s.h3server != nil {
if r.ProtoMajor < 3 {
err := s.h3server.SetQUICHeaders(w.Header())
if err != nil {
s.logger.Error("setting HTTP/3 Alt-Svc header", zap.Error(err))
}
}
}
// reject very long methods; probably a mistake or an attack
if len(r.Method) > 32 {
if s.shouldLogRequest(r) {
s.accessLogger.Debug("rejecting request with long method",
zap.String("method_trunc", r.Method[:32]),
zap.String("remote_addr", r.RemoteAddr))
}
w.WriteHeader(http.StatusMethodNotAllowed)
return
}
repl := caddy.NewReplacer()
r = PrepareRequest(r, repl, w, s)
// enable full-duplex for HTTP/1, which allows the handler
// to interleave reads of the request body with writes of
// the response, rather than requiring the body to be fully
// consumed before the response is written
if s.EnableFullDuplex && r.ProtoMajor == 1 {
//nolint:bodyclose
err := http.NewResponseController(w).EnableFullDuplex()
if err != nil {
s.logger.Warn("failed to enable full duplex", zap.Error(err))
}
}
// encode the request for logging purposes before
// it enters any handler chain; this is necessary
// to capture the original request in case it gets
// modified during handling
shouldLogCredentials := s.Logs != nil && s.Logs.ShouldLogCredentials
loggableReq := zap.Object("request", LoggableHTTPRequest{
Request: r,
ShouldLogCredentials: shouldLogCredentials,
})
errLog := s.errorLogger.With(loggableReq)
var duration time.Duration
if s.shouldLogRequest(r) {
wrec := NewResponseRecorder(w, nil, nil)
w = wrec
// wrap the request body in a LengthReader
// so we can track the number of bytes read from it
var bodyReader *lengthReader
if r.Body != nil {
bodyReader = &lengthReader{Source: r.Body}
r.Body = bodyReader
// should always be true, private interface can only be referenced in the same package
if setReadSizer, ok := wrec.(interface{ setReadSize(*int) }); ok {
setReadSizer.setReadSize(&bodyReader.Length)
}
}
// capture the original version of the request
accLog := s.accessLogger.With(loggableReq)
defer s.logRequest(accLog, r, wrec, &duration, repl, bodyReader, shouldLogCredentials)
}
start := time.Now()
// guarantee ACME HTTP challenges; handle them
// separately from any user-defined handlers
if s.tlsApp.HandleHTTPChallenge(w, r) {
duration = time.Since(start)
return
}
// execute the primary handler chain
err := s.primaryHandlerChain.ServeHTTP(w, r)
duration = time.Since(start)
// if no errors, we're done!
if err == nil {
return
}
// restore original request before invoking error handler chain (issue #3717)
// TODO: this does not restore original headers, if modified (for efficiency)
origReq := r.Context().Value(OriginalRequestCtxKey).(http.Request)
r.Method = origReq.Method
r.RemoteAddr = origReq.RemoteAddr
r.RequestURI = origReq.RequestURI
cloneURL(origReq.URL, r.URL)
// prepare the error log
errLog = errLog.With(zap.Duration("duration", duration))
errLoggers := []*zap.Logger{errLog}
if s.Logs != nil {
errLoggers = s.Logs.wrapLogger(errLog, r)
}
// get the values that will be used to log the error
errStatus, errMsg, errFields := errLogValues(err)
// add HTTP error information to request context
r = s.Errors.WithError(r, err)
if s.Errors != nil && len(s.Errors.Routes) > 0 {
// execute user-defined error handling route
err2 := s.errorHandlerChain.ServeHTTP(w, r)
if err2 == nil {
// user's error route handled the error response
// successfully, so now just log the error
for _, logger := range errLoggers {
logger.Debug(errMsg, errFields...)
}
} else {
// well... this is awkward
errFields = append([]zapcore.Field{
zap.String("error", err2.Error()),
zap.Namespace("first_error"),
zap.String("msg", errMsg),
}, errFields...)
for _, logger := range errLoggers {
logger.Error("error handling handler error", errFields...)
}
if handlerErr, ok := err.(HandlerError); ok {
w.WriteHeader(handlerErr.StatusCode)
} else {
w.WriteHeader(http.StatusInternalServerError)
}
}
} else {
for _, logger := range errLoggers {
if errStatus >= 500 {
logger.Error(errMsg, errFields...)
} else {
logger.Debug(errMsg, errFields...)
}
}
w.WriteHeader(errStatus)
}
}
// wrapPrimaryRoute wraps stack (a compiled middleware handler chain)
// in s.enforcementHandler which performs crucial security checks, etc.
func (s *Server) wrapPrimaryRoute(stack Handler) Handler {
return HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
return s.enforcementHandler(w, r, stack)
})
}
// enforcementHandler is an implicit middleware which performs
// standard checks before executing the HTTP middleware chain.
func (s *Server) enforcementHandler(w http.ResponseWriter, r *http.Request, next Handler) error {
// enforce strict host matching, which ensures that the SNI
// value (if any), matches the Host header; essential for
// servers that rely on TLS ClientAuth sharing a listener
// with servers that do not; if not enforced, client could
// bypass by sending benign SNI then restricted Host header
if s.StrictSNIHost != nil && *s.StrictSNIHost && r.TLS != nil {
hostname, _, err := net.SplitHostPort(r.Host)
if err != nil {
hostname = r.Host // OK; probably lacked port
}
if !strings.EqualFold(r.TLS.ServerName, hostname) {
err := fmt.Errorf("strict host matching: TLS ServerName (%s) and HTTP Host (%s) values differ",
r.TLS.ServerName, hostname)
r.Close = true
return Error(http.StatusMisdirectedRequest, err)
}
}
return next.ServeHTTP(w, r)
}
// listenersUseAnyPortOtherThan returns true if there are any
// listeners in s that use a port which is not otherPort.
func (s *Server) listenersUseAnyPortOtherThan(otherPort int) bool {
for _, lnAddr := range s.Listen {
laddrs, err := caddy.ParseNetworkAddress(lnAddr)
if err != nil {
continue
}
if uint(otherPort) > laddrs.EndPort || uint(otherPort) < laddrs.StartPort {
return true
}
}
return false
}
// hasListenerAddress returns true if s has a listener
// at the given address fullAddr. Currently, fullAddr
// must represent exactly one socket address (port
// ranges are not supported).
func (s *Server) hasListenerAddress(fullAddr string) bool {
laddrs, err := caddy.ParseNetworkAddress(fullAddr)
if err != nil {
return false
}
if laddrs.PortRangeSize() != 1 {
return false // TODO: support port ranges
}
for _, lnAddr := range s.Listen {
thisAddrs, err := caddy.ParseNetworkAddress(lnAddr)
if err != nil {
continue
}
if thisAddrs.Network != laddrs.Network {
continue
}
// Apparently, Linux requires all bound ports to be distinct
// *regardless of host interface* even if the addresses are
// in fact different; binding "192.168.0.1:9000" and then
// ":9000" will fail for ":9000" because "address is already
// in use" even though it's not, and the same bindings work
// fine on macOS. I also found on Linux that listening on
// "[::]:9000" would fail with a similar error, except with
// the address "0.0.0.0:9000", as if deliberately ignoring
// that I specified the IPv6 interface explicitly. This seems
// to be a major bug in the Linux network stack and I don't
// know why it hasn't been fixed yet, so for now we have to
// special-case ourselves around Linux like a doting parent.
// The second issue seems very similar to a discussion here:
// https://github.com/nodejs/node/issues/9390
//
// This is very easy to reproduce by creating an HTTP server
// that listens to both addresses or just one with a host
// interface; or for a more confusing reproduction, try
// listening on "127.0.0.1:80" and ":80" and you'll see
// the error, if you take away the GOOS condition below.
//
// So, an address is equivalent if the port is in the port
// range, and if not on Linux, the host is the same... sigh.
if (runtime.GOOS == "linux" || thisAddrs.Host == laddrs.Host) &&
(laddrs.StartPort <= thisAddrs.EndPort) &&
(laddrs.StartPort >= thisAddrs.StartPort) {
return true
}
}
return false
}
func (s *Server) hasTLSClientAuth() bool {
for _, cp := range s.TLSConnPolicies {
if cp.ClientAuthentication != nil && cp.ClientAuthentication.Active() {
return true
}
}
return false
}
// findLastRouteWithHostMatcher returns the index of the last route
// in the server which has a host matcher. Used during Automatic HTTPS
// to determine where to insert the HTTP->HTTPS redirect route, such
// that it is after any other host matcher but before any "catch-all"
// route without a host matcher.
func (s *Server) findLastRouteWithHostMatcher() int {
foundHostMatcher := false
lastIndex := len(s.Routes)
for i, route := range s.Routes {
// since we want to break out of an inner loop, use a closure
// to allow us to use 'return' when we find a host matcher
found := (func() bool {
for _, sets := range route.MatcherSets {
for _, matcher := range sets {
switch matcher.(type) {
case *MatchHost:
foundHostMatcher = true
return true
}
}
}
return false
})()
// if we found the host matcher, change the lastIndex to
// just after the current route
if found {
lastIndex = i + 1
}
}
// If we didn't actually find a host matcher, return 0
// because that means every defined route was a "catch-all".
// See https://caddy.community/t/how-to-set-priority-in-caddyfile/13002/8
if !foundHostMatcher {
return 0
}
return lastIndex
}
// serveHTTP3 creates a QUIC listener, configures an HTTP/3 server if
// not already done, and then uses that server to serve HTTP/3 over
// the listener, with Server s as the handler.
func (s *Server) serveHTTP3(addr caddy.NetworkAddress, tlsCfg *tls.Config) error {
addr.Network = getHTTP3Network(addr.Network)
h3ln, err := addr.ListenQUIC(s.ctx, 0, net.ListenConfig{}, tlsCfg)
if err != nil {
return fmt.Errorf("starting HTTP/3 QUIC listener: %v", err)
}
// create HTTP/3 server if not done already
if s.h3server == nil {
s.h3server = &http3.Server{
// Currently, when an http3.Server is closed, only its listeners are closed. But Caddy reuses these listeners
// if possible, so requests would still be read and handled by the old handler. Close these connections manually.
// see issue: https://github.com/caddyserver/caddy/issues/6195
// Will interrupt ongoing requests.
// TODO: remove the handler wrap after http3.Server.CloseGracefully is implemented, see App.Stop
Handler: http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
select {
case <-s.ctx.Done():
if quicConn, ok := request.Context().Value(quicConnCtxKey).(quic.Connection); ok {
//nolint:errcheck
quicConn.CloseWithError(quic.ApplicationErrorCode(http3.ErrCodeRequestRejected), "")
}
default:
s.ServeHTTP(writer, request)
}
}),
TLSConfig: tlsCfg,
MaxHeaderBytes: s.MaxHeaderBytes,
// TODO: remove this config when draft versions are no longer supported (we have no need to support drafts)
QUICConfig: &quic.Config{
Versions: []quic.Version{quic.Version1, quic.Version2},
},
ConnContext: func(ctx context.Context, c quic.Connection) context.Context {
return context.WithValue(ctx, quicConnCtxKey, c)
},
}
}
//nolint:errcheck
go s.h3server.ServeListener(h3ln)
return nil
}
// configureServer applies/binds the registered callback functions to the server.
func (s *Server) configureServer(server *http.Server) {
for _, f := range s.connStateFuncs {
if server.ConnState != nil {
baseConnStateFunc := server.ConnState
server.ConnState = func(conn net.Conn, state http.ConnState) {
baseConnStateFunc(conn, state)
f(conn, state)
}
} else {
server.ConnState = f
}
}
for _, f := range s.connContextFuncs {
if server.ConnContext != nil {
baseConnContextFunc := server.ConnContext
server.ConnContext = func(ctx context.Context, c net.Conn) context.Context {
return f(baseConnContextFunc(ctx, c), c)
}
} else {
server.ConnContext = f
}
}
for _, f := range s.onShutdownFuncs {
server.RegisterOnShutdown(f)
}
}
// RegisterConnState registers f to be invoked on s.ConnState.
func (s *Server) RegisterConnState(f func(net.Conn, http.ConnState)) {
s.connStateFuncs = append(s.connStateFuncs, f)
}
// RegisterConnContext registers f to be invoked as part of s.ConnContext.
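//
// A minimal usage sketch (illustrative only; connKey is a hypothetical
// context key defined by the caller):
//
//	srv.RegisterConnContext(func(ctx context.Context, c net.Conn) context.Context {
//		return context.WithValue(ctx, connKey, c)
//	})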
func (s *Server) RegisterConnContext(f func(ctx context.Context, c net.Conn) context.Context) {
s.connContextFuncs = append(s.connContextFuncs, f)
}
// RegisterOnShutdown registers f to be invoked when the server begins to shut down.
func (s *Server) RegisterOnShutdown(f func()) {
s.onShutdownFuncs = append(s.onShutdownFuncs, f)
}
// RegisterOnStop registers f to be invoked after the server has shut down completely.
//
// EXPERIMENTAL: Subject to change or removal.
func (s *Server) RegisterOnStop(f func(context.Context) error) {
s.onStopFuncs = append(s.onStopFuncs, f)
}
// HTTPErrorConfig determines how to handle errors
// from the HTTP handlers.
type HTTPErrorConfig struct {
// The routes to evaluate after the primary handler
// chain returns an error. In an error route, extra
// placeholders are available:
//
// Placeholder | Description
// ------------|---------------
// `{http.error.status_code}` | The recommended HTTP status code
// `{http.error.status_text}` | The status text associated with the recommended status code
// `{http.error.message}` | The error message
// `{http.error.trace}` | The origin of the error
// `{http.error.id}` | An identifier for this occurrence of the error
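//
// As an illustrative sketch (not a complete config), a single route with
// a static_response handler could surface these placeholders to the client:
//
//	[{
//		"handle": [{
//			"handler": "static_response",
//			"status_code": "{http.error.status_code}",
//			"body": "{http.error.status_code}: {http.error.message}"
//		}]
//	}]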
Routes RouteList `json:"routes,omitempty"`
}
// WithError makes a shallow copy of r to add the error to its
// context, and sets placeholders on the request's replacer
// related to err. It returns the modified request which has
// the error information in its context and replacer. It
// overwrites any existing error values that are stored.
func (*HTTPErrorConfig) WithError(r *http.Request, err error) *http.Request {
// add the raw error value to the request context
// so it can be accessed by error handlers
c := context.WithValue(r.Context(), ErrorCtxKey, err)
r = r.WithContext(c)
// add error values to the replacer
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
repl.Set("http.error", err)
if handlerErr, ok := err.(HandlerError); ok {
repl.Set("http.error.status_code", handlerErr.StatusCode)
repl.Set("http.error.status_text", http.StatusText(handlerErr.StatusCode))
repl.Set("http.error.id", handlerErr.ID)
repl.Set("http.error.trace", handlerErr.Trace)
if handlerErr.Err != nil {
repl.Set("http.error.message", handlerErr.Err.Error())
} else {
repl.Set("http.error.message", http.StatusText(handlerErr.StatusCode))
}
}
return r
}
// shouldLogRequest returns true if this request should be logged.
func (s *Server) shouldLogRequest(r *http.Request) bool {
if s.accessLogger == nil || s.Logs == nil {
// logging is disabled
return false
}
// strip off the port if any, logger names are host only
hostWithoutPort, _, err := net.SplitHostPort(r.Host)
if err != nil {
hostWithoutPort = r.Host
}
if _, ok := s.Logs.LoggerNames[hostWithoutPort]; ok {
// this host is mapped to a particular logger name
return true
}
for _, dh := range s.Logs.SkipHosts {
// logging for this particular host is disabled
if certmagic.MatchWildcard(hostWithoutPort, dh) {
return false
}
}
// if configured, this host is not mapped and thus must not be logged
return !s.Logs.SkipUnmappedHosts
}
// logTrace will log that this middleware handler is being invoked.
// It emits at DEBUG level.
func (s *Server) logTrace(mh MiddlewareHandler) {
if s.Logs == nil || !s.Logs.Trace {
return
}
s.traceLogger.Debug(caddy.GetModuleName(mh), zap.Any("module", mh))
}
// logRequest logs the request to access logs, unless skipped.
func (s *Server) logRequest(
accLog *zap.Logger, r *http.Request, wrec ResponseRecorder, duration *time.Duration,
repl *caddy.Replacer, bodyReader *lengthReader, shouldLogCredentials bool,
) {
// this request may be flagged as omitted from the logs
if skip, ok := GetVar(r.Context(), LogSkipVar).(bool); ok && skip {
return
}
repl.Set("http.response.status", wrec.Status()) // will be 0 if no response is written by us (Go will write 200 to client)
repl.Set("http.response.size", wrec.Size())
repl.Set("http.response.duration", duration)
repl.Set("http.response.duration_ms", duration.Seconds()*1e3) // multiply seconds to preserve decimal (see #4666)
userID, _ := repl.GetString("http.auth.user.id")
reqBodyLength := 0
if bodyReader != nil {
reqBodyLength = bodyReader.Length
}
extra := r.Context().Value(ExtraLogFieldsCtxKey).(*ExtraLogFields)
fieldCount := 6
fields := make([]zapcore.Field, 0, fieldCount+len(extra.fields))
fields = append(fields,
zap.Int("bytes_read", reqBodyLength),
zap.String("user_id", userID),
zap.Duration("duration", *duration),
zap.Int("size", wrec.Size()),
zap.Int("status", wrec.Status()),
zap.Object("resp_headers", LoggableHTTPHeader{
Header: wrec.Header(),
ShouldLogCredentials: shouldLogCredentials,
}))
fields = append(fields, extra.fields...)
loggers := []*zap.Logger{accLog}
if s.Logs != nil {
loggers = s.Logs.wrapLogger(accLog, r)
}
// wrapping may return multiple loggers, so we log to all of them
for _, logger := range loggers {
logAtLevel := logger.Info
if wrec.Status() >= 500 {
logAtLevel = logger.Error
}
message := "handled request"
if nop, ok := GetVar(r.Context(), "unhandled").(bool); ok && nop {
message = "NOP"
}
logAtLevel(message, fields...)
}
}
// protocol returns true if the protocol proto is configured/enabled.
func (s *Server) protocol(proto string) bool {
for _, p := range s.Protocols {
if p == proto {
return true
}
}
return false
}
// Listeners returns the server's listeners. These are active listeners,
// so calling Accept() or Close() on them will probably break things.
// They are made available here for read-only purposes (e.g. Addr())
// and for type-asserting for purposes where you know what you're doing.
//
// EXPERIMENTAL: Subject to change or removal.
func (s *Server) Listeners() []net.Listener { return s.listeners }
// Name returns the server's name.
func (s *Server) Name() string { return s.name }
// PrepareRequest fills the request r for use in a Caddy HTTP handler chain. w and s can
// be nil, but the handlers will lose response placeholders and access to the server.
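//
// A minimal sketch of its use (mirroring Server.ServeHTTP above):
//
//	repl := caddy.NewReplacer()
//	r = PrepareRequest(r, repl, w, s)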
func PrepareRequest(r *http.Request, repl *caddy.Replacer, w http.ResponseWriter, s *Server) *http.Request {
// set up the context for the request
ctx := context.WithValue(r.Context(), caddy.ReplacerCtxKey, repl)
ctx = context.WithValue(ctx, ServerCtxKey, s)
trusted, clientIP := determineTrustedProxy(r, s)
ctx = context.WithValue(ctx, VarsCtxKey, map[string]any{
TrustedProxyVarKey: trusted,
ClientIPVarKey: clientIP,
})
ctx = context.WithValue(ctx, routeGroupCtxKey, make(map[string]struct{}))
var url2 url.URL // avoid letting this escape to the heap
ctx = context.WithValue(ctx, OriginalRequestCtxKey, originalRequest(r, &url2))
ctx = context.WithValue(ctx, ExtraLogFieldsCtxKey, new(ExtraLogFields))
r = r.WithContext(ctx)
// now that the pointer to the request will no longer
// change, finish setting up the replacer
addHTTPVarsToReplacer(repl, r, w)
return r
}
// originalRequest returns a partial, shallow copy of
// req, including: req.Method, deep copy of req.URL
// (into the urlCopy parameter, which should be on the
// stack), req.RequestURI, and req.RemoteAddr. Notably,
// headers are not copied. This function is designed to
// be very fast and efficient, and useful primarily for
// read-only/logging purposes.
func originalRequest(req *http.Request, urlCopy *url.URL) http.Request {
cloneURL(req.URL, urlCopy)
return http.Request{
Method: req.Method,
RemoteAddr: req.RemoteAddr,
RequestURI: req.RequestURI,
URL: urlCopy,
}
}
// determineTrustedProxy parses the remote IP address of
// the request, and determines (if the server configured it)
// if the client is a trusted proxy. If trusted, also returns
// the real client IP if possible.
func determineTrustedProxy(r *http.Request, s *Server) (bool, string) {
// If there's no server, then we can't check anything
if s == nil {
return false, ""
}
// Parse the remote IP. The error is not fatal, but since
// a valid remote IP is required to continue, we just
// return early. This should probably never happen
// though, unless some other module manipulated the request's
// remote address and used an invalid value.
clientIP, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
return false, ""
}
// Client IP may contain a zone if IPv6, so we need
// to pull that out before parsing the IP
clientIP, _, _ = strings.Cut(clientIP, "%")
ipAddr, err := netip.ParseAddr(clientIP)
if err != nil {
return false, ""
}
// Check if the client is a trusted proxy
if s.trustedProxies == nil {
return false, ipAddr.String()
}
if isTrustedClientIP(ipAddr, s.trustedProxies.GetIPRanges(r)) {
if s.TrustedProxiesStrict > 0 {
return true, strictUntrustedClientIp(r, s.ClientIPHeaders, s.trustedProxies.GetIPRanges(r), ipAddr.String())
}
return true, trustedRealClientIP(r, s.ClientIPHeaders, ipAddr.String())
}
return false, ipAddr.String()
}
// isTrustedClientIP returns true if the given IP address is
// in the list of trusted IP ranges.
func isTrustedClientIP(ipAddr netip.Addr, trusted []netip.Prefix) bool {
for _, ipRange := range trusted {
if ipRange.Contains(ipAddr) {
return true
}
}
return false
}
// trustedRealClientIP finds the client IP from the request assuming it is
// from a trusted client. If there are no client IP headers, then the
// direct remote address is returned. If there are client IP headers,
// then the first value from those headers is used.
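//
// For example (illustrative values), with headers = ["X-Forwarded-For"] and
// a trusted request carrying "X-Forwarded-For: 203.0.113.7, 10.0.0.1", the
// left-most valid address, 203.0.113.7, is returned.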
func trustedRealClientIP(r *http.Request, headers []string, clientIP string) string {
// Read all the values of the configured client IP headers, in order
var values []string
for _, field := range headers {
values = append(values, r.Header.Values(field)...)
}
// If we don't have any values, then give up
if len(values) == 0 {
return clientIP
}
// Since there can be many header values, we need to
// join them together before splitting to get the full list
allValues := strings.Split(strings.Join(values, ","), ",")
// Get first valid left-most IP address
for _, part := range allValues {
// Some proxies may retain the port number, so split if possible
host, _, err := net.SplitHostPort(part)
if err != nil {
host = part
}
// Remove any zone identifier from the IP address
host, _, _ = strings.Cut(strings.TrimSpace(host), "%")
// Parse the IP address
ipAddr, err := netip.ParseAddr(host)
if err != nil {
continue
}
return ipAddr.String()
}
// We didn't find a valid IP
return clientIP
}
// strictUntrustedClientIp iterates through the list of client IP headers,
// parses them from right-to-left, and returns the first valid IP address
// that is untrusted. If no valid IP address is found, then the direct
// remote address is returned.
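//
// For example (illustrative values), with trusted = [10.0.0.0/8] and a
// request carrying "X-Forwarded-For: 203.0.113.7, 10.1.1.1, 10.2.2.2",
// the two right-most (trusted) addresses are skipped and 203.0.113.7
// is returned.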
func strictUntrustedClientIp(r *http.Request, headers []string, trusted []netip.Prefix, clientIP string) string {
for _, headerName := range headers {
parts := strings.Split(strings.Join(r.Header.Values(headerName), ","), ",")
for i := len(parts) - 1; i >= 0; i-- {
// Some proxies may retain the port number, so split if possible
host, _, err := net.SplitHostPort(parts[i])
if err != nil {
host = parts[i]
}
// Remove any zone identifier from the IP address
host, _, _ = strings.Cut(strings.TrimSpace(host), "%")
// Parse the IP address
ipAddr, err := netip.ParseAddr(host)
if err != nil {
continue
}
if !isTrustedClientIP(ipAddr, trusted) {
return ipAddr.String()
}
}
}
return clientIP
}
// cloneURL copies the URL from 'from' into 'to' (including
// its Userinfo) so that 'to' does not reference the original.
func cloneURL(from, to *url.URL) {
*to = *from
if from.User != nil {
userInfo := new(url.Userinfo)
*userInfo = *from.User
to.User = userInfo
}
}
// lengthReader is an io.ReadCloser that keeps track of the
// number of bytes read from the request body.
type lengthReader struct {
Source io.ReadCloser
Length int
}
func (r *lengthReader) Read(b []byte) (int, error) {
n, err := r.Source.Read(b)
r.Length += n
return n, err
}
func (r *lengthReader) Close() error {
return r.Source.Close()
}
// Context keys for HTTP request context values.
const (
// For referencing the server instance
ServerCtxKey caddy.CtxKey = "server"
// For the request's variable table
VarsCtxKey caddy.CtxKey = "vars"
// For a partial copy of the unmodified request that
// originally came into the server's entry handler
OriginalRequestCtxKey caddy.CtxKey = "original_request"
// For referencing underlying net.Conn
ConnCtxKey caddy.CtxKey = "conn"
// For referencing underlying quic.Connection
// TODO: export if needed later
quicConnCtxKey caddy.CtxKey = "quic_conn"
// For tracking whether the client is a trusted proxy
TrustedProxyVarKey string = "trusted_proxy"
// For tracking the real client IP (affected by trusted_proxy)
ClientIPVarKey string = "client_ip"
)
var networkTypesHTTP3 = map[string]string{
"unix": "unixgram",
"tcp4": "udp4",
"tcp6": "udp6",
}
// RegisterNetworkHTTP3 registers a mapping from non-HTTP/3 network to HTTP/3
// network. This should be called during init() and will panic if the network
// type is standard, reserved, or already registered.
//
// EXPERIMENTAL: Subject to change.
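//
// A minimal usage sketch from a plugin (illustrative; the network names
// are hypothetical):
//
//	func init() {
//		caddyhttp.RegisterNetworkHTTP3("mynet", "mynetgram")
//	}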
func RegisterNetworkHTTP3(originalNetwork, h3Network string) {
if _, ok := networkTypesHTTP3[strings.ToLower(originalNetwork)]; ok {
panic("network type " + originalNetwork + " is already registered")
}
networkTypesHTTP3[originalNetwork] = h3Network
}
func getHTTP3Network(originalNetwork string) string {
h3Network, ok := networkTypesHTTP3[strings.ToLower(originalNetwork)]
if !ok {
// TODO: Maybe a better default is to not enable HTTP/3 if we do not know the network?
return "udp"
}
return h3Network
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"fmt"
"net/http"
"strconv"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
func init() {
caddy.RegisterModule(StaticError{})
}
// StaticError implements a simple handler that returns an error.
// This handler returns an error value, but does not write a response.
// This is useful when you want the server to act as if an error
// occurred; for example, to invoke your custom error handling logic.
//
// Since this handler does not write a response, the error information
// is for use by the server to know how to handle the error.
type StaticError struct {
// The error message. Optional. Default is no error message.
Error string `json:"error,omitempty"`
// The recommended HTTP status code. Can be either an integer or a
// string if placeholders are needed. Optional. Default is 500.
StatusCode WeakString `json:"status_code,omitempty"`
}
// CaddyModule returns the Caddy module information.
func (StaticError) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.handlers.error",
New: func() caddy.Module { return new(StaticError) },
}
}
// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
//
// error [<matcher>] <status>|<message> [<status>] {
// message <text>
// }
//
// If there is just one argument (other than the matcher), it is considered
// to be a status code if it's a valid positive integer of 3 digits.
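//
// For example (an illustrative Caddyfile snippet):
//
//	error /secret/* "Unauthorized" 403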
func (e *StaticError) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume directive name
args := d.RemainingArgs()
switch len(args) {
case 1:
if len(args[0]) == 3 {
if num, err := strconv.Atoi(args[0]); err == nil && num > 0 {
e.StatusCode = WeakString(args[0])
break
}
}
e.Error = args[0]
case 2:
e.Error = args[0]
e.StatusCode = WeakString(args[1])
default:
return d.ArgErr()
}
for d.NextBlock(0) {
switch d.Val() {
case "message":
if e.Error != "" {
return d.Err("message already specified")
}
if !d.AllArgs(&e.Error) {
return d.ArgErr()
}
default:
return d.Errf("unrecognized subdirective '%s'", d.Val())
}
}
return nil
}
func (e StaticError) ServeHTTP(w http.ResponseWriter, r *http.Request, _ Handler) error {
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
statusCode := http.StatusInternalServerError
if codeStr := e.StatusCode.String(); codeStr != "" {
intVal, err := strconv.Atoi(repl.ReplaceAll(codeStr, ""))
if err != nil {
return Error(http.StatusInternalServerError, err)
}
statusCode = intVal
}
return Error(statusCode, fmt.Errorf("%s", e.Error))
}
// Interface guard
var (
_ MiddlewareHandler = (*StaticError)(nil)
_ caddyfile.Unmarshaler = (*StaticError)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/textproto"
"os"
"strconv"
"strings"
"text/template"
"time"
"github.com/spf13/cobra"
"go.uber.org/zap"
caddycmd "github.com/caddyserver/caddy/v2/cmd"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
func init() {
caddy.RegisterModule(StaticResponse{})
caddycmd.RegisterCommand(caddycmd.Command{
Name: "respond",
Usage: `[--status <code>] [--body <content>] [--listen <addr>] [--access-log] [--debug] [--header "Field: value"] <body|status>`,
Short: "Simple, hard-coded HTTP responses for development and testing",
Long: `
Spins up a quick-and-clean HTTP server for development and testing purposes.
With no options specified, this command listens on a random available port
and answers HTTP requests with an empty 200 response. The listen address can
be customized with the --listen flag and will always be printed to stdout.
If the listen address includes a port range, multiple servers will be started.
If a final, unnamed argument is given, it will be treated as a status code
(same as the --status flag) if it is a 3-digit number. Otherwise, it is used
as the response body (same as the --body flag). The --status and --body flags
will always override this argument (for example, to write a body that
literally says "404" but with a status code of 200, do '--status 200 404').
A body may be given in 3 ways: a flag, a final (and unnamed) argument to
the command, or piped to stdin (if flag and argument are unset). Limited
template evaluation is supported on the body, with the following variables:
{{.N}} The server number (useful if using a port range)
{{.Port}} The listener port
{{.Address}} The listener address
(See the docs for the text/template package in the Go standard library for
information about using templates: https://pkg.go.dev/text/template)
Access/request logging and more verbose debug logging can also be enabled.
Response headers may be added using the --header flag for each header field.
`,
CobraFunc: func(cmd *cobra.Command) {
cmd.Flags().StringP("listen", "l", ":0", "The address to which to bind the listener")
cmd.Flags().IntP("status", "s", http.StatusOK, "The response status code")
cmd.Flags().StringP("body", "b", "", "The body of the HTTP response")
cmd.Flags().BoolP("access-log", "", false, "Enable the access log")
cmd.Flags().BoolP("debug", "v", false, "Enable more verbose debug-level logging")
cmd.Flags().StringSliceP("header", "H", []string{}, "Set a header on the response (format: \"Field: value\")")
cmd.RunE = caddycmd.WrapCommandFuncForCobra(cmdRespond)
},
})
}
// StaticResponse implements a simple responder for static responses.
type StaticResponse struct {
// The HTTP status code to respond with. Can be an integer or,
// if needing to use a placeholder, a string.
//
// If the status code is 103 (Early Hints), the response headers
// will be written to the client immediately, the body will be
// ignored, and the next handler will be invoked. This behavior
// is EXPERIMENTAL while RFC 8297 remains an Experimental RFC,
// and may be changed or removed.
StatusCode WeakString `json:"status_code,omitempty"`
// Header fields to set on the response; overwrites any existing
// header fields of the same names after normalization.
Headers http.Header `json:"headers,omitempty"`
// The response body. If non-empty, the Content-Type header may
// be added automatically if it is neither explicitly configured
// nor already set on the response; the default value is
// "text/plain; charset=utf-8" unless the body is a valid JSON object
// or array, in which case the value will be "application/json".
// Other than those common special cases, the Content-Type header
// should be set explicitly if it is desired, because MIME sniffing
// is disabled for safety.
Body string `json:"body,omitempty"`
// If true, the server will close the client's connection
// after writing the response.
Close bool `json:"close,omitempty"`
// Immediately and forcefully closes the connection without
// writing a response. Interrupts any other HTTP streams on
// the same connection.
Abort bool `json:"abort,omitempty"`
}
// CaddyModule returns the Caddy module information.
func (StaticResponse) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.handlers.static_response",
New: func() caddy.Module { return new(StaticResponse) },
}
}
// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
//
// respond [<matcher>] <status>|<body> [<status>] {
// body <text>
// close
// }
//
// If there is just one argument (other than the matcher), it is considered
// to be a status code if it's a valid positive integer of 3 digits.
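//
// For example (an illustrative Caddyfile snippet):
//
//	respond /healthz "OK" 200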
func (s *StaticResponse) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume directive name
args := d.RemainingArgs()
switch len(args) {
case 1:
if len(args[0]) == 3 {
if num, err := strconv.Atoi(args[0]); err == nil && num > 0 {
s.StatusCode = WeakString(args[0])
break
}
}
s.Body = args[0]
case 2:
s.Body = args[0]
s.StatusCode = WeakString(args[1])
default:
return d.ArgErr()
}
for d.NextBlock(0) {
switch d.Val() {
case "body":
if s.Body != "" {
return d.Err("body already specified")
}
if !d.AllArgs(&s.Body) {
return d.ArgErr()
}
case "close":
if s.Close {
return d.Err("close already specified")
}
s.Close = true
default:
return d.Errf("unrecognized subdirective '%s'", d.Val())
}
}
return nil
}
func (s StaticResponse) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error {
// close the connection immediately
if s.Abort {
panic(http.ErrAbortHandler)
}
// close the connection after responding
if s.Close {
r.Close = true
w.Header().Set("Connection", "close")
}
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
// set all headers
for field, vals := range s.Headers {
field = textproto.CanonicalMIMEHeaderKey(repl.ReplaceAll(field, ""))
newVals := make([]string, len(vals))
for i := range vals {
newVals[i] = repl.ReplaceAll(vals[i], "")
}
w.Header()[field] = newVals
}
// implicitly set Content-Type header if we can do so safely
// (this allows templates handler to eval templates successfully
// or for clients to render JSON properly which is very common)
body := repl.ReplaceKnown(s.Body, "")
if body != "" && w.Header().Get("Content-Type") == "" {
content := strings.TrimSpace(body)
if len(content) > 2 &&
(content[0] == '{' && content[len(content)-1] == '}' ||
(content[0] == '[' && content[len(content)-1] == ']')) &&
json.Valid([]byte(content)) {
w.Header().Set("Content-Type", "application/json")
} else {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
}
}
// do not allow Go to sniff the content-type, for safety
if w.Header().Get("Content-Type") == "" {
w.Header()["Content-Type"] = nil
}
// get the status code; if this handler exists in an error route,
// use the recommended status code as the default; otherwise 200
statusCode := http.StatusOK
if reqErr, ok := r.Context().Value(ErrorCtxKey).(error); ok {
if handlerErr, ok := reqErr.(HandlerError); ok {
if handlerErr.StatusCode > 0 {
statusCode = handlerErr.StatusCode
}
}
}
if codeStr := s.StatusCode.String(); codeStr != "" {
intVal, err := strconv.Atoi(repl.ReplaceAll(codeStr, ""))
if err != nil {
return Error(http.StatusInternalServerError, err)
}
statusCode = intVal
}
// write headers
w.WriteHeader(statusCode)
// write response body
if statusCode != http.StatusEarlyHints && body != "" {
fmt.Fprint(w, body)
}
// continue handling after Early Hints as they are not the final response
if statusCode == http.StatusEarlyHints {
return next.ServeHTTP(w, r)
}
return nil
}
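// buildHTTPServer builds a single HTTP server for the respond command:
// it evaluates the body as a basic template (with the server number, port,
// and address available) and wires up one route with a static_response
// handler using the given status code and headers.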
func buildHTTPServer(i int, port uint, addr string, statusCode int, hdr http.Header, body string, accessLog bool) (*Server, error) {
var handlers []json.RawMessage
// response body supports a basic template; evaluate it
tplCtx := struct {
N int // server number
Port uint // only the port
Address string // listener address
}{
N: i,
Port: port,
Address: addr,
}
tpl, err := template.New("body").Parse(body)
if err != nil {
return nil, err
}
buf := new(bytes.Buffer)
err = tpl.Execute(buf, tplCtx)
if err != nil {
return nil, err
}
// create route with handler
handler := StaticResponse{
StatusCode: WeakString(fmt.Sprintf("%d", statusCode)),
Headers: hdr,
Body: buf.String(),
}
handlers = append(handlers, caddyconfig.JSONModuleObject(handler, "handler", "static_response", nil))
route := Route{HandlersRaw: handlers}
server := &Server{
Listen: []string{addr},
ReadHeaderTimeout: caddy.Duration(10 * time.Second),
IdleTimeout: caddy.Duration(30 * time.Second),
MaxHeaderBytes: 1024 * 10,
Routes: RouteList{route},
AutoHTTPS: &AutoHTTPSConfig{DisableRedir: true},
}
if accessLog {
server.Logs = new(ServerLogConfig)
}
return server, nil
}
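// cmdRespond implements the respond command: it assembles an ephemeral,
// admin-disabled config with one static_response server per port in the
// listen range, runs it, prints the listener addresses, and then blocks
// until interrupted.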
func cmdRespond(fl caddycmd.Flags) (int, error) {
caddy.TrapSignals()
// get flag values
listen := fl.String("listen")
statusCodeFl := fl.Int("status")
bodyFl := fl.String("body")
accessLog := fl.Bool("access-log")
debug := fl.Bool("debug")
arg := fl.Arg(0)
if fl.NArg() > 1 {
return caddy.ExitCodeFailedStartup, fmt.Errorf("too many unflagged arguments")
}
// prefer status and body from explicit flags
statusCode, body := statusCodeFl, bodyFl
// figure out if status code was explicitly specified; this lets
// us set a non-zero value as the default but is a little hacky
var statusCodeFlagSpecified bool
for _, fl := range os.Args {
if fl == "--status" {
statusCodeFlagSpecified = true
break
}
}
// try to determine what kind of parameter the unnamed argument is
if arg != "" {
// specifying body and status flags makes the argument redundant/unused
if bodyFl != "" && statusCodeFlagSpecified {
return caddy.ExitCodeFailedStartup, fmt.Errorf("unflagged argument \"%s\" is overridden by flags", arg)
}
// if a valid 3-digit number, treat as status code; otherwise body
if argInt, err := strconv.Atoi(arg); err == nil && !statusCodeFlagSpecified {
if argInt >= 100 && argInt <= 999 {
statusCode = argInt
}
} else if body == "" {
body = arg
}
}
// if we still need a body, see if stdin is being piped
if body == "" {
stdinInfo, err := os.Stdin.Stat()
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
if stdinInfo.Mode()&os.ModeNamedPipe != 0 {
bodyBytes, err := io.ReadAll(os.Stdin)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
body = string(bodyBytes)
}
}
// build headers map
headers, err := fl.GetStringSlice("header")
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("invalid header flag: %v", err)
}
hdr := make(http.Header)
for i, h := range headers {
key, val, found := strings.Cut(h, ":")
key, val = strings.TrimSpace(key), strings.TrimSpace(val)
if !found || key == "" || val == "" {
return caddy.ExitCodeFailedStartup, fmt.Errorf("header %d: invalid format \"%s\" (expecting \"Field: value\")", i, h)
}
hdr.Set(key, val)
}
// build each HTTP server
httpApp := App{Servers: make(map[string]*Server)}
// expand listen address, if more than one port
listenAddr, err := caddy.ParseNetworkAddress(listen)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
if !listenAddr.IsUnixNetwork() {
listenAddrs := make([]string, 0, listenAddr.PortRangeSize())
for offset := uint(0); offset < listenAddr.PortRangeSize(); offset++ {
listenAddrs = append(listenAddrs, listenAddr.JoinHostPort(offset))
}
for i, addr := range listenAddrs {
server, err := buildHTTPServer(i, listenAddr.StartPort+uint(i), addr, statusCode, hdr, body, accessLog)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
// save server
httpApp.Servers[fmt.Sprintf("static%d", i)] = server
}
} else {
server, err := buildHTTPServer(0, 0, listen, statusCode, hdr, body, accessLog)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
// save server
httpApp.Servers[fmt.Sprintf("static%d", 0)] = server
}
// finish building the config
var false bool // a variable named "false" (shadowing the predeclared identifier) so its address can be taken below
cfg := &caddy.Config{
Admin: &caddy.AdminConfig{
Disabled: true,
Config: &caddy.ConfigSettings{
Persist: &false,
},
},
AppsRaw: caddy.ModuleMap{
"http": caddyconfig.JSON(httpApp, nil),
},
}
if debug {
cfg.Logging = &caddy.Logging{
Logs: map[string]*caddy.CustomLog{
"default": {BaseLog: caddy.BaseLog{Level: zap.DebugLevel.CapitalString()}},
},
}
}
// run it!
err = caddy.Run(cfg)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
// to print listener addresses, get the active HTTP app
loadedHTTPApp, err := caddy.ActiveContext().App("http")
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
// print each listener address
for _, srv := range loadedHTTPApp.(*App).Servers {
for _, ln := range srv.listeners {
fmt.Printf("Server address: %s\n", ln.Addr())
}
}
select {}
}
// Interface guards
var (
_ MiddlewareHandler = (*StaticResponse)(nil)
_ caddyfile.Unmarshaler = (*StaticResponse)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"fmt"
"net/http"
"github.com/caddyserver/caddy/v2"
)
func init() {
caddy.RegisterModule(Subroute{})
}
// Subroute implements a handler that compiles and executes routes.
// This is useful for a batch of routes that all inherit the same
// matchers, or for multiple routes that should be treated as a
// single route.
//
// A subroute can also handle errors from its own handlers.
// First the primary routes will be executed, and if they return an
// error, the error routes will be executed; in that case, an error
// is only returned to the server's entry point if the error routes
// themselves return an additional error.
type Subroute struct {
// The primary list of routes to compile and execute.
Routes RouteList `json:"routes,omitempty"`
// If the primary routes return an error, error handling
// can be promoted to this configuration instead.
Errors *HTTPErrorConfig `json:"errors,omitempty"`
}
// CaddyModule returns the Caddy module information.
func (Subroute) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.handlers.subroute",
New: func() caddy.Module { return new(Subroute) },
}
}
// Provision sets up subrouting.
func (sr *Subroute) Provision(ctx caddy.Context) error {
if sr.Routes != nil {
err := sr.Routes.Provision(ctx)
if err != nil {
return fmt.Errorf("setting up subroutes: %v", err)
}
if sr.Errors != nil {
err := sr.Errors.Routes.Provision(ctx)
if err != nil {
return fmt.Errorf("setting up error subroutes: %v", err)
}
}
}
return nil
}
func (sr *Subroute) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error {
subroute := sr.Routes.Compile(next)
err := subroute.ServeHTTP(w, r)
if err != nil && sr.Errors != nil {
r = sr.Errors.WithError(r, err)
errRoute := sr.Errors.Routes.Compile(next)
return errRoute.ServeHTTP(w, r)
}
return err
}
// Interface guards
var (
_ caddy.Provisioner = (*Subroute)(nil)
_ MiddlewareHandler = (*Subroute)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package templates
import (
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)
func init() {
httpcaddyfile.RegisterHandlerDirective("templates", parseCaddyfile)
}
// parseCaddyfile sets up the handler from Caddyfile tokens. Syntax:
//
// templates [<matcher>] {
// mime <types...>
// between <open_delim> <close_delim>
// root <path>
// }
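//
// For example (an illustrative Caddyfile snippet):
//
//	templates {
//		mime text/html text/plain
//	}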
func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
h.Next() // consume directive name
t := new(Templates)
for h.NextBlock(0) {
switch h.Val() {
case "mime":
t.MIMETypes = h.RemainingArgs()
if len(t.MIMETypes) == 0 {
return nil, h.ArgErr()
}
case "between":
t.Delimiters = h.RemainingArgs()
if len(t.Delimiters) != 2 {
return nil, h.ArgErr()
}
case "root":
if !h.Args(&t.FileRoot) {
return nil, h.ArgErr()
}
case "extensions":
if h.NextArg() {
return nil, h.ArgErr()
}
if t.ExtensionsRaw != nil {
return nil, h.Err("extensions already specified")
}
for nesting := h.Nesting(); h.NextBlock(nesting); {
extensionModuleName := h.Val()
modID := "http.handlers.templates.functions." + extensionModuleName
unm, err := caddyfile.UnmarshalModule(h.Dispenser, modID)
if err != nil {
return nil, err
}
cf, ok := unm.(CustomFunctions)
if !ok {
return nil, h.Errf("module %s (%T) does not provide template functions", modID, unm)
}
if t.ExtensionsRaw == nil {
t.ExtensionsRaw = make(caddy.ModuleMap)
}
t.ExtensionsRaw[extensionModuleName] = caddyconfig.JSON(cf, nil)
}
}
}
return t, nil
}
package templates
import (
"encoding/json"
"fmt"
"strings"
"unicode"
"github.com/BurntSushi/toml"
"gopkg.in/yaml.v3"
)
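// extractFrontMatter splits input into parsed front matter (detected by its
// opening fence as YAML, TOML, or JSON) and the remaining body. If no
// recognized front matter is present, the whole input is returned as body.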
func extractFrontMatter(input string) (map[string]any, string, error) {
// get the bounds of the first non-empty line
var firstLineStart, firstLineEnd int
lineEmpty := true
for i, b := range input {
if b == '\n' {
firstLineStart = firstLineEnd
if firstLineStart > 0 {
firstLineStart++ // skip newline character
}
firstLineEnd = i
if !lineEmpty {
break
}
continue
}
lineEmpty = lineEmpty && unicode.IsSpace(b)
}
firstLine := input[firstLineStart:firstLineEnd]
// ensure any residual Windows carriage return byte is removed
firstLine = strings.TrimSpace(firstLine)
// see what kind of front matter there is, if any
var closingFence []string
var fmParser func([]byte) (map[string]any, error)
for _, fmType := range supportedFrontMatterTypes {
if firstLine == fmType.FenceOpen {
closingFence = fmType.FenceClose
fmParser = fmType.ParseFunc
}
}
if fmParser == nil {
// no recognized front matter; whole document is body
return nil, input, nil
}
// find end of front matter
var fmEndFence string
fmEndFenceStart := -1
for _, fence := range closingFence {
index := strings.Index(input[firstLineEnd:], "\n"+fence)
if index >= 0 {
fmEndFenceStart = index
fmEndFence = fence
break
}
}
if fmEndFenceStart < 0 {
return nil, "", fmt.Errorf("unterminated front matter")
}
fmEndFenceStart += firstLineEnd + 1 // add 1 to account for newline
// extract and parse front matter
frontMatter := input[firstLineEnd:fmEndFenceStart]
fm, err := fmParser([]byte(frontMatter))
if err != nil {
return nil, "", err
}
// the rest is the body
body := input[fmEndFenceStart+len(fmEndFence):]
return fm, body, nil
}
func yamlFrontMatter(input []byte) (map[string]any, error) {
m := make(map[string]any)
err := yaml.Unmarshal(input, &m)
return m, err
}
func tomlFrontMatter(input []byte) (map[string]any, error) {
m := make(map[string]any)
err := toml.Unmarshal(input, &m)
return m, err
}
func jsonFrontMatter(input []byte) (map[string]any, error) {
input = append([]byte{'{'}, input...)
input = append(input, '}')
m := make(map[string]any)
err := json.Unmarshal(input, &m)
return m, err
}
type parsedMarkdownDoc struct {
Meta map[string]any `json:"meta,omitempty"`
Body string `json:"body,omitempty"`
}
type frontMatterType struct {
FenceOpen string
FenceClose []string
ParseFunc func(input []byte) (map[string]any, error)
}
var supportedFrontMatterTypes = []frontMatterType{
{
FenceOpen: "---",
FenceClose: []string{"---", "..."},
ParseFunc: yamlFrontMatter,
},
{
FenceOpen: "+++",
FenceClose: []string{"+++"},
ParseFunc: tomlFrontMatter,
},
{
FenceOpen: "{",
FenceClose: []string{"}"},
ParseFunc: jsonFrontMatter,
},
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build gofuzz
package templates
func FuzzExtractFrontMatter(data []byte) int {
_, _, err := extractFrontMatter(string(data))
if err != nil {
return 0
}
return 1
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package templates
import (
"bytes"
"errors"
"fmt"
"net/http"
"strconv"
"strings"
"text/template"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)
func init() {
caddy.RegisterModule(Templates{})
}
// Templates is a middleware which executes response bodies as Go templates.
// The syntax is documented in the Go standard library's
// [text/template package](https://golang.org/pkg/text/template/).
//
// ⚠️ Template functions/actions are still experimental, so they are subject to change.
//
// Custom template functions can be registered by creating a plugin module under the `http.handlers.templates.functions.*` namespace that implements the `CustomFunctions` interface.
//
// [All Sprig functions](https://masterminds.github.io/sprig/) are supported.
//
// In addition to the standard functions and the Sprig library, Caddy adds
// extra functions and data that are available to a template:
//
// ##### `.Args`
//
// A slice of arguments passed to this page/context, for example
// as the result of an [`include`](#include).
//
// ```
// {{index .Args 0}} // first argument
// ```
//
// ##### `.Cookie`
//
// Gets the value of a cookie by name.
//
// ```
// {{.Cookie "cookiename"}}
// ```
//
// ##### `env`
//
// Gets an environment variable.
//
// ```
// {{env "VAR_NAME"}}
// ```
//
// ##### `placeholder`
//
// Gets a [placeholder variable](/docs/conventions#placeholders).
// The braces (`{}`) must be omitted.
//
// ```
// {{placeholder "http.request.uri.path"}}
// {{placeholder "http.error.status_code"}}
// ```
//
// ##### `.Host`
//
// Returns the hostname portion (no port) of the Host header of the HTTP request.
//
// ```
// {{.Host}}
// ```
//
// ##### `httpInclude`
//
// Includes the contents of another file, and renders it in-place,
// by making a virtual HTTP request (also known as a sub-request).
// The URI path must exist on the same virtual server because the
// request does not use sockets; instead, the request is crafted in
// memory and the handler is invoked directly for increased efficiency.
//
// ```
// {{httpInclude "/foo/bar?q=val"}}
// ```
//
// ##### `import`
//
// Reads and returns the contents of another file, and parses it
// as a template, adding any template definitions to the template
// stack. If there are no definitions, the filepath will be the
// definition name. Any `{{ define }}` blocks will be accessible by
// `{{ template }}` or `{{ block }}`. Imports must happen before the
// template or block action is called. Note that the contents are
// NOT escaped, so you should only import trusted template files.
//
// **filename.html**
// ```
// {{ define "main" }}
// content
// {{ end }}
// ```
//
// **index.html**
// ```
// {{ import "/path/to/filename.html" }}
// {{ template "main" }}
// ```
//
// ##### `include`
//
// Includes the contents of another file, rendering it in-place.
// Optionally can pass key-value pairs as arguments to be accessed
// by the included file. Use [`.Args N`](#args) to access the N-th
// argument, 0-indexed. Note that the contents are NOT escaped, so
// you should only include trusted template files.
//
// ```
// {{include "path/to/file.html"}} // no arguments
// {{include "path/to/file.html" "arg0" 1 "value 2"}} // with arguments
// ```
//
// ##### `readFile`
//
// Reads and returns the contents of another file, as-is.
// Note that the contents are NOT escaped, so you should
// only read trusted files.
//
// ```
// {{readFile "path/to/file.html"}}
// ```
//
// ##### `listFiles`
//
// Returns a list of the files in the given directory, which is relative
// to the template context's file root.
//
// ```
// {{listFiles "/mydir"}}
// ```
//
// ##### `markdown`
//
// Renders the given Markdown text as HTML and returns it. This uses the
// [Goldmark](https://github.com/yuin/goldmark) library,
// which is CommonMark compliant. It also has these extensions
// enabled: GitHub Flavored Markdown, Footnote, and syntax
// highlighting provided by [Chroma](https://github.com/alecthomas/chroma).
//
// ```
// {{markdown "My _markdown_ text"}}
// ```
//
// ##### `.RemoteIP`
//
// Returns the connection's IP address.
//
// ```
// {{.RemoteIP}}
// ```
//
// ##### `.ClientIP`
//
// Returns the real client's IP address, if `trusted_proxies` was configured,
// otherwise returns the connection's IP address.
//
// ```
// {{.ClientIP}}
// ```
//
// ##### `.Req`
//
// Accesses the current HTTP request, which has various fields, including:
//
// - `.Method` - the method
// - `.URL` - the URL, which in turn has component fields (Scheme, Host, Path, etc.)
// - `.Header` - the header fields
// - `.Host` - the Host or :authority header of the request
//
// ```
// {{.Req.Header.Get "User-Agent"}}
// ```
//
// ##### `.OriginalReq`
//
// Like [`.Req`](#req), except it accesses the original HTTP
// request before rewrites or other internal modifications.
//
// ##### `.RespHeader.Add`
//
// Adds a header field to the HTTP response.
//
// ```
// {{.RespHeader.Add "Field-Name" "val"}}
// ```
//
// ##### `.RespHeader.Del`
//
// Deletes a header field on the HTTP response.
//
// ```
// {{.RespHeader.Del "Field-Name"}}
// ```
//
// ##### `.RespHeader.Set`
//
// Sets a header field on the HTTP response, replacing any existing value.
//
// ```
// {{.RespHeader.Set "Field-Name" "val"}}
// ```
//
// ##### `httpError`
//
// Returns an error with the given status code to the HTTP handler chain.
//
// ```
// {{if not (fileExists $includedFile)}}{{httpError 404}}{{end}}
// ```
//
// ##### `splitFrontMatter`
//
// Splits front matter out from the body. Front matter is metadata that
// appears at the very beginning of a file or string. Front matter can
// be in YAML, TOML, or JSON formats:
//
// **TOML** front matter starts and ends with `+++`:
//
// ```toml
// +++
// template = "blog"
// title = "Blog Homepage"
// sitename = "A Caddy site"
// +++
// ```
//
// **YAML** is surrounded by `---`:
//
// ```yaml
// ---
// template: blog
// title: Blog Homepage
// sitename: A Caddy site
// ---
// ```
//
// **JSON** is simply `{` and `}`:
//
// ```json
// {
// "template": "blog",
// "title": "Blog Homepage",
// "sitename": "A Caddy site"
// }
// ```
//
// The resulting front matter will be made available like so:
//
// - `.Meta` to access the metadata fields, for example: `{{$parsed.Meta.title}}`
// - `.Body` to access the body after the front matter, for example: `{{markdown $parsed.Body}}`
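//
// For example, combined with [`include`](#include) (the file path here is
// only illustrative):
//
// ```
// {{$parsed := splitFrontMatter (include "/posts/first.md")}}
// <h1>{{$parsed.Meta.title}}</h1>
// {{markdown $parsed.Body}}
// ```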
//
// ##### `stripHTML`
//
// Removes HTML from a string.
//
// ```
// {{stripHTML "Shows <b>only</b> text content"}}
// ```
//
// ##### `humanize`
//
// Transforms size and time inputs to a human readable format.
// This uses the [go-humanize](https://github.com/dustin/go-humanize) library.
//
// The first argument must be a format type, and the last argument
// is the input, or the input can be piped in. The supported format
// types are:
// - **size** which turns an integer amount of bytes into a string like `2.3 MB`
// - **time** which turns a time string into a relative time string like `2 weeks ago`
//
// For the `time` format, the layout for parsing the input can be configured
// by appending a colon `:` followed by the desired time layout. You can
// find the documentation on time layouts [in Go's docs](https://pkg.go.dev/time#pkg-constants).
// The default time layout is `RFC1123Z`, i.e. `Mon, 02 Jan 2006 15:04:05 -0700`.
//
// ```
// {{humanize "size" "2048000"}}
// {{placeholder "http.response.header.Content-Length" | humanize "size"}}
// {{humanize "time" "Fri, 05 May 2022 15:04:05 +0200"}}
// {{humanize "time:2006-Jan-02" "2022-May-05"}}
// ```
//
// ##### `pathEscape`
//
// Passes a string through `url.PathEscape`, replacing characters that have
// special meaning in URL path parameters (`?`, `&`, `%`).
//
// Useful e.g. to include filenames containing these characters in URL path
// parameters, or use them as an `img` element's `src` attribute.
//
// ```
// {{pathEscape "50%_valid_filename?.jpg"}}
// ```
type Templates struct {
// The root path from which to load files. Required if template functions
// accessing the file system are used (such as include). Default is
// `{http.vars.root}` if set, or current working directory otherwise.
FileRoot string `json:"file_root,omitempty"`
// The MIME types for which to render templates. It is important to use
// this if the route matchers do not exclude images or other binary files.
// Default is text/plain, text/markdown, and text/html.
MIMETypes []string `json:"mime_types,omitempty"`
// The template action delimiters. If set, must be precisely two elements:
// the opening and closing delimiters. Default: `["{{", "}}"]`
Delimiters []string `json:"delimiters,omitempty"`
// Extensions adds functions to the template's func map. These often
// act as components on web pages, for example.
ExtensionsRaw caddy.ModuleMap `json:"match,omitempty" caddy:"namespace=http.handlers.templates.functions"`
customFuncs []template.FuncMap
logger *zap.Logger
}
// CustomFunctions is the interface for registering custom template functions.
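//
// A minimal sketch of a guest module supplying one custom function (the
// module and function names here are hypothetical):
//
//	type Greeter struct{}
//
//	func init() { caddy.RegisterModule(Greeter{}) }
//
//	func (Greeter) CaddyModule() caddy.ModuleInfo {
//		return caddy.ModuleInfo{
//			ID:  "http.handlers.templates.functions.greeter",
//			New: func() caddy.Module { return new(Greeter) },
//		}
//	}
//
//	func (Greeter) CustomTemplateFunctions() template.FuncMap {
//		return template.FuncMap{
//			"greet": func(name string) string { return "Hello, " + name + "!" },
//		}
//	}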
type CustomFunctions interface {
// CustomTemplateFunctions should return the mapping from custom function names to implementations.
CustomTemplateFunctions() template.FuncMap
}
// CaddyModule returns the Caddy module information.
func (Templates) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.handlers.templates",
New: func() caddy.Module { return new(Templates) },
}
}
// Provision provisions t.
func (t *Templates) Provision(ctx caddy.Context) error {
t.logger = ctx.Logger()
mods, err := ctx.LoadModule(t, "ExtensionsRaw")
if err != nil {
return fmt.Errorf("loading template extensions: %v", err)
}
for _, modIface := range mods.(map[string]any) {
t.customFuncs = append(t.customFuncs, modIface.(CustomFunctions).CustomTemplateFunctions())
}
if t.MIMETypes == nil {
t.MIMETypes = defaultMIMETypes
}
if t.FileRoot == "" {
t.FileRoot = "{http.vars.root}"
}
return nil
}
// Validate ensures t has a valid configuration.
func (t *Templates) Validate() error {
if len(t.Delimiters) != 0 && len(t.Delimiters) != 2 {
return fmt.Errorf("delimiters must consist of exactly two elements: opening and closing")
}
return nil
}
func (t *Templates) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
buf := bufPool.Get().(*bytes.Buffer)
buf.Reset()
defer bufPool.Put(buf)
// shouldBuf determines whether to execute templates on this response,
// since generally we will not want to execute for images or CSS, etc.
shouldBuf := func(status int, header http.Header) bool {
ct := header.Get("Content-Type")
for _, mt := range t.MIMETypes {
if strings.Contains(ct, mt) {
return true
}
}
return false
}
rec := caddyhttp.NewResponseRecorder(w, buf, shouldBuf)
err := next.ServeHTTP(rec, r)
if err != nil {
return err
}
if !rec.Buffered() {
return nil
}
err = t.executeTemplate(rec, r)
if err != nil {
return err
}
rec.Header().Set("Content-Length", strconv.Itoa(buf.Len()))
rec.Header().Del("Accept-Ranges") // we don't know ranges for dynamically-created content
rec.Header().Del("Last-Modified") // useless for dynamic content since it's always changing
// we don't know a way to quickly generate etag for dynamic content,
// and weak etags still cause browsers to rely on it even after a
// refresh, so disable them until we find a better way to do this
rec.Header().Del("Etag")
return rec.WriteResponse()
}
// executeTemplate executes the template contained in the response recorder's buffer and replaces the buffer's contents with the result.
func (t *Templates) executeTemplate(rr caddyhttp.ResponseRecorder, r *http.Request) error {
var fs http.FileSystem
if t.FileRoot != "" {
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
fs = http.Dir(repl.ReplaceAll(t.FileRoot, "."))
}
ctx := &TemplateContext{
Root: fs,
Req: r,
RespHeader: WrappedHeader{rr.Header()},
config: t,
CustomFuncs: t.customFuncs,
}
err := ctx.executeTemplateInBuffer(r.URL.Path, rr.Buffer())
if err != nil {
// templates may return a custom HTTP error to be propagated to the client,
// otherwise for any other error we assume the template is broken
var handlerErr caddyhttp.HandlerError
if errors.As(err, &handlerErr) {
return handlerErr
}
return caddyhttp.Error(http.StatusInternalServerError, err)
}
return nil
}
// virtualResponseWriter is used in virtualized HTTP requests
// that templates may execute.
type virtualResponseWriter struct {
status int
header http.Header
body *bytes.Buffer
}
func (vrw *virtualResponseWriter) Header() http.Header {
return vrw.header
}
func (vrw *virtualResponseWriter) WriteHeader(statusCode int) {
vrw.status = statusCode
}
func (vrw *virtualResponseWriter) Write(data []byte) (int, error) {
return vrw.body.Write(data)
}
var defaultMIMETypes = []string{
"text/html",
"text/plain",
"text/markdown",
}
// Interface guards
var (
_ caddy.Provisioner = (*Templates)(nil)
_ caddy.Validator = (*Templates)(nil)
_ caddyhttp.MiddlewareHandler = (*Templates)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package templates
import (
"bytes"
"fmt"
"io"
"io/fs"
"net"
"net/http"
"net/url"
"os"
"path"
"reflect"
"strconv"
"strings"
"sync"
"text/template"
"time"
"github.com/Masterminds/sprig/v3"
chromahtml "github.com/alecthomas/chroma/v2/formatters/html"
"github.com/dustin/go-humanize"
"github.com/yuin/goldmark"
highlighting "github.com/yuin/goldmark-highlighting/v2"
"github.com/yuin/goldmark/extension"
"github.com/yuin/goldmark/parser"
gmhtml "github.com/yuin/goldmark/renderer/html"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)
// TemplateContext is the context with which HTTP templates are executed.
type TemplateContext struct {
Root http.FileSystem
Req *http.Request
Args []any // defined by arguments to funcInclude
RespHeader WrappedHeader
CustomFuncs []template.FuncMap // functions added by plugins
config *Templates
tpl *template.Template
}
// NewTemplate returns a new template intended to be evaluated with this
// context, as it is initialized with configuration from this context.
func (c *TemplateContext) NewTemplate(tplName string) *template.Template {
c.tpl = template.New(tplName).Option("missingkey=zero")
// customize delimiters, if applicable
if c.config != nil && len(c.config.Delimiters) == 2 {
c.tpl.Delims(c.config.Delimiters[0], c.config.Delimiters[1])
}
// add sprig library
c.tpl.Funcs(sprigFuncMap)
// add all custom functions
for _, funcMap := range c.CustomFuncs {
c.tpl.Funcs(funcMap)
}
// add our own library
c.tpl.Funcs(template.FuncMap{
"include": c.funcInclude,
"readFile": c.funcReadFile,
"import": c.funcImport,
"httpInclude": c.funcHTTPInclude,
"stripHTML": c.funcStripHTML,
"markdown": c.funcMarkdown,
"splitFrontMatter": c.funcSplitFrontMatter,
"listFiles": c.funcListFiles,
"fileStat": c.funcFileStat,
"env": c.funcEnv,
"placeholder": c.funcPlaceholder,
"fileExists": c.funcFileExists,
"httpError": c.funcHTTPError,
"humanize": c.funcHumanize,
"maybe": c.funcMaybe,
"pathEscape": url.PathEscape,
})
return c.tpl
}
// OriginalReq returns the original, unmodified, un-rewritten request as
// it originally came in over the wire.
func (c TemplateContext) OriginalReq() http.Request {
or, _ := c.Req.Context().Value(caddyhttp.OriginalRequestCtxKey).(http.Request)
return or
}
// funcInclude returns the contents of filename relative to the site root and renders it in place.
// Note that included files are NOT escaped, so you should only include
// trusted files. If a file is not trusted, be sure to use escaping functions
// in your template.
func (c TemplateContext) funcInclude(filename string, args ...any) (string, error) {
bodyBuf := bufPool.Get().(*bytes.Buffer)
bodyBuf.Reset()
defer bufPool.Put(bodyBuf)
err := c.readFileToBuffer(filename, bodyBuf)
if err != nil {
return "", err
}
c.Args = args
err = c.executeTemplateInBuffer(filename, bodyBuf)
if err != nil {
return "", err
}
return bodyBuf.String(), nil
}
// funcReadFile returns the contents of a filename relative to the site root.
// Note that included files are NOT escaped, so you should only include
// trusted files. If a file is not trusted, be sure to use escaping functions
// in your template.
func (c TemplateContext) funcReadFile(filename string) (string, error) {
bodyBuf := bufPool.Get().(*bytes.Buffer)
bodyBuf.Reset()
defer bufPool.Put(bodyBuf)
err := c.readFileToBuffer(filename, bodyBuf)
if err != nil {
return "", err
}
return bodyBuf.String(), nil
}
// readFileToBuffer reads a file into a buffer
func (c TemplateContext) readFileToBuffer(filename string, bodyBuf *bytes.Buffer) error {
if c.Root == nil {
return fmt.Errorf("root file system not specified")
}
file, err := c.Root.Open(filename)
if err != nil {
return err
}
defer file.Close()
_, err = io.Copy(bodyBuf, file)
if err != nil {
return err
}
return nil
}
// funcHTTPInclude returns the body of a virtual (lightweight) request
// to the given URI on the same server. Note that included bodies
// are NOT escaped, so you should only include trusted resources.
// If a resource is not trusted, be sure to use escaping functions yourself.
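//
// For example, in a template (the URI here is only illustrative):
//
//	{{httpInclude "/fragments/header.html"}}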
func (c TemplateContext) funcHTTPInclude(uri string) (string, error) {
// prevent virtual request loops by counting how many levels
// deep we are; and if we get too deep, return an error
recursionCount := 1
if numStr := c.Req.Header.Get(recursionPreventionHeader); numStr != "" {
num, err := strconv.Atoi(numStr)
if err != nil {
return "", fmt.Errorf("parsing %s: %v", recursionPreventionHeader, err)
}
if num >= 3 {
return "", fmt.Errorf("virtual request cycle")
}
recursionCount = num + 1
}
buf := bufPool.Get().(*bytes.Buffer)
buf.Reset()
defer bufPool.Put(buf)
virtReq, err := http.NewRequest("GET", uri, nil)
if err != nil {
return "", err
}
virtReq.Host = c.Req.Host
virtReq.RemoteAddr = "127.0.0.1:10000" // https://github.com/caddyserver/caddy/issues/5835
virtReq.Header = c.Req.Header.Clone()
virtReq.Header.Set("Accept-Encoding", "identity") // https://github.com/caddyserver/caddy/issues/4352
virtReq.Trailer = c.Req.Trailer.Clone()
virtReq.Header.Set(recursionPreventionHeader, strconv.Itoa(recursionCount))
vrw := &virtualResponseWriter{body: buf, header: make(http.Header)}
server := c.Req.Context().Value(caddyhttp.ServerCtxKey).(http.Handler)
server.ServeHTTP(vrw, virtReq)
if vrw.status >= 400 {
return "", fmt.Errorf("http %d", vrw.status)
}
err = c.executeTemplateInBuffer(uri, buf)
if err != nil {
return "", err
}
return buf.String(), nil
}
// funcImport parses the named file into the current template stack. The imported
// file will be rendered within the current template by calling {{ block }} or
// {{ template }} from the standard template library. If the imported file has
// no {{ define }} blocks, the file path will be used as the definition name.
func (c *TemplateContext) funcImport(filename string) (string, error) {
bodyBuf := bufPool.Get().(*bytes.Buffer)
bodyBuf.Reset()
defer bufPool.Put(bodyBuf)
err := c.readFileToBuffer(filename, bodyBuf)
if err != nil {
return "", err
}
_, err = c.tpl.Parse(bodyBuf.String())
if err != nil {
return "", err
}
return "", nil
}
func (c *TemplateContext) executeTemplateInBuffer(tplName string, buf *bytes.Buffer) error {
c.NewTemplate(tplName)
_, err := c.tpl.Parse(buf.String())
if err != nil {
return err
}
buf.Reset() // reuse buffer for output
return c.tpl.Execute(buf, c)
}
func (c TemplateContext) funcPlaceholder(name string) string {
repl := c.Req.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
// For safety, we don't want to allow the file placeholder in
// templates because it could be used to read arbitrary files
// if the template contents were not trusted.
repl = repl.WithoutFile()
value, _ := repl.GetString(name)
return value
}
func (TemplateContext) funcEnv(varName string) string {
return os.Getenv(varName)
}
// Cookie gets the value of a cookie with name.
func (c TemplateContext) Cookie(name string) string {
cookies := c.Req.Cookies()
for _, cookie := range cookies {
if cookie.Name == name {
return cookie.Value
}
}
return ""
}
// RemoteIP gets the IP address of the connection's remote IP.
func (c TemplateContext) RemoteIP() string {
ip, _, err := net.SplitHostPort(c.Req.RemoteAddr)
if err != nil {
return c.Req.RemoteAddr
}
return ip
}
// ClientIP gets the IP address of the real client making the request
// if the request is trusted (see trusted_proxies), otherwise returns
// the connection's remote IP.
func (c TemplateContext) ClientIP() string {
address := caddyhttp.GetVar(c.Req.Context(), caddyhttp.ClientIPVarKey).(string)
clientIP, _, err := net.SplitHostPort(address)
if err != nil {
clientIP = address // no port
}
return clientIP
}
// Host returns the hostname portion of the Host header
// from the HTTP request.
func (c TemplateContext) Host() (string, error) {
host, _, err := net.SplitHostPort(c.Req.Host)
if err != nil {
if !strings.Contains(c.Req.Host, ":") {
// common with sites served on the default port 80
return c.Req.Host, nil
}
return "", err
}
return host, nil
}
// funcStripHTML returns s without HTML tags. It is fairly naive
// but works with most valid HTML inputs.
func (TemplateContext) funcStripHTML(s string) string {
var buf bytes.Buffer
var inTag, inQuotes bool
var tagStart int
for i, ch := range s {
if inTag {
if ch == '>' && !inQuotes {
inTag = false
} else if ch == '<' && !inQuotes {
// false start
buf.WriteString(s[tagStart:i])
tagStart = i
} else if ch == '"' {
inQuotes = !inQuotes
}
continue
}
if ch == '<' {
inTag = true
tagStart = i
continue
}
buf.WriteRune(ch)
}
if inTag {
// false start
buf.WriteString(s[tagStart:])
}
return buf.String()
}
// funcMarkdown renders the markdown body as HTML. The resulting
// HTML is NOT escaped so that it can be rendered as HTML.
func (TemplateContext) funcMarkdown(input any) (string, error) {
inputStr := caddy.ToString(input)
md := goldmark.New(
goldmark.WithExtensions(
extension.GFM,
extension.Footnote,
highlighting.NewHighlighting(
highlighting.WithFormatOptions(
chromahtml.WithClasses(true),
),
),
),
goldmark.WithParserOptions(
parser.WithAutoHeadingID(),
),
goldmark.WithRendererOptions(
gmhtml.WithUnsafe(), // TODO: this is not awesome, maybe should be configurable?
),
)
buf := bufPool.Get().(*bytes.Buffer)
buf.Reset()
defer bufPool.Put(buf)
err := md.Convert([]byte(inputStr), buf)
if err != nil {
return "", err
}
return buf.String(), nil
}
// splitFrontMatter parses front matter out from the beginning of input,
// and returns the separated key-value pairs and the body/content. input
// must be a "stringy" value.
func (TemplateContext) funcSplitFrontMatter(input any) (parsedMarkdownDoc, error) {
meta, body, err := extractFrontMatter(caddy.ToString(input))
if err != nil {
return parsedMarkdownDoc{}, err
}
return parsedMarkdownDoc{Meta: meta, Body: body}, nil
}
// funcListFiles reads and returns a slice of names from the given
// directory relative to the root of c.
func (c TemplateContext) funcListFiles(name string) ([]string, error) {
if c.Root == nil {
return nil, fmt.Errorf("root file system not specified")
}
dir, err := c.Root.Open(path.Clean(name))
if err != nil {
return nil, err
}
defer dir.Close()
stat, err := dir.Stat()
if err != nil {
return nil, err
}
if !stat.IsDir() {
return nil, fmt.Errorf("%v is not a directory", name)
}
dirInfo, err := dir.Readdir(0)
if err != nil {
return nil, err
}
names := make([]string, len(dirInfo))
for i, fileInfo := range dirInfo {
names[i] = fileInfo.Name()
}
return names, nil
}
// funcFileExists returns true if filename can be opened successfully.
func (c TemplateContext) funcFileExists(filename string) (bool, error) {
if c.Root == nil {
return false, fmt.Errorf("root file system not specified")
}
file, err := c.Root.Open(filename)
if err == nil {
file.Close()
return true, nil
}
return false, nil
}
// funcFileStat returns Stat of a filename
func (c TemplateContext) funcFileStat(filename string) (fs.FileInfo, error) {
if c.Root == nil {
return nil, fmt.Errorf("root file system not specified")
}
file, err := c.Root.Open(path.Clean(filename))
if err != nil {
return nil, err
}
defer file.Close()
return file.Stat()
}
// funcHTTPError returns a structured HTTP handler error. EXPERIMENTAL; SUBJECT TO CHANGE.
// Example usage: `{{if not (fileExists $includeFile)}}{{httpError 404}}{{end}}`
func (c TemplateContext) funcHTTPError(statusCode int) (bool, error) {
// Delete some headers that may have been set by the underlying
// handler (such as file_server) which may break the error response.
c.RespHeader.Header.Del("Content-Length")
c.RespHeader.Header.Del("Content-Type")
c.RespHeader.Header.Del("Etag")
c.RespHeader.Header.Del("Last-Modified")
c.RespHeader.Header.Del("Accept-Ranges")
return false, caddyhttp.Error(statusCode, nil)
}
// funcHumanize transforms size and time inputs to a human readable format.
//
// Size inputs are expected to be integers, and are formatted as a
// byte size, such as "83 MB".
//
// Time inputs are parsed using the given layout (default layout is RFC1123Z)
// and are formatted as a relative time, such as "2 weeks ago".
// See https://pkg.go.dev/time#pkg-constants for time layout docs.
func (c TemplateContext) funcHumanize(formatType, data string) (string, error) {
// The format type can optionally be followed
// by a colon to provide arguments for the format
parts := strings.Split(formatType, ":")
switch parts[0] {
case "size":
dataint, dataerr := strconv.ParseUint(data, 10, 64)
if dataerr != nil {
return "", fmt.Errorf("humanize: size cannot be parsed: %s", dataerr.Error())
}
return humanize.Bytes(dataint), nil
case "time":
timelayout := time.RFC1123Z
if len(parts) > 1 {
timelayout = parts[1]
}
dataint, dataerr := time.Parse(timelayout, data)
if dataerr != nil {
return "", fmt.Errorf("humanize: time cannot be parsed: %s", dataerr.Error())
}
return humanize.Time(dataint), nil
}
return "", fmt.Errorf("no know function was given")
}
// funcMaybe invokes the plugged-in function named functionName if it is plugged in
// (is a module in the 'http.handlers.templates.functions' namespace). If it is not
// available, a log message is emitted.
//
// The first argument is the function name, and the rest of the arguments are
// passed on to the actual function.
//
// This function is useful for executing templates that use components that may be
// considered as optional in some cases (like during local development) where you do
// not want to require everyone to have a custom Caddy build to be able to execute
// your template.
//
// NOTE: This function is EXPERIMENTAL and subject to change or removal.
func (c TemplateContext) funcMaybe(functionName string, args ...any) (any, error) {
for _, funcMap := range c.CustomFuncs {
if fn, ok := funcMap[functionName]; ok {
val := reflect.ValueOf(fn)
if val.Kind() != reflect.Func {
continue
}
argVals := make([]reflect.Value, len(args))
for i, arg := range args {
argVals[i] = reflect.ValueOf(arg)
}
returnVals := val.Call(argVals)
switch len(returnVals) {
case 0:
return "", nil
case 1:
return returnVals[0].Interface(), nil
case 2:
var err error
if !returnVals[1].IsNil() {
err = returnVals[1].Interface().(error)
}
return returnVals[0].Interface(), err
default:
return nil, fmt.Errorf("maybe %s: invalid number of return values: %d", functionName, len(returnVals))
}
}
}
c.config.logger.Named("maybe").Warn("template function could not be found; ignoring invocation", zap.String("name", functionName))
return "", nil
}
// WrappedHeader wraps http.Header so that its mutating methods,
// which normally return nothing, can be used in templates.
// (Template functions must return a value.)
type WrappedHeader struct{ http.Header }
// Add adds a header field value, appending val to
// existing values for that field. It returns an
// empty string.
func (h WrappedHeader) Add(field, val string) string {
h.Header.Add(field, val)
return ""
}
// Set sets a header field value, overwriting any
// other values for that field. It returns an
// empty string.
func (h WrappedHeader) Set(field, val string) string {
h.Header.Set(field, val)
return ""
}
// Del deletes a header field. It returns an empty string.
func (h WrappedHeader) Del(field string) string {
h.Header.Del(field)
return ""
}
var bufPool = sync.Pool{
New: func() any {
return new(bytes.Buffer)
},
}
// at time of writing, sprig.FuncMap() makes a copy, thus
// involves iterating the whole map, so do it just once
var sprigFuncMap = sprig.TxtFuncMap()
const recursionPreventionHeader = "Caddy-Templates-Include"
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"context"
"fmt"
"net/http"
"strings"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
func init() {
caddy.RegisterModule(VarsMiddleware{})
caddy.RegisterModule(VarsMatcher{})
caddy.RegisterModule(MatchVarsRE{})
}
// VarsMiddleware is an HTTP middleware which sets variables to
// have values that can be used in the HTTP request handler
// chain. The primary way to access variables is with placeholders,
// which have the form: `{http.vars.variable_name}`, or with
// the `vars` and `vars_regexp` request matchers.
//
// The key is the variable name, and the value is the value of the
// variable. Both the name and value may use or contain placeholders.
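//
// A minimal JSON sketch of this handler within a route (the variable name
// and value are illustrative, and the usual `handler` inline key for route
// handlers is assumed):
//
//	{"handler": "vars", "magic_number": "42"}
//
// The value is then available as the placeholder `{http.vars.magic_number}`.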
type VarsMiddleware map[string]any
// CaddyModule returns the Caddy module information.
func (VarsMiddleware) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.handlers.vars",
New: func() caddy.Module { return new(VarsMiddleware) },
}
}
func (m VarsMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request, next Handler) error {
vars := r.Context().Value(VarsCtxKey).(map[string]any)
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
for k, v := range m {
keyExpanded := repl.ReplaceAll(k, "")
if valStr, ok := v.(string); ok {
v = repl.ReplaceAll(valStr, "")
}
vars[keyExpanded] = v
// Special case: the user ID is in the replacer, pulled from there
// for access logs. Allow users to override it with the vars handler.
if keyExpanded == "http.auth.user.id" {
repl.Set(keyExpanded, v)
}
}
return next.ServeHTTP(w, r)
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler. Syntax:
//
// vars [<name> <val>] {
// <name> <val>
// ...
// }
func (m *VarsMiddleware) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume directive name
if *m == nil {
*m = make(VarsMiddleware)
}
nextVar := func(headerLine bool) error {
if headerLine {
// header line is optional
if !d.NextArg() {
return nil
}
}
varName := d.Val()
if !d.NextArg() {
return d.ArgErr()
}
varValue := d.ScalarVal()
(*m)[varName] = varValue
if d.NextArg() {
return d.ArgErr()
}
return nil
}
if err := nextVar(true); err != nil {
return err
}
for d.NextBlock(0) {
if err := nextVar(false); err != nil {
return err
}
}
return nil
}
// VarsMatcher is an HTTP request matcher which can match
// requests based on variables in the context or placeholder
// values. The key is the placeholder or name of the variable,
// and the values are the possible values the variable may have in
// order to match (logically OR'ed).
//
// If the key is surrounded by `{ }` it is assumed to be a
// placeholder. Otherwise, it will be considered a variable
// name.
//
// Placeholders in the keys are not expanded, but
// placeholders in the values are.
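//
// A minimal JSON sketch (the variable name and values are illustrative):
//
//	{"vars": {"magic_number": ["41", "42"]}}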
type VarsMatcher map[string][]string
// CaddyModule returns the Caddy module information.
func (VarsMatcher) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.vars",
New: func() caddy.Module { return new(VarsMatcher) },
}
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *VarsMatcher) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
if *m == nil {
*m = make(map[string][]string)
}
// iterate to merge multiple matchers into one
for d.Next() {
var field string
if !d.Args(&field) {
return d.Errf("malformed vars matcher: expected field name")
}
vals := d.RemainingArgs()
if len(vals) == 0 {
return d.Errf("malformed vars matcher: expected at least one value to match against")
}
(*m)[field] = append((*m)[field], vals...)
if d.NextBlock(0) {
return d.Err("malformed vars matcher: blocks are not supported")
}
}
return nil
}
// Match matches a request based on variables in the context,
// or placeholders if the key is not a variable.
func (m VarsMatcher) Match(r *http.Request) bool {
if len(m) == 0 {
return true
}
vars := r.Context().Value(VarsCtxKey).(map[string]any)
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
for key, vals := range m {
var varValue any
if strings.HasPrefix(key, "{") &&
strings.HasSuffix(key, "}") &&
strings.Count(key, "{") == 1 {
varValue, _ = repl.Get(strings.Trim(key, "{}"))
} else {
varValue = vars[key]
}
// see if any of the values given in the matcher match the actual value
for _, v := range vals {
matcherValExpanded := repl.ReplaceAll(v, "")
var varStr string
switch vv := varValue.(type) {
case string:
varStr = vv
case fmt.Stringer:
varStr = vv.String()
case error:
varStr = vv.Error()
case nil:
varStr = ""
default:
varStr = fmt.Sprintf("%v", vv)
}
if varStr == matcherValExpanded {
return true
}
}
}
return false
}
// MatchVarsRE matches the value of the context variables by a given regular expression.
//
// Upon a match, it adds placeholders to the request: `{http.regexp.name.capture_group}`
// where `name` is the regular expression's name, and `capture_group` is either
// the named or positional capture group from the expression itself. If no name
// is given, then the placeholder omits the name: `{http.regexp.capture_group}`
// (potentially leading to collisions).
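//
// A minimal JSON sketch (names and pattern are illustrative, and MatchRegexp's
// `pattern` and `name` JSON fields are assumed):
//
//	{"vars_regexp": {"magic_number": {"pattern": "^4\\d$", "name": "magic"}}}
//
// A match would make placeholders such as `{http.regexp.magic.0}` available.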
type MatchVarsRE map[string]*MatchRegexp
// CaddyModule returns the Caddy module information.
func (MatchVarsRE) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.vars_regexp",
New: func() caddy.Module { return new(MatchVarsRE) },
}
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchVarsRE) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
if *m == nil {
*m = make(map[string]*MatchRegexp)
}
// iterate to merge multiple matchers into one
for d.Next() {
var first, second, third string
if !d.Args(&first, &second) {
return d.ArgErr()
}
var name, field, val string
if d.Args(&third) {
name = first
field = second
val = third
} else {
field = first
val = second
}
// Default to the named matcher's name, if no regexp name is provided
if name == "" {
name = d.GetContextString(caddyfile.MatcherNameCtxKey)
}
(*m)[field] = &MatchRegexp{Pattern: val, Name: name}
if d.NextBlock(0) {
return d.Err("malformed vars_regexp matcher: blocks are not supported")
}
}
return nil
}
// Provision compiles m's regular expressions.
func (m MatchVarsRE) Provision(ctx caddy.Context) error {
for _, rm := range m {
err := rm.Provision(ctx)
if err != nil {
return err
}
}
return nil
}
// Match returns true if r matches m.
func (m MatchVarsRE) Match(r *http.Request) bool {
vars := r.Context().Value(VarsCtxKey).(map[string]any)
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
for key, val := range m {
var varValue any
if strings.HasPrefix(key, "{") &&
strings.HasSuffix(key, "}") &&
strings.Count(key, "{") == 1 {
varValue, _ = repl.Get(strings.Trim(key, "{}"))
} else {
varValue = vars[key]
}
var varStr string
switch vv := varValue.(type) {
case string:
varStr = vv
case fmt.Stringer:
varStr = vv.String()
case error:
varStr = vv.Error()
case nil:
varStr = ""
default:
varStr = fmt.Sprintf("%v", vv)
}
valExpanded := repl.ReplaceAll(varStr, "")
if match := val.Match(valExpanded, repl); match {
return match
}
}
return false
}
// Validate validates m's regular expressions.
func (m MatchVarsRE) Validate() error {
for _, rm := range m {
err := rm.Validate()
if err != nil {
return err
}
}
return nil
}
// GetVar gets a value out of the context's variable table by key.
// If the key does not exist, the return value will be nil.
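//
// For example, from within a handler (the key is illustrative):
//
//	if v, ok := GetVar(r.Context(), "magic_number").(string); ok {
//		// use the string value v ...
//		_ = v
//	}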
func GetVar(ctx context.Context, key string) any {
varMap, ok := ctx.Value(VarsCtxKey).(map[string]any)
if !ok {
return nil
}
return varMap[key]
}
// SetVar sets a value in the context's variable table with
// the given key. It overwrites any previous value with the
// same key.
//
// If the value is nil (note: non-nil interface with nil
// underlying value does not count) and the key exists in
// the table, the key+value will be deleted from the table.
func SetVar(ctx context.Context, key string, value any) {
varMap, ok := ctx.Value(VarsCtxKey).(map[string]any)
if !ok {
return
}
if value == nil {
if _, ok := varMap[key]; ok {
delete(varMap, key)
return
}
}
varMap[key] = value
}
// Interface guards
var (
_ MiddlewareHandler = (*VarsMiddleware)(nil)
_ caddyfile.Unmarshaler = (*VarsMiddleware)(nil)
_ RequestMatcher = (*VarsMatcher)(nil)
_ caddyfile.Unmarshaler = (*VarsMatcher)(nil)
)
// Copyright 2020 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddypki
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
)
func init() {
caddy.RegisterModule(adminAPI{})
}
// adminAPI is a module that serves PKI endpoints to retrieve
// information about the CAs being managed by Caddy.
type adminAPI struct {
ctx caddy.Context
log *zap.Logger
pkiApp *PKI
}
// CaddyModule returns the Caddy module information.
func (adminAPI) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "admin.api.pki",
New: func() caddy.Module { return new(adminAPI) },
}
}
// Provision sets up the adminAPI module.
func (a *adminAPI) Provision(ctx caddy.Context) error {
a.ctx = ctx
a.log = ctx.Logger(a) // TODO: passing in 'a' is a hack until the admin API is officially extensible (see #5032)
// Avoid initializing PKI if it wasn't configured.
// We intentionally ignore the error since it's not
// fatal if the PKI app is not explicitly configured.
pkiApp, err := ctx.AppIfConfigured("pki")
if err == nil {
a.pkiApp = pkiApp.(*PKI)
}
return nil
}
// Routes returns the admin routes for the PKI app.
func (a *adminAPI) Routes() []caddy.AdminRoute {
return []caddy.AdminRoute{
{
Pattern: adminPKIEndpointBase,
Handler: caddy.AdminHandlerFunc(a.handleAPIEndpoints),
},
}
}
// handleAPIEndpoints routes API requests within adminPKIEndpointBase.
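// For example, against the admin endpoint, the routes handled below are:
//
//	GET /pki/ca/local              -> JSON info about the 'local' CA
//	GET /pki/ca/local/certificates -> PEM chain (intermediate, then root)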
func (a *adminAPI) handleAPIEndpoints(w http.ResponseWriter, r *http.Request) error {
uri := strings.TrimPrefix(r.URL.Path, "/pki/")
parts := strings.Split(uri, "/")
switch {
case len(parts) == 2 && parts[0] == "ca" && parts[1] != "":
return a.handleCAInfo(w, r)
case len(parts) == 3 && parts[0] == "ca" && parts[1] != "" && parts[2] == "certificates":
return a.handleCACerts(w, r)
}
return caddy.APIError{
HTTPStatus: http.StatusNotFound,
Err: fmt.Errorf("resource not found: %v", r.URL.Path),
}
}
// handleCAInfo returns information about a particular
// CA by its ID. If the CA ID is the default, then the CA will be
// provisioned if it has not already been. Other CA IDs will return an
// error if they have not been previously provisioned.
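//
// A sketch of the response shape (certificate values truncated):
//
//	{
//		"id": "local",
//		"name": "Caddy Local Authority",
//		"root_common_name": "...",
//		"intermediate_common_name": "...",
//		"root_certificate": "-----BEGIN CERTIFICATE-----\n...",
//		"intermediate_certificate": "-----BEGIN CERTIFICATE-----\n..."
//	}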
func (a *adminAPI) handleCAInfo(w http.ResponseWriter, r *http.Request) error {
if r.Method != http.MethodGet {
return caddy.APIError{
HTTPStatus: http.StatusMethodNotAllowed,
Err: fmt.Errorf("method not allowed: %v", r.Method),
}
}
ca, err := a.getCAFromAPIRequestPath(r)
if err != nil {
return err
}
rootCert, interCert, err := rootAndIntermediatePEM(ca)
if err != nil {
return caddy.APIError{
HTTPStatus: http.StatusInternalServerError,
Err: fmt.Errorf("failed to get root and intermediate cert for CA %s: %v", ca.ID, err),
}
}
repl := ca.newReplacer()
response := caInfo{
ID: ca.ID,
Name: ca.Name,
RootCN: repl.ReplaceAll(ca.RootCommonName, ""),
IntermediateCN: repl.ReplaceAll(ca.IntermediateCommonName, ""),
RootCert: string(rootCert),
IntermediateCert: string(interCert),
}
encoded, err := json.Marshal(response)
if err != nil {
return caddy.APIError{
HTTPStatus: http.StatusInternalServerError,
Err: err,
}
}
w.Header().Set("Content-Type", "application/json")
_, _ = w.Write(encoded)
return nil
}
// handleCACerts returns the certificate chain for a particular
// CA by its ID. If the CA ID is the default, then the CA will be
// provisioned if it has not already been. Other CA IDs will return an
// error if they have not been previously provisioned.
func (a *adminAPI) handleCACerts(w http.ResponseWriter, r *http.Request) error {
if r.Method != http.MethodGet {
return caddy.APIError{
HTTPStatus: http.StatusMethodNotAllowed,
Err: fmt.Errorf("method not allowed: %v", r.Method),
}
}
ca, err := a.getCAFromAPIRequestPath(r)
if err != nil {
return err
}
rootCert, interCert, err := rootAndIntermediatePEM(ca)
if err != nil {
return caddy.APIError{
HTTPStatus: http.StatusInternalServerError,
Err: fmt.Errorf("failed to get root and intermediate cert for CA %s: %v", ca.ID, err),
}
}
w.Header().Set("Content-Type", "application/pem-certificate-chain")
_, err = w.Write(interCert)
if err == nil {
_, _ = w.Write(rootCert)
}
return nil
}
func (a *adminAPI) getCAFromAPIRequestPath(r *http.Request) (*CA, error) {
// Grab the CA ID from the request path; it should be the 4th segment (/pki/ca/<ca>)
id := strings.Split(r.URL.Path, "/")[3]
if id == "" {
return nil, caddy.APIError{
HTTPStatus: http.StatusBadRequest,
Err: fmt.Errorf("missing CA in path"),
}
}
// Find the CA by ID, if PKI is configured
var ca *CA
var ok bool
if a.pkiApp != nil {
ca, ok = a.pkiApp.CAs[id]
}
// If we didn't find the CA and PKI is not configured,
// error out unless the requested CA ID is the default.
// If it is the default, provision it, because the user
// probably intends to enable PKI in their config right
// after requesting the local CA ID.
if !ok {
if id != DefaultCAID {
return nil, caddy.APIError{
HTTPStatus: http.StatusNotFound,
Err: fmt.Errorf("no certificate authority configured with id: %s", id),
}
}
// Provision the default CA, which generates and stores a root
// certificate in storage, if one doesn't already exist.
ca = new(CA)
err := ca.Provision(a.ctx, id, a.log)
if err != nil {
return nil, caddy.APIError{
HTTPStatus: http.StatusInternalServerError,
Err: fmt.Errorf("failed to provision CA %s, %w", id, err),
}
}
}
return ca, nil
}
func rootAndIntermediatePEM(ca *CA) (root, inter []byte, err error) {
root, err = pemEncodeCert(ca.RootCertificate().Raw)
if err != nil {
return
}
inter, err = pemEncodeCert(ca.IntermediateCertificate().Raw)
if err != nil {
return
}
return
}
// caInfo is the response structure for the CA info API endpoint.
type caInfo struct {
ID string `json:"id"`
Name string `json:"name"`
RootCN string `json:"root_common_name"`
IntermediateCN string `json:"intermediate_common_name"`
RootCert string `json:"root_certificate"`
IntermediateCert string `json:"intermediate_certificate"`
}
// adminPKIEndpointBase is the base admin endpoint under which all PKI admin endpoints exist.
const adminPKIEndpointBase = "/pki/"
// Interface guards
var (
_ caddy.AdminRouter = (*adminAPI)(nil)
_ caddy.Provisioner = (*adminAPI)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddypki
import (
"crypto"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"io/fs"
"path"
"sync"
"time"
"github.com/caddyserver/certmagic"
"github.com/smallstep/certificates/authority"
"github.com/smallstep/certificates/db"
"github.com/smallstep/truststore"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
)
// CA describes a certificate authority, which consists of
// root/signing certificates and various settings pertaining
// to the issuance of certificates and trusting them.
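//
// A minimal JSON sketch of a CA (assuming it is keyed by its ID under the
// PKI app's `certificate_authorities` map; values are illustrative):
//
//	{
//		"name": "My Internal CA",
//		"root_common_name": "My Internal CA - Root",
//		"install_trust": false
//	}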
type CA struct {
// The user-facing name of the certificate authority.
Name string `json:"name,omitempty"`
// The name to put in the CommonName field of the
// root certificate.
RootCommonName string `json:"root_common_name,omitempty"`
// The name to put in the CommonName field of the
// intermediate certificates.
IntermediateCommonName string `json:"intermediate_common_name,omitempty"`
// The lifetime for the intermediate certificates. Default: 7 days.
IntermediateLifetime caddy.Duration `json:"intermediate_lifetime,omitempty"`
// Whether Caddy will attempt to install the CA's root
// into the system trust store, as well as into Java
// and Mozilla Firefox trust stores. Default: true.
InstallTrust *bool `json:"install_trust,omitempty"`
// The root certificate to use; if null, one will be generated.
Root *KeyPair `json:"root,omitempty"`
// The intermediate (signing) certificate; if null, one will be generated.
Intermediate *KeyPair `json:"intermediate,omitempty"`
// Optionally configure a separate storage module associated with this
// issuer, instead of using Caddy's global/default-configured storage.
// This can be useful if you want to keep your signing keys in a
// separate location from your leaf certificates.
StorageRaw json.RawMessage `json:"storage,omitempty" caddy:"namespace=caddy.storage inline_key=module"`
// The unique config-facing ID of the certificate authority.
// Since the ID is set in JSON config via object key, this
// field is exported only for purposes of config generation
// and module provisioning.
ID string `json:"-"`
storage certmagic.Storage
root, inter *x509.Certificate
interKey any // TODO: should we just store these as crypto.Signer?
mu *sync.RWMutex
rootCertPath string // mainly used for logging purposes if trusting
log *zap.Logger
ctx caddy.Context
}
// Provision sets up the CA.
func (ca *CA) Provision(ctx caddy.Context, id string, log *zap.Logger) error {
ca.mu = new(sync.RWMutex)
ca.log = log.Named("ca." + id)
ca.ctx = ctx
if id == "" {
return fmt.Errorf("CA ID is required (use 'local' for the default CA)")
}
ca.mu.Lock()
ca.ID = id
ca.mu.Unlock()
if ca.StorageRaw != nil {
val, err := ctx.LoadModule(ca, "StorageRaw")
if err != nil {
return fmt.Errorf("loading storage module: %v", err)
}
cmStorage, err := val.(caddy.StorageConverter).CertMagicStorage()
if err != nil {
return fmt.Errorf("creating storage configuration: %v", err)
}
ca.storage = cmStorage
}
if ca.storage == nil {
ca.storage = ctx.Storage()
}
if ca.Name == "" {
ca.Name = defaultCAName
}
if ca.RootCommonName == "" {
ca.RootCommonName = defaultRootCommonName
}
if ca.IntermediateCommonName == "" {
ca.IntermediateCommonName = defaultIntermediateCommonName
}
if ca.IntermediateLifetime == 0 {
ca.IntermediateLifetime = caddy.Duration(defaultIntermediateLifetime)
} else if time.Duration(ca.IntermediateLifetime) >= defaultRootLifetime {
return fmt.Errorf("intermediate certificate lifetime must be less than root certificate lifetime (%s)", defaultRootLifetime)
}
// load the certs and key that will be used for signing
var rootCert, interCert *x509.Certificate
var rootKey, interKey crypto.Signer
var err error
if ca.Root != nil {
if ca.Root.Format == "" || ca.Root.Format == "pem_file" {
ca.rootCertPath = ca.Root.Certificate
}
rootCert, rootKey, err = ca.Root.Load()
} else {
ca.rootCertPath = "storage:" + ca.storageKeyRootCert()
rootCert, rootKey, err = ca.loadOrGenRoot()
}
if err != nil {
return err
}
if ca.Intermediate != nil {
interCert, interKey, err = ca.Intermediate.Load()
} else {
interCert, interKey, err = ca.loadOrGenIntermediate(rootCert, rootKey)
}
if err != nil {
return err
}
ca.mu.Lock()
ca.root, ca.inter, ca.interKey = rootCert, interCert, interKey
ca.mu.Unlock()
return nil
}
// RootCertificate returns the CA's root certificate (public key).
func (ca CA) RootCertificate() *x509.Certificate {
ca.mu.RLock()
defer ca.mu.RUnlock()
return ca.root
}
// RootKey returns the CA's root private key. Since the root key is
// not cached in memory long-term, it needs to be loaded from storage,
// which could yield an error.
func (ca CA) RootKey() (any, error) {
_, rootKey, err := ca.loadOrGenRoot()
return rootKey, err
}
// IntermediateCertificate returns the CA's intermediate
// certificate (public key).
func (ca CA) IntermediateCertificate() *x509.Certificate {
ca.mu.RLock()
defer ca.mu.RUnlock()
return ca.inter
}
// IntermediateKey returns the CA's intermediate private key.
func (ca CA) IntermediateKey() any {
ca.mu.RLock()
defer ca.mu.RUnlock()
return ca.interKey
}
// NewAuthority returns a new Smallstep-powered signing authority for this CA.
// Note that this method receives *CA (a pointer) to ensure that the closure within it,
// which executes at a later time, always references the same CA instance, so it can
// access the latest, renewed certificates even after NewAuthority was called. See #4517 and #4669.
func (ca *CA) NewAuthority(authorityConfig AuthorityConfig) (*authority.Authority, error) {
// get the root certificate and the issuer cert+key
rootCert := ca.RootCertificate()
// set up the signer; cert/key which signs the leaf certs
var signerOption authority.Option
if authorityConfig.SignWithRoot {
// if we're signing with root, we can just pass the
// cert/key directly, since it's unlikely to expire
// while Caddy is running (long lifetime)
var issuerCert *x509.Certificate
var issuerKey any
issuerCert = rootCert
var err error
issuerKey, err = ca.RootKey()
if err != nil {
return nil, fmt.Errorf("loading signing key: %v", err)
}
signerOption = authority.WithX509Signer(issuerCert, issuerKey.(crypto.Signer))
} else {
// if we're signing with intermediate, we need to make
// sure it's always fresh, because the intermediate may
// renew while Caddy is running (medium lifetime)
signerOption = authority.WithX509SignerFunc(func() ([]*x509.Certificate, crypto.Signer, error) {
issuerCert := ca.IntermediateCertificate()
issuerKey := ca.IntermediateKey().(crypto.Signer)
ca.log.Debug("using intermediate signer",
zap.String("serial", issuerCert.SerialNumber.String()),
zap.String("not_before", issuerCert.NotBefore.String()),
zap.String("not_after", issuerCert.NotAfter.String()))
return []*x509.Certificate{issuerCert}, issuerKey, nil
})
}
opts := []authority.Option{
authority.WithConfig(&authority.Config{
AuthorityConfig: authorityConfig.AuthConfig,
}),
signerOption,
authority.WithX509RootCerts(rootCert),
}
// Add a database if we have one
if authorityConfig.DB != nil {
opts = append(opts, authority.WithDatabase(*authorityConfig.DB))
}
auth, err := authority.NewEmbedded(opts...)
if err != nil {
return nil, fmt.Errorf("initializing certificate authority: %v", err)
}
return auth, nil
}
func (ca CA) loadOrGenRoot() (rootCert *x509.Certificate, rootKey crypto.Signer, err error) {
if ca.Root != nil {
return ca.Root.Load()
}
rootCertPEM, err := ca.storage.Load(ca.ctx, ca.storageKeyRootCert())
if err != nil {
if !errors.Is(err, fs.ErrNotExist) {
return nil, nil, fmt.Errorf("loading root cert: %v", err)
}
// TODO: should we require that all or none of the assets exist before overwriting anything?
rootCert, rootKey, err = ca.genRoot()
if err != nil {
return nil, nil, fmt.Errorf("generating root: %v", err)
}
}
if rootCert == nil {
rootCert, err = pemDecodeSingleCert(rootCertPEM)
if err != nil {
return nil, nil, fmt.Errorf("parsing root certificate PEM: %v", err)
}
}
if rootKey == nil {
rootKeyPEM, err := ca.storage.Load(ca.ctx, ca.storageKeyRootKey())
if err != nil {
return nil, nil, fmt.Errorf("loading root key: %v", err)
}
rootKey, err = certmagic.PEMDecodePrivateKey(rootKeyPEM)
if err != nil {
return nil, nil, fmt.Errorf("decoding root key: %v", err)
}
}
return rootCert, rootKey, nil
}
func (ca CA) genRoot() (rootCert *x509.Certificate, rootKey crypto.Signer, err error) {
repl := ca.newReplacer()
rootCert, rootKey, err = generateRoot(repl.ReplaceAll(ca.RootCommonName, ""))
if err != nil {
return nil, nil, fmt.Errorf("generating CA root: %v", err)
}
rootCertPEM, err := pemEncodeCert(rootCert.Raw)
if err != nil {
return nil, nil, fmt.Errorf("encoding root certificate: %v", err)
}
err = ca.storage.Store(ca.ctx, ca.storageKeyRootCert(), rootCertPEM)
if err != nil {
return nil, nil, fmt.Errorf("saving root certificate: %v", err)
}
rootKeyPEM, err := certmagic.PEMEncodePrivateKey(rootKey)
if err != nil {
return nil, nil, fmt.Errorf("encoding root key: %v", err)
}
err = ca.storage.Store(ca.ctx, ca.storageKeyRootKey(), rootKeyPEM)
if err != nil {
return nil, nil, fmt.Errorf("saving root key: %v", err)
}
return rootCert, rootKey, nil
}
func (ca CA) loadOrGenIntermediate(rootCert *x509.Certificate, rootKey crypto.Signer) (interCert *x509.Certificate, interKey crypto.Signer, err error) {
interCertPEM, err := ca.storage.Load(ca.ctx, ca.storageKeyIntermediateCert())
if err != nil {
if !errors.Is(err, fs.ErrNotExist) {
return nil, nil, fmt.Errorf("loading intermediate cert: %v", err)
}
// TODO: should we require that all or none of the assets exist before overwriting anything?
interCert, interKey, err = ca.genIntermediate(rootCert, rootKey)
if err != nil {
return nil, nil, fmt.Errorf("generating new intermediate cert: %v", err)
}
}
if interCert == nil {
interCert, err = pemDecodeSingleCert(interCertPEM)
if err != nil {
return nil, nil, fmt.Errorf("decoding intermediate certificate PEM: %v", err)
}
}
if interKey == nil {
interKeyPEM, err := ca.storage.Load(ca.ctx, ca.storageKeyIntermediateKey())
if err != nil {
return nil, nil, fmt.Errorf("loading intermediate key: %v", err)
}
interKey, err = certmagic.PEMDecodePrivateKey(interKeyPEM)
if err != nil {
return nil, nil, fmt.Errorf("decoding intermediate key: %v", err)
}
}
return interCert, interKey, nil
}
func (ca CA) genIntermediate(rootCert *x509.Certificate, rootKey crypto.Signer) (interCert *x509.Certificate, interKey crypto.Signer, err error) {
repl := ca.newReplacer()
interCert, interKey, err = generateIntermediate(repl.ReplaceAll(ca.IntermediateCommonName, ""), rootCert, rootKey, time.Duration(ca.IntermediateLifetime))
if err != nil {
return nil, nil, fmt.Errorf("generating CA intermediate: %v", err)
}
interCertPEM, err := pemEncodeCert(interCert.Raw)
if err != nil {
return nil, nil, fmt.Errorf("encoding intermediate certificate: %v", err)
}
err = ca.storage.Store(ca.ctx, ca.storageKeyIntermediateCert(), interCertPEM)
if err != nil {
return nil, nil, fmt.Errorf("saving intermediate certificate: %v", err)
}
interKeyPEM, err := certmagic.PEMEncodePrivateKey(interKey)
if err != nil {
return nil, nil, fmt.Errorf("encoding intermediate key: %v", err)
}
err = ca.storage.Store(ca.ctx, ca.storageKeyIntermediateKey(), interKeyPEM)
if err != nil {
return nil, nil, fmt.Errorf("saving intermediate key: %v", err)
}
return interCert, interKey, nil
}
func (ca CA) storageKeyCAPrefix() string {
return path.Join("pki", "authorities", certmagic.StorageKeys.Safe(ca.ID))
}
func (ca CA) storageKeyRootCert() string {
return path.Join(ca.storageKeyCAPrefix(), "root.crt")
}
func (ca CA) storageKeyRootKey() string {
return path.Join(ca.storageKeyCAPrefix(), "root.key")
}
func (ca CA) storageKeyIntermediateCert() string {
return path.Join(ca.storageKeyCAPrefix(), "intermediate.crt")
}
func (ca CA) storageKeyIntermediateKey() string {
return path.Join(ca.storageKeyCAPrefix(), "intermediate.key")
}
func (ca CA) newReplacer() *caddy.Replacer {
repl := caddy.NewReplacer()
repl.Set("pki.ca.name", ca.Name)
return repl
}
// installRoot installs this CA's root certificate into the
// local trust store(s) if it is not already trusted. The CA
// must already be provisioned.
func (ca CA) installRoot() error {
// avoid password prompt if already trusted
if trusted(ca.root) {
ca.log.Info("root certificate is already trusted by system",
zap.String("path", ca.rootCertPath))
return nil
}
ca.log.Warn("installing root certificate (you might be prompted for password)",
zap.String("path", ca.rootCertPath))
return truststore.Install(ca.root,
truststore.WithDebug(),
truststore.WithFirefox(),
truststore.WithJava(),
)
}
// AuthorityConfig is used to help a CA configure
// the underlying signing authority.
type AuthorityConfig struct {
SignWithRoot bool
// TODO: should we just embed the underlying authority.Config struct type?
DB *db.AuthDB
AuthConfig *authority.AuthConfig
}
const (
// DefaultCAID is the default CA ID.
DefaultCAID = "local"
defaultCAName = "Caddy Local Authority"
defaultRootCommonName = "{pki.ca.name} - {time.now.year} ECC Root"
defaultIntermediateCommonName = "{pki.ca.name} - ECC Intermediate"
defaultRootLifetime = 24 * time.Hour * 30 * 12 * 10 // approximately 10 years
defaultIntermediateLifetime = 24 * time.Hour * 7 // 7 days
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddypki
import (
"crypto"
"crypto/x509"
"time"
"go.step.sm/crypto/keyutil"
"go.step.sm/crypto/x509util"
)
func generateRoot(commonName string) (*x509.Certificate, crypto.Signer, error) {
template, signer, err := newCert(commonName, x509util.DefaultRootTemplate, defaultRootLifetime)
if err != nil {
return nil, nil, err
}
root, err := x509util.CreateCertificate(template, template, signer.Public(), signer)
if err != nil {
return nil, nil, err
}
return root, signer, nil
}
func generateIntermediate(commonName string, rootCrt *x509.Certificate, rootKey crypto.Signer, lifetime time.Duration) (*x509.Certificate, crypto.Signer, error) {
template, signer, err := newCert(commonName, x509util.DefaultIntermediateTemplate, lifetime)
if err != nil {
return nil, nil, err
}
intermediate, err := x509util.CreateCertificate(template, rootCrt, signer.Public(), rootKey)
if err != nil {
return nil, nil, err
}
return intermediate, signer, nil
}
func newCert(commonName, templateName string, lifetime time.Duration) (cert *x509.Certificate, signer crypto.Signer, err error) {
signer, err = keyutil.GenerateDefaultSigner()
if err != nil {
return nil, nil, err
}
csr, err := x509util.CreateCertificateRequest(commonName, []string{}, signer)
if err != nil {
return nil, nil, err
}
template, err := x509util.NewCertificate(csr, x509util.WithTemplate(templateName, x509util.CreateTemplateData(commonName, []string{})))
if err != nil {
return nil, nil, err
}
cert = template.GetCertificate()
cert.NotBefore = time.Now().Truncate(time.Second)
cert.NotAfter = cert.NotBefore.Add(lifetime)
return cert, signer, nil
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddypki
import (
"crypto/x509"
"encoding/json"
"encoding/pem"
"fmt"
"net/http"
"os"
"path"
"github.com/smallstep/truststore"
"github.com/spf13/cobra"
caddycmd "github.com/caddyserver/caddy/v2/cmd"
"github.com/caddyserver/caddy/v2"
)
func init() {
caddycmd.RegisterCommand(caddycmd.Command{
Name: "trust",
Usage: "[--ca <id>] [--address <listen>] [--config <path> [--adapter <name>]]",
Short: "Installs a CA certificate into local trust stores",
Long: `
Adds a root certificate into the local trust stores.
Caddy will attempt to install its root certificates into the local
trust stores automatically when they are first generated, but it
might fail if Caddy doesn't have the appropriate permissions to
write to the trust store. This command is necessary to pre-install
the certificates before using them, if the server process runs as an
unprivileged user (such as via systemd).
By default, this command installs the root certificate for Caddy's
default CA (i.e. 'local'). You may specify the ID of another CA
with the --ca flag.
This command will attempt to connect to Caddy's admin API running at
'` + caddy.DefaultAdminListen + `' to fetch the root certificate. You may
explicitly specify the --address, or use the --config flag to load
the admin address from your config, if not using the default.`,
CobraFunc: func(cmd *cobra.Command) {
cmd.Flags().StringP("ca", "", "", "The ID of the CA to trust (defaults to 'local')")
cmd.Flags().StringP("address", "", "", "Address of the administration API listener (if --config is not used)")
cmd.Flags().StringP("config", "c", "", "Configuration file (if --address is not used)")
cmd.Flags().StringP("adapter", "a", "", "Name of config adapter to apply (if --config is used)")
cmd.RunE = caddycmd.WrapCommandFuncForCobra(cmdTrust)
},
})
caddycmd.RegisterCommand(caddycmd.Command{
Name: "untrust",
Usage: "[--cert <path>] | [[--ca <id>] [--address <listen>] [--config <path> [--adapter <name>]]]",
Short: "Untrusts a locally-trusted CA certificate",
Long: `
Untrusts a root certificate from the local trust store(s).
This command uninstalls trust; it does not necessarily delete the
root certificate from trust stores entirely. Thus, repeatedly
trusting and untrusting new certificates can fill up trust databases.
This command does not delete or modify certificate files from Caddy's
configured storage.
This command can be used in one of two ways. Either by specifying
which certificate to untrust by a direct path to the certificate
file with the --cert flag, or by fetching the root certificate for
the CA from the admin API (default behaviour).
If the admin API is used, then the CA defaults to 'local'. You may
specify the ID of another CA with the --ca flag. By default, this
will attempt to connect to Caddy's admin API running at
'` + caddy.DefaultAdminListen + `' to fetch the root certificate.
You may explicitly specify the --address, or use the --config flag
to load the admin address from your config, if not using the default.`,
CobraFunc: func(cmd *cobra.Command) {
cmd.Flags().StringP("cert", "p", "", "The path to the CA certificate to untrust")
cmd.Flags().StringP("ca", "", "", "The ID of the CA to untrust (defaults to 'local')")
cmd.Flags().StringP("address", "", "", "Address of the administration API listener (if --config is not used)")
cmd.Flags().StringP("config", "c", "", "Configuration file (if --address is not used)")
cmd.Flags().StringP("adapter", "a", "", "Name of config adapter to apply (if --config is used)")
cmd.RunE = caddycmd.WrapCommandFuncForCobra(cmdUntrust)
},
})
}
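// Illustrative invocations of the commands registered above (addresses and
// paths are example values):
//
//	caddy trust --ca local --address localhost:2019
//	caddy untrust --cert /path/to/root.crt
//	caddy untrust --ca local --config /etc/caddy/Caddyfile --adapter caddyfile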
func cmdTrust(fl caddycmd.Flags) (int, error) {
caID := fl.String("ca")
addrFlag := fl.String("address")
configFlag := fl.String("config")
configAdapterFlag := fl.String("adapter")
// Prepare the URI to the admin endpoint
if caID == "" {
caID = DefaultCAID
}
// Determine where we're sending the request to get the CA info
adminAddr, err := caddycmd.DetermineAdminAPIAddress(addrFlag, nil, configFlag, configAdapterFlag)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("couldn't determine admin API address: %v", err)
}
// Fetch the root cert from the admin API
rootCert, err := rootCertFromAdmin(adminAddr, caID)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
// Set up the CA struct; we only need to fill in the root
// because we're only using it to make use of the installRoot()
// function. Also needs a logger for warnings, and a "cert path"
// for the root cert; since we're loading from the API and we
// don't know the actual storage path via this flow, we'll just
// pass through the admin API address instead.
ca := CA{
log: caddy.Log(),
root: rootCert,
rootCertPath: adminAddr + path.Join(adminPKIEndpointBase, "ca", caID),
}
// Install the cert!
err = ca.installRoot()
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
return caddy.ExitCodeSuccess, nil
}
func cmdUntrust(fl caddycmd.Flags) (int, error) {
certFile := fl.String("cert")
caID := fl.String("ca")
addrFlag := fl.String("address")
configFlag := fl.String("config")
configAdapterFlag := fl.String("adapter")
if certFile != "" && (caID != "" || addrFlag != "" || configFlag != "") {
return caddy.ExitCodeFailedStartup, fmt.Errorf("conflicting command line arguments, cannot use --cert with other flags")
}
// If a file was specified, try to uninstall the cert matching that file
if certFile != "" {
// Sanity check, make sure cert file exists first
_, err := os.Stat(certFile)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("accessing certificate file: %v", err)
}
// Uninstall the file!
err = truststore.UninstallFile(certFile,
truststore.WithDebug(),
truststore.WithFirefox(),
truststore.WithJava())
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("failed to uninstall certificate file: %v", err)
}
return caddy.ExitCodeSuccess, nil
}
// Prepare the URI to the admin endpoint
if caID == "" {
caID = DefaultCAID
}
// Determine where we're sending the request to get the CA info
adminAddr, err := caddycmd.DetermineAdminAPIAddress(addrFlag, nil, configFlag, configAdapterFlag)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("couldn't determine admin API address: %v", err)
}
// Fetch the root cert from the admin API
rootCert, err := rootCertFromAdmin(adminAddr, caID)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
// Uninstall the cert!
err = truststore.Uninstall(rootCert,
truststore.WithDebug(),
truststore.WithFirefox(),
truststore.WithJava())
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("failed to uninstall certificate file: %v", err)
}
return caddy.ExitCodeSuccess, nil
}
// rootCertFromAdmin makes the API request to fetch the root certificate for the named CA via admin API.
func rootCertFromAdmin(adminAddr string, caID string) (*x509.Certificate, error) {
uri := path.Join(adminPKIEndpointBase, "ca", caID)
// Make the request to fetch the CA info
resp, err := caddycmd.AdminAPIRequest(adminAddr, http.MethodGet, uri, make(http.Header), nil)
if err != nil {
return nil, fmt.Errorf("requesting CA info: %v", err)
}
defer resp.Body.Close()
// Decode the response
caInfo := new(caInfo)
err = json.NewDecoder(resp.Body).Decode(caInfo)
if err != nil {
return nil, fmt.Errorf("failed to decode JSON response: %v", err)
}
// Decode the root cert
rootBlock, _ := pem.Decode([]byte(caInfo.RootCert))
if rootBlock == nil {
return nil, fmt.Errorf("failed to decode root certificate: %v", err)
}
rootCert, err := x509.ParseCertificate(rootBlock.Bytes)
if err != nil {
return nil, fmt.Errorf("failed to parse root certificate: %v", err)
}
return rootCert, nil
}
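// For example, with the default CA ID this issues a GET request to a path like
// <adminPKIEndpointBase>/ca/local on the admin endpoint and PEM-decodes the root
// certificate from the JSON response. A hedged usage sketch, assuming the admin
// API is listening on the default address:
//
//	rootCert, err := rootCertFromAdmin("localhost:2019", "local")
//	if err != nil {
//		// handle error
//	}
//	_ = rootCert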
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddypki
import (
"bytes"
"crypto"
"crypto/x509"
"encoding/pem"
"fmt"
"os"
"github.com/caddyserver/certmagic"
)
func pemDecodeSingleCert(pemDER []byte) (*x509.Certificate, error) {
pemBlock, remaining := pem.Decode(pemDER)
if pemBlock == nil {
return nil, fmt.Errorf("no PEM block found")
}
if len(remaining) > 0 {
return nil, fmt.Errorf("input contained more than a single PEM block")
}
if pemBlock.Type != "CERTIFICATE" {
return nil, fmt.Errorf("expected PEM block type to be CERTIFICATE, but got '%s'", pemBlock.Type)
}
return x509.ParseCertificate(pemBlock.Bytes)
}
func pemEncodeCert(der []byte) ([]byte, error) {
return pemEncode("CERTIFICATE", der)
}
func pemEncode(blockType string, b []byte) ([]byte, error) {
var buf bytes.Buffer
err := pem.Encode(&buf, &pem.Block{Type: blockType, Bytes: b})
return buf.Bytes(), err
}
func trusted(cert *x509.Certificate) bool {
chains, err := cert.Verify(x509.VerifyOptions{})
return len(chains) > 0 && err == nil
}
// KeyPair represents a public-private key pair, where the
// public key is also called a certificate.
type KeyPair struct {
// The certificate. By default, this should be the path to
// a PEM file unless format is something else.
Certificate string `json:"certificate,omitempty"`
// The private key. By default, this should be the path to
// a PEM file unless format is something else.
PrivateKey string `json:"private_key,omitempty"`
// The format in which the certificate and private
// key are provided. Default: pem_file
Format string `json:"format,omitempty"`
}
// Load loads the certificate and key.
func (kp KeyPair) Load() (*x509.Certificate, crypto.Signer, error) {
switch kp.Format {
case "", "pem_file":
certData, err := os.ReadFile(kp.Certificate)
if err != nil {
return nil, nil, err
}
cert, err := pemDecodeSingleCert(certData)
if err != nil {
return nil, nil, err
}
var key crypto.Signer
if kp.PrivateKey != "" {
keyData, err := os.ReadFile(kp.PrivateKey)
if err != nil {
return nil, nil, err
}
key, err = certmagic.PEMDecodePrivateKey(keyData)
if err != nil {
return nil, nil, err
}
}
return cert, key, nil
default:
return nil, nil, fmt.Errorf("unsupported format: %s", kp.Format)
}
}
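// A minimal usage sketch; the file paths are placeholders, and the default
// "pem_file" format is assumed:
//
//	kp := KeyPair{
//		Certificate: "/path/to/root.crt",
//		PrivateKey:  "/path/to/root.key",
//	}
//	cert, signer, err := kp.Load()
//	if err != nil {
//		// handle error
//	}
//	_, _ = cert, signer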
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddypki
import (
"crypto/x509"
"fmt"
"log"
"runtime/debug"
"time"
"go.uber.org/zap"
)
func (p *PKI) maintenance() {
defer func() {
if err := recover(); err != nil {
log.Printf("[PANIC] PKI maintenance: %v\n%s", err, debug.Stack())
}
}()
ticker := time.NewTicker(10 * time.Minute) // TODO: make configurable
defer ticker.Stop()
for {
select {
case <-ticker.C:
p.renewCerts()
case <-p.ctx.Done():
return
}
}
}
func (p *PKI) renewCerts() {
for _, ca := range p.CAs {
err := p.renewCertsForCA(ca)
if err != nil {
p.log.Error("renewing intermediate certificates",
zap.Error(err),
zap.String("ca", ca.ID))
}
}
}
func (p *PKI) renewCertsForCA(ca *CA) error {
ca.mu.Lock()
defer ca.mu.Unlock()
log := p.log.With(zap.String("ca", ca.ID))
// only maintain the root if it's not manually provided in the config
if ca.Root == nil {
if needsRenewal(ca.root) {
// TODO: implement root renewal (use same key)
log.Warn("root certificate expiring soon (FIXME: ROOT RENEWAL NOT YET IMPLEMENTED)",
zap.Duration("time_remaining", time.Until(ca.inter.NotAfter)),
)
}
}
// only maintain the intermediate if it's not manually provided in the config
if ca.Intermediate == nil {
if needsRenewal(ca.inter) {
log.Info("intermediate expires soon; renewing",
zap.Duration("time_remaining", time.Until(ca.inter.NotAfter)),
)
rootCert, rootKey, err := ca.loadOrGenRoot()
if err != nil {
return fmt.Errorf("loading root key: %v", err)
}
interCert, interKey, err := ca.genIntermediate(rootCert, rootKey)
if err != nil {
return fmt.Errorf("generating new certificate: %v", err)
}
ca.inter, ca.interKey = interCert, interKey
log.Info("renewed intermediate",
zap.Time("new_expiration", ca.inter.NotAfter),
)
}
}
return nil
}
func needsRenewal(cert *x509.Certificate) bool {
lifetime := cert.NotAfter.Sub(cert.NotBefore)
renewalWindow := time.Duration(float64(lifetime) * renewalWindowRatio)
renewalWindowStart := cert.NotAfter.Add(-renewalWindow)
return time.Now().After(renewalWindowStart)
}
const renewalWindowRatio = 0.2 // TODO: make configurable
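// Worked example: with renewalWindowRatio = 0.2, a certificate valid for 7 days
// (168h) enters its renewal window 0.2*168h = 33.6h before NotAfter, so
// needsRenewal starts returning true roughly 1.4 days before expiration.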
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddypki
import (
"fmt"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
)
func init() {
caddy.RegisterModule(PKI{})
}
// PKI provides Public Key Infrastructure facilities for Caddy.
//
// This app can define certificate authorities (CAs) which are capable
// of signing certificates. Other modules can be configured to use
// the CAs defined by this app for issuing certificates or getting
// key information needed for establishing trust.
type PKI struct {
// The certificate authorities to manage. Each CA is keyed by an
// ID that is used to uniquely identify it from other CAs.
// At runtime, the GetCA() method should be used instead to ensure
// the default CA is provisioned if it hadn't already been.
// The default CA ID is "local".
CAs map[string]*CA `json:"certificate_authorities,omitempty"`
ctx caddy.Context
log *zap.Logger
}
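// A minimal illustrative JSON config sketch for this app; an empty CA object is
// enough to get the defaults for that CA, and the "local" key matches the
// default CA ID mentioned above:
//
//	{
//		"apps": {
//			"pki": {
//				"certificate_authorities": {
//					"local": {}
//				}
//			}
//		}
//	}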
// CaddyModule returns the Caddy module information.
func (PKI) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "pki",
New: func() caddy.Module { return new(PKI) },
}
}
// Provision sets up the configuration for the PKI app.
func (p *PKI) Provision(ctx caddy.Context) error {
p.ctx = ctx
p.log = ctx.Logger()
for caID, ca := range p.CAs {
err := ca.Provision(ctx, caID, p.log)
if err != nil {
return fmt.Errorf("provisioning CA '%s': %v", caID, err)
}
}
// if this app is initialized at all, ensure there's at
// least a default CA that can be used: the standard CA
// which is used implicitly for signing local-use certs
if len(p.CAs) == 0 {
err := p.ProvisionDefaultCA(ctx)
if err != nil {
return fmt.Errorf("provisioning CA '%s': %v", DefaultCAID, err)
}
}
return nil
}
// ProvisionDefaultCA sets up the default CA.
func (p *PKI) ProvisionDefaultCA(ctx caddy.Context) error {
if p.CAs == nil {
p.CAs = make(map[string]*CA)
}
p.CAs[DefaultCAID] = new(CA)
return p.CAs[DefaultCAID].Provision(ctx, DefaultCAID, p.log)
}
// Start starts the PKI app.
func (p *PKI) Start() error {
// install roots to trust store, if not disabled
for _, ca := range p.CAs {
if ca.InstallTrust != nil && !*ca.InstallTrust {
ca.log.Info("root certificate trust store installation disabled; unconfigured clients may show warnings",
zap.String("path", ca.rootCertPath))
continue
}
if err := ca.installRoot(); err != nil {
// could be some system dependencies that are missing;
// shouldn't totally prevent startup, but we should log it
ca.log.Error("failed to install root certificate",
zap.Error(err),
zap.String("certificate_file", ca.rootCertPath))
}
}
// see if root/intermediates need renewal...
p.renewCerts()
// ...and keep them renewed
go p.maintenance()
return nil
}
// Stop stops the PKI app.
func (p *PKI) Stop() error {
return nil
}
// GetCA retrieves a CA by ID. If the ID is the default
// CA ID, and it hasn't been provisioned yet, it will
// be provisioned.
func (p *PKI) GetCA(ctx caddy.Context, id string) (*CA, error) {
ca, ok := p.CAs[id]
if !ok {
// for anything other than the default CA ID, error out if it wasn't configured
if id != DefaultCAID {
return nil, fmt.Errorf("no certificate authority configured with id: %s", id)
}
// for the default CA ID, provision it, because we want it to "just work"
err := p.ProvisionDefaultCA(ctx)
if err != nil {
return nil, fmt.Errorf("failed to provision default CA: %s", err)
}
ca = p.CAs[id]
}
return ca, nil
}
// Interface guards
var (
_ caddy.Provisioner = (*PKI)(nil)
_ caddy.App = (*PKI)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"context"
"crypto/x509"
"encoding/json"
"fmt"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/caddyserver/certmagic"
"github.com/caddyserver/zerossl"
"github.com/mholt/acmez/v2/acme"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
func init() {
caddy.RegisterModule(ACMEIssuer{})
}
// ACMEIssuer manages certificates using the ACME protocol (RFC 8555).
type ACMEIssuer struct {
// The URL to the CA's ACME directory endpoint. Default:
// https://acme-v02.api.letsencrypt.org/directory
CA string `json:"ca,omitempty"`
// The URL to the test CA's ACME directory endpoint.
// This endpoint is only used during retries if there
// is a failure using the primary CA. Default:
// https://acme-staging-v02.api.letsencrypt.org/directory
TestCA string `json:"test_ca,omitempty"`
// Your email address, so the CA can contact you if necessary.
// Not required, but strongly recommended to provide one so
// you can be reached if there is a problem. Your email is
// not sent to any Caddy mothership or used for any purpose
// other than ACME transactions.
Email string `json:"email,omitempty"`
// If you have an existing account with the ACME server, put
// the private key here in PEM format. The ACME client will
// look up your account information with this key first before
// trying to create a new one. You can use placeholders here,
// for example if you have it in an environment variable.
AccountKey string `json:"account_key,omitempty"`
// If using an ACME CA that requires an external account
// binding, specify the CA-provided credentials here.
ExternalAccount *acme.EAB `json:"external_account,omitempty"`
// Time to wait before timing out an ACME operation.
// Default: 0 (no timeout)
ACMETimeout caddy.Duration `json:"acme_timeout,omitempty"`
// Configures the various ACME challenge types.
Challenges *ChallengesConfig `json:"challenges,omitempty"`
// An array of files of CA certificates to accept when connecting to the
// ACME CA. Generally, you should only use this if the ACME CA endpoint
// is internal or for development/testing purposes.
TrustedRootsPEMFiles []string `json:"trusted_roots_pem_files,omitempty"`
// Preferences for selecting alternate certificate chains, if offered
// by the CA. By default, the first offered chain will be selected.
// If configured, the chains may be sorted and the first matching chain
// will be selected.
PreferredChains *ChainPreference `json:"preferred_chains,omitempty"`
// The validity period to ask the CA to issue a certificate for.
// Default: 0 (CA chooses lifetime).
// This value is used to compute the "notAfter" field of the ACME order;
// therefore the system must have a reasonably synchronized clock.
// NOTE: Not all CAs support this. Check with your CA's ACME
// documentation to see if this is allowed and what values may
// be used. EXPERIMENTAL: Subject to change.
CertificateLifetime caddy.Duration `json:"certificate_lifetime,omitempty"`
rootPool *x509.CertPool
logger *zap.Logger
template certmagic.ACMEIssuer // set at Provision
magic *certmagic.Config // set at PreCheck
issuer *certmagic.ACMEIssuer // set at PreCheck; result of template + magic
}
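// A minimal illustrative JSON sketch of this issuer inside an automation
// policy's "issuers" array (the email is a placeholder; the "module" inline key
// comes from the tls.issuance namespace):
//
//	{
//		"module": "acme",
//		"ca": "https://acme-v02.api.letsencrypt.org/directory",
//		"email": "admin@example.com"
//	}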
// CaddyModule returns the Caddy module information.
func (ACMEIssuer) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.issuance.acme",
New: func() caddy.Module { return new(ACMEIssuer) },
}
}
// Provision sets up iss.
func (iss *ACMEIssuer) Provision(ctx caddy.Context) error {
iss.logger = ctx.Logger()
repl := caddy.NewReplacer()
// expand email address, if non-empty
if iss.Email != "" {
email, err := repl.ReplaceOrErr(iss.Email, true, true)
if err != nil {
return fmt.Errorf("expanding email address '%s': %v", iss.Email, err)
}
iss.Email = email
}
// expand account key, if non-empty
if iss.AccountKey != "" {
accountKey, err := repl.ReplaceOrErr(iss.AccountKey, true, true)
if err != nil {
return fmt.Errorf("expanding account key PEM '%s': %v", iss.AccountKey, err)
}
iss.AccountKey = accountKey
}
// DNS providers
if iss.Challenges != nil && iss.Challenges.DNS != nil && iss.Challenges.DNS.ProviderRaw != nil {
val, err := ctx.LoadModule(iss.Challenges.DNS, "ProviderRaw")
if err != nil {
return fmt.Errorf("loading DNS provider module: %v", err)
}
iss.Challenges.DNS.solver = &certmagic.DNS01Solver{
DNSManager: certmagic.DNSManager{
DNSProvider: val.(certmagic.DNSProvider),
TTL: time.Duration(iss.Challenges.DNS.TTL),
PropagationDelay: time.Duration(iss.Challenges.DNS.PropagationDelay),
PropagationTimeout: time.Duration(iss.Challenges.DNS.PropagationTimeout),
Resolvers: iss.Challenges.DNS.Resolvers,
OverrideDomain: iss.Challenges.DNS.OverrideDomain,
},
}
}
// add any custom CAs to trust store
if len(iss.TrustedRootsPEMFiles) > 0 {
iss.rootPool = x509.NewCertPool()
for _, pemFile := range iss.TrustedRootsPEMFiles {
pemData, err := os.ReadFile(pemFile)
if err != nil {
return fmt.Errorf("loading trusted root CA's PEM file: %s: %v", pemFile, err)
}
if !iss.rootPool.AppendCertsFromPEM(pemData) {
return fmt.Errorf("unable to add %s to trust pool: %v", pemFile, err)
}
}
}
var err error
iss.template, err = iss.makeIssuerTemplate()
if err != nil {
return err
}
return nil
}
func (iss *ACMEIssuer) makeIssuerTemplate() (certmagic.ACMEIssuer, error) {
template := certmagic.ACMEIssuer{
CA: iss.CA,
TestCA: iss.TestCA,
Email: iss.Email,
AccountKeyPEM: iss.AccountKey,
CertObtainTimeout: time.Duration(iss.ACMETimeout),
TrustedRoots: iss.rootPool,
ExternalAccount: iss.ExternalAccount,
NotAfter: time.Duration(iss.CertificateLifetime),
Logger: iss.logger,
}
if iss.Challenges != nil {
if iss.Challenges.HTTP != nil {
template.DisableHTTPChallenge = iss.Challenges.HTTP.Disabled
template.AltHTTPPort = iss.Challenges.HTTP.AlternatePort
}
if iss.Challenges.TLSALPN != nil {
template.DisableTLSALPNChallenge = iss.Challenges.TLSALPN.Disabled
template.AltTLSALPNPort = iss.Challenges.TLSALPN.AlternatePort
}
if iss.Challenges.DNS != nil {
template.DNS01Solver = iss.Challenges.DNS.solver
}
template.ListenHost = iss.Challenges.BindHost
}
if iss.PreferredChains != nil {
template.PreferredChains = certmagic.ChainPreference{
Smallest: iss.PreferredChains.Smallest,
AnyCommonName: iss.PreferredChains.AnyCommonName,
RootCommonName: iss.PreferredChains.RootCommonName,
}
}
// ZeroSSL requires EAB, but we can generate that automatically (requires an email address be configured)
if strings.HasPrefix(iss.CA, "https://acme.zerossl.com/") {
template.NewAccountFunc = func(ctx context.Context, acmeIss *certmagic.ACMEIssuer, acct acme.Account) (acme.Account, error) {
if acmeIss.ExternalAccount != nil {
return acct, nil
}
var err error
acmeIss.ExternalAccount, acct, err = iss.generateZeroSSLEABCredentials(ctx, acct)
return acct, err
}
}
return template, nil
}
// SetConfig sets the associated certmagic config for this issuer.
// This is required because ACME needs values from the config in
// order to solve the challenges during issuance. This implements
// the ConfigSetter interface.
func (iss *ACMEIssuer) SetConfig(cfg *certmagic.Config) {
iss.magic = cfg
iss.issuer = certmagic.NewACMEIssuer(cfg, iss.template)
}
// PreCheck implements the certmagic.PreChecker interface.
func (iss *ACMEIssuer) PreCheck(ctx context.Context, names []string, interactive bool) error {
return iss.issuer.PreCheck(ctx, names, interactive)
}
// Issue obtains a certificate for the given csr.
func (iss *ACMEIssuer) Issue(ctx context.Context, csr *x509.CertificateRequest) (*certmagic.IssuedCertificate, error) {
return iss.issuer.Issue(ctx, csr)
}
// IssuerKey returns the unique issuer key for the configured CA endpoint.
func (iss *ACMEIssuer) IssuerKey() string {
return iss.issuer.IssuerKey()
}
// Revoke revokes the given certificate.
func (iss *ACMEIssuer) Revoke(ctx context.Context, cert certmagic.CertificateResource, reason int) error {
return iss.issuer.Revoke(ctx, cert, reason)
}
// GetACMEIssuer returns iss. This is useful when other types embed ACMEIssuer, because
// type-asserting them to *ACMEIssuer will fail, but type-asserting them to an interface
// with only this method will succeed, and will still allow the embedded ACMEIssuer
// to be accessed and manipulated.
func (iss *ACMEIssuer) GetACMEIssuer() *ACMEIssuer { return iss }
// GetRenewalInfo wraps the underlying GetRenewalInfo method and satisfies
// the CertMagic interface for ARI support.
func (iss *ACMEIssuer) GetRenewalInfo(ctx context.Context, cert certmagic.Certificate) (acme.RenewalInfo, error) {
return iss.issuer.GetRenewalInfo(ctx, cert)
}
// generateZeroSSLEABCredentials generates ZeroSSL EAB credentials for the primary contact email
// on the issuer. It should only be used if the CA endpoint is ZeroSSL. An email address is required.
func (iss *ACMEIssuer) generateZeroSSLEABCredentials(ctx context.Context, acct acme.Account) (*acme.EAB, acme.Account, error) {
if strings.TrimSpace(iss.Email) == "" {
return nil, acme.Account{}, fmt.Errorf("your email address is required to use ZeroSSL's ACME endpoint")
}
if len(acct.Contact) == 0 {
// we borrow the email from config or the default email, so ensure it's saved with the account
acct.Contact = []string{"mailto:" + iss.Email}
}
endpoint := zerossl.BaseURL + "/acme/eab-credentials-email"
form := url.Values{"email": []string{iss.Email}}
body := strings.NewReader(form.Encode())
req, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, body)
if err != nil {
return nil, acct, fmt.Errorf("forming request: %v", err)
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
req.Header.Set("User-Agent", certmagic.UserAgent)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, acct, fmt.Errorf("performing EAB credentials request: %v", err)
}
defer resp.Body.Close()
var result struct {
Success bool `json:"success"`
Error struct {
Code int `json:"code"`
Type string `json:"type"`
} `json:"error"`
EABKID string `json:"eab_kid"`
EABHMACKey string `json:"eab_hmac_key"`
}
err = json.NewDecoder(resp.Body).Decode(&result)
if err != nil {
return nil, acct, fmt.Errorf("decoding API response: %v", err)
}
if result.Error.Code != 0 {
// do this check first because ZeroSSL's API returns 200 on errors
return nil, acct, fmt.Errorf("failed getting EAB credentials: HTTP %d: %s (code %d)",
resp.StatusCode, result.Error.Type, result.Error.Code)
}
if resp.StatusCode != http.StatusOK {
return nil, acct, fmt.Errorf("failed getting EAB credentials: HTTP %d", resp.StatusCode)
}
iss.logger.Info("generated EAB credentials", zap.String("key_id", result.EABKID))
return &acme.EAB{
KeyID: result.EABKID,
MACKey: result.EABHMACKey,
}, acct, nil
}
// UnmarshalCaddyfile deserializes Caddyfile tokens into iss.
//
// ... acme [<directory_url>] {
// dir <directory_url>
// test_dir <test_directory_url>
// email <email>
// timeout <duration>
// disable_http_challenge
// disable_tlsalpn_challenge
// alt_http_port <port>
// alt_tlsalpn_port <port>
// eab <key_id> <mac_key>
// trusted_roots <pem_files...>
// dns <provider_name> [<options>]
// propagation_delay <duration>
// propagation_timeout <duration>
// resolvers <dns_servers...>
// dns_ttl <duration>
// dns_challenge_override_domain <domain>
// preferred_chains [smallest] {
// root_common_name <common_names...>
// any_common_name <common_names...>
// }
// }
func (iss *ACMEIssuer) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume issuer name
if d.NextArg() {
iss.CA = d.Val()
if d.NextArg() {
return d.ArgErr()
}
}
for d.NextBlock(0) {
switch d.Val() {
case "lifetime":
var lifetimeStr string
if !d.AllArgs(&lifetimeStr) {
return d.ArgErr()
}
lifetime, err := caddy.ParseDuration(lifetimeStr)
if err != nil {
return d.Errf("invalid lifetime %s: %v", lifetimeStr, err)
}
if lifetime < 0 {
return d.Errf("lifetime must be >= 0: %s", lifetime)
}
iss.CertificateLifetime = caddy.Duration(lifetime)
case "dir":
if iss.CA != "" {
return d.Errf("directory is already specified: %s", iss.CA)
}
if !d.AllArgs(&iss.CA) {
return d.ArgErr()
}
case "test_dir":
if !d.AllArgs(&iss.TestCA) {
return d.ArgErr()
}
case "email":
if !d.AllArgs(&iss.Email) {
return d.ArgErr()
}
case "timeout":
var timeoutStr string
if !d.AllArgs(&timeoutStr) {
return d.ArgErr()
}
timeout, err := caddy.ParseDuration(timeoutStr)
if err != nil {
return d.Errf("invalid timeout duration %s: %v", timeoutStr, err)
}
iss.ACMETimeout = caddy.Duration(timeout)
case "disable_http_challenge":
if d.NextArg() {
return d.ArgErr()
}
if iss.Challenges == nil {
iss.Challenges = new(ChallengesConfig)
}
if iss.Challenges.HTTP == nil {
iss.Challenges.HTTP = new(HTTPChallengeConfig)
}
iss.Challenges.HTTP.Disabled = true
case "disable_tlsalpn_challenge":
if d.NextArg() {
return d.ArgErr()
}
if iss.Challenges == nil {
iss.Challenges = new(ChallengesConfig)
}
if iss.Challenges.TLSALPN == nil {
iss.Challenges.TLSALPN = new(TLSALPNChallengeConfig)
}
iss.Challenges.TLSALPN.Disabled = true
case "alt_http_port":
if !d.NextArg() {
return d.ArgErr()
}
port, err := strconv.Atoi(d.Val())
if err != nil {
return d.Errf("invalid port %s: %v", d.Val(), err)
}
if iss.Challenges == nil {
iss.Challenges = new(ChallengesConfig)
}
if iss.Challenges.HTTP == nil {
iss.Challenges.HTTP = new(HTTPChallengeConfig)
}
iss.Challenges.HTTP.AlternatePort = port
case "alt_tlsalpn_port":
if !d.NextArg() {
return d.ArgErr()
}
port, err := strconv.Atoi(d.Val())
if err != nil {
return d.Errf("invalid port %s: %v", d.Val(), err)
}
if iss.Challenges == nil {
iss.Challenges = new(ChallengesConfig)
}
if iss.Challenges.TLSALPN == nil {
iss.Challenges.TLSALPN = new(TLSALPNChallengeConfig)
}
iss.Challenges.TLSALPN.AlternatePort = port
case "eab":
iss.ExternalAccount = new(acme.EAB)
if !d.AllArgs(&iss.ExternalAccount.KeyID, &iss.ExternalAccount.MACKey) {
return d.ArgErr()
}
case "trusted_roots":
iss.TrustedRootsPEMFiles = d.RemainingArgs()
case "dns":
if !d.NextArg() {
return d.ArgErr()
}
provName := d.Val()
if iss.Challenges == nil {
iss.Challenges = new(ChallengesConfig)
}
if iss.Challenges.DNS == nil {
iss.Challenges.DNS = new(DNSChallengeConfig)
}
unm, err := caddyfile.UnmarshalModule(d, "dns.providers."+provName)
if err != nil {
return err
}
iss.Challenges.DNS.ProviderRaw = caddyconfig.JSONModuleObject(unm, "name", provName, nil)
case "propagation_delay":
if !d.NextArg() {
return d.ArgErr()
}
delayStr := d.Val()
delay, err := caddy.ParseDuration(delayStr)
if err != nil {
return d.Errf("invalid propagation_delay duration %s: %v", delayStr, err)
}
if iss.Challenges == nil {
iss.Challenges = new(ChallengesConfig)
}
if iss.Challenges.DNS == nil {
iss.Challenges.DNS = new(DNSChallengeConfig)
}
iss.Challenges.DNS.PropagationDelay = caddy.Duration(delay)
case "propagation_timeout":
if !d.NextArg() {
return d.ArgErr()
}
timeoutStr := d.Val()
var timeout time.Duration
if timeoutStr == "-1" {
timeout = time.Duration(-1)
} else {
var err error
timeout, err = caddy.ParseDuration(timeoutStr)
if err != nil {
return d.Errf("invalid propagation_timeout duration %s: %v", timeoutStr, err)
}
}
if iss.Challenges == nil {
iss.Challenges = new(ChallengesConfig)
}
if iss.Challenges.DNS == nil {
iss.Challenges.DNS = new(DNSChallengeConfig)
}
iss.Challenges.DNS.PropagationTimeout = caddy.Duration(timeout)
case "resolvers":
if iss.Challenges == nil {
iss.Challenges = new(ChallengesConfig)
}
if iss.Challenges.DNS == nil {
iss.Challenges.DNS = new(DNSChallengeConfig)
}
iss.Challenges.DNS.Resolvers = d.RemainingArgs()
if len(iss.Challenges.DNS.Resolvers) == 0 {
return d.ArgErr()
}
case "dns_ttl":
if !d.NextArg() {
return d.ArgErr()
}
ttlStr := d.Val()
ttl, err := caddy.ParseDuration(ttlStr)
if err != nil {
return d.Errf("invalid dns_ttl duration %s: %v", ttlStr, err)
}
if iss.Challenges == nil {
iss.Challenges = new(ChallengesConfig)
}
if iss.Challenges.DNS == nil {
iss.Challenges.DNS = new(DNSChallengeConfig)
}
iss.Challenges.DNS.TTL = caddy.Duration(ttl)
case "dns_challenge_override_domain":
arg := d.RemainingArgs()
if len(arg) != 1 {
return d.ArgErr()
}
if iss.Challenges == nil {
iss.Challenges = new(ChallengesConfig)
}
if iss.Challenges.DNS == nil {
iss.Challenges.DNS = new(DNSChallengeConfig)
}
iss.Challenges.DNS.OverrideDomain = arg[0]
case "preferred_chains":
chainPref, err := ParseCaddyfilePreferredChainsOptions(d)
if err != nil {
return err
}
iss.PreferredChains = chainPref
default:
return d.Errf("unrecognized ACME issuer property: %s", d.Val())
}
}
return nil
}
func ParseCaddyfilePreferredChainsOptions(d *caddyfile.Dispenser) (*ChainPreference, error) {
chainPref := new(ChainPreference)
if d.NextArg() {
smallestOpt := d.Val()
if smallestOpt == "smallest" {
trueBool := true
chainPref.Smallest = &trueBool
if d.NextArg() { // Only one argument allowed
return nil, d.ArgErr()
}
if d.NextBlock(d.Nesting()) { // Don't allow other options when smallest == true
return nil, d.Err("No more options are accepted when using the 'smallest' option")
}
} else { // Smallest option should always be 'smallest' or unset
return nil, d.Errf("Invalid argument '%s'", smallestOpt)
}
}
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "root_common_name":
rootCommonNameOpt := d.RemainingArgs()
chainPref.RootCommonName = rootCommonNameOpt
if rootCommonNameOpt == nil {
return nil, d.ArgErr()
}
if chainPref.AnyCommonName != nil {
return nil, d.Err("Can't set root_common_name when any_common_name is already set")
}
case "any_common_name":
anyCommonNameOpt := d.RemainingArgs()
chainPref.AnyCommonName = anyCommonNameOpt
if anyCommonNameOpt == nil {
return nil, d.ArgErr()
}
if chainPref.RootCommonName != nil {
return nil, d.Err("Can't set any_common_name when root_common_name is already set")
}
default:
return nil, d.Errf("Received unrecognized parameter '%s'", d.Val())
}
}
if chainPref.Smallest == nil && chainPref.RootCommonName == nil && chainPref.AnyCommonName == nil {
return nil, d.Err("No options for preferred_chains received")
}
return chainPref, nil
}
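// Illustrative Caddyfile forms accepted by the parser above (the common names
// are placeholders):
//
//	preferred_chains smallest
//
//	preferred_chains {
//		root_common_name "Example Root CA"
//	}
//
//	preferred_chains {
//		any_common_name "Example Issuing CA 1" "Example Issuing CA 2"
//	}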
// ChainPreference describes the client's preferred certificate chain,
// useful if the CA offers alternate chains. The first matching chain
// will be selected.
type ChainPreference struct {
// Prefer chains with the fewest number of bytes.
Smallest *bool `json:"smallest,omitempty"`
// Select first chain having a root with one of
// these common names.
RootCommonName []string `json:"root_common_name,omitempty"`
// Select first chain that has any issuer with one
// of these common names.
AnyCommonName []string `json:"any_common_name,omitempty"`
}
// Interface guards
var (
_ certmagic.PreChecker = (*ACMEIssuer)(nil)
_ certmagic.Issuer = (*ACMEIssuer)(nil)
_ certmagic.Revoker = (*ACMEIssuer)(nil)
_ certmagic.RenewalInfoGetter = (*ACMEIssuer)(nil)
_ caddy.Provisioner = (*ACMEIssuer)(nil)
_ ConfigSetter = (*ACMEIssuer)(nil)
_ caddyfile.Unmarshaler = (*ACMEIssuer)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"net"
"strings"
"github.com/caddyserver/certmagic"
"github.com/mholt/acmez/v2"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
)
// AutomationConfig governs the automated management of TLS certificates.
type AutomationConfig struct {
// The list of automation policies. The first policy matching
// a certificate or subject name will be applied.
Policies []*AutomationPolicy `json:"policies,omitempty"`
// On-Demand TLS defers certificate operations to the
// moment they are needed, e.g. during a TLS handshake.
// Useful when you don't know all the hostnames at
// config-time, or when you are not in control of the
// domain names you are managing certificates for.
// In 2015, Caddy became the first web server to
// implement this experimental technology.
//
// Note that this field does not enable on-demand TLS;
// it only configures it for when it is used. To enable
// it, create an automation policy with `on_demand`.
OnDemand *OnDemandConfig `json:"on_demand,omitempty"`
// Caddy staples OCSP (and caches the response) for all
// qualifying certificates by default. This setting
// changes how often it scans responses for freshness,
// and updates them if they are getting stale. Default: 1h
OCSPCheckInterval caddy.Duration `json:"ocsp_interval,omitempty"`
// Every so often, Caddy will scan all loaded, managed
// certificates for expiration. This setting changes how
// frequently the scan for expiring certificates is
// performed. Default: 10m
RenewCheckInterval caddy.Duration `json:"renew_interval,omitempty"`
// How often to scan storage units for old or expired
// assets and remove them. These scans exert lots of
// reads (and list operations) on the storage module, so
// choose a longer interval for large deployments.
// Default: 24h
//
// Storage will always be cleaned when the process first
// starts. Then, a new cleaning will be started this
// duration after the previous cleaning started if the
// previous cleaning finished in less than half the time
// of this interval (otherwise next start will be skipped).
StorageCleanInterval caddy.Duration `json:"storage_clean_interval,omitempty"`
defaultPublicAutomationPolicy *AutomationPolicy
defaultInternalAutomationPolicy *AutomationPolicy // only initialized if necessary
}
// AutomationPolicy designates the policy for automating the
// management (obtaining, renewal, and revocation) of managed
// TLS certificates.
//
// An AutomationPolicy value is not valid until it has been
// provisioned; use the `AddAutomationPolicy()` method on the
// TLS app to properly provision a new policy.
type AutomationPolicy struct {
// Which subjects (hostnames or IP addresses) this policy applies to.
//
// This list is a filter, not a command. In other words, it is used
// only to filter whether this policy should apply to a subject that
// needs a certificate; it does NOT command the TLS app to manage a
// certificate for that subject. To have Caddy automate a certificate
// for specific subjects, use the "automate" certificate loader module
// of the TLS app.
SubjectsRaw []string `json:"subjects,omitempty"`
// The modules that may issue certificates. Default: internal if all
// subjects do not qualify for public certificates; otherwise acme and
// zerossl.
IssuersRaw []json.RawMessage `json:"issuers,omitempty" caddy:"namespace=tls.issuance inline_key=module"`
// Modules that can get a custom certificate to use for any
// given TLS handshake at handshake-time. Custom certificates
// can be useful if another entity is managing certificates
// and Caddy need only get it and serve it. Specifying a Manager
// enables on-demand TLS, i.e. it has the side-effect of setting
// the on_demand parameter to `true`.
//
// TODO: This is an EXPERIMENTAL feature. Subject to change or removal.
ManagersRaw []json.RawMessage `json:"get_certificate,omitempty" caddy:"namespace=tls.get_certificate inline_key=via"`
// If true, certificates will be requested with MustStaple. Not all
// CAs support this, and there are potentially serious consequences
// of enabling this feature without proper threat modeling.
MustStaple bool `json:"must_staple,omitempty"`
// How long before a certificate's expiration to try renewing it,
// as a function of its total lifetime. As a general and conservative
// rule, it is a good idea to renew a certificate when it has about
// 1/3 of its total lifetime remaining. This utilizes the majority
// of the certificate's lifetime while still saving time to
// troubleshoot problems. However, for extremely short-lived certs,
// you may wish to increase the ratio to ~1/2.
RenewalWindowRatio float64 `json:"renewal_window_ratio,omitempty"`
// The type of key to generate for certificates.
// Supported values: `ed25519`, `p256`, `p384`, `rsa2048`, `rsa4096`.
KeyType string `json:"key_type,omitempty"`
// Optionally configure a separate storage module associated with this
// manager, instead of using Caddy's global/default-configured storage.
StorageRaw json.RawMessage `json:"storage,omitempty" caddy:"namespace=caddy.storage inline_key=module"`
// If true, certificates will be managed "on demand"; that is, during
// TLS handshakes or when needed, as opposed to at startup or config
// load. This enables On-Demand TLS for this policy.
OnDemand bool `json:"on_demand,omitempty"`
// If true, private keys already existing in storage
// will be reused. Otherwise, a new key will be
// created for every new certificate to mitigate
// pinning and reduce the scope of key compromise.
// TEMPORARY: Key pinning is against industry best practices.
// This property will likely be removed in the future.
// Do not rely on it forever; watch the release notes.
ReusePrivateKeys bool `json:"reuse_private_keys,omitempty"`
// Disables OCSP stapling. Disabling OCSP stapling puts clients at
// greater risk, reduces their privacy, and usually lowers client
// performance. It is NOT recommended to disable this unless you
// are able to justify the costs.
// EXPERIMENTAL. Subject to change.
DisableOCSPStapling bool `json:"disable_ocsp_stapling,omitempty"`
// Overrides the URLs of OCSP responders embedded in certificates.
// Each key is an OCSP server URL to override, and its value is the
// replacement. An empty value will disable querying of that server.
// EXPERIMENTAL. Subject to change.
OCSPOverrides map[string]string `json:"ocsp_overrides,omitempty"`
// Issuers and Managers store the decoded issuer and manager modules;
// they are only used to populate an underlying certmagic.Config's
// fields during provisioning so that the modules can survive a
// re-provisioning.
Issuers []certmagic.Issuer `json:"-"`
Managers []certmagic.Manager `json:"-"`
subjects []string
magic *certmagic.Config
storage certmagic.Storage
// Whether this policy had explicit managers configured directly on it.
hadExplicitManagers bool
}
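// A minimal illustrative JSON sketch of one automation policy (the subject and
// email are placeholders); the field names follow the struct tags above:
//
//	{
//		"subjects": ["example.com"],
//		"issuers": [
//			{"module": "acme", "email": "admin@example.com"}
//		]
//	}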
// Provision sets up ap and builds its underlying CertMagic config.
func (ap *AutomationPolicy) Provision(tlsApp *TLS) error {
// replace placeholders in subjects to allow environment variables
repl := caddy.NewReplacer()
subjects := make([]string, len(ap.SubjectsRaw))
for i, sub := range ap.SubjectsRaw {
subjects[i] = repl.ReplaceAll(sub, "")
}
ap.subjects = subjects
// policy-specific storage implementation
if ap.StorageRaw != nil {
val, err := tlsApp.ctx.LoadModule(ap, "StorageRaw")
if err != nil {
return fmt.Errorf("loading TLS storage module: %v", err)
}
cmStorage, err := val.(caddy.StorageConverter).CertMagicStorage()
if err != nil {
return fmt.Errorf("creating TLS storage configuration: %v", err)
}
ap.storage = cmStorage
}
// we don't store loaded modules directly in the certmagic config since
// policy provisioning may happen more than once (during auto-HTTPS) and
// loading a module clears its config bytes; thus, load the module and
// store them on the policy before putting it on the config
// load and provision any cert manager modules
if ap.ManagersRaw != nil {
ap.hadExplicitManagers = true
vals, err := tlsApp.ctx.LoadModule(ap, "ManagersRaw")
if err != nil {
return fmt.Errorf("loading external certificate manager modules: %v", err)
}
for _, getCertVal := range vals.([]any) {
ap.Managers = append(ap.Managers, getCertVal.(certmagic.Manager))
}
}
// load and provision any explicitly-configured issuer modules
if ap.IssuersRaw != nil {
val, err := tlsApp.ctx.LoadModule(ap, "IssuersRaw")
if err != nil {
return fmt.Errorf("loading TLS automation management module: %s", err)
}
for _, issVal := range val.([]any) {
ap.Issuers = append(ap.Issuers, issVal.(certmagic.Issuer))
}
}
issuers := ap.Issuers
if len(issuers) == 0 {
var err error
issuers, err = DefaultIssuersProvisioned(tlsApp.ctx)
if err != nil {
return err
}
}
keyType := ap.KeyType
if keyType != "" {
var err error
keyType, err = caddy.NewReplacer().ReplaceOrErr(ap.KeyType, true, true)
if err != nil {
return fmt.Errorf("invalid key type %s: %s", ap.KeyType, err)
}
if _, ok := supportedCertKeyTypes[keyType]; !ok {
return fmt.Errorf("unrecognized key type: %s", keyType)
}
}
keySource := certmagic.StandardKeyGenerator{
KeyType: supportedCertKeyTypes[keyType],
}
storage := ap.storage
if storage == nil {
storage = tlsApp.ctx.Storage()
}
// on-demand TLS
var ond *certmagic.OnDemandConfig
if ap.OnDemand || len(ap.Managers) > 0 {
// permission module is now required after a number of negligence cases that allowed abuse;
// but it may still be optional for explicit subjects (bounded, non-wildcard), for the
// internal issuer since it doesn't cause public PKI pressure on ACME servers; subtly, it
// is useful to allow on-demand TLS to be enabled so Managers can be used, but to still
// prevent issuance from Issuers (when Managers don't provide a certificate) if there's no
// permission module configured
noProtections := ap.isWildcardOrDefault() && !ap.onlyInternalIssuer() && (tlsApp.Automation == nil || tlsApp.Automation.OnDemand == nil || tlsApp.Automation.OnDemand.permission == nil)
failClosed := noProtections && !ap.hadExplicitManagers // don't allow on-demand issuance (other than implicit managers) if no managers have been explicitly configured
if noProtections {
if !ap.hadExplicitManagers {
// no managers, no explicitly-configured permission module, this is a config error
return fmt.Errorf("on-demand TLS cannot be enabled without a permission module to prevent abuse; please refer to documentation for details")
}
// allow on-demand to be enabled but only for the purpose of the Managers; issuance won't be allowed from Issuers
tlsApp.logger.Warn("on-demand TLS can only get certificates from the configured external manager(s) because no ask endpoint / permission module is specified")
}
ond = &certmagic.OnDemandConfig{
DecisionFunc: func(ctx context.Context, name string) error {
if failClosed {
return fmt.Errorf("no permission module configured; certificates not allowed except from external Managers")
}
if tlsApp.Automation == nil || tlsApp.Automation.OnDemand == nil {
return nil
}
// logging the remote IP can be useful for servers that want to count
// attempts from clients to detect patterns of abuse -- it should NOT be
// used solely for decision making, however
var remoteIP string
if hello, ok := ctx.Value(certmagic.ClientHelloInfoCtxKey).(*tls.ClientHelloInfo); ok && hello != nil {
if remote := hello.Conn.RemoteAddr(); remote != nil {
remoteIP, _, _ = net.SplitHostPort(remote.String())
}
}
tlsApp.logger.Debug("asking for permission for on-demand certificate",
zap.String("remote_ip", remoteIP),
zap.String("domain", name))
// ask the permission module if this cert is allowed
if err := tlsApp.Automation.OnDemand.permission.CertificateAllowed(ctx, name); err != nil {
// distinguish true errors from denials, because it's important to elevate actual errors
if errors.Is(err, ErrPermissionDenied) {
tlsApp.logger.Debug("on-demand certificate issuance denied",
zap.String("domain", name),
zap.Error(err))
} else {
tlsApp.logger.Error("failed to get permission for on-demand certificate",
zap.String("domain", name),
zap.Error(err))
}
return err
}
// check the rate limiter last because
// doing so makes a reservation
if !onDemandRateLimiter.Allow() {
return fmt.Errorf("on-demand rate limit exceeded")
}
return nil
},
Managers: ap.Managers,
}
}
template := certmagic.Config{
MustStaple: ap.MustStaple,
RenewalWindowRatio: ap.RenewalWindowRatio,
KeySource: keySource,
OnEvent: tlsApp.onEvent,
OnDemand: ond,
ReusePrivateKeys: ap.ReusePrivateKeys,
OCSP: certmagic.OCSPConfig{
DisableStapling: ap.DisableOCSPStapling,
ResponderOverrides: ap.OCSPOverrides,
},
Storage: storage,
Issuers: issuers,
Logger: tlsApp.logger,
}
certCacheMu.RLock()
ap.magic = certmagic.New(certCache, template)
certCacheMu.RUnlock()
// sometimes issuers may need the parent certmagic.Config in
// order to function properly (for example, ACMEIssuer needs
// access to the correct storage and cache so it can solve
// ACME challenges -- it's an annoying, inelegant circular
// dependency that I don't know how to resolve nicely!)
for _, issuer := range ap.magic.Issuers {
if annoying, ok := issuer.(ConfigSetter); ok {
annoying.SetConfig(ap.magic)
}
}
return nil
}
// Subjects returns the list of subjects with all placeholders replaced.
func (ap *AutomationPolicy) Subjects() []string {
return ap.subjects
}
// AllInternalSubjects returns true if all the subjects on this policy are internal.
func (ap *AutomationPolicy) AllInternalSubjects() bool {
for _, subj := range ap.subjects {
if !certmagic.SubjectIsInternal(subj) {
return false
}
}
return true
}
func (ap *AutomationPolicy) onlyInternalIssuer() bool {
if len(ap.Issuers) != 1 {
return false
}
_, ok := ap.Issuers[0].(*InternalIssuer)
return ok
}
// isWildcardOrDefault determines if the subjects include any wildcard domains,
// or is the "default" policy (i.e. no subjects) which is unbounded.
func (ap *AutomationPolicy) isWildcardOrDefault() bool {
isWildcardOrDefault := false
if len(ap.subjects) == 0 {
isWildcardOrDefault = true
}
for _, sub := range ap.subjects {
if strings.HasPrefix(sub, "*") {
isWildcardOrDefault = true
break
}
}
return isWildcardOrDefault
}
// DefaultIssuers returns empty Issuers (not provisioned) to be used as defaults.
// This function is experimental and has no compatibility promises.
func DefaultIssuers(userEmail string) []certmagic.Issuer {
issuers := []certmagic.Issuer{new(ACMEIssuer)}
if strings.TrimSpace(userEmail) != "" {
issuers = append(issuers, &ACMEIssuer{
CA: certmagic.ZeroSSLProductionCA,
Email: userEmail,
})
}
return issuers
}
// DefaultIssuersProvisioned returns empty but provisioned default Issuers from
// DefaultIssuers(). This function is experimental and has no compatibility promises.
func DefaultIssuersProvisioned(ctx caddy.Context) ([]certmagic.Issuer, error) {
issuers := DefaultIssuers("")
for i, iss := range issuers {
if prov, ok := iss.(caddy.Provisioner); ok {
err := prov.Provision(ctx)
if err != nil {
return nil, fmt.Errorf("provisioning default issuer %d: %T: %v", i, iss, err)
}
}
}
return issuers, nil
}
// ChallengesConfig configures the ACME challenges.
type ChallengesConfig struct {
// HTTP configures the ACME HTTP challenge. This
// challenge is enabled and used automatically
// and by default.
HTTP *HTTPChallengeConfig `json:"http,omitempty"`
// TLSALPN configures the ACME TLS-ALPN challenge.
// This challenge is enabled and used automatically
// and by default.
TLSALPN *TLSALPNChallengeConfig `json:"tls-alpn,omitempty"`
// Configures the ACME DNS challenge. Because this
// challenge typically requires credentials for
// interfacing with a DNS provider, this challenge is
// not enabled by default. This is the only challenge
// type which does not require a direct connection
// to Caddy from an external server.
//
// NOTE: DNS providers are currently being upgraded,
// and this API is subject to change, but should be
// stabilized soon.
DNS *DNSChallengeConfig `json:"dns,omitempty"`
// Optionally customize the host to which a listener
// is bound if required for solving a challenge.
BindHost string `json:"bind_host,omitempty"`
}
// HTTPChallengeConfig configures the ACME HTTP challenge.
type HTTPChallengeConfig struct {
// If true, the HTTP challenge will be disabled.
Disabled bool `json:"disabled,omitempty"`
// An alternate port on which to service this
// challenge. Note that the HTTP challenge port is
// hard-coded into the spec and cannot be changed,
// so you would have to forward packets from the
// standard HTTP challenge port to this one.
AlternatePort int `json:"alternate_port,omitempty"`
}
// TLSALPNChallengeConfig configures the ACME TLS-ALPN challenge.
type TLSALPNChallengeConfig struct {
// If true, the TLS-ALPN challenge will be disabled.
Disabled bool `json:"disabled,omitempty"`
// An alternate port on which to service this
// challenge. Note that the TLS-ALPN challenge port
// is hard-coded into the spec and cannot be changed,
// so you would have to forward packets from the
// standard TLS-ALPN challenge port to this one.
AlternatePort int `json:"alternate_port,omitempty"`
}
// DNSChallengeConfig configures the ACME DNS challenge.
//
// NOTE: This API is still experimental and is subject to change.
type DNSChallengeConfig struct {
// The DNS provider module to use which will manage
// the DNS records relevant to the ACME challenge.
// Required.
ProviderRaw json.RawMessage `json:"provider,omitempty" caddy:"namespace=dns.providers inline_key=name"`
// The TTL of the TXT record used for the DNS challenge.
TTL caddy.Duration `json:"ttl,omitempty"`
// How long to wait before starting propagation checks.
// Default: 0 (no wait).
PropagationDelay caddy.Duration `json:"propagation_delay,omitempty"`
// Maximum time to wait for temporary DNS record to appear.
// Set to -1 to disable propagation checks.
// Default: 2 minutes.
PropagationTimeout caddy.Duration `json:"propagation_timeout,omitempty"`
// Custom DNS resolvers to prefer over system/built-in defaults.
// Often necessary to configure when using split-horizon DNS.
Resolvers []string `json:"resolvers,omitempty"`
// Override the domain to use for the DNS challenge. This
// is to delegate the challenge to a different domain,
// e.g. one that updates faster or one with a provider API.
OverrideDomain string `json:"override_domain,omitempty"`
solver acmez.Solver
}
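// A minimal illustrative JSON sketch of a DNS challenge config (the provider
// name is a hypothetical placeholder; real provider modules are plugins in the
// dns.providers namespace):
//
//	"dns": {
//		"provider": {"name": "exampledns"},
//		"ttl": "1m",
//		"propagation_timeout": "2m"
//	}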
// ConfigSetter is implemented by certmagic.Issuers that
// need access to a parent certmagic.Config as part of
// their provisioning phase. For example, the ACMEIssuer
// requires a config so it can access storage and the
// cache to solve ACME challenges.
type ConfigSetter interface {
SetConfig(cfg *certmagic.Config)
}
package caddytls
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"reflect"
"github.com/caddyserver/certmagic"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddypki"
)
func init() {
caddy.RegisterModule(InlineCAPool{})
caddy.RegisterModule(FileCAPool{})
caddy.RegisterModule(PKIRootCAPool{})
caddy.RegisterModule(PKIIntermediateCAPool{})
caddy.RegisterModule(StoragePool{})
caddy.RegisterModule(HTTPCertPool{})
}
// CA is the interface to be implemented by all guest modules
// in the 'tls.ca_pool.source' namespace.
type CA interface {
CertPool() *x509.CertPool
}
// InlineCAPool is a certificate authority pool provider whose
// trusted roots come from DER-encoded certificates in the config.
type InlineCAPool struct {
// A list of base64 DER-encoded CA certificates
// against which to validate client certificates.
// Client certs which are not signed by any of
// these CAs will be rejected.
TrustedCACerts []string `json:"trusted_ca_certs,omitempty"`
pool *x509.CertPool
}
// CaddyModule implements caddy.Module.
func (icp InlineCAPool) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.ca_pool.source.inline",
New: func() caddy.Module {
return new(InlineCAPool)
},
}
}
// Provision implements caddy.Provisioner.
func (icp *InlineCAPool) Provision(ctx caddy.Context) error {
caPool := x509.NewCertPool()
for i, clientCAString := range icp.TrustedCACerts {
clientCA, err := decodeBase64DERCert(clientCAString)
if err != nil {
return fmt.Errorf("parsing certificate at index %d: %v", i, err)
}
caPool.AddCert(clientCA)
}
icp.pool = caPool
return nil
}
// Syntax:
//
// trust_pool inline {
// trust_der <base64_der_cert>...
// }
//
// The 'trust_der' directive can be specified multiple times.
func (icp *InlineCAPool) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume module name
if d.CountRemainingArgs() > 0 {
return d.ArgErr()
}
for d.NextBlock(0) {
switch d.Val() {
case "trust_der":
icp.TrustedCACerts = append(icp.TrustedCACerts, d.RemainingArgs()...)
default:
return d.Errf("unrecognized directive: %s", d.Val())
}
}
if len(icp.TrustedCACerts) == 0 {
return d.Err("no certificates specified")
}
return nil
}
// CertPool implements CA.
func (icp InlineCAPool) CertPool() *x509.CertPool {
return icp.pool
}
// FileCAPool generates a pool of trusted root certificates from the designated PEM files.
type FileCAPool struct {
// TrustedCACertPEMFiles is a list of PEM file names
// from which to load certificates of trusted CAs.
// Client certificates which are not signed by any of
// these CA certificates will be rejected.
TrustedCACertPEMFiles []string `json:"pem_files,omitempty"`
pool *x509.CertPool
}
// CaddyModule implements caddy.Module.
func (FileCAPool) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.ca_pool.source.file",
New: func() caddy.Module {
return new(FileCAPool)
},
}
}
// Provision loads and decodes the designated PEM files to generate the certificate pool.
func (f *FileCAPool) Provision(ctx caddy.Context) error {
caPool := x509.NewCertPool()
for _, pemFile := range f.TrustedCACertPEMFiles {
pemContents, err := os.ReadFile(pemFile)
if err != nil {
return fmt.Errorf("reading %s: %v", pemFile, err)
}
caPool.AppendCertsFromPEM(pemContents)
}
f.pool = caPool
return nil
}
// Syntax:
//
// trust_pool file [<pem_file>...] {
// pem_file <pem_file>...
// }
//
// The 'pem_file' directive can be specified multiple times.
func (fcap *FileCAPool) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume module name
fcap.TrustedCACertPEMFiles = append(fcap.TrustedCACertPEMFiles, d.RemainingArgs()...)
for d.NextBlock(0) {
switch d.Val() {
case "pem_file":
fcap.TrustedCACertPEMFiles = append(fcap.TrustedCACertPEMFiles, d.RemainingArgs()...)
default:
return d.Errf("unrecognized directive: %s", d.Val())
}
}
if len(fcap.TrustedCACertPEMFiles) == 0 {
return d.Err("no certificates specified")
}
return nil
}
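// CertPool implements CA.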
func (f FileCAPool) CertPool() *x509.CertPool {
return f.pool
}
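// For illustration only: an assumed JSON fragment selecting this provider
// (the file path is a placeholder):
//
// {
// "provider": "file",
// "pem_files": ["/etc/ssl/trusted-ca.pem"]
// }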
// PKIRootCAPool extracts the trusted root certificates from Caddy's native 'pki' app
type PKIRootCAPool struct {
// The list of authority names, as configured in the `pki` app, whose root certificates are trusted.
Authority []string `json:"authority,omitempty"`
ca []*caddypki.CA
pool *x509.CertPool
}
// CaddyModule implements caddy.Module.
func (PKIRootCAPool) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.ca_pool.source.pki_root",
New: func() caddy.Module {
return new(PKIRootCAPool)
},
}
}
// Provision loads the PKI app and adds the root certificates of the configured authorities to the certificate pool.
func (p *PKIRootCAPool) Provision(ctx caddy.Context) error {
pkiApp, err := ctx.AppIfConfigured("pki")
if err != nil {
return fmt.Errorf("pki_root CA pool requires that a PKI app is configured: %v", err)
}
pki := pkiApp.(*caddypki.PKI)
for _, caID := range p.Authority {
c, err := pki.GetCA(ctx, caID)
if err != nil || c == nil {
return fmt.Errorf("getting CA %s: %v", caID, err)
}
p.ca = append(p.ca, c)
}
caPool := x509.NewCertPool()
for _, ca := range p.ca {
caPool.AddCert(ca.RootCertificate())
}
p.pool = caPool
return nil
}
// Syntax:
//
// trust_pool pki_root [<ca_name>...] {
// authority <ca_name>...
// }
//
// The 'authority' directive can be specified multiple times.
func (pkir *PKIRootCAPool) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume module name
pkir.Authority = append(pkir.Authority, d.RemainingArgs()...)
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "authority":
pkir.Authority = append(pkir.Authority, d.RemainingArgs()...)
default:
return d.Errf("unrecognized directive: %s", d.Val())
}
}
if len(pkir.Authority) == 0 {
return d.Err("no authorities specified")
}
return nil
}
// CertPool returns the certificate pool generated from the root certificates of the PKI app.
func (p PKIRootCAPool) CertPool() *x509.CertPool {
return p.pool
}
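// For illustration only: an assumed JSON fragment selecting this provider,
// where "local" stands in for an authority name configured in the `pki` app:
//
// {
// "provider": "pki_root",
// "authority": ["local"]
// }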
// PKIIntermediateCAPool extracts the trusted intermediate certificates from Caddy's native 'pki' app
type PKIIntermediateCAPool struct {
// The list of authority names, as configured in the `pki` app, whose intermediate certificates are trusted.
Authority []string `json:"authority,omitempty"`
ca []*caddypki.CA
pool *x509.CertPool
}
// CaddyModule implements caddy.Module.
func (PKIIntermediateCAPool) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.ca_pool.source.pki_intermediate",
New: func() caddy.Module {
return new(PKIIntermediateCAPool)
},
}
}
// Provision loads the PKI app and adds the intermediate certificates of the configured authorities to the certificate pool.
func (p *PKIIntermediateCAPool) Provision(ctx caddy.Context) error {
pkiApp, err := ctx.AppIfConfigured("pki")
if err != nil {
return fmt.Errorf("pki_intermediate CA pool requires that a PKI app is configured: %v", err)
}
pki := pkiApp.(*caddypki.PKI)
for _, caID := range p.Authority {
c, err := pki.GetCA(ctx, caID)
if err != nil || c == nil {
return fmt.Errorf("getting CA %s: %v", caID, err)
}
p.ca = append(p.ca, c)
}
caPool := x509.NewCertPool()
for _, ca := range p.ca {
caPool.AddCert(ca.IntermediateCertificate())
}
p.pool = caPool
return nil
}
// Syntax:
//
// trust_pool pki_intermediate [<ca_name>...] {
// authority <ca_name>...
// }
//
// The 'authority' directive can be specified multiple times.
func (pic *PKIIntermediateCAPool) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume module name
pic.Authority = append(pic.Authority, d.RemainingArgs()...)
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "authority":
pic.Authority = append(pic.Authority, d.RemainingArgs()...)
default:
return d.Errf("unrecognized directive: %s", d.Val())
}
}
if len(pic.Authority) == 0 {
return d.Err("no authorities specified")
}
return nil
}
// CertPool returns the certificate pool generated from the intermediate certificates of the PKI app.
func (p PKIIntermediateCAPool) CertPool() *x509.CertPool {
return p.pool
}
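// For illustration only, mirroring the pki_root sketch above with an assumed
// authority name:
//
// {
// "provider": "pki_intermediate",
// "authority": ["local"]
// }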
// StoragePool extracts trusted root certificates from Caddy storage.
type StoragePool struct {
// The storage module in which the trusted root certificates are stored. If no
// storage module is specified explicitly, Caddy's default/global storage is used.
StorageRaw json.RawMessage `json:"storage,omitempty" caddy:"namespace=caddy.storage inline_key=module"`
// The storage key/index to the location of the certificates
PEMKeys []string `json:"pem_keys,omitempty"`
storage certmagic.Storage
pool *x509.CertPool
}
// CaddyModule implements caddy.Module.
func (StoragePool) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.ca_pool.source.storage",
New: func() caddy.Module {
return new(StoragePool)
},
}
}
// Provision implements caddy.Provisioner.
func (ca *StoragePool) Provision(ctx caddy.Context) error {
if ca.StorageRaw != nil {
val, err := ctx.LoadModule(ca, "StorageRaw")
if err != nil {
return fmt.Errorf("loading storage module: %v", err)
}
cmStorage, err := val.(caddy.StorageConverter).CertMagicStorage()
if err != nil {
return fmt.Errorf("creating storage configuration: %v", err)
}
ca.storage = cmStorage
}
if ca.storage == nil {
ca.storage = ctx.Storage()
}
if len(ca.PEMKeys) == 0 {
return fmt.Errorf("no PEM keys specified")
}
caPool := x509.NewCertPool()
for _, caID := range ca.PEMKeys {
bs, err := ca.storage.Load(ctx, caID)
if err != nil {
return fmt.Errorf("error loading cert '%s' from storage: %s", caID, err)
}
if !caPool.AppendCertsFromPEM(bs) {
return fmt.Errorf("failed to add certificate '%s' to pool", caID)
}
}
ca.pool = caPool
return nil
}
// Syntax:
//
// trust_pool storage [<storage_keys>...] {
// storage <storage_module>
// keys <storage_keys>...
// }
//
// The 'keys' directive can be specified multiple times.
// The 'storage' directive is optional and defaults to the default storage module.
func (sp *StoragePool) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume module name
sp.PEMKeys = append(sp.PEMKeys, d.RemainingArgs()...)
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "storage":
if sp.StorageRaw != nil {
return d.Err("storage module already set")
}
if !d.NextArg() {
return d.ArgErr()
}
modStem := d.Val()
modID := "caddy.storage." + modStem
unm, err := caddyfile.UnmarshalModule(d, modID)
if err != nil {
return err
}
storage, ok := unm.(caddy.StorageConverter)
if !ok {
return d.Errf("module %s is not a caddy.StorageConverter", modID)
}
sp.StorageRaw = caddyconfig.JSONModuleObject(storage, "module", modStem, nil)
case "keys":
sp.PEMKeys = append(sp.PEMKeys, d.RemainingArgs()...)
default:
return d.Errf("unrecognized directive: %s", d.Val())
}
}
return nil
}
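// CertPool implements CA.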
func (p StoragePool) CertPool() *x509.CertPool {
return p.pool
}
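// For illustration only: an assumed JSON fragment selecting this provider.
// The storage key is a placeholder, and omitting "storage" falls back to
// Caddy's default storage as described in Provision above:
//
// {
// "provider": "storage",
// "pem_keys": ["path/to/trusted-roots.pem"]
// }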
// TLSConfig holds configuration related to the TLS settings for the
// transport/client.
// Copied, with minor modifications, from modules/caddyhttp/reverseproxy/httptransport.go.
type TLSConfig struct {
// The guest module that provides the trusted certificate authority (CA) certificates.
CARaw json.RawMessage `json:"ca,omitempty" caddy:"namespace=tls.ca_pool.source inline_key=provider"`
// If true, TLS verification of server certificates will be disabled.
// This is insecure and may be removed in the future. Do not use this
// option except in testing or local development environments.
InsecureSkipVerify bool `json:"insecure_skip_verify,omitempty"`
// The duration to allow a TLS handshake to a server. Default: No timeout.
HandshakeTimeout caddy.Duration `json:"handshake_timeout,omitempty"`
// The server name used when verifying the certificate received in the TLS
// handshake. By default, this will use the upstream address' host part.
// You only need to override this if your upstream address does not match the
// certificate the upstream is likely to use. For example if the upstream
// address is an IP address, then you would need to configure this to the
// hostname being served by the upstream server. Currently, this does not
// support placeholders because the TLS config is not provisioned on each
// connection, so a static value must be used.
ServerName string `json:"server_name,omitempty"`
// TLS renegotiation level. TLS renegotiation is the act of performing
// subsequent handshakes on a connection after the first.
// The level can be:
// - "never": (the default) disables renegotiation.
// - "once": allows a remote server to request renegotiation once per connection.
// - "freely": allows a remote server to repeatedly request renegotiation.
Renegotiation string `json:"renegotiation,omitempty"`
}
func (t *TLSConfig) unmarshalCaddyfile(d *caddyfile.Dispenser) error {
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "ca":
if !d.NextArg() {
return d.ArgErr()
}
modStem := d.Val()
modID := "tls.ca_pool.source." + modStem
unm, err := caddyfile.UnmarshalModule(d, modID)
if err != nil {
return err
}
ca, ok := unm.(CA)
if !ok {
return d.Errf("module %s is not a caddytls.CA", modID)
}
t.CARaw = caddyconfig.JSONModuleObject(ca, "provider", modStem, nil)
case "insecure_skip_verify":
t.InsecureSkipVerify = true
case "handshake_timeout":
if !d.NextArg() {
return d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return d.Errf("bad timeout value '%s': %v", d.Val(), err)
}
t.HandshakeTimeout = caddy.Duration(dur)
case "server_name":
if !d.Args(&t.ServerName) {
return d.ArgErr()
}
case "renegotiation":
if !d.Args(&t.Renegotiation) {
return d.ArgErr()
}
switch t.Renegotiation {
case "never", "once", "freely":
continue
default:
t.Renegotiation = ""
return d.Errf("unrecognized renegotiation level: %s", t.Renegotiation)
}
default:
return d.Errf("unrecognized directive: %s", d.Val())
}
}
return nil
}
// makeTLSClientConfig returns a tls.Config usable by a client to a backend.
// If there is no custom TLS configuration, a nil config may be returned.
// Copied, with minor modifications, from modules/caddyhttp/reverseproxy/httptransport.go.
func (t *TLSConfig) makeTLSClientConfig(ctx caddy.Context) (*tls.Config, error) {
repl := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
if repl == nil {
repl = caddy.NewReplacer()
}
cfg := new(tls.Config)
if t.CARaw != nil {
caRaw, err := ctx.LoadModule(t, "CARaw")
if err != nil {
return nil, err
}
ca := caRaw.(CA)
cfg.RootCAs = ca.CertPool()
}
// Renegotiation
switch t.Renegotiation {
case "never", "":
cfg.Renegotiation = tls.RenegotiateNever
case "once":
cfg.Renegotiation = tls.RenegotiateOnceAsClient
case "freely":
cfg.Renegotiation = tls.RenegotiateFreelyAsClient
default:
return nil, fmt.Errorf("invalid TLS renegotiation level: %v", t.Renegotiation)
}
// override for the server name used to verify the certificate in the TLS handshake
cfg.ServerName = repl.ReplaceKnown(t.ServerName, "")
// throw all security out the window
cfg.InsecureSkipVerify = t.InsecureSkipVerify
// only return a config if it's not empty
if reflect.DeepEqual(cfg, new(tls.Config)) {
return nil, nil
}
return cfg, nil
}
// HTTPCertPool fetches the trusted root certificates from HTTP(S)
// endpoints. The TLS connection properties can be customized, including the
// trusted root certificates used for the fetch itself. One example use of this
// module is to fetch the trusted certificates from another Caddy instance that
// is running the PKI app and ACME server.
type HTTPCertPool struct {
// The list of URLs that respond with PEM-encoded certificates to trust.
Endpoints []string `json:"endpoints,omitempty"`
// Customizes the TLS connection settings used during the HTTP calls.
TLS *TLSConfig `json:"tls,omitempty"`
pool *x509.CertPool
}
// CaddyModule implements caddy.Module.
func (HTTPCertPool) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.ca_pool.source.http",
New: func() caddy.Module {
return new(HTTPCertPool)
},
}
}
// Provision implements caddy.Provisioner.
func (hcp *HTTPCertPool) Provision(ctx caddy.Context) error {
caPool := x509.NewCertPool()
customTransport := http.DefaultTransport.(*http.Transport).Clone()
if hcp.TLS != nil {
tlsConfig, err := hcp.TLS.makeTLSClientConfig(ctx)
if err != nil {
return err
}
customTransport.TLSClientConfig = tlsConfig
}
httpClient := *http.DefaultClient
httpClient.Transport = customTransport
for _, uri := range hcp.Endpoints {
req, err := http.NewRequestWithContext(ctx, http.MethodGet, uri, nil)
if err != nil {
return err
}
res, err := httpClient.Do(req)
if err != nil {
return err
}
pembs, err := io.ReadAll(res.Body)
res.Body.Close()
if err != nil {
return err
}
if !caPool.AppendCertsFromPEM(pembs) {
return fmt.Errorf("failed to add certs from URL: %s", uri)
}
}
hcp.pool = caPool
return nil
}
// Syntax:
//
// trust_pool http [<endpoints...>] {
// endpoints <endpoints...>
// tls <tls_config>
// }
//
// tls_config:
//
// ca <ca_module>
// insecure_skip_verify
// handshake_timeout <duration>
// server_name <name>
// renegotiation <never|once|freely>
//
// <ca_module> is the name of the CA module to source the trust
// certificate pool and follows the syntax of the named CA module.
func (hcp *HTTPCertPool) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume module name
hcp.Endpoints = append(hcp.Endpoints, d.RemainingArgs()...)
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "endpoints":
if d.CountRemainingArgs() == 0 {
return d.ArgErr()
}
hcp.Endpoints = append(hcp.Endpoints, d.RemainingArgs()...)
case "tls":
if hcp.TLS != nil {
return d.Err("tls block already defined")
}
hcp.TLS = new(TLSConfig)
if err := hcp.TLS.unmarshalCaddyfile(d); err != nil {
return err
}
default:
return d.Errf("unrecognized directive: %s", d.Val())
}
}
return nil
}
// Validate reports an error if any of the configured endpoints is not a valid URL.
func (hcp HTTPCertPool) Validate() (err error) {
for _, u := range hcp.Endpoints {
_, e := url.Parse(u)
if e != nil {
err = errors.Join(err, e)
}
}
return err
}
// CertPool returns the certificate pool generated from the HTTP responses.
func (hcp HTTPCertPool) CertPool() *x509.CertPool {
return hcp.pool
}
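// For illustration only: an assumed Caddyfile fragment using this provider to
// fetch roots from another server (the URL and timeout are placeholders):
//
// trust_pool http https://ca.internal/roots.pem {
// tls {
// handshake_timeout 10s
// }
// }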
var (
_ caddy.Module = (*InlineCAPool)(nil)
_ caddy.Provisioner = (*InlineCAPool)(nil)
_ CA = (*InlineCAPool)(nil)
_ caddyfile.Unmarshaler = (*InlineCAPool)(nil)
_ caddy.Module = (*FileCAPool)(nil)
_ caddy.Provisioner = (*FileCAPool)(nil)
_ CA = (*FileCAPool)(nil)
_ caddyfile.Unmarshaler = (*FileCAPool)(nil)
_ caddy.Module = (*PKIRootCAPool)(nil)
_ caddy.Provisioner = (*PKIRootCAPool)(nil)
_ CA = (*PKIRootCAPool)(nil)
_ caddyfile.Unmarshaler = (*PKIRootCAPool)(nil)
_ caddy.Module = (*PKIIntermediateCAPool)(nil)
_ caddy.Provisioner = (*PKIIntermediateCAPool)(nil)
_ CA = (*PKIIntermediateCAPool)(nil)
_ caddyfile.Unmarshaler = (*PKIIntermediateCAPool)(nil)
_ caddy.Module = (*StoragePool)(nil)
_ caddy.Provisioner = (*StoragePool)(nil)
_ CA = (*StoragePool)(nil)
_ caddyfile.Unmarshaler = (*StoragePool)(nil)
_ caddy.Module = (*HTTPCertPool)(nil)
_ caddy.Provisioner = (*HTTPCertPool)(nil)
_ caddy.Validator = (*HTTPCertPool)(nil)
_ CA = (*HTTPCertPool)(nil)
_ caddyfile.Unmarshaler = (*HTTPCertPool)(nil)
)
package caddytls
import (
"context"
"crypto/tls"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"github.com/caddyserver/certmagic"
"github.com/tailscale/tscert"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
func init() {
caddy.RegisterModule(Tailscale{})
caddy.RegisterModule(HTTPCertGetter{})
}
// Tailscale is a module that can get certificates from the local Tailscale process.
type Tailscale struct {
logger *zap.Logger
}
// CaddyModule returns the Caddy module information.
func (Tailscale) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.get_certificate.tailscale",
New: func() caddy.Module { return new(Tailscale) },
}
}
func (ts *Tailscale) Provision(ctx caddy.Context) error {
ts.logger = ctx.Logger()
return nil
}
func (ts Tailscale) GetCertificate(ctx context.Context, hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
canGetCert, err := ts.canHazCertificate(ctx, hello)
if err == nil && !canGetCert {
return nil, nil // pass-thru: Tailscale can't offer a cert for this name
}
if err != nil {
ts.logger.Warn("could not get status; will try to get certificate anyway", zap.Error(err))
}
return tscert.GetCertificateWithContext(ctx, hello)
}
// canHazCertificate returns true if Tailscale reports it can get a certificate for the given ClientHello.
func (ts Tailscale) canHazCertificate(ctx context.Context, hello *tls.ClientHelloInfo) (bool, error) {
if !strings.HasSuffix(strings.ToLower(hello.ServerName), tailscaleDomainAliasEnding) {
return false, nil
}
status, err := tscert.GetStatus(ctx)
if err != nil {
return false, err
}
for _, domain := range status.CertDomains {
if certmagic.MatchWildcard(hello.ServerName, domain) {
return true, nil
}
}
return false, nil
}
// UnmarshalCaddyfile deserializes Caddyfile tokens into ts.
//
// ... tailscale
func (Tailscale) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume cert manager name
if d.NextArg() {
return d.ArgErr()
}
return nil
}
// tailscaleDomainAliasEnding is the ending for all Tailscale custom domains.
const tailscaleDomainAliasEnding = ".ts.net"
// HTTPCertGetter can get a certificate via HTTP(S) request.
type HTTPCertGetter struct {
// The URL from which to download the certificate. Required.
//
// The URL will be augmented with query string parameters taken
// from the TLS handshake:
//
// - server_name: The SNI value
// - signature_schemes: Comma-separated list of hex IDs of signatures
// - cipher_suites: Comma-separated list of hex IDs of cipher suites
//
// To be valid, the response must be HTTP 200 with a PEM body
// consisting of blocks for the certificate chain and the private
// key.
//
// To indicate that this manager is not managing a certificate for
// the described handshake, the endpoint should return HTTP 204
// (No Content). Error statuses will indicate that the manager is
// capable of providing a certificate but was unable to.
URL string `json:"url,omitempty"`
ctx context.Context
}
// CaddyModule returns the Caddy module information.
func (hcg HTTPCertGetter) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.get_certificate.http",
New: func() caddy.Module { return new(HTTPCertGetter) },
}
}
func (hcg *HTTPCertGetter) Provision(ctx caddy.Context) error {
hcg.ctx = ctx
if hcg.URL == "" {
return fmt.Errorf("URL is required")
}
return nil
}
func (hcg HTTPCertGetter) GetCertificate(ctx context.Context, hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
sigs := make([]string, len(hello.SignatureSchemes))
for i, sig := range hello.SignatureSchemes {
sigs[i] = fmt.Sprintf("%x", uint16(sig)) // you won't believe what %x uses if the val is a Stringer
}
suites := make([]string, len(hello.CipherSuites))
for i, cs := range hello.CipherSuites {
suites[i] = fmt.Sprintf("%x", cs)
}
parsed, err := url.Parse(hcg.URL)
if err != nil {
return nil, err
}
qs := parsed.Query()
qs.Set("server_name", hello.ServerName)
qs.Set("signature_schemes", strings.Join(sigs, ","))
qs.Set("cipher_suites", strings.Join(suites, ","))
parsed.RawQuery = qs.Encode()
req, err := http.NewRequestWithContext(hcg.ctx, http.MethodGet, parsed.String(), nil)
if err != nil {
return nil, err
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNoContent {
// endpoint is not managing certs for this handshake
return nil, nil
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("got HTTP %d", resp.StatusCode)
}
bodyBytes, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("error reading response body: %v", err)
}
cert, err := tlsCertFromCertAndKeyPEMBundle(bodyBytes)
if err != nil {
return nil, err
}
return &cert, nil
}
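// For example, for a handshake with SNI "example.com", the request sent to the
// configured URL would look roughly like the following; the base URL and hex ID
// values are illustrative, and the query parameter names are the ones set above:
//
// https://certs.internal/getcert?cipher_suites=1301%2C1302&server_name=example.com&signature_schemes=804%2C807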
// UnmarshalCaddyfile deserializes Caddyfile tokens into hcg.
//
// ... http <url>
func (hcg *HTTPCertGetter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume cert manager name
if !d.NextArg() {
return d.ArgErr()
}
hcg.URL = d.Val()
if d.NextArg() {
return d.ArgErr()
}
if d.NextBlock(0) {
return d.Err("block not allowed here")
}
return nil
}
// Interface guards
var (
_ certmagic.Manager = (*Tailscale)(nil)
_ caddy.Provisioner = (*Tailscale)(nil)
_ caddyfile.Unmarshaler = (*Tailscale)(nil)
_ certmagic.Manager = (*HTTPCertGetter)(nil)
_ caddy.Provisioner = (*HTTPCertGetter)(nil)
_ caddyfile.Unmarshaler = (*HTTPCertGetter)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"fmt"
"math/big"
"github.com/caddyserver/certmagic"
)
// CustomCertSelectionPolicy represents a policy for selecting the certificate
// used to complete a handshake when there may be multiple options. All fields
// specified must match the candidate certificate for it to be chosen.
// This was needed to solve https://github.com/caddyserver/caddy/issues/2588.
type CustomCertSelectionPolicy struct {
// The certificate must have one of these serial numbers.
SerialNumber []bigInt `json:"serial_number,omitempty"`
// The certificate must have one of these organization names.
SubjectOrganization []string `json:"subject_organization,omitempty"`
// The certificate must use this public key algorithm.
PublicKeyAlgorithm PublicKeyAlgorithm `json:"public_key_algorithm,omitempty"`
// The certificate must have at least one of the tags in the list.
AnyTag []string `json:"any_tag,omitempty"`
// The certificate must have all of the tags in the list.
AllTags []string `json:"all_tags,omitempty"`
}
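// For illustration only, an assumed JSON policy that narrows candidates to
// certificates from a given organization that carry a given tag (values are
// placeholders; serial numbers would be JSON strings, per bigInt below):
//
// {
// "subject_organization": ["Example Corp"],
// "all_tags": ["managed"]
// }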
// SelectCertificate implements certmagic.CertificateSelector. It
// only chooses a certificate that at least meets the criteria in
// p. It then chooses the first non-expired certificate that is
// compatible with the client. If none are valid, it chooses the
// first viable candidate anyway.
func (p CustomCertSelectionPolicy) SelectCertificate(hello *tls.ClientHelloInfo, choices []certmagic.Certificate) (certmagic.Certificate, error) {
viable := make([]certmagic.Certificate, 0, len(choices))
nextChoice:
for _, cert := range choices {
if len(p.SerialNumber) > 0 {
var found bool
for _, sn := range p.SerialNumber {
snInt := sn.Int // avoid taking address of iteration variable (gosec warning)
if cert.Leaf.SerialNumber.Cmp(&snInt) == 0 {
found = true
break
}
}
if !found {
continue
}
}
if len(p.SubjectOrganization) > 0 {
var found bool
for _, subjOrg := range p.SubjectOrganization {
for _, org := range cert.Leaf.Subject.Organization {
if subjOrg == org {
found = true
break
}
}
}
if !found {
continue
}
}
if p.PublicKeyAlgorithm != PublicKeyAlgorithm(x509.UnknownPublicKeyAlgorithm) &&
PublicKeyAlgorithm(cert.Leaf.PublicKeyAlgorithm) != p.PublicKeyAlgorithm {
continue
}
if len(p.AnyTag) > 0 {
var found bool
for _, tag := range p.AnyTag {
if cert.HasTag(tag) {
found = true
break
}
}
if !found {
continue
}
}
if len(p.AllTags) > 0 {
for _, tag := range p.AllTags {
if !cert.HasTag(tag) {
continue nextChoice
}
}
}
// this certificate at least meets the policy's requirements,
// but we still have to check expiration and compatibility
viable = append(viable, cert)
}
if len(viable) == 0 {
return certmagic.Certificate{}, fmt.Errorf("no certificates matched custom selection policy")
}
return certmagic.DefaultCertificateSelector(hello, viable)
}
// bigInt is a big.Int type that interoperates with JSON encodings as a string.
type bigInt struct{ big.Int }
func (bi bigInt) MarshalJSON() ([]byte, error) {
return json.Marshal(bi.String())
}
func (bi *bigInt) UnmarshalJSON(p []byte) error {
if string(p) == "null" {
return nil
}
var stringRep string
err := json.Unmarshal(p, &stringRep)
if err != nil {
return err
}
_, ok := bi.SetString(stringRep, 10)
if !ok {
return fmt.Errorf("not a valid big integer: %s", p)
}
return nil
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/mholt/acmez/v2"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
func init() {
caddy.RegisterModule(LeafCertClientAuth{})
}
// ConnectionPolicies govern the establishment of TLS connections. It is
// an ordered group of connection policies; the first matching policy will
// be used to configure TLS connections at handshake-time.
type ConnectionPolicies []*ConnectionPolicy
// Provision sets up each connection policy. It should be called
// during the Validate() phase, after the TLS app (if any) is
// already set up.
func (cp ConnectionPolicies) Provision(ctx caddy.Context) error {
for i, pol := range cp {
// matchers
mods, err := ctx.LoadModule(pol, "MatchersRaw")
if err != nil {
return fmt.Errorf("loading handshake matchers: %v", err)
}
for _, modIface := range mods.(map[string]any) {
cp[i].matchers = append(cp[i].matchers, modIface.(ConnectionMatcher))
}
// enable HTTP/2 by default
if pol.ALPN == nil {
pol.ALPN = append(pol.ALPN, defaultALPN...)
}
// pre-build standard TLS config so we don't have to at handshake-time
err = pol.buildStandardTLSConfig(ctx)
if err != nil {
return fmt.Errorf("connection policy %d: building standard TLS config: %s", i, err)
}
if pol.ClientAuthentication != nil && len(pol.ClientAuthentication.VerifiersRaw) > 0 {
clientCertValidations, err := ctx.LoadModule(pol.ClientAuthentication, "VerifiersRaw")
if err != nil {
return fmt.Errorf("loading client cert verifiers: %v", err)
}
for _, validator := range clientCertValidations.([]any) {
cp[i].ClientAuthentication.verifiers = append(cp[i].ClientAuthentication.verifiers, validator.(ClientCertificateVerifier))
}
}
}
return nil
}
// TLSConfig returns a standard-lib-compatible TLS configuration which
// selects the first matching policy based on the ClientHello.
func (cp ConnectionPolicies) TLSConfig(_ caddy.Context) *tls.Config {
// using ServerName to match policies is extremely common, especially in configs
// with lots and lots of different policies; we can fast-track those by indexing
// them by SNI, so we don't have to iterate potentially thousands of policies
// (TODO: this map does not account for wildcards, see if this is a problem in practice? look for reports of high connection latency with wildcard certs but low latency for non-wildcards in multi-thousand-cert deployments)
indexedBySNI := make(map[string]ConnectionPolicies)
if len(cp) > 30 {
for _, p := range cp {
for _, m := range p.matchers {
if sni, ok := m.(MatchServerName); ok {
for _, sniName := range sni {
indexedBySNI[sniName] = append(indexedBySNI[sniName], p)
}
}
}
}
}
return &tls.Config{
MinVersion: tls.VersionTLS12,
GetConfigForClient: func(hello *tls.ClientHelloInfo) (*tls.Config, error) {
// filter policies by SNI first, if possible, to speed things up
// when there may be lots of policies
possiblePolicies := cp
if indexedPolicies, ok := indexedBySNI[hello.ServerName]; ok {
possiblePolicies = indexedPolicies
}
policyLoop:
for _, pol := range possiblePolicies {
for _, matcher := range pol.matchers {
if !matcher.Match(hello) {
continue policyLoop
}
}
if pol.Drop {
return nil, fmt.Errorf("dropping connection")
}
return pol.TLSConfig, nil
}
return nil, fmt.Errorf("no server TLS configuration available for ClientHello: %+v", hello)
},
}
}
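// As a minimal usage sketch (assuming `policies` is an already-provisioned
// ConnectionPolicies value, `ctx` is a caddy.Context, and `ln` is an existing
// net.Listener), the returned config can terminate TLS with the standard library:
//
// tlsCfg := policies.TLSConfig(ctx)
// tlsLn := tls.NewListener(ln, tlsCfg)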
// ConnectionPolicy specifies the logic for handling a TLS handshake.
// An empty policy is valid; safe and sensible defaults will be used.
type ConnectionPolicy struct {
// How to match this policy with a TLS ClientHello. If
// this policy is the first to match, it will be used.
MatchersRaw caddy.ModuleMap `json:"match,omitempty" caddy:"namespace=tls.handshake_match"`
// How to choose a certificate if more than one matched
// the given ServerName (SNI) value.
CertSelection *CustomCertSelectionPolicy `json:"certificate_selection,omitempty"`
// The list of cipher suites to support. Caddy's
// defaults are modern and secure.
CipherSuites []string `json:"cipher_suites,omitempty"`
// The list of elliptic curves to support. Caddy's
// defaults are modern and secure.
Curves []string `json:"curves,omitempty"`
// Protocols to use for Application-Layer Protocol
// Negotiation (ALPN) during the handshake.
ALPN []string `json:"alpn,omitempty"`
// Minimum TLS protocol version to allow. Default: `tls1.2`
ProtocolMin string `json:"protocol_min,omitempty"`
// Maximum TLS protocol version to allow. Default: `tls1.3`
ProtocolMax string `json:"protocol_max,omitempty"`
// Reject TLS connections. EXPERIMENTAL: May change.
Drop bool `json:"drop,omitempty"`
// Enables and configures TLS client authentication.
ClientAuthentication *ClientAuthentication `json:"client_authentication,omitempty"`
// DefaultSNI becomes the ServerName in a ClientHello if there
// is no policy configured for the empty SNI value.
DefaultSNI string `json:"default_sni,omitempty"`
// FallbackSNI becomes the ServerName in a ClientHello if
// the original ServerName doesn't match any certificates
// in the cache. The use cases for this are very niche;
// typically if a client is a CDN and passes through the
// ServerName of the downstream handshake but can accept
// a certificate with the origin's hostname instead, then
// you would set this to your origin's hostname. Note that
// Caddy must be managing a certificate for this name.
//
// This feature is EXPERIMENTAL and subject to change or removal.
FallbackSNI string `json:"fallback_sni,omitempty"`
// Also known as "SSLKEYLOGFILE", TLS secrets will be written to
// this file in NSS key log format which can then be parsed by
// Wireshark and other tools. This is INSECURE as it allows other
// programs or tools to decrypt TLS connections. However, this
// capability can be useful for debugging and troubleshooting.
// **ENABLING THIS LOG COMPROMISES SECURITY!**
//
// This feature is EXPERIMENTAL and subject to change or removal.
InsecureSecretsLog string `json:"insecure_secrets_log,omitempty"`
// TLSConfig is the fully-formed, standard lib TLS config
// used to serve TLS connections. Provision all
// ConnectionPolicies to populate this. It is exported only
// so it can be minimally adjusted after provisioning
// if necessary (like to adjust NextProtos to disable HTTP/2),
// and may be unexported in the future.
TLSConfig *tls.Config `json:"-"`
matchers []ConnectionMatcher
}
func (p *ConnectionPolicy) buildStandardTLSConfig(ctx caddy.Context) error {
tlsAppIface, err := ctx.App("tls")
if err != nil {
return fmt.Errorf("getting tls app: %v", err)
}
tlsApp := tlsAppIface.(*TLS)
// fill in some "easy" default values, but for other values
// (such as slices), we should ensure that they start empty
// so the user-provided config can fill them in; then we will
// fill in a default config at the end if they are still unset
cfg := &tls.Config{
NextProtos: p.ALPN,
GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
// TODO: I don't love how this works: we pre-build certmagic configs
// so that handshakes are faster. Unfortunately, certmagic configs are
// comprised of settings from both a TLS connection policy and a TLS
// automation policy. The only two fields (as of March 2020; v2 beta 17)
// of a certmagic config that come from the TLS connection policy are
// CertSelection and DefaultServerName, so an automation policy is what
// builds the base certmagic config. Since the pre-built config is
// shared, I don't think we can change any of its fields per-handshake,
// hence the awkward shallow copy (dereference) here and the subsequent
// changing of some of its fields. I'm worried this dereference allocates
// more at handshake-time, but I don't know how to practically pre-build
// a certmagic config for each combination of conn policy + automation policy...
cfg := *tlsApp.getConfigForName(hello.ServerName)
if p.CertSelection != nil {
// you would think we could just set this whether or not
// p.CertSelection is nil, but that leads to panics if
// it is, because cfg.CertSelection is an interface,
// so it will have a non-nil value even if the actual
// value underlying it is nil (sigh)
cfg.CertSelection = p.CertSelection
}
cfg.DefaultServerName = p.DefaultSNI
cfg.FallbackServerName = p.FallbackSNI
return cfg.GetCertificate(hello)
},
MinVersion: tls.VersionTLS12,
MaxVersion: tls.VersionTLS13,
}
// session tickets support
if tlsApp.SessionTickets != nil {
cfg.SessionTicketsDisabled = tlsApp.SessionTickets.Disabled
// session ticket key rotation
tlsApp.SessionTickets.register(cfg)
ctx.OnCancel(func() {
// do cleanup when the context is canceled because,
// though unlikely, it is possible that a context
// needing a TLS server config could exist for less
// than the lifetime of the whole app
tlsApp.SessionTickets.unregister(cfg)
})
}
// TODO: Clean up session ticket active locks in storage if app (or process) is being closed!
// add all the cipher suites in order, without duplicates
cipherSuitesAdded := make(map[uint16]struct{})
for _, csName := range p.CipherSuites {
csID := CipherSuiteID(csName)
if csID == 0 {
return fmt.Errorf("unsupported cipher suite: %s", csName)
}
if _, ok := cipherSuitesAdded[csID]; !ok {
cipherSuitesAdded[csID] = struct{}{}
cfg.CipherSuites = append(cfg.CipherSuites, csID)
}
}
// add all the curve preferences in order, without duplicates
curvesAdded := make(map[tls.CurveID]struct{})
for _, curveName := range p.Curves {
curveID := SupportedCurves[curveName]
if _, ok := curvesAdded[curveID]; !ok {
curvesAdded[curveID] = struct{}{}
cfg.CurvePreferences = append(cfg.CurvePreferences, curveID)
}
}
// ensure ALPN includes the ACME TLS-ALPN protocol
var alpnFound bool
for _, a := range p.ALPN {
if a == acmez.ACMETLS1Protocol {
alpnFound = true
break
}
}
if !alpnFound && (cfg.NextProtos == nil || len(cfg.NextProtos) > 0) {
cfg.NextProtos = append(cfg.NextProtos, acmez.ACMETLS1Protocol)
}
// min and max protocol versions
if (p.ProtocolMin != "" && p.ProtocolMax != "") && p.ProtocolMin > p.ProtocolMax {
return fmt.Errorf("protocol min (%x) cannot be greater than protocol max (%x)", p.ProtocolMin, p.ProtocolMax)
}
if p.ProtocolMin != "" {
cfg.MinVersion = SupportedProtocols[p.ProtocolMin]
}
if p.ProtocolMax != "" {
cfg.MaxVersion = SupportedProtocols[p.ProtocolMax]
}
// client authentication
if p.ClientAuthentication != nil {
if err := p.ClientAuthentication.provision(ctx); err != nil {
return fmt.Errorf("provisioning client CA: %v", err)
}
if err := p.ClientAuthentication.ConfigureTLSConfig(cfg); err != nil {
return fmt.Errorf("configuring TLS client authentication: %v", err)
}
}
if p.InsecureSecretsLog != "" {
filename, err := caddy.NewReplacer().ReplaceOrErr(p.InsecureSecretsLog, true, true)
if err != nil {
return err
}
filename, err = filepath.Abs(filename)
if err != nil {
return err
}
logFile, _, err := secretsLogPool.LoadOrNew(filename, func() (caddy.Destructor, error) {
w, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o600)
return destructableWriter{w}, err
})
if err != nil {
return err
}
ctx.OnCancel(func() { _, _ = secretsLogPool.Delete(filename) })
cfg.KeyLogWriter = logFile.(io.Writer)
tlsApp.logger.Warn("TLS SECURITY COMPROMISED: secrets logging is enabled!",
zap.String("log_filename", filename))
}
setDefaultTLSParams(cfg)
p.TLSConfig = cfg
return nil
}
// SettingsEmpty returns true if p's settings (fields
// except the matchers) are all empty/unset.
func (p ConnectionPolicy) SettingsEmpty() bool {
return p.CertSelection == nil &&
p.CipherSuites == nil &&
p.Curves == nil &&
p.ALPN == nil &&
p.ProtocolMin == "" &&
p.ProtocolMax == "" &&
p.ClientAuthentication == nil &&
p.DefaultSNI == "" &&
p.InsecureSecretsLog == ""
}
// ClientAuthentication configures TLS client auth.
type ClientAuthentication struct {
// Certificate authority module which provides the certificate pool of trusted certificates
CARaw json.RawMessage `json:"ca,omitempty" caddy:"namespace=tls.ca_pool.source inline_key=provider"`
ca CA
// DEPRECATED: Use the `ca` field with the `tls.ca_pool.source.inline` module instead.
// A list of base64 DER-encoded CA certificates
// against which to validate client certificates.
// Client certs which are not signed by any of
// these CAs will be rejected.
TrustedCACerts []string `json:"trusted_ca_certs,omitempty"`
// DEPRECATED: Use the `ca` field with the `tls.ca_pool.source.file` module instead.
// TrustedCACertPEMFiles is a list of PEM file names
// from which to load certificates of trusted CAs.
// Client certificates which are not signed by any of
// these CA certificates will be rejected.
TrustedCACertPEMFiles []string `json:"trusted_ca_certs_pem_files,omitempty"`
// DEPRECATED: This field is deprecated and will be removed in
// a future version. Please use the `verifiers` field with the
// `tls.client_auth.verifier.leaf` module instead.
//
// A list of base64 DER-encoded client leaf certs
// to accept. If this list is not empty, client certs
// which are not in this list will be rejected.
TrustedLeafCerts []string `json:"trusted_leaf_certs,omitempty"`
// Client certificate verification modules. These can perform
// custom client authentication checks, such as ensuring the
// certificate is not revoked.
VerifiersRaw []json.RawMessage `json:"verifiers,omitempty" caddy:"namespace=tls.client_auth.verifier inline_key=verifier"`
verifiers []ClientCertificateVerifier
// The mode for authenticating the client. Allowed values are:
//
// Mode | Description
// -----|---------------
// `request` | Ask clients for a certificate, but allow even if there isn't one; do not verify it
// `require` | Require clients to present a certificate, but do not verify it
// `verify_if_given` | Ask clients for a certificate; allow even if there isn't one, but verify it if there is
// `require_and_verify` | Require clients to present a valid certificate that is verified
//
// The default mode is `require_and_verify` if any
// TrustedCACerts or TrustedCACertPEMFiles or TrustedLeafCerts
// are provided; otherwise, the default mode is `require`.
Mode string `json:"mode,omitempty"`
existingVerifyPeerCert func([][]byte, [][]*x509.Certificate) error
}
// UnmarshalCaddyfile parses the Caddyfile segment to set up the client authentication. Syntax:
//
// client_auth {
// mode [request|require|verify_if_given|require_and_verify]
// trust_pool <module> {
// ...
// }
// trusted_leaf_cert <base64_der>
// trusted_leaf_cert_file <filename>
// verifier <module>
// }
//
// If `mode` is not provided, it defaults to `require_and_verify` if any of the following are provided:
// - `trusted_leaf_cert`
// - `trusted_leaf_cert_file`
// - `trust_pool`
//
// Otherwise, it defaults to `require`.
func (ca *ClientAuthentication) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
for d.NextArg() {
// consume any tokens on the same line
}
for nesting := d.Nesting(); d.NextBlock(nesting); {
subdir := d.Val()
switch subdir {
case "mode":
if d.CountRemainingArgs() > 1 {
return d.ArgErr()
}
if !d.Args(&ca.Mode) {
return d.ArgErr()
}
case "trusted_ca_cert":
caddy.Log().Warn("The 'trusted_ca_cert' field is deprecated. Use the 'trust_pool' field instead.")
if len(ca.CARaw) != 0 {
return d.Err("cannot specify both 'trust_pool' and 'trusted_ca_cert' or 'trusted_ca_cert_file'")
}
if !d.NextArg() {
return d.ArgErr()
}
ca.TrustedCACerts = append(ca.TrustedCACerts, d.Val())
case "trusted_leaf_cert":
if !d.NextArg() {
return d.ArgErr()
}
ca.TrustedLeafCerts = append(ca.TrustedLeafCerts, d.Val())
case "trusted_ca_cert_file":
caddy.Log().Warn("The 'trusted_ca_cert_file' field is deprecated. Use the 'trust_pool' field instead.")
if len(ca.CARaw) != 0 {
return d.Err("cannot specify both 'trust_pool' and 'trusted_ca_cert' or 'trusted_ca_cert_file'")
}
if !d.NextArg() {
return d.ArgErr()
}
filename := d.Val()
ders, err := convertPEMFilesToDER(filename)
if err != nil {
return d.WrapErr(err)
}
ca.TrustedCACerts = append(ca.TrustedCACerts, ders...)
case "trusted_leaf_cert_file":
if !d.NextArg() {
return d.ArgErr()
}
filename := d.Val()
ders, err := convertPEMFilesToDER(filename)
if err != nil {
return d.WrapErr(err)
}
ca.TrustedLeafCerts = append(ca.TrustedLeafCerts, ders...)
case "trust_pool":
if len(ca.TrustedCACerts) != 0 {
return d.Err("cannot specify both 'trust_pool' and 'trusted_ca_cert' or 'trusted_ca_cert_file'")
}
if !d.NextArg() {
return d.ArgErr()
}
modName := d.Val()
mod, err := caddyfile.UnmarshalModule(d, "tls.ca_pool.source."+modName)
if err != nil {
return d.WrapErr(err)
}
caMod, ok := mod.(CA)
if !ok {
return fmt.Errorf("trust_pool module '%s' is not a certificate pool provider", caMod)
}
ca.CARaw = caddyconfig.JSONModuleObject(caMod, "provider", modName, nil)
case "verifier":
if !d.NextArg() {
return d.ArgErr()
}
vType := d.Val()
modID := "tls.client_auth.verifier." + vType
unm, err := caddyfile.UnmarshalModule(d, modID)
if err != nil {
return err
}
_, ok := unm.(ClientCertificateVerifier)
if !ok {
return d.Errf("module '%s' is not a caddytls.ClientCertificateVerifier", modID)
}
ca.VerifiersRaw = append(ca.VerifiersRaw, caddyconfig.JSONModuleObject(unm, "verifier", vType, nil))
default:
return d.Errf("unknown subdirective for client_auth: %s", subdir)
}
}
// only 'trusted_ca_cert' or 'trusted_ca_cert_file' was specified
if len(ca.TrustedCACerts) > 0 {
fileMod := &InlineCAPool{}
fileMod.TrustedCACerts = append(fileMod.TrustedCACerts, ca.TrustedCACerts...)
ca.CARaw = caddyconfig.JSONModuleObject(fileMod, "provider", "inline", nil)
ca.TrustedCACertPEMFiles, ca.TrustedCACerts = nil, nil
}
return nil
}
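// For illustration only, a filled-in client_auth block using the inline trust
// pool from this package (the base64 value is a placeholder):
//
// client_auth {
// mode require_and_verify
// trust_pool inline {
// trust_der MIIB...base64-DER...
// }
// }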
func convertPEMFilesToDER(filename string) ([]string, error) {
certDataPEM, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
var ders []string
// while block is not nil, we have more certificates in the file
for block, rest := pem.Decode(certDataPEM); block != nil; block, rest = pem.Decode(rest) {
if block.Type != "CERTIFICATE" {
return nil, fmt.Errorf("no CERTIFICATE pem block found in %s", filename)
}
ders = append(
ders,
base64.StdEncoding.EncodeToString(block.Bytes),
)
}
// if we decoded nothing, return an error
if len(ders) == 0 {
return nil, fmt.Errorf("no CERTIFICATE pem block found in %s", filename)
}
return ders, nil
}
func (clientauth *ClientAuthentication) provision(ctx caddy.Context) error {
if len(clientauth.CARaw) > 0 && (len(clientauth.TrustedCACerts) > 0 || len(clientauth.TrustedCACertPEMFiles) > 0) {
return fmt.Errorf("conflicting config for client authentication trust CA")
}
// convert all named file paths to inline
if len(clientauth.TrustedCACertPEMFiles) > 0 {
for _, fpath := range clientauth.TrustedCACertPEMFiles {
ders, err := convertPEMFilesToDER(fpath)
if err != nil {
return err
}
clientauth.TrustedCACerts = append(clientauth.TrustedCACerts, ders...)
}
}
// if we have TrustedCACerts explicitly set, create an 'inline' CA and return
if len(clientauth.TrustedCACerts) > 0 {
clientauth.ca = InlineCAPool{
TrustedCACerts: clientauth.TrustedCACerts,
}
return nil
}
// if we don't have any CARaw set, there's not much work to do
if clientauth.CARaw == nil {
return nil
}
caRaw, err := ctx.LoadModule(clientauth, "CARaw")
if err != nil {
return err
}
ca, ok := caRaw.(CA)
if !ok {
return fmt.Errorf("'ca' module '%s' is not a certificate pool provider", ca)
}
clientauth.ca = ca
return nil
}
// Active returns true if clientauth has an actionable configuration.
func (clientauth ClientAuthentication) Active() bool {
return len(clientauth.TrustedCACerts) > 0 ||
len(clientauth.TrustedCACertPEMFiles) > 0 ||
len(clientauth.TrustedLeafCerts) > 0 || // TODO: DEPRECATED
len(clientauth.VerifiersRaw) > 0 ||
len(clientauth.Mode) > 0 ||
clientauth.CARaw != nil || clientauth.ca != nil
}
// ConfigureTLSConfig sets up cfg to enforce clientauth's configuration.
func (clientauth *ClientAuthentication) ConfigureTLSConfig(cfg *tls.Config) error {
// if there's no actionable client auth, simply disable it
if !clientauth.Active() {
cfg.ClientAuth = tls.NoClientCert
return nil
}
// enforce desired mode of client authentication
if len(clientauth.Mode) > 0 {
switch clientauth.Mode {
case "request":
cfg.ClientAuth = tls.RequestClientCert
case "require":
cfg.ClientAuth = tls.RequireAnyClientCert
case "verify_if_given":
cfg.ClientAuth = tls.VerifyClientCertIfGiven
case "require_and_verify":
cfg.ClientAuth = tls.RequireAndVerifyClientCert
default:
return fmt.Errorf("client auth mode not recognized: %s", clientauth.Mode)
}
} else {
// otherwise, set a safe default mode
if len(clientauth.TrustedCACerts) > 0 ||
len(clientauth.TrustedCACertPEMFiles) > 0 ||
len(clientauth.TrustedLeafCerts) > 0 ||
clientauth.CARaw != nil {
cfg.ClientAuth = tls.RequireAndVerifyClientCert
} else {
cfg.ClientAuth = tls.RequireAnyClientCert
}
}
// enforce CA verification by adding CA certs to the ClientCAs pool
if clientauth.ca != nil {
cfg.ClientCAs = clientauth.ca.CertPool()
}
// TODO: DEPRECATED: Only here for backwards compatibility.
// If leaf cert is specified, enforce by adding a client auth module
if len(clientauth.TrustedLeafCerts) > 0 {
caddy.Log().Named("tls.connection_policy").Warn("trusted_leaf_certs is deprecated; use leaf verifier module instead")
var trustedLeafCerts []*x509.Certificate
for _, clientCertString := range clientauth.TrustedLeafCerts {
clientCert, err := decodeBase64DERCert(clientCertString)
if err != nil {
return fmt.Errorf("parsing certificate: %v", err)
}
trustedLeafCerts = append(trustedLeafCerts, clientCert)
}
clientauth.verifiers = append(clientauth.verifiers, LeafCertClientAuth{trustedLeafCerts: trustedLeafCerts})
}
// if a custom verification function already exists, wrap it
clientauth.existingVerifyPeerCert = cfg.VerifyPeerCertificate
cfg.VerifyPeerCertificate = clientauth.verifyPeerCertificate
return nil
}
// verifyPeerCertificate is for use as a tls.Config.VerifyPeerCertificate
// callback to do custom client certificate verification. It is intended
// for installation only by clientauth.ConfigureTLSConfig().
func (clientauth *ClientAuthentication) verifyPeerCertificate(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
// first use any pre-existing custom verification function
if clientauth.existingVerifyPeerCert != nil {
err := clientauth.existingVerifyPeerCert(rawCerts, verifiedChains)
if err != nil {
return err
}
}
for _, verifier := range clientauth.verifiers {
err := verifier.VerifyClientCertificate(rawCerts, verifiedChains)
if err != nil {
return err
}
}
return nil
}
// decodeBase64DERCert base64-decodes, then DER-decodes, certStr.
func decodeBase64DERCert(certStr string) (*x509.Certificate, error) {
derBytes, err := base64.StdEncoding.DecodeString(certStr)
if err != nil {
return nil, err
}
return x509.ParseCertificate(derBytes)
}
// setDefaultTLSParams sets the default TLS cipher suites, protocol versions,
// and server preferences of cfg if they are not already set; it does not
// overwrite values, only fills in missing values.
func setDefaultTLSParams(cfg *tls.Config) {
if len(cfg.CipherSuites) == 0 {
cfg.CipherSuites = getOptimalDefaultCipherSuites()
}
// Not a cipher suite, but still important for mitigating protocol downgrade attacks
// (prepend since having it at end breaks http2 due to non-h2-approved suites before it)
cfg.CipherSuites = append([]uint16{tls.TLS_FALLBACK_SCSV}, cfg.CipherSuites...)
if len(cfg.CurvePreferences) == 0 {
cfg.CurvePreferences = defaultCurves
}
if cfg.MinVersion == 0 {
cfg.MinVersion = tls.VersionTLS12
}
if cfg.MaxVersion == 0 {
cfg.MaxVersion = tls.VersionTLS13
}
}
// LeafCertClientAuth verifies the client's leaf certificate.
type LeafCertClientAuth struct {
LeafCertificateLoadersRaw []json.RawMessage `json:"leaf_certs_loaders,omitempty" caddy:"namespace=tls.leaf_cert_loader inline_key=loader"`
trustedLeafCerts []*x509.Certificate
}
// CaddyModule returns the Caddy module information.
func (LeafCertClientAuth) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.client_auth.verifier.leaf",
New: func() caddy.Module { return new(LeafCertClientAuth) },
}
}
func (l *LeafCertClientAuth) Provision(ctx caddy.Context) error {
if l.LeafCertificateLoadersRaw == nil {
return nil
}
val, err := ctx.LoadModule(l, "LeafCertificateLoadersRaw")
if err != nil {
return fmt.Errorf("could not parse leaf certificates loaders: %s", err.Error())
}
trustedLeafCertloaders := []LeafCertificateLoader{}
for _, loader := range val.([]any) {
trustedLeafCertloaders = append(trustedLeafCertloaders, loader.(LeafCertificateLoader))
}
trustedLeafCertificates := []*x509.Certificate{}
for _, loader := range trustedLeafCertloaders {
certs, err := loader.LoadLeafCertificates()
if err != nil {
return fmt.Errorf("could not load leaf certificates: %s", err.Error())
}
trustedLeafCertificates = append(trustedLeafCertificates, certs...)
}
l.trustedLeafCerts = trustedLeafCertificates
return nil
}
func (l LeafCertClientAuth) VerifyClientCertificate(rawCerts [][]byte, _ [][]*x509.Certificate) error {
if len(rawCerts) == 0 {
return fmt.Errorf("no client certificate provided")
}
remoteLeafCert, err := x509.ParseCertificate(rawCerts[0])
if err != nil {
return fmt.Errorf("can't parse the given certificate: %s", err.Error())
}
for _, trustedLeafCert := range l.trustedLeafCerts {
if remoteLeafCert.Equal(trustedLeafCert) {
return nil
}
}
return fmt.Errorf("client leaf certificate failed validation")
}
// PublicKeyAlgorithm is a JSON-unmarshalable wrapper type.
type PublicKeyAlgorithm x509.PublicKeyAlgorithm
// UnmarshalJSON satisfies json.Unmarshaler.
func (a *PublicKeyAlgorithm) UnmarshalJSON(b []byte) error {
algoStr := strings.ToLower(strings.Trim(string(b), `"`))
algo, ok := publicKeyAlgorithms[algoStr]
if !ok {
return fmt.Errorf("unrecognized public key algorithm: %s (expected one of %v)",
algoStr, publicKeyAlgorithms)
}
*a = PublicKeyAlgorithm(algo)
return nil
}
// ConnectionMatcher is a type which matches TLS handshakes.
type ConnectionMatcher interface {
Match(*tls.ClientHelloInfo) bool
}
// LeafCertificateLoader is a type that loads the trusted leaf certificates
// for the tls.leaf_cert_loader modules
type LeafCertificateLoader interface {
LoadLeafCertificates() ([]*x509.Certificate, error)
}
// ClientCertificateVerifier is a type which verifies client certificates.
// It is called during verifyPeerCertificate in the TLS handshake.
type ClientCertificateVerifier interface {
VerifyClientCertificate(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error
}
var defaultALPN = []string{"h2", "http/1.1"}
type destructableWriter struct{ *os.File }
func (d destructableWriter) Destruct() error { return d.Close() }
var secretsLogPool = caddy.NewUsagePool()
var _ caddyfile.Unmarshaler = (*ClientAuthentication)(nil)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"crypto/tls"
"fmt"
"os"
"github.com/caddyserver/caddy/v2"
)
func init() {
caddy.RegisterModule(FileLoader{})
}
// FileLoader loads certificates and their associated keys from disk.
type FileLoader []CertKeyFilePair
// Provision implements caddy.Provisioner.
func (fl FileLoader) Provision(ctx caddy.Context) error {
repl, ok := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
if !ok {
repl = caddy.NewReplacer()
}
for k, pair := range fl {
for i, tag := range pair.Tags {
pair.Tags[i] = repl.ReplaceKnown(tag, "")
}
fl[k] = CertKeyFilePair{
Certificate: repl.ReplaceKnown(pair.Certificate, ""),
Key: repl.ReplaceKnown(pair.Key, ""),
Format: repl.ReplaceKnown(pair.Format, ""),
Tags: pair.Tags,
}
}
return nil
}
// CaddyModule returns the Caddy module information.
func (FileLoader) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.certificates.load_files",
New: func() caddy.Module { return new(FileLoader) },
}
}
// CertKeyFilePair pairs certificate and key file names along with their
// encoding format so that they can be loaded from disk.
type CertKeyFilePair struct {
// Path to the certificate (public key) file.
Certificate string `json:"certificate"`
// Path to the private key file.
Key string `json:"key"`
// The format of the cert and key. Can be "pem". Default: "pem"
Format string `json:"format,omitempty"`
// Arbitrary values to associate with this certificate.
// Can be useful when you want to select a particular
// certificate when there may be multiple valid candidates.
Tags []string `json:"tags,omitempty"`
}
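// For illustration only, one entry of a FileLoader in JSON form (paths and the
// tag are placeholders; "format" may be omitted since "pem" is the default):
//
// {
// "certificate": "/etc/ssl/certs/site.crt",
// "key": "/etc/ssl/private/site.key",
// "tags": ["manual"]
// }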
// LoadCertificates returns the certificates to be loaded by fl.
func (fl FileLoader) LoadCertificates() ([]Certificate, error) {
certs := make([]Certificate, 0, len(fl))
for _, pair := range fl {
certData, err := os.ReadFile(pair.Certificate)
if err != nil {
return nil, err
}
keyData, err := os.ReadFile(pair.Key)
if err != nil {
return nil, err
}
var cert tls.Certificate
switch pair.Format {
case "":
fallthrough
case "pem":
cert, err = tls.X509KeyPair(certData, keyData)
default:
return nil, fmt.Errorf("unrecognized certificate/key encoding format: %s", pair.Format)
}
if err != nil {
return nil, err
}
certs = append(certs, Certificate{Certificate: cert, Tags: pair.Tags})
}
return certs, nil
}
// Interface guard
var (
_ CertificateLoader = (FileLoader)(nil)
_ caddy.Provisioner = (FileLoader)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"bytes"
"crypto/tls"
"encoding/pem"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/caddyserver/caddy/v2"
)
func init() {
caddy.RegisterModule(FolderLoader{})
}
// FolderLoader loads certificates and their associated keys from disk
// by recursively walking the specified directories, looking for PEM
// files which contain both a certificate and a key.
type FolderLoader []string
// CaddyModule returns the Caddy module information.
func (FolderLoader) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.certificates.load_folders",
New: func() caddy.Module { return new(FolderLoader) },
}
}
// Provision implements caddy.Provisioner.
func (fl FolderLoader) Provision(ctx caddy.Context) error {
repl, ok := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
if !ok {
repl = caddy.NewReplacer()
}
for k, path := range fl {
fl[k] = repl.ReplaceKnown(path, "")
}
return nil
}
// LoadCertificates loads all the certificates+keys in the directories
// listed in fl from all files ending with .pem. This method of loading
// certificates expects the certificate and key to be bundled into the
// same file.
func (fl FolderLoader) LoadCertificates() ([]Certificate, error) {
var certs []Certificate
for _, dir := range fl {
err := filepath.Walk(dir, func(fpath string, info os.FileInfo, err error) error {
if err != nil {
return fmt.Errorf("unable to traverse into path: %s", fpath)
}
if info.IsDir() {
return nil
}
if !strings.HasSuffix(strings.ToLower(info.Name()), ".pem") {
return nil
}
bundle, err := os.ReadFile(fpath)
if err != nil {
return err
}
cert, err := tlsCertFromCertAndKeyPEMBundle(bundle)
if err != nil {
return fmt.Errorf("%s: %w", fpath, err)
}
certs = append(certs, Certificate{Certificate: cert})
return nil
})
if err != nil {
return nil, err
}
}
return certs, nil
}
func tlsCertFromCertAndKeyPEMBundle(bundle []byte) (tls.Certificate, error) {
certBuilder, keyBuilder := new(bytes.Buffer), new(bytes.Buffer)
var foundKey bool // use only the first key in the file
for {
// Decode next block so we can see what type it is
var derBlock *pem.Block
derBlock, bundle = pem.Decode(bundle)
if derBlock == nil {
break
}
if derBlock.Type == "CERTIFICATE" {
// Re-encode certificate as PEM, appending to certificate chain
if err := pem.Encode(certBuilder, derBlock); err != nil {
return tls.Certificate{}, err
}
} else if derBlock.Type == "EC PARAMETERS" {
// EC keys generated from openssl can be composed of two blocks:
// parameters and key (parameter block should come first)
if !foundKey {
// Encode parameters
if err := pem.Encode(keyBuilder, derBlock); err != nil {
return tls.Certificate{}, err
}
// Key must immediately follow
derBlock, bundle = pem.Decode(bundle)
if derBlock == nil || derBlock.Type != "EC PRIVATE KEY" {
return tls.Certificate{}, fmt.Errorf("expected elliptic private key to immediately follow EC parameters")
}
if err := pem.Encode(keyBuilder, derBlock); err != nil {
return tls.Certificate{}, err
}
foundKey = true
}
} else if derBlock.Type == "PRIVATE KEY" || strings.HasSuffix(derBlock.Type, " PRIVATE KEY") {
// private key block of any type (e.g. "PRIVATE KEY", "RSA PRIVATE KEY")
if !foundKey {
if err := pem.Encode(keyBuilder, derBlock); err != nil {
return tls.Certificate{}, err
}
foundKey = true
}
} else {
return tls.Certificate{}, fmt.Errorf("unrecognized PEM block type: %s", derBlock.Type)
}
}
certPEMBytes, keyPEMBytes := certBuilder.Bytes(), keyBuilder.Bytes()
if len(certPEMBytes) == 0 {
return tls.Certificate{}, fmt.Errorf("failed to parse PEM data")
}
if len(keyPEMBytes) == 0 {
return tls.Certificate{}, fmt.Errorf("no private key block found")
}
cert, err := tls.X509KeyPair(certPEMBytes, keyPEMBytes)
if err != nil {
return tls.Certificate{}, fmt.Errorf("making X509 key pair: %v", err)
}
return cert, nil
}
// Interface guards
var (
_ CertificateLoader = (FolderLoader)(nil)
_ caddy.Provisioner = (FolderLoader)(nil)
)
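// exampleFolderLoaderSketch is a hypothetical illustration (not part of the
// original source) of how a FolderLoader could be used directly. Each *.pem
// file under the listed directory is expected to bundle the certificate
// chain together with its private key, as parsed by
// tlsCertFromCertAndKeyPEMBundle above; the directory path is a placeholder.
func exampleFolderLoaderSketch() ([]Certificate, error) {
	fl := FolderLoader{"/etc/caddy/pem-bundles"}
	return fl.LoadCertificates()
}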
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"bytes"
"context"
"crypto/x509"
"encoding/pem"
"time"
"github.com/caddyserver/certmagic"
"github.com/smallstep/certificates/authority/provisioner"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddypki"
)
func init() {
caddy.RegisterModule(InternalIssuer{})
}
// InternalIssuer is a certificate issuer that generates
// certificates internally using a locally-configured
// CA which can be customized using the `pki` app.
type InternalIssuer struct {
// The ID of the CA to use for signing. The default
// CA ID is "local". The CA can be configured with the
// `pki` app.
CA string `json:"ca,omitempty"`
// The validity period of certificates.
Lifetime caddy.Duration `json:"lifetime,omitempty"`
// If true, the root will be the issuer instead of
// the intermediate. This is NOT recommended and should
// only be used when devices/clients do not properly
// validate certificate chains.
SignWithRoot bool `json:"sign_with_root,omitempty"`
ca *caddypki.CA
logger *zap.Logger
}
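// exampleInternalIssuerJSON is a hypothetical sketch (not part of the
// original source) of how this issuer might be configured inside an
// automation policy's "issuers" list; the values shown simply restate the
// documented defaults ("local" CA, 12h lifetime) and are illustrative only.
const exampleInternalIssuerJSON = `{
	"module": "internal",
	"ca": "local",
	"lifetime": "12h",
	"sign_with_root": false
}`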
// CaddyModule returns the Caddy module information.
func (InternalIssuer) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.issuance.internal",
New: func() caddy.Module { return new(InternalIssuer) },
}
}
// Provision sets up the issuer.
func (iss *InternalIssuer) Provision(ctx caddy.Context) error {
iss.logger = ctx.Logger()
// set some defaults
if iss.CA == "" {
iss.CA = caddypki.DefaultCAID
}
// get a reference to the configured CA
appModule, err := ctx.App("pki")
if err != nil {
return err
}
pkiApp := appModule.(*caddypki.PKI)
ca, err := pkiApp.GetCA(ctx, iss.CA)
if err != nil {
return err
}
iss.ca = ca
// set any other default values
if iss.Lifetime == 0 {
iss.Lifetime = caddy.Duration(defaultInternalCertLifetime)
}
return nil
}
// IssuerKey returns the unique issuer key for the
// configured CA endpoint.
func (iss InternalIssuer) IssuerKey() string {
return iss.ca.ID
}
// Issue issues a certificate to satisfy the CSR.
func (iss InternalIssuer) Issue(ctx context.Context, csr *x509.CertificateRequest) (*certmagic.IssuedCertificate, error) {
// prepare the signing authority
authCfg := caddypki.AuthorityConfig{
SignWithRoot: iss.SignWithRoot,
}
auth, err := iss.ca.NewAuthority(authCfg)
if err != nil {
return nil, err
}
// get the cert (public key) that will be used for signing
var issuerCert *x509.Certificate
if iss.SignWithRoot {
issuerCert = iss.ca.RootCertificate()
} else {
issuerCert = iss.ca.IntermediateCertificate()
}
// ensure issued certificate does not expire later than its issuer
lifetime := time.Duration(iss.Lifetime)
if time.Now().Add(lifetime).After(issuerCert.NotAfter) {
lifetime = time.Until(issuerCert.NotAfter)
iss.logger.Warn("cert lifetime would exceed issuer NotAfter, clamping lifetime",
zap.Duration("orig_lifetime", time.Duration(iss.Lifetime)),
zap.Duration("lifetime", lifetime),
zap.Time("not_after", issuerCert.NotAfter),
)
}
certChain, err := auth.SignWithContext(ctx, csr, provisioner.SignOptions{}, customCertLifetime(caddy.Duration(lifetime)))
if err != nil {
return nil, err
}
var buf bytes.Buffer
for _, cert := range certChain {
err := pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
if err != nil {
return nil, err
}
}
return &certmagic.IssuedCertificate{
Certificate: buf.Bytes(),
}, nil
}
// UnmarshalCaddyfile deserializes Caddyfile tokens into iss.
//
// ... internal {
// ca <name>
// lifetime <duration>
// sign_with_root
// }
func (iss *InternalIssuer) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume issuer name
for d.NextBlock(0) {
switch d.Val() {
case "ca":
if !d.AllArgs(&iss.CA) {
return d.ArgErr()
}
case "lifetime":
if !d.NextArg() {
return d.ArgErr()
}
dur, err := caddy.ParseDuration(d.Val())
if err != nil {
return err
}
iss.Lifetime = caddy.Duration(dur)
case "sign_with_root":
if d.NextArg() {
return d.ArgErr()
}
iss.SignWithRoot = true
}
}
return nil
}
// customCertLifetime allows us to customize certificates that are issued
// by Smallstep libs, particularly the NotBefore & NotAfter dates.
type customCertLifetime time.Duration
func (d customCertLifetime) Modify(cert *x509.Certificate, _ provisioner.SignOptions) error {
cert.NotBefore = time.Now()
cert.NotAfter = cert.NotBefore.Add(time.Duration(d))
return nil
}
const defaultInternalCertLifetime = 12 * time.Hour
// Interface guards
var (
_ caddy.Provisioner = (*InternalIssuer)(nil)
_ certmagic.Issuer = (*InternalIssuer)(nil)
_ provisioner.CertificateModifier = (*customCertLifetime)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"crypto/x509"
"encoding/pem"
"fmt"
"os"
"github.com/caddyserver/caddy/v2"
)
func init() {
caddy.RegisterModule(LeafFileLoader{})
}
// LeafFileLoader loads leaf certificates from disk.
type LeafFileLoader struct {
Files []string `json:"files,omitempty"`
}
// Provision implements caddy.Provisioner.
func (fl *LeafFileLoader) Provision(ctx caddy.Context) error {
repl, ok := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
if !ok {
repl = caddy.NewReplacer()
}
for k, path := range fl.Files {
fl.Files[k] = repl.ReplaceKnown(path, "")
}
return nil
}
// CaddyModule returns the Caddy module information.
func (LeafFileLoader) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.leaf_cert_loader.file",
New: func() caddy.Module { return new(LeafFileLoader) },
}
}
// LoadLeafCertificates returns the certificates to be loaded by fl.
func (fl LeafFileLoader) LoadLeafCertificates() ([]*x509.Certificate, error) {
certificates := make([]*x509.Certificate, 0, len(fl.Files))
for _, path := range fl.Files {
ders, err := convertPEMFilesToDERBytes(path)
if err != nil {
return nil, err
}
certs, err := x509.ParseCertificates(ders)
if err != nil {
return nil, err
}
certificates = append(certificates, certs...)
}
return certificates, nil
}
func convertPEMFilesToDERBytes(filename string) ([]byte, error) {
certDataPEM, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
var ders []byte
// while block is not nil, we have more certificates in the file
for block, rest := pem.Decode(certDataPEM); block != nil; block, rest = pem.Decode(rest) {
if block.Type != "CERTIFICATE" {
return nil, fmt.Errorf("no CERTIFICATE pem block found in %s", filename)
}
ders = append(ders, block.Bytes...)
}
// if we decoded nothing, return an error
if len(ders) == 0 {
return nil, fmt.Errorf("no CERTIFICATE pem block found in %s", filename)
}
return ders, nil
}
// Interface guards
var (
_ LeafCertificateLoader = (*LeafFileLoader)(nil)
_ caddy.Provisioner = (*LeafFileLoader)(nil)
)
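// exampleLeafFileLoaderSketch is a hypothetical illustration (not part of
// the original source) of loading leaf certificates from PEM files on disk;
// the path is a placeholder.
func exampleLeafFileLoaderSketch() ([]*x509.Certificate, error) {
	fl := LeafFileLoader{Files: []string{"/etc/caddy/leaf-certs/client.pem"}}
	return fl.LoadLeafCertificates()
}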
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"crypto/x509"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/caddyserver/caddy/v2"
)
func init() {
caddy.RegisterModule(LeafFolderLoader{})
}
// LeafFolderLoader loads leaf certificates from disk by recursively
// walking the specified directories, looking for PEM files which contain
// certificates (private keys are not loaded).
type LeafFolderLoader struct {
Folders []string `json:"folders,omitempty"`
}
// CaddyModule returns the Caddy module information.
func (LeafFolderLoader) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.leaf_cert_loader.folder",
New: func() caddy.Module { return new(LeafFolderLoader) },
}
}
// Provision implements caddy.Provisioner.
func (fl *LeafFolderLoader) Provision(ctx caddy.Context) error {
repl, ok := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
if !ok {
repl = caddy.NewReplacer()
}
for k, path := range fl.Folders {
fl.Folders[k] = repl.ReplaceKnown(path, "")
}
return nil
}
// LoadLeafCertificates loads all the leaf certificates in the directories
// listed in fl from all files ending with .pem.
func (fl LeafFolderLoader) LoadLeafCertificates() ([]*x509.Certificate, error) {
var certs []*x509.Certificate
for _, dir := range fl.Folders {
err := filepath.Walk(dir, func(fpath string, info os.FileInfo, err error) error {
if err != nil {
return fmt.Errorf("unable to traverse into path: %s", fpath)
}
if info.IsDir() {
return nil
}
if !strings.HasSuffix(strings.ToLower(info.Name()), ".pem") {
return nil
}
certData, err := convertPEMFilesToDERBytes(fpath)
if err != nil {
return err
}
cert, err := x509.ParseCertificate(certData)
if err != nil {
return fmt.Errorf("%s: %w", fpath, err)
}
certs = append(certs, cert)
return nil
})
if err != nil {
return nil, err
}
}
return certs, nil
}
// Interface guards
var (
_ LeafCertificateLoader = (*LeafFolderLoader)(nil)
_ caddy.Provisioner = (*LeafFolderLoader)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"crypto/x509"
"fmt"
"github.com/caddyserver/caddy/v2"
)
func init() {
caddy.RegisterModule(LeafPEMLoader{})
}
// LeafPEMLoader loads leaf certificates by
// decoding their PEM blocks directly. This has the advantage
// of not needing to store them on disk at all.
type LeafPEMLoader struct {
Certificates []string `json:"certificates,omitempty"`
}
// Provision implements caddy.Provisioner.
func (pl *LeafPEMLoader) Provision(ctx caddy.Context) error {
repl, ok := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
if !ok {
repl = caddy.NewReplacer()
}
for i, cert := range pl.Certificates {
pl.Certificates[i] = repl.ReplaceKnown(cert, "")
}
return nil
}
// CaddyModule returns the Caddy module information.
func (LeafPEMLoader) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.leaf_cert_loader.pem",
New: func() caddy.Module { return new(LeafPEMLoader) },
}
}
// LoadLeafCertificates returns the certificates contained in pl.
func (pl LeafPEMLoader) LoadLeafCertificates() ([]*x509.Certificate, error) {
certs := make([]*x509.Certificate, 0, len(pl.Certificates))
for i, cert := range pl.Certificates {
derBytes, err := convertPEMToDER([]byte(cert))
if err != nil {
return nil, fmt.Errorf("PEM leaf certificate loader, cert %d: %v", i, err)
}
cert, err := x509.ParseCertificate(derBytes)
if err != nil {
return nil, fmt.Errorf("PEM cert %d: %v", i, err)
}
certs = append(certs, cert)
}
return certs, nil
}
// Interface guards
var (
_ LeafCertificateLoader = (*LeafPEMLoader)(nil)
_ caddy.Provisioner = (*LeafPEMLoader)(nil)
)
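// exampleLeafPEMLoaderJSON is a hypothetical sketch (not part of the
// original source) of the JSON shape this loader expects: whole PEM-encoded
// leaf certificates supplied inline. The PEM body below is a truncated
// placeholder, not a real certificate.
const exampleLeafPEMLoaderJSON = `{
	"certificates": [
		"-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n"
	]
}`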
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"crypto/x509"
"encoding/json"
"encoding/pem"
"fmt"
"github.com/caddyserver/certmagic"
"github.com/caddyserver/caddy/v2"
)
func init() {
caddy.RegisterModule(LeafStorageLoader{})
}
// LeafStorageLoader loads leaf certificates from the
// globally configured storage module.
type LeafStorageLoader struct {
// A list of certificate file names to be loaded from storage.
Certificates []string `json:"certificates,omitempty"`
// The storage module where the trusted leaf certificates are stored. If
// not set, Caddy's globally configured (default) storage is used.
StorageRaw json.RawMessage `json:"storage,omitempty" caddy:"namespace=caddy.storage inline_key=module"`
// Reference to the globally configured storage module.
storage certmagic.Storage
ctx caddy.Context
}
// CaddyModule returns the Caddy module information.
func (LeafStorageLoader) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.leaf_cert_loader.storage",
New: func() caddy.Module { return new(LeafStorageLoader) },
}
}
// Provision loads the storage module for sl.
func (sl *LeafStorageLoader) Provision(ctx caddy.Context) error {
if sl.StorageRaw != nil {
val, err := ctx.LoadModule(sl, "StorageRaw")
if err != nil {
return fmt.Errorf("loading storage module: %v", err)
}
cmStorage, err := val.(caddy.StorageConverter).CertMagicStorage()
if err != nil {
return fmt.Errorf("creating storage configuration: %v", err)
}
sl.storage = cmStorage
}
if sl.storage == nil {
sl.storage = ctx.Storage()
}
sl.ctx = ctx
repl, ok := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
if !ok {
repl = caddy.NewReplacer()
}
for k, path := range sl.Certificates {
sl.Certificates[k] = repl.ReplaceKnown(path, "")
}
return nil
}
// LoadLeafCertificates returns the certificates to be loaded by sl.
func (sl LeafStorageLoader) LoadLeafCertificates() ([]*x509.Certificate, error) {
certificates := make([]*x509.Certificate, 0, len(sl.Certificates))
for _, path := range sl.Certificates {
certData, err := sl.storage.Load(sl.ctx, path)
if err != nil {
return nil, err
}
ders, err := convertPEMToDER(certData)
if err != nil {
return nil, err
}
certs, err := x509.ParseCertificates(ders)
if err != nil {
return nil, err
}
certificates = append(certificates, certs...)
}
return certificates, nil
}
func convertPEMToDER(pemData []byte) ([]byte, error) {
var ders []byte
// while block is not nil, we have more certificates in the file
for block, rest := pem.Decode(pemData); block != nil; block, rest = pem.Decode(rest) {
if block.Type != "CERTIFICATE" {
return nil, fmt.Errorf("no CERTIFICATE pem block found in the given pem data")
}
ders = append(ders, block.Bytes...)
}
// if we decoded nothing, return an error
if len(ders) == 0 {
return nil, fmt.Errorf("no CERTIFICATE pem block found in the given pem data")
}
return ders, nil
}
// Interface guards
var (
_ LeafCertificateLoader = (*LeafStorageLoader)(nil)
_ caddy.Provisioner = (*LeafStorageLoader)(nil)
)
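// exampleLeafStorageLoaderJSON is a hypothetical sketch (not part of the
// original source) of this loader's JSON shape. The storage keys and the
// explicit file_system storage module shown here are placeholders; omitting
// "storage" falls back to the globally configured (default) storage, as
// described above.
const exampleLeafStorageLoaderJSON = `{
	"certificates": ["leaf-certs/client1.pem"],
	"storage": {
		"module": "file_system",
		"root": "/var/lib/caddy"
	}
}`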
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"crypto/tls"
"fmt"
"net"
"net/netip"
"strings"
"github.com/caddyserver/certmagic"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
)
func init() {
caddy.RegisterModule(MatchServerName{})
caddy.RegisterModule(MatchRemoteIP{})
caddy.RegisterModule(MatchLocalIP{})
}
// MatchServerName matches based on SNI. Names in
// this list may use left-most-label wildcards,
// similar to wildcard certificates.
type MatchServerName []string
// CaddyModule returns the Caddy module information.
func (MatchServerName) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.handshake_match.sni",
New: func() caddy.Module { return new(MatchServerName) },
}
}
// Match matches hello based on SNI.
func (m MatchServerName) Match(hello *tls.ClientHelloInfo) bool {
for _, name := range m {
if certmagic.MatchWildcard(hello.ServerName, name) {
return true
}
}
return false
}
// MatchRemoteIP matches based on the remote IP of the
// connection. Specific IPs or CIDR ranges can be specified.
//
// Note that IPs can sometimes be spoofed, so do not rely
// on this as a replacement for actual authentication.
type MatchRemoteIP struct {
// The IPs or CIDR ranges to match.
Ranges []string `json:"ranges,omitempty"`
// The IPs or CIDR ranges to *NOT* match.
NotRanges []string `json:"not_ranges,omitempty"`
cidrs []netip.Prefix
notCidrs []netip.Prefix
logger *zap.Logger
}
// CaddyModule returns the Caddy module information.
func (MatchRemoteIP) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.handshake_match.remote_ip",
New: func() caddy.Module { return new(MatchRemoteIP) },
}
}
// Provision parses m's IP ranges, either from IP or CIDR expressions.
func (m *MatchRemoteIP) Provision(ctx caddy.Context) error {
m.logger = ctx.Logger()
for _, str := range m.Ranges {
cidrs, err := m.parseIPRange(str)
if err != nil {
return err
}
m.cidrs = append(m.cidrs, cidrs...)
}
for _, str := range m.NotRanges {
cidrs, err := m.parseIPRange(str)
if err != nil {
return err
}
m.notCidrs = append(m.notCidrs, cidrs...)
}
return nil
}
// Match matches hello based on the connection's remote IP.
func (m MatchRemoteIP) Match(hello *tls.ClientHelloInfo) bool {
remoteAddr := hello.Conn.RemoteAddr().String()
ipStr, _, err := net.SplitHostPort(remoteAddr)
if err != nil {
ipStr = remoteAddr // weird; maybe no port?
}
ipAddr, err := netip.ParseAddr(ipStr)
if err != nil {
m.logger.Error("invalid client IP address", zap.String("ip", ipStr))
return false
}
return (len(m.cidrs) == 0 || m.matches(ipAddr, m.cidrs)) &&
(len(m.notCidrs) == 0 || !m.matches(ipAddr, m.notCidrs))
}
func (MatchRemoteIP) parseIPRange(str string) ([]netip.Prefix, error) {
var cidrs []netip.Prefix
if strings.Contains(str, "/") {
ipNet, err := netip.ParsePrefix(str)
if err != nil {
return nil, fmt.Errorf("parsing CIDR expression: %v", err)
}
cidrs = append(cidrs, ipNet)
} else {
ipAddr, err := netip.ParseAddr(str)
if err != nil {
return nil, fmt.Errorf("invalid IP address: '%s': %v", str, err)
}
ip := netip.PrefixFrom(ipAddr, ipAddr.BitLen())
cidrs = append(cidrs, ip)
}
return cidrs, nil
}
func (MatchRemoteIP) matches(ip netip.Addr, ranges []netip.Prefix) bool {
for _, ipRange := range ranges {
if ipRange.Contains(ip) {
return true
}
}
return false
}
// MatchLocalIP matches based on the IP address of the interface
// receiving the connection. Specific IPs or CIDR ranges can be specified.
type MatchLocalIP struct {
// The IPs or CIDR ranges to match.
Ranges []string `json:"ranges,omitempty"`
cidrs []netip.Prefix
logger *zap.Logger
}
// CaddyModule returns the Caddy module information.
func (MatchLocalIP) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.handshake_match.local_ip",
New: func() caddy.Module { return new(MatchLocalIP) },
}
}
// Provision parses m's IP ranges, either from IP or CIDR expressions.
func (m *MatchLocalIP) Provision(ctx caddy.Context) error {
m.logger = ctx.Logger()
for _, str := range m.Ranges {
cidrs, err := m.parseIPRange(str)
if err != nil {
return err
}
m.cidrs = append(m.cidrs, cidrs...)
}
return nil
}
// Match matches hello based on the connection's local IP.
func (m MatchLocalIP) Match(hello *tls.ClientHelloInfo) bool {
localAddr := hello.Conn.LocalAddr().String()
ipStr, _, err := net.SplitHostPort(localAddr)
if err != nil {
ipStr = localAddr // weird; maybe no port?
}
ipAddr, err := netip.ParseAddr(ipStr)
if err != nil {
m.logger.Error("invalid local IP address", zap.String("ip", ipStr))
return false
}
return (len(m.cidrs) == 0 || m.matches(ipAddr, m.cidrs))
}
func (MatchLocalIP) parseIPRange(str string) ([]netip.Prefix, error) {
var cidrs []netip.Prefix
if strings.Contains(str, "/") {
ipNet, err := netip.ParsePrefix(str)
if err != nil {
return nil, fmt.Errorf("parsing CIDR expression: %v", err)
}
cidrs = append(cidrs, ipNet)
} else {
ipAddr, err := netip.ParseAddr(str)
if err != nil {
return nil, fmt.Errorf("invalid IP address: '%s': %v", str, err)
}
ip := netip.PrefixFrom(ipAddr, ipAddr.BitLen())
cidrs = append(cidrs, ip)
}
return cidrs, nil
}
func (MatchLocalIP) matches(ip netip.Addr, ranges []netip.Prefix) bool {
for _, ipRange := range ranges {
if ipRange.Contains(ip) {
return true
}
}
return false
}
// Interface guards
var (
_ ConnectionMatcher = (*MatchServerName)(nil)
_ ConnectionMatcher = (*MatchRemoteIP)(nil)
_ caddy.Provisioner = (*MatchRemoteIP)(nil)
_ caddy.Provisioner = (*MatchLocalIP)(nil)
_ ConnectionMatcher = (*MatchLocalIP)(nil)
)
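// exampleRemoteIPRangesSketch is a hypothetical illustration (not part of
// the original source) of how MatchRemoteIP range expressions are parsed:
// CIDR expressions are kept as prefixes, while single IPs become
// full-length prefixes. The ranges shown are placeholders.
func exampleRemoteIPRangesSketch() ([]netip.Prefix, error) {
	m := MatchRemoteIP{Ranges: []string{"192.168.0.0/16", "203.0.113.7"}}
	var prefixes []netip.Prefix
	for _, r := range m.Ranges {
		cidrs, err := m.parseIPRange(r)
		if err != nil {
			return nil, err
		}
		prefixes = append(prefixes, cidrs...)
	}
	return prefixes, nil
}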
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"time"
"github.com/caddyserver/certmagic"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
func init() {
caddy.RegisterModule(PermissionByHTTP{})
}
// OnDemandConfig configures on-demand TLS, for obtaining
// needed certificates at handshake-time. Because this
// feature can easily be abused, you should use this to
// establish rate limits and/or an internal endpoint that
// Caddy can "ask" if it should be allowed to manage
// certificates for a given hostname.
type OnDemandConfig struct {
// DEPRECATED. WILL BE REMOVED SOON. Use 'permission' instead.
Ask string `json:"ask,omitempty"`
// REQUIRED. A module that will determine whether a
// certificate is allowed to be loaded from storage
// or obtained from an issuer on demand.
PermissionRaw json.RawMessage `json:"permission,omitempty" caddy:"namespace=tls.permission inline_key=module"`
permission OnDemandPermission
// DEPRECATED. An optional rate limit to throttle
// the checking of storage and the issuance of
// certificates from handshakes if not already in
// storage. WILL BE REMOVED IN A FUTURE RELEASE.
RateLimit *RateLimit `json:"rate_limit,omitempty"`
}
// DEPRECATED. WILL LIKELY BE REMOVED SOON.
// Instead of using this rate limiter, use a proper tool such as a
// layer 3 or 4 firewall and/or a permission module to apply rate limits.
type RateLimit struct {
// A duration value. Storage may be checked and a certificate may be
// obtained 'burst' times during this interval.
Interval caddy.Duration `json:"interval,omitempty"`
// How many times during an interval storage can be checked or a
// certificate can be obtained.
Burst int `json:"burst,omitempty"`
}
// OnDemandPermission is a type that can give permission for
// whether a certificate should be allowed to be obtained or
// loaded from storage on-demand.
// EXPERIMENTAL: This API is experimental and subject to change.
type OnDemandPermission interface {
// CertificateAllowed returns nil if a certificate for the given
// name is allowed to be either obtained from an issuer or loaded
// from storage on-demand.
//
// The context passed in has the associated *tls.ClientHelloInfo
// value available at the certmagic.ClientHelloInfoCtxKey key.
//
// In the worst case, this function may be called as frequently
// as every TLS handshake, so it should return as quickly as possible
// to reduce latency. In the normal case, this function is only
// called when a certificate is needed that is not already loaded
// into memory ready to serve.
CertificateAllowed(ctx context.Context, name string) error
}
// PermissionByHTTP determines permission for a TLS certificate by
// making a request to an HTTP endpoint.
type PermissionByHTTP struct {
// The endpoint to access. It should be a full URL.
// A query string parameter "domain" will be added to it,
// containing the domain (or IP) for the desired certificate,
// like so: `?domain=example.com`. Generally, this endpoint
// is not exposed publicly to avoid a minor information leak
// (which domains are serviced by your application).
//
// The endpoint must return a 200 OK status if a certificate
// is allowed; anything else will cause it to be denied.
// Redirects are not followed.
Endpoint string `json:"endpoint"`
logger *zap.Logger
replacer *caddy.Replacer
}
// CaddyModule returns the Caddy module information.
func (PermissionByHTTP) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.permission.http",
New: func() caddy.Module { return new(PermissionByHTTP) },
}
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (p *PermissionByHTTP) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
if !d.Next() {
return nil
}
if !d.AllArgs(&p.Endpoint) {
return d.ArgErr()
}
return nil
}
func (p *PermissionByHTTP) Provision(ctx caddy.Context) error {
p.logger = ctx.Logger()
p.replacer = caddy.NewReplacer()
return nil
}
func (p PermissionByHTTP) CertificateAllowed(ctx context.Context, name string) error {
// run replacer on endpoint URL (for environment variables) -- return errors to prevent surprises (#5036)
askEndpoint, err := p.replacer.ReplaceOrErr(p.Endpoint, true, true)
if err != nil {
return fmt.Errorf("preparing 'ask' endpoint: %v", err)
}
askURL, err := url.Parse(askEndpoint)
if err != nil {
return fmt.Errorf("parsing ask URL: %v", err)
}
qs := askURL.Query()
qs.Set("domain", name)
askURL.RawQuery = qs.Encode()
askURLString := askURL.String()
var remote string
if chi, ok := ctx.Value(certmagic.ClientHelloInfoCtxKey).(*tls.ClientHelloInfo); ok && chi != nil {
remote = chi.Conn.RemoteAddr().String()
}
p.logger.Debug("asking permission endpoint",
zap.String("remote", remote),
zap.String("domain", name),
zap.String("url", askURLString))
resp, err := onDemandAskClient.Get(askURLString)
if err != nil {
return fmt.Errorf("checking %v to determine if certificate for hostname '%s' should be allowed: %v",
askEndpoint, name, err)
}
resp.Body.Close()
p.logger.Debug("response from permission endpoint",
zap.String("remote", remote),
zap.String("domain", name),
zap.String("url", askURLString),
zap.Int("status", resp.StatusCode))
if resp.StatusCode < 200 || resp.StatusCode > 299 {
return fmt.Errorf("%s: %w %s - non-2xx status code %d", name, ErrPermissionDenied, askEndpoint, resp.StatusCode)
}
return nil
}
// ErrPermissionDenied is an error that should be wrapped or returned when the
// configured permission module does not allow a certificate to be issued,
// to distinguish that from other errors such as connection failure.
var ErrPermissionDenied = errors.New("certificate not allowed by permission module")
// These perpetual values are used for on-demand TLS.
var (
onDemandRateLimiter = certmagic.NewRateLimiter(0, 0)
onDemandAskClient = &http.Client{
Timeout: 10 * time.Second,
CheckRedirect: func(req *http.Request, via []*http.Request) error {
return fmt.Errorf("following http redirects is not allowed")
},
}
)
// Interface guards
var (
_ OnDemandPermission = (*PermissionByHTTP)(nil)
_ caddy.Provisioner = (*PermissionByHTTP)(nil)
)
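// exampleAskHandlerSketch is a hypothetical illustration (not part of the
// original source) of an HTTP endpoint compatible with PermissionByHTTP: it
// inspects the "domain" query parameter added by CertificateAllowed and
// responds 200 for allowed names, non-2xx otherwise. The allowed set is a
// placeholder.
func exampleAskHandlerSketch(w http.ResponseWriter, r *http.Request) {
	allowed := map[string]bool{"example.com": true, "www.example.com": true}
	if allowed[r.URL.Query().Get("domain")] {
		w.WriteHeader(http.StatusOK)
		return
	}
	w.WriteHeader(http.StatusForbidden)
}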
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"crypto/tls"
"fmt"
"github.com/caddyserver/caddy/v2"
)
func init() {
caddy.RegisterModule(PEMLoader{})
}
// PEMLoader loads certificates and their associated keys by
// decoding their PEM blocks directly. This has the advantage
// of not needing to store them on disk at all.
type PEMLoader []CertKeyPEMPair
// Provision implements caddy.Provisioner.
func (pl PEMLoader) Provision(ctx caddy.Context) error {
repl, ok := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
if !ok {
repl = caddy.NewReplacer()
}
for k, pair := range pl {
for i, tag := range pair.Tags {
pair.Tags[i] = repl.ReplaceKnown(tag, "")
}
pl[k] = CertKeyPEMPair{
CertificatePEM: repl.ReplaceKnown(pair.CertificatePEM, ""),
KeyPEM: repl.ReplaceKnown(pair.KeyPEM, ""),
Tags: pair.Tags,
}
}
return nil
}
// CaddyModule returns the Caddy module information.
func (PEMLoader) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.certificates.load_pem",
New: func() caddy.Module { return new(PEMLoader) },
}
}
// CertKeyPEMPair pairs certificate and key PEM blocks.
type CertKeyPEMPair struct {
// The certificate (public key) in PEM format.
CertificatePEM string `json:"certificate"`
// The private key in PEM format.
KeyPEM string `json:"key"`
// Arbitrary values to associate with this certificate.
// Can be useful when you want to select a particular
// certificate when there may be multiple valid candidates.
Tags []string `json:"tags,omitempty"`
}
// LoadCertificates returns the certificates contained in pl.
func (pl PEMLoader) LoadCertificates() ([]Certificate, error) {
certs := make([]Certificate, 0, len(pl))
for i, pair := range pl {
cert, err := tls.X509KeyPair([]byte(pair.CertificatePEM), []byte(pair.KeyPEM))
if err != nil {
return nil, fmt.Errorf("PEM pair %d: %v", i, err)
}
certs = append(certs, Certificate{
Certificate: cert,
Tags: pair.Tags,
})
}
return certs, nil
}
// Interface guards
var (
_ CertificateLoader = (PEMLoader)(nil)
_ caddy.Provisioner = (PEMLoader)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"crypto/rand"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"log"
"runtime/debug"
"sync"
"time"
"github.com/caddyserver/caddy/v2"
)
// SessionTicketService configures and manages TLS session tickets.
type SessionTicketService struct {
// KeySource is the method by which Caddy produces or obtains
// TLS session ticket keys (STEKs). By default, Caddy generates
// them internally using a secure pseudorandom source.
KeySource json.RawMessage `json:"key_source,omitempty" caddy:"namespace=tls.stek inline_key=provider"`
// How often Caddy rotates STEKs. Default: 12h.
RotationInterval caddy.Duration `json:"rotation_interval,omitempty"`
// The maximum number of keys to keep in rotation. Default: 4.
MaxKeys int `json:"max_keys,omitempty"`
// Disables STEK rotation.
DisableRotation bool `json:"disable_rotation,omitempty"`
// Disables TLS session resumption by tickets.
Disabled bool `json:"disabled,omitempty"`
keySource STEKProvider
configs map[*tls.Config]struct{}
stopChan chan struct{}
currentKeys [][32]byte
mu *sync.Mutex
}
func (s *SessionTicketService) provision(ctx caddy.Context) error {
s.configs = make(map[*tls.Config]struct{})
s.mu = new(sync.Mutex)
// establish sane defaults
if s.RotationInterval == 0 {
s.RotationInterval = caddy.Duration(defaultSTEKRotationInterval)
}
if s.MaxKeys <= 0 {
s.MaxKeys = defaultMaxSTEKs
}
if s.KeySource == nil {
s.KeySource = json.RawMessage(`{"provider":"standard"}`)
}
// load the STEK module, which will provide keys
val, err := ctx.LoadModule(s, "KeySource")
if err != nil {
return fmt.Errorf("loading TLS session ticket ephemeral keys provider module: %s", err)
}
s.keySource = val.(STEKProvider)
// if session tickets or just rotation are
// disabled, no need to start service
if s.Disabled || s.DisableRotation {
return nil
}
// start the STEK module; this ensures we have
// a starting key before any config needs one
return s.start()
}
// start loads the starting STEKs and spawns a goroutine
// which loops to rotate the STEKs, which continues until
// stop() is called. If start() was already called, this
// is a no-op.
func (s *SessionTicketService) start() error {
if s.stopChan != nil {
return nil
}
s.stopChan = make(chan struct{})
// initializing the key source gives us our
// initial key(s) to start with; if successful,
// we need to be sure to call Next() so that
// the key source can know when it is done
initialKeys, err := s.keySource.Initialize(s)
if err != nil {
return fmt.Errorf("setting STEK module configuration: %v", err)
}
s.mu.Lock()
s.currentKeys = initialKeys
s.mu.Unlock()
// keep the keys rotated
go s.stayUpdated()
return nil
}
// stayUpdated is a blocking function which rotates
// the keys whenever new ones are sent. It reads
// from keysChan until s.stop() is called.
func (s *SessionTicketService) stayUpdated() {
defer func() {
if err := recover(); err != nil {
log.Printf("[PANIC] session ticket service: %v\n%s", err, debug.Stack())
}
}()
// this call is essential when Initialize()
// returns without error, because the stop
// channel is the only way the key source
// will know when to clean up
keysChan := s.keySource.Next(s.stopChan)
for {
select {
case newKeys := <-keysChan:
s.mu.Lock()
s.currentKeys = newKeys
configs := s.configs
s.mu.Unlock()
for cfg := range configs {
cfg.SetSessionTicketKeys(newKeys)
}
case <-s.stopChan:
return
}
}
}
// stop terminates the key rotation goroutine.
func (s *SessionTicketService) stop() {
if s.stopChan != nil {
close(s.stopChan)
}
}
// register sets the session ticket keys on cfg
// and keeps them updated. Any values registered
// must be unregistered, or they will not be
// garbage-collected. s.start() must have been
// called first. If session tickets are disabled
// or if ticket key rotation is disabled, this
// function is a no-op.
func (s *SessionTicketService) register(cfg *tls.Config) {
if s.Disabled || s.DisableRotation {
return
}
s.mu.Lock()
cfg.SetSessionTicketKeys(s.currentKeys)
s.configs[cfg] = struct{}{}
s.mu.Unlock()
}
// unregister stops session key management on cfg and
// removes the internal stored reference to cfg. If
// session tickets are disabled or if ticket key rotation
// is disabled, this function is a no-op.
func (s *SessionTicketService) unregister(cfg *tls.Config) {
if s.Disabled || s.DisableRotation {
return
}
s.mu.Lock()
delete(s.configs, cfg)
s.mu.Unlock()
}
// RotateSTEKs rotates the keys in keys by producing a new key and eliding
// the oldest one. The new slice of keys is returned.
func (s SessionTicketService) RotateSTEKs(keys [][32]byte) ([][32]byte, error) {
// produce a new key
newKey, err := s.generateSTEK()
if err != nil {
return nil, fmt.Errorf("generating STEK: %v", err)
}
// we need to prepend this new key to the list of
// keys so that it is preferred, but we need to be
// careful that we do not grow the slice larger
// than MaxKeys, otherwise we'll be storing one
// more key in memory than we expect; so be sure
// that the slice does not grow beyond the limit
// even for a brief period of time, since there's
// no guarantee when that extra allocation will
// be overwritten; this is why we first trim the
// length to one less than the max, THEN prepend the
// new key
if len(keys) >= s.MaxKeys {
keys[len(keys)-1] = [32]byte{} // zero-out memory of oldest key
keys = keys[:s.MaxKeys-1] // trim length of slice
}
keys = append([][32]byte{newKey}, keys...) // prepend new key
return keys, nil
}
// generateSTEK generates key material suitable for use as a
// session ticket ephemeral key.
func (s *SessionTicketService) generateSTEK() ([32]byte, error) {
var newTicketKey [32]byte
_, err := io.ReadFull(rand.Reader, newTicketKey[:])
return newTicketKey, err
}
// STEKProvider is a type that can provide session ticket ephemeral
// keys (STEKs).
type STEKProvider interface {
// Initialize provides the STEK configuration to the STEK
// module so that it can obtain and manage keys accordingly.
// It returns the initial key(s) to use. Implementations can
// rely on Next() being called if Initialize() returns
// without error, so that it may know when it is done.
Initialize(config *SessionTicketService) ([][32]byte, error)
// Next returns the channel through which the next session
// ticket keys will be transmitted until doneChan is closed.
// Keys should be sent on keysChan as they are updated.
// When doneChan is closed, any resources allocated in
// Initialize() must be cleaned up.
Next(doneChan <-chan struct{}) (keysChan <-chan [][32]byte)
}
const (
defaultSTEKRotationInterval = 12 * time.Hour
defaultMaxSTEKs = 4
)
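// exampleStaticSTEKProvider is a hypothetical sketch (not part of the
// original source) of a minimal STEKProvider: it hands back a single fixed
// key in Initialize and never rotates, closing its channel when the service
// stops. It is not registered as a Caddy module; a real provider in the
// tls.stek namespace would also implement caddy.Module.
type exampleStaticSTEKProvider struct {
	key [32]byte
}

func (p exampleStaticSTEKProvider) Initialize(config *SessionTicketService) ([][32]byte, error) {
	return [][32]byte{p.key}, nil
}

func (p exampleStaticSTEKProvider) Next(doneChan <-chan struct{}) <-chan [][32]byte {
	keysChan := make(chan [][32]byte)
	go func() {
		<-doneChan
		close(keysChan)
	}()
	return keysChan
}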
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"crypto/tls"
"fmt"
"github.com/caddyserver/certmagic"
"github.com/caddyserver/caddy/v2"
)
func init() {
caddy.RegisterModule(StorageLoader{})
}
// StorageLoader loads certificates and their associated keys
// from the globally configured storage module.
type StorageLoader struct {
// A list of pairs of certificate and key file names along with their
// encoding format so that they can be loaded from storage.
Pairs []CertKeyFilePair `json:"pairs,omitempty"`
// Reference to the globally configured storage module.
storage certmagic.Storage
ctx caddy.Context
}
// CaddyModule returns the Caddy module information.
func (StorageLoader) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.certificates.load_storage",
New: func() caddy.Module { return new(StorageLoader) },
}
}
// Provision loads the storage module for sl.
func (sl *StorageLoader) Provision(ctx caddy.Context) error {
sl.storage = ctx.Storage()
sl.ctx = ctx
repl, ok := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
if !ok {
repl = caddy.NewReplacer()
}
for k, pair := range sl.Pairs {
for i, tag := range pair.Tags {
pair.Tags[i] = repl.ReplaceKnown(tag, "")
}
sl.Pairs[k] = CertKeyFilePair{
Certificate: repl.ReplaceKnown(pair.Certificate, ""),
Key: repl.ReplaceKnown(pair.Key, ""),
Format: repl.ReplaceKnown(pair.Format, ""),
Tags: pair.Tags,
}
}
return nil
}
// LoadCertificates returns the certificates to be loaded by sl.
func (sl StorageLoader) LoadCertificates() ([]Certificate, error) {
certs := make([]Certificate, 0, len(sl.Pairs))
for _, pair := range sl.Pairs {
certData, err := sl.storage.Load(sl.ctx, pair.Certificate)
if err != nil {
return nil, err
}
keyData, err := sl.storage.Load(sl.ctx, pair.Key)
if err != nil {
return nil, err
}
var cert tls.Certificate
switch pair.Format {
case "":
fallthrough
case "pem":
cert, err = tls.X509KeyPair(certData, keyData)
default:
return nil, fmt.Errorf("unrecognized certificate/key encoding format: %s", pair.Format)
}
if err != nil {
return nil, err
}
certs = append(certs, Certificate{Certificate: cert, Tags: pair.Tags})
}
return certs, nil
}
// Interface guards
var (
_ CertificateLoader = (*StorageLoader)(nil)
_ caddy.Provisioner = (*StorageLoader)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"log"
"net/http"
"runtime/debug"
"sync"
"time"
"github.com/caddyserver/certmagic"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/modules/caddyevents"
)
func init() {
caddy.RegisterModule(TLS{})
caddy.RegisterModule(AutomateLoader{})
}
var (
certCache *certmagic.Cache
certCacheMu sync.RWMutex
)
// TLS provides TLS facilities including certificate
// loading and management, client auth, and more.
type TLS struct {
// Certificates to load into memory for quick recall during
// TLS handshakes. Each key is the name of a certificate
// loader module.
//
// The "automate" certificate loader module can be used to
// specify a list of subjects that need certificates to be
// managed automatically. The first matching automation
// policy will be applied to manage the certificate(s).
//
// All loaded certificates get pooled
// into the same cache and may be used to complete TLS
// handshakes for the relevant server names (SNI).
// Certificates loaded manually (anything other than
// "automate") are not automatically managed and will
// have to be refreshed manually before they expire.
CertificatesRaw caddy.ModuleMap `json:"certificates,omitempty" caddy:"namespace=tls.certificates"`
// Configures certificate automation.
Automation *AutomationConfig `json:"automation,omitempty"`
// Configures session ticket ephemeral keys (STEKs).
SessionTickets *SessionTicketService `json:"session_tickets,omitempty"`
// Configures the in-memory certificate cache.
Cache *CertCacheOptions `json:"cache,omitempty"`
// Disables OCSP stapling for manually-managed certificates only.
// To configure OCSP stapling for automated certificates, use an
// automation policy instead.
//
// Disabling OCSP stapling puts clients at greater risk, reduces their
// privacy, and usually lowers client performance. It is NOT recommended
// to disable this unless you are able to justify the costs.
// EXPERIMENTAL. Subject to change.
DisableOCSPStapling bool `json:"disable_ocsp_stapling,omitempty"`
// Disables checks in certmagic that the configured storage is ready
// and able to handle writing new content to it. These checks are
// intended to prevent information loss (newly issued certificates), but
// can be expensive on the storage.
//
// Disabling these checks should only be done when the storage
// can be trusted to have enough capacity and no other problems.
// EXPERIMENTAL. Subject to change.
DisableStorageCheck bool `json:"disable_storage_check,omitempty"`
certificateLoaders []CertificateLoader
automateNames []string
ctx caddy.Context
storageCleanTicker *time.Ticker
storageCleanStop chan struct{}
logger *zap.Logger
events *caddyevents.App
// set of subjects with managed certificates,
// and hashes of manually-loaded certificates
// (managing's value is an optional issuer key, for distinction)
managing, loaded map[string]string
}
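// exampleTLSCertificatesJSON is a hypothetical sketch (not part of the
// original source) of the "certificates" map documented above, pairing the
// special "automate" loader with a manual file loader; the domain and paths
// are placeholders.
const exampleTLSCertificatesJSON = `{
	"automate": ["example.com"],
	"load_files": [
		{"certificate": "/etc/ssl/example.crt", "key": "/etc/ssl/example.key"}
	]
}`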
// CaddyModule returns the Caddy module information.
func (TLS) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls",
New: func() caddy.Module { return new(TLS) },
}
}
// Provision sets up the configuration for the TLS app.
func (t *TLS) Provision(ctx caddy.Context) error {
eventsAppIface, err := ctx.App("events")
if err != nil {
return fmt.Errorf("getting events app: %v", err)
}
t.events = eventsAppIface.(*caddyevents.App)
t.ctx = ctx
t.logger = ctx.Logger()
repl := caddy.NewReplacer()
t.managing, t.loaded = make(map[string]string), make(map[string]string)
// set up a new certificate cache; this (re)loads all certificates
cacheOpts := certmagic.CacheOptions{
GetConfigForCert: func(cert certmagic.Certificate) (*certmagic.Config, error) {
return t.getConfigForName(cert.Names[0]), nil
},
Logger: t.logger.Named("cache"),
}
if t.Automation != nil {
cacheOpts.OCSPCheckInterval = time.Duration(t.Automation.OCSPCheckInterval)
cacheOpts.RenewCheckInterval = time.Duration(t.Automation.RenewCheckInterval)
}
if t.Cache != nil {
cacheOpts.Capacity = t.Cache.Capacity
}
if cacheOpts.Capacity <= 0 {
cacheOpts.Capacity = 10000
}
certCacheMu.Lock()
if certCache == nil {
certCache = certmagic.NewCache(cacheOpts)
} else {
certCache.SetOptions(cacheOpts)
}
certCacheMu.Unlock()
// certificate loaders
val, err := ctx.LoadModule(t, "CertificatesRaw")
if err != nil {
return fmt.Errorf("loading certificate loader modules: %s", err)
}
for modName, modIface := range val.(map[string]any) {
if modName == "automate" {
// special case; these will be loaded in later using our automation facilities,
// which we want to avoid doing during provisioning
if automateNames, ok := modIface.(*AutomateLoader); ok && automateNames != nil {
repl := caddy.NewReplacer()
subjects := make([]string, len(*automateNames))
for i, sub := range *automateNames {
subjects[i] = repl.ReplaceAll(sub, "")
}
t.automateNames = subjects
} else {
return fmt.Errorf("loading certificates with 'automate' requires array of strings, got: %T", modIface)
}
continue
}
t.certificateLoaders = append(t.certificateLoaders, modIface.(CertificateLoader))
}
// on-demand permission module
if t.Automation != nil && t.Automation.OnDemand != nil && t.Automation.OnDemand.PermissionRaw != nil {
if t.Automation.OnDemand.Ask != "" {
return fmt.Errorf("on-demand TLS config conflict: both 'ask' endpoint and a 'permission' module are specified; 'ask' is deprecated, so use only the permission module")
}
val, err := ctx.LoadModule(t.Automation.OnDemand, "PermissionRaw")
if err != nil {
return fmt.Errorf("loading on-demand TLS permission module: %v", err)
}
t.Automation.OnDemand.permission = val.(OnDemandPermission)
}
// on-demand rate limiting (TODO: deprecated, and should be removed later; rate limiting is ineffective now that permission modules are required)
if t.Automation != nil && t.Automation.OnDemand != nil && t.Automation.OnDemand.RateLimit != nil {
t.logger.Warn("DEPRECATED: on_demand.rate_limit will be removed in a future release; use permission modules or external certificate managers instead")
onDemandRateLimiter.SetMaxEvents(t.Automation.OnDemand.RateLimit.Burst)
onDemandRateLimiter.SetWindow(time.Duration(t.Automation.OnDemand.RateLimit.Interval))
} else {
// remove any existing rate limiter
onDemandRateLimiter.SetWindow(0)
onDemandRateLimiter.SetMaxEvents(0)
}
// run replacer on ask URL (for environment variables) -- return errors to prevent surprises (#5036)
if t.Automation != nil && t.Automation.OnDemand != nil && t.Automation.OnDemand.Ask != "" {
t.Automation.OnDemand.Ask, err = repl.ReplaceOrErr(t.Automation.OnDemand.Ask, true, true)
if err != nil {
return fmt.Errorf("preparing 'ask' endpoint: %v", err)
}
perm := PermissionByHTTP{
Endpoint: t.Automation.OnDemand.Ask,
}
if err := perm.Provision(ctx); err != nil {
return fmt.Errorf("provisioning 'ask' module: %v", err)
}
t.Automation.OnDemand.permission = perm
}
// automation/management policies
if t.Automation == nil {
t.Automation = new(AutomationConfig)
}
t.Automation.defaultPublicAutomationPolicy = new(AutomationPolicy)
err = t.Automation.defaultPublicAutomationPolicy.Provision(t)
if err != nil {
return fmt.Errorf("provisioning default public automation policy: %v", err)
}
for _, n := range t.automateNames {
// if any names specified by the "automate" loader do not qualify for a public
// certificate, we should initialize a default internal automation policy
// (but we don't want to do this unnecessarily, since it may prompt for password!)
if certmagic.SubjectQualifiesForPublicCert(n) {
continue
}
t.Automation.defaultInternalAutomationPolicy = &AutomationPolicy{
IssuersRaw: []json.RawMessage{json.RawMessage(`{"module":"internal"}`)},
}
err = t.Automation.defaultInternalAutomationPolicy.Provision(t)
if err != nil {
return fmt.Errorf("provisioning default internal automation policy: %v", err)
}
break
}
for i, ap := range t.Automation.Policies {
err := ap.Provision(t)
if err != nil {
return fmt.Errorf("provisioning automation policy %d: %v", i, err)
}
}
// session ticket ephemeral keys (STEK) service and provider
if t.SessionTickets != nil {
err := t.SessionTickets.provision(ctx)
if err != nil {
return fmt.Errorf("provisioning session tickets configuration: %v", err)
}
}
// load manual/static (unmanaged) certificates - we do this in
// provision so that other apps (such as http) can know which
// certificates have been manually loaded, and also so that
// commands like validate can be a better test
certCacheMu.RLock()
magic := certmagic.New(certCache, certmagic.Config{
Storage: ctx.Storage(),
Logger: t.logger,
OnEvent: t.onEvent,
OCSP: certmagic.OCSPConfig{
DisableStapling: t.DisableOCSPStapling,
},
DisableStorageCheck: t.DisableStorageCheck,
})
certCacheMu.RUnlock()
for _, loader := range t.certificateLoaders {
certs, err := loader.LoadCertificates()
if err != nil {
return fmt.Errorf("loading certificates: %v", err)
}
for _, cert := range certs {
hash, err := magic.CacheUnmanagedTLSCertificate(ctx, cert.Certificate, cert.Tags)
if err != nil {
return fmt.Errorf("caching unmanaged certificate: %v", err)
}
t.loaded[hash] = ""
}
}
return nil
}
// Validate validates t's configuration.
func (t *TLS) Validate() error {
if t.Automation != nil {
// ensure that hosts aren't repeated; since only the first
// automation policy is used, repeating a host in the lists
// isn't useful and is probably a mistake; same for two
// catch-all/default policies
var hasDefault bool
hostSet := make(map[string]int)
for i, ap := range t.Automation.Policies {
if len(ap.subjects) == 0 {
if hasDefault {
return fmt.Errorf("automation policy %d is the second policy that acts as default/catch-all, but will never be used", i)
}
hasDefault = true
}
for _, h := range ap.subjects {
if first, ok := hostSet[h]; ok {
return fmt.Errorf("automation policy %d: cannot apply more than one automation policy to host: %s (first match in policy %d)", i, h, first)
}
hostSet[h] = i
}
}
}
if t.Cache != nil {
if t.Cache.Capacity < 0 {
return fmt.Errorf("cache capacity must be >= 0")
}
}
return nil
}
// Start activates the TLS module.
func (t *TLS) Start() error {
// warn if on-demand TLS is enabled but no restrictions are in place
if t.Automation.OnDemand == nil || (t.Automation.OnDemand.Ask == "" && t.Automation.OnDemand.permission == nil) {
for _, ap := range t.Automation.Policies {
if ap.OnDemand && ap.isWildcardOrDefault() {
t.logger.Warn("YOUR SERVER MAY BE VULNERABLE TO ABUSE: on-demand TLS is enabled, but no protections are in place",
zap.String("docs", "https://caddyserver.com/docs/automatic-https#on-demand-tls"))
break
}
}
}
// now that we are running, and all manual certificates have
// been loaded, time to load the automated/managed certificates
err := t.Manage(t.automateNames)
if err != nil {
return fmt.Errorf("automate: managing %v: %v", t.automateNames, err)
}
t.keepStorageClean()
return nil
}
// Stop stops the TLS module and cleans up any allocations.
func (t *TLS) Stop() error {
// stop the storage cleaner goroutine and ticker
if t.storageCleanStop != nil {
close(t.storageCleanStop)
}
if t.storageCleanTicker != nil {
t.storageCleanTicker.Stop()
}
return nil
}
// Cleanup frees up resources allocated during Provision.
func (t *TLS) Cleanup() error {
// stop the session ticket rotation goroutine
if t.SessionTickets != nil {
t.SessionTickets.stop()
}
// if a new TLS app was loaded, remove certificates from the cache that are no longer
// being managed or loaded by the new config; if there is no more TLS app running,
// then stop cert maintenance and let the cert cache be GC'ed
if nextTLS, err := caddy.ActiveContext().AppIfConfigured("tls"); err == nil && nextTLS != nil {
nextTLSApp := nextTLS.(*TLS)
// compute which certificates were managed or loaded into the cert cache by this
// app instance (which is being stopped) that are not managed or loaded by the
// new app instance (which just started), and remove them from the cache
var noLongerManaged []certmagic.SubjectIssuer
var reManage, noLongerLoaded []string
for subj, currentIssuerKey := range t.managing {
// It's a bit nuanced: managed certs can sometimes be different enough that we have to
// swap them out for a different one, even if they are for the same subject/domain.
// We consider "private" certs (internal CA/locally-trusted/etc) to be significantly
// distinct from "public" certs (production CAs/globally-trusted/etc) because of the
// implications when it comes to actual deployments: switching between an internal CA
// and a production CA, for example, is quite significant. Switching from one public CA
// to another, however, is not, and for our purposes we consider those to be the same.
// Anyway, if the next TLS app does not manage a cert for this name at all, definitely
// remove it from the cache. But if it does, and it's not the same kind of issuer/CA
// as we have, also remove it, so that it can swap it out for the right one.
if nextIssuerKey, ok := nextTLSApp.managing[subj]; !ok || nextIssuerKey != currentIssuerKey {
// next app is not managing a cert for this domain at all or is using a different issuer, so remove it
noLongerManaged = append(noLongerManaged, certmagic.SubjectIssuer{Subject: subj, IssuerKey: currentIssuerKey})
// then, if the next app is managing a cert for this name, but with a different issuer, re-manage it
if ok && nextIssuerKey != currentIssuerKey {
reManage = append(reManage, subj)
}
}
}
for hash := range t.loaded {
if _, ok := nextTLSApp.loaded[hash]; !ok {
noLongerLoaded = append(noLongerLoaded, hash)
}
}
// remove the certs
certCacheMu.RLock()
certCache.RemoveManaged(noLongerManaged)
certCache.Remove(noLongerLoaded)
certCacheMu.RUnlock()
// give the new TLS app a "kick" to manage certs that it is configured for
// with its own configuration instead of the one we just evicted
if err := nextTLSApp.Manage(reManage); err != nil {
t.logger.Error("re-managing unloaded certificates with new config",
zap.Strings("subjects", reManage),
zap.Error(err))
}
} else {
// no more TLS app running, so delete in-memory cert cache
certCache.Stop()
certCacheMu.Lock()
certCache = nil
certCacheMu.Unlock()
}
return nil
}
// Manage immediately begins managing names according to their
// matching automation policies.
func (t *TLS) Manage(names []string) error {
// for a large number of names, we can be more memory-efficient
// by making only one certmagic.Config for all the names that
// use that config, rather than calling ManageAsync once for
// every name; so first, bin names by AutomationPolicy
policyToNames := make(map[*AutomationPolicy][]string)
for _, name := range names {
ap := t.getAutomationPolicyForName(name)
policyToNames[ap] = append(policyToNames[ap], name)
}
// now that names are grouped by policy, we can simply make one
// certmagic.Config for each (potentially large) group of names
// and call ManageAsync just once for the whole batch
for ap, names := range policyToNames {
err := ap.magic.ManageAsync(t.ctx.Context, names)
if err != nil {
return fmt.Errorf("automate: manage %v: %v", names, err)
}
for _, name := range names {
// certs that are issued solely by our internal issuer are a little bit of
// a special case: if you have an initial config that manages example.com
// using internal CA, then after testing it you switch to a production CA,
// you wouldn't want to keep using the same self-signed cert, obviously;
// so we differentiate these by associating the subject with its issuer key;
// we do this because CertMagic has no notion of "InternalIssuer" like we
// do, so we have to do this logic ourselves
var issuerKey string
if len(ap.Issuers) == 1 {
if intIss, ok := ap.Issuers[0].(*InternalIssuer); ok && intIss != nil {
issuerKey = intIss.IssuerKey()
}
}
t.managing[name] = issuerKey
}
}
return nil
}
// HandleHTTPChallenge ensures that the ACME HTTP challenge or ZeroSSL HTTP
// validation request is handled for the certificate named by r.Host, if it
// is an HTTP challenge request. It requires that the automation policy for
// r.Host has an issuer that implements GetACMEIssuer() or is a *ZeroSSLIssuer.
func (t *TLS) HandleHTTPChallenge(w http.ResponseWriter, r *http.Request) bool {
acmeChallenge := certmagic.LooksLikeHTTPChallenge(r)
zerosslValidation := certmagic.LooksLikeZeroSSLHTTPValidation(r)
// no-op if it's not an ACME challenge request
if !acmeChallenge && !zerosslValidation {
return false
}
// try all the issuers until we find the one that initiated the challenge
ap := t.getAutomationPolicyForName(r.Host)
if acmeChallenge {
type acmeCapable interface{ GetACMEIssuer() *ACMEIssuer }
for _, iss := range ap.magic.Issuers {
if acmeIssuer, ok := iss.(acmeCapable); ok {
if acmeIssuer.GetACMEIssuer().issuer.HandleHTTPChallenge(w, r) {
return true
}
}
}
// it's possible another server in this process initiated the challenge;
// users have requested that Caddy only handle HTTP challenges it initiated,
// so that users can proxy the others through to their backends; but we
// might not have an automation policy for all identifiers that are trying
// to get certificates (e.g. the admin endpoint), so we do this manual check
if challenge, ok := certmagic.GetACMEChallenge(r.Host); ok {
return certmagic.SolveHTTPChallenge(t.logger, w, r, challenge.Challenge)
}
} else if zerosslValidation {
for _, iss := range ap.magic.Issuers {
if ziss, ok := iss.(*ZeroSSLIssuer); ok {
if ziss.issuer.HandleZeroSSLHTTPValidation(w, r) {
return true
}
}
}
}
return false
}
// AddAutomationPolicy provisions and adds ap to the list of the app's
// automation policies. If an existing automation policy exists that has
// fewer hosts in its list than ap does, ap will be inserted before that
// other policy (this helps ensure that ap will be prioritized/chosen
// over, say, a catch-all policy).
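//
// For example (hypothetical subjects): if the existing policies cover
// ["*.example.com"] followed by a catch-all policy, adding a policy for
// ["api.example.com"] inserts it before the "*.example.com" policy, since
// that existing policy is a wildcard superset of the new one.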
func (t *TLS) AddAutomationPolicy(ap *AutomationPolicy) error {
if t.Automation == nil {
t.Automation = new(AutomationConfig)
}
err := ap.Provision(t)
if err != nil {
return err
}
// sort new automation policies just before any other which is a superset
// of this one; if we find an existing policy that covers every subject in
// ap but less specifically (e.g. a catch-all policy, or one with wildcards
// or with fewer subjects), insert ap just before it, otherwise ap would
// never be used because the first matching policy is more general
for i, existing := range t.Automation.Policies {
// first see if existing is superset of ap for all names
var otherIsSuperset bool
outer:
for _, thisSubj := range ap.subjects {
for _, otherSubj := range existing.subjects {
if certmagic.MatchWildcard(thisSubj, otherSubj) {
otherIsSuperset = true
break outer
}
}
}
// if existing AP is a superset or if it contains fewer names (i.e. is
// more general), then new AP is more specific, so insert before it
if otherIsSuperset || len(existing.SubjectsRaw) < len(ap.SubjectsRaw) {
t.Automation.Policies = append(t.Automation.Policies[:i],
append([]*AutomationPolicy{ap}, t.Automation.Policies[i:]...)...)
return nil
}
}
// otherwise just append the new one
t.Automation.Policies = append(t.Automation.Policies, ap)
return nil
}
func (t *TLS) getConfigForName(name string) *certmagic.Config {
ap := t.getAutomationPolicyForName(name)
return ap.magic
}
// getAutomationPolicyForName returns the first matching automation policy
// for the given subject name. If no policy matches, one of the default
// policies is returned, chosen according to whether the name qualifies
// for a public certificate or not.
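//
// For example (hypothetical subjects): with a policy whose subjects are
// ["*.example.com"], the name "api.example.com" matches it via wildcard
// matching; a name like "localhost", which does not qualify for a public
// certificate, falls back to the default internal policy if one exists.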
func (t *TLS) getAutomationPolicyForName(name string) *AutomationPolicy {
for _, ap := range t.Automation.Policies {
if len(ap.subjects) == 0 {
return ap // no host filter is an automatic match
}
for _, h := range ap.subjects {
if certmagic.MatchWildcard(name, h) {
return ap
}
}
}
if certmagic.SubjectQualifiesForPublicCert(name) || t.Automation.defaultInternalAutomationPolicy == nil {
return t.Automation.defaultPublicAutomationPolicy
}
return t.Automation.defaultInternalAutomationPolicy
}
// AllMatchingCertificates returns the list of all certificates in
// the cache which could be used to satisfy the given SAN.
func AllMatchingCertificates(san string) []certmagic.Certificate {
return certCache.AllMatchingCertificates(san)
}
func (t *TLS) HasCertificateForSubject(subject string) bool {
certCacheMu.RLock()
allMatchingCerts := certCache.AllMatchingCertificates(subject)
certCacheMu.RUnlock()
for _, cert := range allMatchingCerts {
// check if the cert is manually loaded by this config
if _, ok := t.loaded[cert.Hash()]; ok {
return true
}
// check if the cert is automatically managed by this config
for _, name := range cert.Names {
if _, ok := t.managing[name]; ok {
return true
}
}
}
return false
}
// keepStorageClean starts a goroutine that immediately cleans up all
// known storage units if it was not recently done, and then runs the
// operation at every tick from t.storageCleanTicker.
func (t *TLS) keepStorageClean() {
t.storageCleanTicker = time.NewTicker(t.storageCleanInterval())
t.storageCleanStop = make(chan struct{})
go func() {
defer func() {
if err := recover(); err != nil {
log.Printf("[PANIC] storage cleaner: %v\n%s", err, debug.Stack())
}
}()
t.cleanStorageUnits()
for {
select {
case <-t.storageCleanStop:
return
case <-t.storageCleanTicker.C:
t.cleanStorageUnits()
}
}
}()
}
func (t *TLS) cleanStorageUnits() {
storageCleanMu.Lock()
defer storageCleanMu.Unlock()
// TODO: This check might not be needed anymore now that CertMagic syncs
// and throttles storage cleaning globally across the cluster.
// The original comment below might be outdated:
//
// If storage was cleaned recently, don't do it again for now. Although the ticker
// calling this function drops missed ticks for us, config reloads discard the old
// ticker and replace it with a new one, possibly invoking a cleaning to happen again
// too soon. (We divide the interval by 2 because the actual cleaning takes non-zero
// time, and we don't want to skip cleanings if we don't have to; whereas if a cleaning
// took most of the interval, we'd probably want to skip the next one so we aren't
// constantly cleaning. This allows cleanings to take up to half the interval's
// duration before we decide to skip the next one.)
if !storageClean.IsZero() && time.Since(storageClean) < t.storageCleanInterval()/2 {
return
}
id, err := caddy.InstanceID()
if err != nil {
t.logger.Warn("unable to get instance ID; storage clean stamps will be incomplete", zap.Error(err))
}
options := certmagic.CleanStorageOptions{
Logger: t.logger,
InstanceID: id.String(),
Interval: t.storageCleanInterval(),
OCSPStaples: true,
ExpiredCerts: true,
ExpiredCertGracePeriod: 24 * time.Hour * 14,
}
// start with the default/global storage
err = certmagic.CleanStorage(t.ctx, t.ctx.Storage(), options)
if err != nil {
// probably don't want to return early, since we should still
// see if any other storages can get cleaned up
t.logger.Error("could not clean default/global storage", zap.Error(err))
}
// then clean each storage defined in ACME automation policies
if t.Automation != nil {
for _, ap := range t.Automation.Policies {
if ap.storage == nil {
continue
}
if err := certmagic.CleanStorage(t.ctx, ap.storage, options); err != nil {
t.logger.Error("could not clean storage configured in automation policy", zap.Error(err))
}
}
}
// remember last time storage was finished cleaning
storageClean = time.Now()
t.logger.Info("finished cleaning storage units")
}
func (t *TLS) storageCleanInterval() time.Duration {
if t.Automation != nil && t.Automation.StorageCleanInterval > 0 {
return time.Duration(t.Automation.StorageCleanInterval)
}
return defaultStorageCleanInterval
}
// onEvent translates CertMagic events into Caddy events then dispatches them.
func (t *TLS) onEvent(ctx context.Context, eventName string, data map[string]any) error {
evt := t.events.Emit(t.ctx, eventName, data)
return evt.Aborted
}
// CertificateLoader is a type that can load certificates.
// Certificates can optionally be associated with tags.
type CertificateLoader interface {
LoadCertificates() ([]Certificate, error)
}
// Certificate is a TLS certificate, optionally
// associated with arbitrary tags.
type Certificate struct {
tls.Certificate
Tags []string
}
// AutomateLoader will automatically manage certificates for the names in the
// list, including obtaining and renewing certificates. Automated certificates
// are managed according to their matching automation policy, configured
// elsewhere in this app.
//
// Technically, this is a no-op certificate loader module that is treated as
// a special case: it uses this app's automation features to load certificates
// for the list of hostnames, rather than loading certificates manually. But
// the end result is the same: certificates for these subject names will be
// loaded into the in-memory cache and may then be used.
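//
// A minimal JSON sketch (hostnames are hypothetical) of this loader as it
// typically appears in the tls app's "certificates" map, keyed by its
// module name:
//
// "certificates": {
// "automate": ["example.com", "www.example.com"]
// }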
type AutomateLoader []string
// CaddyModule returns the Caddy module information.
func (AutomateLoader) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.certificates.automate",
New: func() caddy.Module { return new(AutomateLoader) },
}
}
// CertCacheOptions configures the certificate cache.
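//
// A minimal JSON sketch, assuming these options appear under a "cache" key
// in the tls app config (the capacity value is arbitrary):
//
// "cache": {
// "capacity": 10000
// }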
type CertCacheOptions struct {
// Maximum number of certificates to allow in the
// cache. If reached, certificates will be randomly
// evicted to make room for new ones. Default: 10,000
Capacity int `json:"capacity,omitempty"`
}
// Variables related to storage cleaning.
var (
defaultStorageCleanInterval = 24 * time.Hour
storageClean time.Time
storageCleanMu sync.Mutex
)
// Interface guards
var (
_ caddy.App = (*TLS)(nil)
_ caddy.Provisioner = (*TLS)(nil)
_ caddy.Validator = (*TLS)(nil)
_ caddy.CleanerUpper = (*TLS)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"crypto/tls"
"crypto/x509"
"fmt"
"github.com/caddyserver/certmagic"
"github.com/klauspost/cpuid/v2"
)
// CipherSuiteNameSupported returns true if name is
// a supported cipher suite.
func CipherSuiteNameSupported(name string) bool {
return CipherSuiteID(name) != 0
}
// CipherSuiteID returns the ID of the cipher suite associated with
// the given name, or 0 if the name is not recognized/supported.
func CipherSuiteID(name string) uint16 {
for _, cs := range SupportedCipherSuites() {
if cs.Name == name {
return cs.ID
}
}
return 0
}
// SupportedCipherSuites returns a list of all the cipher suites
// Caddy supports. The list is NOT ordered by security preference.
func SupportedCipherSuites() []*tls.CipherSuite {
return tls.CipherSuites()
}
// defaultCipherSuitesWithAESNI is the ordered list of cipher
// suites we want to support by default, assuming AES-NI
// (hardware acceleration for AES) is available.
var defaultCipherSuitesWithAESNI = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
}
// defaultCipherSuitesWithoutAESNI is the ordered list of cipher
// suites we want to support by default, assuming AES-NI
// (hardware acceleration for AES) is NOT available.
var defaultCipherSuitesWithoutAESNI = []uint16{
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
}
// getOptimalDefaultCipherSuites returns an appropriate list of
// cipher suites to use, depending on hardware support for AES.
//
// See https://github.com/caddyserver/caddy/issues/1674
func getOptimalDefaultCipherSuites() []uint16 {
if cpuid.CPU.Supports(cpuid.AESNI) {
return defaultCipherSuitesWithAESNI
}
return defaultCipherSuitesWithoutAESNI
}
// SupportedCurves is the unordered map of supported curves.
// https://golang.org/pkg/crypto/tls/#CurveID
var SupportedCurves = map[string]tls.CurveID{
"x25519": tls.X25519,
"secp256r1": tls.CurveP256,
"secp384r1": tls.CurveP384,
"secp521r1": tls.CurveP521,
}
// supportedCertKeyTypes is all the key types that are supported
// for certificates that are obtained through ACME.
var supportedCertKeyTypes = map[string]certmagic.KeyType{
"rsa2048": certmagic.RSA2048,
"rsa4096": certmagic.RSA4096,
"p256": certmagic.P256,
"p384": certmagic.P384,
"ed25519": certmagic.ED25519,
}
// defaultCurves is the list of only the curves we want to use
// by default, in descending order of preference.
//
// This list should only include curves which are fast by design
// (e.g. X25519) and those for which an optimized assembly
// implementation exists (e.g. P256). The latter ones can be
// found here:
// https://github.com/golang/go/tree/master/src/crypto/elliptic
var defaultCurves = []tls.CurveID{
tls.X25519,
tls.CurveP256,
}
// SupportedProtocols is a map of supported protocols.
var SupportedProtocols = map[string]uint16{
"tls1.2": tls.VersionTLS12,
"tls1.3": tls.VersionTLS13,
}
// unsupportedProtocols is a map of unsupported protocols.
// Used for logging only, not enforcement.
var unsupportedProtocols = map[string]uint16{
//nolint:staticcheck
"ssl3.0": tls.VersionSSL30,
"tls1.0": tls.VersionTLS10,
"tls1.1": tls.VersionTLS11,
}
// publicKeyAlgorithms is the map of supported public key algorithms.
var publicKeyAlgorithms = map[string]x509.PublicKeyAlgorithm{
"rsa": x509.RSA,
"dsa": x509.DSA,
"ecdsa": x509.ECDSA,
}
// ProtocolName returns the standard name for the passed protocol version ID
// (e.g. "TLS1.3") or a fallback representation of the ID value if the version
// is not supported.
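//
// For example, ProtocolName(tls.VersionTLS13) returns "tls1.3", while an
// unrecognized ID such as 0x0305 is rendered as "0x0305".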
func ProtocolName(id uint16) string {
for k, v := range SupportedProtocols {
if v == id {
return k
}
}
for k, v := range unsupportedProtocols {
if v == id {
return k
}
}
return fmt.Sprintf("0x%04x", id)
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddytls
import (
"context"
"crypto/x509"
"fmt"
"strconv"
"time"
"github.com/caddyserver/certmagic"
"go.uber.org/zap"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
func init() {
caddy.RegisterModule(new(ZeroSSLIssuer))
}
// ZeroSSLIssuer uses the ZeroSSL API to get certificates.
// Note that this is distinct from ZeroSSL's ACME endpoint.
// To use ZeroSSL's ACME endpoint, use the ACMEIssuer
// configured with ZeroSSL's ACME directory endpoint.
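//
// A minimal JSON sketch of this issuer as it might appear in an automation
// policy's issuers list (assuming issuers are keyed by a "module" inline
// key; the placeholder is illustrative):
//
// {
// "module": "zerossl",
// "api_key": "{env.ZEROSSL_API_KEY}"
// }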
type ZeroSSLIssuer struct {
// The API key (or "access key") for using the ZeroSSL API.
// REQUIRED.
APIKey string `json:"api_key,omitempty"`
// How many days the certificate should be valid for.
// Only certain values are accepted; see ZeroSSL docs.
ValidityDays int `json:"validity_days,omitempty"`
// The host to bind to when opening a listener for
// verifying domain names (or IPs).
ListenHost string `json:"listen_host,omitempty"`
// If HTTP is forwarded from port 80, specify the
// forwarded port here.
AlternateHTTPPort int `json:"alternate_http_port,omitempty"`
// Use CNAME validation instead of HTTP. ZeroSSL's
// API uses CNAME records for DNS validation, similar
// to how Let's Encrypt uses TXT records for the
// DNS challenge.
CNAMEValidation *DNSChallengeConfig `json:"cname_validation,omitempty"`
logger *zap.Logger
storage certmagic.Storage
issuer *certmagic.ZeroSSLIssuer
}
// CaddyModule returns the Caddy module information.
func (*ZeroSSLIssuer) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.issuance.zerossl",
New: func() caddy.Module { return new(ZeroSSLIssuer) },
}
}
// Provision sets up the issuer.
func (iss *ZeroSSLIssuer) Provision(ctx caddy.Context) error {
iss.logger = ctx.Logger()
iss.storage = ctx.Storage()
repl := caddy.NewReplacer()
var dnsManager *certmagic.DNSManager
if iss.CNAMEValidation != nil && len(iss.CNAMEValidation.ProviderRaw) > 0 {
val, err := ctx.LoadModule(iss.CNAMEValidation, "ProviderRaw")
if err != nil {
return fmt.Errorf("loading DNS provider module: %v", err)
}
dnsManager = &certmagic.DNSManager{
DNSProvider: val.(certmagic.DNSProvider),
TTL: time.Duration(iss.CNAMEValidation.TTL),
PropagationDelay: time.Duration(iss.CNAMEValidation.PropagationDelay),
PropagationTimeout: time.Duration(iss.CNAMEValidation.PropagationTimeout),
Resolvers: iss.CNAMEValidation.Resolvers,
OverrideDomain: iss.CNAMEValidation.OverrideDomain,
Logger: iss.logger.Named("cname"),
}
}
iss.issuer = &certmagic.ZeroSSLIssuer{
APIKey: repl.ReplaceAll(iss.APIKey, ""),
ValidityDays: iss.ValidityDays,
ListenHost: iss.ListenHost,
AltHTTPPort: iss.AlternateHTTPPort,
Storage: iss.storage,
CNAMEValidation: dnsManager,
Logger: iss.logger,
}
return nil
}
// Issue obtains a certificate for the given csr.
func (iss *ZeroSSLIssuer) Issue(ctx context.Context, csr *x509.CertificateRequest) (*certmagic.IssuedCertificate, error) {
return iss.issuer.Issue(ctx, csr)
}
// IssuerKey returns the unique issuer key for the configured CA endpoint.
func (iss *ZeroSSLIssuer) IssuerKey() string {
return iss.issuer.IssuerKey()
}
// Revoke revokes the given certificate.
func (iss *ZeroSSLIssuer) Revoke(ctx context.Context, cert certmagic.CertificateResource, reason int) error {
return iss.issuer.Revoke(ctx, cert, reason)
}
// UnmarshalCaddyfile deserializes Caddyfile tokens into iss.
//
// ... zerossl <api_key> {
// validity_days <days>
// alt_http_port <port>
// dns <provider_name> ...
// propagation_delay <duration>
// propagation_timeout <duration>
// resolvers <list...>
// dns_ttl <duration>
// }
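//
// For example (the API key is shown as an environment placeholder, and the
// values are illustrative), a site's tls directive might use:
//
// tls {
// issuer zerossl {env.ZEROSSL_API_KEY} {
// validity_days 90
// }
// }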
func (iss *ZeroSSLIssuer) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume issuer name
// API key is required
if !d.NextArg() {
return d.ArgErr()
}
iss.APIKey = d.Val()
if d.NextArg() {
return d.ArgErr()
}
for nesting := d.Nesting(); d.NextBlock(nesting); {
switch d.Val() {
case "validity_days":
if iss.ValidityDays != 0 {
return d.Errf("validity days is already specified: %d", iss.ValidityDays)
}
if !d.NextArg() {
return d.ArgErr()
}
days, err := strconv.Atoi(d.Val())
if err != nil {
return d.Errf("invalid number of days %s: %v", d.Val(), err)
}
iss.ValidityDays = days
case "alt_http_port":
if !d.NextArg() {
return d.ArgErr()
}
port, err := strconv.Atoi(d.Val())
if err != nil {
return d.Errf("invalid port %s: %v", d.Val(), err)
}
iss.AlternateHTTPPort = port
case "dns":
if !d.NextArg() {
return d.ArgErr()
}
provName := d.Val()
if iss.CNAMEValidation == nil {
iss.CNAMEValidation = new(DNSChallengeConfig)
}
unm, err := caddyfile.UnmarshalModule(d, "dns.providers."+provName)
if err != nil {
return err
}
iss.CNAMEValidation.ProviderRaw = caddyconfig.JSONModuleObject(unm, "name", provName, nil)
case "propagation_delay":
if !d.NextArg() {
return d.ArgErr()
}
delayStr := d.Val()
delay, err := caddy.ParseDuration(delayStr)
if err != nil {
return d.Errf("invalid propagation_delay duration %s: %v", delayStr, err)
}
if iss.CNAMEValidation == nil {
iss.CNAMEValidation = new(DNSChallengeConfig)
}
iss.CNAMEValidation.PropagationDelay = caddy.Duration(delay)
case "propagation_timeout":
if !d.NextArg() {
return d.ArgErr()
}
timeoutStr := d.Val()
var timeout time.Duration
if timeoutStr == "-1" {
timeout = time.Duration(-1)
} else {
var err error
timeout, err = caddy.ParseDuration(timeoutStr)
if err != nil {
return d.Errf("invalid propagation_timeout duration %s: %v", timeoutStr, err)
}
}
if iss.CNAMEValidation == nil {
iss.CNAMEValidation = new(DNSChallengeConfig)
}
iss.CNAMEValidation.PropagationTimeout = caddy.Duration(timeout)
case "resolvers":
if iss.CNAMEValidation == nil {
iss.CNAMEValidation = new(DNSChallengeConfig)
}
iss.CNAMEValidation.Resolvers = d.RemainingArgs()
if len(iss.CNAMEValidation.Resolvers) == 0 {
return d.ArgErr()
}
case "dns_ttl":
if !d.NextArg() {
return d.ArgErr()
}
ttlStr := d.Val()
ttl, err := caddy.ParseDuration(ttlStr)
if err != nil {
return d.Errf("invalid dns_ttl duration %s: %v", ttlStr, err)
}
if iss.CNAMEValidation == nil {
iss.CNAMEValidation = new(DNSChallengeConfig)
}
iss.CNAMEValidation.TTL = caddy.Duration(ttl)
default:
return d.Errf("unrecognized zerossl issuer property: %s", d.Val())
}
}
return nil
}
// Interface guards
var (
_ certmagic.Issuer = (*ZeroSSLIssuer)(nil)
_ certmagic.Revoker = (*ZeroSSLIssuer)(nil)
_ caddy.Provisioner = (*ZeroSSLIssuer)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logging
import (
"encoding/json"
"fmt"
"os"
"strings"
"time"
"go.uber.org/zap"
"go.uber.org/zap/buffer"
"go.uber.org/zap/zapcore"
"golang.org/x/term"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
func init() {
caddy.RegisterModule(AppendEncoder{})
}
// AppendEncoder can be used to add fields to all log entries
// that pass through it. It is a wrapper around another
// encoder, which it uses to actually encode the log entries.
// It is most useful for adding information about the Caddy
// instance that is producing the log entries, possibly via
// an environment variable.
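//
// A minimal Caddyfile sketch (the field name and environment placeholder
// are hypothetical), wrapping the console encoder and appending one field
// to every log entry:
//
// format append {
// wrap console
// fields {
// node_id {env.NODE_ID}
// }
// }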
type AppendEncoder struct {
// The underlying encoder that actually encodes the
// log entries. If not specified, defaults to "json",
// unless the output is a terminal, in which case
// it defaults to "console".
WrappedRaw json.RawMessage `json:"wrap,omitempty" caddy:"namespace=caddy.logging.encoders inline_key=format"`
// A map of field names to their values. The values
// can be global placeholders (e.g. env vars), or constants.
// Note that the encoder does not run as part of an HTTP
// request context, so request placeholders are not available.
Fields map[string]any `json:"fields,omitempty"`
wrapped zapcore.Encoder
repl *caddy.Replacer
wrappedIsDefault bool
ctx caddy.Context
}
// CaddyModule returns the Caddy module information.
func (AppendEncoder) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "caddy.logging.encoders.append",
New: func() caddy.Module { return new(AppendEncoder) },
}
}
// Provision sets up the encoder.
func (fe *AppendEncoder) Provision(ctx caddy.Context) error {
fe.ctx = ctx
fe.repl = caddy.NewReplacer()
if fe.WrappedRaw == nil {
// if wrap is not specified, default to JSON
fe.wrapped = &JSONEncoder{}
if p, ok := fe.wrapped.(caddy.Provisioner); ok {
if err := p.Provision(ctx); err != nil {
return fmt.Errorf("provisioning fallback encoder module: %v", err)
}
}
fe.wrappedIsDefault = true
} else {
// set up wrapped encoder
val, err := ctx.LoadModule(fe, "WrappedRaw")
if err != nil {
return fmt.Errorf("loading fallback encoder module: %v", err)
}
fe.wrapped = val.(zapcore.Encoder)
}
return nil
}
// ConfigureDefaultFormat will set the default format to "console"
// if the writer is a terminal. If already configured, it passes
// through the writer so a deeply nested encoder can configure
// its own default format.
func (fe *AppendEncoder) ConfigureDefaultFormat(wo caddy.WriterOpener) error {
if !fe.wrappedIsDefault {
if cfd, ok := fe.wrapped.(caddy.ConfiguresFormatterDefault); ok {
return cfd.ConfigureDefaultFormat(wo)
}
return nil
}
if caddy.IsWriterStandardStream(wo) && term.IsTerminal(int(os.Stderr.Fd())) {
fe.wrapped = &ConsoleEncoder{}
if p, ok := fe.wrapped.(caddy.Provisioner); ok {
if err := p.Provision(fe.ctx); err != nil {
return fmt.Errorf("provisioning fallback encoder module: %v", err)
}
}
}
return nil
}
// UnmarshalCaddyfile sets up the module from Caddyfile tokens. Syntax:
//
// append {
// wrap <another encoder>
// fields {
// <field> <value>
// }
// <field> <value>
// }
func (fe *AppendEncoder) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume encoder name
// parse a field
parseField := func() error {
if fe.Fields == nil {
fe.Fields = make(map[string]any)
}
field := d.Val()
if !d.NextArg() {
return d.ArgErr()
}
fe.Fields[field] = d.ScalarVal()
if d.NextArg() {
return d.ArgErr()
}
return nil
}
for d.NextBlock(0) {
switch d.Val() {
case "wrap":
if !d.NextArg() {
return d.ArgErr()
}
moduleName := d.Val()
moduleID := "caddy.logging.encoders." + moduleName
unm, err := caddyfile.UnmarshalModule(d, moduleID)
if err != nil {
return err
}
enc, ok := unm.(zapcore.Encoder)
if !ok {
return d.Errf("module %s (%T) is not a zapcore.Encoder", moduleID, unm)
}
fe.WrappedRaw = caddyconfig.JSONModuleObject(enc, "format", moduleName, nil)
case "fields":
for nesting := d.Nesting(); d.NextBlock(nesting); {
err := parseField()
if err != nil {
return err
}
}
default:
// if unknown, assume it's a field so that
// the config can be flat
err := parseField()
if err != nil {
return err
}
}
}
return nil
}
// AddArray is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddArray(key string, marshaler zapcore.ArrayMarshaler) error {
return fe.wrapped.AddArray(key, marshaler)
}
// AddObject is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddObject(key string, marshaler zapcore.ObjectMarshaler) error {
return fe.wrapped.AddObject(key, marshaler)
}
// AddBinary is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddBinary(key string, value []byte) {
fe.wrapped.AddBinary(key, value)
}
// AddByteString is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddByteString(key string, value []byte) {
fe.wrapped.AddByteString(key, value)
}
// AddBool is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddBool(key string, value bool) {
fe.wrapped.AddBool(key, value)
}
// AddComplex128 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddComplex128(key string, value complex128) {
fe.wrapped.AddComplex128(key, value)
}
// AddComplex64 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddComplex64(key string, value complex64) {
fe.wrapped.AddComplex64(key, value)
}
// AddDuration is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddDuration(key string, value time.Duration) {
fe.wrapped.AddDuration(key, value)
}
// AddFloat64 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddFloat64(key string, value float64) {
fe.wrapped.AddFloat64(key, value)
}
// AddFloat32 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddFloat32(key string, value float32) {
fe.wrapped.AddFloat32(key, value)
}
// AddInt is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddInt(key string, value int) {
fe.wrapped.AddInt(key, value)
}
// AddInt64 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddInt64(key string, value int64) {
fe.wrapped.AddInt64(key, value)
}
// AddInt32 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddInt32(key string, value int32) {
fe.wrapped.AddInt32(key, value)
}
// AddInt16 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddInt16(key string, value int16) {
fe.wrapped.AddInt16(key, value)
}
// AddInt8 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddInt8(key string, value int8) {
fe.wrapped.AddInt8(key, value)
}
// AddString is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddString(key, value string) {
fe.wrapped.AddString(key, value)
}
// AddTime is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddTime(key string, value time.Time) {
fe.wrapped.AddTime(key, value)
}
// AddUint is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddUint(key string, value uint) {
fe.wrapped.AddUint(key, value)
}
// AddUint64 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddUint64(key string, value uint64) {
fe.wrapped.AddUint64(key, value)
}
// AddUint32 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddUint32(key string, value uint32) {
fe.wrapped.AddUint32(key, value)
}
// AddUint16 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddUint16(key string, value uint16) {
fe.wrapped.AddUint16(key, value)
}
// AddUint8 is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddUint8(key string, value uint8) {
fe.wrapped.AddUint8(key, value)
}
// AddUintptr is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddUintptr(key string, value uintptr) {
fe.wrapped.AddUintptr(key, value)
}
// AddReflected is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) AddReflected(key string, value any) error {
return fe.wrapped.AddReflected(key, value)
}
// OpenNamespace is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) OpenNamespace(key string) {
fe.wrapped.OpenNamespace(key)
}
// Clone is part of the zapcore.ObjectEncoder interface.
func (fe AppendEncoder) Clone() zapcore.Encoder {
return AppendEncoder{
Fields: fe.Fields,
wrapped: fe.wrapped.Clone(),
repl: fe.repl,
}
}
// EncodeEntry partially implements the zapcore.Encoder interface.
func (fe AppendEncoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {
fe.wrapped = fe.wrapped.Clone()
for _, field := range fields {
field.AddTo(fe)
}
// append fields from config
for key, value := range fe.Fields {
// if the value is a string
if str, ok := value.(string); ok {
isPlaceholder := strings.HasPrefix(str, "{") &&
strings.HasSuffix(str, "}") &&
strings.Count(str, "{") == 1
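// a value counts as a single placeholder only when the whole string is
// one brace-delimited token, e.g. "{env.NODE_ID}"; values that merely
// contain braces, or contain more than one "{", are used as-is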
if isPlaceholder {
// and it looks like a placeholder, evaluate it
replaced, _ := fe.repl.Get(strings.Trim(str, "{}"))
zap.Any(key, replaced).AddTo(fe)
} else {
// just use the string as-is
zap.String(key, str).AddTo(fe)
}
} else {
// not a string, so use the value as any
zap.Any(key, value).AddTo(fe)
}
}
return fe.wrapped.EncodeEntry(ent, nil)
}
// Interface guards
var (
_ zapcore.Encoder = (*AppendEncoder)(nil)
_ caddyfile.Unmarshaler = (*AppendEncoder)(nil)
_ caddy.ConfiguresFormatterDefault = (*AppendEncoder)(nil)
)
package logging
import (
"go.uber.org/zap/zapcore"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
func init() {
caddy.RegisterModule(MockCore{})
}
// MockCore is a no-op module, purely for testing.
type MockCore struct {
zapcore.Core `json:"-"`
}
// CaddyModule returns the Caddy module information.
func (MockCore) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "caddy.logging.cores.mock",
New: func() caddy.Module { return new(MockCore) },
}
}
func (mc *MockCore) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
return nil
}
// Interface guards
var (
_ zapcore.Core = (*MockCore)(nil)
_ caddy.Module = (*MockCore)(nil)
_ caddyfile.Unmarshaler = (*MockCore)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logging
import (
"time"
"go.uber.org/zap"
"go.uber.org/zap/buffer"
"go.uber.org/zap/zapcore"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
func init() {
caddy.RegisterModule(ConsoleEncoder{})
caddy.RegisterModule(JSONEncoder{})
}
// ConsoleEncoder encodes log entries that are mostly human-readable.
type ConsoleEncoder struct {
zapcore.Encoder `json:"-"`
LogEncoderConfig
}
// CaddyModule returns the Caddy module information.
func (ConsoleEncoder) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "caddy.logging.encoders.console",
New: func() caddy.Module { return new(ConsoleEncoder) },
}
}
// Provision sets up the encoder.
func (ce *ConsoleEncoder) Provision(_ caddy.Context) error {
if ce.LevelFormat == "" {
ce.LevelFormat = "color"
}
if ce.TimeFormat == "" {
ce.TimeFormat = "wall_milli"
}
ce.Encoder = zapcore.NewConsoleEncoder(ce.ZapcoreEncoderConfig())
return nil
}
// UnmarshalCaddyfile sets up the module from Caddyfile tokens. Syntax:
//
// console {
// <common encoder config subdirectives...>
// }
//
// See the godoc on the LogEncoderConfig type for the syntax of
// subdirectives that are common to most/all encoders.
func (ce *ConsoleEncoder) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume encoder name
if d.NextArg() {
return d.ArgErr()
}
err := ce.LogEncoderConfig.UnmarshalCaddyfile(d)
if err != nil {
return err
}
return nil
}
// JSONEncoder encodes entries as JSON.
type JSONEncoder struct {
zapcore.Encoder `json:"-"`
LogEncoderConfig
}
// CaddyModule returns the Caddy module information.
func (JSONEncoder) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "caddy.logging.encoders.json",
New: func() caddy.Module { return new(JSONEncoder) },
}
}
// Provision sets up the encoder.
func (je *JSONEncoder) Provision(_ caddy.Context) error {
je.Encoder = zapcore.NewJSONEncoder(je.ZapcoreEncoderConfig())
return nil
}
// UnmarshalCaddyfile sets up the module from Caddyfile tokens. Syntax:
//
// json {
// <common encoder config subdirectives...>
// }
//
// See the godoc on the LogEncoderConfig type for the syntax of
// subdirectives that are common to most/all encoders.
func (je *JSONEncoder) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume encoder name
if d.NextArg() {
return d.ArgErr()
}
err := je.LogEncoderConfig.UnmarshalCaddyfile(d)
if err != nil {
return err
}
return nil
}
// LogEncoderConfig holds configuration common to most encoders.
type LogEncoderConfig struct {
MessageKey *string `json:"message_key,omitempty"`
LevelKey *string `json:"level_key,omitempty"`
TimeKey *string `json:"time_key,omitempty"`
NameKey *string `json:"name_key,omitempty"`
CallerKey *string `json:"caller_key,omitempty"`
StacktraceKey *string `json:"stacktrace_key,omitempty"`
LineEnding *string `json:"line_ending,omitempty"`
// Recognized values are: unix_seconds_float, unix_milli_float, unix_nano, iso8601, rfc3339, rfc3339_nano, wall, wall_milli, wall_nano, common_log.
// The value may also be a custom format per the Go `time` package layout specification, as described [here](https://pkg.go.dev/time#pkg-constants).
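// For example, a custom layout such as "2006/01/02 03:04:05 PM" is used
// verbatim as the Go time layout.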
TimeFormat string `json:"time_format,omitempty"`
TimeLocal bool `json:"time_local,omitempty"`
// Recognized values are: s/second/seconds, ns/nano/nanos, ms/milli/millis, string.
// An empty or unrecognized value defaults to seconds.
DurationFormat string `json:"duration_format,omitempty"`
// Recognized values are: lower, upper, color.
// An empty or unrecognized value defaults to lower.
LevelFormat string `json:"level_format,omitempty"`
}
// UnmarshalCaddyfile populates the struct from Caddyfile tokens. Syntax:
//
// {
// message_key <key>
// level_key <key>
// time_key <key>
// name_key <key>
// caller_key <key>
// stacktrace_key <key>
// line_ending <char>
// time_format <format>
// time_local
// duration_format <format>
// level_format <format>
// }
func (lec *LogEncoderConfig) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
for d.NextBlock(0) {
subdir := d.Val()
switch subdir {
case "time_local":
lec.TimeLocal = true
if d.NextArg() {
return d.ArgErr()
}
continue
}
var arg string
if !d.AllArgs(&arg) {
return d.ArgErr()
}
switch subdir {
case "message_key":
lec.MessageKey = &arg
case "level_key":
lec.LevelKey = &arg
case "time_key":
lec.TimeKey = &arg
case "name_key":
lec.NameKey = &arg
case "caller_key":
lec.CallerKey = &arg
case "stacktrace_key":
lec.StacktraceKey = &arg
case "line_ending":
lec.LineEnding = &arg
case "time_format":
lec.TimeFormat = arg
case "duration_format":
lec.DurationFormat = arg
case "level_format":
lec.LevelFormat = arg
default:
return d.Errf("unrecognized subdirective %s", subdir)
}
}
return nil
}
// ZapcoreEncoderConfig returns the equivalent zapcore.EncoderConfig.
// If lec is nil, zap.NewProductionEncoderConfig() is returned.
func (lec *LogEncoderConfig) ZapcoreEncoderConfig() zapcore.EncoderConfig {
cfg := zap.NewProductionEncoderConfig()
if lec == nil {
lec = new(LogEncoderConfig)
}
if lec.MessageKey != nil {
cfg.MessageKey = *lec.MessageKey
}
if lec.LevelKey != nil {
cfg.LevelKey = *lec.LevelKey
}
if lec.TimeKey != nil {
cfg.TimeKey = *lec.TimeKey
}
if lec.NameKey != nil {
cfg.NameKey = *lec.NameKey
}
if lec.CallerKey != nil {
cfg.CallerKey = *lec.CallerKey
}
if lec.StacktraceKey != nil {
cfg.StacktraceKey = *lec.StacktraceKey
}
if lec.LineEnding != nil {
cfg.LineEnding = *lec.LineEnding
}
// time format
var timeFormatter zapcore.TimeEncoder
switch lec.TimeFormat {
case "", "unix_seconds_float":
timeFormatter = zapcore.EpochTimeEncoder
case "unix_milli_float":
timeFormatter = zapcore.EpochMillisTimeEncoder
case "unix_nano":
timeFormatter = zapcore.EpochNanosTimeEncoder
case "iso8601":
timeFormatter = zapcore.ISO8601TimeEncoder
default:
timeFormat := lec.TimeFormat
switch lec.TimeFormat {
case "rfc3339":
timeFormat = time.RFC3339
case "rfc3339_nano":
timeFormat = time.RFC3339Nano
case "wall":
timeFormat = "2006/01/02 15:04:05"
case "wall_milli":
timeFormat = "2006/01/02 15:04:05.000"
case "wall_nano":
timeFormat = "2006/01/02 15:04:05.000000000"
case "common_log":
timeFormat = "02/Jan/2006:15:04:05 -0700"
}
timeFormatter = func(ts time.Time, encoder zapcore.PrimitiveArrayEncoder) {
var t time.Time
if lec.TimeLocal {
t = ts.Local()
} else {
t = ts.UTC()
}
encoder.AppendString(t.Format(timeFormat))
}
}
cfg.EncodeTime = timeFormatter
// duration format
var durFormatter zapcore.DurationEncoder
switch lec.DurationFormat {
case "s", "second", "seconds":
durFormatter = zapcore.SecondsDurationEncoder
case "ns", "nano", "nanos":
durFormatter = zapcore.NanosDurationEncoder
case "ms", "milli", "millis":
durFormatter = zapcore.MillisDurationEncoder
case "string":
durFormatter = zapcore.StringDurationEncoder
default:
durFormatter = zapcore.SecondsDurationEncoder
}
cfg.EncodeDuration = durFormatter
// level format
var levelFormatter zapcore.LevelEncoder
switch lec.LevelFormat {
case "", "lower":
levelFormatter = zapcore.LowercaseLevelEncoder
case "upper":
levelFormatter = zapcore.CapitalLevelEncoder
case "color":
levelFormatter = zapcore.CapitalColorLevelEncoder
default:
levelFormatter = zapcore.LowercaseLevelEncoder
}
cfg.EncodeLevel = levelFormatter
return cfg
}
var bufferpool = buffer.NewPool()
// Interface guards
var (
_ zapcore.Encoder = (*ConsoleEncoder)(nil)
_ zapcore.Encoder = (*JSONEncoder)(nil)
_ caddyfile.Unmarshaler = (*ConsoleEncoder)(nil)
_ caddyfile.Unmarshaler = (*JSONEncoder)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logging
import (
"encoding/json"
"fmt"
"io"
"math"
"os"
"path/filepath"
"strconv"
"github.com/dustin/go-humanize"
"gopkg.in/natefinch/lumberjack.v2"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
func init() {
caddy.RegisterModule(FileWriter{})
}
// fileMode is a string made of 1 to 4 octal digits representing
// a numeric mode as specified with the `chmod` unix command.
// `"0777"` and `"777"` are thus equivalent values.
type fileMode os.FileMode
// UnmarshalJSON satisfies json.Unmarshaler.
func (m *fileMode) UnmarshalJSON(b []byte) error {
if len(b) == 0 {
return io.EOF
}
var s string
if err := json.Unmarshal(b, &s); err != nil {
return err
}
mode, err := parseFileMode(s)
if err != nil {
return err
}
*m = fileMode(mode)
return err
}
// MarshalJSON satisfies json.Marshaler.
func (m *fileMode) MarshalJSON() ([]byte, error) {
return []byte(fmt.Sprintf("\"%04o\"", *m)), nil
}
// parseFileMode parses a file mode string of 1 to 4
// octal digits, like the numeric modes accepted by the
// `chmod` unix command.
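// For example, parseFileMode("644") and parseFileMode("0644") both yield
// os.FileMode(0o644).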
func parseFileMode(s string) (os.FileMode, error) {
modeStr := fmt.Sprintf("%04s", s)
mode, err := strconv.ParseUint(modeStr, 8, 32)
if err != nil {
return 0, err
}
return os.FileMode(mode), nil
}
// FileWriter can write logs to files. By default, log files
// are rotated ("rolled") when they get large, and old log
// files get deleted, to ensure that the process does not
// exhaust disk space.
type FileWriter struct {
// Filename is the name of the file to write.
Filename string `json:"filename,omitempty"`
// The file permissions mode.
// 0600 by default.
Mode fileMode `json:"mode,omitempty"`
// Roll toggles log rolling or rotation, which is
// enabled by default.
Roll *bool `json:"roll,omitempty"`
// When a log file reaches approximately this size,
// it will be rotated.
RollSizeMB int `json:"roll_size_mb,omitempty"`
// Whether to compress rolled files. Default: true
RollCompress *bool `json:"roll_gzip,omitempty"`
// Whether to use local timestamps in rolled filenames.
// Default: false
RollLocalTime bool `json:"roll_local_time,omitempty"`
// The maximum number of rolled log files to keep.
// Default: 10
RollKeep int `json:"roll_keep,omitempty"`
// How many days to keep rolled log files. Default: 90
RollKeepDays int `json:"roll_keep_days,omitempty"`
}
// CaddyModule returns the Caddy module information.
func (FileWriter) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "caddy.logging.writers.file",
New: func() caddy.Module { return new(FileWriter) },
}
}
// Provision sets up the module
func (fw *FileWriter) Provision(ctx caddy.Context) error {
// Replace placeholder in filename
repl := caddy.NewReplacer()
filename, err := repl.ReplaceOrErr(fw.Filename, true, true)
if err != nil {
return fmt.Errorf("invalid filename for log file: %v", err)
}
fw.Filename = filename
return nil
}
func (fw FileWriter) String() string {
fpath, err := filepath.Abs(fw.Filename)
if err == nil {
return fpath
}
return fw.Filename
}
// WriterKey returns a unique key representing this fw.
func (fw FileWriter) WriterKey() string {
return "file:" + fw.Filename
}
// OpenWriter opens a new file writer.
func (fw FileWriter) OpenWriter() (io.WriteCloser, error) {
if fw.Mode == 0 {
fw.Mode = 0o600
}
// roll log files by default
if fw.Roll == nil || *fw.Roll {
if fw.RollSizeMB == 0 {
fw.RollSizeMB = 100
}
if fw.RollCompress == nil {
compress := true
fw.RollCompress = &compress
}
if fw.RollKeep == 0 {
fw.RollKeep = 10
}
if fw.RollKeepDays == 0 {
fw.RollKeepDays = 90
}
// create the file with the right mode if it does not exist;
// lumberjack will reuse the file mode across log rotation.
fTmp, err := os.OpenFile(fw.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.FileMode(fw.Mode))
if err != nil {
return nil, err
}
fTmp.Close()
// ensure already existing files have the right mode,
// since OpenFile will not set the mode in such case.
if err = os.Chmod(fw.Filename, os.FileMode(fw.Mode)); err != nil {
return nil, err
}
return &lumberjack.Logger{
Filename: fw.Filename,
MaxSize: fw.RollSizeMB,
MaxAge: fw.RollKeepDays,
MaxBackups: fw.RollKeep,
LocalTime: fw.RollLocalTime,
Compress: *fw.RollCompress,
}, nil
}
// otherwise just open a regular file
return os.OpenFile(fw.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.FileMode(fw.Mode))
}
// UnmarshalCaddyfile sets up the module from Caddyfile tokens. Syntax:
//
// file <filename> {
// mode <mode>
// roll_disabled
// roll_size <size>
// roll_uncompressed
// roll_local_time
// roll_keep <num>
// roll_keep_for <days>
// }
//
// The roll_size value has megabyte resolution.
// Fractional values are rounded up to the next whole megabyte (MiB).
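// For example, a roll_size of 1.5MiB is rounded up and stored as 2.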
//
// By default, compression is enabled, but can be turned off by setting
// the roll_uncompressed option.
//
// The roll_keep_for duration has day resolution.
// Fractional values are rounded up to the next whole number of days.
//
// If any of the mode, roll_size, roll_keep, or roll_keep_for subdirectives are
// omitted or set to a zero value, then Caddy's default value for that
// subdirective is used.
func (fw *FileWriter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume writer name
if !d.NextArg() {
return d.ArgErr()
}
fw.Filename = d.Val()
if d.NextArg() {
return d.ArgErr()
}
for d.NextBlock(0) {
switch d.Val() {
case "mode":
var modeStr string
if !d.AllArgs(&modeStr) {
return d.ArgErr()
}
mode, err := parseFileMode(modeStr)
if err != nil {
return d.Errf("parsing mode: %v", err)
}
fw.Mode = fileMode(mode)
case "roll_disabled":
var f bool
fw.Roll = &f
if d.NextArg() {
return d.ArgErr()
}
case "roll_size":
var sizeStr string
if !d.AllArgs(&sizeStr) {
return d.ArgErr()
}
size, err := humanize.ParseBytes(sizeStr)
if err != nil {
return d.Errf("parsing size: %v", err)
}
fw.RollSizeMB = int(math.Ceil(float64(size) / humanize.MiByte))
case "roll_uncompressed":
var f bool
fw.RollCompress = &f
if d.NextArg() {
return d.ArgErr()
}
case "roll_local_time":
fw.RollLocalTime = true
if d.NextArg() {
return d.ArgErr()
}
case "roll_keep":
var keepStr string
if !d.AllArgs(&keepStr) {
return d.ArgErr()
}
keep, err := strconv.Atoi(keepStr)
if err != nil {
return d.Errf("parsing roll_keep number: %v", err)
}
fw.RollKeep = keep
case "roll_keep_for":
var keepForStr string
if !d.AllArgs(&keepForStr) {
return d.ArgErr()
}
keepFor, err := caddy.ParseDuration(keepForStr)
if err != nil {
return d.Errf("parsing roll_keep_for duration: %v", err)
}
if keepFor < 0 {
return d.Errf("negative roll_keep_for duration: %v", keepFor)
}
fw.RollKeepDays = int(math.Ceil(keepFor.Hours() / 24))
default:
return d.Errf("unrecognized subdirective: %s", d.Val())
}
}
return nil
}
// Interface guards
var (
_ caddy.Provisioner = (*FileWriter)(nil)
_ caddy.WriterOpener = (*FileWriter)(nil)
_ caddyfile.Unmarshaler = (*FileWriter)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logging
import (
"encoding/json"
"fmt"
"os"
"time"
"go.uber.org/zap"
"go.uber.org/zap/buffer"
"go.uber.org/zap/zapcore"
"golang.org/x/term"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
func init() {
caddy.RegisterModule(FilterEncoder{})
}
// FilterEncoder can filter (manipulate) fields on
// log entries before they are actually encoded by
// an underlying encoder.
type FilterEncoder struct {
// The underlying encoder that actually encodes the
// log entries. If not specified, defaults to "json",
// unless the output is a terminal, in which case
// it defaults to "console".
WrappedRaw json.RawMessage `json:"wrap,omitempty" caddy:"namespace=caddy.logging.encoders inline_key=format"`
// A map of field names to their filters. Note that this
// is not a module map; the keys are field names.
//
// Nested fields can be referenced by representing a
// layer of nesting with `>`. In other words, for an
// object like `{"a":{"b":0}}`, the inner field can
// be referenced as `a>b`.
//
// The following fields are fundamental to the log and
// cannot be filtered because they are added by the
// underlying logging library as special cases: ts,
// level, logger, and msg.
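//
// A minimal JSON sketch (the field path is illustrative), assuming the
// built-in "delete" filter, which removes a nested request header:
//
// "fields": {
// "request>headers>Authorization": {
// "filter": "delete"
// }
// }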
FieldsRaw map[string]json.RawMessage `json:"fields,omitempty" caddy:"namespace=caddy.logging.encoders.filter inline_key=filter"`
wrapped zapcore.Encoder
Fields map[string]LogFieldFilter `json:"-"`
// used to keep keys unique across nested objects
keyPrefix string
wrappedIsDefault bool
ctx caddy.Context
}
// CaddyModule returns the Caddy module information.
func (FilterEncoder) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "caddy.logging.encoders.filter",
New: func() caddy.Module { return new(FilterEncoder) },
}
}
// Provision sets up the encoder.
func (fe *FilterEncoder) Provision(ctx caddy.Context) error {
fe.ctx = ctx
if fe.WrappedRaw == nil {
// if wrap is not specified, default to JSON
fe.wrapped = &JSONEncoder{}
if p, ok := fe.wrapped.(caddy.Provisioner); ok {
if err := p.Provision(ctx); err != nil {
return fmt.Errorf("provisioning fallback encoder module: %v", err)
}
}
fe.wrappedIsDefault = true
} else {
// set up wrapped encoder
val, err := ctx.LoadModule(fe, "WrappedRaw")
if err != nil {
return fmt.Errorf("loading fallback encoder module: %v", err)
}
fe.wrapped = val.(zapcore.Encoder)
}
// set up each field filter
if fe.Fields == nil {
fe.Fields = make(map[string]LogFieldFilter)
}
vals, err := ctx.LoadModule(fe, "FieldsRaw")
if err != nil {
return fmt.Errorf("loading log filter modules: %v", err)
}
for fieldName, modIface := range vals.(map[string]any) {
fe.Fields[fieldName] = modIface.(LogFieldFilter)
}
return nil
}
// ConfigureDefaultFormat will set the default format to "console"
// if the writer is a terminal. If already configured as a filter
// encoder, it passes through the writer so a deeply nested filter
// encoder can configure its own default format.
func (fe *FilterEncoder) ConfigureDefaultFormat(wo caddy.WriterOpener) error {
if !fe.wrappedIsDefault {
if cfd, ok := fe.wrapped.(caddy.ConfiguresFormatterDefault); ok {
return cfd.ConfigureDefaultFormat(wo)
}
return nil
}
if caddy.IsWriterStandardStream(wo) && term.IsTerminal(int(os.Stderr.Fd())) {
fe.wrapped = &ConsoleEncoder{}
if p, ok := fe.wrapped.(caddy.Provisioner); ok {
if err := p.Provision(fe.ctx); err != nil {
return fmt.Errorf("provisioning fallback encoder module: %v", err)
}
}
}
return nil
}
// UnmarshalCaddyfile sets up the module from Caddyfile tokens. Syntax:
//
// filter {
// wrap <another encoder>
// fields {
// <field> <filter> {
// <filter options>
// }
// }
// <field> <filter> {
// <filter options>
// }
// }
func (fe *FilterEncoder) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume encoder name
// parse a field
parseField := func() error {
if fe.FieldsRaw == nil {
fe.FieldsRaw = make(map[string]json.RawMessage)
}
field := d.Val()
if !d.NextArg() {
return d.ArgErr()
}
filterName := d.Val()
moduleID := "caddy.logging.encoders.filter." + filterName
unm, err := caddyfile.UnmarshalModule(d, moduleID)
if err != nil {
return err
}
filter, ok := unm.(LogFieldFilter)
if !ok {
return d.Errf("module %s (%T) is not a logging.LogFieldFilter", moduleID, unm)
}
fe.FieldsRaw[field] = caddyconfig.JSONModuleObject(filter, "filter", filterName, nil)
return nil
}
for d.NextBlock(0) {
switch d.Val() {
case "wrap":
if !d.NextArg() {
return d.ArgErr()
}
moduleName := d.Val()
moduleID := "caddy.logging.encoders." + moduleName
unm, err := caddyfile.UnmarshalModule(d, moduleID)
if err != nil {
return err
}
enc, ok := unm.(zapcore.Encoder)
if !ok {
return d.Errf("module %s (%T) is not a zapcore.Encoder", moduleID, unm)
}
fe.WrappedRaw = caddyconfig.JSONModuleObject(enc, "format", moduleName, nil)
case "fields":
for nesting := d.Nesting(); d.NextBlock(nesting); {
err := parseField()
if err != nil {
return err
}
}
default:
// if unknown, assume it's a field so that
// the config can be flat
err := parseField()
if err != nil {
return err
}
}
}
return nil
}
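// As an illustration (not taken from a real config), a filter encoder
// that wraps the JSON encoder and deletes the Cookie request header
// from access logs could be configured like this; note the `>` syntax
// for nested fields:
//
//	format filter {
//		wrap json
//		fields {
//			request>headers>Cookie delete
//		}
//	}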
// AddArray is part of the zapcore.ObjectEncoder interface.
// Array elements do not get filtered.
func (fe FilterEncoder) AddArray(key string, marshaler zapcore.ArrayMarshaler) error {
if filter, ok := fe.Fields[fe.keyPrefix+key]; ok {
filter.Filter(zap.Array(key, marshaler)).AddTo(fe.wrapped)
return nil
}
return fe.wrapped.AddArray(key, marshaler)
}
// AddObject is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddObject(key string, marshaler zapcore.ObjectMarshaler) error {
if fe.filtered(key, marshaler) {
return nil
}
fe.keyPrefix += key + ">"
return fe.wrapped.AddObject(key, logObjectMarshalerWrapper{
enc: fe,
marsh: marshaler,
})
}
// AddBinary is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddBinary(key string, value []byte) {
if !fe.filtered(key, value) {
fe.wrapped.AddBinary(key, value)
}
}
// AddByteString is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddByteString(key string, value []byte) {
if !fe.filtered(key, value) {
fe.wrapped.AddByteString(key, value)
}
}
// AddBool is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddBool(key string, value bool) {
if !fe.filtered(key, value) {
fe.wrapped.AddBool(key, value)
}
}
// AddComplex128 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddComplex128(key string, value complex128) {
if !fe.filtered(key, value) {
fe.wrapped.AddComplex128(key, value)
}
}
// AddComplex64 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddComplex64(key string, value complex64) {
if !fe.filtered(key, value) {
fe.wrapped.AddComplex64(key, value)
}
}
// AddDuration is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddDuration(key string, value time.Duration) {
if !fe.filtered(key, value) {
fe.wrapped.AddDuration(key, value)
}
}
// AddFloat64 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddFloat64(key string, value float64) {
if !fe.filtered(key, value) {
fe.wrapped.AddFloat64(key, value)
}
}
// AddFloat32 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddFloat32(key string, value float32) {
if !fe.filtered(key, value) {
fe.wrapped.AddFloat32(key, value)
}
}
// AddInt is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddInt(key string, value int) {
if !fe.filtered(key, value) {
fe.wrapped.AddInt(key, value)
}
}
// AddInt64 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddInt64(key string, value int64) {
if !fe.filtered(key, value) {
fe.wrapped.AddInt64(key, value)
}
}
// AddInt32 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddInt32(key string, value int32) {
if !fe.filtered(key, value) {
fe.wrapped.AddInt32(key, value)
}
}
// AddInt16 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddInt16(key string, value int16) {
if !fe.filtered(key, value) {
fe.wrapped.AddInt16(key, value)
}
}
// AddInt8 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddInt8(key string, value int8) {
if !fe.filtered(key, value) {
fe.wrapped.AddInt8(key, value)
}
}
// AddString is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddString(key, value string) {
if !fe.filtered(key, value) {
fe.wrapped.AddString(key, value)
}
}
// AddTime is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddTime(key string, value time.Time) {
if !fe.filtered(key, value) {
fe.wrapped.AddTime(key, value)
}
}
// AddUint is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddUint(key string, value uint) {
if !fe.filtered(key, value) {
fe.wrapped.AddUint(key, value)
}
}
// AddUint64 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddUint64(key string, value uint64) {
if !fe.filtered(key, value) {
fe.wrapped.AddUint64(key, value)
}
}
// AddUint32 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddUint32(key string, value uint32) {
if !fe.filtered(key, value) {
fe.wrapped.AddUint32(key, value)
}
}
// AddUint16 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddUint16(key string, value uint16) {
if !fe.filtered(key, value) {
fe.wrapped.AddUint16(key, value)
}
}
// AddUint8 is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddUint8(key string, value uint8) {
if !fe.filtered(key, value) {
fe.wrapped.AddUint8(key, value)
}
}
// AddUintptr is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddUintptr(key string, value uintptr) {
if !fe.filtered(key, value) {
fe.wrapped.AddUintptr(key, value)
}
}
// AddReflected is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) AddReflected(key string, value any) error {
if !fe.filtered(key, value) {
return fe.wrapped.AddReflected(key, value)
}
return nil
}
// OpenNamespace is part of the zapcore.ObjectEncoder interface.
func (fe FilterEncoder) OpenNamespace(key string) {
fe.wrapped.OpenNamespace(key)
}
// Clone is part of the zapcore.ObjectEncoder interface.
// We don't use it as of Oct 2019 (v2 beta 7); it's not
// clear what it would be useful for in our case.
func (fe FilterEncoder) Clone() zapcore.Encoder {
return FilterEncoder{
Fields: fe.Fields,
wrapped: fe.wrapped.Clone(),
keyPrefix: fe.keyPrefix,
}
}
// EncodeEntry partially implements the zapcore.Encoder interface.
func (fe FilterEncoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {
// without cloning and storing the clone back to fe.wrapped,
// fields from subsequent log entries get appended to previous
// ones; the exact cause is not fully understood, but see the
// end of https://github.com/uber-go/zap/issues/750
fe.wrapped = fe.wrapped.Clone()
for _, field := range fields {
field.AddTo(fe)
}
return fe.wrapped.EncodeEntry(ent, nil)
}
// filtered returns true if the field was filtered and
// has already been added to the underlying encoder
// (so do not add it again). If it returns false, the
// field has not yet been added to the underlying encoder.
func (fe FilterEncoder) filtered(key string, value any) bool {
filter, ok := fe.Fields[fe.keyPrefix+key]
if !ok {
return false
}
filter.Filter(zap.Any(key, value)).AddTo(fe.wrapped)
return true
}
// logObjectMarshalerWrapper allows us to recursively
// filter fields of objects as they get encoded.
type logObjectMarshalerWrapper struct {
enc FilterEncoder
marsh zapcore.ObjectMarshaler
}
// MarshalLogObject implements the zapcore.ObjectMarshaler interface.
func (mom logObjectMarshalerWrapper) MarshalLogObject(_ zapcore.ObjectEncoder) error {
return mom.marsh.MarshalLogObject(mom.enc)
}
// Interface guards
var (
_ zapcore.Encoder = (*FilterEncoder)(nil)
_ zapcore.ObjectMarshaler = (*logObjectMarshalerWrapper)(nil)
_ caddyfile.Unmarshaler = (*FilterEncoder)(nil)
_ caddy.ConfiguresFormatterDefault = (*FilterEncoder)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logging
import (
"crypto/sha256"
"errors"
"fmt"
"net"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
"go.uber.org/zap/zapcore"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
)
func init() {
caddy.RegisterModule(DeleteFilter{})
caddy.RegisterModule(HashFilter{})
caddy.RegisterModule(ReplaceFilter{})
caddy.RegisterModule(IPMaskFilter{})
caddy.RegisterModule(QueryFilter{})
caddy.RegisterModule(CookieFilter{})
caddy.RegisterModule(RegexpFilter{})
caddy.RegisterModule(RenameFilter{})
}
// LogFieldFilter can filter (or manipulate)
// a field in a log entry.
type LogFieldFilter interface {
Filter(zapcore.Field) zapcore.Field
}
// DeleteFilter is a Caddy log field filter that
// deletes the field.
type DeleteFilter struct{}
// CaddyModule returns the Caddy module information.
func (DeleteFilter) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "caddy.logging.encoders.filter.delete",
New: func() caddy.Module { return new(DeleteFilter) },
}
}
// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
func (DeleteFilter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
return nil
}
// Filter filters the input field.
func (DeleteFilter) Filter(in zapcore.Field) zapcore.Field {
in.Type = zapcore.SkipType
return in
}
// hash returns the first 4 bytes of the SHA-256 hash of the given data, as hexadecimal.
func hash(s string) string {
return fmt.Sprintf("%.4x", sha256.Sum256([]byte(s)))
}
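// As a worked example (not used elsewhere in this file):
//
//	hash("hello") // "2cf24dba", i.e. the first 8 hex digits of SHA-256("hello")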
// HashFilter is a Caddy log field filter that
// replaces the field with the initial 4 bytes
// of the SHA-256 hash of the content. Operates
// on string fields, or on arrays of strings
// where each string is hashed.
type HashFilter struct{}
// CaddyModule returns the Caddy module information.
func (HashFilter) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "caddy.logging.encoders.filter.hash",
New: func() caddy.Module { return new(HashFilter) },
}
}
// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
func (f *HashFilter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
return nil
}
// Filter filters the input field with the replacement value.
func (f *HashFilter) Filter(in zapcore.Field) zapcore.Field {
if array, ok := in.Interface.(caddyhttp.LoggableStringArray); ok {
newArray := make(caddyhttp.LoggableStringArray, len(array))
for i, s := range array {
newArray[i] = hash(s)
}
in.Interface = newArray
} else {
in.String = hash(in.String)
}
return in
}
// ReplaceFilter is a Caddy log field filter that
// replaces the field with the indicated string.
type ReplaceFilter struct {
Value string `json:"value,omitempty"`
}
// CaddyModule returns the Caddy module information.
func (ReplaceFilter) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "caddy.logging.encoders.filter.replace",
New: func() caddy.Module { return new(ReplaceFilter) },
}
}
// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
func (f *ReplaceFilter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume filter name
if d.NextArg() {
f.Value = d.Val()
}
return nil
}
// Filter filters the input field with the replacement value.
func (f *ReplaceFilter) Filter(in zapcore.Field) zapcore.Field {
in.Type = zapcore.StringType
in.String = f.Value
return in
}
// IPMaskFilter is a Caddy log field filter that
// masks IP addresses in a string, or in an array
// of strings. The string may be a comma-separated
// list of IP addresses, in which case all of the
// values will be masked.
type IPMaskFilter struct {
// The IPv4 mask, as a CIDR prefix length (subnet size).
IPv4MaskRaw int `json:"ipv4_cidr,omitempty"`
// The IPv6 mask, as a CIDR prefix length (subnet size).
IPv6MaskRaw int `json:"ipv6_cidr,omitempty"`
v4Mask net.IPMask
v6Mask net.IPMask
}
// CaddyModule returns the Caddy module information.
func (IPMaskFilter) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "caddy.logging.encoders.filter.ip_mask",
New: func() caddy.Module { return new(IPMaskFilter) },
}
}
// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
func (m *IPMaskFilter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume filter name
args := d.RemainingArgs()
if len(args) > 2 {
return d.Errf("too many arguments")
}
if len(args) > 0 {
val, err := strconv.Atoi(args[0])
if err != nil {
return d.Errf("error parsing %s: %v", args[0], err)
}
m.IPv4MaskRaw = val
if len(args) > 1 {
val, err := strconv.Atoi(args[1])
if err != nil {
return d.Errf("error parsing %s: %v", args[1], err)
}
m.IPv6MaskRaw = val
}
}
for d.NextBlock(0) {
switch d.Val() {
case "ipv4":
if !d.NextArg() {
return d.ArgErr()
}
val, err := strconv.Atoi(d.Val())
if err != nil {
return d.Errf("error parsing %s: %v", d.Val(), err)
}
m.IPv4MaskRaw = val
case "ipv6":
if !d.NextArg() {
return d.ArgErr()
}
val, err := strconv.Atoi(d.Val())
if err != nil {
return d.Errf("error parsing %s: %v", d.Val(), err)
}
m.IPv6MaskRaw = val
default:
return d.Errf("unrecognized subdirective %s", d.Val())
}
}
return nil
}
// Provision parses m's IP masks from the configured CIDR prefix lengths.
func (m *IPMaskFilter) Provision(ctx caddy.Context) error {
parseRawToMask := func(rawField int, bitLen int) net.IPMask {
if rawField == 0 {
return nil
}
// we assume the int is a CIDR prefix length,
// e.g. 16 is equivalent to masking the last
// two bytes of an IPv4 address, i.e. "255.255.0.0"
return net.CIDRMask(rawField, bitLen)
}
m.v4Mask = parseRawToMask(m.IPv4MaskRaw, 32)
m.v6Mask = parseRawToMask(m.IPv6MaskRaw, 128)
return nil
}
// Filter filters the input field.
func (m IPMaskFilter) Filter(in zapcore.Field) zapcore.Field {
if array, ok := in.Interface.(caddyhttp.LoggableStringArray); ok {
newArray := make(caddyhttp.LoggableStringArray, len(array))
for i, s := range array {
newArray[i] = m.mask(s)
}
in.Interface = newArray
} else {
in.String = m.mask(in.String)
}
return in
}
func (m IPMaskFilter) mask(s string) string {
output := ""
for _, value := range strings.Split(s, ",") {
value = strings.TrimSpace(value)
host, port, err := net.SplitHostPort(value)
if err != nil {
host = value // assume whole thing was IP address
}
ipAddr := net.ParseIP(host)
if ipAddr == nil {
output += value + ", "
continue
}
mask := m.v4Mask
if ipAddr.To4() == nil {
mask = m.v6Mask
}
masked := ipAddr.Mask(mask)
if port == "" {
output += masked.String() + ", "
continue
}
output += net.JoinHostPort(masked.String(), port) + ", "
}
return strings.TrimSuffix(output, ", ")
}
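// As an illustration (addresses and prefix lengths chosen arbitrarily),
// with ipv4_cidr 24 and ipv6_cidr 32, mask transforms values like these:
//
//	"192.168.1.100:51234"   -> "192.168.1.0:51234"
//	"2001:db8:abcd:1234::1" -> "2001:db8::"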
type filterAction string
const (
// Replace value(s).
replaceAction filterAction = "replace"
// Hash value(s).
hashAction filterAction = "hash"
// Delete.
deleteAction filterAction = "delete"
)
func (a filterAction) IsValid() error {
switch a {
case replaceAction, deleteAction, hashAction:
return nil
}
return errors.New("invalid action type")
}
type queryFilterAction struct {
// `replace` to replace the value(s) associated with the parameter(s), `hash` to replace them with the first 4 bytes of the SHA-256 hash of their content, or `delete` to remove them entirely.
Type filterAction `json:"type"`
// The name of the query parameter.
Parameter string `json:"parameter"`
// The value to use as replacement if the action is `replace`.
Value string `json:"value,omitempty"`
}
// QueryFilter is a Caddy log field filter that filters
// query parameters from a URL.
//
// This filter updates the logged URL string to remove, replace or hash
// query parameters containing sensitive data. For instance, it can be
// used to redact any kind of secrets which were passed as query parameters,
// such as OAuth access tokens, session IDs, magic link tokens, etc.
type QueryFilter struct {
// A list of actions to apply to the query parameters of the URL.
Actions []queryFilterAction `json:"actions"`
}
// Validate checks that action types are correct.
func (f *QueryFilter) Validate() error {
for _, a := range f.Actions {
if err := a.Type.IsValid(); err != nil {
return err
}
}
return nil
}
// CaddyModule returns the Caddy module information.
func (QueryFilter) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "caddy.logging.encoders.filter.query",
New: func() caddy.Module { return new(QueryFilter) },
}
}
// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
func (m *QueryFilter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume filter name
for d.NextBlock(0) {
qfa := queryFilterAction{}
switch d.Val() {
case "replace":
if !d.NextArg() {
return d.ArgErr()
}
qfa.Type = replaceAction
qfa.Parameter = d.Val()
if !d.NextArg() {
return d.ArgErr()
}
qfa.Value = d.Val()
case "hash":
if !d.NextArg() {
return d.ArgErr()
}
qfa.Type = hashAction
qfa.Parameter = d.Val()
case "delete":
if !d.NextArg() {
return d.ArgErr()
}
qfa.Type = deleteAction
qfa.Parameter = d.Val()
default:
return d.Errf("unrecognized subdirective %s", d.Val())
}
m.Actions = append(m.Actions, qfa)
}
return nil
}
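// As an illustration (parameter names are hypothetical), the Caddyfile
// form of this filter could look like:
//
//	query {
//		delete  token
//		hash    session_id
//		replace password REDACTED
//	}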
// Filter filters the input field.
func (m QueryFilter) Filter(in zapcore.Field) zapcore.Field {
if array, ok := in.Interface.(caddyhttp.LoggableStringArray); ok {
newArray := make(caddyhttp.LoggableStringArray, len(array))
for i, s := range array {
newArray[i] = m.processQueryString(s)
}
in.Interface = newArray
} else {
in.String = m.processQueryString(in.String)
}
return in
}
func (m QueryFilter) processQueryString(s string) string {
u, err := url.Parse(s)
if err != nil {
return s
}
q := u.Query()
for _, a := range m.Actions {
switch a.Type {
case replaceAction:
for i := range q[a.Parameter] {
q[a.Parameter][i] = a.Value
}
case hashAction:
for i := range q[a.Parameter] {
q[a.Parameter][i] = hash(q[a.Parameter][i])
}
case deleteAction:
q.Del(a.Parameter)
}
}
u.RawQuery = q.Encode()
return u.String()
}
type cookieFilterAction struct {
// `replace` to replace the value of the cookie, `hash` to replace it with the first 4 bytes of the SHA-256 hash of its content, or `delete` to remove it entirely.
Type filterAction `json:"type"`
// The name of the cookie.
Name string `json:"name"`
// The value to use as replacement if the action is `replace`.
Value string `json:"value,omitempty"`
}
// CookieFilter is a Caddy log field filter that filters
// cookies.
//
// This filter updates the logged HTTP header string
// to remove, replace or hash cookies containing sensitive data. For instance,
// it can be used to redact any kind of secrets, such as session IDs.
//
// If several actions are configured for the same cookie name, only the first
// will be applied.
type CookieFilter struct {
// A list of actions to apply to the cookies.
Actions []cookieFilterAction `json:"actions"`
}
// Validate checks that action types are correct.
func (f *CookieFilter) Validate() error {
for _, a := range f.Actions {
if err := a.Type.IsValid(); err != nil {
return err
}
}
return nil
}
// CaddyModule returns the Caddy module information.
func (CookieFilter) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "caddy.logging.encoders.filter.cookie",
New: func() caddy.Module { return new(CookieFilter) },
}
}
// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
func (m *CookieFilter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume filter name
for d.NextBlock(0) {
cfa := cookieFilterAction{}
switch d.Val() {
case "replace":
if !d.NextArg() {
return d.ArgErr()
}
cfa.Type = replaceAction
cfa.Name = d.Val()
if !d.NextArg() {
return d.ArgErr()
}
cfa.Value = d.Val()
case "hash":
if !d.NextArg() {
return d.ArgErr()
}
cfa.Type = hashAction
cfa.Name = d.Val()
case "delete":
if !d.NextArg() {
return d.ArgErr()
}
cfa.Type = deleteAction
cfa.Name = d.Val()
default:
return d.Errf("unrecognized subdirective %s", d.Val())
}
m.Actions = append(m.Actions, cfa)
}
return nil
}
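// As an illustration (cookie names are hypothetical), the Caddyfile
// form of this filter could look like:
//
//	cookie {
//		delete  sessionid
//		hash    csrf_token
//		replace jwt REDACTED
//	}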
// Filter filters the input field.
func (m CookieFilter) Filter(in zapcore.Field) zapcore.Field {
cookiesSlice, ok := in.Interface.(caddyhttp.LoggableStringArray)
if !ok {
return in
}
// use a dummy Request so the cookies can be parsed via its Cookies() method
originRequest := http.Request{Header: http.Header{"Cookie": cookiesSlice}}
cookies := originRequest.Cookies()
transformedRequest := http.Request{Header: make(http.Header)}
OUTER:
for _, c := range cookies {
for _, a := range m.Actions {
if c.Name != a.Name {
continue
}
switch a.Type {
case replaceAction:
c.Value = a.Value
transformedRequest.AddCookie(c)
continue OUTER
case hashAction:
c.Value = hash(c.Value)
transformedRequest.AddCookie(c)
continue OUTER
case deleteAction:
continue OUTER
}
}
transformedRequest.AddCookie(c)
}
in.Interface = caddyhttp.LoggableStringArray(transformedRequest.Header["Cookie"])
return in
}
// RegexpFilter is a Caddy log field filter that
// replaces the field matching the provided regexp
// with the indicated string. If the field is an
// array of strings, each of them will have the
// regexp replacement applied.
type RegexpFilter struct {
// The regular expression pattern defining what to replace.
RawRegexp string `json:"regexp,omitempty"`
// The value to use as the replacement.
Value string `json:"value,omitempty"`
regexp *regexp.Regexp
}
// CaddyModule returns the Caddy module information.
func (RegexpFilter) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "caddy.logging.encoders.filter.regexp",
New: func() caddy.Module { return new(RegexpFilter) },
}
}
// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
func (f *RegexpFilter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume filter name
if d.NextArg() {
f.RawRegexp = d.Val()
}
if d.NextArg() {
f.Value = d.Val()
}
return nil
}
// Provision compiles m's regexp.
func (m *RegexpFilter) Provision(ctx caddy.Context) error {
r, err := regexp.Compile(m.RawRegexp)
if err != nil {
return err
}
m.regexp = r
return nil
}
// Filter filters the input field with the replacement value if it matches the regexp.
func (f *RegexpFilter) Filter(in zapcore.Field) zapcore.Field {
if array, ok := in.Interface.(caddyhttp.LoggableStringArray); ok {
newArray := make(caddyhttp.LoggableStringArray, len(array))
for i, s := range array {
newArray[i] = f.regexp.ReplaceAllString(s, f.Value)
}
in.Interface = newArray
} else {
in.String = f.regexp.ReplaceAllString(in.String, f.Value)
}
return in
}
// RenameFilter is a Caddy log field filter that
// renames the field's key with the indicated name.
type RenameFilter struct {
Name string `json:"name,omitempty"`
}
// CaddyModule returns the Caddy module information.
func (RenameFilter) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "caddy.logging.encoders.filter.rename",
New: func() caddy.Module { return new(RenameFilter) },
}
}
// UnmarshalCaddyfile sets up the module from Caddyfile tokens.
func (f *RenameFilter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume filter name
if d.NextArg() {
f.Name = d.Val()
}
return nil
}
// Filter renames the input field with the replacement name.
func (f *RenameFilter) Filter(in zapcore.Field) zapcore.Field {
in.Key = f.Name
return in
}
// Interface guards
var (
_ LogFieldFilter = (*DeleteFilter)(nil)
_ LogFieldFilter = (*HashFilter)(nil)
_ LogFieldFilter = (*ReplaceFilter)(nil)
_ LogFieldFilter = (*IPMaskFilter)(nil)
_ LogFieldFilter = (*QueryFilter)(nil)
_ LogFieldFilter = (*CookieFilter)(nil)
_ LogFieldFilter = (*RegexpFilter)(nil)
_ LogFieldFilter = (*RenameFilter)(nil)
_ caddyfile.Unmarshaler = (*DeleteFilter)(nil)
_ caddyfile.Unmarshaler = (*HashFilter)(nil)
_ caddyfile.Unmarshaler = (*ReplaceFilter)(nil)
_ caddyfile.Unmarshaler = (*IPMaskFilter)(nil)
_ caddyfile.Unmarshaler = (*QueryFilter)(nil)
_ caddyfile.Unmarshaler = (*CookieFilter)(nil)
_ caddyfile.Unmarshaler = (*RegexpFilter)(nil)
_ caddyfile.Unmarshaler = (*RenameFilter)(nil)
_ caddy.Provisioner = (*IPMaskFilter)(nil)
_ caddy.Provisioner = (*RegexpFilter)(nil)
_ caddy.Validator = (*QueryFilter)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logging
import (
"fmt"
"io"
"net"
"os"
"sync"
"time"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)
func init() {
caddy.RegisterModule(NetWriter{})
}
// NetWriter implements a log writer that outputs to a network socket. If
// the socket goes down, it will dump logs to stderr while it attempts to
// reconnect.
type NetWriter struct {
// The address of the network socket to which to connect.
Address string `json:"address,omitempty"`
// The timeout to wait while connecting to the socket.
DialTimeout caddy.Duration `json:"dial_timeout,omitempty"`
// If enabled, connection errors are tolerated when first opening
// the writer. The error and subsequent log entries will be reported
// to stderr instead until a connection can be re-established.
SoftStart bool `json:"soft_start,omitempty"`
addr caddy.NetworkAddress
}
// CaddyModule returns the Caddy module information.
func (NetWriter) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "caddy.logging.writers.net",
New: func() caddy.Module { return new(NetWriter) },
}
}
// Provision sets up the module.
func (nw *NetWriter) Provision(ctx caddy.Context) error {
repl := caddy.NewReplacer()
address, err := repl.ReplaceOrErr(nw.Address, true, true)
if err != nil {
return fmt.Errorf("invalid host in address: %v", err)
}
nw.addr, err = caddy.ParseNetworkAddress(address)
if err != nil {
return fmt.Errorf("parsing network address '%s': %v", address, err)
}
if nw.addr.PortRangeSize() != 1 {
return fmt.Errorf("multiple ports not supported")
}
if nw.DialTimeout < 0 {
return fmt.Errorf("timeout cannot be less than 0")
}
return nil
}
func (nw NetWriter) String() string {
return nw.addr.String()
}
// WriterKey returns a unique key representing this nw.
func (nw NetWriter) WriterKey() string {
return nw.addr.String()
}
// OpenWriter opens a new network connection.
func (nw NetWriter) OpenWriter() (io.WriteCloser, error) {
reconn := &redialerConn{
nw: nw,
timeout: time.Duration(nw.DialTimeout),
}
conn, err := reconn.dial()
if err != nil {
if !nw.SoftStart {
return nil, err
}
// don't block config load if remote is down or some other external problem;
// we can dump logs to stderr for now (see issue #5520)
fmt.Fprintf(os.Stderr, "[ERROR] net log writer failed to connect: %v (will retry connection and print errors here in the meantime)\n", err)
}
reconn.connMu.Lock()
reconn.Conn = conn
reconn.connMu.Unlock()
return reconn, nil
}
// UnmarshalCaddyfile sets up the module from Caddyfile tokens. Syntax:
//
// net <address> {
// dial_timeout <duration>
// soft_start
// }
func (nw *NetWriter) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.Next() // consume writer name
if !d.NextArg() {
return d.ArgErr()
}
nw.Address = d.Val()
if d.NextArg() {
return d.ArgErr()
}
for d.NextBlock(0) {
switch d.Val() {
case "dial_timeout":
if !d.NextArg() {
return d.ArgErr()
}
timeout, err := caddy.ParseDuration(d.Val())
if err != nil {
return d.Errf("invalid duration: %s", d.Val())
}
if d.NextArg() {
return d.ArgErr()
}
nw.DialTimeout = caddy.Duration(timeout)
case "soft_start":
if d.NextArg() {
return d.ArgErr()
}
nw.SoftStart = true
}
}
return nil
}
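// As an illustration (the address is hypothetical), a writer that logs
// to a local UDP collector could be configured like this:
//
//	net udp/localhost:9999 {
//		dial_timeout 5s
//		soft_start
//	}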
// redialerConn wraps an underlying Conn so that if any
// writes fail, the connection is redialed and the write
// is retried.
type redialerConn struct {
net.Conn
connMu sync.RWMutex
nw NetWriter
timeout time.Duration
lastRedial time.Time
}
// Write wraps the underlying Conn.Write method, but if that fails,
// it will re-dial the connection anew and try writing again.
func (reconn *redialerConn) Write(b []byte) (n int, err error) {
reconn.connMu.RLock()
conn := reconn.Conn
reconn.connMu.RUnlock()
if conn != nil {
if n, err = conn.Write(b); err == nil {
return
}
}
// problem with the connection - lock it and try to fix it
reconn.connMu.Lock()
defer reconn.connMu.Unlock()
// if multiple concurrent writes failed on the same broken conn, then
// one of them might have already re-dialed by now; try writing again
if reconn.Conn != nil {
if n, err = reconn.Conn.Write(b); err == nil {
return
}
}
// there's still a problem, so re-attempt dialing the socket, but
// only if enough time has passed that the issue could plausibly
// have been resolved - we don't want to block at every single log
// emission (!) - see discussion in #4111
if time.Since(reconn.lastRedial) > 10*time.Second {
reconn.lastRedial = time.Now()
conn2, err2 := reconn.dial()
if err2 != nil {
// logger socket still offline; instead of discarding the log, dump it to stderr
os.Stderr.Write(b)
return
}
if n, err = conn2.Write(b); err == nil {
if reconn.Conn != nil {
reconn.Conn.Close()
}
reconn.Conn = conn2
}
} else {
// last redial attempt was too recent; just dump to stderr for now
os.Stderr.Write(b)
}
return
}
func (reconn *redialerConn) dial() (net.Conn, error) {
return net.DialTimeout(reconn.nw.addr.Network, reconn.nw.addr.JoinHostPort(0), reconn.timeout)
}
// Interface guards
var (
_ caddy.Provisioner = (*NetWriter)(nil)
_ caddy.WriterOpener = (*NetWriter)(nil)
_ caddyfile.Unmarshaler = (*NetWriter)(nil)
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logging
import (
"time"
"go.uber.org/zap/buffer"
"go.uber.org/zap/zapcore"
)
// nopEncoder is a zapcore.Encoder that does nothing.
type nopEncoder struct{}
// AddArray is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddArray(key string, marshaler zapcore.ArrayMarshaler) error { return nil }
// AddObject is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddObject(key string, marshaler zapcore.ObjectMarshaler) error { return nil }
// AddBinary is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddBinary(key string, value []byte) {}
// AddByteString is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddByteString(key string, value []byte) {}
// AddBool is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddBool(key string, value bool) {}
// AddComplex128 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddComplex128(key string, value complex128) {}
// AddComplex64 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddComplex64(key string, value complex64) {}
// AddDuration is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddDuration(key string, value time.Duration) {}
// AddFloat64 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddFloat64(key string, value float64) {}
// AddFloat32 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddFloat32(key string, value float32) {}
// AddInt is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddInt(key string, value int) {}
// AddInt64 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddInt64(key string, value int64) {}
// AddInt32 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddInt32(key string, value int32) {}
// AddInt16 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddInt16(key string, value int16) {}
// AddInt8 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddInt8(key string, value int8) {}
// AddString is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddString(key, value string) {}
// AddTime is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddTime(key string, value time.Time) {}
// AddUint is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddUint(key string, value uint) {}
// AddUint64 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddUint64(key string, value uint64) {}
// AddUint32 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddUint32(key string, value uint32) {}
// AddUint16 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddUint16(key string, value uint16) {}
// AddUint8 is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddUint8(key string, value uint8) {}
// AddUintptr is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddUintptr(key string, value uintptr) {}
// AddReflected is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) AddReflected(key string, value any) error { return nil }
// OpenNamespace is part of the zapcore.ObjectEncoder interface.
func (nopEncoder) OpenNamespace(key string) {}
// Clone is part of the zapcore.ObjectEncoder interface.
// We don't use it as of Oct 2019 (v2 beta 7); it's not
// clear what it would be useful for in our case.
func (ne nopEncoder) Clone() zapcore.Encoder { return ne }
// EncodeEntry partially implements the zapcore.Encoder interface.
func (nopEncoder) EncodeEntry(ent zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {
return bufferpool.Get(), nil
}
// Interface guard
var _ zapcore.Encoder = (*nopEncoder)(nil)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package notify provides facilities for notifying process managers
// of state changes, mainly for when running as a system service.
package notify
import (
"fmt"
"net"
"os"
"strings"
)
// The documentation about this IPC protocol is available here:
// https://www.freedesktop.org/software/systemd/man/sd_notify.html
func sdNotify(payload string) error {
if socketPath == "" {
return nil
}
socketAddr := &net.UnixAddr{
Name: socketPath,
Net: "unixgram",
}
conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr)
if err != nil {
return err
}
defer conn.Close()
_, err = conn.Write([]byte(payload))
return err
}
// Ready notifies systemd that caddy has finished its
// initialization routines.
func Ready() error {
return sdNotify("READY=1")
}
// Reloading notifies systemd that caddy is reloading its config.
func Reloading() error {
return sdNotify("RELOADING=1")
}
// Stopping notifies systemd that caddy is stopping.
func Stopping() error {
return sdNotify("STOPPING=1")
}
// Status sends systemd an updated status message.
func Status(msg string) error {
return sdNotify("STATUS=" + msg)
}
// Error is like Status, but sends systemd an error message
// instead, with an optional errno-style error number.
func Error(err error, errno int) error {
collapsedErr := strings.ReplaceAll(err.Error(), "\n", " ")
msg := fmt.Sprintf("STATUS=%s", collapsedErr)
if errno > 0 {
msg += fmt.Sprintf("\nERRNO=%d", errno)
}
return sdNotify(msg)
}
var socketPath, _ = os.LookupEnv("NOTIFY_SOCKET")
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddy
import (
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"time"
"go.uber.org/zap"
)
// NewReplacer returns a new Replacer.
func NewReplacer() *Replacer {
rep := &Replacer{
static: make(map[string]any),
mapMutex: &sync.RWMutex{},
}
rep.providers = []replacementProvider{
globalDefaultReplacementProvider{},
fileReplacementProvider{},
ReplacerFunc(rep.fromStatic),
}
return rep
}
// NewEmptyReplacer returns a new Replacer,
// without the global default replacements.
func NewEmptyReplacer() *Replacer {
rep := &Replacer{
static: make(map[string]any),
mapMutex: &sync.RWMutex{},
}
rep.providers = []replacementProvider{
ReplacerFunc(rep.fromStatic),
}
return rep
}
// Replacer can replace values in strings.
// A default/empty Replacer is not valid;
// use NewReplacer to make one.
type Replacer struct {
providers []replacementProvider
static map[string]any
mapMutex *sync.RWMutex
}
// WithoutFile returns a copy of the current Replacer
// without support for the {file.*} placeholder, which
// may be unsafe in some contexts.
//
// EXPERIMENTAL: Subject to change or removal.
func (r *Replacer) WithoutFile() *Replacer {
rep := &Replacer{static: r.static}
for _, v := range r.providers {
if _, ok := v.(fileReplacementProvider); ok {
continue
}
rep.providers = append(rep.providers, v)
}
return rep
}
// Map adds mapFunc to the list of value providers.
// mapFunc will be executed only at replace-time.
func (r *Replacer) Map(mapFunc ReplacerFunc) {
r.providers = append(r.providers, mapFunc)
}
// Set sets a custom variable to a static value.
func (r *Replacer) Set(variable string, value any) {
r.mapMutex.Lock()
r.static[variable] = value
r.mapMutex.Unlock()
}
// Get gets a value from the replacer. It returns
// the value and whether the variable was known.
func (r *Replacer) Get(variable string) (any, bool) {
for _, mapFunc := range r.providers {
if val, ok := mapFunc.replace(variable); ok {
return val, true
}
}
return nil, false
}
// GetString is the same as Get, but coerces the value to a
// string representation as efficiently as possible.
func (r *Replacer) GetString(variable string) (string, bool) {
s, found := r.Get(variable)
return ToString(s), found
}
// Delete removes a variable with a static value
// that was created using Set.
func (r *Replacer) Delete(variable string) {
r.mapMutex.Lock()
delete(r.static, variable)
r.mapMutex.Unlock()
}
// fromStatic provides values from r.static.
func (r *Replacer) fromStatic(key string) (any, bool) {
r.mapMutex.RLock()
defer r.mapMutex.RUnlock()
val, ok := r.static[key]
return val, ok
}
// ReplaceOrErr is like ReplaceAll, but any placeholders
// that are empty or not recognized will cause an error to
// be returned.
func (r *Replacer) ReplaceOrErr(input string, errOnEmpty, errOnUnknown bool) (string, error) {
return r.replace(input, "", false, errOnEmpty, errOnUnknown, nil)
}
// ReplaceKnown is like ReplaceAll but only replaces
// placeholders that are known (recognized). Unrecognized
// placeholders will remain in the output.
func (r *Replacer) ReplaceKnown(input, empty string) string {
out, _ := r.replace(input, empty, false, false, false, nil)
return out
}
// ReplaceAll efficiently replaces placeholders in input with
// their values. All placeholders are replaced in the output
// whether they are recognized or not. Values that are empty
// string will be substituted with empty.
func (r *Replacer) ReplaceAll(input, empty string) string {
out, _ := r.replace(input, empty, true, false, false, nil)
return out
}
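// A minimal usage sketch (the custom key is illustrative):
//
//	repl := NewReplacer()
//	repl.Set("custom.greeting", "hello")
//	s := repl.ReplaceAll("{custom.greeting} from {system.os}", "")
//	// s is now, e.g., "hello from linux"; unknown or empty placeholders
//	// are replaced with the second argument ("" here)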
// ReplaceFunc is the same as ReplaceAll, but calls f for every
// replacement to be made, in case f wants to change or inspect
// the replacement.
func (r *Replacer) ReplaceFunc(input string, f ReplacementFunc) (string, error) {
return r.replace(input, "", true, false, false, f)
}
func (r *Replacer) replace(input, empty string,
treatUnknownAsEmpty, errOnEmpty, errOnUnknown bool,
f ReplacementFunc,
) (string, error) {
if !strings.Contains(input, string(phOpen)) && !strings.Contains(input, string(phClose)) {
return input, nil
}
var sb strings.Builder
// it is reasonable to assume that the output
// will be approximately as long as the input
sb.Grow(len(input))
// iterate the input to find each placeholder
var lastWriteCursor int
// fail fast if too many placeholders are unclosed
var unclosedCount int
scan:
for i := 0; i < len(input); i++ {
// check for escaped braces
if i > 0 && input[i-1] == phEscape && (input[i] == phClose || input[i] == phOpen) {
sb.WriteString(input[lastWriteCursor : i-1])
lastWriteCursor = i
continue
}
if input[i] != phOpen {
continue
}
// our iterator is now on an unescaped open brace (start of placeholder)
// too many unclosed placeholders in absolutely ridiculous input can be extremely slow (issue #4170)
if unclosedCount > 100 {
return "", fmt.Errorf("too many unclosed placeholders")
}
// find the end of the placeholder
end := strings.Index(input[i:], string(phClose)) + i
if end < i {
unclosedCount++
continue
}
// if necessary look for the first closing brace that is not escaped
for end > 0 && end < len(input)-1 && input[end-1] == phEscape {
nextEnd := strings.Index(input[end+1:], string(phClose))
if nextEnd < 0 {
unclosedCount++
continue scan
}
end += nextEnd + 1
}
// write the substring from the last cursor to this point
sb.WriteString(input[lastWriteCursor:i])
// trim opening bracket
key := input[i+1 : end]
// try to get a value for this key, handle empty values accordingly
val, found := r.Get(key)
if !found {
// placeholder is unknown (unrecognized); handle accordingly
if errOnUnknown {
return "", fmt.Errorf("unrecognized placeholder %s%s%s",
string(phOpen), key, string(phClose))
} else if !treatUnknownAsEmpty {
// if treatUnknownAsEmpty is true, we'll handle an empty
// val later; so only continue otherwise
lastWriteCursor = i
continue
}
}
// apply any transformations
if f != nil {
var err error
val, err = f(key, val)
if err != nil {
return "", err
}
}
// convert val to a string as efficiently as possible
valStr := ToString(val)
// write the value; if it's empty, either return
// an error or write a default value
if valStr == "" {
if errOnEmpty {
return "", fmt.Errorf("evaluated placeholder %s%s%s is empty",
string(phOpen), key, string(phClose))
} else if empty != "" {
sb.WriteString(empty)
}
} else {
sb.WriteString(valStr)
}
// advance cursor to end of placeholder
i = end
lastWriteCursor = i + 1
}
// flush any unwritten remainder
sb.WriteString(input[lastWriteCursor:])
return sb.String(), nil
}
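// As a worked example of escaping: the input `\{literal\}` is written
// out as `{literal}` and is never treated as a placeholder.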
// ToString returns val as a string, as efficiently as possible.
// EXPERIMENTAL: may be changed or removed later.
func ToString(val any) string {
switch v := val.(type) {
case nil:
return ""
case string:
return v
case fmt.Stringer:
return v.String()
case error:
return v.Error()
case byte:
return string(v)
case []byte:
return string(v)
case []rune:
return string(v)
case int:
return strconv.Itoa(v)
case int32:
return strconv.Itoa(int(v))
case int64:
return strconv.Itoa(int(v))
case uint:
return strconv.Itoa(int(v))
case uint32:
return strconv.Itoa(int(v))
case uint64:
return strconv.Itoa(int(v))
case float32:
return strconv.FormatFloat(float64(v), 'f', -1, 32)
case float64:
return strconv.FormatFloat(v, 'f', -1, 64)
case bool:
if v {
return "true"
}
return "false"
default:
return fmt.Sprintf("%+v", v)
}
}
// ReplacerFunc is a function that returns a replacement for the
// given key along with true if the function is able to service
// that key (even if the value is blank). If the function does
// not recognize the key, false should be returned.
type ReplacerFunc func(key string) (any, bool)
func (f ReplacerFunc) replace(key string) (any, bool) {
return f(key)
}
// replacementProvider is a type that can provide replacements
// for placeholders. Allows for type assertion to determine
// which type of provider it is.
type replacementProvider interface {
replace(key string) (any, bool)
}
// fileReplacementProvider handles {file.*} replacements,
// reading a file from disk and replacing the placeholder
// with its contents.
type fileReplacementProvider struct{}
func (f fileReplacementProvider) replace(key string) (any, bool) {
if !strings.HasPrefix(key, filePrefix) {
return nil, false
}
filename := key[len(filePrefix):]
maxSize := 1024 * 1024
body, err := readFileIntoBuffer(filename, maxSize)
if err != nil {
wd, _ := os.Getwd()
Log().Error("placeholder: failed to read file",
zap.String("file", filename),
zap.String("working_dir", wd),
zap.Error(err))
return nil, true
}
return string(body), true
}
// globalDefaultReplacementProvider handles replacements
// that can be used in any context, such as system variables,
// time, or environment variables.
type globalDefaultReplacementProvider struct{}
func (f globalDefaultReplacementProvider) replace(key string) (any, bool) {
// check environment variable
const envPrefix = "env."
if strings.HasPrefix(key, envPrefix) {
return os.Getenv(key[len(envPrefix):]), true
}
switch key {
case "system.hostname":
// OK if there is an error; just return empty string
name, _ := os.Hostname()
return name, true
case "system.slash":
return string(filepath.Separator), true
case "system.os":
return runtime.GOOS, true
case "system.wd":
// OK if there is an error; just return empty string
wd, _ := os.Getwd()
return wd, true
case "system.arch":
return runtime.GOARCH, true
case "time.now":
return nowFunc(), true
case "time.now.http":
// According to the comment for http.TimeFormat, the timezone must be in UTC
// to generate the correct format.
// https://github.com/caddyserver/caddy/issues/5773
return nowFunc().UTC().Format(http.TimeFormat), true
case "time.now.common_log":
return nowFunc().Format("02/Jan/2006:15:04:05 -0700"), true
case "time.now.year":
return strconv.Itoa(nowFunc().Year()), true
case "time.now.unix":
return strconv.FormatInt(nowFunc().Unix(), 10), true
case "time.now.unix_ms":
return strconv.FormatInt(nowFunc().UnixNano()/int64(time.Millisecond), 10), true
}
return nil, false
}
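// For example, all of the following placeholders are resolved by this
// provider (values shown are illustrative):
//
//	{env.HOME}      -> the value of the HOME environment variable
//	{system.os}     -> runtime.GOOS, e.g. "linux"
//	{time.now.year} -> the current year as a string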
// readFileIntoBuffer reads the file at filename into a size-limited buffer.
func readFileIntoBuffer(filename string, size int) ([]byte, error) {
file, err := os.Open(filename)
if err != nil {
return nil, err
}
defer file.Close()
buffer := make([]byte, size)
n, err := file.Read(buffer)
if err != nil && err != io.EOF {
return nil, err
}
// slice the buffer to the actual size
return buffer[:n], nil
}
// ReplacementFunc is a function that is called when a
// replacement is being performed. It receives the
// variable (i.e. placeholder name) and the value that
// will be the replacement, and returns the value that
// will actually be the replacement, or an error. Note
// that errors are sometimes ignored by replacers.
type ReplacementFunc func(variable string, val any) (any, error)
// nowFunc is a variable so tests can change it
// in order to obtain a deterministic time.
var nowFunc = time.Now
// ReplacerCtxKey is the context key for a replacer.
const ReplacerCtxKey CtxKey = "replacer"
const phOpen, phClose, phEscape = '{', '}', '\\'
const filePrefix = "file."
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build gofuzz
package caddy
func FuzzReplacer(data []byte) (score int) {
NewReplacer().ReplaceAll(string(data), "")
NewReplacer().ReplaceAll(NewReplacer().ReplaceAll(string(data), ""), "")
NewReplacer().ReplaceAll(NewReplacer().ReplaceAll(string(data), ""), NewReplacer().ReplaceAll(string(data), ""))
NewReplacer().ReplaceAll(string(data[:len(data)/2]), string(data[len(data)/2:]))
return 0
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddy
import (
"context"
"os"
"os/signal"
"go.uber.org/zap"
)
// TrapSignals creates signal/interrupt handlers as best it can for the
// current OS. This is a rather invasive function to call in a Go program
// that already captures signals, so in that case it would be better to
// implement these handlers yourself.
func TrapSignals() {
trapSignalsCrossPlatform()
trapSignalsPosix()
}
// trapSignalsCrossPlatform captures SIGINT or interrupt (depending
// on the OS), which initiates a graceful shutdown. A second SIGINT
// or interrupt will forcefully exit the process immediately.
func trapSignalsCrossPlatform() {
go func() {
shutdown := make(chan os.Signal, 1)
signal.Notify(shutdown, os.Interrupt)
for i := 0; true; i++ {
<-shutdown
if i > 0 {
Log().Warn("force quit", zap.String("signal", "SIGINT"))
os.Exit(ExitCodeForceQuit)
}
Log().Info("shutting down", zap.String("signal", "SIGINT"))
go exitProcessFromSignal("SIGINT")
}
}()
}
// exitProcessFromSignal exits the process from a system signal.
func exitProcessFromSignal(sigName string) {
logger := Log().With(zap.String("signal", sigName))
exitProcess(context.TODO(), logger)
}
// Exit codes. Generally, you should NOT
// automatically restart the process if the
// exit code is ExitCodeFailedStartup (1).
const (
ExitCodeSuccess = iota
ExitCodeFailedStartup
ExitCodeForceQuit
ExitCodeFailedQuit
)
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows && !plan9 && !nacl && !js
package caddy
import (
"context"
"os"
"os/signal"
"syscall"
"github.com/caddyserver/certmagic"
"go.uber.org/zap"
)
// trapSignalsPosix captures POSIX-only signals.
func trapSignalsPosix() {
go func() {
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGUSR1, syscall.SIGUSR2)
for sig := range sigchan {
switch sig {
case syscall.SIGQUIT:
Log().Info("quitting process immediately", zap.String("signal", "SIGQUIT"))
certmagic.CleanUpOwnLocks(context.TODO(), Log()) // try to clean up locks anyway, it's important
os.Exit(ExitCodeForceQuit)
case syscall.SIGTERM:
Log().Info("shutting down apps, then terminating", zap.String("signal", "SIGTERM"))
exitProcessFromSignal("SIGTERM")
case syscall.SIGUSR1:
Log().Info("not implemented", zap.String("signal", "SIGUSR1"))
case syscall.SIGUSR2:
Log().Info("not implemented", zap.String("signal", "SIGUSR2"))
case syscall.SIGHUP:
// ignore; this signal is sometimes sent outside of the user's control
Log().Info("not implemented", zap.String("signal", "SIGHUP"))
}
}
}()
}
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddy
import (
"os"
"path/filepath"
"runtime"
"github.com/caddyserver/certmagic"
"go.uber.org/zap"
)
// StorageConverter is a type that can convert itself
// to a valid, usable certmagic.Storage value. (The
// value might be short-lived.) This interface allows
// us to adapt any CertMagic storage implementation
// into a consistent API for Caddy configuration.
type StorageConverter interface {
CertMagicStorage() (certmagic.Storage, error)
}
// HomeDir returns the best guess of the current user's home
// directory from environment variables. If unknown, "." (the
// current directory) is returned instead, except on Android
// (GOOS=android), where "/sdcard" is returned.
func HomeDir() string {
home := homeDirUnsafe()
if home == "" && runtime.GOOS == "android" {
home = "/sdcard"
}
if home == "" {
home = "."
}
return home
}
// homeDirUnsafe is a low-level function that returns
// the user's home directory from environment
// variables. Careful: if it cannot be determined, an
// empty string is returned. If you do not account for
// that case, use HomeDir() instead; otherwise you
// may end up using the root of the file system.
func homeDirUnsafe() string {
home := os.Getenv("HOME")
if home == "" && runtime.GOOS == "windows" {
drive := os.Getenv("HOMEDRIVE")
path := os.Getenv("HOMEPATH")
home = drive + path
if drive == "" || path == "" {
home = os.Getenv("USERPROFILE")
}
}
if home == "" && runtime.GOOS == "plan9" {
home = os.Getenv("home")
}
return home
}
// AppConfigDir returns the directory where the user's configuration should be stored.
//
// If XDG_CONFIG_HOME is set, it returns: $XDG_CONFIG_HOME/caddy.
// Otherwise, os.UserConfigDir() is used; if successful, it appends
// "Caddy" (Windows & Mac) or "caddy" (every other OS) to the path.
// If it returns an error, the fallback path "./caddy" is returned.
//
// The config directory is not guaranteed to be different from
// AppDataDir().
//
// Unlike os.UserConfigDir(), this function prefers the
// XDG_CONFIG_HOME env var on all platforms, not just Unix.
//
// Ref: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
func AppConfigDir() string {
if basedir := os.Getenv("XDG_CONFIG_HOME"); basedir != "" {
return filepath.Join(basedir, "caddy")
}
basedir, err := os.UserConfigDir()
if err != nil {
Log().Warn("unable to determine directory for user configuration; falling back to current directory", zap.Error(err))
return "./caddy"
}
subdir := "caddy"
switch runtime.GOOS {
case "windows", "darwin":
subdir = "Caddy"
}
return filepath.Join(basedir, subdir)
}
// AppDataDir returns a directory path that is suitable for storing
// application data on disk. It uses the environment for finding the
// best place to store data, and appends a "caddy" or "Caddy" (depending
// on OS and environment) subdirectory.
//
// For a base directory path:
// If XDG_DATA_HOME is set, it returns: $XDG_DATA_HOME/caddy; otherwise,
// on Windows it returns: %AppData%/Caddy,
// on Mac: $HOME/Library/Application Support/Caddy,
// on Plan9: $home/lib/caddy,
// on Android: $HOME/caddy,
// and on everything else: $HOME/.local/share/caddy.
//
// If a data directory cannot be determined, it returns "./caddy"
// (this is not ideal, and the environment should be fixed).
//
// The data directory is not guaranteed to be different from AppConfigDir().
//
// Ref: https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
func AppDataDir() string {
if basedir := os.Getenv("XDG_DATA_HOME"); basedir != "" {
return filepath.Join(basedir, "caddy")
}
switch runtime.GOOS {
case "windows":
appData := os.Getenv("AppData")
if appData != "" {
return filepath.Join(appData, "Caddy")
}
case "darwin":
home := homeDirUnsafe()
if home != "" {
return filepath.Join(home, "Library", "Application Support", "Caddy")
}
case "plan9":
home := homeDirUnsafe()
if home != "" {
return filepath.Join(home, "lib", "caddy")
}
case "android":
home := homeDirUnsafe()
if home != "" {
return filepath.Join(home, "caddy")
}
default:
home := homeDirUnsafe()
if home != "" {
return filepath.Join(home, ".local", "share", "caddy")
}
}
return "./caddy"
}
// ConfigAutosavePath is the default path to which the last config will be persisted.
var ConfigAutosavePath = filepath.Join(AppConfigDir(), "autosave.json")
// DefaultStorage is Caddy's default storage module.
var DefaultStorage = &certmagic.FileStorage{Path: AppDataDir()}
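// exampleScopedStorage is an illustrative sketch (hypothetical): a separate
// certmagic.FileStorage rooted in a "sandbox" subdirectory of AppDataDir(),
// the same base directory that DefaultStorage above uses.
func exampleScopedStorage() certmagic.Storage {
	return &certmagic.FileStorage{Path: filepath.Join(AppDataDir(), "sandbox")}
}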
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddy
import (
"fmt"
"sync"
"sync/atomic"
)
// UsagePool is a thread-safe map that pools values
// based on usage (reference counting). Values are
// only inserted if they do not already exist. There
// are two ways to add values to the pool:
//
// 1. LoadOrStore will increment usage and store the
// value immediately if it does not already exist.
// 2. LoadOrNew will atomically check for existence
// and construct the value immediately if it does
// not already exist, or increment the usage
// otherwise, then store that value in the pool.
// When the constructed value is finally deleted
// from the pool (when its usage reaches 0), it
// will be cleaned up by calling Destruct().
//
// The use of LoadOrNew allows values to be created
// and reused and finally cleaned up only once, even
// though they may have many references throughout
// their lifespan. This is helpful, for example, when
// sharing thread-safe io.Writers that you only want
// to open and close once.
//
// There is no way to overwrite an existing key in the
// pool without first deleting it as many times as it
// was stored. Deleting too many times will panic.
//
// The implementation does not use a sync.Pool because
// UsagePool needs additional atomicity to run the
// constructor function when LoadOrNew creates a new
// value. (We could probably use sync.Pool, but we'd
// still have to layer our own additional locks on top.)
//
// An empty UsagePool is NOT safe to use; always call
// NewUsagePool() to make a new one.
type UsagePool struct {
sync.RWMutex
pool map[any]*usagePoolVal
}
// NewUsagePool returns a new usage pool that is ready to use.
func NewUsagePool() *UsagePool {
return &UsagePool{
pool: make(map[any]*usagePoolVal),
}
}
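// exampleUsagePoolSharing is an illustrative sketch (hypothetical) of the
// reference-counting contract described above: the second LoadOrStore on the
// same key loads the existing value and bumps its usage to 2, so Delete must
// be called twice before the entry is actually removed from the pool.
func exampleUsagePoolSharing() {
	pool := NewUsagePool()
	first, loadedFirst := pool.LoadOrStore("shared", "original")      // stored; loadedFirst == false
	second, loadedSecond := pool.LoadOrStore("shared", "replacement") // loaded; loadedSecond == true, second == "original"
	fmt.Println(first, loadedFirst, second, loadedSecond)
	pool.Delete("shared") // usage 2 -> 1; entry stays in the pool
	pool.Delete("shared") // usage 1 -> 0; entry removed (Destruct would run if the value implemented Destructor)
}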
// LoadOrNew loads the value associated with key from the pool if it
// already exists. If the key doesn't exist, it will call construct
// to create a new value and then store it in the pool. An error
// is only returned if the constructor returns an error. The loaded
// or constructed value is returned. The loaded return value is true
// if the value already existed and was loaded, or false if it was
// newly constructed.
func (up *UsagePool) LoadOrNew(key any, construct Constructor) (value any, loaded bool, err error) {
var upv *usagePoolVal
up.Lock()
upv, loaded = up.pool[key]
if loaded {
atomic.AddInt32(&upv.refs, 1)
up.Unlock()
upv.RLock()
value = upv.value
err = upv.err
upv.RUnlock()
} else {
upv = &usagePoolVal{refs: 1}
upv.Lock()
up.pool[key] = upv
up.Unlock()
value, err = construct()
if err == nil {
upv.value = value
} else {
upv.err = err
up.Lock()
// deleting the entry here *should* be safe because we
// still hold the write lock on upv, but we might also
// need to ensure that upv.err is nil before doing this,
// since we released the write lock on up during construct
// and another goroutine may have loaded this entry in the
// meantime
delete(up.pool, key)
up.Unlock()
}
upv.Unlock()
}
return
}
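// exampleLoadOrNewSharing is an illustrative sketch (hypothetical) of the
// LoadOrNew contract: the constructor runs only for the first caller of a
// given key; later callers load the already-constructed value and merely
// bump its usage count. sharedResource is a hypothetical Destructor
// implementation sketched after the Destructor interface below.
func exampleLoadOrNewSharing(pool *UsagePool, key string) error {
	value, loaded, err := pool.LoadOrNew(key, func() (Destructor, error) {
		return &sharedResource{name: key}, nil // runs only if key is not yet in the pool
	})
	if err != nil {
		return err // the constructor failed, so nothing was stored for key
	}
	fmt.Println("reused existing value:", loaded)
	_ = value.(*sharedResource) // ... use the shared resource here ...
	// Pair every successful LoadOrNew with exactly one Delete; the final
	// Delete (when usage reaches 0) calls Destruct on the value.
	_, err = pool.Delete(key)
	return err
}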
// LoadOrStore loads the value associated with key from the pool if it
// already exists, or stores it if it does not exist. It returns the
// value that was either loaded or stored, and true if the value already
// existed and was loaded, false if the value didn't exist and was stored.
func (up *UsagePool) LoadOrStore(key, val any) (value any, loaded bool) {
var upv *usagePoolVal
up.Lock()
upv, loaded = up.pool[key]
if loaded {
atomic.AddInt32(&upv.refs, 1)
up.Unlock()
upv.Lock()
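// The existing entry may carry a constructor error left over from a
// concurrent LoadOrNew whose construct failed after we obtained the
// entry; in that case, adopt the caller's value and clear the error.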
if upv.err == nil {
value = upv.value
} else {
upv.value = val
upv.err = nil
}
upv.Unlock()
} else {
upv = &usagePoolVal{refs: 1, value: val}
up.pool[key] = upv
up.Unlock()
value = val
}
return
}
// Range iterates the pool similarly to how sync.Map.Range() does:
// it calls f for every key in the pool, and if f returns false,
// iteration is stopped. Ranging does not affect usage counts.
//
// This method is somewhat naive and acquires a read lock on the
// entire pool during iteration, so do your best to make f() really
// fast, m'kay?
func (up *UsagePool) Range(f func(key, value any) bool) {
up.RLock()
defer up.RUnlock()
for key, upv := range up.pool {
upv.RLock()
if upv.err != nil {
upv.RUnlock()
continue
}
val := upv.value
upv.RUnlock()
if !f(key, val) {
break
}
}
}
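// exampleUsagePoolCount is an illustrative sketch (hypothetical): counting
// entries by iterating the pool with Range. The whole pool is read-locked
// while the callback runs, so the callback should stay cheap.
func exampleUsagePoolCount(pool *UsagePool) int {
	var count int
	pool.Range(func(key, value any) bool {
		count++
		return true // returning false would stop the iteration early
	})
	return count
}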
// Delete decrements the usage count for key and removes the
// value from the underlying map if the usage is 0. It returns
// true if the usage count reached 0 and the value was deleted.
// It panics if the usage count drops below 0; always pair each
// successful LoadOrStore or LoadOrNew with exactly one Delete.
func (up *UsagePool) Delete(key any) (deleted bool, err error) {
up.Lock()
upv, ok := up.pool[key]
if !ok {
up.Unlock()
return false, nil
}
refs := atomic.AddInt32(&upv.refs, -1)
if refs == 0 {
delete(up.pool, key)
up.Unlock()
upv.RLock()
val := upv.value
upv.RUnlock()
if destructor, ok := val.(Destructor); ok {
err = destructor.Destruct()
}
deleted = true
} else {
up.Unlock()
if refs < 0 {
panic(fmt.Sprintf("deleted more than stored: %#v (usage: %d)",
upv.value, upv.refs))
}
}
return
}
// References returns the number of references (count of usages) to a
// key in the pool, and true if the key exists, or false otherwise.
func (up *UsagePool) References(key any) (int, bool) {
up.RLock()
upv, loaded := up.pool[key]
up.RUnlock()
if loaded {
// note: it might be safer to read this count while still
// holding the lock on the UsagePool, since it can change
// as soon as the lock is released
refs := atomic.LoadInt32(&upv.refs)
return int(refs), true
}
return 0, false
}
// Constructor is a function that returns a new value
// that can destruct itself when it is no longer needed.
type Constructor func() (Destructor, error)
// Destructor is a value that can clean itself up when
// it is deallocated.
type Destructor interface {
Destruct() error
}
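// sharedResource is an illustrative, hypothetical Destructor implementation
// (referenced by the LoadOrNew sketch above): a value that should be set up
// once and torn down exactly once, when the last holder of its pool key
// calls Delete and the usage count reaches zero.
type sharedResource struct {
	name string
}
// Destruct releases whatever the resource holds; this sketch only prints a
// message, since there is no real resource behind it.
func (r *sharedResource) Destruct() error {
	fmt.Println("destructing shared resource:", r.name)
	return nil
}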
type usagePoolVal struct {
refs int32 // accessed atomically; must be 64-bit aligned for 32-bit systems
value any
err error
sync.RWMutex
}