package distribution
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"time"
"github.com/distribution/reference"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
var (
// ErrBlobExists is returned when a blob already exists.
ErrBlobExists = errors.New("blob exists")
// ErrBlobDigestUnsupported is returned when the blob digest is an
// unsupported version.
ErrBlobDigestUnsupported = errors.New("unsupported blob digest")
// ErrBlobUnknown is returned when a blob is not found.
ErrBlobUnknown = errors.New("unknown blob")
// ErrBlobUploadUnknown is returned when an upload is not found.
ErrBlobUploadUnknown = errors.New("blob upload unknown")
// ErrBlobInvalidLength is returned when the blob's length on commit does
// not match the descriptor or is otherwise an invalid value.
ErrBlobInvalidLength = errors.New("blob invalid length")
)
// ErrBlobInvalidDigest is returned when a digest verification check fails.
type ErrBlobInvalidDigest struct {
Digest digest.Digest
Reason error
}
func (err ErrBlobInvalidDigest) Error() string {
return fmt.Sprintf("invalid digest for referenced layer: %v, %v",
err.Digest, err.Reason)
}
// ErrBlobMounted is returned when a blob is mounted from another repository
// instead of initiating an upload session.
type ErrBlobMounted struct {
From reference.Canonical
Descriptor v1.Descriptor
}
func (err ErrBlobMounted) Error() string {
return fmt.Sprintf("blob mounted from: %v to: %v",
err.From, err.Descriptor)
}
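// A minimal usage sketch (not part of the original API; the blobs value is
// assumed to come from a repository implementation): errors.As recovers the
// typed ErrBlobMounted so the caller can skip the upload entirely.
func exampleHandleCreateError(ctx context.Context, blobs BlobIngester) error {
    w, err := blobs.Create(ctx)
    if err != nil {
        var mounted ErrBlobMounted
        if errors.As(err, &mounted) {
            // The blob was mounted from another repository; no upload
            // session exists and no data needs to be written.
            fmt.Printf("mounted %s from %s\n", mounted.Descriptor.Digest, mounted.From)
            return nil
        }
        return err
    }
    // An upload session was started; ensure it is cleaned up on error paths.
    defer w.Cancel(ctx)
    // ... write data and Commit ...
    return nil
}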
// Descriptor describes targeted content. Used in conjunction with a blob
// store, a descriptor can be used to fetch, store and target any kind of
// blob. The struct also describes the wire protocol format. Fields should
// only be added but never changed.
//
// Descriptor is an alias for [v1.Descriptor].
type Descriptor = v1.Descriptor
// BlobStatter makes blob descriptors available by digest. The service may
// provide a descriptor of a different digest if the provided digest is not
// canonical.
type BlobStatter interface {
// Stat provides metadata about a blob identified by the digest. If the
// blob is unknown to the describer, ErrBlobUnknown will be returned.
Stat(ctx context.Context, dgst digest.Digest) (v1.Descriptor, error)
}
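// A hedged sketch of typical Stat usage: probe for existence by digest and
// treat the ErrBlobUnknown sentinel as "not present" rather than a hard
// failure. The statter is assumed to come from a blob store implementation.
func exampleBlobExists(ctx context.Context, statter BlobStatter, dgst digest.Digest) (bool, error) {
    desc, err := statter.Stat(ctx, dgst)
    if err != nil {
        if errors.Is(err, ErrBlobUnknown) {
            return false, nil
        }
        return false, err
    }
    _ = desc // desc.Size and desc.MediaType describe the stored blob
    return true, nil
}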
// BlobDeleter enables deleting blobs from storage.
type BlobDeleter interface {
Delete(ctx context.Context, dgst digest.Digest) error
}
// BlobEnumerator enables iterating over blobs from storage
type BlobEnumerator interface {
Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error
}
// BlobDescriptorService manages metadata about a blob by digest. Most
// implementations will not expose such an interface explicitly. Such mappings
// should be maintained by interacting with the BlobIngester. Hence, this is
// left off of BlobService and BlobStore.
type BlobDescriptorService interface {
BlobStatter
// SetDescriptor assigns the descriptor to the digest. The provided digest and
// the digest in the descriptor must map to identical content, but they may
// differ in their algorithm. The descriptor must have the canonical
// digest of the content and the digest algorithm must match the
// annotator's canonical algorithm.
//
// Such a facility can be used to map blobs between digest domains, with
// the restriction that the algorithm of the descriptor must match the
// canonical algorithm (i.e. sha256) of the annotator.
SetDescriptor(ctx context.Context, dgst digest.Digest, desc v1.Descriptor) error
// Clear enables descriptors to be unlinked
Clear(ctx context.Context, dgst digest.Digest) error
}
// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService.
type BlobDescriptorServiceFactory interface {
BlobAccessController(svc BlobDescriptorService) BlobDescriptorService
}
// BlobProvider describes operations for getting blob data.
type BlobProvider interface {
// Get returns the entire blob identified by digest along with the descriptor.
Get(ctx context.Context, dgst digest.Digest) ([]byte, error)
// Open provides an [io.ReadSeekCloser] to the blob identified by the provided
// descriptor. If the blob is not known to the service, an error is returned.
Open(ctx context.Context, dgst digest.Digest) (io.ReadSeekCloser, error)
}
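// A minimal sketch (assuming a BlobProvider from a blob store): stream a
// blob while re-verifying its content against the requested digest using
// go-digest's Verifier, returning ErrBlobInvalidDigest on mismatch.
func exampleVerifiedRead(ctx context.Context, provider BlobProvider, dgst digest.Digest, dst io.Writer) error {
    rc, err := provider.Open(ctx, dgst)
    if err != nil {
        return err
    }
    defer rc.Close()
    verifier := dgst.Verifier()
    // Copy to the destination and the verifier in one pass.
    if _, err := io.Copy(io.MultiWriter(dst, verifier), rc); err != nil {
        return err
    }
    if !verifier.Verified() {
        return ErrBlobInvalidDigest{Digest: dgst, Reason: errors.New("content does not match digest")}
    }
    return nil
}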
// BlobServer can serve blobs via http.
type BlobServer interface {
// ServeBlob attempts to serve the blob, identified by dgst, via http. The
// service may decide to redirect the client elsewhere or serve the data
// directly.
//
// This handler only issues successful responses, such as 2xx or 3xx,
// meaning it serves data or issues a redirect. If the blob is not
// available, an error will be returned and the caller may still issue a
// response.
//
// The implementation may serve the same blob from a different digest
// domain. The appropriate headers will be set for the blob, unless they
// have already been set by the caller.
ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error
}
// BlobIngester ingests blob data.
type BlobIngester interface {
// Put inserts the content p into the blob service, returning a descriptor
// or an error.
Put(ctx context.Context, mediaType string, p []byte) (v1.Descriptor, error)
// Create allocates a new blob writer to add a blob to this service. The
// returned handle can be written to and later resumed using an opaque
// identifier. With this approach, one can Close and Resume a BlobWriter
// multiple times until the BlobWriter is committed or cancelled.
Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)
// Resume attempts to resume a write to a blob, identified by an id.
Resume(ctx context.Context, id string) (BlobWriter, error)
}
// BlobCreateOption is a general extensible function argument for blob creation
// methods. A BlobIngester may choose to honor any or none of the given
// BlobCreateOptions, which can be specific to the implementation of the
// BlobIngester receiving them.
// TODO (brianbland): unify this with ManifestServiceOption in the future
type BlobCreateOption interface {
Apply(interface{}) error
}
// CreateOptions is a collection of blob creation modifiers relevant to general
// blob storage intended to be configured by the BlobCreateOption.Apply method.
type CreateOptions struct {
Mount struct {
ShouldMount bool
From reference.Canonical
// Stat allows passing a precalculated descriptor to link and return.
// The blob access check is skipped if set.
Stat *v1.Descriptor
}
}
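// A sketch of how an option can fill in CreateOptions through the
// BlobCreateOption.Apply contract. The exampleMountOption type is
// hypothetical, not part of the original API; real implementations follow
// the same type-assertion pattern.
type exampleMountOption struct {
    from reference.Canonical
}

func (o exampleMountOption) Apply(v interface{}) error {
    opts, ok := v.(*CreateOptions)
    if !ok {
        return fmt.Errorf("unexpected options type: %T", v)
    }
    opts.Mount.ShouldMount = true
    opts.Mount.From = o.from
    return nil
}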
// BlobWriter provides a handle for inserting data into a blob store.
// Instances should be obtained from BlobIngester.Create and
// BlobIngester.Resume. If supported by the store, a writer can be
// recovered with the id.
type BlobWriter interface {
io.WriteCloser
io.ReaderFrom
// Size returns the number of bytes written to this blob.
Size() int64
// ID returns the identifier for this writer. The ID can be used with the
// Blob service to later resume the write.
ID() string
// StartedAt returns the time this blob write was started.
StartedAt() time.Time
// Commit completes the blob writer process. The content is verified
// against the provided provisional descriptor, which may result in an
// error. Depending on the implementation, written data may be validated
// against the provisional descriptor fields. If MediaType is not present,
// the implementation may reject the commit or assign
// "application/octet-stream" to the blob. The returned descriptor may
// have a different digest depending on the blob store, referred to as the
// canonical descriptor.
Commit(ctx context.Context, provisional v1.Descriptor) (canonical v1.Descriptor, err error)
// Cancel ends the blob write without storing any data and frees any
// associated resources. Any data written thus far will be lost. Cancel
// implementations should allow multiple calls, even after a commit,
// treating them as a no-op. This allows use of Cancel in a defer statement,
// increasing the assurance that it is correctly called.
Cancel(ctx context.Context) error
}
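// A hedged end-to-end sketch of the write path described above: create a
// writer, stream the content while hashing it, then Commit against a
// provisional descriptor built from what was written. Assumes go-digest's
// Digester API; the blobs value comes from a repository implementation.
func exampleUpload(ctx context.Context, blobs BlobIngester, mediaType string, content io.Reader) (v1.Descriptor, error) {
    w, err := blobs.Create(ctx)
    if err != nil {
        return v1.Descriptor{}, err
    }
    // Cancel after a successful Commit is a no-op, so deferring is safe.
    defer w.Cancel(ctx)
    digester := digest.Canonical.Digester()
    n, err := io.Copy(w, io.TeeReader(content, digester.Hash()))
    if err != nil {
        return v1.Descriptor{}, err
    }
    return w.Commit(ctx, v1.Descriptor{
        MediaType: mediaType,
        Size:      n,
        Digest:    digester.Digest(),
    })
}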
// BlobService combines the operations to access, read and write blobs. This
// can be used to describe remote blob services.
type BlobService interface {
BlobStatter
BlobProvider
BlobIngester
}
// BlobStore represent the entire suite of blob related operations. Such an
// implementation can access, read, write, delete and serve blobs.
type BlobStore interface {
BlobService
BlobServer
BlobDeleter
}
package configuration
import (
"errors"
"fmt"
"io"
"net/http"
"reflect"
"strings"
"time"
)
// Configuration is a versioned registry configuration, intended to be provided by a yaml file, and
// optionally modified by environment variables.
//
// Note that yaml field names should never include _ characters, since this is the separator used
// in environment variable names.
type Configuration struct {
// Version is the version which defines the format of the rest of the configuration
Version Version `yaml:"version"`
// Log supports setting various parameters related to the logging
// subsystem.
Log Log `yaml:"log"`
// Loglevel is the level at which registry operations are logged.
//
// Deprecated: Use Log.Level instead.
Loglevel Loglevel `yaml:"loglevel,omitempty"`
// Storage is the configuration for the registry's storage driver
Storage Storage `yaml:"storage"`
// Auth allows configuration of various authorization methods that may be
// used to gate requests.
Auth Auth `yaml:"auth,omitempty"`
// Middleware lists all middlewares to be used by the registry.
Middleware map[string][]Middleware `yaml:"middleware,omitempty"`
// HTTP contains configuration parameters for the registry's http
// interface.
HTTP HTTP `yaml:"http,omitempty"`
// Notifications specifies configuration about the various endpoints to which
// registry events are dispatched.
Notifications Notifications `yaml:"notifications,omitempty"`
// Redis configures the redis pool available to the registry webapp.
Redis Redis `yaml:"redis,omitempty"`
// Health provides the configuration section for health checks.
// It allows defining various checks to monitor the health of different subsystems.
Health Health `yaml:"health,omitempty"`
// Catalog configures the catalog endpoint (/v2/_catalog). It provides
// options to control the maximum number of entries returned by the
// catalog endpoint.
Catalog Catalog `yaml:"catalog,omitempty"`
// Proxy defines the configuration options for using the registry as a pull-through cache.
Proxy Proxy `yaml:"proxy,omitempty"`
// Validation configures validation options for the registry.
Validation Validation `yaml:"validation,omitempty"`
// Policy configures registry policy options.
Policy Policy `yaml:"policy,omitempty"`
}
// Policy defines configuration options for managing registry policies.
type Policy struct {
// Repository configures policies for repositories
Repository Repository `yaml:"repository,omitempty"`
}
// Repository defines configuration options related to repository policies in the registry.
type Repository struct {
// Classes is a list of repository classes that the registry allows content for.
// This value is matched against the media type in uploaded manifests.
// If this field is non-empty, the registry enforces that all uploaded
// content belongs to one of the specified classes.
Classes []string `yaml:"classes"`
}
// Catalog provides configuration options for the /v2/_catalog endpoint.
type Catalog struct {
// MaxEntries is the maximum number of entries returned by the catalog
// endpoint. Requesting n entries from the catalog endpoint will return
// at most MaxEntries entries. An empty or negative value defaults to
// 1000 maximum entries.
MaxEntries int `yaml:"maxentries,omitempty"`
}
// Log represents the configuration for logging within the application.
type Log struct {
// AccessLog configures access logging.
AccessLog AccessLog `yaml:"accesslog,omitempty"`
// Level is the granularity at which registry operations are logged.
Level Loglevel `yaml:"level,omitempty"`
// Formatter overrides the default formatter with another. Options
// include "text", "json" and "logstash".
Formatter string `yaml:"formatter,omitempty"`
// Fields allows users to specify static string fields to include in
// the logger context.
Fields map[string]interface{} `yaml:"fields,omitempty"`
// Hooks allows users to configure log hooks that trigger subsequent
// handling when log messages of the configured levels are emitted.
Hooks []LogHook `yaml:"hooks,omitempty"`
// ReportCaller allows users to configure logging to report the caller.
ReportCaller bool `yaml:"reportcaller,omitempty"`
}
// AccessLog configures options for access logging.
type AccessLog struct {
// Disabled disables access logging.
Disabled bool `yaml:"disabled,omitempty"`
}
// HTTP defines configuration options for the HTTP interface of the registry.
type HTTP struct {
// Addr specifies the bind address for the registry instance.
Addr string `yaml:"addr,omitempty"`
// Net specifies the net portion of the bind address. A default empty value means tcp.
Net string `yaml:"net,omitempty"`
// Host specifies an externally-reachable address for the registry, as a fully
// qualified URL.
Host string `yaml:"host,omitempty"`
// Prefix specifies a URL path prefix for the HTTP interface.
// This can be used to serve the registry under a specific path
// rather than at the root of the domain (e.g., "/registry").
Prefix string `yaml:"prefix,omitempty"`
// Secret specifies the secret key which HMAC tokens are created with.
Secret string `yaml:"secret,omitempty"`
// RelativeURLs specifies that relative URLs should be returned in
// Location headers
RelativeURLs bool `yaml:"relativeurls,omitempty"`
// DrainTimeout is the amount of time to wait for connections to drain
// before shutting down when the registry receives a stop signal.
DrainTimeout time.Duration `yaml:"draintimeout,omitempty"`
// TLS instructs the http server to listen with a TLS configuration.
// This only supports simple TLS configuration with a cert and key.
// Mostly, this is useful for testing situations or simple deployments
// that require TLS. If more complex configurations are required, use
// a proxy or make a proposal to add support here.
TLS TLS `yaml:"tls,omitempty"`
// Headers is a set of headers to include in HTTP responses. A common
// use case for this would be security headers such as
// Strict-Transport-Security. The map keys are the header names, and
// the values are the associated header payloads.
Headers http.Header `yaml:"headers,omitempty"`
// Debug configures the http debug interface, if specified. This can
// include services such as pprof, expvar and other data that should
// not be exposed externally. Left disabled by default.
Debug Debug `yaml:"debug,omitempty"`
// HTTP2 configures options for HTTP/2 support.
HTTP2 HTTP2 `yaml:"http2,omitempty"`
// H2C configures support for HTTP/2 without requiring TLS (HTTP/2 Cleartext).
H2C H2C `yaml:"h2c,omitempty"`
}
// Debug defines the configuration options for the registry's debug interface.
// It allows administrators to enable or disable the debug server and configure
// telemetry and monitoring endpoints such as Prometheus.
type Debug struct {
// Addr specifies the bind address for the debug server.
Addr string `yaml:"addr,omitempty"`
// Prometheus configures the Prometheus telemetry endpoint for monitoring purposes.
Prometheus Prometheus `yaml:"prometheus,omitempty"`
}
// Prometheus configures the Prometheus telemetry endpoint for the registry.
// It allows administrators to enable Prometheus monitoring and customize
// the scrape path for metric collection.
type Prometheus struct {
// Enabled determines whether Prometheus telemetry is enabled or not.
Enabled bool `yaml:"enabled,omitempty"`
// Path specifies the URL path where the Prometheus metrics are exposed.
// The default is "/metrics", but it can be customized here.
Path string `yaml:"path,omitempty"`
}
// HTTP2 configures options for HTTP/2 support.
type HTTP2 struct {
// Specifies whether the registry should disallow clients attempting
// to connect via HTTP/2. If set to true, only HTTP/1.1 is supported.
Disabled bool `yaml:"disabled,omitempty"`
}
// H2C configures support for HTTP/2 Cleartext.
type H2C struct {
// Enabled enables H2C (HTTP/2 Cleartext), supporting HTTP/2 without needing
// to configure TLS. This is useful when deploying the registry behind a
// load balancer (e.g. Cloud Run).
Enabled bool `yaml:"enabled,omitempty"`
}
// TLS defines the configuration options for enabling and configuring TLS (Transport Layer Security)
// for secure communication between the registry and clients. It allows the registry to listen for
// HTTPS connections with a specified certificate, key, and optional client authentication settings.
type TLS struct {
// Certificate specifies the path to an x509 certificate file to
// be used for TLS.
Certificate string `yaml:"certificate,omitempty"`
// Key specifies the path to the x509 key file, which should
// contain the private portion for the file specified in
// Certificate.
Key string `yaml:"key,omitempty"`
// ClientCAs specifies the CA certs for client authentication.
// A file may contain multiple CA certificates encoded as PEM.
ClientCAs []string `yaml:"clientcas,omitempty"`
// ClientAuth is the client certificate authentication mode.
// One of: request-client-cert, require-any-client-cert,
// verify-client-cert-if-given, require-and-verify-client-cert.
ClientAuth ClientAuth `yaml:"clientauth,omitempty"`
// Specifies the lowest TLS version allowed
MinimumTLS string `yaml:"minimumtls,omitempty"`
// Specifies a list of cipher suites allowed
CipherSuites []string `yaml:"ciphersuites,omitempty"`
// LetsEncrypt is used to configure TLS through Let's Encrypt
// instead of manually specifying certificate and key. If a TLS
// certificate is specified, the Let's Encrypt section will not be used.
LetsEncrypt LetsEncrypt `yaml:"letsencrypt,omitempty"`
}
// LetsEncrypt configures automatic TLS certificate provisioning using Let's Encrypt.
type LetsEncrypt struct {
// CacheFile specifies the cache file to use for Let's Encrypt
// certificates and keys.
CacheFile string `yaml:"cachefile,omitempty"`
// Email is the email to use during Let's Encrypt registration
Email string `yaml:"email,omitempty"`
// Hosts specifies the hosts which are allowed to obtain Let's
// Encrypt certificates.
Hosts []string `yaml:"hosts,omitempty"`
// DirectoryURL points to the CA directory endpoint.
// If empty, LetsEncrypt is used.
DirectoryURL string `yaml:"directoryurl,omitempty"`
}
// LogHook is composed of hook Level and Type.
// After hooks are configured, they can execute the subsequent handling
// automatically when log messages of the defined levels are emitted.
// Example: a hook can send an email notification when an error log
// occurs in the app.
type LogHook struct {
// Disabled lets users select whether the hook is enabled or not.
Disabled bool `yaml:"disabled,omitempty"`
// Type allows users to select which type of hook handler they want.
Type string `yaml:"type,omitempty"`
// Levels sets which levels of log message will trigger the hook.
Levels []string `yaml:"levels,omitempty"`
// MailOptions allows users to configure email parameters.
MailOptions MailOptions `yaml:"options,omitempty"`
}
// MailOptions provides the configuration section for the mail handler.
type MailOptions struct {
// SMTP defines the configuration options for the SMTP server used for sending email notifications.
SMTP SMTP `yaml:"smtp,omitempty"`
// From defines mail sending address
From string `yaml:"from,omitempty"`
// To defines mail receiving address
To []string `yaml:"to,omitempty"`
}
// SMTP represents the configuration for an SMTP (Simple Mail Transfer Protocol) server
// used for sending emails. It includes settings for the SMTP server's address, authentication,
// and other relevant configurations needed to connect and send emails.
type SMTP struct {
// Addr defines the smtp host address.
Addr string `yaml:"addr,omitempty"`
// Username defines the user name for the smtp host.
Username string `yaml:"username,omitempty"`
// Password defines the password of the login user.
Password string `yaml:"password,omitempty"`
// Insecure defines whether the smtp login skips certificate verification.
Insecure bool `yaml:"insecure,omitempty"`
}
// FileChecker is a type of entry in the health section for checking files.
type FileChecker struct {
// Interval is the duration in between checks
Interval time.Duration `yaml:"interval,omitempty"`
// File is the path to check
File string `yaml:"file,omitempty"`
// Threshold is the number of times a check must fail to trigger an
// unhealthy state
Threshold int `yaml:"threshold,omitempty"`
}
// HTTPChecker is a type of entry in the health section for checking HTTP URIs.
type HTTPChecker struct {
// Timeout is the duration to wait before timing out the HTTP request
Timeout time.Duration `yaml:"timeout,omitempty"`
// StatusCode is the expected status code
StatusCode int
// Interval is the duration in between checks
Interval time.Duration `yaml:"interval,omitempty"`
// URI is the HTTP URI to check
URI string `yaml:"uri,omitempty"`
// Headers lists static headers that should be added to all requests
Headers http.Header `yaml:"headers"`
// Threshold is the number of times a check must fail to trigger an
// unhealthy state
Threshold int `yaml:"threshold,omitempty"`
}
// TCPChecker is a type of entry in the health section for checking TCP servers.
type TCPChecker struct {
// Timeout is the duration to wait before timing out the TCP connection
Timeout time.Duration `yaml:"timeout,omitempty"`
// Interval is the duration in between checks
Interval time.Duration `yaml:"interval,omitempty"`
// Addr is the TCP address to check
Addr string `yaml:"addr,omitempty"`
// Threshold is the number of times a check must fail to trigger an
// unhealthy state
Threshold int `yaml:"threshold,omitempty"`
}
// Health provides the configuration section for health checks.
type Health struct {
// FileCheckers is a list of paths to check
FileCheckers []FileChecker `yaml:"file,omitempty"`
// HTTPCheckers is a list of URIs to check
HTTPCheckers []HTTPChecker `yaml:"http,omitempty"`
// TCPCheckers is a list of TCP addresses to check
TCPCheckers []TCPChecker `yaml:"tcp,omitempty"`
// StorageDriver configures a health check on the configured storage
// driver
StorageDriver StorageDriver `yaml:"storagedriver,omitempty"`
}
// StorageDriver configures health checks specific to the storage driver.
type StorageDriver struct {
// Enabled turns on the health check for the storage driver
Enabled bool `yaml:"enabled,omitempty"`
// Interval is the duration in between checks
Interval time.Duration `yaml:"interval,omitempty"`
// Threshold is the number of times a check must fail to trigger an
// unhealthy state
Threshold int `yaml:"threshold,omitempty"`
}
// Platform specifies the characteristics of a computing environment
// and allows registry administrators to define required platforms for image validation.
// Administrators can select specific architectures and operating systems that must exist
// in the registry. This ensures that all image indexes uploaded to the registry are valid
// for the specified platforms.
type Platform struct {
// Architecture is the architecture for this platform
Architecture string `yaml:"architecture,omitempty"`
// OS is the operating system for this platform
OS string `yaml:"os,omitempty"`
}
// v0_1Configuration is a Version 0.1 Configuration struct
// This is currently aliased to Configuration, as it is the current version
type v0_1Configuration Configuration
// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a string of the form X.Y into a Version, validating that X and Y can represent unsigned integers
func (version *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
var versionString string
err := unmarshal(&versionString)
if err != nil {
return err
}
newVersion := Version(versionString)
if _, err := newVersion.major(); err != nil {
return err
}
if _, err := newVersion.minor(); err != nil {
return err
}
*version = newVersion
return nil
}
// CurrentVersion is the most recent Version that can be parsed
var CurrentVersion = MajorMinorVersion(0, 1)
// Loglevel is the level at which operations are logged
// This can be error, warn, info, or debug
type Loglevel string
// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a string into a Loglevel, lowercasing the string and validating that it represents a
// valid loglevel
func (loglevel *Loglevel) UnmarshalYAML(unmarshal func(interface{}) error) error {
var loglevelString string
err := unmarshal(&loglevelString)
if err != nil {
return err
}
loglevelString = strings.ToLower(loglevelString)
switch loglevelString {
case "error", "warn", "info", "debug":
default:
return fmt.Errorf("invalid loglevel %s Must be one of [error, warn, info, debug]", loglevelString)
}
*loglevel = Loglevel(loglevelString)
return nil
}
// Parameters defines a key-value parameters mapping
type Parameters map[string]interface{}
// Storage defines the configuration for registry object storage
type Storage map[string]Parameters
// Type returns the storage driver type, such as filesystem or s3
func (storage Storage) Type() string {
var storageType []string
// Find the single non-reserved key, which names the storage driver
for k := range storage {
switch k {
case "maintenance":
// allow configuration of maintenance
case "cache":
// allow configuration of caching
case "delete":
// allow configuration of delete
case "redirect":
// allow configuration of redirect
case "tag":
// allow configuration of tag
default:
storageType = append(storageType, k)
}
}
if len(storageType) > 1 {
panic("multiple storage drivers specified in configuration or environment: " + strings.Join(storageType, ", "))
}
if len(storageType) == 1 {
return storageType[0]
}
return ""
}
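// A small illustrative sketch (the values are examples only): reserved
// sections such as "maintenance" do not count as the storage type, so Type
// returns the single remaining driver key.
func exampleStorageType() string {
    storage := Storage{
        "filesystem":  Parameters{"rootdirectory": "/var/lib/registry"},
        "maintenance": Parameters{"uploadpurging": map[string]interface{}{"enabled": false}},
    }
    return storage.Type() // "filesystem"
}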
// TagParameters returns the Parameters map for a Storage tag configuration
func (storage Storage) TagParameters() Parameters {
return storage["tag"]
}
// setTagParameter changes the parameter at the provided key to the new value
func (storage Storage) setTagParameter(key string, value interface{}) {
if _, ok := storage["tag"]; !ok {
storage["tag"] = make(Parameters)
}
storage["tag"][key] = value
}
// Parameters returns the Parameters map for a Storage configuration
func (storage Storage) Parameters() Parameters {
return storage[storage.Type()]
}
// setParameter changes the parameter at the provided key to the new value
func (storage Storage) setParameter(key string, value interface{}) {
storage[storage.Type()][key] = value
}
// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a single item map into a Storage or a string into a Storage type with no parameters
func (storage *Storage) UnmarshalYAML(unmarshal func(interface{}) error) error {
var storageMap map[string]Parameters
err := unmarshal(&storageMap)
if err == nil {
if len(storageMap) > 1 {
types := make([]string, 0, len(storageMap))
for k := range storageMap {
switch k {
case "maintenance":
// allow for configuration of maintenance
case "cache":
// allow configuration of caching
case "delete":
// allow configuration of delete
case "redirect":
// allow configuration of redirect
case "tag":
// allow configuration of tag
default:
types = append(types, k)
}
}
if len(types) > 1 {
return fmt.Errorf("must provide exactly one storage type. Provided: %v", types)
}
}
*storage = storageMap
return nil
}
var storageType string
err = unmarshal(&storageType)
if err == nil {
*storage = Storage{storageType: Parameters{}}
return nil
}
return err
}
// MarshalYAML implements the yaml.Marshaler interface
func (storage Storage) MarshalYAML() (interface{}, error) {
if storage.Parameters() == nil {
return storage.Type(), nil
}
return map[string]Parameters(storage), nil
}
// Auth defines the configuration for registry authorization.
type Auth map[string]Parameters
// Type returns the auth type, such as htpasswd or token
func (auth Auth) Type() string {
// Return only key in this map
for k := range auth {
return k
}
return ""
}
// Parameters returns the Parameters map for an Auth configuration
func (auth Auth) Parameters() Parameters {
return auth[auth.Type()]
}
// setParameter changes the parameter at the provided key to the new value
func (auth Auth) setParameter(key string, value interface{}) {
auth[auth.Type()][key] = value
}
// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a single item map into an Auth or a string into an Auth type with no parameters
func (auth *Auth) UnmarshalYAML(unmarshal func(interface{}) error) error {
var m map[string]Parameters
err := unmarshal(&m)
if err == nil {
if len(m) > 1 {
types := make([]string, 0, len(m))
for k := range m {
types = append(types, k)
}
// TODO(stevvooe): May want to change this slightly for
// authorization to allow multiple challenges.
return fmt.Errorf("must provide exactly one type. Provided: %v", types)
}
*auth = m
return nil
}
var authType string
err = unmarshal(&authType)
if err == nil {
*auth = Auth{authType: Parameters{}}
return nil
}
return err
}
// MarshalYAML implements the yaml.Marshaler interface
func (auth Auth) MarshalYAML() (interface{}, error) {
if auth.Parameters() == nil {
return auth.Type(), nil
}
return map[string]Parameters(auth), nil
}
// Notifications configures multiple http endpoints.
type Notifications struct {
// EventConfig is the configuration for the event format that is sent to each Endpoint.
EventConfig Events `yaml:"events,omitempty"`
// Endpoints is a list of http configurations for endpoints that
// respond to webhook notifications. In the future, we may allow other
// kinds of endpoints, such as external queues.
Endpoints []Endpoint `yaml:"endpoints,omitempty"`
}
// Endpoint describes the configuration of an http webhook notification
// endpoint.
type Endpoint struct {
Name string `yaml:"name"` // identifies the endpoint in the registry instance.
Disabled bool `yaml:"disabled"` // disables the endpoint
URL string `yaml:"url"` // post url for the endpoint.
Headers http.Header `yaml:"headers"` // static headers that should be added to all requests
Timeout time.Duration `yaml:"timeout"` // HTTP timeout
Threshold int `yaml:"threshold"` // circuit breaker threshold before backing off on failure
Backoff time.Duration `yaml:"backoff"` // backoff duration
IgnoredMediaTypes []string `yaml:"ignoredmediatypes"` // target media types to ignore
Ignore Ignore `yaml:"ignore"` // ignore event types
}
// Events configures notification events.
type Events struct {
IncludeReferences bool `yaml:"includereferences"` // include reference data in manifest events
}
// Ignore configures media types and actions of events that will not be propagated
type Ignore struct {
MediaTypes []string `yaml:"mediatypes"` // target media types to ignore
Actions []string `yaml:"actions"` // ignore action types
}
// Middleware configures named middlewares to be applied at injection points.
type Middleware struct {
// Name the middleware registers itself as
Name string `yaml:"name"`
// Flag to disable middleware easily
Disabled bool `yaml:"disabled,omitempty"`
// Map of parameters that will be passed to the middleware's initialization function
Options Parameters `yaml:"options"`
}
// Proxy configures the registry as a pull through cache
type Proxy struct {
// RemoteURL is the URL of the remote registry
RemoteURL string `yaml:"remoteurl"`
// Username of the hub user
Username string `yaml:"username"`
// Password of the hub user
Password string `yaml:"password"`
// Exec specifies a custom exec-based command to retrieve credentials.
// If set, Username and Password are ignored.
Exec *ExecConfig `yaml:"exec,omitempty"`
// TTL is the expiry time of the content; content is cleaned up when it
// expires. If not set, it defaults to 7 * 24 hours.
// If set to zero, the cache will never expire.
TTL *time.Duration `yaml:"ttl,omitempty"`
}
// ExecConfig defines the configuration for executing a command as a credential helper.
// This allows the registry to authenticate against an upstream registry by executing a
// specified command to obtain credentials. The command can be re-executed based on the
// configured lifetime, enabling the registry to run as a pull-through cache that manages
// its authentication dynamically.
type ExecConfig struct {
// Command is the command to execute.
Command string `yaml:"command"`
// Lifetime is the expiry period of the credentials. The credentials
// returned by the command are reused for the configured lifetime, after
// which the command is re-executed to retrieve new credentials.
// If set to zero, the command will be executed for every request.
// If not set, the command will only be executed once.
Lifetime *time.Duration `yaml:"lifetime,omitempty"`
}
// Validation configures validation options for the registry.
type Validation struct {
// Enabled enables the other options in this section. This field is
// deprecated in favor of Disabled.
Enabled bool `yaml:"enabled,omitempty"`
// Disabled disables the other options in this section.
Disabled bool `yaml:"disabled,omitempty"`
// Manifests configures manifest validation.
Manifests ValidationManifests `yaml:"manifests,omitempty"`
}
// ValidationManifests configures validation rules for manifests pushed to the registry.
type ValidationManifests struct {
// URLs configures validation for URLs in pushed manifests.
URLs URLs `yaml:"urls,omitempty"`
// ImageIndexes configures validation of image indexes
Indexes ValidationIndexes `yaml:"indexes,omitempty"`
}
// URLs defines validation rules for URLs found in the manifests pushed to the registry.
type URLs struct {
// Allow specifies regular expressions (https://godoc.org/regexp/syntax)
// that URLs in pushed manifests must match.
Allow []string `yaml:"allow,omitempty"`
// Deny specifies regular expressions (https://godoc.org/regexp/syntax)
// that URLs in pushed manifests must not match.
Deny []string `yaml:"deny,omitempty"`
}
// ValidationIndexes configures validation rules for image indexes within the manifest.
type ValidationIndexes struct {
// Platforms configures which validation applies to the platform images included in an image index.
Platforms Platforms `yaml:"platforms"`
// PlatformList filters the set of platforms to validate for image existence.
PlatformList []Platform `yaml:"platformlist,omitempty"`
}
// Platforms configures which validation applies to the platform images
// included in an image index. This can be all, none, or list.
type Platforms string
// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a string into a Platforms option, lowercasing the string and validating that it represents a
// valid option
func (platforms *Platforms) UnmarshalYAML(unmarshal func(interface{}) error) error {
var platformsString string
err := unmarshal(&platformsString)
if err != nil {
return err
}
platformsString = strings.ToLower(platformsString)
switch platformsString {
case "all", "none", "list":
default:
return fmt.Errorf("invalid platforms option %s Must be one of [all, none, list]", platformsString)
}
*platforms = Platforms(platformsString)
return nil
}
// Parse parses an input configuration yaml document into a Configuration struct
// This should generally be capable of handling old configuration format versions
//
// Environment variables may be used to override configuration parameters other than version,
// following the scheme below:
// Configuration.Abc may be replaced by the value of REGISTRY_ABC,
// Configuration.Abc.Xyz may be replaced by the value of REGISTRY_ABC_XYZ, and so forth
func Parse(rd io.Reader) (*Configuration, error) {
in, err := io.ReadAll(rd)
if err != nil {
return nil, err
}
p := NewParser("registry", []VersionedParseInfo{
{
Version: MajorMinorVersion(0, 1),
ParseAs: reflect.TypeOf(v0_1Configuration{}),
ConversionFunc: func(c interface{}) (interface{}, error) {
if v0_1, ok := c.(*v0_1Configuration); ok {
if v0_1.Log.Level == Loglevel("") {
if v0_1.Loglevel != Loglevel("") {
v0_1.Log.Level = v0_1.Loglevel
} else {
v0_1.Log.Level = Loglevel("info")
}
}
if v0_1.Loglevel != Loglevel("") {
v0_1.Loglevel = Loglevel("")
}
if v0_1.Catalog.MaxEntries <= 0 {
v0_1.Catalog.MaxEntries = 1000
}
if v0_1.Storage.Type() == "" {
return nil, errors.New("no storage configuration provided")
}
return (*Configuration)(v0_1), nil
}
return nil, fmt.Errorf("expected *v0_1Configuration, received %#v", c)
},
},
})
config := new(Configuration)
err = p.Parse(in, config)
if err != nil {
return nil, err
}
return config, nil
}
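// A minimal usage sketch for Parse; the driver and rootdirectory below are
// illustrative values, not defaults.
func exampleParse() (*Configuration, error) {
    yml := `
version: 0.1
log:
  level: debug
storage:
  filesystem:
    rootdirectory: /var/lib/registry
`
    return Parse(strings.NewReader(yml))
}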
// RedisOptions represents the configuration options for Redis. This struct can be used
// to configure the connection to Redis in a universal (clustered or standalone) setup.
type RedisOptions struct {
// Addrs is either a single address or a seed list of host:port addresses
// of cluster/sentinel nodes.
Addrs []string `yaml:"addrs,omitempty"`
// ClientName will execute the `CLIENT SETNAME ClientName` command for each connection.
ClientName string `yaml:"clientname,omitempty"`
// DB is the database to be selected after connecting to the server.
// Only applicable to single-node and failover clients.
DB int `yaml:"db,omitempty"`
// Protocol specifies the Redis protocol version to use.
Protocol int `yaml:"protocol,omitempty"`
// Username for authentication (used with ACLs).
Username string `yaml:"username,omitempty"`
// Password for authentication.
Password string `yaml:"password,omitempty"`
// SentinelUsername is the username for Sentinel authentication.
SentinelUsername string `yaml:"sentinelusername,omitempty"`
// SentinelPassword is the password for Sentinel authentication.
SentinelPassword string `yaml:"sentinelpassword,omitempty"`
// MaxRetries is the maximum number of retries before giving up.
MaxRetries int `yaml:"maxretries,omitempty"`
// MinRetryBackoff is the minimum backoff between each retry.
MinRetryBackoff time.Duration `yaml:"minretrybackoff,omitempty"`
// MaxRetryBackoff is the maximum backoff between each retry.
MaxRetryBackoff time.Duration `yaml:"maxretrybackoff,omitempty"`
// DialTimeout is the timeout for establishing new connections.
DialTimeout time.Duration `yaml:"dialtimeout,omitempty"`
// ReadTimeout is the timeout for reading a single command reply.
ReadTimeout time.Duration `yaml:"readtimeout,omitempty"`
// WriteTimeout is the timeout for writing a single command.
WriteTimeout time.Duration `yaml:"writetimeout,omitempty"`
// ContextTimeoutEnabled enables wrapping operations with a context timeout.
ContextTimeoutEnabled bool `yaml:"contexttimeoutenabled,omitempty"`
// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default is LIFO).
PoolFIFO bool `yaml:"poolfifo,omitempty"`
// PoolSize is the maximum number of socket connections.
PoolSize int `yaml:"poolsize,omitempty"`
// PoolTimeout is the amount of time a client waits for a connection if all are busy.
PoolTimeout time.Duration `yaml:"pooltimeout,omitempty"`
// MinIdleConns is the minimum number of idle connections maintained in the pool.
MinIdleConns int `yaml:"minidleconns,omitempty"`
// MaxIdleConns is the maximum number of idle connections.
MaxIdleConns int `yaml:"maxidleconns,omitempty"`
// MaxActiveConns is the maximum number of active connections (cluster mode only).
MaxActiveConns int `yaml:"maxactiveconns,omitempty"`
// ConnMaxIdleTime is the maximum amount of time a connection can be idle.
ConnMaxIdleTime time.Duration `yaml:"connmaxidletime,omitempty"`
// ConnMaxLifetime is the maximum lifetime of a connection.
ConnMaxLifetime time.Duration `yaml:"connmaxlifetime,omitempty"`
// MaxRedirects is the maximum number of redirects to follow in cluster mode.
MaxRedirects int `yaml:"maxredirects,omitempty"`
// ReadOnly enables read-only mode for cluster clients.
ReadOnly bool `yaml:"readonly,omitempty"`
// RouteByLatency routes commands to the closest node based on latency.
RouteByLatency bool `yaml:"routebylatency,omitempty"`
// RouteRandomly routes commands randomly among eligible nodes.
RouteRandomly bool `yaml:"routerandomly,omitempty"`
// MasterName is the Sentinel master name.
// Only applicable for failover clients.
MasterName string `yaml:"mastername,omitempty"`
// DisableIdentity disables the CLIENT SETINFO command on connect.
DisableIdentity bool `yaml:"disableidentity,omitempty"`
// IdentitySuffix is an optional suffix for CLIENT SETINFO.
IdentitySuffix string `yaml:"identitysuffix,omitempty"`
// UnstableResp3 enables RESP3 features that are not finalized yet.
UnstableResp3 bool `yaml:"unstableresp3,omitempty"`
}
// RedisTLSOptions configures the TLS (Transport Layer Security) settings for
// Redis connections, allowing secure communication over the network.
type RedisTLSOptions struct {
// Certificate specifies the path to the certificate file for TLS authentication.
// This certificate is used to establish a secure connection with the Redis server.
Certificate string `yaml:"certificate,omitempty"`
// Key specifies the path to the private key file associated with the certificate.
// This key is used to authenticate the client during the TLS handshake.
Key string `yaml:"key,omitempty"`
// RootCAs specifies a list of root certificate authorities that clients use when
// verifying server certificates. If RootCAs is nil, TLS uses the host's root CA set.
RootCAs []string `yaml:"rootcas,omitempty"`
}
// Redis represents the configuration for connecting to a Redis server. It includes
// both the basic connection options and optional TLS settings to secure the connection.
type Redis struct {
// Options provides the configuration for connecting to Redis, including
// options for both clustered and standalone Redis setups. It is provided inline
// from the `redis.UniversalOptions` struct.
Options RedisOptions `yaml:",inline"`
// TLS contains the TLS settings for secure communication with the Redis server.
// If specified, these settings will enable encryption and authentication via TLS.
TLS RedisTLSOptions `yaml:"tls,omitempty"`
}
const (
ClientAuthRequestClientCert = "request-client-cert"
ClientAuthRequireAnyClientCert = "require-any-client-cert"
ClientAuthVerifyClientCertIfGiven = "verify-client-cert-if-given"
ClientAuthRequireAndVerifyClientCert = "require-and-verify-client-cert"
)
type ClientAuth string
// UnmarshalYAML implements the yaml.Unmarshaler interface
// Unmarshals a string into a ClientAuth, validating that it represents a valid ClientAuth mode
func (clientAuth *ClientAuth) UnmarshalYAML(unmarshal func(interface{}) error) error {
var clientAuthString string
err := unmarshal(&clientAuthString)
if err != nil {
return err
}
switch clientAuthString {
case ClientAuthRequestClientCert, ClientAuthRequireAnyClientCert, ClientAuthVerifyClientCertIfGiven, ClientAuthRequireAndVerifyClientCert:
default:
return fmt.Errorf("invalid ClientAuth %s, must be one of: %s, %s, %s, %s", clientAuthString, ClientAuthRequestClientCert, ClientAuthRequireAnyClientCert, ClientAuthVerifyClientCertIfGiven, ClientAuthRequireAndVerifyClientCert)
}
*clientAuth = ClientAuth(clientAuthString)
return nil
}
//go:build gofuzz
// +build gofuzz
package configuration
import (
"bytes"
"os"
"testing"
)
// ParserFuzzer implements a fuzzer that targets Parse()
// Export before building
// nolint:deadcode
func parserFuzzer(f *testing.F) {
f.Fuzz(func(t *testing.T, rdData []byte, envKey, envValue string) {
os.Setenv(envKey, envValue)
defer os.Unsetenv(envKey)
rd := bytes.NewReader(rdData)
_, _ = Parse(rd)
})
}
package configuration
import (
"fmt"
"os"
"reflect"
"sort"
"strconv"
"strings"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)
// Version is a major/minor version pair of the form Major.Minor
// Major version upgrades indicate structure or type changes
// Minor version upgrades should be strictly additive
type Version string
// MajorMinorVersion constructs a Version from its Major and Minor components
func MajorMinorVersion(major, minor uint) Version {
return Version(fmt.Sprintf("%d.%d", major, minor))
}
func (version Version) major() (uint, error) {
majorPart, _, _ := strings.Cut(string(version), ".")
major, err := strconv.ParseUint(majorPart, 10, 0)
return uint(major), err
}
// Major returns the major version portion of a Version
func (version Version) Major() uint {
major, _ := version.major()
return major
}
func (version Version) minor() (uint, error) {
_, minorPart, _ := strings.Cut(string(version), ".")
minor, err := strconv.ParseUint(minorPart, 10, 0)
return uint(minor), err
}
// Minor returns the minor version portion of a Version
func (version Version) Minor() uint {
minor, _ := version.minor()
return minor
}
// VersionedParseInfo defines how a specific version of a configuration should
// be parsed into the current version
type VersionedParseInfo struct {
// Version is the version which this parsing information relates to
Version Version
// ParseAs defines the type which a configuration file of this version
// should be parsed into
ParseAs reflect.Type
// ConversionFunc defines a method for converting the parsed configuration
// (of type ParseAs) into the current configuration version
// Note: this method signature is very unclear with the absence of generics
ConversionFunc func(interface{}) (interface{}, error)
}
type envVar struct {
name string
value string
}
type envVars []envVar
func (a envVars) Len() int { return len(a) }
func (a envVars) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a envVars) Less(i, j int) bool { return a[i].name < a[j].name }
// Parser can be used to parse a configuration file and environment of a defined
// version into a unified output structure
type Parser struct {
prefix string
mapping map[Version]VersionedParseInfo
env envVars
}
// NewParser returns a *Parser with the given environment prefix which handles
// versioned configurations which match the given parseInfos
func NewParser(prefix string, parseInfos []VersionedParseInfo) *Parser {
p := Parser{prefix: prefix, mapping: make(map[Version]VersionedParseInfo)}
for _, parseInfo := range parseInfos {
p.mapping[parseInfo.Version] = parseInfo
}
for _, env := range os.Environ() {
k, v, _ := strings.Cut(env, "=")
p.env = append(p.env, envVar{k, v})
}
// We must sort the environment variables lexically by name so that
// less specific variables are applied before more specific ones
// (i.e. REGISTRY_STORAGE before
// REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY). This sucks, but it's a
// lot simpler and easier to get right than unmarshalling map entries
// into temporaries and merging with the existing entry.
sort.Sort(p.env)
return &p
}
// Parse reads in the given []byte and environment and writes the resulting
// configuration into the input v
//
// Environment variables may be used to override configuration parameters other
// than version, following the scheme below:
// v.Abc may be replaced by the value of PREFIX_ABC,
// v.Abc.Xyz may be replaced by the value of PREFIX_ABC_XYZ, and so forth
func (p *Parser) Parse(in []byte, v interface{}) error {
var versionedStruct struct {
Version Version
}
if err := yaml.Unmarshal(in, &versionedStruct); err != nil {
return err
}
parseInfo, ok := p.mapping[versionedStruct.Version]
if !ok {
return fmt.Errorf("unsupported version: %q", versionedStruct.Version)
}
parseAs := reflect.New(parseInfo.ParseAs)
err := yaml.Unmarshal(in, parseAs.Interface())
if err != nil {
return err
}
for _, envVar := range p.env {
pathStr := envVar.name
if strings.HasPrefix(pathStr, strings.ToUpper(p.prefix)+"_") {
path := strings.Split(pathStr, "_")
err = p.overwriteFields(parseAs, pathStr, path[1:], envVar.value)
if err != nil {
return fmt.Errorf("parsing environment variable %s: %v", pathStr, err)
}
}
}
c, err := parseInfo.ConversionFunc(parseAs.Interface())
if err != nil {
return err
}
reflect.ValueOf(v).Elem().Set(reflect.Indirect(reflect.ValueOf(c)))
return nil
}
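// A self-contained sketch of driving Parser directly; the exampleConfig
// type and the "example" prefix are illustrative, not part of the registry
// configuration. It shows an environment variable overriding a yaml value.
type exampleConfig struct {
    Version Version `yaml:"version"`
    Name    string  `yaml:"name"`
}

func exampleParserOverride() (*exampleConfig, error) {
    // NewParser snapshots the environment at construction time, so the
    // variable must be set before the parser is created.
    os.Setenv("EXAMPLE_NAME", "from-env")
    defer os.Unsetenv("EXAMPLE_NAME")
    p := NewParser("example", []VersionedParseInfo{{
        Version: MajorMinorVersion(0, 1),
        ParseAs: reflect.TypeOf(exampleConfig{}),
        ConversionFunc: func(c interface{}) (interface{}, error) { return c, nil },
    }})
    var conf exampleConfig
    if err := p.Parse([]byte("version: 0.1\nname: from-yaml\n"), &conf); err != nil {
        return nil, err
    }
    return &conf, nil // conf.Name is "from-env", not "from-yaml"
}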
// overwriteFields replaces configuration values with alternate values specified
// through the environment. Precondition: an empty path slice must never be
// passed in.
func (p *Parser) overwriteFields(v reflect.Value, fullpath string, path []string, payload string) error {
for v.Kind() == reflect.Ptr {
if v.IsNil() {
panic("encountered nil pointer while handling environment variable " + fullpath)
}
v = reflect.Indirect(v)
}
switch v.Kind() {
case reflect.Struct:
return p.overwriteStruct(v, fullpath, path, payload)
case reflect.Map:
return p.overwriteMap(v, fullpath, path, payload)
case reflect.Slice:
idx, err := strconv.Atoi(path[0])
if err != nil {
panic("non-numeric index: " + path[0])
}
if idx > v.Len() {
panic("undefined index: " + path[0])
}
// If the slice is empty or the index equals the current length,
// create a new element, append it, and then overwrite it with the
// value from the environment variable.
if v.Len() == 0 || idx == v.Len() {
typ := v.Type().Elem()
elem := reflect.New(typ).Elem()
v.Set(reflect.Append(v, elem))
}
return p.overwriteFields(v.Index(idx), fullpath, path[1:], payload)
case reflect.Interface:
if v.NumMethod() == 0 {
if !v.IsNil() {
return p.overwriteFields(v.Elem(), fullpath, path, payload)
}
// Interface was empty; create an implicit map
var template map[string]interface{}
wrappedV := reflect.MakeMap(reflect.TypeOf(template))
v.Set(wrappedV)
return p.overwriteMap(wrappedV, fullpath, path, payload)
}
}
return nil
}
func (p *Parser) overwriteStruct(v reflect.Value, fullpath string, path []string, payload string) error {
// Generate case-insensitive map of struct fields
byUpperCase := make(map[string]int)
for i := 0; i < v.NumField(); i++ {
sf := v.Type().Field(i)
upper := strings.ToUpper(sf.Name)
if _, present := byUpperCase[upper]; present {
panic(fmt.Sprintf("field name collision in configuration object: %s", sf.Name))
}
byUpperCase[upper] = i
}
fieldIndex, present := byUpperCase[path[0]]
if !present {
logrus.Warnf("Ignoring unrecognized environment variable %s", fullpath)
return nil
}
field := v.Field(fieldIndex)
sf := v.Type().Field(fieldIndex)
if len(path) == 1 {
// Env var specifies this field directly
fieldVal := reflect.New(sf.Type)
err := yaml.Unmarshal([]byte(payload), fieldVal.Interface())
if err != nil {
return err
}
field.Set(reflect.Indirect(fieldVal))
return nil
}
// If the field is nil, must create an object
switch sf.Type.Kind() {
case reflect.Map:
if field.IsNil() {
field.Set(reflect.MakeMap(sf.Type))
}
case reflect.Ptr:
if field.IsNil() {
field.Set(reflect.New(field.Type().Elem()))
}
}
err := p.overwriteFields(field, fullpath, path[1:], payload)
if err != nil {
return err
}
return nil
}
func (p *Parser) overwriteMap(m reflect.Value, fullpath string, path []string, payload string) error {
if m.Type().Key().Kind() != reflect.String {
// non-string keys unsupported
logrus.Warnf("Ignoring environment variable %s involving map with non-string keys", fullpath)
return nil
}
if len(path) > 1 {
// If a matching key exists, get its value and continue the
// overwriting process.
for _, k := range m.MapKeys() {
if strings.ToUpper(k.String()) == path[0] {
mapValue := m.MapIndex(k)
// If the existing value is nil, we want to
// recreate it instead of using this value.
if (mapValue.Kind() == reflect.Ptr ||
mapValue.Kind() == reflect.Interface ||
mapValue.Kind() == reflect.Map) &&
mapValue.IsNil() {
break
}
return p.overwriteFields(mapValue, fullpath, path[1:], payload)
}
}
}
// (Re)create this key
var mapValue reflect.Value
if m.Type().Elem().Kind() == reflect.Map {
mapValue = reflect.MakeMap(m.Type().Elem())
} else {
mapValue = reflect.New(m.Type().Elem())
}
if len(path) > 1 {
err := p.overwriteFields(mapValue, fullpath, path[1:], payload)
if err != nil {
return err
}
} else {
err := yaml.Unmarshal([]byte(payload), mapValue.Interface())
if err != nil {
return err
}
}
m.SetMapIndex(reflect.ValueOf(strings.ToLower(path[0])), reflect.Indirect(mapValue))
return nil
}
package distribution
import (
"errors"
"fmt"
"strings"
"github.com/opencontainers/go-digest"
)
// ErrAccessDenied is returned when an access to a requested resource is
// denied.
var ErrAccessDenied = errors.New("access denied")
// ErrManifestNotModified is returned when a conditional manifest GetByTag
// returns nil due to the client indicating it has the latest version
var ErrManifestNotModified = errors.New("manifest not modified")
// ErrUnsupported is returned when an unimplemented or unsupported action is
// performed
var ErrUnsupported = errors.New("operation unsupported")
// ErrSchemaV1Unsupported is returned when a client tries to upload a schema v1
// manifest but the registry is configured to reject it
var ErrSchemaV1Unsupported = errors.New("manifest schema v1 unsupported")
// ErrTagUnknown is returned if the given tag is not known by the tag service
type ErrTagUnknown struct {
Tag string
}
func (err ErrTagUnknown) Error() string {
return fmt.Sprintf("unknown tag=%s", err.Tag)
}
// ErrRepositoryUnknown is returned if the named repository is not known by
// the registry.
type ErrRepositoryUnknown struct {
Name string
}
func (err ErrRepositoryUnknown) Error() string {
return fmt.Sprintf("unknown repository name=%s", err.Name)
}
// ErrRepositoryNameInvalid should be used to denote an invalid repository
// name. Reason may be set, indicating the cause of invalidity.
type ErrRepositoryNameInvalid struct {
Name string
Reason error
}
func (err ErrRepositoryNameInvalid) Error() string {
return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason)
}
// ErrManifestUnknown is returned if the manifest is not known by the
// registry.
type ErrManifestUnknown struct {
Name string
Tag string
}
func (err ErrManifestUnknown) Error() string {
return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
}
// ErrManifestUnknownRevision is returned when a manifest cannot be found by
// revision within a repository.
type ErrManifestUnknownRevision struct {
Name string
Revision digest.Digest
}
func (err ErrManifestUnknownRevision) Error() string {
return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
}
// ErrManifestUnverified is returned when the registry is unable to verify
// the manifest.
type ErrManifestUnverified struct{}
func (ErrManifestUnverified) Error() string {
return "unverified manifest"
}
// ErrManifestVerification provides a type to collect errors encountered
// during manifest verification. Currently, it accepts errors of all types,
// but it may be narrowed to those involving manifest verification.
type ErrManifestVerification []error
func (errs ErrManifestVerification) Error() string {
parts := make([]string, 0, len(errs))
for _, err := range errs {
parts = append(parts, err.Error())
}
return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
}
// ErrManifestBlobUnknown returned when a referenced blob cannot be found.
type ErrManifestBlobUnknown struct {
Digest digest.Digest
}
func (err ErrManifestBlobUnknown) Error() string {
return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
}
// ErrManifestNameInvalid should be used to denote an invalid manifest
// name. Reason may be set, indicating the cause of invalidity.
type ErrManifestNameInvalid struct {
Name string
Reason error
}
func (err ErrManifestNameInvalid) Error() string {
return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
}
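// A hedged sketch of consuming the typed errors above: errors.As recovers
// the structured fields that a flat error string would hide.
func exampleDescribeManifestErr(err error) string {
    var unknownRev ErrManifestUnknownRevision
    if errors.As(err, &unknownRev) {
        return fmt.Sprintf("repository %s has no manifest at revision %s",
            unknownRev.Name, unknownRev.Revision)
    }
    var unknownTag ErrTagUnknown
    if errors.As(err, &unknownTag) {
        return fmt.Sprintf("tag %s is not known", unknownTag.Tag)
    }
    return err.Error()
}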
package checks
import (
"context"
"errors"
"fmt"
"net"
"net/http"
"os"
"path/filepath"
"time"
"github.com/distribution/distribution/v3/health"
)
// FileChecker checks the existence of a file and returns an error
// if the file exists.
func FileChecker(f string) health.Checker {
return health.CheckFunc(func(context.Context) error {
absoluteFilePath, err := filepath.Abs(f)
if err != nil {
return fmt.Errorf("failed to get absolute path for %q: %v", f, err)
}
_, err = os.Stat(absoluteFilePath)
if err == nil {
return errors.New("file exists")
} else if os.IsNotExist(err) {
return nil
}
return err
})
}
// HTTPChecker does a HEAD request and verifies that the HTTP status code
// returned matches statusCode.
func HTTPChecker(r string, statusCode int, timeout time.Duration, headers http.Header) health.Checker {
return health.CheckFunc(func(ctx context.Context) error {
client := http.Client{
Timeout: timeout,
}
req, err := http.NewRequestWithContext(ctx, http.MethodHead, r, nil)
if err != nil {
return fmt.Errorf("%v: error creating request: %w", r, err)
}
for headerName, headerValues := range headers {
for _, headerValue := range headerValues {
req.Header.Add(headerName, headerValue)
}
}
response, err := client.Do(req)
if err != nil {
return fmt.Errorf("%v: error while checking: %w", r, err)
}
defer response.Body.Close()
if response.StatusCode != statusCode {
return fmt.Errorf("%v: downstream service returned unexpected status: %d", r, response.StatusCode)
}
return nil
})
}
// TCPChecker attempts to open a TCP connection.
func TCPChecker(addr string, timeout time.Duration) health.Checker {
return health.CheckFunc(func(ctx context.Context) error {
d := net.Dialer{Timeout: timeout}
conn, err := d.DialContext(ctx, "tcp", addr)
if err != nil {
return fmt.Errorf("%v: connection failed: %w", addr, err)
}
conn.Close()
return nil
})
}
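
// Example (illustrative, not part of the package): one way these checkers
// might be wired into the health package's default registry. The file path,
// URL, and address below are hypothetical placeholders.
package main

import (
	"time"

	"github.com/distribution/distribution/v3/health"
	"github.com/distribution/distribution/v3/health/checks"
)

func main() {
	// Report unhealthy while an operator-created "down" file exists.
	health.Register("down-file", checks.FileChecker("/var/run/registry/down"))
	// Verify an HTTP dependency answers HEAD with 200 within the timeout.
	health.Register("storage-http", checks.HTTPChecker("https://storage.example.test/health", 200, 5*time.Second, nil))
	// Verify a TCP dependency is reachable.
	health.Register("redis-tcp", checks.TCPChecker("localhost:6379", 5*time.Second))
}
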
package health
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"sync"
"time"
"github.com/distribution/distribution/v3/internal/dcontext"
"github.com/distribution/distribution/v3/registry/api/errcode"
)
// init registers the global /debug/health API endpoint and creates the default registry.
func init() {
DefaultRegistry = NewRegistry()
http.HandleFunc("/debug/health", StatusHandler)
}
// A Registry is a collection of checks. Most applications will use the global
// registry defined in DefaultRegistry. However, unit tests may need to create
// separate registries to isolate themselves from other tests.
type Registry struct {
mu sync.RWMutex
registeredChecks map[string]Checker
}
// NewRegistry creates a new registry. This isn't necessary for normal use of
// the package, but may be useful for unit tests so individual tests have their
// own set of checks.
func NewRegistry() *Registry {
return &Registry{
registeredChecks: make(map[string]Checker),
}
}
// DefaultRegistry is the default registry where checks are registered. It is
// the registry used by the HTTP handler.
var DefaultRegistry *Registry
// Checker is the interface for a Health Checker
type Checker interface {
// Check returns nil if the service is okay.
Check(context.Context) error
}
// CheckFunc is a convenience type to create functions that implement
// the Checker interface
type CheckFunc func(context.Context) error
// Check implements the Checker interface, allowing any
// func(context.Context) error to be used as a Checker.
func (cf CheckFunc) Check(ctx context.Context) error {
return cf(ctx)
}
// Updater implements a health check that is explicitly set.
type Updater interface {
Checker
// Update updates the current status of the health check.
Update(status error)
}
// updater implements Checker and Updater, providing an asynchronous Update
// method.
// This allows a Checker whose Check() call returns immediately, without
// blocking on a potentially expensive check.
type updater struct {
mu sync.Mutex
status error
}
// Check implements the Checker interface
func (u *updater) Check(context.Context) error {
u.mu.Lock()
defer u.mu.Unlock()
return u.status
}
// Update implements the Updater interface, allowing asynchronous access to
// the status of a Checker.
func (u *updater) Update(status error) {
u.mu.Lock()
defer u.mu.Unlock()
u.status = status
}
// NewStatusUpdater returns a new updater
func NewStatusUpdater() Updater {
return &updater{}
}
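
// Example (illustrative): an Updater lets an expensive probe run on its own
// schedule while Check stays instant. The probe function below is a
// hypothetical placeholder.
//
//	u := NewStatusUpdater()
//	Register("background-probe", u)
//	go func() {
//		for range time.Tick(30 * time.Second) {
//			u.Update(probe()) // nil marks healthy, non-nil unhealthy
//		}
//	}()
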
// thresholdUpdater implements Checker and Updater, providing an asynchronous
// Update method.
// This allows a Checker whose Check() call returns immediately, without
// blocking on a potentially expensive check.
type thresholdUpdater struct {
mu sync.Mutex
status error
threshold int
count int
}
// Check implements the Checker interface
func (tu *thresholdUpdater) Check(context.Context) error {
tu.mu.Lock()
defer tu.mu.Unlock()
if tu.count >= tu.threshold || errors.As(tu.status, new(pollingTerminatedErr)) {
return tu.status
}
return nil
}
// Update implements the Updater interface, allowing asynchronous access to
// the status of a Checker.
func (tu *thresholdUpdater) Update(status error) {
tu.mu.Lock()
defer tu.mu.Unlock()
if status == nil {
tu.count = 0
} else if tu.count < tu.threshold {
tu.count++
}
tu.status = status
}
// NewThresholdStatusUpdater returns a new thresholdUpdater
func NewThresholdStatusUpdater(t int) Updater {
if t > 0 {
return &thresholdUpdater{threshold: t}
}
return NewStatusUpdater()
}
type pollingTerminatedErr struct{ Err error }
func (e pollingTerminatedErr) Error() string {
return fmt.Sprintf("health: check is not polled: %v", e.Err)
}
func (e pollingTerminatedErr) Unwrap() error { return e.Err }
// Poll periodically polls the checker c at interval and updates the updater u
// with the result. The checker is called with ctx as the context. When ctx is
// done, Poll updates the updater with ctx.Err() and returns.
func Poll(ctx context.Context, u Updater, c Checker, interval time.Duration) {
t := time.NewTicker(interval)
defer t.Stop()
for {
select {
case <-ctx.Done():
u.Update(pollingTerminatedErr{Err: ctx.Err()})
return
case <-t.C:
u.Update(c.Check(ctx))
}
}
}
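
// Example (illustrative): pairing Poll with a threshold updater so a flaky
// dependency is only reported down after three consecutive failures. The
// address below is a hypothetical placeholder.
//
//	u := NewThresholdStatusUpdater(3)
//	Register("db-tcp", u)
//	c := CheckFunc(func(ctx context.Context) error {
//		d := net.Dialer{Timeout: 2 * time.Second}
//		conn, err := d.DialContext(ctx, "tcp", "db.example.test:5432")
//		if err != nil {
//			return err
//		}
//		return conn.Close()
//	})
//	go Poll(ctx, u, c, 10*time.Second) // Poll blocks until ctx is done
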
// CheckStatus returns a map with all the current health check errors
func (registry *Registry) CheckStatus(ctx context.Context) map[string]string { // TODO(stevvooe) this needs a proper type
registry.mu.RLock()
defer registry.mu.RUnlock()
statusKeys := make(map[string]string)
for k, v := range registry.registeredChecks {
err := v.Check(ctx)
if err != nil {
statusKeys[k] = err.Error()
}
}
return statusKeys
}
// CheckStatus returns a map with all the current health check errors from the
// default registry.
func CheckStatus(ctx context.Context) map[string]string {
return DefaultRegistry.CheckStatus(ctx)
}
// Register associates the checker with the provided name.
func (registry *Registry) Register(name string, check Checker) {
if registry == nil {
registry = DefaultRegistry
}
registry.mu.Lock()
defer registry.mu.Unlock()
_, ok := registry.registeredChecks[name]
if ok {
panic("Check already exists: " + name)
}
registry.registeredChecks[name] = check
}
// Register associates the checker with the provided name in the default
// registry.
func Register(name string, check Checker) {
DefaultRegistry.Register(name, check)
}
// RegisterFunc allows the convenience of registering a checker directly from
// an arbitrary func(context.Context) error.
func (registry *Registry) RegisterFunc(name string, check CheckFunc) {
registry.Register(name, check)
}
// RegisterFunc allows the convenience of registering a checker in the default
// registry directly from an arbitrary func(context.Context) error.
func RegisterFunc(name string, check CheckFunc) {
DefaultRegistry.RegisterFunc(name, check)
}
// StatusHandler returns a JSON blob with all the currently registered health
// checks and their corresponding status.
// It returns 503 if any check reports an error, and 200 otherwise.
func StatusHandler(w http.ResponseWriter, r *http.Request) {
if r.Method == http.MethodGet {
checks := CheckStatus(r.Context())
status := http.StatusOK
// If there is an error, return 503
if len(checks) != 0 {
status = http.StatusServiceUnavailable
}
statusResponse(w, r, status, checks)
} else {
http.NotFound(w, r)
}
}
// Handler returns a handler that will return 503 response code if the health
// checks have failed. If everything is okay with the health checks, the
// handler will pass through to the provided handler. Use this handler to
// disable a web application when the health checks fail.
func Handler(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
checks := CheckStatus(r.Context())
if len(checks) != 0 {
// NOTE(milosgajdos): disable errcheck as the error is
// accessible via /debug/health
// nolint:errcheck
errcode.ServeJSON(w, errcode.ErrorCodeUnavailable.
WithDetail("health check failed: please see /debug/health"))
return
}
handler.ServeHTTP(w, r) // pass through
})
}
// statusResponse completes the request with a response describing the health
// of the service.
func statusResponse(w http.ResponseWriter, r *http.Request, status int, checks map[string]string) {
p, err := json.Marshal(checks)
if err != nil {
dcontext.GetLogger(r.Context()).Errorf("error serializing health status: %v", err)
p, err = json.Marshal(struct {
ServerError string `json:"server_error"`
}{
ServerError: "Could not parse error message",
})
status = http.StatusInternalServerError
if err != nil {
dcontext.GetLogger(r.Context()).Errorf("error serializing health status failure message: %v", err)
return
}
}
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Content-Length", fmt.Sprint(len(p)))
w.WriteHeader(status)
if _, err := w.Write(p); err != nil {
dcontext.GetLogger(r.Context()).Errorf("error writing health status response body: %v", err)
}
}
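
// Example (illustrative, not part of the package): serving the health
// endpoints on a custom mux. The package init already mounts StatusHandler
// at /debug/health on http.DefaultServeMux; with a custom mux it must be
// added explicitly. The application handler below is a placeholder.
package main

import (
	"fmt"
	"net/http"

	"github.com/distribution/distribution/v3/health"
)

func main() {
	app := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})

	mux := http.NewServeMux()
	mux.HandleFunc("/debug/health", health.StatusHandler)
	// Handler returns 503 while any registered check is failing and
	// otherwise passes the request through to app.
	mux.Handle("/", health.Handler(app))

	_ = http.ListenAndServe(":8080", mux)
}
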
package auth
import (
"net/http"
"strings"
)
// APIVersion represents a version of an API including its
// type and version number.
type APIVersion struct {
// Type refers to the name of a specific API specification
// such as "registry"
Type string
// Version is the version of the API specification implemented,
// This may omit the revision number and only include
// the major and minor version, such as "2.0"
Version string
}
// String returns the string formatted API Version
func (v APIVersion) String() string {
return v.Type + "/" + v.Version
}
// APIVersions gets the API versions out of an HTTP response using the provided
// version header as the key for the HTTP header.
func APIVersions(resp *http.Response, versionHeader string) []APIVersion {
versions := []APIVersion{}
if versionHeader != "" {
for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] {
for _, version := range strings.Fields(supportedVersions) {
versions = append(versions, ParseAPIVersion(version))
}
}
}
return versions
}
// ParseAPIVersion parses an API version string into an APIVersion
// Format (Expected, not enforced):
// API version string = <API type> '/' <API version>
// API type = [a-z][a-z0-9]*
// API version = [0-9]+(\.[0-9]+)?
// TODO(dmcgowan): Enforce format, add error condition, remove unknown type
func ParseAPIVersion(versionStr string) APIVersion {
idx := strings.IndexRune(versionStr, '/')
if idx == -1 {
return APIVersion{
Type: "unknown",
Version: versionStr,
}
}
return APIVersion{
Type: strings.ToLower(versionStr[:idx]),
Version: versionStr[idx+1:],
}
}
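
// Example (illustrative): how ParseAPIVersion interprets version strings,
// and how APIVersions applies it to each space-separated header value.
//
//	ParseAPIVersion("registry/2.0") // => APIVersion{Type: "registry", Version: "2.0"}
//	ParseAPIVersion("2.0")          // => APIVersion{Type: "unknown", Version: "2.0"}
//
//	// Given a response carrying the header
//	//   Docker-Distribution-Api-Version: registry/2.0 registry/2.1
//	// this yields both versions:
//	APIVersions(resp, "Docker-Distribution-Api-Version")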
package challenge
import (
"net/url"
"strings"
)
// FROM: https://golang.org/src/net/http/http.go
// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
// return true if the string includes a port.
func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
// FROM: http://golang.org/src/net/http/transport.go
var portMap = map[string]string{
"http": "80",
"https": "443",
}
// canonicalAddr returns url.Host but always with a ":port" suffix
// FROM: http://golang.org/src/net/http/transport.go
func canonicalAddr(url *url.URL) string {
addr := url.Host
if !hasPort(addr) {
return addr + ":" + portMap[url.Scheme]
}
return addr
}
package challenge
import (
"fmt"
"net/http"
"net/url"
"strings"
"sync"
)
// Octet types from RFC 2616.
type octetType byte
var octetTypes [256]octetType
const (
isToken octetType = 1 << iota
isSpace
)
func init() {
// OCTET = <any 8-bit sequence of data>
// CHAR = <any US-ASCII character (octets 0 - 127)>
// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
// CR = <US-ASCII CR, carriage return (13)>
// LF = <US-ASCII LF, linefeed (10)>
// SP = <US-ASCII SP, space (32)>
// HT = <US-ASCII HT, horizontal-tab (9)>
// <"> = <US-ASCII double-quote mark (34)>
// CRLF = CR LF
// LWS = [CRLF] 1*( SP | HT )
// TEXT = <any OCTET except CTLs, but including LWS>
// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
// | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
// token = 1*<any CHAR except CTLs or separators>
// qdtext = <any TEXT except <">>
for c := 0; c < 256; c++ {
var t octetType
isCtl := c <= 31 || c == 127
isChar := 0 <= c && c <= 127
isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
if strings.ContainsRune(" \t\r\n", rune(c)) {
t |= isSpace
}
if isChar && !isCtl && !isSeparator {
t |= isToken
}
octetTypes[c] = t
}
}
// Challenge carries information from a WWW-Authenticate response header.
// See RFC 2617.
type Challenge struct {
// Scheme is the auth-scheme according to RFC 2617
Scheme string
// Parameters are the auth-params according to RFC 2617
Parameters map[string]string
}
// Manager manages the challenges for endpoints.
// The challenges are pulled out of HTTP responses. Only
// responses which expect challenges should be added to
// the manager, since a non-unauthorized request will be
// viewed as not requiring challenges.
type Manager interface {
// GetChallenges returns the challenges for the given
// endpoint URL.
GetChallenges(endpoint url.URL) ([]Challenge, error)
// AddResponse adds the response to the challenge
// manager. The challenges will be parsed out of
// the WWW-Authenticate headers and associated with
// the URL which produced the response. If the
// response was authorized, any challenges for the
// endpoint will be cleared.
AddResponse(resp *http.Response) error
}
// NewSimpleManager returns an instance of
// Manager which only maps endpoints to challenges
// based on the responses which have been added to the
// manager. The simple manager will make no attempt to
// perform requests on the endpoints or cache the responses
// to a backend.
func NewSimpleManager() Manager {
return &simpleManager{
Challenges: make(map[string][]Challenge),
}
}
type simpleManager struct {
sync.RWMutex
Challenges map[string][]Challenge
}
func normalizeURL(endpoint *url.URL) {
endpoint.Host = strings.ToLower(endpoint.Host)
endpoint.Host = canonicalAddr(endpoint)
}
func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
normalizeURL(&endpoint)
m.RLock()
defer m.RUnlock()
challenges := m.Challenges[endpoint.String()]
return challenges, nil
}
func (m *simpleManager) AddResponse(resp *http.Response) error {
challenges := ResponseChallenges(resp)
if resp.Request == nil {
return fmt.Errorf("missing request reference")
}
urlCopy := url.URL{
Path: resp.Request.URL.Path,
Host: resp.Request.URL.Host,
Scheme: resp.Request.URL.Scheme,
}
normalizeURL(&urlCopy)
m.Lock()
defer m.Unlock()
m.Challenges[urlCopy.String()] = challenges
return nil
}
// ResponseChallenges returns a list of authorization challenges
// for the given http Response. Challenges are only checked if
// the response status code was a 401.
func ResponseChallenges(resp *http.Response) []Challenge {
if resp.StatusCode == http.StatusUnauthorized {
// Parse the WWW-Authenticate Header and store the challenges
// on this endpoint object.
return parseAuthHeader(resp.Header)
}
return nil
}
func parseAuthHeader(header http.Header) []Challenge {
challenges := []Challenge{}
for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
v, p := parseValueAndParams(h)
if v != "" {
challenges = append(challenges, Challenge{Scheme: v, Parameters: p})
}
}
return challenges
}
func parseValueAndParams(header string) (value string, params map[string]string) {
params = make(map[string]string)
value, s := expectToken(header)
if value == "" {
return
}
value = strings.ToLower(value)
s = "," + skipSpace(s)
for strings.HasPrefix(s, ",") {
var pkey string
pkey, s = expectToken(skipSpace(s[1:]))
if pkey == "" {
return
}
if !strings.HasPrefix(s, "=") {
return
}
var pvalue string
pvalue, s = expectTokenOrQuoted(s[1:])
if pvalue == "" {
return
}
pkey = strings.ToLower(pkey)
params[pkey] = pvalue
s = skipSpace(s)
}
return
}
func skipSpace(s string) (rest string) {
i := 0
for ; i < len(s); i++ {
if octetTypes[s[i]]&isSpace == 0 {
break
}
}
return s[i:]
}
func expectToken(s string) (token, rest string) {
i := 0
for ; i < len(s); i++ {
if octetTypes[s[i]]&isToken == 0 {
break
}
}
return s[:i], s[i:]
}
func expectTokenOrQuoted(s string) (value string, rest string) {
if !strings.HasPrefix(s, "\"") {
return expectToken(s)
}
s = s[1:]
for i := 0; i < len(s); i++ {
switch s[i] {
case '"':
return s[:i], s[i+1:]
case '\\':
p := make([]byte, len(s)-1)
j := copy(p, s[:i])
escape := true
for i = i + 1; i < len(s); i++ {
b := s[i]
switch {
case escape:
escape = false
p[j] = b
j++
case b == '\\':
escape = true
case b == '"':
return string(p[:j]), s[i+1:]
default:
p[j] = b
j++
}
}
return "", ""
}
}
return "", ""
}
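
// Example (illustrative): feeding a 401 response into a simple manager and
// reading the parsed challenge back. req stands for the request that
// produced the response; the realm value below is a placeholder.
//
//	m := NewSimpleManager()
//	resp := &http.Response{
//		StatusCode: http.StatusUnauthorized,
//		Header: http.Header{"Www-Authenticate": []string{
//			`Bearer realm="https://auth.example.test/token",service="registry.example.test"`,
//		}},
//		Request: req,
//	}
//	_ = m.AddResponse(resp)
//	challenges, _ := m.GetChallenges(*req.URL)
//	// challenges[0].Scheme == "bearer" (schemes are lowercased)
//	// challenges[0].Parameters["realm"] == "https://auth.example.test/token"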
package auth
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"sync"
"time"
"github.com/distribution/distribution/v3/internal/client"
"github.com/distribution/distribution/v3/internal/client/auth/challenge"
"github.com/distribution/distribution/v3/internal/client/transport"
)
var (
// ErrNoBasicAuthCredentials is returned if a request can't be authorized with
// basic auth due to lack of credentials.
ErrNoBasicAuthCredentials = errors.New("no basic auth credentials")
// ErrNoToken is returned if a request is successful but the body does not
// contain an authorization token.
ErrNoToken = errors.New("authorization server did not include a token in the response")
)
const defaultClientID = "registry-client"
// AuthenticationHandler is an interface for authorizing a request using
// parameters from a "WWW-Authenticate" header for a single scheme.
type AuthenticationHandler interface {
// Scheme returns the scheme as expected from the "WWW-Authenticate" header.
Scheme() string
// AuthorizeRequest adds the authorization header to a request (if needed)
// using the parameters from the "WWW-Authenticate" header. The parameter
// values depend on the scheme.
AuthorizeRequest(req *http.Request, params map[string]string) error
}
// CredentialStore is an interface for getting credentials for
// a given URL
type CredentialStore interface {
// Basic returns basic auth for the given URL
Basic(*url.URL) (string, string)
// RefreshToken returns a refresh token for the
// given URL and service
RefreshToken(*url.URL, string) string
// SetRefreshToken sets the refresh token if none
// is provided for the given url and service
SetRefreshToken(realm *url.URL, service, token string)
}
// NewAuthorizer creates an authorizer which can handle multiple authentication
// schemes. The handlers are tried in order; higher priority authentication
// methods should come first. The challenge manager holds the challenges for
// a given root API endpoint (for example "https://registry-1.docker.io/v2/").
func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler) transport.RequestModifier {
return &endpointAuthorizer{
challenges: manager,
handlers: handlers,
}
}
type endpointAuthorizer struct {
challenges challenge.Manager
handlers []AuthenticationHandler
}
func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error {
pingPath := req.URL.Path
if v2Root := strings.Index(req.URL.Path, "/v2/"); v2Root != -1 {
pingPath = pingPath[:v2Root+4]
} else if v1Root := strings.Index(req.URL.Path, "/v1/"); v1Root != -1 {
pingPath = pingPath[:v1Root] + "/v2/"
} else {
return nil
}
ping := url.URL{
Host: req.URL.Host,
Scheme: req.URL.Scheme,
Path: pingPath,
}
challenges, err := ea.challenges.GetChallenges(ping)
if err != nil {
return err
}
if len(challenges) > 0 {
for _, handler := range ea.handlers {
for _, c := range challenges {
if c.Scheme != handler.Scheme() {
continue
}
if err := handler.AuthorizeRequest(req, c.Parameters); err != nil {
return err
}
}
}
}
return nil
}
// This is the minimum duration a token can last (in seconds).
// A token must not live less than 60 seconds because older versions
// of the Docker client didn't read their expiration from the token
// response and assumed 60 seconds. So to remain compatible with
// those implementations, a token must live at least this long.
const minimumTokenLifetimeSeconds = 60
// Private interface for time used by this package to enable tests to provide their own implementation.
type clock interface {
Now() time.Time
}
type tokenHandler struct {
creds CredentialStore
transport http.RoundTripper
clock clock
offlineAccess bool
forceOAuth bool
clientID string
scopes []Scope
tokenLock sync.Mutex
tokenCache string
tokenExpiration time.Time
logger Logger
}
// Scope is a type which is serializable to a string
// using the allow scope grammar.
type Scope interface {
String() string
}
// RepositoryScope represents a token scope for access
// to a repository.
type RepositoryScope struct {
Repository string
Class string
Actions []string
}
// String returns the string representation of the repository
// using the scope grammar
func (rs RepositoryScope) String() string {
repoType := "repository"
// Keep existing format for image class to maintain backwards compatibility
// with authorization servers which do not support the expanded grammar.
if rs.Class != "" && rs.Class != "image" {
repoType = fmt.Sprintf("%s(%s)", repoType, rs.Class)
}
return fmt.Sprintf("%s:%s:%s", repoType, rs.Repository, strings.Join(rs.Actions, ","))
}
// RegistryScope represents a token scope for access
// to resources in the registry.
type RegistryScope struct {
Name string
Actions []string
}
// String returns the string representation of the user
// using the scope grammar
func (rs RegistryScope) String() string {
return fmt.Sprintf("registry:%s:%s", rs.Name, strings.Join(rs.Actions, ","))
}
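
// Example (illustrative): the scope strings these types serialize to under
// the scope grammar.
//
//	RepositoryScope{Repository: "library/ubuntu", Actions: []string{"pull", "push"}}.String()
//	// => "repository:library/ubuntu:pull,push"
//	RepositoryScope{Repository: "library/ubuntu", Class: "plugin", Actions: []string{"pull"}}.String()
//	// => "repository(plugin):library/ubuntu:pull"
//	RegistryScope{Name: "catalog", Actions: []string{"*"}}.String()
//	// => "registry:catalog:*"
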
// Logger defines the injectable logging interface, used on TokenHandlers.
type Logger interface {
Debugf(format string, args ...interface{})
}
func logDebugf(logger Logger, format string, args ...interface{}) {
if logger == nil {
return
}
logger.Debugf(format, args...)
}
// TokenHandlerOptions is used to configure a new token handler
type TokenHandlerOptions struct {
Transport http.RoundTripper
Credentials CredentialStore
OfflineAccess bool
ForceOAuth bool
ClientID string
Scopes []Scope
Logger Logger
}
// An implementation of clock for providing real time data.
type realClock struct{}
// Now implements clock
func (realClock) Now() time.Time { return time.Now() }
// NewTokenHandler creates a new AuthenticationHandler which supports
// fetching tokens from a remote token server.
func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler {
// Wrap the arguments in a TokenHandlerOptions structure.
return NewTokenHandlerWithOptions(TokenHandlerOptions{
Transport: transport,
Credentials: creds,
Scopes: []Scope{
RepositoryScope{
Repository: scope,
Actions: actions,
},
},
})
}
// NewTokenHandlerWithOptions creates a new token handler using the provided
// options structure.
func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler {
handler := &tokenHandler{
transport: options.Transport,
creds: options.Credentials,
offlineAccess: options.OfflineAccess,
forceOAuth: options.ForceOAuth,
clientID: options.ClientID,
scopes: options.Scopes,
clock: realClock{},
logger: options.Logger,
}
return handler
}
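
// Example (illustrative): composing a challenge manager, token handler, and
// basic handler into a request authorizer. creds is assumed to implement
// CredentialStore.
//
//	manager := challenge.NewSimpleManager()
//	tokenHandler := NewTokenHandlerWithOptions(TokenHandlerOptions{
//		Transport:   http.DefaultTransport,
//		Credentials: creds,
//		Scopes: []Scope{RepositoryScope{
//			Repository: "library/ubuntu",
//			Actions:    []string{"pull"},
//		}},
//	})
//	authorizer := NewAuthorizer(manager, tokenHandler, NewBasicHandler(creds))
//	// authorizer.ModifyRequest(req) then attaches credentials whenever a
//	// matching challenge has been recorded for the endpoint.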
func (th *tokenHandler) client() *http.Client {
return &http.Client{
Transport: th.transport,
Timeout: 15 * time.Second,
}
}
func (th *tokenHandler) Scheme() string {
return "bearer"
}
func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
var additionalScopes []string
if fromParam := req.URL.Query().Get("from"); fromParam != "" {
additionalScopes = append(additionalScopes, RepositoryScope{
Repository: fromParam,
Actions: []string{"pull"},
}.String())
}
token, err := th.getToken(req.Context(), params, additionalScopes...)
if err != nil {
return err
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
return nil
}
func (th *tokenHandler) getToken(ctx context.Context, params map[string]string, additionalScopes ...string) (string, error) {
th.tokenLock.Lock()
defer th.tokenLock.Unlock()
scopes := make([]string, 0, len(th.scopes)+len(additionalScopes))
for _, scope := range th.scopes {
scopes = append(scopes, scope.String())
}
var addedScopes bool
for _, scope := range additionalScopes {
if hasScope(scopes, scope) {
continue
}
scopes = append(scopes, scope)
addedScopes = true
}
now := th.clock.Now()
if now.After(th.tokenExpiration) || addedScopes {
token, expiration, err := th.fetchToken(ctx, params, scopes)
if err != nil {
return "", err
}
// do not update cache for added scope tokens
if !addedScopes {
th.tokenCache = token
th.tokenExpiration = expiration
}
return token, nil
}
return th.tokenCache, nil
}
func hasScope(scopes []string, scope string) bool {
for _, s := range scopes {
if s == scope {
return true
}
}
return false
}
type postTokenResponse struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int `json:"expires_in"`
IssuedAt time.Time `json:"issued_at"`
Scope string `json:"scope"`
}
func (th *tokenHandler) fetchTokenWithOAuth(ctx context.Context, realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) {
form := url.Values{}
form.Set("scope", strings.Join(scopes, " "))
form.Set("service", service)
clientID := th.clientID
if clientID == "" {
// Use default client, this is a required field
clientID = defaultClientID
}
form.Set("client_id", clientID)
if refreshToken != "" {
form.Set("grant_type", "refresh_token")
form.Set("refresh_token", refreshToken)
} else if th.creds != nil {
form.Set("grant_type", "password")
username, password := th.creds.Basic(realm)
form.Set("username", username)
form.Set("password", password)
// attempt to get a refresh token
form.Set("access_type", "offline")
} else {
// refuse to do oauth without a grant type
return "", time.Time{}, fmt.Errorf("no supported grant type")
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, realm.String(), strings.NewReader(form.Encode()))
if err != nil {
return "", time.Time{}, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
resp, err := th.client().Do(req)
if err != nil {
return "", time.Time{}, err
}
defer resp.Body.Close()
if err := client.HandleHTTPResponseError(resp); err != nil {
return "", time.Time{}, err
}
decoder := json.NewDecoder(resp.Body)
var tr postTokenResponse
if err = decoder.Decode(&tr); err != nil {
return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err)
}
if tr.AccessToken == "" {
return "", time.Time{}, ErrNoToken
}
if tr.RefreshToken != "" && tr.RefreshToken != refreshToken {
th.creds.SetRefreshToken(realm, service, tr.RefreshToken)
}
if tr.ExpiresIn < minimumTokenLifetimeSeconds {
// The default/minimum lifetime.
tr.ExpiresIn = minimumTokenLifetimeSeconds
logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn)
}
if tr.IssuedAt.IsZero() {
// issued_at is optional in the token response.
tr.IssuedAt = th.clock.Now().UTC()
}
return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
}
type getTokenResponse struct {
Token string `json:"token"`
AccessToken string `json:"access_token"`
ExpiresIn int `json:"expires_in"`
IssuedAt time.Time `json:"issued_at"`
RefreshToken string `json:"refresh_token"`
}
func (th *tokenHandler) fetchTokenWithBasicAuth(ctx context.Context, realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) {
req, err := http.NewRequestWithContext(ctx, http.MethodGet, realm.String(), nil)
if err != nil {
return "", time.Time{}, err
}
reqParams := req.URL.Query()
if service != "" {
reqParams.Add("service", service)
}
for _, scope := range scopes {
reqParams.Add("scope", scope)
}
if th.offlineAccess {
reqParams.Add("offline_token", "true")
clientID := th.clientID
if clientID == "" {
clientID = defaultClientID
}
reqParams.Add("client_id", clientID)
}
if th.creds != nil {
username, password := th.creds.Basic(realm)
if username != "" && password != "" {
reqParams.Add("account", username)
req.SetBasicAuth(username, password)
}
}
req.URL.RawQuery = reqParams.Encode()
resp, err := th.client().Do(req)
if err != nil {
return "", time.Time{}, err
}
defer resp.Body.Close()
if err := client.HandleHTTPResponseError(resp); err != nil {
return "", time.Time{}, err
}
decoder := json.NewDecoder(resp.Body)
var tr getTokenResponse
if err = decoder.Decode(&tr); err != nil {
return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err)
}
if tr.RefreshToken != "" && th.creds != nil {
th.creds.SetRefreshToken(realm, service, tr.RefreshToken)
}
// `access_token` is equivalent to `token` and if both are specified
// the choice is undefined. Canonicalize `access_token` by sticking
// things in `token`.
if tr.AccessToken != "" {
tr.Token = tr.AccessToken
}
if tr.Token == "" {
return "", time.Time{}, ErrNoToken
}
if tr.ExpiresIn < minimumTokenLifetimeSeconds {
// The default/minimum lifetime.
tr.ExpiresIn = minimumTokenLifetimeSeconds
logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn)
}
if tr.IssuedAt.IsZero() {
// issued_at is optional in the token response.
tr.IssuedAt = th.clock.Now().UTC()
}
return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
}
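
// Example (illustrative): a token endpoint response body this handler
// accepts. Either "token" or "access_token" may carry the bearer token;
// when both are present, "access_token" wins during canonicalization.
// Field values below are placeholders.
//
//	{
//	  "token": "eyJhbGciOi...",
//	  "expires_in": 300,
//	  "issued_at": "2006-01-02T15:04:05Z",
//	  "refresh_token": "a1b2c3..."
//	}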
func (th *tokenHandler) fetchToken(ctx context.Context, params map[string]string, scopes []string) (token string, expiration time.Time, err error) {
realm, ok := params["realm"]
if !ok {
return "", time.Time{}, errors.New("no realm specified for token auth challenge")
}
// TODO(dmcgowan): Handle empty scheme and relative realm
realmURL, err := url.Parse(realm)
if err != nil {
return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err)
}
service := params["service"]
var refreshToken string
if th.creds != nil {
refreshToken = th.creds.RefreshToken(realmURL, service)
}
if refreshToken != "" || th.forceOAuth {
return th.fetchTokenWithOAuth(ctx, realmURL, refreshToken, service, scopes)
}
return th.fetchTokenWithBasicAuth(ctx, realmURL, service, scopes)
}
type basicHandler struct {
creds CredentialStore
}
// NewBasicHandler creates a new authentication handler which adds
// basic authentication credentials to a request.
func NewBasicHandler(creds CredentialStore) AuthenticationHandler {
return &basicHandler{
creds: creds,
}
}
func (*basicHandler) Scheme() string {
return "basic"
}
func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
if bh.creds != nil {
username, password := bh.creds.Basic(req.URL)
if username != "" && password != "" {
req.SetBasicAuth(username, password)
return nil
}
}
return ErrNoBasicAuthCredentials
}
package client
import (
"bytes"
"context"
"fmt"
"io"
"net/http"
"time"
"github.com/distribution/distribution/v3"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
type httpBlobUpload struct {
ctx context.Context
statter distribution.BlobStatter
client *http.Client
uuid string
startedAt time.Time
location string // always the last value of the location header.
offset int64
closed bool
}
func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) {
panic("Not implemented")
}
func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error {
if resp.StatusCode == http.StatusNotFound {
return distribution.ErrBlobUploadUnknown
}
return HandleHTTPResponseError(resp)
}
func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) {
req, err := http.NewRequestWithContext(hbu.ctx, http.MethodPatch, hbu.location, io.NopCloser(r))
if err != nil {
return 0, err
}
defer req.Body.Close()
req.Header.Set("Content-Type", "application/octet-stream")
resp, err := hbu.client.Do(req)
if err != nil {
return 0, err
}
defer resp.Body.Close()
if err := hbu.handleErrorResponse(resp); err != nil {
return 0, err
}
hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
if err != nil {
return 0, err
}
rng := resp.Header.Get("Range")
var start, end int64
if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
return 0, err
} else if n != 2 || end < start {
return 0, fmt.Errorf("bad range format: %s", rng)
}
hbu.offset += end - start + 1
return (end - start + 1), nil
}
func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) {
req, err := http.NewRequestWithContext(hbu.ctx, http.MethodPatch, hbu.location, bytes.NewReader(p))
if err != nil {
return 0, err
}
req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1)))
req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p)))
req.Header.Set("Content-Type", "application/octet-stream")
resp, err := hbu.client.Do(req)
if err != nil {
return 0, err
}
defer resp.Body.Close()
if err := hbu.handleErrorResponse(resp); err != nil {
return 0, err
}
hbu.uuid = resp.Header.Get("Docker-Upload-UUID")
hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location)
if err != nil {
return 0, err
}
rng := resp.Header.Get("Range")
var start, end int
if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil {
return 0, err
} else if n != 2 || end < start {
return 0, fmt.Errorf("bad range format: %s", rng)
}
hbu.offset += int64(end - start + 1)
return (end - start + 1), nil
}
func (hbu *httpBlobUpload) Size() int64 {
return hbu.offset
}
func (hbu *httpBlobUpload) ID() string {
return hbu.uuid
}
func (hbu *httpBlobUpload) StartedAt() time.Time {
return hbu.startedAt
}
func (hbu *httpBlobUpload) Commit(ctx context.Context, desc v1.Descriptor) (v1.Descriptor, error) {
// TODO(dmcgowan): Check if already finished, if so just fetch
req, err := http.NewRequestWithContext(hbu.ctx, http.MethodPut, hbu.location, nil)
if err != nil {
return v1.Descriptor{}, err
}
values := req.URL.Query()
values.Set("digest", desc.Digest.String())
req.URL.RawQuery = values.Encode()
resp, err := hbu.client.Do(req)
if err != nil {
return v1.Descriptor{}, err
}
defer resp.Body.Close()
if err := hbu.handleErrorResponse(resp); err != nil {
return v1.Descriptor{}, err
}
return hbu.statter.Stat(ctx, desc.Digest)
}
func (hbu *httpBlobUpload) Cancel(ctx context.Context) error {
req, err := http.NewRequestWithContext(hbu.ctx, http.MethodDelete, hbu.location, nil)
if err != nil {
return err
}
resp, err := hbu.client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNotFound {
return nil
}
return hbu.handleErrorResponse(resp)
}
func (hbu *httpBlobUpload) Close() error {
hbu.closed = true
return nil
}
package client
import (
"encoding/json"
"errors"
"fmt"
"io"
"mime"
"net/http"
"github.com/distribution/distribution/v3/internal/client/auth/challenge"
"github.com/distribution/distribution/v3/registry/api/errcode"
)
// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty
// errcode.Errors slice.
var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body")
// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is
// received when making a registry API call.
type UnexpectedHTTPStatusError struct {
Status string
}
func (e *UnexpectedHTTPStatusError) Error() string {
return fmt.Sprintf("received unexpected HTTP status: %s", e.Status)
}
// UnexpectedHTTPResponseError is returned when an expected HTTP status code
// is returned, but the content was unexpected and failed to be parsed.
type UnexpectedHTTPResponseError struct {
ParseErr error
StatusCode int
Response []byte
}
func (e *UnexpectedHTTPResponseError) Error() string {
return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response))
}
func parseHTTPErrorResponse(resp *http.Response) error {
var errors errcode.Errors
body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
statusCode := resp.StatusCode
// A HEAD request, for example, validly contains no body, while
// still returning a JSON content-type.
if len(body) == 0 {
return makeError(statusCode, "")
}
ctHeader := resp.Header.Get("Content-Type")
if ctHeader == "" {
return makeError(statusCode, string(body))
}
contentType, _, err := mime.ParseMediaType(ctHeader)
if err != nil {
return fmt.Errorf("failed parsing content-type: %w", err)
}
if contentType != "application/json" && contentType != "application/vnd.api+json" {
return makeError(statusCode, string(body))
}
// For backward compatibility, handle irregularly formatted
// messages that contain a "details" field.
var detailsErr struct {
Details string `json:"details"`
}
err = json.Unmarshal(body, &detailsErr)
if err == nil && detailsErr.Details != "" {
return makeError(statusCode, detailsErr.Details)
}
if err := json.Unmarshal(body, &errors); err != nil {
return &UnexpectedHTTPResponseError{
ParseErr: err,
StatusCode: statusCode,
Response: body,
}
}
if len(errors) == 0 {
// If there was no error specified in the body, return
// UnexpectedHTTPResponseError.
return &UnexpectedHTTPResponseError{
ParseErr: ErrNoErrorsInBody,
StatusCode: statusCode,
Response: body,
}
}
return errors
}
func makeError(statusCode int, details string) error {
switch statusCode {
case http.StatusUnauthorized:
return errcode.ErrorCodeUnauthorized.WithMessage(details)
case http.StatusForbidden:
return errcode.ErrorCodeDenied.WithMessage(details)
case http.StatusTooManyRequests:
return errcode.ErrorCodeTooManyRequests.WithMessage(details)
default:
return errcode.ErrorCodeUnknown.WithMessage(details)
}
}
func makeErrorList(err error) []error {
if errL, ok := err.(errcode.Errors); ok {
return []error(errL)
}
return []error{err}
}
func mergeErrors(err1, err2 error) error {
return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
}
// HandleHTTPResponseError returns the error parsed from an HTTP response, if any.
// It returns nil if no error occurred (HTTP status 200-399), or an error
// for unsuccessful HTTP response codes (in the range 400-499 inclusive).
// If possible, it returns a typed error, but an UnexpectedHTTPStatusError
// is returned for response codes outside the expected range (HTTP status < 200
// or >= 500).
func HandleHTTPResponseError(resp *http.Response) error {
if resp.StatusCode >= 200 && resp.StatusCode <= 399 {
return nil
}
if resp.StatusCode >= 400 && resp.StatusCode < 500 {
// Check for OAuth errors within the `WWW-Authenticate` header first
// See https://tools.ietf.org/html/rfc6750#section-3
for _, c := range challenge.ResponseChallenges(resp) {
if c.Scheme == "bearer" {
var err errcode.Error
// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
switch c.Parameters["error"] {
case "invalid_token":
err.Code = errcode.ErrorCodeUnauthorized
case "insufficient_scope":
err.Code = errcode.ErrorCodeDenied
default:
continue
}
if description := c.Parameters["error_description"]; description != "" {
err.Message = description
} else {
err.Message = err.Code.Message()
}
return mergeErrors(err, parseHTTPErrorResponse(resp))
}
}
err := parseHTTPErrorResponse(resp)
if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
}
return err
}
return &UnexpectedHTTPStatusError{Status: resp.Status}
}
// HandleErrorResponse returns the error parsed from an HTTP response for an
// unsuccessful HTTP response code (in the range 400-499 inclusive). An
// UnexpectedHTTPStatusError is returned for response codes outside of the
// expected range.
//
// Deprecated: use [HandleHTTPResponseError] and check the error.
func HandleErrorResponse(resp *http.Response) error {
if resp.StatusCode >= 200 && resp.StatusCode <= 399 {
return &UnexpectedHTTPStatusError{Status: resp.Status}
}
return HandleHTTPResponseError(resp)
}
// SuccessStatus returns true if the argument is a successful HTTP response
// code (in the range 200 - 399 inclusive).
//
// Deprecated: use [HandleHTTPResponseError] and check the error.
func SuccessStatus(status int) bool {
return status >= 200 && status <= 399
}
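
// Example (illustrative): how response status codes map to returned errors.
//
//	resp := &http.Response{StatusCode: 204, Body: http.NoBody}
//	HandleHTTPResponseError(resp) // => nil (2xx and 3xx are success)
//
//	resp = &http.Response{
//		StatusCode: 404,
//		Header:     http.Header{"Content-Type": []string{"application/json"}},
//		Body:       io.NopCloser(strings.NewReader(`{"errors":[{"code":"MANIFEST_UNKNOWN","message":"manifest unknown"}]}`)),
//	}
//	HandleHTTPResponseError(resp) // => errcode.Errors parsed from the JSON body
//
//	resp = &http.Response{StatusCode: 502, Status: "502 Bad Gateway", Body: http.NoBody}
//	HandleHTTPResponseError(resp) // => *UnexpectedHTTPStatusError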
package client
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/internal/client/transport"
v2 "github.com/distribution/distribution/v3/registry/api/v2"
"github.com/distribution/distribution/v3/registry/storage/cache"
"github.com/distribution/distribution/v3/registry/storage/cache/memory"
"github.com/distribution/reference"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// Registry provides an interface for calling Repositories, which returns a catalog of repositories.
type Registry interface {
Repositories(ctx context.Context, repos []string, last string) (n int, err error)
}
// checkHTTPRedirect is a callback that can manipulate redirected HTTP
// requests. It is used to preserve Accept and Range headers.
func checkHTTPRedirect(req *http.Request, via []*http.Request) error {
if len(via) >= 10 {
return errors.New("stopped after 10 redirects")
}
if len(via) > 0 {
for headerName, headerVals := range via[0].Header {
if headerName != "Accept" && headerName != "Range" {
continue
}
for _, val := range headerVals {
// Don't add to redirected request if redirected
// request already has a header with the same
// name and value.
hasValue := false
for _, existingVal := range req.Header[headerName] {
if existingVal == val {
hasValue = true
break
}
}
if !hasValue {
req.Header.Add(headerName, val)
}
}
}
}
return nil
}
// NewRegistry creates a registry namespace which can be used to get a listing of repositories
func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error) {
ub, err := v2.NewURLBuilderFromString(baseURL, false)
if err != nil {
return nil, err
}
client := &http.Client{
Transport: transport,
Timeout: 1 * time.Minute,
CheckRedirect: checkHTTPRedirect,
}
return ®istry{
client: client,
ub: ub,
}, nil
}
type registry struct {
client *http.Client
ub *v2.URLBuilder
}
// Repositories returns a lexicographically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there
// are no more entries.
func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) {
values := buildCatalogValues(len(entries), last)
u, err := r.ub.BuildCatalogURL(values)
if err != nil {
return 0, err
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
if err != nil {
return 0, err
}
resp, err := r.client.Do(req)
if err != nil {
return 0, err
}
defer resp.Body.Close()
if err := HandleHTTPResponseError(resp); err != nil {
return 0, err
}
var ctlg struct {
Repositories []string `json:"repositories"`
}
decoder := json.NewDecoder(resp.Body)
if err := decoder.Decode(&ctlg); err != nil {
return 0, err
}
copy(entries, ctlg.Repositories)
numFilled := len(ctlg.Repositories)
if resp.Header.Get("Link") == "" {
return numFilled, io.EOF
}
return numFilled, nil
}
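
// Example (illustrative): paging through the catalog until io.EOF marks the
// final page. The base URL is a hypothetical placeholder.
//
//	reg, _ := NewRegistry("https://registry.example.test", http.DefaultTransport)
//	entries := make([]string, 50)
//	last := ""
//	for {
//		n, err := reg.Repositories(ctx, entries, last)
//		for _, name := range entries[:n] {
//			fmt.Println(name)
//		}
//		if err == io.EOF {
//			break // no Link header: this was the last page
//		}
//		if err != nil {
//			return err
//		}
//		last = entries[n-1]
//	}
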
// NewRepository creates a new Repository for the given repository name and base URL.
func NewRepository(name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
ub, err := v2.NewURLBuilderFromString(baseURL, false)
if err != nil {
return nil, err
}
return &repository{
client: &http.Client{
Transport: transport,
CheckRedirect: checkHTTPRedirect,
// TODO(dmcgowan): create cookie jar
},
ub: ub,
name: name,
}, nil
}
type repository struct {
client *http.Client
ub *v2.URLBuilder
name reference.Named
}
func (r *repository) Named() reference.Named {
return r.name
}
func (r *repository) Blobs(ctx context.Context) distribution.BlobStore {
return &blobs{
name: r.name,
ub: r.ub,
client: r.client,
statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(memory.UnlimitedSize), &blobStatter{
name: r.name,
ub: r.ub,
client: r.client,
}),
}
}
func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
// todo(richardscothern): options should be sent over the wire
return &manifests{
name: r.name,
ub: r.ub,
client: r.client,
etags: make(map[string]string),
}, nil
}
func (r *repository) Tags(ctx context.Context) distribution.TagService {
return &tags{
client: r.client,
ub: r.ub,
name: r.Named(),
}
}
// tags implements remote tagging operations.
type tags struct {
client *http.Client
ub *v2.URLBuilder
name reference.Named
}
// All returns all tags
func (t *tags) All(ctx context.Context) ([]string, error) {
listURLStr, err := t.ub.BuildTagsURL(t.name)
if err != nil {
return nil, err
}
listURL, err := url.Parse(listURLStr)
if err != nil {
return nil, err
}
var allTags []string
for {
req, err := http.NewRequestWithContext(ctx, http.MethodGet, listURL.String(), nil)
if err != nil {
return nil, err
}
resp, err := t.client.Do(req)
if err != nil {
return allTags, err
}
defer resp.Body.Close()
if err := HandleHTTPResponseError(resp); err != nil {
return allTags, err
}
b, err := io.ReadAll(resp.Body)
if err != nil {
return allTags, err
}
tagsResponse := struct {
Tags []string `json:"tags"`
}{}
if err := json.Unmarshal(b, &tagsResponse); err != nil {
return allTags, err
}
allTags = append(allTags, tagsResponse.Tags...)
if link := resp.Header.Get("Link"); link != "" {
firstLink, _, _ := strings.Cut(link, ";")
linkURL, err := url.Parse(strings.Trim(firstLink, "<>"))
if err != nil {
return allTags, err
}
listURL = listURL.ResolveReference(linkURL)
} else {
return allTags, nil
}
}
}
func descriptorFromResponse(response *http.Response) (v1.Descriptor, error) {
desc := v1.Descriptor{}
headers := response.Header
ctHeader := headers.Get("Content-Type")
if ctHeader == "" {
return v1.Descriptor{}, errors.New("missing or empty Content-Type header")
}
desc.MediaType = ctHeader
digestHeader := headers.Get("Docker-Content-Digest")
if digestHeader == "" {
data, err := io.ReadAll(response.Body)
if err != nil {
return v1.Descriptor{}, err
}
_, desc, err := distribution.UnmarshalManifest(ctHeader, data)
if err != nil {
return v1.Descriptor{}, err
}
return desc, nil
}
dgst, err := digest.Parse(digestHeader)
if err != nil {
return v1.Descriptor{}, err
}
desc.Digest = dgst
lengthHeader := headers.Get("Content-Length")
if lengthHeader == "" {
return v1.Descriptor{}, errors.New("missing or empty Content-Length header")
}
length, err := strconv.ParseInt(lengthHeader, 10, 64)
if err != nil {
return v1.Descriptor{}, err
}
desc.Size = length
return desc, nil
}
// Get issues a HEAD request for a Manifest against its named endpoint in order
// to construct a descriptor for the tag. If the registry doesn't support HEADing
// a manifest, it falls back to GET.
func (t *tags) Get(ctx context.Context, tag string) (v1.Descriptor, error) {
ref, err := reference.WithTag(t.name, tag)
if err != nil {
return v1.Descriptor{}, err
}
u, err := t.ub.BuildManifestURL(ref)
if err != nil {
return v1.Descriptor{}, err
}
newRequest := func(method string) (*http.Response, error) {
req, err := http.NewRequestWithContext(ctx, method, u, nil)
if err != nil {
return nil, err
}
for _, t := range distribution.ManifestMediaTypes() {
req.Header.Add("Accept", t)
}
resp, err := t.client.Do(req)
return resp, err
}
resp, err := newRequest(http.MethodHead)
if err != nil {
return v1.Descriptor{}, err
}
defer resp.Body.Close()
switch {
case resp.StatusCode >= 200 && resp.StatusCode < 400 && len(resp.Header.Get("Docker-Content-Digest")) > 0:
// if the response is a success AND a Docker-Content-Digest can be retrieved from the headers
return descriptorFromResponse(resp)
default:
// if the response is an error - there will be no body to decode.
// Issue a GET request:
// - for data from a server that does not handle HEAD
// - to get error details in case of a failure
resp, err = newRequest(http.MethodGet)
if err != nil {
return v1.Descriptor{}, err
}
defer resp.Body.Close()
if resp.StatusCode >= 200 && resp.StatusCode < 400 {
return descriptorFromResponse(resp)
}
return v1.Descriptor{}, HandleHTTPResponseError(resp)
}
}
func (t *tags) Lookup(ctx context.Context, digest v1.Descriptor) ([]string, error) {
panic("not implemented")
}
func (t *tags) Tag(ctx context.Context, tag string, desc v1.Descriptor) error {
panic("not implemented")
}
func (t *tags) Untag(ctx context.Context, tag string) error {
ref, err := reference.WithTag(t.name, tag)
if err != nil {
return err
}
u, err := t.ub.BuildManifestURL(ref)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodDelete, u, nil)
if err != nil {
return err
}
resp, err := t.client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
return HandleHTTPResponseError(resp)
}
type manifests struct {
name reference.Named
ub *v2.URLBuilder
client *http.Client
etags map[string]string
}
func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
ref, err := reference.WithDigest(ms.name, dgst)
if err != nil {
return false, err
}
u, err := ms.ub.BuildManifestURL(ref)
if err != nil {
return false, err
}
req, err := http.NewRequestWithContext(ctx, http.MethodHead, u, nil)
if err != nil {
return false, err
}
mediaTypes := distribution.ManifestMediaTypes()
for _, t := range mediaTypes {
req.Header.Add("Accept", t)
}
resp, err := ms.client.Do(req)
if err != nil {
return false, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNotFound {
return false, nil
}
if err := HandleHTTPResponseError(resp); err != nil {
return false, err
}
return true, nil
}
// AddEtagToTag allows a client to supply an eTag to Get which will be
// used for a conditional HTTP request. If the eTag matches, a nil manifest
// and ErrManifestNotModified error will be returned. etag is automatically
// quoted when added to this map.
func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
return etagOption{tag, etag}
}
type etagOption struct{ tag, etag string }
func (o etagOption) Apply(ms distribution.ManifestService) error {
if ms, ok := ms.(*manifests); ok {
ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag)
return nil
}
return fmt.Errorf("etag options is a client-only option")
}
// ReturnContentDigest allows a client to receive the content digest of
// a successful request, taken from the 'Docker-Content-Digest' header. The
// returned digest represents the digest which the registry uses
// to refer to the content and can be used to delete the content.
func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption {
return contentDigestOption{dgst}
}
type contentDigestOption struct{ digest *digest.Digest }
func (o contentDigestOption) Apply(ms distribution.ManifestService) error {
return nil
}
func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
var (
digestOrTag string
ref reference.Named
err error
contentDgst *digest.Digest
mediaTypes []string
)
for _, option := range options {
switch opt := option.(type) {
case distribution.WithTagOption:
digestOrTag = opt.Tag
ref, err = reference.WithTag(ms.name, opt.Tag)
if err != nil {
return nil, err
}
case contentDigestOption:
contentDgst = opt.digest
case distribution.WithManifestMediaTypesOption:
mediaTypes = opt.MediaTypes
default:
err := option.Apply(ms)
if err != nil {
return nil, err
}
}
}
if digestOrTag == "" {
digestOrTag = dgst.String()
ref, err = reference.WithDigest(ms.name, dgst)
if err != nil {
return nil, err
}
}
if len(mediaTypes) == 0 {
mediaTypes = distribution.ManifestMediaTypes()
}
u, err := ms.ub.BuildManifestURL(ref)
if err != nil {
return nil, err
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
if err != nil {
return nil, err
}
for _, t := range mediaTypes {
req.Header.Add("Accept", t)
}
if _, ok := ms.etags[digestOrTag]; ok {
req.Header.Set("If-None-Match", ms.etags[digestOrTag])
}
resp, err := ms.client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNotModified {
return nil, distribution.ErrManifestNotModified
}
if err := HandleHTTPResponseError(resp); err != nil {
return nil, err
}
if contentDgst != nil {
dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest"))
if err == nil {
*contentDgst = dgst
}
}
mt := resp.Header.Get("Content-Type")
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}
m, _, err := distribution.UnmarshalManifest(mt, body)
if err != nil {
return nil, err
}
return m, nil
}
// Put puts a manifest. A tag can be specified using an options parameter; the option uses shared state to hold the
// tag name so that the correct upload URL can be built.
func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
ref := ms.name
var tagged bool
for _, option := range options {
if opt, ok := option.(distribution.WithTagOption); ok {
var err error
ref, err = reference.WithTag(ref, opt.Tag)
if err != nil {
return "", err
}
tagged = true
} else {
err := option.Apply(ms)
if err != nil {
return "", err
}
}
}
mediaType, p, err := m.Payload()
if err != nil {
return "", err
}
if !tagged {
// generate a canonical digest and Put by digest
_, d, err := distribution.UnmarshalManifest(mediaType, p)
if err != nil {
return "", err
}
ref, err = reference.WithDigest(ref, d.Digest)
if err != nil {
return "", err
}
}
manifestURL, err := ms.ub.BuildManifestURL(ref)
if err != nil {
return "", err
}
putRequest, err := http.NewRequestWithContext(ctx, http.MethodPut, manifestURL, bytes.NewReader(p))
if err != nil {
return "", err
}
putRequest.Header.Set("Content-Type", mediaType)
resp, err := ms.client.Do(putRequest)
if err != nil {
return "", err
}
defer resp.Body.Close()
if err := HandleHTTPResponseError(resp); err != nil {
return "", err
}
dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest"))
if err != nil {
return "", err
}
return dgst, nil
}
func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error {
ref, err := reference.WithDigest(ms.name, dgst)
if err != nil {
return err
}
u, err := ms.ub.BuildManifestURL(ref)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodDelete, u, nil)
if err != nil {
return err
}
resp, err := ms.client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
return HandleHTTPResponseError(resp)
}
// todo(richardscothern): Restore interface and implementation with merge of #1050
/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) {
panic("not supported")
}*/
type blobs struct {
name reference.Named
ub *v2.URLBuilder
client *http.Client
statter distribution.BlobDescriptorService
distribution.BlobDeleter
}
func sanitizeLocation(location, base string) (string, error) {
baseURL, err := url.Parse(base)
if err != nil {
return "", err
}
locationURL, err := url.Parse(location)
if err != nil {
return "", err
}
return baseURL.ResolveReference(locationURL).String(), nil
}
func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (v1.Descriptor, error) {
return bs.statter.Stat(ctx, dgst)
}
func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
reader, err := bs.Open(ctx, dgst)
if err != nil {
return nil, err
}
defer reader.Close()
return io.ReadAll(reader)
}
func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (io.ReadSeekCloser, error) {
ref, err := reference.WithDigest(bs.name, dgst)
if err != nil {
return nil, err
}
blobURL, err := bs.ub.BuildBlobURL(ref)
if err != nil {
return nil, err
}
return transport.NewHTTPReadSeeker(ctx, bs.client, blobURL, func(resp *http.Response) error {
if resp.StatusCode == http.StatusNotFound {
return distribution.ErrBlobUnknown
}
return HandleHTTPResponseError(resp)
}), nil
}
func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
desc, err := bs.statter.Stat(ctx, dgst)
if err != nil {
return err
}
w.Header().Set("Content-Length", strconv.FormatInt(desc.Size, 10))
w.Header().Set("Content-Type", desc.MediaType)
w.Header().Set("Docker-Content-Digest", dgst.String())
w.Header().Set("Etag", dgst.String())
if r.Method == http.MethodHead {
return nil
}
blob, err := bs.Open(ctx, dgst)
if err != nil {
return err
}
defer blob.Close()
_, err = io.CopyN(w, blob, desc.Size)
return err
}
func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (v1.Descriptor, error) {
writer, err := bs.Create(ctx)
if err != nil {
return v1.Descriptor{}, err
}
dgstr := digest.Canonical.Digester()
n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash()))
if err != nil {
return v1.Descriptor{}, err
}
if n < int64(len(p)) {
return v1.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p))
}
return writer.Commit(ctx, v1.Descriptor{
MediaType: mediaType,
Size: int64(len(p)),
Digest: dgstr.Digest(),
})
}
type optionFunc func(interface{}) error
func (f optionFunc) Apply(v interface{}) error {
return f(v)
}
// WithMountFrom returns a BlobCreateOption which designates that the blob should be
// mounted from the given canonical reference.
func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
return optionFunc(func(v interface{}) error {
opts, ok := v.(*distribution.CreateOptions)
if !ok {
return fmt.Errorf("unexpected options type: %T", v)
}
opts.Mount.ShouldMount = true
opts.Mount.From = ref
return nil
})
}
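
// Example (illustrative): attempting a cross-repository mount and detecting
// success. On a successful mount the registry answers 201 Created and Create
// surfaces it as distribution.ErrBlobMounted rather than returning a writer.
// repo and canonicalRef are assumed to exist in the caller's scope.
//
//	w, err := repo.Blobs(ctx).Create(ctx, WithMountFrom(canonicalRef))
//	var mounted distribution.ErrBlobMounted
//	if errors.As(err, &mounted) {
//		// Blob already available; mounted.Descriptor describes it.
//	} else if err == nil {
//		defer w.Close()
//		// Fall back to a normal upload through w.
//	}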
func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
var opts distribution.CreateOptions
for _, option := range options {
err := option.Apply(&opts)
if err != nil {
return nil, err
}
}
var values []url.Values
if opts.Mount.ShouldMount {
values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}})
}
u, err := bs.ub.BuildBlobUploadURL(bs.name, values...)
if err != nil {
return nil, err
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, u, nil)
if err != nil {
return nil, err
}
resp, err := bs.client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
switch resp.StatusCode {
case http.StatusCreated:
desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest())
if err != nil {
return nil, err
}
return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc}
case http.StatusAccepted:
// TODO(dmcgowan): Check for invalid UUID
uuid := resp.Header.Get("Docker-Upload-UUID")
if uuid == "" {
// uuid is expected to be the last path element
_, uuid = path.Split(resp.Header.Get("Location"))
}
if uuid == "" {
return nil, errors.New("cannot retrieve docker upload UUID")
}
location, err := sanitizeLocation(resp.Header.Get("Location"), u)
if err != nil {
return nil, err
}
return &httpBlobUpload{
ctx: ctx,
statter: bs.statter,
client: bs.client,
uuid: uuid,
startedAt: time.Now(),
location: location,
}, nil
default:
return nil, HandleHTTPResponseError(resp)
}
}
func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
location, err := bs.ub.BuildBlobUploadChunkURL(bs.name, id)
if err != nil {
return nil, err
}
return &httpBlobUpload{
ctx: ctx,
statter: bs.statter,
client: bs.client,
uuid: id,
startedAt: time.Now(),
location: location,
}, nil
}
func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error {
return bs.statter.Clear(ctx, dgst)
}
type blobStatter struct {
name reference.Named
ub *v2.URLBuilder
client *http.Client
}
func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (v1.Descriptor, error) {
ref, err := reference.WithDigest(bs.name, dgst)
if err != nil {
return v1.Descriptor{}, err
}
u, err := bs.ub.BuildBlobURL(ref)
if err != nil {
return v1.Descriptor{}, err
}
req, err := http.NewRequestWithContext(ctx, http.MethodHead, u, nil)
if err != nil {
return v1.Descriptor{}, err
}
resp, err := bs.client.Do(req)
if err != nil {
return v1.Descriptor{}, err
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusNotFound {
return v1.Descriptor{}, distribution.ErrBlobUnknown
}
if err := HandleHTTPResponseError(resp); err != nil {
return v1.Descriptor{}, err
}
lengthHeader := resp.Header.Get("Content-Length")
if lengthHeader == "" {
return v1.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u)
}
length, err := strconv.ParseInt(lengthHeader, 10, 64)
if err != nil {
return v1.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err)
}
return v1.Descriptor{
MediaType: resp.Header.Get("Content-Type"),
Size: length,
Digest: dgst,
}, nil
}
func buildCatalogValues(maxEntries int, last string) url.Values {
values := url.Values{}
if maxEntries > 0 {
values.Add("n", strconv.Itoa(maxEntries))
}
if last != "" {
values.Add("last", last)
}
return values
}
func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
ref, err := reference.WithDigest(bs.name, dgst)
if err != nil {
return err
}
blobURL, err := bs.ub.BuildBlobURL(ref)
if err != nil {
return err
}
req, err := http.NewRequestWithContext(ctx, http.MethodDelete, blobURL, nil)
if err != nil {
return err
}
resp, err := bs.client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
return HandleHTTPResponseError(resp)
}
func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc v1.Descriptor) error {
return nil
}
package transport
import (
"compress/flate"
"compress/gzip"
"context"
"errors"
"fmt"
"io"
"math"
"net/http"
"regexp"
"strconv"
"strings"
"unicode"
"github.com/klauspost/compress/zstd"
)
var (
contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)
// ErrWrongCodeForByteRange is returned if the client sends a request
// with a Range header but the server returns a 2xx or 3xx code other
// than 206 Partial Content.
ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request")
)
// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET
// request. When seeking and starting a read from a non-zero offset,
// a "Range" header will be added which sets the offset.
//
// TODO(dmcgowan): Move this into a separate utility package
func NewHTTPReadSeeker(ctx context.Context, client *http.Client, url string, errorHandler func(*http.Response) error) *HTTPReadSeeker {
return &HTTPReadSeeker{
ctx: ctx,
client: client,
url: url,
errorHandler: errorHandler,
}
}
// HTTPReadSeeker implements an [io.ReadSeekCloser].
type HTTPReadSeeker struct {
ctx context.Context
client *http.Client
url string
// errorHandler creates an error from an unsuccessful HTTP response.
// This allows the error to be created with the HTTP response body
// without leaking the body through a returned error.
errorHandler func(*http.Response) error
size int64
// rc is the remote read closer.
rc io.ReadCloser
// readerOffset tracks the offset as of the last read.
readerOffset int64
// seekOffset allows Seek to override the offset. Seek changes
// seekOffset instead of changing readerOffset directly so that
// connection resets can be delayed and possibly avoided if the
// seek is undone (i.e. seeking to the end and then back to the
// beginning).
seekOffset int64
err error
}
func (hrs *HTTPReadSeeker) Read(p []byte) (n int, err error) {
if hrs.err != nil {
return 0, hrs.err
}
// If we sought to a different position, we need to reset the
// connection. This logic is here instead of Seek so that if
// a seek is undone before the next read, the connection doesn't
// need to be closed and reopened. A common example of this is
// seeking to the end to determine the length, and then seeking
// back to the original position.
if hrs.readerOffset != hrs.seekOffset {
hrs.reset()
}
hrs.readerOffset = hrs.seekOffset
rd, err := hrs.reader()
if err != nil {
return 0, err
}
n, err = rd.Read(p)
hrs.seekOffset += int64(n)
hrs.readerOffset += int64(n)
return n, err
}
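// Hedged sketch: determining the remote size without forcing a reconnect.
// exampleReadSeekerSize and its parameters are assumptions. Per the comments
// in Read and Seek, seeking to the end and then back to the start is undone
// before the next read, so the open HTTP response remains usable.
func exampleReadSeekerSize(ctx context.Context, client *http.Client, url string) (int64, error) {
	rs := NewHTTPReadSeeker(ctx, client, url, nil)
	defer rs.Close()
	size, err := rs.Seek(0, io.SeekEnd)
	if err != nil {
		return 0, err
	}
	// Undo the seek; no new request is issued before the first Read.
	if _, err := rs.Seek(0, io.SeekStart); err != nil {
		return 0, err
	}
	return size, nil
}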
func (hrs *HTTPReadSeeker) Seek(offset int64, whence int) (int64, error) {
if hrs.err != nil {
return 0, hrs.err
}
lastReaderOffset := hrs.readerOffset
if whence == io.SeekStart && hrs.rc == nil {
// If no request has been made yet, and we are seeking to an
// absolute position, set the read offset as well to avoid an
// unnecessary request.
hrs.readerOffset = offset
}
_, err := hrs.reader()
if err != nil {
hrs.readerOffset = lastReaderOffset
return 0, err
}
newOffset := hrs.seekOffset
switch whence {
case io.SeekCurrent:
newOffset += offset
case io.SeekEnd:
if hrs.size < 0 {
return 0, errors.New("content length not known")
}
newOffset = hrs.size + offset
case io.SeekStart:
newOffset = offset
}
if newOffset < 0 {
err = errors.New("cannot seek to negative position")
} else {
hrs.seekOffset = newOffset
}
return hrs.seekOffset, err
}
func (hrs *HTTPReadSeeker) Close() error {
if hrs.err != nil {
return hrs.err
}
// close and release reader chain
if hrs.rc != nil {
hrs.rc.Close()
}
hrs.rc = nil
hrs.err = errors.New("httpLayer: closed")
return nil
}
func (hrs *HTTPReadSeeker) reset() {
if hrs.err != nil {
return
}
if hrs.rc != nil {
hrs.rc.Close()
hrs.rc = nil
}
}
func (hrs *HTTPReadSeeker) reader() (_ io.Reader, retErr error) {
if hrs.err != nil {
return nil, hrs.err
}
if hrs.rc != nil {
return hrs.rc, nil
}
req, err := http.NewRequestWithContext(hrs.ctx, http.MethodGet, hrs.url, nil)
if err != nil {
return nil, err
}
if hrs.readerOffset > 0 {
// If we are at a different offset, issue a range request from there.
req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset))
// TODO: get context in here
// context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range"))
}
req.Header.Add("Accept-Encoding", "zstd, gzip, deflate")
resp, err := hrs.client.Do(req)
if err != nil {
return nil, err
}
defer func() {
if retErr != nil {
_ = resp.Body.Close()
}
}()
// Normally would use client.SuccessStatus, but that would be a cyclic
// import
if resp.StatusCode >= 200 && resp.StatusCode <= 399 {
if hrs.readerOffset > 0 {
if resp.StatusCode != http.StatusPartialContent {
return nil, ErrWrongCodeForByteRange
}
contentRange := resp.Header.Get("Content-Range")
if contentRange == "" {
return nil, errors.New("no Content-Range header found in HTTP 206 response")
}
submatches := contentRangeRegexp.FindStringSubmatch(contentRange)
if len(submatches) < 4 {
return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange)
}
startByte, err := strconv.ParseUint(submatches[1], 10, 64)
if err != nil {
return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange)
}
if startByte != uint64(hrs.readerOffset) {
return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset)
}
endByte, err := strconv.ParseUint(submatches[2], 10, 64)
if err != nil {
return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange)
}
if submatches[3] == "*" {
hrs.size = -1
} else {
size, err := strconv.ParseUint(submatches[3], 10, 64)
if err != nil {
return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange)
}
if endByte+1 != size {
return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange)
}
if size > math.MaxInt64 {
return nil, fmt.Errorf("Content-Range size: %d exceeds max allowed size", size)
}
hrs.size = int64(size)
}
} else if resp.StatusCode == http.StatusOK {
hrs.size = resp.ContentLength
} else {
hrs.size = -1
}
body := resp.Body
encoding := strings.FieldsFunc(resp.Header.Get("Content-Encoding"), func(r rune) bool {
return unicode.IsSpace(r) || r == ','
})
for i := len(encoding) - 1; i >= 0; i-- {
algorithm := strings.ToLower(encoding[i])
switch algorithm {
case "zstd":
r, err := zstd.NewReader(body)
if err != nil {
return nil, err
}
body = r.IOReadCloser()
case "gzip":
body, err = gzip.NewReader(body)
if err != nil {
return nil, err
}
case "deflate":
body = flate.NewReader(body)
case "":
// no content-encoding applied, use raw body
default:
return nil, errors.New("unsupported Content-Encoding algorithm: " + algorithm)
}
}
hrs.rc = body
} else {
if hrs.errorHandler != nil {
// Closing the body should be handled by the existing defer,
// but in case a custom "errHandler" is used that doesn't return
// an error, we close the body regardless.
defer resp.Body.Close()
return nil, hrs.errorHandler(resp)
}
return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status)
}
return hrs.rc, nil
}
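// Hedged sketch of the Content-Range validation performed in reader(): the
// header value below is illustrative. The regexp yields the start, end and
// total size of the range; a "*" total marks an unknown size.
func exampleParseContentRange() {
	submatches := contentRangeRegexp.FindStringSubmatch("bytes 1024-2047/4096")
	if len(submatches) < 4 {
		panic("unparsable Content-Range")
	}
	start, _ := strconv.ParseUint(submatches[1], 10, 64)
	end, _ := strconv.ParseUint(submatches[2], 10, 64)
	size, _ := strconv.ParseUint(submatches[3], 10, 64)
	fmt.Printf("start=%d end=%d size=%d\n", start, end, size) // start=1024 end=2047 size=4096
}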
package transport
import (
"io"
"net/http"
"sync"
)
func identityTransportWrapper(rt http.RoundTripper) http.RoundTripper {
return rt
}
// DefaultTransportWrapper allows a user to wrap every generated transport
var DefaultTransportWrapper = identityTransportWrapper
// RequestModifier represents an object which will do an in-place
// modification of an HTTP request.
type RequestModifier interface {
ModifyRequest(*http.Request) error
}
type headerModifier http.Header
// NewHeaderRequestModifier returns a new RequestModifier which will
// add the given headers to a request.
func NewHeaderRequestModifier(header http.Header) RequestModifier {
return headerModifier(header)
}
func (h headerModifier) ModifyRequest(req *http.Request) error {
for k, s := range http.Header(h) {
req.Header[k] = append(req.Header[k], s...)
}
return nil
}
// NewTransport creates a new transport which will apply modifiers to
// the request on a RoundTrip call.
func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper {
return DefaultTransportWrapper(
&transport{
Modifiers: modifiers,
Base: base,
})
}
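// Hedged usage sketch for NewTransport: the header name and token are
// assumptions. Every request sent through the returned client carries the
// headers added by the modifier, applied to a per-request clone.
func exampleAuthorizedClient(token string) *http.Client {
	hdr := make(http.Header)
	hdr.Set("Authorization", "Bearer "+token)
	rt := NewTransport(http.DefaultTransport, NewHeaderRequestModifier(hdr))
	return &http.Client{Transport: rt}
}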
// transport is an http.RoundTripper that makes HTTP requests after
// copying and modifying the request
type transport struct {
Modifiers []RequestModifier
Base http.RoundTripper
mu sync.Mutex // guards modReq
modReq map[*http.Request]*http.Request // original -> modified
}
// RoundTrip clones the request, applies the registered request modifiers to
// the clone, and dispatches it via the base RoundTripper, tracking the
// modified request until its response body is consumed or closed.
func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
req2 := cloneRequest(req)
for _, modifier := range t.Modifiers {
if err := modifier.ModifyRequest(req2); err != nil {
return nil, err
}
}
t.setModReq(req, req2)
res, err := t.base().RoundTrip(req2)
if err != nil {
t.setModReq(req, nil)
return nil, err
}
res.Body = &onEOFReader{
rc: res.Body,
fn: func() { t.setModReq(req, nil) },
}
return res, nil
}
// CancelRequest cancels an in-flight request by closing its connection.
func (t *transport) CancelRequest(req *http.Request) {
type canceler interface {
CancelRequest(*http.Request)
}
if cr, ok := t.base().(canceler); ok {
t.mu.Lock()
modReq := t.modReq[req]
delete(t.modReq, req)
t.mu.Unlock()
cr.CancelRequest(modReq)
}
}
func (t *transport) base() http.RoundTripper {
if t.Base != nil {
return t.Base
}
return http.DefaultTransport
}
func (t *transport) setModReq(orig, mod *http.Request) {
t.mu.Lock()
defer t.mu.Unlock()
if t.modReq == nil {
t.modReq = make(map[*http.Request]*http.Request)
}
if mod == nil {
delete(t.modReq, orig)
} else {
t.modReq[orig] = mod
}
}
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header, len(r.Header))
for k, s := range r.Header {
r2.Header[k] = append([]string(nil), s...)
}
return r2
}
type onEOFReader struct {
rc io.ReadCloser
fn func()
}
func (r *onEOFReader) Read(p []byte) (n int, err error) {
n, err = r.rc.Read(p)
if err == io.EOF {
r.runFunc()
}
return
}
func (r *onEOFReader) Close() error {
err := r.rc.Close()
r.runFunc()
return err
}
func (r *onEOFReader) runFunc() {
if fn := r.fn; fn != nil {
fn()
r.fn = nil
}
}
package dcontext
import (
"context"
"maps"
"sync"
"github.com/distribution/distribution/v3/internal/uuid"
)
// instanceContext is a context that provides only an instance id. It is
// provided as the main background context.
type instanceContext struct {
context.Context
id string // id of context, logged as "instance.id"
once sync.Once // once protects generation of the id
}
func (ic *instanceContext) Value(key interface{}) interface{} {
if key == "instance.id" {
ic.once.Do(func() {
// We want to lazy initialize the UUID such that we don't
// call a random generator from the package initialization
// code. For various reasons random could not be available
// https://github.com/distribution/distribution/issues/782
ic.id = uuid.NewString()
})
return ic.id
}
return ic.Context.Value(key)
}
var background = &instanceContext{
Context: context.Background(),
}
// Background returns a non-nil, empty Context. The background context
// provides a single key, "instance.id" that is globally unique to the
// process.
func Background() context.Context {
return background
}
// stringMapContext is a simple context implementation that checks a map for a
// key, falling back to a parent if not present.
type stringMapContext struct {
context.Context
m map[string]interface{}
}
// WithValues returns a context that proxies lookups through a map. Only
// supports string keys.
func WithValues(ctx context.Context, m map[string]interface{}) context.Context {
mo := make(map[string]interface{}, len(m)) // make our own copy.
maps.Copy(mo, m)
return stringMapContext{
Context: ctx,
m: mo,
}
}
func (smc stringMapContext) Value(key interface{}) interface{} {
if ks, ok := key.(string); ok {
if v, ok := smc.m[ks]; ok {
return v
}
}
return smc.Context.Value(key)
}
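// Hedged sketch for WithValues: the key names below are illustrative.
// Lookups are served from the copied map first and fall back to the parent
// context when the key is absent.
func exampleWithValues() {
	ctx := WithValues(Background(), map[string]interface{}{
		"environment": "staging",
	})
	_ = ctx.Value("environment") // "staging", from the map copy
	_ = ctx.Value("instance.id") // falls through to the instance context
}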
package dcontext
import (
"context"
"errors"
"net/http"
"strings"
"sync"
"time"
"github.com/distribution/distribution/v3/internal/requestutil"
"github.com/distribution/distribution/v3/internal/uuid"
"github.com/gorilla/mux"
)
// Common errors used with this package.
var (
ErrNoRequestContext = errors.New("no http request in context")
ErrNoResponseWriterContext = errors.New("no http response in context")
)
// WithRequest places the request on the context. The context of the request
// is assigned a unique id, available at "http.request.id". The request itself
// is available at "http.request". Other common attributes are available under
// the prefix "http.request.". If a request is already present on the context,
// this method will panic.
func WithRequest(ctx context.Context, r *http.Request) context.Context {
if ctx.Value("http.request") != nil {
// NOTE(stevvooe): This needs to be considered a programming error. It
// is unlikely that we'd want to have more than one request in
// context.
panic("only one request per context")
}
return &httpRequestContext{
Context: ctx,
startedAt: time.Now(),
id: uuid.NewString(),
r: r,
}
}
// GetRequestID attempts to resolve the current request id, if possible. An
// empty string is returned if it is not available on the context.
func GetRequestID(ctx context.Context) string {
return GetStringValue(ctx, "http.request.id")
}
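// Hedged middleware sketch: exampleRequestIDMiddleware is an assumption, not
// part of the original package. WithRequest assigns "http.request.id" once;
// GetRequestID resolves it downstream without knowing the context
// implementation.
func exampleRequestIDMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := WithRequest(r.Context(), r)
		GetLogger(ctx).Debugf("handling request %s", GetRequestID(ctx))
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}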
// WithResponseWriter returns a new context and response writer that makes
// interesting response statistics available within the context.
func WithResponseWriter(ctx context.Context, w http.ResponseWriter) (context.Context, http.ResponseWriter) {
irw := instrumentedResponseWriter{
ResponseWriter: w,
Context: ctx,
}
return &irw, &irw
}
// GetResponseWriter returns the http.ResponseWriter from the provided
// context. If not present, ErrNoResponseWriterContext is returned. The
// returned instance provides instrumentation in the context.
func GetResponseWriter(ctx context.Context) (http.ResponseWriter, error) {
v := ctx.Value("http.response")
rw, ok := v.(http.ResponseWriter)
if !ok || rw == nil {
return nil, ErrNoResponseWriterContext
}
return rw, nil
}
// getVarsFromRequest lets us change the request vars implementation for testing
// and maybe future changes.
var getVarsFromRequest = mux.Vars
// WithVars extracts gorilla/mux vars and makes them available on the returned
// context. Variables are available at keys with the prefix "vars.". For
// example, if looking for the variable "name", it can be accessed as
// "vars.name". Implementations that are accessing values need not know that
// the underlying context is implemented with gorilla/mux vars.
func WithVars(ctx context.Context, r *http.Request) context.Context {
return &muxVarsContext{
Context: ctx,
vars: getVarsFromRequest(r),
}
}
// GetRequestLogger returns a logger that contains fields from the request in
// the current context. If the request is not available in the context, no
// fields will display. Request loggers can safely be pushed onto the context.
func GetRequestLogger(ctx context.Context) Logger {
return GetLogger(ctx,
"http.request.id",
"http.request.method",
"http.request.host",
"http.request.uri",
"http.request.referer",
"http.request.useragent",
"http.request.remoteaddr",
"http.request.contenttype")
}
// GetResponseLogger reads the current response stats and builds a logger.
// Because the values are read at call time, pushing a logger returned from
// this function on the context will lead to missing or invalid data. Only
// call this at the end of a request, after the response has been written.
func GetResponseLogger(ctx context.Context) Logger {
l := getLogrusLogger(ctx,
"http.response.written",
"http.response.status",
"http.response.contenttype")
duration := Since(ctx, "http.request.startedat")
if duration > 0 {
l = l.WithField("http.response.duration", duration.String())
}
return l
}
// httpRequestContext makes information about a request available to context.
type httpRequestContext struct {
context.Context
startedAt time.Time
id string
r *http.Request
}
// Value returns a keyed element of the request for use in the context. To get
// the request itself, query "http.request". For other components, access them
// as "http.request.<component>"; for example, "http.request.uri" resolves to
// r.RequestURI.
func (ctx *httpRequestContext) Value(key interface{}) interface{} {
if keyStr, ok := key.(string); ok {
switch keyStr {
case "http.request":
return ctx.r
case "http.request.uri":
return ctx.r.RequestURI
case "http.request.remoteaddr":
return requestutil.RemoteAddr(ctx.r)
case "http.request.method":
return ctx.r.Method
case "http.request.host":
return ctx.r.Host
case "http.request.referer":
referer := ctx.r.Referer()
if referer != "" {
return referer
}
case "http.request.useragent":
return ctx.r.UserAgent()
case "http.request.id":
return ctx.id
case "http.request.startedat":
return ctx.startedAt
case "http.request.contenttype":
if ct := ctx.r.Header.Get("Content-Type"); ct != "" {
return ct
}
default:
// no match; fall back to standard behavior below
}
}
return ctx.Context.Value(key)
}
type muxVarsContext struct {
context.Context
vars map[string]string
}
func (ctx *muxVarsContext) Value(key interface{}) interface{} {
if keyStr, ok := key.(string); ok {
if keyStr == "vars" {
return ctx.vars
}
// TODO(thaJeztah): this considers "vars.FOO" and "FOO" to be equal.
// We need to check if that's intentional (could be a bug).
if v, ok := ctx.vars[strings.TrimPrefix(keyStr, "vars.")]; ok {
return v
}
}
return ctx.Context.Value(key)
}
// instrumentedResponseWriter provides response writer information in a
// context. This variant is only used in the case where CloseNotifier is not
// implemented by the parent ResponseWriter.
type instrumentedResponseWriter struct {
http.ResponseWriter
context.Context
mu sync.Mutex
status int
written int64
}
func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) {
n, err = irw.ResponseWriter.Write(p)
irw.mu.Lock()
irw.written += int64(n)
// Guess the likely status if not set.
if irw.status == 0 {
irw.status = http.StatusOK
}
irw.mu.Unlock()
return
}
func (irw *instrumentedResponseWriter) WriteHeader(status int) {
irw.ResponseWriter.WriteHeader(status)
irw.mu.Lock()
irw.status = status
irw.mu.Unlock()
}
func (irw *instrumentedResponseWriter) Flush() {
if flusher, ok := irw.ResponseWriter.(http.Flusher); ok {
flusher.Flush()
}
}
func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} {
if keyStr, ok := key.(string); ok {
switch keyStr {
case "http.response":
return irw
case "http.response.written":
irw.mu.Lock()
defer irw.mu.Unlock()
return irw.written
case "http.response.status":
irw.mu.Lock()
defer irw.mu.Unlock()
return irw.status
case "http.response.contenttype":
if ct := irw.Header().Get("Content-Type"); ct != "" {
return ct
}
default:
// no match; fall back to standard behavior below
}
}
return irw.Context.Value(key)
}
package dcontext
import (
"context"
"fmt"
"runtime"
"sync"
"github.com/sirupsen/logrus"
)
var (
defaultLogger *logrus.Entry = logrus.StandardLogger().WithField("go.version", runtime.Version())
defaultLoggerMu sync.RWMutex
)
// Logger provides a leveled-logging interface.
type Logger interface {
// standard logger methods
Print(args ...interface{})
Printf(format string, args ...interface{})
Println(args ...interface{})
Fatal(args ...interface{})
Fatalf(format string, args ...interface{})
Fatalln(args ...interface{})
Panic(args ...interface{})
Panicf(format string, args ...interface{})
Panicln(args ...interface{})
// Leveled methods, from logrus
Debug(args ...interface{})
Debugf(format string, args ...interface{})
Debugln(args ...interface{})
Error(args ...interface{})
Errorf(format string, args ...interface{})
Errorln(args ...interface{})
Info(args ...interface{})
Infof(format string, args ...interface{})
Infoln(args ...interface{})
Warn(args ...interface{})
Warnf(format string, args ...interface{})
Warnln(args ...interface{})
WithError(err error) *logrus.Entry
}
type loggerKey struct{}
// WithLogger creates a new context with provided logger.
func WithLogger(ctx context.Context, logger Logger) context.Context {
return context.WithValue(ctx, loggerKey{}, logger)
}
// GetLoggerWithField returns a logger instance with the specified field key
// and value without affecting the context. Extra specified keys will be
// resolved from the context.
func GetLoggerWithField(ctx context.Context, key, value interface{}, keys ...interface{}) Logger {
return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value)
}
// GetLoggerWithFields returns a logger instance with the specified fields
// without affecting the context. Extra specified keys will be resolved from
// the context.
func GetLoggerWithFields(ctx context.Context, fields map[interface{}]interface{}, keys ...interface{}) Logger {
// must convert from interface{} -> interface{} to string -> interface{} for logrus.
lfields := make(logrus.Fields, len(fields))
for key, value := range fields {
lfields[fmt.Sprint(key)] = value
}
return getLogrusLogger(ctx, keys...).WithFields(lfields)
}
// GetLogger returns the logger from the current context, if present. If one
// or more keys are provided, they will be resolved on the context and
// included in the logger. While context.Value takes an interface, any key
// argument passed to GetLogger will be passed to fmt.Sprint when expanded as
// a logging key field. If context keys are integer constants, for example,
// it's recommended that a String method be implemented.
func GetLogger(ctx context.Context, keys ...interface{}) Logger {
return getLogrusLogger(ctx, keys...)
}
// SetDefaultLogger sets the default logger upon which to base new loggers.
func SetDefaultLogger(logger Logger) {
entry, ok := logger.(*logrus.Entry)
if !ok {
return
}
defaultLoggerMu.Lock()
defaultLogger = entry
defaultLoggerMu.Unlock()
}
// getLogrusLogger returns the logrus logger for the context. If one or more
// keys are provided, they will be resolved on the context and included in the
// logger. Only use this function if specific logrus functionality is
// required.
func getLogrusLogger(ctx context.Context, keys ...interface{}) *logrus.Entry {
var logger *logrus.Entry
// Get a logger, if it is present.
loggerInterface := ctx.Value(loggerKey{})
if loggerInterface != nil {
if lgr, ok := loggerInterface.(*logrus.Entry); ok {
logger = lgr
}
}
if logger == nil {
fields := logrus.Fields{}
// Fill in the instance id, if we have it.
instanceID := ctx.Value("instance.id")
if instanceID != nil {
fields["instance.id"] = instanceID
}
defaultLoggerMu.RLock()
logger = defaultLogger.WithFields(fields)
defaultLoggerMu.RUnlock()
}
fields := logrus.Fields{}
for _, key := range keys {
v := ctx.Value(key)
if v != nil {
fields[fmt.Sprint(key)] = v
}
}
return logger.WithFields(fields)
}
package dcontext
import (
"context"
"runtime"
"time"
"github.com/distribution/distribution/v3/internal/uuid"
)
// WithTrace allocates a traced timing span in a new context. This allows a
// caller to track the time between calling WithTrace and the returned done
// function. When the done function is called, a log message is emitted with a
// "trace.duration" field, corresponding to the elapsed time and a
// "trace.func" field, corresponding to the function that called WithTrace.
//
// The logging keys "trace.id" and "trace.parent.id" are provided to implement
// dapper-like tracing. This function should be complemented with a WithSpan
// method that could be used for tracing distributed RPC calls.
//
// The main benefit of this function is to post-process log messages or
// intercept them in a hook to provide timing data. Trace ids and parent ids
// can also be linked to provide call tracing, if so required.
//
// Here is an example of the usage:
//
// func timedOperation(ctx context.Context) {
// ctx, done := WithTrace(ctx)
// defer done("this will be the log message")
// // ... function body ...
// }
//
// If the function ran for roughly 1s, such a usage would emit a log message
// as follows:
//
// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/distribution/distribution/context.traceOperation trace.id=<id> ...
//
// Notice that the function name is automatically resolved, along with the
// package and a trace id is emitted that can be linked with parent ids.
func WithTrace(ctx context.Context) (context.Context, func(format string, a ...interface{})) {
if ctx == nil {
ctx = Background()
}
pc, file, line, _ := runtime.Caller(1)
f := runtime.FuncForPC(pc)
ctx = &traced{
Context: ctx,
id: uuid.NewString(),
start: time.Now(),
parent: GetStringValue(ctx, "trace.id"),
fnname: f.Name(),
file: file,
line: line,
}
return ctx, func(format string, a ...interface{}) {
GetLogger(ctx,
"trace.duration",
"trace.id",
"trace.parent.id",
"trace.func",
"trace.file",
"trace.line").
Debugf(format, a...)
}
}
// traced represents a context that is traced for function call timing. It
// also provides fast lookup for the various attributes that are available on
// the trace.
type traced struct {
context.Context
id string
parent string
start time.Time
fnname string
file string
line int
}
func (ts *traced) Value(key interface{}) interface{} {
switch key {
case "trace.start":
return ts.start
case "trace.duration":
return time.Since(ts.start)
case "trace.id":
return ts.id
case "trace.parent.id":
if ts.parent == "" {
return nil // must return nil to signal no parent.
}
return ts.parent
case "trace.func":
return ts.fnname
case "trace.file":
return ts.file
case "trace.line":
return ts.line
}
return ts.Context.Value(key)
}
package dcontext
import (
"context"
"time"
)
// Since looks up key, which should be a time.Time, and returns the duration
// since that time. If the key is not found, the value returned will be zero.
// This is helpful when inferring metrics related to context execution times.
func Since(ctx context.Context, key interface{}) time.Duration {
if startedAt, ok := ctx.Value(key).(time.Time); ok {
return time.Since(startedAt)
}
return 0
}
// GetStringValue returns a string value from the context. The empty string
// will be returned if not found.
func GetStringValue(ctx context.Context, key interface{}) (value string) {
if valuev, ok := ctx.Value(key).(string); ok {
value = valuev
}
return value
}
package dcontext
import "context"
type versionKey struct{}
func (versionKey) String() string { return "version" }
// WithVersion stores the application version in the context. The new context
// gets a logger to ensure log messages are marked with the application
// version.
func WithVersion(ctx context.Context, version string) context.Context {
ctx = context.WithValue(ctx, versionKey{}, version)
// push a new logger onto the stack
return WithLogger(ctx, GetLogger(ctx, versionKey{}))
}
// GetVersion returns the application version from the context. An empty
// string may be returned if the version was not set on the context.
func GetVersion(ctx context.Context) string {
return GetStringValue(ctx, versionKey{})
}
package requestutil
import (
"net"
"net/http"
"strings"
log "github.com/sirupsen/logrus"
)
func parseIP(ipStr string) net.IP {
ip := net.ParseIP(ipStr)
if ip == nil {
log.Warnf("invalid remote IP address: %q", ipStr)
}
return ip
}
// RemoteAddr extracts the remote address of the request, taking into
// account proxy headers.
func RemoteAddr(r *http.Request) string {
if prior := r.Header.Get("X-Forwarded-For"); prior != "" {
remoteAddr, _, _ := strings.Cut(prior, ",")
remoteAddr = strings.Trim(remoteAddr, " ")
if parseIP(remoteAddr) != nil {
return remoteAddr
}
}
// X-Real-Ip is less supported, but worth checking in the
// absence of X-Forwarded-For
if realIP := r.Header.Get("X-Real-Ip"); realIP != "" {
if parseIP(realIP) != nil {
return realIP
}
}
return r.RemoteAddr
}
// RemoteIP extracts the remote IP of the request, taking into
// account proxy headers.
func RemoteIP(r *http.Request) string {
addr := RemoteAddr(r)
// Try parsing it as "IP:port"
if ip, _, err := net.SplitHostPort(addr); err == nil {
return ip
}
return addr
}
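// Hedged sketch: proxy-aware address resolution. The header values below are
// assumptions; the first X-Forwarded-For entry wins when it parses as an IP,
// with the raw RemoteAddr as the final fallback.
func exampleRemoteIP() {
	r, _ := http.NewRequest(http.MethodGet, "http://registry.example.test/v2/", nil)
	r.RemoteAddr = "10.0.0.9:51234"
	r.Header.Set("X-Forwarded-For", "203.0.113.7, 10.0.0.1")
	addr := RemoteAddr(r) // "203.0.113.7"
	ip := RemoteIP(r)     // "203.0.113.7"
	_, _ = addr, ip
}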
package uuid
import (
"github.com/google/uuid"
)
// NewString returns a new V7 UUID string. V7 UUIDs are time-ordered for better
// database performance. It panics on error to maintain compatibility with
// google/uuid's NewString method.
func NewString() string {
return uuid.Must(uuid.NewV7()).String()
}
package manifestlist
import (
"encoding/json"
"errors"
"fmt"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/manifest"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
const (
// MediaTypeManifestList specifies the mediaType for manifest lists.
MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json"
)
// SchemaVersion provides a pre-initialized version structure for this
// package's version of the manifest.
//
// Deprecated: use [specs.Versioned] and set MediaType on the manifest
// to [MediaTypeManifestList].
//
//nolint:staticcheck // ignore SA1019: manifest.Versioned is deprecated:
var SchemaVersion = manifest.Versioned{
SchemaVersion: 2,
MediaType: MediaTypeManifestList,
}
func init() {
if err := distribution.RegisterManifestSchema(MediaTypeManifestList, unmarshalManifestList); err != nil {
panic(fmt.Sprintf("Unable to register manifest: %s", err))
}
}
func unmarshalManifestList(b []byte) (distribution.Manifest, v1.Descriptor, error) {
m := &DeserializedManifestList{}
if err := m.UnmarshalJSON(b); err != nil {
return nil, v1.Descriptor{}, err
}
if m.MediaType != MediaTypeManifestList {
return nil, v1.Descriptor{}, fmt.Errorf("mediaType in manifest list should be '%s' not '%s'", MediaTypeManifestList, m.MediaType)
}
return m, v1.Descriptor{
Digest: digest.FromBytes(b),
Size: int64(len(b)),
MediaType: MediaTypeManifestList,
}, nil
}
// PlatformSpec specifies a platform where a particular image manifest is
// applicable.
type PlatformSpec struct {
// Architecture field specifies the CPU architecture, for example
// `amd64` or `ppc64`.
Architecture string `json:"architecture"`
// OS specifies the operating system, for example `linux` or `windows`.
OS string `json:"os"`
// OSVersion is an optional field specifying the operating system
// version, for example `10.0.10586`.
OSVersion string `json:"os.version,omitempty"`
// OSFeatures is an optional field specifying an array of strings,
// each listing a required OS feature (for example on Windows `win32k`).
OSFeatures []string `json:"os.features,omitempty"`
// Variant is an optional field specifying a variant of the CPU, for
// example `ppc64le` to specify a little-endian version of a PowerPC CPU.
Variant string `json:"variant,omitempty"`
// Features is an optional field specifying an array of strings, each
// listing a required CPU feature (for example `sse4` or `aes`).
Features []string `json:"features,omitempty"`
}
// A ManifestDescriptor references a platform-specific manifest.
type ManifestDescriptor struct {
v1.Descriptor
// Platform specifies which platform the manifest pointed to by the
// descriptor runs on.
Platform PlatformSpec `json:"platform"`
}
// ManifestList references manifests for various platforms.
type ManifestList struct {
specs.Versioned
// MediaType is the media type of this schema.
MediaType string `json:"mediaType,omitempty"`
// Manifests references a list of manifests
Manifests []ManifestDescriptor `json:"manifests"`
}
// References returns the distribution descriptors for the referenced image
// manifests.
func (m ManifestList) References() []v1.Descriptor {
dependencies := make([]v1.Descriptor, len(m.Manifests))
for i := range m.Manifests {
dependencies[i] = m.Manifests[i].Descriptor
dependencies[i].Platform = &v1.Platform{
Architecture: m.Manifests[i].Platform.Architecture,
OS: m.Manifests[i].Platform.OS,
OSVersion: m.Manifests[i].Platform.OSVersion,
OSFeatures: m.Manifests[i].Platform.OSFeatures,
Variant: m.Manifests[i].Platform.Variant,
}
}
return dependencies
}
// DeserializedManifestList wraps ManifestList with a copy of the original
// JSON.
type DeserializedManifestList struct {
ManifestList
// canonical is the canonical byte representation of the Manifest.
canonical []byte
}
// FromDescriptors takes a slice of descriptors, and returns a
// DeserializedManifestList which contains the resulting manifest list
// and its JSON representation.
func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) {
return fromDescriptorsWithMediaType(descriptors, MediaTypeManifestList)
}
// fromDescriptorsWithMediaType is for testing purposes; it's useful to be able to specify the media type explicitly
func fromDescriptorsWithMediaType(descriptors []ManifestDescriptor, mediaType string) (*DeserializedManifestList, error) {
m := ManifestList{
Versioned: specs.Versioned{SchemaVersion: 2},
MediaType: mediaType,
}
m.Manifests = make([]ManifestDescriptor, len(descriptors))
copy(m.Manifests, descriptors)
deserialized := DeserializedManifestList{
ManifestList: m,
}
var err error
deserialized.canonical, err = json.MarshalIndent(&m, "", " ")
return &deserialized, err
}
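// Hedged sketch: assembling a two-platform manifest list. exampleManifestList
// is illustrative; the digests are derived from placeholder strings rather
// than real manifest payloads, and the sizes are made up.
func exampleManifestList() (*DeserializedManifestList, error) {
	descriptors := []ManifestDescriptor{
		{
			Descriptor: v1.Descriptor{
				MediaType: "application/vnd.docker.distribution.manifest.v2+json",
				Digest:    digest.FromString("amd64 manifest (placeholder)"),
				Size:      1357,
			},
			Platform: PlatformSpec{Architecture: "amd64", OS: "linux"},
		},
		{
			Descriptor: v1.Descriptor{
				MediaType: "application/vnd.docker.distribution.manifest.v2+json",
				Digest:    digest.FromString("arm64 manifest (placeholder)"),
				Size:      2468,
			},
			Platform: PlatformSpec{Architecture: "arm64", OS: "linux", Variant: "v8"},
		},
	}
	// FromDescriptors pins SchemaVersion and MediaType and records the
	// canonical JSON used for content addressing.
	return FromDescriptors(descriptors)
}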
// UnmarshalJSON populates a new ManifestList struct from JSON data.
func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error {
m.canonical = make([]byte, len(b))
// store manifest list in canonical
copy(m.canonical, b)
// Unmarshal canonical JSON into ManifestList object
var manifestList ManifestList
if err := json.Unmarshal(m.canonical, &manifestList); err != nil {
return err
}
m.ManifestList = manifestList
return nil
}
// MarshalJSON returns the contents of canonical. If canonical is empty,
// marshals the inner contents.
func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) {
if len(m.canonical) > 0 {
return m.canonical, nil
}
return nil, errors.New("JSON representation not initialized in DeserializedManifestList")
}
// Payload returns the raw content of the manifest list. The contents can be
// used to calculate the content identifier.
func (m DeserializedManifestList) Payload() (string, []byte, error) {
var mediaType string
if m.MediaType == "" {
mediaType = v1.MediaTypeImageIndex
} else {
mediaType = m.MediaType
}
return mediaType, m.canonical, nil
}
// validateManifestList returns an error if the byte slice is invalid JSON or if it
// contains fields that belong to a manifest
func validateManifestList(b []byte) error {
var doc struct {
Config interface{} `json:"config,omitempty"`
Layers interface{} `json:"layers,omitempty"`
}
if err := json.Unmarshal(b, &doc); err != nil {
return err
}
if doc.Config != nil || doc.Layers != nil {
return errors.New("manifestlist: expected list but found manifest")
}
return nil
}
package ocischema
import (
"context"
"errors"
"github.com/distribution/distribution/v3"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// Builder is a type for constructing manifests.
type Builder struct {
// bs is a BlobService used to publish the configuration blob.
bs distribution.BlobService
// configJSON references the image configuration as raw JSON bytes.
configJSON []byte
// layers is a list of layer descriptors that gets built by successive
// calls to AppendReference.
layers []v1.Descriptor
// annotations contains arbitrary metadata relating to the targeted content.
annotations map[string]string
// For testing purposes
mediaType string
}
// NewManifestBuilder is used to build new manifests for the current schema
// version. It takes a BlobService so it can publish the configuration blob
// as part of the Build process, and annotations.
func NewManifestBuilder(bs distribution.BlobService, configJSON []byte, annotations map[string]string) *Builder {
mb := &Builder{
bs: bs,
configJSON: make([]byte, len(configJSON)),
annotations: annotations,
mediaType: v1.MediaTypeImageManifest,
}
copy(mb.configJSON, configJSON)
return mb
}
// SetMediaType assigns the passed media type, or returns an error if the media
// type is not valid for OCI image manifests. Currently the valid values are ""
// and "application/vnd.oci.image.manifest.v1+json".
func (mb *Builder) SetMediaType(mediaType string) error {
if mediaType != "" && mediaType != v1.MediaTypeImageManifest {
return errors.New("invalid media type for OCI image manifest")
}
mb.mediaType = mediaType
return nil
}
// Build produces a final manifest from the given references.
func (mb *Builder) Build(ctx context.Context) (distribution.Manifest, error) {
m := Manifest{
Versioned: specs.Versioned{SchemaVersion: 2},
MediaType: mb.mediaType,
Layers: make([]v1.Descriptor, len(mb.layers)),
Annotations: mb.annotations,
}
copy(m.Layers, mb.layers)
configDigest := digest.FromBytes(mb.configJSON)
var err error
m.Config, err = mb.bs.Stat(ctx, configDigest)
switch err {
case nil:
// Override MediaType, since Put always replaces the specified media
// type with application/octet-stream in the descriptor it returns.
m.Config.MediaType = v1.MediaTypeImageConfig
return FromStruct(m)
case distribution.ErrBlobUnknown:
// nop
default:
return nil, err
}
// Add config to the blob store
m.Config, err = mb.bs.Put(ctx, v1.MediaTypeImageConfig, mb.configJSON)
// Override MediaType, since Put always replaces the specified media
// type with application/octet-stream in the descriptor it returns.
m.Config.MediaType = v1.MediaTypeImageConfig
if err != nil {
return nil, err
}
return FromStruct(m)
}
// AppendReference adds a reference to the current ManifestBuilder.
func (mb *Builder) AppendReference(ref v1.Descriptor) error {
mb.layers = append(mb.layers, ref)
return nil
}
// References returns the current references added to this builder.
func (mb *Builder) References() []v1.Descriptor {
return mb.layers
}
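// Hedged sketch of the builder flow: exampleBuildManifest and its parameters
// are assumptions for illustration. Build stats the config blob and uploads
// it if missing, then emits a manifest carrying the appended layers.
func exampleBuildManifest(ctx context.Context, bs distribution.BlobService, configJSON []byte, layer v1.Descriptor) (distribution.Manifest, error) {
	builder := NewManifestBuilder(bs, configJSON, map[string]string{
		"org.example.note": "illustrative annotation", // assumed annotation key
	})
	if err := builder.AppendReference(layer); err != nil {
		return nil, err
	}
	return builder.Build(ctx)
}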
package ocischema
import (
"encoding/json"
"errors"
"fmt"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/manifest"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// IndexSchemaVersion provides a pre-initialized version structure for OCI Image
// Indices.
//
// Deprecated: use [specs.Versioned] and set MediaType on the manifest
// to [v1.MediaTypeImageIndex].
//
//nolint:staticcheck // ignore SA1019: manifest.Versioned is deprecated:
var IndexSchemaVersion = manifest.Versioned{
SchemaVersion: 2,
MediaType: v1.MediaTypeImageIndex,
}
func init() {
if err := distribution.RegisterManifestSchema(v1.MediaTypeImageIndex, unmarshalImageIndex); err != nil {
panic(fmt.Sprintf("Unable to register OCI Image Index: %s", err))
}
}
func unmarshalImageIndex(b []byte) (distribution.Manifest, v1.Descriptor, error) {
if err := validateIndex(b); err != nil {
return nil, v1.Descriptor{}, err
}
m := &DeserializedImageIndex{}
if err := m.UnmarshalJSON(b); err != nil {
return nil, v1.Descriptor{}, err
}
if m.MediaType != "" && m.MediaType != v1.MediaTypeImageIndex {
return nil, v1.Descriptor{}, fmt.Errorf("if present, mediaType in image index should be '%s' not '%s'", v1.MediaTypeImageIndex, m.MediaType)
}
return m, v1.Descriptor{
MediaType: v1.MediaTypeImageIndex,
Digest: digest.FromBytes(b),
Size: int64(len(b)),
Annotations: m.Annotations,
}, nil
}
// ImageIndex references manifests for various platforms.
type ImageIndex struct {
specs.Versioned
// MediaType is the media type of this schema.
MediaType string `json:"mediaType,omitempty"`
// Manifests references a list of manifests
Manifests []v1.Descriptor `json:"manifests"`
// Annotations is an optional field that contains arbitrary metadata for the
// image index
Annotations map[string]string `json:"annotations,omitempty"`
}
// References returns the distribution descriptors for the referenced image
// manifests.
func (ii ImageIndex) References() []v1.Descriptor {
return ii.Manifests
}
// DeserializedImageIndex wraps ImageIndex with a copy of the original
// JSON.
type DeserializedImageIndex struct {
ImageIndex
// canonical is the canonical byte representation of the Manifest.
canonical []byte
}
// FromDescriptors takes a slice of descriptors and a map of annotations, and
// returns a DeserializedImageIndex which contains the resulting image index
// and its JSON representation. If annotations is nil or empty then the
// annotations property will be omitted from the JSON representation.
func FromDescriptors(descriptors []v1.Descriptor, annotations map[string]string) (*DeserializedImageIndex, error) {
return fromDescriptorsWithMediaType(descriptors, annotations, v1.MediaTypeImageIndex)
}
// fromDescriptorsWithMediaType is for testing purposes; it's useful to be able to specify the media type explicitly
func fromDescriptorsWithMediaType(descriptors []v1.Descriptor, annotations map[string]string, mediaType string) (_ *DeserializedImageIndex, err error) {
m := ImageIndex{
Versioned: specs.Versioned{SchemaVersion: 2},
MediaType: mediaType,
Annotations: annotations,
}
m.Manifests = make([]v1.Descriptor, len(descriptors))
copy(m.Manifests, descriptors)
deserialized := DeserializedImageIndex{
ImageIndex: m,
}
deserialized.canonical, err = json.MarshalIndent(&m, "", " ")
return &deserialized, err
}
// UnmarshalJSON populates a new ImageIndex struct from JSON data.
func (m *DeserializedImageIndex) UnmarshalJSON(b []byte) error {
m.canonical = make([]byte, len(b))
// store image index in canonical
copy(m.canonical, b)
// Unmarshal canonical JSON into ImageIndex object
var manifestList ImageIndex
if err := json.Unmarshal(m.canonical, &manifestList); err != nil {
return err
}
m.ImageIndex = manifestList
return nil
}
// MarshalJSON returns the contents of canonical. If canonical is empty,
// marshals the inner contents.
func (m *DeserializedImageIndex) MarshalJSON() ([]byte, error) {
if len(m.canonical) > 0 {
return m.canonical, nil
}
return nil, errors.New("JSON representation not initialized in DeserializedImageIndex")
}
// Payload returns the raw content of the image index. The contents can be
// used to calculate the content identifier.
func (m DeserializedImageIndex) Payload() (string, []byte, error) {
mediaType := m.MediaType
if m.MediaType == "" {
mediaType = v1.MediaTypeImageIndex
}
return mediaType, m.canonical, nil
}
// validateIndex returns an error if the byte slice is invalid JSON or if it
// contains fields that belong to a manifest
func validateIndex(b []byte) error {
var doc struct {
Config interface{} `json:"config,omitempty"`
Layers interface{} `json:"layers,omitempty"`
}
if err := json.Unmarshal(b, &doc); err != nil {
return err
}
if doc.Config != nil || doc.Layers != nil {
return errors.New("index: expected index but found manifest")
}
return nil
}
package ocischema
import (
"encoding/json"
"errors"
"fmt"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/manifest"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// SchemaVersion provides a pre-initialized version structure for OCI Image
// Manifests.
//
// Deprecated: use [specs.Versioned] and set MediaType on the manifest
// to [v1.MediaTypeImageManifest].
//
//nolint:staticcheck // ignore SA1019: manifest.Versioned is deprecated:
var SchemaVersion = manifest.Versioned{
SchemaVersion: 2,
MediaType: v1.MediaTypeImageManifest,
}
func init() {
if err := distribution.RegisterManifestSchema(v1.MediaTypeImageManifest, unmarshalOCISchema); err != nil {
panic(fmt.Sprintf("Unable to register manifest: %s", err))
}
}
func unmarshalOCISchema(b []byte) (distribution.Manifest, v1.Descriptor, error) {
if err := validateManifest(b); err != nil {
return nil, v1.Descriptor{}, err
}
m := &DeserializedManifest{}
if err := m.UnmarshalJSON(b); err != nil {
return nil, v1.Descriptor{}, err
}
return m, v1.Descriptor{
MediaType: v1.MediaTypeImageManifest,
Digest: digest.FromBytes(b),
Size: int64(len(b)),
Annotations: m.Annotations,
}, nil
}
// Manifest defines a ocischema manifest.
type Manifest struct {
specs.Versioned
// MediaType is the media type of this schema.
MediaType string `json:"mediaType,omitempty"`
// Config references the image configuration as a blob.
Config v1.Descriptor `json:"config"`
// Layers lists descriptors for the layers referenced by the
// configuration.
Layers []v1.Descriptor `json:"layers"`
// Annotations contains arbitrary metadata for the image manifest.
Annotations map[string]string `json:"annotations,omitempty"`
}
// References returns the descriptors of this manifest's references.
func (m Manifest) References() []v1.Descriptor {
references := make([]v1.Descriptor, 0, 1+len(m.Layers))
references = append(references, m.Config)
references = append(references, m.Layers...)
return references
}
// Target returns the target of this manifest.
func (m Manifest) Target() v1.Descriptor {
return m.Config
}
// DeserializedManifest wraps Manifest with a copy of the original JSON.
// It satisfies the distribution.Manifest interface.
type DeserializedManifest struct {
Manifest
// canonical is the canonical byte representation of the Manifest.
canonical []byte
}
// FromStruct takes a Manifest structure, marshals it to JSON, and returns a
// DeserializedManifest which contains the manifest and its JSON representation.
func FromStruct(m Manifest) (*DeserializedManifest, error) {
var deserialized DeserializedManifest
deserialized.Manifest = m
var err error
deserialized.canonical, err = json.MarshalIndent(&m, "", " ")
return &deserialized, err
}
// UnmarshalJSON populates a new Manifest struct from JSON data.
func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
m.canonical = make([]byte, len(b))
// store manifest in canonical
copy(m.canonical, b)
// Unmarshal canonical JSON into Manifest object
var mfst Manifest
if err := json.Unmarshal(m.canonical, &mfst); err != nil {
return err
}
if mfst.MediaType != "" && mfst.MediaType != v1.MediaTypeImageManifest {
return fmt.Errorf("if present, mediaType in manifest should be '%s' not '%s'",
v1.MediaTypeImageManifest, mfst.MediaType)
}
m.Manifest = mfst
return nil
}
// MarshalJSON returns the contents of canonical. If canonical is empty,
// marshals the inner contents.
func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {
if len(m.canonical) > 0 {
return m.canonical, nil
}
return nil, errors.New("JSON representation not initialized in DeserializedManifest")
}
// Payload returns the raw content of the manifest. The contents can be used to
// calculate the content identifier.
func (m *DeserializedManifest) Payload() (string, []byte, error) {
return v1.MediaTypeImageManifest, m.canonical, nil
}
// validateManifest returns an error if the byte slice is invalid JSON or if it
// contains fields that belong to an index
func validateManifest(b []byte) error {
var doc struct {
Manifests interface{} `json:"manifests,omitempty"`
}
if err := json.Unmarshal(b, &doc); err != nil {
return err
}
if doc.Manifests != nil {
return errors.New("ocimanifest: expected manifest but found index")
}
return nil
}
package schema2
import (
"context"
"github.com/distribution/distribution/v3"
"github.com/opencontainers/image-spec/specs-go"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// Builder is a type for constructing manifests.
type Builder struct {
// configDescriptor is used to describe configuration
configDescriptor v1.Descriptor
// configJSON references
configJSON []byte
// dependencies is a list of descriptors that gets built by successive
// calls to AppendReference. In case of image configuration these are layers.
dependencies []v1.Descriptor
}
// NewManifestBuilder is used to build new manifests for the current schema
// version. It takes the descriptor of the configuration blob and a copy of
// its raw JSON; unlike the ocischema builder, it does not publish the blob.
func NewManifestBuilder(configDescriptor v1.Descriptor, configJSON []byte) *Builder {
mb := &Builder{
configDescriptor: configDescriptor,
configJSON: make([]byte, len(configJSON)),
}
copy(mb.configJSON, configJSON)
return mb
}
// Build produces a final manifest from the given references.
func (mb *Builder) Build(ctx context.Context) (distribution.Manifest, error) {
m := Manifest{
Versioned: specs.Versioned{SchemaVersion: defaultSchemaVersion},
MediaType: defaultMediaType,
Layers: make([]v1.Descriptor, len(mb.dependencies)),
}
copy(m.Layers, mb.dependencies)
m.Config = mb.configDescriptor
return FromStruct(m)
}
// AppendReference adds a reference to the current ManifestBuilder.
func (mb *Builder) AppendReference(ref v1.Descriptor) error {
mb.dependencies = append(mb.dependencies, ref)
return nil
}
// References returns the current references added to this builder.
func (mb *Builder) References() []v1.Descriptor {
return mb.dependencies
}
package schema2
import (
"encoding/json"
"errors"
"fmt"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/manifest"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
const (
// MediaTypeManifest specifies the mediaType for the current version.
MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json"
// MediaTypeImageConfig specifies the mediaType for the image configuration.
MediaTypeImageConfig = "application/vnd.docker.container.image.v1+json"
// MediaTypePluginConfig specifies the mediaType for plugin configuration.
MediaTypePluginConfig = "application/vnd.docker.plugin.v1+json"
// MediaTypeLayer is the mediaType used for layers referenced by the
// manifest.
MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"
// MediaTypeForeignLayer is the mediaType used for layers that must be
// downloaded from foreign URLs.
MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
// MediaTypeUncompressedLayer is the mediaType used for layers which
// are not compressed.
MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
)
const (
defaultSchemaVersion = 2
defaultMediaType = MediaTypeManifest
)
// SchemaVersion provides a pre-initialized version structure for this
// package's version of the manifest.
//
// Deprecated: use [specs.Versioned] and set MediaType on the manifest
// to [MediaTypeManifest].
//
//nolint:staticcheck // ignore SA1019: manifest.Versioned is deprecated:
var SchemaVersion = manifest.Versioned{
SchemaVersion: defaultSchemaVersion,
MediaType: defaultMediaType,
}
func init() {
if err := distribution.RegisterManifestSchema(defaultMediaType, unmarshalSchema2); err != nil {
panic(fmt.Sprintf("Unable to register manifest: %s", err))
}
}
func unmarshalSchema2(b []byte) (distribution.Manifest, v1.Descriptor, error) {
m := &DeserializedManifest{}
if err := m.UnmarshalJSON(b); err != nil {
return nil, v1.Descriptor{}, err
}
return m, v1.Descriptor{
Digest: digest.FromBytes(b),
Size: int64(len(b)),
MediaType: defaultMediaType,
}, nil
}
// Manifest defines a schema2 manifest.
type Manifest struct {
specs.Versioned
// MediaType is the media type of this schema.
MediaType string `json:"mediaType,omitempty"`
// Config references the image configuration as a blob.
Config v1.Descriptor `json:"config"`
// Layers lists descriptors for the layers referenced by the
// configuration.
Layers []v1.Descriptor `json:"layers"`
}
// References returns the descriptors of this manifest's references.
func (m Manifest) References() []v1.Descriptor {
references := make([]v1.Descriptor, 0, 1+len(m.Layers))
references = append(references, m.Config)
references = append(references, m.Layers...)
return references
}
// Target returns the target of this manifest.
func (m Manifest) Target() v1.Descriptor {
return m.Config
}
// DeserializedManifest wraps Manifest with a copy of the original JSON.
// It satisfies the distribution.Manifest interface.
type DeserializedManifest struct {
Manifest
// canonical is the canonical byte representation of the Manifest.
canonical []byte
}
// FromStruct takes a Manifest structure, marshals it to JSON, and returns a
// DeserializedManifest which contains the manifest and its JSON representation.
func FromStruct(m Manifest) (*DeserializedManifest, error) {
var deserialized DeserializedManifest
deserialized.Manifest = m
var err error
deserialized.canonical, err = json.MarshalIndent(&m, "", " ")
return &deserialized, err
}
// UnmarshalJSON populates a new Manifest struct from JSON data.
func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
m.canonical = make([]byte, len(b))
// store manifest in canonical
copy(m.canonical, b)
// Unmarshal canonical JSON into Manifest object
var mfst Manifest
if err := json.Unmarshal(m.canonical, &mfst); err != nil {
return err
}
if mfst.MediaType != defaultMediaType {
return fmt.Errorf("mediaType in manifest should be '%s' not '%s'", defaultMediaType, mfst.MediaType)
}
m.Manifest = mfst
return nil
}
// MarshalJSON returns the contents of canonical. If canonical is empty,
// marshals the inner contents.
func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {
if len(m.canonical) > 0 {
return m.canonical, nil
}
return nil, errors.New("JSON representation not initialized in DeserializedManifest")
}
// Payload returns the raw content of the manifest. The contents can be used to
// calculate the content identifier.
func (m DeserializedManifest) Payload() (string, []byte, error) {
return m.MediaType, m.canonical, nil
}
package distribution
import (
"context"
"fmt"
"mime"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// Manifest represents a registry object specifying a set of
// references and an optional target
type Manifest interface {
// References returns a list of objects which make up this manifest.
// A reference is anything which can be represented by a
// Descriptor. These can consist of layers, resources or other
// manifests.
//
// While no particular order is required, implementations should return
// them from highest to lowest priority. For example, one might want to
// return the base layer before the top layer.
References() []v1.Descriptor
// Payload provides the serialized format of the manifest, in addition to
// the media type.
Payload() (mediaType string, payload []byte, err error)
}
// ManifestService describes operations on manifests.
type ManifestService interface {
// Exists returns true if the manifest exists.
Exists(ctx context.Context, dgst digest.Digest) (bool, error)
// Get retrieves the manifest specified by the given digest
Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)
// Put creates or updates the given manifest returning the manifest digest
Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)
// Delete removes the manifest specified by the given digest. Deleting
// a manifest that doesn't exist will return ErrManifestNotFound
Delete(ctx context.Context, dgst digest.Digest) error
}
// ManifestEnumerator enables iterating over manifests
type ManifestEnumerator interface {
// Enumerate calls ingester for each manifest.
Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
}
// Describable is an interface for descriptors.
//
// Implementations of Describable are generally objects which can be
// described, not simply descriptors.
type Describable interface {
// Descriptor returns the descriptor.
Descriptor() v1.Descriptor
}
// ManifestMediaTypes returns the supported media types for manifests.
func ManifestMediaTypes() (mediaTypes []string) {
for t := range mappings {
if t != "" {
mediaTypes = append(mediaTypes, t)
}
}
return
}
// UnmarshalFunc implements manifest unmarshalling for a given MediaType
type UnmarshalFunc func([]byte) (Manifest, v1.Descriptor, error)
var mappings = make(map[string]UnmarshalFunc)
// UnmarshalManifest looks up manifest unmarshal functions based on
// MediaType
func UnmarshalManifest(ctHeader string, p []byte) (Manifest, v1.Descriptor, error) {
// Need to look up by the actual media type, not the raw contents of
// the header. Strip semicolons and anything following them.
var mediaType string
if ctHeader != "" {
var err error
mediaType, _, err = mime.ParseMediaType(ctHeader)
if err != nil {
return nil, v1.Descriptor{}, err
}
}
unmarshalFunc, ok := mappings[mediaType]
if !ok {
unmarshalFunc, ok = mappings[""]
if !ok {
return nil, v1.Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType)
}
}
return unmarshalFunc(p)
}
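// For example, a Content-Type header carrying parameters, such as
// "application/vnd.docker.distribution.manifest.v2+json; charset=utf-8", is
// reduced to its bare media type before lookup. A sketch against an assumed
// *http.Response:
//
//	p, _ := io.ReadAll(resp.Body)
//	m, desc, err := UnmarshalManifest(resp.Header.Get("Content-Type"), p)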
// RegisterManifestSchema registers an UnmarshalFunc for a given schema type.
// This should be called from the init function of the package implementing a
// specific manifest format.
func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error {
if _, ok := mappings[mediaType]; ok {
return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType)
}
mappings[mediaType] = u
return nil
}
package notifications
import (
"net/http"
"time"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/internal/requestutil"
"github.com/distribution/distribution/v3/internal/uuid"
"github.com/distribution/reference"
events "github.com/docker/go-events"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
type bridge struct {
ub URLBuilder
includeReferences bool
actor ActorRecord
source SourceRecord
request RequestRecord
sink events.Sink
}
var _ Listener = &bridge{}
// URLBuilder defines a subset of url builder to be used by the event listener.
type URLBuilder interface {
BuildManifestURL(name reference.Named) (string, error)
BuildBlobURL(ref reference.Canonical) (string, error)
}
// NewBridge returns a notification listener that writes records to sink,
// using the actor and source. Any URLs populated in the events created by
// this bridge will be built using the URLBuilder.
// TODO(stevvooe): Update this to simply take a context.Context object.
func NewBridge(ub URLBuilder, source SourceRecord, actor ActorRecord, request RequestRecord, sink events.Sink, includeReferences bool) Listener {
return &bridge{
ub: ub,
includeReferences: includeReferences,
actor: actor,
source: source,
request: request,
sink: sink,
}
}
// NewRequestRecord builds a RequestRecord for use in NewBridge from an
// http.Request, associating it with a request id.
func NewRequestRecord(id string, r *http.Request) RequestRecord {
return RequestRecord{
ID: id,
Addr: requestutil.RemoteAddr(r),
Host: r.Host,
Method: r.Method,
UserAgent: r.UserAgent(),
}
}
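// A sketch of assembling the pieces above inside an HTTP handler; urlBuilder,
// source, actor and sink are assumed to come from application setup, and
// requestID from request-id middleware:
//
//	record := NewRequestRecord(requestID, r)
//	listener := NewBridge(urlBuilder, source, actor, record, sink, true)
//	// hand listener to Listen (see the notifications listener file)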
func (b *bridge) ManifestPushed(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error {
manifestEvent, err := b.createManifestEvent(EventActionPush, repo, sm)
if err != nil {
return err
}
for _, option := range options {
if opt, ok := option.(distribution.WithTagOption); ok {
manifestEvent.Target.Tag = opt.Tag
break
}
}
return b.sink.Write(*manifestEvent)
}
func (b *bridge) ManifestPulled(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error {
manifestEvent, err := b.createManifestEvent(EventActionPull, repo, sm)
if err != nil {
return err
}
for _, option := range options {
if opt, ok := option.(distribution.WithTagOption); ok {
manifestEvent.Target.Tag = opt.Tag
break
}
}
return b.sink.Write(*manifestEvent)
}
func (b *bridge) ManifestDeleted(repo reference.Named, dgst digest.Digest) error {
return b.createManifestDeleteEventAndWrite(EventActionDelete, repo, dgst)
}
func (b *bridge) BlobPushed(repo reference.Named, desc v1.Descriptor) error {
return b.createBlobEventAndWrite(EventActionPush, repo, desc)
}
func (b *bridge) BlobPulled(repo reference.Named, desc v1.Descriptor) error {
return b.createBlobEventAndWrite(EventActionPull, repo, desc)
}
func (b *bridge) BlobMounted(repo reference.Named, desc v1.Descriptor, fromRepo reference.Named) error {
event, err := b.createBlobEvent(EventActionMount, repo, desc)
if err != nil {
return err
}
event.Target.FromRepository = fromRepo.Name()
return b.sink.Write(*event)
}
func (b *bridge) BlobDeleted(repo reference.Named, dgst digest.Digest) error {
return b.createBlobDeleteEventAndWrite(EventActionDelete, repo, dgst)
}
func (b *bridge) TagDeleted(repo reference.Named, tag string) error {
event := b.createEvent(EventActionDelete)
event.Target.Repository = repo.Name()
event.Target.Tag = tag
return b.sink.Write(*event)
}
func (b *bridge) RepoDeleted(repo reference.Named) error {
event := b.createEvent(EventActionDelete)
event.Target.Repository = repo.Name()
return b.sink.Write(*event)
}
func (b *bridge) createManifestDeleteEventAndWrite(action string, repo reference.Named, dgst digest.Digest) error {
event := b.createEvent(action)
event.Target.Repository = repo.Name()
event.Target.Digest = dgst
return b.sink.Write(*event)
}
func (b *bridge) createManifestEvent(action string, repo reference.Named, sm distribution.Manifest) (*Event, error) {
event := b.createEvent(action)
event.Target.Repository = repo.Name()
mt, p, err := sm.Payload()
if err != nil {
return nil, err
}
// Ensure we have the canonical manifest descriptor here
manifest, desc, err := distribution.UnmarshalManifest(mt, p)
if err != nil {
return nil, err
}
event.Target.MediaType = mt
event.Target.Digest = desc.Digest
event.Target.Size = desc.Size
event.Target.Length = desc.Size
if b.includeReferences {
event.Target.References = append(event.Target.References, manifest.References()...)
}
ref, err := reference.WithDigest(repo, event.Target.Digest)
if err != nil {
return nil, err
}
event.Target.URL, err = b.ub.BuildManifestURL(ref)
if err != nil {
return nil, err
}
return event, nil
}
func (b *bridge) createBlobDeleteEventAndWrite(action string, repo reference.Named, dgst digest.Digest) error {
event := b.createEvent(action)
event.Target.Digest = dgst
event.Target.Repository = repo.Name()
return b.sink.Write(*event)
}
func (b *bridge) createBlobEventAndWrite(action string, repo reference.Named, desc v1.Descriptor) error {
event, err := b.createBlobEvent(action, repo, desc)
if err != nil {
return err
}
return b.sink.Write(*event)
}
func (b *bridge) createBlobEvent(action string, repo reference.Named, desc v1.Descriptor) (*Event, error) {
event := b.createEvent(action)
event.Target.Descriptor = desc
event.Target.Length = desc.Size
event.Target.Repository = repo.Name()
ref, err := reference.WithDigest(repo, desc.Digest)
if err != nil {
return nil, err
}
event.Target.URL, err = b.ub.BuildBlobURL(ref)
if err != nil {
return nil, err
}
return event, nil
}
// createEvent creates an event with actor and source populated.
func (b *bridge) createEvent(action string) *Event {
event := createEvent(action)
event.Source = b.source
event.Actor = b.actor
event.Request = b.request
return event
}
// createEvent returns a new event, timestamped, with the specified action.
func createEvent(action string) *Event {
return &Event{
ID: uuid.NewString(),
Timestamp: time.Now(),
Action: action,
}
}
package notifications
import (
"maps"
"net/http"
"time"
"github.com/distribution/distribution/v3/configuration"
events "github.com/docker/go-events"
)
// EndpointConfig covers the optional configuration parameters for an active
// endpoint.
type EndpointConfig struct {
Headers http.Header
Timeout time.Duration
Threshold int
Backoff time.Duration
IgnoredMediaTypes []string
Transport *http.Transport `json:"-"`
Ignore configuration.Ignore
}
// defaults sets any zero-valued fields to a reasonable default.
func (ec *EndpointConfig) defaults() {
if ec.Timeout <= 0 {
ec.Timeout = time.Second
}
if ec.Threshold <= 0 {
ec.Threshold = 10
}
if ec.Backoff <= 0 {
ec.Backoff = time.Second
}
if ec.Transport == nil {
ec.Transport = http.DefaultTransport.(*http.Transport)
}
}
// Endpoint is a reliable, queued, thread-safe sink that notifies external
// http services when events are written. Writes are non-blocking and always
// succeed for callers, but events may be queued internally.
type Endpoint struct {
events.Sink
url string
name string
EndpointConfig
metrics *safeMetrics
}
// NewEndpoint returns a running endpoint, ready to receive events.
func NewEndpoint(name, url string, config EndpointConfig) *Endpoint {
var endpoint Endpoint
endpoint.name = name
endpoint.url = url
endpoint.EndpointConfig = config
endpoint.defaults()
endpoint.metrics = newSafeMetrics(name)
// Configure the in-memory queue, retry and http delivery pipeline.
endpoint.Sink = newHTTPSink(
endpoint.url, endpoint.Timeout, endpoint.Headers,
endpoint.Transport, endpoint.metrics.httpStatusListener())
endpoint.Sink = events.NewRetryingSink(endpoint.Sink, events.NewBreaker(endpoint.Threshold, endpoint.Backoff))
endpoint.Sink = newEventQueue(endpoint.Sink, endpoint.metrics.eventQueueListener())
mediaTypes := append(config.Ignore.MediaTypes, config.IgnoredMediaTypes...)
endpoint.Sink = newIgnoredSink(endpoint.Sink, mediaTypes, config.Ignore.Actions)
register(&endpoint)
return &endpoint
}
// Name returns the name of the endpoint, generally used for debugging.
func (e *Endpoint) Name() string {
return e.name
}
// URL returns the url of the endpoint.
func (e *Endpoint) URL() string {
return e.url
}
// ReadMetrics populates em with metrics from the endpoint.
func (e *Endpoint) ReadMetrics(em *EndpointMetrics) {
e.metrics.Lock()
defer e.metrics.Unlock()
*em = e.metrics.EndpointMetrics
// Maps still need to be copied in a threadsafe manner.
em.Statuses = make(map[string]int)
maps.Copy(em.Statuses, e.metrics.Statuses)
}
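// A minimal sketch of standing up an endpoint and inspecting its metrics;
// the name, URL and event are placeholders:
//
//	ep := NewEndpoint("example", "https://webhook.example/events", EndpointConfig{
//		Timeout:   2 * time.Second,
//		Threshold: 5,
//		Backoff:   time.Second,
//	})
//	_ = ep.Write(event) // non-blocking; delivery is queued and retried
//
//	var em EndpointMetrics
//	ep.ReadMetrics(&em)
//	fmt.Printf("pending=%d successes=%d\n", em.Pending, em.Successes)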
package notifications
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"sync"
"time"
events "github.com/docker/go-events"
)
// httpSink implements a single-flight, http notification endpoint. This is
// very lightweight in that it only makes an attempt at an http request.
// Reliability should be provided by the caller.
type httpSink struct {
url string
mu sync.Mutex
closed bool
client *http.Client
listeners []httpStatusListener
// TODO(stevvooe): Allow one to configure the media type accepted by this
// sink and choose the serialization based on that.
}
// newHTTPSink returns an unreliable, single-flight http sink. Wrap in other
// sinks for increased reliability.
func newHTTPSink(u string, timeout time.Duration, headers http.Header, transport *http.Transport, listeners ...httpStatusListener) *httpSink {
if transport == nil {
transport = http.DefaultTransport.(*http.Transport)
}
return &httpSink{
url: u,
listeners: listeners,
client: &http.Client{
Transport: &headerRoundTripper{
Transport: transport,
headers: headers,
},
Timeout: timeout,
},
}
}
// httpStatusListener is called on various outcomes of sending notifications.
type httpStatusListener interface {
success(status int, event events.Event)
failure(status int, event events.Event)
err(err error, event events.Event)
}
// Write makes an attempt to notify the endpoint, returning an error if it
// fails. It is the caller's responsibility to retry on error.
func (hs *httpSink) Write(event events.Event) error {
hs.mu.Lock()
defer hs.mu.Unlock()
defer hs.client.Transport.(*headerRoundTripper).CloseIdleConnections()
if hs.closed {
return ErrSinkClosed
}
envelope := Envelope{
Events: []events.Event{event},
}
// TODO(stevvooe): It is not ideal to keep re-encoding the request body on
// retry but we are going to do it to keep the code simple. It is likely
// we could change the event struct to manage its own buffer.
p, err := json.MarshalIndent(envelope, "", " ")
if err != nil {
for _, listener := range hs.listeners {
listener.err(err, event)
}
return fmt.Errorf("%v: error marshaling event envelope: %v", hs, err)
}
body := bytes.NewReader(p)
resp, err := hs.client.Post(hs.url, EventsMediaType, body)
if err != nil {
for _, listener := range hs.listeners {
listener.err(err, event)
}
return fmt.Errorf("%v: error posting: %v", hs, err)
}
defer resp.Body.Close()
// The notifier will treat any 2xx or 3xx response as accepted by the
// endpoint.
switch {
case resp.StatusCode >= 200 && resp.StatusCode < 400:
for _, listener := range hs.listeners {
listener.success(resp.StatusCode, event)
}
// TODO(stevvooe): This is a little accepting: we may want to support
// unsupported media type responses with retries using the correct
// media type. There may also be cases that will never work.
return nil
default:
for _, listener := range hs.listeners {
listener.failure(resp.StatusCode, event)
}
return fmt.Errorf("%v: response status %v unaccepted", hs, resp.Status)
}
}
// Close closes the endpoint; subsequent writes return ErrSinkClosed.
func (hs *httpSink) Close() error {
hs.mu.Lock()
defer hs.mu.Unlock()
if hs.closed {
return fmt.Errorf("httpsink: already closed")
}
hs.closed = true
return nil
}
func (hs *httpSink) String() string {
return fmt.Sprintf("httpSink{%s}", hs.url)
}
type headerRoundTripper struct {
*http.Transport // must be transport to support CancelRequest
headers http.Header
}
func (hrt *headerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
nreq := *req
nreq.Header = make(http.Header)
merge := func(headers http.Header) {
for k, v := range headers {
nreq.Header[k] = append(nreq.Header[k], v...)
}
}
merge(req.Header)
merge(hrt.headers)
return hrt.Transport.RoundTrip(&nreq)
}
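// Sketch: headers configured on the sink are merged into every outgoing
// request by headerRoundTripper, so static credentials can be set once (the
// token is a placeholder; a nil transport falls back to the default):
//
//	h := http.Header{}
//	h.Set("Authorization", "Bearer <token>")
//	sink := newHTTPSink("https://webhook.example/events", time.Second, h, nil)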
package notifications
import (
"context"
"io"
"net/http"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/internal/dcontext"
"github.com/distribution/reference"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// ManifestListener describes a set of methods for listening to events related to manifests.
type ManifestListener interface {
ManifestPushed(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error
ManifestPulled(repo reference.Named, sm distribution.Manifest, options ...distribution.ManifestServiceOption) error
ManifestDeleted(repo reference.Named, dgst digest.Digest) error
}
// BlobListener describes a listener that can respond to layer related events.
type BlobListener interface {
BlobPushed(repo reference.Named, desc v1.Descriptor) error
BlobPulled(repo reference.Named, desc v1.Descriptor) error
BlobMounted(repo reference.Named, desc v1.Descriptor, fromRepo reference.Named) error
BlobDeleted(repo reference.Named, desc digest.Digest) error
}
// RepoListener provides repository methods that respond to repository lifecycle
type RepoListener interface {
TagDeleted(repo reference.Named, tag string) error
RepoDeleted(repo reference.Named) error
}
// Listener combines all repository events into a single interface.
type Listener interface {
ManifestListener
BlobListener
RepoListener
}
type repositoryListener struct {
distribution.Repository
listener Listener
}
type removerListener struct {
distribution.RepositoryRemover
listener Listener
}
// Listen dispatches events on the repository to the listener.
func Listen(repo distribution.Repository, remover distribution.RepositoryRemover, listener Listener) (distribution.Repository, distribution.RepositoryRemover) {
return &repositoryListener{
Repository: repo,
listener: listener,
}, &removerListener{
RepositoryRemover: remover,
listener: listener,
}
}
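// Typical wiring, done once per request-scoped repository so that every
// manifest, blob and tag operation emits a notification; repo, remover and
// listener are assumed from the surrounding application:
//
//	repo, remover = Listen(repo, remover, listener)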
func (nl *removerListener) Remove(ctx context.Context, name reference.Named) error {
err := nl.RepositoryRemover.Remove(ctx, name)
if err != nil {
return err
}
return nl.listener.RepoDeleted(name)
}
func (rl *repositoryListener) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
manifests, err := rl.Repository.Manifests(ctx, options...)
if err != nil {
return nil, err
}
return &manifestServiceListener{
ManifestService: manifests,
parent: rl,
}, nil
}
func (rl *repositoryListener) Blobs(ctx context.Context) distribution.BlobStore {
return &blobServiceListener{
BlobStore: rl.Repository.Blobs(ctx),
parent: rl,
}
}
type manifestServiceListener struct {
distribution.ManifestService
parent *repositoryListener
}
func (msl *manifestServiceListener) Delete(ctx context.Context, dgst digest.Digest) error {
err := msl.ManifestService.Delete(ctx, dgst)
if err == nil {
if err := msl.parent.listener.ManifestDeleted(msl.parent.Repository.Named(), dgst); err != nil {
dcontext.GetLogger(ctx).Errorf("error dispatching manifest delete to listener: %v", err)
}
}
return err
}
func (msl *manifestServiceListener) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
sm, err := msl.ManifestService.Get(ctx, dgst, options...)
if err == nil {
if err := msl.parent.listener.ManifestPulled(msl.parent.Repository.Named(), sm, options...); err != nil {
dcontext.GetLogger(ctx).Errorf("error dispatching manifest pull to listener: %v", err)
}
}
return sm, err
}
func (msl *manifestServiceListener) Put(ctx context.Context, sm distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
dgst, err := msl.ManifestService.Put(ctx, sm, options...)
if err == nil {
if err := msl.parent.listener.ManifestPushed(msl.parent.Repository.Named(), sm, options...); err != nil {
dcontext.GetLogger(ctx).Errorf("error dispatching manifest push to listener: %v", err)
}
}
return dgst, err
}
type blobServiceListener struct {
distribution.BlobStore
parent *repositoryListener
}
var _ distribution.BlobStore = &blobServiceListener{}
func (bsl *blobServiceListener) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
p, err := bsl.BlobStore.Get(ctx, dgst)
if err == nil {
if desc, err := bsl.Stat(ctx, dgst); err != nil {
dcontext.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err)
} else {
if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Named(), desc); err != nil {
dcontext.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err)
}
}
}
return p, err
}
func (bsl *blobServiceListener) Open(ctx context.Context, dgst digest.Digest) (io.ReadSeekCloser, error) {
rc, err := bsl.BlobStore.Open(ctx, dgst)
if err == nil {
if desc, err := bsl.Stat(ctx, dgst); err != nil {
dcontext.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err)
} else {
if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Named(), desc); err != nil {
dcontext.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err)
}
}
}
return rc, err
}
func (bsl *blobServiceListener) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
err := bsl.BlobStore.ServeBlob(ctx, w, r, dgst)
if err == nil {
if desc, err := bsl.Stat(ctx, dgst); err != nil {
dcontext.GetLogger(ctx).Errorf("error resolving descriptor in ServeBlob listener: %v", err)
} else {
if err := bsl.parent.listener.BlobPulled(bsl.parent.Repository.Named(), desc); err != nil {
dcontext.GetLogger(ctx).Errorf("error dispatching layer pull to listener: %v", err)
}
}
}
return err
}
func (bsl *blobServiceListener) Put(ctx context.Context, mediaType string, p []byte) (v1.Descriptor, error) {
desc, err := bsl.BlobStore.Put(ctx, mediaType, p)
if err == nil {
if err := bsl.parent.listener.BlobPushed(bsl.parent.Repository.Named(), desc); err != nil {
dcontext.GetLogger(ctx).Errorf("error dispatching layer push to listener: %v", err)
}
}
return desc, err
}
func (bsl *blobServiceListener) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
wr, err := bsl.BlobStore.Create(ctx, options...)
switch err := err.(type) {
case distribution.ErrBlobMounted:
if err := bsl.parent.listener.BlobMounted(bsl.parent.Repository.Named(), err.Descriptor, err.From); err != nil {
dcontext.GetLogger(ctx).Errorf("error dispatching blob mount to listener: %v", err)
}
return nil, err
}
return bsl.decorateWriter(wr), err
}
func (bsl *blobServiceListener) Delete(ctx context.Context, dgst digest.Digest) error {
err := bsl.BlobStore.Delete(ctx, dgst)
if err == nil {
if err := bsl.parent.listener.BlobDeleted(bsl.parent.Repository.Named(), dgst); err != nil {
dcontext.GetLogger(ctx).Errorf("error dispatching layer delete to listener: %v", err)
}
}
return err
}
func (bsl *blobServiceListener) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
wr, err := bsl.BlobStore.Resume(ctx, id)
return bsl.decorateWriter(wr), err
}
func (bsl *blobServiceListener) decorateWriter(wr distribution.BlobWriter) distribution.BlobWriter {
return &blobWriterListener{
BlobWriter: wr,
parent: bsl,
}
}
type blobWriterListener struct {
distribution.BlobWriter
parent *blobServiceListener
}
func (bwl *blobWriterListener) Commit(ctx context.Context, desc v1.Descriptor) (v1.Descriptor, error) {
committed, err := bwl.BlobWriter.Commit(ctx, desc)
if err == nil {
if err := bwl.parent.parent.listener.BlobPushed(bwl.parent.parent.Repository.Named(), committed); err != nil {
dcontext.GetLogger(ctx).Errorf("error dispatching blob push to listener: %v", err)
}
}
return committed, err
}
type tagServiceListener struct {
distribution.TagService
parent *repositoryListener
}
func (rl *repositoryListener) Tags(ctx context.Context) distribution.TagService {
return &tagServiceListener{
TagService: rl.Repository.Tags(ctx),
parent: rl,
}
}
func (tagSL *tagServiceListener) Untag(ctx context.Context, tag string) error {
if err := tagSL.TagService.Untag(ctx, tag); err != nil {
return err
}
if err := tagSL.parent.listener.TagDeleted(tagSL.parent.Repository.Named(), tag); err != nil {
dcontext.GetLogger(ctx).Errorf("error dispatching tag deleted to listener: %v", err)
return err
}
return nil
}
package notifications
import (
"expvar"
"fmt"
"net/http"
"sync"
prometheus "github.com/distribution/distribution/v3/metrics"
events "github.com/docker/go-events"
"github.com/docker/go-metrics"
)
var (
// eventsCounter counts total events of incoming, success, failure, and errors
eventsCounter = prometheus.NotificationsNamespace.NewLabeledCounter("events", "The number of total events", "type", "endpoint")
// pendingGauge measures the pending queue size
pendingGauge = prometheus.NotificationsNamespace.NewLabeledGauge("pending", "The gauge of pending events in queue", metrics.Total, "endpoint")
// statusCounter counts the total notification call per each status code
statusCounter = prometheus.NotificationsNamespace.NewLabeledCounter("status", "The number of status code", "code", "endpoint")
)
// endpoints is a global registry of endpoints, used to report metrics to expvar
var endpoints struct {
registered []*Endpoint
mu sync.Mutex
}
func init() {
// NOTE(stevvooe): Setup registry metrics structure to report to expvar.
// Ideally, we do more metrics through logging but we need some nice
// realtime metrics for queue state for now.
registry := expvar.Get("registry")
if registry == nil {
registry = expvar.NewMap("registry")
}
var notifications expvar.Map
notifications.Init()
notifications.Set("endpoints", expvar.Func(func() interface{} {
endpoints.mu.Lock()
defer endpoints.mu.Unlock()
var names []interface{}
for _, v := range endpoints.registered {
var epjson struct {
Name string `json:"name"`
URL string `json:"url"`
EndpointConfig
Metrics EndpointMetrics
}
epjson.Name = v.Name()
epjson.URL = v.URL()
epjson.EndpointConfig = v.EndpointConfig
v.ReadMetrics(&epjson.Metrics)
names = append(names, epjson)
}
return names
}))
registry.(*expvar.Map).Set("notifications", &notifications)
// register prometheus metrics
metrics.Register(prometheus.NotificationsNamespace)
}
// EndpointMetrics track various actions taken by the endpoint, typically by
// number of events. The goal of this is to export it via expvar, but we may
// find some other future solution to be better.
type EndpointMetrics struct {
Pending int // events pending in queue
Events int // total events incoming
Successes int // total events written successfully
Failures int // total events failed
Errors int // total events errored
Statuses map[string]int // status code histogram, per call event
}
// safeMetrics guards the metrics implementation with a lock and provides a
// safe update function.
type safeMetrics struct {
EndpointName string
EndpointMetrics
sync.Mutex // protects statuses map
}
// newSafeMetrics returns safeMetrics with map allocated.
func newSafeMetrics(name string) *safeMetrics {
var sm safeMetrics
sm.Statuses = make(map[string]int)
sm.EndpointName = name
return &sm
}
// httpStatusListener returns the listener for the http sink that updates the
// relevant counters.
func (sm *safeMetrics) httpStatusListener() httpStatusListener {
return &endpointMetricsHTTPStatusListener{
safeMetrics: sm,
}
}
// eventQueueListener returns a listener that maintains queue related counters.
func (sm *safeMetrics) eventQueueListener() eventQueueListener {
return &endpointMetricsEventQueueListener{
safeMetrics: sm,
}
}
// endpointMetricsHTTPStatusListener increments counters related to http sinks
// for the relevant events.
type endpointMetricsHTTPStatusListener struct {
*safeMetrics
}
var _ httpStatusListener = &endpointMetricsHTTPStatusListener{}
func (emsl *endpointMetricsHTTPStatusListener) success(status int, event events.Event) {
emsl.safeMetrics.Lock()
defer emsl.safeMetrics.Unlock()
emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))]++
emsl.Successes++
statusCounter.WithValues(fmt.Sprintf("%d %s", status, http.StatusText(status)), emsl.EndpointName).Inc(1)
eventsCounter.WithValues("Successes", emsl.EndpointName).Inc(1)
}
func (emsl *endpointMetricsHTTPStatusListener) failure(status int, event events.Event) {
emsl.safeMetrics.Lock()
defer emsl.safeMetrics.Unlock()
emsl.Statuses[fmt.Sprintf("%d %s", status, http.StatusText(status))]++
emsl.Failures++
statusCounter.WithValues(fmt.Sprintf("%d %s", status, http.StatusText(status)), emsl.EndpointName).Inc(1)
eventsCounter.WithValues("Failures", emsl.EndpointName).Inc(1)
}
func (emsl *endpointMetricsHTTPStatusListener) err(err error, event events.Event) {
emsl.safeMetrics.Lock()
defer emsl.safeMetrics.Unlock()
emsl.Errors++
eventsCounter.WithValues("Errors", emsl.EndpointName).Inc(1)
}
// endpointMetricsEventQueueListener maintains the incoming events counter and
// the queue's pending count.
type endpointMetricsEventQueueListener struct {
*safeMetrics
}
func (eqc *endpointMetricsEventQueueListener) ingress(event events.Event) {
eqc.Lock()
defer eqc.Unlock()
eqc.Events++
eqc.Pending++
eventsCounter.WithValues("Events", eqc.EndpointName).Inc()
pendingGauge.WithValues(eqc.EndpointName).Inc(1)
}
func (eqc *endpointMetricsEventQueueListener) egress(event events.Event) {
eqc.Lock()
defer eqc.Unlock()
eqc.Pending--
pendingGauge.WithValues(eqc.EndpointName).Dec(1)
}
// register places the endpoint into the global registry so that its stats are
// reported via expvar.
func register(e *Endpoint) {
endpoints.mu.Lock()
defer endpoints.mu.Unlock()
endpoints.registered = append(endpoints.registered, e)
}
package notifications
import (
"container/list"
"fmt"
"sync"
events "github.com/docker/go-events"
"github.com/sirupsen/logrus"
)
// eventQueue accepts all messages into a queue for asynchronous consumption
// by a sink. It is unbounded and thread safe but the sink must be reliable or
// events will be dropped.
type eventQueue struct {
sink events.Sink
events *list.List
listeners []eventQueueListener
cond *sync.Cond
mu sync.Mutex
closed bool
}
// eventQueueListener is called when various events happen on the queue.
type eventQueueListener interface {
ingress(event events.Event)
egress(event events.Event)
}
// newEventQueue returns a queue that writes to the provided sink. Any
// listeners provided will be notified on event ingress and egress, which
// allows pending-queue metrics to be maintained.
func newEventQueue(sink events.Sink, listeners ...eventQueueListener) *eventQueue {
eq := eventQueue{
sink: sink,
events: list.New(),
listeners: listeners,
}
eq.cond = sync.NewCond(&eq.mu)
go eq.run()
return &eq
}
// Write accepts the event into the queue, only failing if the queue has
// been closed.
func (eq *eventQueue) Write(event events.Event) error {
eq.mu.Lock()
defer eq.mu.Unlock()
if eq.closed {
return ErrSinkClosed
}
for _, listener := range eq.listeners {
listener.ingress(event)
}
eq.events.PushBack(event)
eq.cond.Signal() // signal waiters
return nil
}
// Close shuts down the event queue, flushing any remaining events to the
// sink before closing it.
func (eq *eventQueue) Close() error {
eq.mu.Lock()
defer eq.mu.Unlock()
if eq.closed {
return fmt.Errorf("eventqueue: already closed")
}
// set closed flag
eq.closed = true
eq.cond.Signal() // signal flushes queue
eq.cond.Wait() // wait for signal from last flush
return eq.sink.Close()
}
// run is the main goroutine to flush events to the target sink.
func (eq *eventQueue) run() {
for {
event := eq.next()
if event == nil {
return // a nil event means the queue is closed.
}
if err := eq.sink.Write(event); err != nil {
logrus.Warnf("eventqueue: error writing events to %v, these events will be lost: %v", eq.sink, err)
}
for _, listener := range eq.listeners {
listener.egress(event)
}
}
}
// next encompasses the critical section of the run loop. When the queue is
// empty, it will block on the condition. If new data arrives, it will wake
// and return an event. When closed, nil will be returned.
func (eq *eventQueue) next() events.Event {
eq.mu.Lock()
defer eq.mu.Unlock()
for eq.events.Len() < 1 {
if eq.closed {
eq.cond.Broadcast()
return nil
}
eq.cond.Wait()
}
front := eq.events.Front()
event := front.Value.(events.Event)
eq.events.Remove(front)
return event
}
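// A sketch of the queue in isolation (NewEndpoint normally assembles it):
// Write returns immediately while run() drains to the wrapped sink, and
// Close flushes whatever is still pending before closing that sink:
//
//	q := newEventQueue(sink, queueListener)
//	_ = q.Write(event)
//	_ = q.Close()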
// ignoredSink discards events with ignored target media types and actions,
// passing the rest along.
type ignoredSink struct {
events.Sink
ignoreMediaTypes map[string]bool
ignoreActions map[string]bool
}
func newIgnoredSink(sink events.Sink, ignored []string, ignoreActions []string) events.Sink {
if len(ignored) == 0 && len(ignoreActions) == 0 {
return sink
}
ignoredMap := make(map[string]bool)
for _, mediaType := range ignored {
ignoredMap[mediaType] = true
}
ignoredActionsMap := make(map[string]bool)
for _, action := range ignoreActions {
ignoredActionsMap[action] = true
}
return &ignoredSink{
Sink: sink,
ignoreMediaTypes: ignoredMap,
ignoreActions: ignoredActionsMap,
}
}
// Write discards events with ignored target media types and passes the rest
// along.
func (imts *ignoredSink) Write(event events.Event) error {
if imts.ignoreMediaTypes[event.(Event).Target.MediaType] || imts.ignoreActions[event.(Event).Action] {
return nil
}
return imts.Sink.Write(event)
}
// Close is a no-op; it does not close the wrapped sink.
func (imts *ignoredSink) Close() error {
return nil
}
package distribution
import (
"context"
"github.com/distribution/reference"
)
// Scope defines the set of items that match a namespace.
type Scope interface {
// Contains returns true if the name belongs to the namespace.
Contains(name string) bool
}
type fullScope struct{}
func (f fullScope) Contains(string) bool {
return true
}
// GlobalScope represents the full namespace scope which contains
// all other scopes.
var GlobalScope = Scope(fullScope{})
// Namespace represents a collection of repositories, addressable by name.
// Generally, a namespace is backed by a set of one or more services,
// providing facilities such as registry access, trust, and indexing.
type Namespace interface {
// Scope describes the names that can be used with this Namespace. The
// global namespace will have a scope that matches all names. The scope
// effectively provides an identity for the namespace.
Scope() Scope
// Repository should return a reference to the named repository. The
// registry may or may not have the repository but should always return a
// reference.
Repository(ctx context.Context, name reference.Named) (Repository, error)
// Repositories fills 'repos' with a lexicographically sorted catalog of repositories
// up to the size of 'repos' and returns the value 'n' for the number of entries
// which were filled. 'last' contains an offset in the catalog, and 'err' will be
// set to io.EOF if there are no more entries to obtain.
Repositories(ctx context.Context, repos []string, last string) (n int, err error)
// Blobs returns a blob enumerator to access all blobs
Blobs() BlobEnumerator
// BlobStatter returns a BlobStatter to access blob descriptors.
BlobStatter() BlobStatter
}
// RepositoryEnumerator describes an operation to enumerate repositories
type RepositoryEnumerator interface {
Enumerate(ctx context.Context, ingester func(string) error) error
}
// RepositoryRemover removes given repository
type RepositoryRemover interface {
Remove(ctx context.Context, name reference.Named) error
}
// ManifestServiceOption is a function argument for Manifest Service methods
type ManifestServiceOption interface {
Apply(ManifestService) error
}
// WithTag allows a tag to be passed into Put
func WithTag(tag string) ManifestServiceOption {
return WithTagOption{tag}
}
// WithTagOption holds a tag
type WithTagOption struct{ Tag string }
// Apply conforms to the ManifestServiceOption interface
func (o WithTagOption) Apply(m ManifestService) error {
// no implementation
return nil
}
// WithManifestMediaTypes lists the media types the client wishes
// the server to provide.
func WithManifestMediaTypes(mediaTypes []string) ManifestServiceOption {
return WithManifestMediaTypesOption{mediaTypes}
}
// WithManifestMediaTypesOption holds a list of accepted media types
type WithManifestMediaTypesOption struct{ MediaTypes []string }
// Apply conforms to the ManifestServiceOption interface
func (o WithManifestMediaTypesOption) Apply(m ManifestService) error {
// no implementation
return nil
}
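// The options in practice; ms is an assumed ManifestService and m an assumed
// Manifest:
//
//	dgst, err := ms.Put(ctx, m, WithTag("v1.2.3"))
//	m2, err := ms.Get(ctx, dgst, WithManifestMediaTypes([]string{
//		"application/vnd.oci.image.manifest.v1+json",
//	}))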
// Repository is a named collection of manifests and layers.
type Repository interface {
// Named returns the name of the repository.
Named() reference.Named
// Manifests returns a reference to this repository's manifest service,
// with the supplied options applied.
Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error)
// Blobs returns a reference to this repository's blob service.
Blobs(ctx context.Context) BlobStore
// TODO(stevvooe): The above BlobStore return can probably be relaxed to
// be a BlobService for use with clients. This will allow such
// implementations to avoid implementing ServeBlob.
// Tags returns a reference to this repository's tag service
Tags(ctx context.Context) TagService
}
// TODO(stevvooe): Must add close methods to all these. May want to change the
// way instances are created to better reflect internal dependency
// relationships.
package errcode
import (
"encoding/json"
"fmt"
"strings"
)
// ErrorCoder is the base interface for ErrorCode and Error allowing
// users of each to just call ErrorCode to get the real ID of each
type ErrorCoder interface {
ErrorCode() ErrorCode
}
// ErrorCode represents the error type. The errors are serialized via strings
// and the integer format may change and should *never* be exported.
type ErrorCode int
var _ error = ErrorCode(0)
// ErrorCode just returns itself
func (ec ErrorCode) ErrorCode() ErrorCode {
return ec
}
// Error returns the ID/Value
func (ec ErrorCode) Error() string {
// NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
return strings.ToLower(strings.ReplaceAll(ec.String(), "_", " "))
}
// Descriptor returns the descriptor for the error code.
func (ec ErrorCode) Descriptor() ErrorDescriptor {
d, ok := errorCodeToDescriptors[ec]
if !ok {
return ErrorCodeUnknown.Descriptor()
}
return d
}
// String returns the canonical identifier for this error code.
func (ec ErrorCode) String() string {
return ec.Descriptor().Value
}
// Message returns the human-readable error message for this error code.
func (ec ErrorCode) Message() string {
return ec.Descriptor().Message
}
// MarshalText encodes the receiver into UTF-8-encoded text and returns the
// result.
func (ec ErrorCode) MarshalText() (text []byte, err error) {
return []byte(ec.String()), nil
}
// UnmarshalText decodes the form generated by MarshalText.
func (ec *ErrorCode) UnmarshalText(text []byte) error {
desc, ok := idToDescriptors[string(text)]
if !ok {
desc = ErrorCodeUnknown.Descriptor()
}
*ec = desc.Code
return nil
}
// WithMessage creates a new Error struct based on the passed-in info and
// overrides the Message property.
func (ec ErrorCode) WithMessage(message string) Error {
return Error{
Code: ec,
Message: message,
}
}
// WithDetail creates a new Error struct based on the passed-in info and
// set the Detail property appropriately
func (ec ErrorCode) WithDetail(detail interface{}) Error {
return Error{
Code: ec,
Message: ec.Message(),
}.WithDetail(detail)
}
// WithArgs creates a new Error struct and sets the Args slice
func (ec ErrorCode) WithArgs(args ...interface{}) Error {
return Error{
Code: ec,
Message: ec.Message(),
}.WithArgs(args...)
}
// Error provides a wrapper around ErrorCode with extra Details provided.
type Error struct {
Code ErrorCode `json:"code"`
Message string `json:"message"`
Detail interface{} `json:"detail,omitempty"`
// TODO(duglin): See if we need an "args" property so we can do the
// variable substitution right before showing the message to the user
}
var _ error = Error{}
// ErrorCode returns the ID/Value of this Error
func (e Error) ErrorCode() ErrorCode {
return e.Code
}
// Error returns a human readable representation of the error.
func (e Error) Error() string {
return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
}
// WithDetail will return a new Error, based on the current one, but with
// some Detail info added
func (e Error) WithDetail(detail interface{}) Error {
return Error{
Code: e.Code,
Message: e.Message,
Detail: detail,
}
}
// WithArgs uses the passed-in list of interface{} as the substitution
// variables in the Error's Message string, but returns a new Error
func (e Error) WithArgs(args ...interface{}) Error {
return Error{
Code: e.Code,
Message: fmt.Sprintf(e.Code.Message(), args...),
Detail: e.Detail,
}
}
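// For example, building on a code registered later in this package (dgst is
// assumed); the descriptor's Message doubles as the format string for
// WithArgs:
//
//	err := ErrorCodeDigestInvalid.WithDetail(map[string]string{"digest": dgst})
//	// err.Error() == "digest invalid: provided digest did not match uploaded content"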
// ErrorDescriptor provides relevant information about a given error code.
type ErrorDescriptor struct {
// Code is the error code that this descriptor describes.
Code ErrorCode
// Value provides a unique string key, often capitalized with
// underscores, to identify the error code. This value is used as the
// keyed value when serializing api errors.
Value string
// Message is a short, human readable description of the error condition
// included in API responses.
Message string
// Description provides a complete account of the error's purpose, suitable
// for use in documentation.
Description string
// HTTPStatusCode provides the http status code that is associated with
// this error condition.
HTTPStatusCode int
}
// ParseErrorCode returns the value by the string error code.
// `ErrorCodeUnknown` will be returned if the error is not known.
func ParseErrorCode(value string) ErrorCode {
ed, ok := idToDescriptors[value]
if ok {
return ed.Code
}
return ErrorCodeUnknown
}
// Errors provides the envelope for multiple errors and a few sugar methods
// for use within the application.
type Errors []error
var _ error = Errors{}
func (errs Errors) Error() string {
switch len(errs) {
case 0:
return "<nil>"
case 1:
return errs[0].Error()
default:
msg := "errors:\n"
for _, err := range errs {
msg += err.Error() + "\n"
}
return msg
}
}
// Len returns the current number of errors.
func (errs Errors) Len() int {
return len(errs)
}
// MarshalJSON converts a slice of error, ErrorCode or Error into a
// slice of Error, then serializes it
func (errs Errors) MarshalJSON() ([]byte, error) {
var tmpErrs struct {
Errors []Error `json:"errors,omitempty"`
}
for _, daErr := range errs {
var err Error
switch daErr := daErr.(type) {
case ErrorCode:
err = daErr.WithDetail(nil)
case Error:
err = daErr
default:
err = ErrorCodeUnknown.WithDetail(daErr)
}
// If the Error struct was set up and they forgot to set the
// Message field (meaning it's "") then grab it from the ErrCode
msg := err.Message
if msg == "" {
msg = err.Code.Message()
}
tmpErr := Error{
Code: err.Code,
Message: msg,
Detail: err.Detail,
}
// If the detail contains an error, extract the error message;
// otherwise json.Marshal will not serialize it at all
// https://github.com/golang/go/issues/10748
if detail, ok := tmpErr.Detail.(error); ok {
tmpErr.Detail = detail.Error()
}
tmpErrs.Errors = append(tmpErrs.Errors, tmpErr)
}
return json.Marshal(tmpErrs)
}
// UnmarshalJSON deserializes []Error and then converts it into a slice of
// Error or ErrorCode values
func (errs *Errors) UnmarshalJSON(data []byte) error {
var tmpErrs struct {
Errors []Error
}
if err := json.Unmarshal(data, &tmpErrs); err != nil {
return err
}
var newErrs Errors
for _, daErr := range tmpErrs.Errors {
// If Message is empty or exactly matches the Code's message string
// then just use the Code, no need for a full Error struct
if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
// Errors w/o details get converted to ErrorCode
newErrs = append(newErrs, daErr.Code)
} else {
// Errors w/ details are untouched
newErrs = append(newErrs, Error{
Code: daErr.Code,
Message: daErr.Message,
Detail: daErr.Detail,
})
}
}
*errs = newErrs
return nil
}
package errcode
import (
"encoding/json"
"net/http"
)
// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err
// and sets the content-type header to 'application/json'. It will handle
// ErrorCoder and Errors, and if necessary will create an envelope.
func ServeJSON(w http.ResponseWriter, err error) error {
w.Header().Set("Content-Type", "application/json")
var sc int
switch errs := err.(type) {
case Errors:
if len(errs) < 1 {
break
}
if err, ok := errs[0].(ErrorCoder); ok {
sc = err.ErrorCode().Descriptor().HTTPStatusCode
}
case ErrorCoder:
sc = errs.ErrorCode().Descriptor().HTTPStatusCode
err = Errors{err} // create an envelope.
default:
// We just have an unhandled error type, so just place in an envelope
// and move along.
err = Errors{err}
}
if sc == 0 {
sc = http.StatusInternalServerError
}
w.WriteHeader(sc)
return json.NewEncoder(w).Encode(err)
}
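// A sketch of a handler responding with a coded error; the status comes from
// the code's descriptor (404 for BLOB_UNKNOWN) and the body is the standard
// errors envelope (dgst is assumed):
//
//	if err := ServeJSON(w, ErrorCodeBlobUnknown.WithDetail(dgst)); err != nil {
//		// the envelope could not be encoded; nothing more to do
//	}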
package errcode
import (
"fmt"
"net/http"
"sort"
"sync"
)
var (
errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{}
idToDescriptors = map[string]ErrorDescriptor{}
groupToDescriptors = map[string][]ErrorDescriptor{}
)
var (
// ErrorCodeUnknown is a generic error that can be used as a last
// resort if there is no situation-specific error message that can be used
ErrorCodeUnknown = register("errcode", ErrorDescriptor{
Value: "UNKNOWN",
Message: "unknown error",
Description: `Generic error returned when the error does not have an
API classification.`,
HTTPStatusCode: http.StatusInternalServerError,
})
// ErrorCodeUnsupported is returned when an operation is not supported.
ErrorCodeUnsupported = register("errcode", ErrorDescriptor{
Value: "UNSUPPORTED",
Message: "The operation is unsupported.",
Description: `The operation was unsupported due to a missing
implementation or invalid set of parameters.`,
HTTPStatusCode: http.StatusMethodNotAllowed,
})
// ErrorCodeUnauthorized is returned if a request requires
// authentication.
ErrorCodeUnauthorized = register("errcode", ErrorDescriptor{
Value: "UNAUTHORIZED",
Message: "authentication required",
Description: `The access controller was unable to authenticate
the client. Often this will be accompanied by a
Www-Authenticate HTTP response header indicating how to
authenticate.`,
HTTPStatusCode: http.StatusUnauthorized,
})
// ErrorCodeDenied is returned if a client does not have sufficient
// permission to perform an action.
ErrorCodeDenied = register("errcode", ErrorDescriptor{
Value: "DENIED",
Message: "requested access to the resource is denied",
Description: `The access controller denied access for the
operation on a resource.`,
HTTPStatusCode: http.StatusForbidden,
})
// ErrorCodeUnavailable provides a common error to report unavailability
// of a service or endpoint.
ErrorCodeUnavailable = register("errcode", ErrorDescriptor{
Value: "UNAVAILABLE",
Message: "service unavailable",
Description: "Returned when a service is not available",
HTTPStatusCode: http.StatusServiceUnavailable,
})
// ErrorCodeTooManyRequests is returned if a client attempts too many
// times to contact a service endpoint.
ErrorCodeTooManyRequests = register("errcode", ErrorDescriptor{
Value: "TOOMANYREQUESTS",
Message: "too many requests",
Description: `Returned when a client attempts to contact a
service too many times`,
HTTPStatusCode: http.StatusTooManyRequests,
})
)
const errGroup = "registry.api.v2"
var (
// ErrorCodeDigestInvalid is returned when uploading a blob if the
// provided digest does not match the blob contents.
ErrorCodeDigestInvalid = register(errGroup, ErrorDescriptor{
Value: "DIGEST_INVALID",
Message: "provided digest did not match uploaded content",
Description: `When a blob is uploaded, the registry will check that
the content matches the digest provided by the client. The error may
include a detail structure with the key "digest", including the
invalid digest string. This error may also be returned when a manifest
includes an invalid layer digest.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeSizeInvalid is returned when uploading a blob if the provided
// length does not match the content length.
ErrorCodeSizeInvalid = register(errGroup, ErrorDescriptor{
Value: "SIZE_INVALID",
Message: "provided length did not match content length",
Description: `When a layer is uploaded, the provided size will be
checked against the uploaded content. If they do not match, this error
will be returned.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeRangeInvalid is returned when uploading a blob if the provided
// content range is invalid.
ErrorCodeRangeInvalid = register(errGroup, ErrorDescriptor{
Value: "RANGE_INVALID",
Message: "invalid content range",
Description: `When a layer is uploaded, the provided range is checked
against the uploaded chunk. This error is returned if the range is
out of order.`,
HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
})
// ErrorCodeNameInvalid is returned when the name in the manifest does not
// match the provided name.
ErrorCodeNameInvalid = register(errGroup, ErrorDescriptor{
Value: "NAME_INVALID",
Message: "invalid repository name",
Description: `Invalid repository name encountered either during
manifest validation or any API operation.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeTagInvalid is returned when the tag in the manifest does not
// match the provided tag.
ErrorCodeTagInvalid = register(errGroup, ErrorDescriptor{
Value: "TAG_INVALID",
Message: "manifest tag did not match URI",
Description: `During a manifest upload, if the tag in the manifest
does not match the uri tag, this error will be returned.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeNameUnknown when the repository name is not known.
ErrorCodeNameUnknown = register(errGroup, ErrorDescriptor{
Value: "NAME_UNKNOWN",
Message: "repository name not known to registry",
Description: `This is returned if the name used during an operation is
unknown to the registry.`,
HTTPStatusCode: http.StatusNotFound,
})
// ErrorCodeManifestUnknown returned when image manifest is unknown.
ErrorCodeManifestUnknown = register(errGroup, ErrorDescriptor{
Value: "MANIFEST_UNKNOWN",
Message: "manifest unknown",
Description: `This error is returned when the manifest, identified by
name and tag, is unknown to the repository.`,
HTTPStatusCode: http.StatusNotFound,
})
// ErrorCodeManifestInvalid returned when an image manifest is invalid,
// typically during a PUT operation. This error encompasses all errors
// encountered during manifest validation that aren't signature errors.
ErrorCodeManifestInvalid = register(errGroup, ErrorDescriptor{
Value: "MANIFEST_INVALID",
Message: "manifest invalid",
Description: `During upload, manifests undergo several checks ensuring
validity. If those checks fail, this error may be returned, unless a
more specific error is included. The detail will contain information
about the failed validation.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeManifestUnverified is returned when the manifest fails
// signature verification.
ErrorCodeManifestUnverified = register(errGroup, ErrorDescriptor{
Value: "MANIFEST_UNVERIFIED",
Message: "manifest failed signature verification",
Description: `During manifest upload, if the manifest fails signature
verification, this error will be returned.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeManifestBlobUnknown is returned when a manifest blob is
// unknown to the registry.
ErrorCodeManifestBlobUnknown = register(errGroup, ErrorDescriptor{
Value: "MANIFEST_BLOB_UNKNOWN",
Message: "blob unknown to registry",
Description: `This error may be returned when a manifest blob is
unknown to the registry.`,
HTTPStatusCode: http.StatusBadRequest,
})
// ErrorCodeBlobUnknown is returned when a blob is unknown to the
// registry. This can happen when the manifest references a nonexistent
// layer or the result is not found by a blob fetch.
ErrorCodeBlobUnknown = register(errGroup, ErrorDescriptor{
Value: "BLOB_UNKNOWN",
Message: "blob unknown to registry",
Description: `This error may be returned when a blob is unknown to the
registry in a specified repository. This can be returned with a
standard get or if a manifest references an unknown layer during
upload.`,
HTTPStatusCode: http.StatusNotFound,
})
// ErrorCodeBlobUploadUnknown is returned when an upload is unknown.
ErrorCodeBlobUploadUnknown = register(errGroup, ErrorDescriptor{
Value: "BLOB_UPLOAD_UNKNOWN",
Message: "blob upload unknown to registry",
Description: `If a blob upload has been cancelled or was never
started, this error code may be returned.`,
HTTPStatusCode: http.StatusNotFound,
})
// ErrorCodeBlobUploadInvalid is returned when an upload is invalid.
ErrorCodeBlobUploadInvalid = register(errGroup, ErrorDescriptor{
Value: "BLOB_UPLOAD_INVALID",
Message: "blob upload invalid",
Description: `The blob upload encountered an error and can no
longer proceed.`,
HTTPStatusCode: http.StatusNotFound,
})
// ErrorCodePaginationNumberInvalid is returned when the `n` parameter is
// not an integer, or `n` is negative.
ErrorCodePaginationNumberInvalid = register(errGroup, ErrorDescriptor{
Value: "PAGINATION_NUMBER_INVALID",
Message: "invalid number of results requested",
Description: `Returned when the "n" parameter (number of results
to return) is not an integer, "n" is negative or "n" is bigger than
the maximum allowed.`,
HTTPStatusCode: http.StatusBadRequest,
})
)
var (
nextCode = 1000
registerLock sync.Mutex
)
// Register will make the passed-in error known to the environment and
// return a new ErrorCode
func Register(group string, descriptor ErrorDescriptor) ErrorCode {
return register(group, descriptor)
}
// register will make the passed-in error known to the environment and
// return a new ErrorCode
func register(group string, descriptor ErrorDescriptor) ErrorCode {
registerLock.Lock()
defer registerLock.Unlock()
descriptor.Code = ErrorCode(nextCode)
if _, ok := idToDescriptors[descriptor.Value]; ok {
panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value))
}
if _, ok := errorCodeToDescriptors[descriptor.Code]; ok {
panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code))
}
groupToDescriptors[group] = append(groupToDescriptors[group], descriptor)
errorCodeToDescriptors[descriptor.Code] = descriptor
idToDescriptors[descriptor.Value] = descriptor
nextCode++
return descriptor.Code
}
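// Downstream packages can add their own codes through the exported Register;
// the group name and descriptor below are illustrative:
//
//	var ErrorCodeQuotaExceeded = Register("myregistry.api", ErrorDescriptor{
//		Value:          "QUOTA_EXCEEDED",
//		Message:        "storage quota exceeded",
//		Description:    `Returned when a push would exceed the repository's quota.`,
//		HTTPStatusCode: http.StatusForbidden,
//	})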
type byValue []ErrorDescriptor
func (a byValue) Len() int { return len(a) }
func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value }
// GetGroupNames returns the list of Error group names that are registered
func GetGroupNames() []string {
keys := []string{}
for k := range groupToDescriptors {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
}
// GetErrorCodeGroup returns the named group of error descriptors
func GetErrorCodeGroup(name string) []ErrorDescriptor {
desc := groupToDescriptors[name]
sort.Sort(byValue(desc))
return desc
}
// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are
// registered, irrespective of what group they're in
func GetErrorAllDescriptors() []ErrorDescriptor {
result := []ErrorDescriptor{}
for _, group := range GetGroupNames() {
result = append(result, GetErrorCodeGroup(group)...)
}
sort.Sort(byValue(result))
return result
}
package v2
import (
"net/http"
"regexp"
"github.com/distribution/distribution/v3/registry/api/errcode"
"github.com/distribution/reference"
"github.com/opencontainers/go-digest"
)
var routeDescriptorsMap map[string]RouteDescriptor
func init() {
routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors))
for _, descriptor := range routeDescriptors {
routeDescriptorsMap[descriptor.Name] = descriptor
}
}
var (
nameParameterDescriptor = ParameterDescriptor{
Name: "name",
Type: "string",
Format: reference.NameRegexp.String(),
Required: true,
Description: `Name of the target repository.`,
}
referenceParameterDescriptor = ParameterDescriptor{
Name: "reference",
Type: "string",
Format: reference.TagRegexp.String(),
Required: true,
Description: `Tag or digest of the target manifest.`,
}
uuidParameterDescriptor = ParameterDescriptor{
Name: "uuid",
Type: "opaque",
Required: true,
Description: "A uuid identifying the upload. This field can accept characters that match `[a-zA-Z0-9-_.=]+`.",
}
digestPathParameter = ParameterDescriptor{
Name: "digest",
Type: "path",
Required: true,
Format: digest.DigestRegexp.String(),
Description: `Digest of desired blob.`,
}
hostHeader = ParameterDescriptor{
Name: "Host",
Type: "string",
Description: "Standard HTTP Host Header. Should be set to the registry host.",
Format: "<registry host>",
Examples: []string{"registry-1.docker.io"},
}
authHeader = ParameterDescriptor{
Name: "Authorization",
Type: "string",
Description: "An RFC7235 compliant authorization header.",
Format: "<scheme> <token>",
Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="},
}
authChallengeHeader = ParameterDescriptor{
Name: "WWW-Authenticate",
Type: "string",
Description: "An RFC7235 compliant authentication challenge header.",
Format: `<scheme> realm="<realm>", ...`,
Examples: []string{
`Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`,
},
}
contentLengthZeroHeader = ParameterDescriptor{
Name: "Content-Length",
Description: "The `Content-Length` header must be zero and the body must be empty.",
Type: "integer",
Format: "0",
}
dockerUploadUUIDHeader = ParameterDescriptor{
Name: "Docker-Upload-UUID",
Description: "Identifies the docker upload uuid for the current request.",
Type: "uuid",
Format: "<uuid>",
}
digestHeader = ParameterDescriptor{
Name: "Docker-Content-Digest",
Description: "Digest of the targeted content for the request.",
Type: "digest",
Format: "<digest>",
}
linkHeader = ParameterDescriptor{
Name: "Link",
Type: "link",
Description: "RFC5988 compliant rel='next' with URL to next result set, if available",
Format: `<<url>?n=<last n value>&last=<last entry from response>>; rel="next"`,
}
paginationParameters = []ParameterDescriptor{
{
Name: "n",
Type: "integer",
Description: "Limit the number of entries in each response. If not present, 100 entries will be returned.",
Format: "<integer>",
Required: false,
},
{
Name: "last",
Type: "string",
Description: "Result set will include values lexically after last.",
Format: "<string>",
Required: false,
},
}
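// As a sketch of typical use (the tag values are illustrative), a client
// walking a paginated tag list would issue requests such as:
//
//	GET /v2/<name>/tags/list?n=50
//	GET /v2/<name>/tags/list?n=50&last=<last tag of previous response>
//
// following the Link header until no rel="next" link is returned.
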
unauthorizedResponseDescriptor = ResponseDescriptor{
Name: "Authentication Required",
StatusCode: http.StatusUnauthorized,
Description: "The client is not authenticated.",
Headers: []ParameterDescriptor{
authChallengeHeader,
{
Name: "Content-Length",
Type: "integer",
Description: "Length of the JSON response body.",
Format: "<length>",
},
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeUnauthorized,
},
}
invalidPaginationResponseDescriptor = ResponseDescriptor{
Name: "Invalid pagination number",
Description: "The received parameter n was invalid in some way, as described by the error code. The client should resolve the issue and retry the request.",
StatusCode: http.StatusBadRequest,
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodePaginationNumberInvalid,
},
}
repositoryNotFoundResponseDescriptor = ResponseDescriptor{
Name: "No Such Repository Error",
StatusCode: http.StatusNotFound,
Description: "The repository is not known to the registry.",
Headers: []ParameterDescriptor{
{
Name: "Content-Length",
Type: "integer",
Description: "Length of the JSON response body.",
Format: "<length>",
},
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeNameUnknown,
},
}
deniedResponseDescriptor = ResponseDescriptor{
Name: "Access Denied",
StatusCode: http.StatusForbidden,
Description: "The client does not have required access to the repository.",
Headers: []ParameterDescriptor{
{
Name: "Content-Length",
Type: "integer",
Description: "Length of the JSON response body.",
Format: "<length>",
},
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeDenied,
},
}
tooManyRequestsDescriptor = ResponseDescriptor{
Name: "Too Many Requests",
StatusCode: http.StatusTooManyRequests,
Description: "The client made too many requests within a time interval.",
Headers: []ParameterDescriptor{
{
Name: "Content-Length",
Type: "integer",
Description: "Length of the JSON response body.",
Format: "<length>",
},
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeTooManyRequests,
},
}
)
const (
manifestBody = `{
"name": <name>,
"tag": <tag>,
"fsLayers": [
{
"blobSum": "<digest>"
},
...
],
"history": <v1 images>,
"signature": <JWS>
}`
errorsBody = `{
"errors": [
{
"code": <error code>,
"message": "<error message>",
"detail": ...
},
...
]
}`
)
// APIDescriptor exports descriptions of the layout of the v2 registry API.
var APIDescriptor = struct {
// RouteDescriptors provides a list of the routes available in the API.
RouteDescriptors []RouteDescriptor
}{
RouteDescriptors: routeDescriptors,
}
// RouteDescriptor describes a route specified by name.
type RouteDescriptor struct {
// Name is the name of the route, as specified in RouteNameXXX exports.
// These names should be considered a unique reference for a route. If
// the route is registered with gorilla, this is the name that will be
// used.
Name string
// Path is a gorilla/mux-compatible regexp that can be used to match the
// route. For any incoming method and path, only one route descriptor
// should match.
Path string
// Entity should be a short, human-readable description of the object
// targeted by the endpoint.
Entity string
// Description should provide an accurate overview of the functionality
// provided by the route.
Description string
// Methods should describe the various HTTP methods that may be used on
// this route, including request and response formats.
Methods []MethodDescriptor
}
// MethodDescriptor provides a description of the requests that may be
// conducted with the target method.
type MethodDescriptor struct {
// Method is an HTTP method, such as GET, PUT or POST.
Method string
// Description should provide an overview of the functionality provided by
// the covered method, suitable for use in documentation. Use of markdown
// here is encouraged.
Description string
// Requests is a slice of request descriptors enumerating how this
// endpoint may be used.
Requests []RequestDescriptor
}
// RequestDescriptor covers a particular set of headers and parameters that
// can be used with the parent method. It's most helpful to have one
// RequestDescriptor per API use case.
type RequestDescriptor struct {
// Name provides a short identifier for the request, usable as a title or
// to provide quick context for the particular request.
Name string
// Description should cover the request's purpose, covering any details for
// this particular use case.
Description string
// Headers describes headers that must be used with the HTTP request.
Headers []ParameterDescriptor
// PathParameters enumerate the parameterized path components for the
// given request, as defined in the route's regular expression.
PathParameters []ParameterDescriptor
// QueryParameters provides a list of query parameters for the given
// request.
QueryParameters []ParameterDescriptor
// Body describes the format of the request body.
Body BodyDescriptor
// Successes enumerates the possible responses that are considered to be
// the result of a successful request.
Successes []ResponseDescriptor
// Failures covers the possible failures from this particular request.
Failures []ResponseDescriptor
}
// ResponseDescriptor describes the components of an API response.
type ResponseDescriptor struct {
// Name provides a short identifier for the response, usable as a title or
// to provide quick context for the particular response.
Name string
// Description should provide a brief overview of the role of the
// response.
Description string
// StatusCode specifies the status received by this particular response.
StatusCode int
// Headers covers any headers that may be returned from the response.
Headers []ParameterDescriptor
// Fields describes any fields that may be present in the response.
Fields []ParameterDescriptor
// ErrorCodes enumerates the error codes that may be returned along with
// the response.
ErrorCodes []errcode.ErrorCode
// Body describes the body of the response, if any.
Body BodyDescriptor
}
// BodyDescriptor describes a request body and its expected content type. For
// the most part, it should be example json or some placeholder for body
// data in documentation.
type BodyDescriptor struct {
ContentType string
Format string
}
// ParameterDescriptor describes the format of a request parameter, which may
// be a header, path parameter or query parameter.
type ParameterDescriptor struct {
// Name is the name of the parameter, either of the path component or
// query parameter.
Name string
// Type specifies the type of the parameter, such as string, integer, etc.
Type string
// Description provides a human-readable description of the parameter.
Description string
// Required indicates whether the parameter is required.
Required bool
// Format specifies the string format accepted by this parameter.
Format string
// Regexp is a compiled regular expression that can be used to validate
// the contents of the parameter.
Regexp *regexp.Regexp
// Examples provides multiple examples for the values that might be valid
// for this parameter.
Examples []string
}
var routeDescriptors = []RouteDescriptor{
{
Name: RouteNameBase,
Path: "/v2/",
Entity: "Base",
Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.`,
Methods: []MethodDescriptor{
{
Method: http.MethodGet,
Description: "Check that the endpoint implements Docker Registry API V2.",
Requests: []RequestDescriptor{
{
Headers: []ParameterDescriptor{
hostHeader,
authHeader,
},
Successes: []ResponseDescriptor{
{
Description: "The API implements V2 protocol and is accessible.",
StatusCode: http.StatusOK,
},
},
Failures: []ResponseDescriptor{
{
Description: "The registry does not implement the V2 API.",
StatusCode: http.StatusNotFound,
},
unauthorizedResponseDescriptor,
tooManyRequestsDescriptor,
},
},
},
},
},
},
{
Name: RouteNameTags,
Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list",
Entity: "Tags",
Description: "Retrieve information about tags.",
Methods: []MethodDescriptor{
{
Method: http.MethodGet,
Description: "Fetch the tags under the repository identified by `name`.",
Requests: []RequestDescriptor{
{
Name: "Tags",
Description: "Return all tags for the repository",
Headers: []ParameterDescriptor{
hostHeader,
authHeader,
},
PathParameters: []ParameterDescriptor{
nameParameterDescriptor,
},
Successes: []ResponseDescriptor{
{
StatusCode: http.StatusOK,
Description: "A list of tags for the named repository.",
Headers: []ParameterDescriptor{
{
Name: "Content-Length",
Type: "integer",
Description: "Length of the JSON response body.",
Format: "<length>",
},
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: `{
"name": <name>,
"tags": [
<tag>,
...
]
}`,
},
},
},
Failures: []ResponseDescriptor{
unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor,
tooManyRequestsDescriptor,
},
},
{
Name: "Tags Paginated",
Description: "Return a portion of the tags for the specified repository.",
PathParameters: []ParameterDescriptor{nameParameterDescriptor},
QueryParameters: paginationParameters,
Successes: []ResponseDescriptor{
{
StatusCode: http.StatusOK,
Description: "A list of tags for the named repository.",
Headers: []ParameterDescriptor{
{
Name: "Content-Length",
Type: "integer",
Description: "Length of the JSON response body.",
Format: "<length>",
},
linkHeader,
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: `{
"name": <name>,
"tags": [
<tag>,
...
]
}`,
},
},
},
Failures: []ResponseDescriptor{
invalidPaginationResponseDescriptor,
unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor,
tooManyRequestsDescriptor,
},
},
},
},
},
},
{
Name: RouteNameManifest,
Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}",
Entity: "Manifest",
Description: "Create, update, delete and retrieve manifests.",
Methods: []MethodDescriptor{
{
Method: http.MethodGet,
Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.",
Requests: []RequestDescriptor{
{
Headers: []ParameterDescriptor{
hostHeader,
authHeader,
},
PathParameters: []ParameterDescriptor{
nameParameterDescriptor,
referenceParameterDescriptor,
},
Successes: []ResponseDescriptor{
{
Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.",
StatusCode: http.StatusOK,
Headers: []ParameterDescriptor{
digestHeader,
},
Body: BodyDescriptor{
ContentType: "<media type of manifest>",
Format: manifestBody,
},
},
},
Failures: []ResponseDescriptor{
{
Description: "The name or reference was invalid.",
StatusCode: http.StatusBadRequest,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeNameInvalid,
errcode.ErrorCodeTagInvalid,
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
},
unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor,
tooManyRequestsDescriptor,
},
},
},
},
{
Method: http.MethodPut,
Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.",
Requests: []RequestDescriptor{
{
Headers: []ParameterDescriptor{
hostHeader,
authHeader,
},
PathParameters: []ParameterDescriptor{
nameParameterDescriptor,
referenceParameterDescriptor,
},
Body: BodyDescriptor{
ContentType: "<media type of manifest>",
Format: manifestBody,
},
Successes: []ResponseDescriptor{
{
Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.",
StatusCode: http.StatusCreated,
Headers: []ParameterDescriptor{
{
Name: "Location",
Type: "url",
Description: "The canonical location url of the uploaded manifest.",
Format: "<url>",
},
contentLengthZeroHeader,
digestHeader,
},
},
},
Failures: []ResponseDescriptor{
{
Name: "Invalid Manifest",
Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.",
StatusCode: http.StatusBadRequest,
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeNameInvalid,
errcode.ErrorCodeTagInvalid,
errcode.ErrorCodeManifestInvalid,
errcode.ErrorCodeManifestUnverified,
errcode.ErrorCodeBlobUnknown,
},
},
unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor,
tooManyRequestsDescriptor,
{
Name: "Missing Layer(s)",
Description: "One or more layers may be missing during a manifest upload. If so, the missing layers will be enumerated in the error response.",
StatusCode: http.StatusBadRequest,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeBlobUnknown,
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: `{
"errors": [
{
"code": "BLOB_UNKNOWN",
"message": "blob unknown to registry",
"detail": {
"digest": "<digest>"
}
},
...
]
}`,
},
},
{
Name: "Not allowed",
Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason",
StatusCode: http.StatusMethodNotAllowed,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeUnsupported,
},
},
},
},
},
},
{
Method: http.MethodDelete,
Description: "Delete the manifest or tag identified by `name` and `reference` where `reference` can be a tag or digest. Note that a manifest can _only_ be deleted by digest.",
Requests: []RequestDescriptor{
{
Headers: []ParameterDescriptor{
hostHeader,
authHeader,
},
PathParameters: []ParameterDescriptor{
nameParameterDescriptor,
referenceParameterDescriptor,
},
Successes: []ResponseDescriptor{
{
StatusCode: http.StatusAccepted,
},
},
Failures: []ResponseDescriptor{
{
Name: "Invalid Name or Reference",
Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.",
StatusCode: http.StatusBadRequest,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeNameInvalid,
errcode.ErrorCodeTagInvalid,
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
},
unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor,
tooManyRequestsDescriptor,
{
Name: "Unknown Manifest",
Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest or tag was already deleted if this response is returned.",
StatusCode: http.StatusNotFound,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeNameUnknown,
errcode.ErrorCodeManifestUnknown,
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
},
{
Name: "Not allowed",
Description: "Manifest or tag delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.",
StatusCode: http.StatusMethodNotAllowed,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeUnsupported,
},
},
},
},
},
},
},
},
{
Name: RouteNameBlob,
Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}",
Entity: "Blob",
Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.",
Methods: []MethodDescriptor{
{
Method: http.MethodGet,
Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.",
Requests: []RequestDescriptor{
{
Name: "Fetch Blob",
Headers: []ParameterDescriptor{
hostHeader,
authHeader,
},
PathParameters: []ParameterDescriptor{
nameParameterDescriptor,
digestPathParameter,
},
Successes: []ResponseDescriptor{
{
Description: "The blob identified by `digest` is available. The blob content will be present in the body of the request.",
StatusCode: http.StatusOK,
Headers: []ParameterDescriptor{
{
Name: "Content-Length",
Type: "integer",
Description: "The length of the requested blob content.",
Format: "<length>",
},
digestHeader,
},
Body: BodyDescriptor{
ContentType: "application/octet-stream",
Format: "<blob binary data>",
},
},
{
Description: "The blob identified by `digest` is available at the provided location.",
StatusCode: http.StatusTemporaryRedirect,
Headers: []ParameterDescriptor{
{
Name: "Location",
Type: "url",
Description: "The location where the layer should be accessible.",
Format: "<blob location>",
},
digestHeader,
},
},
},
Failures: []ResponseDescriptor{
{
Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.",
StatusCode: http.StatusBadRequest,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeNameInvalid,
errcode.ErrorCodeDigestInvalid,
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
},
{
Description: "The blob, identified by `name` and `digest`, is unknown to the registry.",
StatusCode: http.StatusNotFound,
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeNameUnknown,
errcode.ErrorCodeBlobUnknown,
},
},
unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor,
tooManyRequestsDescriptor,
},
},
{
Name: "Fetch Blob Part",
Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.",
Headers: []ParameterDescriptor{
hostHeader,
authHeader,
{
Name: "Range",
Type: "string",
Description: "HTTP Range header specifying blob chunk.",
Format: "bytes=<start>-<end>",
},
},
PathParameters: []ParameterDescriptor{
nameParameterDescriptor,
digestPathParameter,
},
Successes: []ResponseDescriptor{
{
Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.",
StatusCode: http.StatusPartialContent,
Headers: []ParameterDescriptor{
{
Name: "Content-Length",
Type: "integer",
Description: "The length of the requested blob chunk.",
Format: "<length>",
},
{
Name: "Content-Range",
Type: "byte range",
Description: "Content range of blob chunk.",
Format: "bytes <start>-<end>/<size>",
},
},
Body: BodyDescriptor{
ContentType: "application/octet-stream",
Format: "<blob binary data>",
},
},
},
Failures: []ResponseDescriptor{
{
Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.",
StatusCode: http.StatusBadRequest,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeNameInvalid,
errcode.ErrorCodeDigestInvalid,
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
},
{
StatusCode: http.StatusNotFound,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeNameUnknown,
errcode.ErrorCodeBlobUnknown,
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
},
{
Description: "The range specification cannot be satisfied for the requested content. This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.",
StatusCode: http.StatusRequestedRangeNotSatisfiable,
},
unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor,
tooManyRequestsDescriptor,
},
},
},
},
{
Method: http.MethodDelete,
Description: "Delete the blob identified by `name` and `digest`",
Requests: []RequestDescriptor{
{
Headers: []ParameterDescriptor{
hostHeader,
authHeader,
},
PathParameters: []ParameterDescriptor{
nameParameterDescriptor,
digestPathParameter,
},
Successes: []ResponseDescriptor{
{
StatusCode: http.StatusAccepted,
Headers: []ParameterDescriptor{
{
Name: "Content-Length",
Type: "integer",
Description: "0",
Format: "0",
},
digestHeader,
},
},
},
Failures: []ResponseDescriptor{
{
Name: "Invalid Name or Digest",
StatusCode: http.StatusBadRequest,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeDigestInvalid,
errcode.ErrorCodeNameInvalid,
},
},
{
Description: "The blob, identified by `name` and `digest`, is unknown to the registry.",
StatusCode: http.StatusNotFound,
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeNameUnknown,
errcode.ErrorCodeBlobUnknown,
},
},
{
Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled",
StatusCode: http.StatusMethodNotAllowed,
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeUnsupported,
},
},
unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor,
tooManyRequestsDescriptor,
},
},
},
},
// TODO(stevvooe): We may want to add a PUT request here to
// kickoff an upload of a blob, integrated with the blob upload
// API.
},
},
{
Name: RouteNameBlobUpload,
Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/",
Entity: "Initiate Blob Upload",
Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.",
Methods: []MethodDescriptor{
{
Method: http.MethodPost,
Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.",
Requests: []RequestDescriptor{
{
Name: "Initiate Monolithic Blob Upload",
Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.",
Headers: []ParameterDescriptor{
hostHeader,
authHeader,
{
Name: "Content-Length",
Type: "integer",
Format: "<length of blob>",
},
},
PathParameters: []ParameterDescriptor{
nameParameterDescriptor,
},
QueryParameters: []ParameterDescriptor{
{
Name: "digest",
Type: "query",
Format: "<digest>",
Regexp: digest.DigestRegexp,
Description: `Digest of uploaded blob. If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`,
},
},
Body: BodyDescriptor{
ContentType: "application/octet-stream",
Format: "<binary data>",
},
Successes: []ResponseDescriptor{
{
Description: "The blob has been created in the registry and is available at the provided location.",
StatusCode: http.StatusCreated,
Headers: []ParameterDescriptor{
{
Name: "Location",
Type: "url",
Format: "<blob location>",
},
contentLengthZeroHeader,
dockerUploadUUIDHeader,
},
},
},
Failures: []ResponseDescriptor{
{
Name: "Invalid Name or Digest",
StatusCode: http.StatusBadRequest,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeDigestInvalid,
errcode.ErrorCodeNameInvalid,
},
},
{
Name: "Not allowed",
Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason",
StatusCode: http.StatusMethodNotAllowed,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeUnsupported,
},
},
unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor,
tooManyRequestsDescriptor,
},
},
{
Name: "Initiate Resumable Blob Upload",
Description: "Initiate a resumable blob upload with an empty request body.",
Headers: []ParameterDescriptor{
hostHeader,
authHeader,
contentLengthZeroHeader,
},
PathParameters: []ParameterDescriptor{
nameParameterDescriptor,
},
Successes: []ResponseDescriptor{
{
Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.",
StatusCode: http.StatusAccepted,
Headers: []ParameterDescriptor{
{
Name: "Location",
Type: "url",
Format: "/v2/<name>/blobs/uploads/<uuid>",
Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
},
{
Name: "Range",
Format: "0-<offset>",
Description: "Range header indicating the progress of the upload. When starting an upload, it will return an empty range, since no content has been received.",
},
contentLengthZeroHeader,
dockerUploadUUIDHeader,
},
},
},
Failures: []ResponseDescriptor{
{
Name: "Invalid Name or Digest",
StatusCode: http.StatusBadRequest,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeDigestInvalid,
errcode.ErrorCodeNameInvalid,
},
},
unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor,
tooManyRequestsDescriptor,
},
},
{
Name: "Mount Blob",
Description: "Mount a blob identified by the `mount` parameter from another repository.",
Headers: []ParameterDescriptor{
hostHeader,
authHeader,
contentLengthZeroHeader,
},
PathParameters: []ParameterDescriptor{
nameParameterDescriptor,
},
QueryParameters: []ParameterDescriptor{
{
Name: "mount",
Type: "query",
Format: "<digest>",
Regexp: digest.DigestRegexp,
Description: `Digest of blob to mount from the source repository.`,
},
{
Name: "from",
Type: "query",
Format: "<repository name>",
Regexp: reference.NameRegexp,
Description: `Name of the source repository.`,
},
},
Successes: []ResponseDescriptor{
{
Description: "The blob has been mounted in the repository and is available at the provided location.",
StatusCode: http.StatusCreated,
Headers: []ParameterDescriptor{
{
Name: "Location",
Type: "url",
Format: "<blob location>",
},
contentLengthZeroHeader,
dockerUploadUUIDHeader,
},
},
},
Failures: []ResponseDescriptor{
{
Name: "Invalid Name or Digest",
StatusCode: http.StatusBadRequest,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeDigestInvalid,
errcode.ErrorCodeNameInvalid,
},
},
{
Name: "Not allowed",
Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason",
StatusCode: http.StatusMethodNotAllowed,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeUnsupported,
},
},
unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor,
tooManyRequestsDescriptor,
},
},
},
},
},
},
{
Name: RouteNameBlobUploadChunk,
Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}",
Entity: "Blob Upload",
Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.",
Methods: []MethodDescriptor{
{
Method: http.MethodGet,
Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.",
Requests: []RequestDescriptor{
{
Description: "Retrieve the progress of the current upload, as reported by the `Range` header.",
Headers: []ParameterDescriptor{
hostHeader,
authHeader,
},
PathParameters: []ParameterDescriptor{
nameParameterDescriptor,
uuidParameterDescriptor,
},
Successes: []ResponseDescriptor{
{
Name: "Upload Progress",
Description: "The upload is known and in progress. The last received offset is available in the `Range` header.",
StatusCode: http.StatusNoContent,
Headers: []ParameterDescriptor{
{
Name: "Range",
Type: "header",
Format: "0-<offset>",
Description: "Range indicating the current progress of the upload.",
},
contentLengthZeroHeader,
dockerUploadUUIDHeader,
},
},
},
Failures: []ResponseDescriptor{
{
Description: "There was an error processing the upload and it must be restarted.",
StatusCode: http.StatusBadRequest,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeDigestInvalid,
errcode.ErrorCodeNameInvalid,
errcode.ErrorCodeBlobUploadInvalid,
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
},
{
Description: "The upload is unknown to the registry. The upload must be restarted.",
StatusCode: http.StatusNotFound,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeBlobUploadUnknown,
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
},
unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor,
tooManyRequestsDescriptor,
},
},
},
},
{
Method: http.MethodPatch,
Description: "Upload a chunk of data for the specified upload.",
Requests: []RequestDescriptor{
{
Name: "Stream upload",
Description: "Upload a stream of data to upload without completing the upload.",
PathParameters: []ParameterDescriptor{
nameParameterDescriptor,
uuidParameterDescriptor,
},
Headers: []ParameterDescriptor{
hostHeader,
authHeader,
},
Body: BodyDescriptor{
ContentType: "application/octet-stream",
Format: "<binary data>",
},
Successes: []ResponseDescriptor{
{
Name: "Data Accepted",
Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.",
StatusCode: http.StatusAccepted,
Headers: []ParameterDescriptor{
{
Name: "Location",
Type: "url",
Format: "/v2/<name>/blobs/uploads/<uuid>",
Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
},
{
Name: "Range",
Type: "header",
Format: "0-<offset>",
Description: "Range indicating the current progress of the upload.",
},
contentLengthZeroHeader,
dockerUploadUUIDHeader,
},
},
},
Failures: []ResponseDescriptor{
{
Description: "There was an error processing the upload and it must be restarted.",
StatusCode: http.StatusBadRequest,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeDigestInvalid,
errcode.ErrorCodeNameInvalid,
errcode.ErrorCodeBlobUploadInvalid,
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
},
{
Description: "The upload is unknown to the registry. The upload must be restarted.",
StatusCode: http.StatusNotFound,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeBlobUploadUnknown,
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
},
unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor,
tooManyRequestsDescriptor,
},
},
{
Name: "Chunked upload",
Description: "Upload a chunk of data to specified upload without completing the upload. The data will be uploaded to the specified Content Range.",
PathParameters: []ParameterDescriptor{
nameParameterDescriptor,
uuidParameterDescriptor,
},
Headers: []ParameterDescriptor{
hostHeader,
authHeader,
{
Name: "Content-Range",
Type: "header",
Format: "<start of range>-<end of range, inclusive>",
Required: true,
Description: "Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.",
},
{
Name: "Content-Length",
Type: "integer",
Format: "<length of chunk>",
Description: "Length of the chunk being uploaded, corresponding the length of the request body.",
},
},
Body: BodyDescriptor{
ContentType: "application/octet-stream",
Format: "<binary chunk>",
},
Successes: []ResponseDescriptor{
{
Name: "Chunk Accepted",
Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.",
StatusCode: http.StatusAccepted,
Headers: []ParameterDescriptor{
{
Name: "Location",
Type: "url",
Format: "/v2/<name>/blobs/uploads/<uuid>",
Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
},
{
Name: "Range",
Type: "header",
Format: "0-<offset>",
Description: "Range indicating the current progress of the upload.",
},
contentLengthZeroHeader,
dockerUploadUUIDHeader,
},
},
},
Failures: []ResponseDescriptor{
{
Description: "There was an error processing the upload and it must be restarted.",
StatusCode: http.StatusBadRequest,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeDigestInvalid,
errcode.ErrorCodeNameInvalid,
errcode.ErrorCodeBlobUploadInvalid,
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
},
{
Description: "The upload is unknown to the registry. The upload must be restarted.",
StatusCode: http.StatusNotFound,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeBlobUploadUnknown,
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
},
{
Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.",
StatusCode: http.StatusRequestedRangeNotSatisfiable,
},
unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor,
tooManyRequestsDescriptor,
},
},
},
},
{
Method: http.MethodPut,
Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.",
Requests: []RequestDescriptor{
{
Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.",
Headers: []ParameterDescriptor{
hostHeader,
authHeader,
{
Name: "Content-Length",
Type: "integer",
Format: "<length of data>",
Description: "Length of the data being uploaded, corresponding to the length of the request body. May be zero if no data is provided.",
},
},
PathParameters: []ParameterDescriptor{
nameParameterDescriptor,
uuidParameterDescriptor,
},
QueryParameters: []ParameterDescriptor{
{
Name: "digest",
Type: "string",
Format: "<digest>",
Regexp: digest.DigestRegexp,
Required: true,
Description: `Digest of uploaded blob.`,
},
},
Body: BodyDescriptor{
ContentType: "application/octet-stream",
Format: "<binary data>",
},
Successes: []ResponseDescriptor{
{
Name: "Upload Complete",
Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.",
StatusCode: http.StatusCreated,
Headers: []ParameterDescriptor{
{
Name: "Location",
Type: "url",
Format: "<blob location>",
Description: "The canonical location of the blob for retrieval",
},
{
Name: "Content-Range",
Type: "header",
Format: "<start of range>-<end of range, inclusive>",
Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.",
},
contentLengthZeroHeader,
digestHeader,
},
},
},
Failures: []ResponseDescriptor{
{
Description: "There was an error processing the upload and it must be restarted.",
StatusCode: http.StatusBadRequest,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeDigestInvalid,
errcode.ErrorCodeNameInvalid,
errcode.ErrorCodeBlobUploadInvalid,
errcode.ErrorCodeUnsupported,
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
},
{
Description: "The upload is unknown to the registry. The upload must be restarted.",
StatusCode: http.StatusNotFound,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeBlobUploadUnknown,
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
},
unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor,
tooManyRequestsDescriptor,
},
},
},
},
{
Method: http.MethodDelete,
Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.",
Requests: []RequestDescriptor{
{
Description: "Cancel the upload specified by `uuid`.",
PathParameters: []ParameterDescriptor{
nameParameterDescriptor,
uuidParameterDescriptor,
},
Headers: []ParameterDescriptor{
hostHeader,
authHeader,
contentLengthZeroHeader,
},
Successes: []ResponseDescriptor{
{
Name: "Upload Deleted",
Description: "The upload has been successfully deleted.",
StatusCode: http.StatusNoContent,
Headers: []ParameterDescriptor{
contentLengthZeroHeader,
},
},
},
Failures: []ResponseDescriptor{
{
Description: "An error was encountered processing the delete. The client may ignore this error.",
StatusCode: http.StatusBadRequest,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeNameInvalid,
errcode.ErrorCodeBlobUploadInvalid,
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
},
{
Description: "The upload is unknown to the registry. The client may ignore this error and assume the upload has been deleted.",
StatusCode: http.StatusNotFound,
ErrorCodes: []errcode.ErrorCode{
errcode.ErrorCodeBlobUploadUnknown,
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: errorsBody,
},
},
unauthorizedResponseDescriptor,
repositoryNotFoundResponseDescriptor,
deniedResponseDescriptor,
tooManyRequestsDescriptor,
},
},
},
},
},
},
{
Name: RouteNameCatalog,
Path: "/v2/_catalog",
Entity: "Catalog",
Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available.",
Methods: []MethodDescriptor{
{
Method: http.MethodGet,
Description: "Retrieve a sorted, json list of repositories available in the registry.",
Requests: []RequestDescriptor{
{
Name: "Catalog Fetch",
Description: "Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.",
Successes: []ResponseDescriptor{
{
Description: "Returns the unabridged list of repositories as a json response.",
StatusCode: http.StatusOK,
Headers: []ParameterDescriptor{
{
Name: "Content-Length",
Type: "integer",
Description: "Length of the JSON response body.",
Format: "<length>",
},
},
Body: BodyDescriptor{
ContentType: "application/json",
Format: `{
"repositories": [
<name>,
...
]
}`,
},
},
},
},
{
Name: "Catalog Fetch Paginated",
Description: "Return the specified portion of repositories.",
QueryParameters: paginationParameters,
Successes: []ResponseDescriptor{
{
StatusCode: http.StatusOK,
Body: BodyDescriptor{
ContentType: "application/json",
Format: `{
"repositories": [
<name>,
...
],
"next": "<url>?last=<name>&n=<last value of n>"
}`,
},
Headers: []ParameterDescriptor{
{
Name: "Content-Length",
Type: "integer",
Description: "Length of the JSON response body.",
Format: "<length>",
},
linkHeader,
},
},
},
Failures: []ResponseDescriptor{
invalidPaginationResponseDescriptor,
},
},
},
},
},
},
}
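// A minimal sketch of how the exported APIDescriptor might be consumed,
// e.g. to list every route and method the API defines (the output format
// is illustrative):
//
//	for _, route := range APIDescriptor.RouteDescriptors {
//		for _, method := range route.Methods {
//			fmt.Printf("%s %s (%s)\n", method.Method, route.Path, route.Name)
//		}
//	}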
package v2
import (
"fmt"
"regexp"
"strings"
"unicode"
)
var (
// according to rfc7230
reToken = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`)
reQuotedValue = regexp.MustCompile(`^[^\\"]+`)
reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`)
)
// parseForwardedHeader is a benevolent parser of the Forwarded header defined in RFC 7239. The header
// contains a comma-separated list of forwarding key-value pairs. Each list element is set by a single
// proxy. The function parses only the first element of the list, which is set by the very first proxy. It
// returns a map of the corresponding key-value pairs and the unparsed remainder of the input string.
//
// Examples of Forwarded header values:
//
// 1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown
// 2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80"
//
// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into
// {"for": "192.0.2.43:443", "host": "registry.example.org"}.
func parseForwardedHeader(forwarded string) (map[string]string, string, error) {
// Following are states of forwarded header parser. Any state could transition to a failure.
const (
// terminating state; can transition to Parameter
stateElement = iota
// terminating state; can transition to KeyValueDelimiter
stateParameter
// can transition to Value
stateKeyValueDelimiter
// can transition to one of { QuotedValue, PairEnd }
stateValue
// can transition to one of { EscapedCharacter, PairEnd }
stateQuotedValue
// can transition to one of { QuotedValue }
stateEscapedCharacter
// terminating state; can transition to one of { Parameter, Element }
statePairEnd
)
var (
parameter string
value string
parse = forwarded[:]
res = map[string]string{}
state = stateElement
)
Loop:
for {
// skip spaces unless in quoted value
if state != stateQuotedValue && state != stateEscapedCharacter {
parse = strings.TrimLeftFunc(parse, unicode.IsSpace)
}
if len(parse) == 0 {
if state != stateElement && state != statePairEnd && state != stateParameter {
return nil, parse, fmt.Errorf("unexpected end of input")
}
// terminating
break
}
switch state {
// terminate at list element delimiter
case stateElement:
if parse[0] == ',' {
parse = parse[1:]
break Loop
}
state = stateParameter
// parse parameter (the key of key-value pair)
case stateParameter:
match := reToken.FindString(parse)
if len(match) == 0 {
return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse))
}
parameter = strings.ToLower(match)
parse = parse[len(match):]
state = stateKeyValueDelimiter
// parse '='
case stateKeyValueDelimiter:
if parse[0] != '=' {
return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse))
}
parse = parse[1:]
state = stateValue
// parse value or quoted value
case stateValue:
if parse[0] == '"' {
parse = parse[1:]
state = stateQuotedValue
} else {
value = reToken.FindString(parse)
if len(value) == 0 {
return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse))
}
if _, exists := res[parameter]; exists {
return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse))
}
res[parameter] = value
parse = parse[len(value):]
value = ""
state = statePairEnd
}
// parse a part of quoted value until the first backslash
case stateQuotedValue:
match := reQuotedValue.FindString(parse)
value += match
parse = parse[len(match):]
switch {
case len(parse) == 0:
return nil, parse, fmt.Errorf("unterminated quoted string")
case parse[0] == '"':
res[parameter] = value
value = ""
parse = parse[1:]
state = statePairEnd
case parse[0] == '\\':
parse = parse[1:]
state = stateEscapedCharacter
}
// parse escaped character in a quoted string, ignore the backslash
// transition back to QuotedValue state
case stateEscapedCharacter:
c := reEscapedCharacter.FindString(parse)
if len(c) == 0 {
return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1)
}
value += c
parse = parse[1:]
state = stateQuotedValue
// expect either a new key-value pair, new list or end of input
case statePairEnd:
switch parse[0] {
case ';':
parse = parse[1:]
state = stateParameter
case ',':
state = stateElement
default:
return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse))
}
}
}
return res, parse, nil
}
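// As a sketch, parsing the second example from the comment above would
// behave roughly as follows (the remainder begins after the first list
// delimiter):
//
//	params, rest, err := parseForwardedHeader(`for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80"`)
//	// err    == nil
//	// params == map[string]string{"for": "192.0.2.43:443", "host": "registry.example.org"}
//	// rest   holds the unparsed tail beginning after the first ','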
package v2
import (
"sync"
"github.com/gorilla/mux"
)
// The following are definitions of the names under which all V2 routes are
// registered. These symbols can be used to look up a route based on its name.
const (
RouteNameBase = "base"
RouteNameManifest = "manifest"
RouteNameTags = "tags"
RouteNameBlob = "blob"
RouteNameBlobUpload = "blob-upload"
RouteNameBlobUploadChunk = "blob-upload-chunk"
RouteNameCatalog = "catalog"
)
var (
baseRouter *mux.Router
createBaseRouterOnce sync.Once
)
// Router builds a gorilla router with named routes for the various API
// methods. This can be used directly by both server implementations and
// clients.
func Router() *mux.Router {
createBaseRouterOnce.Do(func() {
baseRouter = RouterWithPrefix("")
})
return baseRouter
}
// RouterWithPrefix builds a gorilla router with a configured prefix
// on all routes.
func RouterWithPrefix(prefix string) *mux.Router {
rootRouter := mux.NewRouter()
router := rootRouter
if prefix != "" {
router = router.PathPrefix(prefix).Subrouter()
}
router.StrictSlash(true)
for _, descriptor := range routeDescriptors {
router.Path(descriptor.Path).Name(descriptor.Name)
}
return rootRouter
}
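// A short sketch of server-side use: look up a named route on the shared
// router and attach a handler to it (the manifestHandler value here is
// hypothetical):
//
//	router := Router()
//	router.GetRoute(RouteNameManifest).Handler(manifestHandler)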
package v2
import (
"fmt"
"net/http"
"net/url"
"strings"
"github.com/distribution/reference"
"github.com/gorilla/mux"
)
// URLBuilder creates registry API urls from a single base endpoint. It can be
// used to create urls for use in a registry client or server.
//
// All urls will be created from the given base, including the api version.
// For example, if a root of "/foo/" is provided, urls generated will fall
// under "/foo/v2/...". Most applications will only provide a scheme, host and
// port, such as "https://localhost:5000/".
type URLBuilder struct {
root *url.URL // url root (ie http://localhost/)
router *mux.Router
relative bool
}
// NewURLBuilder creates a URLBuilder with provided root url object.
func NewURLBuilder(root *url.URL, relative bool) *URLBuilder {
return &URLBuilder{
root: root,
router: Router(),
relative: relative,
}
}
// NewURLBuilderFromString works identically to NewURLBuilder except it takes
// a string argument for the root, returning an error if it is not a valid
// url.
func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) {
u, err := url.Parse(root)
if err != nil {
return nil, err
}
return NewURLBuilder(u, relative), nil
}
// NewURLBuilderFromRequest uses information from an *http.Request to
// construct the root url.
func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
var (
scheme = "http"
host = r.Host
)
if r.TLS != nil {
scheme = "https"
} else if len(r.URL.Scheme) > 0 {
scheme = r.URL.Scheme
}
// Handle forwarded headers
// Prefer "Forwarded" header as defined by rfc7239 if given
// see https://tools.ietf.org/html/rfc7239
if forwarded := r.Header.Get("Forwarded"); len(forwarded) > 0 {
forwardedHeader, _, err := parseForwardedHeader(forwarded)
if err == nil {
if fproto := forwardedHeader["proto"]; len(fproto) > 0 {
scheme = fproto
}
if fhost := forwardedHeader["host"]; len(fhost) > 0 {
host = fhost
}
}
} else {
if forwardedProto := r.Header.Get("X-Forwarded-Proto"); len(forwardedProto) > 0 {
scheme = forwardedProto
}
if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 {
// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
// comma-separated list of hosts, to which each proxy appends the
// requested host. We want to grab the first from this comma-separated
// list.
host, _, _ = strings.Cut(forwardedHost, ",")
host = strings.TrimSpace(host)
}
}
basePath := routeDescriptorsMap[RouteNameBase].Path
requestPath := r.URL.Path
index := strings.Index(requestPath, basePath)
u := &url.URL{
Scheme: scheme,
Host: host,
}
if index > 0 {
// N.B. index+1 is important because we want to include the trailing /
u.Path = requestPath[0 : index+1]
}
return NewURLBuilder(u, relative)
}
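// For example, behind a TLS-terminating proxy that sets
// X-Forwarded-Proto: https, a builder created from a plain-HTTP request
// will still emit https URLs:
//
//	ub := NewURLBuilderFromRequest(r, false) // r is the incoming *http.Request
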
// BuildBaseURL constructs a base url for the API, typically just "/v2/".
func (ub *URLBuilder) BuildBaseURL() (string, error) {
route := ub.cloneRoute(RouteNameBase)
baseURL, err := route.URL()
if err != nil {
return "", err
}
return baseURL.String(), nil
}
// BuildCatalogURL constructs a url to get a catalog of repositories.
func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) {
route := ub.cloneRoute(RouteNameCatalog)
catalogURL, err := route.URL()
if err != nil {
return "", err
}
return appendValuesURL(catalogURL, values...).String(), nil
}
// BuildTagsURL constructs a url to list the tags in the named repository.
func (ub *URLBuilder) BuildTagsURL(name reference.Named, values ...url.Values) (string, error) {
route := ub.cloneRoute(RouteNameTags)
tagsURL, err := route.URL("name", name.Name())
if err != nil {
return "", err
}
return appendValuesURL(tagsURL, values...).String(), nil
}
// BuildManifestURL constructs a url for the manifest identified by name and
// reference. The argument reference may be either a tag or digest.
func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) {
route := ub.cloneRoute(RouteNameManifest)
tagOrDigest := ""
switch v := ref.(type) {
case reference.Tagged:
tagOrDigest = v.Tag()
case reference.Digested:
tagOrDigest = v.Digest().String()
default:
return "", fmt.Errorf("reference must have a tag or digest")
}
manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest)
if err != nil {
return "", err
}
return manifestURL.String(), nil
}
// BuildBlobURL constructs the url for the blob identified by name and dgst.
func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) {
route := ub.cloneRoute(RouteNameBlob)
layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String())
if err != nil {
return "", err
}
return layerURL.String(), nil
}
// BuildBlobUploadURL constructs a url to begin a blob upload in the
// repository identified by name.
func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) {
route := ub.cloneRoute(RouteNameBlobUpload)
uploadURL, err := route.URL("name", name.Name())
if err != nil {
return "", err
}
return appendValuesURL(uploadURL, values...).String(), nil
}
// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid,
// including any url values. This should generally not be used by clients, as
// this url is provided by server implementations during the blob upload
// process.
func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) {
route := ub.cloneRoute(RouteNameBlobUploadChunk)
uploadURL, err := route.URL("name", name.Name(), "uuid", uuid)
if err != nil {
return "", err
}
return appendValuesURL(uploadURL, values...).String(), nil
}
// cloneRoute returns a clone of the named route from the router. Routes
// must be cloned to avoid modifying them during url generation.
func (ub *URLBuilder) cloneRoute(name string) clonedRoute {
route := new(mux.Route)
root := new(url.URL)
*route = *ub.router.GetRoute(name) // clone the route
*root = *ub.root
return clonedRoute{Route: route, root: root, relative: ub.relative}
}
type clonedRoute struct {
*mux.Route
root *url.URL
relative bool
}
func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) {
routeURL, err := cr.Route.URL(pairs...)
if err != nil {
return nil, err
}
if cr.relative {
return routeURL, nil
}
if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" {
routeURL.Path = routeURL.Path[1:]
}
url := cr.root.ResolveReference(routeURL)
url.Scheme = cr.root.Scheme
return url, nil
}
// appendValuesURL appends the parameters to the url.
func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
merged := u.Query()
for _, v := range values {
for k, vv := range v {
merged[k] = append(merged[k], vv...)
}
}
u.RawQuery = merged.Encode()
return u
}
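// A minimal sketch of client-side use, assuming the registry address and
// repository name shown are placeholders:
//
//	ub, err := NewURLBuilderFromString("https://registry.example.com/", false)
//	if err != nil {
//		// handle the malformed root URL
//	}
//	named, _ := reference.WithName("library/ubuntu")
//	tagged, _ := reference.WithTag(named, "latest")
//	manifestURL, _ := ub.BuildManifestURL(tagged)
//	// manifestURL == "https://registry.example.com/v2/library/ubuntu/manifests/latest"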
// Package auth defines a standard interface for request access controllers.
//
// An access controller has a simple interface with a single `Authorized`
// method which checks that a given request is authorized to perform one or
// more actions on one or more resources. This method should return a non-nil
// error if the request is not authorized.
//
// An implementation registers its access controller by name with a constructor
// which accepts an options map for configuring the access controller.
//
// options := map[string]interface{}{"sillySecret": "whysosilly?"}
// accessController, _ := auth.GetAccessController("silly", options)
//
// This `accessController` can then be used in a request handler like so:
//
// func updateOrder(w http.ResponseWriter, r *http.Request) {
// orderNumber := r.FormValue("orderNumber")
// resource := auth.Resource{Type: "customerOrder", Name: orderNumber}
// access := auth.Access{Resource: resource, Action: "update"}
//
//	if _, err := accessController.Authorized(r, access); err != nil {
//		if challenge, ok := err.(auth.Challenge); ok {
// // Let the challenge write the response.
// challenge.SetHeaders(r, w)
// w.WriteHeader(http.StatusUnauthorized)
// return
// } else {
// // Some other error.
// }
// }
// }
package auth
import (
"errors"
"fmt"
"net/http"
)
var (
// ErrInvalidCredential is returned when the auth token does not authenticate correctly.
ErrInvalidCredential = errors.New("invalid authorization credential")
// ErrAuthenticationFailure returned when authentication fails.
ErrAuthenticationFailure = errors.New("authentication failure")
)
// InitFunc is the type of an AccessController factory function and is used
// to register the constructor for different AccessController backends.
type InitFunc func(options map[string]interface{}) (AccessController, error)
var accessControllers map[string]InitFunc
func init() {
accessControllers = make(map[string]InitFunc)
}
// UserInfo carries information about
// an authenticated/authorized client.
type UserInfo struct {
Name string
}
// Resource describes a resource by type and name.
type Resource struct {
Type string
Class string
Name string
}
// Access describes a specific action that is
// requested or allowed for a given resource.
type Access struct {
Resource
Action string
}
// Grant describes the permitted level of access for an authorized request.
type Grant struct {
User UserInfo // The authenticated user for the request.
Resources []Resource // The list of resources which have been authorized for the request.
}
// Challenge is a special error type which is used for HTTP 401 Unauthorized
// responses and is able to write the response with WWW-Authenticate challenge
// header values based on the error.
type Challenge interface {
error
// SetHeaders prepares the request to conduct a challenge response by
// adding an HTTP challenge header to the response message. Callers
// are expected to set the appropriate HTTP status code (e.g. 401)
// themselves.
SetHeaders(r *http.Request, w http.ResponseWriter)
}
// AccessController controls access to registry resources based on a request
// and the required access levels for that request. Implementations can
// support both complete denial and http authorization challenges.
type AccessController interface {
// Authorized determines if the request is granted access. If one or more
// Access structs are provided, the requested access will be compared with
// what is available to the request.
//
// Return a Grant to grant the request access. Return an error to deny
// access. The error may be of type Challenge, in which case the caller may
// have the Challenge handle the request or choose what action to take based
// on the Challenge header or response status.
Authorized(r *http.Request, access ...Access) (*Grant, error)
}
// CredentialAuthenticator is an interface for authenticating user credentials.
type CredentialAuthenticator interface {
AuthenticateUser(username, password string) error
}
// Register is used to register an InitFunc for
// an AccessController backend with the given name.
func Register(name string, initFunc InitFunc) error {
if _, exists := accessControllers[name]; exists {
return fmt.Errorf("name already registered: %s", name)
}
accessControllers[name] = initFunc
return nil
}
// GetAccessController constructs an AccessController
// with the given options using the named backend.
func GetAccessController(name string, options map[string]interface{}) (AccessController, error) {
if initFunc, exists := accessControllers[name]; exists {
return initFunc(options)
}
return nil, fmt.Errorf("no access controller registered with name: %s", name)
}
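// A hedged sketch of wiring a custom backend through Register and
// GetAccessController; the backend name "example" and its option keys are
// hypothetical:
//
// func init() {
// // Typically done from the backend package's own init function.
// _ = Register("example", InitFunc(newExampleController))
// }
//
// controller, err := GetAccessController("example", map[string]interface{}{
// "realm": "registry.example.com", // illustrative option
// })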
// Package silly provides a simple authentication scheme that checks for the
// existence of an Authorization header and issues access if it is present and
// non-empty.
//
// This package is present as an example implementation of a minimal
// auth.AccessController and for testing. This is not suitable for any kind of
// production security.
package silly
import (
"fmt"
"net/http"
"strings"
"github.com/distribution/distribution/v3/registry/auth"
"github.com/sirupsen/logrus"
)
// init registers the silly auth backend.
func init() {
if err := auth.Register("silly", auth.InitFunc(newAccessController)); err != nil {
logrus.Errorf("failed to register silly auth: %v", err)
}
}
// accessController is a minimal implementation of auth.AccessController that
// checks only for a non-empty Authorization header. It is useful for
// demonstration and testing.
type accessController struct {
realm string
service string
}
var _ auth.AccessController = &accessController{}
func newAccessController(options map[string]interface{}) (auth.AccessController, error) {
realm, present := options["realm"]
if _, ok := realm.(string); !present || !ok {
return nil, fmt.Errorf(`"realm" must be set for silly access controller`)
}
service, present := options["service"]
if _, ok := service.(string); !present || !ok {
return nil, fmt.Errorf(`"service" must be set for silly access controller`)
}
return &accessController{realm: realm.(string), service: service.(string)}, nil
}
// Authorized simply checks for the existence of the authorization header,
// responding with a bearer challenge if it doesn't exist.
func (ac *accessController) Authorized(req *http.Request, accessRecords ...auth.Access) (*auth.Grant, error) {
if req.Header.Get("Authorization") == "" {
challenge := challenge{
realm: ac.realm,
service: ac.service,
}
if len(accessRecords) > 0 {
var scopes []string
for _, access := range accessRecords {
scopes = append(scopes, fmt.Sprintf("%s:%s:%s", access.Type, access.Resource.Name, access.Action))
}
challenge.scope = strings.Join(scopes, " ")
}
return nil, &challenge
}
return &auth.Grant{User: auth.UserInfo{Name: "silly"}}, nil
}
type challenge struct {
realm string
service string
scope string
}
var _ auth.Challenge = challenge{}
// SetHeaders sets a simple bearer challenge on the response.
func (ch challenge) SetHeaders(r *http.Request, w http.ResponseWriter) {
header := fmt.Sprintf("Bearer realm=%q,service=%q", ch.realm, ch.service)
if ch.scope != "" {
header = fmt.Sprintf("%s,scope=%q", header, ch.scope)
}
w.Header().Set("WWW-Authenticate", header)
}
func (ch challenge) Error() string {
return fmt.Sprintf("silly authentication challenge: %#v", ch)
}
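// For illustration, a request without an Authorization header receives a
// challenge header of the form (values depend on configuration and the
// requested access records):
//
// WWW-Authenticate: Bearer realm="silly-realm",service="silly-service",scope="repository:foo/bar:pull"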
package handlers
import (
"context"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"expvar"
"fmt"
"math"
"math/big"
"net"
"net/http"
"net/url"
"os"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/configuration"
"github.com/distribution/distribution/v3/health"
"github.com/distribution/distribution/v3/health/checks"
"github.com/distribution/distribution/v3/internal/dcontext"
prometheus "github.com/distribution/distribution/v3/metrics"
"github.com/distribution/distribution/v3/notifications"
"github.com/distribution/distribution/v3/registry/api/errcode"
v2 "github.com/distribution/distribution/v3/registry/api/v2"
"github.com/distribution/distribution/v3/registry/auth"
registrymiddleware "github.com/distribution/distribution/v3/registry/middleware/registry"
repositorymiddleware "github.com/distribution/distribution/v3/registry/middleware/repository"
"github.com/distribution/distribution/v3/registry/proxy"
"github.com/distribution/distribution/v3/registry/storage"
memorycache "github.com/distribution/distribution/v3/registry/storage/cache/memory"
rediscache "github.com/distribution/distribution/v3/registry/storage/cache/redis"
storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
"github.com/distribution/distribution/v3/registry/storage/driver/factory"
storagemiddleware "github.com/distribution/distribution/v3/registry/storage/driver/middleware"
"github.com/distribution/distribution/v3/version"
"github.com/distribution/reference"
events "github.com/docker/go-events"
"github.com/docker/go-metrics"
"github.com/gorilla/mux"
"github.com/redis/go-redis/extra/redisotel/v9"
"github.com/redis/go-redis/v9"
"github.com/sirupsen/logrus"
)
// randomSecretSize is the number of random bytes to generate if no secret
// was specified.
const randomSecretSize = 32
// defaultCheckInterval is the default time in between health checks
const defaultCheckInterval = 10 * time.Second
// App is a global registry application object. Shared resources can be placed
// on this object that will be accessible from all requests. Any writable
// fields should be protected.
type App struct {
context.Context
Config *configuration.Configuration
router *mux.Router // main application router, configured with dispatchers
driver storagedriver.StorageDriver // driver maintains the app global storage driver instance.
registry distribution.Namespace // registry is the primary registry backend for the app instance.
repoRemover distribution.RepositoryRemover // repoRemover provides ability to delete repos
accessController auth.AccessController // main access controller for application
// httpHost is a parsed representation of the http.host parameter from
// the configuration. Only the Scheme and Host fields are used.
httpHost url.URL
// events contains notification related configuration.
events struct {
sink events.Sink
source notifications.SourceRecord
}
redis redis.UniversalClient
// isCache is true if this registry is configured as a pull through cache
isCache bool
// readOnly is true if the registry is in a read-only maintenance mode
readOnly bool
}
// NewApp takes a configuration and returns a configured app, ready to serve
// requests. The app only implements ServeHTTP and can be wrapped in other
// handlers accordingly.
func NewApp(ctx context.Context, config *configuration.Configuration) *App {
app := &App{
Config: config,
Context: ctx,
router: v2.RouterWithPrefix(config.HTTP.Prefix),
isCache: config.Proxy.RemoteURL != "",
}
// Register the handler dispatchers.
app.register(v2.RouteNameBase, func(ctx *Context, r *http.Request) http.Handler {
return http.HandlerFunc(apiBase)
})
app.register(v2.RouteNameManifest, manifestDispatcher)
app.register(v2.RouteNameCatalog, catalogDispatcher)
app.register(v2.RouteNameTags, tagsDispatcher)
app.register(v2.RouteNameBlob, blobDispatcher)
app.register(v2.RouteNameBlobUpload, blobUploadDispatcher)
app.register(v2.RouteNameBlobUploadChunk, blobUploadDispatcher)
// override the storage driver's UA string for registry outbound HTTP requests
storageParams := config.Storage.Parameters()
if storageParams == nil {
storageParams = make(configuration.Parameters)
}
if storageParams["useragent"] == "" {
storageParams["useragent"] = fmt.Sprintf("distribution/%s %s", version.Version(), runtime.Version())
}
var err error
app.driver, err = factory.Create(app, config.Storage.Type(), storageParams)
if err != nil {
// TODO(stevvooe): Move the creation of a service into a protected
// method, where this is created lazily. Its status can be queried via
// a health check.
panic(err)
}
purgeConfig := uploadPurgeDefaultConfig()
if mc, ok := config.Storage["maintenance"]; ok {
if v, ok := mc["uploadpurging"]; ok {
purgeConfig, ok = v.(map[interface{}]interface{})
if !ok {
panic("uploadpurging config key must contain additional keys")
}
}
if v, ok := mc["readonly"]; ok {
readOnly, ok := v.(map[interface{}]interface{})
if !ok {
panic("readonly config key must contain additional keys")
}
if readOnlyEnabled, ok := readOnly["enabled"]; ok {
app.readOnly, ok = readOnlyEnabled.(bool)
if !ok {
panic("readonly's enabled config key must have a boolean value")
}
}
}
}
startUploadPurger(app, app.driver, dcontext.GetLogger(app), purgeConfig)
app.driver, err = applyStorageMiddleware(app, app.driver, config.Middleware["storage"])
if err != nil {
panic(err)
}
// Do not configure HTTP secret for a proxy registry as HTTP secret
// is only used for blob uploads and a proxy registry does not support blob uploads.
if !app.isCache {
app.configureSecret(config)
}
app.configureEvents(config)
app.configureRedis(config)
app.configureLogHook(config)
options := registrymiddleware.GetRegistryOptions()
if config.HTTP.Host != "" {
u, err := url.Parse(config.HTTP.Host)
if err != nil {
panic(fmt.Sprintf(`could not parse http "host" parameter: %v`, err))
}
app.httpHost = *u
}
if app.isCache {
options = append(options, storage.DisableDigestResumption)
}
// configure deletion
if d, ok := config.Storage["delete"]; ok {
e, ok := d["enabled"]
if ok {
if deleteEnabled, ok := e.(bool); ok && deleteEnabled {
options = append(options, storage.EnableDelete)
}
}
}
// configure tag lookup concurrency limit
if p := config.Storage.TagParameters(); p != nil {
l, ok := p["concurrencylimit"]
if ok {
limit, ok := l.(int)
if !ok {
panic("tag lookup concurrency limit config key must have a integer value")
}
if limit < 0 {
panic("tag lookup concurrency limit should be a non-negative integer value")
}
options = append(options, storage.TagLookupConcurrencyLimit(limit))
}
}
// configure redirects
var redirectDisabled bool
if redirectConfig, ok := config.Storage["redirect"]; ok {
v := redirectConfig["disable"]
switch v := v.(type) {
case bool:
redirectDisabled = v
default:
panic(fmt.Sprintf("invalid type for redirect config: %#v", redirectConfig))
}
}
if redirectDisabled {
dcontext.GetLogger(app).Infof("backend redirection disabled")
} else {
options = append(options, storage.EnableRedirect)
}
if !config.Validation.Enabled {
config.Validation.Enabled = !config.Validation.Disabled
}
// configure validation
if config.Validation.Enabled {
if len(config.Validation.Manifests.URLs.Allow) == 0 && len(config.Validation.Manifests.URLs.Deny) == 0 {
// If Allow and Deny are empty, allow nothing.
options = append(options, storage.ManifestURLsAllowRegexp(regexp.MustCompile("^$")))
} else {
if len(config.Validation.Manifests.URLs.Allow) > 0 {
for i, s := range config.Validation.Manifests.URLs.Allow {
// Validate via compilation.
if _, err := regexp.Compile(s); err != nil {
panic(fmt.Sprintf("validation.manifests.urls.allow: %s", err))
}
// Wrap with non-capturing group.
config.Validation.Manifests.URLs.Allow[i] = fmt.Sprintf("(?:%s)", s)
}
re := regexp.MustCompile(strings.Join(config.Validation.Manifests.URLs.Allow, "|"))
options = append(options, storage.ManifestURLsAllowRegexp(re))
}
if len(config.Validation.Manifests.URLs.Deny) > 0 {
for i, s := range config.Validation.Manifests.URLs.Deny {
// Validate via compilation.
if _, err := regexp.Compile(s); err != nil {
panic(fmt.Sprintf("validation.manifests.urls.deny: %s", err))
}
// Wrap with non-capturing group.
config.Validation.Manifests.URLs.Deny[i] = fmt.Sprintf("(?:%s)", s)
}
re := regexp.MustCompile(strings.Join(config.Validation.Manifests.URLs.Deny, "|"))
options = append(options, storage.ManifestURLsDenyRegexp(re))
}
}
switch config.Validation.Manifests.Indexes.Platforms {
case "list":
options = append(options, storage.EnableValidateImageIndexImagesExist)
for _, platform := range config.Validation.Manifests.Indexes.PlatformList {
options = append(options, storage.AddValidateImageIndexImagesExistPlatform(platform.Architecture, platform.OS))
}
fallthrough
case "none":
dcontext.GetLogger(app).Warn("Image index completeness validation has been disabled, which is an experimental option because other container tooling might expect all image indexes to be complete")
case "all":
fallthrough
default:
options = append(options, storage.EnableValidateImageIndexImagesExist)
}
}
// configure storage caches
if cc, ok := config.Storage["cache"]; ok {
v, ok := cc["blobdescriptor"]
if !ok {
// Backwards compatible: "layerinfo" == "blobdescriptor"
v = cc["layerinfo"]
}
switch v {
case "redis":
if app.redis == nil {
panic("redis configuration required to use for layerinfo cache")
}
if _, ok := cc["blobdescriptorsize"]; ok {
dcontext.GetLogger(app).Warnf("blobdescriptorsize parameter is not supported with redis cache")
}
cacheProvider := rediscache.NewRedisBlobDescriptorCacheProvider(app.redis)
localOptions := append(options, storage.BlobDescriptorCacheProvider(cacheProvider))
app.registry, err = storage.NewRegistry(app, app.driver, localOptions...)
if err != nil {
panic("could not create registry: " + err.Error())
}
dcontext.GetLogger(app).Infof("using redis blob descriptor cache")
case "inmemory":
blobDescriptorSize := memorycache.DefaultSize
configuredSize, ok := cc["blobdescriptorsize"]
if ok {
// Since Parameters is not strongly typed, render to a string and convert back
blobDescriptorSize, err = strconv.Atoi(fmt.Sprint(configuredSize))
if err != nil {
panic(fmt.Sprintf("invalid blobdescriptorsize value %s: %s", configuredSize, err))
}
}
cacheProvider := memorycache.NewInMemoryBlobDescriptorCacheProvider(blobDescriptorSize)
localOptions := append(options, storage.BlobDescriptorCacheProvider(cacheProvider))
app.registry, err = storage.NewRegistry(app, app.driver, localOptions...)
if err != nil {
panic("could not create registry: " + err.Error())
}
dcontext.GetLogger(app).Infof("using inmemory blob descriptor cache")
default:
if v != "" {
dcontext.GetLogger(app).Warnf("unknown cache type %q, caching disabled", config.Storage["cache"])
}
}
}
if app.registry == nil {
// configure the registry if no cache section is available.
app.registry, err = storage.NewRegistry(app.Context, app.driver, options...)
if err != nil {
panic("could not create registry: " + err.Error())
}
}
app.registry, err = applyRegistryMiddleware(app, app.registry, app.driver, config.Middleware["registry"])
if err != nil {
panic(err)
}
authType := config.Auth.Type()
if authType != "" && !strings.EqualFold(authType, "none") {
accessController, err := auth.GetAccessController(config.Auth.Type(), config.Auth.Parameters())
if err != nil {
panic(fmt.Sprintf("unable to configure authorization (%s): %v", authType, err))
}
app.accessController = accessController
dcontext.GetLogger(app).Debugf("configured %q access controller", authType)
}
// configure as a pull through cache
if config.Proxy.RemoteURL != "" {
app.registry, err = proxy.NewRegistryPullThroughCache(ctx, app.registry, app.driver, config.Proxy)
if err != nil {
panic(err.Error())
}
app.isCache = true
dcontext.GetLogger(app).Info("Registry configured as a proxy cache to ", config.Proxy.RemoteURL)
}
var ok bool
app.repoRemover, ok = app.registry.(distribution.RepositoryRemover)
if !ok {
dcontext.GetLogger(app).Warnf("Registry does not implement RepositoryRemover. Will not be able to delete repos and tags")
}
return app
}
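// A minimal sketch of constructing and serving an App; the configuration
// path is hypothetical and error handling is abbreviated:
//
// fp, err := os.Open("/etc/distribution/config.yml")
// if err != nil {
// log.Fatal(err)
// }
// config, err := configuration.Parse(fp)
// if err != nil {
// log.Fatal(err)
// }
// app := handlers.NewApp(context.Background(), config)
// log.Fatal(http.ListenAndServe(":5000", app))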
// RegisterHealthChecks is an awful hack to defer health check registration
// control to callers. This should only ever be called once per registry
// process, typically in a main function. The correct way would be to register
// health checks outside of the app, since multiple apps may exist in the same
// process. Because the configuration and app are tightly coupled,
// implementing this properly will require a refactor. This method may panic
// if called twice in the same process.
func (app *App) RegisterHealthChecks(healthRegistries ...*health.Registry) {
if len(healthRegistries) > 1 {
panic("RegisterHealthChecks called with more than one registry")
}
healthRegistry := health.DefaultRegistry
if len(healthRegistries) == 1 {
healthRegistry = healthRegistries[0]
}
if app.Config.Health.StorageDriver.Enabled {
interval := app.Config.Health.StorageDriver.Interval
if interval == 0 {
interval = defaultCheckInterval
}
storageDriverCheck := health.CheckFunc(func(ctx context.Context) error {
_, err := app.driver.Stat(ctx, "/") // "/" should always exist
if _, ok := err.(storagedriver.PathNotFoundError); ok {
err = nil // pass this through, backend is responding, but this path doesn't exist.
}
if err != nil {
dcontext.GetLogger(ctx).Errorf("storage driver health check: %v", err)
}
return err
})
updater := health.NewThresholdStatusUpdater(app.Config.Health.StorageDriver.Threshold)
healthRegistry.Register("storagedriver_"+app.Config.Storage.Type(), updater)
go health.Poll(app, updater, storageDriverCheck, interval)
}
for _, fileChecker := range app.Config.Health.FileCheckers {
interval := fileChecker.Interval
if interval == 0 {
interval = defaultCheckInterval
}
dcontext.GetLogger(app).Infof("configuring file health check path=%s, interval=%d", fileChecker.File, interval/time.Second)
u := health.NewStatusUpdater()
healthRegistry.Register(fileChecker.File, u)
go health.Poll(app, u, checks.FileChecker(fileChecker.File), interval)
}
for _, httpChecker := range app.Config.Health.HTTPCheckers {
interval := httpChecker.Interval
if interval == 0 {
interval = defaultCheckInterval
}
statusCode := httpChecker.StatusCode
if statusCode == 0 {
statusCode = 200
}
checker := checks.HTTPChecker(httpChecker.URI, statusCode, httpChecker.Timeout, httpChecker.Headers)
dcontext.GetLogger(app).Infof("configuring HTTP health check uri=%s, interval=%d, threshold=%d", httpChecker.URI, interval/time.Second, httpChecker.Threshold)
updater := health.NewThresholdStatusUpdater(httpChecker.Threshold)
healthRegistry.Register(httpChecker.URI, updater)
go health.Poll(app, updater, checker, interval)
}
for _, tcpChecker := range app.Config.Health.TCPCheckers {
interval := tcpChecker.Interval
if interval == 0 {
interval = defaultCheckInterval
}
checker := checks.TCPChecker(tcpChecker.Addr, tcpChecker.Timeout)
dcontext.GetLogger(app).Infof("configuring TCP health check addr=%s, interval=%d, threshold=%d", tcpChecker.Addr, interval/time.Second, tcpChecker.Threshold)
updater := health.NewThresholdStatusUpdater(tcpChecker.Threshold)
healthRegistry.Register(tcpChecker.Addr, updater)
go health.Poll(app, updater, checker, interval)
}
}
// Shutdown closes the underlying registry.
func (app *App) Shutdown() error {
if r, ok := app.registry.(proxy.Closer); ok {
return r.Close()
}
return nil
}
// register a handler with the application, by route name. The handler will be
// passed through the application filters and context will be constructed at
// request time.
func (app *App) register(routeName string, dispatch dispatchFunc) {
handler := app.dispatcher(dispatch)
// Chain the handler with prometheus instrumented handler
if app.Config.HTTP.Debug.Prometheus.Enabled {
namespace := metrics.NewNamespace(prometheus.NamespacePrefix, "http", nil)
httpMetrics := namespace.NewDefaultHttpMetrics(strings.Replace(routeName, "-", "_", -1))
metrics.Register(namespace)
handler = metrics.InstrumentHandler(httpMetrics, handler)
}
// TODO(stevvooe): This odd dispatcher/route registration is by-product of
// some limitations in the gorilla/mux router. We are using it to keep
// routing consistent between the client and server, but we may want to
// replace it with manual routing and structure-based dispatch for better
// control over the request execution.
app.router.GetRoute(routeName).Handler(handler)
}
// configureEvents prepares the event sink for action.
func (app *App) configureEvents(configuration *configuration.Configuration) {
// Configure all of the endpoint sinks.
// NOTE(milosgajdos): we are disabling the linter here as
// if an endpoint is disabled we continue with the evaluation
// of the next one so we do not know the exact size the slice
// should have at the time the iteration starts
// nolint:prealloc
var sinks []events.Sink
for _, endpoint := range configuration.Notifications.Endpoints {
if endpoint.Disabled {
dcontext.GetLogger(app).Infof("endpoint %s disabled, skipping", endpoint.Name)
continue
}
dcontext.GetLogger(app).Infof("configuring endpoint %v (%v), timeout=%s, headers=%v", endpoint.Name, endpoint.URL, endpoint.Timeout, endpoint.Headers)
endpoint := notifications.NewEndpoint(endpoint.Name, endpoint.URL, notifications.EndpointConfig{
Timeout: endpoint.Timeout,
Threshold: endpoint.Threshold,
Backoff: endpoint.Backoff,
Headers: endpoint.Headers,
IgnoredMediaTypes: endpoint.IgnoredMediaTypes,
Ignore: endpoint.Ignore,
})
sinks = append(sinks, endpoint)
}
// NOTE(stevvooe): Moving to a new queuing implementation is as easy as
// replacing broadcaster with a rabbitmq implementation. It's recommended
// that the registry instances also act as the workers to keep deployment
// simple.
app.events.sink = events.NewBroadcaster(sinks...)
// Populate registry event source
hostname, err := os.Hostname()
if err != nil {
hostname = configuration.HTTP.Addr
} else {
// try to pick the port off the config
_, port, err := net.SplitHostPort(configuration.HTTP.Addr)
if err == nil {
hostname = net.JoinHostPort(hostname, port)
}
}
app.events.source = notifications.SourceRecord{
Addr: hostname,
InstanceID: dcontext.GetStringValue(app, "instance.id"),
}
}
func (app *App) configureRedis(cfg *configuration.Configuration) {
if len(cfg.Redis.Options.Addrs) == 0 {
dcontext.GetLogger(app).Infof("redis not configured")
return
}
opts := redis.UniversalOptions{
Addrs: cfg.Redis.Options.Addrs,
ClientName: cfg.Redis.Options.ClientName,
DB: cfg.Redis.Options.DB,
Protocol: cfg.Redis.Options.Protocol,
Username: cfg.Redis.Options.Username,
Password: cfg.Redis.Options.Password,
SentinelUsername: cfg.Redis.Options.SentinelUsername,
SentinelPassword: cfg.Redis.Options.SentinelPassword,
MaxRetries: cfg.Redis.Options.MaxRetries,
MinRetryBackoff: cfg.Redis.Options.MinRetryBackoff,
MaxRetryBackoff: cfg.Redis.Options.MaxRetryBackoff,
DialTimeout: cfg.Redis.Options.DialTimeout,
ReadTimeout: cfg.Redis.Options.ReadTimeout,
WriteTimeout: cfg.Redis.Options.WriteTimeout,
ContextTimeoutEnabled: cfg.Redis.Options.ContextTimeoutEnabled,
PoolFIFO: cfg.Redis.Options.PoolFIFO,
PoolSize: cfg.Redis.Options.PoolSize,
PoolTimeout: cfg.Redis.Options.PoolTimeout,
MinIdleConns: cfg.Redis.Options.MinIdleConns,
MaxIdleConns: cfg.Redis.Options.MaxIdleConns,
MaxActiveConns: cfg.Redis.Options.MaxActiveConns,
ConnMaxIdleTime: cfg.Redis.Options.ConnMaxIdleTime,
ConnMaxLifetime: cfg.Redis.Options.ConnMaxLifetime,
MaxRedirects: cfg.Redis.Options.MaxRedirects,
ReadOnly: cfg.Redis.Options.ReadOnly,
RouteByLatency: cfg.Redis.Options.RouteByLatency,
RouteRandomly: cfg.Redis.Options.RouteRandomly,
MasterName: cfg.Redis.Options.MasterName,
DisableIdentity: cfg.Redis.Options.DisableIdentity,
IdentitySuffix: cfg.Redis.Options.IdentitySuffix,
UnstableResp3: cfg.Redis.Options.UnstableResp3,
}
// redis TLS config
if cfg.Redis.TLS.Certificate != "" || cfg.Redis.TLS.Key != "" {
var err error
tlsConf := &tls.Config{}
tlsConf.Certificates = make([]tls.Certificate, 1)
tlsConf.Certificates[0], err = tls.LoadX509KeyPair(cfg.Redis.TLS.Certificate, cfg.Redis.TLS.Key)
if err != nil {
panic(err)
}
if len(cfg.Redis.TLS.RootCAs) != 0 {
pool := x509.NewCertPool()
for _, ca := range cfg.Redis.TLS.RootCAs {
caPem, err := os.ReadFile(ca)
if err != nil {
dcontext.GetLogger(app).Errorf("failed reading redis client CA: %v", err)
return
}
if ok := pool.AppendCertsFromPEM(caPem); !ok {
dcontext.GetLogger(app).Error("could not add CA to pool")
return
}
}
tlsConf.RootCAs = pool
}
opts.TLSConfig = tlsConf
}
app.redis = app.createPool(opts)
// Enable metrics instrumentation.
if err := redisotel.InstrumentMetrics(app.redis); err != nil {
dcontext.GetLogger(app).Errorf("failed to instrument metrics on redis: %v", err)
}
// setup expvar
registry := expvar.Get("registry")
if registry == nil {
registry = expvar.NewMap("registry")
}
registry.(*expvar.Map).Set("redis", expvar.Func(func() interface{} {
stats := app.redis.PoolStats()
return map[string]interface{}{
"Config": cfg,
"Active": stats.TotalConns - stats.IdleConns,
}
}))
}
func (app *App) createPool(cfg redis.UniversalOptions) redis.UniversalClient {
cfg.OnConnect = func(ctx context.Context, cn *redis.Conn) error {
res := cn.Ping(ctx)
return res.Err()
}
return redis.NewUniversalClient(&cfg)
}
// configureLogHook prepares logging hook parameters.
func (app *App) configureLogHook(configuration *configuration.Configuration) {
entry, ok := dcontext.GetLogger(app).(*logrus.Entry)
if !ok {
// somehow, we are not using logrus
return
}
logger := entry.Logger
for _, configHook := range configuration.Log.Hooks {
if !configHook.Disabled {
switch configHook.Type {
case "mail":
hook := &logHook{}
hook.LevelsParam = configHook.Levels
hook.Mail = &mailer{
Addr: configHook.MailOptions.SMTP.Addr,
Username: configHook.MailOptions.SMTP.Username,
Password: configHook.MailOptions.SMTP.Password,
Insecure: configHook.MailOptions.SMTP.Insecure,
From: configHook.MailOptions.From,
To: configHook.MailOptions.To,
}
logger.Hooks.Add(hook)
default:
}
}
}
}
// configureSecret creates a random secret if a secret wasn't included in the
// configuration.
func (app *App) configureSecret(configuration *configuration.Configuration) {
if configuration.HTTP.Secret == "" {
var secretBytes [randomSecretSize]byte
if _, err := rand.Read(secretBytes[:]); err != nil {
panic(fmt.Sprintf("could not generate random bytes for HTTP secret: %v", err))
}
configuration.HTTP.Secret = string(secretBytes[:])
dcontext.GetLogger(app).Warn("No HTTP secret provided - generated random secret. This may cause problems with uploads if multiple registries are behind a load-balancer. To provide a shared secret, fill in http.secret in the configuration file or set the REGISTRY_HTTP_SECRET environment variable.")
}
}
func (app *App) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Prepare the context with our own little decorations.
ctx := r.Context()
ctx = dcontext.WithRequest(ctx, r)
ctx, w = dcontext.WithResponseWriter(ctx, w)
ctx = dcontext.WithLogger(ctx, dcontext.GetRequestLogger(ctx))
r = r.WithContext(ctx)
// Set a header with the Docker Distribution API Version for all responses.
w.Header().Add("Docker-Distribution-API-Version", "registry/2.0")
app.router.ServeHTTP(w, r)
}
// dispatchFunc takes a context and request and returns a constructed handler
// for the route. The dispatcher will use this to dynamically create request
// specific handlers for each endpoint without creating a new router for each
// request.
type dispatchFunc func(ctx *Context, r *http.Request) http.Handler
// TODO(stevvooe): dispatchers should probably have some validation error
// chain with proper error reporting.
// dispatcher returns a handler that constructs a request specific context and
// handler, using the dispatch factory function.
func (app *App) dispatcher(dispatch dispatchFunc) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
for headerName, headerValues := range app.Config.HTTP.Headers {
for _, value := range headerValues {
w.Header().Add(headerName, value)
}
}
context := app.context(w, r)
defer func() {
// Automated error response handling here. Handlers may return their
// own errors if they need different behavior (such as range errors
// for layer upload).
if context.Errors.Len() > 0 {
_ = errcode.ServeJSON(w, context.Errors)
app.logError(context, context.Errors)
} else if status, ok := context.Value("http.response.status").(int); ok && status >= 200 && status <= 399 {
dcontext.GetResponseLogger(context).Infof("response completed")
}
}()
if err := app.authorized(w, r, context); err != nil {
dcontext.GetLogger(context).Warnf("error authorizing context: %v", err)
return
}
// Add username to request logging
context.Context = dcontext.WithLogger(context.Context, dcontext.GetLogger(context.Context, userNameKey))
// sync up context on the request.
r = r.WithContext(context)
if app.nameRequired(r) {
nameRef, err := reference.WithName(getName(context))
if err != nil {
dcontext.GetLogger(context).Errorf("error parsing reference from context: %v", err)
context.Errors = append(context.Errors, distribution.ErrRepositoryNameInvalid{
Name: getName(context),
Reason: err,
})
if err := errcode.ServeJSON(w, context.Errors); err != nil {
dcontext.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors)
}
return
}
repository, err := app.registry.Repository(context, nameRef)
if err != nil {
dcontext.GetLogger(context).Errorf("error resolving repository: %v", err)
switch err := err.(type) {
case distribution.ErrRepositoryUnknown:
context.Errors = append(context.Errors, errcode.ErrorCodeNameUnknown.WithDetail(err))
case distribution.ErrRepositoryNameInvalid:
context.Errors = append(context.Errors, errcode.ErrorCodeNameInvalid.WithDetail(err))
case errcode.Error:
context.Errors = append(context.Errors, err)
}
if err := errcode.ServeJSON(w, context.Errors); err != nil {
dcontext.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors)
}
return
}
// assign and decorate the authorized repository with an event bridge.
context.Repository, context.RepositoryRemover = notifications.Listen(
repository,
context.App.repoRemover,
app.eventBridge(context, r))
context.Repository, err = applyRepoMiddleware(app, context.Repository, app.Config.Middleware["repository"])
if err != nil {
dcontext.GetLogger(context).Errorf("error initializing repository middleware: %v", err)
context.Errors = append(context.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
if err := errcode.ServeJSON(w, context.Errors); err != nil {
dcontext.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors)
}
return
}
}
dispatch(context, r).ServeHTTP(w, r)
})
}
type errCodeKey struct{}
func (errCodeKey) String() string { return "err.code" }
type errMessageKey struct{}
func (errMessageKey) String() string { return "err.message" }
type errDetailKey struct{}
func (errDetailKey) String() string { return "err.detail" }
func (app *App) logError(ctx context.Context, errors errcode.Errors) {
for _, e1 := range errors {
var c context.Context
switch e := e1.(type) {
case errcode.Error:
c = context.WithValue(ctx, errCodeKey{}, e.Code)
c = context.WithValue(c, errMessageKey{}, e.Message)
c = context.WithValue(c, errDetailKey{}, e.Detail)
case errcode.ErrorCode:
c = context.WithValue(ctx, errCodeKey{}, e)
c = context.WithValue(c, errMessageKey{}, e.Message())
default:
// just normal go 'error'
c = context.WithValue(ctx, errCodeKey{}, errcode.ErrorCodeUnknown)
c = context.WithValue(c, errMessageKey{}, e.Error())
}
c = dcontext.WithLogger(c, dcontext.GetLogger(c,
errCodeKey{},
errMessageKey{},
errDetailKey{}))
dcontext.GetResponseLogger(c).Errorf("response completed with error")
}
}
// context constructs the context object for the application. This should
// only be called once per request.
func (app *App) context(w http.ResponseWriter, r *http.Request) *Context {
ctx := r.Context()
ctx = dcontext.WithVars(ctx, r)
ctx = dcontext.WithLogger(ctx, dcontext.GetLogger(ctx,
"vars.name",
"vars.reference",
"vars.digest",
"vars.uuid"))
context := &Context{
App: app,
Context: ctx,
}
if app.httpHost.Scheme != "" && app.httpHost.Host != "" {
// A "host" item in the configuration takes precedence over
// X-Forwarded-Proto and X-Forwarded-Host headers, and the
// hostname in the request.
context.urlBuilder = v2.NewURLBuilder(&app.httpHost, false)
} else {
context.urlBuilder = v2.NewURLBuilderFromRequest(r, app.Config.HTTP.RelativeURLs)
}
return context
}
// authorized checks if the request can proceed with access to the requested
// repository. If it succeeds, the context may access the requested
// repository. An error will be returned if access is not available.
func (app *App) authorized(w http.ResponseWriter, r *http.Request, context *Context) error {
dcontext.GetLogger(context).Debug("authorizing request")
repo := getName(context)
if app.accessController == nil {
return nil // access controller is not enabled.
}
var accessRecords []auth.Access
if repo != "" {
accessRecords = appendAccessRecords(accessRecords, r.Method, repo)
if fromRepo := r.FormValue("from"); fromRepo != "" {
// mounting a blob from one repository to another requires pull (GET)
// access to the source repository.
accessRecords = appendAccessRecords(accessRecords, http.MethodGet, fromRepo)
}
} else {
// Only allow the name not to be set on the base route.
if app.nameRequired(r) {
// For this to be properly secured, repo must always be set for a
// resource that may make a modification. The only condition under
// which name is not set and we still allow access is when the
// base route is accessed. This section prevents us from making
// that mistake elsewhere in the code, allowing any operation to
// proceed.
if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized); err != nil {
dcontext.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors)
}
return fmt.Errorf("forbidden: no repository name")
}
accessRecords = appendCatalogAccessRecord(accessRecords, r)
}
grant, err := app.accessController.Authorized(r.WithContext(context.Context), accessRecords...)
if err != nil {
switch err := err.(type) {
case auth.Challenge:
// Add the appropriate WWW-Auth header
err.SetHeaders(r, w)
if err := errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized.WithDetail(accessRecords)); err != nil {
dcontext.GetLogger(context).Errorf("error serving error json: %v (from %v)", err, context.Errors)
}
default:
// This condition is a potential security problem either in
// the configuration or whatever is backing the access
// controller. Just return a bad request with no information
// to avoid exposure. The request should not proceed.
dcontext.GetLogger(context).Errorf("error checking authorization: %v", err)
w.WriteHeader(http.StatusBadRequest)
}
return err
}
if grant == nil {
return fmt.Errorf("access controller returned neither an access grant nor an error")
}
ctx := withUser(context.Context, grant.User)
ctx = withResources(ctx, grant.Resources)
dcontext.GetLogger(ctx, userNameKey).Info("authorized request")
// TODO(stevvooe): This pattern needs to be cleaned up a bit. One context
// should be replaced by another, rather than replacing the context on a
// mutable object.
context.Context = ctx
return nil
}
// eventBridge returns a bridge for the current request, configured with the
// correct actor and source.
func (app *App) eventBridge(ctx *Context, r *http.Request) notifications.Listener {
actor := notifications.ActorRecord{
Name: getUserName(ctx, r),
}
request := notifications.NewRequestRecord(dcontext.GetRequestID(ctx), r)
return notifications.NewBridge(ctx.urlBuilder, app.events.source, actor, request, app.events.sink, app.Config.Notifications.EventConfig.IncludeReferences)
}
// nameRequired returns true if the route requires a name.
func (app *App) nameRequired(r *http.Request) bool {
route := mux.CurrentRoute(r)
if route == nil {
return true
}
routeName := route.GetName()
return routeName != v2.RouteNameBase && routeName != v2.RouteNameCatalog
}
// apiBase implements a simple yes-man for doing overall checks against the
// api. This can support auth roundtrips to support docker login.
func apiBase(w http.ResponseWriter, r *http.Request) {
const emptyJSON = "{}"
// Provide a simple /v2/ 200 OK response with empty json response.
w.Header().Set("Content-Type", "application/json")
w.Header().Set("Content-Length", fmt.Sprint(len(emptyJSON)))
fmt.Fprint(w, emptyJSON)
}
// appendAccessRecords checks the method and adds the appropriate Access records to the records list.
func appendAccessRecords(records []auth.Access, method string, repo string) []auth.Access {
resource := auth.Resource{
Type: "repository",
Name: repo,
}
switch method {
case http.MethodGet, http.MethodHead:
records = append(records,
auth.Access{
Resource: resource,
Action: "pull",
})
case http.MethodPost, http.MethodPut, http.MethodPatch:
records = append(records,
auth.Access{
Resource: resource,
Action: "pull",
},
auth.Access{
Resource: resource,
Action: "push",
})
case http.MethodDelete:
records = append(records,
auth.Access{
Resource: resource,
Action: "delete",
})
}
return records
}
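// For example, appendAccessRecords(nil, http.MethodPut, "foo/bar") yields
// records equivalent to:
//
// []auth.Access{
// {Resource: auth.Resource{Type: "repository", Name: "foo/bar"}, Action: "pull"},
// {Resource: auth.Resource{Type: "repository", Name: "foo/bar"}, Action: "push"},
// }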
// Add the access record for the catalog if it's our current route
func appendCatalogAccessRecord(accessRecords []auth.Access, r *http.Request) []auth.Access {
route := mux.CurrentRoute(r)
routeName := route.GetName()
if routeName == v2.RouteNameCatalog {
resource := auth.Resource{
Type: "registry",
Name: "catalog",
}
accessRecords = append(accessRecords,
auth.Access{
Resource: resource,
Action: "*",
})
}
return accessRecords
}
// applyRegistryMiddleware wraps a registry instance with the configured middlewares
func applyRegistryMiddleware(ctx context.Context, registry distribution.Namespace, driver storagedriver.StorageDriver, middlewares []configuration.Middleware) (distribution.Namespace, error) {
for _, mw := range middlewares {
rmw, err := registrymiddleware.Get(ctx, mw.Name, mw.Options, registry, driver)
if err != nil {
return nil, fmt.Errorf("unable to configure registry middleware (%s): %s", mw.Name, err)
}
registry = rmw
}
return registry, nil
}
// applyRepoMiddleware wraps a repository with the configured middlewares
func applyRepoMiddleware(ctx context.Context, repository distribution.Repository, middlewares []configuration.Middleware) (distribution.Repository, error) {
for _, mw := range middlewares {
rmw, err := repositorymiddleware.Get(ctx, mw.Name, mw.Options, repository)
if err != nil {
return nil, err
}
repository = rmw
}
return repository, nil
}
// applyStorageMiddleware wraps a storage driver with the configured middlewares
func applyStorageMiddleware(ctx context.Context, driver storagedriver.StorageDriver, middlewares []configuration.Middleware) (storagedriver.StorageDriver, error) {
for _, mw := range middlewares {
smw, err := storagemiddleware.Get(ctx, mw.Name, mw.Options, driver)
if err != nil {
return nil, fmt.Errorf("unable to configure storage middleware (%s): %v", mw.Name, err)
}
driver = smw
}
return driver, nil
}
// uploadPurgeDefaultConfig provides a default configuration for upload
// purging to be used in the absence of configuration in the
// configuration file
func uploadPurgeDefaultConfig() map[interface{}]interface{} {
config := map[interface{}]interface{}{}
config["enabled"] = true
config["age"] = "168h"
config["interval"] = "24h"
config["dryrun"] = false
return config
}
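// The defaults above correspond to a configuration section along these
// lines (YAML sketch):
//
// maintenance:
//   uploadpurging:
//     enabled: true
//     age: 168h
//     interval: 24h
//     dryrun: false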
func badPurgeUploadConfig(reason string) {
panic(fmt.Sprintf("Unable to parse upload purge configuration: %s", reason))
}
// startUploadPurger schedules a goroutine which will periodically
// check upload directories for old files and delete them
func startUploadPurger(ctx context.Context, storageDriver storagedriver.StorageDriver, log dcontext.Logger, config map[interface{}]interface{}) {
if config["enabled"] == false {
return
}
var purgeAgeDuration time.Duration
var err error
purgeAge, ok := config["age"]
if ok {
ageStr, ok := purgeAge.(string)
if !ok {
badPurgeUploadConfig("age is not a string")
}
purgeAgeDuration, err = time.ParseDuration(ageStr)
if err != nil {
badPurgeUploadConfig(fmt.Sprintf("Cannot parse duration: %s", err.Error()))
}
} else {
badPurgeUploadConfig("age missing")
}
var intervalDuration time.Duration
interval, ok := config["interval"]
if ok {
intervalStr, ok := interval.(string)
if !ok {
badPurgeUploadConfig("interval is not a string")
}
intervalDuration, err = time.ParseDuration(intervalStr)
if err != nil {
badPurgeUploadConfig(fmt.Sprintf("Cannot parse interval: %s", err.Error()))
}
} else {
badPurgeUploadConfig("interval missing")
}
var dryRunBool bool
dryRun, ok := config["dryrun"]
if ok {
dryRunBool, ok = dryRun.(bool)
if !ok {
badPurgeUploadConfig("cannot parse dryrun")
}
} else {
badPurgeUploadConfig("dryrun missing")
}
go func() {
randInt, err := rand.Int(rand.Reader, new(big.Int).SetInt64(math.MaxInt64))
if err != nil {
log.Infof("Failed to generate random jitter: %v", err)
// sleep 30min for failure case
randInt = big.NewInt(30)
}
jitter := time.Duration(randInt.Int64()%60) * time.Minute
log.Infof("Starting upload purge in %s", jitter)
time.Sleep(jitter)
for {
storage.PurgeUploads(ctx, storageDriver, time.Now().Add(-purgeAgeDuration), !dryRunBool)
log.Infof("Starting upload purge in %s", intervalDuration)
time.Sleep(intervalDuration)
}
}()
}
package handlers
import (
"net/http"
)
func basicAuth(r *http.Request) (username, password string, ok bool) {
return r.BasicAuth()
}
package handlers
import (
"net/http"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/internal/dcontext"
"github.com/distribution/distribution/v3/registry/api/errcode"
"github.com/gorilla/handlers"
"github.com/opencontainers/go-digest"
)
// blobDispatcher uses the request context to build a blobHandler.
func blobDispatcher(ctx *Context, r *http.Request) http.Handler {
dgst, err := getDigest(ctx)
if err != nil {
// A missing digest and an unparsable digest both produce the same
// digest-invalid error response.
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx.Errors = append(ctx.Errors, errcode.ErrorCodeDigestInvalid.WithDetail(err))
})
}
blobHandler := &blobHandler{
Context: ctx,
Digest: dgst,
}
mhandler := handlers.MethodHandler{
http.MethodGet: http.HandlerFunc(blobHandler.GetBlob),
http.MethodHead: http.HandlerFunc(blobHandler.GetBlob),
}
if !ctx.readOnly {
mhandler[http.MethodDelete] = http.HandlerFunc(blobHandler.DeleteBlob)
}
return mhandler
}
// blobHandler serves http blob requests.
type blobHandler struct {
*Context
Digest digest.Digest
}
// GetBlob fetches the binary data from backend storage and returns it in
// the response.
func (bh *blobHandler) GetBlob(w http.ResponseWriter, r *http.Request) {
dcontext.GetLogger(bh).Debug("GetBlob")
blobs := bh.Repository.Blobs(bh)
desc, err := blobs.Stat(bh, bh.Digest)
if err != nil {
if err == distribution.ErrBlobUnknown {
bh.Errors = append(bh.Errors, errcode.ErrorCodeBlobUnknown.WithDetail(bh.Digest))
} else {
bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
}
return
}
if err := blobs.ServeBlob(bh, w, r, desc.Digest); err != nil {
dcontext.GetLogger(bh).Debugf("unexpected error getting blob HTTP handler: %v", err)
bh.Errors = append(bh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
}
// DeleteBlob deletes a layer blob
func (bh *blobHandler) DeleteBlob(w http.ResponseWriter, r *http.Request) {
dcontext.GetLogger(bh).Debug("DeleteBlob")
blobs := bh.Repository.Blobs(bh)
err := blobs.Delete(bh, bh.Digest)
if err != nil {
switch err {
case distribution.ErrUnsupported:
bh.Errors = append(bh.Errors, errcode.ErrorCodeUnsupported)
return
case distribution.ErrBlobUnknown:
bh.Errors = append(bh.Errors, errcode.ErrorCodeBlobUnknown)
return
default:
bh.Errors = append(bh.Errors, err)
dcontext.GetLogger(bh).Errorf("Unknown error deleting blob: %s", err.Error())
return
}
}
w.Header().Set("Content-Length", "0")
w.WriteHeader(http.StatusAccepted)
}
package handlers
import (
"fmt"
"net/http"
"net/url"
"strconv"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/internal/dcontext"
"github.com/distribution/distribution/v3/registry/api/errcode"
"github.com/distribution/distribution/v3/registry/storage"
"github.com/distribution/reference"
"github.com/gorilla/handlers"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// blobUploadDispatcher constructs and returns the blob upload handler for the
// given request context.
func blobUploadDispatcher(ctx *Context, r *http.Request) http.Handler {
buh := &blobUploadHandler{
Context: ctx,
UUID: getUploadUUID(ctx),
}
handler := handlers.MethodHandler{
http.MethodGet: http.HandlerFunc(buh.GetUploadStatus),
http.MethodHead: http.HandlerFunc(buh.GetUploadStatus),
}
if !ctx.readOnly {
handler[http.MethodPost] = http.HandlerFunc(buh.StartBlobUpload)
handler[http.MethodPatch] = http.HandlerFunc(buh.PatchBlobData)
handler[http.MethodPut] = http.HandlerFunc(buh.PutBlobUploadComplete)
handler[http.MethodDelete] = http.HandlerFunc(buh.CancelBlobUpload)
}
if buh.UUID != "" {
if r.Method == http.MethodGet || r.Method == http.MethodHead {
return handler
}
if h := buh.ResumeBlobUpload(ctx, r); h != nil {
return h
}
return closeResources(handler, buh.Upload)
}
return handler
}
// blobUploadHandler handles the http blob upload process.
type blobUploadHandler struct {
*Context
// UUID identifies the upload instance for the current request and is used
// to key the corresponding blob writer.
UUID string
Upload distribution.BlobWriter
State blobUploadState
}
// StartBlobUpload begins the blob upload process and allocates a server-side
// blob writer session, optionally mounting the blob from a separate repository.
func (buh *blobUploadHandler) StartBlobUpload(w http.ResponseWriter, r *http.Request) {
var options []distribution.BlobCreateOption
fromRepo := r.FormValue("from")
mountDigest := r.FormValue("mount")
if mountDigest != "" && fromRepo != "" {
opt, err := buh.createBlobMountOption(fromRepo, mountDigest)
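// A failure to build the mount option is non-fatal: the option is simply
// omitted and the request falls back to a regular blob upload.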
if opt != nil && err == nil {
options = append(options, opt)
}
}
blobs := buh.Repository.Blobs(buh)
upload, err := blobs.Create(buh, options...)
if err != nil {
if ebm, ok := err.(distribution.ErrBlobMounted); ok {
if err := buh.writeBlobCreatedHeaders(w, ebm.Descriptor); err != nil {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
}
} else if err == distribution.ErrUnsupported {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported)
} else {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
}
return
}
buh.Upload = upload
if err := buh.blobUploadResponse(w, r); err != nil {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
w.Header().Set("Docker-Upload-UUID", buh.Upload.ID())
w.WriteHeader(http.StatusAccepted)
}
// GetUploadStatus returns the status of a given upload, identified by id.
func (buh *blobUploadHandler) GetUploadStatus(w http.ResponseWriter, r *http.Request) {
if buh.Upload == nil {
blobs := buh.Repository.Blobs(buh)
upload, err := blobs.Resume(buh, buh.UUID)
if err != nil {
if err == distribution.ErrBlobUploadUnknown {
buh.Errors = append(buh.Errors, errcode.ErrorCodeBlobUploadUnknown.WithDetail(err))
} else {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
}
return
}
buh.Upload = upload
}
if err := buh.blobUploadResponse(w, r); err != nil {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
w.WriteHeader(http.StatusNoContent)
}
// PatchBlobData writes data to an upload.
func (buh *blobUploadHandler) PatchBlobData(w http.ResponseWriter, r *http.Request) {
if buh.Upload == nil {
buh.Errors = append(buh.Errors, errcode.ErrorCodeBlobUploadUnknown)
return
}
ct := r.Header.Get("Content-Type")
if ct != "" && ct != "application/octet-stream" {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(fmt.Errorf("bad Content-Type")))
// TODO(dmcgowan): encode error
return
}
cr := r.Header.Get("Content-Range")
cl := r.Header.Get("Content-Length")
if cr != "" && cl != "" {
start, end, err := parseContentRange(cr)
if err != nil {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err.Error()))
return
}
if start > end || start != buh.Upload.Size() {
buh.Errors = append(buh.Errors, errcode.ErrorCodeRangeInvalid)
return
}
clInt, err := strconv.ParseInt(cl, 10, 64)
if err != nil {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err.Error()))
return
}
if clInt != (end-start)+1 {
buh.Errors = append(buh.Errors, errcode.ErrorCodeSizeInvalid)
return
}
}
if err := copyFullPayload(buh, w, r, buh.Upload, -1, "blob PATCH"); err != nil {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err.Error()))
return
}
if err := buh.blobUploadResponse(w, r); err != nil {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
w.WriteHeader(http.StatusAccepted)
}
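// For reference, a chunked PATCH request carries headers of roughly this
// shape, where Content-Length must equal (end - start) + 1 and start must
// match the current upload offset:
//
// PATCH /v2/<name>/blobs/uploads/<uuid>
// Content-Type: application/octet-stream
// Content-Range: 0-1023
// Content-Length: 1024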
// PutBlobUploadComplete takes the final request of a blob upload. The
// request may include all the blob data or no blob data. Any data
// provided is received and verified. If successful, the blob is linked
// into the blob store and 201 Created is returned with the canonical
// url of the blob.
func (buh *blobUploadHandler) PutBlobUploadComplete(w http.ResponseWriter, r *http.Request) {
if buh.Upload == nil {
buh.Errors = append(buh.Errors, errcode.ErrorCodeBlobUploadUnknown)
return
}
defer buh.Upload.Close()
dgstStr := r.FormValue("digest") // TODO(stevvooe): Support multiple digest parameters!
if dgstStr == "" {
// no digest? return error, but allow retry.
buh.Errors = append(buh.Errors, errcode.ErrorCodeDigestInvalid.WithDetail("digest missing"))
return
}
dgst, err := digest.Parse(dgstStr)
if err != nil {
// bad digest? return error, but allow retry.
buh.Errors = append(buh.Errors, errcode.ErrorCodeDigestInvalid.WithDetail("digest parsing failed"))
return
}
if err := copyFullPayload(buh, w, r, buh.Upload, -1, "blob PUT"); err != nil {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err.Error()))
return
}
desc, err := buh.Upload.Commit(buh, v1.Descriptor{
Digest: dgst,
// TODO(stevvooe): This isn't wildly important yet, but we should
// really set the mediatype. For now, we can let the backend take care
// of this.
})
if err != nil {
switch err := err.(type) {
case distribution.ErrBlobInvalidDigest:
buh.Errors = append(buh.Errors, errcode.ErrorCodeDigestInvalid.WithDetail(err))
case errcode.Error:
buh.Errors = append(buh.Errors, err)
default:
switch err {
case distribution.ErrAccessDenied:
buh.Errors = append(buh.Errors, errcode.ErrorCodeDenied)
case distribution.ErrUnsupported:
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnsupported)
case distribution.ErrBlobInvalidLength, distribution.ErrBlobDigestUnsupported:
buh.Errors = append(buh.Errors, errcode.ErrorCodeBlobUploadInvalid.WithDetail(err))
default:
dcontext.GetLogger(buh).Errorf("unknown error completing upload: %v", err)
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
}
}
// Clean up the backend blob data if there was an error.
if err := buh.Upload.Cancel(buh); err != nil {
// If the cleanup fails, all we can do is observe and report.
dcontext.GetLogger(buh).Errorf("error canceling upload after error: %v", err)
}
return
}
if err := buh.writeBlobCreatedHeaders(w, desc); err != nil {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
}
// CancelBlobUpload cancels an in-progress upload of a blob.
func (buh *blobUploadHandler) CancelBlobUpload(w http.ResponseWriter, r *http.Request) {
if buh.Upload == nil {
buh.Errors = append(buh.Errors, errcode.ErrorCodeBlobUploadUnknown)
return
}
defer buh.Upload.Close()
w.Header().Set("Docker-Upload-UUID", buh.UUID)
if err := buh.Upload.Cancel(buh); err != nil {
dcontext.GetLogger(buh).Errorf("error encountered canceling upload: %v", err)
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
}
w.WriteHeader(http.StatusNoContent)
}
func (buh *blobUploadHandler) ResumeBlobUpload(ctx *Context, r *http.Request) http.Handler {
state, err := hmacKey(ctx.Config.HTTP.Secret).unpackUploadState(r.FormValue("_state"))
if err != nil {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
dcontext.GetLogger(ctx).Infof("error resolving upload: %v", err)
buh.Errors = append(buh.Errors, errcode.ErrorCodeBlobUploadInvalid.WithDetail(err))
})
}
buh.State = state
if state.Name != ctx.Repository.Named().Name() {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
dcontext.GetLogger(ctx).Infof("mismatched repository name in upload state: %q != %q", state.Name, buh.Repository.Named().Name())
buh.Errors = append(buh.Errors, errcode.ErrorCodeBlobUploadInvalid.WithDetail("mismatched repository name in upload state"))
})
}
if state.UUID != buh.UUID {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
dcontext.GetLogger(ctx).Infof("mismatched uuid in upload state: %q != %q", state.UUID, buh.UUID)
buh.Errors = append(buh.Errors, errcode.ErrorCodeBlobUploadInvalid.WithDetail("mismatched uuid in upload state"))
})
}
blobs := ctx.Repository.Blobs(buh)
upload, err := blobs.Resume(buh, buh.UUID)
if err != nil {
dcontext.GetLogger(ctx).Errorf("error resolving upload: %v", err)
if err == distribution.ErrBlobUploadUnknown {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
buh.Errors = append(buh.Errors, errcode.ErrorCodeBlobUploadUnknown.WithDetail(err))
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
buh.Errors = append(buh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
})
}
buh.Upload = upload
if size := upload.Size(); size != buh.State.Offset {
dcontext.GetLogger(ctx).Errorf("upload resumed at wrong offset: %d != %d", size, buh.State.Offset)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
buh.Errors = append(buh.Errors, errcode.ErrorCodeRangeInvalid.WithDetail(fmt.Sprintf("upload resumed at wrong offset: %d != %d", size, buh.State.Offset)))
})
}
return nil
}
// blobUploadResponse writes the standard headers for blob upload and chunk
// responses. It sets the correct headers, but the response status is left
// to the caller.
func (buh *blobUploadHandler) blobUploadResponse(w http.ResponseWriter, r *http.Request) error {
// TODO(stevvooe): Need a better way to manage the upload state automatically.
buh.State.Name = buh.Repository.Named().Name()
buh.State.UUID = buh.Upload.ID()
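// Closing the upload before reading its size flushes any buffered data so
// that Size reflects all bytes written so far.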
buh.Upload.Close()
buh.State.Offset = buh.Upload.Size()
buh.State.StartedAt = buh.Upload.StartedAt()
token, err := hmacKey(buh.Config.HTTP.Secret).packUploadState(buh.State)
if err != nil {
dcontext.GetLogger(buh).Infof("error building upload state token: %s", err)
return err
}
uploadURL, err := buh.urlBuilder.BuildBlobUploadChunkURL(
buh.Repository.Named(), buh.Upload.ID(),
url.Values{
"_state": []string{token},
})
if err != nil {
dcontext.GetLogger(buh).Infof("error building upload url: %s", err)
return err
}
endRange := buh.Upload.Size()
if endRange > 0 {
endRange = endRange - 1
}
w.Header().Set("Docker-Upload-UUID", buh.UUID)
w.Header().Set("Location", uploadURL)
w.Header().Set("Content-Length", "0")
w.Header().Set("Range", fmt.Sprintf("0-%d", endRange))
return nil
}
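// The resulting response carries headers of roughly this shape (values
// illustrative):
//
// Docker-Upload-UUID: <uuid>
// Location: /v2/<name>/blobs/uploads/<uuid>?_state=<token>
// Range: 0-1023
// Content-Length: 0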
// createBlobMountOption builds a BlobCreateOption that requests mounting a
// blob from another repository by its digest. If the mount succeeds, the
// blob is linked into the blob store and 201 Created is returned with the
// canonical url of the blob.
func (buh *blobUploadHandler) createBlobMountOption(fromRepo, mountDigest string) (distribution.BlobCreateOption, error) {
dgst, err := digest.Parse(mountDigest)
if err != nil {
return nil, err
}
ref, err := reference.WithName(fromRepo)
if err != nil {
return nil, err
}
canonical, err := reference.WithDigest(ref, dgst)
if err != nil {
return nil, err
}
return storage.WithMountFrom(canonical), nil
}
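// The option corresponds to a cross-repository mount request of the form:
//
// POST /v2/<name>/blobs/uploads/?mount=<digest>&from=<repository>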
// writeBlobCreatedHeaders writes the standard headers describing a newly
// created blob. A 201 Created is written as well as the canonical URL and
// blob digest.
func (buh *blobUploadHandler) writeBlobCreatedHeaders(w http.ResponseWriter, desc v1.Descriptor) error {
ref, err := reference.WithDigest(buh.Repository.Named(), desc.Digest)
if err != nil {
return err
}
blobURL, err := buh.urlBuilder.BuildBlobURL(ref)
if err != nil {
return err
}
w.Header().Set("Location", blobURL)
w.Header().Set("Content-Length", "0")
w.Header().Set("Docker-Content-Digest", desc.Digest.String())
w.WriteHeader(http.StatusCreated)
return nil
}
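// A successful creation thus responds along these lines (digest value
// illustrative):
//
// HTTP/1.1 201 Created
// Location: /v2/<name>/blobs/<digest>
// Docker-Content-Digest: <digest>
// Content-Length: 0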
package handlers
import (
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"github.com/distribution/distribution/v3/registry/api/errcode"
"github.com/distribution/distribution/v3/registry/storage/driver"
"github.com/gorilla/handlers"
)
const defaultReturnedEntries = 100
func catalogDispatcher(ctx *Context, r *http.Request) http.Handler {
catalogHandler := &catalogHandler{
Context: ctx,
}
return handlers.MethodHandler{
http.MethodGet: http.HandlerFunc(catalogHandler.GetCatalog),
}
}
type catalogHandler struct {
*Context
}
type catalogAPIResponse struct {
Repositories []string `json:"repositories"`
}
func (ch *catalogHandler) GetCatalog(w http.ResponseWriter, r *http.Request) {
moreEntries := true
q := r.URL.Query()
lastEntry := q.Get("last")
entries := defaultReturnedEntries
maximumConfiguredEntries := ch.App.Config.Catalog.MaxEntries
// parse n, if n is negative abort with an error
if n := q.Get("n"); n != "" {
parsedMax, err := strconv.Atoi(n)
if err != nil || parsedMax < 0 {
ch.Errors = append(ch.Errors, errcode.ErrorCodePaginationNumberInvalid.WithDetail(map[string]string{"n": n}))
return
}
// if a client requests more than it's allowed to receive
if parsedMax > maximumConfiguredEntries {
ch.Errors = append(ch.Errors, errcode.ErrorCodePaginationNumberInvalid.WithDetail(map[string]int{"n": parsedMax}))
return
}
entries = parsedMax
}
// then clamp entries to the range [0, maximumConfiguredEntries]; this
// catches the case where the default page size exceeds the configured maximum
if entries < 0 || entries > maximumConfiguredEntries {
entries = maximumConfiguredEntries
}
repos := make([]string, entries)
filled := 0
// entries is guaranteed to be >= 0 and <= maximumConfiguredEntries
if entries == 0 {
moreEntries = false
} else {
returnedRepositories, err := ch.App.registry.Repositories(ch.Context, repos, lastEntry)
if err != nil {
_, pathNotFound := err.(driver.PathNotFoundError)
if err != io.EOF && !pathNotFound {
ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
// err is either io.EOF or a PathNotFoundError; either way, there are no more entries
moreEntries = false
}
filled = returnedRepositories
}
w.Header().Set("Content-Type", "application/json")
// Add a link header if there are more entries to retrieve
if moreEntries {
lastEntry = repos[filled-1]
urlStr, err := createLinkEntry(r.URL.String(), entries, lastEntry)
if err != nil {
ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
w.Header().Set("Link", urlStr)
}
enc := json.NewEncoder(w)
if err := enc.Encode(catalogAPIResponse{
Repositories: repos[0:filled],
}); err != nil {
ch.Errors = append(ch.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
}
// createLinkEntry uses the original URL from the request to build the value
// of the Link header pointing at the next page of results.
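// For example, maxEntries=100 and lastEntry="library/redis" produce:
//
//	</v2/_catalog?last=library%2Fredis&n=100>; rel="next"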
func createLinkEntry(origURL string, maxEntries int, lastEntry string) (string, error) {
calledURL, err := url.Parse(origURL)
if err != nil {
return "", err
}
v := url.Values{}
v.Add("n", strconv.Itoa(maxEntries))
v.Add("last", lastEntry)
calledURL.RawQuery = v.Encode()
calledURL.Fragment = ""
urlStr := fmt.Sprintf("<%s>; rel=\"next\"", calledURL.String())
return urlStr, nil
}
package handlers
import (
"context"
"fmt"
"net/http"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/internal/dcontext"
"github.com/distribution/distribution/v3/registry/api/errcode"
v2 "github.com/distribution/distribution/v3/registry/api/v2"
"github.com/distribution/distribution/v3/registry/auth"
"github.com/opencontainers/go-digest"
)
// Context contains the request-specific state for use across handlers.
// Resources that don't need to be shared across handlers should not be on
// this object.
type Context struct {
// App points to the application structure that created this context.
*App
context.Context
// Repository is the repository for the current request. All requests
// should be scoped to a single repository. This field may be nil.
Repository distribution.Repository
// RepositoryRemover provides method to delete a repository
RepositoryRemover distribution.RepositoryRemover
// Errors is a collection of errors encountered during the request to be
// returned to the client API. If errors are added to the collection, the
// handler *must not* start the response via http.ResponseWriter.
Errors errcode.Errors
urlBuilder *v2.URLBuilder
// TODO(stevvooe): The goal is to completely factor this context and
// dispatching out of the web application. Ideally, we should lean on
// context.Context for injection of these resources.
}
// Value overrides context.Context.Value to ensure that calls are routed to
// correct context.
func (ctx *Context) Value(key interface{}) interface{} {
return ctx.Context.Value(key)
}
func getName(ctx context.Context) (name string) {
return dcontext.GetStringValue(ctx, "vars.name")
}
func getReference(ctx context.Context) (reference string) {
return dcontext.GetStringValue(ctx, "vars.reference")
}
var errDigestNotAvailable = fmt.Errorf("digest not available in context")
func getDigest(ctx context.Context) (dgst digest.Digest, err error) {
dgstStr := dcontext.GetStringValue(ctx, "vars.digest")
if dgstStr == "" {
dcontext.GetLogger(ctx).Errorf("digest not available")
return "", errDigestNotAvailable
}
d, err := digest.Parse(dgstStr)
if err != nil {
dcontext.GetLogger(ctx).Errorf("error parsing digest=%q: %v", dgstStr, err)
return "", err
}
return d, nil
}
func getUploadUUID(ctx context.Context) (uuid string) {
return dcontext.GetStringValue(ctx, "vars.uuid")
}
const (
// userKey is used to get the user object from
// a user context
userKey = "auth.user"
// userNameKey is used to get the user name from
// a user context
userNameKey = "auth.user.name"
)
// getUserName attempts to resolve a username from the context and request. If
// a username cannot be resolved, the empty string is returned.
func getUserName(ctx context.Context, r *http.Request) string {
username := dcontext.GetStringValue(ctx, userNameKey)
// Fallback to request user with basic auth
if username == "" {
var ok bool
uname, _, ok := basicAuth(r)
if ok {
username = uname
}
}
return username
}
// withUser returns a context with the authorized user info.
func withUser(ctx context.Context, user auth.UserInfo) context.Context {
return userInfoContext{
Context: ctx,
user: user,
}
}
type userInfoContext struct {
context.Context
user auth.UserInfo
}
func (uic userInfoContext) Value(key interface{}) interface{} {
switch key {
case userKey:
return uic.user
case userNameKey:
return uic.user.Name
}
return uic.Context.Value(key)
}
// withResources returns a context with the authorized resources.
func withResources(ctx context.Context, resources []auth.Resource) context.Context {
return resourceContext{
Context: ctx,
resources: resources,
}
}
type resourceContext struct {
context.Context
resources []auth.Resource
}
type resourceKey struct{}
func (rc resourceContext) Value(key interface{}) interface{} {
if key == (resourceKey{}) {
return rc.resources
}
return rc.Context.Value(key)
}
// authorizedResources returns the list of resources which have
// been authorized for this request.
func authorizedResources(ctx context.Context) []auth.Resource {
if resources, ok := ctx.Value(resourceKey{}).([]auth.Resource); ok {
return resources
}
return nil
}
package handlers
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"github.com/distribution/distribution/v3/internal/dcontext"
)
// closeResources closes all the provided resources after running the target
// handler.
func closeResources(handler http.Handler, closers ...io.Closer) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
for _, closer := range closers {
defer closer.Close()
}
handler.ServeHTTP(w, r)
})
}
// copyFullPayload copies the payload of an HTTP request to destWriter. If it
// receives less content than expected, and the client disconnected during the
// upload, it avoids sending a 400 error to keep the logs cleaner.
//
// The copy will be limited to `limit` bytes, if limit is greater than zero.
func copyFullPayload(ctx context.Context, responseWriter http.ResponseWriter, r *http.Request, destWriter io.Writer, limit int64, action string) error {
// Get a channel that tells us if the client disconnects
clientClosed := r.Context().Done()
body := r.Body
if limit > 0 {
body = http.MaxBytesReader(responseWriter, body, limit)
}
// Read in the data, if any.
copied, err := io.Copy(destWriter, body)
if clientClosed != nil && (err != nil || (r.ContentLength > 0 && copied < r.ContentLength)) {
// Didn't receive as much content as expected. Did the client
// disconnect during the request? If so, avoid returning a 400
// error to keep the logs cleaner.
select {
case <-clientClosed:
// Set the response code to "499 Client Closed Request"
// Even though the connection has already been closed,
// this causes the logger to pick up a 499 error
// instead of showing 0 for the HTTP status.
responseWriter.WriteHeader(499)
dcontext.GetLoggerWithFields(ctx, map[interface{}]interface{}{
"error": err,
"copied": copied,
"contentLength": r.ContentLength,
}, "error", "copied", "contentLength").Error("client disconnected during " + action)
return errors.New("client disconnected")
default:
}
}
if err != nil {
dcontext.GetLogger(ctx).Errorf("unknown error reading request payload: %v", err)
return err
}
return nil
}
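// parseContentRange parses a byte range of the form "<start>-<end>", as sent
// in the Content-Range header of chunked blob uploads, returning the bounds
// as int64 values.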
func parseContentRange(cr string) (start int64, end int64, err error) {
rStart, rEnd, ok := strings.Cut(cr, "-")
if !ok {
return -1, -1, fmt.Errorf("invalid content range format, %s", cr)
}
start, err = strconv.ParseInt(rStart, 10, 64)
if err != nil {
return -1, -1, err
}
end, err = strconv.ParseInt(rEnd, 10, 64)
if err != nil {
return -1, -1, err
}
return start, end, nil
}
package handlers
import (
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"fmt"
"time"
)
// blobUploadState captures the serializable state of the blob upload.
type blobUploadState struct {
// Name is the primary repository under which the blob will be linked.
Name string
// UUID identifies the upload.
UUID string
// Offset contains the current progress of the upload.
Offset int64
// StartedAt is the original start time of the upload.
StartedAt time.Time
}
type hmacKey string
var errInvalidSecret = fmt.Errorf("invalid secret")
// unpackUploadState unpacks and validates the blob upload state from the
// token, using the hmacKey secret.
func (secret hmacKey) unpackUploadState(token string) (blobUploadState, error) {
var state blobUploadState
tokenBytes, err := base64.URLEncoding.DecodeString(token)
if err != nil {
return state, err
}
mac := hmac.New(sha256.New, []byte(secret))
if len(tokenBytes) < mac.Size() {
return state, errInvalidSecret
}
macBytes := tokenBytes[:mac.Size()]
messageBytes := tokenBytes[mac.Size():]
mac.Write(messageBytes)
if !hmac.Equal(mac.Sum(nil), macBytes) {
return state, errInvalidSecret
}
if err := json.Unmarshal(messageBytes, &state); err != nil {
return state, err
}
return state, nil
}
// packUploadState packs the upload state, signed with an HMAC-SHA256 digest
// keyed by the hmacKey secret and encoded as URL-safe base64. The token is
// laid out as base64url(HMAC || JSON state), and can be used to share data
// with minimized risk of external tampering.
func (secret hmacKey) packUploadState(lus blobUploadState) (string, error) {
mac := hmac.New(sha256.New, []byte(secret))
p, err := json.Marshal(lus)
if err != nil {
return "", err
}
mac.Write(p)
return base64.URLEncoding.EncodeToString(append(mac.Sum(nil), p...)), nil
}
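// Example (illustrative): a round trip through pack and unpack. The key and
// repository name below are arbitrary:
//
//	key := hmacKey("secret")
//	token, _ := key.packUploadState(blobUploadState{Name: "library/redis"})
//	state, err := key.unpackUploadState(token)
//	// err == nil && state.Name == "library/redis"
//
// Unpacking with a different secret fails with errInvalidSecret.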
package handlers
import (
"bytes"
"errors"
"fmt"
"strings"
"text/template"
"github.com/sirupsen/logrus"
)
// logHook emails log entries at the configured levels, such as panics in the web application.
type logHook struct {
LevelsParam []string
Mail *mailer
}
// Fire formats the log entry as an email and sends it via the configured mailer.
func (hook *logHook) Fire(entry *logrus.Entry) error {
host, _, ok := strings.Cut(hook.Mail.Addr, ":")
if !ok || host == "" {
return errors.New("invalid Mail Address")
}
subject := fmt.Sprintf("[%s] %s: %s", entry.Level, host, entry.Message)
html := `
{{.Message}}
{{range $key, $value := .Data}}
{{$key}}: {{$value}}
{{end}}
`
b := bytes.NewBuffer(make([]byte, 0))
t := template.Must(template.New("mail body").Parse(html))
if err := t.Execute(b, entry); err != nil {
return err
}
body := b.String()
return hook.Mail.sendMail(subject, body)
}
// Levels returns the log levels this hook is registered to catch.
func (hook *logHook) Levels() []logrus.Level {
levels := []logrus.Level{}
for _, v := range hook.LevelsParam {
lv, _ := logrus.ParseLevel(v)
levels = append(levels, lv)
}
return levels
}
package handlers
import (
"errors"
"net/smtp"
"strings"
)
// mailer provides fields of email configuration for sending.
type mailer struct {
Addr, Username, Password, From string
Insecure bool
To []string
}
// sendMail sends an email, provided the mail parameters are configured correctly.
func (mail *mailer) sendMail(subject, message string) error {
addr := strings.Split(mail.Addr, ":")
if len(addr) != 2 {
return errors.New("invalid Mail Address")
}
host := addr[0]
msg := []byte("To:" + strings.Join(mail.To, ";") +
"\r\nFrom: " + mail.From +
"\r\nSubject: " + subject +
"\r\nContent-Type: text/plain\r\n\r\n" +
message)
auth := smtp.PlainAuth(
"",
mail.Username,
mail.Password,
host,
)
err := smtp.SendMail(
mail.Addr,
auth,
mail.From,
mail.To,
msg,
)
if err != nil {
return err
}
return nil
}
package handlers
import (
"bytes"
"fmt"
"mime"
"net/http"
"strings"
"sync"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/internal/dcontext"
"github.com/distribution/distribution/v3/manifest/manifestlist"
"github.com/distribution/distribution/v3/manifest/ocischema"
"github.com/distribution/distribution/v3/manifest/schema2"
"github.com/distribution/distribution/v3/registry/api/errcode"
"github.com/distribution/distribution/v3/registry/storage"
"github.com/distribution/distribution/v3/registry/storage/driver"
"github.com/distribution/reference"
"github.com/gorilla/handlers"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/sync/errgroup"
)
const (
defaultArch = "amd64"
defaultOS = "linux"
maxManifestBodySize = 4 * 1024 * 1024
imageClass = "image"
)
type storageType int
const (
manifestSchema2 storageType = iota // 0
manifestlistSchema // 1
ociSchema // 2
ociImageIndexSchema // 3
numStorageTypes // 4
)
// manifestDispatcher takes the request context and builds the
// appropriate handler for handling manifest requests.
func manifestDispatcher(ctx *Context, r *http.Request) http.Handler {
manifestHandler := &manifestHandler{
Context: ctx,
}
ref := getReference(ctx)
dgst, err := digest.Parse(ref)
if err != nil {
// We just have a tag
manifestHandler.Tag = ref
} else {
manifestHandler.Digest = dgst
}
mhandler := handlers.MethodHandler{
http.MethodGet: http.HandlerFunc(manifestHandler.GetManifest),
http.MethodHead: http.HandlerFunc(manifestHandler.GetManifest),
}
if !ctx.readOnly {
mhandler[http.MethodPut] = http.HandlerFunc(manifestHandler.PutManifest)
mhandler[http.MethodDelete] = http.HandlerFunc(manifestHandler.DeleteManifest)
}
return mhandler
}
// manifestHandler handles http operations on image manifests.
type manifestHandler struct {
*Context
// One of tag or digest gets set, depending on what is present in context.
Tag string
Digest digest.Digest
}
// GetManifest fetches the image manifest from the storage backend, if it exists.
func (imh *manifestHandler) GetManifest(w http.ResponseWriter, r *http.Request) {
dcontext.GetLogger(imh).Debug("GetImageManifest")
manifests, err := imh.Repository.Manifests(imh)
if err != nil {
imh.Errors = append(imh.Errors, err)
return
}
var supports [numStorageTypes]bool
// this parsing of Accept headers is not quite as full-featured as godoc.org's parser, but we don't care about "q=" values
// https://github.com/golang/gddo/blob/e91d4165076d7474d20abda83f92d15c7ebc3e81/httputil/header/header.go#L165-L202
for _, acceptHeader := range r.Header["Accept"] {
// r.Header[...] is a slice in case the request contains the same header more than once
// if the header isn't set, we'll get the zero value, which "range" will handle gracefully
// we need to split each header value on "," to get the full list of "Accept" values (per RFC 2616)
// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.1
for _, mediaType := range strings.Split(acceptHeader, ",") {
if mediaType, _, err = mime.ParseMediaType(mediaType); err != nil {
continue
}
if mediaType == schema2.MediaTypeManifest {
supports[manifestSchema2] = true
}
if mediaType == manifestlist.MediaTypeManifestList {
supports[manifestlistSchema] = true
}
if mediaType == v1.MediaTypeImageManifest {
supports[ociSchema] = true
}
if mediaType == v1.MediaTypeImageIndex {
supports[ociImageIndexSchema] = true
}
}
}
if imh.Tag != "" {
tags := imh.Repository.Tags(imh)
desc, err := tags.Get(imh, imh.Tag)
if err != nil {
if _, ok := err.(distribution.ErrTagUnknown); ok {
imh.Errors = append(imh.Errors, errcode.ErrorCodeManifestUnknown.WithDetail(err))
} else {
imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
}
return
}
imh.Digest = desc.Digest
}
if etagMatch(r, imh.Digest.String()) {
w.WriteHeader(http.StatusNotModified)
return
}
var options []distribution.ManifestServiceOption
if imh.Tag != "" {
options = append(options, distribution.WithTag(imh.Tag))
}
manifest, err := manifests.Get(imh, imh.Digest, options...)
if err != nil {
if _, ok := err.(distribution.ErrManifestUnknownRevision); ok {
imh.Errors = append(imh.Errors, errcode.ErrorCodeManifestUnknown.WithDetail(err))
} else {
imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
}
return
}
// determine the type of the returned manifest
manifestType := manifestSchema2
manifestList, isManifestList := manifest.(*manifestlist.DeserializedManifestList)
if _, isOCImanifest := manifest.(*ocischema.DeserializedManifest); isOCImanifest {
manifestType = ociSchema
} else if isManifestList {
if manifestList.MediaType == manifestlist.MediaTypeManifestList {
manifestType = manifestlistSchema
} else if manifestList.MediaType == v1.MediaTypeImageIndex {
manifestType = ociImageIndexSchema
}
}
if manifestType == ociSchema && !supports[ociSchema] {
imh.Errors = append(imh.Errors, errcode.ErrorCodeManifestUnknown.WithMessage("OCI manifest found, but accept header does not support OCI manifests"))
return
}
if manifestType == ociImageIndexSchema && !supports[ociImageIndexSchema] {
imh.Errors = append(imh.Errors, errcode.ErrorCodeManifestUnknown.WithMessage("OCI index found, but accept header does not support OCI indexes"))
return
}
if imh.Tag != "" && manifestType == manifestlistSchema && !supports[manifestlistSchema] {
// The client does not support manifest lists; fall back to serving an
// image manifest from the list.
dcontext.GetLogger(imh).Infof("client does not support manifest lists; serving image manifest for %s", imh.Digest.String())
// Find the image manifest corresponding to the default
// platform
var manifestDigest digest.Digest
for _, manifestDescriptor := range manifestList.Manifests {
if manifestDescriptor.Platform.Architecture == defaultArch && manifestDescriptor.Platform.OS == defaultOS {
manifestDigest = manifestDescriptor.Digest
break
}
}
if manifestDigest == "" {
imh.Errors = append(imh.Errors, errcode.ErrorCodeManifestUnknown)
return
}
manifest, err = manifests.Get(imh, manifestDigest)
if err != nil {
if _, ok := err.(distribution.ErrManifestUnknownRevision); ok {
imh.Errors = append(imh.Errors, errcode.ErrorCodeManifestUnknown.WithDetail(err))
} else {
imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
}
return
}
if _, isSchema2 := manifest.(*schema2.DeserializedManifest); isSchema2 && !supports[manifestSchema2] {
imh.Errors = append(imh.Errors, errcode.ErrorCodeManifestInvalid.WithMessage("Schema 2 manifest not supported by client"))
return
}
imh.Digest = manifestDigest
}
ct, p, err := manifest.Payload()
if err != nil {
return
}
w.Header().Set("Content-Type", ct)
w.Header().Set("Content-Length", fmt.Sprint(len(p)))
w.Header().Set("Docker-Content-Digest", imh.Digest.String())
w.Header().Set("Etag", fmt.Sprintf(`"%s"`, imh.Digest))
if r.Method == http.MethodHead {
return
}
if _, err := w.Write(p); err != nil {
w.WriteHeader(http.StatusInternalServerError)
}
}
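// etagMatch reports whether any If-None-Match value on the request matches
// the given etag, quoted or unquoted.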
func etagMatch(r *http.Request, etag string) bool {
for _, headerVal := range r.Header["If-None-Match"] {
if headerVal == etag || headerVal == fmt.Sprintf(`"%s"`, etag) { // allow quoted or unquoted
return true
}
}
return false
}
// PutManifest validates and stores a manifest in the registry.
func (imh *manifestHandler) PutManifest(w http.ResponseWriter, r *http.Request) {
dcontext.GetLogger(imh).Debug("PutImageManifest")
manifests, err := imh.Repository.Manifests(imh)
if err != nil {
imh.Errors = append(imh.Errors, err)
return
}
var jsonBuf bytes.Buffer
if err := copyFullPayload(imh, w, r, &jsonBuf, maxManifestBodySize, "image manifest PUT"); err != nil {
// copyFullPayload reports the error if necessary
imh.Errors = append(imh.Errors, errcode.ErrorCodeManifestInvalid.WithDetail(err.Error()))
return
}
mediaType := r.Header.Get("Content-Type")
manifest, desc, err := distribution.UnmarshalManifest(mediaType, jsonBuf.Bytes())
if err != nil {
imh.Errors = append(imh.Errors, errcode.ErrorCodeManifestInvalid.WithDetail(err))
return
}
if imh.Digest != "" {
if desc.Digest != imh.Digest {
dcontext.GetLogger(imh).Errorf("payload digest does not match: %q != %q", desc.Digest, imh.Digest)
imh.Errors = append(imh.Errors, errcode.ErrorCodeDigestInvalid)
return
}
} else if imh.Tag != "" {
imh.Digest = desc.Digest
} else {
imh.Errors = append(imh.Errors, errcode.ErrorCodeTagInvalid.WithDetail("no tag or digest specified"))
return
}
isAnOCIManifest := mediaType == v1.MediaTypeImageManifest || mediaType == v1.MediaTypeImageIndex
if isAnOCIManifest {
dcontext.GetLogger(imh).Debug("Putting an OCI Manifest!")
} else {
dcontext.GetLogger(imh).Debug("Putting a Docker Manifest!")
}
var options []distribution.ManifestServiceOption
if imh.Tag != "" {
options = append(options, distribution.WithTag(imh.Tag))
}
if err := imh.applyResourcePolicy(manifest); err != nil {
imh.Errors = append(imh.Errors, err)
return
}
_, err = manifests.Put(imh, manifest, options...)
if err != nil {
// TODO(stevvooe): These error handling switches really need to be
// handled by an app global mapper.
if err == distribution.ErrUnsupported {
imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported)
return
}
if err == distribution.ErrAccessDenied {
imh.Errors = append(imh.Errors, errcode.ErrorCodeDenied)
return
}
switch err := err.(type) {
case distribution.ErrManifestVerification:
for _, verificationError := range err {
switch verificationError := verificationError.(type) {
case distribution.ErrManifestBlobUnknown:
imh.Errors = append(imh.Errors, errcode.ErrorCodeManifestBlobUnknown.WithDetail(verificationError.Digest))
case distribution.ErrManifestNameInvalid:
imh.Errors = append(imh.Errors, errcode.ErrorCodeNameInvalid.WithDetail(err))
case distribution.ErrManifestUnverified:
imh.Errors = append(imh.Errors, errcode.ErrorCodeManifestUnverified)
default:
if verificationError == digest.ErrDigestInvalidFormat {
imh.Errors = append(imh.Errors, errcode.ErrorCodeDigestInvalid)
} else {
imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown, verificationError)
}
}
}
case errcode.Error:
imh.Errors = append(imh.Errors, err)
default:
imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
}
return
}
// Tag this manifest
if imh.Tag != "" {
tags := imh.Repository.Tags(imh)
err = tags.Tag(imh, imh.Tag, desc)
if err != nil {
imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
}
// Construct a canonical url for the uploaded manifest.
ref, err := reference.WithDigest(imh.Repository.Named(), imh.Digest)
if err != nil {
imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
location, err := imh.urlBuilder.BuildManifestURL(ref)
if err != nil {
// NOTE(stevvooe): Given the behavior above, this is absurdly unlikely to
// happen. We'll log the error here but proceed as if it worked. Worst
// case, we set an empty location header.
dcontext.GetLogger(imh).Errorf("error building manifest url from digest: %v", err)
}
w.Header().Set("Location", location)
w.Header().Set("Docker-Content-Digest", imh.Digest.String())
w.WriteHeader(http.StatusCreated)
dcontext.GetLogger(imh).Debug("Succeeded in putting manifest!")
}
// applyResourcePolicy checks whether the resource class matches what has
// been authorized and allowed by the policy configuration.
func (imh *manifestHandler) applyResourcePolicy(manifest distribution.Manifest) error {
allowedClasses := imh.App.Config.Policy.Repository.Classes
if len(allowedClasses) == 0 {
return nil
}
var class string
switch m := manifest.(type) {
case *schema2.DeserializedManifest:
switch m.Config.MediaType {
case schema2.MediaTypeImageConfig:
class = imageClass
case schema2.MediaTypePluginConfig:
class = "plugin"
default:
return errcode.ErrorCodeDenied.WithMessage("unknown manifest class for " + m.Config.MediaType)
}
case *ocischema.DeserializedManifest:
switch m.Config.MediaType {
case v1.MediaTypeImageConfig:
class = imageClass
default:
return errcode.ErrorCodeDenied.WithMessage("unknown manifest class for " + m.Config.MediaType)
}
}
if class == "" {
return nil
}
// Check to see if class is allowed in registry
var allowedClass bool
for _, c := range allowedClasses {
if class == c {
allowedClass = true
break
}
}
if !allowedClass {
return errcode.ErrorCodeDenied.WithMessage(fmt.Sprintf("registry does not allow %s manifest", class))
}
resources := authorizedResources(imh)
n := imh.Repository.Named().Name()
var foundResource bool
for _, r := range resources {
if r.Name == n {
if r.Class == "" {
r.Class = imageClass
}
if r.Class == class {
return nil
}
foundResource = true
}
}
// resource was found but no matching class was found
if foundResource {
return errcode.ErrorCodeDenied.WithMessage(fmt.Sprintf("repository not authorized for %s manifest", class))
}
return nil
}
// DeleteManifest removes the manifest with the given digest or the tag with the given name from the registry.
func (imh *manifestHandler) DeleteManifest(w http.ResponseWriter, r *http.Request) {
dcontext.GetLogger(imh).Debug("DeleteImageManifest")
if imh.App.isCache {
imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported)
return
}
if imh.Tag != "" {
dcontext.GetLogger(imh).Debug("DeleteImageTag")
tagService := imh.Repository.Tags(imh.Context)
if err := tagService.Untag(imh.Context, imh.Tag); err != nil {
switch err.(type) {
case distribution.ErrTagUnknown, driver.PathNotFoundError:
imh.Errors = append(imh.Errors, errcode.ErrorCodeManifestUnknown.WithDetail(err))
default:
imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
}
return
}
w.WriteHeader(http.StatusAccepted)
return
}
manifests, err := imh.Repository.Manifests(imh)
if err != nil {
imh.Errors = append(imh.Errors, err)
return
}
err = manifests.Delete(imh, imh.Digest)
if err != nil {
switch err {
case digest.ErrDigestUnsupported, digest.ErrDigestInvalidFormat:
imh.Errors = append(imh.Errors, errcode.ErrorCodeDigestInvalid)
return
case distribution.ErrBlobUnknown:
imh.Errors = append(imh.Errors, errcode.ErrorCodeManifestUnknown)
return
case distribution.ErrUnsupported:
imh.Errors = append(imh.Errors, errcode.ErrorCodeUnsupported)
return
default:
imh.Errors = append(imh.Errors, errcode.ErrorCodeUnknown)
return
}
}
tagService := imh.Repository.Tags(imh)
referencedTags, err := tagService.Lookup(imh, v1.Descriptor{Digest: imh.Digest})
if err != nil {
imh.Errors = append(imh.Errors, err)
return
}
var (
errs []error
mu sync.Mutex
)
g := errgroup.Group{}
g.SetLimit(storage.DefaultConcurrencyLimit)
for _, tag := range referencedTags {
tag := tag
g.Go(func() error {
if err := tagService.Untag(imh, tag); err != nil {
mu.Lock()
errs = append(errs, err)
mu.Unlock()
}
return nil
})
}
_ = g.Wait() // imh will record all errors, so ignore the error of Wait()
imh.Errors = errs
w.WriteHeader(http.StatusAccepted)
}
package handlers
import (
"encoding/json"
"net/http"
"sort"
"strconv"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/registry/api/errcode"
"github.com/gorilla/handlers"
)
// tagsDispatcher constructs the tags handler api endpoint.
func tagsDispatcher(ctx *Context, r *http.Request) http.Handler {
tagsHandler := &tagsHandler{
Context: ctx,
}
return handlers.MethodHandler{
http.MethodGet: http.HandlerFunc(tagsHandler.GetTags),
}
}
// tagsHandler handles requests for lists of tags under a repository name.
type tagsHandler struct {
*Context
}
type tagsAPIResponse struct {
Name string `json:"name"`
Tags []string `json:"tags"`
}
// GetTags returns a JSON list of tags for a specific image name.
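// Pagination mirrors the catalog endpoint: `last` selects the tag to start
// after, `n` caps the page size, and a Link header is emitted when more tags
// remain.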
func (th *tagsHandler) GetTags(w http.ResponseWriter, r *http.Request) {
tagService := th.Repository.Tags(th)
tags, err := tagService.All(th)
if err != nil {
switch err := err.(type) {
case distribution.ErrRepositoryUnknown:
th.Errors = append(th.Errors, errcode.ErrorCodeNameUnknown.WithDetail(map[string]string{"name": th.Repository.Named().Name()}))
case errcode.Error:
th.Errors = append(th.Errors, err)
default:
th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
}
return
}
// do pagination if requested
q := r.URL.Query()
// get entries after latest, if any specified
if lastEntry := q.Get("last"); lastEntry != "" {
lastEntryIndex := sort.SearchStrings(tags, lastEntry)
// as `sort.SearchStrings` can return len(tags) if the
// specified `lastEntry` is not found, we need to
// ensure it does not panic when slicing.
if lastEntryIndex == len(tags) {
tags = []string{}
} else {
tags = tags[lastEntryIndex+1:]
}
}
// if `n` is specified, the client requested a limited number of entries
if n := q.Get("n"); n != "" {
maxEntries, err := strconv.Atoi(n)
if err != nil || maxEntries < 0 {
th.Errors = append(th.Errors, errcode.ErrorCodePaginationNumberInvalid.WithDetail(map[string]string{"n": n}))
return
}
// if the client requested more than or equal to the number of tags we
// have, cap the request at `len(tags)`. The `=` ensures the else clause
// only runs when there are tags left beyond this page.
if maxEntries >= len(tags) {
maxEntries = len(tags)
} else if maxEntries > 0 {
// defined in `catalog.go`
urlStr, err := createLinkEntry(r.URL.String(), maxEntries, tags[maxEntries-1])
if err != nil {
th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
w.Header().Set("Link", urlStr)
}
tags = tags[:maxEntries]
}
w.Header().Set("Content-Type", "application/json")
enc := json.NewEncoder(w)
if err := enc.Encode(tagsAPIResponse{
Name: th.Repository.Named().Name(),
Tags: tags,
}); err != nil {
th.Errors = append(th.Errors, errcode.ErrorCodeUnknown.WithDetail(err))
return
}
}
package middleware
import (
"context"
"fmt"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/registry/storage"
storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
)
// InitFunc is the type of a RegistryMiddleware factory function and is
// used to register the constructor for different RegistryMiddleware backends.
type InitFunc func(ctx context.Context, registry distribution.Namespace, driver storagedriver.StorageDriver, options map[string]interface{}) (distribution.Namespace, error)
var (
middlewares map[string]InitFunc
registryoptions []storage.RegistryOption
)
// Register is used to register an InitFunc for
// a RegistryMiddleware backend with the given name.
func Register(name string, initFunc InitFunc) error {
if middlewares == nil {
middlewares = make(map[string]InitFunc)
}
if _, exists := middlewares[name]; exists {
return fmt.Errorf("name already registered: %s", name)
}
middlewares[name] = initFunc
return nil
}
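// Example (illustrative): a middleware implementation typically registers
// itself from an init function; the name and constructor here are assumed:
//
//	func init() {
//		_ = Register("example", func(ctx context.Context, registry distribution.Namespace, driver storagedriver.StorageDriver, options map[string]interface{}) (distribution.Namespace, error) {
//			return registry, nil // wrap or decorate the namespace here
//		})
//	}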
// Get constructs a RegistryMiddleware with the given options using the named backend.
func Get(ctx context.Context, name string, options map[string]interface{}, registry distribution.Namespace, driver storagedriver.StorageDriver) (distribution.Namespace, error) {
if middlewares != nil {
if initFunc, exists := middlewares[name]; exists {
return initFunc(ctx, registry, driver, options)
}
}
return nil, fmt.Errorf("no registry middleware registered with name: %s", name)
}
// RegisterOptions adds more options to RegistryOption list. Options get applied before
// any other configuration-based options.
func RegisterOptions(options ...storage.RegistryOption) error {
registryoptions = append(registryoptions, options...)
return nil
}
// GetRegistryOptions returns list of RegistryOption.
func GetRegistryOptions() []storage.RegistryOption {
return registryoptions
}
package middleware
import (
"context"
"fmt"
"github.com/distribution/distribution/v3"
)
// InitFunc is the type of a RepositoryMiddleware factory function and is
// used to register the constructor for different RepositoryMiddleware backends.
type InitFunc func(ctx context.Context, repository distribution.Repository, options map[string]interface{}) (distribution.Repository, error)
var middlewares map[string]InitFunc
// Register is used to register an InitFunc for
// a RepositoryMiddleware backend with the given name.
func Register(name string, initFunc InitFunc) error {
if middlewares == nil {
middlewares = make(map[string]InitFunc)
}
if _, exists := middlewares[name]; exists {
return fmt.Errorf("name already registered: %s", name)
}
middlewares[name] = initFunc
return nil
}
// Get constructs a RepositoryMiddleware with the given options using the named backend.
func Get(ctx context.Context, name string, options map[string]interface{}, repository distribution.Repository) (distribution.Repository, error) {
if middlewares != nil {
if initFunc, exists := middlewares[name]; exists {
return initFunc(ctx, repository, options)
}
}
return nil, fmt.Errorf("no repository middleware registered with name: %s", name)
}
package proxy
import (
"net/http"
"net/url"
"strings"
"github.com/distribution/distribution/v3/internal/client/auth"
"github.com/distribution/distribution/v3/internal/client/auth/challenge"
"github.com/distribution/distribution/v3/internal/dcontext"
)
const challengeHeader = "Docker-Distribution-Api-Version"
type userpass struct {
username string
password string
}
func (u userpass) Basic(_ *url.URL) (string, string) {
return u.username, u.password
}
func (u userpass) RefreshToken(_ *url.URL, service string) string {
return ""
}
func (u userpass) SetRefreshToken(_ *url.URL, service, token string) {
}
type credentials struct {
creds map[string]userpass
}
func (c credentials) Basic(u *url.URL) (string, string) {
return c.creds[u.String()].Basic(u)
}
func (c credentials) RefreshToken(u *url.URL, service string) string {
return ""
}
func (c credentials) SetRefreshToken(u *url.URL, service, token string) {
}
// configureAuth stores credentials for challenge responses
func configureAuth(username, password, remoteURL string) (auth.CredentialStore, auth.CredentialStore, error) {
creds := map[string]userpass{}
authURLs, err := getAuthURLs(remoteURL)
if err != nil {
return nil, nil, err
}
for _, url := range authURLs {
dcontext.GetLogger(dcontext.Background()).Infof("Discovered token authentication URL: %s", url)
creds[url] = userpass{
username: username,
password: password,
}
}
return credentials{creds: creds}, userpass{username: username, password: password}, nil
}
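// getAuthURLs probes the remote /v2/ endpoint and collects the token realm
// URLs advertised in any bearer auth challenges.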
func getAuthURLs(remoteURL string) ([]string, error) {
authURLs := []string{}
resp, err := http.Get(remoteURL + "/v2/")
if err != nil {
return nil, err
}
defer resp.Body.Close()
for _, c := range challenge.ResponseChallenges(resp) {
if strings.EqualFold(c.Scheme, "bearer") {
authURLs = append(authURLs, c.Parameters["realm"])
}
}
return authURLs, nil
}
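// ping issues a GET against the endpoint and records any auth challenges in
// the challenge manager. The versionHeader argument is currently unused.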
func ping(manager challenge.Manager, endpoint, versionHeader string) error {
resp, err := http.Get(endpoint)
if err != nil {
return err
}
defer resp.Body.Close()
return manager.AddResponse(resp)
}
package proxy
import (
"net/url"
"sync"
"time"
"github.com/docker/docker-credential-helpers/client"
credspkg "github.com/docker/docker-credential-helpers/credentials"
"github.com/sirupsen/logrus"
"github.com/distribution/distribution/v3/configuration"
"github.com/distribution/distribution/v3/internal/client/auth"
)
type execCredentials struct {
m sync.Mutex
helper client.ProgramFunc
lifetime *time.Duration
creds *credspkg.Credentials
expiry time.Time
}
func (c *execCredentials) Basic(url *url.URL) (string, string) {
c.m.Lock()
defer c.m.Unlock()
now := time.Now()
if c.creds != nil && (c.lifetime == nil || now.Before(c.expiry)) {
return c.creds.Username, c.creds.Secret
}
creds, err := client.Get(c.helper, url.Host)
if err != nil {
logrus.Errorf("failed to run command: %v", err)
return "", ""
}
c.creds = creds
if c.lifetime != nil && *c.lifetime > 0 {
c.expiry = now.Add(*c.lifetime)
}
return c.creds.Username, c.creds.Secret
}
func (c *execCredentials) RefreshToken(_ *url.URL, _ string) string {
return ""
}
func (c *execCredentials) SetRefreshToken(_ *url.URL, _, _ string) {
}
func configureExecAuth(cfg configuration.ExecConfig) (auth.CredentialStore, error) {
return &execCredentials{
helper: client.NewShellProgramFunc(cfg.Command),
lifetime: cfg.Lifetime,
}, nil
}
package proxy
import (
"context"
"io"
"net/http"
"strconv"
"sync"
"time"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/internal/dcontext"
"github.com/distribution/distribution/v3/registry/proxy/scheduler"
"github.com/distribution/reference"
)
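// proxyBlobStore serves blobs from the local store when present, and
// otherwise streams them from the remote registry while caching them locally
// for subsequent requests.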
type proxyBlobStore struct {
localStore distribution.BlobStore
remoteStore distribution.BlobService
scheduler *scheduler.TTLExpirationScheduler
ttl *time.Duration
repositoryName reference.Named
authChallenger authChallenger
}
var _ distribution.BlobStore = &proxyBlobStore{}
// inflight tracks currently downloading blobs
var inflight = make(map[digest.Digest]struct{})
// mu protects inflight
var mu sync.Mutex
func setResponseHeaders(h http.Header, length int64, mediaType string, digest digest.Digest) {
h.Set("Content-Length", strconv.FormatInt(length, 10))
h.Set("Content-Type", mediaType)
h.Set("Docker-Content-Digest", digest.String())
h.Set("Etag", digest.String())
}
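// copyContent stats the blob on the remote store, sets the response headers
// from the returned descriptor, then streams exactly desc.Size bytes from
// the remote into writer, recording pull/push metrics.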
func (pbs *proxyBlobStore) copyContent(ctx context.Context, dgst digest.Digest, writer io.Writer, h http.Header) (v1.Descriptor, error) {
desc, err := pbs.remoteStore.Stat(ctx, dgst)
if err != nil {
return v1.Descriptor{}, err
}
setResponseHeaders(h, desc.Size, desc.MediaType, dgst)
remoteReader, err := pbs.remoteStore.Open(ctx, dgst)
if err != nil {
return v1.Descriptor{}, err
}
defer remoteReader.Close()
_, err = io.CopyN(writer, remoteReader, desc.Size)
if err != nil {
return v1.Descriptor{}, err
}
proxyMetrics.BlobPull(uint64(desc.Size))
proxyMetrics.BlobPush(uint64(desc.Size), false)
return desc, nil
}
func (pbs *proxyBlobStore) serveLocal(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) (bool, error) {
localDesc, err := pbs.localStore.Stat(ctx, dgst)
if err != nil {
// Stat can report a zero sized file here if it's checked between creation
// and population. Return nil error, and continue
return false, nil
}
proxyMetrics.BlobPush(uint64(localDesc.Size), true)
return true, pbs.localStore.ServeBlob(ctx, w, r, dgst)
}
func (pbs *proxyBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
served, err := pbs.serveLocal(ctx, w, r, dgst)
if err != nil {
dcontext.GetLogger(ctx).Errorf("Error serving blob from local storage: %s", err.Error())
return err
}
if served {
return nil
}
if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil {
return err
}
mu.Lock()
_, ok := inflight[dgst]
if ok {
// Another request is already downloading this blob; stream it to this
// client directly from the remote store.
// TODO: reuse the data already being fetched and cached by the other
// request instead of pulling it from the remote again.
mu.Unlock()
_, err := pbs.copyContent(ctx, dgst, w, w.Header())
return err
}
inflight[dgst] = struct{}{}
mu.Unlock()
defer func() {
mu.Lock()
delete(inflight, dgst)
mu.Unlock()
}()
bw, err := pbs.localStore.Create(ctx)
if err != nil {
return err
}
// Serve the client and store locally from the same remote fetch,
// preventing a redundant download of the blob.
multiWriter := io.MultiWriter(w, bw)
desc, err := pbs.copyContent(ctx, dgst, multiWriter, w.Header())
if err != nil {
return err
}
_, err = bw.Commit(ctx, desc)
if err != nil {
return err
}
blobRef, err := reference.WithDigest(pbs.repositoryName, dgst)
if err != nil {
dcontext.GetLogger(ctx).Errorf("Error creating reference: %s", err)
return err
}
if pbs.scheduler != nil && pbs.ttl != nil {
if err := pbs.scheduler.AddBlob(blobRef, *pbs.ttl); err != nil {
dcontext.GetLogger(ctx).Errorf("Error adding blob: %s", err)
return err
}
}
return nil
}
func (pbs *proxyBlobStore) Stat(ctx context.Context, dgst digest.Digest) (v1.Descriptor, error) {
desc, err := pbs.localStore.Stat(ctx, dgst)
if err == nil {
return desc, err
}
if err != distribution.ErrBlobUnknown {
return v1.Descriptor{}, err
}
if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil {
return v1.Descriptor{}, err
}
return pbs.remoteStore.Stat(ctx, dgst)
}
func (pbs *proxyBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
blob, err := pbs.localStore.Get(ctx, dgst)
if err == nil {
return blob, nil
}
if err := pbs.authChallenger.tryEstablishChallenges(ctx); err != nil {
return []byte{}, err
}
blob, err = pbs.remoteStore.Get(ctx, dgst)
if err != nil {
return []byte{}, err
}
_, err = pbs.localStore.Put(ctx, "", blob)
if err != nil {
return []byte{}, err
}
return blob, nil
}
// Unsupported functions
func (pbs *proxyBlobStore) Put(ctx context.Context, mediaType string, p []byte) (v1.Descriptor, error) {
return v1.Descriptor{}, distribution.ErrUnsupported
}
func (pbs *proxyBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
return nil, distribution.ErrUnsupported
}
func (pbs *proxyBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
return nil, distribution.ErrUnsupported
}
func (pbs *proxyBlobStore) Mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest) (v1.Descriptor, error) {
return v1.Descriptor{}, distribution.ErrUnsupported
}
func (pbs *proxyBlobStore) Open(ctx context.Context, dgst digest.Digest) (io.ReadSeekCloser, error) {
return nil, distribution.ErrUnsupported
}
func (pbs *proxyBlobStore) Delete(ctx context.Context, dgst digest.Digest) error {
return distribution.ErrUnsupported
}
package proxy
import (
"context"
"time"
"github.com/opencontainers/go-digest"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/internal/dcontext"
"github.com/distribution/distribution/v3/registry/proxy/scheduler"
"github.com/distribution/reference"
)
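// proxyManifestStore is a pull-through cache for manifests: reads consult
// the local store first and fall back to the remote, persisting fetched
// manifests locally and scheduling their expiry.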
type proxyManifestStore struct {
ctx context.Context
localManifests distribution.ManifestService
remoteManifests distribution.ManifestService
repositoryName reference.Named
scheduler *scheduler.TTLExpirationScheduler
ttl *time.Duration
authChallenger authChallenger
}
var _ distribution.ManifestService = &proxyManifestStore{}
func (pms proxyManifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
exists, err := pms.localManifests.Exists(ctx, dgst)
if err != nil {
return false, err
}
if exists {
return true, nil
}
if err := pms.authChallenger.tryEstablishChallenges(ctx); err != nil {
return false, err
}
return pms.remoteManifests.Exists(ctx, dgst)
}
func (pms proxyManifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
// At this point `dgst` was either specified explicitly, or returned by the
// tagstore with the most recent association.
var fromRemote bool
manifest, err := pms.localManifests.Get(ctx, dgst, options...)
if err != nil {
if err := pms.authChallenger.tryEstablishChallenges(ctx); err != nil {
return nil, err
}
manifest, err = pms.remoteManifests.Get(ctx, dgst, options...)
if err != nil {
return nil, err
}
fromRemote = true
}
_, payload, err := manifest.Payload()
if err != nil {
return nil, err
}
proxyMetrics.ManifestPush(uint64(len(payload)), !fromRemote)
if fromRemote {
proxyMetrics.ManifestPull(uint64(len(payload)))
_, err = pms.localManifests.Put(ctx, manifest)
if err != nil {
return nil, err
}
// Schedule the manifest blob for removal
repoBlob, err := reference.WithDigest(pms.repositoryName, dgst)
if err != nil {
dcontext.GetLogger(ctx).Errorf("Error creating reference: %s", err)
return nil, err
}
if pms.scheduler != nil && pms.ttl != nil {
if err := pms.scheduler.AddManifest(repoBlob, *pms.ttl); err != nil {
dcontext.GetLogger(ctx).Errorf("Error adding manifest: %s", err)
return nil, err
}
}
// TODO: ensure the manifest's referenced blobs are also cleaned up,
// e.g. pms.scheduler.AddBlob(blobRef, repositoryTTL)
}
return manifest, err
}
func (pms proxyManifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
var d digest.Digest
return d, distribution.ErrUnsupported
}
func (pms proxyManifestStore) Delete(ctx context.Context, dgst digest.Digest) error {
return distribution.ErrUnsupported
}
package proxy
import (
"expvar"
"sync/atomic"
prometheus "github.com/distribution/distribution/v3/metrics"
"github.com/docker/go-metrics"
)
var (
// requests is the number of total incoming proxy requests received for blob/manifest
requests = prometheus.ProxyNamespace.NewLabeledCounter("requests", "The number of total incoming proxy request received", "type")
// hits is the number of total proxy request hits for blob/manifest
hits = prometheus.ProxyNamespace.NewLabeledCounter("hits", "The number of total proxy request hits", "type")
// misses is the number of total proxy request misses for blob/manifest
misses = prometheus.ProxyNamespace.NewLabeledCounter("misses", "The number of total proxy request misses", "type")
// pulledBytes is the size of total bytes pulled from the upstream for blob/manifest
pulledBytes = prometheus.ProxyNamespace.NewLabeledCounter("pulled_bytes", "The size of total bytes pulled from the upstream", "type")
// pushedBytes is the size of total bytes pushed to the client for blob/manifest
pushedBytes = prometheus.ProxyNamespace.NewLabeledCounter("pushed_bytes", "The size of total bytes pushed to the client", "type")
)
// Metrics is used to hold metric counters
// related to the proxy
type Metrics struct {
Requests uint64
Hits uint64
Misses uint64
BytesPulled uint64
BytesPushed uint64
}
type proxyMetricsCollector struct {
blobMetrics Metrics
manifestMetrics Metrics
}
// proxyMetrics tracks metrics about the proxy cache. This is
// kept globally and made available via expvar.
var proxyMetrics = &proxyMetricsCollector{}
func init() {
registry := expvar.Get("registry")
if registry == nil {
registry = expvar.NewMap("registry")
}
pm := registry.(*expvar.Map).Get("proxy")
if pm == nil {
pm = &expvar.Map{}
pm.(*expvar.Map).Init()
registry.(*expvar.Map).Set("proxy", pm)
}
pm.(*expvar.Map).Set("blobs", expvar.Func(func() interface{} {
return proxyMetrics.blobMetrics
}))
pm.(*expvar.Map).Set("manifests", expvar.Func(func() interface{} {
return proxyMetrics.manifestMetrics
}))
metrics.Register(prometheus.ProxyNamespace)
initPrometheusMetrics("blob")
initPrometheusMetrics("manifest")
}
func initPrometheusMetrics(value string) {
requests.WithValues(value).Inc(0)
hits.WithValues(value).Inc(0)
misses.WithValues(value).Inc(0)
pulledBytes.WithValues(value).Inc(0)
pushedBytes.WithValues(value).Inc(0)
}
// BlobPull tracks metrics about blobs pulled into the cache
func (pmc *proxyMetricsCollector) BlobPull(bytesPulled uint64) {
atomic.AddUint64(&pmc.blobMetrics.Misses, 1)
atomic.AddUint64(&pmc.blobMetrics.BytesPulled, bytesPulled)
misses.WithValues("blob").Inc(1)
pulledBytes.WithValues("blob").Inc(float64(bytesPulled))
}
// BlobPush tracks metrics about blobs pushed to clients
func (pmc *proxyMetricsCollector) BlobPush(bytesPushed uint64, isHit bool) {
atomic.AddUint64(&pmc.blobMetrics.Requests, 1)
atomic.AddUint64(&pmc.blobMetrics.BytesPushed, bytesPushed)
requests.WithValues("blob").Inc(1)
pushedBytes.WithValues("blob").Inc(float64(bytesPushed))
if isHit {
atomic.AddUint64(&pmc.blobMetrics.Hits, 1)
hits.WithValues("blob").Inc(1)
}
}
// ManifestPull tracks metrics related to Manifests pulled into the cache
func (pmc *proxyMetricsCollector) ManifestPull(bytesPulled uint64) {
atomic.AddUint64(&pmc.manifestMetrics.Misses, 1)
atomic.AddUint64(&pmc.manifestMetrics.BytesPulled, bytesPulled)
misses.WithValues("manifest").Inc(1)
pulledBytes.WithValues("manifest").Inc(float64(bytesPulled))
}
// ManifestPush tracks metrics about manifests pushed to clients
func (pmc *proxyMetricsCollector) ManifestPush(bytesPushed uint64, isHit bool) {
atomic.AddUint64(&pmc.manifestMetrics.Requests, 1)
atomic.AddUint64(&pmc.manifestMetrics.BytesPushed, bytesPushed)
requests.WithValues("manifest").Inc(1)
pushedBytes.WithValues("manifest").Inc(float64(bytesPushed))
if isHit {
atomic.AddUint64(&pmc.manifestMetrics.Hits, 1)
hits.WithValues("manifest").Inc(1)
}
}
package proxy
import (
"context"
"fmt"
"net/http"
"net/url"
"sync"
"time"
"github.com/distribution/reference"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/configuration"
"github.com/distribution/distribution/v3/internal/client"
"github.com/distribution/distribution/v3/internal/client/auth"
"github.com/distribution/distribution/v3/internal/client/auth/challenge"
"github.com/distribution/distribution/v3/internal/client/transport"
"github.com/distribution/distribution/v3/internal/dcontext"
"github.com/distribution/distribution/v3/registry/proxy/scheduler"
"github.com/distribution/distribution/v3/registry/storage"
"github.com/distribution/distribution/v3/registry/storage/driver"
)
var repositoryTTL = 24 * 7 * time.Hour
// proxyingRegistry fetches content from a remote registry and caches it locally
type proxyingRegistry struct {
embedded distribution.Namespace // provides local registry functionality
scheduler *scheduler.TTLExpirationScheduler
ttl *time.Duration
remoteURL url.URL
authChallenger authChallenger
basicAuth auth.CredentialStore
}
// NewRegistryPullThroughCache creates a registry acting as a pull through cache
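// The registry's proxy configuration section drives this; a minimal sketch
// (values are illustrative):
//
//	proxy:
//	  remoteurl: https://registry-1.docker.io
//	  username: user   # optional
//	  password: secret # optional
//	  ttl: 168h        # optional; 0 disables expiry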
func NewRegistryPullThroughCache(ctx context.Context, registry distribution.Namespace, driver driver.StorageDriver, config configuration.Proxy) (distribution.Namespace, error) {
remoteURL, err := url.Parse(config.RemoteURL)
if err != nil {
return nil, err
}
v := storage.NewVacuum(ctx, driver)
var s *scheduler.TTLExpirationScheduler
var ttl *time.Duration
if config.TTL == nil {
// Default TTL is 7 days
ttl = &repositoryTTL
} else if *config.TTL > 0 {
ttl = config.TTL
} else {
// TTL is disabled, never expire
ttl = nil
}
if ttl != nil {
s = scheduler.New(ctx, driver, "/scheduler-state.json")
s.OnBlobExpire(func(ref reference.Reference) error {
var r reference.Canonical
var ok bool
if r, ok = ref.(reference.Canonical); !ok {
return fmt.Errorf("unexpected reference type : %T", ref)
}
repo, err := registry.Repository(ctx, r)
if err != nil {
return err
}
blobs := repo.Blobs(ctx)
// Clear the repository reference and descriptor caches
err = blobs.Delete(ctx, r.Digest())
if err != nil {
return err
}
err = v.RemoveBlob(r.Digest().String())
if err != nil {
return err
}
return nil
})
s.OnManifestExpire(func(ref reference.Reference) error {
var r reference.Canonical
var ok bool
if r, ok = ref.(reference.Canonical); !ok {
return fmt.Errorf("unexpected reference type : %T", ref)
}
repo, err := registry.Repository(ctx, r)
if err != nil {
return err
}
manifests, err := repo.Manifests(ctx)
if err != nil {
return err
}
err = manifests.Delete(ctx, r.Digest())
if err != nil {
return err
}
return nil
})
err = s.Start()
if err != nil {
return nil, err
}
}
cs, b, err := func() (auth.CredentialStore, auth.CredentialStore, error) {
switch {
case config.Exec != nil:
cs, err := configureExecAuth(*config.Exec)
return cs, cs, err
default:
return configureAuth(config.Username, config.Password, config.RemoteURL)
}
}()
if err != nil {
return nil, err
}
return &proxyingRegistry{
embedded: registry,
scheduler: s,
ttl: ttl,
remoteURL: *remoteURL,
authChallenger: &remoteAuthChallenger{
remoteURL: *remoteURL,
cm: challenge.NewSimpleManager(),
cs: cs,
},
basicAuth: b,
}, nil
}
func (pr *proxyingRegistry) Scope() distribution.Scope {
return distribution.GlobalScope
}
func (pr *proxyingRegistry) Repositories(ctx context.Context, repos []string, last string) (n int, err error) {
return pr.embedded.Repositories(ctx, repos, last)
}
func (pr *proxyingRegistry) Repository(ctx context.Context, name reference.Named) (distribution.Repository, error) {
c := pr.authChallenger
tkopts := auth.TokenHandlerOptions{
Transport: http.DefaultTransport,
Credentials: c.credentialStore(),
Scopes: []auth.Scope{
auth.RepositoryScope{
Repository: name.Name(),
Actions: []string{"pull"},
},
},
Logger: dcontext.GetLogger(ctx),
}
tr := transport.NewTransport(http.DefaultTransport,
auth.NewAuthorizer(c.challengeManager(),
auth.NewTokenHandlerWithOptions(tkopts),
auth.NewBasicHandler(pr.basicAuth)))
localRepo, err := pr.embedded.Repository(ctx, name)
if err != nil {
return nil, err
}
localManifests, err := localRepo.Manifests(ctx, storage.SkipLayerVerification())
if err != nil {
return nil, err
}
remoteRepo, err := client.NewRepository(name, pr.remoteURL.String(), tr)
if err != nil {
return nil, err
}
remoteManifests, err := remoteRepo.Manifests(ctx)
if err != nil {
return nil, err
}
return &proxiedRepository{
blobStore: &proxyBlobStore{
localStore: localRepo.Blobs(ctx),
remoteStore: remoteRepo.Blobs(ctx),
scheduler: pr.scheduler,
ttl: pr.ttl,
repositoryName: name,
authChallenger: pr.authChallenger,
},
manifests: &proxyManifestStore{
repositoryName: name,
localManifests: localManifests, // Options?
remoteManifests: remoteManifests,
ctx: ctx,
scheduler: pr.scheduler,
ttl: pr.ttl,
authChallenger: pr.authChallenger,
},
name: name,
tags: &proxyTagService{
localTags: localRepo.Tags(ctx),
remoteTags: remoteRepo.Tags(ctx),
authChallenger: pr.authChallenger,
},
}, nil
}
func (pr *proxyingRegistry) Blobs() distribution.BlobEnumerator {
return pr.embedded.Blobs()
}
func (pr *proxyingRegistry) BlobStatter() distribution.BlobStatter {
return pr.embedded.BlobStatter()
}
type Closer interface {
// Close releases all resources used by this object.
Close() error
}
func (pr *proxyingRegistry) Close() error {
return pr.scheduler.Stop()
}
// authChallenger encapsulates a request to the upstream to establish credential challenges
type authChallenger interface {
tryEstablishChallenges(context.Context) error
challengeManager() challenge.Manager
credentialStore() auth.CredentialStore
}
type remoteAuthChallenger struct {
remoteURL url.URL
sync.Mutex
cm challenge.Manager
cs auth.CredentialStore
}
func (r *remoteAuthChallenger) credentialStore() auth.CredentialStore {
return r.cs
}
func (r *remoteAuthChallenger) challengeManager() challenge.Manager {
return r.cm
}
// tryEstablishChallenges will attempt to get a challenge type for the upstream if none currently exist
func (r *remoteAuthChallenger) tryEstablishChallenges(ctx context.Context) error {
r.Lock()
defer r.Unlock()
remoteURL := r.remoteURL
remoteURL.Path = "/v2/"
challenges, err := r.cm.GetChallenges(remoteURL)
if err != nil {
return err
}
if len(challenges) > 0 {
return nil
}
// establish challenge type with upstream
if err := ping(r.cm, remoteURL.String(), challengeHeader); err != nil {
return err
}
dcontext.GetLogger(ctx).Infof("Challenge established with upstream: %s", remoteURL.Redacted())
return nil
}
// proxiedRepository uses proxying blob and manifest services to serve content
// locally, or pulling it through from a remote and caching it locally if it doesn't
// already exist
type proxiedRepository struct {
blobStore distribution.BlobStore
manifests distribution.ManifestService
name reference.Named
tags distribution.TagService
}
func (pr *proxiedRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
return pr.manifests, nil
}
func (pr *proxiedRepository) Blobs(ctx context.Context) distribution.BlobStore {
return pr.blobStore
}
func (pr *proxiedRepository) Named() reference.Named {
return pr.name
}
func (pr *proxiedRepository) Tags(ctx context.Context) distribution.TagService {
return pr.tags
}
package proxy
import (
"context"
"github.com/distribution/distribution/v3"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// proxyTagService supports local and remote lookup of tags.
type proxyTagService struct {
localTags distribution.TagService
remoteTags distribution.TagService
authChallenger authChallenger
}
var _ distribution.TagService = proxyTagService{}
// Get attempts to get the most recent digest for the tag by checking the
// remote tag service first and then caching it locally. If the remote is
// unavailable, the local association is returned.
func (pt proxyTagService) Get(ctx context.Context, tag string) (v1.Descriptor, error) {
err := pt.authChallenger.tryEstablishChallenges(ctx)
if err == nil {
desc, err := pt.remoteTags.Get(ctx, tag)
if err == nil {
err := pt.localTags.Tag(ctx, tag, desc)
if err != nil {
return v1.Descriptor{}, err
}
return desc, nil
}
}
desc, err := pt.localTags.Get(ctx, tag)
if err != nil {
return v1.Descriptor{}, err
}
return desc, nil
}
func (pt proxyTagService) Tag(ctx context.Context, tag string, desc v1.Descriptor) error {
return distribution.ErrUnsupported
}
func (pt proxyTagService) Untag(ctx context.Context, tag string) error {
err := pt.localTags.Untag(ctx, tag)
if err != nil {
return err
}
return nil
}
func (pt proxyTagService) All(ctx context.Context) ([]string, error) {
err := pt.authChallenger.tryEstablishChallenges(ctx)
if err == nil {
tags, err := pt.remoteTags.All(ctx)
if err == nil {
return tags, err
}
}
return pt.localTags.All(ctx)
}
func (pt proxyTagService) Lookup(ctx context.Context, digest v1.Descriptor) ([]string, error) {
return []string{}, distribution.ErrUnsupported
}
package scheduler
import (
"context"
"encoding/json"
"fmt"
"sync"
"time"
"github.com/distribution/distribution/v3/internal/dcontext"
"github.com/distribution/distribution/v3/registry/storage/driver"
"github.com/distribution/reference"
)
// expiryFunc is called when a repository's TTL expires
type expiryFunc func(reference.Reference) error
const (
entryTypeBlob = iota
entryTypeManifest
indexSaveFrequency = 5 * time.Second
)
// schedulerEntry represents an entry in the scheduler
// fields are exported for serialization
type schedulerEntry struct {
Key string `json:"Key"`
Expiry time.Time `json:"ExpiryData"`
EntryType int `json:"EntryType"`
timer *time.Timer
}
// New returns a new instance of the scheduler
func New(ctx context.Context, driver driver.StorageDriver, path string) *TTLExpirationScheduler {
return &TTLExpirationScheduler{
entries: make(map[string]*schedulerEntry),
driver: driver,
pathToStateFile: path,
ctx: ctx,
stopped: true,
doneChan: make(chan struct{}),
saveTimer: time.NewTicker(indexSaveFrequency),
}
}
// TTLExpirationScheduler is a scheduler used to perform actions
// when TTLs expire
type TTLExpirationScheduler struct {
sync.Mutex
entries map[string]*schedulerEntry
driver driver.StorageDriver
ctx context.Context
pathToStateFile string
stopped bool
onBlobExpire expiryFunc
onManifestExpire expiryFunc
indexDirty bool
saveTimer *time.Ticker
doneChan chan struct{}
}
// OnBlobExpire is called when a scheduled blob's TTL expires
func (ttles *TTLExpirationScheduler) OnBlobExpire(f expiryFunc) {
ttles.Lock()
defer ttles.Unlock()
ttles.onBlobExpire = f
}
// OnManifestExpire is called when a scheduled manifest's TTL expires
func (ttles *TTLExpirationScheduler) OnManifestExpire(f expiryFunc) {
ttles.Lock()
defer ttles.Unlock()
ttles.onManifestExpire = f
}
// AddBlob schedules a blob cleanup after ttl expires
func (ttles *TTLExpirationScheduler) AddBlob(blobRef reference.Canonical, ttl time.Duration) error {
ttles.Lock()
defer ttles.Unlock()
if ttles.stopped {
return fmt.Errorf("scheduler not started")
}
ttles.add(blobRef, ttl, entryTypeBlob)
return nil
}
// AddManifest schedules a manifest cleanup after ttl expires
func (ttles *TTLExpirationScheduler) AddManifest(manifestRef reference.Canonical, ttl time.Duration) error {
ttles.Lock()
defer ttles.Unlock()
if ttles.stopped {
return fmt.Errorf("scheduler not started")
}
ttles.add(manifestRef, ttl, entryTypeManifest)
return nil
}
// Start starts the scheduler
func (ttles *TTLExpirationScheduler) Start() error {
ttles.Lock()
defer ttles.Unlock()
err := ttles.readState()
if err != nil {
return err
}
if !ttles.stopped {
return fmt.Errorf("scheduler already started")
}
dcontext.GetLogger(ttles.ctx).Infof("Starting cached object TTL expiration scheduler...")
ttles.stopped = false
// Start timer for each deserialized entry
for _, entry := range ttles.entries {
entry.timer = ttles.startTimer(entry, time.Until(entry.Expiry))
}
// Start a ticker to periodically save the entries index
go func() {
for {
select {
case <-ttles.saveTimer.C:
ttles.Lock()
if !ttles.indexDirty {
ttles.Unlock()
continue
}
err := ttles.writeState()
if err != nil {
dcontext.GetLogger(ttles.ctx).Errorf("Error writing scheduler state: %s", err)
} else {
ttles.indexDirty = false
}
ttles.Unlock()
case <-ttles.doneChan:
return
}
}
}()
return nil
}
func (ttles *TTLExpirationScheduler) add(r reference.Reference, ttl time.Duration, eType int) {
entry := &schedulerEntry{
Key: r.String(),
Expiry: time.Now().Add(ttl),
EntryType: eType,
}
dcontext.GetLogger(ttles.ctx).Infof("Adding new scheduler entry for %s with ttl=%s", entry.Key, time.Until(entry.Expiry))
if oldEntry, present := ttles.entries[entry.Key]; present && oldEntry.timer != nil {
oldEntry.timer.Stop()
}
ttles.entries[entry.Key] = entry
entry.timer = ttles.startTimer(entry, ttl)
ttles.indexDirty = true
}
func (ttles *TTLExpirationScheduler) startTimer(entry *schedulerEntry, ttl time.Duration) *time.Timer {
return time.AfterFunc(ttl, func() {
ttles.Lock()
defer ttles.Unlock()
var f expiryFunc
switch entry.EntryType {
case entryTypeBlob:
f = ttles.onBlobExpire
case entryTypeManifest:
f = ttles.onManifestExpire
default:
f = func(reference.Reference) error {
return fmt.Errorf("scheduler entry type")
}
}
ref, err := reference.Parse(entry.Key)
if err == nil {
if err := f(ref); err != nil {
dcontext.GetLogger(ttles.ctx).Errorf("Scheduler error returned from OnExpire(%s): %s", entry.Key, err)
}
} else {
dcontext.GetLogger(ttles.ctx).Errorf("Error unpacking reference: %s", err)
}
delete(ttles.entries, entry.Key)
ttles.indexDirty = true
})
}
// Stop stops the scheduler.
func (ttles *TTLExpirationScheduler) Stop() error {
ttles.Lock()
defer ttles.Unlock()
err := ttles.writeState()
if err != nil {
err = fmt.Errorf("error writing scheduler state: %w", err)
}
for _, entry := range ttles.entries {
entry.timer.Stop()
}
close(ttles.doneChan)
ttles.saveTimer.Stop()
ttles.stopped = true
return err
}
func (ttles *TTLExpirationScheduler) writeState() error {
jsonBytes, err := json.Marshal(ttles.entries)
if err != nil {
return err
}
err = ttles.driver.PutContent(ttles.ctx, ttles.pathToStateFile, jsonBytes)
if err != nil {
return err
}
return nil
}
func (ttles *TTLExpirationScheduler) readState() error {
if _, err := ttles.driver.Stat(ttles.ctx, ttles.pathToStateFile); err != nil {
switch err := err.(type) {
case driver.PathNotFoundError:
return nil
default:
return err
}
}
bytes, err := ttles.driver.GetContent(ttles.ctx, ttles.pathToStateFile)
if err != nil {
return err
}
err = json.Unmarshal(bytes, &ttles.entries)
if err != nil {
return err
}
return nil
}
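// Usage sketch (editor's addition): wiring a scheduler against the in-memory
// storage driver. The inmemory driver import, the state-file path, the
// repository reference, and the TTL are illustrative assumptions, not part of
// this package:
//
//	import "github.com/distribution/distribution/v3/registry/storage/driver/inmemory"
//
//	func exampleSchedulerUsage(ctx context.Context) error {
//		s := New(ctx, inmemory.New(), "/scheduler/state.json")
//		s.OnBlobExpire(func(ref reference.Reference) error {
//			dcontext.GetLogger(ctx).Infof("blob expired: %s", ref)
//			return nil
//		})
//		if err := s.Start(); err != nil { // Start must precede AddBlob/AddManifest
//			return err
//		}
//		defer s.Stop() // error ignored in this sketch
//		ref, err := reference.Parse("example.com/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
//		if err != nil {
//			return err
//		}
//		return s.AddBlob(ref.(reference.Canonical), 24*time.Hour)
//	}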
package storage
import (
"context"
"fmt"
"net/http"
"time"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/registry/storage/driver"
"github.com/opencontainers/go-digest"
)
// TODO(stevvooe): This should be configurable in the future.
const blobCacheControlMaxAge = 365 * 24 * time.Hour
// blobServer simply serves blobs from a driver instance using a path function
// to identify paths and a descriptor service to fill in metadata.
type blobServer struct {
driver driver.StorageDriver
statter distribution.BlobStatter
pathFn func(dgst digest.Digest) (string, error)
redirect bool // allows disabling RedirectURL redirects
}
func (bs *blobServer) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
desc, err := bs.statter.Stat(ctx, dgst)
if err != nil {
return err
}
path, err := bs.pathFn(desc.Digest)
if err != nil {
return err
}
if bs.redirect {
redirectURL, err := bs.driver.RedirectURL(r, path)
if err != nil {
return err
}
if redirectURL != "" {
// Redirect to storage URL.
http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
return nil
}
// Fallback to serving the content directly.
}
br, err := newFileReader(ctx, bs.driver, path, desc.Size)
if err != nil {
return err
}
defer br.Close()
w.Header().Set("ETag", fmt.Sprintf(`"%s"`, desc.Digest)) // If-None-Match handled by ServeContent
w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.f", blobCacheControlMaxAge.Seconds()))
if w.Header().Get("Docker-Content-Digest") == "" {
w.Header().Set("Docker-Content-Digest", desc.Digest.String())
}
if w.Header().Get("Content-Type") == "" {
// Set the content type if not already set.
w.Header().Set("Content-Type", desc.MediaType)
}
if w.Header().Get("Content-Length") == "" {
// Set the content length if not already set.
w.Header().Set("Content-Length", fmt.Sprint(desc.Size))
}
http.ServeContent(w, r, desc.Digest.String(), time.Time{}, br)
return nil
}
package storage
import (
"context"
"io"
"path"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/internal/dcontext"
"github.com/distribution/distribution/v3/registry/storage/driver"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// blobStore implements the read side of the blob store interface over a
// driver without enforcing per-repository membership. This object is
// intentionally a leaky abstraction, providing utility methods that support
// creating and traversing backend links.
type blobStore struct {
driver driver.StorageDriver
statter distribution.BlobStatter
}
var _ distribution.BlobProvider = &blobStore{}
// Get implements the BlobProvider.Get call.
func (bs *blobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
bp, err := bs.path(dgst)
if err != nil {
return nil, err
}
p, err := getContent(ctx, bs.driver, bp)
if err != nil {
switch err.(type) {
case driver.PathNotFoundError:
return nil, distribution.ErrBlobUnknown
}
return nil, err
}
return p, nil
}
func (bs *blobStore) Open(ctx context.Context, dgst digest.Digest) (io.ReadSeekCloser, error) {
desc, err := bs.statter.Stat(ctx, dgst)
if err != nil {
return nil, err
}
path, err := bs.path(desc.Digest)
if err != nil {
return nil, err
}
return newFileReader(ctx, bs.driver, path, desc.Size)
}
// Put stores the content p in the blob store, calculating the digest. If the
// content is already present, only the digest will be returned. This should
// only be used for small objects, such as manifests. This is implemented as a
// convenience for other Put implementations.
func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (v1.Descriptor, error) {
dgst := digest.FromBytes(p)
desc, err := bs.statter.Stat(ctx, dgst)
if err == nil {
// content already present
return desc, nil
} else if err != distribution.ErrBlobUnknown {
dcontext.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %v", dgst, err)
// real error, return it
return v1.Descriptor{}, err
}
bp, err := bs.path(dgst)
if err != nil {
return v1.Descriptor{}, err
}
// TODO(stevvooe): Write out mediatype here, as well.
return v1.Descriptor{
Size: int64(len(p)),
// NOTE(stevvooe): The central blob store firewalls media types from
// other users. The caller should look this up and override the value
// for the specific repository.
MediaType: "application/octet-stream",
Digest: dgst,
}, bs.driver.PutContent(ctx, bp, p)
}
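// Usage sketch (editor's addition): Put is content-addressed, so the returned
// descriptor's digest is always digest.FromBytes(p). Note that, per the TODO
// above, the mediaType argument is not yet persisted and the returned
// descriptor always carries application/octet-stream:
//
//	desc, err := bs.Put(ctx, "application/octet-stream", payload)
//	// on success: desc.Digest == digest.FromBytes(payload)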
func (bs *blobStore) Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error {
specPath, err := pathFor(blobsPathSpec{})
if err != nil {
return err
}
return bs.driver.Walk(ctx, specPath, func(fileInfo driver.FileInfo) error {
// skip directories
if fileInfo.IsDir() {
return nil
}
currentPath := fileInfo.Path()
// we only want to parse paths that end with /data
_, fileName := path.Split(currentPath)
if fileName != "data" {
return nil
}
digest, err := digestFromPath(currentPath)
if err != nil {
return err
}
return ingester(digest)
})
}
// path returns the canonical path for the blob identified by digest. The blob
// may or may not exist.
func (bs *blobStore) path(dgst digest.Digest) (string, error) {
bp, err := pathFor(blobDataPathSpec{
digest: dgst,
})
if err != nil {
return "", err
}
return bp, nil
}
// link links the path to the provided digest by writing the digest into the
// target file. Caller must ensure that the blob actually exists.
func (bs *blobStore) link(ctx context.Context, path string, dgst digest.Digest) error {
// The contents of the "link" file are the exact string contents of the
// digest, which is specified in that package.
return bs.driver.PutContent(ctx, path, []byte(dgst))
}
// readlink returns the linked digest at path.
func (bs *blobStore) readlink(ctx context.Context, path string) (digest.Digest, error) {
content, err := bs.driver.GetContent(ctx, path)
if err != nil {
return "", err
}
linked, err := digest.Parse(string(content))
if err != nil {
return "", err
}
return linked, nil
}
type blobStatter struct {
driver driver.StorageDriver
}
var _ distribution.BlobDescriptorService = &blobStatter{}
// Stat implements BlobStatter.Stat by returning the descriptor for the blob
// in the main blob store. If this method returns successfully, there is a
// strong guarantee that the blob exists and is available.
func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (v1.Descriptor, error) {
path, err := pathFor(blobDataPathSpec{
digest: dgst,
})
if err != nil {
return v1.Descriptor{}, err
}
fi, err := bs.driver.Stat(ctx, path)
if err != nil {
switch err := err.(type) {
case driver.PathNotFoundError:
return v1.Descriptor{}, distribution.ErrBlobUnknown
default:
return v1.Descriptor{}, err
}
}
if fi.IsDir() {
// NOTE(stevvooe): This represents a corruption situation. Somehow, we
// calculated a blob path and then detected a directory. We log the
// error and then err on the side of not knowing about the blob.
dcontext.GetLogger(ctx).Warnf("blob path should not be a directory: %q", path)
return v1.Descriptor{}, distribution.ErrBlobUnknown
}
// TODO(stevvooe): Add method to resolve the mediatype. We can store and
// cache a "global" media type for the blob, even if a specific repo has a
// mediatype that overrides the main one.
return v1.Descriptor{
Size: fi.Size(),
// NOTE(stevvooe): The central blob store firewalls media types from
// other users. The caller should look this up and override the value
// for the specific repository.
MediaType: "application/octet-stream",
Digest: dgst,
}, nil
}
func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
return distribution.ErrUnsupported
}
func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc v1.Descriptor) error {
return distribution.ErrUnsupported
}
package storage
import (
"context"
"errors"
"fmt"
"io"
"path"
"time"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/internal/dcontext"
storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
var errResumableDigestNotAvailable = errors.New("resumable digest not available")
const (
// digestSha256Empty is the canonical sha256 digest of empty data
digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
)
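// Worked check (editor's addition): digestSha256Empty is the digest of zero
// bytes, so it can be recomputed directly with the digest package:
//
//	digest.FromBytes(nil) == digestSha256Empty // true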
// blobWriter is used to control the various aspects of resumable
// blob upload.
type blobWriter struct {
ctx context.Context
blobStore *linkedBlobStore
id string
startedAt time.Time
digester digest.Digester
written int64 // track the write to digester
fileWriter storagedriver.FileWriter
driver storagedriver.StorageDriver
path string
resumableDigestEnabled bool
committed bool
}
var _ distribution.BlobWriter = &blobWriter{}
// ID returns the identifier for this upload.
func (bw *blobWriter) ID() string {
return bw.id
}
func (bw *blobWriter) StartedAt() time.Time {
return bw.startedAt
}
// Commit marks the upload as completed, returning a valid descriptor. The
// final size and digest are checked against the first descriptor provided.
func (bw *blobWriter) Commit(ctx context.Context, desc v1.Descriptor) (v1.Descriptor, error) {
dcontext.GetLogger(ctx).Debug("(*blobWriter).Commit")
if err := bw.fileWriter.Commit(ctx); err != nil {
return v1.Descriptor{}, err
}
bw.Close()
desc.Size = bw.Size()
canonical, err := bw.validateBlob(ctx, desc)
if err != nil {
return v1.Descriptor{}, err
}
if err := bw.moveBlob(ctx, canonical); err != nil {
return v1.Descriptor{}, err
}
if err := bw.blobStore.linkBlob(ctx, canonical, desc.Digest); err != nil {
return v1.Descriptor{}, err
}
if err := bw.removeResources(ctx); err != nil {
return v1.Descriptor{}, err
}
err = bw.blobStore.blobAccessController.SetDescriptor(ctx, canonical.Digest, canonical)
if err != nil {
return v1.Descriptor{}, err
}
bw.committed = true
return canonical, nil
}
// Cancel the blob upload process, releasing any resources associated with
// the writer and canceling the operation.
func (bw *blobWriter) Cancel(ctx context.Context) error {
dcontext.GetLogger(ctx).Debug("(*blobWriter).Cancel")
if err := bw.fileWriter.Cancel(ctx); err != nil {
return err
}
if err := bw.Close(); err != nil {
dcontext.GetLogger(ctx).Errorf("error closing blobwriter: %s", err)
}
return bw.removeResources(ctx)
}
func (bw *blobWriter) Size() int64 {
return bw.fileWriter.Size()
}
func (bw *blobWriter) Write(p []byte) (int, error) {
// Ensure that the current write offset matches how many bytes have been
// written to the digester. If not, we need to update the digest state to
// match the current write position.
if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable {
return 0, err
}
_, err := bw.fileWriter.Write(p)
if err != nil {
return 0, err
}
n, err := bw.digester.Hash().Write(p)
bw.written += int64(n)
return n, err
}
func (bw *blobWriter) ReadFrom(r io.Reader) (n int64, err error) {
// Ensure that the current write offset matches how many bytes have been
// written to the digester. If not, we need to update the digest state to
// match the current write position.
if err := bw.resumeDigest(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable {
return 0, err
}
// Using a TeeReader instead of MultiWriter ensures Copy returns
// the amount written to the digester as well as ensuring that we
// write to the fileWriter first
tee := io.TeeReader(r, bw.fileWriter)
nn, err := io.Copy(bw.digester.Hash(), tee)
bw.written += nn
return nn, err
}
func (bw *blobWriter) Close() error {
if bw.committed {
return errors.New("blobwriter close after commit")
}
if err := bw.storeHashState(bw.blobStore.ctx); err != nil && err != errResumableDigestNotAvailable {
return err
}
return bw.fileWriter.Close()
}
// validateBlob checks the data against the digest, returning an error if it
// does not match. The canonical descriptor is returned.
func (bw *blobWriter) validateBlob(ctx context.Context, desc v1.Descriptor) (v1.Descriptor, error) {
var (
verified, fullHash bool
canonical digest.Digest
)
if desc.Digest == "" {
// if no descriptors are provided, we have nothing to validate
// against. We don't really want to support this for the registry.
return v1.Descriptor{}, distribution.ErrBlobInvalidDigest{
Reason: fmt.Errorf("cannot validate against empty digest"),
}
}
var size int64
// Stat the on disk file
if fi, err := bw.driver.Stat(ctx, bw.path); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
// NOTE(stevvooe): We really don't care if the file is
// not actually present for the reader. We now assume
// that the desc length is zero.
desc.Size = 0
default:
// Any other error we want propagated up the stack.
return v1.Descriptor{}, err
}
} else {
if fi.IsDir() {
return v1.Descriptor{}, fmt.Errorf("unexpected directory at upload location %q", bw.path)
}
size = fi.Size()
}
if desc.Size > 0 {
if desc.Size != size {
return v1.Descriptor{}, distribution.ErrBlobInvalidLength
}
} else {
// if provided 0 or negative length, we can assume caller doesn't know or
// care about length.
desc.Size = size
}
// TODO(stevvooe): This section is very meandering. Need to be broken down
// to be a lot more clear.
if err := bw.resumeDigest(ctx); err == nil {
canonical = bw.digester.Digest()
if canonical.Algorithm() == desc.Digest.Algorithm() {
// Common case: client and server prefer the same canonical digest
// algorithm - currently SHA256.
verified = desc.Digest == canonical
} else {
// The client wants to use a different digest algorithm. They'll just
// have to be patient and wait for us to download and re-hash the
// uploaded content using that digest algorithm.
fullHash = true
}
} else if err == errResumableDigestNotAvailable {
// Not using resumable digests, so we need to hash the entire layer.
fullHash = true
} else {
return v1.Descriptor{}, err
}
if fullHash {
// a fantastic optimization: if the written data and the size are
// the same, we don't need to read the data from the backend. This is
// because we've written the entire file in the lifecycle of the
// current instance.
if bw.written == size && digest.Canonical == desc.Digest.Algorithm() {
canonical = bw.digester.Digest()
verified = desc.Digest == canonical
}
// If the check based on size fails, we fall back to the slowest of
// paths. We may be able to make the size-based check a stronger
// guarantee, so this may be defensive.
if !verified {
digester := digest.Canonical.Digester()
verifier := desc.Digest.Verifier()
// Read the file from the backend driver and validate it.
fr, err := newFileReader(ctx, bw.driver, bw.path, desc.Size)
if err != nil {
return v1.Descriptor{}, err
}
defer fr.Close()
tr := io.TeeReader(fr, digester.Hash())
if _, err := io.Copy(verifier, tr); err != nil {
return v1.Descriptor{}, err
}
canonical = digester.Digest()
verified = verifier.Verified()
}
}
if !verified {
dcontext.GetLoggerWithFields(ctx,
map[interface{}]interface{}{
"canonical": canonical,
"provided": desc.Digest,
}, "canonical", "provided").
Errorf("canonical digest does match provided digest")
return v1.Descriptor{}, distribution.ErrBlobInvalidDigest{
Digest: desc.Digest,
Reason: fmt.Errorf("content does not match digest"),
}
}
// update desc with canonical hash
desc.Digest = canonical
if desc.MediaType == "" {
desc.MediaType = "application/octet-stream"
}
return desc, nil
}
// moveBlob moves the data into its final, hash-qualified destination,
// identified by dgst. The layer should be validated before commencing the
// move.
func (bw *blobWriter) moveBlob(ctx context.Context, desc v1.Descriptor) error {
blobPath, err := pathFor(blobDataPathSpec{
digest: desc.Digest,
})
if err != nil {
return err
}
// Check for existence
if _, err := bw.blobStore.driver.Stat(ctx, blobPath); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
break // ensure that it doesn't exist.
default:
return err
}
} else {
// If the path exists, we can assume that the content has already
// been uploaded, since the blob storage is content-addressable.
// While it may be corrupted, detection of such corruption belongs
// elsewhere.
return nil
}
// If no data was received, we may not actually have a file on disk. Check
// the size here and write a zero-length file to blobPath if this is the
// case. For the most part, this should only ever happen with zero-length
// blobs.
if _, err := bw.blobStore.driver.Stat(ctx, bw.path); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
// HACK(stevvooe): This is slightly dangerous: if we verify above,
// get a hash, then the underlying file is deleted, we risk moving
// a zero-length blob into a nonzero-length blob location. To
// prevent this horrid thing, we employ the hack of only allowing
// this to happen for the digest of an empty blob.
if desc.Digest == digestSha256Empty {
return bw.blobStore.driver.PutContent(ctx, blobPath, []byte{})
}
// We let this fail during the move below.
logrus.
WithField("upload.id", bw.ID()).
WithField("digest", desc.Digest).Warnf("attempted to move zero-length content with non-zero digest")
default:
return err // unrelated error
}
}
// TODO(stevvooe): We should also write the mediatype when executing this move.
return bw.blobStore.driver.Move(ctx, bw.path, blobPath)
}
// removeResources should clean up all resources associated with the upload
// instance. An error will be returned if the clean up cannot proceed. If the
// resources are already not present, no error will be returned.
func (bw *blobWriter) removeResources(ctx context.Context) error {
dataPath, err := pathFor(uploadDataPathSpec{
name: bw.blobStore.repository.Named().Name(),
id: bw.id,
})
if err != nil {
return err
}
// Resolve and delete the containing directory, which should include any
// upload related files.
dirPath := path.Dir(dataPath)
if err := bw.blobStore.driver.Delete(ctx, dirPath); err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
break // already gone!
default:
// This should be uncommon enough such that returning an error
// should be okay. At this point, the upload should be mostly
// complete, but perhaps the backend became inaccessible.
dcontext.GetLogger(ctx).Errorf("unable to delete layer upload resources %q: %v", dirPath, err)
return err
}
}
return nil
}
func (bw *blobWriter) Reader() (io.ReadCloser, error) {
// todo(richardscothern): Change to exponential backoff, i=0.5, e=2, n=4
try := 1
for try <= 5 {
_, err := bw.driver.Stat(bw.ctx, bw.path)
if err == nil {
break
}
switch err.(type) {
case storagedriver.PathNotFoundError:
dcontext.GetLogger(bw.ctx).Debugf("Nothing found on try %d, sleeping...", try)
time.Sleep(1 * time.Second)
try++
default:
return nil, err
}
}
readCloser, err := bw.driver.Reader(bw.ctx, bw.path, 0)
if err != nil {
return nil, err
}
return readCloser, nil
}
//go:build !noresumabledigest
// +build !noresumabledigest
package storage
import (
"context"
"encoding"
"fmt"
"hash"
"path"
"strconv"
storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
"github.com/sirupsen/logrus"
)
// resumeDigest attempts to restore the state of the internal hash function
// by loading the most recently saved hash state whose offset equals the
// current size of the blob.
func (bw *blobWriter) resumeDigest(ctx context.Context) error {
if !bw.resumableDigestEnabled {
return errResumableDigestNotAvailable
}
h, ok := bw.digester.Hash().(encoding.BinaryUnmarshaler)
if !ok {
return errResumableDigestNotAvailable
}
offset := bw.fileWriter.Size()
if offset == bw.written {
// State of digester is already at the requested offset.
return nil
}
// List hash states from storage backend.
var hashStateMatch hashStateEntry
hashStates, err := bw.getStoredHashStates(ctx)
if err != nil {
return fmt.Errorf("unable to get stored hash states with offset %d: %s", offset, err)
}
// Find a stored hashState with an offset exactly equal to the
// requested offset.
for _, hashState := range hashStates {
if hashState.offset == offset {
hashStateMatch = hashState
break // Found an exact offset match.
}
}
if hashStateMatch.offset == 0 {
// No need to load any state, just reset the hasher.
h.(hash.Hash).Reset()
} else {
storedState, err := bw.driver.GetContent(ctx, hashStateMatch.path)
if err != nil {
return err
}
if err = h.UnmarshalBinary(storedState); err != nil {
return err
}
bw.written = hashStateMatch.offset
}
// Mind the gap.
if gapLen := offset - bw.written; gapLen > 0 {
return errResumableDigestNotAvailable
}
return nil
}
type hashStateEntry struct {
offset int64
path string
}
// getStoredHashStates returns a slice of hashStateEntries for this upload.
func (bw *blobWriter) getStoredHashStates(ctx context.Context) ([]hashStateEntry, error) {
uploadHashStatePathPrefix, err := pathFor(uploadHashStatePathSpec{
name: bw.blobStore.repository.Named().String(),
id: bw.id,
alg: bw.digester.Digest().Algorithm(),
list: true,
})
if err != nil {
return nil, err
}
paths, err := bw.blobStore.driver.List(ctx, uploadHashStatePathPrefix)
if err != nil {
if _, ok := err.(storagedriver.PathNotFoundError); !ok {
return nil, err
}
// Treat PathNotFoundError as no entries.
paths = nil
}
hashStateEntries := make([]hashStateEntry, 0, len(paths))
for _, p := range paths {
pathSuffix := path.Base(p)
// The suffix should be the offset.
offset, err := strconv.ParseInt(pathSuffix, 0, 64)
if err != nil {
logrus.Errorf("unable to parse offset from upload state path %q: %s", p, err)
}
hashStateEntries = append(hashStateEntries, hashStateEntry{offset: offset, path: p})
}
return hashStateEntries, nil
}
func (bw *blobWriter) storeHashState(ctx context.Context) error {
if !bw.resumableDigestEnabled {
return errResumableDigestNotAvailable
}
h, ok := bw.digester.Hash().(encoding.BinaryMarshaler)
if !ok {
return errResumableDigestNotAvailable
}
state, err := h.MarshalBinary()
if err != nil {
return err
}
uploadHashStatePath, err := pathFor(uploadHashStatePathSpec{
name: bw.blobStore.repository.Named().String(),
id: bw.id,
alg: bw.digester.Digest().Algorithm(),
offset: bw.written,
})
if err != nil {
return err
}
return bw.driver.PutContent(ctx, uploadHashStatePath, state)
}
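// Sketch (editor's addition): the resumable flow above depends on the
// standard library hash states being serializable. crypto/sha256 (among
// others) implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler,
// so hashing can stop and resume across processes. Assumes a crypto/sha256
// import; exampleResumeSHA256 is a hypothetical helper, not upstream code:
//
//	func exampleResumeSHA256(firstHalf, secondHalf []byte) ([]byte, error) {
//		h := sha256.New()
//		h.Write(firstHalf)
//		state, err := h.(encoding.BinaryMarshaler).MarshalBinary()
//		if err != nil {
//			return nil, err
//		}
//		// ... later, possibly after a restart, with only state persisted:
//		resumed := sha256.New()
//		if err := resumed.(encoding.BinaryUnmarshaler).UnmarshalBinary(state); err != nil {
//			return nil, err
//		}
//		resumed.Write(secondHalf)
//		return resumed.Sum(nil), nil // equals sha256(firstHalf ++ secondHalf)
//	}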
// Package cache provides facilities to speed up access to the storage
// backend.
package cache
import (
"fmt"
"github.com/distribution/distribution/v3"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// BlobDescriptorCacheProvider provides repository scoped
// BlobDescriptorService cache instances and a global descriptor cache.
type BlobDescriptorCacheProvider interface {
distribution.BlobDescriptorService
RepositoryScoped(repo string) (distribution.BlobDescriptorService, error)
}
// ValidateDescriptor provides a helper function to ensure that caches have
// common criteria for admitting descriptors.
func ValidateDescriptor(desc v1.Descriptor) error {
if err := desc.Digest.Validate(); err != nil {
return err
}
if desc.Size < 0 {
return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size)
}
if desc.MediaType == "" {
return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc)
}
return nil
}
package cache
import (
"context"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/internal/dcontext"
prometheus "github.com/distribution/distribution/v3/metrics"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
type cachedBlobStatter struct {
cache distribution.BlobDescriptorService
backend distribution.BlobDescriptorService
}
var (
// cacheRequestCount is the number of total cache requests received.
cacheRequestCount = prometheus.StorageNamespace.NewCounter("cache_requests", "The number of cache requests received")
// cacheHitCount is the number of cache requests served from the cache.
cacheHitCount = prometheus.StorageNamespace.NewCounter("cache_hits", "The number of cache requests served from the cache")
// cacheErrorCount is the number of cache request errors.
cacheErrorCount = prometheus.StorageNamespace.NewCounter("cache_errors", "The number of cache request errors")
)
// NewCachedBlobStatter creates a new statter which prefers a cache and
// falls back to a backend.
func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
return &cachedBlobStatter{
cache: cache,
backend: backend,
}
}
func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (v1.Descriptor, error) {
cacheRequestCount.Inc(1)
// try getting from cache
desc, cacheErr := cbds.cache.Stat(ctx, dgst)
if cacheErr == nil {
cacheHitCount.Inc(1)
return desc, nil
}
// couldn't get from cache; get from backend
desc, err := cbds.backend.Stat(ctx, dgst)
if err != nil {
return desc, err
}
if cacheErr == distribution.ErrBlobUnknown {
if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
dcontext.GetLoggerWithField(ctx, "blob", dgst).WithError(err).Error("error from cache setting desc")
}
// No need to propagate the cache error upstream; continue returning the value from the backend.
} else {
// Unknown error from the cache: log and count it, but do not write to the cache, as that may trigger many set calls.
dcontext.GetLoggerWithField(ctx, "blob", dgst).WithError(cacheErr).Error("error from cache stat(ing) blob")
cacheErrorCount.Inc(1)
}
return desc, nil
}
func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error {
err := cbds.cache.Clear(ctx, dgst)
if err != nil {
return err
}
err = cbds.backend.Clear(ctx, dgst)
if err != nil {
return err
}
return nil
}
func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc v1.Descriptor) error {
if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil {
dcontext.GetLoggerWithField(ctx, "blob", dgst).WithError(err).Error("error from cache setting desc")
}
return nil
}
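// Usage sketch (editor's addition, written from a consuming package; the
// in-memory provider lives in cache/memory and imports this package, so the
// code below cannot live here). backendStatter stands in for any
// distribution.BlobDescriptorService:
//
//	cacheProvider := memory.NewInMemoryBlobDescriptorCacheProvider(memory.DefaultSize)
//	statter := cache.NewCachedBlobStatter(cacheProvider, backendStatter)
//	desc, err := statter.Stat(ctx, dgst) // cache first, backend on miss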
package memory
import (
"context"
"math"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/registry/storage/cache"
"github.com/distribution/reference"
"github.com/hashicorp/golang-lru/arc/v2"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
const (
// DefaultSize is the default cache size to use if no size is explicitly
// configured.
DefaultSize = 10000
// UnlimitedSize indicates the cache size should not be limited.
UnlimitedSize = math.MaxInt
)
type descriptorCacheKey struct {
digest digest.Digest
repo string
}
type inMemoryBlobDescriptorCacheProvider struct {
lru *arc.ARCCache[descriptorCacheKey, v1.Descriptor]
}
// NewInMemoryBlobDescriptorCacheProvider returns a new mapped-based cache for
// storing blob descriptor data.
func NewInMemoryBlobDescriptorCacheProvider(size int) cache.BlobDescriptorCacheProvider {
if size <= 0 {
size = math.MaxInt
}
lruCache, err := arc.NewARC[descriptorCacheKey, v1.Descriptor](size)
if err != nil {
// NewARC can only fail if size is <= 0, so this is unreachable
panic(err)
}
return &inMemoryBlobDescriptorCacheProvider{
lru: lruCache,
}
}
func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) {
if _, err := reference.ParseNormalizedNamed(repo); err != nil {
if err == reference.ErrNameTooLong {
return nil, distribution.ErrRepositoryNameInvalid{
Name: repo,
Reason: reference.ErrNameTooLong,
}
}
return nil, err
}
return &repositoryScopedInMemoryBlobDescriptorCache{
repo: repo,
parent: imbdcp,
}, nil
}
func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (v1.Descriptor, error) {
if err := dgst.Validate(); err != nil {
return v1.Descriptor{}, err
}
key := descriptorCacheKey{
digest: dgst,
}
descriptor, ok := imbdcp.lru.Get(key)
if ok {
return descriptor, nil
}
return v1.Descriptor{}, distribution.ErrBlobUnknown
}
func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error {
key := descriptorCacheKey{
digest: dgst,
}
imbdcp.lru.Remove(key)
return nil
}
func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc v1.Descriptor) error {
_, err := imbdcp.Stat(ctx, dgst)
if err == distribution.ErrBlobUnknown {
if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest {
// if the digests differ, set the other canonical mapping
if err := imbdcp.SetDescriptor(ctx, desc.Digest, desc); err != nil {
return err
}
}
if err := dgst.Validate(); err != nil {
return err
}
if err := cache.ValidateDescriptor(desc); err != nil {
return err
}
key := descriptorCacheKey{
digest: dgst,
}
imbdcp.lru.Add(key, desc)
return nil
}
// we already know it, do nothing
return err
}
// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped
// repository cache. Instances are not thread-safe but the delegated
// operations are.
type repositoryScopedInMemoryBlobDescriptorCache struct {
repo string
parent *inMemoryBlobDescriptorCacheProvider // provides access to the shared LRU cache
}
func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (v1.Descriptor, error) {
if err := dgst.Validate(); err != nil {
return v1.Descriptor{}, err
}
key := descriptorCacheKey{
digest: dgst,
repo: rsimbdcp.repo,
}
descriptor, ok := rsimbdcp.parent.lru.Get(key)
if ok {
return descriptor, nil
}
return v1.Descriptor{}, distribution.ErrBlobUnknown
}
func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error {
key := descriptorCacheKey{
digest: dgst,
repo: rsimbdcp.repo,
}
rsimbdcp.parent.lru.Remove(key)
return nil
}
func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc v1.Descriptor) error {
if err := dgst.Validate(); err != nil {
return err
}
if err := cache.ValidateDescriptor(desc); err != nil {
return err
}
key := descriptorCacheKey{
digest: dgst,
repo: rsimbdcp.repo,
}
rsimbdcp.parent.lru.Add(key, desc)
return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc)
}
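// Usage sketch (editor's addition): the provider hands out repository-scoped
// views over one shared LRU. As implemented above, a write through a scoped
// view also populates the global (repository-less) key:
//
//	provider := NewInMemoryBlobDescriptorCacheProvider(DefaultSize)
//	repoCache, err := provider.RepositoryScoped("library/ubuntu")
//	if err != nil {
//		return err
//	}
//	if err := repoCache.SetDescriptor(ctx, desc.Digest, desc); err != nil {
//		return err
//	}
//	globalDesc, err := provider.Stat(ctx, desc.Digest) // now a cache hit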
package metrics
import (
"context"
"time"
"github.com/distribution/distribution/v3"
prometheus "github.com/distribution/distribution/v3/metrics"
"github.com/distribution/distribution/v3/registry/storage/cache"
"github.com/docker/go-metrics"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
type prometheusCacheProvider struct {
cache.BlobDescriptorCacheProvider
latencyTimer metrics.LabeledTimer
}
func NewPrometheusCacheProvider(wrap cache.BlobDescriptorCacheProvider, name, help string) cache.BlobDescriptorCacheProvider {
return &prometheusCacheProvider{
wrap,
// TODO: May want to have fine grained buckets since redis calls are generally <1ms and the default minimum bucket is 5ms.
prometheus.StorageNamespace.NewLabeledTimer(name, help, "operation"),
}
}
func (p *prometheusCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (v1.Descriptor, error) {
start := time.Now()
d, e := p.BlobDescriptorCacheProvider.Stat(ctx, dgst)
p.latencyTimer.WithValues("Stat").UpdateSince(start)
return d, e
}
func (p *prometheusCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc v1.Descriptor) error {
start := time.Now()
e := p.BlobDescriptorCacheProvider.SetDescriptor(ctx, dgst, desc)
p.latencyTimer.WithValues("SetDescriptor").UpdateSince(start)
return e
}
type prometheusRepoCacheProvider struct {
distribution.BlobDescriptorService
latencyTimer metrics.LabeledTimer
}
func (p *prometheusRepoCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (v1.Descriptor, error) {
start := time.Now()
d, e := p.BlobDescriptorService.Stat(ctx, dgst)
p.latencyTimer.WithValues("RepoStat").UpdateSince(start)
return d, e
}
func (p *prometheusRepoCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc v1.Descriptor) error {
start := time.Now()
e := p.BlobDescriptorService.SetDescriptor(ctx, dgst, desc)
p.latencyTimer.WithValues("RepoSetDescriptor").UpdateSince(start)
return e
}
func (p *prometheusCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) {
s, err := p.BlobDescriptorCacheProvider.RepositoryScoped(repo)
if err != nil {
return nil, err
}
return &prometheusRepoCacheProvider{
s,
p.latencyTimer,
}, nil
}
package redis
import (
"context"
"fmt"
"strconv"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/registry/storage/cache"
"github.com/distribution/distribution/v3/registry/storage/cache/metrics"
"github.com/distribution/reference"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/redis/go-redis/v9"
)
// redisBlobDescriptorService provides an implementation of
// BlobDescriptorCacheProvider based on redis. Blob descriptors are stored in
// two parts. The first provides fast access to repository membership through a
// redis set for each repo. The second is a redis hash keyed by the digest of
// the layer, providing path, length and mediatype information. There is also
// a per-repository redis hash of the blob descriptor, allowing override of
// data. This is currently used to override the mediatype on a per-repository
// basis.
//
// Note that there is no implied relationship between these two caches. The
// layer may exist in one, both, or neither, and the code must handle all of
// these cases.
type redisBlobDescriptorService struct {
pool redis.UniversalClient
// TODO(stevvooe): We use a pool because we don't have great control over
// the cache lifecycle to manage connections. A new connection is fetched
// for each operation. Once we have better lifecycle management of the
// request objects, we can change this to a connection.
}
var _ distribution.BlobDescriptorService = &redisBlobDescriptorService{}
// NewRedisBlobDescriptorCacheProvider returns a new redis-based
// BlobDescriptorCacheProvider using the provided redis connection pool.
func NewRedisBlobDescriptorCacheProvider(pool redis.UniversalClient) cache.BlobDescriptorCacheProvider {
return metrics.NewPrometheusCacheProvider(
&redisBlobDescriptorService{
pool: pool,
},
"cache_redis",
"Number of seconds taken by redis",
)
}
// RepositoryScoped returns the scoped cache.
func (rbds *redisBlobDescriptorService) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) {
if _, err := reference.ParseNormalizedNamed(repo); err != nil {
if err == reference.ErrNameTooLong {
return nil, distribution.ErrRepositoryNameInvalid{
Name: repo,
Reason: reference.ErrNameTooLong,
}
}
return nil, err
}
return &repositoryScopedRedisBlobDescriptorService{
repo: repo,
upstream: rbds,
}, nil
}
// Stat retrieves the descriptor data from the redis hash entry.
func (rbds *redisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (v1.Descriptor, error) {
if err := dgst.Validate(); err != nil {
return v1.Descriptor{}, err
}
return rbds.stat(ctx, dgst)
}
func (rbds *redisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error {
if err := dgst.Validate(); err != nil {
return err
}
// Not atomic in redis <= 2.3
cmd := rbds.pool.HDel(ctx, rbds.blobDescriptorHashKey(dgst), "digest", "size", "mediatype")
res, err := cmd.Result()
if err != nil {
return err
}
if res == 0 {
return distribution.ErrBlobUnknown
}
return nil
}
func (rbds *redisBlobDescriptorService) stat(ctx context.Context, dgst digest.Digest) (v1.Descriptor, error) {
cmd := rbds.pool.HMGet(ctx, rbds.blobDescriptorHashKey(dgst), "digest", "size", "mediatype")
reply, err := cmd.Result()
if err != nil {
return v1.Descriptor{}, err
}
// NOTE(stevvooe): The "size" field used to be "length". We treat a
// missing "size" field here as an unknown blob, which causes a cache
// miss, effectively migrating the field.
if len(reply) < 3 || reply[0] == nil || reply[1] == nil { // don't care if mediatype is nil
return v1.Descriptor{}, distribution.ErrBlobUnknown
}
var desc v1.Descriptor
digestString, ok := reply[0].(string)
if !ok {
return v1.Descriptor{}, fmt.Errorf("digest is not a string")
}
desc.Digest = digest.Digest(digestString)
sizeString, ok := reply[1].(string)
if !ok {
return v1.Descriptor{}, fmt.Errorf("size is not a string")
}
size, err := strconv.ParseInt(sizeString, 10, 64)
if err != nil {
return v1.Descriptor{}, err
}
desc.Size = size
if reply[2] != nil {
mediaType, ok := reply[2].(string)
if ok {
desc.MediaType = mediaType
}
}
return desc, nil
}
// SetDescriptor sets the descriptor data for the given digest using a redis
// hash. A hash is used here since we may store unrelated fields about a layer
// in the future.
func (rbds *redisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc v1.Descriptor) error {
if err := dgst.Validate(); err != nil {
return err
}
if err := cache.ValidateDescriptor(desc); err != nil {
return err
}
return rbds.setDescriptor(ctx, dgst, desc)
}
func (rbds *redisBlobDescriptorService) setDescriptor(ctx context.Context, dgst digest.Digest, desc v1.Descriptor) error {
cmd := rbds.pool.HMSet(ctx, rbds.blobDescriptorHashKey(dgst), "digest", desc.Digest.String(), "size", desc.Size)
if cmd.Err() != nil {
return cmd.Err()
}
cmd = rbds.pool.HSetNX(ctx, rbds.blobDescriptorHashKey(dgst), "mediatype", desc.MediaType)
if cmd.Err() != nil {
return cmd.Err()
}
return nil
}
func (rbds *redisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string {
return "blobs::" + dgst.String()
}
type repositoryScopedRedisBlobDescriptorService struct {
repo string
upstream *redisBlobDescriptorService
}
var _ distribution.BlobDescriptorService = &repositoryScopedRedisBlobDescriptorService{}
// Stat ensures that the digest is a member of the specified repository and
// forwards the descriptor request to the global blob store. If the media type
// differs for the repository, we override it.
func (rsrbds *repositoryScopedRedisBlobDescriptorService) Stat(ctx context.Context, dgst digest.Digest) (v1.Descriptor, error) {
if err := dgst.Validate(); err != nil {
return v1.Descriptor{}, err
}
pool := rsrbds.upstream.pool
// Check membership to repository first
member, err := pool.SIsMember(ctx, rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst.String()).Result()
if err != nil {
return v1.Descriptor{}, err
}
if !member {
return v1.Descriptor{}, distribution.ErrBlobUnknown
}
upstream, err := rsrbds.upstream.stat(ctx, dgst)
if err != nil {
return v1.Descriptor{}, err
}
// We allow a per repository mediatype, let's look it up here.
mediatype, err := pool.HGet(ctx, rsrbds.blobDescriptorHashKey(dgst), "mediatype").Result()
if err != nil {
if err == redis.Nil {
return v1.Descriptor{}, distribution.ErrBlobUnknown
}
return v1.Descriptor{}, err
}
if mediatype != "" {
upstream.MediaType = mediatype
}
return upstream, nil
}
// Clear removes the descriptor from the cache and forwards to the upstream descriptor store
func (rsrbds *repositoryScopedRedisBlobDescriptorService) Clear(ctx context.Context, dgst digest.Digest) error {
if err := dgst.Validate(); err != nil {
return err
}
// Check membership to repository first
member, err := rsrbds.upstream.pool.SIsMember(ctx, rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst.String()).Result()
if err != nil {
return err
}
if !member {
return distribution.ErrBlobUnknown
}
return rsrbds.upstream.Clear(ctx, dgst)
}
func (rsrbds *repositoryScopedRedisBlobDescriptorService) SetDescriptor(ctx context.Context, dgst digest.Digest, desc v1.Descriptor) error {
if err := dgst.Validate(); err != nil {
return err
}
if err := cache.ValidateDescriptor(desc); err != nil {
return err
}
if dgst != desc.Digest {
if dgst.Algorithm() == desc.Digest.Algorithm() {
return fmt.Errorf("redis cache: digest for descriptors differ but algorithm does not: %q != %q", dgst, desc.Digest)
}
}
return rsrbds.setDescriptor(ctx, dgst, desc)
}
func (rsrbds *repositoryScopedRedisBlobDescriptorService) setDescriptor(ctx context.Context, dgst digest.Digest, desc v1.Descriptor) error {
conn := rsrbds.upstream.pool
_, err := conn.SAdd(ctx, rsrbds.repositoryBlobSetKey(rsrbds.repo), dgst.String()).Result()
if err != nil {
return err
}
if err := rsrbds.upstream.setDescriptor(ctx, dgst, desc); err != nil {
return err
}
// Override repository mediatype.
_, err = conn.HSet(ctx, rsrbds.blobDescriptorHashKey(dgst), "mediatype", desc.MediaType).Result()
if err != nil {
return err
}
// Also set the values for the primary descriptor, if they differ by
// algorithm (ie sha256 vs sha512).
if desc.Digest != "" && dgst != desc.Digest && dgst.Algorithm() != desc.Digest.Algorithm() {
if err := rsrbds.setDescriptor(ctx, desc.Digest, desc); err != nil {
return err
}
}
return nil
}
func (rsrbds *repositoryScopedRedisBlobDescriptorService) blobDescriptorHashKey(dgst digest.Digest) string {
return "repository::" + rsrbds.repo + "::blobs::" + dgst.String()
}
func (rsrbds *repositoryScopedRedisBlobDescriptorService) repositoryBlobSetKey(repo string) string {
return "repository::" + rsrbds.repo + "::blobs"
}
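// Key layout illustration (editor's addition) for a digest sha256:abc… in
// repository "myrepo":
//
//	blobs::sha256:abc…                        global descriptor hash
//	repository::myrepo::blobs                 repository membership set
//	repository::myrepo::blobs::sha256:abc…    per-repository descriptor hash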
package storage
import (
"context"
"errors"
"io"
"path"
"strings"
"github.com/distribution/distribution/v3/registry/storage/driver"
"github.com/distribution/reference"
)
// Repositories returns a list, or partial list, of repositories in the registry.
// Because it's quite an expensive operation, it should only be used when building
// up an initial set of repositories.
func (reg *registry) Repositories(ctx context.Context, repos []string, last string) (int, error) {
filledBuffer := false
foundRepos := 0
if len(repos) == 0 {
return 0, errors.New("Attempted to list 0 repositories")
}
root, err := pathFor(repositoriesRootPathSpec{})
if err != nil {
return 0, err
}
startAfter := ""
if last != "" {
startAfter, err = pathFor(manifestsPathSpec{name: last})
if err != nil {
return 0, err
}
}
err = reg.blobStore.driver.Walk(ctx, root, func(fileInfo driver.FileInfo) error {
err := handleRepository(fileInfo, root, last, func(repoPath string) error {
repos[foundRepos] = repoPath
foundRepos += 1
return nil
})
if err != nil {
return err
}
// if we've filled our slice, no need to walk any further
if foundRepos == len(repos) {
filledBuffer = true
return driver.ErrFilledBuffer
}
return nil
}, driver.WithStartAfterHint(startAfter))
if err != nil {
return foundRepos, err
}
if filledBuffer {
// There are potentially more repositories to list
return foundRepos, nil
}
// We didn't fill the buffer, so that's the end of the list of repos
return foundRepos, io.EOF
}
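// Usage sketch (editor's addition): paging through the catalog with a
// fixed-size buffer. io.EOF marks the final page; entries are consumed before
// the error check because the last page may be partially filled. handle is a
// hypothetical consumer:
//
//	repos := make([]string, 100)
//	last := ""
//	for {
//		n, err := reg.Repositories(ctx, repos, last)
//		handle(repos[:n])
//		if err == io.EOF {
//			break
//		}
//		if err != nil {
//			return err
//		}
//		last = repos[n-1]
//	}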
// Enumerate applies ingester to each repository
func (reg *registry) Enumerate(ctx context.Context, ingester func(string) error) error {
root, err := pathFor(repositoriesRootPathSpec{})
if err != nil {
return err
}
err = reg.blobStore.driver.Walk(ctx, root, func(fileInfo driver.FileInfo) error {
return handleRepository(fileInfo, root, "", ingester)
})
return err
}
// Remove removes a repository from storage
func (reg *registry) Remove(ctx context.Context, name reference.Named) error {
root, err := pathFor(repositoriesRootPathSpec{})
if err != nil {
return err
}
repoDir := path.Join(root, name.Name())
return reg.driver.Delete(ctx, repoDir)
}
// lessPath returns true if path a is less than path b.
//
// A component-wise comparison is done, rather than the lexical comparison of
// strings.
func lessPath(a, b string) bool {
// we provide this behavior by making separator always sort first.
return compareReplaceInline(a, b, '/', '\x00') < 0
}
// compareReplaceInline modifies runtime.cmpstring to replace old with new
// during a byte-wise comparison.
func compareReplaceInline(s1, s2 string, old, new byte) int {
// TODO(stevvooe): We are missing an optimization when the s1 and s2 have
// the exact same slice header. It will make the code unsafe but can
// provide some extra performance.
l := len(s1)
if len(s2) < l {
l = len(s2)
}
for i := 0; i < l; i++ {
c1, c2 := s1[i], s2[i]
if c1 == old {
c1 = new
}
if c2 == old {
c2 = new
}
if c1 < c2 {
return -1
}
if c1 > c2 {
return +1
}
}
if len(s1) < len(s2) {
return -1
}
if len(s1) > len(s2) {
return +1
}
return 0
}
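// Illustration (editor's addition): remapping '/' below every other byte
// means a path component boundary always sorts first:
//
//	lessPath("a/b", "a+b") // true: '/' is compared as 0x00
//	"a/b" < "a+b"          // false in plain lexical order, since '+' (0x2B) < '/' (0x2F)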
// handleRepository calls function fn with a repository path if fileInfo
// has a path of a repository under root and that path is lexicographically
// after last. Otherwise, it will return ErrSkipDir or ErrFilledBuffer.
// These should be used with Walk to do handling with repositories in a
// storage.
func handleRepository(fileInfo driver.FileInfo, root, last string, fn func(repoPath string) error) error {
filePath := fileInfo.Path()
// lop the base path off
repo := filePath[len(root)+1:]
_, file := path.Split(repo)
if file == "_manifests" {
repo = strings.TrimSuffix(repo, "/_manifests")
if lessPath(last, repo) {
if err := fn(repo); err != nil {
return err
}
}
return driver.ErrSkipDir
} else if strings.HasPrefix(file, "_") {
return driver.ErrSkipDir
}
return nil
}
// Package base provides a base implementation of the storage driver that can
// be used to implement common checks. The goal is to increase the amount of
// code sharing.
//
// The canonical approach to use this class is to embed in the exported driver
// struct such that calls are proxied through this implementation. First,
// declare the internal driver, as follows:
//
// type driver struct { ... internal ...}
//
// The resulting type should implement StorageDriver such that it can be the
// target of a Base struct. The exported type can then be declared as follows:
//
// type Driver struct {
// Base
// }
//
// Because Driver embeds Base, it effectively implements Base. If the driver
// needs to intercept a call before it reaches Base, Driver should implement
// that method itself; Base remains the fallback for everything else, while
// the internal driver implements the actual logic.
//
// To further shield the embed from other packages, it is recommended to
// employ a private embed struct:
//
// type baseEmbed struct {
// base.Base
// }
//
// Then, declare driver to embed baseEmbed, rather than Base directly:
//
// type Driver struct {
// baseEmbed
// }
//
// The type now implements StorageDriver, proxying through Base, without
// exporting an unnecessary field.
package base
import (
"context"
"io"
"net/http"
"time"
"github.com/distribution/distribution/v3/internal/dcontext"
prometheus "github.com/distribution/distribution/v3/metrics"
storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
"github.com/distribution/distribution/v3/tracing"
"github.com/docker/go-metrics"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)
// storageAction is a timer metric for blob-related storage operations
var storageAction = prometheus.StorageNamespace.NewLabeledTimer("action", "The number of seconds that the storage action takes", "driver", "action")
// tracer is the OpenTelemetry tracer utilized for tracing operations within
// this package's code.
var tracer = otel.Tracer("github.com/distribution/distribution/v3/registry/storage/driver/base")
func init() {
metrics.Register(prometheus.StorageNamespace)
}
// Base provides a wrapper around a storagedriver implementation that provides
// common path and bounds checking.
type Base struct {
storagedriver.StorageDriver
}
// Format errors received from the storage driver
func (base *Base) setDriverName(e error) error {
switch actual := e.(type) {
case nil:
return nil
case storagedriver.ErrUnsupportedMethod:
actual.DriverName = base.StorageDriver.Name()
return actual
case storagedriver.PathNotFoundError:
actual.DriverName = base.StorageDriver.Name()
return actual
case storagedriver.InvalidPathError:
actual.DriverName = base.StorageDriver.Name()
return actual
case storagedriver.InvalidOffsetError:
actual.DriverName = base.StorageDriver.Name()
return actual
default:
return storagedriver.Error{
DriverName: base.StorageDriver.Name(),
Detail: e,
}
}
}
// GetContent wraps GetContent of underlying storage driver.
func (base *Base) GetContent(ctx context.Context, path string) ([]byte, error) {
attrs := []attribute.KeyValue{
attribute.String(tracing.AttributePrefix+"storage.driver.name", base.Name()),
attribute.String(tracing.AttributePrefix+"storage.path", path),
}
ctx, span := tracer.Start(
ctx,
"GetContent",
trace.WithAttributes(attrs...))
defer span.End()
if !storagedriver.PathRegexp.MatchString(path) {
return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
}
start := time.Now()
b, e := base.StorageDriver.GetContent(ctx, path)
storageAction.WithValues(base.Name(), "GetContent").UpdateSince(start)
return b, base.setDriverName(e)
}
// PutContent wraps PutContent of underlying storage driver.
func (base *Base) PutContent(ctx context.Context, path string, content []byte) error {
attrs := []attribute.KeyValue{
attribute.String(tracing.AttributePrefix+"storage.driver.name", base.Name()),
attribute.String(tracing.AttributePrefix+"storage.path", path),
attribute.Int(tracing.AttributePrefix+"storage.content.length", len(content)),
}
ctx, span := tracer.Start(
ctx,
"PutContent",
trace.WithAttributes(attrs...))
defer span.End()
if !storagedriver.PathRegexp.MatchString(path) {
return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
}
start := time.Now()
err := base.setDriverName(base.StorageDriver.PutContent(ctx, path, content))
storageAction.WithValues(base.Name(), "PutContent").UpdateSince(start)
return err
}
// Reader wraps Reader of underlying storage driver.
func (base *Base) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
attrs := []attribute.KeyValue{
attribute.String(tracing.AttributePrefix+"storage.driver.name", base.Name()),
attribute.String(tracing.AttributePrefix+"storage.path", path),
attribute.Int64(tracing.AttributePrefix+"storage.offset", offset),
}
ctx, span := tracer.Start(
ctx,
"Reader",
trace.WithAttributes(attrs...))
defer span.End()
if offset < 0 {
return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset, DriverName: base.StorageDriver.Name()}
}
if !storagedriver.PathRegexp.MatchString(path) {
return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
}
rc, e := base.StorageDriver.Reader(ctx, path, offset)
return rc, base.setDriverName(e)
}
// Writer wraps Writer of underlying storage driver.
func (base *Base) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
attrs := []attribute.KeyValue{
attribute.String(tracing.AttributePrefix+"storage.driver.name", base.Name()),
attribute.String(tracing.AttributePrefix+"storage.path", path),
attribute.Bool(tracing.AttributePrefix+"storage.append", append),
}
ctx, span := tracer.Start(
ctx,
"Writer",
trace.WithAttributes(attrs...))
defer span.End()
if !storagedriver.PathRegexp.MatchString(path) {
return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
}
writer, e := base.StorageDriver.Writer(ctx, path, append)
return writer, base.setDriverName(e)
}
// Stat wraps Stat of underlying storage driver.
func (base *Base) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
attrs := []attribute.KeyValue{
attribute.String(tracing.AttributePrefix+"storage.driver.name", base.Name()),
attribute.String(tracing.AttributePrefix+"storage.path", path),
}
ctx, span := tracer.Start(
ctx,
"Stat",
trace.WithAttributes(attrs...))
defer span.End()
if !storagedriver.PathRegexp.MatchString(path) && path != "/" {
return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
}
start := time.Now()
fi, e := base.StorageDriver.Stat(ctx, path)
storageAction.WithValues(base.Name(), "Stat").UpdateSince(start)
return fi, base.setDriverName(e)
}
// List wraps List of underlying storage driver.
func (base *Base) List(ctx context.Context, path string) ([]string, error) {
attrs := []attribute.KeyValue{
attribute.String(tracing.AttributePrefix+"storage.driver.name", base.Name()),
attribute.String(tracing.AttributePrefix+"storage.path", path),
}
ctx, span := tracer.Start(
ctx,
"List",
trace.WithAttributes(attrs...))
defer span.End()
if !storagedriver.PathRegexp.MatchString(path) && path != "/" {
return nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
}
start := time.Now()
str, e := base.StorageDriver.List(ctx, path)
storageAction.WithValues(base.Name(), "List").UpdateSince(start)
return str, base.setDriverName(e)
}
// Move wraps Move of underlying storage driver.
func (base *Base) Move(ctx context.Context, sourcePath string, destPath string) error {
attrs := []attribute.KeyValue{
attribute.String(tracing.AttributePrefix+"storage.driver.name", base.Name()),
attribute.String(tracing.AttributePrefix+"storage.source.path", sourcePath),
attribute.String(tracing.AttributePrefix+"storage.dest.path", destPath),
}
ctx, span := tracer.Start(
ctx,
"Move",
trace.WithAttributes(attrs...))
defer span.End()
ctx, done := dcontext.WithTrace(ctx)
defer done("%s.Move(%q, %q", base.Name(), sourcePath, destPath)
if !storagedriver.PathRegexp.MatchString(sourcePath) {
return storagedriver.InvalidPathError{Path: sourcePath, DriverName: base.StorageDriver.Name()}
} else if !storagedriver.PathRegexp.MatchString(destPath) {
return storagedriver.InvalidPathError{Path: destPath, DriverName: base.StorageDriver.Name()}
}
start := time.Now()
err := base.setDriverName(base.StorageDriver.Move(ctx, sourcePath, destPath))
storageAction.WithValues(base.Name(), "Move").UpdateSince(start)
return err
}
// Delete wraps Delete of underlying storage driver.
func (base *Base) Delete(ctx context.Context, path string) error {
attrs := []attribute.KeyValue{
attribute.String(tracing.AttributePrefix+"storage.driver.name", base.Name()),
attribute.String(tracing.AttributePrefix+"storage.path", path),
}
ctx, span := tracer.Start(
ctx,
"Delete",
trace.WithAttributes(attrs...))
defer span.End()
if !storagedriver.PathRegexp.MatchString(path) {
return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
}
start := time.Now()
err := base.setDriverName(base.StorageDriver.Delete(ctx, path))
storageAction.WithValues(base.Name(), "Delete").UpdateSince(start)
return err
}
// RedirectURL wraps RedirectURL of the underlying storage driver.
func (base *Base) RedirectURL(r *http.Request, path string) (string, error) {
attrs := []attribute.KeyValue{
attribute.String(tracing.AttributePrefix+"storage.driver.name", base.Name()),
attribute.String(tracing.AttributePrefix+"storage.path", path),
}
ctx, span := tracer.Start(
r.Context(),
"RedirectURL",
trace.WithAttributes(attrs...))
defer span.End()
if !storagedriver.PathRegexp.MatchString(path) {
return "", storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
}
start := time.Now()
str, e := base.StorageDriver.RedirectURL(r.WithContext(ctx), path)
storageAction.WithValues(base.Name(), "RedirectURL").UpdateSince(start)
return str, base.setDriverName(e)
}
// Walk wraps Walk of underlying storage driver.
func (base *Base) Walk(ctx context.Context, path string, f storagedriver.WalkFn, options ...func(*storagedriver.WalkOptions)) error {
attrs := []attribute.KeyValue{
attribute.String(tracing.AttributePrefix+"storage.driver.name", base.Name()),
attribute.String(tracing.AttributePrefix+"storage.path", path),
}
ctx, span := tracer.Start(
ctx,
"Walk",
trace.WithAttributes(attrs...))
defer span.End()
if !storagedriver.PathRegexp.MatchString(path) && path != "/" {
return storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}
}
return base.setDriverName(base.StorageDriver.Walk(ctx, path, f, options...))
}
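// Each wrapper above follows the same pattern: start a tracing span, validate
// the path against storagedriver.PathRegexp, time most underlying calls for
// the storageAction metric, and stamp the driver name onto any returned error
// via setDriverName. As a hedged, illustrative sketch (not code from this
// file), a driver picks all of this up simply by embedding Base, as the
// inmemory and s3 drivers below do:
//
//	// d is any storagedriver.StorageDriver implementation (hypothetical).
//	wrapped := base.Base{StorageDriver: d}
//	// Calls through wrapped now carry tracing, metrics and path validation.
//	content, err := wrapped.GetContent(ctx, "/docker/registry/v2/some/path")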
package base
import (
"context"
"fmt"
"io"
"net/http"
"reflect"
"strconv"
"sync"
storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
)
type regulator struct {
storagedriver.StorageDriver
*sync.Cond
available uint64
}
// GetLimitFromParameter takes an interface type as decoded from the YAML
// configuration and returns a uint64 representing the maximum number of
// concurrent calls given a minimum limit and default.
//
// If the parameter supplied is of an invalid type this returns an error.
func GetLimitFromParameter(param interface{}, min, def uint64) (uint64, error) {
limit := def
switch v := param.(type) {
case string:
var err error
if limit, err = strconv.ParseUint(v, 0, 64); err != nil {
return limit, fmt.Errorf("parameter must be an integer, '%v' invalid", param)
}
case uint64:
limit = v
case int, int32, int64:
val := reflect.ValueOf(v).Convert(reflect.TypeOf(param)).Int()
// if param is negative, casting to uint64 would wrap around and yield an
// enormous limit, so fall back to the minimum instead
if val > 0 {
limit = uint64(val)
} else {
limit = min
}
case uint, uint32:
limit = reflect.ValueOf(v).Convert(reflect.TypeOf(param)).Uint()
case nil:
// use the default
default:
return 0, fmt.Errorf("invalid value '%#v'", param)
}
if limit < min {
return min, nil
}
return limit, nil
}
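// A few hedged examples of how the parsing and clamping above behave (values
// chosen purely for illustration):
//
//	GetLimitFromParameter(nil, 25, 100)         // 100: nil selects the default
//	GetLimitFromParameter("10", 25, 100)        // 25: parsed 10 is below the minimum
//	GetLimitFromParameter(uint64(500), 25, 100) // 500: accepted as-is
//	GetLimitFromParameter(-1, 25, 100)          // 25: negative values fall back to the minimum
//	GetLimitFromParameter(1.5, 25, 100)         // error: unsupported parameter type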
// NewRegulator wraps the given driver and is used to regulate concurrent calls
// to the given storage driver to a maximum of the given limit. This is useful
// for storage drivers that would otherwise create an unbounded number of OS
// threads if allowed to be called unregulated.
func NewRegulator(driver storagedriver.StorageDriver, limit uint64) storagedriver.StorageDriver {
return &regulator{
StorageDriver: driver,
Cond: sync.NewCond(&sync.Mutex{}),
available: limit,
}
}
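// enter and exit below form a counting semaphore on top of sync.Cond: enter
// blocks while no slots are available and then claims one; exit releases a
// slot and wakes a single waiter. A hedged usage sketch (fsDriver and the
// limit of 64 are hypothetical):
//
//	// fsDriver is any storagedriver.StorageDriver implementation.
//	regulated := base.NewRegulator(fsDriver, 64) // at most 64 concurrent driver calls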
func (r *regulator) enter() {
r.L.Lock()
for r.available == 0 {
r.Wait()
}
r.available--
r.L.Unlock()
}
func (r *regulator) exit() {
r.L.Lock()
r.Signal()
r.available++
r.L.Unlock()
}
// Name returns the human-readable "name" of the driver, useful in error
// messages and logging. By convention, this will just be the registration
// name, but drivers may provide other information here.
func (r *regulator) Name() string {
r.enter()
defer r.exit()
return r.StorageDriver.Name()
}
// GetContent retrieves the content stored at "path" as a []byte.
// This should primarily be used for small objects.
func (r *regulator) GetContent(ctx context.Context, path string) ([]byte, error) {
r.enter()
defer r.exit()
return r.StorageDriver.GetContent(ctx, path)
}
// PutContent stores the []byte content at a location designated by "path".
// This should primarily be used for small objects.
func (r *regulator) PutContent(ctx context.Context, path string, content []byte) error {
r.enter()
defer r.exit()
return r.StorageDriver.PutContent(ctx, path, content)
}
// Reader retrieves an io.ReadCloser for the content stored at "path"
// with a given byte offset.
// May be used to resume reading a stream by providing a nonzero offset.
func (r *regulator) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
r.enter()
defer r.exit()
return r.StorageDriver.Reader(ctx, path, offset)
}
// Writer stores the contents of the provided io.ReadCloser at a
// location designated by the given path.
// May be used to resume writing a stream by providing a nonzero offset.
// The offset must be no larger than the CurrentSize for this path.
func (r *regulator) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
r.enter()
defer r.exit()
return r.StorageDriver.Writer(ctx, path, append)
}
// Stat retrieves the FileInfo for the given path, including the current
// size in bytes and the creation time.
func (r *regulator) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
r.enter()
defer r.exit()
return r.StorageDriver.Stat(ctx, path)
}
// List returns a list of the objects that are direct descendants of the
// given path.
func (r *regulator) List(ctx context.Context, path string) ([]string, error) {
r.enter()
defer r.exit()
return r.StorageDriver.List(ctx, path)
}
// Move moves an object stored at sourcePath to destPath, removing the
// original object.
// Note: This may be no more efficient than a copy followed by a delete for
// many implementations.
func (r *regulator) Move(ctx context.Context, sourcePath string, destPath string) error {
r.enter()
defer r.exit()
return r.StorageDriver.Move(ctx, sourcePath, destPath)
}
// Delete recursively deletes all objects stored at "path" and its subpaths.
func (r *regulator) Delete(ctx context.Context, path string) error {
r.enter()
defer r.exit()
return r.StorageDriver.Delete(ctx, path)
}
// RedirectURL returns a URL which may be used to retrieve the content stored at
// the given path.
func (r *regulator) RedirectURL(req *http.Request, path string) (string, error) {
r.enter()
defer r.exit()
return r.StorageDriver.RedirectURL(req, path)
}
package factory
import (
"context"
"fmt"
storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
)
// driverFactories stores an internal mapping between storage driver names and their respective
// factories
var driverFactories = make(map[string]StorageDriverFactory)
// StorageDriverFactory is a factory interface for creating storagedriver.StorageDriver interfaces
// Storage drivers should call Register() with a factory to make the driver available by name.
// Individual StorageDriver implementations generally register with the factory via the Register
// func (below) in their init() funcs, and as such they should be imported anonymously before use.
// See below for an example of how to register and get a StorageDriver for S3
//
// import _ "github.com/distribution/distribution/v3/registry/storage/driver/s3-aws"
// s3Driver, err = factory.Create("s3", storageParams)
// // assuming no error, s3Driver is the StorageDriver that communicates with S3 according to storageParams
type StorageDriverFactory interface {
// Create returns a new storagedriver.StorageDriver with the given parameters
// Parameters will vary by driver and may be ignored
// Each parameter key must only consist of lowercase letters and numbers
Create(ctx context.Context, parameters map[string]interface{}) (storagedriver.StorageDriver, error)
}
// Register makes a storage driver available by the provided name.
// If Register is called twice with the same name or if driver factory is nil, it panics.
// Additionally, it is not concurrency safe. Most Storage Drivers call this function
// in their init() functions. See the documentation for StorageDriverFactory for more.
func Register(name string, factory StorageDriverFactory) {
if factory == nil {
panic("Must not provide nil StorageDriverFactory")
}
_, registered := driverFactories[name]
if registered {
panic(fmt.Sprintf("StorageDriverFactory named %s already registered", name))
}
driverFactories[name] = factory
}
// Create a new storagedriver.StorageDriver with the given name and
// parameters. To use a driver, the StorageDriverFactory must first be
// registered with the given name. If no drivers are found, an
// InvalidStorageDriverError is returned
func Create(ctx context.Context, name string, parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
driverFactory, ok := driverFactories[name]
if !ok {
return nil, InvalidStorageDriverError{name}
}
return driverFactory.Create(ctx, parameters)
}
// InvalidStorageDriverError records an attempt to construct an unregistered storage driver
type InvalidStorageDriverError struct {
Name string
}
func (err InvalidStorageDriverError) Error() string {
return fmt.Sprintf("StorageDriver not registered: %s", err.Name)
}
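// As a hedged sketch of the full registration flow for a custom driver (the
// "mydriver" name, myDriverFactory type and newMyDriver constructor are
// hypothetical, not part of this package):
//
//	type myDriverFactory struct{}
//
//	func (f *myDriverFactory) Create(ctx context.Context, parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
//		return newMyDriver(parameters) // hypothetical constructor
//	}
//
//	func init() {
//		factory.Register("mydriver", &myDriverFactory{})
//	}
//
// After that init runs, factory.Create(ctx, "mydriver", parameters) resolves
// to the factory above; unregistered names yield InvalidStorageDriverError.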
package driver
import "time"
// FileInfo returns information about a given path. Inspired by os.FileInfo,
// it elides the base name method for a full path instead.
type FileInfo interface {
// Path provides the full path of the target of this file info.
Path() string
// Size returns current length in bytes of the file. The return value can
// be used to write to the end of the file at path. The value is
// meaningless if IsDir returns true.
Size() int64
// ModTime returns the modification time for the file. For backends that
// don't have a modification time, the creation time should be returned.
ModTime() time.Time
// IsDir returns true if the path is a directory.
IsDir() bool
}
// NOTE(stevvooe): The next two types, FileInfoFields and FileInfoInternal
// should only be used by storagedriver implementations. They should be moved
// to a "driver" package, similar to database/sql.
// a "driver" package, similar to database/sql.
// FileInfoFields provides the exported fields for implementing FileInfo
// interface in storagedriver implementations. It should be used with
// InternalFileInfo.
type FileInfoFields struct {
// Path provides the full path of the target of this file info.
Path string
// Size is current length in bytes of the file. The value of this field
// can be used to write to the end of the file at path. The value is
// meaningless if IsDir is set to true.
Size int64
// ModTime returns the modification time for the file. For backends that
// don't have a modification time, the creation time should be returned.
ModTime time.Time
// IsDir returns true if the path is a directory.
IsDir bool
}
// FileInfoInternal implements the FileInfo interface. This should only be
// used by storagedriver implementations that don't have a specialized
// FileInfo type.
type FileInfoInternal struct {
FileInfoFields
}
var (
_ FileInfo = FileInfoInternal{}
_ FileInfo = &FileInfoInternal{}
)
// Path provides the full path of the target of this file info.
func (fi FileInfoInternal) Path() string {
return fi.FileInfoFields.Path
}
// Size returns current length in bytes of the file. The return value can
// be used to write to the end of the file at path. The value is
// meaningless if IsDir returns true.
func (fi FileInfoInternal) Size() int64 {
return fi.FileInfoFields.Size
}
// ModTime returns the modification time for the file. For backends that
// don't have a modification time, the creation time should be returned.
func (fi FileInfoInternal) ModTime() time.Time {
return fi.FileInfoFields.ModTime
}
// IsDir returns true if the path is a directory.
func (fi FileInfoInternal) IsDir() bool {
return fi.FileInfoFields.IsDir
}
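// A hedged sketch of how a driver's Stat typically assembles one of these
// (the path and size are illustrative only):
//
//	fi := storagedriver.FileInfoFields{
//		Path:    "/docker/registry/v2/blobs/data", // hypothetical key
//		Size:    1024,
//		ModTime: time.Now(),
//		IsDir:   false,
//	}
//	return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil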
package inmemory
import (
"context"
"fmt"
"io"
"net/http"
"sync"
"time"
storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
"github.com/distribution/distribution/v3/registry/storage/driver/base"
"github.com/distribution/distribution/v3/registry/storage/driver/factory"
)
const driverName = "inmemory"
func init() {
factory.Register(driverName, &inMemoryDriverFactory{})
}
// inMemoryDriverFactory implements the factory.StorageDriverFactory interface.
type inMemoryDriverFactory struct{}
func (factory *inMemoryDriverFactory) Create(ctx context.Context, parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
return New(), nil
}
type driver struct {
root *dir
mutex sync.RWMutex
}
// baseEmbed allows us to hide the Base embed.
type baseEmbed struct {
base.Base
}
// Driver is a storagedriver.StorageDriver implementation backed by a local map.
// Intended solely for example and testing purposes.
type Driver struct {
baseEmbed // embedded, hidden base driver.
}
var _ storagedriver.StorageDriver = &Driver{}
// New constructs a new Driver.
func New() *Driver {
return &Driver{
baseEmbed: baseEmbed{
Base: base.Base{
StorageDriver: &driver{
root: &dir{
common: common{
p: "/",
mod: time.Now(),
},
},
},
},
},
}
}
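// A hedged usage sketch: because all state lives in a process-local map, the
// driver suits tests and examples but should never back a real registry.
//
//	d := inmemory.New()
//	if err := d.PutContent(ctx, "/test/blob", []byte("hello")); err != nil {
//		// handle error
//	}
//	b, err := d.GetContent(ctx, "/test/blob") // b == []byte("hello")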
// Implement the storagedriver.StorageDriver interface.
func (d *driver) Name() string {
return driverName
}
// GetContent retrieves the content stored at "path" as a []byte.
func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
d.mutex.RLock()
defer d.mutex.RUnlock()
rc, err := d.reader(ctx, path, 0)
if err != nil {
return nil, err
}
defer rc.Close()
return io.ReadAll(rc)
}
// PutContent stores the []byte content at a location designated by "path".
func (d *driver) PutContent(ctx context.Context, p string, contents []byte) error {
d.mutex.Lock()
defer d.mutex.Unlock()
normalized := normalize(p)
f, err := d.root.mkfile(normalized)
if err != nil {
// TODO(stevvooe): Again, we need to clarify when this is not a
// directory in StorageDriver API.
return fmt.Errorf("not a file")
}
f.truncate()
if _, err := f.WriteAt(contents, 0); err != nil {
return err
}
return nil
}
// Reader retrieves an io.ReadCloser for the content stored at "path" with a
// given byte offset.
func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
d.mutex.RLock()
defer d.mutex.RUnlock()
return d.reader(ctx, path, offset)
}
func (d *driver) reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
if offset < 0 {
return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
}
normalized := normalize(path)
found := d.root.find(normalized)
if found.path() != normalized {
return nil, storagedriver.PathNotFoundError{Path: path}
}
if found.isdir() {
return nil, fmt.Errorf("%q is a directory", path)
}
return io.NopCloser(found.(*file).sectionReader(offset)), nil
}
// Writer returns a FileWriter which will store the content written to it
// at the location designated by "path" after the call to Commit.
func (d *driver) Writer(ctx context.Context, path string, append bool) (storagedriver.FileWriter, error) {
d.mutex.Lock()
defer d.mutex.Unlock()
normalized := normalize(path)
f, err := d.root.mkfile(normalized)
if err != nil {
return nil, fmt.Errorf("not a file")
}
if !append {
f.truncate()
}
return d.newWriter(f), nil
}
// Stat returns info about the provided path.
func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
d.mutex.RLock()
defer d.mutex.RUnlock()
normalized := normalize(path)
found := d.root.find(normalized)
if found.path() != normalized {
return nil, storagedriver.PathNotFoundError{Path: path}
}
fi := storagedriver.FileInfoFields{
Path: path,
IsDir: found.isdir(),
ModTime: found.modtime(),
}
if !fi.IsDir {
fi.Size = int64(len(found.(*file).data))
}
return storagedriver.FileInfoInternal{FileInfoFields: fi}, nil
}
// List returns a list of the objects that are direct descendants of the given
// path.
func (d *driver) List(ctx context.Context, path string) ([]string, error) {
d.mutex.RLock()
defer d.mutex.RUnlock()
normalized := normalize(path)
found := d.root.find(normalized)
if !found.isdir() {
return nil, fmt.Errorf("not a directory") // TODO(stevvooe): Need error type for this...
}
entries, err := found.(*dir).list(normalized)
if err != nil {
switch err {
case errNotExists:
return nil, storagedriver.PathNotFoundError{Path: path}
case errIsNotDir:
return nil, fmt.Errorf("not a directory")
default:
return nil, err
}
}
return entries, nil
}
// Move moves an object stored at sourcePath to destPath, removing the original
// object.
func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
d.mutex.Lock()
defer d.mutex.Unlock()
normalizedSrc, normalizedDst := normalize(sourcePath), normalize(destPath)
err := d.root.move(normalizedSrc, normalizedDst)
switch err {
case errNotExists:
return storagedriver.PathNotFoundError{Path: sourcePath}
default:
return err
}
}
// Delete recursively deletes all objects stored at "path" and its subpaths.
func (d *driver) Delete(ctx context.Context, path string) error {
d.mutex.Lock()
defer d.mutex.Unlock()
normalized := normalize(path)
err := d.root.delete(normalized)
switch err {
case errNotExists:
return storagedriver.PathNotFoundError{Path: path}
default:
return err
}
}
// RedirectURL returns a URL which may be used to retrieve the content stored at the given path.
func (d *driver) RedirectURL(*http.Request, string) (string, error) {
return "", nil
}
// Walk traverses a filesystem defined within driver, starting
// from the given path, calling f on each file and directory
func (d *driver) Walk(ctx context.Context, path string, f storagedriver.WalkFn, options ...func(*storagedriver.WalkOptions)) error {
return storagedriver.WalkFallback(ctx, d, path, f, options...)
}
type writer struct {
d *driver
f *file
buffer []byte
buffSize int
closed bool
committed bool
cancelled bool
}
func (d *driver) newWriter(f *file) storagedriver.FileWriter {
return &writer{
d: d,
f: f,
}
}
func (w *writer) Write(p []byte) (int, error) {
if w.closed {
return 0, fmt.Errorf("already closed")
} else if w.committed {
return 0, fmt.Errorf("already committed")
} else if w.cancelled {
return 0, fmt.Errorf("already cancelled")
}
w.d.mutex.Lock()
defer w.d.mutex.Unlock()
if cap(w.buffer) < len(p)+w.buffSize {
data := make([]byte, len(w.buffer), len(p)+w.buffSize)
copy(data, w.buffer)
w.buffer = data
}
w.buffer = w.buffer[:w.buffSize+len(p)]
n := copy(w.buffer[w.buffSize:w.buffSize+len(p)], p)
w.buffSize += n
return n, nil
}
func (w *writer) Size() int64 {
w.d.mutex.RLock()
defer w.d.mutex.RUnlock()
return int64(len(w.f.data))
}
func (w *writer) Close() error {
if w.closed {
return fmt.Errorf("already closed")
}
w.closed = true
if err := w.flush(); err != nil {
return err
}
return nil
}
func (w *writer) Cancel(ctx context.Context) error {
if w.closed {
return fmt.Errorf("already closed")
} else if w.committed {
return fmt.Errorf("already committed")
}
w.cancelled = true
w.d.mutex.Lock()
defer w.d.mutex.Unlock()
return w.d.root.delete(w.f.path())
}
func (w *writer) Commit(ctx context.Context) error {
if w.closed {
return fmt.Errorf("already closed")
} else if w.committed {
return fmt.Errorf("already committed")
} else if w.cancelled {
return fmt.Errorf("already cancelled")
}
w.committed = true
if err := w.flush(); err != nil {
return err
}
return nil
}
func (w *writer) flush() error {
w.d.mutex.Lock()
defer w.d.mutex.Unlock()
if _, err := w.f.WriteAt(w.buffer, int64(len(w.f.data))); err != nil {
return err
}
w.buffer = []byte{}
w.buffSize = 0
return nil
}
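// A hedged sketch of the FileWriter lifecycle implemented above: Write
// buffers bytes in memory, flush appends them to the backing file on Commit
// or Close, and Cancel deletes the file instead (d and the path are
// hypothetical):
//
//	fw, _ := d.Writer(ctx, "/test/upload", false) // truncate rather than append
//	fw.Write([]byte("part 1"))
//	fw.Write([]byte("part 2"))
//	if err := fw.Commit(ctx); err != nil {
//		// handle error
//	}
//	fw.Close()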
package inmemory
import (
"fmt"
"io"
"path"
"sort"
"strings"
"time"
)
var (
errExists = fmt.Errorf("exists")
errNotExists = fmt.Errorf("notexists")
errIsNotDir = fmt.Errorf("notdir")
errIsDir = fmt.Errorf("isdir")
)
type node interface {
name() string
path() string
isdir() bool
modtime() time.Time
}
// dir is the central type for the memory-based storagedriver. All operations
// are dispatched from a root dir.
type dir struct {
common
// TODO(stevvooe): Use sorted slice + search.
children map[string]node
}
var _ node = &dir{}
func (d *dir) isdir() bool {
return true
}
// add places the node n into dir d.
func (d *dir) add(n node) {
if d.children == nil {
d.children = make(map[string]node)
}
d.children[n.name()] = n
d.mod = time.Now()
}
// find searches for the node with path q in dir. If the node is found, it
// will be returned and the returned (node).path() will match q. If the node
// is not found, the closest existing parent is returned.
func (d *dir) find(q string) node {
q = strings.Trim(q, "/")
i := strings.Index(q, "/")
if q == "" {
return d
}
if i == 0 {
panic("shouldn't happen, no root paths")
}
var component string
if i < 0 {
// No more path components
component = q
} else {
component = q[:i]
}
child, ok := d.children[component]
if !ok {
// Node was not found. Return the current node as the closest existing parent.
return d
}
if child.isdir() {
// traverse down!
q = q[i+1:]
return child.(*dir).find(q)
}
return child
}
func (d *dir) list(p string) ([]string, error) {
n := d.find(p)
if n.path() != p {
return nil, errNotExists
}
if !n.isdir() {
return nil, errIsNotDir
}
// NOTE(milosgajdos): this is safe to do because
// n can only be *dir due to the compile time check
dirChildren := n.(*dir).children
children := make([]string, 0, len(dirChildren))
for _, child := range dirChildren {
children = append(children, child.path())
}
sort.Strings(children)
return children, nil
}
// mkfile creates the file at p or returns the existing one. It returns an
// error if the path exists and is a directory. Essentially, this is open or
// create.
func (d *dir) mkfile(p string) (*file, error) {
n := d.find(p)
if n.path() == p {
if n.isdir() {
return nil, errIsDir
}
return n.(*file), nil
}
dirpath, filename := path.Split(p)
// Make any non-existent directories
n, err := d.mkdirs(dirpath)
if err != nil {
return nil, err
}
dd := n.(*dir)
n = &file{
common: common{
p: path.Join(dd.path(), filename),
mod: time.Now(),
},
}
dd.add(n)
return n.(*file), nil
}
// mkdirs creates any missing directory entries in p and returns the result.
func (d *dir) mkdirs(p string) (*dir, error) {
p = normalize(p)
n := d.find(p)
if !n.isdir() {
// Found something there
return nil, errIsNotDir
}
if n.path() == p {
return n.(*dir), nil
}
dd := n.(*dir)
relative := strings.Trim(strings.TrimPrefix(p, n.path()), "/")
if relative == "" {
return dd, nil
}
components := strings.Split(relative, "/")
for _, component := range components {
d, err := dd.mkdir(component)
if err != nil {
// This should actually never happen, since there are no children.
return nil, err
}
dd = d
}
return dd, nil
}
// mkdir creates a child directory under d with the given name.
func (d *dir) mkdir(name string) (*dir, error) {
if name == "" {
return nil, fmt.Errorf("invalid dirname")
}
_, ok := d.children[name]
if ok {
return nil, errExists
}
child := &dir{
common: common{
p: path.Join(d.path(), name),
mod: time.Now(),
},
}
d.add(child)
d.mod = time.Now()
return child, nil
}
func (d *dir) move(src, dst string) error {
dstDirname, _ := path.Split(dst)
dp, err := d.mkdirs(dstDirname)
if err != nil {
return err
}
srcDirname, srcFilename := path.Split(src)
sp := d.find(srcDirname)
if normalize(srcDirname) != normalize(sp.path()) {
return errNotExists
}
spd, ok := sp.(*dir)
if !ok {
return errIsNotDir // paranoid.
}
s, ok := spd.children[srcFilename]
if !ok {
return errNotExists
}
delete(spd.children, srcFilename)
switch n := s.(type) {
case *dir:
n.p = dst
case *file:
n.p = dst
}
dp.add(s)
return nil
}
func (d *dir) delete(p string) error {
dirname, filename := path.Split(p)
parent := d.find(dirname)
if normalize(dirname) != normalize(parent.path()) {
return errNotExists
}
parentDir, ok := parent.(*dir)
if !ok {
return errIsNotDir
}
if _, ok := parentDir.children[filename]; !ok {
return errNotExists
}
delete(parentDir.children, filename)
return nil
}
func (d *dir) String() string {
return fmt.Sprintf("&dir{path: %v, children: %v}", d.p, d.children)
}
// file stores actual data in the fs tree. It acts like an open, seekable file
// where operations are conducted through ReadAt and WriteAt. Use it with
// SectionReader for the best effect.
type file struct {
common
data []byte
}
var _ node = &file{}
func (f *file) isdir() bool {
return false
}
func (f *file) truncate() {
f.data = f.data[:0]
}
func (f *file) sectionReader(offset int64) io.Reader {
return io.NewSectionReader(f, offset, int64(len(f.data))-offset)
}
func (f *file) ReadAt(p []byte, offset int64) (n int, err error) {
if offset >= int64(len(f.data)) {
return 0, io.EOF
}
return copy(p, f.data[offset:]), nil
}
// reallocExponent is the factor by which a slice is grown on realloc. The
// value roughly follows the behavior of Go's built-in append function.
const reallocExponent = 1.25
func (f *file) WriteAt(p []byte, offset int64) (n int, err error) {
newLen := offset + int64(len(p))
if int64(cap(f.data)) < newLen {
// Grow slice exponentially to ensure amortized linear time complexity
// of reallocation
newCap := int64(float64(cap(f.data)) * reallocExponent)
if newCap < newLen {
newCap = newLen
}
data := make([]byte, len(f.data), newCap)
copy(data, f.data)
f.data = data
}
f.mod = time.Now()
f.data = f.data[:newLen]
return copy(f.data[offset:newLen], p), nil
}
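// To make the growth policy above concrete (a hedged, worked example): with
// cap(f.data) == 4096 and a WriteAt extending the file to 5000 bytes, newCap
// is first 4096 * 1.25 = 5120, which already covers newLen, so the slice is
// reallocated once to capacity 5120 rather than to exactly 5000. Growing
// capacity geometrically keeps repeated appends amortized linear overall.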
func (f *file) String() string {
return fmt.Sprintf("&file{path: %q}", f.p)
}
// common provides shared fields and methods for node implementations.
type common struct {
p string
mod time.Time
}
func (c *common) name() string {
_, name := path.Split(c.p)
return name
}
func (c *common) path() string {
return c.p
}
func (c *common) modtime() time.Time {
return c.mod
}
func normalize(p string) string {
return "/" + strings.Trim(p, "/")
}
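// normalize is what makes the lookups above tolerant of leading and trailing
// slashes. A few hedged examples:
//
//	normalize("")          // "/"
//	normalize("foo")       // "/foo"
//	normalize("/foo/bar/") // "/foo/bar"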
package storagemiddleware
import (
"context"
"fmt"
storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
)
// InitFunc is the type of a StorageMiddleware factory function and is
// used to register the constructor for different StorageMiddleware backends.
type InitFunc func(ctx context.Context, storageDriver storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error)
var storageMiddlewares map[string]InitFunc
// Register is used to register an InitFunc for
// a StorageMiddleware backend with the given name.
func Register(name string, initFunc InitFunc) error {
if storageMiddlewares == nil {
storageMiddlewares = make(map[string]InitFunc)
}
if _, exists := storageMiddlewares[name]; exists {
return fmt.Errorf("name already registered: %s", name)
}
storageMiddlewares[name] = initFunc
return nil
}
// Get constructs a StorageMiddleware with the given options using the named backend.
func Get(ctx context.Context, name string, options map[string]interface{}, storageDriver storagedriver.StorageDriver) (storagedriver.StorageDriver, error) {
if storageMiddlewares != nil {
if initFunc, exists := storageMiddlewares[name]; exists {
return initFunc(ctx, storageDriver, options)
}
}
return nil, fmt.Errorf("no storage middleware registered with name: %s", name)
}
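// A hedged sketch of wiring a middleware through this registry (the
// "logging" name and loggingDriver wrapper are hypothetical, not part of
// this package):
//
//	func init() {
//		storagemiddleware.Register("logging", func(ctx context.Context, sd storagedriver.StorageDriver, options map[string]interface{}) (storagedriver.StorageDriver, error) {
//			return &loggingDriver{StorageDriver: sd}, nil // hypothetical wrapper
//		})
//	}
//
//	// Later, typically driven by configuration:
//	wrapped, err := storagemiddleware.Get(ctx, "logging", options, baseDriver)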
// Package s3 provides a storagedriver.StorageDriver implementation to
// store blobs in Amazon S3 cloud storage.
//
// This package leverages the official aws client library for interfacing with
// S3.
//
// Because S3 is a key-value store, the Stat call does not support last
// modification time for directories (directories are an abstraction for
// key-value stores).
//
// Keep in mind that S3 guarantees only read-after-write consistency for new
// objects, but no read-after-update or list-after-write consistency.
package s3
import (
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"math"
"net/http"
"path/filepath"
"slices"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/distribution/distribution/v3/internal/dcontext"
storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
"github.com/distribution/distribution/v3/registry/storage/driver/base"
"github.com/distribution/distribution/v3/registry/storage/driver/factory"
)
const driverName = "s3aws"
// minChunkSize defines the minimum multipart upload chunk size
// S3 API requires multipart upload chunks to be at least 5MB
const minChunkSize = 5 * 1024 * 1024
const defaultChunkSize = 2 * minChunkSize
const (
// defaultMultipartCopyChunkSize defines the default chunk size for all
// but the last Upload Part - Copy operation of a multipart copy.
// Empirically, 32 MB is optimal.
defaultMultipartCopyChunkSize = 32 * 1024 * 1024
// defaultMultipartCopyMaxConcurrency defines the default maximum number
// of concurrent Upload Part - Copy operations for a multipart copy.
defaultMultipartCopyMaxConcurrency = 100
// defaultMultipartCopyThresholdSize defines the default object size
// above which multipart copy will be used. (PUT Object - Copy is used
// for objects at or below this size.) Empirically, 32 MB is optimal.
defaultMultipartCopyThresholdSize = 32 * 1024 * 1024
)
// listMax is the largest number of objects you can request from S3 in a list call
const listMax = 1000
// noStorageClass defines the value to be used if storage class is not supported by the S3 endpoint
const noStorageClass = "NONE"
// s3StorageClasses lists all compatible (instant retrieval) S3 storage classes
var s3StorageClasses = []string{
noStorageClass,
s3.StorageClassStandard,
s3.StorageClassReducedRedundancy,
s3.StorageClassStandardIa,
s3.StorageClassOnezoneIa,
s3.StorageClassIntelligentTiering,
s3.StorageClassOutposts,
s3.StorageClassGlacierIr,
}
// validRegions maps known s3 region identifiers to region descriptors
var validRegions = map[string]struct{}{}
// validObjectACLs contains known s3 object Acls
var validObjectACLs = map[string]struct{}{}
// DriverParameters encapsulates all of the driver parameters after all values have been set
type DriverParameters struct {
AccessKey string
SecretKey string
Bucket string
Region string
RegionEndpoint string
ForcePathStyle bool
Encrypt bool
KeyID string
Secure bool
SkipVerify bool
V4Auth bool
ChunkSize int
MultipartCopyChunkSize int64
MultipartCopyMaxConcurrency int64
MultipartCopyThresholdSize int64
RootDirectory string
StorageClass string
UserAgent string
ObjectACL string
SessionToken string
UseDualStack bool
Accelerate bool
LogLevel aws.LogLevelType
}
func init() {
partitions := endpoints.DefaultPartitions()
for _, p := range partitions {
for region := range p.Regions() {
validRegions[region] = struct{}{}
}
}
for _, objectACL := range []string{
s3.ObjectCannedACLPrivate,
s3.ObjectCannedACLPublicRead,
s3.ObjectCannedACLPublicReadWrite,
s3.ObjectCannedACLAuthenticatedRead,
s3.ObjectCannedACLAwsExecRead,
s3.ObjectCannedACLBucketOwnerRead,
s3.ObjectCannedACLBucketOwnerFullControl,
} {
validObjectACLs[objectACL] = struct{}{}
}
// Register this as the default s3 driver in addition to s3aws
factory.Register("s3", &s3DriverFactory{})
factory.Register(driverName, &s3DriverFactory{})
}
// s3DriverFactory implements the factory.StorageDriverFactory interface
type s3DriverFactory struct{}
func (factory *s3DriverFactory) Create(ctx context.Context, parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
return FromParameters(ctx, parameters)
}
var _ storagedriver.StorageDriver = &driver{}
type driver struct {
S3 *s3.S3
Bucket string
ChunkSize int
Encrypt bool
KeyID string
MultipartCopyChunkSize int64
MultipartCopyMaxConcurrency int64
MultipartCopyThresholdSize int64
RootDirectory string
StorageClass string
ObjectACL string
pool *sync.Pool
}
type baseEmbed struct {
base.Base
}
// Driver is a storagedriver.StorageDriver implementation backed by Amazon S3
// Objects are stored at absolute keys in the provided bucket.
type Driver struct {
baseEmbed
}
// FromParameters constructs a new Driver with a given parameters map
// Required parameters:
// - accesskey
// - secretkey
// - region
// - bucket
// - encrypt
func FromParameters(ctx context.Context, parameters map[string]interface{}) (*Driver, error) {
// Providing no values for these is valid in case the user is authenticating
// with an IAM role on an EC2 instance (in which case the instance credentials
// will be used when GetAuth is called)
accessKey := parameters["accesskey"]
if accessKey == nil {
accessKey = ""
}
secretKey := parameters["secretkey"]
if secretKey == nil {
secretKey = ""
}
regionEndpoint := parameters["regionendpoint"]
if regionEndpoint == nil {
regionEndpoint = ""
}
forcePathStyleBool := false
forcePathStyle := parameters["forcepathstyle"]
switch forcePathStyle := forcePathStyle.(type) {
case string:
b, err := strconv.ParseBool(forcePathStyle)
if err != nil {
return nil, fmt.Errorf("the forcePathStyle parameter should be a boolean")
}
forcePathStyleBool = b
case bool:
forcePathStyleBool = forcePathStyle
case nil:
// do nothing
default:
return nil, fmt.Errorf("the forcePathStyle parameter should be a boolean")
}
regionName := parameters["region"]
region := fmt.Sprint(regionName)
// Don't check the region value if a custom endpoint is provided.
if regionEndpoint == "" {
if regionName == nil || region == "" {
return nil, fmt.Errorf("no region parameter provided")
}
if _, ok := validRegions[region]; !ok {
return nil, fmt.Errorf("invalid region provided: %v", region)
}
}
bucket := parameters["bucket"]
if bucket == nil || fmt.Sprint(bucket) == "" {
return nil, fmt.Errorf("no bucket parameter provided")
}
encryptBool := false
encrypt := parameters["encrypt"]
switch encrypt := encrypt.(type) {
case string:
b, err := strconv.ParseBool(encrypt)
if err != nil {
return nil, fmt.Errorf("the encrypt parameter should be a boolean")
}
encryptBool = b
case bool:
encryptBool = encrypt
case nil:
// do nothing
default:
return nil, fmt.Errorf("the encrypt parameter should be a boolean")
}
secureBool := true
secure := parameters["secure"]
switch secure := secure.(type) {
case string:
b, err := strconv.ParseBool(secure)
if err != nil {
return nil, fmt.Errorf("the secure parameter should be a boolean")
}
secureBool = b
case bool:
secureBool = secure
case nil:
// do nothing
default:
return nil, fmt.Errorf("the secure parameter should be a boolean")
}
skipVerifyBool := false
skipVerify := parameters["skipverify"]
switch skipVerify := skipVerify.(type) {
case string:
b, err := strconv.ParseBool(skipVerify)
if err != nil {
return nil, fmt.Errorf("the skipVerify parameter should be a boolean")
}
skipVerifyBool = b
case bool:
skipVerifyBool = skipVerify
case nil:
// do nothing
default:
return nil, fmt.Errorf("the skipVerify parameter should be a boolean")
}
v4Bool := true
v4auth := parameters["v4auth"]
switch v4auth := v4auth.(type) {
case string:
b, err := strconv.ParseBool(v4auth)
if err != nil {
return nil, fmt.Errorf("the v4auth parameter should be a boolean")
}
v4Bool = b
case bool:
v4Bool = v4auth
case nil:
// do nothing
default:
return nil, fmt.Errorf("the v4auth parameter should be a boolean")
}
keyID := parameters["keyid"]
if keyID == nil {
keyID = ""
}
chunkSize, err := getParameterAsInteger(parameters, "chunksize", defaultChunkSize, minChunkSize, maxChunkSize)
if err != nil {
return nil, err
}
multipartCopyChunkSize, err := getParameterAsInteger[int64](parameters, "multipartcopychunksize", defaultMultipartCopyChunkSize, minChunkSize, maxChunkSize)
if err != nil {
return nil, err
}
multipartCopyMaxConcurrency, err := getParameterAsInteger[int64](parameters, "multipartcopymaxconcurrency", defaultMultipartCopyMaxConcurrency, 1, math.MaxInt64)
if err != nil {
return nil, err
}
multipartCopyThresholdSize, err := getParameterAsInteger[int64](parameters, "multipartcopythresholdsize", defaultMultipartCopyThresholdSize, 0, maxChunkSize)
if err != nil {
return nil, err
}
rootDirectory := parameters["rootdirectory"]
if rootDirectory == nil {
rootDirectory = ""
}
storageClass := s3.StorageClassStandard
storageClassParam := parameters["storageclass"]
if storageClassParam != nil {
storageClassString, ok := storageClassParam.(string)
if !ok {
return nil, fmt.Errorf(
"the storageclass parameter must be one of %v, %v invalid",
s3StorageClasses,
storageClassParam,
)
}
// All valid storage class parameters are UPPERCASE, so be a bit more flexible here
storageClassString = strings.ToUpper(storageClassString)
if storageClassString != noStorageClass &&
storageClassString != s3.StorageClassStandard &&
storageClassString != s3.StorageClassReducedRedundancy &&
storageClassString != s3.StorageClassStandardIa &&
storageClassString != s3.StorageClassOnezoneIa &&
storageClassString != s3.StorageClassIntelligentTiering &&
storageClassString != s3.StorageClassOutposts &&
storageClassString != s3.StorageClassGlacierIr {
return nil, fmt.Errorf(
"the storageclass parameter must be one of %v, %v invalid",
s3StorageClasses,
storageClassParam,
)
}
storageClass = storageClassString
}
userAgent := parameters["useragent"]
if userAgent == nil {
userAgent = ""
}
objectACL := s3.ObjectCannedACLPrivate
objectACLParam := parameters["objectacl"]
if objectACLParam != nil {
objectACLString, ok := objectACLParam.(string)
if !ok {
return nil, fmt.Errorf("invalid value for objectacl parameter: %v", objectACLParam)
}
if _, ok = validObjectACLs[objectACLString]; !ok {
return nil, fmt.Errorf("invalid value for objectacl parameter: %v", objectACLParam)
}
objectACL = objectACLString
}
useDualStackBool := false
useDualStack := parameters["usedualstack"]
switch useDualStack := useDualStack.(type) {
case string:
b, err := strconv.ParseBool(useDualStack)
if err != nil {
return nil, fmt.Errorf("the useDualStack parameter should be a boolean")
}
useDualStackBool = b
case bool:
useDualStackBool = useDualStack
case nil:
// do nothing
default:
return nil, fmt.Errorf("the useDualStack parameter should be a boolean")
}
sessionToken := ""
accelerateBool := false
accelerate := parameters["accelerate"]
switch accelerate := accelerate.(type) {
case string:
b, err := strconv.ParseBool(accelerate)
if err != nil {
return nil, fmt.Errorf("the accelerate parameter should be a boolean")
}
accelerateBool = b
case bool:
accelerateBool = accelerate
case nil:
// do nothing
default:
return nil, fmt.Errorf("the accelerate parameter should be a boolean")
}
params := DriverParameters{
AccessKey: fmt.Sprint(accessKey),
SecretKey: fmt.Sprint(secretKey),
Bucket: fmt.Sprint(bucket),
Region: region,
RegionEndpoint: fmt.Sprint(regionEndpoint),
ForcePathStyle: forcePathStyleBool,
Encrypt: encryptBool,
KeyID: fmt.Sprint(keyID),
Secure: secureBool,
SkipVerify: skipVerifyBool,
V4Auth: v4Bool,
ChunkSize: chunkSize,
MultipartCopyChunkSize: multipartCopyChunkSize,
MultipartCopyMaxConcurrency: multipartCopyMaxConcurrency,
MultipartCopyThresholdSize: multipartCopyThresholdSize,
RootDirectory: fmt.Sprint(rootDirectory),
StorageClass: storageClass,
UserAgent: fmt.Sprint(userAgent),
ObjectACL: objectACL,
SessionToken: fmt.Sprint(sessionToken),
UseDualStack: useDualStackBool,
Accelerate: accelerateBool,
LogLevel: getS3LogLevelFromParam(parameters["loglevel"]),
}
return New(ctx, params)
}
func getS3LogLevelFromParam(param any) aws.LogLevelType {
if param == nil {
return aws.LogOff
}
// YAML 1.X interprets "off" as false
if b, ok := param.(bool); ok && !b {
return aws.LogOff
}
// if it's not a string, return off
logLevelParam, ok := param.(string)
if !ok {
return aws.LogOff
}
var logLevel aws.LogLevelType
switch strings.ToLower(logLevelParam) {
case "off":
logLevel = aws.LogOff
case "debug":
logLevel = aws.LogDebug
case "debugwithsigning":
logLevel = aws.LogDebugWithSigning
case "debugwithhttpbody":
logLevel = aws.LogDebugWithHTTPBody
case "debugwithrequestretries":
logLevel = aws.LogDebugWithRequestRetries
case "debugwithrequesterrors":
logLevel = aws.LogDebugWithRequestErrors
case "debugwitheventstreambody":
logLevel = aws.LogDebugWithEventStreamBody
default:
logLevel = aws.LogOff
}
return logLevel
}
type integer interface{ signed | unsigned }
type signed interface {
~int | ~int8 | ~int16 | ~int32 | ~int64
}
type unsigned interface {
~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
}
// getParameterAsInteger converts parameters[name] to T (using defaultValue if
// nil) and ensures it is in the range of min and max.
func getParameterAsInteger[T integer](parameters map[string]any, name string, defaultValue, min, max T) (T, error) {
v := defaultValue
if p := parameters[name]; p != nil {
if _, err := fmt.Sscanf(fmt.Sprint(p), "%d", &v); err != nil {
return 0, fmt.Errorf("%s parameter must be an integer, %v invalid", name, p)
}
}
if v < min || v > max {
return 0, fmt.Errorf("the %s %#v parameter should be a number between %d and %d (inclusive)", name, v, min, max)
}
return v, nil
}
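// Hedged usage notes for the generic helper above: the type parameter is
// usually inferred from the default, minimum and maximum arguments, exactly
// as the calls in FromParameters do, e.g.
//
//	chunkSize, err := getParameterAsInteger(parameters, "chunksize", defaultChunkSize, minChunkSize, maxChunkSize)
//
// A string value such as "10485760" parses through fmt.Sscanf, while a value
// outside [min, max] is rejected with an error rather than silently clamped.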
// New constructs a new Driver with the given AWS credentials, region, encryption flag, and
// bucket name
func New(ctx context.Context, params DriverParameters) (*Driver, error) {
if !params.V4Auth &&
(params.RegionEndpoint == "" ||
strings.Contains(params.RegionEndpoint, "s3.amazonaws.com")) {
return nil, fmt.Errorf("on Amazon S3 this storage driver can only be used with v4 authentication")
}
awsConfig := aws.NewConfig().WithLogLevel(params.LogLevel)
if params.AccessKey != "" && params.SecretKey != "" {
creds := credentials.NewStaticCredentials(
params.AccessKey,
params.SecretKey,
params.SessionToken,
)
awsConfig.WithCredentials(creds)
}
if params.RegionEndpoint != "" {
awsConfig.WithEndpoint(params.RegionEndpoint)
}
awsConfig.WithS3ForcePathStyle(params.ForcePathStyle)
awsConfig.WithS3UseAccelerate(params.Accelerate)
awsConfig.WithRegion(params.Region)
awsConfig.WithDisableSSL(!params.Secure)
if params.UseDualStack {
awsConfig.UseDualStackEndpoint = endpoints.DualStackEndpointStateEnabled
}
if params.SkipVerify {
httpTransport := http.DefaultTransport.(*http.Transport).Clone()
httpTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
awsConfig.WithHTTPClient(&http.Client{
Transport: httpTransport,
})
}
sess, err := session.NewSession(awsConfig)
if err != nil {
return nil, fmt.Errorf("failed to create new session with aws config: %v", err)
}
if params.UserAgent != "" {
sess.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler(params.UserAgent))
}
s3obj := s3.New(sess)
// enable S3 compatible signature v2 signing instead
if !params.V4Auth {
setv2Handlers(s3obj)
}
// TODO Currently multipart uploads have no timestamps, so this would be unwise
// if you initiated a new s3driver while another one is running on the same bucket.
// multis, _, err := bucket.ListMulti("", "")
// if err != nil {
// return nil, err
// }
// for _, multi := range multis {
// err := multi.Abort()
// //TODO appropriate to do this error checking?
// if err != nil {
// return nil, err
// }
// }
d := &driver{
S3: s3obj,
Bucket: params.Bucket,
ChunkSize: params.ChunkSize,
Encrypt: params.Encrypt,
KeyID: params.KeyID,
MultipartCopyChunkSize: params.MultipartCopyChunkSize,
MultipartCopyMaxConcurrency: params.MultipartCopyMaxConcurrency,
MultipartCopyThresholdSize: params.MultipartCopyThresholdSize,
RootDirectory: params.RootDirectory,
StorageClass: params.StorageClass,
ObjectACL: params.ObjectACL,
pool: &sync.Pool{
New: func() any { return &bytes.Buffer{} },
},
}
return &Driver{
baseEmbed: baseEmbed{
Base: base.Base{
StorageDriver: d,
},
},
}, nil
}
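// A hedged construction sketch via FromParameters (the region and bucket are
// placeholders, not working values; credentials fall back to the default AWS
// chain when omitted):
//
//	d, err := FromParameters(ctx, map[string]interface{}{
//		"region": "us-east-1",          // placeholder
//		"bucket": "my-registry-bucket", // placeholder
//	})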
// Implement the storagedriver.StorageDriver interface
func (d *driver) Name() string {
return driverName
}
// GetContent retrieves the content stored at "path" as a []byte.
func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
reader, err := d.Reader(ctx, path, 0)
if err != nil {
return nil, err
}
return io.ReadAll(reader)
}
// PutContent stores the []byte content at a location designated by "path".
func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
_, err := d.S3.PutObjectWithContext(ctx, &s3.PutObjectInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(d.s3Path(path)),
ContentType: d.getContentType(),
ACL: d.getACL(),
ServerSideEncryption: d.getEncryptionMode(),
SSEKMSKeyId: d.getSSEKMSKeyID(),
StorageClass: d.getStorageClass(),
Body: bytes.NewReader(contents),
})
return parseError(path, err)
}
// Reader retrieves an io.ReadCloser for the content stored at "path" with a
// given byte offset.
func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
resp, err := d.S3.GetObjectWithContext(ctx, &s3.GetObjectInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(d.s3Path(path)),
Range: aws.String("bytes=" + strconv.FormatInt(offset, 10) + "-"),
})
if err != nil {
if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "InvalidRange" {
return io.NopCloser(bytes.NewReader(nil)), nil
}
return nil, parseError(path, err)
}
return resp.Body, nil
}
// Writer returns a FileWriter which will store the content written to it
// at the location designated by "path" after the call to Commit.
// Appending is only allowed to paths whose committed content has zero size,
// in which case the (empty) existing content is overwritten by the new
// content. It returns a storagedriver.Error when appending to paths with
// non-zero committed content.
func (d *driver) Writer(ctx context.Context, path string, appendMode bool) (storagedriver.FileWriter, error) {
key := d.s3Path(path)
if !appendMode {
// TODO (brianbland): cancel other uploads at this path
resp, err := d.S3.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(key),
ContentType: d.getContentType(),
ACL: d.getACL(),
ServerSideEncryption: d.getEncryptionMode(),
SSEKMSKeyId: d.getSSEKMSKeyID(),
StorageClass: d.getStorageClass(),
})
if err != nil {
return nil, err
}
return d.newWriter(ctx, key, *resp.UploadId, nil), nil
}
listMultipartUploadsInput := &s3.ListMultipartUploadsInput{
Bucket: aws.String(d.Bucket),
Prefix: aws.String(key),
}
for {
resp, err := d.S3.ListMultipartUploadsWithContext(ctx, listMultipartUploadsInput)
if err != nil {
return nil, parseError(path, err)
}
// resp.Uploads can only be empty on the first call
// if there were no more results to return after the first call, resp.IsTruncated would have been false
// and the loop would be exited without recalling ListMultipartUploads
if len(resp.Uploads) == 0 {
fi, err := d.Stat(ctx, path)
if err != nil {
return nil, parseError(path, err)
}
if fi.Size() == 0 {
resp, err := d.S3.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(key),
ContentType: d.getContentType(),
ACL: d.getACL(),
ServerSideEncryption: d.getEncryptionMode(),
SSEKMSKeyId: d.getSSEKMSKeyID(),
StorageClass: d.getStorageClass(),
})
if err != nil {
return nil, err
}
return d.newWriter(ctx, key, *resp.UploadId, nil), nil
}
return nil, storagedriver.Error{
DriverName: driverName,
Detail: fmt.Errorf("append to zero-size path %s unsupported", path),
}
}
var allParts []*s3.Part
for _, multi := range resp.Uploads {
if key != *multi.Key {
continue
}
partsList, err := d.S3.ListPartsWithContext(ctx, &s3.ListPartsInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(key),
UploadId: multi.UploadId,
})
if err != nil {
return nil, parseError(path, err)
}
allParts = append(allParts, partsList.Parts...)
for *partsList.IsTruncated {
partsList, err = d.S3.ListPartsWithContext(ctx, &s3.ListPartsInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(key),
UploadId: multi.UploadId,
PartNumberMarker: partsList.NextPartNumberMarker,
})
if err != nil {
return nil, parseError(path, err)
}
allParts = append(allParts, partsList.Parts...)
}
return d.newWriter(ctx, key, *multi.UploadId, allParts), nil
}
// resp.Uploads has at least one element here (none matching key), so advance the marker and request the next page
listMultipartUploadsInput.UploadIdMarker = resp.NextUploadIdMarker
// from the s3 api docs, IsTruncated "specifies whether (true) or not (false) all of the results were returned"
// if everything has been returned, break
if resp.IsTruncated == nil || !*resp.IsTruncated {
break
}
}
return nil, storagedriver.PathNotFoundError{Path: path}
}
func (d *driver) statHead(ctx context.Context, path string) (*storagedriver.FileInfoFields, error) {
resp, err := d.S3.HeadObjectWithContext(ctx, &s3.HeadObjectInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(d.s3Path(path)),
})
if err != nil {
return nil, err
}
return &storagedriver.FileInfoFields{
Path: path,
IsDir: false,
Size: *resp.ContentLength,
ModTime: *resp.LastModified,
}, nil
}
func (d *driver) statList(ctx context.Context, path string) (*storagedriver.FileInfoFields, error) {
s3Path := d.s3Path(path)
resp, err := d.S3.ListObjectsV2WithContext(ctx, &s3.ListObjectsV2Input{
Bucket: aws.String(d.Bucket),
Prefix: aws.String(s3Path),
MaxKeys: aws.Int64(1),
})
if err != nil {
return nil, err
}
if len(resp.Contents) == 1 {
if *resp.Contents[0].Key != s3Path {
return &storagedriver.FileInfoFields{
Path: path,
IsDir: true,
}, nil
}
return &storagedriver.FileInfoFields{
Path: path,
Size: *resp.Contents[0].Size,
ModTime: *resp.Contents[0].LastModified,
}, nil
}
if len(resp.CommonPrefixes) == 1 {
return &storagedriver.FileInfoFields{
Path: path,
IsDir: true,
}, nil
}
return nil, storagedriver.PathNotFoundError{Path: path}
}
// Stat retrieves the FileInfo for the given path, including the current size
// in bytes and the creation time.
func (d *driver) Stat(ctx context.Context, path string) (storagedriver.FileInfo, error) {
fi, err := d.statHead(ctx, path)
if err != nil {
// For AWS errors, we fail over to ListObjects:
// Though the official docs https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html#API_HeadObject_Errors
// are slightly outdated, the HeadObject actually returns NotFound error
// if querying a key which doesn't exist or a key which has nested keys
// and Forbidden if IAM/ACL permissions do not allow Head but allow List.
var awsErr awserr.Error
if errors.As(err, &awsErr) {
fi, err := d.statList(ctx, path)
if err != nil {
return nil, parseError(path, err)
}
return storagedriver.FileInfoInternal{FileInfoFields: *fi}, nil
}
// For non-AWS errors, return the error directly
return nil, err
}
return storagedriver.FileInfoInternal{FileInfoFields: *fi}, nil
}
// List returns a list of the objects that are direct descendants of the given path.
func (d *driver) List(ctx context.Context, opath string) ([]string, error) {
path := opath
if path != "/" && path[len(path)-1] != '/' {
path = path + "/"
}
// This is to cover for the cases when the rootDirectory of the driver is either "" or "/".
// In those cases, there is no root prefix to replace and we must actually add a "/" to all
// results in order to keep them as valid paths as recognized by storagedriver.PathRegexp
prefix := ""
if d.s3Path("") == "" {
prefix = "/"
}
resp, err := d.S3.ListObjectsV2WithContext(ctx, &s3.ListObjectsV2Input{
Bucket: aws.String(d.Bucket),
Prefix: aws.String(d.s3Path(path)),
Delimiter: aws.String("/"),
MaxKeys: aws.Int64(listMax),
})
if err != nil {
return nil, parseError(opath, err)
}
files := []string{}
directories := []string{}
for {
for _, key := range resp.Contents {
files = append(files, strings.Replace(*key.Key, d.s3Path(""), prefix, 1))
}
for _, commonPrefix := range resp.CommonPrefixes {
commonPrefix := *commonPrefix.Prefix
directories = append(directories, strings.Replace(commonPrefix[0:len(commonPrefix)-1], d.s3Path(""), prefix, 1))
}
if resp.IsTruncated == nil || !*resp.IsTruncated {
break
}
resp, err = d.S3.ListObjectsV2WithContext(ctx, &s3.ListObjectsV2Input{
Bucket: aws.String(d.Bucket),
Prefix: aws.String(d.s3Path(path)),
Delimiter: aws.String("/"),
MaxKeys: aws.Int64(listMax),
ContinuationToken: resp.NextContinuationToken,
})
if err != nil {
return nil, err
}
}
if opath != "/" {
if len(files) == 0 && len(directories) == 0 {
// Treat empty response as missing directory, since we don't actually
// have directories in s3.
return nil, storagedriver.PathNotFoundError{Path: opath}
}
}
return append(files, directories...), nil
}
// Move moves an object stored at sourcePath to destPath, removing the original
// object.
func (d *driver) Move(ctx context.Context, sourcePath, destPath string) error {
/* This is terrible, but aws doesn't have an actual move. */
if err := d.copy(ctx, sourcePath, destPath); err != nil {
return err
}
return d.Delete(ctx, sourcePath)
}
// copy copies an object stored at sourcePath to destPath.
func (d *driver) copy(ctx context.Context, sourcePath, destPath string) error {
// S3 can copy objects up to 5 GB in size with a single PUT Object - Copy
// operation. For larger objects, the multipart upload API must be used.
//
// Empirically, multipart copy is fastest with 32 MB parts and is faster
// than PUT Object - Copy for objects larger than 32 MB.
fileInfo, err := d.Stat(ctx, sourcePath)
if err != nil {
return parseError(sourcePath, err)
}
if fileInfo.Size() <= d.MultipartCopyThresholdSize {
_, err := d.S3.CopyObjectWithContext(ctx, &s3.CopyObjectInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(d.s3Path(destPath)),
ContentType: d.getContentType(),
ACL: d.getACL(),
ServerSideEncryption: d.getEncryptionMode(),
SSEKMSKeyId: d.getSSEKMSKeyID(),
StorageClass: d.getStorageClass(),
CopySource: aws.String(d.Bucket + "/" + d.s3Path(sourcePath)),
})
if err != nil {
return parseError(sourcePath, err)
}
return nil
}
createResp, err := d.S3.CreateMultipartUploadWithContext(ctx, &s3.CreateMultipartUploadInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(d.s3Path(destPath)),
ContentType: d.getContentType(),
ACL: d.getACL(),
SSEKMSKeyId: d.getSSEKMSKeyID(),
ServerSideEncryption: d.getEncryptionMode(),
StorageClass: d.getStorageClass(),
})
if err != nil {
return err
}
numParts := (fileInfo.Size() + d.MultipartCopyChunkSize - 1) / d.MultipartCopyChunkSize
completedParts := make([]*s3.CompletedPart, numParts)
errChan := make(chan error, numParts)
limiter := make(chan struct{}, d.MultipartCopyMaxConcurrency)
for i := range completedParts {
i := int64(i)
go func() {
limiter <- struct{}{}
firstByte := i * d.MultipartCopyChunkSize
lastByte := firstByte + d.MultipartCopyChunkSize - 1
if lastByte >= fileInfo.Size() {
lastByte = fileInfo.Size() - 1
}
uploadResp, err := d.S3.UploadPartCopyWithContext(ctx, &s3.UploadPartCopyInput{
Bucket: aws.String(d.Bucket),
CopySource: aws.String(d.Bucket + "/" + d.s3Path(sourcePath)),
Key: aws.String(d.s3Path(destPath)),
PartNumber: aws.Int64(i + 1),
UploadId: createResp.UploadId,
CopySourceRange: aws.String(fmt.Sprintf("bytes=%d-%d", firstByte, lastByte)),
})
if err == nil {
completedParts[i] = &s3.CompletedPart{
ETag: uploadResp.CopyPartResult.ETag,
PartNumber: aws.Int64(i + 1),
}
}
errChan <- err
<-limiter
}()
}
for range completedParts {
err := <-errChan
if err != nil {
return err
}
}
_, err = d.S3.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(d.s3Path(destPath)),
UploadId: createResp.UploadId,
MultipartUpload: &s3.CompletedMultipartUpload{Parts: completedParts},
})
return err
}
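// The byte-range arithmetic above is the core of the multipart copy. A
// minimal, self-contained sketch of the same computation (partRanges is a
// hypothetical helper, not used by this driver):
func partRanges(size, chunkSize int64) [][2]int64 {
	numParts := (size + chunkSize - 1) / chunkSize
	ranges := make([][2]int64, 0, numParts)
	for i := int64(0); i < numParts; i++ {
		first := i * chunkSize
		last := first + chunkSize - 1
		if last >= size {
			// the final part is truncated to the end of the object
			last = size - 1
		}
		ranges = append(ranges, [2]int64{first, last})
	}
	return ranges
}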
// Delete recursively deletes all objects stored at "path" and its subpaths.
// We must be careful since S3 does not guarantee read-after-delete consistency.
func (d *driver) Delete(ctx context.Context, path string) error {
s3Objects := make([]*s3.ObjectIdentifier, 0, listMax)
s3Path := d.s3Path(path)
listObjectsInput := &s3.ListObjectsV2Input{
Bucket: aws.String(d.Bucket),
Prefix: aws.String(s3Path),
}
for {
// list all the objects
resp, err := d.S3.ListObjectsV2WithContext(ctx, listObjectsInput)
// resp.Contents can only be empty on the first call:
// if a previous call had no more results to return, resp.IsTruncated would have been
// false and the loop would have exited without calling ListObjectsV2 again
if err != nil || len(resp.Contents) == 0 {
return storagedriver.PathNotFoundError{Path: path}
}
for _, key := range resp.Contents {
// Skip if we encounter a key that is not a subpath (so that deleting "/a" does not delete "/ab").
if len(*key.Key) > len(s3Path) && (*key.Key)[len(s3Path)] != '/' {
continue
}
s3Objects = append(s3Objects, &s3.ObjectIdentifier{
Key: key.Key,
})
}
// Delete objects only if the list is not empty, otherwise S3 API returns a cryptic error
if len(s3Objects) > 0 {
// NOTE: according to AWS docs https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html
// by default the response returns up to 1,000 key names. The response _might_ contain fewer keys but it will never contain more.
// 1,000 keys is coincidentally (?) also the max number of keys that can be deleted in a single DeleteObjects operation, so we'll just smack
// Delete here straight away and reset the object slice when successful.
resp, err := d.S3.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{
Bucket: aws.String(d.Bucket),
Delete: &s3.Delete{
Objects: s3Objects,
Quiet: aws.Bool(false),
},
})
if err != nil {
return err
}
if len(resp.Errors) > 0 {
// NOTE: AWS SDK s3.Error does not implement the error interface, which
// is pretty intensely sad, so we have to make do with this for now.
errs := make([]error, 0, len(resp.Errors))
for _, err := range resp.Errors {
errs = append(errs, errors.New(err.String()))
}
return storagedriver.Errors{
DriverName: driverName,
Errs: errs,
}
}
}
// NOTE: we don't want to reallocate
// the slice so we simply "reset" it
s3Objects = s3Objects[:0]
// resp.Contents must have at least one element or we would have returned not found
listObjectsInput.StartAfter = resp.Contents[len(resp.Contents)-1].Key
// from the s3 api docs, IsTruncated is set to true if more keys are available to return,
// and false otherwise; if everything has been returned, break
if resp.IsTruncated == nil || !*resp.IsTruncated {
break
}
}
return nil
}
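// The subpath boundary check in the loop above can be stated on its own. A
// sketch (isSubpathKey is a hypothetical helper, not used by this driver): a
// listed key belongs under prefix p only if it equals p or continues with a
// path separator, so deleting "/a" never deletes "/ab".
func isSubpathKey(key, p string) bool {
	return key == p || (len(key) > len(p) && strings.HasPrefix(key, p) && key[len(p)] == '/')
}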
// RedirectURL returns a URL which may be used to retrieve the content stored at the given path.
func (d *driver) RedirectURL(r *http.Request, path string) (string, error) {
expiresIn := 20 * time.Minute
var req *request.Request
switch r.Method {
case http.MethodGet:
req, _ = d.S3.GetObjectRequest(&s3.GetObjectInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(d.s3Path(path)),
})
case http.MethodHead:
req, _ = d.S3.HeadObjectRequest(&s3.HeadObjectInput{
Bucket: aws.String(d.Bucket),
Key: aws.String(d.s3Path(path)),
})
default:
return "", nil
}
return req.Presign(expiresIn)
}
// Walk traverses a filesystem defined within driver, starting
// from the given path, calling f on each file
func (d *driver) Walk(ctx context.Context, from string, f storagedriver.WalkFn, options ...func(*storagedriver.WalkOptions)) error {
walkOptions := &storagedriver.WalkOptions{}
for _, o := range options {
o(walkOptions)
}
var objectCount int64
if err := d.doWalk(ctx, &objectCount, from, walkOptions.StartAfterHint, f); err != nil {
return err
}
return nil
}
func (d *driver) doWalk(parentCtx context.Context, objectCount *int64, from, startAfter string, f storagedriver.WalkFn) error {
var (
retError error
// the most recent directory walked for de-duping
prevDir string
// the most recent skip directory to avoid walking over undesirable files
prevSkipDir string
)
prevDir = from
path := from
if !strings.HasSuffix(path, "/") {
path = path + "/"
}
prefix := ""
if d.s3Path("") == "" {
prefix = "/"
}
listObjectsInput := &s3.ListObjectsV2Input{
Bucket: aws.String(d.Bucket),
Prefix: aws.String(d.s3Path(path)),
MaxKeys: aws.Int64(listMax),
StartAfter: aws.String(d.s3Path(startAfter)),
}
ctx, done := dcontext.WithTrace(parentCtx)
defer done("s3aws.ListObjectsV2PagesWithContext(%s)", listObjectsInput)
// When the "delimiter" argument is omitted, the S3 list API will list all objects in the bucket
// recursively, omitting directory paths. Objects are listed in sorted, depth-first order so we
// can infer all the directories by comparing each object path to the last one we saw.
// See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html
// With files returned in sorted depth-first order, directories are inferred in the same order.
// ErrSkipDir is handled by explicitly skipping over any files under the skipped directory. This may be sub-optimal
// for extreme edge cases but for the general use case in a registry, this is orders of magnitude
// faster than a more explicit recursive implementation.
listObjectErr := d.S3.ListObjectsV2PagesWithContext(ctx, listObjectsInput, func(objects *s3.ListObjectsV2Output, lastPage bool) bool {
walkInfos := make([]storagedriver.FileInfoInternal, 0, len(objects.Contents))
for _, file := range objects.Contents {
filePath := strings.Replace(*file.Key, d.s3Path(""), prefix, 1)
// get a list of all inferred directories between the previous directory and this file
dirs := directoryDiff(prevDir, filePath)
for _, dir := range dirs {
walkInfos = append(walkInfos, storagedriver.FileInfoInternal{
FileInfoFields: storagedriver.FileInfoFields{
IsDir: true,
Path: dir,
},
})
prevDir = dir
}
// in some cases the _uploads dir might be empty. when this happens, it would
// be appended twice to the walkInfos slice, once as [...]/_uploads and
// once more erroneously as [...]/_uploads/. the easiest way to avoid this is
// to skip appending filePath to walkInfos if it ends in "/". the loop through
// dirs will already have handled it in that case, so it's safe to continue this
// loop.
if strings.HasSuffix(filePath, "/") {
continue
}
walkInfos = append(walkInfos, storagedriver.FileInfoInternal{
FileInfoFields: storagedriver.FileInfoFields{
IsDir: false,
Size: *file.Size,
ModTime: *file.LastModified,
Path: filePath,
},
})
}
for _, walkInfo := range walkInfos {
// skip any results under the last skip directory
if prevSkipDir != "" && strings.HasPrefix(walkInfo.Path(), prevSkipDir) {
continue
}
err := f(walkInfo)
*objectCount++
if err != nil {
if err == storagedriver.ErrSkipDir {
prevSkipDir = walkInfo.Path()
continue
}
if err == storagedriver.ErrFilledBuffer {
return false
}
retError = err
return false
}
}
return true
})
if retError != nil {
return retError
}
if listObjectErr != nil {
return listObjectErr
}
return nil
}
// directoryDiff finds all directories that are not in common between
// the previous and current paths in sorted order.
//
// # Examples
//
// directoryDiff("/path/to/folder", "/path/to/folder/folder/file")
// // => [ "/path/to/folder/folder" ]
//
// directoryDiff("/path/to/folder/folder1", "/path/to/folder/folder2/file")
// // => [ "/path/to/folder/folder2" ]
//
// directoryDiff("/path/to/folder/folder1/file", "/path/to/folder/folder2/file")
// // => [ "/path/to/folder/folder2" ]
//
// directoryDiff("/path/to/folder/folder1/file", "/path/to/folder/folder2/folder1/file")
// // => [ "/path/to/folder/folder2", "/path/to/folder/folder2/folder1" ]
//
// directoryDiff("/", "/path/to/folder/folder/file")
// // => [ "/path", "/path/to", "/path/to/folder", "/path/to/folder/folder" ]
func directoryDiff(prev, current string) []string {
var paths []string
if prev == "" || current == "" {
return paths
}
parent := current
for {
parent = filepath.Dir(parent)
if parent == "/" || parent == prev || strings.HasPrefix(prev+"/", parent+"/") {
break
}
paths = append(paths, parent)
}
slices.Reverse(paths)
return paths
}
func (d *driver) s3Path(path string) string {
return strings.TrimLeft(strings.TrimRight(d.RootDirectory, "/")+path, "/")
}
// S3BucketKey returns the s3 bucket key for the given storage driver path.
func (d *Driver) S3BucketKey(path string) string {
return d.StorageDriver.(*driver).s3Path(path)
}
func parseError(path string, err error) error {
if s3Err, ok := err.(awserr.Error); ok && s3Err.Code() == "NoSuchKey" {
return storagedriver.PathNotFoundError{Path: path}
}
return err
}
func (d *driver) getEncryptionMode() *string {
if !d.Encrypt {
return nil
}
if d.KeyID == "" {
return aws.String("AES256")
}
return aws.String("aws:kms")
}
func (d *driver) getSSEKMSKeyID() *string {
if d.KeyID != "" {
return aws.String(d.KeyID)
}
return nil
}
func (d *driver) getContentType() *string {
return aws.String("application/octet-stream")
}
func (d *driver) getACL() *string {
return aws.String(d.ObjectACL)
}
func (d *driver) getStorageClass() *string {
if d.StorageClass == noStorageClass {
return nil
}
return aws.String(d.StorageClass)
}
// writer uploads parts to S3 in a buffered fashion where the length of each
// part is [writer.driver.ChunkSize], excluding the last part which may be
// smaller than the configured chunk size and never larger. This allows the
// multipart upload to be cleanly resumed in future. This is violated if
// [writer.Close] is called before at least one chunk is written.
type writer struct {
ctx context.Context
driver *driver
key string
uploadID string
parts []*s3.Part
size int64
buf *bytes.Buffer
closed bool
committed bool
cancelled bool
}
func (d *driver) newWriter(ctx context.Context, key, uploadID string, parts []*s3.Part) storagedriver.FileWriter {
var size int64
for _, part := range parts {
size += *part.Size
}
return &writer{
ctx: ctx,
driver: d,
key: key,
uploadID: uploadID,
parts: parts,
size: size,
buf: d.pool.Get().(*bytes.Buffer),
}
}
type completedParts []*s3.CompletedPart
func (a completedParts) Len() int { return len(a) }
func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber }
func (w *writer) Write(p []byte) (int, error) {
if err := w.done(); err != nil {
return 0, err
}
// If the last written part is smaller than minChunkSize, we need to make a
// new multipart upload :sadface:
if len(w.parts) > 0 && int(*w.parts[len(w.parts)-1].Size) < minChunkSize {
completedUploadedParts := make(completedParts, len(w.parts))
for i, part := range w.parts {
completedUploadedParts[i] = &s3.CompletedPart{
ETag: part.ETag,
PartNumber: part.PartNumber,
}
}
sort.Sort(completedUploadedParts)
_, err := w.driver.S3.CompleteMultipartUploadWithContext(w.ctx, &s3.CompleteMultipartUploadInput{
Bucket: aws.String(w.driver.Bucket),
Key: aws.String(w.key),
UploadId: aws.String(w.uploadID),
MultipartUpload: &s3.CompletedMultipartUpload{
Parts: completedUploadedParts,
},
})
if err != nil {
if _, aErr := w.driver.S3.AbortMultipartUploadWithContext(w.ctx, &s3.AbortMultipartUploadInput{
Bucket: aws.String(w.driver.Bucket),
Key: aws.String(w.key),
UploadId: aws.String(w.uploadID),
}); aErr != nil {
return 0, errors.Join(err, aErr)
}
return 0, err
}
resp, err := w.driver.S3.CreateMultipartUploadWithContext(w.ctx, &s3.CreateMultipartUploadInput{
Bucket: aws.String(w.driver.Bucket),
Key: aws.String(w.key),
ContentType: w.driver.getContentType(),
ACL: w.driver.getACL(),
ServerSideEncryption: w.driver.getEncryptionMode(),
StorageClass: w.driver.getStorageClass(),
})
if err != nil {
return 0, err
}
w.uploadID = *resp.UploadId
// If the entire written file is smaller than minChunkSize, we need to make
// a new part from scratch :double sad face:
if w.size < minChunkSize {
resp, err := w.driver.S3.GetObjectWithContext(w.ctx, &s3.GetObjectInput{
Bucket: aws.String(w.driver.Bucket),
Key: aws.String(w.key),
})
if err != nil {
return 0, err
}
defer resp.Body.Close()
w.reset()
if _, err := io.Copy(w.buf, resp.Body); err != nil {
return 0, err
}
} else {
// Otherwise we can use the old file as the new first part
copyPartResp, err := w.driver.S3.UploadPartCopyWithContext(w.ctx, &s3.UploadPartCopyInput{
Bucket: aws.String(w.driver.Bucket),
CopySource: aws.String(w.driver.Bucket + "/" + w.key),
Key: aws.String(w.key),
PartNumber: aws.Int64(1),
UploadId: resp.UploadId,
})
if err != nil {
return 0, err
}
w.parts = []*s3.Part{{
ETag: copyPartResp.CopyPartResult.ETag,
PartNumber: aws.Int64(1),
Size: aws.Int64(w.size),
}}
}
}
n, _ := w.buf.Write(p)
for w.buf.Len() >= w.driver.ChunkSize {
if err := w.flush(); err != nil {
return 0, fmt.Errorf("flush: %w", err)
}
}
return n, nil
}
func (w *writer) Size() int64 {
return w.size
}
// Close flushes any remaining data in the buffer and releases the buffer back
// to the pool.
func (w *writer) Close() error {
if w.closed {
return fmt.Errorf("already closed")
}
w.closed = true
defer w.releaseBuffer()
return w.flush()
}
func (w *writer) reset() {
w.buf.Reset()
w.parts = nil
w.size = 0
}
// releaseBuffer resets the buffer and returns it to the pool.
func (w *writer) releaseBuffer() {
w.buf.Reset()
w.driver.pool.Put(w.buf)
}
// Cancel aborts the multipart upload and closes the writer.
func (w *writer) Cancel(ctx context.Context) error {
if err := w.done(); err != nil {
return err
}
w.cancelled = true
_, err := w.driver.S3.AbortMultipartUploadWithContext(ctx, &s3.AbortMultipartUploadInput{
Bucket: aws.String(w.driver.Bucket),
Key: aws.String(w.key),
UploadId: aws.String(w.uploadID),
})
return err
}
// Commit flushes any remaining data in the buffer and completes the multipart
// upload.
func (w *writer) Commit(ctx context.Context) error {
if err := w.done(); err != nil {
return err
}
if err := w.flush(); err != nil {
return err
}
w.committed = true
completedUploadedParts := make(completedParts, len(w.parts))
for i, part := range w.parts {
completedUploadedParts[i] = &s3.CompletedPart{
ETag: part.ETag,
PartNumber: part.PartNumber,
}
}
// This is an edge case when we are trying to upload an empty file as part of
// the MultiPart upload. We get a PUT with Content-Length: 0 and sad things happen.
// The result is we are trying to Complete MultipartUpload with an empty list of
// completedUploadedParts which will always lead to 400 being returned from S3
// See: https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#CompletedMultipartUpload
// Solution: we upload the empty i.e. 0 byte part as a single part and then append it
// to the completedUploadedParts slice used to complete the Multipart upload.
if len(w.parts) == 0 {
resp, err := w.driver.S3.UploadPartWithContext(w.ctx, &s3.UploadPartInput{
Bucket: aws.String(w.driver.Bucket),
Key: aws.String(w.key),
PartNumber: aws.Int64(1),
UploadId: aws.String(w.uploadID),
Body: bytes.NewReader(nil),
})
if err != nil {
return err
}
completedUploadedParts = append(completedUploadedParts, &s3.CompletedPart{
ETag: resp.ETag,
PartNumber: aws.Int64(1),
})
}
sort.Sort(completedUploadedParts)
if _, err := w.driver.S3.CompleteMultipartUploadWithContext(w.ctx, &s3.CompleteMultipartUploadInput{
Bucket: aws.String(w.driver.Bucket),
Key: aws.String(w.key),
UploadId: aws.String(w.uploadID),
MultipartUpload: &s3.CompletedMultipartUpload{
Parts: completedUploadedParts,
},
}); err != nil {
if _, aErr := w.driver.S3.AbortMultipartUploadWithContext(w.ctx, &s3.AbortMultipartUploadInput{
Bucket: aws.String(w.driver.Bucket),
Key: aws.String(w.key),
UploadId: aws.String(w.uploadID),
}); aErr != nil {
return errors.Join(err, aErr)
}
return err
}
return nil
}
// flush writes at most [w.driver.ChunkSize] of the buffer to S3. flush is only
// called by [writer.Write] if the buffer is full, and always by [writer.Close]
// and [writer.Commit].
func (w *writer) flush() error {
if w.buf.Len() == 0 {
return nil
}
r := bytes.NewReader(w.buf.Next(w.driver.ChunkSize))
partSize := r.Len()
partNumber := aws.Int64(int64(len(w.parts)) + 1)
resp, err := w.driver.S3.UploadPartWithContext(w.ctx, &s3.UploadPartInput{
Bucket: aws.String(w.driver.Bucket),
Key: aws.String(w.key),
PartNumber: partNumber,
UploadId: aws.String(w.uploadID),
Body: r,
})
if err != nil {
return fmt.Errorf("upload part: %w", err)
}
w.parts = append(w.parts, &s3.Part{
ETag: resp.ETag,
PartNumber: partNumber,
Size: aws.Int64(int64(partSize)),
})
w.size += int64(partSize)
return nil
}
// done returns an error if the writer is in an invalid state.
func (w *writer) done() error {
switch {
case w.closed:
return fmt.Errorf("already closed")
case w.committed:
return fmt.Errorf("already committed")
case w.cancelled:
return fmt.Errorf("already cancelled")
}
return nil
}
package s3
// Source: https://github.com/pivotal-golang/s3cli
// Copyright (c) 2013 Damien Le Berrigaud and Nick Wade
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
import (
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
"net/http"
"net/url"
"sort"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/s3"
log "github.com/sirupsen/logrus"
)
type signer struct {
// Values that must be populated from the request
Request *http.Request
Time time.Time
Credentials *credentials.Credentials
Query url.Values
stringToSign string
signature string
}
var s3ParamsToSign = map[string]bool{
"acl": true,
"location": true,
"logging": true,
"notification": true,
"partNumber": true,
"policy": true,
"requestPayment": true,
"torrent": true,
"uploadId": true,
"uploads": true,
"versionId": true,
"versioning": true,
"versions": true,
"response-content-type": true,
"response-content-language": true,
"response-expires": true,
"response-cache-control": true,
"response-content-disposition": true,
"response-content-encoding": true,
"website": true,
"delete": true,
}
// setv2Handlers sets up v2 signature signing on the S3 driver
func setv2Handlers(svc *s3.S3) {
svc.Handlers.Build.PushBack(func(r *request.Request) {
parsedURL, err := url.Parse(r.HTTPRequest.URL.String())
if err != nil {
log.Fatalf("Failed to parse URL: %v", err)
}
r.HTTPRequest.URL.Opaque = parsedURL.Path
})
svc.Handlers.Sign.Clear()
svc.Handlers.Sign.PushBack(Sign)
svc.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
}
// Sign requests with signature version 2.
//
// Will sign the request with the service config's Credentials object.
// Signing is skipped if the credentials are the
// credentials.AnonymousCredentials object.
func Sign(req *request.Request) {
// Skip signing if the AnonymousCredentials object is used.
if req.Config.Credentials == credentials.AnonymousCredentials {
return
}
v2 := signer{
Request: req.HTTPRequest,
Time: req.Time,
Credentials: req.Config.Credentials,
}
// TODO(milosgajdos): figure this out; if Sign returns error which we should check,
// we should modify the codepath related to svc.Handlers.Sign.PushBack etc.
// nolint:errcheck
v2.Sign()
}
func (v2 *signer) Sign() error {
credValue, err := v2.Credentials.Get()
if err != nil {
return err
}
accessKey := credValue.AccessKeyID
var (
md5, ctype, date, xamz string
xamzDate bool
sarray []string
smap map[string]string
sharray []string
)
headers := v2.Request.Header
params := v2.Request.URL.Query()
parsedURL, err := url.Parse(v2.Request.URL.String())
if err != nil {
return err
}
host, canonicalPath := parsedURL.Host, parsedURL.Path
v2.Request.Header["Host"] = []string{host}
v2.Request.Header["date"] = []string{v2.Time.In(time.UTC).Format(time.RFC1123)}
if credValue.SessionToken != "" {
v2.Request.Header["x-amz-security-token"] = []string{credValue.SessionToken}
}
smap = make(map[string]string)
for k, v := range headers {
k = strings.ToLower(k)
switch k {
case "content-md5":
md5 = v[0]
case "content-type":
ctype = v[0]
case "date":
if !xamzDate {
date = v[0]
}
default:
if strings.HasPrefix(k, "x-amz-") {
vall := strings.Join(v, ",")
smap[k] = k + ":" + vall
if k == "x-amz-date" {
xamzDate = true
date = ""
}
sharray = append(sharray, k)
}
}
}
if len(sharray) > 0 {
sort.StringSlice(sharray).Sort()
for _, h := range sharray {
sarray = append(sarray, smap[h])
}
xamz = strings.Join(sarray, "\n") + "\n"
}
expires := false
if v, ok := params["Expires"]; ok {
expires = true
date = v[0]
params["AWSAccessKeyId"] = []string{accessKey}
}
sarray = sarray[0:0]
for k, v := range params {
if s3ParamsToSign[k] {
for _, vi := range v {
if vi == "" {
sarray = append(sarray, k)
} else {
sarray = append(sarray, k+"="+vi)
}
}
}
}
if len(sarray) > 0 {
sort.StringSlice(sarray).Sort()
canonicalPath = canonicalPath + "?" + strings.Join(sarray, "&")
}
v2.stringToSign = strings.Join([]string{
v2.Request.Method,
md5,
ctype,
date,
xamz + canonicalPath,
}, "\n")
hash := hmac.New(sha1.New, []byte(credValue.SecretAccessKey))
hash.Write([]byte(v2.stringToSign))
v2.signature = base64.StdEncoding.EncodeToString(hash.Sum(nil))
if expires {
params["Signature"] = []string{v2.signature}
} else {
headers["Authorization"] = []string{"AWS " + accessKey + ":" + v2.signature}
}
log.WithFields(log.Fields{
"string-to-sign": v2.stringToSign,
"signature": v2.signature,
}).Debugln("request signature")
return nil
}
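// The final step of Sign reduces to a single HMAC: the canonical string is
// MACed with HMAC-SHA1 under the secret key and base64-encoded. A minimal
// sketch of just that step (signStringToSign is illustrative and not called
// anywhere):
func signStringToSign(secretAccessKey, stringToSign string) string {
	mac := hmac.New(sha1.New, []byte(secretAccessKey))
	mac.Write([]byte(stringToSign))
	return base64.StdEncoding.EncodeToString(mac.Sum(nil))
}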
package driver
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"regexp"
"strconv"
"strings"
)
// Version is a string representing the storage driver version, of the form
// Major.Minor.
// The registry must accept storage drivers with equal major version and greater
// minor version, but may not be compatible with older storage driver versions.
type Version string
// Major returns the major (primary) component of a version.
func (version Version) Major() uint {
majorPart, _, _ := strings.Cut(string(version), ".")
major, _ := strconv.ParseUint(majorPart, 10, 0)
return uint(major)
}
// Minor returns the minor (secondary) component of a version.
func (version Version) Minor() uint {
_, minorPart, _ := strings.Cut(string(version), ".")
minor, _ := strconv.ParseUint(minorPart, 10, 0)
return uint(minor)
}
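// For example, Version("0.1").Major() == 0 and Version("0.1").Minor() == 1;
// a hypothetical Version("2.10") would yield Major() == 2 and Minor() == 10.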
// CurrentVersion is the current storage driver Version.
const CurrentVersion Version = "0.1"
// WalkOptions provides options to the walk function that may adjust its behaviour
type WalkOptions struct {
// If StartAfterHint is set, the walk may start with the first item lexicographically
// after the hint, but this is not guaranteed; drivers may start the walk from the given path.
StartAfterHint string
}
func WithStartAfterHint(startAfterHint string) func(*WalkOptions) {
return func(s *WalkOptions) {
s.StartAfterHint = startAfterHint
}
}
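// A sketch of how the option composes with a driver's Walk (walkAfterHint is
// a hypothetical helper; any StorageDriver implementation is assumed):
func walkAfterHint(ctx context.Context, d StorageDriver, root, hint string) error {
	return d.Walk(ctx, root, func(fi FileInfo) error {
		fmt.Println(fi.Path()) // visit each entry, possibly starting after hint
		return nil
	}, WithStartAfterHint(hint))
}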
// StorageDriver defines methods that a Storage Driver must implement for a
// filesystem-like key/value object storage. Storage Drivers are automatically
// registered via an internal registration mechanism, and generally created
// via the StorageDriverFactory interface (https://godoc.org/github.com/distribution/distribution/registry/storage/driver/factory).
// Please see the aforementioned factory package for example code showing how to get an instance
// of a StorageDriver.
type StorageDriver interface {
// Name returns the human-readable "name" of the driver, useful in error
// messages and logging. By convention, this will just be the registration
// name, but drivers may provide other information here.
Name() string
// GetContent retrieves the content stored at "path" as a []byte.
// This should primarily be used for small objects.
GetContent(ctx context.Context, path string) ([]byte, error)
// PutContent stores the []byte content at a location designated by "path".
// This should primarily be used for small objects.
PutContent(ctx context.Context, path string, content []byte) error
// Reader retrieves an io.ReadCloser for the content stored at "path"
// with a given byte offset.
// May be used to resume reading a stream by providing a nonzero offset.
Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error)
// Writer returns a FileWriter which will store the content written to it
// at the location designated by "path" after the call to Commit.
// A path may be appended to if it has not been committed, or if the
// existing committed content is zero length.
//
// The behaviour of appending to paths with non-empty committed content is
// undefined. Specific implementations may document their own behavior.
Writer(ctx context.Context, path string, append bool) (FileWriter, error)
// Stat retrieves the FileInfo for the given path, including the current
// size in bytes and the creation time.
Stat(ctx context.Context, path string) (FileInfo, error)
// List returns a list of the objects that are direct descendants of the
// given path.
List(ctx context.Context, path string) ([]string, error)
// Move moves an object stored at sourcePath to destPath, removing the
// original object.
// Note: This may be no more efficient than a copy followed by a delete for
// many implementations.
Move(ctx context.Context, sourcePath string, destPath string) error
// Delete recursively deletes all objects stored at "path" and its subpaths.
Delete(ctx context.Context, path string) error
// RedirectURL returns a URL which the client of the request r may use
// to retrieve the content stored at path. Returning the empty string
// signals that the request may not be redirected.
RedirectURL(r *http.Request, path string) (string, error)
// Walk traverses a filesystem defined within driver, starting
// from the given path, calling f on each file.
// If the returned error from the WalkFn is ErrSkipDir and fileInfo refers
// to a directory, the directory will not be entered and Walk
// will continue the traversal.
// If the returned error from the WalkFn is ErrFilledBuffer, processing stops.
Walk(ctx context.Context, path string, f WalkFn, options ...func(*WalkOptions)) error
}
// FileWriter provides an abstraction for an opened writable file-like object in
// the storage backend. The FileWriter must flush all content written to it on
// the call to Close, but is only required to make its content readable on a
// call to Commit.
type FileWriter interface {
io.WriteCloser
// Size returns the number of bytes written to this FileWriter.
Size() int64
// Cancel removes any written content from this FileWriter.
Cancel(context.Context) error
// Commit flushes all content written to this FileWriter and makes it
// available for future calls to StorageDriver.GetContent and
// StorageDriver.Reader.
Commit(context.Context) error
}
// PathRegexp is the regular expression which each file path must match. A
// file path is absolute, beginning with a slash and containing a positive
// number of path components separated by slashes, where each component is
// restricted to alphanumeric characters or a period, underscore, or
// hyphen.
var PathRegexp = regexp.MustCompile(`^(/[A-Za-z0-9._-]+)+$`)
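// Illustrative checks against PathRegexp (assumed inputs, shown for clarity):
//
//	PathRegexp.MatchString("/docker/registry/v2") // true
//	PathRegexp.MatchString("/a/b.c_d-e")          // true
//	PathRegexp.MatchString("relative/path")       // false: must begin with "/"
//	PathRegexp.MatchString("/")                   // false: empty path component
//	PathRegexp.MatchString("/a//b")               // false: empty path component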
// ErrUnsupportedMethod may be returned in the case where a StorageDriver implementation does not support an optional method.
type ErrUnsupportedMethod struct {
DriverName string
}
func (err ErrUnsupportedMethod) Error() string {
return fmt.Sprintf("%s: unsupported method", err.DriverName)
}
// PathNotFoundError is returned when operating on a nonexistent path.
type PathNotFoundError struct {
Path string
DriverName string
}
func (err PathNotFoundError) Error() string {
return fmt.Sprintf("%s: Path not found: %s", err.DriverName, err.Path)
}
// InvalidPathError is returned when the provided path is malformed.
type InvalidPathError struct {
Path string
DriverName string
}
func (err InvalidPathError) Error() string {
return fmt.Sprintf("%s: invalid path: %s", err.DriverName, err.Path)
}
// InvalidOffsetError is returned when attempting to read or write from an
// invalid offset.
type InvalidOffsetError struct {
Path string
Offset int64
DriverName string
}
func (err InvalidOffsetError) Error() string {
return fmt.Sprintf("%s: invalid offset: %d for path: %s", err.DriverName, err.Offset, err.Path)
}
// Error is a catch-all error type which captures an underlying error and
// the driver type on which it occurred.
type Error struct {
DriverName string
Detail error
}
func (err Error) Error() string {
return fmt.Sprintf("%s: %s", err.DriverName, err.Detail)
}
func (err Error) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
DriverName string `json:"driver"`
Detail string `json:"detail"`
}{
DriverName: err.DriverName,
Detail: err.Detail.Error(),
})
}
// Errors provides the envelope for multiple errors
// for use within the storagedriver implementations.
type Errors struct {
DriverName string
Errs []error
}
var _ error = Errors{}
func (e Errors) Error() string {
switch len(e.Errs) {
case 0:
return fmt.Sprintf("%s: <nil>", e.DriverName)
case 1:
return fmt.Sprintf("%s: %s", e.DriverName, e.Errs[0].Error())
default:
msg := "errors:\n"
for _, err := range e.Errs {
msg += err.Error() + "\n"
}
return fmt.Sprintf("%s: %s", e.DriverName, msg)
}
}
// MarshalJSON converts slice of errors into the format
// that is serializable by JSON.
func (e Errors) MarshalJSON() ([]byte, error) {
tmpErrs := struct {
DriverName string `json:"driver"`
Details []string `json:"details"`
}{
DriverName: e.DriverName,
}
if len(e.Errs) == 0 {
tmpErrs.Details = make([]string, 0)
return json.Marshal(tmpErrs)
}
for _, err := range e.Errs {
tmpErrs.Details = append(tmpErrs.Details, err.Error())
}
return json.Marshal(tmpErrs)
}
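// Illustrative output (assumed values): Errors{DriverName: "s3aws",
// Errs: []error{errors.New("boom")}} marshals to
//
//	{"driver":"s3aws","details":["boom"]}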
package driver
import (
"context"
"errors"
"path/filepath"
"sort"
"strings"
"github.com/sirupsen/logrus"
)
// ErrSkipDir is used as a return value from a WalkFn to indicate that
// the directory named in the call is to be skipped. It is not returned
// as an error by any function.
var ErrSkipDir = errors.New("skip this directory")
// ErrFilledBuffer is used as a return value from a WalkFn to indicate
// that the requested number of entries has been reached and the walk can
// stop.
var ErrFilledBuffer = errors.New("we have enough entries")
// WalkFn is called once per file by Walk
type WalkFn func(fileInfo FileInfo) error
// WalkFallback traverses a filesystem defined within driver, starting
// from the given path, calling f on each file. It uses the List method and Stat to drive itself.
// If the returned error from the WalkFn is ErrSkipDir the directory will not be entered and Walk
// will continue the traversal. If the returned error from the WalkFn is ErrFilledBuffer, the walk
// stops.
func WalkFallback(ctx context.Context, driver StorageDriver, from string, f WalkFn, options ...func(*WalkOptions)) error {
walkOptions := &WalkOptions{}
for _, o := range options {
o(walkOptions)
}
startAfterHint := walkOptions.StartAfterHint
// Check whether the hint is contained within from, using a relative-path
// comparison. If the hint and from are the same, that still counts as contained.
rel, err := filepath.Rel(from, startAfterHint)
if err != nil || strings.HasPrefix(rel, "..") {
// The startAfterHint is outside from, so check if we even need to walk anything
// Replace any path separators with \x00 so that the comparison orders paths in a depth-first way
if strings.ReplaceAll(startAfterHint, "/", "\x00") < strings.ReplaceAll(from, "/", "\x00") {
_, err := doWalkFallback(ctx, driver, from, "", f)
return err
}
} else {
// The startAfterHint is within from.
// Walk up the tree until we hit from - we know it is contained.
// Ensure startAfterHint is never deeper than a child of the base
// directory so that doWalkFallback doesn't have to worry about
// depth-first comparisons
base := startAfterHint
for strings.HasPrefix(base, from) {
_, err = doWalkFallback(ctx, driver, base, startAfterHint, f)
switch err.(type) {
case nil:
// No error
case PathNotFoundError:
// dir doesn't exist, so nothing to walk
default:
return err
}
if base == from {
break
}
startAfterHint = base
base, _ = filepath.Split(startAfterHint)
if len(base) > 1 {
base = strings.TrimSuffix(base, "/")
}
}
}
return nil
}
// doWalkFallback performs a depth first walk using recursion.
// from is the directory that this iteration of the function should walk.
// startAfterHint is the child within from to start the walk after. It should only ever be a child of from, or the empty string.
func doWalkFallback(ctx context.Context, driver StorageDriver, from string, startAfterHint string, f WalkFn) (bool, error) {
children, err := driver.List(ctx, from)
if err != nil {
return false, err
}
sort.Strings(children)
for _, child := range children {
// The startAfterHint has been sanitised in WalkFallback and will either be
// empty, or be suitable for an <= check for this _from_.
if child <= startAfterHint {
continue
}
// TODO(stevvooe): Calling driver.Stat for every entry is quite
// expensive when running against backends with a slow Stat
// implementation, such as GCS. This is very likely a serious
// performance bottleneck.
// Those backends should have custom walk functions. See S3.
fileInfo, err := driver.Stat(ctx, child)
if err != nil {
switch err.(type) {
case PathNotFoundError:
// repository was removed in between listing and enumeration. Ignore it.
logrus.WithField("path", child).Infof("ignoring deleted path")
continue
default:
return false, err
}
}
err = f(fileInfo)
if err == nil && fileInfo.IsDir() {
if ok, err := doWalkFallback(ctx, driver, child, startAfterHint, f); err != nil || !ok {
return ok, err
}
} else if err == ErrSkipDir {
// don't traverse into this directory
} else if err == ErrFilledBuffer {
return false, nil // no error but stop iteration
} else if err != nil {
return false, err
}
}
return true, nil
}
package storage
import "fmt"
// pushError formats an error type given a path and an error
// and pushes it to a slice of errors
func pushError(errors []error, path string, err error) []error {
return append(errors, fmt.Errorf("%s: %s", path, err))
}
package storage
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
)
// TODO(stevvooe): Set an optimal buffer size here. We'll have to
// understand the latency characteristics of the underlying network to
// set this correctly, so we may want to leave it to the driver. For
// out of process drivers, we'll have to optimize this buffer size for
// local communication.
const fileReaderBufferSize = 4 * 1024 * 1024
// fileReader provides a read-seeker interface to files stored in a
// storagedriver. Used to implement part of the layer interface and will be
// used to implement the read side of LayerUpload.
type fileReader struct {
driver storagedriver.StorageDriver
ctx context.Context
// identifying fields
path string
size int64 // size is the total size, must be set.
// mutable fields
rc io.ReadCloser // remote read closer
brd *bufio.Reader // internal buffered io
offset int64 // offset is the current read offset
err error // terminal error, if set, reader is closed
}
// newFileReader initializes a file reader for the remote file. The reader
// takes on the size and path that must be determined externally with a stat
// call. The reader operates optimistically, assuming that the file is already
// there.
func newFileReader(ctx context.Context, driver storagedriver.StorageDriver, path string, size int64) (*fileReader, error) {
return &fileReader{
ctx: ctx,
driver: driver,
path: path,
size: size,
}, nil
}
func (fr *fileReader) Read(p []byte) (n int, err error) {
if fr.err != nil {
return 0, fr.err
}
rd, err := fr.reader()
if err != nil {
return 0, err
}
n, err = rd.Read(p)
fr.offset += int64(n)
// Simulate an io.EOF error if we reach the file size.
if err == nil && fr.offset >= fr.size {
err = io.EOF
}
return n, err
}
func (fr *fileReader) Seek(offset int64, whence int) (int64, error) {
if fr.err != nil {
return 0, fr.err
}
var err error
newOffset := fr.offset
switch whence {
case io.SeekCurrent:
newOffset += offset
case io.SeekEnd:
newOffset = fr.size + offset
case io.SeekStart:
newOffset = offset
}
if newOffset < 0 {
err = fmt.Errorf("cannot seek to negative position")
} else {
if fr.offset != newOffset {
fr.reset()
}
// No problems, set the offset.
fr.offset = newOffset
}
return fr.offset, err
}
func (fr *fileReader) Close() error {
return fr.closeWithErr(fmt.Errorf("fileReader: closed"))
}
// reader prepares the current reader at the fileReader's offset, ensuring it
// is buffered and ready to go.
func (fr *fileReader) reader() (io.Reader, error) {
if fr.err != nil {
return nil, fr.err
}
if fr.rc != nil {
return fr.brd, nil
}
// If we don't have a reader, open one up.
rc, err := fr.driver.Reader(fr.ctx, fr.path, fr.offset)
if err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
// NOTE(stevvooe): If the path is not found, we simply return a
// reader that returns io.EOF. However, we do not set fr.rc,
// allowing future attempts at getting a reader to possibly
// succeed if the file turns up later.
return io.NopCloser(bytes.NewReader([]byte{})), nil
default:
return nil, err
}
}
fr.rc = rc
if fr.brd == nil {
fr.brd = bufio.NewReaderSize(fr.rc, fileReaderBufferSize)
} else {
fr.brd.Reset(fr.rc)
}
return fr.brd, nil
}
// reset resets the reader, forcing the read method to open up a new
// connection and rebuild the buffered reader. This should be called when the
// offset and the reader will become out of sync, such as during a seek
// operation.
func (fr *fileReader) reset() {
if fr.err != nil {
return
}
if fr.rc != nil {
fr.rc.Close()
fr.rc = nil
}
}
func (fr *fileReader) closeWithErr(err error) error {
if fr.err != nil {
return fr.err
}
fr.err = err
// close and release reader chain
if fr.rc != nil {
fr.rc.Close()
}
fr.rc = nil
fr.brd = nil
return fr.err
}
package storage
import (
"context"
"errors"
"fmt"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/registry/storage/driver"
"github.com/distribution/reference"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
func emit(format string, a ...interface{}) {
fmt.Printf(format+"\n", a...)
}
// GCOpts contains options for garbage collector
type GCOpts struct {
DryRun bool
RemoveUntagged bool
Quiet bool
}
// ManifestDel describes a manifest that will be deleted
type ManifestDel struct {
Name string
Digest digest.Digest
Tags []string
}
// MarkAndSweep performs a mark and sweep of registry data
func MarkAndSweep(ctx context.Context, storageDriver driver.StorageDriver, registry distribution.Namespace, opts GCOpts) error {
repositoryEnumerator, ok := registry.(distribution.RepositoryEnumerator)
if !ok {
return fmt.Errorf("unable to convert Namespace to RepositoryEnumerator")
}
// mark
markSet := make(map[digest.Digest]struct{})
deleteLayerSet := make(map[string][]digest.Digest)
manifestArr := make([]ManifestDel, 0)
err := repositoryEnumerator.Enumerate(ctx, func(repoName string) error {
if !opts.Quiet {
emit(repoName)
}
var err error
named, err := reference.WithName(repoName)
if err != nil {
return fmt.Errorf("failed to parse repo name %s: %v", repoName, err)
}
repository, err := registry.Repository(ctx, named)
if err != nil {
return fmt.Errorf("failed to construct repository: %v", err)
}
manifestService, err := repository.Manifests(ctx)
if err != nil {
return fmt.Errorf("failed to construct manifest service: %v", err)
}
manifestEnumerator, ok := manifestService.(distribution.ManifestEnumerator)
if !ok {
return fmt.Errorf("unable to convert ManifestService into ManifestEnumerator")
}
err = manifestEnumerator.Enumerate(ctx, func(dgst digest.Digest) error {
if opts.RemoveUntagged {
// fetch all tags where this manifest is the latest one
tags, err := repository.Tags(ctx).Lookup(ctx, v1.Descriptor{Digest: dgst})
if err != nil {
return fmt.Errorf("failed to retrieve tags for digest %v: %v", dgst, err)
}
if len(tags) == 0 {
// fetch all tags from the repository;
// any of these tags could reference the manifest in its history,
// which means we need to check (and delete) those references when deleting the manifest
allTags, err := repository.Tags(ctx).All(ctx)
if err != nil {
if _, ok := err.(distribution.ErrRepositoryUnknown); ok {
if !opts.Quiet {
emit("manifest tags path of repository %s does not exist", repoName)
}
return nil
}
return fmt.Errorf("failed to retrieve tags %v", err)
}
manifestArr = append(manifestArr, ManifestDel{Name: repoName, Digest: dgst, Tags: allTags})
return nil
}
}
// Mark the manifest's blob
if !opts.Quiet {
emit("%s: marking manifest %s ", repoName, dgst)
}
markSet[dgst] = struct{}{}
return markManifestReferences(dgst, manifestService, ctx, func(d digest.Digest) bool {
_, marked := markSet[d]
if !marked {
markSet[d] = struct{}{}
if !opts.Quiet {
emit("%s: marking blob %s", repoName, d)
}
}
return marked
})
})
if err != nil {
// In certain situations such as unfinished uploads, deleting all
// tags in S3 or removing the _manifests folder manually, this
// error may be of type PathNotFound.
//
// In these cases we can continue marking other manifests safely.
if _, ok := err.(driver.PathNotFoundError); !ok {
return err
}
}
blobService := repository.Blobs(ctx)
layerEnumerator, ok := blobService.(distribution.ManifestEnumerator)
if !ok {
return errors.New("unable to convert BlobService into ManifestEnumerator")
}
var deleteLayers []digest.Digest
err = layerEnumerator.Enumerate(ctx, func(dgst digest.Digest) error {
if _, ok := markSet[dgst]; !ok {
deleteLayers = append(deleteLayers, dgst)
}
return nil
})
if len(deleteLayers) > 0 {
deleteLayerSet[repoName] = deleteLayers
}
return err
})
if err != nil {
return fmt.Errorf("failed to mark: %v", err)
}
manifestArr = unmarkReferencedManifest(manifestArr, markSet, opts.Quiet)
// sweep
vacuum := NewVacuum(ctx, storageDriver)
if !opts.DryRun {
for _, obj := range manifestArr {
err = vacuum.RemoveManifest(obj.Name, obj.Digest, obj.Tags)
if err != nil {
return fmt.Errorf("failed to delete manifest %s: %v", obj.Digest, err)
}
}
}
blobService := registry.Blobs()
deleteSet := make(map[digest.Digest]struct{})
err = blobService.Enumerate(ctx, func(dgst digest.Digest) error {
// check if digest is in markSet. If not, delete it!
if _, ok := markSet[dgst]; !ok {
deleteSet[dgst] = struct{}{}
}
return nil
})
if err != nil {
return fmt.Errorf("error enumerating blobs: %v", err)
}
if !opts.Quiet {
emit("\n%d blobs marked, %d blobs and %d manifests eligible for deletion", len(markSet), len(deleteSet), len(manifestArr))
}
for dgst := range deleteSet {
if !opts.Quiet {
emit("blob eligible for deletion: %s", dgst)
}
if opts.DryRun {
continue
}
err = vacuum.RemoveBlob(string(dgst))
if err != nil {
return fmt.Errorf("failed to delete blob %s: %v", dgst, err)
}
}
for repo, dgsts := range deleteLayerSet {
for _, dgst := range dgsts {
if !opts.Quiet {
emit("%s: layer link eligible for deletion: %s", repo, dgst)
}
if opts.DryRun {
continue
}
err = vacuum.RemoveLayer(repo, dgst)
if err != nil {
return fmt.Errorf("failed to delete layer link %s of repo %s: %v", dgst, repo, err)
}
}
}
return err
}
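// An illustrative invocation (sketch only; constructing the registry and the
// storage driver is out of scope here). A dry run with RemoveUntagged reports
// what would be removed without deleting anything:
//
//	err := MarkAndSweep(ctx, storageDriver, registry, GCOpts{
//		DryRun:         true,
//		RemoveUntagged: true,
//	})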
// unmarkReferencedManifest filters out manifests present in markSet
func unmarkReferencedManifest(manifestArr []ManifestDel, markSet map[digest.Digest]struct{}, quietOutput bool) []ManifestDel {
filtered := make([]ManifestDel, 0)
for _, obj := range manifestArr {
if _, ok := markSet[obj.Digest]; !ok {
if !quietOutput {
emit("manifest eligible for deletion: %s", obj)
}
filtered = append(filtered, obj)
}
}
return filtered
}
// markManifestReferences marks the manifest references
func markManifestReferences(dgst digest.Digest, manifestService distribution.ManifestService, ctx context.Context, ingester func(digest.Digest) bool) error {
manifest, err := manifestService.Get(ctx, dgst)
if err != nil {
return fmt.Errorf("failed to retrieve manifest for digest %v: %v", dgst, err)
}
descriptors := manifest.References()
for _, descriptor := range descriptors {
// do not visit references if already marked
if ingester(descriptor.Digest) {
continue
}
if ok, _ := manifestService.Exists(ctx, descriptor.Digest); ok {
err := markManifestReferences(descriptor.Digest, manifestService, ctx, ingester)
if err != nil {
return err
}
}
}
return nil
}
package storage
import (
"context"
"errors"
"io"
"github.com/distribution/distribution/v3/registry/storage/driver"
)
const (
maxBlobGetSize = 4 * 1024 * 1024
)
func getContent(ctx context.Context, driver driver.StorageDriver, p string) ([]byte, error) {
r, err := driver.Reader(ctx, p, 0)
if err != nil {
return nil, err
}
defer r.Close()
return readAllLimited(r, maxBlobGetSize)
}
func readAllLimited(r io.Reader, limit int64) ([]byte, error) {
r = limitReader(r, limit)
return io.ReadAll(r)
}
// limitReader returns a new reader limited to n bytes. Unlike io.LimitReader,
// this returns an error when the limit reached.
func limitReader(r io.Reader, n int64) io.Reader {
return &limitedReader{r: r, n: n}
}
// limitedReader implements a reader that errors when the limit is reached.
//
// Partially cribbed from net/http.MaxBytesReader.
type limitedReader struct {
r io.Reader // underlying reader
n int64 // max bytes remaining
err error // sticky error
}
func (l *limitedReader) Read(p []byte) (n int, err error) {
if l.err != nil {
return 0, l.err
}
if len(p) == 0 {
return 0, nil
}
// If they asked for a 32KB read but only 5 bytes are
// remaining, no need to read 32KB. 6 bytes will answer the
// question of whether we hit the limit or go past it.
if int64(len(p)) > l.n+1 {
p = p[:l.n+1]
}
n, err = l.r.Read(p)
if int64(n) <= l.n {
l.n -= int64(n)
l.err = err
return n, err
}
n = int(l.n)
l.n = 0
l.err = errors.New("storage: read exceeds limit")
return n, l.err
}
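// Behavioural note (illustrative values): wrapping a 10-byte stream with
// limitReader(r, 4) returns 4 bytes and then the sticky
// "storage: read exceeds limit" error, whereas io.LimitReader(r, 4) would
// return those 4 bytes followed by a silent io.EOF.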
package storage
import (
"context"
"fmt"
"io"
"net/http"
"path"
"time"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/internal/dcontext"
"github.com/distribution/distribution/v3/internal/uuid"
"github.com/distribution/distribution/v3/registry/storage/driver"
"github.com/distribution/reference"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// linkPathFunc describes a function that can resolve a link based on the
// repository name and digest.
type linkPathFunc func(name string, dgst digest.Digest) (string, error)
// linkedBlobStore provides a full BlobService that namespaces the blobs to a
// given repository. Effectively, it manages the links in a given repository
// that grant access to the global blob store.
type linkedBlobStore struct {
*blobStore
registry *registry
blobServer distribution.BlobServer
blobAccessController distribution.BlobDescriptorService
repository distribution.Repository
ctx context.Context // only to be used where context can't come through method args
deleteEnabled bool
resumableDigestEnabled bool
// linkPath allows one to control the repository blob link set to which
// the blob store dispatches. This is required because manifest and layer
// blobs have not yet been fully merged. At some point, this functionality
// should be removed and the blob links folder should be merged.
linkPath linkPathFunc
// linkDirectoryPathSpec locates the root directories in which one might find links
linkDirectoryPathSpec pathSpec
}
var _ distribution.BlobStore = &linkedBlobStore{}
func (lbs *linkedBlobStore) Stat(ctx context.Context, dgst digest.Digest) (v1.Descriptor, error) {
return lbs.blobAccessController.Stat(ctx, dgst)
}
func (lbs *linkedBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
canonical, err := lbs.Stat(ctx, dgst) // access check
if err != nil {
return nil, err
}
return lbs.blobStore.Get(ctx, canonical.Digest)
}
func (lbs *linkedBlobStore) Open(ctx context.Context, dgst digest.Digest) (io.ReadSeekCloser, error) {
canonical, err := lbs.Stat(ctx, dgst) // access check
if err != nil {
return nil, err
}
return lbs.blobStore.Open(ctx, canonical.Digest)
}
func (lbs *linkedBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
canonical, err := lbs.Stat(ctx, dgst) // access check
if err != nil {
return err
}
if canonical.MediaType != "" {
// Set the repository local content type.
w.Header().Set("Content-Type", canonical.MediaType)
}
return lbs.blobServer.ServeBlob(ctx, w, r, canonical.Digest)
}
func (lbs *linkedBlobStore) Put(ctx context.Context, mediaType string, p []byte) (v1.Descriptor, error) {
dgst := digest.FromBytes(p)
// Place the data in the blob store first.
desc, err := lbs.blobStore.Put(ctx, mediaType, p)
if err != nil {
dcontext.GetLogger(ctx).Errorf("error putting into main store: %v", err)
return v1.Descriptor{}, err
}
if err := lbs.blobAccessController.SetDescriptor(ctx, dgst, desc); err != nil {
return v1.Descriptor{}, err
}
// TODO(stevvooe): Write out mediatype if incoming differs from what is
// returned by Put above. Note that we should allow updates for a given
// repository.
return desc, lbs.linkBlob(ctx, desc)
}
type optionFunc func(interface{}) error
func (f optionFunc) Apply(v interface{}) error {
return f(v)
}
// WithMountFrom returns a BlobCreateOption which designates that the blob should be
// mounted from the given canonical reference.
func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption {
return optionFunc(func(v interface{}) error {
opts, ok := v.(*distribution.CreateOptions)
if !ok {
return fmt.Errorf("unexpected options type: %T", v)
}
opts.Mount.ShouldMount = true
opts.Mount.From = ref
return nil
})
}
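// A sketch of cross-repository mounting through Create (mountOrCreate is a
// hypothetical helper): when the mount succeeds, Create returns
// distribution.ErrBlobMounted instead of a writer and no upload session is
// started, so callers must treat that error as success.
func mountOrCreate(ctx context.Context, blobs distribution.BlobStore, ref reference.Canonical) (distribution.BlobWriter, error) {
	bw, err := blobs.Create(ctx, WithMountFrom(ref))
	if _, ok := err.(distribution.ErrBlobMounted); ok {
		// The blob is already linked via the mount; nothing to upload.
		return nil, nil
	}
	return bw, err
}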
// Create begins a blob write session, returning a handle.
func (lbs *linkedBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
dcontext.GetLogger(ctx).Debug("(*linkedBlobStore).Create")
var opts distribution.CreateOptions
for _, option := range options {
err := option.Apply(&opts)
if err != nil {
return nil, err
}
}
if opts.Mount.ShouldMount {
desc, err := lbs.mount(ctx, opts.Mount.From, opts.Mount.From.Digest(), opts.Mount.Stat)
if err == nil {
// Mount successful, no need to initiate an upload session
return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc}
}
}
uuid := uuid.NewString()
startedAt := time.Now().UTC()
path, err := pathFor(uploadDataPathSpec{
name: lbs.repository.Named().Name(),
id: uuid,
})
if err != nil {
return nil, err
}
startedAtPath, err := pathFor(uploadStartedAtPathSpec{
name: lbs.repository.Named().Name(),
id: uuid,
})
if err != nil {
return nil, err
}
// Write a startedat file for this upload
if err := lbs.blobStore.driver.PutContent(ctx, startedAtPath, []byte(startedAt.Format(time.RFC3339))); err != nil {
return nil, err
}
return lbs.newBlobUpload(ctx, uuid, path, startedAt, false)
}
func (lbs *linkedBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
dcontext.GetLogger(ctx).Debug("(*linkedBlobStore).Resume")
startedAtPath, err := pathFor(uploadStartedAtPathSpec{
name: lbs.repository.Named().Name(),
id: id,
})
if err != nil {
return nil, err
}
startedAtBytes, err := lbs.blobStore.driver.GetContent(ctx, startedAtPath)
if err != nil {
switch err := err.(type) {
case driver.PathNotFoundError:
return nil, distribution.ErrBlobUploadUnknown
default:
return nil, err
}
}
startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes))
if err != nil {
return nil, err
}
path, err := pathFor(uploadDataPathSpec{
name: lbs.repository.Named().Name(),
id: id,
})
if err != nil {
return nil, err
}
return lbs.newBlobUpload(ctx, id, path, startedAt, true)
}
func (lbs *linkedBlobStore) Delete(ctx context.Context, dgst digest.Digest) error {
if !lbs.deleteEnabled {
return distribution.ErrUnsupported
}
// Ensure the blob is available for deletion
_, err := lbs.blobAccessController.Stat(ctx, dgst)
if err != nil {
return err
}
err = lbs.blobAccessController.Clear(ctx, dgst)
if err != nil {
return err
}
return nil
}
func (lbs *linkedBlobStore) Enumerate(ctx context.Context, ingestor func(digest.Digest) error) error {
rootPath, err := pathFor(lbs.linkDirectoryPathSpec)
if err != nil {
return err
}
return lbs.driver.Walk(ctx, rootPath, func(fileInfo driver.FileInfo) error {
// exit early if directory...
if fileInfo.IsDir() {
return nil
}
filePath := fileInfo.Path()
// check if it's a link
_, fileName := path.Split(filePath)
if fileName != "link" {
return nil
}
// read the digest found in link
digest, err := lbs.blobStore.readlink(ctx, filePath)
if err != nil {
return err
}
// ensure this conforms to the linkPathFns
_, err = lbs.Stat(ctx, digest)
if err != nil {
// we expect this error to occur so we move on
if err == distribution.ErrBlobUnknown {
return nil
}
return err
}
err = ingestor(digest)
if err != nil {
return err
}
return nil
})
}
func (lbs *linkedBlobStore) mount(ctx context.Context, sourceRepo reference.Named, dgst digest.Digest, sourceStat *v1.Descriptor) (v1.Descriptor, error) {
var stat v1.Descriptor
if sourceStat == nil {
// look up the blob info from the sourceRepo if not already provided
repo, err := lbs.registry.Repository(ctx, sourceRepo)
if err != nil {
return v1.Descriptor{}, err
}
stat, err = repo.Blobs(ctx).Stat(ctx, dgst)
if err != nil {
return v1.Descriptor{}, err
}
} else {
// use the provided blob info
stat = *sourceStat
}
desc := v1.Descriptor{
Size: stat.Size,
// NOTE(stevvooe): The central blob store firewalls media types from
// other users. The caller should look this up and override the value
// for the specific repository.
MediaType: "application/octet-stream",
Digest: dgst,
}
return desc, lbs.linkBlob(ctx, desc)
}
// newBlobUpload allocates a new upload controller with the given state.
func (lbs *linkedBlobStore) newBlobUpload(ctx context.Context, uuid, path string, startedAt time.Time, append bool) (distribution.BlobWriter, error) {
fw, err := lbs.driver.Writer(ctx, path, append)
if err != nil {
return nil, err
}
bw := &blobWriter{
ctx: ctx,
blobStore: lbs,
id: uuid,
startedAt: startedAt,
digester: digest.Canonical.Digester(),
fileWriter: fw,
driver: lbs.driver,
path: path,
resumableDigestEnabled: lbs.resumableDigestEnabled,
}
return bw, nil
}
// linkBlob links a valid, written blob into the registry under the named
// repository for the upload controller.
func (lbs *linkedBlobStore) linkBlob(ctx context.Context, canonical v1.Descriptor, aliases ...digest.Digest) error {
dgsts := append([]digest.Digest{canonical.Digest}, aliases...)
// TODO(stevvooe): Need to write out mediatype for only canonical hash
// since we don't care about the aliases. They are generally unused except
// for tarsum but those versions don't care about mediatype.
// Don't make duplicate links.
seenDigests := make(map[digest.Digest]struct{}, len(dgsts))
for _, dgst := range dgsts {
if _, seen := seenDigests[dgst]; seen {
continue
}
seenDigests[dgst] = struct{}{}
blobLinkPath, err := lbs.linkPath(lbs.repository.Named().Name(), dgst)
if err != nil {
return err
}
if err := lbs.blobStore.link(ctx, blobLinkPath, canonical.Digest); err != nil {
return err
}
}
return nil
}
type linkedBlobStatter struct {
*blobStore
repository distribution.Repository
// linkPath allows one to control the repository blob link set to which
// the blob store dispatches. This is required because manifest and layer
// blobs have not yet been fully merged. At some point, this functionality
// should be removed and the blob links folders should be merged.
linkPath linkPathFunc
}
var _ distribution.BlobDescriptorService = &linkedBlobStatter{}
func (lbs *linkedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (v1.Descriptor, error) {
blobLinkPath, err := lbs.linkPath(lbs.repository.Named().Name(), dgst)
if err != nil {
return v1.Descriptor{}, err
}
target, err := lbs.blobStore.readlink(ctx, blobLinkPath)
if err != nil {
switch err := err.(type) {
case driver.PathNotFoundError:
return v1.Descriptor{}, distribution.ErrBlobUnknown
default:
return v1.Descriptor{}, err
}
}
if target != dgst {
// Track when we are doing cross-digest domain lookups. ie, sha512 to sha256.
dcontext.GetLogger(ctx).Warnf("looking up blob with canonical target: %v -> %v", dgst, target)
}
// TODO(stevvooe): Look up repository local mediatype and replace that on
// the returned descriptor.
return lbs.blobStore.statter.Stat(ctx, target)
}
func (lbs *linkedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) (err error) {
blobLinkPath, err := lbs.linkPath(lbs.repository.Named().Name(), dgst)
if err != nil {
return err
}
return lbs.blobStore.driver.Delete(ctx, blobLinkPath)
}
func (lbs *linkedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc v1.Descriptor) error {
// The canonical descriptor for a blob is set at the commit phase of upload
return nil
}
// blobLinkPath provides the path to a blob link (historically known as a layer link).
func blobLinkPath(name string, dgst digest.Digest) (string, error) {
return pathFor(layerLinkPathSpec{name: name, digest: dgst})
}
// manifestRevisionLinkPath provides the path to the manifest revision link.
func manifestRevisionLinkPath(name string, dgst digest.Digest) (string, error) {
return pathFor(manifestRevisionLinkPathSpec{name: name, revision: dgst})
}
package storage
import (
"context"
"fmt"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/internal/dcontext"
"github.com/distribution/distribution/v3/manifest/manifestlist"
"github.com/distribution/distribution/v3/manifest/ocischema"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// manifestListHandler is a ManifestHandler that covers schema2 manifest lists.
type manifestListHandler struct {
repository distribution.Repository
blobStore distribution.BlobStore
ctx context.Context
validateImageIndexes validateImageIndexes
}
var _ ManifestHandler = &manifestListHandler{}
func (ms *manifestListHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) {
dcontext.GetLogger(ms.ctx).Debug("(*manifestListHandler).Unmarshal")
m := &manifestlist.DeserializedManifestList{}
if err := m.UnmarshalJSON(content); err != nil {
return nil, err
}
return m, nil
}
func (ms *manifestListHandler) Put(ctx context.Context, manifestList distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) {
dcontext.GetLogger(ms.ctx).Debug("(*manifestListHandler).Put")
var schemaVersion int
switch m := manifestList.(type) {
case *manifestlist.DeserializedManifestList:
schemaVersion = m.SchemaVersion
case *ocischema.DeserializedImageIndex:
schemaVersion = m.SchemaVersion
default:
return "", fmt.Errorf("wrong type put to manifestListHandler: %T", manifestList)
}
const expectedSchemaVersion = 2
if schemaVersion != expectedSchemaVersion {
return "", fmt.Errorf("unrecognized manifest list schema version %d, expected %d", schemaVersion, expectedSchemaVersion)
}
if err := ms.verifyManifest(ms.ctx, manifestList, skipDependencyVerification); err != nil {
return "", err
}
mt, payload, err := manifestList.Payload()
if err != nil {
return "", err
}
revision, err := ms.blobStore.Put(ctx, mt, payload)
if err != nil {
dcontext.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err)
return "", err
}
return revision.Digest, nil
}
// verifyManifest ensures that the manifest content is valid from the
// perspective of the registry. As a policy, the registry only tries to
// store valid content, leaving trust policies of that content up to
// consumers.
func (ms *manifestListHandler) verifyManifest(ctx context.Context, mnfst distribution.Manifest, skipDependencyVerification bool) error {
var errs distribution.ErrManifestVerification
// Check if we should be validating the existence of any child images in image indexes
if ms.validateImageIndexes.imagesExist && !skipDependencyVerification {
// Get the manifest service we can use to check for the existence of child images
manifestService, err := ms.repository.Manifests(ctx)
if err != nil {
return err
}
for _, manifestDescriptor := range mnfst.References() {
if ms.platformMustExist(manifestDescriptor) {
exists, err := manifestService.Exists(ctx, manifestDescriptor.Digest)
if err != nil && err != distribution.ErrBlobUnknown {
errs = append(errs, err)
}
if err != nil || !exists {
// On error here, we always append unknown blob errors.
errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: manifestDescriptor.Digest})
}
}
}
}
if len(errs) != 0 {
return errs
}
return nil
}
// platformMustExist checks if a descriptor within an index should be validated as existing before accepting the manifest into the registry.
func (ms *manifestListHandler) platformMustExist(descriptor v1.Descriptor) bool {
// If there are no image platforms configured to validate, we must check the existence of all child images.
if len(ms.validateImageIndexes.imagePlatforms) == 0 {
return true
}
imagePlatform := descriptor.Platform
// Guard against descriptors with no platform information; they cannot
// match a configured platform, so they are not validated.
if imagePlatform == nil {
return false
}
// If the platform matches a platform that is configured to validate, we must check the existence.
for _, platform := range ms.validateImageIndexes.imagePlatforms {
if imagePlatform.Architecture == platform.architecture &&
imagePlatform.OS == platform.os {
return true
}
}
// If the platform doesn't match a platform configured to validate, we don't need to check the existence.
return false
}
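// exampleValidateLinuxAMD64Only is an illustrative sketch, not part of the
// upstream code: with a single configured platform, platformMustExist only
// requires linux/amd64 child images to exist before an index is accepted;
// descriptors for other platforms are not checked.
func exampleValidateLinuxAMD64Only() validateImageIndexes {
return validateImageIndexes{
imagesExist: true,
imagePlatforms: []platform{
{architecture: "amd64", os: "linux"},
},
}
}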
package storage
import (
"context"
"encoding/json"
"fmt"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/internal/dcontext"
"github.com/distribution/distribution/v3/manifest/manifestlist"
"github.com/distribution/distribution/v3/manifest/ocischema"
"github.com/distribution/distribution/v3/manifest/schema2"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// A ManifestHandler gets and puts manifests of a particular type.
type ManifestHandler interface {
// Unmarshal unmarshals the manifest from a byte slice.
Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error)
// Put creates or updates the given manifest returning the manifest digest.
Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error)
}
// SkipLayerVerification allows a manifest to be Put before its
// layers are on the filesystem
func SkipLayerVerification() distribution.ManifestServiceOption {
return skipLayerOption{}
}
type skipLayerOption struct{}
func (o skipLayerOption) Apply(m distribution.ManifestService) error {
if ms, ok := m.(*manifestStore); ok {
ms.skipDependencyVerification = true
return nil
}
return fmt.Errorf("skip layer verification only valid for manifestStore")
}
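// exampleSkipLayerVerification is an illustrative sketch, not upstream code:
// it shows how the option is passed when requesting a ManifestService, so
// that a manifest may be Put before its layers exist on the filesystem.
func exampleSkipLayerVerification(ctx context.Context, repo distribution.Repository) (distribution.ManifestService, error) {
return repo.Manifests(ctx, SkipLayerVerification())
}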
type manifestStore struct {
repository *repository
blobStore *linkedBlobStore
ctx context.Context
skipDependencyVerification bool
schema2Handler ManifestHandler
manifestListHandler ManifestHandler
ocischemaHandler ManifestHandler
ocischemaIndexHandler ManifestHandler
}
var _ distribution.ManifestService = &manifestStore{}
func (ms *manifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
dcontext.GetLogger(ms.ctx).Debug("(*manifestStore).Exists")
_, err := ms.blobStore.Stat(ms.ctx, dgst)
if err != nil {
if err == distribution.ErrBlobUnknown {
return false, nil
}
return false, err
}
return true, nil
}
func (ms *manifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
dcontext.GetLogger(ms.ctx).Debug("(*manifestStore).Get")
// TODO(stevvooe): Need to check descriptor from above to ensure that the
// mediatype is as we expect for the manifest store.
content, err := ms.blobStore.Get(ctx, dgst)
if err != nil {
if err == distribution.ErrBlobUnknown {
return nil, distribution.ErrManifestUnknownRevision{
Name: ms.repository.Named().Name(),
Revision: dgst,
}
}
return nil, err
}
// versioned is a minimal representation of a manifest with version and mediatype.
var versioned struct {
specs.Versioned
// MediaType is the media type of this schema.
MediaType string `json:"mediaType,omitempty"`
}
if err = json.Unmarshal(content, &versioned); err != nil {
return nil, err
}
switch versioned.SchemaVersion {
case 2:
// This can be an image manifest or a manifest list
switch versioned.MediaType {
case schema2.MediaTypeManifest:
return ms.schema2Handler.Unmarshal(ctx, dgst, content)
case v1.MediaTypeImageManifest:
return ms.ocischemaHandler.Unmarshal(ctx, dgst, content)
case manifestlist.MediaTypeManifestList:
return ms.manifestListHandler.Unmarshal(ctx, dgst, content)
case v1.MediaTypeImageIndex:
return ms.ocischemaIndexHandler.Unmarshal(ctx, dgst, content)
case "":
// OCI image or image index - no media type in the content
// First see if it looks like an image index
res, err := ms.ocischemaIndexHandler.Unmarshal(ctx, dgst, content)
if err == nil {
// Only use the result if it actually parsed as an image index with
// manifests; an unchecked type assertion here would panic on a nil result.
if resIndex, ok := res.(*ocischema.DeserializedImageIndex); ok && resIndex.Manifests != nil {
return resIndex, nil
}
}
// Otherwise, assume it must be an image manifest
return ms.ocischemaHandler.Unmarshal(ctx, dgst, content)
default:
return nil, distribution.ErrManifestVerification{fmt.Errorf("unrecognized manifest content type %s", versioned.MediaType)}
}
}
return nil, fmt.Errorf("unrecognized manifest schema version %d", versioned.SchemaVersion)
}
func (ms *manifestStore) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
dcontext.GetLogger(ms.ctx).Debug("(*manifestStore).Put")
switch manifest.(type) {
case *schema2.DeserializedManifest:
return ms.schema2Handler.Put(ctx, manifest, ms.skipDependencyVerification)
case *ocischema.DeserializedManifest:
return ms.ocischemaHandler.Put(ctx, manifest, ms.skipDependencyVerification)
case *manifestlist.DeserializedManifestList:
return ms.manifestListHandler.Put(ctx, manifest, ms.skipDependencyVerification)
case *ocischema.DeserializedImageIndex:
return ms.ocischemaIndexHandler.Put(ctx, manifest, ms.skipDependencyVerification)
}
return "", fmt.Errorf("unrecognized manifest type %T", manifest)
}
// Delete removes the revision of the specified manifest.
func (ms *manifestStore) Delete(ctx context.Context, dgst digest.Digest) error {
dcontext.GetLogger(ms.ctx).Debug("(*manifestStore).Delete")
return ms.blobStore.Delete(ctx, dgst)
}
func (ms *manifestStore) Enumerate(ctx context.Context, ingester func(digest.Digest) error) error {
err := ms.blobStore.Enumerate(ctx, func(dgst digest.Digest) error {
err := ingester(dgst)
if err != nil {
return err
}
return nil
})
return err
}
package storage
import (
"context"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/internal/dcontext"
"github.com/distribution/distribution/v3/manifest/ocischema"
"github.com/opencontainers/go-digest"
)
// ocischemaIndexHandler is a ManifestHandler that covers the OCI Image Index.
type ocischemaIndexHandler struct {
*manifestListHandler
}
var _ ManifestHandler = &ocischemaIndexHandler{}
func (ms *ocischemaIndexHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) {
dcontext.GetLogger(ms.ctx).Debug("(*ociIndexHandler).Unmarshal")
m := &ocischema.DeserializedImageIndex{}
if err := m.UnmarshalJSON(content); err != nil {
return nil, err
}
return m, nil
}
package storage
import (
"context"
"fmt"
"net/url"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/internal/dcontext"
"github.com/distribution/distribution/v3/manifest/ocischema"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// ocischemaManifestHandler is a ManifestHandler that covers ocischema manifests.
type ocischemaManifestHandler struct {
repository distribution.Repository
blobStore distribution.BlobStore
ctx context.Context
manifestURLs manifestURLs
}
var _ ManifestHandler = &ocischemaManifestHandler{}
func (ms *ocischemaManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) {
dcontext.GetLogger(ms.ctx).Debug("(*ocischemaManifestHandler).Unmarshal")
m := &ocischema.DeserializedManifest{}
if err := m.UnmarshalJSON(content); err != nil {
return nil, err
}
return m, nil
}
func (ms *ocischemaManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) {
dcontext.GetLogger(ms.ctx).Debug("(*ocischemaManifestHandler).Put")
m, ok := manifest.(*ocischema.DeserializedManifest)
if !ok {
return "", fmt.Errorf("non-ocischema manifest put to ocischemaManifestHandler: %T", manifest)
}
if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil {
return "", err
}
mt, payload, err := m.Payload()
if err != nil {
return "", err
}
revision, err := ms.blobStore.Put(ctx, mt, payload)
if err != nil {
dcontext.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err)
return "", err
}
return revision.Digest, nil
}
// verifyManifest ensures that the manifest content is valid from the
// perspective of the registry. As a policy, the registry only tries to store
// valid content, leaving trust policies of that content up to consumers.
func (ms *ocischemaManifestHandler) verifyManifest(ctx context.Context, mnfst ocischema.DeserializedManifest, skipDependencyVerification bool) error {
var errs distribution.ErrManifestVerification
if mnfst.Manifest.SchemaVersion != 2 {
return fmt.Errorf("unrecognized manifest schema version %d", mnfst.Manifest.SchemaVersion)
}
if skipDependencyVerification {
return nil
}
manifestService, err := ms.repository.Manifests(ctx)
if err != nil {
return err
}
blobsService := ms.repository.Blobs(ctx)
for _, descriptor := range mnfst.References() {
err := descriptor.Digest.Validate()
if err != nil {
errs = append(errs, err, distribution.ErrManifestBlobUnknown{Digest: descriptor.Digest})
continue
}
switch descriptor.MediaType {
case v1.MediaTypeImageLayer, v1.MediaTypeImageLayerGzip, v1.MediaTypeImageLayerNonDistributable, v1.MediaTypeImageLayerNonDistributableGzip: //nolint:staticcheck // ignore A1019: v1.MediaTypeImageLayerNonDistributable is deprecated: Non-distributable layers are deprecated, and not recommended for future use.
allow := ms.manifestURLs.allow
deny := ms.manifestURLs.deny
for _, u := range descriptor.URLs {
var pu *url.URL
pu, err = url.Parse(u)
if err != nil || (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" || (allow != nil && !allow.MatchString(u)) || (deny != nil && deny.MatchString(u)) {
err = errInvalidURL
break
}
}
if err == nil {
// check for presence if this is a normal layer, or if there are
// no URLs for a non-distributable layer
if len(descriptor.URLs) == 0 ||
(descriptor.MediaType == v1.MediaTypeImageLayer || descriptor.MediaType == v1.MediaTypeImageLayerGzip) {
_, err = blobsService.Stat(ctx, descriptor.Digest)
}
}
case v1.MediaTypeImageManifest:
var exists bool
exists, err = manifestService.Exists(ctx, descriptor.Digest)
if err != nil || !exists {
err = distribution.ErrBlobUnknown // just coerce to unknown.
}
if err != nil {
dcontext.GetLogger(ms.ctx).WithError(err).Debugf("failed to ensure exists of %v in manifest service", descriptor.Digest)
}
fallthrough // double check the blob store.
default:
// check the presence
_, err = blobsService.Stat(ctx, descriptor.Digest)
}
if err != nil {
if err != distribution.ErrBlobUnknown {
errs = append(errs, err)
}
// On error here, we always append unknown blob errors.
errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: descriptor.Digest})
}
}
if len(errs) != 0 {
return errs
}
return nil
}
package storage
import (
"fmt"
"path"
"strings"
"github.com/opencontainers/go-digest"
)
const (
storagePathVersion = "v2" // fixed storage layout version
storagePathRoot = "/docker/registry/" // all driver paths have a prefix
// TODO(stevvooe): Get rid of the "storagePathRoot". Initially, we thought
// the storage path root would be configurable for all drivers through this
// package. In reality, we've found it simpler to do this on a per driver
// basis.
)
// pathFor maps paths based on "object names" and their ids. The "object
// names" mapped here are internal to the storage system.
//
// The path layout in the storage backend is roughly as follows:
//
// <root>/v2
// ├── blobs
// │ └── <algorithm>
// │ └── <split directory content addressable storage>
// └── repositories
// └── <name>
// ├── _layers
// │ └── <layer links to blob store>
// ├── _manifests
// │ ├── revisions
// │ │ └── <manifest digest path>
// │ │ └── link
// │ └── tags
// │ └── <tag>
// │ ├── current
// │ │ └── link
// │ └── index
// │ └── <algorithm>
// │ └── <hex digest>
// │ └── link
// └── _uploads
// └── <id>
// ├── data
// ├── hashstates
// │ └── <algorithm>
// │ └── <offset>
// └── startedat
//
// The storage backend layout is broken up into a content-addressable blob
// store and repositories. The content-addressable blob store holds most data
// throughout the backend, keyed by algorithm and digests of the underlying
// content. Access to the blob store is controlled through links from the
// repository to blobstore.
//
// A repository is made up of layers, manifests and tags. The layers component
// is just a directory of layers which are "linked" into a repository. A layer
// can only be accessed through a qualified repository name if it is linked in
// the repository. Uploads of layers are managed in the uploads directory,
// which is keyed by upload id. When all data for an upload is received, the
// data is moved into the blob store and the upload directory is deleted.
// Abandoned uploads can be garbage collected by reading the startedat file
// and removing uploads that have been active for longer than a certain time.
//
// The third component of the repository directory is the manifests store,
// which is made up of a revision store and tag store. Manifests are stored in
// the blob store and linked into the revision store.
// While the registry can save all revisions of a manifest, no relationship is
// implied as to the ordering of changes to a manifest. The tag store provides
// support for name, tag lookups of manifests, using "current/link" under a
// named tag directory. An index is maintained to support deletions of all
// revisions of a given manifest tag.
//
// We cover the path formats implemented by this path mapper below.
//
// Repositories:
//
// repositoriesRootPathSpec: <root>/v2/repositories
//
// Manifests:
//
// manifestsPathSpec: <root>/v2/repositories/<name>/_manifests
// manifestRevisionsPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/
// manifestRevisionPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/
// manifestRevisionLinkPathSpec: <root>/v2/repositories/<name>/_manifests/revisions/<algorithm>/<hex digest>/link
//
// Tags:
//
// manifestTagsPathSpec: <root>/v2/repositories/<name>/_manifests/tags/
// manifestTagPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/
// manifestTagCurrentPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/current/link
// manifestTagIndexPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/
// manifestTagIndexEntryPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/
// manifestTagIndexEntryLinkPathSpec: <root>/v2/repositories/<name>/_manifests/tags/<tag>/index/<algorithm>/<hex digest>/link
//
// Blobs:
//
// layerLinkPathSpec: <root>/v2/repositories/<name>/_layers/<algorithm>/<hex digest>/link
// layersPathSpec: <root>/v2/repositories/<name>/_layers
//
// Uploads:
//
// uploadDataPathSpec: <root>/v2/repositories/<name>/_uploads/<id>/data
// uploadStartedAtPathSpec: <root>/v2/repositories/<name>/_uploads/<id>/startedat
// uploadHashStatePathSpec: <root>/v2/repositories/<name>/_uploads/<id>/hashstates/<algorithm>/<offset>
//
// Blob Store:
//
// blobsPathSpec: <root>/v2/blobs/
// blobPathSpec: <root>/v2/blobs/<algorithm>/<first two hex characters of digest>/<hex digest>
// blobDataPathSpec: <root>/v2/blobs/<algorithm>/<first two hex characters of digest>/<hex digest>/data
//
// For more information on the semantic meaning of each path and their
// contents, please see the path spec documentation.
func pathFor(spec pathSpec) (string, error) {
// Switch on the path object type and return the appropriate path. At
// first glance, one may wonder why we don't use an interface to
// accomplish this. By keeping the formatting separate from the pathSpec, we
// keep the path generation componentized. These specs could be
// passed to a completely different mapper implementation and generate a
// different set of paths.
//
// For example, imagine migrating from one backend to the other: one could
// build a filesystem walker that converts a string path in one version,
// to an intermediate path object, that can be consumed and mapped by the
// other version.
rootPrefix := []string{storagePathRoot, storagePathVersion}
repoPrefix := append(rootPrefix, "repositories")
switch v := spec.(type) {
case manifestsPathSpec:
return path.Join(append(repoPrefix, v.name, "_manifests")...), nil
case manifestRevisionsPathSpec:
return path.Join(append(repoPrefix, v.name, "_manifests", "revisions")...), nil
case manifestRevisionPathSpec:
components, err := digestPathComponents(v.revision, false)
if err != nil {
return "", err
}
return path.Join(append(append(repoPrefix, v.name, "_manifests", "revisions"), components...)...), nil
case manifestRevisionLinkPathSpec:
root, err := pathFor(manifestRevisionPathSpec(v))
if err != nil {
return "", err
}
return path.Join(root, "link"), nil
case manifestTagsPathSpec:
return path.Join(append(repoPrefix, v.name, "_manifests", "tags")...), nil
case manifestTagPathSpec:
root, err := pathFor(manifestTagsPathSpec{
name: v.name,
})
if err != nil {
return "", err
}
return path.Join(root, v.tag), nil
case manifestTagCurrentPathSpec:
root, err := pathFor(manifestTagPathSpec(v))
if err != nil {
return "", err
}
return path.Join(root, "current", "link"), nil
case manifestTagIndexPathSpec:
root, err := pathFor(manifestTagPathSpec(v))
if err != nil {
return "", err
}
return path.Join(root, "index"), nil
case manifestTagIndexEntryLinkPathSpec:
root, err := pathFor(manifestTagIndexEntryPathSpec(v))
if err != nil {
return "", err
}
return path.Join(root, "link"), nil
case manifestTagIndexEntryPathSpec:
root, err := pathFor(manifestTagIndexPathSpec{
name: v.name,
tag: v.tag,
})
if err != nil {
return "", err
}
components, err := digestPathComponents(v.revision, false)
if err != nil {
return "", err
}
return path.Join(root, path.Join(components...)), nil
case layerLinkPathSpec:
components, err := digestPathComponents(v.digest, false)
if err != nil {
return "", err
}
// TODO(stevvooe): Right now, all blobs are linked under "_layers". If
// we have future migrations, we may want to rename this to "_blobs".
// A migration strategy would simply leave existing items in place and
// write the new paths, commit a file then delete the old files.
blobLinkPathComponents := append(repoPrefix, v.name, "_layers")
return path.Join(path.Join(append(blobLinkPathComponents, components...)...), "link"), nil
case layersPathSpec:
return path.Join(append(repoPrefix, v.name, "_layers")...), nil
case blobsPathSpec:
blobsPathPrefix := append(rootPrefix, "blobs")
return path.Join(blobsPathPrefix...), nil
case blobPathSpec:
components, err := digestPathComponents(v.digest, true)
if err != nil {
return "", err
}
blobPathPrefix := append(rootPrefix, "blobs")
return path.Join(append(blobPathPrefix, components...)...), nil
case blobDataPathSpec:
components, err := digestPathComponents(v.digest, true)
if err != nil {
return "", err
}
components = append(components, "data")
blobPathPrefix := append(rootPrefix, "blobs")
return path.Join(append(blobPathPrefix, components...)...), nil
case uploadDataPathSpec:
return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "data")...), nil
case uploadStartedAtPathSpec:
return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "startedat")...), nil
case uploadHashStatePathSpec:
offset := fmt.Sprintf("%d", v.offset)
if v.list {
offset = "" // Limit to the prefix for listing offsets.
}
return path.Join(append(repoPrefix, v.name, "_uploads", v.id, "hashstates", string(v.alg), offset)...), nil
case repositoriesRootPathSpec:
return path.Join(repoPrefix...), nil
default:
// TODO(sday): This is an internal error. Ensure it doesn't escape (panic?).
return "", fmt.Errorf("unknown path spec: %#v", v)
}
}
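// examplePathForLayerLink is an illustrative sketch, not part of the upstream
// API; the repository name and digest are arbitrary placeholders. It resolves
// the on-disk location of a layer link, yielding:
//
// /docker/registry/v2/repositories/library/alpine/_layers/sha256/96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36/link
func examplePathForLayerLink() (string, error) {
return pathFor(layerLinkPathSpec{
name: "library/alpine",
digest: "sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36",
})
}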
// pathSpec is a type to mark structs as path specs. There is no
// implementation because we'd like to keep the specs and the mappers
// decoupled.
type pathSpec interface {
pathSpec()
}
// manifestsPathSpec describes the directory path for a repository's manifests.
type manifestsPathSpec struct {
name string
}
func (manifestsPathSpec) pathSpec() {}
// manifestRevisionsPathSpec describes the directory path containing all
// manifest revisions for a repository.
type manifestRevisionsPathSpec struct {
name string
}
func (manifestRevisionsPathSpec) pathSpec() {}
// manifestRevisionPathSpec describes the components of the directory path for
// a manifest revision.
type manifestRevisionPathSpec struct {
name string
revision digest.Digest
}
func (manifestRevisionPathSpec) pathSpec() {}
// manifestRevisionLinkPathSpec describes the path components required to look
// up the data link for a revision of a manifest. If this file is not present,
// the manifest blob is not available in the given repo. The contents of this
// file should just be the digest.
type manifestRevisionLinkPathSpec struct {
name string
revision digest.Digest
}
func (manifestRevisionLinkPathSpec) pathSpec() {}
// manifestTagsPathSpec describes the path elements required to point to the
// manifest tags directory.
type manifestTagsPathSpec struct {
name string
}
func (manifestTagsPathSpec) pathSpec() {}
// manifestTagPathSpec describes the path elements required to point to the
// manifest tag links files under a repository. These contain a blob id that
// can be used to look up the data and signatures.
type manifestTagPathSpec struct {
name string
tag string
}
func (manifestTagPathSpec) pathSpec() {}
// manifestTagCurrentPathSpec describes the link to the current revision for a
// given tag.
type manifestTagCurrentPathSpec struct {
name string
tag string
}
func (manifestTagCurrentPathSpec) pathSpec() {}
// manifestTagIndexPathSpec describes the link to the index of revisions
// with the given tag.
type manifestTagIndexPathSpec struct {
name string
tag string
}
func (manifestTagIndexPathSpec) pathSpec() {}
// manifestTagIndexEntryPathSpec contains the entries of the index by revision.
type manifestTagIndexEntryPathSpec struct {
name string
tag string
revision digest.Digest
}
func (manifestTagIndexEntryPathSpec) pathSpec() {}
// manifestTagIndexEntryLinkPathSpec describes the link to a revision of a
// manifest with the given tag within the index.
type manifestTagIndexEntryLinkPathSpec struct {
name string
tag string
revision digest.Digest
}
func (manifestTagIndexEntryLinkPathSpec) pathSpec() {}
// layersPathSpec contains the path for the layers inside a repo
type layersPathSpec struct {
name string
}
func (layersPathSpec) pathSpec() {}
// layerLinkPathSpec specifies a path for a blob link, which is a file with a
// blob id. The blob link will contain a content addressable blob id reference
// into the blob store. The format of the contents is as follows:
//
// <algorithm>:<hex digest of layer data>
//
// The following example of the file contents is more illustrative:
//
// sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36
//
// This indicates that there is a blob with the id/digest, calculated via
// sha256 that can be fetched from the blob store.
type layerLinkPathSpec struct {
name string
digest digest.Digest
}
func (layerLinkPathSpec) pathSpec() {}
// blobAlgorithmReplacer does some very simple path sanitization for user
// input. Paths should be "safe" before getting this far due to strict digest
// requirements but we can add further path conversion here, if needed.
var blobAlgorithmReplacer = strings.NewReplacer(
"+", "/",
".", "/",
";", "/",
)
// blobsPathSpec contains the path for the blobs directory
type blobsPathSpec struct{}
func (blobsPathSpec) pathSpec() {}
// blobPathSpec contains the path for the registry global blob store.
type blobPathSpec struct {
digest digest.Digest
}
func (blobPathSpec) pathSpec() {}
// blobDataPathSpec contains the path to the data file of a blob in the
// registry's global blob store. For now, this contains layer data, exclusively.
type blobDataPathSpec struct {
digest digest.Digest
}
func (blobDataPathSpec) pathSpec() {}
// uploadDataPathSpec defines the path parameters of the data file for
// uploads.
type uploadDataPathSpec struct {
name string
id string
}
func (uploadDataPathSpec) pathSpec() {}
// uploadStartedAtPathSpec defines the path parameters for the file that stores the
// start time of an upload. If it is missing, the upload is considered
// unknown. Admittedly, the presence of this file is an ugly hack to make sure
// we have a way to cleanup old or stalled uploads that doesn't rely on driver
// FileInfo behavior. If we come up with a more clever way to do this, we
// should remove this file immediately and rely on the startedAt field from
// the client to enforce time out policies.
type uploadStartedAtPathSpec struct {
name string
id string
}
func (uploadStartedAtPathSpec) pathSpec() {}
// uploadHashStatePathSpec defines the path parameters for the file that stores
// the hash function state of an upload at a specific byte offset. If `list` is
// set, then the path mapper will generate a list prefix for all hash state
// offsets for the upload identified by the name, id, and alg.
type uploadHashStatePathSpec struct {
name string
id string
alg digest.Algorithm
offset int64
list bool
}
func (uploadHashStatePathSpec) pathSpec() {}
// repositoriesRootPathSpec describes the root path of the repositories directory.
type repositoriesRootPathSpec struct{}
func (repositoriesRootPathSpec) pathSpec() {}
// digestPathComponents provides a consistent path breakdown for a given
// digest. For a generic digest, it will be as follows:
//
// <algorithm>/<hex digest>
//
// If multilevel is true, the first two hex characters of the digest will
// separate a grouping directory. It will be as follows:
//
// <algorithm>/<first two hex characters of digest>/<full hex digest>
func digestPathComponents(dgst digest.Digest, multilevel bool) ([]string, error) {
if err := dgst.Validate(); err != nil {
return nil, err
}
algorithm := blobAlgorithmReplacer.Replace(string(dgst.Algorithm()))
hex := dgst.Encoded()
prefix := []string{algorithm}
var suffix []string
if multilevel {
suffix = append(suffix, hex[:2])
}
suffix = append(suffix, hex)
return append(prefix, suffix...), nil
}
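// exampleDigestPathComponents is an illustrative sketch, not upstream code;
// the digest is an arbitrary placeholder. It demonstrates the two layouts:
// flat ["sha256", "<hex>"] and multilevel ["sha256", "<first two hex chars>", "<hex>"].
func exampleDigestPathComponents() (flat, multi []string, err error) {
dgst := digest.Digest("sha256:96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36")
flat, err = digestPathComponents(dgst, false) // ["sha256", "96443a84..."]
if err != nil {
return nil, nil, err
}
multi, err = digestPathComponents(dgst, true) // ["sha256", "96", "96443a84..."]
return flat, multi, err
}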
// digestFromPath reconstructs a digest from a storage path.
func digestFromPath(digestPath string) (digest.Digest, error) {
digestPath = strings.TrimSuffix(digestPath, "/data")
dir, hex := path.Split(digestPath)
dir = path.Dir(dir)
dir, next := path.Split(dir)
// next is either the algorithm OR the first two characters in the hex string
var algo string
if next == hex[:2] {
algo = path.Base(dir)
} else {
algo = next
}
dgst := digest.NewDigestFromEncoded(digest.Algorithm(algo), hex)
return dgst, dgst.Validate()
}
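// exampleDigestFromPath is an illustrative sketch, not upstream code: it shows
// the inverse mapping, reducing a multilevel blob data path back to the digest
// sha256:96443a84... (placeholder value reused from the comments above).
func exampleDigestFromPath() (digest.Digest, error) {
return digestFromPath("/docker/registry/v2/blobs/sha256/96/96443a84ce518ac22acb2e985eda402b58ac19ce6f91980bde63726a79d80b36/data")
}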
package storage
import (
"context"
"path"
"strings"
"time"
storageDriver "github.com/distribution/distribution/v3/registry/storage/driver"
"github.com/google/uuid"
"github.com/sirupsen/logrus"
)
// uploadData stores the location of temporary files created during a layer upload
// along with the time the upload was started
type uploadData struct {
containingDir string
startedAt time.Time
}
func newUploadData() uploadData {
return uploadData{
containingDir: "",
// default to far in future to protect against missing startedat
startedAt: time.Now().Add(10000 * time.Hour),
}
}
// PurgeUploads deletes upload directories created before olderThan. If
// actuallyDelete is false, it only reports what would be deleted. The list
// of directories deleted and errors encountered are returned.
func PurgeUploads(ctx context.Context, driver storageDriver.StorageDriver, olderThan time.Time, actuallyDelete bool) ([]string, []error) {
logrus.Infof("PurgeUploads starting: olderThan=%s, actuallyDelete=%t", olderThan, actuallyDelete)
uploadData, errors := getOutstandingUploads(ctx, driver)
var deleted []string
for _, uploadData := range uploadData {
if uploadData.startedAt.Before(olderThan) {
var err error
logrus.Infof("Upload files in %s have older date (%s) than purge date (%s). Removing upload directory.",
uploadData.containingDir, uploadData.startedAt, olderThan)
if actuallyDelete {
err = driver.Delete(ctx, uploadData.containingDir)
}
if err == nil {
deleted = append(deleted, uploadData.containingDir)
} else {
errors = append(errors, err)
}
}
}
logrus.Infof("Purge uploads finished. Num deleted=%d, num errors=%d", len(deleted), len(errors))
return deleted, errors
}
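// examplePurgeStaleUploads is an illustrative sketch, not upstream code; the
// one-week cutoff is an arbitrary choice. It performs a dry run first
// (actuallyDelete=false) and only then deletes for real.
func examplePurgeStaleUploads(ctx context.Context, d storageDriver.StorageDriver) {
olderThan := time.Now().Add(-7 * 24 * time.Hour)
// Dry run: report what would be removed without deleting anything.
wouldDelete, _ := PurgeUploads(ctx, d, olderThan, false)
logrus.Infof("dry run: %d upload directories eligible for purge", len(wouldDelete))
// Real run: remove upload directories older than the cutoff.
PurgeUploads(ctx, d, olderThan, true)
}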
// getOutstandingUploads walks the upload directory, collecting files
// which could be eligible for deletion. The only reliable way to
// classify the age of a file is with the date stored in the startedAt
// file, so gather files by UUID with a date from startedAt.
func getOutstandingUploads(ctx context.Context, driver storageDriver.StorageDriver) (map[string]uploadData, []error) {
var errors []error
uploads := make(map[string]uploadData)
inUploadDir := false
root, err := pathFor(repositoriesRootPathSpec{})
if err != nil {
return uploads, append(errors, err)
}
err = driver.Walk(ctx, root, func(fileInfo storageDriver.FileInfo) error {
filePath := fileInfo.Path()
_, file := path.Split(filePath)
if file[0] == '_' {
// Reserved directory
inUploadDir = (file == "_uploads")
if fileInfo.IsDir() && !inUploadDir {
return storageDriver.ErrSkipDir
}
}
uuid, isContainingDir := uuidFromPath(filePath)
if uuid == "" {
// Cannot reliably delete
return nil
}
ud, ok := uploads[uuid]
if !ok {
ud = newUploadData()
}
if isContainingDir {
ud.containingDir = filePath
}
if file == "startedat" {
if t, err := readStartedAtFile(ctx, driver, filePath); err == nil {
ud.startedAt = t
} else {
errors = pushError(errors, filePath, err)
}
}
uploads[uuid] = ud
return nil
})
if err != nil {
errors = pushError(errors, root, err)
}
return uploads, errors
}
// uuidFromPath extracts the upload UUID from a given path
// If the UUID is the last path component, this is the containing
// directory for all upload files
func uuidFromPath(path string) (string, bool) {
components := strings.Split(path, "/")
for i := len(components) - 1; i >= 0; i-- {
if u, err := uuid.Parse(components[i]); err == nil {
return u.String(), i == len(components)-1
}
}
return "", false
}
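// exampleUUIDFromPath is an illustrative sketch, not upstream code; the paths
// and UUID are placeholders. The same UUID is extracted from both paths, but
// only the first path is the upload's containing directory.
func exampleUUIDFromPath() {
const id = "0194fdc2-fa2f-4cc0-81d3-ff12045b73c8" // arbitrary, syntactically valid UUID
u1, isDir1 := uuidFromPath("/docker/registry/v2/repositories/foo/_uploads/" + id)
u2, isDir2 := uuidFromPath("/docker/registry/v2/repositories/foo/_uploads/" + id + "/startedat")
_ = u1 // "0194fdc2-fa2f-4cc0-81d3-ff12045b73c8"
_ = u2 // same UUID
_, _ = isDir1, isDir2 // true, false
}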
// readStartedAtFile reads the date from an upload's startedAtFile
func readStartedAtFile(ctx context.Context, driver storageDriver.StorageDriver, path string) (time.Time, error) {
startedAtBytes, err := driver.GetContent(ctx, path)
if err != nil {
return time.Now(), err
}
startedAt, err := time.Parse(time.RFC3339, string(startedAtBytes))
if err != nil {
return time.Now(), err
}
return startedAt, nil
}
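// exampleStartedAtContents is an illustrative sketch, not upstream code: the
// startedat file holds a single RFC3339 timestamp, which is what
// readStartedAtFile parses, e.g. "2006-01-02T15:04:05Z".
func exampleStartedAtContents(t time.Time) []byte {
return []byte(t.UTC().Format(time.RFC3339))
}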
package storage
import (
"context"
"regexp"
"runtime"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/registry/storage/cache"
storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
"github.com/distribution/reference"
)
var (
// DefaultConcurrencyLimit is the default maximum number of concurrent tag
// lookups, derived from the number of usable CPUs.
DefaultConcurrencyLimit = runtime.GOMAXPROCS(0)
)
// registry is the top-level implementation of Registry for use in the storage
// package. All instances should descend from this object.
type registry struct {
blobStore *blobStore
blobServer *blobServer
statter *blobStatter // global statter service.
blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider
deleteEnabled bool
tagLookupConcurrencyLimit int
resumableDigestEnabled bool
blobDescriptorServiceFactory distribution.BlobDescriptorServiceFactory
driver storagedriver.StorageDriver
// Validation
manifestURLs manifestURLs
validateImageIndexes validateImageIndexes
}
// manifestURLs holds regular expressions for controlling manifest URL whitelisting
type manifestURLs struct {
allow *regexp.Regexp
deny *regexp.Regexp
}
// validateImageIndexes holds configuration for validation of image indexes
type validateImageIndexes struct {
// imagesExist enables validation that the child images referenced by an
// index exist before the index is accepted.
imagesExist bool
// imagePlatforms restricts the existence validation to a set of platforms.
// An empty slice means all referenced images are validated.
imagePlatforms []platform
}
// platform represents a platform whose existence should be validated in an image index.
type platform struct {
architecture string
os string
}
// RegistryOption is the type used for functional options for NewRegistry.
type RegistryOption func(*registry) error
// EnableRedirect is a functional option for NewRegistry. It causes the backend
// blob server to attempt using (StorageDriver).RedirectURL to serve all blobs.
func EnableRedirect(registry *registry) error {
registry.blobServer.redirect = true
return nil
}
// TagLookupConcurrencyLimit is a functional option for NewRegistry. It caps
// the number of concurrent tag lookups performed by the tag store.
func TagLookupConcurrencyLimit(concurrencyLimit int) RegistryOption {
return func(registry *registry) error {
registry.tagLookupConcurrencyLimit = concurrencyLimit
return nil
}
}
// EnableDelete is a functional option for NewRegistry. It enables deletion on
// the registry.
func EnableDelete(registry *registry) error {
registry.deleteEnabled = true
return nil
}
// DisableDigestResumption is a functional option for NewRegistry. It should be
// used if the registry is acting as a caching proxy.
func DisableDigestResumption(registry *registry) error {
registry.resumableDigestEnabled = false
return nil
}
// ManifestURLsAllowRegexp is a functional option for NewRegistry.
func ManifestURLsAllowRegexp(r *regexp.Regexp) RegistryOption {
return func(registry *registry) error {
registry.manifestURLs.allow = r
return nil
}
}
// ManifestURLsDenyRegexp is a functional option for NewRegistry.
func ManifestURLsDenyRegexp(r *regexp.Regexp) RegistryOption {
return func(registry *registry) error {
registry.manifestURLs.deny = r
return nil
}
}
// EnableValidateImageIndexImagesExist is a functional option for NewRegistry. It enables
// validation that references exist before an image index is accepted.
func EnableValidateImageIndexImagesExist(registry *registry) error {
registry.validateImageIndexes.imagesExist = true
return nil
}
// AddValidateImageIndexImagesExistPlatform returns a functional option for NewRegistry.
// It adds a platform to check for existence before an image index is accepted.
func AddValidateImageIndexImagesExistPlatform(architecture string, os string) RegistryOption {
return func(registry *registry) error {
registry.validateImageIndexes.imagePlatforms = append(
registry.validateImageIndexes.imagePlatforms,
platform{
architecture: architecture,
os: os,
},
)
return nil
}
}
// BlobDescriptorServiceFactory returns a functional option for NewRegistry. It sets the
// factory to create BlobDescriptorServiceFactory middleware.
func BlobDescriptorServiceFactory(factory distribution.BlobDescriptorServiceFactory) RegistryOption {
return func(registry *registry) error {
registry.blobDescriptorServiceFactory = factory
return nil
}
}
// BlobDescriptorCacheProvider returns a functional option for
// NewRegistry. It creates a cached blob statter for use by the
// registry.
func BlobDescriptorCacheProvider(blobDescriptorCacheProvider cache.BlobDescriptorCacheProvider) RegistryOption {
// TODO(aaronl): The duplication of statter across several objects is
// ugly, and prevents us from using interface types in the registry
// struct. Ideally, blobStore and blobServer should be lazily
// initialized, and use the current value of
// blobDescriptorCacheProvider.
return func(registry *registry) error {
if blobDescriptorCacheProvider != nil {
statter := cache.NewCachedBlobStatter(blobDescriptorCacheProvider, registry.statter)
registry.blobStore.statter = statter
registry.blobServer.statter = statter
registry.blobDescriptorCacheProvider = blobDescriptorCacheProvider
}
return nil
}
}
// NewRegistry creates a new registry instance from the provided driver. The
// resulting registry may be shared by multiple goroutines but is cheap to
// allocate. If the Redirect option is specified, the backend blob server will
// attempt to use (StorageDriver).RedirectURL to serve all blobs.
func NewRegistry(ctx context.Context, driver storagedriver.StorageDriver, options ...RegistryOption) (distribution.Namespace, error) {
// create global statter
statter := &blobStatter{
driver: driver,
}
bs := &blobStore{
driver: driver,
statter: statter,
}
registry := &registry{
blobStore: bs,
blobServer: &blobServer{
driver: driver,
statter: statter,
pathFn: bs.path,
},
statter: statter,
resumableDigestEnabled: true,
driver: driver,
}
for _, option := range options {
if err := option(registry); err != nil {
return nil, err
}
}
return registry, nil
}
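// exampleNewRegistry is an illustrative sketch, not upstream code; the
// allow-list regexp and concurrency limit are hypothetical. It shows the
// functional options above composed into a single NewRegistry call.
func exampleNewRegistry(ctx context.Context, d storagedriver.StorageDriver) (distribution.Namespace, error) {
allowed := regexp.MustCompile(`^https://mirror\.example\.com/`) // hypothetical URL allow-list
return NewRegistry(ctx, d,
EnableDelete,
EnableRedirect,
ManifestURLsAllowRegexp(allowed),
TagLookupConcurrencyLimit(4),
)
}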
// Scope returns the namespace scope for a registry. The registry
// will only serve repositories contained within this scope.
func (reg *registry) Scope() distribution.Scope {
return distribution.GlobalScope
}
// Repository returns an instance of the repository tied to the registry.
// Instances should not be shared between goroutines but are cheap to
// allocate. In general, they should be request scoped.
func (reg *registry) Repository(ctx context.Context, canonicalName reference.Named) (distribution.Repository, error) {
var descriptorCache distribution.BlobDescriptorService
if reg.blobDescriptorCacheProvider != nil {
var err error
descriptorCache, err = reg.blobDescriptorCacheProvider.RepositoryScoped(canonicalName.Name())
if err != nil {
return nil, err
}
}
return &repository{
ctx: ctx,
registry: reg,
name: canonicalName,
descriptorCache: descriptorCache,
}, nil
}
func (reg *registry) Blobs() distribution.BlobEnumerator {
return reg.blobStore
}
func (reg *registry) BlobStatter() distribution.BlobStatter {
return reg.statter
}
// repository provides name-scoped access to various services.
type repository struct {
*registry
ctx context.Context
name reference.Named
descriptorCache distribution.BlobDescriptorService
}
// Name returns the name of the repository.
func (repo *repository) Named() reference.Named {
return repo.name
}
func (repo *repository) Tags(ctx context.Context) distribution.TagService {
limit := DefaultConcurrencyLimit
if repo.tagLookupConcurrencyLimit > 0 {
limit = repo.tagLookupConcurrencyLimit
}
tags := &tagStore{
repository: repo,
blobStore: repo.registry.blobStore,
concurrencyLimit: limit,
}
return tags
}
// Manifests returns an instance of ManifestService. Instantiation is cheap and
// may be context sensitive in the future. The instance should be used
// similarly to a request-local value.
func (repo *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
manifestDirectoryPathSpec := manifestRevisionsPathSpec{name: repo.name.Name()}
var statter distribution.BlobDescriptorService = &linkedBlobStatter{
blobStore: repo.blobStore,
repository: repo,
linkPath: manifestRevisionLinkPath,
}
if repo.descriptorCache != nil {
statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter)
}
if repo.registry.blobDescriptorServiceFactory != nil {
statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter)
}
blobStore := &linkedBlobStore{
ctx: ctx,
blobStore: repo.blobStore,
repository: repo,
deleteEnabled: repo.registry.deleteEnabled,
blobAccessController: statter,
// TODO(stevvooe): linkPath limits this blob store to only
// manifests. This instance cannot be used for blob checks.
linkPath: manifestRevisionLinkPath,
linkDirectoryPathSpec: manifestDirectoryPathSpec,
}
manifestListHandler := &manifestListHandler{
ctx: ctx,
repository: repo,
blobStore: blobStore,
validateImageIndexes: repo.validateImageIndexes,
}
ms := &manifestStore{
ctx: ctx,
repository: repo,
blobStore: blobStore,
schema2Handler: &schema2ManifestHandler{
ctx: ctx,
repository: repo,
blobStore: blobStore,
manifestURLs: repo.registry.manifestURLs,
},
manifestListHandler: manifestListHandler,
ocischemaHandler: &ocischemaManifestHandler{
ctx: ctx,
repository: repo,
blobStore: blobStore,
manifestURLs: repo.registry.manifestURLs,
},
ocischemaIndexHandler: &ocischemaIndexHandler{
manifestListHandler: manifestListHandler,
},
}
// Apply options
for _, option := range options {
err := option.Apply(ms)
if err != nil {
return nil, err
}
}
return ms, nil
}
// Blobs returns an instance of the BlobStore. Instantiation is cheap and
// may be context sensitive in the future. The instance should be used
// similarly to a request-local value.
func (repo *repository) Blobs(ctx context.Context) distribution.BlobStore {
var statter distribution.BlobDescriptorService = &linkedBlobStatter{
blobStore: repo.blobStore,
repository: repo,
linkPath: blobLinkPath,
}
if repo.descriptorCache != nil {
statter = cache.NewCachedBlobStatter(repo.descriptorCache, statter)
}
if repo.registry.blobDescriptorServiceFactory != nil {
statter = repo.registry.blobDescriptorServiceFactory.BlobAccessController(statter)
}
return &linkedBlobStore{
registry: repo.registry,
blobStore: repo.blobStore,
blobServer: repo.blobServer,
blobAccessController: statter,
repository: repo,
ctx: ctx,
// TODO(stevvooe): linkPath limits this blob store to only layers.
// This instance cannot be used for manifest checks.
linkPath: blobLinkPath,
linkDirectoryPathSpec: layersPathSpec{name: repo.name.Name()},
deleteEnabled: repo.registry.deleteEnabled,
resumableDigestEnabled: repo.resumableDigestEnabled,
}
}
package storage
import (
"context"
"errors"
"fmt"
"net/url"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/internal/dcontext"
"github.com/distribution/distribution/v3/manifest/schema2"
"github.com/opencontainers/go-digest"
)
var (
errMissingURL = errors.New("missing URL on layer")
errInvalidURL = errors.New("invalid URL on layer")
)
// schema2ManifestHandler is a ManifestHandler that covers schema2 manifests.
type schema2ManifestHandler struct {
repository distribution.Repository
blobStore distribution.BlobStore
ctx context.Context
manifestURLs manifestURLs
}
var _ ManifestHandler = &schema2ManifestHandler{}
func (ms *schema2ManifestHandler) Unmarshal(ctx context.Context, dgst digest.Digest, content []byte) (distribution.Manifest, error) {
dcontext.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Unmarshal")
m := &schema2.DeserializedManifest{}
if err := m.UnmarshalJSON(content); err != nil {
return nil, err
}
return m, nil
}
func (ms *schema2ManifestHandler) Put(ctx context.Context, manifest distribution.Manifest, skipDependencyVerification bool) (digest.Digest, error) {
dcontext.GetLogger(ms.ctx).Debug("(*schema2ManifestHandler).Put")
m, ok := manifest.(*schema2.DeserializedManifest)
if !ok {
return "", fmt.Errorf("non-schema2 manifest put to schema2ManifestHandler: %T", manifest)
}
if err := ms.verifyManifest(ms.ctx, *m, skipDependencyVerification); err != nil {
return "", err
}
mt, payload, err := m.Payload()
if err != nil {
return "", err
}
revision, err := ms.blobStore.Put(ctx, mt, payload)
if err != nil {
dcontext.GetLogger(ctx).Errorf("error putting payload into blobstore: %v", err)
return "", err
}
return revision.Digest, nil
}
// verifyManifest ensures that the manifest content is valid from the
// perspective of the registry. As a policy, the registry only tries to store
// valid content, leaving trust policies of that content up to consumers.
func (ms *schema2ManifestHandler) verifyManifest(ctx context.Context, mnfst schema2.DeserializedManifest, skipDependencyVerification bool) error {
var errs distribution.ErrManifestVerification
if mnfst.Manifest.SchemaVersion != 2 {
return fmt.Errorf("unrecognized manifest schema version %d", mnfst.Manifest.SchemaVersion)
}
if skipDependencyVerification {
return nil
}
manifestService, err := ms.repository.Manifests(ctx)
if err != nil {
return err
}
blobsService := ms.repository.Blobs(ctx)
for _, descriptor := range mnfst.References() {
err := descriptor.Digest.Validate()
if err != nil {
errs = append(errs, err, distribution.ErrManifestBlobUnknown{Digest: descriptor.Digest})
continue
}
switch descriptor.MediaType {
case schema2.MediaTypeForeignLayer:
// Clients download this layer from an external URL, so do not check for
// its presence.
if len(descriptor.URLs) == 0 {
err = errMissingURL
}
allow := ms.manifestURLs.allow
deny := ms.manifestURLs.deny
for _, u := range descriptor.URLs {
var pu *url.URL
pu, err = url.Parse(u)
if err != nil || (pu.Scheme != "http" && pu.Scheme != "https") || pu.Fragment != "" || (allow != nil && !allow.MatchString(u)) || (deny != nil && deny.MatchString(u)) {
err = errInvalidURL
break
}
}
case schema2.MediaTypeManifest:
var exists bool
exists, err = manifestService.Exists(ctx, descriptor.Digest)
if err != nil || !exists {
err = distribution.ErrBlobUnknown // just coerce to unknown.
}
if err != nil {
dcontext.GetLogger(ms.ctx).WithError(err).Debugf("failed to ensure exists of %v in manifest service", descriptor.Digest)
}
fallthrough // double check the blob store.
default:
// check its presence
_, err = blobsService.Stat(ctx, descriptor.Digest)
}
if err != nil {
if err != distribution.ErrBlobUnknown {
errs = append(errs, err)
}
// On error here, we always append unknown blob errors.
errs = append(errs, distribution.ErrManifestBlobUnknown{Digest: descriptor.Digest})
}
}
if len(errs) != 0 {
return errs
}
return nil
}
package storage
import (
"context"
"path"
"sort"
"sync"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/sync/errgroup"
"github.com/distribution/distribution/v3"
storagedriver "github.com/distribution/distribution/v3/registry/storage/driver"
)
var _ distribution.TagService = &tagStore{}
// tagStore provides methods to manage manifest tags in a backend storage driver.
// This implementation uses the same on-disk layout as the (now deleted) tag
// store, providing backward compatibility with current registry deployments,
// which only make use of the Digest field of the returned v1.Descriptor
// but do not require full round-tripping of Descriptor objects.
type tagStore struct {
repository *repository
blobStore *blobStore
concurrencyLimit int
}
// All returns all tags
func (ts *tagStore) All(ctx context.Context) ([]string, error) {
pathSpec, err := pathFor(manifestTagsPathSpec{
name: ts.repository.Named().Name(),
})
if err != nil {
return nil, err
}
entries, err := ts.blobStore.driver.List(ctx, pathSpec)
if err != nil {
switch err := err.(type) {
case storagedriver.PathNotFoundError:
return nil, distribution.ErrRepositoryUnknown{Name: ts.repository.Named().Name()}
default:
return nil, err
}
}
tags := make([]string, 0, len(entries))
for _, entry := range entries {
_, filename := path.Split(entry)
tags = append(tags, filename)
}
// there is no guarantee for the order,
// therefore sort before return.
sort.Strings(tags)
return tags, nil
}
// Tag tags the digest with the given tag, updating the tag's current link to
// point at the digest. The digest must point to a manifest.
func (ts *tagStore) Tag(ctx context.Context, tag string, desc v1.Descriptor) error {
currentPath, err := pathFor(manifestTagCurrentPathSpec{
name: ts.repository.Named().Name(),
tag: tag,
})
if err != nil {
return err
}
lbs := ts.linkedBlobStore(ctx, tag)
// Link into the index
if err := lbs.linkBlob(ctx, desc); err != nil {
return err
}
// Overwrite the current link
return ts.blobStore.link(ctx, currentPath, desc.Digest)
}
// Get resolves the current revision for the given tag.
func (ts *tagStore) Get(ctx context.Context, tag string) (v1.Descriptor, error) {
currentPath, err := pathFor(manifestTagCurrentPathSpec{
name: ts.repository.Named().Name(),
tag: tag,
})
if err != nil {
return v1.Descriptor{}, err
}
revision, err := ts.blobStore.readlink(ctx, currentPath)
if err != nil {
switch err.(type) {
case storagedriver.PathNotFoundError:
return v1.Descriptor{}, distribution.ErrTagUnknown{Tag: tag}
}
return v1.Descriptor{}, err
}
return v1.Descriptor{Digest: revision}, nil
}
// Untag removes the tag association
func (ts *tagStore) Untag(ctx context.Context, tag string) error {
tagPath, err := pathFor(manifestTagPathSpec{
name: ts.repository.Named().Name(),
tag: tag,
})
if err != nil {
return err
}
return ts.blobStore.driver.Delete(ctx, tagPath)
}
// linkedBlobStore returns the linkedBlobStore for the named tag, allowing one
// to index manifest blobs by tag name. While the tag store doesn't map
// precisely to the linked blob store, using this ensures the links are
// managed via the same code path.
func (ts *tagStore) linkedBlobStore(ctx context.Context, tag string) *linkedBlobStore {
return &linkedBlobStore{
blobStore: ts.blobStore,
repository: ts.repository,
ctx: ctx,
linkPath: func(name string, dgst digest.Digest) (string, error) {
return pathFor(manifestTagIndexEntryLinkPathSpec{
name: name,
tag: tag,
revision: dgst,
})
},
}
}
// Lookup recovers a list of tags which refer to this digest. When a manifest is deleted by
// digest, tag entries which point to it need to be recovered to avoid dangling tags.
func (ts *tagStore) Lookup(ctx context.Context, desc v1.Descriptor) ([]string, error) {
allTags, err := ts.All(ctx)
switch err.(type) {
case distribution.ErrRepositoryUnknown:
// This tag store has been initialized but not yet populated
break
case nil:
break
default:
return nil, err
}
g, ctx := errgroup.WithContext(ctx)
g.SetLimit(ts.concurrencyLimit)
var (
tags []string
mu sync.Mutex
)
for _, tag := range allTags {
if ctx.Err() != nil {
break
}
tag := tag
g.Go(func() error {
tagLinkPathSpec := manifestTagCurrentPathSpec{
name: ts.repository.Named().Name(),
tag: tag,
}
tagLinkPath, _ := pathFor(tagLinkPathSpec)
tagDigest, err := ts.blobStore.readlink(ctx, tagLinkPath)
if err != nil {
switch err.(type) {
case storagedriver.PathNotFoundError:
return nil
}
return err
}
if tagDigest == desc.Digest {
mu.Lock()
tags = append(tags, tag)
mu.Unlock()
}
return nil
})
}
err = g.Wait()
if err != nil {
return nil, err
}
return tags, nil
}
func (ts *tagStore) ManifestDigests(ctx context.Context, tag string) ([]digest.Digest, error) {
tagLinkPath := func(name string, dgst digest.Digest) (string, error) {
return pathFor(manifestTagIndexEntryLinkPathSpec{
name: name,
tag: tag,
revision: dgst,
})
}
lbs := &linkedBlobStore{
blobStore: ts.blobStore,
blobAccessController: &linkedBlobStatter{
blobStore: ts.blobStore,
repository: ts.repository,
linkPath: manifestRevisionLinkPath,
},
repository: ts.repository,
ctx: ctx,
linkPath: tagLinkPath,
linkDirectoryPathSpec: manifestTagIndexPathSpec{
name: ts.repository.Named().Name(),
tag: tag,
},
}
var dgsts []digest.Digest
err := lbs.Enumerate(ctx, func(dgst digest.Digest) error {
dgsts = append(dgsts, dgst)
return nil
})
if err != nil {
return nil, err
}
return dgsts, nil
}
package storage
import (
"context"
"path"
"github.com/distribution/distribution/v3/internal/dcontext"
"github.com/distribution/distribution/v3/registry/storage/driver"
"github.com/opencontainers/go-digest"
)
// vacuum contains functions for cleaning up repositories and blobs
// These functions will only reliably work on strongly consistent
// storage systems.
// https://en.wikipedia.org/wiki/Consistency_model
// NewVacuum creates a new Vacuum
func NewVacuum(ctx context.Context, driver driver.StorageDriver) Vacuum {
return Vacuum{
ctx: ctx,
driver: driver,
}
}
// Vacuum removes content from the filesystem
type Vacuum struct {
driver driver.StorageDriver
ctx context.Context
}
// RemoveBlob removes a blob from the filesystem
func (v Vacuum) RemoveBlob(dgst string) error {
d, err := digest.Parse(dgst)
if err != nil {
return err
}
blobPath, err := pathFor(blobPathSpec{digest: d})
if err != nil {
return err
}
dcontext.GetLogger(v.ctx).Infof("Deleting blob: %s", blobPath)
err = v.driver.Delete(v.ctx, blobPath)
if err != nil {
return err
}
return nil
}
// RemoveManifest removes a manifest from the filesystem
func (v Vacuum) RemoveManifest(name string, dgst digest.Digest, tags []string) error {
// Remove each tag manifest reference; if one is not found, continue to the next.
for _, tag := range tags {
tagsPath, err := pathFor(manifestTagIndexEntryPathSpec{name: name, revision: dgst, tag: tag})
if err != nil {
return err
}
_, err = v.driver.Stat(v.ctx, tagsPath)
if err != nil {
switch err := err.(type) {
case driver.PathNotFoundError:
continue
default:
return err
}
}
dcontext.GetLogger(v.ctx).Infof("deleting manifest tag reference: %s", tagsPath)
err = v.driver.Delete(v.ctx, tagsPath)
if err != nil {
return err
}
}
manifestPath, err := pathFor(manifestRevisionPathSpec{name: name, revision: dgst})
if err != nil {
return err
}
dcontext.GetLogger(v.ctx).Infof("deleting manifest: %s", manifestPath)
return v.driver.Delete(v.ctx, manifestPath)
}
// RemoveRepository removes a repository directory from the
// filesystem
func (v Vacuum) RemoveRepository(repoName string) error {
rootForRepository, err := pathFor(repositoriesRootPathSpec{})
if err != nil {
return err
}
repoDir := path.Join(rootForRepository, repoName)
dcontext.GetLogger(v.ctx).Infof("Deleting repo: %s", repoDir)
err = v.driver.Delete(v.ctx, repoDir)
if err != nil {
return err
}
return nil
}
// RemoveLayer removes a layer link path from the storage
func (v Vacuum) RemoveLayer(repoName string, dgst digest.Digest) error {
layerLinkPath, err := pathFor(layerLinkPathSpec{name: repoName, digest: dgst})
if err != nil {
return err
}
dcontext.GetLogger(v.ctx).Infof("Deleting layer link path: %s", layerLinkPath)
err = v.driver.Delete(v.ctx, layerLinkPath)
if err != nil {
return err
}
return nil
}
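// Example (hypothetical sketch, not part of the package): wiring Vacuum into a
// cleanup pass against the in-memory driver. The repository name is an
// illustrative value.
package storage
import (
	"context"
	"github.com/distribution/distribution/v3/registry/storage/driver/inmemory"
)
func vacuumUnusedRepo(ctx context.Context) error {
	v := NewVacuum(ctx, inmemory.New())
	// RemoveRepository deletes the whole directory tree for the repository,
	// including manifest revisions, tag indexes and layer links.
	return v.RemoveRepository("test/unused-repo")
}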
package testutil
import (
"bytes"
"fmt"
"io"
"maps"
"net/http"
"net/url"
"sort"
"strings"
)
// RequestResponseMap is an ordered mapping from Requests to Responses
type RequestResponseMap []RequestResponseMapping
// RequestResponseMapping defines a Response to be sent in response to a given
// Request
type RequestResponseMapping struct {
Request Request
Response Response
}
// Request is a simplified http.Request object
type Request struct {
// Method is the http method of the request, for example GET
Method string
// Route is the http route of this request
Route string
// QueryParams are the query parameters of this request
QueryParams map[string][]string
// Body is the byte contents of the http request
Body []byte
// Headers are the headers for this request
Headers http.Header
}
func (r Request) String() string {
queryString := ""
if len(r.QueryParams) > 0 {
keys := make([]string, 0, len(r.QueryParams))
queryParts := make([]string, 0, len(r.QueryParams))
for k := range r.QueryParams {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
for _, val := range r.QueryParams[k] {
queryParts = append(queryParts, fmt.Sprintf("%s=%s", k, url.QueryEscape(val)))
}
}
queryString = "?" + strings.Join(queryParts, "&")
}
var headers []string
if len(r.Headers) > 0 {
var headerKeys []string
for k := range r.Headers {
headerKeys = append(headerKeys, k)
}
sort.Strings(headerKeys)
for _, k := range headerKeys {
for _, val := range r.Headers[k] {
headers = append(headers, fmt.Sprintf("%s:%s", k, val))
}
}
}
return fmt.Sprintf("%s %s%s\n%s\n%s", r.Method, r.Route, queryString, headers, r.Body)
}
// Response is a simplified http.Response object
type Response struct {
// StatusCode is the http status code of the Response
StatusCode int
// Headers are the http headers of this Response
Headers http.Header
// Body is the response body
Body []byte
}
// testHandler is an http.Handler with a defined mapping from Request to an
// ordered list of Response objects
type testHandler struct {
responseMap map[string][]Response
}
// NewHandler returns a new test handler that responds to defined requests
// with specified responses. Each time a Request is received, the next Response
// in the mapping is returned, until no Responses remain, at which point a 404
// is sent back.
func NewHandler(requestResponseMap RequestResponseMap) http.Handler {
responseMap := make(map[string][]Response)
for _, mapping := range requestResponseMap {
responses, ok := responseMap[mapping.Request.String()]
if ok {
responseMap[mapping.Request.String()] = append(responses, mapping.Response)
} else {
responseMap[mapping.Request.String()] = []Response{mapping.Response}
}
}
return &testHandler{responseMap: responseMap}
}
func (app *testHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
requestBody, _ := io.ReadAll(r.Body)
request := Request{
Method: r.Method,
Route: r.URL.Path,
QueryParams: r.URL.Query(),
Body: requestBody,
Headers: make(map[string][]string),
}
// Add headers of interest here
for k, v := range r.Header {
if k == "If-None-Match" {
request.Headers[k] = v
}
}
responses, ok := app.responseMap[request.String()]
if !ok || len(responses) == 0 {
http.NotFound(w, r)
return
}
response := responses[0]
app.responseMap[request.String()] = responses[1:]
responseHeader := w.Header()
maps.Copy(responseHeader, response.Headers)
w.WriteHeader(response.StatusCode)
if _, err := io.Copy(w, bytes.NewReader(response.Body)); err != nil {
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
return
}
}
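// Example (hypothetical sketch): serving one canned response through
// NewHandler with net/http/httptest. The route and body are illustrative.
package testutil
import (
	"net/http"
	"net/http/httptest"
)
func newCannedServer() *httptest.Server {
	m := RequestResponseMap{{
		Request:  Request{Method: http.MethodGet, Route: "/v2/"},
		Response: Response{StatusCode: http.StatusOK, Body: []byte("{}")},
	}}
	// The first matching GET /v2/ consumes the queued Response; subsequent
	// matches fall through to a 404.
	return httptest.NewServer(NewHandler(m))
}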
package testutil
import (
"fmt"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/internal/dcontext"
"github.com/distribution/distribution/v3/manifest/manifestlist"
"github.com/distribution/distribution/v3/manifest/ocischema"
"github.com/distribution/distribution/v3/manifest/schema2"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// MakeManifestList constructs a manifest list out of a list of manifest digests
func MakeManifestList(blobstatter distribution.BlobStatter, manifestDigests []digest.Digest) (*manifestlist.DeserializedManifestList, error) {
ctx := dcontext.Background()
manifestDescriptors := make([]manifestlist.ManifestDescriptor, 0, len(manifestDigests))
for _, manifestDigest := range manifestDigests {
descriptor, err := blobstatter.Stat(ctx, manifestDigest)
if err != nil {
return nil, err
}
platformSpec := manifestlist.PlatformSpec{
Architecture: "atari2600",
OS: "CP/M",
Variant: "ternary",
Features: []string{"VLIW", "superscalaroutoforderdevnull"},
}
manifestDescriptor := manifestlist.ManifestDescriptor{
Descriptor: descriptor,
Platform: platformSpec,
}
manifestDescriptors = append(manifestDescriptors, manifestDescriptor)
}
return manifestlist.FromDescriptors(manifestDescriptors)
}
// MakeSchema2Manifest constructs a schema 2 manifest from a given list of digests and returns
// the digest of the manifest
func MakeSchema2Manifest(repository distribution.Repository, digests []digest.Digest) (distribution.Manifest, error) {
ctx := dcontext.Background()
blobStore := repository.Blobs(ctx)
var configJSON []byte
d, err := blobStore.Put(ctx, schema2.MediaTypeImageConfig, configJSON)
if err != nil {
return nil, fmt.Errorf("unexpected error storing content in blobstore: %v", err)
}
builder := schema2.NewManifestBuilder(d, configJSON)
for _, dgst := range digests {
if err := builder.AppendReference(v1.Descriptor{Digest: dgst}); err != nil {
return nil, fmt.Errorf("unexpected error building schema2 manifest: %v", err)
}
}
mfst, err := builder.Build(ctx)
if err != nil {
return nil, fmt.Errorf("unexpected error generating schema2 manifest: %v", err)
}
return mfst, nil
}
// MakeOCIManifest constructs an OCI manifest from a given list of digests.
func MakeOCIManifest(repository distribution.Repository, digests []digest.Digest) (distribution.Manifest, error) {
ctx := dcontext.Background()
blobStore := repository.Blobs(ctx)
var configJSON []byte
builder := ocischema.NewManifestBuilder(blobStore, configJSON, make(map[string]string))
for _, dgst := range digests {
if err := builder.AppendReference(v1.Descriptor{Digest: dgst}); err != nil {
return nil, fmt.Errorf("unexpected error building OCI manifest: %v", err)
}
}
mfst, err := builder.Build(ctx)
if err != nil {
return nil, fmt.Errorf("unexpected error generating OCI manifest: %v", err)
}
return mfst, nil
}
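// Example (hypothetical sketch): combining the helpers above and below to
// build an OCI manifest over freshly generated layers. The repository value is
// assumed to come from the test under construction.
package testutil
import (
	"github.com/distribution/distribution/v3"
	"github.com/opencontainers/go-digest"
)
func makeManifestForRandomLayers(repository distribution.Repository) (distribution.Manifest, error) {
	layers, err := CreateRandomLayers(2)
	if err != nil {
		return nil, err
	}
	if err := UploadBlobs(repository, layers); err != nil {
		return nil, err
	}
	// Collect the layer digests in map order; ordering is irrelevant here.
	digests := make([]digest.Digest, 0, len(layers))
	for dgst := range layers {
		digests = append(digests, dgst)
	}
	return MakeOCIManifest(repository, digests)
}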
package testutil
import (
"context"
"fmt"
"io"
"github.com/distribution/distribution/v3"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// PushBlob pushes a blob with the given digest to the given repository.
func PushBlob(ctx context.Context, repository distribution.Repository, blobReader io.ReadSeeker, dgst digest.Digest) error {
blobs := repository.Blobs(ctx)
wr, err := blobs.Create(ctx)
if err != nil {
return fmt.Errorf("error creating layer upload: %v", err)
}
// Exercise the resume path as well by reopening the upload session
// before writing any data.
wr, err = blobs.Resume(ctx, wr.ID())
if err != nil {
return fmt.Errorf("error resuming layer upload: %v", err)
}
if _, err := io.Copy(wr, blobReader); err != nil {
return fmt.Errorf("unexpected error uploading: %v", err)
}
if _, err := wr.Commit(ctx, v1.Descriptor{Digest: dgst}); err != nil {
return fmt.Errorf("unexpected error finishing upload: %v", err)
}
return nil
}
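// Example (hypothetical sketch): pushing a randomly generated layer with
// PushBlob. The repository value is assumed.
package testutil
import (
	"context"
	"github.com/distribution/distribution/v3"
)
func pushRandomLayer(ctx context.Context, repository distribution.Repository) error {
	rs, dgst, err := CreateRandomTarFile()
	if err != nil {
		return err
	}
	// The digest must match the uploaded bytes or Commit will fail.
	return PushBlob(ctx, repository, rs, dgst)
}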
package testutil
import (
"archive/tar"
"bytes"
crand "crypto/rand"
"fmt"
"io"
mrand "math/rand"
"time"
"github.com/distribution/distribution/v3"
"github.com/distribution/distribution/v3/internal/dcontext"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// CreateRandomTarFile creates a random tarfile, returning it as an
// io.ReadSeeker along with its digest. An error is returned if there is a
// problem generating valid content.
func CreateRandomTarFile() (rs io.ReadSeeker, dgst digest.Digest, err error) {
nFiles := mrand.Intn(10) + 10
target := &bytes.Buffer{}
wr := tar.NewWriter(target)
// Perturb this on each iteration of the loop below.
header := &tar.Header{
Mode: 0o644,
ModTime: time.Now(),
Typeflag: tar.TypeReg,
Uname: "randocalrissian",
Gname: "cloudcity",
AccessTime: time.Now(),
ChangeTime: time.Now(),
}
for fileNumber := 0; fileNumber < nFiles; fileNumber++ {
fileSize := mrand.Int63n(1<<20) + 1<<20
header.Name = fmt.Sprint(fileNumber)
header.Size = fileSize
if err := wr.WriteHeader(header); err != nil {
return nil, "", err
}
randomData := make([]byte, fileSize)
// Fill up the buffer with some random data.
n, err := crand.Read(randomData)
if n != len(randomData) {
return nil, "", fmt.Errorf("short read creating random reader: %v bytes != %v bytes", n, len(randomData))
}
if err != nil {
return nil, "", err
}
nn, err := io.Copy(wr, bytes.NewReader(randomData))
if nn != fileSize {
return nil, "", fmt.Errorf("short copy writing random file to tar")
}
if err != nil {
return nil, "", err
}
if err := wr.Flush(); err != nil {
return nil, "", err
}
}
if err := wr.Close(); err != nil {
return nil, "", err
}
dgst = digest.FromBytes(target.Bytes())
return bytes.NewReader(target.Bytes()), dgst, nil
}
// CreateRandomLayers returns a map from digest to layer content for n randomly
// generated layers. We don't particularly care about the order of said digests
// (since they're all random anyway).
func CreateRandomLayers(n int) (map[digest.Digest]io.ReadSeeker, error) {
digestMap := map[digest.Digest]io.ReadSeeker{}
for i := 0; i < n; i++ {
rs, ds, err := CreateRandomTarFile()
if err != nil {
return nil, fmt.Errorf("unexpected error generating test layer file: %v", err)
}
digestMap[ds] = rs
}
return digestMap, nil
}
// UploadBlobs uploads each of the given layers to the repository.
func UploadBlobs(repository distribution.Repository, layers map[digest.Digest]io.ReadSeeker) error {
ctx := dcontext.Background()
for dgst, rs := range layers {
wr, err := repository.Blobs(ctx).Create(ctx)
if err != nil {
return fmt.Errorf("unexpected error creating upload: %v", err)
}
if _, err := io.Copy(wr, rs); err != nil {
return fmt.Errorf("unexpected error copying to upload: %v", err)
}
if _, err := wr.Commit(ctx, v1.Descriptor{Digest: dgst}); err != nil {
return fmt.Errorf("unexpected error committinng upload: %v", err)
}
}
return nil
}
package tracing
import (
"context"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
)
// compositeExporter is a custom exporter that wraps multiple SpanExporters.
// It allows you to export spans to multiple destinations, e.g., different telemetry backends.
type compositeExporter struct {
exporters []sdktrace.SpanExporter
}
func newCompositeExporter(exporters ...sdktrace.SpanExporter) *compositeExporter {
return &compositeExporter{exporters: exporters}
}
// ExportSpans iterates over each SpanExporter in the compositeExporter and
// exports the spans. If any exporter returns an error, exporting stops and the
// error is returned, so a failure in one backend is surfaced rather than
// silently dropped.
func (ce *compositeExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
for _, exporter := range ce.exporters {
if err := exporter.ExportSpans(ctx, spans); err != nil {
return err
}
}
return nil
}
// Shutdown iterates over each SpanExporter in the compositeExporter and
// shuts them down. If any exporter returns an error during shutdown, the process
// is stopped and the error is returned. This ensures proper shutdown of all exporters.
func (ce *compositeExporter) Shutdown(ctx context.Context) error {
for _, exporter := range ce.exporters {
if err := exporter.Shutdown(ctx); err != nil {
return err
}
}
return nil
}
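// Example (hypothetical sketch): fanning spans out to two backends by wrapping
// an existing exporter together with a stdout exporter. The otlp parameter is
// an assumption standing in for any configured exporter.
package tracing
import (
	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)
func withStdoutMirror(otlp sdktrace.SpanExporter) (sdktrace.SpanExporter, error) {
	stdout, err := stdouttrace.New()
	if err != nil {
		return nil, err
	}
	// Spans go to otlp first, then stdout; the first error aborts the fan-out.
	return newCompositeExporter(otlp, stdout), nil
}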
package tracing
import "github.com/distribution/distribution/v3/internal/dcontext"
// loggerWriter is a custom writer that implements the io.Writer interface.
// It is designed to redirect log messages to the Logger interface, specifically
// for use with OpenTelemetry's stdouttrace exporter.
type loggerWriter struct {
logger dcontext.Logger // Use the Logger interface
}
// Write logs the data using the Debug level of the provided logger.
func (lw *loggerWriter) Write(p []byte) (n int, err error) {
lw.logger.Debug(string(p))
return len(p), nil
}
// Handle logs the error using the Error level of the provided logger.
func (lw *loggerWriter) Handle(err error) {
lw.logger.Error(err)
}
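// Compile-time checks (a sketch of the contract, not in the original):
// loggerWriter must satisfy io.Writer for stdouttrace.WithWriter and
// otel.ErrorHandler for otel.SetErrorHandler, both used in InitOpenTelemetry
// below.
package tracing
import (
	"io"
	"go.opentelemetry.io/otel"
)
var (
	_ io.Writer         = (*loggerWriter)(nil)
	_ otel.ErrorHandler = (*loggerWriter)(nil)
)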
package tracing
import (
"context"
"github.com/distribution/distribution/v3/internal/dcontext"
"github.com/distribution/distribution/v3/version"
"go.opentelemetry.io/contrib/exporters/autoexport"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
)
const (
// serviceName is the trace service name
serviceName = "distribution"
// defaultSamplingRatio is the default trace sampling ratio
defaultSamplingRatio = 1
// AttributePrefix defines a standardized prefix for custom telemetry attributes
// associated with the CNCF Distribution project.
AttributePrefix = "io.cncf.distribution."
)
// InitOpenTelemetry initializes OpenTelemetry for the application. This function sets up the
// necessary components for collecting telemetry data, such as traces.
func InitOpenTelemetry(ctx context.Context) error {
res := resource.NewWithAttributes(
semconv.SchemaURL,
semconv.ServiceNameKey.String(serviceName),
semconv.ServiceVersionKey.String(version.Version()),
)
autoExp, err := autoexport.NewSpanExporter(ctx)
if err != nil {
return err
}
lw := &loggerWriter{
logger: dcontext.GetLogger(ctx),
}
loggerExp, err := stdouttrace.New(stdouttrace.WithWriter(lw))
if err != nil {
return err
}
compositeExp := newCompositeExporter(autoExp, loggerExp)
sp := sdktrace.NewBatchSpanProcessor(compositeExp)
provider := sdktrace.NewTracerProvider(
sdktrace.WithSampler(sdktrace.TraceIDRatioBased(defaultSamplingRatio)),
sdktrace.WithResource(res),
sdktrace.WithSpanProcessor(sp),
)
otel.SetTracerProvider(provider)
otel.SetErrorHandler(lw)
pr := propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})
otel.SetTextMapPropagator(pr)
return nil
}
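// Example (hypothetical sketch): initializing tracing once at process start.
// With the autoexport package, the span destination is selected via the
// standard OTEL_TRACES_EXPORTER and OTEL_EXPORTER_OTLP_ENDPOINT environment
// variables.
package tracing
import (
	"context"
	"log"
)
func exampleInit() {
	if err := InitOpenTelemetry(context.Background()); err != nil {
		log.Fatalf("failed to initialize OpenTelemetry: %v", err)
	}
}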
package version
import (
"fmt"
"io"
"os"
)
// Package returns the overall, canonical project import path under
// which the package was built.
func Package() string {
return mainpkg
}
// Version returns the module version the running binary was
// built from.
func Version() string {
return version
}
// Revision returns the VCS (e.g. git) revision being used to build
// the program at linking time.
func Revision() string {
return revision
}
// FprintVersion outputs the version string to the writer, in the following
// format, followed by a newline:
//
// <cmd> <project> <version>
//
// For example, a binary "registry" built from github.com/distribution/distribution
// with version "v2.0" would print the following:
//
// registry github.com/distribution/distribution v2.0
func FprintVersion(w io.Writer) {
fmt.Fprintln(w, os.Args[0], Package(), Version())
}
// PrintVersion outputs the version information, from FprintVersion, to stdout.
func PrintVersion() {
FprintVersion(os.Stdout)
}
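// Example (hypothetical sketch): capturing the version banner in a buffer
// instead of writing it to stdout, since FprintVersion accepts any io.Writer.
package version
import "bytes"
func versionBanner() string {
	var buf bytes.Buffer
	FprintVersion(&buf)
	return buf.String()
}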