// SPDX-FileCopyrightText: Copyright The Lima Authors
// SPDX-License-Identifier: Apache-2.0
package cidata
import (
"compress/gzip"
"errors"
"fmt"
"io"
"maps"
"net"
"net/url"
"os"
"path"
"path/filepath"
"slices"
"strconv"
"strings"
"time"
"github.com/docker/go-units"
"github.com/sirupsen/logrus"
"github.com/lima-vm/lima/pkg/debugutil"
"github.com/lima-vm/lima/pkg/instance/hostname"
"github.com/lima-vm/lima/pkg/iso9660util"
"github.com/lima-vm/lima/pkg/limayaml"
"github.com/lima-vm/lima/pkg/localpathutil"
"github.com/lima-vm/lima/pkg/networks"
"github.com/lima-vm/lima/pkg/networks/usernet"
"github.com/lima-vm/lima/pkg/osutil"
"github.com/lima-vm/lima/pkg/sshutil"
"github.com/lima-vm/lima/pkg/store/filenames"
)
// netLookupIP resolves a hostname to its IP addresses.
// It is a package-level variable so tests can stub out DNS resolution.
// Lookup failures are logged at debug level and reported as a nil slice.
var netLookupIP = func(host string) []net.IP {
	resolved, lookupErr := net.LookupIP(host)
	if lookupErr != nil {
		logrus.Debugf("net.LookupIP %s: %s", host, lookupErr)
		return nil
	}
	return resolved
}
// setupEnv computes the environment variable map to be passed to the guest.
//
// Precedence (lowest to highest): host system proxy settings, env.* from
// lima.yaml, and (only when propagateProxyEnv is true) the limactl process
// environment. Loopback proxy hosts are rewritten to slirpGateway so they
// remain reachable from inside the VM, and upper/lowercase proxy variable
// pairs are reconciled (lowercase wins on conflict).
func setupEnv(instConfigEnv map[string]string, propagateProxyEnv bool, slirpGateway string) (map[string]string, error) {
	// Start with the proxy variables from the system settings.
	env, err := osutil.ProxySettings()
	if err != nil {
		return env, err
	}
	// env.* settings from lima.yaml override system settings without giving a warning
	maps.Copy(env, instConfigEnv)
	// Current process environment setting override both system settings and env.*
	lowerVars := []string{"ftp_proxy", "http_proxy", "https_proxy", "no_proxy"}
	upperVars := make([]string, len(lowerVars))
	for i, name := range lowerVars {
		upperVars[i] = strings.ToUpper(name)
	}
	if propagateProxyEnv {
		for _, name := range append(lowerVars, upperVars...) {
			if value, ok := os.LookupEnv(name); ok {
				// Warn only when the process env actually changes an existing value.
				if _, ok := env[name]; ok && value != env[name] {
					logrus.Infof("Overriding %q value %q with %q from limactl process environment",
						name, env[name], value)
				}
				env[name] = value
			}
		}
	}
	// Replace IP that IsLoopback in proxy settings with the gateway address
	// Delete settings with empty values, so the user can choose to ignore system settings.
	for _, name := range append(lowerVars, upperVars...) {
		value, ok := env[name]
		if ok && value == "" {
			delete(env, name)
		} else if ok && !strings.EqualFold(name, "no_proxy") {
			u, err := url.Parse(value)
			if err != nil {
				logrus.Warnf("Ignoring invalid proxy %q=%v: %s", name, value, err)
				continue
			}
			// If the proxy host resolves to a loopback address, point it at the
			// slirp gateway instead (the host's loopback is unreachable from the guest).
			for _, ip := range netLookupIP(u.Hostname()) {
				if ip.IsLoopback() {
					newHost := slirpGateway
					if u.Port() != "" {
						newHost = net.JoinHostPort(newHost, u.Port())
					}
					u.Host = newHost
					value = u.String()
				}
			}
			if value != env[name] {
				logrus.Infof("Replacing %q value %q with %q", name, env[name], value)
				env[name] = value
			}
		}
	}
	// Make sure uppercase variants have the same value as lowercase ones.
	// If both are set, the lowercase variant value takes precedence.
	for _, lowerName := range lowerVars {
		upperName := strings.ToUpper(lowerName)
		if _, ok := env[lowerName]; ok {
			if _, ok := env[upperName]; ok && env[lowerName] != env[upperName] {
				logrus.Warnf("Changing %q value from %q to %q to match %q",
					upperName, env[upperName], env[lowerName], lowerName)
			}
			env[upperName] = env[lowerName]
		} else if _, ok := env[upperName]; ok {
			env[lowerName] = env[upperName]
		}
	}
	return env, nil
}
// templateArgs assembles the TemplateArgs used to render the cidata templates
// for one instance.
//
// bootScripts controls whether boot-time scripts are included (true for the
// ISO, false for the plain cloud-config document). udpDNSLocalPort and
// tcpDNSLocalPort are the host-side ports of the built-in host resolver;
// vsockPort/virtioPort identify the guest-agent channel. instConfig is
// validated (without filling defaults) before use, so the many pointer
// dereferences below assume defaults were already filled by the caller.
func templateArgs(bootScripts bool, instDir, name string, instConfig *limayaml.LimaYAML, udpDNSLocalPort, tcpDNSLocalPort, vsockPort int, virtioPort string) (*TemplateArgs, error) {
	if err := limayaml.Validate(instConfig, false); err != nil {
		return nil, err
	}
	// Fixed nerdctl archive name; ISO9660 limits path length, see GenerateISO9660.
	archive := "nerdctl-full.tgz"
	args := TemplateArgs{
		Debug:              debugutil.Debug,
		BootScripts:        bootScripts,
		Name:               name,
		Hostname:           hostname.FromInstName(name), // TODO: support customization
		User:               *instConfig.User.Name,
		Comment:            *instConfig.User.Comment,
		Home:               *instConfig.User.Home,
		Shell:              *instConfig.User.Shell,
		UID:                *instConfig.User.UID,
		GuestInstallPrefix: *instConfig.GuestInstallPrefix,
		UpgradePackages:    *instConfig.UpgradePackages,
		Containerd:         Containerd{System: *instConfig.Containerd.System, User: *instConfig.Containerd.User, Archive: archive},
		SlirpNICName:       networks.SlirpNICName,
		RosettaEnabled:     *instConfig.Rosetta.Enabled,
		RosettaBinFmt:      *instConfig.Rosetta.BinFmt,
		VMType:             *instConfig.VMType,
		VSockPort:          vsockPort,
		VirtioPort:         virtioPort,
		Plain:              *instConfig.Plain,
		TimeZone:           *instConfig.TimeZone,
		Param:              instConfig.Param,
	}
	// Slirp gateway/DNS depend on whether a usernet network is configured:
	// with usernet, both come from the usernet subnet; otherwise from the
	// default slirp network (VZ resolves DNS via the gateway itself).
	firstUsernetIndex := limayaml.FirstUsernetIndex(instConfig)
	var subnet net.IP
	var err error
	if firstUsernetIndex != -1 {
		usernetName := instConfig.Networks[firstUsernetIndex].Lima
		subnet, err = usernet.Subnet(usernetName)
		if err != nil {
			return nil, err
		}
		args.SlirpGateway = usernet.GatewayIP(subnet)
		args.SlirpDNS = usernet.GatewayIP(subnet)
	} else {
		subnet, _, err = net.ParseCIDR(networks.SlirpNetwork)
		if err != nil {
			return nil, err
		}
		args.SlirpGateway = usernet.GatewayIP(subnet)
		if *instConfig.VMType == limayaml.VZ {
			args.SlirpDNS = usernet.GatewayIP(subnet)
		} else {
			args.SlirpDNS = usernet.DNSIP(subnet)
		}
		args.SlirpIPAddress = networks.SlirpIPAddress
	}
	// change instance id on every boot so network config will be processed again
	args.IID = fmt.Sprintf("iid-%d", time.Now().Unix())
	pubKeys, err := sshutil.DefaultPubKeys(*instConfig.SSH.LoadDotSSHPubKeys)
	if err != nil {
		return nil, err
	}
	if len(pubKeys) == 0 {
		return nil, errors.New("no SSH key was found, run `ssh-keygen`")
	}
	for _, f := range pubKeys {
		args.SSHPubKeys = append(args.SSHPubKeys, f.Content)
	}
	// Map the configured mount type to the guest fstab filesystem type.
	// fstype stays "" for mount types with no fstab entry.
	var fstype string
	switch *instConfig.MountType {
	case limayaml.REVSSHFS:
		fstype = "sshfs"
	case limayaml.NINEP:
		fstype = "9p"
	case limayaml.VIRTIOFS:
		fstype = "virtiofs"
	}
	hostHome, err := localpathutil.Expand("~")
	if err != nil {
		return nil, err
	}
	for i, f := range instConfig.Mounts {
		tag := fmt.Sprintf("mount%d", i)
		options := "defaults"
		switch fstype {
		case "9p", "virtiofs":
			options = "ro"
			if *f.Writable {
				options = "rw"
			}
			if fstype == "9p" {
				// 9p-specific transport and caching options.
				options += ",trans=virtio"
				options += fmt.Sprintf(",version=%s", *f.NineP.ProtocolVersion)
				msize, err := units.RAMInBytes(*f.NineP.Msize)
				if err != nil {
					return nil, fmt.Errorf("failed to parse msize for %q: %w", f.Location, err)
				}
				options += fmt.Sprintf(",msize=%d", msize)
				options += fmt.Sprintf(",cache=%s", *f.NineP.Cache)
			}
			// don't fail the boot, if virtfs is not available
			options += ",nofail"
		}
		args.Mounts = append(args.Mounts, Mount{Tag: tag, MountPoint: *f.MountPoint, Type: fstype, Options: options})
		// Remember where the host home directory is mounted in the guest.
		if f.Location == hostHome {
			args.HostHomeMountPoint = *f.MountPoint
		}
	}
	// MountType is the template-facing name (differs from fstype for reverse-sshfs).
	switch *instConfig.MountType {
	case limayaml.REVSSHFS:
		args.MountType = "reverse-sshfs"
	case limayaml.NINEP:
		args.MountType = "9p"
	case limayaml.VIRTIOFS:
		args.MountType = "virtiofs"
	}
	for i, d := range instConfig.AdditionalDisks {
		// Defaults: format the disk, autodetect the filesystem type.
		format := true
		if d.Format != nil {
			format = *d.Format
		}
		fstype := ""
		if d.FSType != nil {
			fstype = *d.FSType
		}
		args.Disks = append(args.Disks, Disk{
			Name:   d.Name,
			Device: diskDeviceNameFromOrder(i),
			Format: format,
			FSType: fstype,
			FSArgs: d.FSArgs,
		})
	}
	// The slirp NIC always comes first; metric 200 keeps it preferred unless
	// a user network specifies a lower metric.
	args.Networks = append(args.Networks, Network{MACAddress: limayaml.MACAddress(instDir), Interface: networks.SlirpNICName, Metric: 200})
	for i, nw := range instConfig.Networks {
		// The first usernet network is already represented by the slirp NIC.
		if i == firstUsernetIndex {
			continue
		}
		args.Networks = append(args.Networks, Network{MACAddress: nw.MACAddress, Interface: nw.Interface, Metric: *nw.Metric})
	}
	args.Env, err = setupEnv(instConfig.Env, *instConfig.PropagateProxyEnv, args.SlirpGateway)
	if err != nil {
		return nil, err
	}
	// DNS resolution order: explicit dns: entries > slirp/VZ gateway DNS >
	// host resolver ports > the host's own resolv.conf addresses.
	switch {
	case len(instConfig.DNS) > 0:
		for _, addr := range instConfig.DNS {
			args.DNSAddresses = append(args.DNSAddresses, addr.String())
		}
	case firstUsernetIndex != -1 || *instConfig.VMType == limayaml.VZ:
		args.DNSAddresses = append(args.DNSAddresses, args.SlirpDNS)
	case *instConfig.HostResolver.Enabled:
		args.UDPDNSLocalPort = udpDNSLocalPort
		args.TCPDNSLocalPort = tcpDNSLocalPort
		args.DNSAddresses = append(args.DNSAddresses, args.SlirpDNS)
	default:
		args.DNSAddresses, err = osutil.DNSAddresses()
		if err != nil {
			return nil, err
		}
	}
	// Collect trusted CA certificates from files and inline config entries.
	args.CACerts.RemoveDefaults = instConfig.CACertificates.RemoveDefaults
	for _, path := range instConfig.CACertificates.Files {
		expanded, err := localpathutil.Expand(path)
		if err != nil {
			return nil, err
		}
		content, err := os.ReadFile(expanded)
		if err != nil {
			return nil, err
		}
		cert := getCert(string(content))
		args.CACerts.Trusted = append(args.CACerts.Trusted, cert)
	}
	for _, content := range instConfig.CACertificates.Certs {
		cert := getCert(content)
		args.CACerts.Trusted = append(args.CACerts.Trusted, cert)
	}
	// Remove empty caCerts (default values) from configuration yaml
	if !*args.CACerts.RemoveDefaults && len(args.CACerts.Trusted) == 0 {
		args.CACerts.RemoveDefaults = nil
		args.CACerts.Trusted = nil
	}
	args.BootCmds = getBootCmds(instConfig.Provision)
	for i, f := range instConfig.Provision {
		if f.Mode == limayaml.ProvisionModeDependency && *f.SkipDefaultDependencyResolution {
			args.SkipDefaultDependencyResolution = true
		}
		if f.Mode == limayaml.ProvisionModeData {
			// Data files are named by their index so ordering is stable.
			args.DataFiles = append(args.DataFiles, DataFile{
				FileName:    fmt.Sprintf("%08d", i),
				Overwrite:   strconv.FormatBool(*f.Overwrite),
				Owner:       *f.Owner,
				Path:        *f.Path,
				Permissions: *f.Permissions,
			})
		}
	}
	return &args, nil
}
// GenerateCloudConfig renders the standalone cloud-config document for the
// instance and writes it (read-only) into the instance directory.
// Mounts and resolv.conf DNS entries are intentionally omitted here; they are
// only emitted into the cidata ISO.
func GenerateCloudConfig(instDir, name string, instConfig *limayaml.LimaYAML) error {
	args, err := templateArgs(false, instDir, name, instConfig, 0, 0, 0, "")
	if err != nil {
		return err
	}
	// Neither mounts nor resolv_conf belong in the cloud-config document.
	args.Mounts, args.DNSAddresses = nil, nil
	if err := ValidateTemplateArgs(args); err != nil {
		return err
	}
	config, err := ExecuteTemplateCloudConfig(args)
	if err != nil {
		return err
	}
	cloudConfigPath := filepath.Join(instDir, filenames.CloudConfig)
	os.RemoveAll(cloudConfigPath) // delete existing
	return os.WriteFile(cloudConfigPath, config, 0o444)
}
// GenerateISO9660 builds the cidata ISO image for the instance and writes it
// to the instance directory. The image contains the rendered templates,
// provisioning scripts/data files, the (optionally gzip-compressed) guest
// agent binary, and the nerdctl archive. For WSL2, a plain directory plus an
// ssh_authorized_keys file is written instead of an ISO.
func GenerateISO9660(instDir, name string, instConfig *limayaml.LimaYAML, udpDNSLocalPort, tcpDNSLocalPort int, guestAgentBinary, nerdctlArchive string, vsockPort int, virtioPort string) error {
	args, err := templateArgs(true, instDir, name, instConfig, udpDNSLocalPort, tcpDNSLocalPort, vsockPort, virtioPort)
	if err != nil {
		return err
	}
	if err := ValidateTemplateArgs(args); err != nil {
		return err
	}
	layout, err := ExecuteTemplateCIDataISO(args)
	if err != nil {
		return err
	}
	// Add provisioning payloads; file names use the provision index so the
	// guest executes them in declaration order.
	for i, f := range instConfig.Provision {
		switch f.Mode {
		case limayaml.ProvisionModeSystem, limayaml.ProvisionModeUser, limayaml.ProvisionModeDependency:
			layout = append(layout, iso9660util.Entry{
				Path:   fmt.Sprintf("provision.%s/%08d", f.Mode, i),
				Reader: strings.NewReader(f.Script),
			})
		case limayaml.ProvisionModeData:
			layout = append(layout, iso9660util.Entry{
				Path:   fmt.Sprintf("provision.%s/%08d", f.Mode, i),
				Reader: strings.NewReader(*f.Content),
			})
		case limayaml.ProvisionModeBoot:
			// Boot scripts are embedded in the templates (see getBootCmds),
			// not shipped as separate files.
			continue
		case limayaml.ProvisionModeAnsible:
			// Ansible provisioning runs from the host, not from cidata.
			continue
		default:
			return fmt.Errorf("unknown provision mode %q", f.Mode)
		}
	}
	if guestAgentBinary != "" {
		var guestAgent io.ReadCloser
		if strings.HasSuffix(guestAgentBinary, ".gz") {
			logrus.Debugf("Decompressing %s", guestAgentBinary)
			guestAgentGz, err := os.Open(guestAgentBinary)
			if err != nil {
				return err
			}
			// Both the gzip reader and the underlying file stay open until the
			// ISO is written below; the deferred Closes run at function return.
			defer guestAgentGz.Close()
			guestAgent, err = gzip.NewReader(guestAgentGz)
			if err != nil {
				return err
			}
		} else {
			guestAgent, err = os.Open(guestAgentBinary)
			if err != nil {
				return err
			}
		}
		defer guestAgent.Close()
		layout = append(layout, iso9660util.Entry{
			Path:   "lima-guestagent",
			Reader: guestAgent,
		})
	}
	if nerdctlArchive != "" {
		nftgz := args.Containerd.Archive
		nftgzR, err := os.Open(nerdctlArchive)
		if err != nil {
			return err
		}
		defer nftgzR.Close()
		layout = append(layout, iso9660util.Entry{
			// ISO9660 requires len(Path) <= 30
			Path:   nftgz,
			Reader: nftgzR,
		})
	}
	if args.VMType == limayaml.WSL2 {
		// WSL2 cannot mount an ISO; write the layout as a plain directory and
		// add the SSH keys as a separate file.
		layout = append(layout, iso9660util.Entry{
			Path:   "ssh_authorized_keys",
			Reader: strings.NewReader(strings.Join(args.SSHPubKeys, "\n")),
		})
		return writeCIDataDir(filepath.Join(instDir, filenames.CIDataISODir), layout)
	}
	return iso9660util.Write(filepath.Join(instDir, filenames.CIDataISO), "cidata", layout)
}
// getCert splits a PEM certificate string into trimmed, non-blank lines
// suitable for embedding in the cloud-config template.
func getCert(content string) Cert {
	lines := []string{}
	for _, line := range strings.Split(content, "\n") {
		// Trim before the emptiness check so that whitespace-only lines are
		// skipped too; previously they survived as empty strings because the
		// check ran on the untrimmed line.
		line = strings.TrimSpace(line)
		if line == "" {
			continue
		}
		lines = append(lines, line)
	}
	return Cert{Lines: lines}
}
// getBootCmds converts "boot" mode provision scripts into BootCmds entries.
// Each trimmed, non-blank script line becomes one command line.
func getBootCmds(p []limayaml.Provision) []BootCmds {
	var bootCmds []BootCmds
	for _, f := range p {
		if f.Mode != limayaml.ProvisionModeBoot {
			continue
		}
		lines := []string{}
		for _, line := range strings.Split(f.Script, "\n") {
			// Trim before the emptiness check so whitespace-only lines are
			// skipped too; previously they survived as empty strings.
			line = strings.TrimSpace(line)
			if line == "" {
				continue
			}
			lines = append(lines, line)
		}
		bootCmds = append(bootCmds, BootCmds{Lines: lines})
	}
	return bootCmds
}
// diskDeviceNameFromOrder maps an additional-disk index to its virtio block
// device name: 0 -> "vdb", 1 -> "vdc", ... ("vda" is the system disk).
func diskDeviceNameFromOrder(order int) string {
	return fmt.Sprintf("vd%c", 'b'+rune(order))
}
// writeCIDataDir materializes the cidata layout as a plain directory tree
// rooted at rootPath (used for WSL2, which cannot mount an ISO image).
// Any previous content of rootPath is removed first. Entries are written in
// case-insensitive path order to mirror ISO9660 layout ordering.
func writeCIDataDir(rootPath string, layout []iso9660util.Entry) error {
	slices.SortFunc(layout, func(a, b iso9660util.Entry) int {
		return strings.Compare(strings.ToLower(a.Path), strings.ToLower(b.Path))
	})
	if err := os.RemoveAll(rootPath); err != nil {
		return err
	}
	for _, entry := range layout {
		// Entry paths are slash-separated (ISO semantics), hence path.Dir;
		// host paths are assembled with filepath.Join.
		if dir := path.Dir(entry.Path); dir != "" && dir != "/" {
			if err := os.MkdirAll(filepath.Join(rootPath, dir), 0o700); err != nil {
				return err
			}
		}
		out, err := os.OpenFile(filepath.Join(rootPath, entry.Path), os.O_CREATE|os.O_RDWR, 0o700)
		if err != nil {
			return err
		}
		_, copyErr := io.Copy(out, entry.Reader)
		// Close inside the loop so handles don't pile up across iterations;
		// the copy error takes precedence over any close error.
		_ = out.Close()
		if copyErr != nil {
			return copyErr
		}
	}
	return nil
}
// SPDX-FileCopyrightText: Copyright The Lima Authors
// SPDX-License-Identifier: Apache-2.0
package cidata
import (
"bytes"
"embed"
"errors"
"fmt"
"io/fs"
"path"
"github.com/lima-vm/lima/pkg/identifiers"
"github.com/lima-vm/lima/pkg/iso9660util"
"github.com/lima-vm/lima/pkg/textutil"
)
// templateFS embeds the cidata template tree (user-data, meta-data, boot
// scripts, etc.) into the binary at build time.
//
//go:embed cidata.TEMPLATE.d
var templateFS embed.FS

// templateFSRoot is the directory prefix under which the templates live
// inside templateFS.
const templateFSRoot = "cidata.TEMPLATE.d"
// CACerts describes the CA certificate configuration passed to the templates.
type CACerts struct {
	// RemoveDefaults removes the distro's default CA bundle when true;
	// nil means the caCerts section is omitted from the rendered config.
	RemoveDefaults *bool
	// Trusted holds additional certificates to install.
	Trusted []Cert
}

// Cert is a single PEM certificate, split into lines for YAML embedding.
type Cert struct {
	Lines []string
}

// Containerd describes which containerd instances to set up in the guest.
type Containerd struct {
	System  bool   // run system-wide (rootful) containerd
	User    bool   // run rootless containerd for the user
	Archive string // nerdctl archive file name inside the ISO
}

// Network describes one guest NIC for the network-config template.
type Network struct {
	MACAddress string
	Interface  string
	Metric     uint32 // route metric; lower is preferred
}

// Mount describes one fstab entry for a shared directory.
type Mount struct {
	Tag        string
	MountPoint string // abs path, accessible by the User
	Type       string
	Options    string
}

// BootCmds is one "boot" mode provision script, split into lines.
type BootCmds struct {
	Lines []string
}

// DataFile describes one "data" mode provision file shipped on the ISO.
type DataFile struct {
	FileName    string // index-derived name of the payload on the ISO
	Overwrite   string // "true"/"false" as a string for the template
	Owner       string
	Path        string // destination path in the guest
	Permissions string
}

// Disk describes one additional data disk.
type Disk struct {
	Name   string
	Device string // guest device name, e.g. "vdb"
	Format bool   // format the disk if not yet formatted
	FSType string
	FSArgs []string
}
// TemplateArgs is the complete data model rendered into the cidata templates
// (user-data, meta-data, network-config, boot scripts). It is assembled by
// templateArgs and checked by ValidateTemplateArgs.
type TemplateArgs struct {
	Debug              bool
	Name               string // instance name
	Hostname           string // instance hostname
	IID                string // instance id
	User               string // user name
	Comment            string // user information
	Home               string // home directory
	Shell              string // login shell
	UID                uint32
	SSHPubKeys         []string
	Mounts             []Mount
	MountType          string
	Disks              []Disk
	GuestInstallPrefix string
	UpgradePackages    bool
	Containerd         Containerd
	Networks           []Network
	SlirpNICName       string
	SlirpGateway       string
	SlirpDNS           string
	SlirpIPAddress     string
	UDPDNSLocalPort    int // host-side UDP port of the built-in host resolver
	TCPDNSLocalPort    int // host-side TCP port of the built-in host resolver
	Env                map[string]string
	Param              map[string]string
	BootScripts        bool
	DataFiles          []DataFile
	DNSAddresses       []string
	CACerts            CACerts
	HostHomeMountPoint string // guest mount point of the host home dir, if mounted
	BootCmds           []BootCmds
	RosettaEnabled     bool
	RosettaBinFmt      bool
	SkipDefaultDependencyResolution bool
	VMType             string
	VSockPort          int
	VirtioPort         string
	Plain              bool
	TimeZone           string
}
// ValidateTemplateArgs checks that args describes a renderable, safe cidata
// configuration: a valid instance name, a non-root user with a non-zero UID,
// a home directory, a shell, at least one SSH key, and absolute mount points.
func ValidateTemplateArgs(args *TemplateArgs) error {
	if err := identifiers.Validate(args.Name); err != nil {
		return err
	}
	// args.User is intentionally not validated here; the user can override with any name they want
	// limayaml.FillDefault will validate the default (local) username, but not an explicit setting
	switch {
	case args.User == "root":
		return errors.New("field User must not be \"root\"")
	case args.UID == 0:
		return errors.New("field UID must not be 0")
	case args.Home == "":
		return errors.New("field Home must be set")
	case args.Shell == "":
		return errors.New("field Shell must be set")
	case len(args.SSHPubKeys) == 0:
		return errors.New("field SSHPubKeys must be set")
	}
	for i, m := range args.Mounts {
		if mp := m.MountPoint; !path.IsAbs(mp) {
			return fmt.Errorf("field mounts[%d] must be absolute, got %q", i, mp)
		}
	}
	return nil
}
// ExecuteTemplateCloudConfig validates args and renders the embedded
// "user-data" template into a cloud-config document.
func ExecuteTemplateCloudConfig(args *TemplateArgs) ([]byte, error) {
	if err := ValidateTemplateArgs(args); err != nil {
		return nil, err
	}
	userData, err := templateFS.ReadFile(path.Join(templateFSRoot, "user-data"))
	if err != nil {
		return nil, err
	}
	return textutil.ExecuteTemplate(string(userData), args)
}
// ExecuteTemplateCIDataISO validates args, renders every embedded template
// file, and returns the resulting ISO layout entries (one per template,
// preserving the relative path inside the template directory).
func ExecuteTemplateCIDataISO(args *TemplateArgs) ([]iso9660util.Entry, error) {
	if err := ValidateTemplateArgs(args); err != nil {
		return nil, err
	}
	// Strip the templateFSRoot prefix so entry paths are relative.
	fsys, err := fs.Sub(templateFS, templateFSRoot)
	if err != nil {
		return nil, err
	}
	var layout []iso9660util.Entry
	walkFn := func(path string, d fs.DirEntry, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if d.IsDir() {
			// Directories are implied by entry paths; nothing to emit.
			return nil
		}
		if !d.Type().IsRegular() {
			return fmt.Errorf("got non-regular file %q", path)
		}
		templateB, err := fs.ReadFile(fsys, path)
		if err != nil {
			return err
		}
		b, err := textutil.ExecuteTemplate(string(templateB), args)
		if err != nil {
			return err
		}
		layout = append(layout, iso9660util.Entry{
			Path:   path,
			Reader: bytes.NewReader(b),
		})
		return nil
	}
	if err := fs.WalkDir(fsys, ".", walkFn); err != nil {
		return nil, err
	}
	return layout, nil
}
// SPDX-FileCopyrightText: Copyright The Lima Authors
// SPDX-License-Identifier: Apache-2.0
package downloader
import (
"bytes"
"context"
"crypto/sha256"
"errors"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"sync/atomic"
"time"
"github.com/cheggaaa/pb/v3"
"github.com/containerd/continuity/fs"
"github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
"github.com/lima-vm/lima/pkg/httpclientutil"
"github.com/lima-vm/lima/pkg/localpathutil"
"github.com/lima-vm/lima/pkg/lockutil"
"github.com/lima-vm/lima/pkg/progressbar"
)
// HideProgress is used only for testing.
var HideProgress bool

// hideBar is used only for testing.
// It marks the bar static so it produces no terminal output.
func hideBar(bar *progressbar.ProgressBar) {
	bar.Set(pb.Static, true)
}
// Status describes the outcome of a Download/Cached call.
type Status = string

const (
	StatusUnknown    Status = ""            // outcome not determined
	StatusDownloaded Status = "downloaded"  // fetched from the remote source
	StatusSkipped    Status = "skipped"     // local target already existed
	StatusUsedCache  Status = "used-cache"  // served from the cache directory
)
// Result describes the outcome of a download or cache lookup.
type Result struct {
	Status          Status
	CachePath       string // "/Users/foo/Library/Caches/lima/download/by-url-sha256/<SHA256_OF_URL>/data"
	LastModified    time.Time
	ContentType     string
	ValidatedDigest bool // true when the content was checked against an expected digest
}
// options collects the settings configurable through Opt functions.
type options struct {
	cacheDir       string // default: empty (disables caching)
	decompress     bool   // default: false (keep compression)
	description    string // default: url
	expectedDigest digest.Digest
}
// apply runs every Opt against o, stopping at the first error.
func (o *options) apply(opts []Opt) error {
	for i := range opts {
		if err := opts[i](o); err != nil {
			return err
		}
	}
	return nil
}
// Opt is a functional option for Download and Cached.
type Opt func(*options) error

// WithCache enables caching using filepath.Join(os.UserCacheDir(), "lima") as the cache dir.
func WithCache() Opt {
	return func(o *options) error {
		ucd, err := os.UserCacheDir()
		if err != nil {
			return err
		}
		cacheDir := filepath.Join(ucd, "lima")
		// Delegate to WithCacheDir so there is a single place that sets cacheDir.
		return WithCacheDir(cacheDir)(o)
	}
}

// WithCacheDir enables caching using the specified dir.
// Empty value disables caching.
func WithCacheDir(cacheDir string) Opt {
	return func(o *options) error {
		o.cacheDir = cacheDir
		return nil
	}
}

// WithDescription adds a user description of the download.
func WithDescription(description string) Opt {
	return func(o *options) error {
		o.description = description
		return nil
	}
}

// WithDecompress decompress the download from the cache.
func WithDecompress(decompress bool) Opt {
	return func(o *options) error {
		o.decompress = decompress
		return nil
	}
}
// WithExpectedDigest is used to validate the downloaded file against the expected digest.
//
// The digest is not verified in the following cases:
// - The digest was not specified.
// - The file already exists in the local target path.
//
// When the `data` file exists in the cache dir with `<ALGO>.digest` file,
// the digest is verified by comparing the content of `<ALGO>.digest` with the expected
// digest string. So, the actual digest of the `data` file is not computed.
func WithExpectedDigest(expectedDigest digest.Digest) Opt {
	return func(o *options) error {
		// An empty digest disables validation; nothing to sanity-check.
		if expectedDigest == "" {
			o.expectedDigest = expectedDigest
			return nil
		}
		if !expectedDigest.Algorithm().Available() {
			return fmt.Errorf("expected digest algorithm %q is not available", expectedDigest.Algorithm())
		}
		if err := expectedDigest.Validate(); err != nil {
			return err
		}
		o.expectedDigest = expectedDigest
		return nil
	}
}
// readFile returns the content of the file at path, or an empty string when
// the path is empty or the file cannot be read. Errors are deliberately
// swallowed: callers treat missing cache metadata as "unknown".
func readFile(path string) string {
	if path == "" {
		return ""
	}
	// os.ReadFile already reports nonexistent/unreadable files, so the
	// previous os.Stat pre-check was a redundant syscall (and a TOCTOU race).
	b, err := os.ReadFile(path)
	if err != nil {
		return ""
	}
	return string(b)
}
// readTime parses the HTTP Last-Modified style timestamp stored in the file
// at path. It returns the zero time when the path is empty, the file cannot
// be read, or the content does not parse as http.TimeFormat.
func readTime(path string) time.Time {
	if path == "" {
		return time.Time{}
	}
	// os.ReadFile already reports nonexistent/unreadable files, so the
	// previous os.Stat pre-check was a redundant syscall (and a TOCTOU race).
	b, err := os.ReadFile(path)
	if err != nil {
		return time.Time{}
	}
	t, err := time.Parse(http.TimeFormat, string(b))
	if err != nil {
		return time.Time{}
	}
	return t
}
// Download downloads the remote resource into the local path.
//
// Download caches the remote resource if WithCache or WithCacheDir option is specified.
// Local files are not cached.
//
// When the local path already exists, Download returns Result with StatusSkipped.
// (So, the local path cannot be set to /dev/null for "caching only" mode.)
//
// The local path can be an empty string for "caching only" mode.
func Download(ctx context.Context, local, remote string, opts ...Opt) (*Result, error) {
	var o options
	if err := o.apply(opts); err != nil {
		return nil, err
	}
	var localPath string
	if local == "" {
		// Caching-only mode: nothing is copied out, so a cache dir is mandatory.
		if o.cacheDir == "" {
			return nil, errors.New("caching-only mode requires the cache directory to be specified")
		}
	} else {
		var err error
		localPath, err = canonicalLocalPath(local)
		if err != nil {
			return nil, err
		}
		// An existing target short-circuits everything, including digest validation.
		if _, err := os.Stat(localPath); err == nil {
			logrus.Debugf("file %q already exists, skipping downloading from %q (and skipping digest validation)", localPath, remote)
			res := &Result{
				Status:          StatusSkipped,
				ValidatedDigest: false,
			}
			return res, nil
		} else if !errors.Is(err, os.ErrNotExist) {
			return nil, err
		}
		localPathDir := filepath.Dir(localPath)
		if err := os.MkdirAll(localPathDir, 0o755); err != nil {
			return nil, err
		}
	}
	ext := path.Ext(remote)
	// Local sources are copied (and optionally decompressed) directly; never cached.
	if IsLocal(remote) {
		if err := copyLocal(ctx, localPath, remote, ext, o.decompress, o.description, o.expectedDigest); err != nil {
			return nil, err
		}
		res := &Result{
			Status:          StatusDownloaded,
			ValidatedDigest: o.expectedDigest != "",
		}
		return res, nil
	}
	// No cache dir: download straight to the target without writing metadata.
	if o.cacheDir == "" {
		if err := downloadHTTP(ctx, localPath, "", "", remote, o.description, o.expectedDigest); err != nil {
			return nil, err
		}
		res := &Result{
			Status:          StatusDownloaded,
			ValidatedDigest: o.expectedDigest != "",
		}
		return res, nil
	}
	// Cached mode: serialize cache access per-URL with a directory lock, then
	// try the cache first and fall back to fetching.
	shad := cacheDirectoryPath(o.cacheDir, remote)
	if err := os.MkdirAll(shad, 0o700); err != nil {
		return nil, err
	}
	var res *Result
	err := lockutil.WithDirLock(shad, func() error {
		var err error
		res, err = getCached(ctx, localPath, remote, o)
		if err != nil {
			return err
		}
		// res != nil means the cache satisfied the request.
		if res != nil {
			return nil
		}
		res, err = fetch(ctx, localPath, remote, o)
		return err
	})
	return res, err
}
// getCached tries to copy the file from the cache to local path. Return result,
// nil if the file was copied, nil, nil if the file is not in the cache or the
// cache needs update, or nil, error on fatal error.
//
// Callers must hold the cache directory lock (see Download).
func getCached(ctx context.Context, localPath, remote string, o options) (*Result, error) {
	shad := cacheDirectoryPath(o.cacheDir, remote)
	shadData := filepath.Join(shad, "data")
	shadTime := filepath.Join(shad, "time")
	shadType := filepath.Join(shad, "type")
	shadDigest, err := cacheDigestPath(shad, o.expectedDigest)
	if err != nil {
		return nil, err
	}
	// No cached data at all -> caller must fetch.
	if _, err := os.Stat(shadData); err != nil {
		return nil, nil
	}
	ext := path.Ext(remote)
	logrus.Debugf("file %q is cached as %q", localPath, shadData)
	if _, err := os.Stat(shadDigest); err == nil {
		// Cheap path: the stored digest file vouches for the data; compare it
		// with the expected digest instead of hashing the data again.
		logrus.Debugf("Comparing digest %q with the cached digest file %q, not computing the actual digest of %q",
			o.expectedDigest, shadDigest, shadData)
		if err := validateCachedDigest(shadDigest, o.expectedDigest); err != nil {
			return nil, err
		}
		// Digest already verified above, so copyLocal skips re-validation.
		if err := copyLocal(ctx, localPath, shadData, ext, o.decompress, "", ""); err != nil {
			return nil, err
		}
	} else {
		// Digest-less cache entry: fall back to Last-Modified freshness checks.
		if match, lmCached, lmRemote, err := matchLastModified(ctx, shadTime, remote); err != nil {
			logrus.WithError(err).Info("Failed to retrieve last-modified for cached digest-less image; using cached image.")
		} else if match {
			if err := copyLocal(ctx, localPath, shadData, ext, o.decompress, o.description, o.expectedDigest); err != nil {
				return nil, err
			}
		} else {
			logrus.Infof("Re-downloading digest-less image: last-modified mismatch (cached: %q, remote: %q)", lmCached, lmRemote)
			// nil, nil tells the caller the cache is stale and a fetch is needed.
			return nil, nil
		}
	}
	res := &Result{
		Status:          StatusUsedCache,
		CachePath:       shadData,
		LastModified:    readTime(shadTime),
		ContentType:     readFile(shadType),
		ValidatedDigest: o.expectedDigest != "",
	}
	return res, nil
}
// fetch downloads remote to the cache and copy the cached file to local path.
//
// Callers must hold the cache directory lock (see Download). On success the
// cache dir contains "url", "data", "time", "type", and (when an expected
// digest was given) "<ALGO>.digest".
func fetch(ctx context.Context, localPath, remote string, o options) (*Result, error) {
	shad := cacheDirectoryPath(o.cacheDir, remote)
	shadData := filepath.Join(shad, "data")
	shadTime := filepath.Join(shad, "time")
	shadType := filepath.Join(shad, "type")
	shadDigest, err := cacheDigestPath(shad, o.expectedDigest)
	if err != nil {
		return nil, err
	}
	ext := path.Ext(remote)
	// Record the source URL for debugging/inspection of the cache dir.
	shadURL := filepath.Join(shad, "url")
	if err := os.WriteFile(shadURL, []byte(remote), 0o644); err != nil {
		return nil, err
	}
	// downloadHTTP validates the digest while downloading.
	if err := downloadHTTP(ctx, shadData, shadTime, shadType, remote, o.description, o.expectedDigest); err != nil {
		return nil, err
	}
	// Persist the verified digest so future cache hits can skip re-hashing.
	if shadDigest != "" && o.expectedDigest != "" {
		if err := os.WriteFile(shadDigest, []byte(o.expectedDigest.String()), 0o644); err != nil {
			return nil, err
		}
	}
	// no need to pass the digest to copyLocal(), as we already verified the digest
	if err := copyLocal(ctx, localPath, shadData, ext, o.decompress, "", ""); err != nil {
		return nil, err
	}
	res := &Result{
		Status:          StatusDownloaded,
		CachePath:       shadData,
		LastModified:    readTime(shadTime),
		ContentType:     readFile(shadType),
		ValidatedDigest: o.expectedDigest != "",
	}
	return res, nil
}
// Cached checks if the remote resource is in the cache.
//
// Download caches the remote resource if WithCache or WithCacheDir option is specified.
// Local files are not cached.
//
// When the cache path already exists, Cached returns Result with StatusUsedCache.
func Cached(remote string, opts ...Opt) (*Result, error) {
	var o options
	if err := o.apply(opts); err != nil {
		return nil, err
	}
	if o.cacheDir == "" {
		return nil, errors.New("caching-only mode requires the cache directory to be specified")
	}
	if IsLocal(remote) {
		return nil, errors.New("local files are not cached")
	}
	shad := cacheDirectoryPath(o.cacheDir, remote)
	shadData := filepath.Join(shad, "data")
	shadTime := filepath.Join(shad, "time")
	shadType := filepath.Join(shad, "type")
	shadDigest, err := cacheDigestPath(shad, o.expectedDigest)
	if err != nil {
		return nil, err
	}
	// Checking if data file exists is safe without locking.
	if _, err := os.Stat(shadData); err != nil {
		return nil, err
	}
	// But validating the digest or the data file must take the lock to avoid races
	// with parallel downloads.
	if err := os.MkdirAll(shad, 0o700); err != nil {
		return nil, err
	}
	err = lockutil.WithDirLock(shad, func() error {
		// When the "<ALGO>.digest" file exists, comparing its content against
		// the expected digest is sufficient (and cheap); only when it is
		// missing do we have to hash the whole data file.
		// BUGFIX: these branches were previously swapped (err != nil), so a
		// missing digest file was "validated" by reading a nonexistent path,
		// and an existing digest file triggered a full re-hash of the data.
		if _, err := os.Stat(shadDigest); err == nil {
			if err := validateCachedDigest(shadDigest, o.expectedDigest); err != nil {
				return err
			}
		} else {
			if err := validateLocalFileDigest(shadData, o.expectedDigest); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	res := &Result{
		Status:          StatusUsedCache,
		CachePath:       shadData,
		LastModified:    readTime(shadTime),
		ContentType:     readFile(shadType),
		ValidatedDigest: o.expectedDigest != "",
	}
	return res, nil
}
// cacheDirectoryPath returns the cache subdirectory path for a remote URL.
// The directory name is derived from the URL via CacheKey. Contents:
// - "url" file contains the url
// - "data" file contains the data
// - "time" file contains the time (Last-Modified header)
// - "type" file contains the type (Content-Type header)
func cacheDirectoryPath(cacheDir, remote string) string {
	return filepath.Join(cacheDir, "download", "by-url-sha256", CacheKey(remote))
}
// cacheDigestPath returns the cache digest file path inside shad.
// - "<ALGO>.digest" contains the digest
// An empty expectedDigest yields an empty path (no digest file is used).
func cacheDigestPath(shad string, expectedDigest digest.Digest) (string, error) {
	if expectedDigest == "" {
		return "", nil
	}
	algo := expectedDigest.Algorithm().String()
	// Reject algorithm names that could escape the cache directory.
	if strings.ContainsAny(algo, `/\`) {
		return "", fmt.Errorf("invalid digest algorithm %q", algo)
	}
	return filepath.Join(shad, algo+".digest"), nil
}
// IsLocal reports whether s refers to a local file: either a plain path
// (no "://" scheme separator) or an explicit "file://" URL.
func IsLocal(s string) bool {
	if strings.HasPrefix(s, "file://") {
		return true
	}
	return !strings.Contains(s, "://")
}
// canonicalLocalPath canonicalizes the local path string.
// - Make sure the file has no scheme, or the `file://` scheme
// - If it has the `file://` scheme, strip the scheme and make sure the filename is absolute
// - Expand a leading `~`, or convert relative to absolute name
func canonicalLocalPath(s string) (string, error) {
	switch {
	case s == "":
		return "", errors.New("got empty path")
	case !IsLocal(s):
		return "", fmt.Errorf("got non-local path: %q", s)
	}
	if trimmed, ok := strings.CutPrefix(s, "file://"); ok {
		// file:// URLs must carry an absolute path; there is no cwd to resolve against.
		if !filepath.IsAbs(trimmed) {
			return "", fmt.Errorf("got non-absolute path %q", trimmed)
		}
		return trimmed, nil
	}
	return localpathutil.Expand(s)
}
// copyLocal validates src against expectedDigest (when given) and copies it
// to dst, optionally decompressing based on the file extension or, failing
// that, the file's magic bytes. An empty dst means caching-only mode: only
// the digest validation is performed.
func copyLocal(ctx context.Context, dst, src, ext string, decompress bool, description string, expectedDigest digest.Digest) error {
	srcPath, err := canonicalLocalPath(src)
	if err != nil {
		return err
	}
	if expectedDigest != "" {
		logrus.Debugf("verifying digest of local file %q (%s)", srcPath, expectedDigest)
	}
	if err := validateLocalFileDigest(srcPath, expectedDigest); err != nil {
		return err
	}
	if dst == "" {
		// empty dst means caching-only mode
		return nil
	}
	dstPath, err := canonicalLocalPath(dst)
	if err != nil {
		return err
	}
	if decompress {
		// Prefer the extension to pick a decompressor; fall back to sniffing
		// the file's magic bytes when the extension is not recognized.
		command := decompressor(ext)
		if command != "" {
			return decompressLocal(ctx, command, dstPath, srcPath, ext, description)
		}
		commandByMagic := decompressorByMagic(srcPath)
		if commandByMagic != "" {
			return decompressLocal(ctx, commandByMagic, dstPath, srcPath, ext, description)
		}
	}
	// TODO: progress bar for copy
	return fs.CopyFile(dstPath, srcPath)
}
// decompressor maps a file extension to the name of the command-line tool
// used to decompress it. Unknown extensions yield an empty string.
func decompressor(ext string) string {
	switch ext {
	case ".gz":
		return "gzip"
	case ".bz2":
		return "bzip2"
	case ".xz":
		return "xz"
	case ".zst":
		return "zstd"
	}
	// No decompressor known for this extension.
	return ""
}
// decompressorByMagic sniffs the first bytes of file and returns the name of
// the matching decompression tool ("gzip", "bzip2", "xz", "zstd"), or an
// empty string when the file cannot be read or no magic matches.
func decompressorByMagic(file string) string {
	f, err := os.Open(file)
	if err != nil {
		return ""
	}
	defer f.Close()
	header := make([]byte, 6)
	// Use io.ReadFull rather than a bare Read: Read may legally return fewer
	// bytes than requested without an error, and the old zero-padded header
	// could falsely match a magic number ending in 0x00 (e.g. a 5-byte file
	// starting with the xz prefix). ErrUnexpectedEOF just means the file is
	// shorter than 6 bytes; truncate and match against what we actually read.
	n, err := io.ReadFull(f, header)
	if err != nil && !errors.Is(err, io.ErrUnexpectedEOF) {
		return ""
	}
	header = header[:n]
	if bytes.HasPrefix(header, []byte{0x1f, 0x8b}) {
		return "gzip"
	}
	if bytes.HasPrefix(header, []byte{0x42, 0x5a}) {
		return "bzip2"
	}
	if bytes.HasPrefix(header, []byte{0xfd, 0x37, 0x7a, 0x58, 0x5a, 0x00}) {
		return "xz"
	}
	if bytes.HasPrefix(header, []byte{0x28, 0xb5, 0x2f, 0xfd}) {
		return "zstd"
	}
	return ""
}
// decompressLocal decompresses src into dst by piping src through the
// external decompressCmd ("gzip", "xz", ...) invoked with -d, showing a
// progress bar over the compressed input unless HideProgress is set.
// On command failure, the tool's stderr is attached to the ExitError.
func decompressLocal(ctx context.Context, decompressCmd, dst, src, ext, description string) error {
	logrus.Infof("decompressing %s with %v", ext, decompressCmd)
	st, err := os.Stat(src)
	if err != nil {
		return err
	}
	// The bar tracks compressed bytes read, so size it from the source file.
	bar, err := progressbar.New(st.Size())
	if err != nil {
		return err
	}
	if HideProgress {
		hideBar(bar)
	}
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()
	out, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY, 0o644)
	if err != nil {
		return err
	}
	defer out.Close()
	// Capture stderr so it can be surfaced on failure.
	buf := new(bytes.Buffer)
	cmd := exec.CommandContext(ctx, decompressCmd, "-d") // -d --decompress
	cmd.Stdin = bar.NewProxyReader(in)
	cmd.Stdout = out
	cmd.Stderr = buf
	if !HideProgress {
		if description == "" {
			description = filepath.Base(src)
		}
		logrus.Infof("Decompressing %s\n", description)
	}
	bar.Start()
	err = cmd.Run()
	if err != nil {
		// Attach the captured stderr to the error for better diagnostics.
		if ee, ok := err.(*exec.ExitError); ok {
			ee.Stderr = buf.Bytes()
		}
	}
	bar.Finish()
	return err
}
// validateCachedDigest compares the digest string stored in the shadDigest
// file with expectedDigest. An empty expectedDigest disables validation.
func validateCachedDigest(shadDigest string, expectedDigest digest.Digest) error {
	if expectedDigest == "" {
		return nil
	}
	b, err := os.ReadFile(shadDigest)
	if err != nil {
		return err
	}
	if got := strings.TrimSpace(string(b)); got != expectedDigest.String() {
		return fmt.Errorf("expected digest %q, got %q", expectedDigest, got)
	}
	return nil
}
// validateLocalFileDigest verifies that the file at localPath hashes to
// expectedDigest using the digest's own algorithm. An empty expectedDigest
// disables validation.
func validateLocalFileDigest(localPath string, expectedDigest digest.Digest) error {
	if localPath == "" {
		return errors.New("validateLocalFileDigest: got empty localPath")
	}
	if expectedDigest == "" {
		return nil
	}
	algo := expectedDigest.Algorithm()
	if !algo.Available() {
		return fmt.Errorf("expected digest algorithm %q is not available", algo)
	}
	f, err := os.Open(localPath)
	if err != nil {
		return err
	}
	defer f.Close()
	got, err := algo.FromReader(f)
	if err != nil {
		return err
	}
	if got != expectedDigest {
		return fmt.Errorf("expected digest %q, got %q", expectedDigest, got)
	}
	return nil
}
// matchLastModified takes params:
//   - ctx: context for calling httpclientutil.Head
//   - lastModifiedPath: path of the cached last-modified time file
//   - url: URL to fetch the last-modified time
//
// returns:
//   - matched: whether the last-modified time matches
//   - lmCached: last-modified time string from the lastModifiedPath
//   - lmRemote: last-modified time string from the URL
//   - err: error if fetching the last-modified time from the URL fails
func matchLastModified(ctx context.Context, lastModifiedPath, url string) (matched bool, lmCached, lmRemote string, err error) {
	lmCached = readFile(lastModifiedPath)
	if lmCached == "" {
		// Nothing cached yet; no HEAD request is issued in this case.
		return false, "<not cached>", "<not checked>", nil
	}
	resp, err := httpclientutil.Head(ctx, http.DefaultClient, url)
	if err != nil {
		return false, lmCached, "<failed to fetch remote>", err
	}
	defer resp.Body.Close()
	lmRemote = resp.Header.Get("Last-Modified")
	if lmRemote == "" {
		return false, lmCached, "<missing Last-Modified header>", nil
	}
	lmCachedTime, errParsingCachedTime := time.Parse(http.TimeFormat, lmCached)
	lmRemoteTime, errParsingRemoteTime := time.Parse(http.TimeFormat, lmRemote)
	if errParsingCachedTime != nil && errParsingRemoteTime != nil {
		// both time strings are failed to parse, so compare them as strings
		return lmCached == lmRemote, lmCached, lmRemote, nil
	} else if errParsingCachedTime == nil && errParsingRemoteTime == nil {
		// both time strings are successfully parsed, so compare them as times
		return lmRemoteTime.Equal(lmCachedTime), lmCached, lmRemote, nil
	}
	// ignore parsing errors for either time string and assume they are different
	return false, lmCached, lmRemote, nil
}
// downloadHTTP downloads url into localPath atomically (via a per-process
// temporary file renamed into place), optionally recording the response's
// Last-Modified and Content-Type headers into the given side files, and
// validating the payload against expectedDigest when non-empty.
func downloadHTTP(ctx context.Context, localPath, lastModified, contentType, url, description string, expectedDigest digest.Digest) error {
	if localPath == "" {
		return errors.New("downloadHTTP: got empty localPath")
	}
	logrus.Debugf("downloading %q into %q", url, localPath)
	resp, err := httpclientutil.Get(ctx, http.DefaultClient, url)
	if err != nil {
		return err
	}
	// Close the body on every return path. The original registered this defer
	// only after the header-recording writes below, leaking the body (and its
	// connection) when one of those writes failed.
	defer resp.Body.Close()
	if lastModified != "" {
		lm := resp.Header.Get("Last-Modified")
		if err := os.WriteFile(lastModified, []byte(lm), 0o644); err != nil {
			return err
		}
	}
	if contentType != "" {
		ct := resp.Header.Get("Content-Type")
		if err := os.WriteFile(contentType, []byte(ct), 0o644); err != nil {
			return err
		}
	}
	bar, err := progressbar.New(resp.ContentLength)
	if err != nil {
		return err
	}
	if HideProgress {
		hideBar(bar)
	}
	localPathTmp := perProcessTempfile(localPath)
	fileWriter, err := os.Create(localPathTmp)
	if err != nil {
		return err
	}
	defer fileWriter.Close()
	defer os.RemoveAll(localPathTmp)
	writers := []io.Writer{fileWriter}
	var digester digest.Digester
	if expectedDigest != "" {
		algo := expectedDigest.Algorithm()
		if !algo.Available() {
			return fmt.Errorf("unsupported digest algorithm %q", algo)
		}
		// Hash while writing so the payload is only read once.
		digester = algo.Digester()
		hasher := digester.Hash()
		writers = append(writers, hasher)
	}
	multiWriter := io.MultiWriter(writers...)
	if !HideProgress {
		if description == "" {
			description = url
		}
		// stderr corresponds to the progress bar output
		fmt.Fprintf(os.Stderr, "Downloading %s\n", description)
	}
	bar.Start()
	if _, err := io.Copy(multiWriter, bar.NewProxyReader(resp.Body)); err != nil {
		return err
	}
	bar.Finish()
	if digester != nil {
		actualDigest := digester.Digest()
		if actualDigest != expectedDigest {
			return fmt.Errorf("expected digest %q, got %q", expectedDigest, actualDigest)
		}
	}
	if err := fileWriter.Sync(); err != nil {
		return err
	}
	// Explicit Close to surface write errors; the deferred Close on this path
	// becomes a harmless double close whose error is ignored.
	if err := fileWriter.Close(); err != nil {
		return err
	}
	return os.Rename(localPathTmp, localPath)
}
var tempfileCount atomic.Uint64
// To allow parallel download we use a per-process unique suffix for temporary
// files. Renaming the temporary file to the final file is safe without
// synchronization on posix.
// To make it easy to test we also include a counter ensuring that each
// temporary file is unique in the same process.
// https://github.com/lima-vm/lima/issues/2722
func perProcessTempfile(path string) string {
return fmt.Sprintf("%s.tmp.%d.%d", path, os.Getpid(), tempfileCount.Add(1))
}
// CacheEntries returns a map of cache entries.
// The key is the SHA256 of the URL.
// The value is the path to the cache entry.
func CacheEntries(opts ...Opt) (map[string]string, error) {
	entries := make(map[string]string)
	var o options
	if err := o.apply(opts); err != nil {
		return nil, err
	}
	if o.cacheDir == "" {
		return entries, nil
	}
	downloadDir := filepath.Join(o.cacheDir, "download", "by-url-sha256")
	// ReadDir reports fs.ErrNotExist itself; the original's separate os.Stat
	// call only added an extra syscall and a TOCTOU window.
	cacheEntries, err := os.ReadDir(downloadDir)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			return entries, nil
		}
		return nil, err
	}
	for _, entry := range cacheEntries {
		entries[entry.Name()] = filepath.Join(downloadDir, entry.Name())
	}
	return entries, nil
}
// CacheKey returns the key for a cache entry of the remote URL
// (the lowercase hex encoding of the URL string's SHA256).
func CacheKey(remote string) string {
	sum := sha256.Sum256([]byte(remote))
	return fmt.Sprintf("%x", sum)
}
// RemoveAllCacheDir removes the cache directory.
// A configuration without a cache directory is a no-op.
func RemoveAllCacheDir(opts ...Opt) error {
	var o options
	if err := o.apply(opts); err != nil {
		return err
	}
	if o.cacheDir != "" {
		logrus.Infof("Pruning %q", o.cacheDir)
		return os.RemoveAll(o.cacheDir)
	}
	return nil
}
// SPDX-FileCopyrightText: Copyright The Lima Authors
// SPDX-License-Identifier: Apache-2.0
package procnettcp
import (
"bufio"
"encoding/hex"
"errors"
"fmt"
"io"
"net"
"strconv"
"strings"
"golang.org/x/sys/cpu"
)
// Kind is the name of a /proc/net table: "tcp", "tcp6", "udp", or "udp6".
type Kind = string

const (
	TCP  Kind = "tcp"
	TCP6 Kind = "tcp6"
	UDP  Kind = "udp"
	UDP6 Kind = "udp6"
	// TODO: "udplite", "udplite6".
)

// State is a socket state value as read from a table's "st" column
// (hex-encoded in the file, decoded in ParseWithEndian).
type State = int

const (
	TCPEstablished State = 0x1
	TCPListen      State = 0xA
	// NOTE(review): 0x7 — presumably the value an in-use UDP socket reports
	// in its "st" column; confirm against the kernel's tcp_states.h.
	UDPEstablished State = 0x7
)

// Entry is one parsed row of a /proc/net table.
type Entry struct {
	Kind  Kind   `json:"kind"`  // which table the row came from
	IP    net.IP `json:"ip"`    // local address
	Port  uint16 `json:"port"`  // local port
	State State  `json:"state"` // socket state ("st" column)
}
// Parse parses entries of the given kind from r (a /proc/net/{tcp,tcp6,udp,udp6}
// stream), decoding addresses with the host's native endianness.
func Parse(r io.Reader, kind Kind) ([]Entry, error) {
	return ParseWithEndian(r, kind, cpu.IsBigEndian)
}
// ParseWithEndian parses entries of the given kind from r, decoding the
// hex-encoded addresses with the given endianness (isBE). The first non-empty
// line is the header; it determines the column positions of "local_address"
// and "st" for the data lines that follow.
func ParseWithEndian(r io.Reader, kind Kind, isBE bool) ([]Entry, error) {
	switch kind {
	case TCP, TCP6, UDP, UDP6:
	default:
		return nil, fmt.Errorf("unexpected kind %q", kind)
	}
	var entries []Entry
	sc := bufio.NewScanner(r)
	// As of kernel 5.11, ["local_address"] = 1
	fieldNames := make(map[string]int)
	for i := 0; sc.Scan(); i++ {
		line := strings.TrimSpace(sc.Text())
		if line == "" {
			continue
		}
		fields := strings.Fields(line)
		switch i {
		case 0:
			for j := range fields {
				fieldNames[fields[j]] = j
			}
			if _, ok := fieldNames["local_address"]; !ok {
				return nil, errors.New(`field "local_address" not found`)
			}
			if _, ok := fieldNames["st"]; !ok {
				return nil, errors.New(`field "st" not found`)
			}
		default:
			// Guard against truncated lines: the original indexed fields
			// unconditionally and would panic on malformed input.
			laIdx, stIdx := fieldNames["local_address"], fieldNames["st"]
			if laIdx >= len(fields) || stIdx >= len(fields) {
				return entries, fmt.Errorf("unexpected line %q: too few fields", line)
			}
			// localAddress is like "0100007F:053A"
			localAddress := fields[laIdx]
			ip, port, err := ParseAddressWithEndian(localAddress, isBE)
			if err != nil {
				return entries, err
			}
			st, err := strconv.ParseUint(fields[stIdx], 16, 8)
			if err != nil {
				return entries, err
			}
			entries = append(entries, Entry{
				Kind:  kind,
				IP:    ip,
				Port:  port,
				State: int(st),
			})
		}
	}
	if err := sc.Err(); err != nil {
		return entries, err
	}
	return entries, nil
}
// ParseAddress parses a /proc/net address string into an IP and a port,
// using the host's native endianness to decode the hex-encoded IP part.
//
// Little endian hosts:
// "0100007F:0050" (127.0.0.1:80)
// "000080FE00000000FF57A6705DC771FE:0050" ([fe80::70a6:57ff:fe71:c75d]:80)
// "00000000000000000000000000000000:0050" (0.0.0.0:80)
//
// Big endian hosts:
// "7F000001:0050" (127.0.0.1:80)
// "FE8000000000000070A657FFFE71C75D:0050" ([fe80::70a6:57ff:fe71:c75d]:80)
// "00000000000000000000000000000000:0050" (0.0.0.0:80)
//
// See https://serverfault.com/questions/592574/why-does-proc-net-tcp6-represents-1-as-1000
func ParseAddress(s string) (net.IP, uint16, error) {
	return ParseAddressWithEndian(s, cpu.IsBigEndian)
}
func ParseAddressWithEndian(s string, isBE bool) (net.IP, uint16, error) {
split := strings.SplitN(s, ":", 2)
if len(split) != 2 {
return nil, 0, fmt.Errorf("unparsable address %q", s)
}
switch l := len(split[0]); l {
case 8, 32:
default:
return nil, 0, fmt.Errorf("unparsable address %q, expected length of %q to be 8 or 32, got %d",
s, split[0], l)
}
ipBytes := make([]byte, len(split[0])/2) // 4 bytes (8 chars) or 16 bytes (32 chars)
for i := range len(split[0]) / 8 {
quartet := split[0][8*i : 8*(i+1)]
quartetB, err := hex.DecodeString(quartet) // surprisingly little endian, per 4 bytes, on little endian hosts
if err != nil {
return nil, 0, fmt.Errorf("unparsable address %q: unparsable quartet %q: %w", s, quartet, err)
}
if isBE {
for j := range quartetB {
ipBytes[4*i+j] = quartetB[j]
}
} else {
for j := range quartetB {
ipBytes[4*i+len(quartetB)-1-j] = quartetB[j]
}
}
}
ip := net.IP(ipBytes)
port64, err := strconv.ParseUint(split[1], 16, 16)
if err != nil {
return nil, 0, fmt.Errorf("unparsable address %q: unparsable port %q", s, split[1])
}
port := uint16(port64)
return ip, port, nil
}
// SPDX-FileCopyrightText: Copyright The Lima Authors
// SPDX-License-Identifier: Apache-2.0
package procnettcp
import (
"errors"
"os"
)
// ParseFiles parses /proc/net/{tcp,tcp6,udp,udp6}.
// Tables that do not exist (e.g. a protocol family disabled in the kernel)
// are skipped. Files are visited in a fixed order so the result order is
// deterministic; the original ranged over a map, which made it random.
func ParseFiles() ([]Entry, error) {
	files := []struct {
		name string
		kind Kind
	}{
		{"/proc/net/tcp", TCP},
		{"/proc/net/tcp6", TCP6},
		{"/proc/net/udp", UDP},
		{"/proc/net/udp6", UDP6},
	}
	var res []Entry
	for _, f := range files {
		parsed, err := parseFile(f.name, f.kind)
		if err != nil {
			if errors.Is(err, os.ErrNotExist) {
				continue
			}
			return res, err
		}
		res = append(res, parsed...)
	}
	return res, nil
}

// parseFile opens and parses a single /proc/net table, closing it on return.
func parseFile(file string, kind Kind) ([]Entry, error) {
	r, err := os.Open(file)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return Parse(r, kind)
}
// SPDX-FileCopyrightText: Copyright The Lima Authors
// SPDX-License-Identifier: Apache-2.0
package iso9660util
import (
"io"
"os"
"path"
"github.com/diskfs/go-diskfs/backend/file"
"github.com/diskfs/go-diskfs/filesystem"
"github.com/diskfs/go-diskfs/filesystem/iso9660"
"github.com/sirupsen/logrus"
)
// Entry describes one file to be placed into an ISO9660 image:
// Path is the location inside the image, Reader supplies its content.
type Entry struct {
	Path   string
	Reader io.Reader
}
// Write creates an ISO9660 image at isoPath with the given volume label,
// containing the files described by layout. Any pre-existing file at isoPath
// is removed first.
func Write(isoPath, label string, layout []Entry) error {
	if err := os.RemoveAll(isoPath); err != nil {
		return err
	}
	isoFile, err := os.Create(isoPath)
	if err != nil {
		return err
	}
	defer isoFile.Close()
	backendFile := file.New(isoFile, false)
	workdir, err := os.MkdirTemp("", "diskfs_iso")
	if err != nil {
		return err
	}
	// The workspace is only needed while building the image; the original
	// leaked one temporary directory per call.
	defer os.RemoveAll(workdir)
	logrus.Debugf("Creating iso file %s", isoFile.Name())
	logrus.Debugf("Using %s as workspace", workdir)
	fs, err := iso9660.Create(backendFile, 0, 0, 0, workdir)
	if err != nil {
		return err
	}
	for _, f := range layout {
		if _, err := WriteFile(fs, f.Path, f.Reader); err != nil {
			return err
		}
	}
	finalizeOptions := iso9660.FinalizeOptions{
		RockRidge:        true,
		VolumeIdentifier: label,
	}
	if err := fs.Finalize(finalizeOptions); err != nil {
		return err
	}
	// Explicit Close to surface write errors; the deferred Close then becomes
	// a harmless double close whose error is ignored.
	return isoFile.Close()
}
// WriteFile copies r into the image filesystem at pathStr, creating the
// parent directory first, and returns the number of bytes written.
func WriteFile(fs filesystem.FileSystem, pathStr string, r io.Reader) (int64, error) {
	// path.Dir returns "/" for top-level paths and "." for bare filenames;
	// neither needs a directory created (the original issued fs.Mkdir(".")
	// for bare filenames).
	if dir := path.Dir(pathStr); dir != "" && dir != "/" && dir != "." {
		if err := fs.Mkdir(dir); err != nil {
			return 0, err
		}
	}
	f, err := fs.OpenFile(pathStr, os.O_CREATE|os.O_RDWR)
	if err != nil {
		return 0, err
	}
	defer f.Close()
	return io.Copy(f, r)
}
// IsISO9660 reports whether the file at imagePath parses as an ISO9660 image.
// A parse failure means "not ISO9660" and is not reported as an error.
func IsISO9660(imagePath string) (bool, error) {
	imageFile, err := os.Open(imagePath)
	if err != nil {
		return false, err
	}
	defer imageFile.Close()
	fileInfo, err := imageFile.Stat()
	if err != nil {
		return false, err
	}
	backendFile := file.New(imageFile, true)
	_, readErr := iso9660.Read(backendFile, fileInfo.Size(), 0, 0)
	return readErr == nil, nil
}
// SPDX-FileCopyrightText: Copyright The Lima Authors
// SPDX-License-Identifier: Apache-2.0
// Package nativeimgutil provides image utilities that do not depend on `qemu-img` binary.
package nativeimgutil
import (
"errors"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
containerdfs "github.com/containerd/continuity/fs"
"github.com/docker/go-units"
"github.com/lima-vm/go-qcow2reader"
"github.com/lima-vm/go-qcow2reader/convert"
"github.com/lima-vm/go-qcow2reader/image/qcow2"
"github.com/lima-vm/go-qcow2reader/image/raw"
"github.com/sirupsen/logrus"
"github.com/lima-vm/lima/pkg/progressbar"
)
// Disk image size must be aligned to sector size. Qemu block layer is rounding
// up the size to 512 bytes. Apple virtualization framework reject disks not
// aligned to 512 bytes.
const sectorSize = 512

// RoundUp rounds size up to the next multiple of sectorSize.
func RoundUp(size int) int {
	return (size + sectorSize - 1) / sectorSize * sectorSize
}
// CreateRawDisk creates an empty raw data disk of (sector-aligned) size bytes.
// When the disk file already exists it is deliberately left untouched and nil
// is returned. The original expressed this with a combined condition ending in
// `return err` where err could be nil, which obscured the intent.
func CreateRawDisk(disk string, size int) error {
	if _, err := os.Stat(disk); err == nil {
		// Already exists: keep the current content.
		return nil
	} else if !errors.Is(err, fs.ErrNotExist) {
		return err
	}
	f, err := os.Create(disk)
	if err != nil {
		return err
	}
	defer f.Close()
	return f.Truncate(int64(RoundUp(size)))
}
// ResizeRawDisk resizes a raw data disk to (sector-aligned) size bytes.
func ResizeRawDisk(disk string, size int) error {
	return os.Truncate(disk, int64(RoundUp(size)))
}
// ConvertToRaw converts a source disk into a raw disk.
// source and dest may be same.
// ConvertToRaw is a NOP if source == dest, and no resizing is needed.
//
// When size is non-nil the resulting raw disk is expanded to *size bytes;
// size must not be smaller than the source image's virtual size.
func ConvertToRaw(source, dest string, size *int64, allowSourceWithBackingFile bool) error {
	srcF, err := os.Open(source)
	if err != nil {
		return err
	}
	defer srcF.Close()
	srcImg, err := qcow2reader.Open(srcF)
	if err != nil {
		return fmt.Errorf("failed to detect the format of %q: %w", source, err)
	}
	if size != nil && *size < srcImg.Size() {
		return fmt.Errorf("specified size %d is smaller than the original image size (%d) of %q", *size, srcImg.Size(), source)
	}
	logrus.Infof("Converting %q (%s) to a raw disk %q", source, srcImg.Type(), dest)
	switch t := srcImg.Type(); t {
	case raw.Type:
		// Already raw: a plain file copy (plus optional resize) suffices.
		if err = srcF.Close(); err != nil {
			return err
		}
		return convertRawToRaw(source, dest, size)
	case qcow2.Type:
		// Refuse qcow2 images with a backing file unless explicitly allowed
		// (presumably because the backing data would not be part of the
		// converted disk — confirm against go-qcow2reader's convert behavior).
		if !allowSourceWithBackingFile {
			q, ok := srcImg.(*qcow2.Qcow2)
			if !ok {
				return fmt.Errorf("unexpected qcow2 image %T", srcImg)
			}
			if q.BackingFile != "" {
				return fmt.Errorf("qcow2 image %q has an unexpected backing file: %q", source, q.BackingFile)
			}
		}
	default:
		logrus.Warnf("image %q has an unexpected format: %q", source, t)
	}
	if err = srcImg.Readable(); err != nil {
		return fmt.Errorf("image %q is not readable: %w", source, err)
	}
	// Create a tmp file because source and dest can be same.
	destTmpF, err := os.CreateTemp(filepath.Dir(dest), filepath.Base(dest)+".lima-*.tmp")
	if err != nil {
		return err
	}
	destTmp := destTmpF.Name()
	defer os.RemoveAll(destTmp)
	defer destTmpF.Close()
	// Truncating before copy eliminates the seeks during copy and provide a
	// hint to the file system that may minimize allocations and fragmentation
	// of the file.
	if err := MakeSparse(destTmpF, srcImg.Size()); err != nil {
		return err
	}
	// Copy
	bar, err := progressbar.New(srcImg.Size())
	if err != nil {
		return err
	}
	bar.Start()
	err = convert.Convert(destTmpF, srcImg, convert.Options{Progress: bar})
	bar.Finish()
	if err != nil {
		return fmt.Errorf("failed to convert image: %w", err)
	}
	// Resize
	if size != nil {
		logrus.Infof("Expanding to %s", units.BytesSize(float64(*size)))
		if err = MakeSparse(destTmpF, *size); err != nil {
			return err
		}
	}
	if err = destTmpF.Close(); err != nil {
		return err
	}
	// Rename destTmp into dest
	if err = os.RemoveAll(dest); err != nil {
		return err
	}
	return os.Rename(destTmp, dest)
}
// convertRawToRaw copies a raw image from source to dest (a no-op when the
// paths are identical) and, when size is non-nil, expands dest to *size bytes.
func convertRawToRaw(source, dest string, size *int64) error {
	if source != dest {
		// continuity attempts clonefile
		if err := containerdfs.CopyFile(dest, source); err != nil {
			return fmt.Errorf("failed to copy %q into %q: %w", source, dest, err)
		}
	}
	if size == nil {
		return nil
	}
	logrus.Infof("Expanding to %s", units.BytesSize(float64(*size)))
	destF, err := os.OpenFile(dest, os.O_RDWR, 0o644)
	if err != nil {
		return err
	}
	if err := MakeSparse(destF, *size); err != nil {
		_ = destF.Close()
		return err
	}
	return destF.Close()
}
func MakeSparse(f *os.File, n int64) error {
if _, err := f.Seek(n, io.SeekStart); err != nil {
return err
}
return f.Truncate(n)
}
// SPDX-FileCopyrightText: Copyright The Lima Authors
// SPDX-License-Identifier: Apache-2.0
package store
import (
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
"github.com/lima-vm/go-qcow2reader"
"github.com/lima-vm/lima/pkg/store/filenames"
)
// Disk describes an additional disk and, when locked, the instance using it.
type Disk struct {
	Name        string `json:"name"`
	Size        int64  `json:"size"`   // virtual size in bytes (from qcow2reader)
	Format      string `json:"format"` // image format reported by qcow2reader
	Dir         string `json:"dir"`
	Instance    string `json:"instance"`    // name of the locking instance ("" if unlocked)
	InstanceDir string `json:"instanceDir"` // directory of the locking instance
	MountPoint  string `json:"mountPoint"`
}
// InspectDisk returns metadata for the named additional disk: its directory,
// the size/format read from its data disk file, the instance currently
// holding it (via the in-use-by symlink, if any), and its mount point.
func InspectDisk(diskName string) (*Disk, error) {
	diskDir, err := DiskDir(diskName)
	if err != nil {
		return nil, err
	}
	disk := &Disk{
		Name:       diskName,
		Dir:        diskDir,
		MountPoint: fmt.Sprintf("/mnt/lima-%s", diskName),
	}
	dataDisk := filepath.Join(diskDir, filenames.DataDisk)
	if _, err := os.Stat(dataDisk); err != nil {
		return nil, err
	}
	if disk.Size, disk.Format, err = inspectDisk(dataDisk); err != nil {
		return nil, err
	}
	instDir, err := os.Readlink(filepath.Join(diskDir, filenames.InUseBy))
	switch {
	case err == nil:
		disk.Instance = filepath.Base(instDir)
		disk.InstanceDir = instDir
	case !errors.Is(err, fs.ErrNotExist):
		return nil, err
	}
	return disk, nil
}
// inspectDisk attempts to inspect the disk size and format with qcow2reader.
func inspectDisk(fName string) (int64, string, error) {
	f, err := os.Open(fName)
	if err != nil {
		return -1, "", err
	}
	defer f.Close()
	img, err := qcow2reader.Open(f)
	if err != nil {
		return -1, "", err
	}
	size := img.Size()
	if size < 0 {
		return -1, "", fmt.Errorf("cannot determine size of %q", fName)
	}
	return size, string(img.Type()), nil
}
// Lock marks the disk as in use by the given instance by creating the
// in-use-by symlink in the disk directory.
func (d *Disk) Lock(instanceDir string) error {
	return os.Symlink(instanceDir, filepath.Join(d.Dir, filenames.InUseBy))
}
// Unlock releases the disk by removing the in-use-by symlink.
func (d *Disk) Unlock() error {
	return os.Remove(filepath.Join(d.Dir, filenames.InUseBy))
}
// SPDX-FileCopyrightText: Copyright The Lima Authors
// SPDX-License-Identifier: Apache-2.0
package store
import (
"context"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"text/tabwriter"
"text/template"
"time"
"github.com/docker/go-units"
"github.com/sirupsen/logrus"
hostagentclient "github.com/lima-vm/lima/pkg/hostagent/api/client"
"github.com/lima-vm/lima/pkg/instance/hostname"
"github.com/lima-vm/lima/pkg/limayaml"
"github.com/lima-vm/lima/pkg/store/dirnames"
"github.com/lima-vm/lima/pkg/store/filenames"
"github.com/lima-vm/lima/pkg/textutil"
"github.com/lima-vm/lima/pkg/version/versionutil"
)
// Status is the lifecycle state of an instance.
type Status = string

const (
	StatusUnknown       Status = ""
	StatusUninitialized Status = "Uninitialized"
	StatusInstalling    Status = "Installing"
	StatusBroken        Status = "Broken"
	StatusStopped       Status = "Stopped"
	StatusRunning       Status = "Running"
)
// Instance is the inspected state of a Lima instance, combining the on-disk
// configuration with runtime information (PID files, host agent).
type Instance struct {
	Name string `json:"name"`
	// Hostname, not HostName (corresponds to SSH's naming convention)
	Hostname        string             `json:"hostname"`
	Status          Status             `json:"status"`
	Dir             string             `json:"dir"`
	VMType          limayaml.VMType    `json:"vmType"`
	Arch            limayaml.Arch      `json:"arch"`
	CPUType         string             `json:"cpuType"`
	CPUs            int                `json:"cpus,omitempty"`
	Memory          int64              `json:"memory,omitempty"` // bytes
	Disk            int64              `json:"disk,omitempty"`   // bytes
	Message         string             `json:"message,omitempty"`
	AdditionalDisks []limayaml.Disk    `json:"additionalDisks,omitempty"`
	Networks        []limayaml.Network `json:"network,omitempty"`
	SSHLocalPort    int                `json:"sshLocalPort,omitempty"`
	SSHConfigFile   string             `json:"sshConfigFile,omitempty"`
	HostAgentPID    int                `json:"hostAgentPID,omitempty"`
	DriverPID       int                `json:"driverPID,omitempty"`
	// Errors collects non-fatal inspection problems (see Inspect).
	Errors     []error            `json:"errors,omitempty"`
	Config     *limayaml.LimaYAML `json:"config,omitempty"`
	SSHAddress string             `json:"sshAddress,omitempty"`
	Protected  bool               `json:"protected"`
	LimaVersion string            `json:"limaVersion"`
	Param      map[string]string  `json:"param,omitempty"`
}
// Inspect returns err only when the instance does not exist (os.ErrNotExist).
// Other errors are returned as *Instance.Errors.
func Inspect(instName string) (*Instance, error) {
	inst := &Instance{
		Name: instName,
		// TODO: support customizing hostname
		Hostname: hostname.FromInstName(instName),
		Status:   StatusUnknown,
	}
	// InstanceDir validates the instName but does not check whether the instance exists
	instDir, err := InstanceDir(instName)
	if err != nil {
		return nil, err
	}
	// Make sure inst.Dir is set, even when YAML validation fails
	inst.Dir = instDir
	yamlPath := filepath.Join(instDir, filenames.LimaYAML)
	y, err := LoadYAMLByFilePath(yamlPath)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			return nil, err
		}
		inst.Errors = append(inst.Errors, err)
		return inst, nil
	}
	inst.Config = y
	inst.Arch = *y.Arch
	inst.VMType = *y.VMType
	inst.CPUType = y.CPUType[*y.Arch]
	inst.SSHAddress = "127.0.0.1"
	inst.SSHLocalPort = *y.SSH.LocalPort // maybe 0
	inst.SSHConfigFile = filepath.Join(instDir, filenames.SSHConfig)
	inst.HostAgentPID, err = ReadPIDFile(filepath.Join(instDir, filenames.HostAgentPID))
	if err != nil {
		inst.Status = StatusBroken
		inst.Errors = append(inst.Errors, err)
	}
	// When the host agent is alive, ask it over its socket for the actual
	// SSH port (the configured port may be 0, i.e. auto-assigned).
	if inst.HostAgentPID != 0 {
		haSock := filepath.Join(instDir, filenames.HostAgentSock)
		haClient, err := hostagentclient.NewHostAgentClient(haSock)
		if err != nil {
			inst.Status = StatusBroken
			inst.Errors = append(inst.Errors, fmt.Errorf("failed to connect to %q: %w", haSock, err))
		} else {
			ctx, cancel := context.WithTimeout(context.TODO(), 3*time.Second)
			defer cancel()
			info, err := haClient.Info(ctx)
			if err != nil {
				inst.Status = StatusBroken
				inst.Errors = append(inst.Errors, fmt.Errorf("failed to get Info from %q: %w", haSock, err))
			} else {
				inst.SSHLocalPort = info.SSHLocalPort
			}
		}
	}
	inst.CPUs = *y.CPUs
	// Parse failures for memory/disk sizes are tolerated; the fields stay 0.
	memory, err := units.RAMInBytes(*y.Memory)
	if err == nil {
		inst.Memory = memory
	}
	disk, err := units.RAMInBytes(*y.Disk)
	if err == nil {
		inst.Disk = disk
	}
	inst.AdditionalDisks = y.AdditionalDisks
	inst.Networks = y.Networks
	// 0 out values since not configurable on WSL2
	if inst.VMType == limayaml.WSL2 {
		inst.Memory = 0
		inst.CPUs = 0
		inst.Disk = 0
	}
	// The existence of the "protected" flag file marks the instance protected.
	protected := filepath.Join(instDir, filenames.Protected)
	if _, err := os.Lstat(protected); !errors.Is(err, os.ErrNotExist) {
		inst.Protected = true
	}
	inspectStatus(instDir, inst, y)
	// y.Message is a go template rendered with the instance data; a bad
	// template or a failed execution marks the instance broken.
	tmpl, err := template.New("format").Parse(y.Message)
	if err != nil {
		inst.Errors = append(inst.Errors, fmt.Errorf("message %q is not a valid template: %w", y.Message, err))
		inst.Status = StatusBroken
	} else {
		data, err := AddGlobalFields(inst)
		if err != nil {
			inst.Errors = append(inst.Errors, fmt.Errorf("cannot add global fields to instance data: %w", err))
			inst.Status = StatusBroken
		} else {
			var message strings.Builder
			err = tmpl.Execute(&message, data)
			if err != nil {
				inst.Errors = append(inst.Errors, fmt.Errorf("cannot execute template %q: %w", y.Message, err))
				inst.Status = StatusBroken
			} else {
				inst.Message = message.String()
			}
		}
	}
	// An unparsable recorded version is tolerated with a warning; a missing
	// version file is not an error.
	limaVersionFile := filepath.Join(instDir, filenames.LimaVersion)
	if version, err := os.ReadFile(limaVersionFile); err == nil {
		inst.LimaVersion = strings.TrimSpace(string(version))
		if _, err = versionutil.Parse(inst.LimaVersion); err != nil {
			logrus.Warnf("treating lima version %q from %q as very latest release", inst.LimaVersion, limaVersionFile)
		}
	} else if !errors.Is(err, os.ErrNotExist) {
		inst.Errors = append(inst.Errors, err)
	}
	inst.Param = y.Param
	return inst, nil
}
// inspectStatusWithPIDFiles derives the instance status from the host agent
// and driver PID files, unless a status has already been assigned.
func inspectStatusWithPIDFiles(instDir string, inst *Instance, y *limayaml.LimaYAML) {
	var err error
	inst.DriverPID, err = ReadPIDFile(filepath.Join(instDir, filenames.PIDFile(*y.VMType)))
	if err != nil {
		inst.Status = StatusBroken
		inst.Errors = append(inst.Errors, err)
	}
	if inst.Status == StatusUnknown {
		switch {
		case inst.HostAgentPID > 0 && inst.DriverPID > 0:
			inst.Status = StatusRunning
		case inst.HostAgentPID == 0 && inst.DriverPID == 0:
			inst.Status = StatusStopped
		case inst.HostAgentPID > 0 && inst.DriverPID == 0:
			inst.Errors = append(inst.Errors, errors.New("host agent is running but driver is not"))
			inst.Status = StatusBroken
		default:
			// Remaining case: the driver is running without the host agent.
			inst.Errors = append(inst.Errors, fmt.Errorf("%s driver is running but host agent is not", inst.VMType))
			inst.Status = StatusBroken
		}
	}
}
// ReadPIDFile returns 0 if the PID file does not exist or the process has already terminated
// (in which case the PID file will be removed).
func ReadPIDFile(path string) (int, error) {
b, err := os.ReadFile(path)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return 0, nil
}
return 0, err
}
pid, err := strconv.Atoi(strings.TrimSpace(string(b)))
if err != nil {
return 0, err
}
proc, err := os.FindProcess(pid)
if err != nil {
return 0, err
}
// os.FindProcess will only return running processes on Windows, exit early
if runtime.GOOS == "windows" {
return pid, nil
}
err = proc.Signal(syscall.Signal(0))
if err != nil {
if errors.Is(err, os.ErrProcessDone) {
_ = os.Remove(path)
return 0, nil
}
// We may not have permission to send the signal (e.g. to network daemon running as root).
// But if we get a permissions error, it means the process is still running.
if !errors.Is(err, os.ErrPermission) {
return 0, err
}
}
return pid, nil
}
// FormatData is the template context for formatting an instance: the
// embedded Instance plus host-level fields filled in by AddGlobalFields.
type FormatData struct {
	Instance
	HostOS       string
	HostArch     string
	LimaHome     string
	IdentityFile string
}
// FormatHelp lists the functions (from textutil.FuncHelp) that are available
// inside go templates passed as a format.
var FormatHelp = "\n" +
	"These functions are available to go templates:\n\n" +
	textutil.IndentString(2,
		strings.Join(textutil.FuncHelp, "\n")+"\n")
// AddGlobalFields wraps inst in a FormatData carrying host-level values
// (host OS/arch, Lima home directory, SSH identity file) for templates.
func AddGlobalFields(inst *Instance) (FormatData, error) {
	configDir, err := dirnames.LimaConfigDir()
	if err != nil {
		return FormatData{}, err
	}
	limaHome, err := dirnames.LimaDir()
	if err != nil {
		return FormatData{}, err
	}
	return FormatData{
		Instance:     *inst,
		HostOS:       runtime.GOOS,
		HostArch:     limayaml.NewArch(runtime.GOARCH),
		LimaHome:     limaHome,
		IdentityFile: filepath.Join(configDir, filenames.UserPrivateKey),
	}, nil
}
// PrintOptions configures PrintInstances' "table" output.
type PrintOptions struct {
	// AllFields disables the width-based hiding of columns.
	AllFields bool
	// TerminalWidth is used to decide which columns to hide (0 = unknown).
	TerminalWidth int
}
// PrintInstances prints instances in a requested format to a given io.Writer.
// Supported formats are "json", "yaml", "table", or a go template.
func PrintInstances(w io.Writer, instances []*Instance, format string, options *PrintOptions) error {
	switch format {
	case "json":
		format = "{{json .}}"
	case "yaml":
		format = "{{yaml .}}"
	case "table":
		// Count distinct VM types and architectures; a column whose value is
		// identical for every instance is a candidate for hiding.
		types := map[string]int{}
		archs := map[string]int{}
		for _, instance := range instances {
			types[instance.VMType]++
			archs[instance.Arch]++
		}
		all := options != nil && options.AllFields
		width := 0
		if options != nil {
			width = options.TerminalWidth
		}
		columnWidth := 8
		hideType := false
		hideArch := false
		hideDir := false
		columns := 1 // NAME
		columns += 2 // STATUS
		columns += 2 // SSH
		// can we still fit the remaining columns (7)
		if width == 0 || (columns+7)*columnWidth > width && !all {
			hideType = len(types) == 1
		}
		if !hideType {
			columns++ // VMTYPE
		}
		// only hide arch if it is the same as the host arch
		goarch := limayaml.NewArch(runtime.GOARCH)
		// can we still fit the remaining columns (6)
		if width == 0 || (columns+6)*columnWidth > width && !all {
			// instances[0] is safe: len(archs) == 1 implies at least one
			// instance, and && short-circuits otherwise.
			hideArch = len(archs) == 1 && instances[0].Arch == goarch
		}
		if !hideArch {
			columns++ // ARCH
		}
		columns++ // CPUS
		columns++ // MEMORY
		columns++ // DISK
		// can we still fit the remaining columns (2)
		if width != 0 && (columns+2)*columnWidth > width && !all {
			hideDir = true
		}
		if !hideDir {
			columns += 2 // DIR
		}
		_ = columns // the final count only feeds the width checks above
		w := tabwriter.NewWriter(w, 4, 8, 4, ' ', 0)
		fmt.Fprint(w, "NAME\tSTATUS\tSSH")
		if !hideType {
			fmt.Fprint(w, "\tVMTYPE")
		}
		if !hideArch {
			fmt.Fprint(w, "\tARCH")
		}
		fmt.Fprint(w, "\tCPUS\tMEMORY\tDISK")
		if !hideDir {
			fmt.Fprint(w, "\tDIR")
		}
		fmt.Fprintln(w)
		homeDir, err := os.UserHomeDir()
		if err != nil {
			return err
		}
		for _, instance := range instances {
			// Abbreviate the home directory to "~" in the DIR column.
			dir := instance.Dir
			if strings.HasPrefix(dir, homeDir) {
				dir = strings.Replace(dir, homeDir, "~", 1)
			}
			fmt.Fprintf(w, "%s\t%s\t%s",
				instance.Name,
				instance.Status,
				fmt.Sprintf("%s:%d", instance.SSHAddress, instance.SSHLocalPort),
			)
			if !hideType {
				fmt.Fprintf(w, "\t%s",
					instance.VMType,
				)
			}
			if !hideArch {
				fmt.Fprintf(w, "\t%s",
					instance.Arch,
				)
			}
			fmt.Fprintf(w, "\t%d\t%s\t%s",
				instance.CPUs,
				units.BytesSize(float64(instance.Memory)),
				units.BytesSize(float64(instance.Disk)),
			)
			if !hideDir {
				fmt.Fprintf(w, "\t%s",
					dir,
				)
			}
			fmt.Fprint(w, "\n")
		}
		return w.Flush()
	default:
		// NOP
	}
	// Any other format string is treated as a go template.
	tmpl, err := template.New("format").Funcs(textutil.TemplateFuncMap).Parse(format)
	if err != nil {
		return fmt.Errorf("invalid go template: %w", err)
	}
	for _, instance := range instances {
		data, err := AddGlobalFields(instance)
		if err != nil {
			return err
		}
		data.Message = strings.TrimSuffix(instance.Message, "\n")
		err = tmpl.Execute(w, data)
		if err != nil {
			return err
		}
		fmt.Fprintln(w)
	}
	return nil
}
// Protect protects the instance to prohibit accidental removal.
// Protect does not return an error even when the instance is already protected.
func (inst *Instance) Protect() error {
	// TODO: Do an equivalent of `chmod +a "everyone deny delete,delete_child,file_inherit,directory_inherit"`
	// https://github.com/lima-vm/lima/issues/1595
	flagFile := filepath.Join(inst.Dir, filenames.Protected)
	if err := os.WriteFile(flagFile, nil, 0o400); err != nil {
		return err
	}
	inst.Protected = true
	return nil
}
// Unprotect unprotects the instance.
// Unprotect does not return an error even when the instance is already unprotected.
func (inst *Instance) Unprotect() error {
	flagFile := filepath.Join(inst.Dir, filenames.Protected)
	if err := os.RemoveAll(flagFile); err != nil {
		return err
	}
	inst.Protected = false
	return nil
}
//go:build !windows
// SPDX-FileCopyrightText: Copyright The Lima Authors
// SPDX-License-Identifier: Apache-2.0
package store
import "github.com/lima-vm/lima/pkg/limayaml"
// inspectStatus fills in the instance status; on non-Windows hosts this is
// always derived from the host agent / driver PID files.
func inspectStatus(instDir string, inst *Instance, y *limayaml.LimaYAML) {
	inspectStatusWithPIDFiles(instDir, inst, y)
}
// GetSSHAddress returns the SSH address for an instance; on non-Windows hosts
// this is always the loopback address.
func GetSSHAddress(_ string) (string, error) {
	const loopback = "127.0.0.1"
	return loopback, nil
}
// SPDX-FileCopyrightText: Copyright The Lima Authors
// SPDX-License-Identifier: Apache-2.0
package store
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/lima-vm/lima/pkg/identifiers"
"github.com/lima-vm/lima/pkg/limayaml"
"github.com/lima-vm/lima/pkg/store/dirnames"
"github.com/lima-vm/lima/pkg/store/filenames"
)
// Directory returns the LimaDir, or "" when it cannot be determined.
func Directory() string {
	if limaDir, err := dirnames.LimaDir(); err == nil {
		return limaDir
	}
	return ""
}
// Validate checks the LimaDir: every instance directory must contain
// a lima.yaml file.
func Validate() error {
	limaDir, err := dirnames.LimaDir()
	if err != nil {
		return err
	}
	names, err := Instances()
	if err != nil {
		return err
	}
	for _, name := range names {
		// Each instance directory needs to have limayaml
		yamlPath := filepath.Join(limaDir, name, filenames.LimaYAML)
		if _, err := os.Stat(yamlPath); err != nil {
			return err
		}
	}
	return nil
}
// ValidateInstName checks if the name is a valid instance name. For this it needs to
// be a valid identifier, and not end in .yml or .yaml (case insensitively).
func ValidateInstName(name string) error {
	if err := identifiers.Validate(name); err != nil {
		return fmt.Errorf("instance name %q is not a valid identifier: %w", name, err)
	}
	switch lower := strings.ToLower(name); {
	case strings.HasSuffix(lower, ".yml"), strings.HasSuffix(lower, ".yaml"):
		return fmt.Errorf("instance name %q must not end with .yml or .yaml suffix", name)
	}
	return nil
}
// Instances returns the names of the instances under LimaDir.
// Hidden entries ("." or "_" prefix) and non-directories are skipped;
// a missing LimaDir yields a nil result rather than an error.
func Instances() ([]string, error) {
	limaDir, err := dirnames.LimaDir()
	if err != nil {
		return nil, err
	}
	entries, err := os.ReadDir(limaDir)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			return nil, nil
		}
		return nil, err
	}
	var names []string
	for _, e := range entries {
		name := e.Name()
		if !e.IsDir() || strings.HasPrefix(name, ".") || strings.HasPrefix(name, "_") {
			continue
		}
		names = append(names, name)
	}
	return names, nil
}
// Disks returns the names of the disks under LimaDisksDir.
// A missing disks directory is not an error and yields an empty result.
func Disks() ([]string, error) {
	limaDiskDir, err := dirnames.LimaDisksDir()
	if err != nil {
		return nil, err
	}
	limaDiskDirList, err := os.ReadDir(limaDiskDir)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			return nil, nil
		}
		return nil, err
	}
	// Return a nil slice for an empty directory, matching the previous behavior.
	if len(limaDiskDirList) == 0 {
		return nil, nil
	}
	// The number of entries is known, so pre-size the result.
	names := make([]string, 0, len(limaDiskDirList))
	for _, f := range limaDiskDirList {
		names = append(names, f.Name())
	}
	return names, nil
}
// InstanceDir returns the instance dir.
// InstanceDir does not check whether the instance exists.
func InstanceDir(name string) (string, error) {
	if err := ValidateInstName(name); err != nil {
		return "", err
	}
	limaDir, err := dirnames.LimaDir()
	if err != nil {
		return "", err
	}
	return filepath.Join(limaDir, name), nil
}
// DiskDir returns the directory for the named disk.
// DiskDir does not check whether the disk exists.
func DiskDir(name string) (string, error) {
	// Disk names only need to be valid identifiers; unlike instance names
	// (see ValidateInstName) they are not checked for .yml/.yaml suffixes.
	if err := identifiers.Validate(name); err != nil {
		return "", err
	}
	limaDisksDir, err := dirnames.LimaDisksDir()
	if err != nil {
		return "", err
	}
	dir := filepath.Join(limaDisksDir, name)
	return dir, nil
}
// LoadYAMLByFilePath loads and validates the yaml.
func LoadYAMLByFilePath(filePath string) (*limayaml.LimaYAML, error) {
	// The absolute path is required because it may be used to determine hostSocket locations.
	absPath, err := filepath.Abs(filePath)
	if err != nil {
		return nil, err
	}
	raw, err := os.ReadFile(absPath)
	if err != nil {
		return nil, err
	}
	cfg, err := limayaml.Load(raw, absPath)
	if err != nil {
		return nil, err
	}
	if err := limayaml.Validate(cfg, false); err != nil {
		return nil, err
	}
	return cfg, nil
}
// SPDX-FileCopyrightText: Copyright The Lima Authors
// SPDX-License-Identifier: Apache-2.0
package yqutil
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"strings"
"github.com/google/yamlfmt"
"github.com/google/yamlfmt/formatters/basic"
"github.com/mikefarah/yq/v4/pkg/yqlib"
"github.com/sirupsen/logrus"
logging "gopkg.in/op/go-logging.v1"
)
// ValidateContent decodes the content yaml, to check it for syntax errors.
func ValidateContent(content []byte) error {
	// Route yqlib's go-logging output to an in-memory backend so it does
	// not reach the console.
	memory := logging.NewMemoryBackend(0)
	logging.SetBackend(logging.AddModuleLevel(memory))
	yqlib.InitExpressionParser()
	decoder := yqlib.NewYamlDecoder(yqlib.ConfiguredYamlPreferences)
	if err := decoder.Init(bytes.NewReader(content)); err != nil {
		return err
	}
	// A clean EOF means the document decoded without syntax errors.
	if _, err := decoder.Decode(); !errors.Is(err, io.EOF) {
		return err
	}
	return nil
}
// EvaluateExpressionPlain evaluates the yq expression and returns the yq result.
func EvaluateExpressionPlain(expression, content string) (string, error) {
	if expression == "" {
		return content, nil
	}
	logrus.Debugf("Evaluating yq expression: %q", expression)
	// Capture yqlib's go-logging output in memory so it can be replayed
	// through logrus only when evaluation fails.
	memory := logging.NewMemoryBackend(0)
	logging.SetBackend(logging.AddModuleLevel(memory))
	yqlib.InitExpressionParser()
	encoderPrefs := yqlib.ConfiguredYamlPreferences.Copy()
	encoderPrefs.Indent = 2
	encoderPrefs.ColorsEnabled = false
	encoder := yqlib.NewYamlEncoder(encoderPrefs)
	decoder := yqlib.NewYamlDecoder(yqlib.ConfiguredYamlPreferences)
	out, err := yqlib.NewStringEvaluator().EvaluateAll(expression, content, encoder, decoder)
	if err == nil {
		return out, nil
	}
	// Replay the captured yqlib log records at the corresponding logrus level.
	logger := logrus.StandardLogger()
	for node := memory.Head(); node != nil; node = node.Next() {
		rec := node.Record
		entry := logrus.NewEntry(logger).WithTime(rec.Time)
		message := fmt.Sprintf("[%s] ", rec.Module) + rec.Message()
		switch rec.Level {
		case logging.CRITICAL:
			// NOTE(review): Fatal terminates the process via os.Exit;
			// preserved from the original mapping.
			entry.Fatal(message)
		case logging.ERROR:
			entry.Error(message)
		case logging.WARNING:
			entry.Warn(message)
		case logging.NOTICE, logging.INFO:
			entry.Info(message)
		case logging.DEBUG:
			entry.Debug(message)
		}
	}
	return "", err
}
// EvaluateExpression evaluates the yq expression and returns the output formatted with yamlfmt.
func EvaluateExpression(expression string, content []byte) ([]byte, error) {
	if expression == "" {
		return content, nil
	}
	formatter, err := yamlfmtBasicFormatter()
	if err != nil {
		return nil, err
	}
	// `ApplyFeatures()` is being called directly before passing content to `yqlib`.
	// This results in `ApplyFeatures()` being called twice with `FeatureApplyBefore`:
	// once here and once inside `formatter.Format`.
	// Currently, calling `ApplyFeatures()` with `FeatureApplyBefore` twice is not an issue,
	// but future changes to `yamlfmt` might cause problems if it is called twice.
	_, preprocessed, err := formatter.Features.ApplyFeatures(context.Background(), content, yamlfmt.FeatureApplyBefore)
	if err != nil {
		return nil, err
	}
	evaluated, err := EvaluateExpressionPlain(expression, string(preprocessed))
	if err != nil {
		return nil, err
	}
	return formatter.Format([]byte(evaluated))
}
// Join combines multiple yq expressions into one pipeline,
// separated by the yq pipe operator.
func Join(yqExprs []string) string {
	const pipe = " | "
	return strings.Join(yqExprs, pipe)
}
// yamlfmtBasicFormatter constructs a yamlfmt basic formatter with the
// configuration used for Lima's YAML output.
func yamlfmtBasicFormatter() (*basic.BasicFormatter, error) {
	factory := basic.BasicFormatterFactory{}
	formatter, err := factory.NewFormatter(map[string]any{
		"indentless_arrays":         true,
		"line_ending":               "lf", // prefer LF even on Windows
		"pad_line_comments":         2,
		"retain_line_breaks":        true,
		"retain_line_breaks_single": false,
	})
	if err != nil {
		return nil, err
	}
	if basicFormatter, ok := formatter.(*basic.BasicFormatter); ok {
		return basicFormatter, nil
	}
	return nil, fmt.Errorf("unexpected formatter type: %T", formatter)
}