// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/validate"
)
// CdnSource is the source type of a CDN piece; the allowed enum values
// are "supernode" and "source" (see the constants below).
//
// swagger:model CdnSource
type CdnSource string
// Allowed CdnSource enum values.
const (

	// CdnSourceSupernode captures enum value "supernode"
	CdnSourceSupernode CdnSource = "supernode"

	// CdnSourceSource captures enum value "source"
	CdnSourceSource CdnSource = "source"
)
// cdnSourceEnum caches the allowed CdnSource values as []interface{}
// for the go-openapi validate.Enum helper.
var cdnSourceEnum []interface{}

// init decodes the canonical enum list once at startup.
func init() {
	var values []CdnSource
	if err := json.Unmarshal([]byte(`["supernode","source"]`), &values); err != nil {
		panic(err)
	}
	for _, value := range values {
		cdnSourceEnum = append(cdnSourceEnum, value)
	}
}
// validateCdnSourceEnum reports an error when value is not one of the
// allowed CdnSource enum values.
func (m CdnSource) validateCdnSourceEnum(path, location string, value CdnSource) error {
	return validate.Enum(path, location, value, cdnSourceEnum)
}
// Validate validates this cdn source
func (m CdnSource) Validate(formats strfmt.Registry) error {
	var res []error
	// value enum
	// NOTE(review): the enum check returns immediately on error, so res is
	// never appended to and the CompositeValidationError branch below is
	// effectively dead; kept as generated (removing it would also orphan
	// the errors import).
	if err := m.validateCdnSourceEnum("", "body", m); err != nil {
		return err
	}
	if len(res) > 0 {
		return errors.CompositeValidationError(res...)
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// DfGetTask A download process initiated by dfget or other clients.
//
// swagger:model DfGetTask
type DfGetTask struct {

	// CID means the client ID. It maps to the specific dfget process.
	// When user wishes to download an image/file, user would start a dfget process to do this.
	// This dfget is treated a client and carries a client ID.
	// Thus, multiple dfget processes on the same peer have different CIDs.
	//
	CID string `json:"cID,omitempty"`

	// This attribute represents where the dfget requests come from. Dfget will pass
	// this field to supernode and supernode can do some checking and filtering via
	// black/white list mechanism to guarantee security, or some other purposes like debugging.
	//
	// Min Length: 1
	CallSystem string `json:"callSystem,omitempty"`

	// Dfdaemon tells whether it is a call from dfdaemon. dfdaemon is a long running
	// process which works for container engines. It translates the image
	// pulling request into raw requests into those dfget recognizes.
	//
	Dfdaemon bool `json:"dfdaemon,omitempty"`

	// Path is used in one peer A for uploading functionality. When peer B hopes
	// to get piece C from peer A, B must provide a URL for piece C.
	// Then when creating a task in supernode, peer A must provide this URL in request.
	//
	Path string `json:"path,omitempty"`

	// PeerID uniquely identifies a peer, and the cID uniquely identifies a
	// download task belonging to a peer. One peer can initiate multiple download tasks,
	// which means that one peer corresponds to multiple cIDs.
	//
	PeerID string `json:"peerID,omitempty"`

	// The size of pieces which is calculated as per the following strategy
	// 1. If file's total size is less than 200MB, then the piece size is 4MB by default.
	// 2. Otherwise, it equals to the smaller value between totalSize/100MB + 2 MB and 15MB.
	//
	PieceSize int32 `json:"pieceSize,omitempty"`

	// The status of Dfget download process.
	//
	// Enum: [WAITING RUNNING FAILED SUCCESS]
	Status string `json:"status,omitempty"`

	// IP address of supernode which the peer connects to
	SupernodeIP string `json:"supernodeIP,omitempty"`

	// ID of the task this download belongs to.
	TaskID string `json:"taskId,omitempty"`
}
// Validate validates this df get task, collecting every field error
// into a single composite validation error.
func (m *DfGetTask) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateCallSystem,
		m.validateStatus,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateCallSystem checks the optional callSystem field against its
// minimum-length constraint.
func (m *DfGetTask) validateCallSystem(formats strfmt.Registry) error {
	if swag.IsZero(m.CallSystem) {
		return nil // field is optional
	}
	return validate.MinLength("callSystem", "body", string(m.CallSystem), 1)
}
// dfGetTaskTypeStatusPropEnum caches the allowed status values for validation.
var dfGetTaskTypeStatusPropEnum []interface{}

// init decodes the canonical status enum list once at startup.
func init() {
	var values []string
	if err := json.Unmarshal([]byte(`["WAITING","RUNNING","FAILED","SUCCESS"]`), &values); err != nil {
		panic(err)
	}
	for _, value := range values {
		dfGetTaskTypeStatusPropEnum = append(dfGetTaskTypeStatusPropEnum, value)
	}
}
// Allowed DfGetTask status enum values.
const (

	// DfGetTaskStatusWAITING captures enum value "WAITING"
	DfGetTaskStatusWAITING string = "WAITING"

	// DfGetTaskStatusRUNNING captures enum value "RUNNING"
	DfGetTaskStatusRUNNING string = "RUNNING"

	// DfGetTaskStatusFAILED captures enum value "FAILED"
	DfGetTaskStatusFAILED string = "FAILED"

	// DfGetTaskStatusSUCCESS captures enum value "SUCCESS"
	DfGetTaskStatusSUCCESS string = "SUCCESS"
)
// validateStatusEnum reports an error when value is not an allowed status.
func (m *DfGetTask) validateStatusEnum(path, location string, value string) error {
	return validate.Enum(path, location, value, dfGetTaskTypeStatusPropEnum)
}
// validateStatus checks the optional status field against its enum.
func (m *DfGetTask) validateStatus(formats strfmt.Registry) error {
	if swag.IsZero(m.Status) {
		return nil // field is optional
	}
	return m.validateStatusEnum("status", "body", m.Status)
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
func (m *DfGetTask) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *DfGetTask) UnmarshalBinary(b []byte) error {
	var decoded DfGetTask
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// DragonflyVersion Version and build information of Dragonfly components.
//
// swagger:model DragonflyVersion
type DragonflyVersion struct {

	// Dragonfly components's architecture target
	Arch string `json:"Arch,omitempty"`

	// Build Date of Dragonfly components
	BuildDate string `json:"BuildDate,omitempty"`

	// Golang runtime version used to build the component
	GoVersion string `json:"GoVersion,omitempty"`

	// Dragonfly components's operating system
	OS string `json:"OS,omitempty"`

	// Git commit when building Dragonfly components
	Revision string `json:"Revision,omitempty"`

	// Version of Dragonfly components
	Version string `json:"Version,omitempty"`
}
// Validate validates this dragonfly version.
// All fields are unconstrained strings, so there is nothing to check.
func (m *DragonflyVersion) Validate(formats strfmt.Registry) error {
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
func (m *DragonflyVersion) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *DragonflyVersion) UnmarshalBinary(b []byte) error {
	var decoded DragonflyVersion
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// Error is a generic error payload carrying a human-readable message.
//
// swagger:model Error
type Error struct {

	// human-readable error message
	Message string `json:"message,omitempty"`
}
// Validate validates this error.
// The single message field is unconstrained, so there is nothing to check.
func (m *Error) Validate(formats strfmt.Registry) error {
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
func (m *Error) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *Error) UnmarshalBinary(b []byte) error {
	var decoded Error
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ErrorResponse It contains a code that identify which error occurred for client processing and a detailed error message to read.
//
// swagger:model ErrorResponse
type ErrorResponse struct {

	// the code of this error, it's convenient for client to process with certain error.
	//
	Code int64 `json:"code,omitempty"`

	// detailed human-readable error message
	Message string `json:"message,omitempty"`
}
// Validate validates this error response.
// Both fields are unconstrained, so there is nothing to check.
func (m *ErrorResponse) Validate(formats strfmt.Registry) error {
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
func (m *ErrorResponse) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *ErrorResponse) UnmarshalBinary(b []byte) error {
	var decoded ErrorResponse
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// HeartBeatRequest The request is to report peer to supernode to keep alive.
//
// swagger:model HeartBeatRequest
type HeartBeatRequest struct {

	// IP address which peer client carries
	// Format: ipv4
	IP strfmt.IPv4 `json:"IP,omitempty"`

	// CID means the client ID. It maps to the specific dfget process.
	// When user wishes to download an image/file, user would start a dfget process to do this.
	// This dfget is treated a client and carries a client ID.
	// Thus, multiple dfget processes on the same peer have different CIDs.
	//
	CID string `json:"cID,omitempty"`

	// when registering, dfget will setup one uploader process.
	// This one acts as a server for peer pulling tasks.
	// This port is which this server listens on.
	//
	// Maximum: 65000
	// Minimum: 15000
	Port int32 `json:"port,omitempty"`
}
// Validate validates this heart beat request, collecting every field
// error into a single composite validation error.
func (m *HeartBeatRequest) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateIP,
		m.validatePort,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateIP checks the optional IP field against the ipv4 format.
func (m *HeartBeatRequest) validateIP(formats strfmt.Registry) error {
	if swag.IsZero(m.IP) {
		return nil // field is optional
	}
	return validate.FormatOf("IP", "body", "ipv4", m.IP.String(), formats)
}
// validatePort checks the optional port field against its
// exclusive-bound-free [15000, 65000] range.
func (m *HeartBeatRequest) validatePort(formats strfmt.Registry) error {
	if swag.IsZero(m.Port) {
		return nil // field is optional
	}
	port := int64(m.Port)
	if err := validate.MinimumInt("port", "body", port, 15000, false); err != nil {
		return err
	}
	return validate.MaximumInt("port", "body", port, 65000, false)
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
func (m *HeartBeatRequest) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *HeartBeatRequest) UnmarshalBinary(b []byte) error {
	var decoded HeartBeatRequest
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// HeartBeatResponse is the supernode's reply to a peer heartbeat.
//
// swagger:model HeartBeatResponse
type HeartBeatResponse struct {

	// If peer do not register in supernode, set needRegister to be true, else set to be false.
	//
	NeedRegister bool `json:"needRegister,omitempty"`

	// The array of seed taskID which now are selected as seed for the peer. If peer have other seed file which
	// is not included in the array, these seed file should be weed out.
	//
	SeedTaskIds []string `json:"seedTaskIDs"`

	// The version of supernode. If supernode restarts, version should be different, so dfdaemon could know
	// the restart of supernode.
	//
	Version string `json:"version,omitempty"`
}
// Validate validates this heart beat response.
// No field carries a constraint, so there is nothing to check.
func (m *HeartBeatResponse) Validate(formats strfmt.Registry) error {
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
func (m *HeartBeatResponse) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *HeartBeatResponse) UnmarshalBinary(b []byte) error {
	var decoded HeartBeatResponse
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// NetworkInfoFetchRequest The request is to fetch p2p network info from supernode.
//
// swagger:model NetworkInfoFetchRequest
type NetworkInfoFetchRequest struct {

	// The urls is to filter the peer node, the url should be match with taskURL in TaskInfo.
	//
	Urls []string `json:"urls"`
}
// Validate validates this network info fetch request.
// The urls field is unconstrained, so there is nothing to check.
func (m *NetworkInfoFetchRequest) Validate(formats strfmt.Registry) error {
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
func (m *NetworkInfoFetchRequest) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *NetworkInfoFetchRequest) UnmarshalBinary(b []byte) error {
	var decoded NetworkInfoFetchRequest
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"strconv"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
)
// NetworkInfoFetchResponse The response is from supernode to peer which is requested to fetch p2p network info.
//
// swagger:model NetworkInfoFetchResponse
type NetworkInfoFetchResponse struct {

	// the matching peer nodes in the p2p network
	Nodes []*Node `json:"nodes"`
}
// Validate validates this network info fetch response, collecting every
// field error into a single composite validation error.
func (m *NetworkInfoFetchResponse) Validate(formats strfmt.Registry) error {
	var errs []error
	if err := m.validateNodes(formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateNodes validates every non-nil entry of the optional nodes
// slice, qualifying any validation error with its index.
func (m *NetworkInfoFetchResponse) validateNodes(formats strfmt.Registry) error {
	if swag.IsZero(m.Nodes) {
		return nil // field is optional
	}
	for i, node := range m.Nodes {
		if node == nil {
			continue
		}
		if err := node.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("nodes." + strconv.Itoa(i))
			}
			return err
		}
	}
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
func (m *NetworkInfoFetchResponse) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *NetworkInfoFetchResponse) UnmarshalBinary(b []byte) error {
	var decoded NetworkInfoFetchResponse
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"strconv"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
)
// Node The object shows the basic info of node and the task belongs to the node.
//
// swagger:model Node
type Node struct {

	// Basic node info
	Basic *PeerInfo `json:"basic,omitempty"`

	// free-form extra attributes attached to this node
	Extra map[string]string `json:"extra,omitempty"`

	// The load of node, which as the schedule weight in peer schedule.
	Load int64 `json:"load,omitempty"`

	// the tasks this node is serving
	Tasks []*TaskFetchInfo `json:"tasks"`
}
// Validate validates this node, collecting every field error into a
// single composite validation error.
func (m *Node) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateBasic,
		m.validateTasks,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateBasic validates the optional basic peer info, qualifying any
// validation error with the field name.
func (m *Node) validateBasic(formats strfmt.Registry) error {
	if m.Basic == nil {
		return nil // field is optional
	}
	err := m.Basic.Validate(formats)
	if err == nil {
		return nil
	}
	if ve, ok := err.(*errors.Validation); ok {
		return ve.ValidateName("basic")
	}
	return err
}
// validateTasks validates every non-nil entry of the optional tasks
// slice, qualifying any validation error with its index.
func (m *Node) validateTasks(formats strfmt.Registry) error {
	if swag.IsZero(m.Tasks) {
		return nil // field is optional
	}
	for i, task := range m.Tasks {
		if task == nil {
			continue
		}
		if err := task.Validate(formats); err != nil {
			if ve, ok := err.(*errors.Validation); ok {
				return ve.ValidateName("tasks." + strconv.Itoa(i))
			}
			return err
		}
	}
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
func (m *Node) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *Node) UnmarshalBinary(b []byte) error {
	var decoded Node
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// PeerCreateRequest PeerCreateRequest is used to create a peer instance in supernode.
// Usually, when dfget is going to register in supernode as a peer,
// it will send PeerCreateRequest to supernode.
//
// swagger:model PeerCreateRequest
type PeerCreateRequest struct {

	// IP address which peer client carries
	// Format: ipv4
	IP strfmt.IPv4 `json:"IP,omitempty"`

	// host name of peer client node, as a valid RFC 1123 hostname.
	// Min Length: 1
	// Format: hostname
	HostName strfmt.Hostname `json:"hostName,omitempty"`

	// when registering, dfget will setup one uploader process.
	// This one acts as a server for peer pulling tasks.
	// This port is which this server listens on.
	//
	// Maximum: 65000
	// Minimum: 15000
	Port int32 `json:"port,omitempty"`

	// version number of dfget binary.
	Version string `json:"version,omitempty"`
}
// Validate validates this peer create request, collecting every field
// error into a single composite validation error.
func (m *PeerCreateRequest) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateIP,
		m.validateHostName,
		m.validatePort,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateIP checks the optional IP field against the ipv4 format.
func (m *PeerCreateRequest) validateIP(formats strfmt.Registry) error {
	if swag.IsZero(m.IP) {
		return nil // field is optional
	}
	return validate.FormatOf("IP", "body", "ipv4", m.IP.String(), formats)
}
// validateHostName checks the optional hostName field against its
// minimum-length constraint and the hostname format.
func (m *PeerCreateRequest) validateHostName(formats strfmt.Registry) error {
	if swag.IsZero(m.HostName) {
		return nil // field is optional
	}
	if err := validate.MinLength("hostName", "body", string(m.HostName), 1); err != nil {
		return err
	}
	return validate.FormatOf("hostName", "body", "hostname", m.HostName.String(), formats)
}
// validatePort checks the optional port field against its
// [15000, 65000] range.
func (m *PeerCreateRequest) validatePort(formats strfmt.Registry) error {
	if swag.IsZero(m.Port) {
		return nil // field is optional
	}
	port := int64(m.Port)
	if err := validate.MinimumInt("port", "body", port, 15000, false); err != nil {
		return err
	}
	return validate.MaximumInt("port", "body", port, 65000, false)
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
func (m *PeerCreateRequest) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *PeerCreateRequest) UnmarshalBinary(b []byte) error {
	var decoded PeerCreateRequest
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// PeerCreateResponse ID of created peer.
//
// swagger:model PeerCreateResponse
type PeerCreateResponse struct {

	// Peer ID of the node which dfget locates on.
	// Every peer has a unique ID among peer network.
	// It is generated via host's hostname and IP address.
	//
	ID string `json:"ID,omitempty"`
}
// Validate validates this peer create response.
// The single ID field is unconstrained, so there is nothing to check.
func (m *PeerCreateResponse) Validate(formats strfmt.Registry) error {
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
func (m *PeerCreateResponse) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *PeerCreateResponse) UnmarshalBinary(b []byte) error {
	var decoded PeerCreateResponse
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// PeerInfo The detailed information of a peer in supernode.
//
// swagger:model PeerInfo
type PeerInfo struct {

	// ID of peer
	ID string `json:"ID,omitempty"`

	// IP address which peer client carries.
	// (TODO) make IP field contain more information, for example
	// WAN/LAN IP address for supernode to recognize.
	//
	// Format: ipv4
	IP strfmt.IPv4 `json:"IP,omitempty"`

	// the time to join the P2P network
	// Format: date-time
	Created strfmt.DateTime `json:"created,omitempty"`

	// host name of peer client node, as a valid RFC 1123 hostname.
	// Min Length: 1
	// Format: hostname
	HostName strfmt.Hostname `json:"hostName,omitempty"`

	// when registering, dfget will setup one uploader process.
	// This one acts as a server for peer pulling tasks.
	// This port is which this server listens on.
	//
	// Maximum: 65000
	// Minimum: 15000
	Port int32 `json:"port,omitempty"`

	// version number of dfget binary
	Version string `json:"version,omitempty"`
}
// Validate validates this peer info, collecting every field error into
// a single composite validation error.
func (m *PeerInfo) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateIP,
		m.validateCreated,
		m.validateHostName,
		m.validatePort,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateIP checks the optional IP field against the ipv4 format.
func (m *PeerInfo) validateIP(formats strfmt.Registry) error {
	if swag.IsZero(m.IP) {
		return nil // field is optional
	}
	return validate.FormatOf("IP", "body", "ipv4", m.IP.String(), formats)
}
// validateCreated checks the optional created field against the
// date-time format.
func (m *PeerInfo) validateCreated(formats strfmt.Registry) error {
	if swag.IsZero(m.Created) {
		return nil // field is optional
	}
	return validate.FormatOf("created", "body", "date-time", m.Created.String(), formats)
}
// validateHostName checks the optional hostName field against its
// minimum-length constraint and the hostname format.
func (m *PeerInfo) validateHostName(formats strfmt.Registry) error {
	if swag.IsZero(m.HostName) {
		return nil // field is optional
	}
	if err := validate.MinLength("hostName", "body", string(m.HostName), 1); err != nil {
		return err
	}
	return validate.FormatOf("hostName", "body", "hostname", m.HostName.String(), formats)
}
// validatePort checks the optional port field against its
// [15000, 65000] range.
func (m *PeerInfo) validatePort(formats strfmt.Registry) error {
	if swag.IsZero(m.Port) {
		return nil // field is optional
	}
	port := int64(m.Port)
	if err := validate.MinimumInt("port", "body", port, 15000, false); err != nil {
		return err
	}
	return validate.MaximumInt("port", "body", port, 65000, false)
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
func (m *PeerInfo) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *PeerInfo) UnmarshalBinary(b []byte) error {
	var decoded PeerInfo
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// PieceErrorRequest is the report dfget sends to supernode when a piece
// download from another peer failed or its content did not verify.
//
// swagger:model PieceErrorRequest
type PieceErrorRequest struct {

	// the IP address of the target Peer.
	// NOTE(review): the generated comment here duplicated dstPid's
	// ("the peer ID of the target Peer") — the field name and json tag
	// indicate an IP address; confirm against the swagger spec.
	//
	DstIP string `json:"dstIP,omitempty"`

	// the peer ID of the target Peer.
	//
	DstPid string `json:"dstPid,omitempty"`

	// the error type when failed to download from supernode that dfget will report to supernode
	//
	// Enum: [FILE_NOT_EXIST FILE_MD5_NOT_MATCH]
	ErrorType string `json:"errorType,omitempty"`

	// the MD5 value of piece which returned by the supernode that
	// in order to verify the correctness of the piece content which
	// downloaded from the other peers.
	//
	ExpectedMd5 string `json:"expectedMd5,omitempty"`

	// the range of specific piece in the task, example "0-45565".
	//
	Range string `json:"range,omitempty"`

	// the MD5 information of piece which calculated by the piece content
	// which downloaded from the target peer.
	//
	RealMd5 string `json:"realMd5,omitempty"`

	// the CID of the src Peer.
	//
	SrcCid string `json:"srcCid,omitempty"`

	// the taskID of the piece.
	//
	TaskID string `json:"taskId,omitempty"`
}
// Validate validates this piece error request, collecting every field
// error into a single composite validation error.
func (m *PieceErrorRequest) Validate(formats strfmt.Registry) error {
	var errs []error
	if err := m.validateErrorType(formats); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// pieceErrorRequestTypeErrorTypePropEnum caches the allowed errorType
// values for validation.
var pieceErrorRequestTypeErrorTypePropEnum []interface{}

// init decodes the canonical errorType enum list once at startup.
func init() {
	var values []string
	if err := json.Unmarshal([]byte(`["FILE_NOT_EXIST","FILE_MD5_NOT_MATCH"]`), &values); err != nil {
		panic(err)
	}
	for _, value := range values {
		pieceErrorRequestTypeErrorTypePropEnum = append(pieceErrorRequestTypeErrorTypePropEnum, value)
	}
}
// Allowed PieceErrorRequest errorType enum values.
const (

	// PieceErrorRequestErrorTypeFILENOTEXIST captures enum value "FILE_NOT_EXIST"
	PieceErrorRequestErrorTypeFILENOTEXIST string = "FILE_NOT_EXIST"

	// PieceErrorRequestErrorTypeFILEMD5NOTMATCH captures enum value "FILE_MD5_NOT_MATCH"
	PieceErrorRequestErrorTypeFILEMD5NOTMATCH string = "FILE_MD5_NOT_MATCH"
)
// validateErrorTypeEnum reports an error when value is not an allowed
// errorType.
func (m *PieceErrorRequest) validateErrorTypeEnum(path, location string, value string) error {
	return validate.Enum(path, location, value, pieceErrorRequestTypeErrorTypePropEnum)
}
// validateErrorType checks the optional errorType field against its enum.
func (m *PieceErrorRequest) validateErrorType(formats strfmt.Registry) error {
	if swag.IsZero(m.ErrorType) {
		return nil // field is optional
	}
	return m.validateErrorTypeEnum("errorType", "body", m.ErrorType)
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
func (m *PieceErrorRequest) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *PieceErrorRequest) UnmarshalBinary(b []byte) error {
	var decoded PieceErrorRequest
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// PieceInfo describes where and how to download one piece of a task
// from another peer.
// NOTE(review): the generated description ("Peer's detailed information
// in supernode") looks copy-pasted from PeerInfo; confirm against the
// swagger spec.
//
// swagger:model PieceInfo
type PieceInfo struct {

	// the peerID that dfget task should download from
	PID string `json:"pID,omitempty"`

	// The URL path to download the specific piece from the target peer's uploader.
	//
	Path string `json:"path,omitempty"`

	// When dfget needs to download a piece from another peer. Supernode will return a PieceInfo
	// that contains a peerIP. This peerIP represents the IP of this dfget's target peer.
	//
	PeerIP string `json:"peerIP,omitempty"`

	// When dfget needs to download a piece from another peer. Supernode will return a PieceInfo
	// that contains a peerPort. This peerPort represents the port of this dfget's target peer's uploader.
	//
	PeerPort int32 `json:"peerPort,omitempty"`

	// the MD5 information of piece which is generated by supernode when doing CDN cache.
	// This value will be returned to dfget in order to validate the piece's completeness.
	//
	PieceMD5 string `json:"pieceMD5,omitempty"`

	// the range of specific piece in the task, example "0-45565".
	//
	PieceRange string `json:"pieceRange,omitempty"`

	// The size of pieces which is calculated as per the following strategy
	// 1. If file's total size is less than 200MB, then the piece size is 4MB by default.
	// 2. Otherwise, it equals to the smaller value between totalSize/100MB + 2 MB and 15MB.
	//
	PieceSize int32 `json:"pieceSize,omitempty"`
}
// Validate validates this piece info.
// No field carries a constraint, so there is nothing to check.
func (m *PieceInfo) Validate(formats strfmt.Registry) error {
	return nil
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
func (m *PieceInfo) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
func (m *PieceInfo) UnmarshalBinary(b []byte) error {
	var decoded PieceInfo
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// PiecePullRequest request used to pull pieces that have not been downloaded.
//
// swagger:model PiecePullRequest
type PiecePullRequest struct {

	// dfgetTaskStatus indicates whether the dfgetTask is running.
	//
	// Enum: [STARTED RUNNING FINISHED]
	DfgetTaskStatus string `json:"dfgetTaskStatus,omitempty"`

	// the uploader peerID
	//
	DstPID string `json:"dstPID,omitempty"`

	// the range of specific piece in the task, example "0-45565".
	//
	PieceRange string `json:"pieceRange,omitempty"`

	// pieceResult It indicates whether the dfgetTask successfully download the piece.
	// It's only useful when `status` is `RUNNING`.
	//
	// Enum: [FAILED SUCCESS INVALID SEMISUC]
	PieceResult string `json:"pieceResult,omitempty"`
}
// Validate validates this piece pull request, collecting every field
// error into a single composite validation error.
func (m *PiecePullRequest) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateDfgetTaskStatus,
		m.validatePieceResult,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// piecePullRequestTypeDfgetTaskStatusPropEnum holds the allowed values of the
// dfgetTaskStatus property, materialized once at package init.
var piecePullRequestTypeDfgetTaskStatusPropEnum []interface{}

func init() {
	var values []string
	if err := json.Unmarshal([]byte(`["STARTED","RUNNING","FINISHED"]`), &values); err != nil {
		panic(err)
	}
	for _, value := range values {
		piecePullRequestTypeDfgetTaskStatusPropEnum = append(piecePullRequestTypeDfgetTaskStatusPropEnum, value)
	}
}

const (
	// PiecePullRequestDfgetTaskStatusSTARTED captures enum value "STARTED"
	PiecePullRequestDfgetTaskStatusSTARTED string = "STARTED"

	// PiecePullRequestDfgetTaskStatusRUNNING captures enum value "RUNNING"
	PiecePullRequestDfgetTaskStatusRUNNING string = "RUNNING"

	// PiecePullRequestDfgetTaskStatusFINISHED captures enum value "FINISHED"
	PiecePullRequestDfgetTaskStatusFINISHED string = "FINISHED"
)

// validateDfgetTaskStatusEnum reports whether value is an allowed
// dfgetTaskStatus enum member.
func (m *PiecePullRequest) validateDfgetTaskStatusEnum(path, location string, value string) error {
	return validate.Enum(path, location, value, piecePullRequestTypeDfgetTaskStatusPropEnum)
}

// validateDfgetTaskStatus validates the optional dfgetTaskStatus field
// against its enum.
func (m *PiecePullRequest) validateDfgetTaskStatus(formats strfmt.Registry) error {
	if swag.IsZero(m.DfgetTaskStatus) { // field is optional
		return nil
	}
	return m.validateDfgetTaskStatusEnum("dfgetTaskStatus", "body", m.DfgetTaskStatus)
}
// piecePullRequestTypePieceResultPropEnum holds the allowed values of the
// pieceResult property, materialized once at package init.
var piecePullRequestTypePieceResultPropEnum []interface{}

func init() {
	var values []string
	if err := json.Unmarshal([]byte(`["FAILED","SUCCESS","INVALID","SEMISUC"]`), &values); err != nil {
		panic(err)
	}
	for _, value := range values {
		piecePullRequestTypePieceResultPropEnum = append(piecePullRequestTypePieceResultPropEnum, value)
	}
}

const (
	// PiecePullRequestPieceResultFAILED captures enum value "FAILED"
	PiecePullRequestPieceResultFAILED string = "FAILED"

	// PiecePullRequestPieceResultSUCCESS captures enum value "SUCCESS"
	PiecePullRequestPieceResultSUCCESS string = "SUCCESS"

	// PiecePullRequestPieceResultINVALID captures enum value "INVALID"
	PiecePullRequestPieceResultINVALID string = "INVALID"

	// PiecePullRequestPieceResultSEMISUC captures enum value "SEMISUC"
	PiecePullRequestPieceResultSEMISUC string = "SEMISUC"
)

// validatePieceResultEnum reports whether value is an allowed pieceResult
// enum member.
func (m *PiecePullRequest) validatePieceResultEnum(path, location string, value string) error {
	return validate.Enum(path, location, value, piecePullRequestTypePieceResultPropEnum)
}

// validatePieceResult validates the optional pieceResult field against its enum.
func (m *PiecePullRequest) validatePieceResult(formats strfmt.Registry) error {
	if swag.IsZero(m.PieceResult) { // field is optional
		return nil
	}
	return m.validatePieceResultEnum("pieceResult", "body", m.PieceResult)
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
// A nil receiver marshals to nil bytes.
func (m *PiecePullRequest) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
// The receiver is only overwritten when decoding succeeds.
func (m *PiecePullRequest) UnmarshalBinary(b []byte) error {
	var decoded PiecePullRequest
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// PieceUpdateRequest request used to update piece attributes.
//
// swagger:model PieceUpdateRequest
type PieceUpdateRequest struct {
	// the downloader clientID
	//
	ClientID string `json:"clientID,omitempty"`

	// the uploader peerID
	//
	DstPID string `json:"dstPID,omitempty"`

	// pieceStatus indicates whether the peer task successfully download the piece.
	//
	// Enum: [FAILED SUCCESS INVALID SEMISUC]
	PieceStatus string `json:"pieceStatus,omitempty"`
}
// Validate validates this piece update request; pieceStatus is the only
// constrained field.
func (m *PieceUpdateRequest) Validate(formats strfmt.Registry) error {
	if err := m.validatePieceStatus(formats); err != nil {
		return errors.CompositeValidationError(err)
	}
	return nil
}
// pieceUpdateRequestTypePieceStatusPropEnum holds the allowed values of the
// pieceStatus property, materialized once at package init.
var pieceUpdateRequestTypePieceStatusPropEnum []interface{}

func init() {
	var values []string
	if err := json.Unmarshal([]byte(`["FAILED","SUCCESS","INVALID","SEMISUC"]`), &values); err != nil {
		panic(err)
	}
	for _, value := range values {
		pieceUpdateRequestTypePieceStatusPropEnum = append(pieceUpdateRequestTypePieceStatusPropEnum, value)
	}
}

const (
	// PieceUpdateRequestPieceStatusFAILED captures enum value "FAILED"
	PieceUpdateRequestPieceStatusFAILED string = "FAILED"

	// PieceUpdateRequestPieceStatusSUCCESS captures enum value "SUCCESS"
	PieceUpdateRequestPieceStatusSUCCESS string = "SUCCESS"

	// PieceUpdateRequestPieceStatusINVALID captures enum value "INVALID"
	PieceUpdateRequestPieceStatusINVALID string = "INVALID"

	// PieceUpdateRequestPieceStatusSEMISUC captures enum value "SEMISUC"
	PieceUpdateRequestPieceStatusSEMISUC string = "SEMISUC"
)

// validatePieceStatusEnum reports whether value is an allowed pieceStatus
// enum member.
func (m *PieceUpdateRequest) validatePieceStatusEnum(path, location string, value string) error {
	return validate.Enum(path, location, value, pieceUpdateRequestTypePieceStatusPropEnum)
}

// validatePieceStatus validates the optional pieceStatus field against its enum.
func (m *PieceUpdateRequest) validatePieceStatus(formats strfmt.Registry) error {
	if swag.IsZero(m.PieceStatus) { // field is optional
		return nil
	}
	return m.validatePieceStatusEnum("pieceStatus", "body", m.PieceStatus)
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
// A nil receiver marshals to nil bytes.
func (m *PieceUpdateRequest) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
// The receiver is only overwritten when decoding succeeds.
func (m *PieceUpdateRequest) UnmarshalBinary(b []byte) error {
	var decoded PieceUpdateRequest
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// PreheatCreateRequest Request option of creating a preheat task in supernode.
//
// swagger:model PreheatCreateRequest
type PreheatCreateRequest struct {
	// URL may contains some changeful query parameters such as authentication parameters. Dragonfly will
	// filter these parameter via 'filter'. The usage of it is that different URL may generate the same
	// download taskID.
	//
	Filter string `json:"filter,omitempty"`

	// If there is any authentication step of the remote server, the headers should contains authenticated information.
	// Dragonfly will send requests taking the headers to remote server.
	//
	Headers map[string]string `json:"headers,omitempty"`

	// This field is used for generating new downloading taskID to identify different downloading task of remote URL.
	//
	Identifier string `json:"identifier,omitempty"`

	// this must be image or file
	//
	// Required: true
	// Enum: [image file]
	Type *string `json:"type"`

	// the image or file location
	// Required: true
	// Min Length: 3
	URL *string `json:"url"`
}
// Validate validates this preheat create request, running every field
// validator and aggregating all failures into one composite error.
func (m *PreheatCreateRequest) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateType,
		m.validateURL,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// preheatCreateRequestTypeTypePropEnum holds the allowed values of the
// type property, materialized once at package init.
var preheatCreateRequestTypeTypePropEnum []interface{}

func init() {
	var values []string
	if err := json.Unmarshal([]byte(`["image","file"]`), &values); err != nil {
		panic(err)
	}
	for _, value := range values {
		preheatCreateRequestTypeTypePropEnum = append(preheatCreateRequestTypeTypePropEnum, value)
	}
}

const (
	// PreheatCreateRequestTypeImage captures enum value "image"
	PreheatCreateRequestTypeImage string = "image"

	// PreheatCreateRequestTypeFile captures enum value "file"
	PreheatCreateRequestTypeFile string = "file"
)

// validateTypeEnum reports whether value is an allowed type enum member.
func (m *PreheatCreateRequest) validateTypeEnum(path, location string, value string) error {
	return validate.Enum(path, location, value, preheatCreateRequestTypeTypePropEnum)
}

// validateType checks that the required type field is present and is one
// of "image" or "file".
func (m *PreheatCreateRequest) validateType(formats strfmt.Registry) error {
	if err := validate.Required("type", "body", m.Type); err != nil {
		return err
	}
	return m.validateTypeEnum("type", "body", *m.Type)
}
// validateURL checks that the required url field is present and at least
// three characters long.
func (m *PreheatCreateRequest) validateURL(formats strfmt.Registry) error {
	if err := validate.Required("url", "body", m.URL); err != nil {
		return err
	}
	return validate.MinLength("url", "body", string(*m.URL), 3)
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
// A nil receiver marshals to nil bytes.
func (m *PreheatCreateRequest) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
// The receiver is only overwritten when decoding succeeds.
func (m *PreheatCreateRequest) UnmarshalBinary(b []byte) error {
	var decoded PreheatCreateRequest
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// PreheatCreateResponse Response of a preheat creation request.
//
// swagger:model PreheatCreateResponse
type PreheatCreateResponse struct {
	// ID of the created preheat task; used later to query its progress.
	ID string `json:"ID,omitempty"`
}
// Validate validates this preheat create response. The type declares no
// constrained fields, so there is nothing to check.
func (m *PreheatCreateResponse) Validate(formats strfmt.Registry) error {
	return nil
}

// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
// A nil receiver marshals to nil bytes.
func (m *PreheatCreateResponse) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
// The receiver is only overwritten when decoding succeeds.
func (m *PreheatCreateResponse) UnmarshalBinary(b []byte) error {
	var decoded PreheatCreateResponse
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// PreheatInfo return detailed information of a preheat task in supernode. An image preheat task may contain multiple downloading
// task because that an image may have more than one layer.
//
// swagger:model PreheatInfo
type PreheatInfo struct {
	// ID of preheat task.
	//
	ID string `json:"ID,omitempty"`

	// the preheat task finish time
	// Format: date-time
	FinishTime strfmt.DateTime `json:"finishTime,omitempty"`

	// the preheat task start time
	// Format: date-time
	StartTime strfmt.DateTime `json:"startTime,omitempty"`

	// The status of preheat task.
	//   WAITING -----> RUNNING -----> SUCCESS
	//                     |--> FAILED
	// The initial status of a created preheat task is WAITING.
	// It's finished when a preheat task's status is FAILED or SUCCESS.
	// A finished preheat task's information can be queried within 24 hours.
	//
	Status PreheatStatus `json:"status,omitempty"`

	// the error message of preheat task when failed
	ErrorMsg string `json:"errorMsg,omitempty"`
}
// Validate validates this preheat info, running every field validator and
// aggregating all failures into one composite error.
func (m *PreheatInfo) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateFinishTime,
		m.validateStartTime,
		m.validateStatus,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateFinishTime checks the optional finishTime field against the
// registered date-time format.
func (m *PreheatInfo) validateFinishTime(formats strfmt.Registry) error {
	if swag.IsZero(m.FinishTime) { // field is optional
		return nil
	}
	return validate.FormatOf("finishTime", "body", "date-time", m.FinishTime.String(), formats)
}

// validateStartTime checks the optional startTime field against the
// registered date-time format.
func (m *PreheatInfo) validateStartTime(formats strfmt.Registry) error {
	if swag.IsZero(m.StartTime) { // field is optional
		return nil
	}
	return validate.FormatOf("startTime", "body", "date-time", m.StartTime.String(), formats)
}
// validateStatus delegates to the PreheatStatus enum validator, rewriting
// any validation error so its path points at the "status" field.
func (m *PreheatInfo) validateStatus(formats strfmt.Registry) error {
	if swag.IsZero(m.Status) { // field is optional
		return nil
	}
	err := m.Status.Validate(formats)
	if err == nil {
		return nil
	}
	if ve, ok := err.(*errors.Validation); ok {
		return ve.ValidateName("status")
	}
	return err
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
// A nil receiver marshals to nil bytes.
func (m *PreheatInfo) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
// The receiver is only overwritten when decoding succeeds.
func (m *PreheatInfo) UnmarshalBinary(b []byte) error {
	var decoded PreheatInfo
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/validate"
)
// PreheatStatus The status of preheat task.
//   WAITING -----> RUNNING -----> SUCCESS
//                     |--> FAILED
// The initial status of a created preheat task is WAITING.
// It's finished when a preheat task's status is FAILED or SUCCESS.
// A finished preheat task's information can be queried within 24 hours.
//
// swagger:model PreheatStatus
type PreheatStatus string

const (
	// PreheatStatusWAITING captures enum value "WAITING"
	PreheatStatusWAITING PreheatStatus = "WAITING"

	// PreheatStatusRUNNING captures enum value "RUNNING"
	PreheatStatusRUNNING PreheatStatus = "RUNNING"

	// PreheatStatusFAILED captures enum value "FAILED"
	PreheatStatusFAILED PreheatStatus = "FAILED"

	// PreheatStatusSUCCESS captures enum value "SUCCESS"
	PreheatStatusSUCCESS PreheatStatus = "SUCCESS"
)
// preheatStatusEnum holds the allowed PreheatStatus values for schema
// validation, materialized once at package init.
var preheatStatusEnum []interface{}

func init() {
	var values []PreheatStatus
	if err := json.Unmarshal([]byte(`["WAITING","RUNNING","FAILED","SUCCESS"]`), &values); err != nil {
		panic(err)
	}
	for _, value := range values {
		preheatStatusEnum = append(preheatStatusEnum, value)
	}
}

// validatePreheatStatusEnum reports whether value is an allowed
// PreheatStatus enum member.
func (m PreheatStatus) validatePreheatStatusEnum(path, location string, value PreheatStatus) error {
	return validate.Enum(path, location, value, preheatStatusEnum)
}
// Validate validates this preheat status against its allowed enum values.
//
// The generated original declared a `res []error` accumulator that was
// never appended to (the enum error was returned directly), so the
// CompositeValidationError branch was unreachable dead code; it has been
// removed without changing behavior.
func (m PreheatStatus) Validate(formats strfmt.Registry) error {
	// value enum is the only constraint on this type.
	if err := m.validatePreheatStatusEnum("", "body", m); err != nil {
		return err
	}
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ResultInfo The returned information from supernode.
//
// swagger:model ResultInfo
type ResultInfo struct {
	// the result code
	Code int32 `json:"code,omitempty"`

	// the result data; schema depends on the request, hence interface{}
	Data interface{} `json:"data,omitempty"`

	// the result msg
	Msg string `json:"msg,omitempty"`
}
// Validate validates this result info. The type declares no constrained
// fields, so there is nothing to check.
func (m *ResultInfo) Validate(formats strfmt.Registry) error {
	return nil
}

// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
// A nil receiver marshals to nil bytes.
func (m *ResultInfo) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
// The receiver is only overwritten when decoding succeeds.
func (m *ResultInfo) UnmarshalBinary(b []byte) error {
	var decoded ResultInfo
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"github.com/dragonflyoss/Dragonfly/supernode/config"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// TaskCreateRequest task create request
// swagger:model TaskCreateRequest
type TaskCreateRequest struct {
	// CID means the client ID. It maps to the specific dfget process.
	// When user wishes to download an image/file, user would start a dfget process to do this.
	// This dfget is treated a client and carries a client ID.
	// Thus, multiple dfget processes on the same peer have different CIDs.
	//
	CID string `json:"cID,omitempty"`

	// This attribute represents where the dfget requests come from. Dfget will pass
	// this field to supernode and supernode can do some checking and filtering via
	// black/white list mechanism to guarantee security, or some other purposes like debugging.
	//
	// Min Length: 1
	CallSystem string `json:"callSystem,omitempty"`

	// tells whether it is a call from dfdaemon. dfdaemon is a long running
	// process which works for container engines. It translates the image
	// pulling request into raw requests into those dfget recognizes.
	//
	Dfdaemon bool `json:"dfdaemon,omitempty"`

	// This attribute represents the length of resource, dfdaemon or dfget catches and calculates
	// this parameter from the headers of request URL. If fileLength is valid, the supernode need
	// not get the length of resource by accessing the rawURL.
	//
	FileLength int64 `json:"fileLength,omitempty"`

	// filter is used to filter request queries in URL.
	// For example, when a user wants to start to download a task which has a remote URL of
	// a.b.com/fileA?user=xxx&auth=yyy, user can add a filter parameter ["user", "auth"]
	// to filter the url to a.b.com/fileA. Then this parameter can potentially avoid repeatable
	// downloads, if there is already a task a.b.com/fileA.
	//
	Filter []string `json:"filter"`

	// extra HTTP headers sent to the rawURL.
	// This field is carried with the request to supernode.
	// Supernode will extract these HTTP headers, and set them in HTTP downloading requests
	// from source server as user's wish.
	//
	Headers map[string]string `json:"headers,omitempty"`

	// special attribute of remote source file. This field is used with taskURL to generate new taskID to
	// identify different downloading task of remote source file. For example, if user A and user B uses
	// the same taskURL and taskID to download file, A and B will share the same peer network to distribute files.
	// If user A additionally adds an identifier with taskURL, while user B still carries only taskURL, then A's
	// generated taskID is different from B, and the result is that two users use different peer networks.
	//
	Identifier string `json:"identifier,omitempty"`

	// md5 checksum for the resource to distribute. dfget catches this parameter from dfget's CLI
	// and passes it to supernode. When supernode finishes downloading file/image from the source location,
	// it will validate the source file with this md5 value to check whether this is a valid file.
	//
	Md5 string `json:"md5,omitempty"`

	// path is used in one peer A for uploading functionality. When peer B hopes
	// to get piece C from peer A, B must provide a URL for piece C.
	// Then when creating a task in supernode, peer A must provide this URL in request.
	//
	Path string `json:"path,omitempty"`

	// PeerID is used to uniquely identifies a peer which will be used to create a dfgetTask.
	// The value must be the value in the response after registering a peer.
	//
	PeerID string `json:"peerID,omitempty"`

	// This is the resource's URL which user uses dfget to download. The location of URL can be anywhere, LAN or WAN.
	// For image distribution, this is image layer's URL in image registry.
	// The resource url is provided by command line parameter.
	//
	RawURL string `json:"rawURL,omitempty"`

	// IP address of supernode which the peer connects to
	SupernodeIP string `json:"supernodeIP,omitempty"`

	// This attribute represents the digest of resource, dfdaemon or dfget catches this parameter
	// from the headers of request URL. The digest will be considered as the taskID if not null.
	//
	TaskID string `json:"taskId,omitempty"`

	// taskURL is generated from rawURL. rawURL may contains some queries or parameter, dfget will filter some queries via
	// --filter parameter of dfget. The usage of it is that different rawURL may generate the same taskID.
	//
	TaskURL string `json:"taskURL,omitempty"`

	// peer Pattern p2p or cdn
	PeerPattern config.Pattern `json:"peerPattern,omitempty"`
}
// Validate validates this task create request; callSystem is the only
// constrained field.
func (m *TaskCreateRequest) Validate(formats strfmt.Registry) error {
	if err := m.validateCallSystem(formats); err != nil {
		return errors.CompositeValidationError(err)
	}
	return nil
}

// validateCallSystem checks the optional callSystem field is at least one
// character long when present.
func (m *TaskCreateRequest) validateCallSystem(formats strfmt.Registry) error {
	if swag.IsZero(m.CallSystem) { // field is optional
		return nil
	}
	return validate.MinLength("callSystem", "body", string(m.CallSystem), 1)
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
// A nil receiver marshals to nil bytes.
func (m *TaskCreateRequest) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
// The receiver is only overwritten when decoding succeeds.
func (m *TaskCreateRequest) UnmarshalBinary(b []byte) error {
	var decoded TaskCreateRequest
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
)
// TaskCreateResponse response get from task creation request.
// swagger:model TaskCreateResponse
type TaskCreateResponse struct {
	// ID of the created task.
	ID string `json:"ID,omitempty"`

	// cdn source of the task; validated against the CdnSource enum
	CdnSource CdnSource `json:"cdnSource,omitempty"`

	// The length of the file dfget requests to download in bytes.
	//
	FileLength int64 `json:"fileLength,omitempty"`

	// The size of pieces which is calculated as per the following strategy
	// 1. If file's total size is less than 200MB, then the piece size is 4MB by default.
	// 2. Otherwise, it equals to the smaller value between totalSize/100MB + 2 MB and 15MB.
	//
	PieceSize int32 `json:"pieceSize,omitempty"`
}
// Validate validates this task create response; cdnSource is the only
// constrained field.
func (m *TaskCreateResponse) Validate(formats strfmt.Registry) error {
	if err := m.validateCdnSource(formats); err != nil {
		return errors.CompositeValidationError(err)
	}
	return nil
}

// validateCdnSource delegates to the CdnSource enum validator, rewriting
// any validation error so its path points at the "cdnSource" field.
func (m *TaskCreateResponse) validateCdnSource(formats strfmt.Registry) error {
	if swag.IsZero(m.CdnSource) { // field is optional
		return nil
	}
	err := m.CdnSource.Validate(formats)
	if err == nil {
		return nil
	}
	if ve, ok := err.(*errors.Validation); ok {
		return ve.ValidateName("cdnSource")
	}
	return err
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
// A nil receiver marshals to nil bytes.
func (m *TaskCreateResponse) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
// The receiver is only overwritten when decoding succeeds.
func (m *TaskCreateResponse) UnmarshalBinary(b []byte) error {
	var decoded TaskCreateResponse
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"strconv"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
)
// TaskFetchInfo It shows the task info and pieces info.
// swagger:model TaskFetchInfo
type TaskFetchInfo struct {
	// The pieces which should belong to the peer node
	Pieces []*PieceInfo `json:"pieces"`

	// the task these pieces belong to
	Task *TaskInfo `json:"task,omitempty"`
}
// Validate validates this task fetch info, running every field validator
// and aggregating all failures into one composite error.
func (m *TaskFetchInfo) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validatePieces,
		m.validateTask,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validatePieces validates each non-nil element of the pieces slice,
// qualifying any validation error with the element's index. It stops at
// the first failing element.
func (m *TaskFetchInfo) validatePieces(formats strfmt.Registry) error {
	if swag.IsZero(m.Pieces) { // field is optional
		return nil
	}
	for i, piece := range m.Pieces {
		if piece == nil { // nil entries are skipped, not rejected
			continue
		}
		err := piece.Validate(formats)
		if err == nil {
			continue
		}
		if ve, ok := err.(*errors.Validation); ok {
			return ve.ValidateName("pieces." + strconv.Itoa(i))
		}
		return err
	}
	return nil
}
// validateTask validates the embedded task, if set, rewriting any
// validation error so its path points at the "task" field.
func (m *TaskFetchInfo) validateTask(formats strfmt.Registry) error {
	if swag.IsZero(m.Task) || m.Task == nil { // field is optional
		return nil
	}
	err := m.Task.Validate(formats)
	if err == nil {
		return nil
	}
	if ve, ok := err.(*errors.Validation); ok {
		return ve.ValidateName("task")
	}
	return err
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
// A nil receiver marshals to nil bytes.
func (m *TaskFetchInfo) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
// The receiver is only overwritten when decoding succeeds.
func (m *TaskFetchInfo) UnmarshalBinary(b []byte) error {
	var decoded TaskFetchInfo
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// TaskInfo detailed information about task in supernode.
// swagger:model TaskInfo
type TaskInfo struct {
	// ID of the task.
	ID string `json:"ID,omitempty"`

	// This attribute represents the node as a seed node for the taskURL.
	//
	AsSeed bool `json:"asSeed,omitempty"`

	// The status of the created task related to CDN functionality.
	//
	// Enum: [WAITING RUNNING FAILED SUCCESS SOURCE_ERROR]
	CdnStatus string `json:"cdnStatus,omitempty"`

	// The length of the file dfget requests to download in bytes
	// which including the header and the trailer of each piece.
	//
	FileLength int64 `json:"fileLength,omitempty"`

	// extra HTTP headers sent to the rawURL.
	// This field is carried with the request to supernode.
	// Supernode will extract these HTTP headers, and set them in HTTP downloading requests
	// from source server as user's wish.
	//
	Headers map[string]string `json:"headers,omitempty"`

	// The length of the source file in bytes.
	//
	HTTPFileLength int64 `json:"httpFileLength,omitempty"`

	// special attribute of remote source file. This field is used with taskURL to generate new taskID to
	// identify different downloading task of remote source file. For example, if user A and user B uses
	// the same taskURL and taskID to download file, A and B will share the same peer network to distribute files.
	// If user A additionally adds an identifier with taskURL, while user B still carries only taskURL, then A's
	// generated taskID is different from B, and the result is that two users use different peer networks.
	//
	Identifier string `json:"identifier,omitempty"`

	// md5 checksum for the resource to distribute. dfget catches this parameter from dfget's CLI
	// and passes it to supernode. When supernode finishes downloading file/image from the source location,
	// it will validate the source file with this md5 value to check whether this is a valid file.
	//
	Md5 string `json:"md5,omitempty"`

	// The size of pieces which is calculated as per the following strategy
	// 1. If file's total size is less than 200MB, then the piece size is 4MB by default.
	// 2. Otherwise, it equals to the smaller value between totalSize/100MB + 2 MB and 15MB.
	//
	PieceSize int32 `json:"pieceSize,omitempty"`

	// piece total
	PieceTotal int32 `json:"pieceTotal,omitempty"`

	// This is the resource's URL which user uses dfget to download. The location of URL can be anywhere, LAN or WAN.
	// For image distribution, this is image layer's URL in image registry.
	// The resource url is provided by command line parameter.
	//
	RawURL string `json:"rawURL,omitempty"`

	// when supernode finishes downloading file/image from the source location,
	// the md5 sum of the source file will be calculated as the value of the realMd5.
	// And it will be used to compare with md5 value to check whether this is a valid file.
	//
	RealMd5 string `json:"realMd5,omitempty"`

	// taskURL is generated from rawURL. rawURL may contains some queries or parameter, dfget will filter some queries via
	// --filter parameter of dfget. The usage of it is that different rawURL may generate the same taskID.
	//
	TaskURL string `json:"taskURL,omitempty"`
}
// Validate validates this task info; cdnStatus is the only constrained field.
func (m *TaskInfo) Validate(formats strfmt.Registry) error {
	if err := m.validateCdnStatus(formats); err != nil {
		return errors.CompositeValidationError(err)
	}
	return nil
}
// taskInfoTypeCdnStatusPropEnum holds the allowed values of the cdnStatus
// property, materialized once at package init.
var taskInfoTypeCdnStatusPropEnum []interface{}

func init() {
	var values []string
	if err := json.Unmarshal([]byte(`["WAITING","RUNNING","FAILED","SUCCESS","SOURCE_ERROR"]`), &values); err != nil {
		panic(err)
	}
	for _, value := range values {
		taskInfoTypeCdnStatusPropEnum = append(taskInfoTypeCdnStatusPropEnum, value)
	}
}

const (
	// TaskInfoCdnStatusWAITING captures enum value "WAITING"
	TaskInfoCdnStatusWAITING string = "WAITING"

	// TaskInfoCdnStatusRUNNING captures enum value "RUNNING"
	TaskInfoCdnStatusRUNNING string = "RUNNING"

	// TaskInfoCdnStatusFAILED captures enum value "FAILED"
	TaskInfoCdnStatusFAILED string = "FAILED"

	// TaskInfoCdnStatusSUCCESS captures enum value "SUCCESS"
	TaskInfoCdnStatusSUCCESS string = "SUCCESS"

	// TaskInfoCdnStatusSOURCEERROR captures enum value "SOURCE_ERROR"
	TaskInfoCdnStatusSOURCEERROR string = "SOURCE_ERROR"
)

// validateCdnStatusEnum reports whether value is an allowed cdnStatus
// enum member.
func (m *TaskInfo) validateCdnStatusEnum(path, location string, value string) error {
	return validate.Enum(path, location, value, taskInfoTypeCdnStatusPropEnum)
}

// validateCdnStatus validates the optional cdnStatus field against its enum.
func (m *TaskInfo) validateCdnStatus(formats strfmt.Registry) error {
	if swag.IsZero(m.CdnStatus) { // field is optional
		return nil
	}
	return m.validateCdnStatusEnum("cdnStatus", "body", m.CdnStatus)
}
// MarshalBinary implements encoding.BinaryMarshaler via JSON encoding.
// A nil receiver marshals to nil bytes.
func (m *TaskInfo) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}

// UnmarshalBinary implements encoding.BinaryUnmarshaler via JSON decoding.
// The receiver is only overwritten when decoding succeeds.
func (m *TaskInfo) UnmarshalBinary(b []byte) error {
	var decoded TaskInfo
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// TaskMetricsRequest task metrics request
// swagger:model TaskMetricsRequest
type TaskMetricsRequest struct {
	// IP address which peer client carries
	IP string `json:"IP,omitempty"`

	// NOTE(review): presumably the reason why dfget fell back to downloading
	// directly from the source ("back-source"); the original generated comment
	// here was a copy-paste of the uploader-port description — confirm against
	// the swagger spec.
	//
	BacksourceReason string `json:"backsourceReason,omitempty"`

	// CID means the client ID. It maps to the specific dfget process.
	// When user wishes to download an image/file, user would start a dfget process to do this.
	// This dfget is treated a client and carries a client ID.
	// Thus, multiple dfget processes on the same peer have different CIDs.
	//
	CID string `json:"cID,omitempty"`

	// This attribute represents where the dfget requests come from. Dfget will pass
	// this field to supernode and supernode can do some checking and filtering via
	// black/white list mechanism to guarantee security, or some other purposes like debugging.
	//
	// Min Length: 1
	CallSystem string `json:"callSystem,omitempty"`

	// Duration for dfget task.
	//
	Duration float64 `json:"duration,omitempty"`

	// The length of the file dfget requests to download in bytes.
	FileLength int64 `json:"fileLength,omitempty"`

	// when registering, dfget will setup one uploader process.
	// This one acts as a server for peer pulling tasks.
	// This port is which this server listens on.
	//
	// Maximum: 65000
	// Minimum: 15000
	Port int32 `json:"port,omitempty"`

	// whether the download task success or not
	Success bool `json:"success,omitempty"`

	// ID of the task being reported on.
	// NOTE(review): the original generated comment said "IP address which peer
	// client carries", which appears to be a copy-paste error — confirm against
	// the swagger spec.
	TaskID string `json:"taskId,omitempty"`
}
// Validate validates this task metrics request.
// All field validators run; their errors are aggregated into one
// CompositeValidationError.
func (m *TaskMetricsRequest) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateCallSystem,
		m.validatePort,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateCallSystem checks the optional callSystem field against its
// minimum-length constraint (at least 1 character).
func (m *TaskMetricsRequest) validateCallSystem(formats strfmt.Registry) error {
	if swag.IsZero(m.CallSystem) { // not required
		return nil
	}
	// m.CallSystem is already a string; the redundant string(...) conversion
	// emitted by the generator has been dropped.
	return validate.MinLength("callSystem", "body", m.CallSystem, 1)
}
// validatePort checks the optional port field against its configured
// bounds (minimum 15000, maximum 65000).
func (m *TaskMetricsRequest) validatePort(formats strfmt.Registry) error {
	if swag.IsZero(m.Port) { // not required
		return nil
	}
	port := int64(m.Port)
	if err := validate.MinimumInt("port", "body", port, 15000, false); err != nil {
		return err
	}
	return validate.MaximumInt("port", "body", port, 65000, false)
}
// MarshalBinary interface implementation.
// A nil receiver serializes to a nil byte slice.
func (m *TaskMetricsRequest) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary interface implementation.
// The receiver is only overwritten when decoding succeeds.
func (m *TaskMetricsRequest) UnmarshalBinary(b []byte) error {
	var decoded TaskMetricsRequest
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// TaskRegisterRequest task register request
// swagger:model TaskRegisterRequest
type TaskRegisterRequest struct {
	// IP address which peer client carries
	// Format: ipv4
	IP strfmt.IPv4 `json:"IP,omitempty"`
	// This attribute represents the node as a seed node for the taskURL.
	//
	AsSeed bool `json:"asSeed,omitempty"`
	// CID means the client ID. It maps to the specific dfget process.
	// When user wishes to download an image/file, user would start a dfget process to do this.
	// This dfget is treated a client and carries a client ID.
	// Thus, multiple dfget processes on the same peer have different CIDs.
	//
	CID string `json:"cID,omitempty"`
	// This attribute represents where the dfget requests come from. Dfget will pass
	// this field to supernode and supernode can do some checking and filtering via
	// black/white list mechanism to guarantee security, or some other purposes like debugging.
	//
	// Min Length: 1
	CallSystem string `json:"callSystem,omitempty"`
	// tells whether it is a call from dfdaemon. dfdaemon is a long running
	// process which works for container engines. It translates the image
	// pulling request into raw requests into those dfget recognizes.
	//
	Dfdaemon bool `json:"dfdaemon,omitempty"`
	// This attribute represents the length of resource, dfdaemon or dfget catches and calculates
	// this parameter from the headers of request URL. If fileLength is valid, the supernode need
	// not get the length of resource by accessing the rawURL.
	//
	FileLength int64 `json:"fileLength,omitempty"`
	// extra HTTP headers sent to the rawURL.
	// This field is carried with the request to supernode.
	// Supernode will extract these HTTP headers, and set them in HTTP downloading requests
	// from source server as user's wish.
	//
	Headers []string `json:"headers"`
	// host name of peer client node.
	// Min Length: 1
	HostName string `json:"hostName,omitempty"`
	// special attribute of remote source file. This field is used with taskURL to generate new taskID to
	// identify different downloading task of remote source file. For example, if user A and user B uses
	// the same taskURL and taskID to download file, A and B will share the same peer network to distribute files.
	// If user A additionally adds an identifier with taskURL, while user B still carries only taskURL, then A's
	// generated taskID is different from B, and the result is that two users use different peer networks.
	//
	Identifier string `json:"identifier,omitempty"`
	// tells whether skip secure verify when supernode download the remote source file.
	//
	Insecure bool `json:"insecure,omitempty"`
	// md5 checksum for the resource to distribute. dfget catches this parameter from dfget's CLI
	// and passes it to supernode. When supernode finishes downloading file/image from the source location,
	// it will validate the source file with this md5 value to check whether this is a valid file.
	//
	Md5 string `json:"md5,omitempty"`
	// path is used in one peer A for uploading functionality. When peer B hopes
	// to get piece C from peer A, B must provide a URL for piece C.
	// Then when creating a task in supernode, peer A must provide this URL in request.
	//
	Path string `json:"path,omitempty"`
	// when registering, dfget will setup one uploader process.
	// This one acts as a server for peer pulling tasks.
	// This port is which this server listens on.
	//
	// Maximum: 65000
	// Minimum: 15000
	Port int32 `json:"port,omitempty"`
	// This is the resource's URL which user uses dfget to download. The location of URL can be anywhere, LAN or WAN.
	// For image distribution, this is image layer's URL in image registry.
	// The resource url is provided by command line parameter.
	//
	RawURL string `json:"rawURL,omitempty"`
	// The root ca cert from client used to download the remote source file.
	//
	RootCAs []strfmt.Base64 `json:"rootCAs"`
	// The address of supernode that the client can connect to
	SuperNodeIP string `json:"superNodeIp,omitempty"`
	// Dfdaemon or dfget could specify the taskID which will represent the key of this resource
	// in supernode.
	//
	TaskID string `json:"taskId,omitempty"`
	// taskURL is generated from rawURL. rawURL may contain some queries or parameters; dfget will filter some queries via
	// --filter parameter of dfget. The usage of it is that different rawURL may generate the same taskID.
	//
	TaskURL string `json:"taskURL,omitempty"`
	// version number of dfget binary.
	Version string `json:"version,omitempty"`
	// Pattern download must be p2p,cdn,source
	Pattern string `json:"pattern,omitempty"`
}
// Validate validates this task register request.
// All field validators run; their errors are aggregated into one
// CompositeValidationError.
func (m *TaskRegisterRequest) Validate(formats strfmt.Registry) error {
	var errs []error
	for _, check := range []func(strfmt.Registry) error{
		m.validateIP,
		m.validateCallSystem,
		m.validateHostName,
		m.validatePort,
		m.validateRootCAs,
	} {
		if err := check(formats); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) == 0 {
		return nil
	}
	return errors.CompositeValidationError(errs...)
}
// validateIP checks the optional IP field against the "ipv4" format.
func (m *TaskRegisterRequest) validateIP(formats strfmt.Registry) error {
	if swag.IsZero(m.IP) { // not required
		return nil
	}
	return validate.FormatOf("IP", "body", "ipv4", m.IP.String(), formats)
}
// validateCallSystem checks the optional callSystem field against its
// minimum-length constraint (at least 1 character).
func (m *TaskRegisterRequest) validateCallSystem(formats strfmt.Registry) error {
	if swag.IsZero(m.CallSystem) { // not required
		return nil
	}
	// m.CallSystem is already a string; the redundant string(...) conversion
	// emitted by the generator has been dropped.
	return validate.MinLength("callSystem", "body", m.CallSystem, 1)
}
// validateHostName checks the optional hostName field against its
// minimum-length constraint (at least 1 character).
func (m *TaskRegisterRequest) validateHostName(formats strfmt.Registry) error {
	if swag.IsZero(m.HostName) { // not required
		return nil
	}
	// m.HostName is already a string; the redundant string(...) conversion
	// emitted by the generator has been dropped.
	return validate.MinLength("hostName", "body", m.HostName, 1)
}
// validatePort checks the optional port field against its configured
// bounds (minimum 15000, maximum 65000).
func (m *TaskRegisterRequest) validatePort(formats strfmt.Registry) error {
	if swag.IsZero(m.Port) { // not required
		return nil
	}
	port := int64(m.Port)
	if err := validate.MinimumInt("port", "body", port, 15000, false); err != nil {
		return err
	}
	return validate.MaximumInt("port", "body", port, 65000, false)
}
// validateRootCAs validates the rootCAs field. Each element uses the
// "byte" (base64) format, which is already validated during JSON
// unmarshalling, so there is nothing further to check here.
func (m *TaskRegisterRequest) validateRootCAs(formats strfmt.Registry) error {
	// The generator emitted an empty per-element loop; it performed no
	// work and has been removed. Behavior is unchanged: always nil.
	return nil
}
// MarshalBinary interface implementation.
// A nil receiver serializes to a nil byte slice.
func (m *TaskRegisterRequest) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary interface implementation.
// The receiver is only overwritten when decoding succeeds.
func (m *TaskRegisterRequest) UnmarshalBinary(b []byte) error {
	var decoded TaskRegisterRequest
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
// Code generated by go-swagger; DO NOT EDIT.
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// TaskUpdateRequest request used to update task attributes.
// swagger:model TaskUpdateRequest
type TaskUpdateRequest struct {
	// ID of the peer which has finished to download the whole task.
	PeerID string `json:"peerID,omitempty"`
}
// Validate validates this task update request.
// The model has no constrained fields, so validation always succeeds.
func (m *TaskUpdateRequest) Validate(formats strfmt.Registry) error {
	return nil
}
// MarshalBinary interface implementation.
// A nil receiver serializes to a nil byte slice.
func (m *TaskUpdateRequest) MarshalBinary() ([]byte, error) {
	if m != nil {
		return swag.WriteJSON(m)
	}
	return nil, nil
}
// UnmarshalBinary interface implementation.
// The receiver is only overwritten when decoding succeeds.
func (m *TaskUpdateRequest) UnmarshalBinary(b []byte) error {
	var decoded TaskUpdateRequest
	if err := swag.ReadJSON(b, &decoded); err != nil {
		return err
	}
	*m = decoded
	return nil
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package config holds all Properties of dfget.
package config
import (
"encoding/json"
"fmt"
"os"
"os/user"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/dragonflyoss/Dragonfly/pkg/dflog"
"github.com/dragonflyoss/Dragonfly/pkg/errortypes"
"github.com/dragonflyoss/Dragonfly/pkg/fileutils"
"github.com/dragonflyoss/Dragonfly/pkg/netutils"
"github.com/dragonflyoss/Dragonfly/pkg/printer"
"github.com/dragonflyoss/Dragonfly/pkg/rate"
"github.com/dragonflyoss/Dragonfly/pkg/stringutils"
"github.com/pkg/errors"
"gopkg.in/gcfg.v1"
"gopkg.in/warnings.v0"
)
// ----------------------------------------------------------------------------
// Properties
// Properties holds all configurable Properties.
// Support INI(or conf) and YAML(since 0.3.0).
// Before 0.3.0, only support INI config and only have one property(node):
// [node]
// address=127.0.0.1,10.10.10.1
// Since 0.3.0, the INI config is just to be compatible with previous versions.
// The YAML config will have more properties:
// nodes:
// - 127.0.0.1=1
// - 10.10.10.1:8002=2
// localLimit: 20M
// totalLimit: 20M
// clientQueueSize: 6
type Properties struct {
	// Supernodes specify supernodes with weight.
	// The type of weight must be integer.
	// All weights will be divided by the greatest common divisor in the end.
	//
	// E.g. ["192.168.33.21=1", "192.168.33.22=2"]
	Supernodes []*NodeWeight `yaml:"nodes,omitempty" json:"nodes,omitempty"`
	// LocalLimit rate limit about a single download task, format: G(B)/g/M(B)/m/K(B)/k/B
	// pure number will also be parsed as Byte.
	LocalLimit rate.Rate `yaml:"localLimit,omitempty" json:"localLimit,omitempty"`
	// MinRate is the minimal rate about a single download task, format: G(B)/g/M(B)/m/K(B)/k/B
	// pure number will also be parsed as Byte.
	MinRate rate.Rate `yaml:"minRate,omitempty" json:"minRate,omitempty"`
	// TotalLimit rate limit about the whole host, format: G(B)/g/M(B)/m/K(B)/k/B
	// pure number will also be parsed as Byte.
	TotalLimit rate.Rate `yaml:"totalLimit,omitempty" json:"totalLimit,omitempty"`
	// ClientQueueSize is the size of client queue
	// which controls the number of pieces that can be processed simultaneously.
	// It is only useful when the Pattern equals "source".
	// The default value is 6.
	ClientQueueSize int `yaml:"clientQueueSize" json:"clientQueueSize,omitempty"`
	// WorkHome work home path,
	// default: `$HOME/.small-dragonfly`.
	WorkHome string `yaml:"workHome" json:"workHome,omitempty"`
	// LogConfig holds the logging configuration for dfget.
	LogConfig dflog.LogConfig `yaml:"logConfig" json:"logConfig"`
}
// NewProperties creates a new properties with default values.
// Supernodes is deliberately left nil: the SupernodeLocator fills it
// in a better way.
func NewProperties() *Properties {
	p := new(Properties)
	p.LocalLimit = DefaultLocalLimit
	p.MinRate = DefaultMinRate
	p.ClientQueueSize = DefaultClientQueueSize
	return p
}
// String renders the properties as JSON; an empty string is returned
// in the (practically impossible) case that marshalling fails.
func (p *Properties) String() string {
	if data, err := json.Marshal(p); err == nil {
		return string(data)
	}
	return ""
}
// Load loads properties from a config file, dispatching on the file
// extension to either the legacy INI loader or the YAML loader.
func (p *Properties) Load(path string) error {
	kind := p.fileType(path)
	if kind == "ini" {
		return p.loadFromIni(path)
	}
	if kind == "yaml" {
		return fileutils.LoadYaml(path, p)
	}
	return fmt.Errorf("extension of %s is not in 'conf/ini/yaml/yml'", path)
}
// loadFromIni reads the legacy INI format kept for compatibility with
// older releases; only the [node] section's address key is honored.
func (p *Properties) loadFromIni(path string) error {
	var oldConfig struct {
		Node struct {
			Address string
		}
	}
	if err := gcfg.ReadFileInto(&oldConfig, path); err != nil {
		// gcfg reports non-fatal issues as a warnings.List; only abort
		// when the error is of another type or carries a fatal entry.
		w, isWarningList := err.(warnings.List)
		if !isWarningList || w.Fatal != nil {
			return fmt.Errorf("read ini config from %s error: %v", path, err)
		}
	}
	nodes, err := ParseNodesString(oldConfig.Node.Address)
	if err != nil {
		return errors.Wrapf(err, "failed to handle nodes")
	}
	p.Supernodes = nodes
	return nil
}
// fileType maps a config file extension to a loader keyword
// ("ini" or "yaml"); any other extension is returned lowercased as-is
// so Load can include it in its error message.
func (p *Properties) fileType(path string) string {
	ext := strings.ToLower(filepath.Ext(path))
	if ext == ".conf" || ext == ".ini" {
		return "ini"
	}
	if ext == ".yaml" || ext == ".yml" {
		return "yaml"
	}
	return ext
}
// ----------------------------------------------------------------------------
// Config

// Config holds all the runtime config information.
type Config struct {
	// URL download URL.
	URL string `json:"url"`
	// Output full output path.
	Output string `json:"output"`
	// Timeout download timeout(second).
	Timeout time.Duration `json:"timeout,omitempty"`
	// Md5 expected file md5.
	Md5 string `json:"md5,omitempty"`
	// Identifier identify download task, it is available merely when md5 param not exist.
	Identifier string `json:"identifier,omitempty"`
	// CallSystem system name that executes dfget.
	CallSystem string `json:"callSystem,omitempty"`
	// Pattern download pattern, must be 'p2p' or 'cdn' or 'source',
	// default:`p2p`.
	Pattern string `json:"pattern,omitempty"`
	// Cacerts is the CA certificate to verify when supernode interacts with the source.
	Cacerts []string `json:"cacert,omitempty"`
	// Filter filters some query params of url, use char '&' to separate different params.
	// eg: -f 'key&sign' will filter 'key' and 'sign' query param.
	// in this way, different urls correspond one same download task that can use p2p mode.
	Filter []string `json:"filter,omitempty"`
	// Header of http request.
	// eg: --header='Accept: *' --header='Host: abc'.
	Header []string `json:"header,omitempty"`
	// Notbs indicates whether to not back source to download when p2p fails.
	Notbs bool `json:"notbs,omitempty"`
	// DFDaemon indicates whether the caller is from dfdaemon
	DFDaemon bool `json:"dfdaemon,omitempty"`
	// Insecure indicates whether skip secure verify when supernode interact with the source.
	Insecure bool `json:"insecure,omitempty"`
	// ShowBar shows progress bar, it conflicts with `--console`.
	ShowBar bool `json:"showBar,omitempty"`
	// Console shows log on console, it conflicts with `--showbar`.
	Console bool `json:"console,omitempty"`
	// Verbose indicates whether to be verbose.
	// If set true, log level will be 'debug'.
	Verbose bool `json:"verbose,omitempty"`
	// Nodes specify supernodes.
	Nodes []string `json:"-"`
	// StartTime is the time this download task started.
	StartTime time.Time `json:"-"`
	// Sign the value is 'Pid + float64(time.Now().UnixNano())/float64(time.Second) format: "%d-%.3f"'.
	// It is unique for downloading task, and is used for debugging.
	Sign string `json:"-"`
	// User is the username of the system currently logged in.
	User string `json:"-"`
	// ConfigFiles are the config file paths,
	// default:["/etc/dragonfly/dfget.yml","/etc/dragonfly.conf"].
	//
	// NOTE: It is recommended to use `/etc/dragonfly/dfget.yml` as default,
	// and the `/etc/dragonfly.conf` is just to ensure compatibility with previous versions.
	ConfigFiles []string `json:"-"`
	// RV stores the variables that are initialized and used at downloading task executing.
	RV RuntimeVariable `json:"-"`
	// BackSourceReason is the reason of backing to source.
	BackSourceReason int `json:"-"`
	// Embedded Properties holds all configurable properties.
	Properties
}
// String renders the config as JSON; an empty string is returned if
// marshalling fails.
func (cfg *Config) String() string {
	if data, err := json.Marshal(cfg); err == nil {
		return string(data)
	}
	return ""
}
// NewConfig creates and initializes a Config.
// It terminates the process with CodeGetUserError when the current
// system user cannot be determined.
func NewConfig() *Config {
	cfg := new(Config)
	cfg.StartTime = time.Now()
	// Sign uniquely identifies this download task: "<pid>-<seconds.millis>".
	cfg.Sign = fmt.Sprintf("%d-%.3f",
		os.Getpid(), float64(time.Now().UnixNano())/float64(time.Second))
	u, err := user.Current()
	if err != nil {
		printer.Println(fmt.Sprintf("get user error: %s", err))
		os.Exit(CodeGetUserError)
	}
	cfg.User = u.Username
	// -1 marks the file length as unknown until it is discovered.
	cfg.RV.FileLength = -1
	cfg.ConfigFiles = []string{DefaultYamlConfigFile, DefaultIniConfigFile}
	return cfg
}
// AssertConfig checks the config and returns the first error found:
// nil config, invalid URL, or unusable output path.
func AssertConfig(cfg *Config) error {
	switch {
	case cfg == nil:
		return errors.Wrap(errortypes.ErrNotInitialized, "runtime config")
	case !netutils.IsValidURL(cfg.URL):
		return errors.Wrapf(errortypes.ErrInvalidValue, "url: %v", cfg.URL)
	}
	if err := checkOutput(cfg); err != nil {
		return errors.Wrapf(errortypes.ErrInvalidValue, "output: %v", err)
	}
	return nil
}
// checkOutput derives and validates cfg.Output:
//  1. if empty, take the last path segment of the URL as the file name;
//  2. make the path absolute;
//  3. reject an existing directory at that path;
//  4. walk up from the output path until a writable ancestor directory
//     is found, failing on a permission error.
// This function must be called after checkURL.
func checkOutput(cfg *Config) error {
	if stringutils.IsEmptyStr(cfg.Output) {
		// Trim a trailing slash so "http://host/file/" still yields "file".
		url := strings.TrimRight(cfg.URL, "/")
		idx := strings.LastIndexByte(url, '/')
		if idx < 0 {
			return fmt.Errorf("get output from url[%s] error", cfg.URL)
		}
		cfg.Output = url[idx+1:]
	}
	if !filepath.IsAbs(cfg.Output) {
		absPath, err := filepath.Abs(cfg.Output)
		if err != nil {
			return fmt.Errorf("get absolute path[%s] error: %v", cfg.Output, err)
		}
		cfg.Output = absPath
	}
	// The output must name a file, not an existing directory.
	if f, err := os.Stat(cfg.Output); err == nil && f.IsDir() {
		return fmt.Errorf("path[%s] is directory but requires file path", cfg.Output)
	}
	// check permission
	// Walk up the directory tree: the nearest existing ancestor must be
	// writable. NOTE(review): syscall.Access expects an access mode
	// (e.g. W_OK); syscall.O_RDWR happens to share W_OK's value on Linux,
	// so this works, but W_OK would state the intent — confirm.
	for dir := cfg.Output; !stringutils.IsEmptyStr(dir); dir = filepath.Dir(dir) {
		if err := syscall.Access(dir, syscall.O_RDWR); err == nil {
			break
		} else if os.IsPermission(err) || dir == "/" {
			return fmt.Errorf("user[%s] path[%s] %v", cfg.User, cfg.Output, err)
		}
	}
	return nil
}
// RuntimeVariable stores the variables that are initialized and used
// at downloading task executing.
type RuntimeVariable struct {
	// MetaPath specify the path of meta file which store the meta info of the peer that should be persisted.
	// Only server port information is stored currently.
	MetaPath string
	// SystemDataDir specifies a default directory to store temporary files.
	SystemDataDir string
	// DataDir specifies a directory to store temporary files.
	// For now, the value of `DataDir` always equals `SystemDataDir`,
	// and there is no difference between them.
	// TODO: If there is insufficient disk space, we should set it to the `TargetDir`.
	DataDir string
	// RealTarget specifies the full target path whose value is equal to the `Output`.
	RealTarget string
	// StreamMode specifies that all pieces will be written to a Pipe, currently only support cdn mode.
	// when StreamMode is true, all data will write directly.
	// the mode is prepared for this issue https://github.com/dragonflyoss/Dragonfly/issues/1164
	// TODO: support p2p mode
	StreamMode bool
	// TargetDir is the directory of the RealTarget path.
	TargetDir string
	// TempTarget is a temp file path that try to determine
	// whether the `TargetDir` and the `DataDir` belong to the same disk by making a hard link.
	TempTarget string
	// Cid means the client ID which is a string composed of `localIP + "-" + sign` which represents a peer node.
	// NOTE: Multiple dfget processes on the same peer have different CIDs.
	Cid string
	// TaskURL is generated from rawURL which may contain some queries or parameters.
	// Dfget will filter some volatile queries such as timestamps via --filter parameter of dfget.
	TaskURL string
	// TaskFileName is a string composed of `the last element of RealTarget path + "-" + sign`.
	TaskFileName string
	// LocalIP is the native IP which can connect supernode successfully.
	LocalIP string
	// PeerPort is the TCP port on which the file upload service listens as a peer node.
	PeerPort int
	// FileLength is the length of the file to download.
	FileLength int64
	// DataExpireTime specifies the caching duration for which
	// cached files keep no accessed by any process.
	// After this period, the cached files will be deleted.
	DataExpireTime time.Duration
	// ServerAliveTime specifies the alive duration for which
	// uploader keeps no accessing by any uploading requests.
	// After this period, the uploader will automatically exit.
	ServerAliveTime time.Duration
}
// String renders the runtime variables as JSON; an empty string is
// returned if marshalling fails.
func (rv *RuntimeVariable) String() string {
	if data, err := json.Marshal(rv); err == nil {
		return string(data)
	}
	return ""
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package config
import (
"encoding/json"
"io/ioutil"
)
// NewMetaData creates a MetaData instance bound to the given meta file path.
func NewMetaData(metaPath string) *MetaData {
	md := new(MetaData)
	md.MetaPath = metaPath
	return md
}
// MetaData stores meta information that should be persisted.
type MetaData struct {
	// ServicePort the TCP port on which the file upload service listens.
	ServicePort int `json:"servicePort"`
	// MetaPath the path of meta file; it is not serialized itself.
	MetaPath string `json:"-"`
}
// Persist writes meta information into storage as JSON.
func (md *MetaData) Persist() error {
	content, err := json.Marshal(md)
	if err != nil {
		return err
	}
	// 0644: the meta file is plain data and should not carry executable
	// bits (previously written with 0755).
	return ioutil.WriteFile(md.MetaPath, content, 0644)
}
// Load loads meta information from storage, decoding the JSON file at
// MetaPath into the receiver.
func (md *MetaData) Load() error {
	content, err := ioutil.ReadFile(md.MetaPath)
	if err != nil {
		return err
	}
	return json.Unmarshal(content, md)
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package config
import (
"encoding/json"
"fmt"
"strconv"
"strings"
"github.com/dragonflyoss/Dragonfly/pkg/algorithm"
"github.com/dragonflyoss/Dragonfly/pkg/errortypes"
"github.com/pkg/errors"
)
// weightSeparator separates the node address from its weight in the
// "host:port=weight" notation.
const weightSeparator = '='

// SupernodesValue adapts a []*NodeWeight to the pflag.Value interface
// so supernodes can be set from the command line.
type SupernodesValue struct {
	Nodes *[]*NodeWeight
}

// NodeWeight is a supernode address paired with its selection weight.
type NodeWeight struct {
	Node string
	Weight int
}
// NewSupernodesValue binds the flag value to p and initializes the
// bound slice with val.
func NewSupernodesValue(p *[]*NodeWeight, val []*NodeWeight) *SupernodesValue {
	*p = val
	return &SupernodesValue{Nodes: p}
}
// GetDefaultSupernodesValue returns the default value of supernodes.
// default: ["127.0.0.1:8002=1"]
func GetDefaultSupernodesValue() []*NodeWeight {
	return []*NodeWeight{
		{
			Node:   fmt.Sprintf("%s:%d", DefaultSupernodeIP, DefaultSupernodePort),
			Weight: DefaultSupernodeWeight,
		},
	}
}
// String implements the pflag.Value interface: nodes rendered as
// "node=weight" entries joined by commas.
func (sv *SupernodesValue) String() string {
	parts := make([]string, 0, len(*sv.Nodes))
	for _, nw := range *sv.Nodes {
		parts = append(parts, nw.string())
	}
	return strings.Join(parts, ",")
}
// Set implements the pflag.Value interface.
// The bound slice is only replaced when parsing succeeds.
func (sv *SupernodesValue) Set(value string) error {
	parsed, err := ParseNodesString(value)
	if err == nil {
		*sv.Nodes = parsed
	}
	return err
}
// Type implements the pflag.Value interface.
// The returned name is what pflag shows in usage text for this flag.
func (sv *SupernodesValue) Type() string {
	return "supernodes"
}
// MarshalYAML implements the yaml.Marshaler interface.
// The node is emitted as a single "node=weight" string.
func (nw *NodeWeight) MarshalYAML() (interface{}, error) {
	return nw.string(), nil
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
// It expects a scalar string in "node=weight" (or bare "node") form;
// the receiver is only overwritten when parsing succeeds.
func (nw *NodeWeight) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var raw string
	if err := unmarshal(&raw); err != nil {
		return err
	}
	parsed, err := string2NodeWeight(raw)
	if err == nil {
		*nw = *parsed
	}
	return err
}
// MarshalJSON implements the json.Marshaler interface.
// The node is emitted as a JSON string in "node=weight" form.
func (nw *NodeWeight) MarshalJSON() ([]byte, error) {
	return json.Marshal(nw.string())
}
// UnmarshalJSON implements the json.Unmarshaler interface.
// It expects a JSON string in "node=weight" (or bare "node") form.
func (nw *NodeWeight) UnmarshalJSON(b []byte) error {
	str, err := strconv.Unquote(string(b))
	if err != nil {
		// Previously this error was discarded, which allowed non-string
		// JSON values to silently decode into a bogus default node
		// (":<defaultPort>"); fail fast instead.
		return err
	}
	nodeWeight, err := string2NodeWeight(str)
	if err != nil {
		return err
	}
	*nw = *nodeWeight
	return nil
}
// string renders the node in canonical "node=weight" form, the inverse
// of string2NodeWeight.
func (nw *NodeWeight) string() string {
	return fmt.Sprintf("%s%c%d", nw.Node, weightSeparator, nw.Weight)
}
// ParseNodesString parses the value in string type to []*NodeWeight.
// The input is a comma-separated list of "node=weight" entries.
func ParseNodesString(value string) ([]*NodeWeight, error) {
	return ParseNodesSlice(strings.Split(value, ","))
}
// ParseNodesSlice parses the value in string slice type to []*NodeWeight.
// Weights are normalized by dividing each by the greatest common divisor
// of all weights, so ["a=2","b=4"] yields weights 1 and 2.
// NOTE(review): an empty input slice hands an empty slice to
// algorithm.GCDSlice — confirm it cannot return 0 (division below).
func ParseNodesSlice(value []string) ([]*NodeWeight, error) {
	nodeWeightSlice := make([]*NodeWeight, 0)
	weightKey := make([]int, 0)
	// split node and weight
	for _, v := range value {
		nodeWeight, err := string2NodeWeight(v)
		if err != nil {
			return nil, errors.Wrapf(errortypes.ErrInvalidValue, "node: %s %v", v, err)
		}
		weightKey = append(weightKey, nodeWeight.Weight)
		nodeWeightSlice = append(nodeWeightSlice, nodeWeight)
	}
	var result []*NodeWeight
	// get the greatest common divisor of the weight slice and
	// divide all weights by the greatest common divisor.
	gcdNumber := algorithm.GCDSlice(weightKey)
	for _, v := range nodeWeightSlice {
		result = append(result, &NodeWeight{
			Node:   v.Node,
			Weight: v.Weight / gcdNumber,
		})
	}
	return result, nil
}
// NodeWeightSlice2StringSlice parses nodeWeight slice to string slice.
// Each NodeWeight.Node is repeated NodeWeight.Weight times so that
// weighted random selection over the result respects the weights.
func NodeWeightSlice2StringSlice(supernodes []*NodeWeight) []string {
	var nodes []string
	for _, nw := range supernodes {
		for n := nw.Weight; n > 0; n-- {
			nodes = append(nodes, nw.Node)
		}
	}
	return nodes
}
// string2NodeWeight parses a single "node=weight" entry, applying the
// default port when the node address carries none.
func string2NodeWeight(value string) (*NodeWeight, error) {
	node, weight, err := splitNodeAndWeight(value)
	if err == nil {
		node, err = handleDefaultPort(node)
	}
	if err != nil {
		return nil, err
	}
	return &NodeWeight{Node: node, Weight: weight}, nil
}
// splitNodeAndWeight returns the node address and weight which parsed by the given value.
// If no weight specified, the DefaultSupernodeWeight will be returned as the weight value.
func splitNodeAndWeight(value string) (string, int, error) {
	parts := strings.Split(value, string(weightSeparator))
	if len(parts) == 1 {
		return parts[0], DefaultSupernodeWeight, nil
	}
	if len(parts) != 2 {
		// More than one separator is malformed.
		return "", 0, errortypes.ErrInvalidValue
	}
	w, err := strconv.Atoi(parts[1])
	if err != nil {
		return "", 0, err
	}
	return parts[0], w, nil
}
func handleDefaultPort(node string) (string, error) {
result := strings.Split(node, ":")
splitLength := len(result)
if splitLength == 2 {
if result[0] == "" || result[1] == "" {
return "", errortypes.ErrInvalidValue
}
return node, nil
}
if splitLength == 1 {
return fmt.Sprintf("%s:%d", node, DefaultSupernodePort), nil
}
return "", errortypes.ErrInvalidValue
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package api
import (
"fmt"
"net/http"
"strconv"
"strings"
"time"
"github.com/dragonflyoss/Dragonfly/dfget/config"
"github.com/dragonflyoss/Dragonfly/pkg/httputils"
"github.com/dragonflyoss/Dragonfly/pkg/rangeutils"
"github.com/dragonflyoss/Dragonfly/version"
)
// DownloadRequest wraps the request which is sent to peer
// for downloading one piece.
type DownloadRequest struct {
	// Path is the request path; a full URL ("scheme://...") means the
	// piece is fetched from the source instead of a peer server.
	Path string
	// PieceRange is the byte range of the piece in "start-end" form.
	PieceRange string
	// PieceNum is the index of the piece within the task.
	PieceNum int
	// PieceSize is the size of the piece in bytes.
	PieceSize int32
	// Headers are extra HTTP headers to send with the request.
	Headers map[string]string
}
// DownloadAPI defines the download method between dfget and peer server.
type DownloadAPI interface {
	// Download downloads a piece and returns an HTTP response.
	// The caller is responsible for closing the response body.
	Download(ip string, port int, req *DownloadRequest, timeout time.Duration) (*http.Response, error)
}
// downloadAPI is an implementation of interface DownloadAPI.
type downloadAPI struct {
}

// Compile-time check that downloadAPI satisfies DownloadAPI.
var _ DownloadAPI = &downloadAPI{}
// NewDownloadAPI returns a new DownloadAPI.
func NewDownloadAPI() DownloadAPI {
	return new(downloadAPI)
}
// Download sends an HTTP GET to fetch one piece, either from a peer
// server at ip:port or directly from the source when req.Path is a
// full URL. The Range header is derived from the piece range.
func (d *downloadAPI) Download(ip string, port int, req *DownloadRequest, timeout time.Duration) (*http.Response, error) {
	if req == nil {
		// Fixed typo in the error message ("dwonload" -> "download").
		return nil, fmt.Errorf("nil download request")
	}
	headers := make(map[string]string)
	headers[config.StrPieceNum] = strconv.Itoa(req.PieceNum)
	headers[config.StrPieceSize] = fmt.Sprint(req.PieceSize)
	headers[config.StrUserAgent] = "dfget/" + version.DFGetVersion
	// Ranging over a nil map is a no-op, so the explicit nil check the
	// original carried was unnecessary.
	for k, v := range req.Headers {
		headers[k] = v
	}
	var (
		url      string
		rangeStr string
	)
	if isFromSource(req) {
		// Back-source download: req.Path is the full source URL and the
		// piece range must be rebased onto any user-supplied Range header.
		rangeStr = getRealRange(req.PieceRange, headers[config.StrRange])
		url = req.Path
	} else {
		rangeStr = req.PieceRange
		url = fmt.Sprintf("http://%s:%d%s", ip, port, req.Path)
	}
	headers[config.StrRange] = httputils.ConstructRangeStr(rangeStr)
	return httputils.HTTPGetTimeout(url, headers, timeout)
}
// isFromSource reports whether the request path is a full URL, meaning
// the piece must be downloaded from the source instead of a peer.
func isFromSource(req *DownloadRequest) bool {
	return strings.Index(req.Path, "://") >= 0
}
// getRealRange rebases a piece range onto the source range requested
// by the user.
// pieceRange: "start-end"
// rangeHeaderValue: "bytes=sourceStart-sourceEnd"
// return: "realStart-realEnd"; on an empty or unparsable header (or an
// unparsable piece range) the original pieceRange is returned unchanged.
func getRealRange(pieceRange string, rangeHeaderValue string) string {
	if rangeHeaderValue == "" {
		return pieceRange
	}
	parts := strings.Split(rangeHeaderValue, "=")
	if len(parts) != 2 {
		return pieceRange
	}
	lower, upper, err := rangeutils.ParsePieceIndex(parts[1])
	if err != nil {
		return pieceRange
	}
	start, end, err := rangeutils.ParsePieceIndex(pieceRange)
	if err != nil {
		return pieceRange
	}
	// Shift the piece range by the source start and clamp to the source end.
	realStart, realEnd := start+lower, end+lower
	if realEnd > upper {
		realEnd = upper
	}
	return fmt.Sprintf("%d-%d", realStart, realEnd)
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package api
import (
"encoding/json"
"fmt"
"time"
api_types "github.com/dragonflyoss/Dragonfly/apis/types"
"github.com/dragonflyoss/Dragonfly/dfget/types"
"github.com/dragonflyoss/Dragonfly/pkg/constants"
"github.com/dragonflyoss/Dragonfly/pkg/httputils"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
/* the url paths of supernode APIs*/
const (
peerRegisterPath = "/peer/registry"
peerPullPieceTaskPath = "/peer/task"
peerReportPiecePath = "/peer/piece/suc"
peerClientErrorPath = "/peer/piece/error"
peerServiceDownPath = "/peer/service/down"
metricsReportPath = "/task/metrics"
fetchP2PNetworkPath = "/peer/network"
peerHeartBeatPath = "/peer/heartbeat"
)
// NewSupernodeAPI creates a new instance of SupernodeAPI with default value.
func NewSupernodeAPI() SupernodeAPI {
	api := &supernodeAPI{}
	api.Scheme = "http"
	api.Timeout = 5 * time.Second
	api.HTTPClient = httputils.DefaultHTTPClient
	return api
}
// SupernodeAPI defines the communication methods between supernode and dfget.
type SupernodeAPI interface {
	// Register registers the client to the supernode and creates a download task.
	Register(node string, req *types.RegisterRequest) (resp *types.RegisterResponse, e error)
	// PullPieceTask pulls a piece downloading task from the supernode.
	PullPieceTask(node string, req *types.PullPieceTaskRequest) (resp *types.PullPieceTaskResponse, e error)
	// ReportPiece reports the result of a piece download to the supernode.
	ReportPiece(node string, req *types.ReportPieceRequest) (resp *types.BaseResponse, e error)
	// ServiceDown notifies the supernode that the local peer is going down.
	ServiceDown(node string, taskID string, cid string) (resp *types.BaseResponse, e error)
	// ReportClientError reports a client-side download error to the supernode.
	ReportClientError(node string, req *types.ClientErrorRequest) (resp *types.BaseResponse, e error)
	// ReportMetrics reports task metrics to the supernode.
	ReportMetrics(node string, req *api_types.TaskMetricsRequest) (resp *types.BaseResponse, e error)
	// HeartBeat sends a heartbeat request to the supernode.
	HeartBeat(node string, req *api_types.HeartBeatRequest) (resp *types.HeartBeatResponse, err error)
	// FetchP2PNetworkInfo fetches p2p network info from the supernode (paged by start/limit).
	FetchP2PNetworkInfo(node string, start int, limit int, req *api_types.NetworkInfoFetchRequest) (resp *api_types.NetworkInfoFetchResponse, e error)
	// ReportResource reports the local resource via the register endpoint with the X-report-resource header.
	ReportResource(node string, req *types.RegisterRequest) (resp *types.RegisterResponse, err error)
	// ApplyForSeedNode applies to be a seed node; if selected, resp.AsSeed is set true.
	ApplyForSeedNode(node string, req *types.RegisterRequest) (resp *types.RegisterResponse, err error)
	// ReportResourceDeleted notifies the supernode that a local resource was deleted.
	ReportResourceDeleted(node string, taskID string, cid string) (resp *types.BaseResponse, err error)
}
// supernodeAPI is the default implementation of SupernodeAPI.
type supernodeAPI struct {
	// Scheme is the URL scheme used to reach the supernode (set to "http" by NewSupernodeAPI).
	Scheme string
	// Timeout applies to every HTTP request made by this client.
	Timeout time.Duration
	// HTTPClient performs the actual HTTP calls.
	HTTPClient httputils.SimpleHTTPClient
}

// compile-time check that supernodeAPI satisfies SupernodeAPI.
var _ SupernodeAPI = &supernodeAPI{}
// Register sends a request to the supernode to register itself as a peer
// and create downloading task.
func (api *supernodeAPI) Register(node string, req *types.RegisterRequest) (
	resp *types.RegisterResponse, e error) {
	url := fmt.Sprintf("%s://%s%s", api.Scheme, node, peerRegisterPath)

	code, body, err := api.HTTPClient.PostJSON(url, req, api.Timeout)
	if err != nil {
		return nil, err
	}
	if !httputils.HTTPStatusOk(code) {
		return nil, fmt.Errorf("%d:%s", code, body)
	}

	result := new(types.RegisterResponse)
	if err = json.Unmarshal(body, result); err != nil {
		return nil, err
	}
	return result, nil
}
// PullPieceTask pull a piece downloading task from supernode, and get a
// response that describes from which peer to download.
func (api *supernodeAPI) PullPieceTask(node string, req *types.PullPieceTaskRequest) (
	resp *types.PullPieceTaskResponse, e error) {
	query := httputils.ParseQuery(req)
	url := fmt.Sprintf("%s://%s%s?%s", api.Scheme, node, peerPullPieceTaskPath, query)

	result := new(types.PullPieceTaskResponse)
	if err := api.get(url, result); err != nil {
		return nil, err
	}
	return result, nil
}
// ReportPiece reports the status of piece downloading task to supernode.
// It returns an error both when the request fails and when the supernode
// answers with an unexpected response code.
func (api *supernodeAPI) ReportPiece(node string, req *types.ReportPieceRequest) (
	resp *types.BaseResponse, e error) {
	url := fmt.Sprintf("%s://%s%s?%s",
		api.Scheme, node, peerReportPiecePath, httputils.ParseQuery(req))

	resp = new(types.BaseResponse)
	if e = api.get(url, resp); e != nil {
		logrus.Errorf("failed to report piece{taskid:%s,range:%s},err: %v", req.TaskID, req.PieceRange, e)
		return nil, errors.Wrapf(e, "failed to report piece{taskid:%s,range:%s}", req.TaskID, req.PieceRange)
	}
	if resp.Code != constants.CodeGetPieceReport {
		logrus.Errorf("failed to report piece{taskid:%s,range:%s} to supernode: api response code is %d not equal to %d", req.TaskID, req.PieceRange, resp.Code, constants.CodeGetPieceReport)
		// BUG FIX: e is nil on this branch, so errors.Wrapf(e, ...) returned
		// nil and the caller observed a silent (nil, nil). Build a real error.
		return nil, errors.Errorf("failed to report piece{taskid:%s,range:%s} to supernode: api response code is %d not equal to %d", req.TaskID, req.PieceRange, resp.Code, constants.CodeGetPieceReport)
	}
	return
}
// ServiceDown reports the status of the local peer to supernode.
// An unexpected response code is only logged, not returned as an error.
func (api *supernodeAPI) ServiceDown(node string, taskID string, cid string) (
	resp *types.BaseResponse, e error) {
	url := fmt.Sprintf("%s://%s%s?taskId=%s&cid=%s",
		api.Scheme, node, peerServiceDownPath, taskID, cid)

	result := new(types.BaseResponse)
	if err := api.get(url, result); err != nil {
		logrus.Errorf("failed to send service down,err: %v", err)
		return nil, err
	}
	if result.Code != constants.CodeGetPeerDown {
		logrus.Errorf("failed to send service down to supernode: api response code is %d not equal to %d", result.Code, constants.CodeGetPeerDown)
	}
	return result, nil
}
// ReportClientError reports the client error when downloading piece to supernode.
func (api *supernodeAPI) ReportClientError(node string, req *types.ClientErrorRequest) (
	resp *types.BaseResponse, e error) {
	query := httputils.ParseQuery(req)
	url := fmt.Sprintf("%s://%s%s?%s", api.Scheme, node, peerClientErrorPath, query)
	result := new(types.BaseResponse)
	err := api.get(url, result)
	return result, err
}
// ReportMetrics posts task metrics to the supernode and decodes the reply.
func (api *supernodeAPI) ReportMetrics(node string, req *api_types.TaskMetricsRequest) (resp *types.BaseResponse, err error) {
	url := fmt.Sprintf("%s://%s%s", api.Scheme, node, metricsReportPath)

	code, body, err := api.HTTPClient.PostJSON(url, req, api.Timeout)
	if err != nil {
		return nil, err
	}
	if !httputils.HTTPStatusOk(code) {
		return nil, fmt.Errorf("%d:%s", code, body)
	}

	result := new(types.BaseResponse)
	if err = json.Unmarshal(body, result); err != nil {
		return nil, err
	}
	return result, nil
}
// get performs an HTTP GET against url and unmarshals the JSON body into resp.
func (api *supernodeAPI) get(url string, resp interface{}) error {
	if url == "" {
		return fmt.Errorf("invalid url")
	}
	code, body, err := api.HTTPClient.Get(url, api.Timeout)
	if err != nil {
		return err
	}
	if !httputils.HTTPStatusOk(code) {
		return fmt.Errorf("%d:%s", code, body)
	}
	return json.Unmarshal(body, resp)
}
// ReportResource reports the local resource to the supernode by re-using
// the register endpoint with the "X-report-resource" header set.
func (api *supernodeAPI) ReportResource(node string, req *types.RegisterRequest) (resp *types.RegisterResponse, err error) {
	url := fmt.Sprintf("%s://%s%s", api.Scheme, node, peerRegisterPath)
	header := map[string]string{
		"X-report-resource": "true",
	}

	code, body, err := api.HTTPClient.PostJSONWithHeaders(url, header, req, api.Timeout)
	if err != nil {
		return nil, err
	}
	logrus.Infof("ReportResource, url: %s, header: %v, req: %v, "+
		"code: %d, body: %s", url, header, req, code, string(body))
	if !httputils.HTTPStatusOk(code) {
		return nil, fmt.Errorf("%d:%s", code, body)
	}

	result := new(types.RegisterResponse)
	if err = json.Unmarshal(body, result); err != nil {
		return nil, err
	}
	return result, nil
}
// ReportResourceDeleted notifies the supernode that a local resource was
// deleted, re-using the service-down endpoint.
func (api *supernodeAPI) ReportResourceDeleted(node string, taskID string, cid string) (resp *types.BaseResponse, err error) {
	url := fmt.Sprintf("%s://%s%s?taskId=%s&cid=%s",
		api.Scheme, node, peerServiceDownPath, taskID, cid)
	// NOTE(review): this header is only logged below; api.get does not attach
	// request headers, so "X-report-resource" is never actually sent — confirm
	// whether the supernode needs it on this endpoint.
	header := map[string]string{
		"X-report-resource": "true",
	}
	logrus.Infof("Call ReportResourceDeleted, node: %s, taskID: %s, cid: %s, "+
		"url: %s, header: %v", node, taskID, cid, url, header)

	result := new(types.BaseResponse)
	result.Code = constants.Success
	if err = api.get(url, result); err != nil {
		logrus.Errorf("failed to send service down,err: %v", err)
		return nil, err
	}
	if result.Code != constants.CodeGetPeerDown {
		logrus.Errorf("failed to send service down to supernode: api response code is %d not equal to %d", result.Code, constants.CodeGetPeerDown)
	}
	return result, nil
}
// ApplyForSeedNode applies for seed node to supernode; if selected as seed,
// the resp.AsSeed will be set true.
func (api *supernodeAPI) ApplyForSeedNode(node string, req *types.RegisterRequest) (resp *types.RegisterResponse, err error) {
	url := fmt.Sprintf("%s://%s%s",
		api.Scheme, node, peerRegisterPath)
	header := map[string]string{
		"X-report-resource": "true",
	}
	code, body, err := api.HTTPClient.PostJSONWithHeaders(url, header, req, api.Timeout)
	if err != nil {
		return nil, err
	}
	// BUG FIX: the log line was copy-pasted from ReportResource and reported
	// the wrong method name, making these calls indistinguishable in logs.
	logrus.Infof("ApplyForSeedNode, url: %s, header: %v, req: %v, "+
		"code: %d, body: %s", url, header, req, code, string(body))
	if !httputils.HTTPStatusOk(code) {
		return nil, fmt.Errorf("%d:%s", code, body)
	}
	resp = new(types.RegisterResponse)
	if err = json.Unmarshal(body, resp); err != nil {
		return nil, err
	}
	return resp, nil
}
// FetchP2PNetworkInfo fetch the p2p network info from supernode.
// @parameter
// start: the start index for array of result
// limit: the limit size of array of result, if -1 means no paging
func (api *supernodeAPI) FetchP2PNetworkInfo(node string, start int, limit int, req *api_types.NetworkInfoFetchRequest) (resp *api_types.NetworkInfoFetchResponse, err error) {
	var (
		code int
		body []byte
	)
	// normalize paging arguments
	if start < 0 {
		start = 0
	}
	if limit < 0 {
		limit = -1
	}
	if limit == 0 {
		//todo: the page default limit should be configuration item of dfdaemon
		limit = 500
	}
	url := fmt.Sprintf("%s://%s%s?start=%d&limit=%d",
		api.Scheme, node, fetchP2PNetworkPath, start, limit)
	if code, body, err = api.HTTPClient.PostJSON(url, req, api.Timeout); err != nil {
		return nil, err
	}
	logrus.Debugf("in FetchP2PNetworkInfo, req url: %s, timeout: %v, body: %v", url, api.Timeout, req)
	logrus.Debugf("in FetchP2PNetworkInfo, resp code: %d, body: %s", code, string(body))
	if !httputils.HTTPStatusOk(code) {
		return nil, fmt.Errorf("%d:%s", code, body)
	}
	rr := new(types.FetchP2PNetworkInfoResponse)
	if err = json.Unmarshal(body, rr); err != nil {
		return nil, err
	}
	if rr.Code != constants.Success {
		// BUG FIX: the error previously reported the HTTP status `code` (which
		// is OK on this branch) instead of the failing API response code.
		return nil, fmt.Errorf("%d:%s", rr.Code, rr.Msg)
	}
	return rr.Data, nil
}
// HeartBeat posts a heartbeat to the supernode and decodes the reply.
func (api *supernodeAPI) HeartBeat(node string, req *api_types.HeartBeatRequest) (resp *types.HeartBeatResponse, err error) {
	url := fmt.Sprintf("%s://%s%s", api.Scheme, node, peerHeartBeatPath)

	code, body, err := api.HTTPClient.PostJSON(url, req, api.Timeout)
	if err != nil {
		return nil, err
	}
	if !httputils.HTTPStatusOk(code) {
		logrus.Errorf("failed to heart beat, code %d, body: %s", code, string(body))
		return nil, fmt.Errorf("%d:%s", code, string(body))
	}
	logrus.Debugf("heart beat resp: %s", string(body))

	result := new(types.HeartBeatResponse)
	if err = json.Unmarshal(body, result); err != nil {
		return nil, err
	}
	return result, nil
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package api
import (
"fmt"
"net/http"
"strconv"
"time"
"github.com/dragonflyoss/Dragonfly/dfget/config"
"github.com/dragonflyoss/Dragonfly/pkg/httputils"
)
// UploaderAPI defines the communication methods between dfget and uploader.
type UploaderAPI interface {
	// ParseRate sends a request to uploader to calculate the rateLimit dynamically
	// for the speed limit of the whole host machine.
	ParseRate(ip string, port int, req *ParseRateRequest) (string, error)
	// CheckServer checks whether the peer server on the given port is available.
	CheckServer(ip string, port int, req *CheckServerRequest) (string, error)
	// FinishTask reports a finished task to the peer server.
	FinishTask(ip string, port int, req *FinishTaskRequest) error
	// PingServer sends a request to determine whether the server has started.
	PingServer(ip string, port int) bool
}
// uploaderAPI is an implementation of interface UploaderAPI.
type uploaderAPI struct {
	// timeout is applied to every HTTP request made by this client.
	timeout time.Duration
}

// compile-time check that uploaderAPI satisfies UploaderAPI.
var _ UploaderAPI = &uploaderAPI{}
// NewUploaderAPI returns a new UploaderAPI.
func NewUploaderAPI(timeout time.Duration) UploaderAPI {
	api := new(uploaderAPI)
	api.timeout = timeout
	return api
}
// ParseRate asks the local uploader to compute the effective rate limit
// for the given task file.
func (u *uploaderAPI) ParseRate(ip string, port int, req *ParseRateRequest) (string, error) {
	headers := map[string]string{
		config.StrRateLimit: strconv.Itoa(req.RateLimit),
	}
	url := fmt.Sprintf("http://%s:%d%s%s", ip, port, config.LocalHTTPPathRate, req.TaskFileName)
	return httputils.Do(url, headers, u.timeout)
}
// CheckServer probes the peer server, passing the data dir and total rate
// limit in request headers.
func (u *uploaderAPI) CheckServer(ip string, port int, req *CheckServerRequest) (string, error) {
	headers := map[string]string{
		config.StrDataDir:    req.DataDir,
		config.StrTotalLimit: strconv.Itoa(req.TotalLimit),
	}
	url := fmt.Sprintf("http://%s:%d%s%s", ip, port, config.LocalHTTPPathCheck, req.TaskFileName)
	return httputils.Do(url, headers, u.timeout)
}
// FinishTask tells the peer server that the given task has finished.
func (u *uploaderAPI) FinishTask(ip string, port int, req *FinishTaskRequest) error {
	url := fmt.Sprintf("http://%s:%d%sfinish?"+
		config.StrTaskFileName+"=%s&"+
		config.StrTaskID+"=%s&"+
		config.StrClientID+"=%s&"+
		config.StrSuperNode+"=%s",
		ip, port, config.LocalHTTPPathClient,
		req.TaskFileName, req.TaskID, req.ClientID, req.Node)

	code, body, err := httputils.Get(url, u.timeout)
	switch {
	case code == http.StatusOK:
		// an HTTP 200 is treated as success regardless of err
		return nil
	case err == nil:
		return fmt.Errorf("%d:%s", code, body)
	default:
		return err
	}
}
// PingServer reports whether the local peer server answers its ping endpoint.
func (u *uploaderAPI) PingServer(ip string, port int) bool {
	pingURL := fmt.Sprintf("http://%s:%d%s", ip, port, config.LocalHTTPPing)
	status, _, _ := httputils.Get(pingURL, u.timeout)
	return status == http.StatusOK
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package helper
import (
"path/filepath"
"strings"
)
// GetTaskFile returns file path of task file.
func GetTaskFile(taskFileName, dataDir string) string {
	taskPath := filepath.Join(dataDir, taskFileName)
	return taskPath
}
// GetServiceFile returns file path of service file.
func GetServiceFile(taskFileName, dataDir string) string {
	base := GetTaskFile(taskFileName, dataDir)
	return base + ".service"
}
// GetTaskName extracts and returns task name from serviceFile.
// Everything before the last ".service" occurrence is the task name;
// a string without ".service" is returned unchanged.
func GetTaskName(serviceFile string) string {
	idx := strings.LastIndex(serviceFile, ".service")
	if idx < 0 {
		return serviceFile
	}
	return serviceFile[:idx]
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package helper
import (
"context"
"fmt"
"io"
"io/ioutil"
"math"
"math/rand"
"net"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
api_types "github.com/dragonflyoss/Dragonfly/apis/types"
"github.com/dragonflyoss/Dragonfly/dfget/config"
"github.com/dragonflyoss/Dragonfly/dfget/core/api"
"github.com/dragonflyoss/Dragonfly/dfget/types"
"github.com/dragonflyoss/Dragonfly/pkg/constants"
"github.com/dragonflyoss/Dragonfly/pkg/fileutils"
"github.com/dragonflyoss/Dragonfly/pkg/httputils"
"github.com/sirupsen/logrus"
)
// CreateConfig creates a temporary config rooted at workHome and redirects
// the standard logger to writer (or discards output when writer is nil).
func CreateConfig(writer io.Writer, workHome string) *config.Config {
	out := writer
	if out == nil {
		out = ioutil.Discard
	}

	cfg := config.NewConfig()
	cfg.WorkHome = workHome
	cfg.RV.MetaPath = filepath.Join(cfg.WorkHome, "meta", "host.meta")
	cfg.RV.SystemDataDir = filepath.Join(cfg.WorkHome, "data")
	fileutils.CreateDirectory(filepath.Dir(cfg.RV.MetaPath))
	fileutils.CreateDirectory(cfg.RV.SystemDataDir)

	logrus.StandardLogger().Out = out
	return cfg
}
// CreateTestFile creates a temp file and write a string.
func CreateTestFile(path string, content string) error {
	f, err := createFile(path, content)
	if f == nil {
		return err
	}
	f.Close()
	return err
}
// CreateTestFileWithMD5 creates a temp file and write a string
// and return the md5 of the file. An empty string is returned on error.
func CreateTestFileWithMD5(path string, content string) string {
	f, err := createFile(path, content)
	if err != nil {
		return ""
	}
	defer f.Close()

	return fileutils.Md5Sum(f.Name())
}
// createFile creates a file at path and writes content into it.
// The returned *os.File is open; the caller is responsible for closing it.
func createFile(path string, content string) (*os.File, error) {
	f, err := os.Create(path)
	if err != nil {
		return nil, err
	}
	if content != "" {
		// BUG FIX: the write error was previously dropped, letting callers
		// operate on a truncated file; surface it instead.
		if _, err := f.WriteString(content); err != nil {
			f.Close()
			return nil, err
		}
	}
	return f, nil
}
// CreateRandomString creates a random string of specified length, built
// from lowercase ASCII letters.
func CreateRandomString(n int) string {
	// the parameter was renamed from `cap`, which shadowed the builtin
	const letterBytes = "abcdefghijklmnopqrstuvwxyz"
	b := make([]byte, n)
	for i := range b {
		b[i] = letterBytes[rand.Intn(len(letterBytes))]
	}
	return string(b)
}
// ----------------------------------------------------------------------------
// MockSupernodeAPI

// RegisterFuncType function type of SupernodeAPI#Register
type RegisterFuncType func(ip string, req *types.RegisterRequest) (*types.RegisterResponse, error)

// PullFuncType function type of SupernodeAPI#PullPieceTask
type PullFuncType func(ip string, req *types.PullPieceTaskRequest) (*types.PullPieceTaskResponse, error)

// ReportFuncType function type of SupernodeAPI#ReportPiece
type ReportFuncType func(ip string, req *types.ReportPieceRequest) (*types.BaseResponse, error)

// ServiceDownFuncType function type of SupernodeAPI#ServiceDown
type ServiceDownFuncType func(ip string, taskID string, cid string) (*types.BaseResponse, error)

// ClientErrorFuncType function type of SupernodeAPI#ReportClientError
type ClientErrorFuncType func(ip string, req *types.ClientErrorRequest) (*types.BaseResponse, error)

// ReportMetricsFuncType function type of SupernodeAPI#ReportMetrics
type ReportMetricsFuncType func(node string, req *api_types.TaskMetricsRequest) (*types.BaseResponse, error)
// MockSupernodeAPI mocks the SupernodeAPI. Each field, when non-nil, replaces
// the corresponding method's behavior; nil fields make the method return
// (nil, nil).
type MockSupernodeAPI struct {
	RegisterFunc      RegisterFuncType
	PullFunc          PullFuncType
	ReportFunc        ReportFuncType
	ServiceDownFunc   ServiceDownFuncType
	ClientErrorFunc   ClientErrorFuncType
	ReportMetricsFunc ReportMetricsFuncType
}

// compile-time check that MockSupernodeAPI satisfies api.SupernodeAPI.
var _ api.SupernodeAPI = &MockSupernodeAPI{}
// Register implements SupernodeAPI#Register.
func (m *MockSupernodeAPI) Register(ip string, req *types.RegisterRequest) (
	*types.RegisterResponse, error) {
	if m.RegisterFunc == nil {
		return nil, nil
	}
	return m.RegisterFunc(ip, req)
}
// PullPieceTask implements SupernodeAPI#PullPieceTask.
func (m *MockSupernodeAPI) PullPieceTask(ip string, req *types.PullPieceTaskRequest) (
	*types.PullPieceTaskResponse, error) {
	if m.PullFunc == nil {
		return nil, nil
	}
	return m.PullFunc(ip, req)
}
// ReportPiece implements SupernodeAPI#ReportPiece.
func (m *MockSupernodeAPI) ReportPiece(ip string, req *types.ReportPieceRequest) (
	*types.BaseResponse, error) {
	if m.ReportFunc == nil {
		return nil, nil
	}
	return m.ReportFunc(ip, req)
}
// ServiceDown implements SupernodeAPI#ServiceDown.
func (m *MockSupernodeAPI) ServiceDown(ip string, taskID string, cid string) (
	*types.BaseResponse, error) {
	if m.ServiceDownFunc == nil {
		return nil, nil
	}
	return m.ServiceDownFunc(ip, taskID, cid)
}
// ReportClientError implements SupernodeAPI#ReportClientError.
func (m *MockSupernodeAPI) ReportClientError(ip string, req *types.ClientErrorRequest) (resp *types.BaseResponse, e error) {
	if m.ClientErrorFunc == nil {
		return nil, nil
	}
	return m.ClientErrorFunc(ip, req)
}
// ReportMetrics implements SupernodeAPI#ReportMetrics.
func (m *MockSupernodeAPI) ReportMetrics(ip string, req *api_types.TaskMetricsRequest) (resp *types.BaseResponse, e error) {
	// BUG FIX: this previously guarded on m.ClientErrorFunc, so setting
	// ClientErrorFunc without ReportMetricsFunc caused a nil-func call panic,
	// while setting only ReportMetricsFunc made it silently unused.
	if m.ReportMetricsFunc != nil {
		return m.ReportMetricsFunc(ip, req)
	}
	return nil, nil
}
// HeartBeat implements SupernodeAPI#HeartBeat as a no-op stub.
func (m *MockSupernodeAPI) HeartBeat(node string, req *api_types.HeartBeatRequest) (resp *types.HeartBeatResponse, err error) {
	return nil, nil
}

// FetchP2PNetworkInfo implements SupernodeAPI#FetchP2PNetworkInfo as a no-op stub.
func (m *MockSupernodeAPI) FetchP2PNetworkInfo(node string, start int, limit int, req *api_types.NetworkInfoFetchRequest) (resp *api_types.NetworkInfoFetchResponse, e error) {
	return nil, nil
}

// ReportResource implements SupernodeAPI#ReportResource as a no-op stub.
func (m *MockSupernodeAPI) ReportResource(node string, req *types.RegisterRequest) (resp *types.RegisterResponse, err error) {
	return nil, nil
}

// ApplyForSeedNode implements SupernodeAPI#ApplyForSeedNode as a no-op stub.
func (m *MockSupernodeAPI) ApplyForSeedNode(node string, req *types.RegisterRequest) (resp *types.RegisterResponse, err error) {
	return nil, nil
}

// ReportResourceDeleted implements SupernodeAPI#ReportResourceDeleted as a no-op stub.
func (m *MockSupernodeAPI) ReportResourceDeleted(node string, taskID string, cid string) (resp *types.BaseResponse, err error) {
	return nil, nil
}
// CreateRegisterFunc creates a mock register function whose behavior is keyed
// on req.RawURL, covering the common supernode response codes.
func CreateRegisterFunc() RegisterFuncType {
	makeResp := func(code int, msg string) *types.RegisterResponse {
		return &types.RegisterResponse{
			BaseResponse: &types.BaseResponse{Code: code, Msg: msg},
		}
	}
	return func(ip string, req *types.RegisterRequest) (*types.RegisterResponse, error) {
		if ip == "" {
			// an empty node address simulates an unreachable supernode
			return nil, fmt.Errorf("connection refused")
		}
		switch req.RawURL {
		case "":
			return makeResp(501, "invalid source url"), nil
		case "http://taobao.com":
			return makeResp(constants.CodeNeedAuth, "need auth"), nil
		case "http://github.com":
			return makeResp(constants.CodeWaitAuth, "wait auth"), nil
		case "http://x.com":
			return makeResp(constants.CodeURLNotReachable, "not reachable"), nil
		case "http://lowzj.com":
			// the only success case: a task of 100 bytes in 10-byte pieces
			resp := makeResp(constants.Success, "")
			resp.Data = &types.RegisterResponseData{
				TaskID:     "a",
				FileLength: 100,
				PieceSize:  10,
			}
			return resp, nil
		}
		return nil, nil
	}
}
// MockFileServer mocks the file server.
type MockFileServer struct {
	// Mutex guards fileMap.
	sync.Mutex
	// Port is the TCP port the server listens on.
	Port int
	// fileMap maps a trimmed URL path to its registered mock file.
	fileMap map[string]*mockFile
	// sr is the underlying HTTP server, closed when the context given to
	// StartServer is cancelled.
	sr *http.Server
}
// NewMockFileServer returns a MockFileServer with an empty file registry.
func NewMockFileServer() *MockFileServer {
	fs := &MockFileServer{}
	fs.fileMap = map[string]*mockFile{}
	return fs
}
// StartServer asynchronously starts the server, it will not be blocked.
// The server is closed when ctx is cancelled.
func (fs *MockFileServer) StartServer(ctx context.Context, port int) error {
	// use an explicit "tcp" network (ResolveTCPAddr treats "" as "tcp")
	addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf(":%d", port))
	if err != nil {
		return err
	}
	l, err := net.ListenTCP("tcp", addr)
	if err != nil {
		return err
	}
	// BUG FIX: read the real port from the listener, so that port==0
	// (kernel-assigned port) records the actual port instead of 0.
	fs.Port = l.Addr().(*net.TCPAddr).Port
	sr := &http.Server{}
	sr.Handler = fs
	fs.sr = sr
	go func() {
		// BUG FIX: Serve returns http.ErrServerClosed after the shutdown
		// goroutine below calls Close; that is expected and must not panic.
		if err := fs.sr.Serve(l); err != nil && err != http.ErrServerClosed {
			panic(err)
		}
	}()
	go func() {
		// simplified from a for/select spin over the single Done channel
		<-ctx.Done()
		fs.sr.Close()
	}()
	return nil
}
// ServeHTTP serves a registered mock file for GET requests, honouring an
// optional Range header; unknown paths and other methods get 404.
func (fs *MockFileServer) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
	if req.Method != http.MethodGet {
		resp.WriteHeader(http.StatusNotFound)
		return
	}

	key := strings.Trim(req.URL.Path, "/")
	fs.Lock()
	mf, ok := fs.fileMap[key]
	fs.Unlock()
	if !ok {
		resp.WriteHeader(http.StatusNotFound)
		return
	}

	// only the first range of a multi-range header is honoured
	var reqRange *httputils.RangeStruct
	if rangeStr := req.Header.Get("Range"); rangeStr != "" {
		ranges, err := httputils.GetRangeSE(rangeStr, math.MaxInt64)
		if err != nil {
			resp.WriteHeader(http.StatusBadRequest)
			return
		}
		if len(ranges) > 0 {
			reqRange = ranges[0]
		}
	}

	fs.MockResp(resp, mf, reqRange)
}
// RegisterFile registers a mock file served at path whose content is the
// repeatStr pattern repeated up to size bytes. It returns os.ErrExist when
// the path is already registered and os.ErrInvalid for an empty repeatStr.
func (fs *MockFileServer) RegisterFile(path string, size int64, repeatStr string) error {
	if repeatStr == "" {
		// BUG FIX: an empty pattern previously made the doubling loop below
		// spin forever (zero length doubled is still zero).
		return os.ErrInvalid
	}
	fs.Lock()
	defer fs.Unlock()

	path = strings.Trim(path, "/")
	if _, exist := fs.fileMap[path]; exist {
		return os.ErrExist
	}

	// Grow the pattern to at least 1KB by doubling so MockResp can write
	// larger chunks per iteration; doubling keeps the periodic content
	// identical to the original string.
	data := []byte(repeatStr)
	for len(data) < 1024 {
		data = append(data, data...)
	}

	fs.fileMap[path] = &mockFile{
		path:      path,
		size:      size,
		repeatStr: data,
	}
	return nil
}
// UnRegisterFile removes the mock file registered at path, if any.
func (fs *MockFileServer) UnRegisterFile(path string) {
	key := strings.Trim(path, "/")
	fs.Lock()
	defer fs.Unlock()
	delete(fs.fileMap, key)
}
// MockResp writes the (optionally ranged) content of mf to resp by cycling
// over the repeated pattern until the requested byte count is produced.
func (fs *MockFileServer) MockResp(resp http.ResponseWriter, mf *mockFile, rangeSt *httputils.RangeStruct) {
	start := int64(0)
	end := mf.size - 1
	code := http.StatusOK
	if rangeSt != nil {
		start = rangeSt.StartIndex
		if rangeSt.EndIndex < end {
			end = rangeSt.EndIndex
		}
		code = http.StatusPartialContent
	}

	resp.Header().Set("Content-Length", fmt.Sprintf("%d", end-start+1))
	resp.WriteHeader(code)

	// emit the pattern cyclically, starting at the in-pattern offset of start
	patternLen := int64(len(mf.repeatStr))
	offset := start % patternLen
	for start <= end {
		chunk := patternLen - offset
		if remaining := end - start + 1; chunk > remaining {
			chunk = remaining
		}
		resp.Write(mf.repeatStr[offset : offset+chunk])
		offset = 0
		start += chunk
	}
}
// mockFile describes one file served by MockFileServer.
type mockFile struct {
	// path is the trimmed URL path the file is registered under.
	path string
	// size is the logical file size in bytes served to clients.
	size int64
	// repeatStr is the content pattern, pre-grown to at least 1KB by RegisterFile.
	repeatStr []byte
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package helper
import (
"strings"
"github.com/dragonflyoss/Dragonfly/dfget/config"
)
// IsP2P returns whether the pattern is PatternP2P.
func IsP2P(pattern string) bool {
	lower := strings.ToLower(pattern)
	return lower == config.PatternP2P
}
// IsCDN returns whether the pattern is PatternCDN.
func IsCDN(pattern string) bool {
	lower := strings.ToLower(pattern)
	return lower == config.PatternCDN
}
// IsSource returns whether the pattern is PatternSource.
func IsSource(pattern string) bool {
	lower := strings.ToLower(pattern)
	return lower == config.PatternSource
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uploader
import (
"context"
"encoding/binary"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"os"
"strconv"
"strings"
"sync"
"time"
apiTypes "github.com/dragonflyoss/Dragonfly/apis/types"
"github.com/dragonflyoss/Dragonfly/dfget/config"
"github.com/dragonflyoss/Dragonfly/dfget/core/api"
"github.com/dragonflyoss/Dragonfly/dfget/core/helper"
"github.com/dragonflyoss/Dragonfly/pkg/errortypes"
"github.com/dragonflyoss/Dragonfly/pkg/limitreader"
"github.com/dragonflyoss/Dragonfly/pkg/ratelimiter"
"github.com/dragonflyoss/Dragonfly/version"
"github.com/gorilla/mux"
"github.com/sirupsen/logrus"
)
// newPeerServer returns a new P2PServer bound to cfg.RV.LocalIP:port.
func newPeerServer(cfg *config.Config, port int) *peerServer {
	srv := &peerServer{
		cfg:      cfg,
		finished: make(chan struct{}),
		host:     cfg.RV.LocalIP,
		port:     port,
		api:      api.NewSupernodeAPI(),
	}
	srv.Server = &http.Server{
		Addr:    net.JoinHostPort(srv.host, strconv.Itoa(port)),
		Handler: srv.initRouter(),
	}
	return srv
}
// ----------------------------------------------------------------------------
// peerServer structure

// peerServer offers file-block to other clients.
type peerServer struct {
	cfg *config.Config

	// finished indicates whether the peer server is shutdown
	finished chan struct{}

	// server related fields
	host string
	port int
	*http.Server

	// api is the client used to talk to the supernode.
	api         api.SupernodeAPI
	rateLimiter *ratelimiter.RateLimiter

	// totalLimitRate is the total network bandwidth shared by tasks on the same host
	totalLimitRate int

	// syncTaskMap stores the meta name of tasks on the host
	syncTaskMap sync.Map
}
// taskConfig refers to some name about peer task.
type taskConfig struct {
	// taskID identifies the task on the supernode.
	taskID string
	// rateLimit is the per-task rate limit requested by the client.
	rateLimit int
	// cid is the client ID of the dfget process that downloaded the task.
	cid string
	// dataDir is the directory holding the task's data files.
	dataDir string
	// superNode is the supernode this task was registered with.
	superNode string
	// finished indicates the download of this task has completed.
	finished bool
	// accessTime records when the task state was last updated.
	accessTime time.Time
}
// uploadParam refers to all params needed in the handler of upload.
type uploadParam struct {
	// padSize is the size of the piece meta data wrapped around the payload.
	padSize int64
	// start is the first byte offset of the requested range.
	start int64
	// length is the number of bytes requested.
	length int64
	// pieceSize is the size in bytes of one piece.
	pieceSize int64
	// pieceNum is the sequence number of the requested piece.
	pieceNum int64
}
// ----------------------------------------------------------------------------
// init method of peerServer

// initRouter builds the HTTP route table of the peer server.
func (ps *peerServer) initRouter() *mux.Router {
	router := mux.NewRouter()
	// piece upload and local control endpoints, all GET
	router.HandleFunc(config.PeerHTTPPathPrefix+"{commonFile:.*}", ps.uploadHandler).Methods("GET")
	router.HandleFunc(config.LocalHTTPPathRate+"{commonFile:.*}", ps.parseRateHandler).Methods("GET")
	router.HandleFunc(config.LocalHTTPPathCheck+"{commonFile:.*}", ps.checkHandler).Methods("GET")
	router.HandleFunc(config.LocalHTTPPathClient+"finish", ps.oneFinishHandler).Methods("GET")
	router.HandleFunc(config.LocalHTTPPing, ps.pingHandler).Methods("GET")
	return router
}
// ----------------------------------------------------------------------------
// peerServer handlers

// uploadHandler uses to upload a task file when other peers download from it.
func (ps *peerServer) uploadHandler(w http.ResponseWriter, r *http.Request) {
	sendAlive(ps.cfg)

	taskFileName := mux.Vars(r)["commonFile"]
	rangeStr := r.Header.Get(config.StrRange)
	cdnSource := r.Header.Get(config.StrCDNSource)
	logrus.Debugf("upload file:%s to %s, req:%v", taskFileName, r.RemoteAddr, jsonStr(r.Header))

	// Step1: parse param
	up, err := parseParams(rangeStr, r.Header.Get(config.StrPieceNum),
		r.Header.Get(config.StrPieceSize))
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		logrus.Warnf("invalid param file:%s req:%v, %v", taskFileName, r.Header, err)
		return
	}

	// Step2: get task file
	f, size, err := ps.getTaskFile(taskFileName)
	if err != nil {
		rangeErrorResponse(w, err)
		logrus.Errorf("failed to open file:%s, %v", taskFileName, err)
		return
	}
	defer f.Close()

	// Step3: amend range with piece meta data; padding applies to
	// everything except direct-source CDN downloads
	if err = amendRange(size, cdnSource != string(apiTypes.CdnSourceSource), up); err != nil {
		rangeErrorResponse(w, err)
		logrus.Errorf("failed to amend range of file %s: %v", taskFileName, err)
		return
	}

	// Step4: send piece wrapped by meta data
	if err := ps.uploadPiece(f, w, up); err != nil {
		logrus.Errorf("failed to send range(%s) of file(%s): %v", rangeStr, taskFileName, err)
	}
}
// parseRateHandler computes the effective rate limit for one task, sharing
// ps.totalLimitRate across all running tasks, and writes it to the response.
func (ps *peerServer) parseRateHandler(w http.ResponseWriter, r *http.Request) {
	sendAlive(ps.cfg)

	// get params from request
	taskFileName := mux.Vars(r)["commonFile"]
	rateLimit := r.Header.Get(config.StrRateLimit)
	clientRate, err := strconv.Atoi(rateLimit)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		fmt.Fprint(w, err.Error())
		logrus.Errorf("failed to convert rateLimit %v, %v", rateLimit, err)
		return
	}
	sendSuccess(w)

	// record the client-requested limit on the task if it is known
	if v, ok := ps.syncTaskMap.Load(taskFileName); ok {
		v.(*taskConfig).rateLimit = clientRate
	}

	// no need to calculate rate when totalLimitRate less than or equals zero.
	if ps.totalLimitRate <= 0 {
		fmt.Fprint(w, rateLimit)
		return
	}

	fmt.Fprint(w, strconv.Itoa(ps.calculateRateLimit(clientRate)))
}
// checkHandler is used to check the server status. It refreshes the alive
// state, optionally creates/updates the shared rate limiter from the
// total-limit header, and registers the task file so it can be served.
// The response format "<taskFileName>@<version>" is parsed by checkServer.
// TODO: Disassemble this function for too many things done.
func (ps *peerServer) checkHandler(w http.ResponseWriter, r *http.Request) {
	sendAlive(ps.cfg)
	sendSuccess(w)
	// handle totalLimit: lazily create the limiter, otherwise update its rate
	totalLimit, err := strconv.Atoi(r.Header.Get(config.StrTotalLimit))
	if err == nil && totalLimit > 0 {
		if ps.rateLimiter == nil {
			ps.rateLimiter = ratelimiter.NewRateLimiter(int64(totalLimit), 2)
		} else {
			ps.rateLimiter.SetRate(ratelimiter.TransRate(int64(totalLimit)))
		}
		ps.totalLimitRate = totalLimit
		logrus.Infof("update total limit to %d", totalLimit)
	}
	// register the task file together with the directory it lives in
	taskFileName := mux.Vars(r)["commonFile"]
	dataDir := r.Header.Get(config.StrDataDir)
	param := &taskConfig{
		dataDir: dataDir,
	}
	ps.syncTaskMap.Store(taskFileName, param)
	fmt.Fprintf(w, "%s@%s", taskFileName, version.DFGetVersion)
}
// oneFinishHandler is used to update the status of a peer task once its
// download has finished: the task is marked finished, its rate limit is
// released, and its supernode/taskID/cid bookkeeping is recorded.
func (ps *peerServer) oneFinishHandler(w http.ResponseWriter, r *http.Request) {
	if err := r.ParseForm(); err != nil {
		sendHeader(w, http.StatusBadRequest)
		fmt.Fprint(w, err.Error())
		return
	}
	taskFileName := r.FormValue(config.StrTaskFileName)
	taskID := r.FormValue(config.StrTaskID)
	cid := r.FormValue(config.StrClientID)
	superNode := r.FormValue(config.StrSuperNode)
	if taskFileName == "" || taskID == "" || cid == "" {
		sendHeader(w, http.StatusBadRequest)
		fmt.Fprintf(w, "invalid params")
		return
	}
	if v, ok := ps.syncTaskMap.Load(taskFileName); ok {
		// task already registered by checkHandler: mark it finished
		task := v.(*taskConfig)
		task.taskID = taskID
		task.rateLimit = 0
		task.cid = cid
		task.superNode = superNode
		task.finished = true
		task.accessTime = time.Now()
	} else {
		// unknown task: record it, defaulting to the system data directory
		ps.syncTaskMap.Store(taskFileName, &taskConfig{
			taskID:     taskID,
			cid:        cid,
			dataDir:    ps.cfg.RV.SystemDataDir,
			superNode:  superNode,
			finished:   true,
			accessTime: time.Now(),
		})
	}
	sendSuccess(w)
	fmt.Fprintf(w, "success")
}
// pingHandler replies with a plain "success" so callers can probe whether
// this peer server is alive.
func (ps *peerServer) pingHandler(w http.ResponseWriter, r *http.Request) {
	sendSuccess(w)
	fmt.Fprint(w, "success")
}
// ----------------------------------------------------------------------------
// handler process
// getTaskFile looks taskFileName up in syncTaskMap and returns the opened
// file together with its size. It also refreshes the task's access time
// so the GC does not consider the file expired.
// The caller is responsible for closing the returned file.
func (ps *peerServer) getTaskFile(taskFileName string) (*os.File, int64, error) {
	// sentinel size returned on every error path
	errSize := int64(-1)
	v, ok := ps.syncTaskMap.Load(taskFileName)
	if !ok {
		return nil, errSize, fmt.Errorf("failed to get taskPath: %s", taskFileName)
	}
	tc, ok := v.(*taskConfig)
	if !ok {
		return nil, errSize, fmt.Errorf("failed to assert: %s", taskFileName)
	}
	// update the accessTime of taskFileName
	tc.accessTime = time.Now()
	taskPath := helper.GetServiceFile(taskFileName, tc.dataDir)
	fileInfo, err := os.Stat(taskPath)
	if err != nil {
		return nil, errSize, err
	}
	taskFile, err := os.Open(taskPath)
	if err != nil {
		return nil, errSize, err
	}
	return taskFile, fileInfo.Size(), nil
}
// amendRange adjusts the requested upload range to account for piece meta
// data. When needPad is true the range is shifted back by one meta block
// per preceding piece and padSize is set; otherwise padSize is zero.
// It returns ErrRangeNotSatisfiable when the amended range cannot be
// served from a file of the given size.
func amendRange(size int64, needPad bool, up *uploadParam) error {
	up.padSize = 0
	if needPad {
		up.padSize = config.PieceMetaSize
		up.start -= up.pieceNum * up.padSize
	}

	// a piece must be sent whole, with both its head and tail bytes
	switch {
	case up.length < up.padSize || up.start < 0:
		return errortypes.ErrRangeNotSatisfiable
	case up.start >= size && !needPad:
		return errortypes.ErrRangeNotSatisfiable
	}

	// clamp the payload length to what the file actually contains
	if up.start+up.length-up.padSize > size {
		up.length = size - up.start + up.padSize
		if size == 0 {
			// an empty file still yields the meta bytes alone
			up.length = up.padSize
		}
	}
	return nil
}
// parseParams validates the range/piece headers of an upload request and
// converts them into an uploadParam. The range must look like
// "<unit>=<start>-<end>" with end > start; length is end-start+1.
func parseParams(rangeVal, pieceNumStr, pieceSizeStr string) (*uploadParam, error) {
	pieceNum, err := strconv.ParseInt(pieceNumStr, 10, 64)
	if err != nil {
		return nil, err
	}
	pieceSize, err := strconv.ParseInt(pieceSizeStr, 10, 64)
	if err != nil {
		return nil, err
	}

	// exactly one "=" separates the unit from the byte range
	if strings.Count(rangeVal, "=") != 1 {
		return nil, fmt.Errorf("invalid range: %s", rangeVal)
	}
	rangeStr := strings.Split(rangeVal, "=")[1]

	// exactly one "-" separates the two bounds
	if strings.Count(rangeStr, "-") != 1 {
		return nil, fmt.Errorf("invalid range: %s", rangeStr)
	}
	bounds := strings.Split(rangeStr, "-")
	start, err := strconv.ParseInt(bounds[0], 10, 64)
	if err != nil {
		return nil, err
	}
	end, err := strconv.ParseInt(bounds[1], 10, 64)
	if err != nil {
		return nil, err
	}
	if end <= start {
		return nil, fmt.Errorf("invalid range: %s", rangeStr)
	}

	return &uploadParam{
		pieceNum:  pieceNum,
		pieceSize: pieceSize,
		start:     start,
		length:    end - start + 1,
	}, nil
}
// uploadPiece sends the requested range of f to the remote peer with a
// 206 status and a Content-Length of up.length. When padding is enabled
// (up.padSize > 0) the payload is wrapped with the 4-byte piece head and
// the piece tail character expected by the downloading peer.
// It returns any error encountered while seeking or copying.
func (ps *peerServer) uploadPiece(f *os.File, w http.ResponseWriter, up *uploadParam) (e error) {
	w.Header().Set(config.StrContentLength, strconv.FormatInt(up.length, 10))
	sendHeader(w, http.StatusPartialContent)
	readLen := up.length - up.padSize
	// buf doubles as the head scratch space and the copy buffer
	buf := make([]byte, 256*1024)
	if up.padSize > 0 {
		// piece head packs readLen with pieceSize shifted left by 4,
		// big-endian — the on-wire format read by the peer
		binary.BigEndian.PutUint32(buf, uint32((readLen)|(up.pieceSize)<<4))
		w.Write(buf[:config.PieceHeadSize])
		// the tail byte is always appended to keep the framing consistent
		defer w.Write([]byte{config.PieceTailChar})
	}
	// fix: the Seek error was previously ignored, which could serve
	// bytes from a wrong offset without any diagnostic
	if _, e = f.Seek(up.start, 0); e != nil {
		return e
	}
	r := io.LimitReader(f, readLen)
	if ps.rateLimiter != nil {
		lr := limitreader.NewLimitReaderWithLimiter(ps.rateLimiter, r, false)
		_, e = io.CopyBuffer(w, lr, buf)
	} else {
		_, e = io.CopyBuffer(w, r, buf)
	}
	return
}
// calculateRateLimit scales clientRate down proportionally when the sum
// of the rate limits of all unfinished tasks exceeds the server's total
// limit; otherwise the requested rate is returned unchanged.
func (ps *peerServer) calculateRateLimit(clientRate int) int {
	total := 0
	// define a function that Range will call sequentially
	// for each key and value present in the map
	f := func(key, value interface{}) bool {
		if task, ok := value.(*taskConfig); ok {
			if !task.finished {
				total += task.rateLimit
			}
		}
		return true
	}
	ps.syncTaskMap.Range(f)
	// calculate the rate limit again according to totalLimit;
	// rounded up so a positive request never collapses to zero
	if total > ps.totalLimitRate {
		return (clientRate*ps.totalLimitRate + total - 1) / total
	}
	return clientRate
}
// ----------------------------------------------------------------------------
// methods of peerServer
// isFinished reports whether the peer server has been shut down. A nil
// finished channel is treated as finished.
func (ps *peerServer) isFinished() bool {
	if ps.finished == nil {
		return true
	}
	// a closed channel yields immediately with ok==false;
	// an open (empty) channel falls through to default
	select {
	case _, notClose := <-ps.finished:
		return !notClose
	default:
		return false
	}
}
// setFinished marks the peer server as finished by closing the finished
// channel; it is a no-op when the server is already finished.
func (ps *peerServer) setFinished() {
	if !ps.isFinished() {
		close(ps.finished)
	}
}
// waitForShutdown blocks until the peer server is marked finished, i.e.
// until the finished channel is closed by setFinished. A server without
// a finished channel is considered already finished.
func (ps *peerServer) waitForShutdown() {
	if ps.finished == nil {
		return
	}
	// drain the channel until it is closed
	for range ps.finished {
	}
}
// shutdown notifies the supernode that this peer is going down, removes
// the files being served, stops the HTTP server gracefully, resets the
// persisted service port and finally marks the server finished.
func (ps *peerServer) shutdown() {
	// tell supernode this peer node is down and delete related files.
	ps.syncTaskMap.Range(func(key, value interface{}) bool {
		task, ok := value.(*taskConfig)
		if ok {
			ps.api.ServiceDown(task.superNode, task.taskID, task.cid)
			serviceFile := helper.GetServiceFile(key.(string), task.dataDir)
			os.Remove(serviceFile)
			logrus.Infof("shutdown, remove task id:%s file:%s",
				task.taskID, serviceFile)
		}
		return true
	})
	// give in-flight uploads up to one minute to drain
	c, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Minute))
	ps.Shutdown(c)
	cancel()
	// reset the port in the meta file so a new server can be launched later
	updateServicePortInMeta(ps.cfg.RV.MetaPath, 0)
	logrus.Info("peer server is shutdown.")
	ps.setFinished()
}
// deleteExpiredFile removes the file at path when its task is finished
// and neither accessed nor modified within expireTime, notifying the
// supernode that the task is no longer served. Files not tracked in
// syncTaskMap are removed unconditionally. It returns true when the file
// was removed.
// Fix: the original dereferenced task even when the type assertion
// failed, panicking on an unexpected map value; such entries are now
// cleaned up instead.
func (ps *peerServer) deleteExpiredFile(path string, info os.FileInfo,
	expireTime time.Duration) bool {
	taskName := helper.GetTaskName(info.Name())
	v, ok := ps.syncTaskMap.Load(taskName)
	if !ok {
		// untracked file: remove it directly
		os.Remove(path)
		return true
	}
	task, ok := v.(*taskConfig)
	if !ok {
		// unexpected value type: drop the entry and the stray file
		ps.syncTaskMap.Delete(taskName)
		os.Remove(path)
		return true
	}
	if !task.finished {
		// unfinished tasks never expire
		return false
	}
	// use the bigger of access time and modify time to
	// check whether the task is expired
	lastAccessTime := task.accessTime
	if info.ModTime().After(lastAccessTime) {
		lastAccessTime = info.ModTime()
	}
	// if the last access time is expireTime ago, release the task
	if time.Since(lastAccessTime) > expireTime {
		ps.api.ServiceDown(task.superNode, task.taskID, task.cid)
		os.Remove(path)
		ps.syncTaskMap.Delete(taskName)
		return true
	}
	return false
}
// ----------------------------------------------------------------------------
// helper functions
// sendSuccess writes a 200 OK header with the server's content type.
func sendSuccess(w http.ResponseWriter) {
	sendHeader(w, http.StatusOK)
}
// sendHeader sets the octet-stream content type used by this server and
// writes the given HTTP status code.
func sendHeader(w http.ResponseWriter, code int) {
	w.Header().Set(config.StrContentType, ctype)
	w.WriteHeader(code)
}
// rangeErrorResponse maps a task-file access error onto the matching HTTP
// error response: 416 for unsatisfiable ranges, 403 for permission
// problems, 404 for missing files, and 500 for everything else.
func rangeErrorResponse(w http.ResponseWriter, err error) {
	switch {
	case errortypes.IsRangeNotSatisfiable(err):
		http.Error(w, config.RangeNotSatisfiableDesc, http.StatusRequestedRangeNotSatisfiable)
	case os.IsPermission(err):
		http.Error(w, err.Error(), http.StatusForbidden)
	case os.IsNotExist(err):
		http.Error(w, err.Error(), http.StatusNotFound)
	default:
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// jsonStr renders v as JSON, returning an empty string when v cannot be
// marshalled. Intended for best-effort logging only.
func jsonStr(v interface{}) string {
	data, err := json.Marshal(v)
	if err != nil {
		return ""
	}
	return string(data)
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uploader
import (
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/dragonflyoss/Dragonfly/dfget/config"
"github.com/dragonflyoss/Dragonfly/pkg/fileutils"
"github.com/dragonflyoss/Dragonfly/version"
"github.com/sirupsen/logrus"
)
var (
defaultExecutor PeerServerExecutor = &peerServerExecutor{}
)
// SetupPeerServerExecutor sets up the given executor instance to be used
// instead of the default implementation.
func SetupPeerServerExecutor(executor PeerServerExecutor) {
	defaultExecutor = executor
}
// GetPeerServerExecutor returns the current executor instance, which may
// have been replaced via SetupPeerServerExecutor.
func GetPeerServerExecutor() PeerServerExecutor {
	return defaultExecutor
}
// StartPeerServerProcess starts an independent peer server process for
// uploading downloaded files if it doesn't exist, delegating to the
// configured PeerServerExecutor.
// This function is invoked when dfget starts to download files in p2p pattern.
func StartPeerServerProcess(cfg *config.Config) (port int, err error) {
	if defaultExecutor != nil {
		return defaultExecutor.StartPeerServerProcess(cfg)
	}
	return 0, fmt.Errorf("executor of peer server hasn't been initialized")
}
// PeerServerExecutor starts an independent peer server process for
// uploading downloaded files. It is an interface so alternative launchers
// can be plugged in via SetupPeerServerExecutor.
type PeerServerExecutor interface {
	StartPeerServerProcess(cfg *config.Config) (port int, err error)
}
// ---------------------------------------------------------------------------
// PeerServerExecutor default implementation

// peerServerExecutor is the default PeerServerExecutor: it re-executes
// the current binary with the "server" subcommand.
type peerServerExecutor struct {
}

// compile-time check that peerServerExecutor implements PeerServerExecutor
var _ PeerServerExecutor = &peerServerExecutor{}
// StartPeerServerProcess returns the port of an already-running peer
// server when one exists; otherwise it spawns the current binary as a
// detached "server" child process, reads the port the child prints on
// stdout, and verifies the new server is reachable.
func (pe *peerServerExecutor) StartPeerServerProcess(cfg *config.Config) (port int, err error) {
	if port = pe.checkPeerServerExist(cfg, 0); port > 0 {
		return port, nil
	}
	// the file lock serializes concurrent dfget processes that might try
	// to start a server for the same meta directory
	fileLock := fileutils.NewFileLock(filepath.Dir(cfg.RV.MetaPath))
	if err = fileLock.Lock(); err != nil {
		return 0, err
	}
	defer fileLock.Unlock()
	// re-check under the lock: another process may have won the race
	if port = pe.checkPeerServerExist(cfg, 0); port > 0 {
		return port, nil
	}
	cmd := exec.Command(os.Args[0], "server",
		"--ip", cfg.RV.LocalIP,
		"--port", strconv.Itoa(cfg.RV.PeerPort),
		"--meta", cfg.RV.MetaPath,
		"--data", cfg.RV.SystemDataDir,
		"--home", cfg.WorkHome,
		"--expiretime", cfg.RV.DataExpireTime.String(),
		"--alivetime", cfg.RV.ServerAliveTime.String())
	if cfg.Verbose {
		cmd.Args = append(cmd.Args, "--verbose")
	}
	var stdout io.ReadCloser
	if stdout, err = cmd.StdoutPipe(); err != nil {
		return 0, err
	}
	if err = cmd.Start(); err == nil {
		// the child prints its listen port on stdout
		port, err = readPort(stdout)
	}
	if err == nil && pe.checkPeerServerExist(cfg, port) <= 0 {
		err = fmt.Errorf("invalid server on port:%d", port)
		port = 0
	}
	return
}
// readPort reads one chunk from r and parses the trailing whitespace-
// separated token as the peer server's port number. It gives up after one
// second.
// Fix: the reader goroutine previously leaked — on a read error it sent
// to done but kept running into a second (forever-blocking) send, and on
// timeout the unbuffered send blocked the goroutine for the life of the
// process. The channel is now buffered and the goroutine returns after
// reporting a read error.
func readPort(r io.Reader) (int, error) {
	// buffered so the goroutine can always complete, even after a timeout
	done := make(chan error, 1)
	var port int32
	go func() {
		buf := make([]byte, 256)
		n, err := r.Read(buf)
		if err != nil {
			done <- err
			return
		}
		content := string(buf[:n])
		contentSlice := strings.Split(content, " ")
		portValue, err := strconv.Atoi(strings.TrimSpace(contentSlice[len(contentSlice)-1]))
		// avoid data race with the timeout path reading port
		atomic.StoreInt32(&port, int32(portValue))
		done <- err
	}()
	select {
	case err := <-done:
		return int(atomic.LoadInt32(&port)), err
	case <-time.After(time.Second):
		return 0, fmt.Errorf("get peer server's port timeout")
	}
}
// checkPeerServerExist checks whether the peer server on port is available.
// If the parameter port <= 0, it will get the port from the meta file and
// check that instead. It returns the verified port, or 0 when no usable
// server was found.
func (pe *peerServerExecutor) checkPeerServerExist(cfg *config.Config, port int) int {
	taskFileName := cfg.RV.TaskFileName
	if port <= 0 {
		port = getPortFromMeta(cfg.RV.MetaPath)
	}
	if port <= 0 {
		// port 0 is invalid
		return 0
	}
	// check the peer server whether is available: a server of the same
	// version echoes the task file name back (see checkServer)
	result, err := checkServer(cfg.RV.LocalIP, port, cfg.RV.DataDir, taskFileName, int(cfg.TotalLimit))
	logrus.Infof("local http result:%s err:%v, port:%d path:%s",
		result, err, port, config.LocalHTTPPathCheck)
	if err == nil {
		if result == taskFileName {
			logrus.Infof("use peer server on port:%d", port)
			return port
		}
		logrus.Warnf("not found process on port:%d, version:%s", port, version.DFGetVersion)
	}
	return 0
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package uploader implements an uploader server. It is the important role
// - peer - in P2P pattern that will wait for other P2PDownloader to download
// its downloaded files.
package uploader
import (
"fmt"
"os"
"os/signal"
"path/filepath"
"strings"
"sync/atomic"
"syscall"
"time"
"unsafe"
"github.com/dragonflyoss/Dragonfly/dfget/config"
"github.com/dragonflyoss/Dragonfly/dfget/core/api"
"github.com/dragonflyoss/Dragonfly/pkg/httputils"
"github.com/dragonflyoss/Dragonfly/pkg/queue"
"github.com/sirupsen/logrus"
)
const (
ctype = "application/octet-stream"
)
var (
p2p *peerServer
)
var (
aliveQueue = queue.NewQueue(0)
uploaderAPI = api.NewUploaderAPI(httputils.DefaultTimeout)
)
// -----------------------------------------------------------------------------
// dfget server functions
// WaitForShutdown blocks until the current peer server (if any) has shut
// down.
func WaitForShutdown() {
	if p2p != nil {
		p2p.waitForShutdown()
	}
}
// LaunchPeerServer launches a server to send piece data. It blocks until
// the server is confirmed up (or fails to start), persists the service
// port into the meta file and starts the alive monitor. It returns the
// port the server listens on.
func LaunchPeerServer(cfg *config.Config) (int, error) {
	// avoid data race caused by reading and writing variable 'p2p'
	// in different routines
	var p2pPtr unsafe.Pointer
	logrus.Infof("********************")
	logrus.Infof("start peer server...")
	res := make(chan error)
	go func() {
		res <- launch(cfg, &p2pPtr)
	}()
	if err := waitForStartup(res, &p2pPtr); err != nil {
		logrus.Errorf("start peer server error:%v, exit directly", err)
		return 0, err
	}
	p2p = loadSrvPtr(&p2pPtr)
	updateServicePortInMeta(cfg.RV.MetaPath, p2p.port)
	logrus.Infof("start peer server success, host:%s, port:%d",
		p2p.host, p2p.port)
	go monitorAlive(cfg, 15*time.Second)
	return p2p.port, nil
}
// launch starts the peer server, retrying with newly generated ports when
// the address is already in use; a fixed cfg.RV.PeerPort disables retries.
// Each candidate server is published through p2pPtr before ListenAndServe
// so waitForStartup can ping it. Returning nil means an existing server
// on the same port should be reused.
func launch(cfg *config.Config, p2pPtr *unsafe.Pointer) error {
	var (
		retryCount         = 10
		port               = 0
		shouldGeneratePort = true
	)
	if cfg.RV.PeerPort > 0 {
		retryCount = 1
		port = cfg.RV.PeerPort
		shouldGeneratePort = false
	}
	for i := 0; i < retryCount; i++ {
		if shouldGeneratePort {
			port = generatePort(i)
		}
		tmp := newPeerServer(cfg, port)
		storeSrvPtr(p2pPtr, tmp)
		if err := tmp.ListenAndServe(); err != nil {
			if !strings.Contains(err.Error(), "address already in use") {
				// start failed or shutdown
				return err
			} else if uploaderAPI.PingServer(tmp.host, tmp.port) {
				// a peer server is already existing
				return nil
			}
			logrus.Warnf("start error:%v, remain retry times:%d",
				err, retryCount-i)
		}
	}
	return fmt.Errorf("start peer server error and retried at most %d times", retryCount)
}
// waitForStartup It's a goal to start 'dfget server' process and make it working
// within 300ms, such as in the case of downloading very small files, especially
// in parallel.
// The ticker which has a 5ms period can test the server whether is working
// successfully as soon as possible.
// Actually, it costs about 70ms for 'dfget client' to start a `dfget server`
// process if everything goes right without any failure. So the remaining time
// for retrying to launch server internal is about 230ms. And '233' is just
// right the smallest number which is greater than 230, a prime, and not a
// multiple of '5'.
// And there is only one situation which should be retried again: the address
// already in use. The remaining time is enough for it to retry 10 times to find
// another available address in majority of cases.
func waitForStartup(result chan error, p2pPtr *unsafe.Pointer) (err error) {
	ticker := time.NewTicker(5 * time.Millisecond)
	defer ticker.Stop()
	timeout := time.After(233 * time.Millisecond)
	for {
		select {
		case <-ticker.C:
			// poll: launch publishes the server through p2pPtr before
			// ListenAndServe blocks, so ping it as soon as it appears
			tmp := loadSrvPtr(p2pPtr)
			if tmp != nil && uploaderAPI.PingServer(tmp.host, tmp.port) {
				return nil
			}
		case err = <-result:
			tmp := loadSrvPtr(p2pPtr)
			if err == nil {
				// launch returned nil: an existing server is being reused,
				// so this instance is finished immediately
				logrus.Infof("reuse exist server on port:%d", tmp.port)
				tmp.setFinished()
			}
			return err
		case <-timeout:
			// The peer server go routine will block and serve if it starts successfully.
			// So we have to wait a moment and check again whether the peer server is
			// started.
			tmp := loadSrvPtr(p2pPtr)
			if tmp == nil {
				return fmt.Errorf("initialize peer server error")
			}
			if !uploaderAPI.PingServer(tmp.host, tmp.port) {
				return fmt.Errorf("can't ping port:%d", tmp.port)
			}
			return nil
		}
	}
}
// serverGC periodically walks the system data directory, removing expired
// task files and any stray directories. It exits once the peer server is
// no longer running.
func serverGC(cfg *config.Config, interval time.Duration) {
	logrus.Info("start server gc, expireTime:", cfg.RV.DataExpireTime)
	var walkFn filepath.WalkFunc = func(path string, info os.FileInfo, err error) error {
		// skip the root itself and any entry that failed to stat
		if path == cfg.RV.SystemDataDir || info == nil || err != nil {
			return nil
		}
		// only regular files are expected here; remove directories wholesale
		if info.IsDir() {
			os.RemoveAll(path)
			return filepath.SkipDir
		}
		if p2p != nil && p2p.deleteExpiredFile(path, info, cfg.RV.DataExpireTime) {
			logrus.Info("server gc, delete file:", path)
		}
		return nil
	}
	for {
		if !isRunning() {
			return
		}
		if err := filepath.Walk(cfg.RV.SystemDataDir, walkFn); err != nil {
			logrus.Warnf("server gc error:%v", err)
		}
		time.Sleep(interval)
	}
}
// captureQuitSignal shuts the peer server down gracefully when the
// process receives SIGHUP, SIGINT or SIGTERM.
func captureQuitSignal() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
	s := <-c
	logrus.Infof("capture stop signal: %s, will shutdown...", s)
	if p2p != nil {
		p2p.shutdown()
	}
}
// monitorAlive starts the GC and signal-handling goroutines, then shuts
// the peer server down once no upload activity has been observed for
// cfg.RV.ServerAliveTime. A non-positive alive time disables the idle
// shutdown entirely.
func monitorAlive(cfg *config.Config, interval time.Duration) {
	if !isRunning() {
		return
	}
	logrus.Info("monitor peer server whether is alive, aliveTime:",
		cfg.RV.ServerAliveTime)
	go serverGC(cfg, interval)
	go captureQuitSignal()
	if cfg.RV.ServerAliveTime <= 0 {
		return
	}
	for {
		// handlers feed aliveQueue via sendAlive; a poll timeout means
		// no request arrived within the alive window
		if _, ok := aliveQueue.PollTimeout(cfg.RV.ServerAliveTime); !ok {
			// activity queued up while shutting down? keep serving
			if aliveQueue.Len() > 0 {
				continue
			}
			if p2p != nil {
				logrus.Info("no more task, peer server will stop...")
				p2p.shutdown()
			}
			return
		}
	}
}
// sendAlive signals recent activity to the alive monitor. It is a no-op
// when the idle shutdown is disabled (ServerAliveTime <= 0).
func sendAlive(cfg *config.Config) {
	if cfg.RV.ServerAliveTime <= 0 {
		return
	}
	aliveQueue.Put(true)
}
// isRunning reports whether the global peer server exists and has not
// finished yet.
func isRunning() bool {
	return p2p != nil && !p2p.isFinished()
}
// -----------------------------------------------------------------------------
// helper functions
// storeSrvPtr atomically publishes ptr through addr; it pairs with
// loadSrvPtr to avoid data races on the shared server pointer.
func storeSrvPtr(addr *unsafe.Pointer, ptr *peerServer) {
	atomic.StorePointer(addr, unsafe.Pointer(ptr))
}
// loadSrvPtr atomically reads the *peerServer previously published via
// storeSrvPtr, returning nil if nothing was stored yet.
func loadSrvPtr(addr *unsafe.Pointer) *peerServer {
	return (*peerServer)(atomic.LoadPointer(addr))
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uploader
// FuzzParseParams is a fuzzing entry point for parseParams. It returns 1
// when the input parses successfully (interesting for the fuzzer) and 0
// otherwise.
func FuzzParseParams(data []byte) int {
	s := string(data)
	_, err := parseParams(s, s, s)
	if err != nil {
		return 0
	}
	return 1
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uploader
import (
"strings"
"time"
"github.com/dragonflyoss/Dragonfly/dfget/config"
"github.com/dragonflyoss/Dragonfly/dfget/core/api"
"github.com/dragonflyoss/Dragonfly/version"
)
// FinishTask reports a finished task to the peer server at ip:port so it
// can mark the task finished and release its rate limit.
func FinishTask(ip string, port int, taskFileName, cid, taskID, node string) error {
	req := &api.FinishTaskRequest{
		TaskFileName: taskFileName,
		TaskID:       taskID,
		ClientID:     cid,
		Node:         node,
	}
	return uploaderAPI.FinishTask(ip, port, req)
}
// checkServer asks the peer server at ip:port whether it can serve
// taskFileName from dataDir. On success it returns the task file name
// echoed by the server; an empty string (with nil error) means the server
// answered but not in the expected "<name>@<version>" format, e.g. when
// it runs a different dfget version.
func checkServer(ip string, port int, dataDir, taskFileName string, totalLimit int) (string, error) {
	req := &api.CheckServerRequest{
		TaskFileName: taskFileName,
		TotalLimit:   totalLimit,
		DataDir:      dataDir,
	}

	result, err := uploaderAPI.CheckServer(ip, port, req)
	if err != nil {
		return "", err
	}

	// a compatible server replies "<taskFileName>@<version>"; strip the
	// version suffix and reject mismatched versions
	suffix := "@" + version.DFGetVersion
	if !strings.HasSuffix(result, suffix) {
		return "", nil
	}
	return strings.TrimSuffix(result, suffix), nil
}
// generatePort derives a quasi-random listen port inside the configured
// server port window. The base rotates every 300 seconds; inc offsets
// successive retry attempts.
func generatePort(inc int) int {
	lower := config.ServerPortLowerLimit
	upper := config.ServerPortUpperLimit
	window := upper - lower
	base := int(time.Now().Unix()/300) % window
	return lower + base + inc
}
// getPortFromMeta reads the previously persisted service port from the
// meta file, returning 0 when the meta file cannot be loaded.
func getPortFromMeta(metaPath string) int {
	meta := config.NewMetaData(metaPath)
	if meta.Load() != nil {
		return 0
	}
	return meta.ServicePort
}
// updateServicePortInMeta persists port into the meta file, skipping the
// write when the stored port is already up to date.
func updateServicePortInMeta(metaPath string, port int) {
	meta := config.NewMetaData(metaPath)
	// best effort: a missing or unreadable meta file is simply rewritten
	meta.Load()
	if meta.ServicePort == port {
		return
	}
	meta.ServicePort = port
	meta.Persist()
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package types
// BaseResponse defines the common fields of responses from supernode.
// Types of supernode's responses could be defined as following:
//	type XXResponse struct {
//		*BaseResponse
//		Data *CustomizedDataStruct
//	}
type BaseResponse struct {
	// Code represents whether the response is successful.
	Code int `json:"code"`
	// Msg describes the detailed error message if the response is failed.
	Msg string `json:"msg,omitempty"`
}
// NewBaseResponse creates a BaseResponse instance with the given code and
// message.
func NewBaseResponse(code int, msg string) *BaseResponse {
	return &BaseResponse{
		Code: code,
		Msg:  msg,
	}
}
// IsSuccess is used for determining whether the response is successful,
// which for supernode responses means Code == 1.
func (res *BaseResponse) IsSuccess() bool {
	return res.Code == 1
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package types
import (
"encoding/json"
"github.com/dragonflyoss/Dragonfly/pkg/constants"
)
// PullPieceTaskResponse is the response of PullPieceTaskRequest.
type PullPieceTaskResponse struct {
	*BaseResponse
	// Data holds the raw JSON payload; its schema depends on Code
	// (finish data vs. continue data), so decoding is deferred.
	Data json.RawMessage `json:"data,omitempty"`
	// data caches the decoded form of Data after the first call to
	// FinishData or ContinueData.
	data interface{}
}
// String returns the JSON representation of the response, or an empty
// string if marshalling fails.
func (res *PullPieceTaskResponse) String() string {
	if b, e := json.Marshal(res); e == nil {
		return string(b)
	}
	return ""
}
// FinishData gets structured data from json.RawMessage when the task is
// finished. It returns nil unless Code is CodePeerFinish and the payload
// decodes successfully; the decoded value is cached for later calls.
func (res *PullPieceTaskResponse) FinishData() *PullPieceTaskResponseFinishData {
	if res.Code != constants.CodePeerFinish || res.Data == nil {
		return nil
	}
	if res.data == nil {
		data := new(PullPieceTaskResponseFinishData)
		if e := json.Unmarshal(res.Data, data); e != nil {
			return nil
		}
		res.data = data
	}
	return res.data.(*PullPieceTaskResponseFinishData)
}
// ContinueData gets structured data from json.RawMessage when the task is
// continuing. It returns nil unless Code is CodePeerContinue and the
// payload decodes successfully; the decoded slice is cached for later
// calls.
func (res *PullPieceTaskResponse) ContinueData() []*PullPieceTaskResponseContinueData {
	if res.Code != constants.CodePeerContinue || res.Data == nil {
		return nil
	}
	if res.data == nil {
		var data []*PullPieceTaskResponseContinueData
		if e := json.Unmarshal(res.Data, &data); e != nil {
			return nil
		}
		res.data = data
	}
	return res.data.([]*PullPieceTaskResponseContinueData)
}
// PullPieceTaskResponseFinishData is the data when successfully pulling
// piece task and the task is finished.
type PullPieceTaskResponseFinishData struct {
	// Md5 is the digest of the complete file.
	Md5 string `json:"md5"`
	// FileLength is the total length of the file in bytes.
	FileLength int64 `json:"fileLength"`
}
// String returns the JSON representation of the finish data; marshal
// errors are ignored, yielding an empty string.
func (data *PullPieceTaskResponseFinishData) String() string {
	b, _ := json.Marshal(data)
	return string(b)
}
// PullPieceTaskResponseContinueData is the data when successfully pulling
// piece task and the task is continuing: it describes one piece and the
// peer that can serve it.
type PullPieceTaskResponseContinueData struct {
	// Range is the byte range of the piece, e.g. "start-end".
	Range     string `json:"range"`
	PieceNum  int    `json:"pieceNum"`
	PieceSize int32  `json:"pieceSize"`
	PieceMd5  string `json:"pieceMd5"`
	// Cid is the client ID of the serving peer.
	Cid      string `json:"cid"`
	PeerIP   string `json:"peerIp"`
	PeerPort int    `json:"peerPort"`
	Path     string `json:"path"`
	DownLink int    `json:"downLink"`
}
// String returns the JSON representation of the continue data; marshal
// errors are ignored, yielding an empty string.
func (data *PullPieceTaskResponseContinueData) String() string {
	b, _ := json.Marshal(data)
	return string(b)
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package types
import (
"encoding/json"
)
// RegisterRequest contains all the parameters that need to be passed to the
// supernode when registering a downloading task.
type RegisterRequest struct {
	SupernodeIP string `json:"superNodeIp"`
	RawURL      string `json:"rawUrl"`
	TaskURL     string `json:"taskUrl"`
	// Cid is the client ID of the registering dfget process.
	Cid      string `json:"cid"`
	IP       string `json:"ip"`
	HostName string `json:"hostName"`
	Port     int    `json:"port"`
	Path     string `json:"path"`
	Version  string `json:"version,omitempty"`
	Md5      string `json:"md5,omitempty"`
	// Identifier distinguishes tasks sharing the same URL — TODO confirm
	// against supernode docs.
	Identifier string   `json:"identifier,omitempty"`
	CallSystem string   `json:"callSystem,omitempty"`
	Headers    []string `json:"headers,omitempty"`
	Dfdaemon   bool     `json:"dfdaemon,omitempty"`
	Insecure   bool     `json:"insecure,omitempty"`
	RootCAs    [][]byte `json:"rootCAs,omitempty"`
	TaskID     string   `json:"taskId,omitempty"`
	FileLength int64    `json:"fileLength,omitempty"`
	// AsSeed requests seed behavior in seed pattern.
	AsSeed  bool   `json:"asSeed,omitempty"`
	Pattern string `json:"pattern"`
}
// String returns the JSON representation of the request, or an empty
// string if marshalling fails.
func (r *RegisterRequest) String() string {
	if b, e := json.Marshal(r); e == nil {
		return string(b)
	}
	return ""
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package types
import (
"encoding/json"
apiTypes "github.com/dragonflyoss/Dragonfly/apis/types"
)
// RegisterResponse is the response of register request, embedding the
// common supernode response fields.
type RegisterResponse struct {
	*BaseResponse
	Data *RegisterResponseData `json:"data,omitempty"`
}
// String returns the JSON representation of the response, or an empty
// string if marshalling fails.
func (res *RegisterResponse) String() string {
	if b, e := json.Marshal(res); e == nil {
		return string(b)
	}
	return ""
}
// RegisterResponseData is the data when registering supernode successfully.
type RegisterResponseData struct {
	TaskID     string `json:"taskId"`
	FileLength int64  `json:"fileLength"`
	PieceSize  int32  `json:"pieceSize"`
	// CDNSource reports where the CDN data comes from (supernode or source).
	CDNSource apiTypes.CdnSource `json:"cdnSource"`
	// in seed pattern, if peer selected as seed, AsSeed sets true.
	AsSeed bool `json:"asSeed"`
	// in seed pattern, if as seed, SeedTaskID is the taskID of seed file.
	SeedTaskID string `json:"seedTaskID"`
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package algorithm
import (
"math/rand"
"sort"
"time"
)
// init seeds the global math/rand source so results differ between runs.
func init() {
	rand.Seed(time.Now().UnixNano())
}
// ContainsString returns whether the value is in arr. A nil slice never
// contains anything.
func ContainsString(arr []string, value string) bool {
	for i := range arr {
		if arr[i] == value {
			return true
		}
	}
	return false
}
// Shuffle pseudo-randomizes the order of elements.
// n is the number of elements.
// swap swaps the elements with indexes i and j.
// copy from rand.Shuffle of go1.10.
func Shuffle(n int, swap func(int, int)) {
	if n < 2 {
		return
	}
	// Fisher-Yates: indexes above 2^31-2 need the 64-bit Int63n path,
	// the rest use the cheaper 32-bit helper int31n.
	i := n - 1
	for ; i > 1<<31-1-1; i-- {
		j := int(rand.Int63n(int64(i + 1)))
		swap(i, j)
	}
	for ; i > 0; i-- {
		j := int(int31n(int32(i + 1)))
		swap(i, j)
	}
}
// int31n returns, as an int32, a non-negative pseudo-random number in
// [0, n). It rejects the biased low products so the distribution is
// uniform (the multiply-and-reject scheme used by rand.Shuffle in go1.10).
func int31n(n int32) int32 {
	for {
		v := rand.Uint32()
		prod := uint64(v) * uint64(n)
		low := uint32(prod)
		if low >= uint32(n) {
			// fast path: this draw cannot be biased
			return int32(prod >> 32)
		}
		// thresh is the count of low values that must be rejected
		thresh := uint32(-n) % uint32(n)
		if low >= thresh {
			return int32(prod >> 32)
		}
	}
}
// GCDSlice returns the greatest common divisor of a slice.
// It returns 1 when s is empty because any number divided by 1 is still
// itself.
func GCDSlice(s []int) int {
	switch len(s) {
	case 0:
		return 1
	case 1:
		return s[0]
	}
	result := s[0]
	for _, v := range s[1:] {
		// 1 divides everything; no point continuing
		if result == 1 {
			break
		}
		result = GCD(result, v)
	}
	return result
}
// GCD returns the greatest common divisor of x and y using the Euclidean
// algorithm.
func GCD(x, y int) int {
	for y != 0 {
		x, y = y, x%y
	}
	return x
}
// DedupStringArr removes duplicate strings in the array. The result is
// sorted; the input slice is left untouched. An empty or nil input yields
// an empty (non-nil) slice.
func DedupStringArr(input []string) []string {
	if len(input) == 0 {
		return []string{}
	}
	// work on a sorted copy so duplicates become adjacent
	sorted := append([]string(nil), input...)
	sort.Strings(sorted)
	result := sorted[:1]
	for _, s := range sorted[1:] {
		if s != result[len(result)-1] {
			result = append(result, s)
		}
	}
	return result
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package atomiccount
import (
"sync/atomic"
)
// AtomicInt is a struct that can be added or subtracted atomically.
type AtomicInt struct {
	// count points at the underlying value; it is manipulated with
	// sync/atomic operations by the methods below.
	count *int32
}
// NewAtomicInt returns a new AtomicInt initialized to value.
func NewAtomicInt(value int32) *AtomicInt {
	v := value
	return &AtomicInt{count: &v}
}
// Add atomically adds delta to the count and returns the new value.
// A nil receiver is a no-op that returns 0.
func (ac *AtomicInt) Add(delta int32) int32 {
	if ac == nil {
		return 0
	}
	return atomic.AddInt32(ac.count, delta)
}
// Get returns the current value atomically.
// A nil receiver returns 0.
func (ac *AtomicInt) Get() int32 {
	if ac != nil {
		// Use an atomic load: a plain dereference of *ac.count would race
		// with concurrent writers (atomic.AddInt32 in Add, SwapInt32 in Set).
		return atomic.LoadInt32(ac.count)
	}
	return 0
}
// Set atomically stores value and returns the previous value.
// A nil receiver returns 0 without storing, consistent with Add and Get
// (previously a nil receiver panicked here).
func (ac *AtomicInt) Set(value int32) int32 {
	if ac == nil {
		return 0
	}
	return atomic.SwapInt32(ac.count, value)
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package constants
// This file defines the code required for both dfget and supernode.

// cmmap maps each response/status code to its human-readable description.
var cmmap = make(map[int]string)

// init populates cmmap with the description of every mapped code declared
// below. Codes without an entry here resolve to "" via GetMsgByCode.
func init() {
	cmmap[Success] = "success"
	cmmap[CodeSystemError] = "system error"
	cmmap[CodeParamError] = "param is illegal"
	cmmap[CodeTargetNotFound] = "target not found"
	cmmap[CodePeerFinish] = "peer task end"
	cmmap[CodePeerContinue] = "peer task go on"
	cmmap[CodePeerWait] = "peer task wait"
	cmmap[CodePeerLimited] = "peer down limit"
	cmmap[CodeSuperFail] = "super node sync source fail"
	cmmap[CodeUnknownError] = "unknown error"
	cmmap[CodeTaskConflict] = "task conflict"
	cmmap[CodeURLNotReachable] = "url is not reachable"
	cmmap[CodeNeedAuth] = "need auth"
	cmmap[CodeWaitAuth] = "wait auth"
}
// GetMsgByCode gets the description of the code; an unmapped code yields
// the empty string (the map's zero value).
func GetMsgByCode(code int) string {
	return cmmap[code]
}
const (
	// HTTPError represents that there is an error between client and server.
	HTTPError = -100
)

/* the response code returned by supernode */
const (
	// Success represents the request is success.
	Success = 200
	// CodeSystemError: "system error".
	CodeSystemError = 500
	// CodeParamError: "param is illegal".
	CodeParamError = 501
	// CodeTargetNotFound: "target not found".
	CodeTargetNotFound = 502
	// CodePeerFinish: "peer task end".
	CodePeerFinish = 600
	// CodePeerContinue: "peer task go on".
	CodePeerContinue = 601
	// CodePeerWait: "peer task wait".
	CodePeerWait = 602
	// CodePeerLimited: "peer down limit".
	CodePeerLimited = 603
	// CodeSuperFail: "super node sync source fail".
	CodeSuperFail = 604
	// CodeUnknownError: "unknown error".
	CodeUnknownError = 605
	// CodeTaskConflict: "task conflict".
	CodeTaskConflict = 606
	// CodeURLNotReachable: "url is not reachable".
	CodeURLNotReachable = 607
	// CodeNeedAuth: "need auth".
	CodeNeedAuth = 608
	// CodeWaitAuth: "wait auth".
	CodeWaitAuth = 609
	// The following codes have no cmmap description; their semantics are
	// defined by their callers (not visible from this file).
	CodeSourceError = 610
	CodeGetPieceReport = 611
	CodeGetPeerDown = 612
)

/* the code of task result that dfget will report to supernode */
const (
	// ResultFail is the result code for a failed task.
	ResultFail = 500
	// ResultSuc is the result code for a successful task.
	ResultSuc = 501
	// ResultInvalid is the result code for an invalid task.
	ResultInvalid = 502
	// ResultSemiSuc represents the result is partial successful.
	ResultSemiSuc = 503
)

/* the code of task status that dfget will report to supernode */
const (
	// TaskStatusStart marks a task that has started.
	TaskStatusStart = 700
	// TaskStatusRunning marks a task in progress.
	TaskStatusRunning = 701
	// TaskStatusFinish marks a finished task.
	TaskStatusFinish = 702
)

/* the client error when downloading from supernode that dfget will report to supernode */
const (
	// ClientErrorFileNotExist reports that the downloaded file does not exist.
	ClientErrorFileNotExist = "FILE_NOT_EXIST"
	// ClientErrorFileMd5NotMatch reports an md5 mismatch on the downloaded file.
	ClientErrorFileMd5NotMatch = "FILE_MD5_NOT_MATCH"
)
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dflog
import (
"bytes"
"fmt"
"os"
"strings"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"gopkg.in/natefinch/lumberjack.v2"
)
// LogConfig holds all configurable properties of log.
type LogConfig struct {
	// MaxSize is the maximum size in megabytes of the log file before it gets rotated.
	// It defaults to 40 megabytes.
	MaxSize int `yaml:"maxSize" json:"maxSize"`
	// MaxBackups is the maximum number of old log files to retain.
	// The default value is 1.
	MaxBackups int `yaml:"maxBackups" json:"maxBackups"`
	// Path is the location of log file
	// The default value is logs/dfdaemon.log
	Path string `yaml:"path" json:"path"`
}

// DefaultLogTimeFormat defines the timestamp format (a Go reference-time
// layout with millisecond precision).
const DefaultLogTimeFormat = "2006-01-02 15:04:05.000"

// Option is a functional configuration for the given logrus logger.
type Option func(l *logrus.Logger) error
// WithDebug sets the log level to debug when debug is true; otherwise the
// logger is left unchanged.
func WithDebug(debug bool) Option {
	return func(l *logrus.Logger) error {
		if !debug {
			return nil
		}
		l.SetLevel(logrus.DebugLevel)
		return nil
	}
}
// getLumberjack returns the logger's current output as a *lumberjack.Logger
// when it writes to one, and nil otherwise.
func getLumberjack(l *logrus.Logger) *lumberjack.Logger {
	logger, _ := l.Out.(*lumberjack.Logger)
	return logger
}
// WithLogFile sets the logger to output to the given file, with log rotation.
//
// If the given file is empty, nothing will be done.
//
// maxSize is the maximum size in megabytes of the log file before rotation;
// non-positive values default to 40. maxBackups is the number of old log
// files to retain; non-positive values default to 1. When the logger already
// writes to a lumberjack logger, only its filename is updated.
func WithLogFile(f string, maxSize, maxBackups int) Option {
	return func(l *logrus.Logger) error {
		if f == "" {
			return nil
		}
		size := maxSize
		if size <= 0 {
			size = 40
		}
		backups := maxBackups
		if backups <= 0 {
			backups = 1
		}
		existing := getLumberjack(l)
		if existing != nil {
			existing.Filename = f
			return nil
		}
		l.SetOutput(&lumberjack.Logger{
			Filename:   f,
			MaxSize:    size, // megabytes
			MaxBackups: backups,
		})
		return nil
	}
}
// WithMaxSizeMB sets the max size of log files in MB. If the logger is not
// configured to use a log file, an error is returned.
func WithMaxSizeMB(max uint) Option {
	return func(l *logrus.Logger) error {
		logger := getLumberjack(l)
		if logger == nil {
			// errors.New suffices here: Errorf with a constant format
			// string and no arguments is the same thing, spelled worse.
			return errors.New("lumberjack is not configured")
		}
		logger.MaxSize = int(max)
		return nil
	}
}
// WithConsole adds a hook that mirrors every log entry to stdout, using the
// logger's current formatter and level.
func WithConsole() Option {
	return func(l *logrus.Logger) error {
		stdoutLogger := &logrus.Logger{
			Out:       os.Stdout,
			Formatter: l.Formatter,
			Hooks:     make(logrus.LevelHooks),
			Level:     l.Level,
		}
		l.AddHook(&ConsoleHook{
			logger: stdoutLogger,
			levels: logrus.AllLevels,
		})
		return nil
	}
}
// WithSign installs a DragonflyFormatter carrying the given sign and the
// default timestamp layout.
func WithSign(sign string) Option {
	return func(l *logrus.Logger) error {
		formatter := &DragonflyFormatter{
			TimestampFormat: DefaultLogTimeFormat,
			Sign:            sign,
		}
		l.Formatter = formatter
		return nil
	}
}
// Init initializes the logger with given options. A WithSign("") option is
// always applied first, so with no options the formatter is set with an
// empty sign; later options may override it.
func Init(l *logrus.Logger, opts ...Option) error {
	all := make([]Option, 0, len(opts)+1)
	all = append(all, WithSign(""))
	all = append(all, opts...)
	for _, o := range all {
		if err := o(l); err != nil {
			return err
		}
	}
	return nil
}
// ConsoleHook shows logs on console.
type ConsoleHook struct {
	// logger is the stdout-bound logger that actually renders entries.
	logger *logrus.Logger
	// levels lists the log levels this hook fires on.
	levels []logrus.Level
}
// Fire implements Hook#Fire: it forwards the entry's message to the console
// logger at the corresponding level, honoring the console logger's own
// level threshold.
func (ch *ConsoleHook) Fire(entry *logrus.Entry) error {
	if ch.logger.Level >= entry.Level {
		switch entry.Level {
		case logrus.PanicLevel, logrus.FatalLevel:
			// logger.Panic panics after writing the entry; recover so the
			// hook does not abort the caller. Fatal-level entries are also
			// routed through Panic here, which avoids logger.Fatal's
			// process exit.
			defer func() {
				recover()
			}()
			ch.logger.Panic(entry.Message)
		case logrus.ErrorLevel:
			ch.logger.Error(entry.Message)
		case logrus.WarnLevel:
			ch.logger.Warn(entry.Message)
		case logrus.InfoLevel:
			ch.logger.Info(entry.Message)
		case logrus.DebugLevel:
			ch.logger.Debug(entry.Message)
		}
	}
	return nil
}
// Levels implements Hook#Levels; it reports the levels this hook is
// registered for.
func (ch *ConsoleHook) Levels() []logrus.Level {
	return ch.levels
}

// DragonflyFormatter customizes the dragonfly log format.
type DragonflyFormatter struct {
	// TimestampFormat sets the format used for marshaling timestamps.
	TimestampFormat string
	// Sign, when non-empty, is rendered as "sign:<Sign>" in every line.
	Sign string
}
// Format implements Formatter#Format, rendering an entry as:
//
//	"<timestamp> <LEVL> [sign:<sign> ]: <message>\n"
//
// where the level is upper-cased and padded/truncated to four characters.
func (f *DragonflyFormatter) Format(entry *logrus.Entry) ([]byte, error) {
	var buf bytes.Buffer
	layout := f.TimestampFormat
	if layout == "" {
		layout = DefaultLogTimeFormat
	}
	f.appendValue(&buf, entry.Time.Format(layout), true)
	level := fmt.Sprintf("%-4.4s", strings.ToUpper(entry.Level.String()))
	f.appendValue(&buf, level, true)
	if f.Sign != "" {
		fmt.Fprintf(&buf, "sign:%s ", f.Sign)
	}
	buf.WriteString(": ")
	if msg := entry.Message; msg != "" {
		f.appendValue(&buf, msg, false)
	}
	buf.WriteByte('\n')
	return buf.Bytes(), nil
}
// appendValue writes value to b — strings verbatim, errors via Error(),
// anything else via fmt — optionally followed by a single space.
func (f *DragonflyFormatter) appendValue(b *bytes.Buffer, value interface{}, withSpace bool) {
	if s, ok := value.(string); ok {
		b.WriteString(s)
	} else if e, ok := value.(error); ok {
		b.WriteString(e.Error())
	} else {
		fmt.Fprint(b, value)
	}
	if withSpace {
		b.WriteByte(' ')
	}
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package digest
import (
"crypto/sha1"
"crypto/sha256"
"encoding/hex"
"io"
)
// Sha256 returns the hex-encoded SHA-256 checksum of value.
func Sha256(value string) string {
	sum := sha256.Sum256([]byte(value))
	return hex.EncodeToString(sum[:])
}
// Sha1 returns the hex-encoded SHA-1 checksum of the concatenation of
// contents, in order.
func Sha1(contents []string) string {
	hasher := sha1.New()
	for i := range contents {
		io.WriteString(hasher, contents[i])
	}
	return hex.EncodeToString(hasher.Sum(nil))
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package errortypes defines all exceptions happened in dragonfly.
package errortypes
import (
"fmt"
"github.com/pkg/errors"
)
var (
	// ErrDataNotFound represents the data cannot be found.
	ErrDataNotFound = DfError{codeDataNotFound, "data not found"}
	// ErrEmptyValue represents the value is empty or nil.
	ErrEmptyValue = DfError{codeEmptyValue, "empty value"}
	// ErrInvalidValue represents the value is invalid.
	ErrInvalidValue = DfError{codeInvalidValue, "invalid value"}
	// ErrNotInitialized represents the object is not initialized.
	ErrNotInitialized = DfError{codeNotInitialized, "not initialized"}
	// ErrConvertFailed represents failed to convert.
	ErrConvertFailed = DfError{codeConvertFailed, "convert failed"}
	// ErrRangeNotSatisfiable represents the length of file is insufficient.
	ErrRangeNotSatisfiable = DfError{codeRangeNotSatisfiable, "range not satisfiable"}
)

// Error codes: the first group backs the Err* values above; the group after
// the "supernode" marker backs supernode-specific errors defined in a
// sibling file of this package.
const (
	codeDataNotFound = iota
	codeEmptyValue
	codeInvalidValue
	codeNotInitialized
	codeConvertFailed
	codeRangeNotSatisfiable
	// supernode
	codeSystemError
	codeCDNFail
	codeCDNWait
	codePeerWait
	codeUnknownError
	codePeerContinue
	codeURLNotReachable
	codeTaskIDDuplicate
	codeAuthenticationRequired
)

// DfError represents a Dragonfly error.
type DfError struct {
	// Code is one of the code* constants above.
	Code int
	// Msg is the human-readable message.
	Msg string
}

// ErrAssertFunc is the signature of the Is* predicates in this package:
// it reports whether err belongs to a particular DfError category.
type ErrAssertFunc func(err error) bool
// New creates a DfError with the given code and message.
func New(code int, msg string) *DfError {
	e := DfError{Code: code, Msg: msg}
	return &e
}
// Newf creates a DfError with a message built from a format specifier.
func Newf(code int, format string, a ...interface{}) *DfError {
	msg := fmt.Sprintf(format, a...)
	return &DfError{Code: code, Msg: msg}
}
// Error renders the error as a small JSON-ish object, e.g. {"Code":1,"Msg":"x"}.
// Note Msg is interpolated verbatim (not JSON-escaped).
func (s DfError) Error() string {
	return fmt.Sprintf(`{"Code":%d,"Msg":"%s"}`, s.Code, s.Msg)
}
// IsNilError reports whether err is nil.
func IsNilError(err error) bool {
	if err != nil {
		return false
	}
	return true
}
// IsDataNotFound checks whether the error means the data cannot be found.
func IsDataNotFound(err error) bool {
	return checkError(err, codeDataNotFound)
}

// IsEmptyValue checks whether the error means the value is empty or nil.
func IsEmptyValue(err error) bool {
	return checkError(err, codeEmptyValue)
}

// IsInvalidValue checks whether the error means the value is invalid.
func IsInvalidValue(err error) bool {
	return checkError(err, codeInvalidValue)
}

// IsNotInitialized checks whether the error means the object is not initialized.
func IsNotInitialized(err error) bool {
	return checkError(err, codeNotInitialized)
}

// IsConvertFailed checks whether the error is a conversion error.
func IsConvertFailed(err error) bool {
	return checkError(err, codeConvertFailed)
}

// IsRangeNotSatisfiable checks whether the error is a
// range-not-satisfiable error.
func IsRangeNotSatisfiable(err error) bool {
	return checkError(err, codeRangeNotSatisfiable)
}
// checkError unwraps err with errors.Cause and reports whether the root
// cause is a DfError carrying the given code.
func checkError(err error, code int) bool {
	if e, ok := errors.Cause(err).(DfError); ok {
		return e.Code == code
	}
	return false
}
// HTTPError is an error that carries an HTTP status code alongside a message.
type HTTPError struct {
	// Code is the HTTP status code.
	Code int
	// Msg is the human-readable message.
	Msg string
}
// NewHTTPError creates an HTTPError with the given status code and message.
func NewHTTPError(code int, msg string) *HTTPError {
	he := HTTPError{Code: code, Msg: msg}
	return &he
}
// Error implements the error interface, rendering the same JSON-ish shape
// as DfError.Error (Msg is not JSON-escaped).
func (s HTTPError) Error() string {
	return fmt.Sprintf(`{"Code":%d,"Msg":"%s"}`, s.Code, s.Msg)
}
// HTTPCode returns the HTTP status code carried by the error.
func (s HTTPError) HTTPCode() int {
	return s.Code
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package errortypes defines all exceptions happened in supernode's runtime.
package errortypes
var (
	// ErrSystemError represents the error is a system error.
	ErrSystemError = DfError{codeSystemError, "system error"}
	// ErrCDNFail represents the cdn status is fail.
	ErrCDNFail = DfError{codeCDNFail, "cdn status is fail"}
	// ErrCDNWait represents the cdn status is wait.
	ErrCDNWait = DfError{codeCDNWait, "cdn status is wait"}
	// ErrPeerWait represents the peer should wait.
	ErrPeerWait = DfError{codePeerWait, "peer should wait"}
	// ErrUnknownError represents the error should not happen
	// and the cause of that is unknown.
	ErrUnknownError = DfError{codeUnknownError, "unknown error"}
	// PeerContinue represents the peer should continue. Unlike its
	// siblings it has no "Err" prefix: it signals a normal control-flow
	// condition rather than a failure.
	PeerContinue = DfError{codePeerContinue, "peer continue"}
	// ErrURLNotReachable represents the url is a not reachable.
	ErrURLNotReachable = DfError{codeURLNotReachable, "url not reachable"}
	// ErrTaskIDDuplicate represents the task id is in conflict.
	ErrTaskIDDuplicate = DfError{codeTaskIDDuplicate, "taskId conflict"}
	// ErrAuthenticationRequired represents the authentication is required.
	ErrAuthenticationRequired = DfError{codeAuthenticationRequired, "authentication required"}
)
// IsSystemError checks the error is a system error or not.
func IsSystemError(err error) bool {
	return checkError(err, codeSystemError)
}

// IsCDNFail checks the error is CDNFail or not.
func IsCDNFail(err error) bool {
	return checkError(err, codeCDNFail)
}

// IsCDNWait checks the error is CDNWait or not.
func IsCDNWait(err error) bool {
	return checkError(err, codeCDNWait)
}

// IsPeerWait checks the error is PeerWait or not.
func IsPeerWait(err error) bool {
	return checkError(err, codePeerWait)
}

// IsUnknowError checks the error is an unknown error or not.
// (The missing "n" in the name is kept for backward compatibility.)
func IsUnknowError(err error) bool {
	return checkError(err, codeUnknownError)
}

// IsPeerContinue checks the error is PeerContinue or not.
func IsPeerContinue(err error) bool {
	return checkError(err, codePeerContinue)
}

// IsURLNotReachable checks the error is a url not reachable or not.
func IsURLNotReachable(err error) bool {
	return checkError(err, codeURLNotReachable)
}

// IsTaskIDDuplicate checks the error is a TaskIDDuplicate error or not.
func IsTaskIDDuplicate(err error) bool {
	return checkError(err, codeTaskIDDuplicate)
}

// IsAuthenticationRequired checks the error is an AuthenticationRequired error or not.
func IsAuthenticationRequired(err error) bool {
	return checkError(err, codeAuthenticationRequired)
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fileutils
import (
"fmt"
"os"
"syscall"
"github.com/pkg/errors"
)
// FileLock defines a file lock implemented by syscall.Flock
type FileLock struct {
	// fileName is the path of the file to lock.
	fileName string
	// fd is the open descriptor holding the flock; nil when not locked.
	fd *os.File
}
// NewFileLock creates an unlocked FileLock for the given file name.
func NewFileLock(name string) *FileLock {
	lock := FileLock{fileName: name}
	return &lock
}
// Lock locks file.
// If the file is already locked, the calling goroutine blocks until the file is unlocked.
// If lock has been invoked without unlock, lock again will return an error.
func (l *FileLock) Lock() error {
	if l.fd != nil {
		return fmt.Errorf("file %s has already been locked", l.fileName)
	}
	fd, err := os.Open(l.fileName)
	if err != nil {
		return err
	}
	if err := syscall.Flock(int(fd.Fd()), syscall.LOCK_EX); err != nil {
		// Close the descriptor on failure. Previously l.fd was assigned
		// before Flock, so a failed Flock leaked the fd and left the lock
		// permanently reporting "already been locked".
		fd.Close()
		return errors.Wrapf(err, "file %s lock failed", l.fileName)
	}
	// Record the descriptor only once the lock is actually held.
	l.fd = fd
	return nil
}
// Unlock unlocks file.
// If lock has not been invoked before unlock, unlock will return an error.
func (l *FileLock) Unlock() error {
	fd := l.fd
	if fd == nil {
		return fmt.Errorf("file %s descriptor is nil", l.fileName)
	}
	// Clear the stored descriptor first so the lock is reusable even if
	// the flock call below fails; the close error is ignored (best effort),
	// as before.
	l.fd = nil
	if err := syscall.Flock(int(fd.Fd()), syscall.LOCK_UN); err != nil {
		fd.Close()
		return errors.Wrapf(err, "file %s unlock failed", l.fileName)
	}
	fd.Close()
	return nil
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fileutils
import (
"fmt"
"regexp"
"strconv"
"github.com/dragonflyoss/Dragonfly/pkg/errortypes"
"github.com/pkg/errors"
)
// Fsize is a wrapper type which indicates the file size, in bytes.
type Fsize int64

// Binary size units, expressed in bytes (1 KB = 1024 B, and so on).
const (
	B  Fsize = 1
	KB       = 1024 * B
	MB       = 1024 * KB
	GB       = 1024 * MB
)

// fsizeRegex only supports the format G(B)/M(B)/K(B)/B or pure number.
// Group 1 captures the digits, group 2 the unit suffix.
var fsizeRegex = regexp.MustCompile("^([0-9]+)([GMK]B?|B)$")
// MarshalYAML implements the yaml.Marshaler interface, emitting the value
// in the compact form produced by FsizeToString (e.g. "4MB").
func (f Fsize) MarshalYAML() (interface{}, error) {
	return FsizeToString(f), nil
}
// UnmarshalYAML implements the yaml.Unmarshaler interface, parsing the
// scalar through StringToFSize.
func (f *Fsize) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var raw string
	if err := unmarshal(&raw); err != nil {
		return err
	}
	parsed, err := StringToFSize(raw)
	if err != nil {
		return err
	}
	*f = parsed
	return nil
}
// FsizeToString renders a Fsize as a compact string, using the largest unit
// (GB, MB, KB, then B) that divides the value exactly; zero is "0B".
func FsizeToString(fsize Fsize) string {
	n := int64(fsize)
	if n == 0 {
		return "0B"
	}
	symbol, unit := "B", B
	if n%int64(GB) == 0 {
		symbol, unit = "GB", GB
	} else if n%int64(MB) == 0 {
		symbol, unit = "MB", MB
	} else if n%int64(KB) == 0 {
		symbol, unit = "KB", KB
	}
	return fmt.Sprintf("%v%v", n/int64(unit), symbol)
}
// StringToFSize parses a string into Fsize.
//
// Accepted forms are a plain non-negative integer (bytes) or a number
// followed by one of G/GB/M/MB/K/KB/B. Anything else — including negative
// numbers — yields an error wrapping errortypes.ErrInvalidValue.
func StringToFSize(fsize string) (Fsize, error) {
	n, err := strconv.Atoi(fsize)
	if err == nil && n >= 0 {
		return Fsize(n), nil
	}
	if n < 0 {
		// Atoi parsed a number, but it was negative. (Previously the
		// message read "%s is not a negative value fsize", the opposite
		// of the condition being reported.)
		return 0, errors.Wrapf(errortypes.ErrInvalidValue, "%s is a negative value and fsize must not be negative", fsize)
	}
	matches := fsizeRegex.FindStringSubmatch(fsize)
	if len(matches) != 3 {
		return 0, errors.Wrapf(errortypes.ErrInvalidValue, "%s and supported format: G(B)/M(B)/K(B)/B or pure number", fsize)
	}
	n, _ = strconv.Atoi(matches[1])
	switch unit := matches[2]; {
	case unit == "G" || unit == "GB":
		n *= int(GB)
	case unit == "M" || unit == "MB":
		n *= int(MB)
	case unit == "K" || unit == "KB":
		n *= int(KB)
	case unit == "B":
		// Value already in bytes.
	default:
		return 0, errors.Wrapf(errortypes.ErrInvalidValue, "%s and supported format: G(B)/M(B)/K(B)/B or pure number", fsize)
	}
	return Fsize(n), nil
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fileutils
import (
"bufio"
"crypto/md5"
"fmt"
"hash"
"io"
"io/ioutil"
"os"
"path/filepath"
"syscall"
"gopkg.in/yaml.v2"
)
// BufferSize defines the buffer size when reading and writing file.
const BufferSize = 8 * 1024 * 1024

// CreateDirectory creates directory recursively. It is a no-op when the
// path already exists as a directory, and an error when it exists as a
// non-directory.
func CreateDirectory(dirPath string) error {
	info, err := os.Stat(dirPath)
	if err != nil {
		if os.IsNotExist(err) {
			return os.MkdirAll(dirPath, 0755)
		}
		return fmt.Errorf("failed to create dir %s: %v", dirPath, err)
	}
	if !info.IsDir() {
		return fmt.Errorf("failed to create dir %s: dir path already exists and is not a directory", dirPath)
	}
	return nil
}
// DeleteFile deletes a file (never a directory); missing paths and
// directories are rejected with an error.
func DeleteFile(filePath string) error {
	switch {
	case !PathExist(filePath):
		return fmt.Errorf("failed to delete file %s: file not exist", filePath)
	case IsDir(filePath):
		return fmt.Errorf("failed to delete file %s: file path is a directory rather than a file", filePath)
	}
	return os.Remove(filePath)
}
// DeleteFiles best-effort deletes all the given files, ignoring individual
// failures. (Ranging over an empty variadic slice is a no-op, so no length
// guard is needed.)
func DeleteFiles(filePaths ...string) {
	for _, p := range filePaths {
		DeleteFile(p)
	}
}
// OpenFile opens a file with the given flags and mode. When the path does
// not yet exist, the parent directory is created first.
func OpenFile(path string, flag int, perm os.FileMode) (*os.File, error) {
	if !PathExist(path) {
		if err := CreateDirectory(filepath.Dir(path)); err != nil {
			return nil, err
		}
	}
	return os.OpenFile(path, flag, perm)
}
// Link creates a hard link named linkName pointing at src, replacing an
// existing regular file at linkName; an existing directory is an error.
func Link(src string, linkName string) error {
	if !PathExist(linkName) {
		return os.Link(src, linkName)
	}
	if IsDir(linkName) {
		return fmt.Errorf("failed to link %s to %s: link name already exists and is a directory", linkName, src)
	}
	if err := DeleteFile(linkName); err != nil {
		return fmt.Errorf("failed to link %s to %s when deleting target file: %v", linkName, src, err)
	}
	return os.Link(src, linkName)
}
// SymbolicLink creates target as a symbolic link to src. src must exist;
// an existing regular file at target is replaced, an existing directory is
// an error.
func SymbolicLink(src string, target string) error {
	if !PathExist(src) {
		return fmt.Errorf("failed to symlink %s to %s: src no such file or directory", target, src)
	}
	if !PathExist(target) {
		return os.Symlink(src, target)
	}
	if IsDir(target) {
		return fmt.Errorf("failed to symlink %s to %s: link name already exists and is a directory", target, src)
	}
	if err := DeleteFile(target); err != nil {
		return fmt.Errorf("failed to symlink %s to %s when deleting target file: %v", target, src, err)
	}
	return os.Symlink(src, target)
}
// CopyFile copies the regular file src to dst. dst must not already exist;
// its parent directory is created as needed.
func CopyFile(src string, dst string) (err error) {
	var (
		s *os.File
		d *os.File
	)
	if !IsRegularFile(src) {
		return fmt.Errorf("failed to copy %s to %s: src is not a regular file", src, dst)
	}
	if s, err = os.Open(src); err != nil {
		return fmt.Errorf("failed to copy %s to %s when opening source file: %v", src, dst, err)
	}
	defer s.Close()
	if PathExist(dst) {
		return fmt.Errorf("failed to copy %s to %s: dst file already exists", src, dst)
	}
	if d, err = OpenFile(dst, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0755); err != nil {
		return fmt.Errorf("failed to copy %s to %s when opening destination file: %v", src, dst, err)
	}
	defer d.Close()
	buf := make([]byte, BufferSize)
	for {
		n, rerr := s.Read(buf)
		// Write any bytes read BEFORE inspecting the read error: the
		// io.Reader contract allows a reader to return n > 0 together with
		// io.EOF, and the previous loop dropped those final bytes,
		// silently truncating the copy.
		if n > 0 {
			if _, werr := d.Write(buf[:n]); werr != nil {
				return fmt.Errorf("failed to copy %s to %s when writing dst file: %v", src, dst, werr)
			}
		}
		if rerr == io.EOF {
			break
		}
		if rerr != nil {
			return fmt.Errorf("failed to copy %s to %s when reading src file: %v", src, dst, rerr)
		}
	}
	return nil
}
// MoveFile moves the regular file src to dst via rename, first removing an
// existing regular file at dst (an existing directory at dst is left for
// os.Rename to reject or use per OS semantics, as before).
func MoveFile(src string, dst string) error {
	if !IsRegularFile(src) {
		return fmt.Errorf("failed to move %s to %s: src is not a regular file", src, dst)
	}
	dstIsPlainFile := PathExist(dst) && !IsDir(dst)
	if dstIsPlainFile {
		if err := DeleteFile(dst); err != nil {
			return fmt.Errorf("failed to move %s to %s when deleting dst file: %v", src, dst, err)
		}
	}
	return os.Rename(src, dst)
}
// MoveFileAfterCheckMd5 verifies that src's md5 checksum equals md5 and
// then moves src to dst; a mismatch aborts the move.
func MoveFileAfterCheckMd5(src string, dst string, md5 string) error {
	if !IsRegularFile(src) {
		return fmt.Errorf("failed to move file with md5 check %s to %s: src is not a regular file", src, dst)
	}
	if actual := Md5Sum(src); actual != md5 {
		return fmt.Errorf("failed to move file with md5 check %s to %s: md5 of source file doesn't match against the given md5 value", src, dst)
	}
	return MoveFile(src, dst)
}
// PathExist reports whether the path exists; any os.Stat error (including
// permission errors) counts as "does not exist".
func PathExist(name string) bool {
	if _, err := os.Stat(name); err != nil {
		return false
	}
	return true
}
// IsDir reports whether the path exists and is a directory.
func IsDir(name string) bool {
	info, err := os.Stat(name)
	return err == nil && info.IsDir()
}
// IsRegularFile reports whether name is a regular file; os.Stat follows
// symbolic links, so a symlink to a regular file also qualifies.
func IsRegularFile(name string) bool {
	info, err := os.Stat(name)
	return err == nil && info.Mode().IsRegular()
}
// Md5Sum generates the hex md5 checksum of the regular file name; any
// failure (non-regular file, open or read error) yields "".
func Md5Sum(name string) string {
	if !IsRegularFile(name) {
		return ""
	}
	f, err := os.Open(name)
	if err != nil {
		return ""
	}
	defer f.Close()
	h := md5.New()
	if _, err := io.Copy(h, bufio.NewReaderSize(f, BufferSize)); err != nil {
		return ""
	}
	return GetMd5Sum(h, nil)
}
// GetMd5Sum appends the current hash to b and returns the result as a
// lowercase hex string. (The parameter is renamed from "md5", which
// shadowed the crypto/md5 package name.)
func GetMd5Sum(h hash.Hash, b []byte) string {
	return fmt.Sprintf("%x", h.Sum(b))
}
// GetSys returns the underlying data source of the os.FileInfo as a
// *syscall.Stat_t, with a boolean reporting whether the type assertion
// succeeded (the concrete Sys() type is platform-dependent).
func GetSys(info os.FileInfo) (*syscall.Stat_t, bool) {
	sys, ok := info.Sys().(*syscall.Stat_t)
	return sys, ok
}
// LoadYaml loads the yaml config file at path into out, wrapping both read
// and unmarshal failures with the file path for context.
func LoadYaml(path string, out interface{}) error {
	content, err := ioutil.ReadFile(path)
	if err != nil {
		return fmt.Errorf("failed to load yaml %s when reading file: %v", path, err)
	}
	if err = yaml.Unmarshal(content, out); err != nil {
		return fmt.Errorf("failed to load yaml %s: %v", path, err)
	}
	return nil
}
// GetFreeSpace gets the free disk space of the path in bytes, computed as
// available blocks times block size from syscall.Statfs.
func GetFreeSpace(path string) (Fsize, error) {
	fs := syscall.Statfs_t{}
	if err := syscall.Statfs(path, &fs); err != nil {
		return 0, err
	}
	return Fsize(fs.Bavail * uint64(fs.Bsize)), nil
}
// IsEmptyDir reports whether the directory at path contains no entries.
func IsEmptyDir(path string) (bool, error) {
	d, err := os.Open(path)
	if err != nil {
		return false, err
	}
	defer d.Close()
	// Asking for a single name is enough: io.EOF means the directory is empty.
	_, err = d.Readdirnames(1)
	if err == io.EOF {
		return true, nil
	}
	return false, err
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package httputils
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"reflect"
"strconv"
"strings"
"sync"
"time"
"github.com/dragonflyoss/Dragonfly/pkg/errortypes"
"github.com/dragonflyoss/Dragonfly/pkg/util"
"github.com/pkg/errors"
"github.com/valyala/fasthttp"
)
/* http content types */
const (
	// ApplicationJSONUtf8Value is the content type set on JSON POST bodies.
	ApplicationJSONUtf8Value = "application/json;charset=utf-8"
)

const (
	// RequestTag is the tag name for parsing structure to query parameters.
	// see function ParseQuery.
	RequestTag = "request"

	// DefaultTimeout is the default timeout to check connect.
	DefaultTimeout = 500 * time.Millisecond
)

var (
	// DefaultBuiltInTransport is the transport for HTTPWithHeaders.
	DefaultBuiltInTransport *http.Transport

	// DefaultBuiltInHTTPClient is the http client for HTTPWithHeaders.
	DefaultBuiltInHTTPClient *http.Client
)

// DefaultHTTPClient is the default implementation of SimpleHTTPClient.
var DefaultHTTPClient SimpleHTTPClient = &defaultHTTPClient{}

// protocols stores custom protocols
// key: schema value: transport
var protocols = sync.Map{}

// validURLSchemas stores valid schemas
// when call RegisterProtocol, validURLSchemas will be also updated.
var validURLSchemas = "https?|HTTPS?"

// SimpleHTTPClient defines some http functions used frequently.
// In every method, timeout <= 0 means block until the server responds.
type SimpleHTTPClient interface {
	// PostJSON posts body as JSON.
	PostJSON(url string, body interface{}, timeout time.Duration) (code int, res []byte, e error)
	// Get issues a plain GET.
	Get(url string, timeout time.Duration) (code int, res []byte, e error)
	// PostJSONWithHeaders is PostJSON with extra request headers.
	PostJSONWithHeaders(url string, headers map[string]string, body interface{}, timeout time.Duration) (code int, resBody []byte, err error)
	// GetWithHeaders is Get with extra request headers.
	GetWithHeaders(url string, headers map[string]string, timeout time.Duration) (code int, resBody []byte, err error)
}
// init installs a tuned transport on http.DefaultClient, builds the
// built-in transport/client pair, and registers custom protocols on the
// built-in transport.
func init() {
	http.DefaultClient.Transport = &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:   3 * time.Second, // short dial timeout for the default client
			KeepAlive: 30 * time.Second,
			DualStack: true,
		}).DialContext,
		MaxIdleConns:          100,
		IdleConnTimeout:       90 * time.Second,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
	}
	// Same settings except for a longer (30s) dial timeout.
	DefaultBuiltInTransport = &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second,
			DualStack: true,
		}).DialContext,
		MaxIdleConns:          100,
		IdleConnTimeout:       90 * time.Second,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
	}
	DefaultBuiltInHTTPClient = &http.Client{
		Transport: DefaultBuiltInTransport,
	}
	// RegisterProtocolOnTransport (defined elsewhere in this package) wires
	// the custom protocols stored in the protocols map into the transport.
	RegisterProtocolOnTransport(DefaultBuiltInTransport)
}
// ----------------------------------------------------------------------------
// defaultHTTPClient

// defaultHTTPClient is the fasthttp-backed implementation of SimpleHTTPClient.
type defaultHTTPClient struct {
}

// compile-time check that defaultHTTPClient satisfies SimpleHTTPClient.
var _ SimpleHTTPClient = &defaultHTTPClient{}
// PostJSON sends a POST request whose content-type is 'application/json;charset=utf-8'.
// When timeout <= 0, it will block until receiving response from server.
func (c *defaultHTTPClient) PostJSON(url string, body interface{}, timeout time.Duration) (
	code int, resBody []byte, err error) {
	// Delegate to the header-aware variant with no extra headers.
	return c.PostJSONWithHeaders(url, nil, body, timeout)
}
// Get sends a GET request to server.
// When timeout <= 0, it will block until receiving response from server.
func (c *defaultHTTPClient) Get(url string, timeout time.Duration) (
	code int, body []byte, e error) {
	if timeout <= 0 {
		return fasthttp.Get(nil, url)
	}
	return fasthttp.GetTimeout(nil, url, timeout)
}
// PostJSONWithHeaders sends a POST request with headers whose content-type is 'application/json;charset=utf-8'.
// When timeout <= 0, it will block until receiving response from server.
func (c *defaultHTTPClient) PostJSONWithHeaders(url string, headers map[string]string, body interface{}, timeout time.Duration) (
	code int, resBody []byte, err error) {
	var payload []byte
	if body != nil {
		if payload, err = json.Marshal(body); err != nil {
			return fasthttp.StatusBadRequest, nil, err
		}
	}
	// Configure the request as a JSON POST just before it is sent.
	setter := func(req *fasthttp.Request) error {
		req.SetBody(payload)
		req.Header.SetMethod("POST")
		req.Header.SetContentType(ApplicationJSONUtf8Value)
		return nil
	}
	return do(url, headers, timeout, setter)
}
// GetWithHeaders sends a GET request with headers to server.
// When timeout <= 0, it will block until receiving response from server.
func (c *defaultHTTPClient) GetWithHeaders(url string, headers map[string]string, timeout time.Duration) (
	code int, body []byte, e error) {
	// nil requestSetFunc: a plain GET with no request mutation.
	return do(url, headers, timeout, nil)
}

// requestSetFunc a function that will set some values to the *req.
type requestSetFunc func(req *fasthttp.Request) error
// do builds and sends a fasthttp request to url with the given headers,
// optionally letting rsf mutate the request (method, body, content type)
// before sending. When timeout <= 0 it blocks until the server responds.
// The response body is copied out because fasthttp recycles the response
// buffers when the response object is released.
func do(url string, headers map[string]string, timeout time.Duration, rsf requestSetFunc) (statusCode int, body []byte, err error) {
	// init request and response
	req := fasthttp.AcquireRequest()
	defer fasthttp.ReleaseRequest(req)
	req.SetRequestURI(url)
	for k, v := range headers {
		req.Header.Add(k, v)
	}
	// set request
	if rsf != nil {
		err = rsf(req)
		if err != nil {
			return
		}
	}
	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseResponse(resp)
	// send request
	if timeout > 0 {
		err = fasthttp.DoTimeout(req, resp, timeout)
	} else {
		err = fasthttp.Do(req, resp)
	}
	if err != nil {
		return
	}
	statusCode = resp.StatusCode()
	// Copy the body: resp.Body() points into pooled memory that becomes
	// invalid after ReleaseResponse runs.
	data := resp.Body()
	body = make([]byte, len(data))
	copy(body, data)
	return
}
// ---------------------------------------------------------------------------
// util functions

// PostJSON sends a POST request whose content-type is 'application/json;charset=utf-8'.
// It delegates to DefaultHTTPClient.
func PostJSON(url string, body interface{}, timeout time.Duration) (int, []byte, error) {
	return DefaultHTTPClient.PostJSON(url, body, timeout)
}

// Get sends a GET request to server via DefaultHTTPClient.
// When timeout <= 0, it will block until receiving response from server.
func Get(url string, timeout time.Duration) (int, []byte, error) {
	return DefaultHTTPClient.Get(url, timeout)
}

// PostJSONWithHeaders sends a POST request whose content-type is 'application/json;charset=utf-8'.
// It delegates to DefaultHTTPClient.
func PostJSONWithHeaders(url string, headers map[string]string, body interface{}, timeout time.Duration) (int, []byte, error) {
	return DefaultHTTPClient.PostJSONWithHeaders(url, headers, body, timeout)
}

// GetWithHeaders sends a GET request to server via DefaultHTTPClient.
// When timeout <= 0, it will block until receiving response from server.
func GetWithHeaders(url string, headers map[string]string, timeout time.Duration) (code int, resBody []byte, err error) {
	return DefaultHTTPClient.GetWithHeaders(url, headers, timeout)
}
// Do sends a GET-style request (no request modifier) to url with the given
// headers and returns the response body as a string.
// Any status code other than 200 OK is reported as an error.
// When timeout <= 0, it will block until receiving response from server.
func Do(url string, headers map[string]string, timeout time.Duration) (string, error) {
	statusCode, body, err := do(url, headers, timeout, nil)
	if err != nil {
		return "", err
	}
	if statusCode != http.StatusOK {
		return "", fmt.Errorf("unexpected status code: %d", statusCode)
	}
	result := string(body)
	return result, nil
}
// HTTPGet sends an HTTP GET request with headers and no timeout.
func HTTPGet(url string, headers map[string]string) (*http.Response, error) {
	return HTTPWithHeaders("GET", url, headers, 0, nil)
}

// HTTPGetTimeout sends an HTTP GET request with headers and a timeout.
func HTTPGetTimeout(url string, headers map[string]string, timeout time.Duration) (*http.Response, error) {
	return HTTPWithHeaders("GET", url, headers, timeout, nil)
}
// HTTPGetWithTLS sends an HTTP GET request with TLS config.
// Each file in cacerts is read and its PEM blocks appended to a root CA pool;
// the pool is only installed when at least one append succeeded.
// insecure disables server certificate verification.
func HTTPGetWithTLS(url string, headers map[string]string, timeout time.Duration, cacerts []string, insecure bool) (*http.Response, error) {
	roots := x509.NewCertPool()
	appendSuccess := false
	for _, certPath := range cacerts {
		certBytes, err := ioutil.ReadFile(certPath)
		if err != nil {
			return nil, err
		}
		// A file with no valid PEM blocks does not fail the request;
		// it simply contributes nothing to the pool.
		appendSuccess = appendSuccess || roots.AppendCertsFromPEM(certBytes)
	}
	tlsConfig := &tls.Config{
		InsecureSkipVerify: insecure,
	}
	if appendSuccess {
		tlsConfig.RootCAs = roots
	}
	return HTTPWithHeaders("GET", url, headers, timeout, tlsConfig)
}
// HTTPWithHeaders sends an HTTP request with headers and specified method.
// When timeout > 0 the request runs under a context deadline; the context's
// cancel func is released when the caller closes the response body.
// When tlsConfig is non-nil a one-off transport (with custom protocols
// registered) is used instead of DefaultBuiltInHTTPClient.
func HTTPWithHeaders(method, url string, headers map[string]string, timeout time.Duration, tlsConfig *tls.Config) (*http.Response, error) {
	var (
		cancel func()
	)
	req, err := http.NewRequest(method, url, nil)
	if err != nil {
		return nil, err
	}
	for k, v := range headers {
		req.Header.Add(k, v)
	}
	if timeout > 0 {
		timeoutCtx, cancelFunc := context.WithTimeout(context.Background(), timeout)
		req = req.WithContext(timeoutCtx)
		cancel = cancelFunc
	}
	var c = DefaultBuiltInHTTPClient
	if tlsConfig != nil {
		// copy from http.DefaultTransport
		transport := &http.Transport{
			Proxy: http.ProxyFromEnvironment,
			DialContext: (&net.Dialer{
				Timeout:   30 * time.Second,
				KeepAlive: 30 * time.Second,
				DualStack: true,
			}).DialContext,
			MaxIdleConns:          100,
			IdleConnTimeout:       90 * time.Second,
			TLSHandshakeTimeout:   10 * time.Second,
			ExpectContinueTimeout: 1 * time.Second,
		}
		RegisterProtocolOnTransport(transport)
		transport.TLSClientConfig = tlsConfig
		c = &http.Client{
			Transport: transport,
		}
	}
	res, err := c.Do(req)
	if err != nil {
		// Fix: release the timeout context when the request fails.
		// Previously the cancel func leaked on this path (go vet lostcancel),
		// keeping the context's timer alive until the deadline fired.
		if cancel != nil {
			cancel()
		}
		return nil, err
	}
	if cancel == nil {
		return res, nil
	}
	// do cancel() when close the body.
	res.Body = newWithFuncReadCloser(res.Body, cancel)
	return res, nil
}
// HTTPStatusOk reports whether the http response code is 200.
func HTTPStatusOk(code int) bool {
	return code == http.StatusOK
}
// ParseQuery only parses the fields with tag 'request' of the query to parameters.
// query must be a pointer to a struct; tagged fields are rendered as
// "tag=value" pairs joined by '&', with values formatted via %v.
// NOTE(review): a non-pointer or non-struct argument will panic in
// reflect Elem()/NumField — callers are expected to pass a struct pointer.
func ParseQuery(query interface{}) string {
	if util.IsNil(query) {
		return ""
	}
	b := bytes.Buffer{}
	wrote := false
	t := reflect.TypeOf(query).Elem()
	v := reflect.ValueOf(query).Elem()
	for i := 0; i < t.NumField(); i++ {
		tag := t.Field(i).Tag.Get(RequestTag)
		if tag != "" {
			if wrote {
				// Separate consecutive pairs with '&'.
				b.WriteByte('&')
			}
			b.WriteString(tag)
			b.WriteByte('=')
			b.WriteString(fmt.Sprintf("%v", v.Field(i)))
			wrote = true
		}
	}
	return b.String()
}
// CheckConnect checks the network connectivity between local and remote.
// param timeout: its unit is milliseconds, reset to 500 ms if <= 0
// returns localIP
func CheckConnect(ip string, port int, timeout int) (localIP string, e error) {
	wait := DefaultTimeout
	if timeout > 0 {
		wait = time.Duration(timeout) * time.Millisecond
	}
	// Just temporarily limit users can only use IP addr for IPv4 format.
	// In the near future, if we want to support IPv6, we can revise logic as below.
	var conn net.Conn
	if conn, e = net.DialTimeout("tcp4", fmt.Sprintf("%s:%d", ip, port), wait); e != nil {
		return
	}
	localIP = conn.LocalAddr().String()
	conn.Close()
	// Strip the ":port" suffix from the local address.
	if idx := strings.LastIndexByte(localIP, ':'); idx >= 0 {
		localIP = localIP[:idx]
	}
	return
}
// ConstructRangeStr wraps the rangeStr as a HTTP Range header value.
func ConstructRangeStr(rangeStr string) string {
	return "bytes=" + rangeStr
}
// RangeStruct contains the start and end of a http header range.
// Both indexes are inclusive byte offsets.
type RangeStruct struct {
	StartIndex int64 // first byte position
	EndIndex   int64 // last byte position (inclusive)
}
// GetRangeSE parses the start and the end from range HTTP header and returns them.
// It accepts either a full header value ("bytes=0-1023") or the bare range
// list ("0-1023,2048-"); length is the total entity size used to resolve
// open-ended and suffix ranges.
func GetRangeSE(rangeHTTPHeader string, length int64) ([]*RangeStruct, error) {
	var rangeStr = rangeHTTPHeader
	// when rangeHTTPHeader looks like "bytes=0-1023", strip the unit
	// prefix to get "0-1023".
	if strings.Contains(rangeHTTPHeader, "=") {
		rangeSlice := strings.Split(rangeHTTPHeader, "=")
		if len(rangeSlice) != 2 {
			return nil, errors.Wrapf(errortypes.ErrInvalidValue, "invalid range: %s, should be like bytes=0-1023", rangeStr)
		}
		rangeStr = rangeSlice[1]
	}
	var result []*RangeStruct
	// strings.Split always returns at least one element, so every entry is
	// handled by the loop below (the former rangeCount == 0 branch was
	// unreachable and has been removed).
	rangeArr := strings.Split(rangeStr, ",")
	for i := 0; i < len(rangeArr); i++ {
		if strings.Count(rangeArr[i], "-") != 1 {
			return nil, errors.Wrapf(errortypes.ErrInvalidValue, "invalid range: %s, should be like 0-1023", rangeArr[i])
		}
		// -{length}
		if strings.HasPrefix(rangeArr[i], "-") {
			rangeStruct, err := handlePrefixRange(rangeArr[i], length)
			if err != nil {
				return nil, err
			}
			result = append(result, rangeStruct)
			continue
		}
		// {startIndex}-
		if strings.HasSuffix(rangeArr[i], "-") {
			rangeStruct, err := handleSuffixRange(rangeArr[i], length)
			if err != nil {
				return nil, err
			}
			result = append(result, rangeStruct)
			continue
		}
		// {startIndex}-{endIndex}
		rangeStruct, err := handlePairRange(rangeArr[i], length)
		if err != nil {
			return nil, err
		}
		result = append(result, rangeStruct)
	}
	return result, nil
}
// handlePrefixRange parses a "-{suffixLength}" range, i.e. the final
// suffixLength bytes of an entity of the given length.
func handlePrefixRange(rangeStr string, length int64) (*RangeStruct, error) {
	suffixLen, err := strconv.ParseInt(strings.TrimPrefix(rangeStr, "-"), 10, 64)
	if err != nil || suffixLen < 0 {
		return nil, errors.Wrapf(errortypes.ErrInvalidValue, "failed to parse range: %s to int: %v", rangeStr, err)
	}
	if suffixLen > length {
		return nil, errors.Wrapf(errortypes.ErrRangeNotSatisfiable, "range: %s", rangeStr)
	}
	return &RangeStruct{
		StartIndex: length - suffixLen,
		EndIndex:   length - 1,
	}, nil
}
// handleSuffixRange parses a "{startIndex}-" range, i.e. everything from
// startIndex through the end of an entity of the given length.
func handleSuffixRange(rangeStr string, length int64) (*RangeStruct, error) {
	start, err := strconv.ParseInt(strings.TrimSuffix(rangeStr, "-"), 10, 64)
	if err != nil || start < 0 {
		return nil, errors.Wrapf(errortypes.ErrInvalidValue, "failed to parse range: %s to int: %v", rangeStr, err)
	}
	if start > length {
		return nil, errors.Wrapf(errortypes.ErrRangeNotSatisfiable, "range: %s", rangeStr)
	}
	return &RangeStruct{
		StartIndex: start,
		EndIndex:   length - 1,
	}, nil
}
// handlePairRange parses a "{startIndex}-{endIndex}" range pair, validating
// that both indexes are non-negative, within length, and correctly ordered.
func handlePairRange(rangeStr string, length int64) (*RangeStruct, error) {
	rangePair := strings.Split(rangeStr, "-")
	startIndex, err := strconv.ParseInt(rangePair[0], 10, 64)
	if err != nil || startIndex < 0 {
		return nil, errors.Wrapf(errortypes.ErrInvalidValue, "failed to parse range: %s to int: %v", rangeStr, err)
	}
	if startIndex > length {
		return nil, errors.Wrapf(errortypes.ErrRangeNotSatisfiable, "range: %s", rangeStr)
	}
	endIndex, err := strconv.ParseInt(rangePair[1], 10, 64)
	if err != nil || endIndex < 0 {
		return nil, errors.Wrapf(errortypes.ErrInvalidValue, "failed to parse range: %s to int: %v", rangeStr, err)
	}
	if endIndex > length {
		return nil, errors.Wrapf(errortypes.ErrRangeNotSatisfiable, "range: %s", rangeStr)
	}
	if endIndex < startIndex {
		// Fix: message previously read "the start is larger the end".
		return nil, errors.Wrapf(errortypes.ErrInvalidValue, "range: %s, the start is larger than the end", rangeStr)
	}
	return &RangeStruct{
		StartIndex: startIndex,
		EndIndex:   endIndex,
	}, nil
}
// RegisterProtocol registers custom protocols in global variable "protocols" which will be used in dfget and supernode
// Example:
//   protocols := "helloworld"
//   newTransport := funcNewTransport
//   httputils.RegisterProtocol(protocols, newTransport)
// RegisterProtocol must be called before initialise dfget or supernode instances.
// NOTE(review): the validURLSchemas string append is not synchronized, so
// concurrent registration after startup would race — the "call before init"
// contract above is what makes this safe.
func RegisterProtocol(scheme string, rt http.RoundTripper) {
	validURLSchemas += "|" + scheme
	protocols.Store(scheme, rt)
}
// RegisterProtocolOnTransport registers all new protocols in "protocols" for a special Transport
// this function will be used in supernode and dfget
func RegisterProtocolOnTransport(tr *http.Transport) {
	register := func(key, value interface{}) bool {
		tr.RegisterProtocol(key.(string), value.(http.RoundTripper))
		return true
	}
	protocols.Range(register)
}
// GetValidURLSchemas returns the current regex alternation of valid URL
// schemas, including any custom ones added via RegisterProtocol.
func GetValidURLSchemas() string {
	return validURLSchemas
}
func newWithFuncReadCloser(rc io.ReadCloser, f func()) io.ReadCloser {
return &withFuncReadCloser{
f: f,
ReadCloser: rc,
}
}
type withFuncReadCloser struct {
f func()
io.ReadCloser
}
func (wrc *withFuncReadCloser) Close() error {
if wrc.f != nil {
wrc.f()
}
return wrc.ReadCloser.Close()
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package httputils
import (
"time"
)
// postJSONFunc mirrors SimpleHTTPClient.PostJSON for mocking.
type postJSONFunc = func(url string, body interface{},
	timeout time.Duration) (int, []byte, error)

// getFunc mirrors SimpleHTTPClient.Get for mocking.
type getFunc = func(url string, timeout time.Duration) (int, []byte, error)

// postJSONWithHeadersFunc mirrors SimpleHTTPClient.PostJSONWithHeaders for mocking.
type postJSONWithHeadersFunc = func(url string, headers map[string]string, body interface{},
	timeout time.Duration) (int, []byte, error)

// getWithHeadersFunc mirrors SimpleHTTPClient.GetWithHeaders for mocking.
type getWithHeadersFunc = func(url string, headers map[string]string,
	timeout time.Duration) (int, []byte, error)
// MockHTTPClient fakes a customized implementation of util.SimpleHTTPClient.
// Each field, when non-nil, overrides the corresponding method; nil fields
// make the method return (0, nil, nil).
type MockHTTPClient struct {
	PostJSONFunc            postJSONFunc
	GetFunc                 getFunc
	PostJSONWithHeadersFunc postJSONWithHeadersFunc
	GetWithHeadersFunc      getWithHeadersFunc
}

// NewMockHTTPClient returns a new MockHTTPClient instance.
func NewMockHTTPClient() *MockHTTPClient {
	return &MockHTTPClient{}
}
// PostJSON mocks base method.
func (m *MockHTTPClient) PostJSON(url string, body interface{}, timeout time.Duration) (
	int, []byte, error) {
	if m.PostJSONFunc == nil {
		return 0, nil, nil
	}
	return m.PostJSONFunc(url, body, timeout)
}
// Get mocks base method.
func (m *MockHTTPClient) Get(url string, timeout time.Duration) (int, []byte, error) {
	if m.GetFunc == nil {
		return 0, nil, nil
	}
	return m.GetFunc(url, timeout)
}
// PostJSONWithHeaders mocks base method.
func (m *MockHTTPClient) PostJSONWithHeaders(url string, headers map[string]string, body interface{}, timeout time.Duration) (
	int, []byte, error) {
	if m.PostJSONWithHeadersFunc == nil {
		return 0, nil, nil
	}
	return m.PostJSONWithHeadersFunc(url, headers, body, timeout)
}
// GetWithHeaders mocks base method.
func (m *MockHTTPClient) GetWithHeaders(url string, headers map[string]string, timeout time.Duration) (
	int, []byte, error) {
	if m.GetWithHeadersFunc == nil {
		return 0, nil, nil
	}
	return m.GetWithHeadersFunc(url, headers, timeout)
}
// Reset clears all configured mock functions back to nil.
func (m *MockHTTPClient) Reset() {
	// Zero the whole struct: it only contains the four func fields.
	*m = MockHTTPClient{}
}
// CreatePostJSONFunc returns a mock postJSONFunc func
// which will always return the specific results.
func (m *MockHTTPClient) CreatePostJSONFunc(code int, res []byte, e error) postJSONFunc {
	return func(_ string, _ interface{}, _ time.Duration) (int, []byte, error) {
		return code, res, e
	}
}

// CreateGetFunc returns a mock getFunc func
// which will always return the specific results.
func (m *MockHTTPClient) CreateGetFunc(code int, res []byte, e error) getFunc {
	return func(_ string, _ time.Duration) (int, []byte, error) {
		return code, res, e
	}
}

// CreatePostJSONWithHeadersFunc returns a mock postJSONWithHeadersFunc func
// which will always return the specific results.
func (m *MockHTTPClient) CreatePostJSONWithHeadersFunc(code int, res []byte, e error) postJSONWithHeadersFunc {
	return func(_ string, _ map[string]string, _ interface{}, _ time.Duration) (int, []byte, error) {
		return code, res, e
	}
}

// CreateGetWithHeadersFunc returns a mock getWithHeadersFunc func
// which will always return the specific results.
func (m *MockHTTPClient) CreateGetWithHeadersFunc(code int, res []byte, e error) getWithHeadersFunc {
	return func(_ string, _ map[string]string, _ time.Duration) (int, []byte, error) {
		return code, res, e
	}
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package limitreader
import (
"crypto/md5"
"hash"
"io"
"github.com/dragonflyoss/Dragonfly/pkg/fileutils"
"github.com/dragonflyoss/Dragonfly/pkg/ratelimiter"
)
// NewLimitReader creates a LimitReader.
// src: reader
// rate: bytes/second
func NewLimitReader(src io.Reader, rate int64, calculateMd5 bool) *LimitReader {
	limiter := newRateLimiterWithDefaultWindow(rate)
	return NewLimitReaderWithLimiter(limiter, src, calculateMd5)
}
// NewLimitReaderWithLimiter creates LimitReader with a rateLimiter.
// src: reader
// rate: bytes/second
func NewLimitReaderWithLimiter(rl *ratelimiter.RateLimiter, src io.Reader, calculateMd5 bool) *LimitReader {
	reader := &LimitReader{
		Src:     src,
		Limiter: rl,
	}
	if calculateMd5 {
		reader.md5sum = md5.New()
	}
	return reader
}
// NewLimitReaderWithMD5Sum creates LimitReader with a md5 sum.
// src: reader
// rate: bytes/second
func NewLimitReaderWithMD5Sum(src io.Reader, rate int64, md5sum hash.Hash) *LimitReader {
	limiter := newRateLimiterWithDefaultWindow(rate)
	return NewLimitReaderWithLimiterAndMD5Sum(src, limiter, md5sum)
}
// NewLimitReaderWithLimiterAndMD5Sum creates LimitReader with rateLimiter and md5 sum.
// src: reader
// rate: bytes/second
func NewLimitReaderWithLimiterAndMD5Sum(src io.Reader, rl *ratelimiter.RateLimiter, md5sum hash.Hash) *LimitReader {
	return &LimitReader{
		md5sum:  md5sum,
		Limiter: rl,
		Src:     src,
	}
}
// newRateLimiterWithDefaultWindow builds a RateLimiter for the given rate
// (bytes/second) using a fixed window of 2.
func newRateLimiterWithDefaultWindow(rate int64) *ratelimiter.RateLimiter {
	return ratelimiter.NewRateLimiter(ratelimiter.TransRate(rate), 2)
}

// LimitReader reads stream with RateLimiter.
type LimitReader struct {
	Src     io.Reader                // underlying reader
	Limiter *ratelimiter.RateLimiter // throttles read throughput
	md5sum  hash.Hash                // nil when md5 calculation is disabled
}
// Read reads from Src, feeds the bytes read into the md5 sum (when enabled),
// and then blocks on the limiter for that many bytes.
// io.EOF is passed through together with any final bytes so callers see the
// standard (n > 0, io.EOF) combination.
func (lr *LimitReader) Read(p []byte) (n int, err error) {
	n, e := lr.Src.Read(p)
	if e != nil && e != io.EOF {
		// Real error: return immediately without hashing or throttling.
		return n, e
	}
	if n > 0 {
		if lr.md5sum != nil {
			lr.md5sum.Write(p[:n])
		}
		// Throttle after the read: block until n bytes of quota are available.
		lr.Limiter.AcquireBlocking(int64(n))
	}
	return n, e
}
// Md5 calculates the md5 of all contents read so far.
// It returns "" when md5 calculation was not enabled at construction.
func (lr *LimitReader) Md5() string {
	if lr.md5sum != nil {
		return fileutils.GetMd5Sum(lr.md5sum, nil)
	}
	return ""
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package metricsutils
import (
"github.com/prometheus/client_golang/prometheus"
)
const (
	// namespace is the common prefix of all dragonfly metric names.
	namespace = "dragonfly"
)
// NewCounter will register a Counter metric to specified registry and return it.
// If registry is not specified, it will register metric to default prometheus registry.
func NewCounter(subsystem, name, help string, labels []string, register prometheus.Registerer) *prometheus.CounterVec {
	if register == nil {
		register = prometheus.DefaultRegisterer
	}
	opts := prometheus.CounterOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      name,
		Help:      help,
	}
	metric := prometheus.NewCounterVec(opts, labels)
	register.MustRegister(metric)
	return metric
}
// NewGauge will register a Gauge metric to specified registry and return it.
// If registry is not specified, it will register metric to default prometheus registry.
func NewGauge(subsystem, name, help string, labels []string, register prometheus.Registerer) *prometheus.GaugeVec {
	if register == nil {
		register = prometheus.DefaultRegisterer
	}
	opts := prometheus.GaugeOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      name,
		Help:      help,
	}
	metric := prometheus.NewGaugeVec(opts, labels)
	register.MustRegister(metric)
	return metric
}
// NewSummary will register a Summary metric to specified registry and return it.
// If registry is not specified, it will register metric to default prometheus registry.
func NewSummary(subsystem, name, help string, labels []string, objectives map[float64]float64, register prometheus.Registerer) *prometheus.SummaryVec {
	if register == nil {
		register = prometheus.DefaultRegisterer
	}
	opts := prometheus.SummaryOpts{
		Namespace:  namespace,
		Subsystem:  subsystem,
		Name:       name,
		Help:       help,
		Objectives: objectives,
	}
	metric := prometheus.NewSummaryVec(opts, labels)
	register.MustRegister(metric)
	return metric
}
// NewHistogram will register a Histogram metric to specified registry and return it.
// If registry is not specified, it will register metric to default prometheus registry.
func NewHistogram(subsystem, name, help string, labels []string, buckets []float64, register prometheus.Registerer) *prometheus.HistogramVec {
	if register == nil {
		register = prometheus.DefaultRegisterer
	}
	opts := prometheus.HistogramOpts{
		Namespace: namespace,
		Subsystem: subsystem,
		Name:      name,
		Help:      help,
		Buckets:   buckets,
	}
	metric := prometheus.NewHistogramVec(opts, labels)
	register.MustRegister(metric)
	return metric
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package netutils
import (
"bufio"
"fmt"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"github.com/dragonflyoss/Dragonfly/pkg/httputils"
"github.com/dragonflyoss/Dragonfly/pkg/rate"
"github.com/dragonflyoss/Dragonfly/pkg/stringutils"
log "github.com/sirupsen/logrus"
)
const (
	// separator is the query-parameter delimiter used by FilterURLParam.
	separator = "&"
	// layoutGMT is the timezone suffix used by ConvertTimeIntToString.
	layoutGMT = "GMT"
)

// default rate limit is 20M.
var defaultRateLimit = 20 * rate.MB
// NetLimit parse speed of interface that it has prefix of eth.
// NOTE(review): the code below actually matches interfaces whose names start
// with "enp", not "eth" — confirm which prefix is intended.
// It shells out to ethtool to read each matching interface's speed and
// returns the largest one (converted from Mb/s to MB/s); on machines with
// fewer than 24 CPUs, or on any failure, it returns defaultRateLimit.
func NetLimit() *rate.Rate {
	// Any panic during parsing falls back to the default limit.
	defer func() {
		if err := recover(); err != nil {
			log.Errorf("parse default net limit error:%v", err)
		}
	}()
	if runtime.NumCPU() < 24 {
		return &defaultRateLimit
	}
	// Locate the ethtool binary, preferring PATH over /usr/sbin.
	var ethtool string
	if path, err := exec.LookPath("ethtool"); err == nil {
		ethtool = path
	} else if _, err := os.Stat("/usr/sbin/ethtool"); err == nil || os.IsExist(err) {
		ethtool = "/usr/sbin/ethtool"
	}
	if ethtool == "" {
		log.Warn("ethtool not found")
		return &defaultRateLimit
	}
	var maxInterfaceLimit = uint64(0)
	interfaces, err := net.Interfaces()
	if err != nil {
		return &defaultRateLimit
	}
	// Matches lines like "	Speed: 1000Mb/s" after the field split below.
	compile := regexp.MustCompile("^[[:space:]]*([[:digit:]]+)[[:space:]]*Mb/s[[:space:]]*$")
	for _, dev := range interfaces {
		if !strings.HasPrefix(dev.Name, "enp") {
			continue
		}
		cmd := exec.Command(ethtool, dev.Name)
		stdoutPipe, err := cmd.StdoutPipe()
		if err != nil {
			continue
		}
		if err := cmd.Start(); err != nil {
			log.Warnf("ethtool %s error:%v", dev.Name, err)
			continue
		}
		scanner := bufio.NewScanner(stdoutPipe)
		// TODO(yeya24): using scanner.Scan() will execute multiple syscall to read data,
		// change to use a single syscall to read all data here.
		for scanner.Scan() {
			fields := strings.Split(strings.TrimSpace(scanner.Text()), ":")
			if len(fields) != 2 {
				continue
			}
			if strings.ToLower(strings.TrimSpace(fields[0])) != "speed" {
				continue
			}
			speed := compile.FindStringSubmatch(fields[1])
			if len(speed) <= 1 {
				continue
			}
			// Convert Mb/s to MB/s and keep the maximum across interfaces.
			if tmpLimit, err := strconv.ParseUint(speed[1], 0, 32); err == nil {
				tmpLimit = tmpLimit / 8
				if tmpLimit > maxInterfaceLimit {
					maxInterfaceLimit = tmpLimit
				}
			}
		}
		cmd.Wait()
	}
	if maxInterfaceLimit > 0 {
		r := rate.Rate(maxInterfaceLimit) * rate.MB
		return &r
	}
	return &defaultRateLimit
}
// ExtractHost extracts host ip from the giving string.
// The input is trimmed and everything before the first ':' is returned.
func ExtractHost(hostAndPort string) string {
	trimmed := strings.TrimSpace(hostAndPort)
	if idx := strings.Index(trimmed, ":"); idx >= 0 {
		return trimmed[:idx]
	}
	return trimmed
}
// GetIPAndPortFromNode returns ip and port by parsing the node value.
// It will return defaultPort as the value of port
// when the node is a string without port or with an illegal port.
func GetIPAndPortFromNode(node string, defaultPort int) (string, int) {
	if stringutils.IsEmptyStr(node) {
		return "", defaultPort
	}
	fields := strings.Split(node, ":")
	if len(fields) == 1 {
		// Bare host with no port.
		return fields[0], defaultPort
	}
	if len(fields) == 2 {
		p, err := strconv.Atoi(fields[1])
		if err != nil {
			return fields[0], defaultPort
		}
		return fields[0], p
	}
	// More than one ':' is not a supported form.
	return "", defaultPort
}
// FilterURLParam filters request queries in URL.
// Eg:
// If you pass parameters as follows:
//     url: http://a.b.com/locate?key1=value1&key2=value2&key3=value3
//     filter: key2
// and then you will get the following value as the return:
//     http://a.b.com/locate?key1=value1&key3=value3
func FilterURLParam(url string, filters []string) string {
	parts := strings.SplitN(url, "?", 2)
	if len(filters) <= 0 || len(parts) != 2 || strings.TrimSpace(parts[1]) == "" {
		return url
	}
	dropped := slice2Map(filters)
	var kept []string
	for _, param := range strings.Split(parts[1], separator) {
		kv := strings.SplitN(param, "=", 2)
		if len(kv) >= 1 && isExist(dropped, kv[0]) {
			continue
		}
		kept = append(kept, param)
	}
	if len(kept) == 0 {
		return parts[0]
	}
	return parts[0] + "?" + strings.Join(kept, separator)
}
// ConvertHeaders converts headers from array type to map type for http request.
// Entries without a ':' or with an empty value are skipped; duplicate keys
// have their values joined with ','. A nil map is returned for empty input.
func ConvertHeaders(headers []string) map[string]string {
	if len(headers) == 0 {
		return nil
	}
	result := make(map[string]string)
	for _, h := range headers {
		parts := strings.SplitN(h, ":", 2)
		if len(parts) != 2 {
			continue
		}
		key := strings.TrimSpace(parts[0])
		value := strings.TrimSpace(parts[1])
		if value == "" {
			continue
		}
		if existing, ok := result[key]; ok {
			result[key] = existing + "," + value
		} else {
			result[key] = value
		}
	}
	return result
}
// IsValidURL returns whether the string url is a valid HTTP URL.
// The URL must parse, have a host and a scheme, and match one of the schemas
// returned by httputils.GetValidURLSchemas().
// NOTE: the regex is rebuilt on every call on purpose — the schema set can
// grow at runtime via httputils.RegisterProtocol, so it must not be cached.
func IsValidURL(urlStr string) bool {
	u, err := url.Parse(urlStr)
	if err != nil {
		return false
	}
	if len(u.Host) == 0 || len(u.Scheme) == 0 {
		return false
	}
	// with custom schemas, url like "x://y/z" is valid
	reg := regexp.MustCompile(`(` +
		httputils.GetValidURLSchemas() +
		`)://([\w_]+:[\w_]+@)?([\w-]+\.)*[\w-]+(/[\w- ./?%&=]*)?`)
	if result := reg.FindString(urlStr); stringutils.IsEmptyStr(result) {
		return false
	}
	return true
}
// ipv4Pattern matches a dotted-quad IPv4 address where each octet is a
// number greater than or equal to 0 and less than or equal to 255.
// Hoisted to package level so it is compiled once instead of on every call.
var ipv4Pattern = regexp.MustCompile(`^(?:(?:25[0-5]|2[0-4]\d|[01]?\d?\d)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d?\d)$`)

// IsValidIP returns whether the string ip is a valid IPv4 address.
func IsValidIP(ip string) bool {
	if strings.TrimSpace(ip) == "" {
		return false
	}
	return ipv4Pattern.MatchString(ip)
}
// GetAllIPs returns all non-loopback IPV4 addresses.
// IPv6 and loopback addresses are filtered out.
func GetAllIPs() (ipList []string, err error) {
	// get all system's unicast interface addresses.
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return nil, err
	}
	// filter all loopback addresses.
	for _, v := range addrs {
		if ipNet, ok := v.(*net.IPNet); ok && !ipNet.IP.IsLoopback() {
			// To4() is non-nil only for IPv4 addresses.
			if ipNet.IP.To4() != nil {
				ipList = append(ipList, ipNet.IP.String())
			}
		}
	}
	return
}
// ConvertTimeStringToInt converts an HTTP-date string (http.TimeFormat) to a
// millisecond timestamp.
func ConvertTimeStringToInt(timeStr string) (int64, error) {
	parsed, err := time.ParseInLocation(http.TimeFormat, timeStr, time.UTC)
	if err != nil {
		return 0, err
	}
	// Seconds since epoch, scaled to milliseconds.
	return parsed.Unix() * 1000, nil
}
// ConvertTimeIntToString converts a millisecond timestamp to an HTTP-date
// string in UTC. The http.TimeFormat layout already ends in the literal
// "GMT", so the formatted value carries the GMT suffix directly.
func ConvertTimeIntToString(timestamp int64) (string, error) {
	seconds := timestamp / 1000
	return time.Unix(seconds, 0).UTC().Format(http.TimeFormat), nil
}
// slice2Map translates a slice to a set-like map: each element becomes a key
// mapped to true.
func slice2Map(value []string) map[string]bool {
	set := make(map[string]bool, len(value))
	for _, item := range value {
		set[item] = true
	}
	return set
}
// isExist returns whether the map contains the key (regardless of the
// stored boolean value).
func isExist(mmap map[string]bool, key string) bool {
	_, ok := mmap[key]
	return ok
}
// CalculateTimeout calculates the timeout(in seconds) according to the fileLength and the min rate of network.
//
// The 0 will be returned when both minRate and defaultMinRate both are <=0.
func CalculateTimeout(fileLength int64, minRate, defaultMinRate rate.Rate, reservedTime time.Duration) time.Duration {
	if fileLength <= 0 {
		return 0
	}
	// ensure the minRate to avoid trigger panic when minRate equals zero
	if minRate <= 0 {
		if defaultMinRate <= 0 {
			return 0
		}
		minRate = defaultMinRate
	}
	seconds := time.Duration(fileLength/int64(minRate)) * time.Second
	return seconds + reservedTime
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package printer carries a stdout fd. It helps caller to print message on console
// even if it has set the log fd redirect.
package printer
import (
"fmt"
"io"
"os"
)
var (
	// Printer is global StdPrinter writing to os.Stdout; the package-level
	// Print/Println/Printf functions use it.
	Printer = &StdPrinter{Out: os.Stdout}
)
// StdPrinter outputs info to console directly.
type StdPrinter struct {
Out io.Writer
}
// Print outputs info to console directly.
func (sp *StdPrinter) Print(msg string) {
if sp.Out != nil {
fmt.Fprint(sp.Out, msg)
}
}
// Println outputs info to console directly.
func (sp *StdPrinter) Println(msg string) {
if sp.Out != nil {
fmt.Fprintln(sp.Out, msg)
}
}
// Printf formats according to a format specifier.
func (sp *StdPrinter) Printf(format string, a ...interface{}) {
if sp.Out != nil {
fmt.Fprintf(sp.Out, format+"\n", a...)
}
}
// Print outputs info to console directly via the global Printer.
func Print(msg string) {
	Printer.Print(msg)
}

// Println outputs info to console directly via the global Printer.
func Println(msg string) {
	Printer.Println(msg)
}

// Printf formats according to a format specifier via the global Printer.
func Printf(format string, a ...interface{}) {
	Printer.Printf(format, a...)
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package queue
import (
"container/list"
"sync"
"github.com/dragonflyoss/Dragonfly/pkg/errortypes"
)
// cQElementData is the value of list.Element.Value.
// It records the key and data of an item so that eviction can also
// delete the matching map entry.
type cQElementData struct {
	key  string
	data interface{}
}

// LRUQueue is an implementation of LRU (least recently used).
// All methods are safe for concurrent use.
type LRUQueue struct {
	lock     sync.Mutex
	capacity int
	// itemMap indexes list elements by key for O(1) lookup.
	itemMap map[string]*list.Element
	// l orders items from most (front) to least (back) recently used.
	l *list.List
}

// NewLRUQueue creates an LRUQueue that holds at most capacity items.
func NewLRUQueue(capacity int) *LRUQueue {
	return &LRUQueue{
		capacity: capacity,
		itemMap:  make(map[string]*list.Element, capacity),
		l:        list.New(),
	}
}

// Put puts item to front, return the obsolete item
func (q *LRUQueue) Put(key string, data interface{}) (obsoleteKey string, obsoleteData interface{}) {
	q.lock.Lock()
	defer q.lock.Unlock()

	// Existing key: refresh the data and mark it most recently used.
	if i, ok := q.itemMap[key]; ok {
		i.Value.(*cQElementData).data = data
		q.putAtFront(i)
		return
	}

	if len(q.itemMap) >= q.capacity {
		// remove the earliest item
		i := q.removeFromTail()
		if i != nil {
			delete(q.itemMap, i.Value.(*cQElementData).key)
			obsoleteKey = i.Value.(*cQElementData).key
			obsoleteData = i.Value.(*cQElementData).data
		}
	}

	i := q.putValue(&cQElementData{key: key, data: data})
	q.itemMap[key] = i
	return
}

// Get will return the item by key. And it will put the item to front.
func (q *LRUQueue) Get(key string) (interface{}, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	data, exist := q.itemMap[key]
	if !exist {
		return nil, errortypes.ErrDataNotFound
	}

	q.putAtFront(data)
	return data.Value.(*cQElementData).data, nil
}

// GetFront will get several items from front and not poll out them.
// It returns at most count items; fewer if the queue is shorter.
func (q *LRUQueue) GetFront(count int) []interface{} {
	if count <= 0 {
		return nil
	}

	q.lock.Lock()
	defer q.lock.Unlock()

	result := make([]interface{}, count)
	item := q.l.Front()
	index := 0
	for {
		if item == nil {
			break
		}
		result[index] = item.Value.(*cQElementData).data
		index++
		if index >= count {
			break
		}
		item = item.Next()
	}

	return result[:index]
}

// GetItemByKey will return the item by key. But it will not put the item to front.
func (q *LRUQueue) GetItemByKey(key string) (interface{}, error) {
	q.lock.Lock()
	defer q.lock.Unlock()

	if data, exist := q.itemMap[key]; exist {
		return data.Value.(*cQElementData).data, nil
	}
	return nil, errortypes.ErrDataNotFound
}

// Delete deletes the item by key, return the deleted item if item exists.
func (q *LRUQueue) Delete(key string) interface{} {
	q.lock.Lock()
	defer q.lock.Unlock()

	data, exist := q.itemMap[key]
	if !exist {
		return nil
	}

	retData := data.Value.(*cQElementData).data
	delete(q.itemMap, key)
	q.removeElement(data)
	return retData
}

// putAtFront marks i as the most recently used element.
func (q *LRUQueue) putAtFront(i *list.Element) {
	q.l.MoveToFront(i)
}

// putValue inserts data as a new most-recently-used element.
func (q *LRUQueue) putValue(data interface{}) *list.Element {
	e := q.l.PushFront(data)
	return e
}

// removeFromTail removes and returns the least recently used element,
// or nil when the list is empty.
func (q *LRUQueue) removeFromTail() *list.Element {
	e := q.l.Back()
	if e == nil {
		// Guard: with capacity <= 0 the eviction path runs on an empty
		// list; list.Remove on a nil element would panic.
		return nil
	}
	q.l.Remove(e)
	return e
}

// removeElement detaches i from the recency list.
func (q *LRUQueue) removeElement(i *list.Element) {
	q.l.Remove(i)
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package queue
import (
"container/list"
"sync"
"sync/atomic"
"time"
"unsafe"
"github.com/dragonflyoss/Dragonfly/pkg/util"
)
// Queue blocking queue. The items putted into queue mustn't be nil.
type Queue interface {
// Put puts item into the queue and keeps blocking if the queue is full.
// It will return immediately and do nothing if the item is nil.
Put(item interface{})
// PutTimeout puts item into the queue and waits for timeout if the queue is full.
// If timeout <= 0, it will return false immediately when queue is full.
// It will return immediately and do nothing if the item is nil.
PutTimeout(item interface{}, timeout time.Duration) bool
// Poll gets an item from the queue and keeps blocking if the queue is empty.
Poll() interface{}
// PollTimeout gets an item from the queue and waits for timeout if the queue is empty.
// If timeout <= 0, it will return (nil, bool) immediately when queue is empty.
PollTimeout(timeout time.Duration) (interface{}, bool)
// Len returns the current size of the queue.
Len() int
}
// NewQueue creates a blocking queue.
// If capacity <= 0, the queue capacity is infinite.
func NewQueue(capacity int) Queue {
if capacity <= 0 {
c := make(chan struct{})
return &infiniteQueue{
store: list.New(),
empty: unsafe.Pointer(&c),
}
}
return &finiteQueue{
store: make(chan interface{}, capacity),
}
}
// infiniteQueue implements infinite blocking queue.
// Emptiness notification works by closing-and-replacing a channel:
// pollers snapshot the current channel and block on it; the first Put
// into an empty queue closes that channel, waking them all.
type infiniteQueue struct {
	sync.Mutex
	// store holds queued items in FIFO order; guarded by the mutex.
	store *list.List
	// empty is an atomically-swapped *chan struct{}; the pointed-to
	// channel is closed when the queue becomes non-empty.
	empty unsafe.Pointer
}

// Compile-time assertion that infiniteQueue satisfies Queue.
var _ Queue = &infiniteQueue{}

// Put appends item at the tail; nil items are silently ignored.
func (q *infiniteQueue) Put(item interface{}) {
	if util.IsNil(item) {
		return
	}
	q.Lock()
	defer q.Unlock()
	q.store.PushBack(item)
	if q.store.Len() < 2 {
		// empty -> has one element: wake any blocked pollers.
		q.broadcast()
	}
}

// PutTimeout behaves like Put: the queue is unbounded so it never
// blocks and the timeout is unused. Returns false only for nil items.
func (q *infiniteQueue) PutTimeout(item interface{}, timeout time.Duration) bool {
	q.Put(item)
	return !util.IsNil(item)
}

// Poll removes and returns the head item, blocking while the queue is empty.
func (q *infiniteQueue) Poll() interface{} {
	q.Lock()
	defer q.Unlock()
	for q.store.Len() == 0 {
		q.wait()
	}
	item := q.store.Front()
	q.store.Remove(item)
	return item.Value
}

// PollTimeout is Poll with a deadline; returns (nil, false) on timeout.
func (q *infiniteQueue) PollTimeout(timeout time.Duration) (interface{}, bool) {
	deadline := time.Now().Add(timeout)
	q.Lock()
	defer q.Unlock()
	for q.store.Len() == 0 {
		// Recompute remaining time each round: a wake-up may race with
		// another poller that drained the queue first.
		timeout = -time.Since(deadline)
		if timeout <= 0 || !q.waitTimeout(timeout) {
			return nil, false
		}
	}
	item := q.store.Front()
	q.store.Remove(item)
	return item.Value, true
}

// Len returns the number of queued items.
func (q *infiniteQueue) Len() int {
	q.Lock()
	defer q.Unlock()
	return q.store.Len()
}

// wait releases the mutex, blocks until the next broadcast, then
// re-acquires the mutex. The caller must hold the lock, and must
// re-check its condition afterwards.
func (q *infiniteQueue) wait() {
	c := q.notifyChan()
	q.Unlock()
	defer q.Lock()
	<-c
}

// waitTimeout is wait with a deadline; returns false on timeout.
// Like wait, it temporarily releases the mutex while blocked.
func (q *infiniteQueue) waitTimeout(timeout time.Duration) bool {
	c := q.notifyChan()
	q.Unlock()
	defer q.Lock()
	select {
	case <-c:
		return true
	case <-time.After(timeout):
		return false
	}
}

// notifyChan atomically loads the current notification channel.
func (q *infiniteQueue) notifyChan() <-chan struct{} {
	ptr := atomic.LoadPointer(&q.empty)
	return *((*chan struct{})(ptr))
}

// broadcast notifies all the Poll goroutines to re-check whether the queue is empty.
// It installs a fresh channel and closes the old one, releasing every
// goroutine currently blocked in wait/waitTimeout.
func (q *infiniteQueue) broadcast() {
	c := make(chan struct{})
	old := atomic.SwapPointer(&q.empty, unsafe.Pointer(&c))
	close(*(*chan struct{})(old))
}
// finiteQueue implements finite blocking queue by buffered channel.
type finiteQueue struct {
	store chan interface{}
}

// Put inserts item, blocking while the channel buffer is full.
// Nil items are silently ignored.
func (q *finiteQueue) Put(item interface{}) {
	if util.IsNil(item) {
		return
	}
	q.store <- item
}

// PutTimeout inserts item, waiting at most timeout when the queue is
// full. A non-positive timeout makes the attempt non-blocking.
// Returns false for nil items, full-queue timeouts, or full non-blocking attempts.
func (q *finiteQueue) PutTimeout(item interface{}, timeout time.Duration) bool {
	if util.IsNil(item) {
		return false
	}
	if timeout > 0 {
		select {
		case q.store <- item:
			return true
		case <-time.After(timeout):
			return false
		}
	}
	select {
	case q.store <- item:
		return true
	default:
		return false
	}
}

// Poll removes and returns the head item, blocking while empty.
func (q *finiteQueue) Poll() interface{} {
	return <-q.store
}

// PollTimeout removes and returns the head item, waiting at most
// timeout when the queue is empty. A non-positive timeout makes the
// attempt non-blocking. The bool reports whether an item was received.
func (q *finiteQueue) PollTimeout(timeout time.Duration) (interface{}, bool) {
	if timeout > 0 {
		select {
		case item := <-q.store:
			return item, true
		case <-time.After(timeout):
			return nil, false
		}
	}
	select {
	case item := <-q.store:
		return item, true
	default:
		return nil, false
	}
}

// Len returns the number of buffered items.
func (q *finiteQueue) Len() int {
	return len(q.store)
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rangeutils
import (
"fmt"
"strconv"
"strings"
)
const (
	// separator joins the start and end indexes in a range string.
	separator = "-"
	// invalidPieceIndex is returned for both indexes on parse failure.
	invalidPieceIndex = -1
)

// CalculatePieceSize calculates the size of piece
// according to the parameter range, or 0 if the range is invalid.
func CalculatePieceSize(rangeStr string) int64 {
	begin, finish, err := ParsePieceIndex(rangeStr)
	if err != nil {
		return 0
	}
	return finish - begin + 1
}

// CalculatePieceNum calculates the number of piece
// according to the parameter range, or -1 if the range is invalid.
func CalculatePieceNum(rangeStr string) int {
	begin, finish, err := ParsePieceIndex(rangeStr)
	if err != nil {
		return -1
	}
	return int(begin / (finish - begin + 1))
}

// ParsePieceIndex parses the start and end index according to a range
// string of the form "start-end" (e.g. "0-45535").
func ParsePieceIndex(rangeStr string) (start, end int64, err error) {
	parts := strings.Split(rangeStr, separator)
	if len(parts) != 2 {
		return invalidPieceIndex, invalidPieceIndex, fmt.Errorf("range value(%s) is illegal which should be like 0-45535", rangeStr)
	}
	start, err = strconv.ParseInt(parts[0], 10, 64)
	if err != nil {
		return invalidPieceIndex, invalidPieceIndex, fmt.Errorf("range(%s) start is not a number", rangeStr)
	}
	end, err = strconv.ParseInt(parts[1], 10, 64)
	if err != nil {
		return invalidPieceIndex, invalidPieceIndex, fmt.Errorf("range(%s) end is not a number", rangeStr)
	}
	if end < start {
		return invalidPieceIndex, invalidPieceIndex, fmt.Errorf("range(%s) start is larger than end", rangeStr)
	}
	return start, end, nil
}

// CalculateBreakRange calculates the start and end of piece
// with the following formula:
//     start = pieceNum * pieceSize
//     end   = rangeLength - 1
// The difference with CalculatePieceRange is that the end is derived
// from rangeLength, which is passed in by the caller itself.
func CalculateBreakRange(startPieceNum, pieceContSize int, rangeLength int64) (string, error) {
	// This method resumes downloading from a break-point, so calling it
	// with startPieceNum == 0 is meaningless; callers should check first.
	if startPieceNum <= 0 {
		return "", fmt.Errorf("startPieceNum is illegal for value: %d", startPieceNum)
	}
	if rangeLength <= 0 {
		return "", fmt.Errorf("rangeLength is illegal for value: %d", rangeLength)
	}
	begin := int64(startPieceNum) * int64(pieceContSize)
	finish := rangeLength - 1
	if begin > finish {
		return "", fmt.Errorf("start: %d is larger than end: %d", begin, finish)
	}
	return getRangeString(begin, finish), nil
}

// CalculatePieceRange calculates the start and end of piece
// with the following formula:
//     start = pieceNum * pieceSize
//     end   = start + pieceSize - 1
func CalculatePieceRange(pieceNum int, pieceSize int32) string {
	begin := int64(pieceNum) * int64(pieceSize)
	return getRangeString(begin, begin+int64(pieceSize)-1)
}

// getRangeString renders "start-end" in base 10.
func getRangeString(startIndex, endIndex int64) string {
	return strconv.FormatInt(startIndex, 10) + separator + strconv.FormatInt(endIndex, 10)
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rate
import (
"encoding/json"
"fmt"
"regexp"
"strconv"
)
// Rate wraps int64. It is used to parse the custom rate format
// from YAML and JSON.
// This type should not propagate beyond the scope of input/output processing.
type Rate int64

// Rate units, in bytes.
const (
	B  Rate = 1
	KB      = 1024 * B
	MB      = 1024 * KB
	GB      = 1024 * MB
)

// Set implements pflag/flag.Value
func (d *Rate) Set(s string) error {
	var err error
	*d, err = ParseRate(s)
	return err
}

// Type implements pflag.Value
func (d *Rate) Type() string {
	return "rate"
}

// rateRE matches a non-negative integer followed by a supported unit.
var rateRE = regexp.MustCompile("^([0-9]+)(MB?|m|KB?|k|GB?|g|B)$")

// ParseRate parses a string like "10M", "1GB" or "1024" into a Rate.
// A pure number is interpreted as bytes; negative values are rejected.
func ParseRate(rateStr string) (Rate, error) {
	var n int
	n, err := strconv.Atoi(rateStr)
	if err == nil && n >= 0 {
		return Rate(n), nil
	}
	if n < 0 {
		return 0, fmt.Errorf("not a valid rate string: %d, only non-negative values are supported", n)
	}
	matches := rateRE.FindStringSubmatch(rateStr)
	if len(matches) != 3 {
		return 0, fmt.Errorf("not a valid rate string: %q, supported format: G(B)/g/M(B)/m/K(B)/k/B or pure number", rateStr)
	}
	// The regexp guarantees matches[1] is a plain non-negative integer.
	n, _ = strconv.Atoi(matches[1])
	switch unit := matches[2]; {
	case unit == "g" || unit == "G" || unit == "GB":
		n *= int(GB)
	case unit == "m" || unit == "M" || unit == "MB":
		n *= int(MB)
	case unit == "k" || unit == "K" || unit == "KB":
		n *= int(KB)
	case unit == "B":
		// Value already correct
	default:
		return 0, fmt.Errorf("invalid unit in rate string: %q, supported format: G(B)/g/M(B)/m/K(B)/k/B or pure number", unit)
	}
	return Rate(n), nil
}

// String returns the rate with the largest uppercase unit that divides
// it evenly (e.g. 2048 -> "2KB"), falling back to bytes.
func (d Rate) String() string {
	var (
		n      = int64(d)
		symbol = "B"
		unit   = B
	)
	if n == 0 {
		return "0B"
	}
	// Pick the first unit that divides n exactly (checked largest first).
	switch int64(0) {
	case n % int64(GB):
		symbol = "GB"
		unit = GB
	case n % int64(MB):
		symbol = "MB"
		unit = MB
	case n % int64(KB):
		symbol = "KB"
		unit = KB
	}
	return fmt.Sprintf("%v%v", n/int64(unit), symbol)
}

// MarshalYAML implements the yaml.Marshaler interface.
func (d Rate) MarshalYAML() (interface{}, error) {
	return d.String(), nil
}

// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (d *Rate) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var s string
	if err := unmarshal(&s); err != nil {
		return err
	}
	rate, err := ParseRate(s)
	if err != nil {
		return err
	}
	*d = rate
	return nil
}

// MarshalJSON implements the json.Marshaler interface.
func (d Rate) MarshalJSON() ([]byte, error) {
	return json.Marshal(d.String())
}

// UnmarshalJSON implements the json.Unmarshaler interface.
// It accepts both quoted rate strings (e.g. "10M") and bare JSON
// numbers (e.g. 1024, interpreted as bytes).
func (d *Rate) UnmarshalJSON(b []byte) error {
	str, err := strconv.Unquote(string(b))
	if err != nil {
		// Not a quoted string: fall back to the raw token so bare JSON
		// numbers parse instead of silently becoming "" (the previous
		// behavior of ignoring the Unquote error).
		str = string(b)
	}
	rate, err := ParseRate(str)
	if err != nil {
		return err
	}
	*d = rate
	return nil
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ratelimiter
import (
"sync"
"time"
"github.com/dragonflyoss/Dragonfly/pkg/util"
)
// RateLimiter is used for limiting the rate of transporting.
// It is a token-bucket limiter: tokens accrue every window milliseconds
// and callers consume them via AcquireBlocking/AcquireNonBlocking.
type RateLimiter struct {
	capacity      int64 // maximum tokens the bucket can hold
	bucket        int64 // tokens currently available; guarded by mu
	rate          int64 // tokens generated per second; <= 0 disables limiting
	ratePerWindow int64 // tokens generated per window
	window        int64 // token-generation interval in milliseconds, clamped to [1,1000]
	last          int64 // UnixNano of the last token refill; guarded by mu
	mu            sync.Mutex
}

// NewRateLimiter creates a RateLimiter instance.
// rate: how many tokens are generated per second. 0 represents that don't limit the rate.
// window: generating tokens interval (millisecond, [1,1000]).
// The production of rate and window should be division by 1000.
func NewRateLimiter(rate int64, window int64) *RateLimiter {
	rl := new(RateLimiter)
	rl.capacity = rate
	rl.bucket = 0
	rl.rate = rate
	rl.setWindow(window)
	rl.computeRatePerWindow()
	rl.last = time.Now().UnixNano()
	return rl
}
// AcquireBlocking acquires tokens. It will be blocking until the bucket has
// enough of the required number of tokens.
func (rl *RateLimiter) AcquireBlocking(token int64) int64 {
	return rl.acquire(token, true)
}

// AcquireNonBlocking acquires tokens. It will return -1 immediately when there
// is not enough tokens available.
func (rl *RateLimiter) AcquireNonBlocking(token int64) int64 {
	return rl.acquire(token, false)
}
// SetRate sets rate of RateLimiter.
// It takes rl.mu because acquire reads capacity/rate/ratePerWindow under
// the same mutex; mutating them unlocked is a data race with in-flight
// acquire calls.
func (rl *RateLimiter) SetRate(rate int64) {
	rl.mu.Lock()
	defer rl.mu.Unlock()
	if rl.rate != rate {
		rl.capacity = rate
		rl.rate = rate
		rl.computeRatePerWindow()
	}
}
// acquire takes token tokens from the bucket, refilling it first based
// on elapsed time. It returns the number of tokens granted, or -1 when
// non-blocking and the bucket cannot satisfy the request.
// NOTE(review): rl.capacity is read here before the mutex is taken, and
// blocking() sleeps while rl.mu is held, which stalls concurrent
// acquirers (including AcquireNonBlocking) — confirm this is intended.
func (rl *RateLimiter) acquire(token int64, blocking bool) int64 {
	// Unlimited rate or trivial request: grant immediately.
	if rl.capacity <= 0 || token < 1 {
		return token
	}
	// Allow a single oversized request to exceed the nominal capacity.
	tmpCapacity := util.Max(rl.capacity, token)

	var process func() int64
	process = func() int64 {
		now := time.Now().UnixNano()
		newTokens := rl.createTokens(now)
		curTotal := util.Min(newTokens+rl.bucket, tmpCapacity)
		if curTotal >= token {
			rl.bucket = curTotal - token
			rl.last = now
			return token
		}
		if blocking {
			// Sleep long enough for the deficit to be generated, then retry.
			rl.blocking(token - curTotal)
			return process()
		}
		return -1
	}

	rl.mu.Lock()
	defer rl.mu.Unlock()
	return process()
}
// setWindow stores window clamped to the valid range [1, 1000] ms.
func (rl *RateLimiter) setWindow(window int64) {
	switch {
	case window < 1:
		rl.window = 1
	case window > 1000:
		rl.window = 1000
	default:
		rl.window = window
	}
}
// computeRatePerWindow derives ratePerWindow from rate and window.
// When the rate is too low to yield at least one token per window, it
// falls back to one token per window and widens the window to match.
func (rl *RateLimiter) computeRatePerWindow() {
	if rl.rate <= 0 {
		return
	}
	if perWindow := rl.rate * rl.window / 1000; perWindow > 0 {
		rl.ratePerWindow = perWindow
		return
	}
	rl.ratePerWindow = 1
	rl.setWindow(rl.ratePerWindow * 1000 / rl.rate)
}
// createTokens returns the number of tokens generated between rl.last
// and timeNano: one ratePerWindow batch per elapsed whole window.
func (rl *RateLimiter) createTokens(timeNano int64) int64 {
	elapsed := timeNano - rl.last
	if elapsed < time.Millisecond.Nanoseconds() {
		return 0
	}
	windows := elapsed / (rl.window * time.Millisecond.Nanoseconds())
	return windows * rl.ratePerWindow
}
// blocking sleeps for as many windows as are needed to generate
// requiredToken more tokens (at least one window).
func (rl *RateLimiter) blocking(requiredToken int64) {
	if requiredToken <= 0 {
		return
	}
	windowCount := util.Max(requiredToken/rl.ratePerWindow, 1)
	time.Sleep(time.Duration(windowCount*rl.window) * time.Millisecond)
}
// TransRate trans the rate to multiples of 1000.
// A non-positive rate falls back to 10 MiB/s. The result always rounds
// up past the input, even when it is already a multiple of 1000.
func TransRate(rate int64) int64 {
	if rate <= 0 {
		rate = 10 * 1024 * 1024
	}
	return (rate/1000 + 1) * 1000
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package util
import (
"syscall"
"time"
)
// Atime returns the last access time in time.Time.
// NOTE(review): uses Stat_t.Atim, so this compiles on Linux-style
// platforms only — other OSes name the field differently.
func Atime(stat *syscall.Stat_t) time.Time {
	return time.Unix(stat.Atim.Sec, stat.Atim.Nsec)
}

// AtimeSec returns the last access time in seconds.
func AtimeSec(stat *syscall.Stat_t) int64 {
	return stat.Atim.Sec
}

// Ctime returns the create time in time.Time.
// NOTE(review): Stat_t.Ctim is conventionally the inode status-change
// time on Linux, not creation time — confirm the intended semantics.
func Ctime(stat *syscall.Stat_t) time.Time {
	return time.Unix(stat.Ctim.Sec, stat.Ctim.Nsec)
}

// CtimeSec returns the create time in seconds.
// See the note on Ctime about change-time vs creation-time semantics.
func CtimeSec(stat *syscall.Stat_t) int64 {
	return stat.Ctim.Sec
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package stringutils
import "unicode"
// SubString returns the substring of str that begins at rune index
// start and ends at rune index end-1. It is rune-aware, so multi-byte
// characters count as one position. Out-of-range or inverted indexes
// yield "".
func SubString(str string, start, end int) string {
	runes := []rune(str)
	length := len(runes)
	if start >= 0 && start < length &&
		end > 0 && end <= length &&
		start <= end {
		return string(runes[start:end])
	}
	return ""
}
// IsEmptyStr returns whether s is empty or consists solely of Unicode
// whitespace.
func IsEmptyStr(s string) bool {
	for _, r := range s {
		if unicode.IsSpace(r) {
			continue
		}
		return false
	}
	return true
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package syncmap
import (
"strconv"
"sync"
"time"
"github.com/dragonflyoss/Dragonfly/pkg/atomiccount"
"github.com/dragonflyoss/Dragonfly/pkg/errortypes"
"github.com/dragonflyoss/Dragonfly/pkg/stringutils"
"github.com/pkg/errors"
"github.com/willf/bitset"
)
// SyncMap is a thread-safe map keyed by strings.
type SyncMap struct {
	*sync.Map
}

// NewSyncMap returns a new SyncMap.
func NewSyncMap() *SyncMap {
	return &SyncMap{&sync.Map{}}
}

// Add adds a key-value pair into the *sync.Map.
// The ErrEmptyValue error will be returned if the key is empty.
func (mmap *SyncMap) Add(key string, value interface{}) error {
	if stringutils.IsEmptyStr(key) {
		return errors.Wrap(errortypes.ErrEmptyValue, "key")
	}
	mmap.Store(key, value)
	return nil
}

// Get returns result as interface{} according to the key.
// The ErrEmptyValue error will be returned if the key is empty.
// And the ErrDataNotFound error will be returned if the key cannot be found.
func (mmap *SyncMap) Get(key string) (interface{}, error) {
	if stringutils.IsEmptyStr(key) {
		return nil, errors.Wrap(errortypes.ErrEmptyValue, "key")
	}
	if v, ok := mmap.Load(key); ok {
		return v, nil
	}
	return nil, errors.Wrapf(errortypes.ErrDataNotFound, "failed to get key %s from map", key)
}

// GetAsBitset returns result as *bitset.BitSet.
// The ErrConvertFailed error will be returned if the assertion fails.
// (All GetAsX helpers use %v for the value: the stored types are not
// Stringers, so %s would render as "%!s(...)".)
func (mmap *SyncMap) GetAsBitset(key string) (*bitset.BitSet, error) {
	v, err := mmap.Get(key)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to get key %s from map", key)
	}
	if value, ok := v.(*bitset.BitSet); ok {
		return value, nil
	}
	return nil, errors.Wrapf(errortypes.ErrConvertFailed, "failed to get key %s from map with value %v", key, v)
}

// GetAsMap returns result as SyncMap.
// The ErrConvertFailed error will be returned if the assertion fails.
func (mmap *SyncMap) GetAsMap(key string) (*SyncMap, error) {
	v, err := mmap.Get(key)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to get key %s from map", key)
	}
	if value, ok := v.(*SyncMap); ok {
		return value, nil
	}
	return nil, errors.Wrapf(errortypes.ErrConvertFailed, "failed to get key %s from map with value %v", key, v)
}

// GetAsInt returns result as int.
// The ErrConvertFailed error will be returned if the assertion fails.
func (mmap *SyncMap) GetAsInt(key string) (int, error) {
	v, err := mmap.Get(key)
	if err != nil {
		return 0, errors.Wrapf(err, "failed to get key %s from map", key)
	}
	if value, ok := v.(int); ok {
		return value, nil
	}
	return 0, errors.Wrapf(errortypes.ErrConvertFailed, "failed to get key %s from map with value %v", key, v)
}

// GetAsInt64 returns result as int64.
// The ErrConvertFailed error will be returned if the assertion fails.
func (mmap *SyncMap) GetAsInt64(key string) (int64, error) {
	v, err := mmap.Get(key)
	if err != nil {
		return 0, errors.Wrapf(err, "failed to get key %s from map", key)
	}
	if value, ok := v.(int64); ok {
		return value, nil
	}
	return 0, errors.Wrapf(errortypes.ErrConvertFailed, "failed to get key %s from map with value %v", key, v)
}

// GetAsString returns result as string.
// The ErrConvertFailed error will be returned if the assertion fails.
func (mmap *SyncMap) GetAsString(key string) (string, error) {
	v, err := mmap.Get(key)
	if err != nil {
		return "", errors.Wrapf(err, "failed to get key %s from map", key)
	}
	if value, ok := v.(string); ok {
		return value, nil
	}
	return "", errors.Wrapf(errortypes.ErrConvertFailed, "failed to get key %s from map with value %v", key, v)
}

// GetAsBool returns result as bool.
// The ErrConvertFailed error will be returned if the assertion fails.
func (mmap *SyncMap) GetAsBool(key string) (bool, error) {
	v, err := mmap.Get(key)
	if err != nil {
		return false, errors.Wrapf(err, "failed to get key %s from map", key)
	}
	if value, ok := v.(bool); ok {
		return value, nil
	}
	return false, errors.Wrapf(errortypes.ErrConvertFailed, "failed to get key %s from map with value %v", key, v)
}

// GetAsAtomicInt returns result as *AtomicInt.
// The ErrConvertFailed error will be returned if the assertion fails.
func (mmap *SyncMap) GetAsAtomicInt(key string) (*atomiccount.AtomicInt, error) {
	v, err := mmap.Get(key)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to get key %s from map", key)
	}
	if value, ok := v.(*atomiccount.AtomicInt); ok {
		return value, nil
	}
	return nil, errors.Wrapf(errortypes.ErrConvertFailed, "failed to get key %s from map with value %v", key, v)
}

// GetAsTime returns result as Time.
// The ErrConvertFailed error will be returned if the assertion fails.
// Note: time.Now() is returned alongside the error on failure.
func (mmap *SyncMap) GetAsTime(key string) (time.Time, error) {
	v, err := mmap.Get(key)
	if err != nil {
		return time.Now(), errors.Wrapf(err, "failed to get key %s from map", key)
	}
	if value, ok := v.(time.Time); ok {
		return value, nil
	}
	return time.Now(), errors.Wrapf(errortypes.ErrConvertFailed, "failed to get key %s from map with value %v", key, v)
}

// Remove deletes the key-value pair from the mmap.
// The ErrEmptyValue error will be returned if the key is empty.
// And the ErrDataNotFound error will be returned if the key cannot be found.
// NOTE(review): Load-then-Delete is not atomic; a concurrent Remove of
// the same key may also report success — confirm callers tolerate this.
func (mmap *SyncMap) Remove(key string) error {
	if stringutils.IsEmptyStr(key) {
		return errors.Wrap(errortypes.ErrEmptyValue, "key")
	}
	if _, ok := mmap.Load(key); !ok {
		return errors.Wrapf(errortypes.ErrDataNotFound, "failed to get key %s from map", key)
	}
	mmap.Delete(key)
	return nil
}

// ListKeyAsStringSlice returns the list of keys as a string slice.
// Non-string keys are skipped.
func (mmap *SyncMap) ListKeyAsStringSlice() (result []string) {
	if mmap == nil {
		return []string{}
	}
	rangeFunc := func(key, value interface{}) bool {
		if v, ok := key.(string); ok {
			result = append(result, v)
		}
		return true
	}
	mmap.Range(rangeFunc)
	return
}

// ListKeyAsIntSlice returns the list of keys as an int slice.
// Keys that are not strings, or that do not parse as ints, are skipped.
func (mmap *SyncMap) ListKeyAsIntSlice() (result []int) {
	if mmap == nil {
		return []int{}
	}
	rangeFunc := func(key, value interface{}) bool {
		if v, ok := key.(string); ok {
			if value, err := strconv.Atoi(v); err == nil {
				result = append(result, value)
			}
		}
		return true
	}
	mmap.Range(rangeFunc)
	return
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package timeutils
import (
"time"
)
// GetCurrentTimeMillis returns the current Unix time in milliseconds.
func GetCurrentTimeMillis() int64 {
	return time.Now().UnixNano() / int64(time.Millisecond)
}

// SinceInMilliseconds gets the time since the specified start in milliseconds.
func SinceInMilliseconds(start time.Time) float64 {
	return float64(time.Since(start)) / float64(time.Millisecond)
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package util provides some utility tools for other components.
// Such as net-transporting, file-operating, rate-limiter.
package util
import (
"encoding/json"
"reflect"
"strconv"
)
// Max returns the larger of x or y.
func Max(x, y int64) int64 {
	if y > x {
		return y
	}
	return x
}
// Min returns the smaller of x or y.
func Min(x, y int64) int64 {
	if y < x {
		return y
	}
	return x
}
// IsNil returns whether value is nil, including a typed nil (chan,
// func, interface, map, pointer or slice) wrapped in a non-nil
// interface value, which a plain == nil comparison would miss.
func IsNil(value interface{}) (isNil bool) {
	if value == nil {
		return true
	}
	v := reflect.ValueOf(value)
	switch v.Kind() {
	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		isNil = v.IsNil()
	}
	return
}
// IsTrue returns whether the value is true.
// Presumably kept so a bool can be passed where a predicate function
// value is expected — verify against callers.
func IsTrue(value bool) bool {
	return value
}

// IsPositive returns whether the value is a positive number (> 0).
func IsPositive(value int64) bool {
	return value > 0
}
// IsNatural returns whether the string parses as an integer >= 0.
func IsNatural(value string) bool {
	v, err := strconv.Atoi(value)
	return err == nil && v >= 0
}
// IsNumeric returns whether the value parses as a base-10 integer
// (per strconv.Atoi).
func IsNumeric(value string) bool {
	_, err := strconv.Atoi(value)
	return err == nil
}
// JSONString returns the JSON encoding of v, or "" if marshalling fails.
func JSONString(v interface{}) string {
	str, err := json.Marshal(v)
	if err != nil {
		return ""
	}
	return string(str)
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package config
import (
"fmt"
"path/filepath"
"strings"
"time"
"github.com/dragonflyoss/Dragonfly/pkg/dflog"
"github.com/dragonflyoss/Dragonfly/pkg/fileutils"
"github.com/dragonflyoss/Dragonfly/pkg/rate"
"gopkg.in/yaml.v2"
)
// NewConfig creates a Config populated with default base properties.
func NewConfig() *Config {
	cfg := &Config{}
	cfg.BaseProperties = NewBaseProperties()
	return cfg
}
// Config contains all configuration of supernode.
type Config struct {
	// BaseProperties is embedded, so its fields are promoted onto Config.
	// NOTE(review): the methods below use c.cIDPrefix / c.superNodePID,
	// which are assumed to be unexported fields of BaseProperties —
	// confirm against its full definition.
	*BaseProperties `yaml:"base"`
	// Plugins maps each plugin type to its configured plugins.
	Plugins map[PluginType][]*PluginProperties `yaml:"plugins"`
	// Storages holds driver-specific storage configuration blobs.
	Storages map[string]interface{} `yaml:"storages"`
}
// Load loads config properties from the giving YAML file at path into c.
func (c *Config) Load(path string) error {
	return fileutils.LoadYaml(path, c)
}
// String renders the config as YAML, or "" if marshalling fails.
func (c *Config) String() string {
	out, err := yaml.Marshal(c)
	if err != nil {
		return ""
	}
	return string(out)
}
// SetCIDPrefix sets a string as the prefix for supernode CID
// which used to distinguish from the other peer nodes.
// The prefix is "<SuperNodeCIdPrefix><ip>~".
func (c *Config) SetCIDPrefix(ip string) {
	c.cIDPrefix = fmt.Sprintf("%s%s~", SuperNodeCIdPrefix, ip)
}
// GetSuperCID returns the client ID string for taskID, formed by
// appending the task ID to the supernode CID prefix.
func (c *Config) GetSuperCID(taskID string) string {
	return c.cIDPrefix + taskID
}
// IsSuperCID returns whether the clientID represents supernode,
// i.e. whether it carries the supernode CID prefix.
func (c *Config) IsSuperCID(clientID string) bool {
	return strings.HasPrefix(clientID, c.cIDPrefix)
}
// SetSuperPID sets the value of supernode PID.
func (c *Config) SetSuperPID(pid string) {
	c.superNodePID = pid
}

// GetSuperPID returns the pid string for supernode.
func (c *Config) GetSuperPID() string {
	return c.superNodePID
}

// IsSuperPID returns whether the peerID represents supernode
// (exact match against the stored supernode PID).
func (c *Config) IsSuperPID(peerID string) bool {
	return peerID == c.superNodePID
}
// NewBaseProperties creates an instant with default values.
// The working directory defaults to /home/admin/supernode and the download
// path to <home>/repo/download; all limits and GC parameters come from the
// package's Default* constants.
func NewBaseProperties() *BaseProperties {
	home := filepath.Join(string(filepath.Separator), "home", "admin", "supernode")
	return &BaseProperties{
		ListenPort:              DefaultListenPort,
		DownloadPort:            DefaultDownloadPort,
		HomeDir:                 home,
		SchedulerCorePoolSize:   DefaultSchedulerCorePoolSize,
		DownloadPath:            filepath.Join(home, "repo", "download"),
		PeerUpLimit:             DefaultPeerUpLimit,
		PeerDownLimit:           DefaultPeerDownLimit,
		EliminationLimit:        DefaultEliminationLimit,
		FailureCountLimit:       DefaultFailureCountLimit,
		LinkLimit:               DefaultLinkLimit,
		SystemReservedBandwidth: DefaultSystemReservedBandwidth,
		MaxBandwidth:            DefaultMaxBandwidth,
		EnableProfiler:          false,
		Debug:                   false,
		FailAccessInterval:      DefaultFailAccessInterval,
		GCInitialDelay:          DefaultGCInitialDelay,
		GCMetaInterval:          DefaultGCMetaInterval,
		GCDiskInterval:          DefaultGCDiskInterval,
		YoungGCThreshold:        DefaultYoungGCThreshold,
		FullGCThreshold:         DefaultFullGCThreshold,
		IntervalThreshold:       DefaultIntervalThreshold,
		TaskExpireTime:          DefaultTaskExpireTime,
		PeerGCDelay:             DefaultPeerGCDelay,
		CleanRatio:              DefaultCleanRatio,
	}
}
// CDNPattern is the pattern of the CDN implementation used by the supernode.
// It must be one of "local" and "source" (see BaseProperties.CDNPattern).
type CDNPattern string

const (
	// CDNPatternLocal selects the local CDN implementation registered in package cdn.
	//
	// Fix: the constants were previously untyped string constants; typing them
	// as CDNPattern makes misuse a compile error while remaining assignable
	// wherever a CDNPattern is expected.
	CDNPatternLocal CDNPattern = "local"

	// CDNPatternSource selects the "source" CDN pattern.
	CDNPatternSource CDNPattern = "source"
)
// BaseProperties contains all basic properties of supernode.
type BaseProperties struct {
	// CDNPattern cdn pattern which must be in ["local", "source"].
	// default: CDNPatternLocal
	CDNPattern CDNPattern `yaml:"cdnPattern"`

	// ListenPort is the port supernode server listens on.
	// default: 8002
	ListenPort int `yaml:"listenPort"`

	// DownloadPort is the port for download files from supernode.
	// default: 8001
	DownloadPort int `yaml:"downloadPort"`

	// HomeDir is working directory of supernode.
	// default: /home/admin/supernode
	HomeDir string `yaml:"homeDir"`

	// the core pool size of ScheduledExecutorService.
	// When a request to start a download task, supernode will construct a thread concurrent pool
	// to download pieces of source file and write to specified storage.
	// Note: source file downloading is into pieces via range attribute set in HTTP header.
	// default: 10
	SchedulerCorePoolSize int `yaml:"schedulerCorePoolSize"`

	// DownloadPath specifies the path where to store downloaded files from source address.
	// NOTE(review): this field has no yaml tag, so it is marshalled under the
	// default lowercased key — confirm whether that is intentional.
	DownloadPath string

	// PeerUpLimit is the upload limit of a peer. When dfget starts to play a role of peer,
	// it can only stand PeerUpLimit upload tasks from other peers.
	// default: 5
	PeerUpLimit int `yaml:"peerUpLimit"`

	// PeerDownLimit is the download limit of a peer. When a peer starts to download a file/image,
	// it will download file/image in the form of pieces. PeerDownLimit mean that a peer can only
	// stand starting PeerDownLimit concurrent downloading tasks.
	// default: 4
	PeerDownLimit int `yaml:"peerDownLimit"`

	// When dfget node starts to play a role of peer, it will provide services for other peers
	// to pull pieces. If it runs into an issue when providing services for a peer, its self failure
	// increases by 1. When the failure limit reaches EliminationLimit, the peer will isolate itself
	// as a unhealthy state. Then this dfget will be no longer called by other peers.
	// default: 5
	EliminationLimit int `yaml:"eliminationLimit"`

	// FailureCountLimit is the failure count limit set in supernode for dfget client.
	// When a dfget client takes part in the peer network constructed by supernode,
	// supernode will command the peer to start distribution task.
	// When dfget client fails to finish distribution task, the failure count of client
	// increases by 1. When failure count of client reaches to FailureCountLimit(default 5),
	// dfget client will be moved to blacklist of supernode to stop playing as a peer.
	// default: 5
	FailureCountLimit int `yaml:"failureCountLimit"`

	// LinkLimit is set for supernode to limit every piece download network speed.
	// default: 20 MB, in format of G(B)/g/M(B)/m/K(B)/k/B, pure number will also be parsed as Byte.
	LinkLimit rate.Rate `yaml:"linkLimit"`

	// SystemReservedBandwidth is the network bandwidth reserved for system software.
	// default: 20 MB, in format of G(B)/g/M(B)/m/K(B)/k/B, pure number will also be parsed as Byte.
	SystemReservedBandwidth rate.Rate `yaml:"systemReservedBandwidth"`

	// MaxBandwidth is the network bandwidth that supernode can use.
	// default: 200 MB, in format of G(B)/g/M(B)/m/K(B)/k/B, pure number will also be parsed as Byte.
	MaxBandwidth rate.Rate `yaml:"maxBandwidth"`

	// Whether to enable profiler
	// default: false
	EnableProfiler bool `yaml:"enableProfiler"`

	// Whether to open DEBUG level
	// default: false
	Debug bool `yaml:"debug"`

	// AdvertiseIP is used to set the ip that we advertise to other peer in the p2p-network.
	// By default, the first non-loop address is advertised.
	AdvertiseIP string `yaml:"advertiseIP"`

	// FailAccessInterval is the interval time after failed to access the URL.
	// unit: minutes
	// default: 3
	FailAccessInterval time.Duration `yaml:"failAccessInterval"`

	// cIDPrefix is a prefix string used to indicate that the CID is supernode.
	cIDPrefix string

	// superNodePID is the ID of supernode, which is the same as peer ID of dfget.
	superNodePID string

	// gc related

	// GCInitialDelay is the delay time from the start to the first GC execution.
	// default: 6s
	GCInitialDelay time.Duration `yaml:"gcInitialDelay"`

	// GCMetaInterval is the interval time to execute GC meta.
	// default: 2min
	GCMetaInterval time.Duration `yaml:"gcMetaInterval"`

	// TaskExpireTime when a task is not accessed within the taskExpireTime,
	// and it will be treated to be expired.
	// default: 3min
	TaskExpireTime time.Duration `yaml:"taskExpireTime"`

	// PeerGCDelay is the delay time to execute the GC after the peer has reported the offline.
	// default: 3min
	PeerGCDelay time.Duration `yaml:"peerGCDelay"`

	// GCDiskInterval is the interval time to execute GC disk.
	// default: 15s
	GCDiskInterval time.Duration `yaml:"gcDiskInterval"`

	// YoungGCThreshold if the available disk space is more than YoungGCThreshold
	// and there is no need to GC disk.
	//
	// default: 100GB
	YoungGCThreshold fileutils.Fsize `yaml:"youngGCThreshold"`

	// FullGCThreshold if the available disk space is less than FullGCThreshold
	// and the supernode should gc all task files which are not being used.
	//
	// default: 5GB
	FullGCThreshold fileutils.Fsize `yaml:"fullGCThreshold"`

	// IntervalThreshold is the threshold of the interval at which the task file is accessed.
	// default: 2h
	// NOTE(review): the yaml tag "IntervalThreshold" is capitalized unlike every
	// other tag in this struct — changing it would break existing config files,
	// so it is documented here instead.
	IntervalThreshold time.Duration `yaml:"IntervalThreshold"`

	// CleanRatio is the ratio to clean the disk and it is based on 10.
	// It means the value of CleanRatio should be [1-10].
	//
	// default: 1
	CleanRatio int

	// LogConfig holds the logging configuration for supernode.
	LogConfig dflog.LogConfig `yaml:"logConfig" json:"logConfig"`
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cdn
import (
"context"
"github.com/dragonflyoss/Dragonfly/apis/types"
"github.com/dragonflyoss/Dragonfly/pkg/stringutils"
"github.com/dragonflyoss/Dragonfly/supernode/httpclient"
"github.com/dragonflyoss/Dragonfly/supernode/store"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// cacheDetector checks the local cache of a task to decide how much of it,
// if anything, can be reused instead of re-downloading from the source.
type cacheDetector struct {
	// cacheStore is the storage backend holding the cached task files.
	cacheStore *store.Store
	// metaDataManager reads and writes the per-task metadata files.
	metaDataManager *fileMetaDataManager
	// originClient talks to the origin server (expiration / range-support checks).
	originClient httpclient.OriginHTTPClient
}
// newCacheDetector constructs a cacheDetector from its three collaborators.
func newCacheDetector(cacheStore *store.Store, metaDataManager *fileMetaDataManager, originClient httpclient.OriginHTTPClient) *cacheDetector {
	detector := &cacheDetector{
		cacheStore:      cacheStore,
		metaDataManager: metaDataManager,
		originClient:    originClient,
	}
	return detector
}
// detectCache detects whether there is a corresponding file in the local.
// If any, check whether the entire file has been completely downloaded.
//
// If so, return the md5 of task file and return startPieceNum as -1.
// And if not, return the latest piece num that has been downloaded.
func (cd *cacheDetector) detectCache(ctx context.Context, task *types.TaskInfo) (int, *fileMetaData, error) {
	var breakNum int
	var metaData *fileMetaData
	var err error

	// Only trust the cache when the metadata is readable and matches the task.
	if metaData, err = cd.metaDataManager.readFileMetaData(ctx, task.ID); err == nil &&
		checkSameFile(task, metaData) {
		breakNum = cd.parseBreakNum(ctx, task, metaData)
	}
	logrus.Infof("taskID: %s, detects cache breakNum: %d", task.ID, breakNum)

	if breakNum == 0 {
		// Nothing reusable: wipe the task files and rewrite fresh metadata.
		if metaData, err = cd.resetRepo(ctx, task); err != nil {
			return 0, nil, errors.Wrapf(err, "failed to reset repo")
		}
	} else {
		logrus.Debugf("start to update access time with taskID(%s)", task.ID)
		// Fix: the error returned by updateAccessTime was silently discarded;
		// a stale access time only affects GC ordering, so log and continue.
		if err := cd.metaDataManager.updateAccessTime(ctx, task.ID, getCurrentTimeMillisFunc()); err != nil {
			logrus.Warnf("taskID: %s, failed to update access time: %v", task.ID, err)
		}
	}
	return breakNum, metaData, nil
}
// parseBreakNum decides from which piece number a cached task can be resumed.
// Returns -1 when the cache is complete and successful, 0 when nothing is
// reusable (expired, failed, no range support, or unknown length), and
// otherwise the number of pieces already on disk.
func (cd *cacheDetector) parseBreakNum(ctx context.Context, task *types.TaskInfo, metaData *fileMetaData) int {
	expired, err := cd.originClient.IsExpired(task.RawURL, task.Headers, metaData.LastModified, metaData.ETag)
	if err != nil {
		// Best effort: on error, expired keeps its zero value (false) and we proceed.
		logrus.Errorf("failed to check whether the task(%s) has expired: %v", task.ID, err)
	}
	logrus.Debugf("success to get expired result: %t for taskID(%s)", expired, task.ID)
	if expired {
		// The origin changed since we cached it: the cache is useless.
		return 0
	}
	if metaData.Finish {
		// A finished download is either fully reusable or not at all.
		if metaData.Success {
			return -1
		}
		return 0
	}
	supportRange, err := cd.originClient.IsSupportRange(task.TaskURL, task.Headers)
	if err != nil {
		logrus.Errorf("failed to check whether the task(%s) supports partial requests: %v", task.ID, err)
	}
	// Resuming mid-file requires HTTP range support and a known file length.
	if !supportRange || task.FileLength < 0 {
		return 0
	}
	return cd.parseBreakNumByCheckFile(ctx, task.ID)
}
// parseBreakNumByCheckFile scans the cached data file of taskID and returns
// how many pieces it already contains; it returns 0 whenever the file cannot
// be read or yields no result.
func (cd *cacheDetector) parseBreakNumByCheckFile(ctx context.Context, taskID string) int {
	reader, err := cd.cacheStore.Get(ctx, getDownloadRawFunc(taskID))
	if err != nil {
		logrus.Errorf("taskID: %s, failed to read key file: %v", taskID, err)
		return 0
	}

	result, err := newSuperReader().readFile(ctx, reader, false, false)
	if err != nil {
		// A partial read may still have produced a usable result below.
		logrus.Errorf("taskID: %s, read file gets error: %v", taskID, err)
	}
	if result == nil {
		return 0
	}
	return result.pieceCount
}
// resetRepo deletes whatever is cached for the task and writes fresh
// metadata derived from the task itself.
func (cd *cacheDetector) resetRepo(ctx context.Context, task *types.TaskInfo) (*fileMetaData, error) {
	logrus.Infof("reset repo for taskID: %s", task.ID)
	// A failed deletion is logged but does not abort the reset; the
	// metadata is rewritten regardless.
	if err := deleteTaskFiles(ctx, cd.cacheStore, task.ID); err != nil {
		logrus.Errorf("reset repo: failed to delete task(%s) files: %v", task.ID, err)
	}
	return cd.metaDataManager.writeFileMetaDataByTask(ctx, task)
}
// checkSameFile reports whether the cached metaData describes the same file
// as the given task, i.e. whether the local cache may be reused for it.
// Either argument may be nil, in which case it returns false.
func checkSameFile(task *types.TaskInfo, metaData *fileMetaData) (result bool) {
	// Fix: the nil guard must run BEFORE the deferred log statement is
	// installed — the defer dereferences task.ID, so the original order
	// made checkSameFile(nil, ...) panic inside the defer.
	if task == nil || metaData == nil {
		return false
	}
	defer func() {
		logrus.Debugf("check same File for taskID(%s) get result: %t", task.ID, result)
	}()

	if metaData.PieceSize != task.PieceSize {
		return false
	}
	if metaData.TaskID != task.ID {
		return false
	}
	if metaData.URL != task.TaskURL {
		return false
	}
	// When the task carries a source md5 it is the deciding comparison;
	// otherwise fall back to comparing identifiers.
	if !stringutils.IsEmptyStr(task.Md5) {
		return metaData.Md5 == task.Md5
	}
	return metaData.Identifier == task.Identifier
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cdn
import (
"bytes"
"context"
"io/ioutil"
"github.com/sirupsen/logrus"
)
// Fuzz feeds arbitrary bytes through the super reader; it returns 1 when the
// input parses and 0 when it does not, guiding the fuzzer toward valid inputs.
func Fuzz(data []byte) int {
	// Suppress logging: parse failures are the expected case here.
	logrus.SetOutput(ioutil.Discard)

	if _, err := newSuperReader().readFile(context.Background(), bytes.NewReader(data), true, true); err != nil {
		return 0
	}
	return 1
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cdn
import (
"context"
"os"
"strings"
"time"
"github.com/dragonflyoss/Dragonfly/pkg/errortypes"
"github.com/dragonflyoss/Dragonfly/supernode/config"
"github.com/dragonflyoss/Dragonfly/supernode/daemon/mgr"
"github.com/dragonflyoss/Dragonfly/supernode/store"
"github.com/emirpasic/gods/maps/treemap"
godsutils "github.com/emirpasic/gods/utils"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// GetGCTaskIDs returns the taskIDs that should exec GC operations as a string slice.
//
// It should return nil when the free disk of cdn storage is larger than config.YoungGCThreshold.
// It should return all taskIDs that are not running when the free disk of cdn storage is less than config.FullGCThreshold.
func (cm *Manager) GetGCTaskIDs(ctx context.Context, taskMgr mgr.TaskMgr) ([]string, error) {
	var gcTaskIDs []string

	freeDisk, err := cm.cacheStore.GetAvailSpace(ctx, getHomeRawFunc())
	if err != nil {
		// A missing key means the download home does not exist yet, so there
		// is nothing to collect.
		if store.IsKeyNotFound(err) {
			return nil, nil
		}
		return nil, errors.Wrapf(err, "failed to get avail space")
	}
	// Plenty of free space: no GC required.
	if freeDisk > cm.cfg.YoungGCThreshold {
		return nil, nil
	}
	// Below FullGCThreshold every non-running task becomes a candidate.
	fullGC := false
	if freeDisk <= cm.cfg.FullGCThreshold {
		fullGC = true
	}
	logrus.Debugf("start to exec gc with fullGC: %t", fullGC)

	// Candidates are bucketed into two int64-keyed treemaps; sortInert decides
	// which map each task goes into and getGCTasks flattens them afterwards.
	gapTasks := treemap.NewWith(godsutils.Int64Comparator)
	intervalTasks := treemap.NewWith(godsutils.Int64Comparator)

	// walkTaskIDs is used to avoid processing multiple times for the same taskID
	// which is extracted from file name.
	walkTaskIDs := make(map[string]bool)
	walkFn := func(path string, info os.FileInfo, err error) error {
		logrus.Debugf("start to walk path(%s)", path)

		if err != nil {
			logrus.Errorf("failed to access path(%s): %v", path, err)
			return err
		}
		if info.IsDir() {
			return nil
		}
		// The taskID is the file name up to its first '.'.
		taskID := strings.Split(info.Name(), ".")[0]
		// If the taskID has been handled, and no need to do that again.
		if walkTaskIDs[taskID] {
			return nil
		}
		walkTaskIDs[taskID] = true

		// we should return directly when we success to get info which means it is being used
		if _, err := taskMgr.Get(ctx, taskID); err == nil || !errortypes.IsDataNotFound(err) {
			if err != nil {
				logrus.Errorf("failed to get taskID(%s): %v", taskID, err)
			}
			return nil
		}

		// add taskID to gcTaskIDs slice directly when fullGC equals true.
		if fullGC {
			gcTaskIDs = append(gcTaskIDs, taskID)
			return nil
		}

		metaData, err := cm.metaDataManager.readFileMetaData(ctx, taskID)
		if err != nil || metaData == nil {
			logrus.Debugf("failed to get metadata taskID(%s): %v", taskID, err)
			// TODO: delete the file when failed to get metadata
			return nil
		}
		// put taskID into gapTasks or intervalTasks which will sort by some rules
		if err := cm.sortInert(ctx, gapTasks, intervalTasks, metaData); err != nil {
			logrus.Errorf("failed to parse inert metaData(%+v): %v", metaData, err)
		}

		return nil
	}

	raw := &store.Raw{
		Bucket: config.DownloadHome,
		WalkFn: walkFn,
	}
	if err := cm.cacheStore.Walk(ctx, raw); err != nil {
		return nil, err
	}

	if !fullGC {
		gcTaskIDs = append(gcTaskIDs, getGCTasks(gapTasks, intervalTasks)...)
	}
	return gcTaskIDs, nil
}
// sortInert classifies an inert task into one of two GC candidate maps:
// intervalTasks (keyed by file size) when the task has a positive access
// interval and its last access lies within interval + IntervalThreshold,
// and gapTasks (keyed by the time gap since last access) otherwise.
// Map values are []string slices of taskIDs sharing the same key.
func (cm *Manager) sortInert(ctx context.Context, gapTasks, intervalTasks *treemap.Map, metaData *fileMetaData) error {
	// put appends the task under key, creating the slice on first use.
	// Keys must be int64 because both maps use godsutils.Int64Comparator.
	// (Refactor: this logic was duplicated verbatim for both maps.)
	put := func(m *treemap.Map, key int64) {
		v, found := m.Get(key)
		if !found {
			v = make([]string, 0)
		}
		m.Put(key, append(v.([]string), metaData.TaskID))
	}

	gap := getCurrentTimeMillisFunc() - metaData.AccessTime
	if metaData.Interval > 0 &&
		gap <= metaData.Interval+(int64(cm.cfg.IntervalThreshold.Seconds())*int64(time.Millisecond)) {
		info, err := cm.cacheStore.Stat(ctx, getDownloadRaw(metaData.TaskID))
		if err != nil {
			return err
		}
		put(intervalTasks, info.Size)
		return nil
	}
	put(gapTasks, gap)
	return nil
}
// getGCTasks flattens both candidate maps into a single taskID slice,
// gap-keyed tasks first, then interval-keyed ones, each in key order.
func getGCTasks(gapTasks, intervalTasks *treemap.Map) []string {
	gcTasks := make([]string, 0)
	for _, m := range []*treemap.Map{gapTasks, intervalTasks} {
		for _, v := range m.Values() {
			if taskIDs, ok := v.([]string); ok {
				gcTasks = append(gcTasks, taskIDs...)
			}
		}
	}
	return gcTasks
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cdn
import (
"fmt"
"github.com/dragonflyoss/Dragonfly/apis/types"
"github.com/dragonflyoss/Dragonfly/pkg/timeutils"
)
// getCurrentTimeMillisFunc returns the current time in milliseconds;
// it is a package-level variable so tests can substitute a fake clock.
var getCurrentTimeMillisFunc = timeutils.GetCurrentTimeMillis
// getContentLengthByHeader calculates the piece content length by piece header:
// the length occupies the low 24 bits of the header.
func getContentLengthByHeader(pieceHeader uint32) int32 {
	const lengthMask = 0xffffff // low 24 bits carry the content length
	return int32(pieceHeader & lengthMask)
}
// getPieceHeader packs dataSize together with pieceSize shifted left by 4
// into a single uint32 piece header (the inverse layout is read back by
// getContentLengthByHeader).
func getPieceHeader(dataSize, pieceSize int32) uint32 {
	shifted := pieceSize << 4
	return uint32(dataSize | shifted)
}
// getUpdateTaskInfoWithStatusOnly builds a TaskInfo update that carries only
// the cdn status, with empty md5 and zero file length.
func getUpdateTaskInfoWithStatusOnly(cdnStatus string) *types.TaskInfo {
	return getUpdateTaskInfo(cdnStatus, "", 0)
}
// getUpdateTaskInfo builds a TaskInfo update carrying the cdn status, the
// real md5 computed from the downloaded bytes, and the real file length.
func getUpdateTaskInfo(cdnStatus, realMD5 string, fileLength int64) *types.TaskInfo {
	return &types.TaskInfo{
		CdnStatus:  cdnStatus,
		FileLength: fileLength,
		RealMd5:    realMD5,
	}
}
// getPieceMd5Value renders a piece md5 record as "<md5>:<length>".
func getPieceMd5Value(pieceMd5Sum string, pieceLength int32) string {
	value := fmt.Sprintf("%s:%d", pieceMd5Sum, pieceLength)
	return value
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cdn
import (
"context"
"net/http"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
errorType "github.com/dragonflyoss/Dragonfly/pkg/errortypes"
"github.com/dragonflyoss/Dragonfly/pkg/httputils"
"github.com/dragonflyoss/Dragonfly/pkg/rangeutils"
"github.com/dragonflyoss/Dragonfly/supernode/httpclient"
)
// download downloads the file from the original address and
// sets the "Range" header to the undownloaded file range.
//
// If the returned error is nil, the Response will contain a non-nil
// Body which the caller is expected to close.
func (cm *Manager) download(ctx context.Context, taskID, url string, headers map[string]string,
	startPieceNum int, httpFileLength int64, pieceContSize int32) (*http.Response, error) {
	// A fresh download accepts either 200 or 206; a resumed one requires 206.
	checkCode := []int{http.StatusOK, http.StatusPartialContent}

	if startPieceNum > 0 {
		breakRange, err := rangeutils.CalculateBreakRange(startPieceNum, int(pieceContSize), httpFileLength)
		if err != nil {
			return nil, errors.Wrapf(errorType.ErrInvalidValue, "failed to calculate the breakRange: %v", err)
		}

		// check if Range in header? if Range already in Header, use this range directly
		if !hasRange(headers) {
			headers = httpclient.CopyHeader(
				map[string]string{"Range": httputils.ConstructRangeStr(breakRange)},
				headers)
		}
		checkCode = []int{http.StatusPartialContent}
	}

	logrus.Infof("start to download for taskId(%s) with fileUrl: %s"+
		" header: %v checkCode: %d", taskID, url, headers, checkCode)
	return cm.originClient.Download(url, headers, checkStatusCode(checkCode))
}
// hasRange reports whether the headers already contain a "Range" entry.
// Reading from a nil map is safe in Go, so no explicit nil guard is needed.
func hasRange(headers map[string]string) bool {
	_, ok := headers["Range"]
	return ok
}
// checkStatusCode returns a predicate reporting whether a status code is one
// of the accepted codes in statusCode.
func checkStatusCode(statusCode []int) func(int) bool {
	return func(status int) bool {
		for _, accepted := range statusCode {
			if accepted == status {
				return true
			}
		}
		return false
	}
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cdn
import (
"context"
"encoding/json"
"strings"
"github.com/dragonflyoss/Dragonfly/apis/types"
"github.com/dragonflyoss/Dragonfly/pkg/digest"
"github.com/dragonflyoss/Dragonfly/pkg/stringutils"
"github.com/dragonflyoss/Dragonfly/supernode/config"
"github.com/dragonflyoss/Dragonfly/supernode/store"
"github.com/dragonflyoss/Dragonfly/supernode/util"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// fileMetaData is the persisted per-task metadata record, stored as JSON via
// fileMetaDataManager and used to decide whether a cached file can be reused.
type fileMetaData struct {
	TaskID      string `json:"taskID"`
	URL         string `json:"url"`
	PieceSize   int32  `json:"pieceSize"`
	HTTPFileLen int64  `json:"httpFileLen"`
	// Identifier is serialized under the legacy "bizId" key.
	Identifier string `json:"bizId"`
	// AccessTime is the last access time in milliseconds (see updateAccessTime).
	AccessTime int64 `json:"accessTime"`
	// Interval is the gap between the two most recent accesses; never negative.
	Interval   int64  `json:"interval"`
	FileLength int64  `json:"fileLength"`
	Md5        string `json:"md5"`
	RealMd5    string `json:"realMd5"`
	// LastModified/ETag mirror the origin response headers, used for
	// expiration checks against the source.
	LastModified int64  `json:"lastModified"`
	ETag         string `json:"eTag"`
	// Finish/Success record whether the download completed and whether it
	// completed successfully.
	Finish  bool `json:"finish"`
	Success bool `json:"success"`
}
// fileMetaDataManager manages the meta file and md5 file of each taskID.
type fileMetaDataManager struct {
	// fileStore persists the metadata and md5 files.
	fileStore *store.Store
	// locker serializes per-taskID read/write access to those files.
	locker *util.LockerPool
}
// newFileMetaDataManager creates a fileMetaDataManager backed by the given store.
//
// Fix: the parameter was previously named "store", shadowing the imported
// store package inside the function body.
func newFileMetaDataManager(fileStore *store.Store) *fileMetaDataManager {
	return &fileMetaDataManager{
		fileStore: fileStore,
		locker:    util.NewLockerPool(),
	}
}
// writeFileMetaDataByTask derives a fresh fileMetaData record from the task
// (with AccessTime set to now), persists it, and returns it.
func (mm *fileMetaDataManager) writeFileMetaDataByTask(ctx context.Context, task *types.TaskInfo) (*fileMetaData, error) {
	metaData := &fileMetaData{
		TaskID:      task.ID,
		URL:         task.TaskURL,
		PieceSize:   task.PieceSize,
		HTTPFileLen: task.HTTPFileLength,
		Identifier:  task.Identifier,
		AccessTime:  getCurrentTimeMillisFunc(),
		FileLength:  task.FileLength,
		Md5:         task.Md5,
	}

	if err := mm.writeFileMetaData(ctx, metaData); err != nil {
		return nil, err
	}
	return metaData, nil
}
// writeFileMetaData serializes metaData as JSON and stores it under the
// metadata key of its taskID.
func (mm *fileMetaDataManager) writeFileMetaData(ctx context.Context, metaData *fileMetaData) error {
	payload, err := json.Marshal(metaData)
	if err != nil {
		return errors.Wrapf(err, "failed to marshal metadata")
	}
	return mm.fileStore.PutBytes(ctx, getMetaDataRawFunc(metaData.TaskID), payload)
}
// readFileMetaData returns the fileMetaData info according to the taskID.
// A zero PieceSize in the stored record is replaced with config.DefaultPieceSize.
func (mm *fileMetaDataManager) readFileMetaData(ctx context.Context, taskID string) (*fileMetaData, error) {
	data, err := mm.fileStore.GetBytes(ctx, getMetaDataRawFunc(taskID))
	if err != nil {
		return nil, errors.Wrapf(err, "failed to get metadata bytes")
	}

	metaData := &fileMetaData{}
	if err := json.Unmarshal(data, metaData); err != nil {
		return nil, errors.Wrapf(err, "failed to unmarshal metadata bytes")
	}
	logrus.Debugf("success to read metadata: %+v for taskID: %s", metaData, taskID)

	if metaData.PieceSize == 0 {
		metaData.PieceSize = config.DefaultPieceSize
	}
	return metaData, nil
}
// updateAccessTime records a new access time for taskID and stores the gap
// since the previous access as Interval (clamped to zero when non-positive),
// all under the task's write lock.
func (mm *fileMetaDataManager) updateAccessTime(ctx context.Context, taskID string, accessTime int64) error {
	mm.locker.GetLock(taskID, false)
	defer mm.locker.ReleaseLock(taskID, false)

	meta, err := mm.readFileMetaData(ctx, taskID)
	if err != nil {
		return errors.Wrapf(err, "failed to get origin metaData")
	}

	interval := accessTime - meta.AccessTime
	if interval <= 0 {
		// A non-positive interval means the clock did not advance; warn and clamp.
		logrus.Warnf("taskId:%s file hit interval:%d", taskID, interval)
		interval = 0
	}
	meta.Interval = interval
	meta.AccessTime = accessTime

	return mm.writeFileMetaData(ctx, meta)
}
// updateLastModifiedAndETag persists the given Last-Modified timestamp and
// ETag into the stored metadata of taskID, under the task's write lock.
func (mm *fileMetaDataManager) updateLastModifiedAndETag(ctx context.Context, taskID string, lastModified int64, eTag string) error {
	mm.locker.GetLock(taskID, false)
	defer mm.locker.ReleaseLock(taskID, false)

	meta, err := mm.readFileMetaData(ctx, taskID)
	if err != nil {
		return err
	}
	meta.LastModified = lastModified
	meta.ETag = eTag
	return mm.writeFileMetaData(ctx, meta)
}
// updateStatusAndResult copies the finish/success flags — and, on success,
// the file length plus a non-empty real md5 — from metaData into the stored
// metadata of taskID, under the task's write lock.
func (mm *fileMetaDataManager) updateStatusAndResult(ctx context.Context, taskID string, metaData *fileMetaData) error {
	mm.locker.GetLock(taskID, false)
	defer mm.locker.ReleaseLock(taskID, false)

	origin, err := mm.readFileMetaData(ctx, taskID)
	if err != nil {
		return errors.Wrapf(err, "failed to get origin metadata")
	}

	origin.Finish = metaData.Finish
	origin.Success = metaData.Success
	if origin.Success {
		origin.FileLength = metaData.FileLength
		if !stringutils.IsEmptyStr(metaData.RealMd5) {
			origin.RealMd5 = metaData.RealMd5
		}
	}
	return mm.writeFileMetaData(ctx, origin)
}
// writePieceMD5s writes the piece md5s to storage for the md5 file of taskID.
//
// The stored layout is: one md5 per piece, then fileMD5, then the SHA-1
// digest of all preceding entries — joined by newlines.
func (mm *fileMetaDataManager) writePieceMD5s(ctx context.Context, taskID, fileMD5 string, pieceMD5s []string) error {
	mm.locker.GetLock(taskID, false)
	defer mm.locker.ReleaseLock(taskID, false)

	if len(pieceMD5s) == 0 {
		logrus.Warnf("failed to write empty pieceMD5s for taskID: %s", taskID)
		return nil
	}

	lines := append(pieceMD5s, fileMD5)
	lines = append(lines, digest.Sha1(lines))
	return mm.fileStore.PutBytes(ctx, getMd5DataRawFunc(taskID), []byte(strings.Join(lines, "\n")))
}
// readPieceMD5s reads the md5 file of the taskID and returns the pieceMD5s.
//
// The stored layout is: one md5 per piece, then the file md5, then the SHA-1
// digest of all preceding lines (see writePieceMD5s). Both trailing entries
// are validated and stripped; any validation failure yields (nil, nil) so the
// caller treats the cache as absent rather than erroring.
func (mm *fileMetaDataManager) readPieceMD5s(ctx context.Context, taskID, fileMD5 string) (pieceMD5s []string, err error) {
	mm.locker.GetLock(taskID, true)
	defer mm.locker.ReleaseLock(taskID, true)

	bytes, err := mm.fileStore.GetBytes(ctx, getMd5DataRawFunc(taskID))
	if err != nil {
		return nil, err
	}
	pieceMD5s = strings.Split(strings.TrimSpace(string(bytes)), "\n")

	// Fix: a well-formed file holds at least the fileMD5 plus the SHA-1 line.
	// The previous `len == 0` check was dead code (strings.Split never returns
	// an empty slice), and a single-line file could index out of range below.
	pieceMD5sLength := len(pieceMD5s)
	if pieceMD5sLength < 2 {
		return nil, nil
	}

	// validate the SHA-1 checksum of pieceMD5s
	pieceMD5sWithoutSha1Value := pieceMD5s[:pieceMD5sLength-1]
	expectedSha1Value := digest.Sha1(pieceMD5sWithoutSha1Value)
	realSha1Value := pieceMD5s[pieceMD5sLength-1]
	if expectedSha1Value != realSha1Value {
		logrus.Errorf("failed to validate the SHA-1 checksum of pieceMD5s, expected: %s, real: %s", expectedSha1Value, realSha1Value)
		return nil, nil
	}

	// validate the fileMD5
	realFileMD5 := pieceMD5s[pieceMD5sLength-2]
	if realFileMD5 != fileMD5 {
		logrus.Errorf("failed to validate the fileMD5, expected: %s, real: %s", fileMD5, realFileMD5)
		return nil, nil
	}
	return pieceMD5s[:pieceMD5sLength-2], nil
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cdn
import (
"context"
"crypto/md5"
"fmt"
"path"
"github.com/dragonflyoss/Dragonfly/apis/types"
"github.com/dragonflyoss/Dragonfly/pkg/limitreader"
"github.com/dragonflyoss/Dragonfly/pkg/metricsutils"
"github.com/dragonflyoss/Dragonfly/pkg/netutils"
"github.com/dragonflyoss/Dragonfly/pkg/rangeutils"
"github.com/dragonflyoss/Dragonfly/pkg/ratelimiter"
"github.com/dragonflyoss/Dragonfly/pkg/stringutils"
"github.com/dragonflyoss/Dragonfly/supernode/config"
"github.com/dragonflyoss/Dragonfly/supernode/daemon/mgr"
"github.com/dragonflyoss/Dragonfly/supernode/httpclient"
"github.com/dragonflyoss/Dragonfly/supernode/store"
"github.com/dragonflyoss/Dragonfly/supernode/util"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
)
// PieceMd5Source* identify where a piece md5 value was obtained from.
const (
	PieceMd5SourceDefault = "default"
	PieceMd5SourceMemory  = "memory"
	PieceMd5SourceMeta    = "meta"
	PieceMd5SourceFile    = "file"
)
// Compile-time check that Manager implements the mgr.CDNMgr interface.
var _ mgr.CDNMgr = &Manager{}
// metrics groups the Prometheus counters exported by the cdn manager.
type metrics struct {
	// cdnCacheHitCount counts full local cache hits.
	cdnCacheHitCount *prometheus.CounterVec
	// cdnDownloadCount counts download attempts from the source.
	cdnDownloadCount *prometheus.CounterVec
	// cdnDownloadBytes accumulates the bytes downloaded from the source.
	cdnDownloadBytes *prometheus.CounterVec
	// cdnDownloadFailCount counts failed source downloads.
	cdnDownloadFailCount *prometheus.CounterVec
}
// newMetrics creates the cdn counters and registers them on register
// under the supernode subsystem.
func newMetrics(register prometheus.Registerer) *metrics {
	return &metrics{
		cdnCacheHitCount: metricsutils.NewCounter(config.SubsystemSupernode, "cdn_cache_hit_total",
			"Total times of hitting cdn cache", []string{}, register),

		cdnDownloadCount: metricsutils.NewCounter(config.SubsystemSupernode, "cdn_download_total",
			"Total times of cdn download", []string{}, register),

		cdnDownloadBytes: metricsutils.NewCounter(config.SubsystemSupernode, "cdn_download_size_bytes_total",
			"total file size of cdn downloaded from source in bytes", []string{}, register,
		),

		cdnDownloadFailCount: metricsutils.NewCounter(config.SubsystemSupernode, "cdn_download_failed_total",
			"Total failure times of cdn download", []string{}, register),
	}
}
// init registers this package's Manager constructor as the implementation
// of the "local" CDN pattern.
func init() {
	mgr.Register(config.CDNPatternLocal, NewManager)
}
// Manager is an implementation of the interface of CDNMgr.
// It downloads task files from their source, caches them locally,
// and reports progress and metrics along the way.
type Manager struct {
	cfg        *config.Config
	cacheStore *store.Store
	// limiter throttles download bandwidth (see newManager for its budget).
	limiter *ratelimiter.RateLimiter
	// cdnLocker serializes CDN operations per taskID.
	cdnLocker       *util.LockerPool
	progressManager mgr.ProgressMgr
	metaDataManager *fileMetaDataManager
	cdnReporter     *reporter
	// detector decides how much of a cached task can be reused.
	detector        *cacheDetector
	originClient    httpclient.OriginHTTPClient
	pieceMD5Manager *pieceMD5Mgr
	writer          *superWriter
	metrics         *metrics
}
// NewManager returns a new Manager.
// It is the exported constructor registered with mgr.Register and simply
// delegates to newManager, returning the result as the mgr.CDNMgr interface.
func NewManager(cfg *config.Config, cacheStore *store.Store, progressManager mgr.ProgressMgr,
	originClient httpclient.OriginHTTPClient, register prometheus.Registerer) (mgr.CDNMgr, error) {
	return newManager(cfg, cacheStore, progressManager, originClient, register)
}
// newManager wires up a Manager and all of its collaborators.
// The download rate limiter budget is MaxBandwidth minus the bandwidth
// reserved for system software.
func newManager(cfg *config.Config, cacheStore *store.Store, progressManager mgr.ProgressMgr,
	originClient httpclient.OriginHTTPClient, register prometheus.Registerer) (*Manager, error) {
	rateLimiter := ratelimiter.NewRateLimiter(ratelimiter.TransRate(int64(cfg.MaxBandwidth-cfg.SystemReservedBandwidth)), 2)
	metaDataManager := newFileMetaDataManager(cacheStore)
	pieceMD5Manager := newpieceMD5Mgr()
	cdnReporter := newReporter(cfg, cacheStore, progressManager, metaDataManager, pieceMD5Manager)
	return &Manager{
		cfg:             cfg,
		cacheStore:      cacheStore,
		limiter:         rateLimiter,
		cdnLocker:       util.NewLockerPool(),
		progressManager: progressManager,
		metaDataManager: metaDataManager,
		pieceMD5Manager: pieceMD5Manager,
		cdnReporter:     cdnReporter,
		detector:        newCacheDetector(cacheStore, metaDataManager, originClient),
		originClient:    originClient,
		writer:          newSuperWriter(cacheStore, cdnReporter),
		metrics:         newMetrics(register),
	}, nil
}
// TriggerCDN will trigger CDN to download the file from sourceUrl.
// It takes the per-task CDN lock, reuses whatever prefix of the file is
// already cached, downloads the rest from the origin, and returns the task
// fields (status/md5/length) that should be updated.
// Cache detection/reporting failures are logged and treated as a cache miss
// rather than aborting the download.
func (cm *Manager) TriggerCDN(ctx context.Context, task *types.TaskInfo) (*types.TaskInfo, error) {
	// 0 means the origin did not announce a length; normalize to -1, the
	// "unknown length" marker used by download/startWriter below.
	httpFileLength := task.HTTPFileLength
	if httpFileLength == 0 {
		httpFileLength = -1
	}
	cm.cdnLocker.GetLock(task.ID, false)
	defer cm.cdnLocker.ReleaseLock(task.ID, false)
	// detect Cache
	startPieceNum, metaData, err := cm.detector.detectCache(ctx, task)
	if err != nil {
		logrus.Errorf("failed to detect cache for task %s: %v", task.ID, err)
	}
	// Report the already-cached pieces; fileMD5 resumes the md5 computation
	// from the cached prefix (nil when there is nothing to resume).
	fileMD5, updateTaskInfo, err := cm.cdnReporter.reportCache(ctx, task.ID, metaData, startPieceNum)
	if err != nil {
		logrus.Errorf("failed to report cache for taskId: %s : %v", task.ID, err)
	}
	// startPieceNum == -1 signals a full cache hit: nothing left to download.
	if startPieceNum == -1 {
		logrus.Infof("cache full hit for taskId:%s on local", task.ID)
		cm.metrics.cdnCacheHitCount.WithLabelValues().Inc()
		return updateTaskInfo, nil
	}
	if fileMD5 == nil {
		fileMD5 = md5.New()
	}
	// get piece content size which not including the piece header and trailer
	pieceContSize := task.PieceSize - config.PieceWrapSize
	// start to download the source file
	resp, err := cm.download(ctx, task.ID, task.RawURL, task.Headers, startPieceNum, httpFileLength, pieceContSize)
	cm.metrics.cdnDownloadCount.WithLabelValues().Inc()
	if err != nil {
		cm.metrics.cdnDownloadFailCount.WithLabelValues().Inc()
		return getUpdateTaskInfoWithStatusOnly(types.TaskInfoCdnStatusFAILED), err
	}
	defer resp.Body.Close()
	// Persist the validators so future cache detection can revalidate.
	cm.updateLastModifiedAndETag(ctx, task.ID, resp.Header.Get("Last-Modified"), resp.Header.Get("Etag"))
	// Rate-limit the body and feed every byte through fileMD5 as it is read.
	reader := limitreader.NewLimitReaderWithLimiterAndMD5Sum(resp.Body, cm.limiter, fileMD5)
	downloadMetadata, err := cm.writer.startWriter(ctx, cm.cfg, reader, task, startPieceNum, httpFileLength, pieceContSize)
	if err != nil {
		logrus.Errorf("failed to write for task %s: %v", task.ID, err)
		return getUpdateTaskInfoWithStatusOnly(types.TaskInfoCdnStatusFAILED), err
	}
	cm.metrics.cdnDownloadBytes.WithLabelValues().Add(float64(downloadMetadata.realHTTPFileLength))
	realMD5 := reader.Md5()
	// Validate md5/length against expectations and persist the final meta data.
	success, err := cm.handleCDNResult(ctx, task, realMD5, httpFileLength, downloadMetadata.realHTTPFileLength, downloadMetadata.realFileLength)
	if err != nil || !success {
		return getUpdateTaskInfoWithStatusOnly(types.TaskInfoCdnStatusFAILED), err
	}
	return getUpdateTaskInfo(types.TaskInfoCdnStatusSUCCESS, realMD5, downloadMetadata.realFileLength), nil
}
// GetHTTPPath returns the http download path of taskID.
// The returned path joined the DownloadRaw.Bucket and DownloadRaw.Key.
func (cm *Manager) GetHTTPPath(ctx context.Context, taskInfo *types.TaskInfo) (string, error) {
	downloadRaw := getDownloadRawFunc(taskInfo.ID)
	httpPath := path.Join("/", downloadRaw.Bucket, downloadRaw.Key)
	return httpPath, nil
}
// GetStatus gets the status of the file.
// NOTE(review): this implementation unconditionally reports SUCCESS and
// never returns an error — confirm callers do not rely on a real status here.
func (cm *Manager) GetStatus(ctx context.Context, taskID string) (cdnStatus string, err error) {
	return types.TaskInfoCdnStatusSUCCESS, nil
}
// GetPieceMD5 gets the piece Md5 according to the specified taskID and pieceNum.
//
// The source decides where the md5 comes from:
//   - empty / PieceMd5SourceDefault / PieceMd5SourceMemory: the in-memory registry
//   - PieceMd5SourceMeta: the persisted meta-data file
//   - PieceMd5SourceFile: recomputed by reading the piece from the cache store
//
// An unrecognized source returns ("", nil).
func (cm *Manager) GetPieceMD5(ctx context.Context, taskID string, pieceNum int, pieceRange, source string) (pieceMd5 string, err error) {
	if stringutils.IsEmptyStr(source) ||
		source == PieceMd5SourceDefault ||
		source == PieceMd5SourceMemory {
		return cm.pieceMD5Manager.getPieceMD5(taskID, pieceNum)
	}
	if source == PieceMd5SourceMeta {
		// get file meta data
		fileMeta, err := cm.metaDataManager.readFileMetaData(ctx, taskID)
		if err != nil {
			return "", errors.Wrapf(err, "failed to get file meta data taskID(%s)", taskID)
		}
		// get piece md5s from meta data file
		pieceMD5s, err := cm.metaDataManager.readPieceMD5s(ctx, taskID, fileMeta.RealMd5)
		if err != nil {
			return "", errors.Wrapf(err, "failed to get piece MD5s from meta data taskID(%s)", taskID)
		}
		// BUG FIX: pieceMD5s[pieceNum] requires len(pieceMD5s) > pieceNum.
		// The previous check (len < pieceNum) allowed an out-of-range index
		// when len == pieceNum, and a negative pieceNum was never rejected.
		if pieceNum < 0 || pieceNum >= len(pieceMD5s) {
			return "", fmt.Errorf("not enough piece MD5 for pieceNum(%d)", pieceNum)
		}
		return pieceMD5s[pieceNum], nil
	}
	if source == PieceMd5SourceFile {
		// get piece length
		start, end, err := rangeutils.ParsePieceIndex(pieceRange)
		if err != nil {
			return "", errors.Wrapf(err, "failed to parse piece range(%s)", pieceRange)
		}
		pieceLength := end - start + 1
		// get piece content reader
		pieceRaw := getDownloadRawFunc(taskID)
		pieceRaw.Offset = start
		pieceRaw.Length = pieceLength
		reader, err := cm.cacheStore.Get(ctx, pieceRaw)
		if err != nil {
			return "", errors.Wrapf(err, "failed to get file reader taskID(%s)", taskID)
		}
		// get piece Md5 by read source file
		return getMD5ByReadFile(reader, int32(pieceLength))
	}
	return "", nil
}
// CheckFile checks the file whether exists.
func (cm *Manager) CheckFile(ctx context.Context, taskID string) bool {
	_, err := cm.cacheStore.Stat(ctx, getDownloadRaw(taskID))
	return err == nil
}
// Delete the cdn meta with specified taskID.
// It will also delete the files on the disk when the force equals true.
func (cm *Manager) Delete(ctx context.Context, taskID string, force bool) error {
	if force {
		return deleteTaskFiles(ctx, cm.cacheStore, taskID)
	}
	// Non-forced deletion only drops the in-memory piece md5 records.
	return cm.pieceMD5Manager.removePieceMD5sByTaskID(taskID)
}
// handleCDNResult validates the downloaded file against the expected md5 and
// announced http length, persists the final meta data and piece md5s, and
// reports whether the download counts as successful.
func (cm *Manager) handleCDNResult(ctx context.Context, task *types.TaskInfo, realMd5 string, httpFileLength, realHTTPFileLength, realFileLength int64) (bool, error) {
	success := true
	// Compare against the client-provided md5 when one was supplied.
	if !stringutils.IsEmptyStr(task.Md5) && task.Md5 != realMd5 {
		logrus.Errorf("taskId:%s url:%s file md5 not match expected:%s real:%s", task.ID, task.TaskURL, task.Md5, realMd5)
		success = false
	}
	// Compare against the announced http length when it is known (>= 0).
	if success && httpFileLength >= 0 && httpFileLength != realHTTPFileLength {
		logrus.Errorf("taskId:%s url:%s file length not match expected:%d real:%d", task.ID, task.TaskURL, httpFileLength, realHTTPFileLength)
		success = false
	}
	if !success {
		realFileLength = 0
	}
	fmd := &fileMetaData{
		Finish:     true,
		Success:    success,
		RealMd5:    realMd5,
		FileLength: realFileLength,
	}
	if err := cm.metaDataManager.updateStatusAndResult(ctx, task.ID, fmd); err != nil {
		return false, err
	}
	if !success {
		return false, nil
	}
	logrus.Infof("success to get taskID: %s fileLength: %d realMd5: %s", task.ID, realFileLength, realMd5)
	pieceMD5s, err := cm.pieceMD5Manager.getPieceMD5sByTaskID(task.ID)
	if err != nil {
		return false, err
	}
	if err := cm.metaDataManager.writePieceMD5s(ctx, task.ID, realMd5, pieceMD5s); err != nil {
		return false, err
	}
	return true, nil
}
// updateLastModifiedAndETag persists the Last-Modified and ETag response
// headers for taskID so later cache detection can revalidate against origin.
// A failure to update is logged but not propagated to the caller.
func (cm *Manager) updateLastModifiedAndETag(ctx context.Context, taskID, lastModified, eTag string) {
	lastModifiedInt, _ := netutils.ConvertTimeStringToInt(lastModified)
	if err := cm.metaDataManager.updateLastModifiedAndETag(ctx, taskID, lastModifiedInt, eTag); err != nil {
		logrus.Errorf("failed to update LastModified(%s) and ETag(%s) for taskID %s: %v", lastModified, eTag, taskID, err)
		// BUG FIX: previously the success message below was also logged when
		// the update failed; return so exactly one outcome is logged.
		return
	}
	logrus.Infof("success to update LastModified(%s) and ETag(%s) for taskID: %s", lastModified, eTag, taskID)
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cdn
import (
"context"
"path"
"github.com/sirupsen/logrus"
"github.com/dragonflyoss/Dragonfly/pkg/stringutils"
"github.com/dragonflyoss/Dragonfly/supernode/config"
"github.com/dragonflyoss/Dragonfly/supernode/store"
)
// Function indirections over the raw constructors so they can be
// replaced (e.g. stubbed out in tests).
var (
	getDownloadRawFunc = getDownloadRaw
	getMetaDataRawFunc = getMetaDataRaw
	getMd5DataRawFunc  = getMd5DataRaw
	getHomeRawFunc     = getHomeRaw
)
// getDownloadKey builds the storage key of the downloaded payload for taskID.
func getDownloadKey(taskID string) string {
	parent := getParentKey(taskID)
	return path.Join(parent, taskID)
}
// getMetaDataKey builds the storage key of the ".meta" file for taskID.
func getMetaDataKey(taskID string) string {
	parent := getParentKey(taskID)
	return path.Join(parent, taskID+".meta")
}
// getMd5DataKey builds the storage key of the ".md5" file for taskID.
func getMd5DataKey(taskID string) string {
	parent := getParentKey(taskID)
	return path.Join(parent, taskID+".md5")
}
// getParentKey returns the parent directory key of a task's files: the
// first three characters of taskID (a simple fan-out prefix so all files
// of one task share a common bucket subdirectory).
func getParentKey(taskID string) string {
	return stringutils.SubString(taskID, 0, 3)
}
// getDownloadRaw returns the store.Raw locating the downloaded payload.
func getDownloadRaw(taskID string) *store.Raw {
	raw := &store.Raw{Bucket: config.DownloadHome}
	raw.Key = getDownloadKey(taskID)
	return raw
}
// getMetaDataRaw returns the store.Raw locating the meta-data file.
// Trunc is set so a write replaces any previous content.
func getMetaDataRaw(taskID string) *store.Raw {
	raw := &store.Raw{Bucket: config.DownloadHome, Trunc: true}
	raw.Key = getMetaDataKey(taskID)
	return raw
}
// getMd5DataRaw returns the store.Raw locating the piece-md5 file.
// Trunc is set so a write replaces any previous content.
func getMd5DataRaw(taskID string) *store.Raw {
	raw := &store.Raw{Bucket: config.DownloadHome, Trunc: true}
	raw.Key = getMd5DataKey(taskID)
	return raw
}
// getParentRaw returns the store.Raw of the parent bucket directory of taskID.
func getParentRaw(taskID string) *store.Raw {
	raw := &store.Raw{Bucket: config.DownloadHome}
	raw.Key = getParentKey(taskID)
	return raw
}
// getHomeRaw returns the store.Raw of the whole download home bucket.
func getHomeRaw() *store.Raw {
	raw := new(store.Raw)
	raw.Bucket = config.DownloadHome
	return raw
}
// deleteTaskFiles removes the meta, md5 and payload files of taskID from the
// cache store. A missing file is not an error. Finally it attempts to remove
// the (possibly shared) parent bucket; that failure is only logged since
// other tasks may still live under it.
func deleteTaskFiles(ctx context.Context, cacheStore *store.Store, taskID string) error {
	raws := []*store.Raw{
		getMetaDataRaw(taskID),
		getMd5DataRaw(taskID),
		getDownloadRaw(taskID),
	}
	for _, raw := range raws {
		if err := cacheStore.Remove(ctx, raw); err != nil && !store.IsKeyNotFound(err) {
			return err
		}
	}
	// try to clean the parent bucket
	if err := cacheStore.Remove(ctx, getParentRaw(taskID)); err != nil &&
		!store.IsKeyNotFound(err) {
		logrus.Warnf("taskID:%s failed remove parent bucket:%v", taskID, err)
	}
	return nil
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cdn
import (
"sort"
"strconv"
"github.com/dragonflyoss/Dragonfly/pkg/errortypes"
"github.com/dragonflyoss/Dragonfly/pkg/syncmap"
)
// pieceMD5Mgr maintains an in-memory, per-task registry of piece md5 values.
// Layout: taskID -> (pieceNum -> pieceMD5), both levels backed by SyncMap.
type pieceMD5Mgr struct {
	taskPieceMD5s *syncmap.SyncMap
}
// newpieceMD5Mgr creates an empty pieceMD5Mgr.
func newpieceMD5Mgr() *pieceMD5Mgr {
	m := new(pieceMD5Mgr)
	m.taskPieceMD5s = syncmap.NewSyncMap()
	return m
}
// getPieceMD5 returns the md5 of pieceRange for taskID.
func (pmm *pieceMD5Mgr) getPieceMD5(taskID string, pieceNum int) (pieceMD5 string, err error) {
	taskMap, err := pmm.taskPieceMD5s.GetAsMap(taskID)
	if err != nil {
		return "", err
	}
	key := strconv.Itoa(pieceNum)
	return taskMap.GetAsString(key)
}
// setPieceMD5 sets the md5 for pieceRange of taskID.
func (pmm *pieceMD5Mgr) setPieceMD5(taskID string, pieceNum int, pieceMD5 string) (err error) {
	taskMap, err := pmm.taskPieceMD5s.GetAsMap(taskID)
	// "not found" is expected for the first piece of a task; any other error is fatal.
	if err != nil && !errortypes.IsDataNotFound(err) {
		return err
	}
	if taskMap == nil {
		// Lazily create the per-task map on first use.
		taskMap = syncmap.NewSyncMap()
		pmm.taskPieceMD5s.Add(taskID, taskMap)
	}
	return taskMap.Add(strconv.Itoa(pieceNum), pieceMD5)
}
// getPieceMD5sByTaskID returns all pieceMD5s as a string slice,
// ordered by ascending piece number.
func (pmm *pieceMD5Mgr) getPieceMD5sByTaskID(taskID string) (pieceMD5s []string, err error) {
	taskMap, err := pmm.taskPieceMD5s.GetAsMap(taskID)
	if err != nil {
		return nil, err
	}
	nums := taskMap.ListKeyAsIntSlice()
	sort.Ints(nums)
	var result []string
	for _, num := range nums {
		md5Value, err := taskMap.GetAsString(strconv.Itoa(num))
		if err != nil {
			return nil, err
		}
		result = append(result, md5Value)
	}
	return result, nil
}
// removePieceMD5sByTaskID drops all piece md5 records of taskID from the
// in-memory registry.
func (pmm *pieceMD5Mgr) removePieceMD5sByTaskID(taskID string) error {
	return pmm.taskPieceMD5s.Remove(taskID)
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cdn
import (
"context"
"hash"
"github.com/dragonflyoss/Dragonfly/apis/types"
"github.com/dragonflyoss/Dragonfly/pkg/fileutils"
"github.com/dragonflyoss/Dragonfly/pkg/stringutils"
"github.com/dragonflyoss/Dragonfly/supernode/config"
"github.com/dragonflyoss/Dragonfly/supernode/daemon/mgr"
"github.com/dragonflyoss/Dragonfly/supernode/store"
"github.com/sirupsen/logrus"
)
// reporter pushes the status of locally cached pieces to the progress
// manager and keeps piece/file meta data in sync with the cache store.
type reporter struct {
	cfg             *config.Config
	cacheStore      *store.Store
	progressManager mgr.ProgressMgr
	metaDataManager *fileMetaDataManager
	pieceMD5Manager *pieceMD5Mgr
}
// newReporter constructs a reporter with the given collaborators.
func newReporter(cfg *config.Config, cacheStore *store.Store, progressManager mgr.ProgressMgr,
	metaDataManager *fileMetaDataManager, pieceMD5Manager *pieceMD5Mgr) *reporter {
	re := new(reporter)
	re.cfg = cfg
	re.cacheStore = cacheStore
	re.progressManager = progressManager
	re.metaDataManager = metaDataManager
	re.pieceMD5Manager = pieceMD5Manager
	return re
}
// reportCache reports the pieces found in the local cache to the progress
// manager. breakNum == 0 means no cache hit; breakNum == -1 means a full hit.
// It first tries the cheap meta-data path (processCacheByQuick) and falls
// back to re-reading the cached file when that is not possible.
// The returned hash carries the partially computed file md5 when the cache
// only covers a prefix (nil otherwise).
func (re *reporter) reportCache(ctx context.Context, taskID string, metaData *fileMetaData,
	breakNum int) (hash.Hash, *types.TaskInfo, error) {
	// cache not hit
	if breakNum == 0 {
		return nil, nil, nil
	}
	success, updateTaskInfo, err := re.processCacheByQuick(ctx, taskID, metaData, breakNum)
	if err == nil && success {
		// it is possible to succeed only if breakNum equals -1
		return nil, updateTaskInfo, nil
	}
	// NOTE(review): this logs at error level even when err is nil (the quick
	// path was merely not applicable) — consider demoting to debug.
	logrus.Errorf("failed to process cache by quick taskID(%s): %v", taskID, err)
	// If we can't get the information quickly from fileMetaData,
	// and then we have to get that by reading the file.
	return re.processCacheByReadFile(ctx, taskID, metaData, breakNum)
}
// processCacheByQuick tries to answer a full cache hit (breakNum == -1)
// purely from recorded meta data, without re-reading the cached file.
// It succeeds only when the file md5 is recorded and piece md5s are
// available either in memory or in the persisted meta data.
func (re *reporter) processCacheByQuick(ctx context.Context, taskID string, metaData *fileMetaData, breakNum int) (bool, *types.TaskInfo, error) {
	if breakNum != -1 {
		logrus.Debugf("failed to processCacheByQuick: breakNum not equals -1 for taskID %s", taskID)
		return false, nil, nil
	}
	// validate the file md5
	if stringutils.IsEmptyStr(metaData.RealMd5) {
		logrus.Debugf("failed to processCacheByQuick: empty RealMd5 for taskID %s", taskID)
		return false, nil, nil
	}
	// validate the piece md5s: in-memory registry first, persisted meta second
	var pieceMd5s []string
	var err error
	if pieceMd5s, err = re.pieceMD5Manager.getPieceMD5sByTaskID(taskID); err != nil {
		logrus.Debugf("failed to processCacheByQuick: failed to get pieceMd5s taskID %s: %v", taskID, err)
		return false, nil, err
	}
	if len(pieceMd5s) == 0 {
		if pieceMd5s, err = re.metaDataManager.readPieceMD5s(ctx, taskID, metaData.RealMd5); err != nil {
			logrus.Debugf("failed to processCacheByQuick: failed to read pieceMd5s taskID %s: %v", taskID, err)
			return false, nil, err
		}
	}
	if len(pieceMd5s) == 0 {
		logrus.Debugf("failed to processCacheByQuick: empty pieceMd5s taskID %s: %v", taskID, err)
		return false, nil, nil
	}
	// NOTE(review): the success path reports metaData.Md5 while the check
	// above validated metaData.RealMd5 — confirm Md5 is the intended field.
	return true, getUpdateTaskInfo(types.TaskInfoCdnStatusSUCCESS, metaData.Md5, metaData.FileLength),
		re.reportPiecesStatus(ctx, taskID, pieceMd5s)
}
// processCacheByReadFile rebuilds the cache state by scanning the stored
// file: it re-reads every piece, reports the cached pieces and — on a full
// cache hit (breakNum == -1) — persists the recomputed meta data and piece
// md5s. For a partial hit it returns the in-progress file md5 so the caller
// can resume the digest over the remaining download.
func (re *reporter) processCacheByReadFile(ctx context.Context, taskID string, metaData *fileMetaData, breakNum int) (hash.Hash, *types.TaskInfo, error) {
	var calculateFileMd5 = true
	// On a full hit with a recorded md5 there is no need to recompute it.
	if breakNum == -1 && !stringutils.IsEmptyStr(metaData.RealMd5) {
		calculateFileMd5 = false
	}
	cacheReader := newSuperReader()
	reader, err := re.cacheStore.Get(ctx, getDownloadRawFunc(taskID))
	if err != nil {
		logrus.Errorf("failed to read key file taskID(%s): %v", taskID, err)
		return nil, nil, err
	}
	result, err := cacheReader.readFile(ctx, reader, true, calculateFileMd5)
	if err != nil {
		logrus.Errorf("failed to read cache file taskID(%s): %v", taskID, err)
		return nil, nil, err
	}
	logrus.Infof("success to get cache result: %+v by read file", result)
	if err := re.reportPiecesStatus(ctx, taskID, result.pieceMd5s); err != nil {
		return nil, nil, err
	}
	// Partial hit: hand the running file md5 back to the caller.
	if breakNum != -1 {
		return result.fileMd5, nil, nil
	}
	// Full hit: prefer the recorded md5, falling back to the recomputed one.
	fileMd5Value := metaData.RealMd5
	if stringutils.IsEmptyStr(fileMd5Value) {
		fileMd5Value = fileutils.GetMd5Sum(result.fileMd5, nil)
	}
	fmd := &fileMetaData{
		Finish:     true,
		Success:    true,
		RealMd5:    fileMd5Value,
		FileLength: result.fileLength,
	}
	if err := re.metaDataManager.updateStatusAndResult(ctx, taskID, fmd); err != nil {
		logrus.Errorf("failed to update status and result fileMetaData(%+v) for taskID(%s): %v", fmd, taskID, err)
		return nil, nil, err
	}
	logrus.Infof("success to update status and result fileMetaData(%+v) for taskID(%s)", fmd, taskID)
	return nil, getUpdateTaskInfo(types.TaskInfoCdnStatusSUCCESS, fileMd5Value, result.fileLength),
		re.metaDataManager.writePieceMD5s(ctx, taskID, fileMd5Value, result.pieceMd5s)
}
// reportPiecesStatus reports every piece in pieceMd5s as successfully
// cached, stopping at the first error.
func (re *reporter) reportPiecesStatus(ctx context.Context, taskID string, pieceMd5s []string) error {
	for pieceNum, md5Value := range pieceMd5s {
		if err := re.reportPieceStatus(ctx, taskID, pieceNum, md5Value, config.PieceSUCCESS); err != nil {
			return err
		}
	}
	return nil
}
// reportPieceStatus records the piece md5 (on success) and forwards the
// piece status to the progress manager on behalf of the supernode itself.
func (re *reporter) reportPieceStatus(ctx context.Context, taskID string, pieceNum int, md5 string, pieceStatus int) (err error) {
	defer func() {
		if err == nil {
			logrus.Debugf("success to report piece status with taskID(%s) pieceNum(%d)", taskID, pieceNum)
		}
	}()
	if pieceStatus == config.PieceSUCCESS {
		if err = re.pieceMD5Manager.setPieceMD5(taskID, pieceNum, md5); err != nil {
			return err
		}
	}
	return re.progressManager.UpdateProgress(ctx, taskID, re.cfg.GetSuperCID(taskID), re.cfg.GetSuperPID(), "", pieceNum, pieceStatus)
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cdn
import (
"context"
"crypto/md5"
"encoding/binary"
"fmt"
"hash"
"io"
"github.com/dragonflyoss/Dragonfly/pkg/fileutils"
"github.com/dragonflyoss/Dragonfly/pkg/util"
"github.com/dragonflyoss/Dragonfly/supernode/config"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// cdnCacheResult holds what was learned from scanning a cached cdn file.
type cdnCacheResult struct {
	pieceCount int       // number of complete pieces read
	fileLength int64     // total bytes read, piece headers and tailers included
	pieceMd5s  []string  // per-piece md5 values (when piece md5 calculation is on)
	fileMd5    hash.Hash // running md5 over piece payloads (when file md5 calculation is on)
}
// superReader parses the piece-wrapped cdn file format
// (header + content + tailer per piece). It is stateless.
type superReader struct{}

// newSuperReader returns a new stateless superReader.
func newSuperReader() *superReader {
	return &superReader{}
}
// readFile scans a piece-wrapped cdn file from reader until EOF, returning
// the piece count, the total on-disk length and (optionally) per-piece and
// whole-file md5 values.
// Each piece on disk is: header (PieceHeadSize bytes) + content + 1-byte
// tailer. A clean EOF at a piece boundary terminates the scan without error.
func (sr *superReader) readFile(ctx context.Context, reader io.Reader, calculatePieceMd5, calculateFileMd5 bool) (result *cdnCacheResult, err error) {
	result = &cdnCacheResult{}
	var pieceMd5 hash.Hash
	if calculatePieceMd5 {
		pieceMd5 = md5.New()
	}
	if calculateFileMd5 {
		result.fileMd5 = md5.New()
	}
	for {
		// read header and get piece content length
		ret, err := readHeader(reader, pieceMd5)
		if err != nil {
			if err == io.EOF {
				// End of stream at a piece boundary: normal termination.
				return result, nil
			}
			return result, errors.Wrapf(err, "failed to read header for count %d", result.pieceCount+1)
		}
		result.fileLength += config.PieceHeadSize
		pieceLen := getContentLengthByHeader(ret)
		logrus.Debugf("get piece length: %d with count: %d from header", pieceLen, result.pieceCount)
		// read content; the payload feeds both the piece md5 and the file md5
		// (the file md5 covers payload only — header/tailer go to pieceMd5 only).
		if err := readContent(reader, pieceLen, pieceMd5, result.fileMd5); err != nil {
			logrus.Errorf("failed to read content for count %d: %v", result.pieceCount, err)
			return result, err
		}
		result.fileLength += int64(pieceLen)
		// read tailer
		if err := readTailer(reader, pieceMd5); err != nil {
			return result, errors.Wrapf(err, "failed to read tailer for count %d", result.pieceCount)
		}
		// +1 accounts for the single tailer byte.
		result.fileLength++
		result.pieceCount++
		if calculatePieceMd5 {
			// Record this piece's md5 (combined with its wrapped length) and
			// reset the digest for the next piece.
			pieceSum := fileutils.GetMd5Sum(pieceMd5, nil)
			pieceLength := pieceLen + config.PieceWrapSize
			result.pieceMd5s = append(result.pieceMd5s, getPieceMd5Value(pieceSum, pieceLength))
			pieceMd5.Reset()
		}
	}
}
func readHeader(reader io.Reader, pieceMd5 hash.Hash) (uint32, error) {
header := make([]byte, 4)
n, err := reader.Read(header)
if err != nil {
return 0, err
}
if n != config.PieceHeadSize {
return 0, fmt.Errorf("unexpected head size: %d", n)
}
if pieceMd5 != nil {
pieceMd5.Write(header)
}
return binary.BigEndian.Uint32(header), nil
}
// readContent consumes exactly pieceLen payload bytes from reader in chunks
// of at most 256 KiB, feeding every chunk into pieceMd5 and fileMd5 when
// they are non-nil (util.IsNil also catches typed-nil hash values).
func readContent(reader io.Reader, pieceLen int32, pieceMd5 hash.Hash, fileMd5 hash.Hash) error {
	bufSize := int32(256 * 1024)
	if pieceLen < bufSize {
		bufSize = pieceLen
	}
	pieceContent := make([]byte, bufSize)
	for remaining := pieceLen; remaining > 0; {
		// Size of this round: a full buffer or whatever is left.
		chunk := bufSize
		if remaining < chunk {
			chunk = remaining
		}
		if err := binary.Read(reader, binary.BigEndian, pieceContent[:chunk]); err != nil {
			return err
		}
		remaining -= chunk
		// calculate the md5
		if !util.IsNil(pieceMd5) {
			pieceMd5.Write(pieceContent[:chunk])
		}
		if !util.IsNil(fileMd5) {
			fileMd5.Write(pieceContent[:chunk])
		}
	}
	return nil
}
func readTailer(reader io.Reader, pieceMd5 hash.Hash) error {
tailer := make([]byte, 1)
if err := binary.Read(reader, binary.BigEndian, tailer); err != nil {
return err
}
if tailer[0] != config.PieceTailChar {
return fmt.Errorf("unexpected tailer: %v", tailer[0])
}
if !util.IsNil(pieceMd5) {
pieceMd5.Write(tailer)
}
return nil
}
// getMD5ByReadFile computes the md5 of the next pieceLen bytes of reader.
// A non-positive pieceLen yields the md5 of the empty input.
func getMD5ByReadFile(reader io.Reader, pieceLen int32) (string, error) {
	digest := md5.New()
	if pieceLen <= 0 {
		return fileutils.GetMd5Sum(digest, nil), nil
	}
	if err := readContent(reader, pieceLen, digest, nil); err != nil {
		return "", err
	}
	return fileutils.GetMd5Sum(digest, nil), nil
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cdn
import (
"bytes"
"context"
"io"
"sync"
"github.com/dragonflyoss/Dragonfly/apis/types"
"github.com/dragonflyoss/Dragonfly/supernode/config"
"github.com/dragonflyoss/Dragonfly/supernode/store"
"github.com/sirupsen/logrus"
)
// protocolContent is one piece worth of payload queued for the writer pool.
type protocolContent struct {
	taskID           string
	pieceNum         int           // index of this piece within the task
	pieceSize        int32         // full on-disk piece size, header and tailer included
	pieceContentSize int32         // payload bytes actually carried in pieceContent
	pieceContent     *bytes.Buffer // raw payload (header/tailer added later by writeToFile)
}
// downloadMetadata summarizes what startWriter actually wrote.
type downloadMetadata struct {
	realFileLength     int64 // bytes on disk, piece headers/tailers included
	realHTTPFileLength int64 // payload bytes as received over http
	// pieceCount is set from the running piece-number counter; NOTE(review):
	// confirm its off-by-one semantics when a trailing partial piece exists.
	pieceCount int
}
// superWriter splits a download stream into pieces and persists them
// through the cdn store, reporting piece status via cdnReporter.
type superWriter struct {
	cdnStore    *store.Store
	cdnReporter *reporter // may be nil; writerPool then skips status reporting
}
// newSuperWriter builds a superWriter backed by cdnStore that reports
// piece status through cdnReporter.
func newSuperWriter(cdnStore *store.Store, cdnReporter *reporter) *superWriter {
	w := new(superWriter)
	w.cdnStore = cdnStore
	w.cdnReporter = cdnReporter
	return w
}
// startWriter writes the stream data from the reader to the underlying storage.
// It slices the stream into pieces of pieceContSize payload bytes, hands each
// complete piece to a pool of writer goroutines over jobCh, and returns the
// accumulated file/http lengths once the stream is exhausted and all writers
// have finished.
func (cw *superWriter) startWriter(ctx context.Context, cfg *config.Config, reader io.Reader,
	task *types.TaskInfo, startPieceNum int, httpFileLength int64, pieceContSize int32) (*downloadMetadata, error) {
	// realFileLength is used to calculate the file Length dynamically
	// (seeded with the on-disk size of the pieces cached before startPieceNum).
	realFileLength := int64(startPieceNum) * int64(task.PieceSize)
	// realHTTPFileLength is used to calculate the http file Length dynamically
	realHTTPFileLength := int64(startPieceNum) * int64(pieceContSize)
	// the left size of data for a complete piece
	pieceContLeft := pieceContSize
	// the pieceNum currently processed
	curPieceNum := startPieceNum
	buf := make([]byte, pieceContSize)
	var bb = &bytes.Buffer{}
	// start writer pool
	routineCount := calculateRoutineCount(httpFileLength, task.PieceSize)
	var wg = &sync.WaitGroup{}
	jobCh := make(chan *protocolContent)
	cw.writerPool(ctx, wg, routineCount, jobCh)
	for {
		n, e := reader.Read(buf)
		if n > 0 {
			logrus.Debugf("success to read content with length: %d", n)
			realFileLength += int64(n)
			realHTTPFileLength += int64(n)
			// This read completes the current piece: ship it and start a new one.
			if int(pieceContLeft) <= n {
				bb.Write(buf[:pieceContLeft])
				pc := &protocolContent{
					taskID:           task.ID,
					pieceNum:         curPieceNum,
					pieceSize:        task.PieceSize,
					pieceContentSize: pieceContSize,
					pieceContent:     bb,
				}
				jobCh <- pc
				logrus.Debugf("send the protocolContent taskID: %s pieceNum: %d", task.ID, curPieceNum)
				// On disk every piece also carries a header and a tailer.
				realFileLength += config.PieceWrapSize
				curPieceNum++
				// write the data left to a new buffer
				// TODO: recycling bytes.Buffer
				bb = bytes.NewBuffer([]byte{})
				n -= int(pieceContLeft)
				if n > 0 {
					bb.Write(buf[pieceContLeft : int(pieceContLeft)+n])
				}
				pieceContLeft = pieceContSize
			} else {
				bb.Write(buf[:n])
			}
			pieceContLeft -= int32(n)
		}
		if e == io.EOF {
			// Flush the trailing partial piece — or an empty piece when the
			// whole file is empty — before shutting the pool down.
			if realFileLength == 0 || bb.Len() > 0 {
				jobCh <- &protocolContent{
					taskID:           task.ID,
					pieceNum:         curPieceNum,
					pieceSize:        task.PieceSize,
					pieceContentSize: int32(bb.Len()),
					pieceContent:     bb,
				}
				logrus.Debugf("send the protocolContent taskID: %s pieceNum: %d", task.ID, curPieceNum)
				realFileLength += config.PieceWrapSize
			}
			logrus.Infof("send all protocolContents with realFileLength(%d) and wait for superwriter", realFileLength)
			break
		}
		if e != nil {
			// NOTE(review): this error path closes jobCh but skips wg.Wait(),
			// so in-flight writes are not drained — confirm that is intended.
			close(jobCh)
			return nil, e
		}
	}
	close(jobCh)
	wg.Wait()
	return &downloadMetadata{
		realFileLength:     realFileLength,
		realHTTPFileLength: realHTTPFileLength,
		pieceCount:         curPieceNum,
	}, nil
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cdn
import (
"bytes"
"context"
"crypto/md5"
"encoding/binary"
"hash"
"sync"
"github.com/dragonflyoss/Dragonfly/pkg/fileutils"
"github.com/dragonflyoss/Dragonfly/supernode/config"
"github.com/dragonflyoss/Dragonfly/supernode/store"
"github.com/sirupsen/logrus"
)
// calculateRoutineCount decides how many writer goroutines to start: at most
// config.CDNWriterRoutineLimit, but never more than the number of pieces the
// file will produce. An unknown length (< 0) or invalid piece size falls
// back to the limit; a zero-length file needs a single writer.
func calculateRoutineCount(httpFileLength int64, pieceSize int32) int {
	limit := config.CDNWriterRoutineLimit
	if httpFileLength < 0 || pieceSize <= 0 {
		return limit
	}
	if httpFileLength == 0 {
		return 1
	}
	// Ceiling division by the per-piece payload size.
	contentSize := int64(pieceSize - config.PieceWrapSize)
	pieces := int((httpFileLength + contentSize - 1) / contentSize)
	if pieces < 1 {
		pieces = 1
	}
	if pieces < limit {
		return pieces
	}
	return limit
}
// writerPool starts n goroutines that drain jobCh: each job is wrapped and
// written to storage and, when a reporter is configured, its piece status is
// reported. Every goroutine registers with wg and signals it once jobCh is
// closed and drained. Failed jobs are logged and skipped, not retried.
func (cw *superWriter) writerPool(ctx context.Context, wg *sync.WaitGroup, n int, jobCh chan *protocolContent) {
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for job := range jobCh {
				pieceMd5 := md5.New()
				if err := cw.writeToFile(ctx, job.pieceContent, job.taskID, job.pieceNum, job.pieceContentSize, job.pieceSize, pieceMd5); err != nil {
					logrus.Errorf("failed to write taskID %s pieceNum %d file: %v", job.taskID, job.pieceNum, err)
					// NOTE: should we redo the job?
					continue
				}
				if cw.cdnReporter == nil {
					continue
				}
				// report piece status
				pieceSum := fileutils.GetMd5Sum(pieceMd5, nil)
				pieceMd5Value := getPieceMd5Value(pieceSum, job.pieceContentSize+config.PieceWrapSize)
				if err := cw.cdnReporter.reportPieceStatus(ctx, job.taskID, job.pieceNum, pieceMd5Value, config.PieceSUCCESS); err != nil {
					// NOTE: should we do this job again?
					logrus.Errorf("failed to report piece status taskID %s pieceNum %d pieceMD5 %s: %v", job.taskID, job.pieceNum, pieceMd5Value, err)
				}
			}
		}()
	}
}
// writeToFile wraps the piece content with piece header and tailer,
// and then writes to the storage.
// The on-disk layout of one piece is:
//
//	4-byte big-endian header | payload (pieceContSize bytes) | 1-byte tailer
//
// When pieceMd5 is non-nil it accumulates exactly the bytes written.
func (cw *superWriter) writeToFile(ctx context.Context, bytesBuffer *bytes.Buffer, taskID string, pieceNum int, pieceContSize, pieceSize int32, pieceMd5 hash.Hash) error {
	out := &bytes.Buffer{}
	// piece header
	header := make([]byte, 4)
	binary.BigEndian.PutUint32(header, getPieceHeader(pieceContSize, pieceSize))
	out.Write(header)
	// piece content
	var payload []byte
	if pieceContSize > 0 {
		payload = make([]byte, pieceContSize)
		if _, err := bytesBuffer.Read(payload); err != nil {
			return err
		}
		bytesBuffer.Reset()
		out.Write(payload)
	}
	// piece tailer
	tailer := []byte{config.PieceTailChar}
	out.Write(tailer)
	if pieceMd5 != nil {
		pieceMd5.Write(header)
		if len(payload) > 0 {
			pieceMd5.Write(payload)
		}
		pieceMd5.Write(tailer)
	}
	// persist the wrapped piece at its fixed offset inside the task file
	return cw.cdnStore.Put(ctx, &store.Raw{
		Bucket: config.DownloadHome,
		Key:    getDownloadKey(taskID),
		Offset: int64(pieceNum) * int64(pieceSize),
		Length: int64(pieceContSize) + config.PieceWrapSize,
	}, out)
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package mgr
import (
"context"
"fmt"
"github.com/dragonflyoss/Dragonfly/apis/types"
"github.com/dragonflyoss/Dragonfly/supernode/config"
"github.com/dragonflyoss/Dragonfly/supernode/httpclient"
"github.com/dragonflyoss/Dragonfly/supernode/store"
"github.com/prometheus/client_golang/prometheus"
)
// CDNBuilder is the factory signature that every CDN pattern implementation
// registers so GetCDNManager can construct it on demand.
type CDNBuilder func(cfg *config.Config, cacheStore *store.Store, progressManager ProgressMgr,
	originClient httpclient.OriginHTTPClient, register prometheus.Registerer) (CDNMgr, error)

// cdnBuilderMap maps a CDN pattern name to its registered builder.
var cdnBuilderMap = make(map[config.CDNPattern]CDNBuilder)

// Register records the builder for the given CDN pattern name.
// Not safe for concurrent use; intended to be called from package init.
func Register(name config.CDNPattern, builder CDNBuilder) {
	cdnBuilderMap[name] = builder
}
// GetCDNManager builds the CDNMgr for the pattern configured in cfg,
// defaulting to the local pattern when none is set.
func GetCDNManager(cfg *config.Config, cacheStore *store.Store, progressManager ProgressMgr,
	originClient httpclient.OriginHTTPClient, register prometheus.Registerer) (CDNMgr, error) {
	pattern := cfg.CDNPattern
	if pattern == "" {
		pattern = config.CDNPatternLocal
	}
	builder, found := cdnBuilderMap[pattern]
	if !found {
		return nil, fmt.Errorf("unexpected cdn pattern(%s) which must be in [\"local\", \"source\"]", pattern)
	}
	return builder(cfg, cacheStore, progressManager, originClient, register)
}
// CDNMgr as an interface defines all operations against CDN and
// operates on the underlying files stored on the local disk, etc.
type CDNMgr interface {
	// TriggerCDN will trigger CDN to download the file from sourceUrl.
	// It includes the following steps:
	// 1). download the source file
	// 2). write the file to disk
	//
	// In fact, it's a very time consuming operation.
	// So if not necessary, it should usually be executed concurrently.
	// In addition, it's not thread-safe.
	TriggerCDN(ctx context.Context, taskInfo *types.TaskInfo) (*types.TaskInfo, error)
	// GetHTTPPath returns the http download path of taskID.
	GetHTTPPath(ctx context.Context, taskInfo *types.TaskInfo) (path string, err error)
	// GetStatus gets the status of the file.
	GetStatus(ctx context.Context, taskID string) (cdnStatus string, err error)
	// GetGCTaskIDs returns the taskIDs that should exec GC operations as a string slice.
	//
	// It should return nil when the free disk of cdn storage is larger than config.YoungGCThreshold.
	// It should return all taskIDs that are not running when the free disk of cdn storage is less than config.FullGCThreshold.
	GetGCTaskIDs(ctx context.Context, taskMgr TaskMgr) ([]string, error)
	// GetPieceMD5 gets the piece Md5 according to the specified taskID and pieceNum.
	GetPieceMD5(ctx context.Context, taskID string, pieceNum int, pieceRange, source string) (pieceMd5 string, err error)
	// CheckFile checks the file whether exists.
	CheckFile(ctx context.Context, taskID string) bool
	// Delete the cdn meta with specified taskID.
	// The file on the disk will be deleted when the force is true.
	Delete(ctx context.Context, taskID string, force bool) error
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package util
import (
"net/http"
"sort"
"strconv"
"strings"
"github.com/dragonflyoss/Dragonfly/pkg/errortypes"
"github.com/dragonflyoss/Dragonfly/pkg/stringutils"
"github.com/pkg/errors"
)
// Query-parameter names and accepted sort-direction values used by ParseFilter.
const (
	// PAGENUM identifies the page number of the data.
	// The default value is 0.
	PAGENUM = "pageNum"
	// PAGESIZE identifies the page size of the data.
	// If this value equals 0, return all values.
	PAGESIZE = "pageSize"
	// SORTKEY identifies the sort key of the data.
	// Each mgr needs to define acceptable values based on its own implementation.
	SORTKEY = "sortKey"
	// SORTDIRECT identifies the sort direction of the data.
	// The value can only be ASC or DESC (matched case-insensitively, see ValidateFilter).
	SORTDIRECT = "sortDirect"
	// ASCDIRECT means to sort the records in ascending order.
	ASCDIRECT = "ASC"
	// DESCDIRECT means to sort the records in descending order.
	DESCDIRECT = "DESC"
)
// sortDirectMap is the set of valid (upper-cased) sort directions.
var sortDirectMap = map[string]bool{
	ASCDIRECT:  true,
	DESCDIRECT: true,
}
// PageFilter holds the paging and sorting parameters parsed from a request.
type PageFilter struct {
	// PageNum is the zero-based page index.
	PageNum int
	// PageSize is the number of entries per page; 0 means all entries.
	PageSize int
	// SortKey lists the fields to sort by; validity is mgr-specific.
	SortKey []string
	// SortDirect is either ASC or DESC (case-insensitive).
	SortDirect string
}
// ParseFilter extracts paging/sorting params from the request query string,
// validates them against sortKeyMap and returns them as a *PageFilter.
func ParseFilter(req *http.Request, sortKeyMap map[string]bool) (pageFilter *PageFilter, err error) {
	v := req.URL.Query()
	pageFilter = &PageFilter{}
	// pageNum: missing/empty defaults to 0
	pageNum, err := stoi(v.Get(PAGENUM))
	if err != nil {
		return nil, errors.Wrapf(errortypes.ErrInvalidValue, "pageNum %d is not a number: %v", pageNum, err)
	}
	pageFilter.PageNum = pageNum
	// pageSize: missing/empty defaults to 0 (meaning "all")
	pageSize, err := stoi(v.Get(PAGESIZE))
	if err != nil {
		return nil, errors.Wrapf(errortypes.ErrInvalidValue, "pageSize %d is not a number: %v", pageSize, err)
	}
	pageFilter.PageSize = pageSize
	// sortDirect defaults to ascending when not supplied
	sortDirect := v.Get(SORTDIRECT)
	if sortDirect == "" {
		sortDirect = ASCDIRECT
	}
	pageFilter.SortDirect = sortDirect
	// sortKey may be supplied multiple times; keep all values
	if sortKey, ok := v[SORTKEY]; ok {
		pageFilter.SortKey = sortKey
	}
	err = ValidateFilter(pageFilter, sortKeyMap)
	if err != nil {
		return nil, err
	}
	return
}
// stoi converts str to a non-negative int.
// An empty string is treated as 0; a non-numeric or negative string
// yields (-1, non-nil error).
//
// Fix: the original returned (-1, err) for negative values, but err is
// nil on that path (Atoi succeeded), so callers received -1 with no
// error and the failure was only caught later by ValidateFilter.
func stoi(str string) (int, error) {
	if stringutils.IsEmptyStr(str) {
		return 0, nil
	}
	result, err := strconv.Atoi(str)
	if err != nil {
		return -1, err
	}
	if result < 0 {
		return -1, errors.Errorf("%d is a negative number", result)
	}
	return result, nil
}
// ValidateFilter checks that all fields of pageFilter are acceptable.
// The caller customizes sortKeyMap to declare which sort keys it supports;
// a nil map (or empty SortKey) skips sort-key validation.
func ValidateFilter(pageFilter *PageFilter, sortKeyMap map[string]bool) error {
	if pageFilter.PageNum < 0 {
		return errors.Wrapf(errortypes.ErrInvalidValue, "pageNum %d is not a natural number", pageFilter.PageNum)
	}
	if pageFilter.PageSize < 0 {
		return errors.Wrapf(errortypes.ErrInvalidValue, "pageSize %d is not a natural number", pageFilter.PageSize)
	}
	// direction is matched case-insensitively against ASC/DESC
	if !sortDirectMap[strings.ToUpper(pageFilter.SortDirect)] {
		return errors.Wrapf(errortypes.ErrInvalidValue, "unexpected sortDirect %s", pageFilter.SortDirect)
	}
	if sortKeyMap == nil {
		return nil
	}
	for _, key := range pageFilter.SortKey {
		// a missing key looks up as false, same as an explicitly-disabled one
		if !sortKeyMap[key] {
			return errors.Wrapf(errortypes.ErrInvalidValue, "unexpected sortKey %s", key)
		}
	}
	return nil
}
// GetPageValues sorts metaSlice in place with the given less function and
// returns the requested page of it.
//
// less reports whether the element with index i should sort before the
// element with index j; when less is nil the slice is returned unsorted.
// A pageSize of 0 returns the whole (sorted) slice; a page that starts
// beyond the end returns nil; a partially-filled last page is truncated.
//
// Example less for sorting people by age:
//
//	less := func(i, j int) bool { return people[i].Age < people[j].Age }
func GetPageValues(metaSlice []interface{}, pageNum, pageSize int,
	less func(i, j int) bool) []interface{} {
	if metaSlice == nil {
		return nil
	}
	if less == nil {
		return metaSlice
	}
	sort.Slice(metaSlice, less)
	if pageSize == 0 {
		return metaSlice
	}
	total := len(metaSlice)
	start := pageNum * pageSize
	end := start + pageSize
	switch {
	case start > total:
		return nil
	case end > total:
		return metaSlice[start:total]
	default:
		return metaSlice[start:end]
	}
}
// IsDESC reports whether str denotes descending sort order
// (case-insensitive comparison against DESCDIRECT).
func IsDESC(str string) bool {
	switch strings.ToUpper(str) {
	case DESCDIRECT:
		return true
	default:
		return false
	}
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package util
import (
"github.com/dragonflyoss/Dragonfly/pkg/syncmap"
)
// Store maintains some metadata information in memory, backed by a
// concurrency-safe SyncMap whose methods it embeds.
type Store struct {
	*syncmap.SyncMap
}
// NewStore returns a new, empty Store.
func NewStore() *Store {
	return &Store{syncmap.NewSyncMap()}
}
// Put a key-value pair into the store.
// It delegates to the embedded SyncMap's Add and returns its error.
func (s *Store) Put(key string, value interface{}) error {
	return s.Add(key, value)
}
// Delete a key-value pair from the store with the specified key.
// It delegates to the embedded SyncMap's Remove and returns its error.
func (s *Store) Delete(key string) error {
	return s.Remove(key)
}
// List returns all values currently stored.
// The order of the results is random (map-iteration order).
func (s *Store) List() []interface{} {
	values := make([]interface{}, 0)
	s.Range(func(key, value interface{}) bool {
		values = append(values, value)
		return true
	})
	return values
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package httpclient
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"net"
"net/http"
netUrl "net/url"
"sync"
"time"
"github.com/dragonflyoss/Dragonfly/pkg/errortypes"
"github.com/dragonflyoss/Dragonfly/pkg/httputils"
"github.com/dragonflyoss/Dragonfly/pkg/netutils"
"github.com/dragonflyoss/Dragonfly/pkg/stringutils"
strfmt "github.com/go-openapi/strfmt"
"github.com/pkg/errors"
)
// StatusCodeChecker reports whether an HTTP status code is acceptable.
type StatusCodeChecker func(int) bool
// OriginHTTPClient supplies apis that interact with the source (origin) server.
type OriginHTTPClient interface {
	// RegisterTLSConfig registers a TLS-configured client for the host of rawURL.
	RegisterTLSConfig(rawURL string, insecure bool, caBlock []strfmt.Base64)
	// GetContentLength returns the content length and status code of url.
	GetContentLength(url string, headers map[string]string) (int64, int, error)
	// IsSupportRange reports whether url supports partial (range) requests.
	IsSupportRange(url string, headers map[string]string) (bool, error)
	// IsExpired reports whether the resource at url has changed since
	// lastModified / eTag were recorded.
	IsExpired(url string, headers map[string]string, lastModified int64, eTag string) (bool, error)
	// Download fetches url; checkCode decides which status codes are acceptable.
	Download(url string, headers map[string]string, checkCode StatusCodeChecker) (*http.Response, error)
}
// OriginClient is an implementation of the interface of OriginHTTPClient.
type OriginClient struct {
	// clientMap maps host -> *http.Client with host-specific TLS config
	// (populated by RegisterTLSConfig).
	clientMap *sync.Map
	// defaultHTTPClient handles hosts with no registered TLS config.
	defaultHTTPClient *http.Client
}
// NewOriginClient returns a new OriginClient whose default transport uses
// environment proxy settings and connection pooling.
func NewOriginClient() OriginHTTPClient {
	defaultTransport := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:   3 * time.Second,
			KeepAlive: 30 * time.Second,
			DualStack: true,
		}).DialContext,
		MaxIdleConns:          100,
		IdleConnTimeout:       90 * time.Second,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
	}
	// allow custom (non-http/https) protocols registered by the project
	httputils.RegisterProtocolOnTransport(defaultTransport)
	return &OriginClient{
		clientMap: &sync.Map{},
		defaultHTTPClient: &http.Client{
			Transport: defaultTransport,
		},
	}
}
// RegisterTLSConfig saves a tls-configured http client into clientMap,
// keyed by the host of rawURL:
//
//	key -> host, value -> *http.Client
//
// NOTE(review): a rawURL that fails to parse is silently ignored —
// the caller gets no feedback; confirm this best-effort behavior is intended.
func (client *OriginClient) RegisterTLSConfig(rawURL string, insecure bool, caBlock []strfmt.Base64) {
	url, err := netUrl.Parse(rawURL)
	if err != nil {
		return
	}
	tlsConfig := &tls.Config{
		InsecureSkipVerify: insecure,
	}
	// only install a custom root pool if at least one CA block parsed
	appendSuccess := false
	roots := x509.NewCertPool()
	for _, caBytes := range caBlock {
		appendSuccess = appendSuccess || roots.AppendCertsFromPEM(caBytes)
	}
	if appendSuccess {
		tlsConfig.RootCAs = roots
	}
	transport := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:   3 * time.Second,
			KeepAlive: 30 * time.Second,
			DualStack: true,
		}).DialContext,
		MaxIdleConns:          100,
		IdleConnTimeout:       90 * time.Second,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
		TLSClientConfig:       tlsConfig,
	}
	httputils.RegisterProtocolOnTransport(transport)
	client.clientMap.Store(url.Host, &http.Client{
		Transport: transport,
	})
}
// GetContentLength sends a GET request (with a 4s timeout) and returns the
// response's Content-Length and status code without reading the body.
// Note: despite the original comment, this is a GET, not a HEAD request.
func (client *OriginClient) GetContentLength(url string, headers map[string]string) (int64, int, error) {
	// send request
	resp, err := client.HTTPWithHeaders(http.MethodGet, url, headers, 4*time.Second)
	if err != nil {
		return 0, 0, err
	}
	// only the headers are needed; close the body immediately
	resp.Body.Close()
	return resp.ContentLength, resp.StatusCode, nil
}
// IsSupportRange checks whether the source url supports partial requests
// by probing it with a one-byte Range request and looking for a 206 reply.
func (client *OriginClient) IsSupportRange(url string, headers map[string]string) (bool, error) {
	// headers belongs to the caller; add the probe header to a copy only
	probeHeaders := CopyHeader(nil, headers)
	probeHeaders["Range"] = "bytes=0-0"
	resp, err := client.HTTPWithHeaders(http.MethodGet, url, probeHeaders, 4*time.Second)
	if err != nil {
		return false, err
	}
	_ = resp.Body.Close()
	return resp.StatusCode == http.StatusPartialContent, nil
}
// IsExpired checks whether the remote resource differs from the stored copy
// via a conditional GET (If-Modified-Since / If-None-Match).
// It returns true (expired) when neither lastModified nor eTag is available,
// and false only when the server answers 304 Not Modified.
func (client *OriginClient) IsExpired(url string, headers map[string]string, lastModified int64, eTag string) (bool, error) {
	if lastModified <= 0 && stringutils.IsEmptyStr(eTag) {
		return true, nil
	}
	// set headers: headers is a reference to a map owned by the caller,
	// so mutate a copy instead
	copied := CopyHeader(nil, headers)
	if lastModified > 0 {
		// conversion error deliberately ignored; lastModified > 0 was checked
		lastModifiedStr, _ := netutils.ConvertTimeIntToString(lastModified)
		copied["If-Modified-Since"] = lastModifiedStr
	}
	if !stringutils.IsEmptyStr(eTag) {
		copied["If-None-Match"] = eTag
	}
	// send request
	resp, err := client.HTTPWithHeaders(http.MethodGet, url, copied, 4*time.Second)
	if err != nil {
		return false, err
	}
	resp.Body.Close()
	return resp.StatusCode != http.StatusNotModified, nil
}
// Download downloads the file from the original address.
// The caller owns (and must close) the returned response body when
// checkCode accepts the status; otherwise the body is closed here and an
// error is returned.
func (client *OriginClient) Download(url string, headers map[string]string, checkCode StatusCodeChecker) (*http.Response, error) {
	// TODO: add timeout
	resp, err := client.HTTPWithHeaders(http.MethodGet, url, headers, 0)
	if err != nil {
		return nil, err
	}
	if checkCode(resp.StatusCode) {
		return resp, nil
	}
	resp.Body.Close()
	return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode)
}
// HTTPWithHeaders issues the request with the client registered for the
// request's host (falling back to the default client) and the given headers.
//
// NOTE(review): when timeout > 0 the context is cancelled by the deferred
// cancel() as soon as this function returns, so the returned response body
// may become unreadable after return. Callers in this file only read
// headers and close the body immediately; confirm before streaming a body
// obtained with a non-zero timeout.
func (client *OriginClient) HTTPWithHeaders(method, url string, headers map[string]string, timeout time.Duration) (*http.Response, error) {
	req, err := http.NewRequest(method, url, nil)
	if err != nil {
		return nil, err
	}
	if timeout > 0 {
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		req = req.WithContext(ctx)
		defer cancel()
	}
	for k, v := range headers {
		req.Header.Add(k, v)
	}
	// pick the per-host client registered by RegisterTLSConfig, if any
	httpClientObject, existed := client.clientMap.Load(req.Host)
	if !existed {
		// use client.defaultHTTPClient to support custom protocols
		httpClientObject = client.defaultHTTPClient
	}
	httpClient, ok := httpClientObject.(*http.Client)
	if !ok {
		return nil, errors.Wrapf(errortypes.ErrInvalidValue, "http client type check error: %T", httpClientObject)
	}
	return httpClient.Do(req)
}
// CopyHeader copies every entry of src into dst and returns dst,
// allocating a fresh map when dst is nil. The result is never nil.
func CopyHeader(dst, src map[string]string) map[string]string {
	if dst == nil {
		dst = make(map[string]string, len(src))
	}
	for key, value := range src {
		dst[key] = value
	}
	return dst
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package plugins
import (
"sync"
"github.com/dragonflyoss/Dragonfly/supernode/config"
)
// NewManager creates a default plugin manager instance.
func NewManager() Manager {
	return &managerIml{
		builders: NewRepository(),
		plugins:  NewRepository(),
	}
}
// NewRepository creates a default repository instance.
func NewRepository() Repository {
	return &repositoryIml{
		repos: make(map[config.PluginType]*sync.Map),
	}
}
// Manager manages all plugin builders and plugin instances.
type Manager interface {
	// AddBuilder adds a Builder object with the given plugin type and name.
	// (The original comment mislabeled this method as GetBuilder.)
	AddBuilder(pt config.PluginType, name string, b Builder)
	// GetBuilder returns a Builder object with the given plugin type and name.
	GetBuilder(pt config.PluginType, name string) Builder
	// DeleteBuilder deletes a builder with the given plugin type and name.
	DeleteBuilder(pt config.PluginType, name string)
	// AddPlugin adds a plugin into this manager.
	AddPlugin(p Plugin)
	// GetPlugin returns a plugin with the given plugin type and name.
	GetPlugin(pt config.PluginType, name string) Plugin
	// DeletePlugin deletes a plugin with the given plugin type and name.
	DeletePlugin(pt config.PluginType, name string)
}
// Plugin defines methods that plugins need to implement.
type Plugin interface {
	// Type returns the type of this plugin.
	Type() config.PluginType
	// Name returns the name of this plugin.
	Name() string
}
// Builder is a function that creates a new plugin instance with the given conf.
type Builder func(conf string) (Plugin, error)
// Repository stores data related to plugins, indexed by type and name.
type Repository interface {
	// Add adds a data to this repository.
	Add(pt config.PluginType, name string, data interface{})
	// Get gets a data with the given type and name from this
	// repository; it returns nil when no entry exists.
	Get(pt config.PluginType, name string) interface{}
	// Delete deletes a data with the given type and name from
	// this repository.
	Delete(pt config.PluginType, name string)
}
// -----------------------------------------------------------------------------
// implementation of Manager

// managerIml implements Manager by delegating to two repositories, one
// for builders and one for built plugin instances.
type managerIml struct {
	builders Repository
	plugins  Repository
}

// compile-time interface-satisfaction check
var _ Manager = (*managerIml)(nil)
// AddBuilder registers the builder under (pt, name); nil builders are ignored.
func (m *managerIml) AddBuilder(pt config.PluginType, name string, b Builder) {
	if b == nil {
		return
	}
	m.builders.Add(pt, name, b)
}
// GetBuilder returns the builder registered under (pt, name), or nil when
// there is no entry or the stored value is not a Builder.
func (m *managerIml) GetBuilder(pt config.PluginType, name string) Builder {
	// a failed assertion (including on a nil entry) yields a nil Builder
	builder, _ := m.builders.Get(pt, name).(Builder)
	return builder
}
// DeleteBuilder removes the builder registered under (pt, name).
func (m *managerIml) DeleteBuilder(pt config.PluginType, name string) {
	m.builders.Delete(pt, name)
}
// AddPlugin stores the plugin under its own type and name; nil plugins are ignored.
func (m *managerIml) AddPlugin(p Plugin) {
	if p == nil {
		return
	}
	m.plugins.Add(p.Type(), p.Name(), p)
}
// GetPlugin returns the plugin registered under (pt, name), or nil when
// there is no entry or the stored value is not a Plugin.
func (m *managerIml) GetPlugin(pt config.PluginType, name string) Plugin {
	// a failed assertion (including on a nil entry) yields a nil Plugin
	plugin, _ := m.plugins.Get(pt, name).(Plugin)
	return plugin
}
// DeletePlugin removes the plugin registered under (pt, name).
func (m *managerIml) DeletePlugin(pt config.PluginType, name string) {
	m.plugins.Delete(pt, name)
}
// -----------------------------------------------------------------------------
// implementation of Repository

// repositoryIml keeps one sync.Map per plugin type.
type repositoryIml struct {
	// repos maps plugin type -> per-type sync.Map of name -> data.
	repos map[config.PluginType]*sync.Map
	// lock guards mutation of the repos map itself (see getRepo).
	lock sync.Mutex
}

// compile-time interface-satisfaction check
var _ Repository = (*repositoryIml)(nil)
// Add stores data under (pt, name). Nil data, an empty name, or an
// unknown plugin type is silently ignored.
func (r *repositoryIml) Add(pt config.PluginType, name string, data interface{}) {
	if data == nil || !validate(pt, name) {
		return
	}
	m := r.getRepo(pt)
	m.Store(name, data)
}
// Get returns the data stored under (pt, name), or nil for an invalid
// key, a missing entry, or a stored nil value.
func (r *repositoryIml) Get(pt config.PluginType, name string) interface{} {
	if !validate(pt, name) {
		return nil
	}
	value, found := r.getRepo(pt).Load(name)
	if !found || value == nil {
		return nil
	}
	return value
}
// Delete removes the data stored under (pt, name); invalid keys are ignored.
func (r *repositoryIml) Delete(pt config.PluginType, name string) {
	if !validate(pt, name) {
		return
	}
	m := r.getRepo(pt)
	m.Delete(name)
}
// getRepo returns the sync.Map holding entries of the given plugin type,
// lazily creating it on first use.
//
// Fix: the whole lookup now runs under r.lock. The previous implementation
// read r.repos without the lock before taking it, which is a data race
// (concurrent map read and map write) against the locked insert below.
func (r *repositoryIml) getRepo(pt config.PluginType) *sync.Map {
	r.lock.Lock()
	defer r.lock.Unlock()
	m, ok := r.repos[pt]
	if !ok || m == nil {
		m = &sync.Map{}
		r.repos[pt] = m
	}
	return m
}
// -----------------------------------------------------------------------------
// helper functions

// validate reports whether name is non-empty and pt is one of the plugin
// types declared in config.PluginTypes.
func validate(pt config.PluginType, name string) bool {
	if name == "" {
		return false
	}
	for _, known := range config.PluginTypes {
		if pt == known {
			return true
		}
	}
	return false
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package plugins
import (
"fmt"
"github.com/dragonflyoss/Dragonfly/supernode/config"
"github.com/sirupsen/logrus"
)
// mgr is the package-level plugin manager; replaceable via SetManager.
var mgr = NewManager()
// SetManager sets a Manager implementation instead of the default one.
// It is not synchronized; call it before any concurrent use of this package.
func SetManager(m Manager) {
	mgr = m
}
// Initialize builds every enabled plugin declared in the config file and
// registers the resulting instances with the package-level manager.
// It fails fast on the first missing builder or build error.
func Initialize(cfg *config.Config) error {
	for pluginType, properties := range cfg.Plugins {
		for _, prop := range properties {
			if !prop.Enabled {
				logrus.Infof("plugin[%s][%s] is disabled", pluginType, prop.Name)
				continue
			}
			builder := mgr.GetBuilder(pluginType, prop.Name)
			if builder == nil {
				return fmt.Errorf("cannot find builder to create plugin[%s][%s]",
					pluginType, prop.Name)
			}
			plugin, err := builder(prop.Config)
			if err != nil {
				return fmt.Errorf("failed to build plugin[%s][%s]: %v",
					pluginType, prop.Name, err)
			}
			mgr.AddPlugin(plugin)
			logrus.Infof("add plugin[%s][%s]", pluginType, prop.Name)
		}
	}
	return nil
}
// RegisterPlugin registers a plugin builder that will be called to create a new
// plugin instance when supernode starts.
func RegisterPlugin(pt config.PluginType, name string, builder Builder) {
	mgr.AddBuilder(pt, name, builder)
}
// GetPlugin returns the plugin instance with the given plugin type and name,
// or nil when none was built.
func GetPlugin(pt config.PluginType, name string) Plugin {
	return mgr.GetPlugin(pt, name)
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package store
import (
"fmt"
"github.com/pkg/errors"
)
// Sentinel storage errors; check them with the Is* helpers below, which
// unwrap via errors.Cause.
var (
	// ErrKeyNotFound is an error which will be returned
	// when the key can not be found.
	ErrKeyNotFound = StorageError{codeKeyNotFound, "the key not found"}
	// ErrEmptyKey is an error when the key is empty.
	ErrEmptyKey = StorageError{codeEmptyKey, "the key is empty"}
	// ErrInvalidValue represents the value is invalid.
	ErrInvalidValue = StorageError{codeInvalidValue, "invalid value"}
	// ErrRangeNotSatisfiable represents the length of file is insufficient.
	ErrRangeNotSatisfiable = StorageError{codeRangeNotSatisfiable, "range not satisfiable"}
)
// Numeric codes carried by StorageError, one per sentinel above.
const (
	codeKeyNotFound = iota
	codeEmptyKey
	codeInvalidValue
	codeRangeNotSatisfiable
)
// StorageError represents a storage error, pairing a machine-readable
// code with a human-readable message.
type StorageError struct {
	Code int
	Msg  string
}

// Error renders the error as a small JSON-style object.
func (s StorageError) Error() string {
	return fmt.Sprintf(`{"Code":%d,"Msg":"%s"}`, s.Code, s.Msg)
}
// IsNilError checks whether the error is nil.
func IsNilError(err error) bool {
	return err == nil
}
// IsKeyNotFound checks whether err (or its cause) is ErrKeyNotFound.
func IsKeyNotFound(err error) bool {
	return checkError(err, codeKeyNotFound)
}
// IsEmptyKey checks whether err (or its cause) is ErrEmptyKey.
func IsEmptyKey(err error) bool {
	return checkError(err, codeEmptyKey)
}
// IsInvalidValue checks whether err (or its cause) is ErrInvalidValue.
func IsInvalidValue(err error) bool {
	return checkError(err, codeInvalidValue)
}
// IsRangeNotSatisfiable checks whether err (or its cause) is
// ErrRangeNotSatisfiable.
func IsRangeNotSatisfiable(err error) bool {
	return checkError(err, codeRangeNotSatisfiable)
}
// checkError unwraps err with errors.Cause and reports whether the cause
// is a StorageError carrying the given code.
func checkError(err error, code int) bool {
	e, ok := errors.Cause(err).(StorageError)
	return ok && e.Code == code
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package store
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/dragonflyoss/Dragonfly/pkg/fileutils"
statutils "github.com/dragonflyoss/Dragonfly/pkg/stat"
"github.com/dragonflyoss/Dragonfly/supernode/util"
"github.com/pkg/errors"
"gopkg.in/yaml.v2"
)
// LocalStorageDriver is the name under which the local-filesystem storage
// driver registers itself.
const LocalStorageDriver = "local"

// fileLocker serializes access to files managed by localStorage,
// keyed by path and offset (see getLockKey).
var fileLocker = util.NewLockerPool()

func init() {
	Register(LocalStorageDriver, NewLocalStorage)
}
// lock acquires a lock on the (path, offset) segment; ro selects a
// read (shared) lock. When a specific offset is given, a shared lock on
// the whole file (offset -1) is taken first so whole-file operations
// (Remove, GetAvailSpace, Walk) exclude segment operations.
func lock(path string, offset int64, ro bool) {
	if offset != -1 {
		fileLocker.GetLock(getLockKey(path, -1), true)
	}
	fileLocker.GetLock(getLockKey(path, offset), ro)
}
// unLock releases the locks taken by lock with the same arguments,
// including the implicit whole-file shared lock when offset != -1.
func unLock(path string, offset int64, ro bool) {
	if offset != -1 {
		fileLocker.ReleaseLock(getLockKey(path, -1), true)
	}
	fileLocker.ReleaseLock(getLockKey(path, offset), ro)
}
// localStorage is one of the implementations of StorageDriver using the
// local file system; keys map to files under BaseDir/bucket/key.
type localStorage struct {
	// BaseDir is the dir that local storage driver will store content based on it.
	BaseDir string `yaml:"baseDir"`
}
// NewLocalStorage performs initialization for localStorage and returns a
// StorageDriver. conf is a YAML document holding baseDir, which must be
// an absolute path; the directory is created if missing.
func NewLocalStorage(conf string) (StorageDriver, error) {
	// parse the yaml config into the localStorage struct
	cfg := &localStorage{}
	if err := yaml.Unmarshal([]byte(conf), cfg); err != nil {
		return nil, fmt.Errorf("failed to parse config: %v", err)
	}
	// prepare the base dir
	if !filepath.IsAbs(cfg.BaseDir) {
		return nil, fmt.Errorf("not absolute path: %s", cfg.BaseDir)
	}
	if err := fileutils.CreateDirectory(cfg.BaseDir); err != nil {
		return nil, err
	}
	return &localStorage{
		BaseDir: cfg.BaseDir,
	}, nil
}
// Get the content of key from storage and return it as an io stream.
// The copy runs in a background goroutine that holds the file read lock
// and writes into a pipe.
//
// NOTE(review): errors inside the goroutine (os.Open, Seek, CopyBuffer)
// are not propagated — the pipe is closed with plain w.Close(), so the
// reader sees a clean EOF and possibly truncated data; confirm whether
// w.CloseWithError was intended.
func (ls *localStorage) Get(ctx context.Context, raw *Raw) (io.Reader, error) {
	path, info, err := ls.statPath(raw.Bucket, raw.Key)
	if err != nil {
		return nil, err
	}
	if err := checkGetRaw(raw, info.Size()); err != nil {
		return nil, err
	}
	r, w := io.Pipe()
	go func() {
		defer w.Close()
		lock(path, raw.Offset, true)
		defer unLock(path, raw.Offset, true)
		f, err := os.Open(path)
		if err != nil {
			return
		}
		defer f.Close()
		f.Seek(raw.Offset, 0)
		var reader io.Reader
		reader = f
		if raw.Length > 0 {
			// limit the stream to the requested byte count
			reader = io.LimitReader(f, raw.Length)
		}
		buf := make([]byte, 256*1024)
		io.CopyBuffer(w, reader, buf)
	}()
	return r, nil
}
// GetBytes gets the content of key from storage and returns it in bytes.
// When raw.Length <= 0 it reads from raw.Offset to EOF; otherwise it reads
// exactly raw.Length bytes starting at raw.Offset.
//
// Fix: a single f.Read may legally return fewer than len(data) bytes
// without an error (short read), silently handing back a partially-filled
// buffer; io.ReadFull guarantees a full read or an error. The Seek error
// is also no longer ignored.
func (ls *localStorage) GetBytes(ctx context.Context, raw *Raw) (data []byte, err error) {
	path, info, err := ls.statPath(raw.Bucket, raw.Key)
	if err != nil {
		return nil, err
	}
	if err := checkGetRaw(raw, info.Size()); err != nil {
		return nil, err
	}
	lock(path, raw.Offset, true)
	defer unLock(path, raw.Offset, true)
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	if _, err := f.Seek(raw.Offset, 0); err != nil {
		return nil, err
	}
	if raw.Length <= 0 {
		data, err = ioutil.ReadAll(f)
	} else {
		data = make([]byte, raw.Length)
		_, err = io.ReadFull(f, data)
	}
	if err != nil {
		return nil, err
	}
	return data, nil
}
// Put reads the content from reader and writes it into storage at
// raw.Offset, truncating first when raw.Trunc is set. A positive
// raw.Length copies exactly that many bytes; otherwise everything until
// EOF is copied. A nil reader is a no-op after the path is prepared.
func (ls *localStorage) Put(ctx context.Context, raw *Raw, data io.Reader) error {
	if err := checkPutRaw(raw); err != nil {
		return err
	}
	path, err := ls.preparePath(raw.Bucket, raw.Key)
	if err != nil {
		return err
	}
	if data == nil {
		return nil
	}
	// exclusive lock: this is a write
	lock(path, raw.Offset, false)
	defer unLock(path, raw.Offset, false)
	var f *os.File
	if raw.Trunc {
		f, err = fileutils.OpenFile(path, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644)
	} else {
		f, err = fileutils.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
	}
	if err != nil {
		return err
	}
	defer f.Close()
	// NOTE(review): Seek error is ignored here — confirm this is acceptable.
	f.Seek(raw.Offset, 0)
	if raw.Length > 0 {
		if _, err = io.CopyN(f, data, raw.Length); err != nil {
			return err
		}
		return nil
	}
	buf := make([]byte, 256*1024)
	if _, err = io.CopyBuffer(f, data, buf); err != nil {
		return err
	}
	return nil
}
// PutBytes writes data into storage at raw.Offset, truncating first when
// raw.Trunc is set. A raw.Length of 0 writes the whole slice; otherwise
// only the first raw.Length bytes are written.
//
// Fix: raw.Length larger than len(data) previously paniced on the
// data[:raw.Length] slice expression; it now returns ErrInvalidValue.
func (ls *localStorage) PutBytes(ctx context.Context, raw *Raw, data []byte) error {
	if err := checkPutRaw(raw); err != nil {
		return err
	}
	if raw.Length > int64(len(data)) {
		return errors.Wrapf(ErrInvalidValue, "the length: %d is larger than the data size: %d", raw.Length, len(data))
	}
	path, err := ls.preparePath(raw.Bucket, raw.Key)
	if err != nil {
		return err
	}
	// exclusive lock: this is a write
	lock(path, raw.Offset, false)
	defer unLock(path, raw.Offset, false)
	var f *os.File
	if raw.Trunc {
		f, err = fileutils.OpenFile(path, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0644)
	} else {
		f, err = fileutils.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
	}
	if err != nil {
		return err
	}
	defer f.Close()
	// NOTE(review): Seek error is ignored here — confirm this is acceptable.
	f.Seek(raw.Offset, 0)
	if raw.Length == 0 {
		if _, err := f.Write(data); err != nil {
			return err
		}
		return nil
	}
	if _, err := f.Write(data[:raw.Length]); err != nil {
		return err
	}
	return nil
}
// Stat determines whether the file exists and returns its metadata.
// Missing keys surface as the wrapped ErrKeyNotFound from statPath.
func (ls *localStorage) Stat(ctx context.Context, raw *Raw) (*StorageInfo, error) {
	_, fileInfo, err := ls.statPath(raw.Bucket, raw.Key)
	if err != nil {
		return nil, err
	}
	// platform-specific stat data is needed for the creation time
	sys, ok := fileutils.GetSys(fileInfo)
	if !ok {
		return nil, fmt.Errorf("get create time error")
	}
	return &StorageInfo{
		Path:       filepath.Join(raw.Bucket, raw.Key),
		Size:       fileInfo.Size(),
		CreateTime: statutils.Ctime(sys),
		ModTime:    fileInfo.ModTime(),
	}, nil
}
// Remove deletes a file or dir.
// It will force delete the file or dir when raw.Trunc is true; otherwise a
// directory is only removed when it is empty.
//
// NOTE(review): a non-empty directory without Trunc returns nil without
// deleting anything — confirm that silent no-op is intended.
func (ls *localStorage) Remove(ctx context.Context, raw *Raw) error {
	path, info, err := ls.statPath(raw.Bucket, raw.Key)
	if err != nil {
		return err
	}
	// whole-file exclusive lock
	lock(path, -1, false)
	defer unLock(path, -1, false)
	if raw.Trunc || !info.IsDir() {
		return os.RemoveAll(path)
	}
	empty, err := fileutils.IsEmptyDir(path)
	if empty {
		return os.RemoveAll(path)
	}
	return err
}
// GetAvailSpace returns the available disk space in bytes for the
// filesystem containing the path identified by raw.
func (ls *localStorage) GetAvailSpace(ctx context.Context, raw *Raw) (fileutils.Fsize, error) {
	path, _, err := ls.statPath(raw.Bucket, raw.Key)
	if err != nil {
		return 0, err
	}
	// whole-file shared lock
	lock(path, -1, true)
	defer unLock(path, -1, true)
	return fileutils.GetFreeSpace(path)
}
// Walk walks the file tree rooted at the path determined by raw.Bucket and
// raw.Key, calling raw.WalkFn for each file or directory in the tree,
// including the root.
func (ls *localStorage) Walk(ctx context.Context, raw *Raw) error {
	path, _, err := ls.statPath(raw.Bucket, raw.Key)
	if err != nil {
		return err
	}
	// whole-file shared lock for the duration of the walk
	lock(path, -1, true)
	defer unLock(path, -1, true)
	return filepath.Walk(path, raw.WalkFn)
}
// helper function

// preparePath builds the target file path for (bucket, key) and creates
// the bucket directory if it does not exist.
func (ls *localStorage) preparePath(bucket, key string) (string, error) {
	dir := filepath.Join(ls.BaseDir, bucket)
	if err := fileutils.CreateDirectory(dir); err != nil {
		return "", err
	}
	target := filepath.Join(dir, key)
	return target, nil
}
// statPath determines whether the target file exists and, if so, returns
// its full path and os.FileInfo. A missing file is reported as a wrapped
// ErrKeyNotFound. (The original comment's mention of a "fileMutex" was
// wrong — no lock is involved here.)
func (ls *localStorage) statPath(bucket, key string) (string, os.FileInfo, error) {
	filePath := filepath.Join(ls.BaseDir, bucket, key)
	f, err := os.Stat(filePath)
	if err != nil {
		if os.IsNotExist(err) {
			return "", nil, errors.Wrapf(ErrKeyNotFound, "bucket(%s) key(%s)", bucket, key)
		}
		return "", nil, err
	}
	return filePath, f, nil
}
// getLockKey builds the fileLocker key for a (path, offset) pair;
// offset -1 denotes the whole file.
func getLockKey(path string, offset int64) string {
	return fmt.Sprintf("%s:%d", path, offset)
}
// checkGetRaw validates a read request against the actual file length:
// the offset must be inside the file, the length non-negative, and
// offset+length must not run past EOF.
//
// Fix: corrected the "lager" typos and the misleading "not a positive
// integer" message (zero length is allowed; only negatives are rejected).
func checkGetRaw(raw *Raw, fileLength int64) error {
	if fileLength < raw.Offset {
		return errors.Wrapf(ErrRangeNotSatisfiable, "the offset: %d is larger than the file length: %d", raw.Offset, fileLength)
	}
	if raw.Length < 0 {
		return errors.Wrapf(ErrInvalidValue, "the length: %d is a negative integer", raw.Length)
	}
	if fileLength < (raw.Offset + raw.Length) {
		return errors.Wrapf(ErrRangeNotSatisfiable, "the offset: %d and length: %d is larger than the file length: %d", raw.Offset, raw.Length, fileLength)
	}
	return nil
}
// checkPutRaw validates a write request: the length must not be negative
// (zero means "write everything").
func checkPutRaw(raw *Raw) error {
	if raw.Length < 0 {
		return errors.Wrapf(ErrInvalidValue, "the length: %d should not be a negative integer", raw.Length)
	}
	return nil
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package store
import (
"context"
"fmt"
"io"
"github.com/dragonflyoss/Dragonfly/pkg/fileutils"
"github.com/dragonflyoss/Dragonfly/pkg/stringutils"
"github.com/dragonflyoss/Dragonfly/supernode/config"
"github.com/pkg/errors"
)
// Store is a wrapper of the storage which implements the interface of StorageDriver.
// It is also a Plugin (see Type/Name), so it can be registered with the
// plugin manager.
type Store struct {
	// driverName is the unique identifier (ID) of the underlying driver.
	driverName string
	// config is the raw config string used to init the storage driver.
	config interface{}
	// driver holds a storage which implements the interface of StorageDriver.
	driver StorageDriver
}
// NewStore creates a new Store instance by building the named driver
// from cfg. name must be non-empty and builder non-nil.
func NewStore(name string, builder StorageBuilder, cfg string) (*Store, error) {
	if name == "" || builder == nil {
		return nil, fmt.Errorf("plugin name or builder cannot be nil")
	}
	// init driver with specific config
	driver, err := builder(cfg)
	if err != nil {
		return nil, fmt.Errorf("failed to init storage driver %s: %v", name, err)
	}
	return &Store{
		driverName: name,
		config:     cfg,
		driver:     driver,
	}, nil
}
// Type returns the plugin type: StoragePlugin.
// (Part of the plugins.Plugin interface.)
func (s *Store) Type() config.PluginType {
	return config.StoragePlugin
}
// Name returns the plugin name, i.e. the underlying driver name.
// (Part of the plugins.Plugin interface.)
func (s *Store) Name() string {
	return s.driverName
}
// Get the data from the storage driver as an io stream,
// after validating that raw carries a non-empty key.
func (s *Store) Get(ctx context.Context, raw *Raw) (io.Reader, error) {
	if err := checkEmptyKey(raw); err != nil {
		return nil, err
	}
	return s.driver.Get(ctx, raw)
}
// GetBytes gets the data from the storage driver in bytes.
// It rejects a nil raw or an empty key before delegating to the driver.
func (s *Store) GetBytes(ctx context.Context, raw *Raw) ([]byte, error) {
	err := checkEmptyKey(raw)
	if err != nil {
		return nil, err
	}
	return s.driver.GetBytes(ctx, raw)
}
// Put puts data into the storage in io stream.
// It rejects a nil raw or an empty key before delegating to the driver.
func (s *Store) Put(ctx context.Context, raw *Raw, data io.Reader) error {
	err := checkEmptyKey(raw)
	if err != nil {
		return err
	}
	return s.driver.Put(ctx, raw, data)
}
// PutBytes puts data into the storage in bytes.
// It rejects a nil raw or an empty key before delegating to the driver.
func (s *Store) PutBytes(ctx context.Context, raw *Raw, data []byte) error {
	err := checkEmptyKey(raw)
	if err != nil {
		return err
	}
	return s.driver.PutBytes(ctx, raw, data)
}
// Remove the data from the storage based on raw information.
// Unlike the other operations, Remove accepts an empty key as long as a
// bucket is given; only the combination of both being empty is rejected.
func (s *Store) Remove(ctx context.Context, raw *Raw) error {
	if raw == nil {
		return errors.Wrapf(ErrEmptyKey, "cannot set both key and bucket empty at the same time")
	}
	if stringutils.IsEmptyStr(raw.Key) && stringutils.IsEmptyStr(raw.Bucket) {
		return errors.Wrapf(ErrEmptyKey, "cannot set both key and bucket empty at the same time")
	}
	return s.driver.Remove(ctx, raw)
}
// Stat determines whether the data exists based on raw information.
// If that, and return some info that in the form of struct StorageInfo.
// If not, return the ErrNotFound.
func (s *Store) Stat(ctx context.Context, raw *Raw) (*StorageInfo, error) {
	err := checkEmptyKey(raw)
	if err != nil {
		return nil, err
	}
	return s.driver.Stat(ctx, raw)
}
// GetAvailSpace returns the available disk space in B, as reported by
// the underlying driver.
func (s *Store) GetAvailSpace(ctx context.Context, raw *Raw) (fileutils.Fsize, error) {
	return s.driver.GetAvailSpace(ctx, raw)
}
// Walk walks the file tree rooted at root which determined by raw.Bucket and raw.Key,
// calling walkFn for each file or directory in the tree, including root.
// The traversal itself is fully delegated to the underlying driver.
func (s *Store) Walk(ctx context.Context, raw *Raw) error {
	return s.driver.Walk(ctx, raw)
}
// checkEmptyKey returns ErrEmptyKey when raw is nil or carries no key,
// and nil otherwise.
func checkEmptyKey(raw *Raw) error {
	if raw != nil && !stringutils.IsEmptyStr(raw.Key) {
		return nil
	}
	return ErrEmptyKey
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package store
import (
"fmt"
"path/filepath"
"sync"
"github.com/dragonflyoss/Dragonfly/supernode/config"
"github.com/dragonflyoss/Dragonfly/supernode/plugins"
)
// StorageBuilder is a function that creates a new storage plugin instant
// with the giving conf.
// The conf string is the raw (driver-specific) configuration text; a builder
// returns the initialized driver or an error describing why it could not be built.
type StorageBuilder func(conf string) (StorageDriver, error)
// Register defines an interface to register a driver with specified name.
// All drivers should call this function to register itself to the driverFactory.
func Register(name string, builder StorageBuilder) {
	// adapt the storage builder to the generic plugins.Builder signature
	wrap := func(conf string) (plugins.Plugin, error) {
		return NewStore(name, builder, conf)
	}
	plugins.RegisterPlugin(config.StoragePlugin, name, plugins.Builder(wrap))
}
// Manager manages stores.
type Manager struct {
	// cfg supplies the supernode configuration (e.g. HomeDir) used when the
	// default local storage has to be created lazily.
	cfg *config.Config

	// defaultStorage caches the lazily-built local storage instance.
	defaultStorage *Store

	// mutex guards the lazy initialization of defaultStorage.
	mutex sync.Mutex
}
// NewManager creates a store manager holding the given configuration.
// The default local storage is not built here; it is created lazily on
// first use.
func NewManager(cfg *config.Config) (*Manager, error) {
	m := &Manager{cfg: cfg}
	return m, nil
}
// Get a store from manager with specified name.
// When the name is LocalStorageDriver and no plugin was registered under it,
// the built-in local storage is created lazily and returned instead.
func (sm *Manager) Get(name string) (*Store, error) {
	plugin := plugins.GetPlugin(config.StoragePlugin, name)
	if plugin == nil {
		if name == LocalStorageDriver {
			return sm.getDefaultStorage()
		}
		return nil, fmt.Errorf("not existed storage: %s", name)
	}

	store, ok := plugin.(*Store)
	if !ok {
		return nil, fmt.Errorf("get store error: unknown reason")
	}
	return store, nil
}
// getDefaultStorage lazily initializes and returns the built-in local storage,
// rooted at <HomeDir>/repo.
//
// NOTE: the previous implementation checked sm.defaultStorage before acquiring
// sm.mutex. That unsynchronized read races with the locked write below when
// several goroutines call Get concurrently (flagged by the race detector).
// Every access now happens under the mutex; this path is rarely hit, so the
// extra locking cost is negligible.
func (sm *Manager) getDefaultStorage() (*Store, error) {
	sm.mutex.Lock()
	defer sm.mutex.Unlock()

	// already initialized by an earlier caller
	if sm.defaultStorage != nil {
		return sm.defaultStorage, nil
	}

	if sm.cfg == nil {
		return nil, fmt.Errorf("cannot init local storage without home path")
	}

	// build the driver configuration pointing the local storage at <home>/repo
	cfg := fmt.Sprintf("baseDir: %s", filepath.Join(sm.cfg.HomeDir, "repo"))
	s, err := NewStore(LocalStorageDriver, NewLocalStorage, cfg)
	if err != nil {
		return nil, err
	}
	sm.defaultStorage = s
	return sm.defaultStorage, nil
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package util
import (
"sync"
"github.com/dragonflyoss/Dragonfly/pkg/atomiccount"
)
// countRWMutex is a reader/writer mutex that additionally tracks how many
// callers currently hold or are waiting for it, so a pool owner can tell
// when the mutex is idle and safe to recycle.
type countRWMutex struct {
	// count is the number of active holders/waiters; updated atomically.
	count *atomiccount.AtomicInt
	sync.RWMutex
}
// newCountRWMutex returns a countRWMutex whose reference count starts at zero.
func newCountRWMutex() *countRWMutex {
	mu := &countRWMutex{}
	mu.count = atomiccount.NewAtomicInt(0)
	return mu
}
// reset clears the reference count back to zero, preparing the mutex
// for reuse from a pool.
func (cr *countRWMutex) reset() {
	cr.count.Set(0)
}
// increaseCount bumps the reference count by one and reports the
// resulting value.
func (cr *countRWMutex) increaseCount() int32 {
	cr.count.Add(1)
	return cr.count.Get()
}
// decreaseCount drops the reference count by one and reports the
// resulting value.
func (cr *countRWMutex) decreaseCount() int32 {
	cr.count.Add(-1)
	return cr.count.Get()
}
// lock acquires the mutex: a shared read lock when ro is true,
// an exclusive write lock otherwise.
func (cr *countRWMutex) lock(ro bool) {
	if !ro {
		cr.Lock()
		return
	}
	cr.RLock()
}
// unlock releases the mutex: the shared read lock when ro is true,
// the exclusive write lock otherwise.
func (cr *countRWMutex) unlock(ro bool) {
	if !ro {
		cr.Unlock()
		return
	}
	cr.RUnlock()
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package util
import (
"sync"
)
// defaultLocker is the package-level LockerPool backing the free functions
// GetLock and ReleaseLock.
var defaultLocker = NewLockerPool()
// GetLock locks key with defaultLocker.
// When ro is true the key is locked for reading; otherwise for writing.
func GetLock(key string, ro bool) {
	defaultLocker.GetLock(key, ro)
}
// ReleaseLock unlocks key with defaultLocker.
// The ro flag must match the one passed to the corresponding GetLock call.
func ReleaseLock(key string, ro bool) {
	defaultLocker.ReleaseLock(key, ro)
}
// LockerPool is a set of reader/writer mutual exclusion locks, keyed by string.
type LockerPool struct {
	// use syncPool to cache allocated but unused *countRWMutex items for later reuse
	syncPool *sync.Pool

	// lockerMap holds the per-key mutexes currently in use (held or waited on).
	lockerMap map[string]*countRWMutex

	// the embedded Mutex guards lockerMap itself.
	sync.Mutex
}
// NewLockerPool returns an empty, ready-to-use *LockerPool whose per-key
// mutexes are recycled through an internal sync.Pool.
func NewLockerPool() *LockerPool {
	pool := &sync.Pool{
		New: func() interface{} {
			return newCountRWMutex()
		},
	}
	return &LockerPool{
		syncPool:  pool,
		lockerMap: map[string]*countRWMutex{},
	}
}
// GetLock locks key.
// If ro(readonly) is true, then it locks key for reading.
// Otherwise, locks key for writing.
func (l *LockerPool) GetLock(key string, ro bool) {
	l.Lock()
	entry, exists := l.lockerMap[key]
	if !exists {
		entry = l.syncPool.Get().(*countRWMutex)
		l.lockerMap[key] = entry
	}
	// register this waiter before dropping the map lock so a concurrent
	// ReleaseLock cannot recycle the entry out from under us
	entry.increaseCount()
	l.Unlock()

	entry.lock(ro)
}
// ReleaseLock unlocks key.
// If ro(readonly) is true, then it unlocks key for reading.
// Otherwise, unlocks key for writing.
func (l *LockerPool) ReleaseLock(key string, ro bool) {
	l.Lock()
	defer l.Unlock()

	entry, exists := l.lockerMap[key]
	if !exists {
		return
	}

	entry.unlock(ro)
	// last holder and no waiters: recycle the mutex back into the pool
	if entry.decreaseCount() < 1 {
		entry.reset()
		l.syncPool.Put(entry)
		delete(l.lockerMap, key)
	}
}
/*
* Copyright The Dragonfly Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package version represents the version the project Dragonfly.
package version
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"runtime"
"strings"
"text/template"
"github.com/dragonflyoss/Dragonfly/apis/types"
"github.com/dragonflyoss/Dragonfly/pkg/metricsutils"
"github.com/prometheus/client_golang/prometheus"
)
var (
	// version is the version of project Dragonfly
	// populate via ldflags
	version string

	// revision is the current git commit revision
	// populate via ldflags
	revision string

	// buildDate is the build date of project Dragonfly
	// populate via ldflags
	buildDate string

	// goVersion is the running program's golang version.
	goVersion = runtime.Version()

	// os is the running program's operating system.
	// NOTE: this shadows the standard "os" package name within this package.
	os = runtime.GOOS

	// arch is the running program's architecture target.
	arch = runtime.GOARCH

	// DFDaemonVersion is the version of dfdaemon.
	DFDaemonVersion = version

	// DFGetVersion is the version of dfget.
	DFGetVersion = version

	// SupernodeVersion is the version of supernode.
	SupernodeVersion = version

	// DFVersion is the global instance of DragonflyVersion.
	// It is populated by init below from the values in this block.
	DFVersion *types.DragonflyVersion
)
// init assembles the global DFVersion value from the ldflags-injected
// build metadata and the runtime environment.
func init() {
	DFVersion = &types.DragonflyVersion{
		Version:   version,
		Arch:      arch,
		OS:        os,
		GoVersion: goVersion,
		BuildDate: buildDate,
	}
}
// versionInfoTmpl contains the text/template used by Print to render
// version information. The template body is emitted verbatim at runtime,
// so its contents must not be reformatted.
var versionInfoTmpl = `
{{.program}} version {{.version}}
Git commit: {{.revision}}
Build date: {{.buildDate}}
Go version: {{.goVersion}}
OS/Arch: {{.OS}}/{{.Arch}}
`
// Print returns version information rendered through versionInfoTmpl,
// with surrounding whitespace trimmed.
func Print(program string) string {
	fields := map[string]string{
		"program":   program,
		"version":   version,
		"revision":  revision,
		"buildDate": buildDate,
		"goVersion": goVersion,
		"OS":        os,
		"Arch":      arch,
	}

	// the template is a compile-time constant, so a parse failure is a bug
	tmpl := template.Must(template.New("version").Parse(versionInfoTmpl))

	var out bytes.Buffer
	if err := tmpl.ExecuteTemplate(&out, "version", fields); err != nil {
		panic(err)
	}
	return strings.TrimSpace(out.String())
}
// NewBuildInfo registers a collector which exports metrics about version and build information.
func NewBuildInfo(program string, registerer prometheus.Registerer) {
	help := fmt.Sprintf("A metric with a constant '1' value labeled by version, revision, os, "+
		"arch and goversion from which %s was built.", program)
	labels := []string{"version", "revision", "os", "arch", "goversion"}

	gauge := metricsutils.NewGauge(program, "build_info", help, labels, registerer)
	gauge.WithLabelValues(version, revision, os, arch, goVersion).Set(1)
}
// Handler returns build information as a JSON document.
//
// Fix: the original called w.WriteHeader(http.StatusOK) AFTER w.Write, but
// the first Write already sends an implicit 200, so the explicit call only
// produced a "superfluous response.WriteHeader" warning. The success status
// is now sent implicitly by Write; we also set the Content-Type up front.
// Note that if Write fails partway, headers are already on the wire, so the
// subsequent http.Error is best-effort only (same as the original).
func Handler(w http.ResponseWriter, r *http.Request) {
	data, err := json.Marshal(DFVersion)
	if err != nil {
		http.Error(w, fmt.Sprintf("error encoding JSON: %s", err), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	if _, err := w.Write(data); err != nil {
		http.Error(w, fmt.Sprintf("error writing the data to the connection: %s", err), http.StatusInternalServerError)
	}
}
// HandlerWithCtx returns build information.
// The ctx parameter exists only to satisfy the handler-with-context
// signature and is unused here; it was renamed from "context", which
// shadowed the imported context package. Always returns nil.
func HandlerWithCtx(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	Handler(w, r)
	return nil
}