package fuzz
import (
"bytes"
"fmt"
"github.com/filecoin-project/lotus/chain/types"
"github.com/google/go-cmp/cmp"
gfuzz "github.com/google/gofuzz"
)
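// These harnesses follow the dvyukov/go-fuzz convention for return values
// (noted here as context, not part of the original comments): 1 asks the fuzzer
// to increase the priority of the input (it parsed and round-tripped
// successfully), 0 leaves priority unchanged, and -1 would keep the input out
// of the corpus.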
// Fuzzes DecodeBlockMsg using random data
func FuzzBlockMsg(data []byte) int {
msg, err := types.DecodeBlockMsg(data)
if err != nil {
return 0
}
encodedMsg, err := msg.Serialize()
if err != nil {
panic(fmt.Sprintf("Error in serializing BlockMsg: %v", err))
}
// Checks that the encoded message matches the original fuzz data.
if !bytes.Equal(encodedMsg, data) {
panic(fmt.Sprintf("Fuzz data and serialized data are not equal: %x != %x", data, encodedMsg))
}
return 1
}
// Structural fuzzing on the BlockMsg struct to provide valid binary data.
func FuzzBlockMsgStructural(data []byte) int {
blockmsg := types.BlockMsg{}
f := gfuzz.NewFromGoFuzz(data).NilChance(0)
f.Fuzz(&blockmsg)
encodedMsg, err := blockmsg.Serialize()
if err != nil {
return 0
}
msg, err := types.DecodeBlockMsg(encodedMsg)
if err != nil {
panic(fmt.Sprintf("Error in decoding BlockMsg: %v", err))
}
// Checks that the decoded message matches the initial BlockMsg.
if !cmp.Equal(blockmsg, *msg) {
panic(fmt.Sprintf("Decoded BlockMsg and original BlockMsg are not equal: %+v != %+v", *msg, blockmsg))
}
return 1
}
// Fuzzes DecodeBlock function for a given BlockHeader.
func FuzzBlockHeader(data []byte) int {
blockheader := types.BlockHeader{}
f := gfuzz.NewFromGoFuzz(data).NilChance(0)
f.Fuzz(&blockheader)
encodedHeader, err := blockheader.Serialize()
if err != nil {
return 0
}
header, err := types.DecodeBlock(encodedHeader)
if err != nil {
panic(fmt.Sprintf("Error in decoding BlockHeader: %v", err))
}
// Checks that the decoded BlockHeader matches the initial BlockHeader.
if !cmp.Equal(blockheader, *header) {
panic(fmt.Sprintf("Decoded BlockHeader and original BlockHeader are not equal: %+v != %+v", *header, blockheader))
}
return 1
}
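// A typical invocation sketch for these harnesses (assuming the dvyukov/go-fuzz
// toolchain; the exact commands and archive name are illustrative, not part of
// this repo's tooling):
//
//	go-fuzz-build                                   # builds the fuzzing archive for this package
//	go-fuzz -bin=./fuzz-fuzz.zip -func=FuzzBlockMsg -workdir=workdir
//
// Any of the harnesses above can be selected via -func; the workdir accumulates
// the corpus and any crashers found.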
// Differential fuzzing to check whether any inaccuracies can be introduced by using fixed-width (float64) arithmetic.
// It's not going to overflow, but could it lose accuracy?
// Ans: no, not a problem with the current MaxIndex.
// Some private constants and functions copied from go-amt-ipld/amt.go 6263827e49
package fuzz
import (
"fmt"
"math"
"math/big"
"math/rand"
"github.com/google/gofuzz/bytesource"
)
// from go-amt-ipld/amt.go:18 6263827e49
const width = 8
// created once to avoid reallocating on every execution
var bigWidth = big.NewInt(8)
//var bigMaxUint64 = big.NewInt(math.MaxUint64)
// what is the max height?
// width ^ maxHeight == MaxIndex (+-1?)
// 8 ^ 16 == (1 << 48)
// index [0..maxIndex)
// Height starts at 0, so we use maxHeight + 1 == 16
const maxHeight = 15
// from go-amt-ipld/amt.go:461 6263827e49
func nodesForHeight(width, height int) uint64 {
val := math.Pow(float64(width), float64(height))
if val >= float64(math.MaxUint64) {
//log.Errorf("nodesForHeight overflow! This should never happen, please report this if you see this log message")
// panic here instead for fuzzing purposes?
return math.MaxUint64
}
return uint64(val)
}
// implementation avoiding limited-accuracy float64
func bigNodesForHeight(width, height *big.Int) uint64 {
bigVal := big.NewInt(0)
bigVal.Exp(width, height, nil)
// bigVal >= math.MaxUint64
// could also use bigVal.IsUint64
//if bigVal.Cmp(bigMaxUint64) >= 0 {
// //log.Errorf("nodesForHeight overflow! This should never happen, please report this if you see this log message")
// // panic here instead for fuzzing purposes?
// return math.MaxUint64
//}
if !bigVal.IsUint64() {
// Do the check to ensure Uint64() result is not undefined
//panic("Bug in harness, shouldn't be possible")
return math.MaxUint64
}
return bigVal.Uint64()
}
// TODO also check when modifying width, even though this is a const?
// currently the search space is small enough that you might as well just test all possibilities
// (see the exhaustive check sketched after FuzzNodesForHeight below)
func FuzzNodesForHeight(data []byte) int {
// because we only want a single int within our range, we just use rand
// rather than the full gofuzz `Fuzzer`
// could also just use mod, but I like this more :)
r := rand.New(bytesource.New(data))
height := r.Intn(maxHeight + 2) // only +1 is needed to cover the valid range, but +2 leaves breathing room to probe beyond it
bigHeight := big.NewInt(int64(height))
result1 := nodesForHeight(width, height)
result2 := bigNodesForHeight(bigWidth, bigHeight)
if result1 != result2 {
fmt.Printf("Input: width=%d, height=%d\n", width, height)
fmt.Printf("Result1=%d\n", result1)
fmt.Printf("Result2=%d\n", result2)
panic("Not Equal!")
}
return 0
}
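// checkAllHeights is a hypothetical helper (not part of the original harness)
// that makes the TODO above concrete: because the search space is tiny, the two
// implementations can simply be compared for every height in [0, maxHeight+1].
func checkAllHeights() (mismatch int, ok bool) {
	for h := 0; h <= maxHeight+1; h++ {
		if nodesForHeight(width, h) != bigNodesForHeight(bigWidth, big.NewInt(int64(h))) {
			return h, false
		}
	}
	return -1, true
}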
package buildconstants
import (
"sort"
"github.com/filecoin-project/go-state-types/abi"
)
type DrandEnum int
const (
DrandMainnet DrandEnum = iota + 1
DrandTestnet
_ // kept to retain iota numbering, used to be Devnet
_ // kept to retain iota numbering, used to be Localnet
DrandIncentinet
DrandQuicknet
)
type DrandConfig struct {
Servers []string
Relays []string
ChainInfoJSON string
IsChained bool // Prior to Drand quicknet, beacons form a chain, post quicknet they do not (FIP-0063)
}
type DrandPoint struct {
Start abi.ChainEpoch
Config DrandConfig
}
var DrandConfigs = map[DrandEnum]DrandConfig{
DrandQuicknet: {
Servers: []string{
"https://api.drand.sh",
"https://api2.drand.sh",
"https://api3.drand.sh",
"https://drand.cloudflare.com",
"https://api.drand.secureweb3.com:6875", // Storswift
},
Relays: []string{
"/dnsaddr/api.drand.sh/",
"/dnsaddr/api2.drand.sh/",
"/dnsaddr/api3.drand.sh/",
},
IsChained: false,
ChainInfoJSON: `{"public_key":"83cf0f2896adee7eb8b5f01fcad3912212c437e0073e911fb90022d3e760183c8c4b450b6a0a6c3ac6a5776a2d1064510d1fec758c921cc22b0e17e63aaf4bcb5ed66304de9cf809bd274ca73bab4af5a6e9c76a4bc09e76eae8991ef5ece45a","period":3,"genesis_time":1692803367,"hash":"52db9ba70e0cc0f6eaf7803dd07447a1f5477735fd3f661792ba94600c84e971","groupHash":"f477d5c89f21a17c863a7f937c6a6d15859414d2be09cd448d4279af331c5d3e","schemeID":"bls-unchained-g1-rfc9380","metadata":{"beaconID":"quicknet"}}`,
},
DrandTestnet: {
Servers: []string{
"https://pl-eu.testnet.drand.sh",
"https://pl-us.testnet.drand.sh",
},
Relays: []string{
"/dnsaddr/pl-eu.testnet.drand.sh/",
"/dnsaddr/pl-us.testnet.drand.sh/",
},
IsChained: true,
ChainInfoJSON: `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"84b2234fb34e835dccd048255d7ad3194b81af7d978c3bf157e3469592ae4e02","groupHash":"4dd408e5fdff9323c76a9b6f087ba8fdc5a6da907bd9217d9d10f2287d081957"}`,
},
// legacy randomness sources, their ChainInfo must remain here forever
// to allow validating randomness from past epochs
DrandIncentinet: {
IsChained: true,
ChainInfoJSON: `{"public_key":"8cad0c72c606ab27d36ee06de1d5b2db1faf92e447025ca37575ab3a8aac2eaae83192f846fc9e158bc738423753d000","period":30,"genesis_time":1595873820,"hash":"80c8b872c714f4c00fdd3daa465d5514049f457f01f85a4caf68cdcd394ba039","groupHash":"d9406aaed487f7af71851b4399448e311f2328923d454e971536c05398ce2d9b"}`,
},
DrandMainnet: {
IsChained: true,
ChainInfoJSON: `{"public_key":"868f005eb8e6e4ca0a47c8a77ceaa5309a47978a7c71bc5cce96366b5d7a569937c529eeda66c7293784a9402801af31","period":30,"genesis_time":1595431050,"hash":"8990e7a9aaed2ffed73dbd7092123d6f289930540d7651336225dc172e51b2ce","groupHash":"176f93498eac9ca337150b46d21dd58673ea4e3581185f869672e59fa4cb390a"}`,
},
}
func DrandConfigSchedule() []DrandPoint {
out := make([]DrandPoint, 0, len(DrandSchedule))
for start, network := range DrandSchedule {
out = append(out, DrandPoint{Start: start, Config: DrandConfigs[network]})
}
sort.Slice(out, func(i, j int) bool {
return out[i].Start < out[j].Start
})
return out
}
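// drandConfigAt is a hypothetical illustration (not part of this package) of how
// the sorted schedule produced by DrandConfigSchedule is typically consumed:
// take the config of the last point whose Start is at or before the given epoch.
func drandConfigAt(points []DrandPoint, epoch abi.ChainEpoch) DrandConfig {
	if len(points) == 0 {
		return DrandConfig{}
	}
	cfg := points[0].Config
	for _, p := range points {
		if p.Start > epoch {
			break
		}
		cfg = p.Config
	}
	return cfg
}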
package buildconstants
import "github.com/filecoin-project/go-state-types/network"
const (
BuildDefault = iota
BuildMainnet
Build2k
BuildDebug
BuildCalibnet
BuildInteropnet
unusedFormerNerpanet // removed in https://github.com/filecoin-project/lotus/pull/7373/files#diff-4592eccb93b506c1e7e175be9b631c7ccdeed4c1c5c4173a1ecd6d974e105190L15
BuildButterflynet
)
var BuildType int
func BuildTypeString() string {
switch BuildType {
case BuildDefault:
return ""
case BuildMainnet:
return "+mainnet"
case Build2k:
return "+2k"
case BuildDebug:
return "+debug"
case BuildCalibnet:
return "+calibnet"
case BuildInteropnet:
return "+interopnet"
case BuildButterflynet:
return "+butterflynet"
default:
return "+huh?"
}
}
var Devnet = true
// The agent string used by the node and reported to other nodes in the network.
const UserAgent = "lotus"
// Used by tests and some obscure tooling
/* inline-gen template
const TestNetworkVersion = network.Version{{.latestNetworkVersion}}
/* inline-gen start */
const TestNetworkVersion = network.Version27
/* inline-gen end */
//go:build !debug && !2k && !testground && !calibnet && !butterflynet && !interopnet
// +build !debug,!2k,!testground,!calibnet,!butterflynet,!interopnet
package buildconstants
import (
_ "embed"
"math"
"os"
"strconv"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/network"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/builtin"
)
var DrandSchedule = map[abi.ChainEpoch]DrandEnum{
0: DrandIncentinet,
UpgradeSmokeHeight: DrandMainnet,
UpgradePhoenixHeight: DrandQuicknet,
}
var NetworkBundle = "mainnet"
// NOTE: DO NOT change this unless you REALLY know what you're doing. This is consensus critical.
const ActorDebugging = false
const GenesisNetworkVersion = network.Version0
const BootstrappersFile = "mainnet.pi"
const GenesisFile = "mainnet.car.zst"
const UpgradeBreezeHeight abi.ChainEpoch = 41280
const BreezeGasTampingDuration abi.ChainEpoch = 120
const UpgradeSmokeHeight abi.ChainEpoch = 51000
const UpgradeIgnitionHeight abi.ChainEpoch = 94000
const UpgradeRefuelHeight abi.ChainEpoch = 130800
var UpgradeAssemblyHeight abi.ChainEpoch = 138720
const UpgradeTapeHeight abi.ChainEpoch = 140760
// This signals our tentative epoch for mainnet launch. Can make it later, but not earlier.
// Miners, clients, developers, custodians all need time to prepare.
// We still have upgrades and state changes to do, but those can happen after signaling timing here.
const UpgradeLiftoffHeight abi.ChainEpoch = 148888
const UpgradeKumquatHeight abi.ChainEpoch = 170000
const UpgradeCalicoHeight abi.ChainEpoch = 265200
const UpgradePersianHeight abi.ChainEpoch = UpgradeCalicoHeight + (builtin2.EpochsInHour * 60)
const UpgradeOrangeHeight abi.ChainEpoch = 336458
// 2020-12-22T02:00:00Z
// var because of wdpost_test.go
var UpgradeClausHeight abi.ChainEpoch = 343200
// 2021-03-04T00:00:30Z
const UpgradeTrustHeight abi.ChainEpoch = 550321
// 2021-04-12T22:00:00Z
const UpgradeNorwegianHeight abi.ChainEpoch = 665280
// 2021-04-29T06:00:00Z
const UpgradeTurboHeight abi.ChainEpoch = 712320
// 2021-06-30T22:00:00Z
const UpgradeHyperdriveHeight abi.ChainEpoch = 892800
// 2021-10-26T13:30:00Z
const UpgradeChocolateHeight abi.ChainEpoch = 1231620
// 2022-03-01T15:00:00Z
const UpgradeOhSnapHeight abi.ChainEpoch = 1594680
// 2022-07-06T14:00:00Z
const UpgradeSkyrHeight abi.ChainEpoch = 1960320
// 2022-11-30T14:00:00Z
const UpgradeSharkHeight abi.ChainEpoch = 2383680
// 2023-03-14T15:14:00Z
const UpgradeHyggeHeight abi.ChainEpoch = 2683348
// 2023-04-27T13:00:00Z
const UpgradeLightningHeight abi.ChainEpoch = 2809800
// 2023-05-18T13:00:00Z
const UpgradeThunderHeight abi.ChainEpoch = UpgradeLightningHeight + 2880*21
// 2023-12-12T13:30:00Z
const UpgradeWatermelonHeight abi.ChainEpoch = 3469380
// This fix upgrade only ran on calibrationnet
const UpgradeWatermelonFixHeight abi.ChainEpoch = -1
// This fix upgrade only ran on calibrationnet
const UpgradeWatermelonFix2Height abi.ChainEpoch = -2
// 2024-04-24T14:00:00Z
const UpgradeDragonHeight abi.ChainEpoch = 3855360
// This fix upgrade only ran on calibrationnet
const UpgradeCalibrationDragonFixHeight abi.ChainEpoch = -3
// This epoch, 120 epochs after the "rest" of the nv22 upgrade, is when we switch to Drand quicknet
// 2024-04-11T15:00:00Z
const UpgradePhoenixHeight abi.ChainEpoch = UpgradeDragonHeight + 120
// 2024-08-06T12:00:00Z
const UpgradeWaffleHeight abi.ChainEpoch = 4154640
// 2024-11-20T23:00:00Z
// var because of TestMigrationNV24 in itests/migration_test.go to test the FIP-0081 pledge ramp
var UpgradeTuktukHeight abi.ChainEpoch = 4461240
// FIP-0081: for the power actor state for pledge calculations.
// UpgradeTuktukPowerRampDurationEpochs ends up in the power actor state after
// the Tuktuk migration, along with a RampStartEpoch matching the upgrade height.
var UpgradeTuktukPowerRampDurationEpochs = uint64(builtin.EpochsInYear)
// 2025-04-14T23:00:00Z
var UpgradeTeepHeight = abi.ChainEpoch(4878840)
// This epoch, 90 days after Teep, completes FIP-0100: actors start applying
// the new daily fee to pre-Teep sectors when they are extended.
var UpgradeTockHeight = UpgradeTeepHeight + builtin.EpochsInDay*90
// Only applied to calibnet which was already upgraded to Teep&Tock
var UpgradeTockFixHeight = abi.ChainEpoch(-1)
// ??????
var UpgradeXxHeight = abi.ChainEpoch(9999999999)
var UpgradeTeepInitialFilReserved = InitialFilReserved // FIP-0100: no change for mainnet
var ConsensusMinerMinPower = abi.NewStoragePower(10 << 40)
var PreCommitChallengeDelay = abi.ChainEpoch(150)
var PropagationDelaySecs = uint64(10)
var EquivocationDelaySecs = uint64(2)
func init() {
var addrNetwork address.Network
if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" {
addrNetwork = address.Mainnet
} else {
addrNetwork = address.Testnet
}
SetAddressNetwork(addrNetwork)
if os.Getenv("LOTUS_DISABLE_XX") == "1" {
UpgradeXxHeight = math.MaxInt64 - 1
}
// NOTE: DO NOT change this unless you REALLY know what you're doing. This is not consensus critical; however,
// setting this value too high may impact your block submission, and setting it too low may cause you to miss
// parent tipsets for block forming and mining.
if len(os.Getenv("PROPAGATION_DELAY_SECS")) != 0 {
pds, err := strconv.ParseUint(os.Getenv("PROPAGATION_DELAY_SECS"), 10, 64)
if err != nil {
log.Warnf("Error parsing PROPAGATION_DELAY_SECS, %v, proceeding with default value %d", err,
PropagationDelaySecs)
} else {
PropagationDelaySecs = pds
log.Warnf(" !!WARNING!! propagation delay is set to %d seconds, "+
"this value impacts your message republish interval and block forming - monitor with caution!!", PropagationDelaySecs)
}
}
Devnet = false
BuildType = BuildMainnet
}
const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds)
// BootstrapPeerThreshold is the minimum number of peers we need to track for a sync worker to start
const BootstrapPeerThreshold = 4
// ChainId defines the chain ID used in the Ethereum JSON-RPC endpoint.
// As per https://github.com/ethereum-lists/chains
const Eip155ChainId = 314
// WhitelistedBlock skips checks on message validity in this block to sidestep the zero-bls signature
var WhitelistedBlock = cid.MustParse("bafy2bzaceapyg2uyzk7vueh3xccxkuwbz3nxewjyguoxvhx77malc2lzn2ybi")
const F3Enabled = true
//go:embed f3manifest_mainnet.json
var F3ManifestBytes []byte
//go:build !testground
// +build !testground
package buildconstants
import (
"os"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors/policy"
)
// /////
// Consensus / Network
func init() {
policy.SetConsensusMinerMinPower(ConsensusMinerMinPower)
policy.SetPreCommitChallengeDelay(PreCommitChallengeDelay)
}
const AllowableClockDriftSecs = uint64(1)
// Blocks (e)
var BlocksPerEpoch = uint64(builtin2.ExpectedLeadersPerEpoch)
// Epochs
const MessageConfidence = uint64(5)
// constants for Weight calculation
// The ratio of weight contributed by short-term vs long-term factors in a given round
const WRatioNum = int64(1)
const WRatioDen = uint64(2)
// /////
// Proofs
// Epochs
// TODO: unused
const SealRandomnessLookback = policy.SealRandomnessLookback
// /////
// Mining
// Epochs
const TicketRandomnessLookback = abi.ChainEpoch(1)
// /////
// Address
const AddressMainnetEnvVar = "_mainnet_"
// the 'f' prefix doesn't matter
var ZeroAddress = MustParseAddress("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a")
const FilBase = uint64(2_000_000_000)
const FilecoinPrecision = uint64(1_000_000_000_000_000_000)
var InitialRewardBalance = wholeFIL(1_100_000_000)
var InitialFilReserved = wholeFIL(300_000_000)
func init() {
if os.Getenv("LOTUS_ADDRESS_TYPE") == AddressMainnetEnvVar {
SetAddressNetwork(address.Mainnet)
}
}
// Sync
const BadBlockCacheSize = 1 << 15
// assuming 4000 messages per round, this lets us not lose any messages across a
// 10 block reorg.
const BlsSignatureCacheSize = 40000
// Size of signature verification cache
// 32k keeps the cache around 10MB in size, max
const VerifSigCacheSize = 32000
// ///////
// Limits
const BlockMessageLimit = 10000
var BlockGasLimit = int64(10_000_000_000)
var BlockGasTarget = BlockGasLimit / 2
const BaseFeeMaxChangeDenom int64 = 8 // 12.5%
const InitialBaseFee int64 = 100e6
const MinimumBaseFee int64 = 100
const PackingEfficiencyNum int64 = 4
const PackingEfficiencyDenom int64 = 5
// SafeHeightDistance is the distance from the latest tipset, i.e. heaviest, that
// is considered to be safe from re-orgs at an increasingly diminishing
// probability.
//
// This is used to determine the safe tipset when using the "safe" tag in
// TipSetSelector or via Eth JSON-RPC APIs. Note that "safe" doesn't guarantee
// finality, but rather a high probability of not being reverted. For guaranteed
// finality, use the "finalized" tag.
//
// This constant is experimental and may change in the future.
// Discussion on this current value and a tracking item to document the
// probabilistic impact of various values is in
// https://github.com/filecoin-project/go-f3/issues/944
const SafeHeightDistance abi.ChainEpoch = 200
package buildconstants
import (
"encoding/json"
"math/big"
"os"
"time"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-f3/manifest"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/lotus/chain/actors/policy"
)
// moved from now-defunct build/paramfetch.go
var log = logging.Logger("build/buildtypes")
func SetAddressNetwork(n address.Network) {
address.CurrentNetwork = n
}
func MustParseAddress(addr string) address.Address {
ret, err := address.NewFromString(addr)
if err != nil {
panic(err)
}
return ret
}
func IsNearUpgrade(epoch, upgradeEpoch abi.ChainEpoch) bool {
if upgradeEpoch < 0 {
return false
}
return epoch > upgradeEpoch-policy.ChainFinality && epoch < upgradeEpoch+policy.ChainFinality
}
func MustParseID(id string) peer.ID {
p, err := peer.Decode(id)
if err != nil {
panic(err)
}
return p
}
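// wholeFIL converts a whole-FIL amount into attoFIL by multiplying by
// FilecoinPrecision (10^18).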
func wholeFIL(whole uint64) *big.Int {
bigWhole := big.NewInt(int64(whole))
return bigWhole.Mul(bigWhole, big.NewInt(int64(FilecoinPrecision)))
}
func F3Manifest() *manifest.Manifest {
if F3ManifestBytes == nil {
return nil
}
var manif manifest.Manifest
if err := json.Unmarshal(F3ManifestBytes, &manif); err != nil {
log.Panicf("failed to unmarshal F3 manifest: %s", err)
}
if err := manif.Validate(); err != nil {
log.Panicf("invalid F3 manifest: %s", err)
}
if ptCid := os.Getenv("F3_INITIAL_POWERTABLE_CID"); ptCid != "" {
if k, err := cid.Parse(ptCid); err != nil {
log.Errorf("failed to parse F3_INITIAL_POWERTABLE_CID %q: %s", ptCid, err)
} else if manif.InitialPowerTable.Defined() && k != manif.InitialPowerTable {
log.Errorf("ignoring F3_INITIAL_POWERTABLE_CID as lotus has a hard-coded initial F3 power table")
} else {
manif.InitialPowerTable = k
}
}
if !manif.InitialPowerTable.Defined() {
log.Warn("initial power table is not specified, it will be populated automatically, assuming this is a test network")
}
// EC Period sanity check
if manif.EC.Period != time.Duration(BlockDelaySecs)*time.Second {
log.Panicf("static manifest EC period is %v, expected %v", manif.EC.Period, time.Duration(BlockDelaySecs)*time.Second)
}
return &manif
}
package actors
import (
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
actorstypes "github.com/filecoin-project/go-state-types/actors"
"github.com/filecoin-project/go-state-types/manifest"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
)
// GetActorCodeID looks up a builtin actor's code CID by actor version and canonical actor name.
func GetActorCodeID(av actorstypes.Version, name string) (cid.Cid, bool) {
// Actors V8 and above
if av >= actorstypes.Version8 {
if cids, ok := GetActorCodeIDsFromManifest(av); ok {
c, ok := cids[name]
return c, ok
}
}
// Actors V7 and lower
switch name {
case manifest.AccountKey:
switch av {
case actorstypes.Version0:
return builtin0.AccountActorCodeID, true
case actorstypes.Version2:
return builtin2.AccountActorCodeID, true
case actorstypes.Version3:
return builtin3.AccountActorCodeID, true
case actorstypes.Version4:
return builtin4.AccountActorCodeID, true
case actorstypes.Version5:
return builtin5.AccountActorCodeID, true
case actorstypes.Version6:
return builtin6.AccountActorCodeID, true
case actorstypes.Version7:
return builtin7.AccountActorCodeID, true
}
case manifest.CronKey:
switch av {
case actorstypes.Version0:
return builtin0.CronActorCodeID, true
case actorstypes.Version2:
return builtin2.CronActorCodeID, true
case actorstypes.Version3:
return builtin3.CronActorCodeID, true
case actorstypes.Version4:
return builtin4.CronActorCodeID, true
case actorstypes.Version5:
return builtin5.CronActorCodeID, true
case actorstypes.Version6:
return builtin6.CronActorCodeID, true
case actorstypes.Version7:
return builtin7.CronActorCodeID, true
}
case manifest.InitKey:
switch av {
case actorstypes.Version0:
return builtin0.InitActorCodeID, true
case actorstypes.Version2:
return builtin2.InitActorCodeID, true
case actorstypes.Version3:
return builtin3.InitActorCodeID, true
case actorstypes.Version4:
return builtin4.InitActorCodeID, true
case actorstypes.Version5:
return builtin5.InitActorCodeID, true
case actorstypes.Version6:
return builtin6.InitActorCodeID, true
case actorstypes.Version7:
return builtin7.InitActorCodeID, true
}
case manifest.MarketKey:
switch av {
case actorstypes.Version0:
return builtin0.StorageMarketActorCodeID, true
case actorstypes.Version2:
return builtin2.StorageMarketActorCodeID, true
case actorstypes.Version3:
return builtin3.StorageMarketActorCodeID, true
case actorstypes.Version4:
return builtin4.StorageMarketActorCodeID, true
case actorstypes.Version5:
return builtin5.StorageMarketActorCodeID, true
case actorstypes.Version6:
return builtin6.StorageMarketActorCodeID, true
case actorstypes.Version7:
return builtin7.StorageMarketActorCodeID, true
}
case manifest.MinerKey:
switch av {
case actorstypes.Version0:
return builtin0.StorageMinerActorCodeID, true
case actorstypes.Version2:
return builtin2.StorageMinerActorCodeID, true
case actorstypes.Version3:
return builtin3.StorageMinerActorCodeID, true
case actorstypes.Version4:
return builtin4.StorageMinerActorCodeID, true
case actorstypes.Version5:
return builtin5.StorageMinerActorCodeID, true
case actorstypes.Version6:
return builtin6.StorageMinerActorCodeID, true
case actorstypes.Version7:
return builtin7.StorageMinerActorCodeID, true
}
case manifest.MultisigKey:
switch av {
case actorstypes.Version0:
return builtin0.MultisigActorCodeID, true
case actorstypes.Version2:
return builtin2.MultisigActorCodeID, true
case actorstypes.Version3:
return builtin3.MultisigActorCodeID, true
case actorstypes.Version4:
return builtin4.MultisigActorCodeID, true
case actorstypes.Version5:
return builtin5.MultisigActorCodeID, true
case actorstypes.Version6:
return builtin6.MultisigActorCodeID, true
case actorstypes.Version7:
return builtin7.MultisigActorCodeID, true
}
case manifest.PaychKey:
switch av {
case actorstypes.Version0:
return builtin0.PaymentChannelActorCodeID, true
case actorstypes.Version2:
return builtin2.PaymentChannelActorCodeID, true
case actorstypes.Version3:
return builtin3.PaymentChannelActorCodeID, true
case actorstypes.Version4:
return builtin4.PaymentChannelActorCodeID, true
case actorstypes.Version5:
return builtin5.PaymentChannelActorCodeID, true
case actorstypes.Version6:
return builtin6.PaymentChannelActorCodeID, true
case actorstypes.Version7:
return builtin7.PaymentChannelActorCodeID, true
}
case manifest.PowerKey:
switch av {
case actorstypes.Version0:
return builtin0.StoragePowerActorCodeID, true
case actorstypes.Version2:
return builtin2.StoragePowerActorCodeID, true
case actorstypes.Version3:
return builtin3.StoragePowerActorCodeID, true
case actorstypes.Version4:
return builtin4.StoragePowerActorCodeID, true
case actorstypes.Version5:
return builtin5.StoragePowerActorCodeID, true
case actorstypes.Version6:
return builtin6.StoragePowerActorCodeID, true
case actorstypes.Version7:
return builtin7.StoragePowerActorCodeID, true
}
case manifest.RewardKey:
switch av {
case actorstypes.Version0:
return builtin0.RewardActorCodeID, true
case actorstypes.Version2:
return builtin2.RewardActorCodeID, true
case actorstypes.Version3:
return builtin3.RewardActorCodeID, true
case actorstypes.Version4:
return builtin4.RewardActorCodeID, true
case actorstypes.Version5:
return builtin5.RewardActorCodeID, true
case actorstypes.Version6:
return builtin6.RewardActorCodeID, true
case actorstypes.Version7:
return builtin7.RewardActorCodeID, true
}
case manifest.SystemKey:
switch av {
case actorstypes.Version0:
return builtin0.SystemActorCodeID, true
case actorstypes.Version2:
return builtin2.SystemActorCodeID, true
case actorstypes.Version3:
return builtin3.SystemActorCodeID, true
case actorstypes.Version4:
return builtin4.SystemActorCodeID, true
case actorstypes.Version5:
return builtin5.SystemActorCodeID, true
case actorstypes.Version6:
return builtin6.SystemActorCodeID, true
case actorstypes.Version7:
return builtin7.SystemActorCodeID, true
}
case manifest.VerifregKey:
switch av {
case actorstypes.Version0:
return builtin0.VerifiedRegistryActorCodeID, true
case actorstypes.Version2:
return builtin2.VerifiedRegistryActorCodeID, true
case actorstypes.Version3:
return builtin3.VerifiedRegistryActorCodeID, true
case actorstypes.Version4:
return builtin4.VerifiedRegistryActorCodeID, true
case actorstypes.Version5:
return builtin5.VerifiedRegistryActorCodeID, true
case actorstypes.Version6:
return builtin6.VerifiedRegistryActorCodeID, true
case actorstypes.Version7:
return builtin7.VerifiedRegistryActorCodeID, true
}
}
return cid.Undef, false
}
// GetActorCodeIDs looks up all builtin actors' code CIDs for a given actor version.
func GetActorCodeIDs(av actorstypes.Version) (map[string]cid.Cid, error) {
cids, ok := GetActorCodeIDsFromManifest(av)
if ok {
return cids, nil
}
actorsKeys := manifest.GetBuiltinActorsKeys(av)
synthCids := make(map[string]cid.Cid)
for _, key := range actorsKeys {
c, ok := GetActorCodeID(av, key)
if !ok {
return nil, xerrors.Errorf("could not find builtin actor cids for Actors version %d", av)
}
synthCids[key] = c
}
return synthCids, nil
}
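// minerCodeCIDForVersion is a hypothetical convenience wrapper (not part of this
// file) illustrating the intended use of GetActorCodeID: resolve a single
// canonical actor name for a concrete actors version, failing loudly when the
// pair is unknown.
func minerCodeCIDForVersion(av actorstypes.Version) (cid.Cid, error) {
	c, ok := GetActorCodeID(av, manifest.MinerKey)
	if !ok {
		return cid.Undef, xerrors.Errorf("no miner actor code CID for actors version %d", av)
	}
	return c, nil
}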
package adt
import (
"bytes"
typegen "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/go-state-types/abi"
)
// AdtArrayDiff generalizes adt.Array diffing by accepting a Deferred type that can be unmarshalled to its corresponding struct
// in an interface implementation.
// Add should be called when a new k,v is added to the array
// Modify should be called when a value is modified in the array
// Remove should be called when a value is removed from the array
type AdtArrayDiff interface {
Add(key uint64, val *typegen.Deferred) error
Modify(key uint64, from, to *typegen.Deferred) error
Remove(key uint64, val *typegen.Deferred) error
}
// TODO Performance can be improved by diffing the underlying IPLD graph, e.g. https://github.com/ipfs/go-merkledag/blob/749fd8717d46b4f34c9ce08253070079c89bc56d/dagutils/diff.go#L104
// CBOR Marshaling will likely be the largest performance bottleneck here.
// DiffAdtArray accepts two *adt.Array's and an AdtArrayDiff implementation. It does the following:
// - All values that exist in preArr and not in curArr are passed to AdtArrayDiff.Remove()
// - All values that exist in curArr and not in preArr are passed to AdtArrayDiff.Add()
// - All values that exist in preArr and in curArr are passed to AdtArrayDiff.Modify()
// - It is the responsibility of AdtArrayDiff.Modify() to determine if the values it was passed have been modified.
func DiffAdtArray(preArr, curArr Array, out AdtArrayDiff) error {
notNew := make(map[int64]struct{}, curArr.Length())
prevVal := new(typegen.Deferred)
if err := preArr.ForEach(prevVal, func(i int64) error {
curVal := new(typegen.Deferred)
found, err := curArr.Get(uint64(i), curVal)
if err != nil {
return err
}
if !found {
if err := out.Remove(uint64(i), prevVal); err != nil {
return err
}
return nil
}
// call Modify only when the raw values differ
if !bytes.Equal(prevVal.Raw, curVal.Raw) {
if err := out.Modify(uint64(i), prevVal, curVal); err != nil {
return err
}
}
notNew[i] = struct{}{}
return nil
}); err != nil {
return err
}
curVal := new(typegen.Deferred)
return curArr.ForEach(curVal, func(i int64) error {
if _, ok := notNew[i]; ok {
return nil
}
return out.Add(uint64(i), curVal)
})
}
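// changeCollector is a minimal sketch (not part of this package) of an
// AdtArrayDiff implementation: it only records which keys were added, modified
// or removed, ignoring the deferred CBOR payloads.
type changeCollector struct {
	added, modified, removed []uint64
}

func (c *changeCollector) Add(key uint64, _ *typegen.Deferred) error {
	c.added = append(c.added, key)
	return nil
}

func (c *changeCollector) Modify(key uint64, _, _ *typegen.Deferred) error {
	c.modified = append(c.modified, key)
	return nil
}

func (c *changeCollector) Remove(key uint64, _ *typegen.Deferred) error {
	c.removed = append(c.removed, key)
	return nil
}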
// TODO Performance can be improved by diffing the underlying IPLD graph, e.g. https://github.com/ipfs/go-merkledag/blob/749fd8717d46b4f34c9ce08253070079c89bc56d/dagutils/diff.go#L104
// CBOR Marshaling will likely be the largest performance bottleneck here.
// AdtMapDiff generalizes adt.Map diffing by accepting a Deferred type that can be unmarshalled to its corresponding struct
// in an interface implementation.
// AsKey should return the Keyer implementation specific to the map
// Add should be called when a new k,v is added to the map
// Modify should be called when a value is modified in the map
// Remove should be called when a value is removed from the map
type AdtMapDiff interface {
AsKey(key string) (abi.Keyer, error)
Add(key string, val *typegen.Deferred) error
Modify(key string, from, to *typegen.Deferred) error
Remove(key string, val *typegen.Deferred) error
}
func DiffAdtMap(preMap, curMap Map, out AdtMapDiff) error {
notNew := make(map[string]struct{})
prevVal := new(typegen.Deferred)
if err := preMap.ForEach(prevVal, func(key string) error {
curVal := new(typegen.Deferred)
k, err := out.AsKey(key)
if err != nil {
return err
}
found, err := curMap.Get(k, curVal)
if err != nil {
return err
}
if !found {
if err := out.Remove(key, prevVal); err != nil {
return err
}
return nil
}
// call Modify only when the raw values differ
if !bytes.Equal(prevVal.Raw, curVal.Raw) {
if err := out.Modify(key, prevVal, curVal); err != nil {
return err
}
}
notNew[key] = struct{}{}
return nil
}); err != nil {
return err
}
curVal := new(typegen.Deferred)
return curMap.ForEach(curVal, func(key string) error {
if _, ok := notNew[key]; ok {
return nil
}
return out.Add(key, curVal)
})
}
package adt
import (
"context"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/filecoin-project/specs-actors/actors/util/adt"
)
type Store interface {
Context() context.Context
cbor.IpldStore
}
func WrapStore(ctx context.Context, store cbor.IpldStore) Store {
return adt.WrapStore(ctx, store)
}
package aerrors
import (
"fmt"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/exitcode"
)
func IsFatal(err ActorError) bool {
return err != nil && err.IsFatal()
}
func RetCode(err ActorError) exitcode.ExitCode {
if err == nil {
return 0
}
return err.RetCode()
}
type internalActorError interface {
ActorError
FormatError(p xerrors.Printer) (next error)
Unwrap() error
}
type ActorError interface {
error
IsFatal() bool
RetCode() exitcode.ExitCode
}
type actorError struct {
fatal bool
retCode exitcode.ExitCode
msg string
frame xerrors.Frame
err error
}
func (e *actorError) IsFatal() bool {
return e.fatal
}
func (e *actorError) RetCode() exitcode.ExitCode {
return e.retCode
}
func (e *actorError) Error() string {
return fmt.Sprint(e)
}
func (e *actorError) Format(s fmt.State, v rune) { xerrors.FormatError(e, s, v) }
func (e *actorError) FormatError(p xerrors.Printer) (next error) {
p.Print(e.msg)
if e.fatal {
p.Print(" (FATAL)")
} else {
p.Printf(" (RetCode=%d)", e.retCode)
}
e.frame.Format(p)
return e.err
}
func (e *actorError) Unwrap() error {
return e.err
}
var _ internalActorError = (*actorError)(nil)
package aerrors
import (
"errors"
"fmt"
cbor "github.com/ipfs/go-ipld-cbor"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/exitcode"
)
// New creates a new non-fatal error
func New(retCode exitcode.ExitCode, message string) ActorError {
if retCode == 0 {
return &actorError{
fatal: true,
retCode: 0,
msg: "tried creating an error and setting RetCode to 0",
frame: xerrors.Caller(1),
err: errors.New(message),
}
}
return &actorError{
retCode: retCode,
msg: message,
frame: xerrors.Caller(1),
}
}
// Newf creates a new non-fatal error
func Newf(retCode exitcode.ExitCode, format string, args ...interface{}) ActorError {
if retCode == 0 {
return &actorError{
fatal: true,
retCode: 0,
msg: "tried creating an error and setting RetCode to 0",
frame: xerrors.Caller(1),
err: fmt.Errorf(format, args...),
}
}
return &actorError{
retCode: retCode,
msg: fmt.Sprintf(format, args...),
frame: xerrors.Caller(1),
}
}
// todo: bit hacky
func NewfSkip(skip int, retCode exitcode.ExitCode, format string, args ...interface{}) ActorError {
if retCode == 0 {
return &actorError{
fatal: true,
retCode: 0,
msg: "tried creating an error and setting RetCode to 0",
frame: xerrors.Caller(skip),
err: fmt.Errorf(format, args...),
}
}
return &actorError{
retCode: retCode,
msg: fmt.Sprintf(format, args...),
frame: xerrors.Caller(skip),
}
}
func Fatal(message string, args ...interface{}) ActorError {
return &actorError{
fatal: true,
msg: message,
frame: xerrors.Caller(1),
}
}
func Fatalf(format string, args ...interface{}) ActorError {
return &actorError{
fatal: true,
msg: fmt.Sprintf(format, args...),
frame: xerrors.Caller(1),
}
}
// Wrap extends a chain of errors with a message
func Wrap(err ActorError, message string) ActorError {
if err == nil {
return nil
}
return &actorError{
fatal: IsFatal(err),
retCode: RetCode(err),
msg: message,
frame: xerrors.Caller(1),
err: err,
}
}
// Wrapf extends a chain of errors with a formatted message
func Wrapf(err ActorError, format string, args ...interface{}) ActorError {
if err == nil {
return nil
}
return &actorError{
fatal: IsFatal(err),
retCode: RetCode(err),
msg: fmt.Sprintf(format, args...),
frame: xerrors.Caller(1),
err: err,
}
}
// Absorb takes an error and makes it into a non-fatal ActorError
func Absorb(err error, retCode exitcode.ExitCode, msg string) ActorError {
if err == nil {
return nil
}
if aerr, ok := err.(ActorError); ok && IsFatal(aerr) {
return &actorError{
fatal: true,
retCode: 0,
msg: "tried absorbing an error that is already a fatal error",
frame: xerrors.Caller(1),
err: err,
}
}
if retCode == 0 {
return &actorError{
fatal: true,
retCode: 0,
msg: "tried absorbing an error and setting RetCode to 0",
frame: xerrors.Caller(1),
err: err,
}
}
return &actorError{
fatal: false,
retCode: retCode,
msg: msg,
frame: xerrors.Caller(1),
err: err,
}
}
// Escalate takes an error and escalates it into a fatal error
func Escalate(err error, msg string) ActorError {
if err == nil {
return nil
}
return &actorError{
fatal: true,
msg: msg,
frame: xerrors.Caller(1),
err: err,
}
}
func HandleExternalError(err error, msg string) ActorError {
if err == nil {
return nil
}
if aerr, ok := err.(ActorError); ok {
return &actorError{
fatal: IsFatal(aerr),
retCode: RetCode(aerr),
msg: msg,
frame: xerrors.Caller(1),
err: aerr,
}
}
if errors.Is(err, &cbor.SerializationError{}) {
return &actorError{
fatal: false,
retCode: 253,
msg: msg,
frame: xerrors.Caller(1),
err: err,
}
}
return &actorError{
fatal: false,
retCode: 219,
msg: msg,
frame: xerrors.Caller(1),
err: err,
}
}
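// exampleActorErrorFlow is a hypothetical usage sketch (not part of this
// package): create a non-fatal actor error, add context with Wrap, and inspect
// it the way callers are expected to.
func exampleActorErrorFlow() (fatal bool, code exitcode.ExitCode) {
	base := New(exitcode.ErrIllegalArgument, "bad parameter")
	wrapped := Wrap(base, "while validating message")
	// the exit code of the original error is preserved through Wrap,
	// and fatality is inspected via IsFatal rather than the exit code
	return IsFatal(wrapped), RetCode(wrapped)
}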
package builtin
import (
"fmt"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/builtin"
minertypes "github.com/filecoin-project/go-state-types/builtin/v15/miner"
smoothingtypes "github.com/filecoin-project/go-state-types/builtin/v8/util/smoothing"
"github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/go-state-types/proof"
builtin0 "github.com/filecoin-project/specs-actors/actors/builtin"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
"github.com/filecoin-project/lotus/chain/actors"
)
var InitActorAddr = builtin.InitActorAddr
var SystemActorAddr = builtin.SystemActorAddr
var BurntFundsActorAddr = builtin.BurntFundsActorAddr
var CronActorAddr = builtin.CronActorAddr
var DatacapActorAddr = builtin.DatacapActorAddr
var EthereumAddressManagerActorAddr = builtin.EthereumAddressManagerActorAddr
var SaftAddress = makeAddress("t0122")
var ReserveAddress = makeAddress("t090")
var RootVerifierAddress = makeAddress("t080")
var (
ExpectedLeadersPerEpoch = builtin.ExpectedLeadersPerEpoch
)
const (
EpochDurationSeconds = builtin.EpochDurationSeconds
EpochsInDay = builtin.EpochsInDay
EpochsInYear = builtin.EpochsInYear
SecondsInDay = builtin.SecondsInDay
)
const (
MethodSend = builtin.MethodSend
MethodConstructor = builtin.MethodConstructor
)
// These are all just type aliases across actor versions. In the future, that might change
// and we might need to do something fancier.
type SectorInfo = proof.SectorInfo
type ExtendedSectorInfo = proof.ExtendedSectorInfo
type PoStProof = proof.PoStProof
type FilterEstimate = smoothingtypes.FilterEstimate
func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, verifiedWeight abi.DealWeight) abi.StoragePower {
return minertypes.QAPowerForWeight(size, duration, verifiedWeight)
}
func ActorNameByCode(c cid.Cid) string {
if name, version, ok := actors.GetActorMetaByCode(c); ok {
return fmt.Sprintf("fil/%d/%s", version, name)
}
switch {
case builtin0.IsBuiltinActor(c):
return builtin0.ActorNameByCode(c)
case builtin2.IsBuiltinActor(c):
return builtin2.ActorNameByCode(c)
case builtin3.IsBuiltinActor(c):
return builtin3.ActorNameByCode(c)
case builtin4.IsBuiltinActor(c):
return builtin4.ActorNameByCode(c)
case builtin5.IsBuiltinActor(c):
return builtin5.ActorNameByCode(c)
case builtin6.IsBuiltinActor(c):
return builtin6.ActorNameByCode(c)
case builtin7.IsBuiltinActor(c):
return builtin7.ActorNameByCode(c)
default:
return "<unknown>"
}
}
func IsBuiltinActor(c cid.Cid) bool {
_, _, ok := actors.GetActorMetaByCode(c)
if ok {
return true
}
if builtin0.IsBuiltinActor(c) {
return true
}
if builtin2.IsBuiltinActor(c) {
return true
}
if builtin3.IsBuiltinActor(c) {
return true
}
if builtin4.IsBuiltinActor(c) {
return true
}
if builtin5.IsBuiltinActor(c) {
return true
}
if builtin6.IsBuiltinActor(c) {
return true
}
if builtin7.IsBuiltinActor(c) {
return true
}
return false
}
func IsAccountActor(c cid.Cid) bool {
name, _, ok := actors.GetActorMetaByCode(c)
if ok {
return name == "account"
}
if c == builtin0.AccountActorCodeID {
return true
}
if c == builtin2.AccountActorCodeID {
return true
}
if c == builtin3.AccountActorCodeID {
return true
}
if c == builtin4.AccountActorCodeID {
return true
}
if c == builtin5.AccountActorCodeID {
return true
}
if c == builtin6.AccountActorCodeID {
return true
}
if c == builtin7.AccountActorCodeID {
return true
}
return false
}
func IsStorageMinerActor(c cid.Cid) bool {
name, _, ok := actors.GetActorMetaByCode(c)
if ok {
return name == manifest.MinerKey
}
if c == builtin0.StorageMinerActorCodeID {
return true
}
if c == builtin2.StorageMinerActorCodeID {
return true
}
if c == builtin3.StorageMinerActorCodeID {
return true
}
if c == builtin4.StorageMinerActorCodeID {
return true
}
if c == builtin5.StorageMinerActorCodeID {
return true
}
if c == builtin6.StorageMinerActorCodeID {
return true
}
if c == builtin7.StorageMinerActorCodeID {
return true
}
return false
}
func IsMultisigActor(c cid.Cid) bool {
name, _, ok := actors.GetActorMetaByCode(c)
if ok {
return name == manifest.MultisigKey
}
if c == builtin0.MultisigActorCodeID {
return true
}
if c == builtin2.MultisigActorCodeID {
return true
}
if c == builtin3.MultisigActorCodeID {
return true
}
if c == builtin4.MultisigActorCodeID {
return true
}
if c == builtin5.MultisigActorCodeID {
return true
}
if c == builtin6.MultisigActorCodeID {
return true
}
if c == builtin7.MultisigActorCodeID {
return true
}
return false
}
func IsPaymentChannelActor(c cid.Cid) bool {
name, _, ok := actors.GetActorMetaByCode(c)
if ok {
return name == "paymentchannel"
}
if c == builtin0.PaymentChannelActorCodeID {
return true
}
if c == builtin2.PaymentChannelActorCodeID {
return true
}
if c == builtin3.PaymentChannelActorCodeID {
return true
}
if c == builtin4.PaymentChannelActorCodeID {
return true
}
if c == builtin5.PaymentChannelActorCodeID {
return true
}
if c == builtin6.PaymentChannelActorCodeID {
return true
}
if c == builtin7.PaymentChannelActorCodeID {
return true
}
return false
}
func IsPlaceholderActor(c cid.Cid) bool {
name, _, ok := actors.GetActorMetaByCode(c)
if ok {
return name == manifest.PlaceholderKey
}
return false
}
func IsEvmActor(c cid.Cid) bool {
name, _, ok := actors.GetActorMetaByCode(c)
if ok {
return name == manifest.EvmKey
}
return false
}
func IsEthAccountActor(c cid.Cid) bool {
name, _, ok := actors.GetActorMetaByCode(c)
if ok {
return name == manifest.EthAccountKey
}
return false
}
func makeAddress(addr string) address.Address {
ret, err := address.NewFromString(addr)
if err != nil {
panic(err)
}
return ret
}
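// describeActor is a hypothetical helper (not part of this file) showing how the
// predicates above are typically combined: map a code CID to a printable name
// plus a coarse classification.
func describeActor(c cid.Cid) string {
	switch {
	case !IsBuiltinActor(c):
		return "<not a builtin actor>"
	case IsStorageMinerActor(c):
		return ActorNameByCode(c) + " (storage miner)"
	case IsAccountActor(c):
		return ActorNameByCode(c) + " (account)"
	default:
		return ActorNameByCode(c)
	}
}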
package builtin
import (
"reflect"
"runtime"
"strings"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
"github.com/filecoin-project/go-state-types/builtin"
account10 "github.com/filecoin-project/go-state-types/builtin/v10/account"
cron10 "github.com/filecoin-project/go-state-types/builtin/v10/cron"
datacap10 "github.com/filecoin-project/go-state-types/builtin/v10/datacap"
eam10 "github.com/filecoin-project/go-state-types/builtin/v10/eam"
ethaccount10 "github.com/filecoin-project/go-state-types/builtin/v10/ethaccount"
evm10 "github.com/filecoin-project/go-state-types/builtin/v10/evm"
_init10 "github.com/filecoin-project/go-state-types/builtin/v10/init"
market10 "github.com/filecoin-project/go-state-types/builtin/v10/market"
miner10 "github.com/filecoin-project/go-state-types/builtin/v10/miner"
multisig10 "github.com/filecoin-project/go-state-types/builtin/v10/multisig"
paych10 "github.com/filecoin-project/go-state-types/builtin/v10/paych"
placeholder10 "github.com/filecoin-project/go-state-types/builtin/v10/placeholder"
power10 "github.com/filecoin-project/go-state-types/builtin/v10/power"
reward10 "github.com/filecoin-project/go-state-types/builtin/v10/reward"
system10 "github.com/filecoin-project/go-state-types/builtin/v10/system"
verifreg10 "github.com/filecoin-project/go-state-types/builtin/v10/verifreg"
account11 "github.com/filecoin-project/go-state-types/builtin/v11/account"
cron11 "github.com/filecoin-project/go-state-types/builtin/v11/cron"
datacap11 "github.com/filecoin-project/go-state-types/builtin/v11/datacap"
eam11 "github.com/filecoin-project/go-state-types/builtin/v11/eam"
ethaccount11 "github.com/filecoin-project/go-state-types/builtin/v11/ethaccount"
evm11 "github.com/filecoin-project/go-state-types/builtin/v11/evm"
_init11 "github.com/filecoin-project/go-state-types/builtin/v11/init"
market11 "github.com/filecoin-project/go-state-types/builtin/v11/market"
miner11 "github.com/filecoin-project/go-state-types/builtin/v11/miner"
multisig11 "github.com/filecoin-project/go-state-types/builtin/v11/multisig"
paych11 "github.com/filecoin-project/go-state-types/builtin/v11/paych"
placeholder11 "github.com/filecoin-project/go-state-types/builtin/v11/placeholder"
power11 "github.com/filecoin-project/go-state-types/builtin/v11/power"
reward11 "github.com/filecoin-project/go-state-types/builtin/v11/reward"
system11 "github.com/filecoin-project/go-state-types/builtin/v11/system"
verifreg11 "github.com/filecoin-project/go-state-types/builtin/v11/verifreg"
account12 "github.com/filecoin-project/go-state-types/builtin/v12/account"
cron12 "github.com/filecoin-project/go-state-types/builtin/v12/cron"
datacap12 "github.com/filecoin-project/go-state-types/builtin/v12/datacap"
eam12 "github.com/filecoin-project/go-state-types/builtin/v12/eam"
ethaccount12 "github.com/filecoin-project/go-state-types/builtin/v12/ethaccount"
evm12 "github.com/filecoin-project/go-state-types/builtin/v12/evm"
_init12 "github.com/filecoin-project/go-state-types/builtin/v12/init"
market12 "github.com/filecoin-project/go-state-types/builtin/v12/market"
miner12 "github.com/filecoin-project/go-state-types/builtin/v12/miner"
multisig12 "github.com/filecoin-project/go-state-types/builtin/v12/multisig"
paych12 "github.com/filecoin-project/go-state-types/builtin/v12/paych"
placeholder12 "github.com/filecoin-project/go-state-types/builtin/v12/placeholder"
power12 "github.com/filecoin-project/go-state-types/builtin/v12/power"
reward12 "github.com/filecoin-project/go-state-types/builtin/v12/reward"
system12 "github.com/filecoin-project/go-state-types/builtin/v12/system"
verifreg12 "github.com/filecoin-project/go-state-types/builtin/v12/verifreg"
account13 "github.com/filecoin-project/go-state-types/builtin/v13/account"
cron13 "github.com/filecoin-project/go-state-types/builtin/v13/cron"
datacap13 "github.com/filecoin-project/go-state-types/builtin/v13/datacap"
eam13 "github.com/filecoin-project/go-state-types/builtin/v13/eam"
ethaccount13 "github.com/filecoin-project/go-state-types/builtin/v13/ethaccount"
evm13 "github.com/filecoin-project/go-state-types/builtin/v13/evm"
_init13 "github.com/filecoin-project/go-state-types/builtin/v13/init"
market13 "github.com/filecoin-project/go-state-types/builtin/v13/market"
miner13 "github.com/filecoin-project/go-state-types/builtin/v13/miner"
multisig13 "github.com/filecoin-project/go-state-types/builtin/v13/multisig"
paych13 "github.com/filecoin-project/go-state-types/builtin/v13/paych"
placeholder13 "github.com/filecoin-project/go-state-types/builtin/v13/placeholder"
power13 "github.com/filecoin-project/go-state-types/builtin/v13/power"
reward13 "github.com/filecoin-project/go-state-types/builtin/v13/reward"
system13 "github.com/filecoin-project/go-state-types/builtin/v13/system"
verifreg13 "github.com/filecoin-project/go-state-types/builtin/v13/verifreg"
account14 "github.com/filecoin-project/go-state-types/builtin/v14/account"
cron14 "github.com/filecoin-project/go-state-types/builtin/v14/cron"
datacap14 "github.com/filecoin-project/go-state-types/builtin/v14/datacap"
eam14 "github.com/filecoin-project/go-state-types/builtin/v14/eam"
ethaccount14 "github.com/filecoin-project/go-state-types/builtin/v14/ethaccount"
evm14 "github.com/filecoin-project/go-state-types/builtin/v14/evm"
_init14 "github.com/filecoin-project/go-state-types/builtin/v14/init"
market14 "github.com/filecoin-project/go-state-types/builtin/v14/market"
miner14 "github.com/filecoin-project/go-state-types/builtin/v14/miner"
multisig14 "github.com/filecoin-project/go-state-types/builtin/v14/multisig"
paych14 "github.com/filecoin-project/go-state-types/builtin/v14/paych"
placeholder14 "github.com/filecoin-project/go-state-types/builtin/v14/placeholder"
power14 "github.com/filecoin-project/go-state-types/builtin/v14/power"
reward14 "github.com/filecoin-project/go-state-types/builtin/v14/reward"
system14 "github.com/filecoin-project/go-state-types/builtin/v14/system"
verifreg14 "github.com/filecoin-project/go-state-types/builtin/v14/verifreg"
account15 "github.com/filecoin-project/go-state-types/builtin/v15/account"
cron15 "github.com/filecoin-project/go-state-types/builtin/v15/cron"
datacap15 "github.com/filecoin-project/go-state-types/builtin/v15/datacap"
eam15 "github.com/filecoin-project/go-state-types/builtin/v15/eam"
ethaccount15 "github.com/filecoin-project/go-state-types/builtin/v15/ethaccount"
evm15 "github.com/filecoin-project/go-state-types/builtin/v15/evm"
_init15 "github.com/filecoin-project/go-state-types/builtin/v15/init"
market15 "github.com/filecoin-project/go-state-types/builtin/v15/market"
miner15 "github.com/filecoin-project/go-state-types/builtin/v15/miner"
multisig15 "github.com/filecoin-project/go-state-types/builtin/v15/multisig"
paych15 "github.com/filecoin-project/go-state-types/builtin/v15/paych"
placeholder15 "github.com/filecoin-project/go-state-types/builtin/v15/placeholder"
power15 "github.com/filecoin-project/go-state-types/builtin/v15/power"
reward15 "github.com/filecoin-project/go-state-types/builtin/v15/reward"
system15 "github.com/filecoin-project/go-state-types/builtin/v15/system"
verifreg15 "github.com/filecoin-project/go-state-types/builtin/v15/verifreg"
account16 "github.com/filecoin-project/go-state-types/builtin/v16/account"
cron16 "github.com/filecoin-project/go-state-types/builtin/v16/cron"
datacap16 "github.com/filecoin-project/go-state-types/builtin/v16/datacap"
eam16 "github.com/filecoin-project/go-state-types/builtin/v16/eam"
ethaccount16 "github.com/filecoin-project/go-state-types/builtin/v16/ethaccount"
evm16 "github.com/filecoin-project/go-state-types/builtin/v16/evm"
_init16 "github.com/filecoin-project/go-state-types/builtin/v16/init"
market16 "github.com/filecoin-project/go-state-types/builtin/v16/market"
miner16 "github.com/filecoin-project/go-state-types/builtin/v16/miner"
multisig16 "github.com/filecoin-project/go-state-types/builtin/v16/multisig"
paych16 "github.com/filecoin-project/go-state-types/builtin/v16/paych"
placeholder16 "github.com/filecoin-project/go-state-types/builtin/v16/placeholder"
power16 "github.com/filecoin-project/go-state-types/builtin/v16/power"
reward16 "github.com/filecoin-project/go-state-types/builtin/v16/reward"
system16 "github.com/filecoin-project/go-state-types/builtin/v16/system"
verifreg16 "github.com/filecoin-project/go-state-types/builtin/v16/verifreg"
account17 "github.com/filecoin-project/go-state-types/builtin/v17/account"
cron17 "github.com/filecoin-project/go-state-types/builtin/v17/cron"
datacap17 "github.com/filecoin-project/go-state-types/builtin/v17/datacap"
eam17 "github.com/filecoin-project/go-state-types/builtin/v17/eam"
ethaccount17 "github.com/filecoin-project/go-state-types/builtin/v17/ethaccount"
evm17 "github.com/filecoin-project/go-state-types/builtin/v17/evm"
_init17 "github.com/filecoin-project/go-state-types/builtin/v17/init"
market17 "github.com/filecoin-project/go-state-types/builtin/v17/market"
miner17 "github.com/filecoin-project/go-state-types/builtin/v17/miner"
multisig17 "github.com/filecoin-project/go-state-types/builtin/v17/multisig"
paych17 "github.com/filecoin-project/go-state-types/builtin/v17/paych"
placeholder17 "github.com/filecoin-project/go-state-types/builtin/v17/placeholder"
power17 "github.com/filecoin-project/go-state-types/builtin/v17/power"
reward17 "github.com/filecoin-project/go-state-types/builtin/v17/reward"
system17 "github.com/filecoin-project/go-state-types/builtin/v17/system"
verifreg17 "github.com/filecoin-project/go-state-types/builtin/v17/verifreg"
account8 "github.com/filecoin-project/go-state-types/builtin/v8/account"
cron8 "github.com/filecoin-project/go-state-types/builtin/v8/cron"
_init8 "github.com/filecoin-project/go-state-types/builtin/v8/init"
market8 "github.com/filecoin-project/go-state-types/builtin/v8/market"
miner8 "github.com/filecoin-project/go-state-types/builtin/v8/miner"
multisig8 "github.com/filecoin-project/go-state-types/builtin/v8/multisig"
paych8 "github.com/filecoin-project/go-state-types/builtin/v8/paych"
power8 "github.com/filecoin-project/go-state-types/builtin/v8/power"
reward8 "github.com/filecoin-project/go-state-types/builtin/v8/reward"
system8 "github.com/filecoin-project/go-state-types/builtin/v8/system"
verifreg8 "github.com/filecoin-project/go-state-types/builtin/v8/verifreg"
account9 "github.com/filecoin-project/go-state-types/builtin/v9/account"
cron9 "github.com/filecoin-project/go-state-types/builtin/v9/cron"
datacap9 "github.com/filecoin-project/go-state-types/builtin/v9/datacap"
_init9 "github.com/filecoin-project/go-state-types/builtin/v9/init"
market9 "github.com/filecoin-project/go-state-types/builtin/v9/market"
miner9 "github.com/filecoin-project/go-state-types/builtin/v9/miner"
multisig9 "github.com/filecoin-project/go-state-types/builtin/v9/multisig"
paych9 "github.com/filecoin-project/go-state-types/builtin/v9/paych"
power9 "github.com/filecoin-project/go-state-types/builtin/v9/power"
reward9 "github.com/filecoin-project/go-state-types/builtin/v9/reward"
system9 "github.com/filecoin-project/go-state-types/builtin/v9/system"
verifreg9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/manifest"
rtt "github.com/filecoin-project/go-state-types/rt"
"github.com/filecoin-project/lotus/chain/actors"
)
type RegistryEntry struct {
state cbor.Er
code cid.Cid
methods map[abi.MethodNum]builtin.MethodMeta
}
func (r RegistryEntry) State() cbor.Er {
return r.state
}
func (r RegistryEntry) Exports() map[abi.MethodNum]builtin.MethodMeta {
return r.methods
}
func (r RegistryEntry) Code() cid.Cid {
return r.code
}
func MakeRegistryLegacy(actors []rtt.VMActor) []RegistryEntry {
registry := make([]RegistryEntry, 0)
for _, actor := range actors {
methodMap := make(map[abi.MethodNum]builtin.MethodMeta)
for methodNum, method := range actor.Exports() {
if method != nil {
methodMap[abi.MethodNum(methodNum)] = makeMethodMeta(method)
}
}
registry = append(registry, RegistryEntry{
code: actor.Code(),
methods: methodMap,
state: actor.State(),
})
}
return registry
}
func makeMethodMeta(method interface{}) builtin.MethodMeta {
ev := reflect.ValueOf(method)
// Extract the method names using reflection. These
// method names always match the field names in the
// `builtin.Method*` structs (tested in the specs-actors
// tests).
fnName := runtime.FuncForPC(ev.Pointer()).Name()
fnName = strings.TrimSuffix(fnName[strings.LastIndexByte(fnName, '.')+1:], "-fm")
return builtin.MethodMeta{
Name: fnName,
Method: method,
}
}
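// findMethodName is a hypothetical helper (not part of this file) illustrating
// how a RegistryEntry is typically consumed: resolve a method number to the
// exported method name recovered by makeMethodMeta.
func findMethodName(entry RegistryEntry, num abi.MethodNum) (string, bool) {
	meta, ok := entry.Exports()[num]
	if !ok {
		return "", false
	}
	return meta.Name, true
}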
func MakeRegistry(av actorstypes.Version) []RegistryEntry {
if av < actorstypes.Version8 {
panic("expected version v8 and up only, use specs-actors for v0-7")
}
registry := make([]RegistryEntry, 0)
codeIDs, err := actors.GetActorCodeIDs(av)
if err != nil {
panic(err)
}
switch av {
case actorstypes.Version8:
for key, codeID := range codeIDs {
switch key {
case manifest.AccountKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: account8.Methods,
state: new(account8.State),
})
case manifest.CronKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: cron8.Methods,
state: new(cron8.State),
})
case manifest.InitKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: _init8.Methods,
state: new(_init8.State),
})
case manifest.MarketKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: market8.Methods,
state: new(market8.State),
})
case manifest.MinerKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: miner8.Methods,
state: new(miner8.State),
})
case manifest.MultisigKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: multisig8.Methods,
state: new(multisig8.State),
})
case manifest.PaychKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: paych8.Methods,
state: new(paych8.State),
})
case manifest.PowerKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: power8.Methods,
state: new(power8.State),
})
case manifest.RewardKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: reward8.Methods,
state: new(reward8.State),
})
case manifest.SystemKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: system8.Methods,
state: new(system8.State),
})
case manifest.VerifregKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: verifreg8.Methods,
state: new(verifreg8.State),
})
}
}
case actorstypes.Version9:
for key, codeID := range codeIDs {
switch key {
case manifest.AccountKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: account9.Methods,
state: new(account9.State),
})
case manifest.CronKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: cron9.Methods,
state: new(cron9.State),
})
case manifest.InitKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: _init9.Methods,
state: new(_init9.State),
})
case manifest.MarketKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: market9.Methods,
state: new(market9.State),
})
case manifest.MinerKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: miner9.Methods,
state: new(miner9.State),
})
case manifest.MultisigKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: multisig9.Methods,
state: new(multisig9.State),
})
case manifest.PaychKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: paych9.Methods,
state: new(paych9.State),
})
case manifest.PowerKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: power9.Methods,
state: new(power9.State),
})
case manifest.RewardKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: reward9.Methods,
state: new(reward9.State),
})
case manifest.SystemKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: system9.Methods,
state: new(system9.State),
})
case manifest.VerifregKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: verifreg9.Methods,
state: new(verifreg9.State),
})
case manifest.DatacapKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: datacap9.Methods,
state: new(datacap9.State),
})
}
}
case actorstypes.Version10:
for key, codeID := range codeIDs {
switch key {
case manifest.AccountKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: account10.Methods,
state: new(account10.State),
})
case manifest.CronKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: cron10.Methods,
state: new(cron10.State),
})
case manifest.InitKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: _init10.Methods,
state: new(_init10.State),
})
case manifest.MarketKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: market10.Methods,
state: new(market10.State),
})
case manifest.MinerKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: miner10.Methods,
state: new(miner10.State),
})
case manifest.MultisigKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: multisig10.Methods,
state: new(multisig10.State),
})
case manifest.PaychKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: paych10.Methods,
state: new(paych10.State),
})
case manifest.PowerKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: power10.Methods,
state: new(power10.State),
})
case manifest.RewardKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: reward10.Methods,
state: new(reward10.State),
})
case manifest.SystemKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: system10.Methods,
state: new(system10.State),
})
case manifest.VerifregKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: verifreg10.Methods,
state: new(verifreg10.State),
})
case manifest.DatacapKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: datacap10.Methods,
state: new(datacap10.State),
})
case manifest.EvmKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: evm10.Methods,
state: new(evm10.State),
})
case manifest.EamKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: eam10.Methods,
state: nil,
})
case manifest.PlaceholderKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: placeholder10.Methods,
state: nil,
})
case manifest.EthAccountKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: ethaccount10.Methods,
state: nil,
})
}
}
case actorstypes.Version11:
for key, codeID := range codeIDs {
switch key {
case manifest.AccountKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: account11.Methods,
state: new(account11.State),
})
case manifest.CronKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: cron11.Methods,
state: new(cron11.State),
})
case manifest.InitKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: _init11.Methods,
state: new(_init11.State),
})
case manifest.MarketKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: market11.Methods,
state: new(market11.State),
})
case manifest.MinerKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: miner11.Methods,
state: new(miner11.State),
})
case manifest.MultisigKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: multisig11.Methods,
state: new(multisig11.State),
})
case manifest.PaychKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: paych11.Methods,
state: new(paych11.State),
})
case manifest.PowerKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: power11.Methods,
state: new(power11.State),
})
case manifest.RewardKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: reward11.Methods,
state: new(reward11.State),
})
case manifest.SystemKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: system11.Methods,
state: new(system11.State),
})
case manifest.VerifregKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: verifreg11.Methods,
state: new(verifreg11.State),
})
case manifest.DatacapKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: datacap11.Methods,
state: new(datacap11.State),
})
case manifest.EvmKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: evm11.Methods,
state: new(evm11.State),
})
case manifest.EamKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: eam11.Methods,
state: nil,
})
case manifest.PlaceholderKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: placeholder11.Methods,
state: nil,
})
case manifest.EthAccountKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: ethaccount11.Methods,
state: nil,
})
}
}
case actorstypes.Version12:
for key, codeID := range codeIDs {
switch key {
case manifest.AccountKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: account12.Methods,
state: new(account12.State),
})
case manifest.CronKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: cron12.Methods,
state: new(cron12.State),
})
case manifest.InitKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: _init12.Methods,
state: new(_init12.State),
})
case manifest.MarketKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: market12.Methods,
state: new(market12.State),
})
case manifest.MinerKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: miner12.Methods,
state: new(miner12.State),
})
case manifest.MultisigKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: multisig12.Methods,
state: new(multisig12.State),
})
case manifest.PaychKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: paych12.Methods,
state: new(paych12.State),
})
case manifest.PowerKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: power12.Methods,
state: new(power12.State),
})
case manifest.RewardKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: reward12.Methods,
state: new(reward12.State),
})
case manifest.SystemKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: system12.Methods,
state: new(system12.State),
})
case manifest.VerifregKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: verifreg12.Methods,
state: new(verifreg12.State),
})
case manifest.DatacapKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: datacap12.Methods,
state: new(datacap12.State),
})
case manifest.EvmKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: evm12.Methods,
state: new(evm12.State),
})
case manifest.EamKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: eam12.Methods,
state: nil,
})
case manifest.PlaceholderKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: placeholder12.Methods,
state: nil,
})
case manifest.EthAccountKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: ethaccount12.Methods,
state: nil,
})
}
}
case actorstypes.Version13:
for key, codeID := range codeIDs {
switch key {
case manifest.AccountKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: account13.Methods,
state: new(account13.State),
})
case manifest.CronKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: cron13.Methods,
state: new(cron13.State),
})
case manifest.InitKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: _init13.Methods,
state: new(_init13.State),
})
case manifest.MarketKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: market13.Methods,
state: new(market13.State),
})
case manifest.MinerKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: miner13.Methods,
state: new(miner13.State),
})
case manifest.MultisigKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: multisig13.Methods,
state: new(multisig13.State),
})
case manifest.PaychKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: paych13.Methods,
state: new(paych13.State),
})
case manifest.PowerKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: power13.Methods,
state: new(power13.State),
})
case manifest.RewardKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: reward13.Methods,
state: new(reward13.State),
})
case manifest.SystemKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: system13.Methods,
state: new(system13.State),
})
case manifest.VerifregKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: verifreg13.Methods,
state: new(verifreg13.State),
})
case manifest.DatacapKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: datacap13.Methods,
state: new(datacap13.State),
})
case manifest.EvmKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: evm13.Methods,
state: new(evm13.State),
})
case manifest.EamKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: eam13.Methods,
state: nil,
})
case manifest.PlaceholderKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: placeholder13.Methods,
state: nil,
})
case manifest.EthAccountKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: ethaccount13.Methods,
state: nil,
})
}
}
case actorstypes.Version14:
for key, codeID := range codeIDs {
switch key {
case manifest.AccountKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: account14.Methods,
state: new(account14.State),
})
case manifest.CronKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: cron14.Methods,
state: new(cron14.State),
})
case manifest.InitKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: _init14.Methods,
state: new(_init14.State),
})
case manifest.MarketKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: market14.Methods,
state: new(market14.State),
})
case manifest.MinerKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: miner14.Methods,
state: new(miner14.State),
})
case manifest.MultisigKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: multisig14.Methods,
state: new(multisig14.State),
})
case manifest.PaychKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: paych14.Methods,
state: new(paych14.State),
})
case manifest.PowerKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: power14.Methods,
state: new(power14.State),
})
case manifest.RewardKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: reward14.Methods,
state: new(reward14.State),
})
case manifest.SystemKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: system14.Methods,
state: new(system14.State),
})
case manifest.VerifregKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: verifreg14.Methods,
state: new(verifreg14.State),
})
case manifest.DatacapKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: datacap14.Methods,
state: new(datacap14.State),
})
case manifest.EvmKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: evm14.Methods,
state: new(evm14.State),
})
case manifest.EamKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: eam14.Methods,
state: nil,
})
case manifest.PlaceholderKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: placeholder14.Methods,
state: nil,
})
case manifest.EthAccountKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: ethaccount14.Methods,
state: nil,
})
}
}
case actorstypes.Version15:
for key, codeID := range codeIDs {
switch key {
case manifest.AccountKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: account15.Methods,
state: new(account15.State),
})
case manifest.CronKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: cron15.Methods,
state: new(cron15.State),
})
case manifest.InitKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: _init15.Methods,
state: new(_init15.State),
})
case manifest.MarketKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: market15.Methods,
state: new(market15.State),
})
case manifest.MinerKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: miner15.Methods,
state: new(miner15.State),
})
case manifest.MultisigKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: multisig15.Methods,
state: new(multisig15.State),
})
case manifest.PaychKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: paych15.Methods,
state: new(paych15.State),
})
case manifest.PowerKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: power15.Methods,
state: new(power15.State),
})
case manifest.RewardKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: reward15.Methods,
state: new(reward15.State),
})
case manifest.SystemKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: system15.Methods,
state: new(system15.State),
})
case manifest.VerifregKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: verifreg15.Methods,
state: new(verifreg15.State),
})
case manifest.DatacapKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: datacap15.Methods,
state: new(datacap15.State),
})
case manifest.EvmKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: evm15.Methods,
state: new(evm15.State),
})
case manifest.EamKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: eam15.Methods,
state: nil,
})
case manifest.PlaceholderKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: placeholder15.Methods,
state: nil,
})
case manifest.EthAccountKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: ethaccount15.Methods,
state: nil,
})
}
}
case actorstypes.Version16:
for key, codeID := range codeIDs {
switch key {
case manifest.AccountKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: account16.Methods,
state: new(account16.State),
})
case manifest.CronKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: cron16.Methods,
state: new(cron16.State),
})
case manifest.InitKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: _init16.Methods,
state: new(_init16.State),
})
case manifest.MarketKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: market16.Methods,
state: new(market16.State),
})
case manifest.MinerKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: miner16.Methods,
state: new(miner16.State),
})
case manifest.MultisigKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: multisig16.Methods,
state: new(multisig16.State),
})
case manifest.PaychKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: paych16.Methods,
state: new(paych16.State),
})
case manifest.PowerKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: power16.Methods,
state: new(power16.State),
})
case manifest.RewardKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: reward16.Methods,
state: new(reward16.State),
})
case manifest.SystemKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: system16.Methods,
state: new(system16.State),
})
case manifest.VerifregKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: verifreg16.Methods,
state: new(verifreg16.State),
})
case manifest.DatacapKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: datacap16.Methods,
state: new(datacap16.State),
})
case manifest.EvmKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: evm16.Methods,
state: new(evm16.State),
})
case manifest.EamKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: eam16.Methods,
state: nil,
})
case manifest.PlaceholderKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: placeholder16.Methods,
state: nil,
})
case manifest.EthAccountKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: ethaccount16.Methods,
state: nil,
})
}
}
case actorstypes.Version17:
for key, codeID := range codeIDs {
switch key {
case manifest.AccountKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: account17.Methods,
state: new(account17.State),
})
case manifest.CronKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: cron17.Methods,
state: new(cron17.State),
})
case manifest.InitKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: _init17.Methods,
state: new(_init17.State),
})
case manifest.MarketKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: market17.Methods,
state: new(market17.State),
})
case manifest.MinerKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: miner17.Methods,
state: new(miner17.State),
})
case manifest.MultisigKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: multisig17.Methods,
state: new(multisig17.State),
})
case manifest.PaychKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: paych17.Methods,
state: new(paych17.State),
})
case manifest.PowerKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: power17.Methods,
state: new(power17.State),
})
case manifest.RewardKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: reward17.Methods,
state: new(reward17.State),
})
case manifest.SystemKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: system17.Methods,
state: new(system17.State),
})
case manifest.VerifregKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: verifreg17.Methods,
state: new(verifreg17.State),
})
case manifest.DatacapKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: datacap17.Methods,
state: new(datacap17.State),
})
case manifest.EvmKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: evm17.Methods,
state: new(evm17.State),
})
case manifest.EamKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: eam17.Methods,
state: nil,
})
case manifest.PlaceholderKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: placeholder17.Methods,
state: nil,
})
case manifest.EthAccountKey:
registry = append(registry, RegistryEntry{
code: codeID,
methods: ethaccount17.Methods,
state: nil,
})
}
}
default:
panic("expected version v8 and up only, use specs-actors for v0-7")
}
return registry
}
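// Hedged usage sketch (not from the original source): scanning the registry built by
// MakeRegistry to find the exported method map for a particular actor code CID. The
// lookupMethods helper name is illustrative only.
func lookupMethods(registry []RegistryEntry, code cid.Cid) (map[abi.MethodNum]builtin.MethodMeta, bool) {
	for _, entry := range registry {
		if entry.Code() == code {
			return entry.Exports(), true
		}
	}
	return nil, false
}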
package actors
import (
"context"
"strings"
"sync"
"github.com/ipfs/go-cid"
cbor "github.com/ipfs/go-ipld-cbor"
"golang.org/x/xerrors"
actorstypes "github.com/filecoin-project/go-state-types/actors"
"github.com/filecoin-project/go-state-types/manifest"
"github.com/filecoin-project/lotus/chain/actors/adt"
)
var manifestCids = make(map[actorstypes.Version]cid.Cid)
var manifests = make(map[actorstypes.Version]map[string]cid.Cid)
var actorMeta = make(map[cid.Cid]actorEntry)
var (
manifestMx sync.RWMutex
)
type actorEntry struct {
name string
version actorstypes.Version
}
// ClearManifests clears all known manifests. This is usually used in tests that need to switch networks.
func ClearManifests() {
manifestMx.Lock()
defer manifestMx.Unlock()
manifestCids = make(map[actorstypes.Version]cid.Cid)
manifests = make(map[actorstypes.Version]map[string]cid.Cid)
actorMeta = make(map[cid.Cid]actorEntry)
}
// RegisterManifest registers an actors manifest with lotus.
func RegisterManifest(av actorstypes.Version, manifestCid cid.Cid, entries map[string]cid.Cid) {
manifestMx.Lock()
defer manifestMx.Unlock()
manifestCids[av] = manifestCid
manifests[av] = entries
for name, c := range entries {
actorMeta[c] = actorEntry{name: name, version: av}
}
}
func AddActorMeta(name string, codeId cid.Cid, av actorstypes.Version) {
manifestMx.Lock()
defer manifestMx.Unlock()
actorMeta[codeId] = actorEntry{name: name, version: av}
}
// GetManifest gets a loaded manifest.
func GetManifest(av actorstypes.Version) (cid.Cid, bool) {
manifestMx.RLock()
defer manifestMx.RUnlock()
c, ok := manifestCids[av]
return c, ok
}
// ReadManifest reads a manifest from a blockstore. It does not "add" it.
func ReadManifest(ctx context.Context, store cbor.IpldStore, mfCid cid.Cid) (map[string]cid.Cid, error) {
adtStore := adt.WrapStore(ctx, store)
var mf manifest.Manifest
if err := adtStore.Get(ctx, mfCid, &mf); err != nil {
return nil, xerrors.Errorf("error reading manifest (cid: %s): %w", mfCid, err)
}
if err := mf.Load(ctx, adtStore); err != nil {
return nil, xerrors.Errorf("error loading manifest (cid: %s): %w", mfCid, err)
}
var manifestData manifest.ManifestData
if err := store.Get(ctx, mf.Data, &manifestData); err != nil {
return nil, xerrors.Errorf("error loading manifest data: %w", err)
}
metadata := make(map[string]cid.Cid)
for _, entry := range manifestData.Entries {
metadata[entry.Name] = entry.Code
}
return metadata, nil
}
// GetActorCodeIDsFromManifest looks up all builtin actors' code CIDs by actor version, for versions that have a manifest.
func GetActorCodeIDsFromManifest(av actorstypes.Version) (map[string]cid.Cid, bool) {
manifestMx.RLock()
defer manifestMx.RUnlock()
cids, ok := manifests[av]
return cids, ok
}
// LoadManifest gets the manifest for the given manifest CID from the store and loads the data into its entries.
func LoadManifest(ctx context.Context, mfCid cid.Cid, adtStore adt.Store) (*manifest.Manifest, error) {
var mf manifest.Manifest
if err := adtStore.Get(ctx, mfCid, &mf); err != nil {
return nil, xerrors.Errorf("error reading manifest: %w", err)
}
if err := mf.Load(ctx, adtStore); err != nil {
return nil, xerrors.Errorf("error loading manifest entries data: %w", err)
}
return &mf, nil
}
func GetActorMetaByCode(c cid.Cid) (string, actorstypes.Version, bool) {
manifestMx.RLock()
defer manifestMx.RUnlock()
entry, ok := actorMeta[c]
if !ok {
return "", -1, false
}
return entry.name, entry.version, true
}
func CanonicalName(name string) string {
idx := strings.LastIndex(name, "/")
if idx >= 0 {
return name[idx+1:]
}
return name
}
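// Illustrative sketch (assumption, not part of the original source): CanonicalName
// strips any path-style prefix, so a specs-actors style name such as "fil/7/storageminer"
// canonicalizes to "storageminer", while a bare name is returned unchanged. The
// exampleCanonicalName helper is hypothetical.
func exampleCanonicalName() (string, string) {
	// both results are "storageminer"
	return CanonicalName("fil/7/storageminer"), CanonicalName("storageminer")
}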
package actors
import (
"bytes"
cbg "github.com/whyrusleeping/cbor-gen"
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/lotus/chain/actors/aerrors"
)
func SerializeParams(i cbg.CBORMarshaler) ([]byte, aerrors.ActorError) {
buf := new(bytes.Buffer)
if err := i.MarshalCBOR(buf); err != nil {
// TODO: shouldn't this be a fatal error?
return nil, aerrors.Absorb(err, exitcode.ErrSerialization, "failed to encode parameter")
}
return buf.Bytes(), nil
}
package policy
import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
actorstypes "github.com/filecoin-project/go-state-types/actors"
"github.com/filecoin-project/go-state-types/big"
builtin10 "github.com/filecoin-project/go-state-types/builtin"
builtin11 "github.com/filecoin-project/go-state-types/builtin"
builtin12 "github.com/filecoin-project/go-state-types/builtin"
builtin13 "github.com/filecoin-project/go-state-types/builtin"
builtin14 "github.com/filecoin-project/go-state-types/builtin"
builtin15 "github.com/filecoin-project/go-state-types/builtin"
builtin16 "github.com/filecoin-project/go-state-types/builtin"
builtin17 "github.com/filecoin-project/go-state-types/builtin"
builtin8 "github.com/filecoin-project/go-state-types/builtin"
builtin9 "github.com/filecoin-project/go-state-types/builtin"
market10 "github.com/filecoin-project/go-state-types/builtin/v10/market"
miner10 "github.com/filecoin-project/go-state-types/builtin/v10/miner"
verifreg10 "github.com/filecoin-project/go-state-types/builtin/v10/verifreg"
market11 "github.com/filecoin-project/go-state-types/builtin/v11/market"
miner11 "github.com/filecoin-project/go-state-types/builtin/v11/miner"
verifreg11 "github.com/filecoin-project/go-state-types/builtin/v11/verifreg"
market12 "github.com/filecoin-project/go-state-types/builtin/v12/market"
miner12 "github.com/filecoin-project/go-state-types/builtin/v12/miner"
verifreg12 "github.com/filecoin-project/go-state-types/builtin/v12/verifreg"
market13 "github.com/filecoin-project/go-state-types/builtin/v13/market"
miner13 "github.com/filecoin-project/go-state-types/builtin/v13/miner"
verifreg13 "github.com/filecoin-project/go-state-types/builtin/v13/verifreg"
market14 "github.com/filecoin-project/go-state-types/builtin/v14/market"
miner14 "github.com/filecoin-project/go-state-types/builtin/v14/miner"
verifreg14 "github.com/filecoin-project/go-state-types/builtin/v14/verifreg"
market15 "github.com/filecoin-project/go-state-types/builtin/v15/market"
miner15 "github.com/filecoin-project/go-state-types/builtin/v15/miner"
verifreg15 "github.com/filecoin-project/go-state-types/builtin/v15/verifreg"
market16 "github.com/filecoin-project/go-state-types/builtin/v16/market"
miner16 "github.com/filecoin-project/go-state-types/builtin/v16/miner"
verifreg16 "github.com/filecoin-project/go-state-types/builtin/v16/verifreg"
market17 "github.com/filecoin-project/go-state-types/builtin/v17/market"
miner17 "github.com/filecoin-project/go-state-types/builtin/v17/miner"
paych17 "github.com/filecoin-project/go-state-types/builtin/v17/paych"
verifreg17 "github.com/filecoin-project/go-state-types/builtin/v17/verifreg"
market8 "github.com/filecoin-project/go-state-types/builtin/v8/market"
miner8 "github.com/filecoin-project/go-state-types/builtin/v8/miner"
verifreg8 "github.com/filecoin-project/go-state-types/builtin/v8/verifreg"
market9 "github.com/filecoin-project/go-state-types/builtin/v9/market"
miner9 "github.com/filecoin-project/go-state-types/builtin/v9/miner"
verifreg9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg"
"github.com/filecoin-project/go-state-types/network"
market0 "github.com/filecoin-project/specs-actors/actors/builtin/market"
miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
power0 "github.com/filecoin-project/specs-actors/actors/builtin/power"
verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin"
market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market"
miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner"
verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg"
builtin3 "github.com/filecoin-project/specs-actors/v3/actors/builtin"
market3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/market"
miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner"
verifreg3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/verifreg"
builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin"
market4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/market"
miner4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/miner"
verifreg4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/verifreg"
builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin"
market5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/market"
miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner"
verifreg5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/verifreg"
builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin"
market6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/market"
miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner"
verifreg6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/verifreg"
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin"
market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market"
miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner"
verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg"
)
const (
ChainFinality = miner17.ChainFinality
SealRandomnessLookback = ChainFinality
PaychSettleDelay = paych17.SettleDelay
MaxPreCommitRandomnessLookback = builtin17.EpochsInDay + SealRandomnessLookback
DeclarationsMax = 3000
)
var (
MarketDefaultAllocationTermBuffer = market17.MarketDefaultAllocationTermBuffer
)
// SetSupportedProofTypes sets supported proof types, across all actor versions.
// This should only be used for testing.
func SetSupportedProofTypes(types ...abi.RegisteredSealProof) {
miner0.SupportedProofTypes = make(map[abi.RegisteredSealProof]struct{}, len(types))
miner2.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types))
miner2.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2)
miner2.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
miner3.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types))
miner3.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2)
miner3.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
miner4.PreCommitSealProofTypesV0 = make(map[abi.RegisteredSealProof]struct{}, len(types))
miner4.PreCommitSealProofTypesV7 = make(map[abi.RegisteredSealProof]struct{}, len(types)*2)
miner4.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
miner5.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
miner6.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
miner7.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types))
AddSupportedProofTypes(types...)
}
// AddSupportedProofTypes adds to the set of supported proof types, across all actor versions.
// This should only be used for testing.
func AddSupportedProofTypes(types ...abi.RegisteredSealProof) {
for _, t := range types {
if t >= abi.RegisteredSealProof_StackedDrg2KiBV1_1 {
panic("must specify v1 proof types only")
}
// Set for all miner versions.
miner0.SupportedProofTypes[t] = struct{}{}
miner2.PreCommitSealProofTypesV0[t] = struct{}{}
miner2.PreCommitSealProofTypesV7[t] = struct{}{}
miner2.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
miner2.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
miner3.PreCommitSealProofTypesV0[t] = struct{}{}
miner3.PreCommitSealProofTypesV7[t] = struct{}{}
miner3.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
miner3.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
miner4.PreCommitSealProofTypesV0[t] = struct{}{}
miner4.PreCommitSealProofTypesV7[t] = struct{}{}
miner4.PreCommitSealProofTypesV7[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
miner4.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
miner5.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
wpp, err := t.RegisteredWindowPoStProof()
if err != nil {
// Fine to panic, this is a test-only method
panic(err)
}
miner5.WindowPoStProofTypes[wpp] = struct{}{}
miner6.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
wpp, err = t.RegisteredWindowPoStProof()
if err != nil {
// Fine to panic, this is a test-only method
panic(err)
}
miner6.WindowPoStProofTypes[wpp] = struct{}{}
miner7.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{}
wpp, err = t.RegisteredWindowPoStProof()
if err != nil {
// Fine to panic, this is a test-only method
panic(err)
}
miner7.WindowPoStProofTypes[wpp] = struct{}{}
}
}
// SetPreCommitChallengeDelay sets the pre-commit challenge delay across all
// actor versions. Use for testing.
func SetPreCommitChallengeDelay(delay abi.ChainEpoch) {
// Set for all miner versions.
miner0.PreCommitChallengeDelay = delay
miner2.PreCommitChallengeDelay = delay
miner3.PreCommitChallengeDelay = delay
miner4.PreCommitChallengeDelay = delay
miner5.PreCommitChallengeDelay = delay
miner6.PreCommitChallengeDelay = delay
miner7.PreCommitChallengeDelay = delay
miner8.PreCommitChallengeDelay = delay
miner9.PreCommitChallengeDelay = delay
miner10.PreCommitChallengeDelay = delay
miner11.PreCommitChallengeDelay = delay
miner12.PreCommitChallengeDelay = delay
miner13.PreCommitChallengeDelay = delay
miner14.PreCommitChallengeDelay = delay
miner15.PreCommitChallengeDelay = delay
miner16.PreCommitChallengeDelay = delay
miner17.PreCommitChallengeDelay = delay
}
func GetPreCommitChallengeDelay() abi.ChainEpoch {
// TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay.
return miner17.PreCommitChallengeDelay
}
// SetConsensusMinerMinPower sets the minimum power an individual miner must meet
// for leader election, across all actor versions. This should only be used for
// testing.
func SetConsensusMinerMinPower(p abi.StoragePower) {
power0.ConsensusMinerMinPower = p
for _, policy := range builtin2.SealProofPolicies {
policy.ConsensusMinerMinPower = p
}
for _, policy := range builtin3.PoStProofPolicies {
policy.ConsensusMinerMinPower = p
}
for _, policy := range builtin4.PoStProofPolicies {
policy.ConsensusMinerMinPower = p
}
for _, policy := range builtin5.PoStProofPolicies {
policy.ConsensusMinerMinPower = p
}
for _, policy := range builtin6.PoStProofPolicies {
policy.ConsensusMinerMinPower = p
}
for _, policy := range builtin7.PoStProofPolicies {
policy.ConsensusMinerMinPower = p
}
for _, policy := range builtin8.PoStProofPolicies {
policy.ConsensusMinerMinPower = p
}
for _, policy := range builtin9.PoStProofPolicies {
policy.ConsensusMinerMinPower = p
}
for _, policy := range builtin10.PoStProofPolicies {
policy.ConsensusMinerMinPower = p
}
for _, policy := range builtin11.PoStProofPolicies {
policy.ConsensusMinerMinPower = p
}
for _, policy := range builtin12.PoStProofPolicies {
policy.ConsensusMinerMinPower = p
}
for _, policy := range builtin13.PoStProofPolicies {
policy.ConsensusMinerMinPower = p
}
for _, policy := range builtin14.PoStProofPolicies {
policy.ConsensusMinerMinPower = p
}
for _, policy := range builtin15.PoStProofPolicies {
policy.ConsensusMinerMinPower = p
}
for _, policy := range builtin16.PoStProofPolicies {
policy.ConsensusMinerMinPower = p
}
for _, policy := range builtin17.PoStProofPolicies {
policy.ConsensusMinerMinPower = p
}
}
// SetMinVerifiedDealSize sets the minimum size of a verified deal. This should
// only be used for testing.
func SetMinVerifiedDealSize(size abi.StoragePower) {
verifreg0.MinVerifiedDealSize = size
verifreg2.MinVerifiedDealSize = size
verifreg3.MinVerifiedDealSize = size
verifreg4.MinVerifiedDealSize = size
verifreg5.MinVerifiedDealSize = size
verifreg6.MinVerifiedDealSize = size
verifreg7.MinVerifiedDealSize = size
verifreg8.MinVerifiedDealSize = size
verifreg9.MinVerifiedDealSize = size
verifreg10.MinVerifiedDealSize = size
verifreg11.MinVerifiedDealSize = size
verifreg12.MinVerifiedDealSize = size
verifreg13.MinVerifiedDealSize = size
verifreg14.MinVerifiedDealSize = size
verifreg15.MinVerifiedDealSize = size
verifreg16.MinVerifiedDealSize = size
verifreg17.MinVerifiedDealSize = size
}
func GetMaxProveCommitDuration(ver actorstypes.Version, t abi.RegisteredSealProof) (abi.ChainEpoch, error) {
switch ver {
case actorstypes.Version0:
return miner0.MaxSealDuration[t], nil
case actorstypes.Version2:
return miner2.MaxProveCommitDuration[t], nil
case actorstypes.Version3:
return miner3.MaxProveCommitDuration[t], nil
case actorstypes.Version4:
return miner4.MaxProveCommitDuration[t], nil
case actorstypes.Version5:
return miner5.MaxProveCommitDuration[t], nil
case actorstypes.Version6:
return miner6.MaxProveCommitDuration[t], nil
case actorstypes.Version7:
return miner7.MaxProveCommitDuration[t], nil
case actorstypes.Version8:
return miner8.MaxProveCommitDuration[t], nil
case actorstypes.Version9:
return miner9.MaxProveCommitDuration[t], nil
case actorstypes.Version10:
return miner10.MaxProveCommitDuration[t], nil
case actorstypes.Version11:
return miner11.MaxProveCommitDuration[t], nil
case actorstypes.Version12:
return miner12.MaxProveCommitDuration[t], nil
case actorstypes.Version13:
return miner13.MaxProveCommitDuration[t], nil
case actorstypes.Version14:
return miner14.MaxProveCommitDuration[t], nil
case actorstypes.Version15:
return miner15.MaxProveCommitDuration[t], nil
case actorstypes.Version16:
return miner16.MaxProveCommitDuration[t], nil
case actorstypes.Version17:
return miner17.MaxProveCommitDuration[t], nil
default:
return 0, xerrors.Errorf("unsupported actors version")
}
}
// SetProviderCollateralSupplyTarget sets the percentage of normalized circulating
// supply that must be covered by provider collateral in a deal. This should
// only be used for testing.
func SetProviderCollateralSupplyTarget(num, denom big.Int) {
market2.ProviderCollateralSupplyTarget = builtin2.BigFrac{
Numerator: num,
Denominator: denom,
}
market3.ProviderCollateralSupplyTarget = builtin3.BigFrac{
Numerator: num,
Denominator: denom,
}
market4.ProviderCollateralSupplyTarget = builtin4.BigFrac{
Numerator: num,
Denominator: denom,
}
market5.ProviderCollateralSupplyTarget = builtin5.BigFrac{
Numerator: num,
Denominator: denom,
}
market6.ProviderCollateralSupplyTarget = builtin6.BigFrac{
Numerator: num,
Denominator: denom,
}
market7.ProviderCollateralSupplyTarget = builtin7.BigFrac{
Numerator: num,
Denominator: denom,
}
market8.ProviderCollateralSupplyTarget = builtin8.BigFrac{
Numerator: num,
Denominator: denom,
}
market9.ProviderCollateralSupplyTarget = builtin9.BigFrac{
Numerator: num,
Denominator: denom,
}
market10.ProviderCollateralSupplyTarget = builtin10.BigFrac{
Numerator: num,
Denominator: denom,
}
market11.ProviderCollateralSupplyTarget = builtin11.BigFrac{
Numerator: num,
Denominator: denom,
}
market12.ProviderCollateralSupplyTarget = builtin12.BigFrac{
Numerator: num,
Denominator: denom,
}
market13.ProviderCollateralSupplyTarget = builtin13.BigFrac{
Numerator: num,
Denominator: denom,
}
market14.ProviderCollateralSupplyTarget = builtin14.BigFrac{
Numerator: num,
Denominator: denom,
}
market15.ProviderCollateralSupplyTarget = builtin15.BigFrac{
Numerator: num,
Denominator: denom,
}
market16.ProviderCollateralSupplyTarget = builtin16.BigFrac{
Numerator: num,
Denominator: denom,
}
market17.ProviderCollateralSupplyTarget = builtin17.BigFrac{
Numerator: num,
Denominator: denom,
}
}
func DealProviderCollateralBounds(
size abi.PaddedPieceSize, verified bool,
rawBytePower, qaPower, baselinePower abi.StoragePower,
circulatingFil abi.TokenAmount, nwVer network.Version,
) (min, max abi.TokenAmount, err error) {
v, err := actorstypes.VersionForNetwork(nwVer)
if err != nil {
return big.Zero(), big.Zero(), err
}
switch v {
case actorstypes.Version0:
min, max := market0.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil, nwVer)
return min, max, nil
case actorstypes.Version2:
min, max := market2.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
return min, max, nil
case actorstypes.Version3:
min, max := market3.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
return min, max, nil
case actorstypes.Version4:
min, max := market4.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
return min, max, nil
case actorstypes.Version5:
min, max := market5.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
return min, max, nil
case actorstypes.Version6:
min, max := market6.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
return min, max, nil
case actorstypes.Version7:
min, max := market7.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
return min, max, nil
case actorstypes.Version8:
min, max := market8.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
return min, max, nil
case actorstypes.Version9:
min, max := market9.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
return min, max, nil
case actorstypes.Version10:
min, max := market10.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
return min, max, nil
case actorstypes.Version11:
min, max := market11.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
return min, max, nil
case actorstypes.Version12:
min, max := market12.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
return min, max, nil
case actorstypes.Version13:
min, max := market13.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
return min, max, nil
case actorstypes.Version14:
min, max := market14.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
return min, max, nil
case actorstypes.Version15:
min, max := market15.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
return min, max, nil
case actorstypes.Version16:
min, max := market16.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
return min, max, nil
case actorstypes.Version17:
min, max := market17.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil)
return min, max, nil
default:
return big.Zero(), big.Zero(), xerrors.Errorf("unsupported actors version")
}
}
func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) {
return market17.DealDurationBounds(pieceSize)
}
// SetWPoStChallengeWindow sets the challenge window and scales the proving period to match (such
// that there are always 48 challenge windows in a proving period).
func SetWPoStChallengeWindow(period abi.ChainEpoch) {
miner0.WPoStChallengeWindow = period
miner0.WPoStProvingPeriod = period * abi.ChainEpoch(miner0.WPoStPeriodDeadlines)
miner2.WPoStChallengeWindow = period
miner2.WPoStProvingPeriod = period * abi.ChainEpoch(miner2.WPoStPeriodDeadlines)
miner3.WPoStChallengeWindow = period
miner3.WPoStProvingPeriod = period * abi.ChainEpoch(miner3.WPoStPeriodDeadlines)
// by default, this is 2x finality which is 30 periods.
// scale it if we're scaling the challenge period.
miner3.WPoStDisputeWindow = period * 30
miner4.WPoStChallengeWindow = period
miner4.WPoStProvingPeriod = period * abi.ChainEpoch(miner4.WPoStPeriodDeadlines)
// by default, this is 2x finality which is 30 periods.
// scale it if we're scaling the challenge period.
miner4.WPoStDisputeWindow = period * 30
miner5.WPoStChallengeWindow = period
miner5.WPoStProvingPeriod = period * abi.ChainEpoch(miner5.WPoStPeriodDeadlines)
// by default, this is 2x finality which is 30 periods.
// scale it if we're scaling the challenge period.
miner5.WPoStDisputeWindow = period * 30
miner6.WPoStChallengeWindow = period
miner6.WPoStProvingPeriod = period * abi.ChainEpoch(miner6.WPoStPeriodDeadlines)
// by default, this is 2x finality which is 30 periods.
// scale it if we're scaling the challenge period.
miner6.WPoStDisputeWindow = period * 30
miner7.WPoStChallengeWindow = period
miner7.WPoStProvingPeriod = period * abi.ChainEpoch(miner7.WPoStPeriodDeadlines)
// by default, this is 2x finality which is 30 periods.
// scale it if we're scaling the challenge period.
miner7.WPoStDisputeWindow = period * 30
miner8.WPoStChallengeWindow = period
miner8.WPoStProvingPeriod = period * abi.ChainEpoch(miner8.WPoStPeriodDeadlines)
// by default, this is 2x finality which is 30 periods.
// scale it if we're scaling the challenge period.
miner8.WPoStDisputeWindow = period * 30
miner9.WPoStChallengeWindow = period
miner9.WPoStProvingPeriod = period * abi.ChainEpoch(miner9.WPoStPeriodDeadlines)
// by default, this is 2x finality which is 30 periods.
// scale it if we're scaling the challenge period.
miner9.WPoStDisputeWindow = period * 30
miner10.WPoStChallengeWindow = period
miner10.WPoStProvingPeriod = period * abi.ChainEpoch(miner10.WPoStPeriodDeadlines)
// by default, this is 2x finality which is 30 periods.
// scale it if we're scaling the challenge period.
miner10.WPoStDisputeWindow = period * 30
miner11.WPoStChallengeWindow = period
miner11.WPoStProvingPeriod = period * abi.ChainEpoch(miner11.WPoStPeriodDeadlines)
// by default, this is 2x finality which is 30 periods.
// scale it if we're scaling the challenge period.
miner11.WPoStDisputeWindow = period * 30
miner12.WPoStChallengeWindow = period
miner12.WPoStProvingPeriod = period * abi.ChainEpoch(miner12.WPoStPeriodDeadlines)
// by default, this is 2x finality which is 30 periods.
// scale it if we're scaling the challenge period.
miner12.WPoStDisputeWindow = period * 30
miner13.WPoStChallengeWindow = period
miner13.WPoStProvingPeriod = period * abi.ChainEpoch(miner13.WPoStPeriodDeadlines)
// by default, this is 2x finality which is 30 periods.
// scale it if we're scaling the challenge period.
miner13.WPoStDisputeWindow = period * 30
miner14.WPoStChallengeWindow = period
miner14.WPoStProvingPeriod = period * abi.ChainEpoch(miner14.WPoStPeriodDeadlines)
// by default, this is 2x finality which is 30 periods.
// scale it if we're scaling the challenge period.
miner14.WPoStDisputeWindow = period * 30
miner15.WPoStChallengeWindow = period
miner15.WPoStProvingPeriod = period * abi.ChainEpoch(miner15.WPoStPeriodDeadlines)
// by default, this is 2x finality which is 30 periods.
// scale it if we're scaling the challenge period.
miner15.WPoStDisputeWindow = period * 30
miner16.WPoStChallengeWindow = period
miner16.WPoStProvingPeriod = period * abi.ChainEpoch(miner16.WPoStPeriodDeadlines)
// by default, this is 2x finality which is 30 periods.
// scale it if we're scaling the challenge period.
miner16.WPoStDisputeWindow = period * 30
miner17.WPoStChallengeWindow = period
miner17.WPoStProvingPeriod = period * abi.ChainEpoch(miner17.WPoStPeriodDeadlines)
// by default, this is 2x finality which is 30 periods.
// scale it if we're scaling the challenge period.
miner17.WPoStDisputeWindow = period * 30
}
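// Hedged sketch (illustrative only): the scaling relation applied above. With the
// standard 48 deadlines per proving period, a challenge window of 60 epochs implies a
// 2880-epoch proving period and, for v3+ miners, a 1800-epoch dispute window. The
// exampleChallengeWindowScaling helper is hypothetical.
func exampleChallengeWindowScaling(period abi.ChainEpoch) (provingPeriod, disputeWindow abi.ChainEpoch) {
	const deadlines = 48 // 48 challenge windows per proving period, as noted above
	return period * deadlines, period * 30
}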
func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch {
if nwVer <= network.Version3 {
return 10
}
// NOTE: if this ever changes, adjust it in a (*Miner).mineOne() logline as well
return ChainFinality
}
func GetMaxSectorExpirationExtension(nv network.Version) (abi.ChainEpoch, error) {
v, err := actorstypes.VersionForNetwork(nv)
if err != nil {
return 0, xerrors.Errorf("failed to get actors version: %w", err)
}
switch v {
case actorstypes.Version0:
return miner0.MaxSectorExpirationExtension, nil
case actorstypes.Version2:
return miner2.MaxSectorExpirationExtension, nil
case actorstypes.Version3:
return miner3.MaxSectorExpirationExtension, nil
case actorstypes.Version4:
return miner4.MaxSectorExpirationExtension, nil
case actorstypes.Version5:
return miner5.MaxSectorExpirationExtension, nil
case actorstypes.Version6:
return miner6.MaxSectorExpirationExtension, nil
case actorstypes.Version7:
return miner7.MaxSectorExpirationExtension, nil
case actorstypes.Version8:
return miner8.MaxSectorExpirationExtension, nil
case actorstypes.Version9:
return miner9.MaxSectorExpirationExtension, nil
case actorstypes.Version10:
return miner10.MaxSectorExpirationExtension, nil
case actorstypes.Version11:
return miner11.MaxSectorExpirationExtension, nil
case actorstypes.Version12:
return miner12.MaxSectorExpirationExtension, nil
case actorstypes.Version13:
return miner13.MaxSectorExpirationExtension, nil
case actorstypes.Version14:
return miner14.MaxSectorExpirationExtension, nil
case actorstypes.Version15:
return miner15.MaxSectorExpirationExtension, nil
case actorstypes.Version16:
return miner16.MaxSectorExpirationExtension, nil
case actorstypes.Version17:
return miner17.MaxSectorExpirationExtension, nil
default:
return 0, xerrors.Errorf("unsupported network version")
}
}
func GetMinSectorExpiration() abi.ChainEpoch {
return miner17.MinSectorExpiration
}
func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, error) {
sectorsPerPart, err := builtin17.PoStProofWindowPoStPartitionSectors(p)
if err != nil {
return 0, err
}
maxSectors, err := GetAddressedSectorsMax(nv)
if err != nil {
return 0, err
}
return min(miner17.PoStedPartitionsMax, int(uint64(maxSectors)/sectorsPerPart)), nil
}
func GetDefaultAggregationProof() abi.RegisteredAggregationProof {
return abi.RegisteredAggregationProof_SnarkPackV1
}
func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) abi.ChainEpoch {
if nwVer <= network.Version10 {
return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime
}
return builtin17.SealProofPoliciesV11[proof].SectorMaxLifetime
}
func GetAddressedSectorsMax(nwVer network.Version) (int, error) {
v, err := actorstypes.VersionForNetwork(nwVer)
if err != nil {
return 0, err
}
switch v {
case actorstypes.Version0:
return miner0.AddressedSectorsMax, nil
case actorstypes.Version2:
return miner2.AddressedSectorsMax, nil
case actorstypes.Version3:
return miner3.AddressedSectorsMax, nil
case actorstypes.Version4:
return miner4.AddressedSectorsMax, nil
case actorstypes.Version5:
return miner5.AddressedSectorsMax, nil
case actorstypes.Version6:
return miner6.AddressedSectorsMax, nil
case actorstypes.Version7:
return miner7.AddressedSectorsMax, nil
case actorstypes.Version8:
return miner8.AddressedSectorsMax, nil
case actorstypes.Version9:
return miner9.AddressedSectorsMax, nil
case actorstypes.Version10:
return miner10.AddressedSectorsMax, nil
case actorstypes.Version11:
return miner11.AddressedSectorsMax, nil
case actorstypes.Version12:
return miner12.AddressedSectorsMax, nil
case actorstypes.Version13:
return miner13.AddressedSectorsMax, nil
case actorstypes.Version14:
return miner14.AddressedSectorsMax, nil
case actorstypes.Version15:
return miner15.AddressedSectorsMax, nil
case actorstypes.Version16:
return miner16.AddressedSectorsMax, nil
case actorstypes.Version17:
return miner17.AddressedSectorsMax, nil
default:
return 0, xerrors.Errorf("unsupported network version")
}
}
func AggregatePreCommitNetworkFee(nwVer network.Version, aggregateSize int, baseFee abi.TokenAmount) (abi.TokenAmount, error) {
v, err := actorstypes.VersionForNetwork(nwVer)
if err != nil {
return big.Zero(), err
}
switch v {
case actorstypes.Version0:
return big.Zero(), nil
case actorstypes.Version2:
return big.Zero(), nil
case actorstypes.Version3:
return big.Zero(), nil
case actorstypes.Version4:
return big.Zero(), nil
case actorstypes.Version5:
return big.Zero(), nil
case actorstypes.Version6:
return miner6.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil
case actorstypes.Version7:
return miner7.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil
case actorstypes.Version8:
return miner8.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil
case actorstypes.Version9:
return miner9.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil
case actorstypes.Version10:
return miner10.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil
case actorstypes.Version11:
return miner11.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil
case actorstypes.Version12:
return miner12.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil
case actorstypes.Version13:
return miner13.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil
case actorstypes.Version14:
return miner14.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil
case actorstypes.Version15:
return miner15.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil
case actorstypes.Version16:
return big.Zero(), nil
case actorstypes.Version17:
return big.Zero(), nil
default:
return big.Zero(), xerrors.Errorf("unsupported network version")
}
}
var PoStToSealMap map[abi.RegisteredPoStProof]abi.RegisteredSealProof
func init() {
PoStToSealMap = make(map[abi.RegisteredPoStProof]abi.RegisteredSealProof)
for sealProof, info := range abi.SealProofInfos {
PoStToSealMap[info.WinningPoStProof] = sealProof
PoStToSealMap[info.WindowPoStProof] = sealProof
}
}
func GetSealProofFromPoStProof(postProof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) {
sealProof, exists := PoStToSealMap[postProof]
if !exists {
return 0, xerrors.New("no corresponding RegisteredSealProof for the given RegisteredPoStProof")
}
return sealProof, nil
}
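// Hedged usage sketch (not part of the original source): several seal proof versions
// (V1, V1_1, ...) share the same window PoSt proof, so PoStToSealMap keeps whichever
// entry was written last; callers should rely only on the sector-size class of the
// result. The exampleSealProofLookup helper is hypothetical.
func exampleSealProofLookup(post abi.RegisteredPoStProof) {
	if _, err := GetSealProofFromPoStProof(post); err != nil {
		// unknown PoSt proof types are reported as an error rather than a zero value
		panic(err)
	}
}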
package types
import (
"errors"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-address"
)
var ErrActorNotFound = errors.New("actor not found")
// ActorV4 is the actor state for state tree versions up to 4
type ActorV4 struct {
// Identifies the type of actor (string coded as a CID), see `chain/actors/actors.go`.
Code cid.Cid
Head cid.Cid
Nonce uint64
Balance BigInt
}
// ActorV5 is the actor state for state tree version 5
type ActorV5 struct {
// Identifies the type of actor (string coded as a CID), see `chain/actors/actors.go`.
Code cid.Cid
Head cid.Cid
Nonce uint64
Balance BigInt
// The f4 address of the actor, if any.
DelegatedAddress *address.Address
}
type Actor = ActorV5
func AsActorV4(a *ActorV5) *ActorV4 {
return &ActorV4{
Code: a.Code,
Head: a.Head,
Nonce: a.Nonce,
Balance: a.Balance,
}
}
func AsActorV5(a *ActorV4) *ActorV5 {
return &ActorV5{
Code: a.Code,
Head: a.Head,
Nonce: a.Nonce,
Balance: a.Balance,
}
}
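// Hedged sketch (illustrative only): converting ActorV5 -> ActorV4 drops the
// DelegatedAddress field, so converting back with AsActorV5 yields an actor without
// its f4 address. The exampleLossyActorRoundTrip helper is hypothetical.
func exampleLossyActorRoundTrip(a *ActorV5) *ActorV5 {
	back := AsActorV5(AsActorV4(a))
	// back.DelegatedAddress is always nil, even if a.DelegatedAddress was set
	return back
}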
package types
import (
"fmt"
"math/big"
big2 "github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/build/buildconstants"
)
const BigIntMaxSerializedLen = 128 // is this big enough? or too big?
var TotalFilecoinInt = FromFil(buildconstants.FilBase)
var EmptyInt = BigInt{}
type BigInt = big2.Int
func NewInt(i uint64) BigInt {
return BigInt{Int: big.NewInt(0).SetUint64(i)}
}
func FromFil(i uint64) BigInt {
return BigMul(NewInt(i), NewInt(buildconstants.FilecoinPrecision))
}
func BigFromBytes(b []byte) BigInt {
i := big.NewInt(0).SetBytes(b)
return BigInt{Int: i}
}
func BigFromString(s string) (BigInt, error) {
return big2.FromString(s)
}
func BigMul(a, b BigInt) BigInt {
return big2.Mul(a, b)
}
func BigDiv(a, b BigInt) BigInt {
return big2.Div(a, b)
}
func BigDivFloat(num, den BigInt) float64 {
if den.NilOrZero() {
panic("divide by zero")
}
if num.NilOrZero() {
return 0
}
res, _ := new(big.Rat).SetFrac(num.Int, den.Int).Float64()
return res
}
func BigMod(a, b BigInt) BigInt {
return big2.Mod(a, b)
}
func BigAdd(a, b BigInt) BigInt {
return big2.Add(a, b)
}
func BigSub(a, b BigInt) BigInt {
return big2.Sub(a, b)
}
func BigCmp(a, b BigInt) int {
return big2.Cmp(a, b)
}
var byteSizeUnits = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB"}
func SizeStr(bi BigInt) string {
if bi.NilOrZero() {
return "0 B"
}
r := new(big.Rat).SetInt(bi.Int)
den := big.NewRat(1, 1024)
var i int
for f, _ := r.Float64(); f >= 1024 && i+1 < len(byteSizeUnits); f, _ = r.Float64() {
i++
r = r.Mul(r, den)
}
f, _ := r.Float64()
return fmt.Sprintf("%.4g %s", f, byteSizeUnits[i])
}
var deciUnits = []string{"", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"}
func DeciStr(bi BigInt) string {
if bi.NilOrZero() {
return "0 B"
}
r := new(big.Rat).SetInt(bi.Int)
den := big.NewRat(1, 1024)
var i int
for f, _ := r.Float64(); f >= 1024 && i+1 < len(deciUnits); f, _ = r.Float64() {
i++
r = r.Mul(r, den)
}
f, _ := r.Float64()
return fmt.Sprintf("%.3g %s", f, deciUnits[i])
}
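// Illustrative sketch (not from the original source): SizeStr walks up the binary unit
// ladder, so exactly 1 GiB formats as "1 GiB" and zero formats as "0 B". The
// exampleSizeStr helper is hypothetical.
func exampleSizeStr() (string, string) {
	// "1 GiB", "0 B"
	return SizeStr(NewInt(1 << 30)), SizeStr(NewInt(0))
}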
package types
import (
"bytes"
"math/big"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"golang.org/x/crypto/blake2b"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
"github.com/filecoin-project/go-state-types/proof"
)
type Ticket struct {
VRFProof []byte
}
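// Quality maps the blake2b-256 hash of the VRF proof onto (0, 1]:
// quality = 1 - H(VRFProof)/2^256, so a numerically smaller hash yields a
// higher ticket quality.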
func (t *Ticket) Quality() float64 {
ticketHash := blake2b.Sum256(t.VRFProof)
ticketNum := BigFromBytes(ticketHash[:]).Int
ticketDenu := big.NewInt(1)
ticketDenu.Lsh(ticketDenu, 256)
tv, _ := new(big.Rat).SetFrac(ticketNum, ticketDenu).Float64()
tq := 1 - tv
return tq
}
type BeaconEntry struct {
Round uint64
Data []byte
}
func NewBeaconEntry(round uint64, data []byte) BeaconEntry {
return BeaconEntry{
Round: round,
Data: data,
}
}
type BlockHeader struct {
Miner address.Address // 0 unique per block/miner
Ticket *Ticket // 1 unique per block/miner: should be a valid VRF
ElectionProof *ElectionProof // 2 unique per block/miner: should be a valid VRF
BeaconEntries []BeaconEntry // 3 identical for all blocks in same tipset
WinPoStProof []proof.PoStProof // 4 unique per block/miner
Parents []cid.Cid // 5 identical for all blocks in same tipset
ParentWeight BigInt // 6 identical for all blocks in same tipset
Height abi.ChainEpoch // 7 identical for all blocks in same tipset
ParentStateRoot cid.Cid // 8 identical for all blocks in same tipset
ParentMessageReceipts cid.Cid // 9 identical for all blocks in same tipset
Messages cid.Cid // 10 unique per block
BLSAggregate *crypto.Signature // 11 unique per block: aggregate of BLS messages from above
Timestamp uint64 // 12 identical for all blocks in same tipset / hard-tied to the value of Height above
BlockSig *crypto.Signature // 13 unique per block/miner: miner signature
ForkSignaling uint64 // 14 currently unused/undefined
ParentBaseFee abi.TokenAmount // 15 identical for all blocks in same tipset: the base fee after executing parent tipset
validated bool // internal, true if the signature has been validated
}
func (blk *BlockHeader) ToStorageBlock() (block.Block, error) {
data, err := blk.Serialize()
if err != nil {
return nil, err
}
c, err := abi.CidBuilder.Sum(data)
if err != nil {
return nil, err
}
return block.NewBlockWithCid(data, c)
}
func (blk *BlockHeader) Cid() cid.Cid {
sb, err := blk.ToStorageBlock()
if err != nil {
panic(err) // Not sure I'm entirely comfortable with this one; needs to be checked
}
return sb.Cid()
}
func DecodeBlock(b []byte) (*BlockHeader, error) {
var blk BlockHeader
if err := blk.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
return nil, err
}
return &blk, nil
}
func (blk *BlockHeader) Serialize() ([]byte, error) {
buf := new(bytes.Buffer)
if err := blk.MarshalCBOR(buf); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
func (blk *BlockHeader) LastTicket() *Ticket {
return blk.Ticket
}
func (blk *BlockHeader) SigningBytes() ([]byte, error) {
blkcopy := *blk
blkcopy.BlockSig = nil
return blkcopy.Serialize()
}
func (blk *BlockHeader) SetValidated() {
blk.validated = true
}
func (blk *BlockHeader) IsValidated() bool {
return blk.validated
}
type MsgMeta struct {
BlsMessages cid.Cid
SecpkMessages cid.Cid
}
func (mm *MsgMeta) Cid() cid.Cid {
b, err := mm.ToStorageBlock()
if err != nil {
panic(err) // same concern as BlockHeader.Cid: panics if marshalling fails
}
return b.Cid()
}
func (mm *MsgMeta) ToStorageBlock() (block.Block, error) {
var buf bytes.Buffer
if err := mm.MarshalCBOR(&buf); err != nil {
return nil, xerrors.Errorf("failed to marshal MsgMeta: %w", err)
}
c, err := abi.CidBuilder.Sum(buf.Bytes())
if err != nil {
return nil, err
}
return block.NewBlockWithCid(buf.Bytes(), c)
}
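// CidArrsEqual reports whether a and b have the same length and contain the
// same set of CIDs. The comparison ignores order (and, as a consequence,
// multiplicity): equal-length slices over the same distinct CIDs compare equal
// even if individual CIDs repeat a different number of times.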
func CidArrsEqual(a, b []cid.Cid) bool {
if len(a) != len(b) {
return false
}
// order-ignoring comparison
s := make(map[cid.Cid]bool)
for _, c := range a {
s[c] = true
}
for _, c := range b {
if !s[c] {
return false
}
}
return true
}
func CidArrsSubset(a, b []cid.Cid) bool {
// order-ignoring comparison
s := make(map[cid.Cid]bool)
for _, c := range b {
s[c] = true
}
for _, c := range a {
if !s[c] {
return false
}
}
return true
}
func CidArrsContains(a []cid.Cid, b cid.Cid) bool {
for _, elem := range a {
if elem.Equals(b) {
return true
}
}
return false
}
func (t *Ticket) Equals(ot *Ticket) bool {
return bytes.Equal(t.VRFProof, ot.VRFProof)
}
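// A minimal sketch of the intent behind SigningBytes (illustrative helper, not
// part of the Lotus API): the signed bytes are the CBOR encoding of the header
// with BlockSig cleared, so they differ from Serialize() exactly when a
// signature is present.
func signingBytesExcludeSignature(blk *BlockHeader) (bool, error) {
	full, err := blk.Serialize()
	if err != nil {
		return false, err
	}
	signing, err := blk.SigningBytes()
	if err != nil {
		return false, err
	}
	if blk.BlockSig == nil {
		return bytes.Equal(full, signing), nil // identical when there is nothing to strip
	}
	return !bytes.Equal(full, signing), nil // the signature bytes are excluded
}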
package types
import (
"bytes"
"fmt"
"github.com/ipfs/go-cid"
)
type BlockMsg struct {
Header *BlockHeader
BlsMessages []cid.Cid
SecpkMessages []cid.Cid
}
func DecodeBlockMsg(b []byte) (*BlockMsg, error) {
var bm BlockMsg
data := bytes.NewReader(b)
if err := bm.UnmarshalCBOR(data); err != nil {
return nil, err
}
if l := data.Len(); l != 0 {
return nil, fmt.Errorf("extraneous data in BlockMsg CBOR encoding: got %d unexpected bytes", l)
}
return &bm, nil
}
func (bm *BlockMsg) Cid() cid.Cid {
return bm.Header.Cid()
}
func (bm *BlockMsg) Serialize() ([]byte, error) {
buf := new(bytes.Buffer)
if err := bm.MarshalCBOR(buf); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
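// A minimal sketch of the strictness check in DecodeBlockMsg above (illustrative
// helper, not part of the Lotus API): any bytes left over after the CBOR object
// are rejected, so appending garbage to a valid encoding must fail to decode.
func rejectsTrailingBytes(bm *BlockMsg) (bool, error) {
	enc, err := bm.Serialize()
	if err != nil {
		return false, err
	}
	_, err = DecodeBlockMsg(append(enc, 0x00))
	return err != nil, nil
}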
// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT.
package types
import (
"fmt"
"io"
"math"
"sort"
time "time"
cid "github.com/ipfs/go-cid"
cbg "github.com/whyrusleeping/cbor-gen"
xerrors "golang.org/x/xerrors"
address "github.com/filecoin-project/go-address"
abi "github.com/filecoin-project/go-state-types/abi"
crypto "github.com/filecoin-project/go-state-types/crypto"
exitcode "github.com/filecoin-project/go-state-types/exitcode"
proof "github.com/filecoin-project/go-state-types/proof"
)
var _ = xerrors.Errorf
var _ = cid.Undef
var _ = math.E
var _ = sort.Sort
var lengthBufBlockHeader = []byte{144}
func (t *BlockHeader) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufBlockHeader); err != nil {
return err
}
// t.Miner (address.Address) (struct)
if err := t.Miner.MarshalCBOR(cw); err != nil {
return err
}
// t.Ticket (types.Ticket) (struct)
if err := t.Ticket.MarshalCBOR(cw); err != nil {
return err
}
// t.ElectionProof (types.ElectionProof) (struct)
if err := t.ElectionProof.MarshalCBOR(cw); err != nil {
return err
}
// t.BeaconEntries ([]types.BeaconEntry) (slice)
if len(t.BeaconEntries) > 8192 {
return xerrors.Errorf("Slice value in field t.BeaconEntries was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.BeaconEntries))); err != nil {
return err
}
for _, v := range t.BeaconEntries {
if err := v.MarshalCBOR(cw); err != nil {
return err
}
}
// t.WinPoStProof ([]proof.PoStProof) (slice)
if len(t.WinPoStProof) > 8192 {
return xerrors.Errorf("Slice value in field t.WinPoStProof was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.WinPoStProof))); err != nil {
return err
}
for _, v := range t.WinPoStProof {
if err := v.MarshalCBOR(cw); err != nil {
return err
}
}
// t.Parents ([]cid.Cid) (slice)
if len(t.Parents) > 8192 {
return xerrors.Errorf("Slice value in field t.Parents was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Parents))); err != nil {
return err
}
for _, v := range t.Parents {
if err := cbg.WriteCid(cw, v); err != nil {
return xerrors.Errorf("failed to write cid field v: %w", err)
}
}
// t.ParentWeight (big.Int) (struct)
if err := t.ParentWeight.MarshalCBOR(cw); err != nil {
return err
}
// t.Height (abi.ChainEpoch) (int64)
if t.Height >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Height)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Height-1)); err != nil {
return err
}
}
// t.ParentStateRoot (cid.Cid) (struct)
if err := cbg.WriteCid(cw, t.ParentStateRoot); err != nil {
return xerrors.Errorf("failed to write cid field t.ParentStateRoot: %w", err)
}
// t.ParentMessageReceipts (cid.Cid) (struct)
if err := cbg.WriteCid(cw, t.ParentMessageReceipts); err != nil {
return xerrors.Errorf("failed to write cid field t.ParentMessageReceipts: %w", err)
}
// t.Messages (cid.Cid) (struct)
if err := cbg.WriteCid(cw, t.Messages); err != nil {
return xerrors.Errorf("failed to write cid field t.Messages: %w", err)
}
// t.BLSAggregate (crypto.Signature) (struct)
if err := t.BLSAggregate.MarshalCBOR(cw); err != nil {
return err
}
// t.Timestamp (uint64) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Timestamp)); err != nil {
return err
}
// t.BlockSig (crypto.Signature) (struct)
if err := t.BlockSig.MarshalCBOR(cw); err != nil {
return err
}
// t.ForkSignaling (uint64) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ForkSignaling)); err != nil {
return err
}
// t.ParentBaseFee (big.Int) (struct)
if err := t.ParentBaseFee.MarshalCBOR(cw); err != nil {
return err
}
return nil
}
func (t *BlockHeader) UnmarshalCBOR(r io.Reader) (err error) {
*t = BlockHeader{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 16 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.Miner (address.Address) (struct)
{
if err := t.Miner.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Miner: %w", err)
}
}
// t.Ticket (types.Ticket) (struct)
{
b, err := cr.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := cr.UnreadByte(); err != nil {
return err
}
t.Ticket = new(Ticket)
if err := t.Ticket.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Ticket pointer: %w", err)
}
}
}
// t.ElectionProof (types.ElectionProof) (struct)
{
b, err := cr.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := cr.UnreadByte(); err != nil {
return err
}
t.ElectionProof = new(ElectionProof)
if err := t.ElectionProof.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.ElectionProof pointer: %w", err)
}
}
}
// t.BeaconEntries ([]types.BeaconEntry) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > 8192 {
return fmt.Errorf("t.BeaconEntries: array too large (%d)", extra)
}
if maj != cbg.MajArray {
return fmt.Errorf("expected cbor array")
}
if extra > 0 {
t.BeaconEntries = make([]BeaconEntry, extra)
}
for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
{
if err := t.BeaconEntries[i].UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.BeaconEntries[i]: %w", err)
}
}
}
}
// t.WinPoStProof ([]proof.PoStProof) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > 8192 {
return fmt.Errorf("t.WinPoStProof: array too large (%d)", extra)
}
if maj != cbg.MajArray {
return fmt.Errorf("expected cbor array")
}
if extra > 0 {
t.WinPoStProof = make([]proof.PoStProof, extra)
}
for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
{
if err := t.WinPoStProof[i].UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.WinPoStProof[i]: %w", err)
}
}
}
}
// t.Parents ([]cid.Cid) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > 8192 {
return fmt.Errorf("t.Parents: array too large (%d)", extra)
}
if maj != cbg.MajArray {
return fmt.Errorf("expected cbor array")
}
if extra > 0 {
t.Parents = make([]cid.Cid, extra)
}
for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
{
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.Parents[i]: %w", err)
}
t.Parents[i] = c
}
}
}
// t.ParentWeight (big.Int) (struct)
{
if err := t.ParentWeight.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.ParentWeight: %w", err)
}
}
// t.Height (abi.ChainEpoch) (int64)
{
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
var extraI int64
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.Height = abi.ChainEpoch(extraI)
}
// t.ParentStateRoot (cid.Cid) (struct)
{
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.ParentStateRoot: %w", err)
}
t.ParentStateRoot = c
}
// t.ParentMessageReceipts (cid.Cid) (struct)
{
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.ParentMessageReceipts: %w", err)
}
t.ParentMessageReceipts = c
}
// t.Messages (cid.Cid) (struct)
{
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.Messages: %w", err)
}
t.Messages = c
}
// t.BLSAggregate (crypto.Signature) (struct)
{
b, err := cr.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := cr.UnreadByte(); err != nil {
return err
}
t.BLSAggregate = new(crypto.Signature)
if err := t.BLSAggregate.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.BLSAggregate pointer: %w", err)
}
}
}
// t.Timestamp (uint64) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.Timestamp = uint64(extra)
}
// t.BlockSig (crypto.Signature) (struct)
{
b, err := cr.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := cr.UnreadByte(); err != nil {
return err
}
t.BlockSig = new(crypto.Signature)
if err := t.BlockSig.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.BlockSig pointer: %w", err)
}
}
}
// t.ForkSignaling (uint64) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.ForkSignaling = uint64(extra)
}
// t.ParentBaseFee (big.Int) (struct)
{
if err := t.ParentBaseFee.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.ParentBaseFee: %w", err)
}
}
return nil
}
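// Note on the int64 fields in this generated file (Height, WinCount, GasLimit,
// TotalGas, and friends): non-negative values are written as CBOR major type 0
// (MajUnsignedInt) carrying v, negative values as major type 1 (MajNegativeInt)
// carrying -v-1, and the decoders invert that with extraI = -1 - extraI. For
// example, Height = -5 is encoded as header value 4 and read back as -1-4 = -5.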
var lengthBufTicket = []byte{129}
func (t *Ticket) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufTicket); err != nil {
return err
}
// t.VRFProof ([]uint8) (slice)
if len(t.VRFProof) > 2097152 {
return xerrors.Errorf("Byte array in field t.VRFProof was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.VRFProof))); err != nil {
return err
}
if _, err := cw.Write(t.VRFProof); err != nil {
return err
}
return nil
}
func (t *Ticket) UnmarshalCBOR(r io.Reader) (err error) {
*t = Ticket{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 1 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.VRFProof ([]uint8) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > 2097152 {
return fmt.Errorf("t.VRFProof: byte array too large (%d)", extra)
}
if maj != cbg.MajByteString {
return fmt.Errorf("expected byte array")
}
if extra > 0 {
t.VRFProof = make([]uint8, extra)
}
if _, err := io.ReadFull(cr, t.VRFProof); err != nil {
return err
}
return nil
}
var lengthBufElectionProof = []byte{130}
func (t *ElectionProof) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufElectionProof); err != nil {
return err
}
// t.WinCount (int64) (int64)
if t.WinCount >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.WinCount)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.WinCount-1)); err != nil {
return err
}
}
// t.VRFProof ([]uint8) (slice)
if len(t.VRFProof) > 2097152 {
return xerrors.Errorf("Byte array in field t.VRFProof was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.VRFProof))); err != nil {
return err
}
if _, err := cw.Write(t.VRFProof); err != nil {
return err
}
return nil
}
func (t *ElectionProof) UnmarshalCBOR(r io.Reader) (err error) {
*t = ElectionProof{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 2 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.WinCount (int64) (int64)
{
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
var extraI int64
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.WinCount = int64(extraI)
}
// t.VRFProof ([]uint8) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > 2097152 {
return fmt.Errorf("t.VRFProof: byte array too large (%d)", extra)
}
if maj != cbg.MajByteString {
return fmt.Errorf("expected byte array")
}
if extra > 0 {
t.VRFProof = make([]uint8, extra)
}
if _, err := io.ReadFull(cr, t.VRFProof); err != nil {
return err
}
return nil
}
var lengthBufMessage = []byte{138}
func (t *Message) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufMessage); err != nil {
return err
}
// t.Version (uint64) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Version)); err != nil {
return err
}
// t.To (address.Address) (struct)
if err := t.To.MarshalCBOR(cw); err != nil {
return err
}
// t.From (address.Address) (struct)
if err := t.From.MarshalCBOR(cw); err != nil {
return err
}
// t.Nonce (uint64) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Nonce)); err != nil {
return err
}
// t.Value (big.Int) (struct)
if err := t.Value.MarshalCBOR(cw); err != nil {
return err
}
// t.GasLimit (int64) (int64)
if t.GasLimit >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.GasLimit)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.GasLimit-1)); err != nil {
return err
}
}
// t.GasFeeCap (big.Int) (struct)
if err := t.GasFeeCap.MarshalCBOR(cw); err != nil {
return err
}
// t.GasPremium (big.Int) (struct)
if err := t.GasPremium.MarshalCBOR(cw); err != nil {
return err
}
// t.Method (abi.MethodNum) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Method)); err != nil {
return err
}
// t.Params ([]uint8) (slice)
if len(t.Params) > 2097152 {
return xerrors.Errorf("Byte array in field t.Params was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Params))); err != nil {
return err
}
if _, err := cw.Write(t.Params); err != nil {
return err
}
return nil
}
func (t *Message) UnmarshalCBOR(r io.Reader) (err error) {
*t = Message{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 10 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.Version (uint64) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.Version = uint64(extra)
}
// t.To (address.Address) (struct)
{
if err := t.To.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.To: %w", err)
}
}
// t.From (address.Address) (struct)
{
if err := t.From.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.From: %w", err)
}
}
// t.Nonce (uint64) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.Nonce = uint64(extra)
}
// t.Value (big.Int) (struct)
{
if err := t.Value.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Value: %w", err)
}
}
// t.GasLimit (int64) (int64)
{
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
var extraI int64
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.GasLimit = int64(extraI)
}
// t.GasFeeCap (big.Int) (struct)
{
if err := t.GasFeeCap.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.GasFeeCap: %w", err)
}
}
// t.GasPremium (big.Int) (struct)
{
if err := t.GasPremium.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.GasPremium: %w", err)
}
}
// t.Method (abi.MethodNum) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.Method = abi.MethodNum(extra)
}
// t.Params ([]uint8) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > 2097152 {
return fmt.Errorf("t.Params: byte array too large (%d)", extra)
}
if maj != cbg.MajByteString {
return fmt.Errorf("expected byte array")
}
if extra > 0 {
t.Params = make([]uint8, extra)
}
if _, err := io.ReadFull(cr, t.Params); err != nil {
return err
}
return nil
}
var lengthBufSignedMessage = []byte{130}
func (t *SignedMessage) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufSignedMessage); err != nil {
return err
}
// t.Message (types.Message) (struct)
if err := t.Message.MarshalCBOR(cw); err != nil {
return err
}
// t.Signature (crypto.Signature) (struct)
if err := t.Signature.MarshalCBOR(cw); err != nil {
return err
}
return nil
}
func (t *SignedMessage) UnmarshalCBOR(r io.Reader) (err error) {
*t = SignedMessage{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 2 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.Message (types.Message) (struct)
{
if err := t.Message.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Message: %w", err)
}
}
// t.Signature (crypto.Signature) (struct)
{
if err := t.Signature.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Signature: %w", err)
}
}
return nil
}
var lengthBufMsgMeta = []byte{130}
func (t *MsgMeta) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufMsgMeta); err != nil {
return err
}
// t.BlsMessages (cid.Cid) (struct)
if err := cbg.WriteCid(cw, t.BlsMessages); err != nil {
return xerrors.Errorf("failed to write cid field t.BlsMessages: %w", err)
}
// t.SecpkMessages (cid.Cid) (struct)
if err := cbg.WriteCid(cw, t.SecpkMessages); err != nil {
return xerrors.Errorf("failed to write cid field t.SecpkMessages: %w", err)
}
return nil
}
func (t *MsgMeta) UnmarshalCBOR(r io.Reader) (err error) {
*t = MsgMeta{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 2 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.BlsMessages (cid.Cid) (struct)
{
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.BlsMessages: %w", err)
}
t.BlsMessages = c
}
// t.SecpkMessages (cid.Cid) (struct)
{
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.SecpkMessages: %w", err)
}
t.SecpkMessages = c
}
return nil
}
var lengthBufActorV4 = []byte{132}
func (t *ActorV4) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufActorV4); err != nil {
return err
}
// t.Code (cid.Cid) (struct)
if err := cbg.WriteCid(cw, t.Code); err != nil {
return xerrors.Errorf("failed to write cid field t.Code: %w", err)
}
// t.Head (cid.Cid) (struct)
if err := cbg.WriteCid(cw, t.Head); err != nil {
return xerrors.Errorf("failed to write cid field t.Head: %w", err)
}
// t.Nonce (uint64) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Nonce)); err != nil {
return err
}
// t.Balance (big.Int) (struct)
if err := t.Balance.MarshalCBOR(cw); err != nil {
return err
}
return nil
}
func (t *ActorV4) UnmarshalCBOR(r io.Reader) (err error) {
*t = ActorV4{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 4 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.Code (cid.Cid) (struct)
{
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.Code: %w", err)
}
t.Code = c
}
// t.Head (cid.Cid) (struct)
{
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.Head: %w", err)
}
t.Head = c
}
// t.Nonce (uint64) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.Nonce = uint64(extra)
}
// t.Balance (big.Int) (struct)
{
if err := t.Balance.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Balance: %w", err)
}
}
return nil
}
var lengthBufActorV5 = []byte{133}
func (t *ActorV5) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufActorV5); err != nil {
return err
}
// t.Code (cid.Cid) (struct)
if err := cbg.WriteCid(cw, t.Code); err != nil {
return xerrors.Errorf("failed to write cid field t.Code: %w", err)
}
// t.Head (cid.Cid) (struct)
if err := cbg.WriteCid(cw, t.Head); err != nil {
return xerrors.Errorf("failed to write cid field t.Head: %w", err)
}
// t.Nonce (uint64) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Nonce)); err != nil {
return err
}
// t.Balance (big.Int) (struct)
if err := t.Balance.MarshalCBOR(cw); err != nil {
return err
}
// t.DelegatedAddress (address.Address) (struct)
if err := t.DelegatedAddress.MarshalCBOR(cw); err != nil {
return err
}
return nil
}
func (t *ActorV5) UnmarshalCBOR(r io.Reader) (err error) {
*t = ActorV5{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 5 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.Code (cid.Cid) (struct)
{
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.Code: %w", err)
}
t.Code = c
}
// t.Head (cid.Cid) (struct)
{
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.Head: %w", err)
}
t.Head = c
}
// t.Nonce (uint64) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.Nonce = uint64(extra)
}
// t.Balance (big.Int) (struct)
{
if err := t.Balance.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Balance: %w", err)
}
}
// t.DelegatedAddress (address.Address) (struct)
{
b, err := cr.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := cr.UnreadByte(); err != nil {
return err
}
t.DelegatedAddress = new(address.Address)
if err := t.DelegatedAddress.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.DelegatedAddress pointer: %w", err)
}
}
}
return nil
}
var lengthBufBlockMsg = []byte{131}
func (t *BlockMsg) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufBlockMsg); err != nil {
return err
}
// t.Header (types.BlockHeader) (struct)
if err := t.Header.MarshalCBOR(cw); err != nil {
return err
}
// t.BlsMessages ([]cid.Cid) (slice)
if len(t.BlsMessages) > 8192 {
return xerrors.Errorf("Slice value in field t.BlsMessages was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.BlsMessages))); err != nil {
return err
}
for _, v := range t.BlsMessages {
if err := cbg.WriteCid(cw, v); err != nil {
return xerrors.Errorf("failed to write cid field v: %w", err)
}
}
// t.SecpkMessages ([]cid.Cid) (slice)
if len(t.SecpkMessages) > 8192 {
return xerrors.Errorf("Slice value in field t.SecpkMessages was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.SecpkMessages))); err != nil {
return err
}
for _, v := range t.SecpkMessages {
if err := cbg.WriteCid(cw, v); err != nil {
return xerrors.Errorf("failed to write cid field v: %w", err)
}
}
return nil
}
func (t *BlockMsg) UnmarshalCBOR(r io.Reader) (err error) {
*t = BlockMsg{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 3 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.Header (types.BlockHeader) (struct)
{
b, err := cr.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := cr.UnreadByte(); err != nil {
return err
}
t.Header = new(BlockHeader)
if err := t.Header.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Header pointer: %w", err)
}
}
}
// t.BlsMessages ([]cid.Cid) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > 8192 {
return fmt.Errorf("t.BlsMessages: array too large (%d)", extra)
}
if maj != cbg.MajArray {
return fmt.Errorf("expected cbor array")
}
if extra > 0 {
t.BlsMessages = make([]cid.Cid, extra)
}
for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
{
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.BlsMessages[i]: %w", err)
}
t.BlsMessages[i] = c
}
}
}
// t.SecpkMessages ([]cid.Cid) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > 8192 {
return fmt.Errorf("t.SecpkMessages: array too large (%d)", extra)
}
if maj != cbg.MajArray {
return fmt.Errorf("expected cbor array")
}
if extra > 0 {
t.SecpkMessages = make([]cid.Cid, extra)
}
for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
{
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.SecpkMessages[i]: %w", err)
}
t.SecpkMessages[i] = c
}
}
}
return nil
}
var lengthBufExpTipSet = []byte{131}
func (t *ExpTipSet) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufExpTipSet); err != nil {
return err
}
// t.Cids ([]cid.Cid) (slice)
if len(t.Cids) > 8192 {
return xerrors.Errorf("Slice value in field t.Cids was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Cids))); err != nil {
return err
}
for _, v := range t.Cids {
if err := cbg.WriteCid(cw, v); err != nil {
return xerrors.Errorf("failed to write cid field v: %w", err)
}
}
// t.Blocks ([]*types.BlockHeader) (slice)
if len(t.Blocks) > 8192 {
return xerrors.Errorf("Slice value in field t.Blocks was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Blocks))); err != nil {
return err
}
for _, v := range t.Blocks {
if err := v.MarshalCBOR(cw); err != nil {
return err
}
}
// t.Height (abi.ChainEpoch) (int64)
if t.Height >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Height)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Height-1)); err != nil {
return err
}
}
return nil
}
func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) (err error) {
*t = ExpTipSet{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 3 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.Cids ([]cid.Cid) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > 8192 {
return fmt.Errorf("t.Cids: array too large (%d)", extra)
}
if maj != cbg.MajArray {
return fmt.Errorf("expected cbor array")
}
if extra > 0 {
t.Cids = make([]cid.Cid, extra)
}
for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
{
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.Cids[i]: %w", err)
}
t.Cids[i] = c
}
}
}
// t.Blocks ([]*types.BlockHeader) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > 8192 {
return fmt.Errorf("t.Blocks: array too large (%d)", extra)
}
if maj != cbg.MajArray {
return fmt.Errorf("expected cbor array")
}
if extra > 0 {
t.Blocks = make([]*BlockHeader, extra)
}
for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
{
b, err := cr.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := cr.UnreadByte(); err != nil {
return err
}
t.Blocks[i] = new(BlockHeader)
if err := t.Blocks[i].UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Blocks[i] pointer: %w", err)
}
}
}
}
}
// t.Height (abi.ChainEpoch) (int64)
{
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
var extraI int64
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.Height = abi.ChainEpoch(extraI)
}
return nil
}
var lengthBufBeaconEntry = []byte{130}
func (t *BeaconEntry) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufBeaconEntry); err != nil {
return err
}
// t.Round (uint64) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Round)); err != nil {
return err
}
// t.Data ([]uint8) (slice)
if len(t.Data) > 2097152 {
return xerrors.Errorf("Byte array in field t.Data was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Data))); err != nil {
return err
}
if _, err := cw.Write(t.Data); err != nil {
return err
}
return nil
}
func (t *BeaconEntry) UnmarshalCBOR(r io.Reader) (err error) {
*t = BeaconEntry{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 2 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.Round (uint64) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.Round = uint64(extra)
}
// t.Data ([]uint8) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > 2097152 {
return fmt.Errorf("t.Data: byte array too large (%d)", extra)
}
if maj != cbg.MajByteString {
return fmt.Errorf("expected byte array")
}
if extra > 0 {
t.Data = make([]uint8, extra)
}
if _, err := io.ReadFull(cr, t.Data); err != nil {
return err
}
return nil
}
var lengthBufStateRoot = []byte{131}
func (t *StateRoot) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufStateRoot); err != nil {
return err
}
// t.Version (types.StateTreeVersion) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Version)); err != nil {
return err
}
// t.Actors (cid.Cid) (struct)
if err := cbg.WriteCid(cw, t.Actors); err != nil {
return xerrors.Errorf("failed to write cid field t.Actors: %w", err)
}
// t.Info (cid.Cid) (struct)
if err := cbg.WriteCid(cw, t.Info); err != nil {
return xerrors.Errorf("failed to write cid field t.Info: %w", err)
}
return nil
}
func (t *StateRoot) UnmarshalCBOR(r io.Reader) (err error) {
*t = StateRoot{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 3 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.Version (types.StateTreeVersion) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.Version = StateTreeVersion(extra)
}
// t.Actors (cid.Cid) (struct)
{
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.Actors: %w", err)
}
t.Actors = c
}
// t.Info (cid.Cid) (struct)
{
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.Info: %w", err)
}
t.Info = c
}
return nil
}
var lengthBufStateInfo0 = []byte{128}
func (t *StateInfo0) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufStateInfo0); err != nil {
return err
}
return nil
}
func (t *StateInfo0) UnmarshalCBOR(r io.Reader) (err error) {
*t = StateInfo0{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 0 {
return fmt.Errorf("cbor input had wrong number of fields")
}
return nil
}
var lengthBufEvent = []byte{130}
func (t *Event) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufEvent); err != nil {
return err
}
// t.Emitter (abi.ActorID) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Emitter)); err != nil {
return err
}
// t.Entries ([]types.EventEntry) (slice)
if len(t.Entries) > 8192 {
return xerrors.Errorf("Slice value in field t.Entries was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Entries))); err != nil {
return err
}
for _, v := range t.Entries {
if err := v.MarshalCBOR(cw); err != nil {
return err
}
}
return nil
}
func (t *Event) UnmarshalCBOR(r io.Reader) (err error) {
*t = Event{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 2 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.Emitter (abi.ActorID) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.Emitter = abi.ActorID(extra)
}
// t.Entries ([]types.EventEntry) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > 8192 {
return fmt.Errorf("t.Entries: array too large (%d)", extra)
}
if maj != cbg.MajArray {
return fmt.Errorf("expected cbor array")
}
if extra > 0 {
t.Entries = make([]EventEntry, extra)
}
for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
{
if err := t.Entries[i].UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Entries[i]: %w", err)
}
}
}
}
return nil
}
var lengthBufEventEntry = []byte{132}
func (t *EventEntry) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufEventEntry); err != nil {
return err
}
// t.Flags (uint8) (uint8)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Flags)); err != nil {
return err
}
// t.Key (string) (string)
if len(t.Key) > 8192 {
return xerrors.Errorf("Value in field t.Key was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Key))); err != nil {
return err
}
if _, err := cw.WriteString(string(t.Key)); err != nil {
return err
}
// t.Codec (uint64) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Codec)); err != nil {
return err
}
// t.Value ([]uint8) (slice)
if len(t.Value) > 2097152 {
return xerrors.Errorf("Byte array in field t.Value was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Value))); err != nil {
return err
}
if _, err := cw.Write(t.Value); err != nil {
return err
}
return nil
}
func (t *EventEntry) UnmarshalCBOR(r io.Reader) (err error) {
*t = EventEntry{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 4 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.Flags (uint8) (uint8)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint8 field")
}
if extra > math.MaxUint8 {
return fmt.Errorf("integer in input was too large for uint8 field")
}
t.Flags = uint8(extra)
// t.Key (string) (string)
{
sval, err := cbg.ReadStringWithMax(cr, 8192)
if err != nil {
return err
}
t.Key = string(sval)
}
// t.Codec (uint64) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.Codec = uint64(extra)
}
// t.Value ([]uint8) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > 2097152 {
return fmt.Errorf("t.Value: byte array too large (%d)", extra)
}
if maj != cbg.MajByteString {
return fmt.Errorf("expected byte array")
}
if extra > 0 {
t.Value = make([]uint8, extra)
}
if _, err := io.ReadFull(cr, t.Value); err != nil {
return err
}
return nil
}
var lengthBufGasTrace = []byte{133}
func (t *GasTrace) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufGasTrace); err != nil {
return err
}
// t.Name (string) (string)
if len(t.Name) > 8192 {
return xerrors.Errorf("Value in field t.Name was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Name))); err != nil {
return err
}
if _, err := cw.WriteString(string(t.Name)); err != nil {
return err
}
// t.TotalGas (int64) (int64)
if t.TotalGas >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TotalGas)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.TotalGas-1)); err != nil {
return err
}
}
// t.ComputeGas (int64) (int64)
if t.ComputeGas >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ComputeGas)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.ComputeGas-1)); err != nil {
return err
}
}
// t.StorageGas (int64) (int64)
if t.StorageGas >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.StorageGas)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.StorageGas-1)); err != nil {
return err
}
}
// t.TimeTaken (time.Duration) (int64)
if t.TimeTaken >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.TimeTaken)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.TimeTaken-1)); err != nil {
return err
}
}
return nil
}
func (t *GasTrace) UnmarshalCBOR(r io.Reader) (err error) {
*t = GasTrace{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 5 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.Name (string) (string)
{
sval, err := cbg.ReadStringWithMax(cr, 8192)
if err != nil {
return err
}
t.Name = string(sval)
}
// t.TotalGas (int64) (int64)
{
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
var extraI int64
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.TotalGas = int64(extraI)
}
// t.ComputeGas (int64) (int64)
{
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
var extraI int64
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.ComputeGas = int64(extraI)
}
// t.StorageGas (int64) (int64)
{
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
var extraI int64
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.StorageGas = int64(extraI)
}
// t.TimeTaken (time.Duration) (int64)
{
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
var extraI int64
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.TimeTaken = time.Duration(extraI)
}
return nil
}
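// TimeTaken is a time.Duration, i.e. an int64 count of nanoseconds, so it
// round-trips through the same signed-int64 encoding as the gas fields above.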
var lengthBufActorTrace = []byte{130}
func (t *ActorTrace) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufActorTrace); err != nil {
return err
}
// t.Id (abi.ActorID) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Id)); err != nil {
return err
}
// t.State (types.ActorV5) (struct)
if err := t.State.MarshalCBOR(cw); err != nil {
return err
}
return nil
}
func (t *ActorTrace) UnmarshalCBOR(r io.Reader) (err error) {
*t = ActorTrace{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 2 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.Id (abi.ActorID) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.Id = abi.ActorID(extra)
}
// t.State (types.ActorV5) (struct)
{
if err := t.State.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.State: %w", err)
}
}
return nil
}
var lengthBufMessageTrace = []byte{136}
func (t *MessageTrace) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufMessageTrace); err != nil {
return err
}
// t.From (address.Address) (struct)
if err := t.From.MarshalCBOR(cw); err != nil {
return err
}
// t.To (address.Address) (struct)
if err := t.To.MarshalCBOR(cw); err != nil {
return err
}
// t.Value (big.Int) (struct)
if err := t.Value.MarshalCBOR(cw); err != nil {
return err
}
// t.Method (abi.MethodNum) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Method)); err != nil {
return err
}
// t.Params ([]uint8) (slice)
if len(t.Params) > 2097152 {
return xerrors.Errorf("Byte array in field t.Params was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Params))); err != nil {
return err
}
if _, err := cw.Write(t.Params); err != nil {
return err
}
// t.ParamsCodec (uint64) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ParamsCodec)); err != nil {
return err
}
// t.GasLimit (uint64) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.GasLimit)); err != nil {
return err
}
// t.ReadOnly (bool) (bool)
if err := cbg.WriteBool(w, t.ReadOnly); err != nil {
return err
}
return nil
}
func (t *MessageTrace) UnmarshalCBOR(r io.Reader) (err error) {
*t = MessageTrace{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 8 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.From (address.Address) (struct)
{
if err := t.From.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.From: %w", err)
}
}
// t.To (address.Address) (struct)
{
if err := t.To.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.To: %w", err)
}
}
// t.Value (big.Int) (struct)
{
if err := t.Value.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Value: %w", err)
}
}
// t.Method (abi.MethodNum) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.Method = abi.MethodNum(extra)
}
// t.Params ([]uint8) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > 2097152 {
return fmt.Errorf("t.Params: byte array too large (%d)", extra)
}
if maj != cbg.MajByteString {
return fmt.Errorf("expected byte array")
}
if extra > 0 {
t.Params = make([]uint8, extra)
}
if _, err := io.ReadFull(cr, t.Params); err != nil {
return err
}
// t.ParamsCodec (uint64) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.ParamsCodec = uint64(extra)
}
// t.GasLimit (uint64) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.GasLimit = uint64(extra)
}
// t.ReadOnly (bool) (bool)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajOther {
return fmt.Errorf("booleans must be major type 7")
}
switch extra {
case 20:
t.ReadOnly = false
case 21:
t.ReadOnly = true
default:
return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra)
}
return nil
}
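// ReadOnly above follows the CBOR encoding of booleans: major type 7 with
// simple value 20 for false and 21 for true; anything else is rejected.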
var lengthBufReturnTrace = []byte{131}
func (t *ReturnTrace) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufReturnTrace); err != nil {
return err
}
// t.ExitCode (exitcode.ExitCode) (int64)
if t.ExitCode >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ExitCode)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.ExitCode-1)); err != nil {
return err
}
}
// t.Return ([]uint8) (slice)
if len(t.Return) > 2097152 {
return xerrors.Errorf("Byte array in field t.Return was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Return))); err != nil {
return err
}
if _, err := cw.Write(t.Return); err != nil {
return err
}
// t.ReturnCodec (uint64) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ReturnCodec)); err != nil {
return err
}
return nil
}
func (t *ReturnTrace) UnmarshalCBOR(r io.Reader) (err error) {
*t = ReturnTrace{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 3 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.ExitCode (exitcode.ExitCode) (int64)
{
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
var extraI int64
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.ExitCode = exitcode.ExitCode(extraI)
}
// t.Return ([]uint8) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > 2097152 {
return fmt.Errorf("t.Return: byte array too large (%d)", extra)
}
if maj != cbg.MajByteString {
return fmt.Errorf("expected byte array")
}
if extra > 0 {
t.Return = make([]uint8, extra)
}
if _, err := io.ReadFull(cr, t.Return); err != nil {
return err
}
// t.ReturnCodec (uint64) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.ReturnCodec = uint64(extra)
}
return nil
}
var lengthBufTraceIpld = []byte{131}
func (t *TraceIpld) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufTraceIpld); err != nil {
return err
}
// t.Op (types.Op) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Op)); err != nil {
return err
}
// t.Cid (cid.Cid) (struct)
if err := cbg.WriteCid(cw, t.Cid); err != nil {
return xerrors.Errorf("failed to write cid field t.Cid: %w", err)
}
// t.Size (uint64) (uint64)
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Size)); err != nil {
return err
}
return nil
}
func (t *TraceIpld) UnmarshalCBOR(r io.Reader) (err error) {
*t = TraceIpld{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 3 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.Op (types.Op) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.Op = Op(extra)
}
// t.Cid (cid.Cid) (struct)
{
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.Cid: %w", err)
}
t.Cid = c
}
// t.Size (uint64) (uint64)
{
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if maj != cbg.MajUnsignedInt {
return fmt.Errorf("wrong type for uint64 field")
}
t.Size = uint64(extra)
}
return nil
}
var lengthBufExecutionTrace = []byte{135}
func (t *ExecutionTrace) MarshalCBOR(w io.Writer) error {
if t == nil {
_, err := w.Write(cbg.CborNull)
return err
}
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufExecutionTrace); err != nil {
return err
}
// t.Msg (types.MessageTrace) (struct)
if err := t.Msg.MarshalCBOR(cw); err != nil {
return err
}
// t.MsgRct (types.ReturnTrace) (struct)
if err := t.MsgRct.MarshalCBOR(cw); err != nil {
return err
}
// t.InvokedActor (types.ActorTrace) (struct)
if err := t.InvokedActor.MarshalCBOR(cw); err != nil {
return err
}
// t.GasCharges ([]*types.GasTrace) (slice)
if len(t.GasCharges) > 1000000000 {
return xerrors.Errorf("Slice value in field t.GasCharges was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.GasCharges))); err != nil {
return err
}
for _, v := range t.GasCharges {
if err := v.MarshalCBOR(cw); err != nil {
return err
}
}
// t.Subcalls ([]types.ExecutionTrace) (slice)
if len(t.Subcalls) > 1000000000 {
return xerrors.Errorf("Slice value in field t.Subcalls was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Subcalls))); err != nil {
return err
}
for _, v := range t.Subcalls {
if err := v.MarshalCBOR(cw); err != nil {
return err
}
}
// t.Logs ([]string) (slice)
if len(t.Logs) > 1000000000 {
return xerrors.Errorf("Slice value in field t.Logs was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Logs))); err != nil {
return err
}
for _, v := range t.Logs {
if len(v) > 8192 {
return xerrors.Errorf("Value in field v was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil {
return err
}
if _, err := cw.WriteString(string(v)); err != nil {
return err
}
}
// t.IpldOps ([]types.TraceIpld) (slice)
if len(t.IpldOps) > 1000000000 {
return xerrors.Errorf("Slice value in field t.IpldOps was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.IpldOps))); err != nil {
return err
}
for _, v := range t.IpldOps {
if err := v.MarshalCBOR(cw); err != nil {
return err
}
}
return nil
}
func (t *ExecutionTrace) UnmarshalCBOR(r io.Reader) (err error) {
*t = ExecutionTrace{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
if extra != 7 {
return fmt.Errorf("cbor input had wrong number of fields")
}
// t.Msg (types.MessageTrace) (struct)
{
if err := t.Msg.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Msg: %w", err)
}
}
// t.MsgRct (types.ReturnTrace) (struct)
{
if err := t.MsgRct.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.MsgRct: %w", err)
}
}
// t.InvokedActor (types.ActorTrace) (struct)
{
b, err := cr.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := cr.UnreadByte(); err != nil {
return err
}
t.InvokedActor = new(ActorTrace)
if err := t.InvokedActor.UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.InvokedActor pointer: %w", err)
}
}
}
// t.GasCharges ([]*types.GasTrace) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > 1000000000 {
return fmt.Errorf("t.GasCharges: array too large (%d)", extra)
}
if maj != cbg.MajArray {
return fmt.Errorf("expected cbor array")
}
if extra > 0 {
t.GasCharges = make([]*GasTrace, extra)
}
for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
{
b, err := cr.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := cr.UnreadByte(); err != nil {
return err
}
t.GasCharges[i] = new(GasTrace)
if err := t.GasCharges[i].UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.GasCharges[i] pointer: %w", err)
}
}
}
}
}
// t.Subcalls ([]types.ExecutionTrace) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > 1000000000 {
return fmt.Errorf("t.Subcalls: array too large (%d)", extra)
}
if maj != cbg.MajArray {
return fmt.Errorf("expected cbor array")
}
if extra > 0 {
t.Subcalls = make([]ExecutionTrace, extra)
}
for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
{
if err := t.Subcalls[i].UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.Subcalls[i]: %w", err)
}
}
}
}
// t.Logs ([]string) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > 1000000000 {
return fmt.Errorf("t.Logs: array too large (%d)", extra)
}
if maj != cbg.MajArray {
return fmt.Errorf("expected cbor array")
}
if extra > 0 {
t.Logs = make([]string, extra)
}
for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
{
sval, err := cbg.ReadStringWithMax(cr, 8192)
if err != nil {
return err
}
t.Logs[i] = string(sval)
}
}
}
// t.IpldOps ([]types.TraceIpld) (slice)
maj, extra, err = cr.ReadHeader()
if err != nil {
return err
}
if extra > 1000000000 {
return fmt.Errorf("t.IpldOps: array too large (%d)", extra)
}
if maj != cbg.MajArray {
return fmt.Errorf("expected cbor array")
}
if extra > 0 {
t.IpldOps = make([]TraceIpld, extra)
}
for i := 0; i < int(extra); i++ {
{
var maj byte
var extra uint64
var err error
_ = maj
_ = extra
_ = err
{
if err := t.IpldOps[i].UnmarshalCBOR(cr); err != nil {
return xerrors.Errorf("unmarshaling t.IpldOps[i]: %w", err)
}
}
}
}
return nil
}
package types
import (
"math/big"
"golang.org/x/crypto/blake2b"
"github.com/filecoin-project/lotus/build/buildconstants"
)
type ElectionProof struct {
WinCount int64
VRFProof []byte
}
const precision = 256
var (
expNumCoef []*big.Int
expDenoCoef []*big.Int
)
func init() {
parse := func(coefs []string) []*big.Int {
out := make([]*big.Int, len(coefs))
for i, coef := range coefs {
c, ok := new(big.Int).SetString(coef, 10)
if !ok {
panic("could not parse exp paramemter")
}
// << 256 (Q.0 to Q.256), >> 128 to transform integer params to coefficients
c = c.Lsh(c, precision-128)
out[i] = c
}
return out
}
// parameters are in integer format,
// coefficients are *2^-128 of that
num := []string{
"-648770010757830093818553637600",
"67469480939593786226847644286976",
"-3197587544499098424029388939001856",
"89244641121992890118377641805348864",
"-1579656163641440567800982336819953664",
"17685496037279256458459817590917169152",
"-115682590513835356866803355398940131328",
"340282366920938463463374607431768211456",
}
expNumCoef = parse(num)
deno := []string{
"1225524182432722209606361",
"114095592300906098243859450",
"5665570424063336070530214243",
"194450132448609991765137938448",
"5068267641632683791026134915072",
"104716890604972796896895427629056",
"1748338658439454459487681798864896",
"23704654329841312470660182937960448",
"259380097567996910282699886670381056",
"2250336698853390384720606936038375424",
"14978272436876548034486263159246028800",
"72144088983913131323343765784380833792",
"224599776407103106596571252037123047424",
"340282366920938463463374607431768211456",
}
expDenoCoef = parse(deno)
}
// expneg accepts x in Q.256 format and computes e^-x.
// It is most precise within [0, 1.725) range, where error is less than 3.4e-30.
// Over the [0, 5) range its error is less than 4.6e-15.
// Output is in Q.256 format.
func expneg(x *big.Int) *big.Int {
// exp is approximated by rational function
// polynomials of the rational function are evaluated using Horner's method
num := polyval(expNumCoef, x) // Q.256
deno := polyval(expDenoCoef, x) // Q.256
num = num.Lsh(num, precision) // Q.512
return num.Div(num, deno) // Q.512 / Q.256 => Q.256
}
// polyval evaluates a polynomial given by coefficients `p` in Q.256 format
// at point `x` in Q.256 format. Output is in Q.256.
// Coefficients should be ordered from the highest order coefficient to the lowest.
func polyval(p []*big.Int, x *big.Int) *big.Int {
// evaluation using Horner's method
res := new(big.Int).Set(p[0]) // Q.256
tmp := new(big.Int) // big.Int.Mul doesn't like when input is reused as output
for _, c := range p[1:] {
tmp = tmp.Mul(res, x) // Q.256 * Q.256 => Q.512
res = res.Rsh(tmp, precision) // Q.512 >> 256 => Q.256
res = res.Add(res, c)
}
return res
}
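// exampleExpnegQ256 is a hypothetical sketch (not part of the original API)
// illustrating the Q.256 fixed-point convention used by expneg and polyval:
// an integer x stands for the real number x / 2^256, so 1.0 is 1 << 256.
func exampleExpnegQ256() *big.Int {
	one := new(big.Int).Lsh(big.NewInt(1), precision) // 1.0 in Q.256
	half := new(big.Int).Rsh(one, 1)                  // 0.5 in Q.256
	// expneg(0.5) computes e^-0.5 ≈ 0.6065, returned in Q.256
	// (i.e. an integer close to 0.6065 * 2^256).
	return expneg(half)
}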
// computes lambda in Q.256
func lambda(power, totalPower *big.Int) *big.Int {
blocksPerEpoch := NewInt(buildconstants.BlocksPerEpoch)
lam := new(big.Int).Mul(power, blocksPerEpoch.Int) // Q.0
lam = lam.Lsh(lam, precision) // Q.256
lam = lam.Div(lam /* Q.256 */, totalPower /* Q.0 */) // Q.256
return lam
}
var MaxWinCount = 3 * int64(buildconstants.BlocksPerEpoch)
type poiss struct {
lam *big.Int
pmf *big.Int
icdf *big.Int
tmp *big.Int // temporary variable for optimization
k uint64
}
// newPoiss starts poisson inverted CDF
// lambda is in Q.256 format
// returns (instance, `1-poisscdf(0, lambda)`)
// CDF value returned is reused when calling `next`
func newPoiss(lambda *big.Int) (*poiss, *big.Int) {
// pmf(k) = (lambda^k)*(e^-lambda) / k!
// k = 0 here, so it simplifies to just e^-lambda
elam := expneg(lambda) // Q.256
pmf := new(big.Int).Set(elam)
// icdf(k) = 1 - ∑ᵏᵢ₌₀ pmf(i)
// icdf(0) = 1 - pmf(0)
icdf := big.NewInt(1)
icdf = icdf.Lsh(icdf, precision) // Q.256
icdf = icdf.Sub(icdf, pmf) // Q.256
k := uint64(0)
p := &poiss{
lam: lambda,
pmf: pmf,
tmp: elam,
icdf: icdf,
k: k,
}
return p, icdf
}
// next computes `k++, 1-poisscdf(k, lam)`
// return is in Q.256 format
func (p *poiss) next() *big.Int {
// incrementally compute next pmf and icdf
// pmf(k) = (lambda^k)*(e^-lambda) / k!
// so pmf(k) = pmf(k-1) * lambda / k
p.k++
p.tmp.SetUint64(p.k) // Q.0
// calculate pmf for k
p.pmf = p.pmf.Div(p.pmf, p.tmp) // Q.256 / Q.0 => Q.256
// we are using `tmp` as target for multiplication as using an input as output
// for Int.Mul causes allocations
p.tmp = p.tmp.Mul(p.pmf, p.lam) // Q.256 * Q.256 => Q.512
p.pmf = p.pmf.Rsh(p.tmp, precision) // Q.512 >> 256 => Q.256
// calculate output
// icdf(k) = icdf(k-1) - pmf(k)
p.icdf = p.icdf.Sub(p.icdf, p.pmf) // Q.256
return p.icdf
}
// ComputeWinCount uses VRFProof to compute number of wins
// The algorithm is based on Algorand's Sortition with Binomial distribution
// replaced by Poisson distribution.
func (ep *ElectionProof) ComputeWinCount(power BigInt, totalPower BigInt) int64 {
h := blake2b.Sum256(ep.VRFProof)
lhs := BigFromBytes(h[:]).Int // 256bits, assume Q.256 so [0, 1)
// We are calculating upside-down CDF of Poisson distribution with
// rate λ=power*E/totalPower
// Steps:
// 1. calculate λ=power*E/totalPower
// 2. calculate elam = exp(-λ)
// 3. Check how many times we win:
// j = 0
// pmf = elam
// rhs = 1 - pmf
// for h(vrf) < rhs: j++; pmf = pmf * lam / j; rhs = rhs - pmf
lam := lambda(power.Int, totalPower.Int) // Q.256
p, rhs := newPoiss(lam)
var j int64
for lhs.Cmp(rhs) < 0 && j < MaxWinCount {
rhs = p.next()
j++
}
return j
}
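// exampleWinCount is a hypothetical usage sketch: given a VRF output, the win
// count depends only on the miner's share of power. The power figures below
// are placeholders chosen purely for illustration.
func exampleWinCount(vrfProof []byte) int64 {
	ep := &ElectionProof{VRFProof: vrfProof}
	minerPower := NewInt(1 << 40)   // hypothetical miner quality-adjusted power
	networkPower := NewInt(1 << 50) // hypothetical total network power
	// With a ~0.1% power share the expected win count per epoch is well below
	// one, so this usually returns 0 and occasionally 1.
	return ep.ComputeWinCount(minerPower, networkPower)
}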
package types
import (
"encoding/json"
"time"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/exitcode"
)
type GasTrace struct {
Name string
TotalGas int64 `json:"tg"`
ComputeGas int64 `json:"cg"`
StorageGas int64 `json:"sg"`
TimeTaken time.Duration `json:"tt"`
}
type MessageTrace struct {
From address.Address
To address.Address
Value abi.TokenAmount
Method abi.MethodNum
Params []byte
ParamsCodec uint64
GasLimit uint64
ReadOnly bool
}
type ActorTrace struct {
Id abi.ActorID
State Actor
}
type ReturnTrace struct {
ExitCode exitcode.ExitCode
Return []byte
ReturnCodec uint64
}
type Op uint64
const (
IpldOpGet Op = iota
IpldOpPut
)
type TraceIpld struct {
Op Op
Cid cid.Cid
Size uint64
}
func (t TraceIpld) MarshalJSON() ([]byte, error) {
type TraceIpldJSON struct {
Op string
Cid cid.Cid
Size uint64
}
var opStr string
switch t.Op {
case IpldOpGet:
opStr = "Get"
case IpldOpPut:
opStr = "Put"
default:
opStr = "Unknown"
}
return json.Marshal(TraceIpldJSON{
Op: opStr,
Cid: t.Cid,
Size: t.Size,
})
}
func (t *TraceIpld) UnmarshalJSON(data []byte) error {
type TraceIpldJSON struct {
Op string
Cid cid.Cid
Size uint64
}
var tj TraceIpldJSON
if err := json.Unmarshal(data, &tj); err != nil {
return err
}
t.Cid = tj.Cid
t.Size = tj.Size
switch tj.Op {
case "Get":
t.Op = IpldOpGet
case "Put":
t.Op = IpldOpPut
default:
return xerrors.Errorf("unknown operation: %s", tj.Op)
}
return nil
}
type ExecutionTrace struct {
Msg MessageTrace
MsgRct ReturnTrace
InvokedActor *ActorTrace `json:",omitempty"`
GasCharges []*GasTrace `cborgen:"maxlen=1000000000"`
Subcalls []ExecutionTrace `cborgen:"maxlen=1000000000"`
Logs []string `cborgen:"maxlen=1000000000" json:",omitempty"`
IpldOps []TraceIpld `cborgen:"maxlen=1000000000" json:",omitempty"`
}
func (et ExecutionTrace) SumGas() GasTrace {
return SumGas(et.GasCharges)
}
func SumGas(charges []*GasTrace) GasTrace {
var out GasTrace
for _, gc := range charges {
out.TotalGas += gc.TotalGas
out.ComputeGas += gc.ComputeGas
out.StorageGas += gc.StorageGas
}
return out
}
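// exampleSumGas is a hypothetical sketch showing how SumGas aggregates the
// per-charge components; the charge names and amounts are placeholders.
func exampleSumGas() GasTrace {
	charges := []*GasTrace{
		{Name: "OnChainMessage", TotalGas: 38863, ComputeGas: 38863},
		{Name: "OnBlockOpen", TotalGas: 114617, ComputeGas: 21000, StorageGas: 93617},
	}
	// Result: TotalGas: 153480, ComputeGas: 59863, StorageGas: 93617.
	return SumGas(charges)
}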
func (gt *GasTrace) MarshalJSON() ([]byte, error) {
type GasTraceCopy GasTrace
cpy := (*GasTraceCopy)(gt)
return json.Marshal(cpy)
}
package types
import (
"encoding"
"fmt"
"math/big"
"strings"
"github.com/invopop/jsonschema"
"github.com/filecoin-project/lotus/build/buildconstants"
)
type FIL BigInt
func (f FIL) String() string {
if f.Int == nil {
return "0 FIL"
}
return f.Unitless() + " FIL"
}
func (f FIL) Unitless() string {
r := new(big.Rat).SetFrac(f.Int, big.NewInt(int64(buildconstants.FilecoinPrecision)))
if r.Sign() == 0 {
return "0"
}
return strings.TrimRight(strings.TrimRight(r.FloatString(18), "0"), ".")
}
var AttoFil = NewInt(1)
var FemtoFil = BigMul(AttoFil, NewInt(1000))
var PicoFil = BigMul(FemtoFil, NewInt(1000))
var NanoFil = BigMul(PicoFil, NewInt(1000))
var unitPrefixes = []string{"a", "f", "p", "n", "μ", "m"}
func (f FIL) Short() string {
n := BigInt(f).Abs()
dn := uint64(1)
var prefix string
for _, p := range unitPrefixes {
if n.LessThan(NewInt(dn * 1000)) {
prefix = p
break
}
dn *= 1000
}
r := new(big.Rat).SetFrac(f.Int, big.NewInt(int64(dn)))
if r.Sign() == 0 {
return "0"
}
return strings.TrimRight(strings.TrimRight(r.FloatString(3), "0"), ".") + " " + prefix + "FIL"
}
func (f FIL) Nano() string {
r := new(big.Rat).SetFrac(f.Int, big.NewInt(int64(1e9)))
if r.Sign() == 0 {
return "0"
}
return strings.TrimRight(strings.TrimRight(r.FloatString(9), "0"), ".") + " nFIL"
}
func (f FIL) Format(s fmt.State, ch rune) {
switch ch {
case 's', 'v':
_, _ = fmt.Fprint(s, f.String())
default:
f.Int.Format(s, ch)
}
}
func (f FIL) MarshalText() (text []byte, err error) {
return []byte(f.String()), nil
}
func (f FIL) UnmarshalText(text []byte) error {
if f.Int == nil {
return fmt.Errorf("cannot unmarshal into nil BigInt (text:%s)", string(text))
}
p, err := ParseFIL(string(text))
if err != nil {
return err
}
f.Int.Set(p.Int)
return nil
}
func ParseFIL(s string) (FIL, error) {
suffix := strings.TrimLeft(s, "-.1234567890")
s = s[:len(s)-len(suffix)]
var attofil bool
if suffix != "" {
norm := strings.ToLower(strings.TrimSpace(suffix))
switch norm {
case "", "fil":
case "attofil", "afil":
attofil = true
default:
return FIL{}, fmt.Errorf("unrecognized suffix: %q", suffix)
}
}
if len(s) > 50 {
return FIL{}, fmt.Errorf("string length too large: %d", len(s))
}
r, ok := new(big.Rat).SetString(s) //nolint:gosec
if !ok {
return FIL{}, fmt.Errorf("failed to parse %q as a decimal number", s)
}
if !attofil {
r = r.Mul(r, big.NewRat(int64(buildconstants.FilecoinPrecision), 1))
}
if !r.IsInt() {
var pref string
if attofil {
pref = "atto"
}
return FIL{}, fmt.Errorf("invalid %sFIL value: %q", pref, s)
}
return FIL{r.Num()}, nil
}
func MustParseFIL(s string) FIL {
n, err := ParseFIL(s)
if err != nil {
panic(err)
}
return n
}
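// exampleParseFIL is a hypothetical sketch of the parse/format round trip:
// ParseFIL accepts an optional "fil"/"attofil" suffix and returns an
// attoFIL-denominated integer, which the formatters render in other units.
func exampleParseFIL() (string, string, error) {
	f, err := ParseFIL("1.5 fil") // 1.5 FIL == 1_500_000_000_000_000_000 attoFIL
	if err != nil {
		return "", "", err
	}
	// f.String() == "1.5 FIL", f.Nano() == "1500000000 nFIL"
	return f.String(), f.Nano(), nil
}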
func (f FIL) JSONSchema() *jsonschema.Schema {
return &jsonschema.Schema{
Type: "string",
Pattern: `^((\d+(\.\d+)?|0x[0-9a-fA-F]+))( ([aA]([tT][tT][oO])?)?[fF][iI][lL])?$`,
}
}
var _ encoding.TextMarshaler = (*FIL)(nil)
var _ encoding.TextUnmarshaler = (*FIL)(nil)
package types
import "github.com/ipfs/go-cid"
type FullBlock struct {
Header *BlockHeader
BlsMessages []*Message
SecpkMessages []*SignedMessage
}
func (fb *FullBlock) Cid() cid.Cid {
return fb.Header.Cid()
}
package types
import (
"encoding/json"
"fmt"
"github.com/filecoin-project/go-state-types/crypto"
)
var (
ErrKeyInfoNotFound = fmt.Errorf("key info not found")
ErrKeyExists = fmt.Errorf("key already exists")
)
// KeyType defines a type of a key
type KeyType string
func (kt *KeyType) UnmarshalJSON(bb []byte) error {
{
// first option, try unmarshaling as string
var s string
err := json.Unmarshal(bb, &s)
if err == nil {
*kt = KeyType(s)
return nil
}
}
{
var b byte
err := json.Unmarshal(bb, &b)
if err != nil {
return fmt.Errorf("could not unmarshal KeyType either as string nor integer: %w", err)
}
bst := crypto.SigType(b)
switch bst {
case crypto.SigTypeBLS:
*kt = KTBLS
case crypto.SigTypeSecp256k1:
*kt = KTSecp256k1
case crypto.SigTypeDelegated:
*kt = KTDelegated
default:
return fmt.Errorf("unknown sigtype: %d", bst)
}
log.Warnf("deprecation: integer style 'KeyType' is deprecated, switch to string style")
return nil
}
}
const (
KTBLS KeyType = "bls"
KTSecp256k1 KeyType = "secp256k1"
KTSecp256k1Ledger KeyType = "secp256k1-ledger"
KTDelegated KeyType = "delegated"
)
// KeyInfo is used for storing keys in KeyStore
type KeyInfo struct {
Type KeyType
PrivateKey []byte
}
// KeyStore is used for storing secret keys
type KeyStore interface {
// List lists all the keys stored in the KeyStore
List() ([]string, error)
// Get gets a key out of keystore and returns KeyInfo corresponding to named key
Get(string) (KeyInfo, error)
// Put saves a key info under given name
Put(string, KeyInfo) error
// Delete removes a key from keystore
Delete(string) error
}
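// exampleKeyTypeJSON is a hypothetical sketch of the two encodings accepted by
// KeyType.UnmarshalJSON: the preferred string form and the deprecated integer
// form carrying a crypto.SigType value.
func exampleKeyTypeJSON() (KeyType, KeyType, error) {
	var fromString, fromLegacy KeyType
	if err := json.Unmarshal([]byte(`"bls"`), &fromString); err != nil {
		return "", "", err
	}
	legacy, err := json.Marshal(byte(crypto.SigTypeBLS)) // deprecated integer form
	if err != nil {
		return "", "", err
	}
	if err := json.Unmarshal(legacy, &fromLegacy); err != nil {
		return "", "", err
	}
	// Both values resolve to KTBLS; the integer path also logs a deprecation warning.
	return fromString, fromLegacy, nil
}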
package types
import (
"github.com/ipfs/go-cid"
"go.uber.org/zap/zapcore"
)
type LogCids []cid.Cid
var _ zapcore.ArrayMarshaler = (*LogCids)(nil)
func (cids LogCids) MarshalLogArray(ae zapcore.ArrayEncoder) error {
for _, c := range cids {
ae.AppendString(c.String())
}
return nil
}
package types
import (
"bytes"
"encoding/json"
"fmt"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/go-state-types/network"
"github.com/filecoin-project/lotus/build/buildconstants"
)
const MessageVersion = 0
type ChainMsg interface {
Cid() cid.Cid
VMMessage() *Message
ToStorageBlock() (block.Block, error)
// FIXME: This is the *message* length, this name is misleading.
ChainLength() int
}
type Message struct {
Version uint64
To address.Address
From address.Address
Nonce uint64
Value abi.TokenAmount
GasLimit int64
GasFeeCap abi.TokenAmount
GasPremium abi.TokenAmount
Method abi.MethodNum
Params []byte
}
func (m *Message) Caller() address.Address {
return m.From
}
func (m *Message) Receiver() address.Address {
return m.To
}
func (m *Message) ValueReceived() abi.TokenAmount {
return m.Value
}
func DecodeMessage(b []byte) (*Message, error) {
var msg Message
if err := msg.UnmarshalCBOR(bytes.NewReader(b)); err != nil {
return nil, err
}
if msg.Version != MessageVersion {
return nil, fmt.Errorf("decoded message had incorrect version (%d)", msg.Version)
}
return &msg, nil
}
func (m *Message) Serialize() ([]byte, error) {
buf := new(bytes.Buffer)
if err := m.MarshalCBOR(buf); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
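// exampleMessageRoundTrip is a hypothetical sketch of the CBOR round trip this
// package relies on: Serialize followed by DecodeMessage should yield an
// equivalent message. All addresses and amounts below are placeholders.
func exampleMessageRoundTrip() (*Message, error) {
	from, err := address.NewIDAddress(1)
	if err != nil {
		return nil, err
	}
	to, err := address.NewIDAddress(2)
	if err != nil {
		return nil, err
	}
	msg := &Message{
		Version:    MessageVersion,
		To:         to,
		From:       from,
		Value:      abi.NewTokenAmount(0),
		GasLimit:   1_000_000,
		GasFeeCap:  abi.NewTokenAmount(100),
		GasPremium: abi.NewTokenAmount(1),
	}
	data, err := msg.Serialize()
	if err != nil {
		return nil, err
	}
	// DecodeMessage also checks that the version field matches MessageVersion.
	return DecodeMessage(data)
}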
func (m *Message) ChainLength() int {
ser, err := m.Serialize()
if err != nil {
panic(err)
}
return len(ser)
}
func (m *Message) ToStorageBlock() (block.Block, error) {
data, err := m.Serialize()
if err != nil {
return nil, err
}
c, err := abi.CidBuilder.Sum(data)
if err != nil {
return nil, err
}
return block.NewBlockWithCid(data, c)
}
func (m *Message) Cid() cid.Cid {
b, err := m.ToStorageBlock()
if err != nil {
panic(fmt.Sprintf("failed to marshal message: %s", err)) // I think this is maybe sketchy, what happens if we try to serialize a message with an undefined address in it?
}
return b.Cid()
}
type mCid struct {
*RawMessage
CID cid.Cid
}
type RawMessage Message
func (m *Message) MarshalJSON() ([]byte, error) {
return json.Marshal(&mCid{
RawMessage: (*RawMessage)(m),
CID: m.Cid(),
})
}
func (m *Message) RequiredFunds() BigInt {
return BigMul(m.GasFeeCap, NewInt(uint64(m.GasLimit)))
}
func (m *Message) VMMessage() *Message {
return m
}
func (m *Message) Equals(o *Message) bool {
return m.Cid() == o.Cid()
}
func (m *Message) EqualCall(o *Message) bool {
m1 := *m
m2 := *o
m1.GasLimit, m2.GasLimit = 0, 0
m1.GasFeeCap, m2.GasFeeCap = big.Zero(), big.Zero()
m1.GasPremium, m2.GasPremium = big.Zero(), big.Zero()
return (&m1).Equals(&m2)
}
func (m *Message) ValidForBlockInclusion(minGas int64, version network.Version) error {
if m.Version != 0 {
return xerrors.New("'Version' unsupported")
}
if m.To == address.Undef {
return xerrors.New("'To' address cannot be empty")
}
if m.To == buildconstants.ZeroAddress && version >= network.Version7 {
return xerrors.New("invalid 'To' address")
}
if !abi.AddressValidForNetworkVersion(m.To, version) {
return xerrors.New("'To' address protocol unsupported for network version")
}
if m.From == address.Undef {
return xerrors.New("'From' address cannot be empty")
}
if !abi.AddressValidForNetworkVersion(m.From, version) {
return xerrors.New("'From' address protocol unsupported for network version")
}
if m.Value.Int == nil {
return xerrors.New("'Value' cannot be nil")
}
if m.Value.LessThan(big.Zero()) {
return xerrors.New("'Value' field cannot be negative")
}
if m.Value.GreaterThan(TotalFilecoinInt) {
return xerrors.New("'Value' field cannot be greater than total filecoin supply")
}
if m.GasFeeCap.Int == nil {
return xerrors.New("'GasFeeCap' cannot be nil")
}
if m.GasFeeCap.LessThan(big.Zero()) {
return xerrors.New("'GasFeeCap' field cannot be negative")
}
if m.GasPremium.Int == nil {
return xerrors.New("'GasPremium' cannot be nil")
}
if m.GasPremium.LessThan(big.Zero()) {
return xerrors.New("'GasPremium' field cannot be negative")
}
if m.GasPremium.GreaterThan(m.GasFeeCap) {
return xerrors.New("'GasFeeCap' less than 'GasPremium'")
}
if m.GasLimit > buildconstants.BlockGasLimit {
return xerrors.Errorf("'GasLimit' field cannot be greater than a block's gas limit (%d > %d)", m.GasLimit, buildconstants.BlockGasLimit)
}
if m.GasLimit <= 0 {
return xerrors.Errorf("'GasLimit' field %d must be positive", m.GasLimit)
}
// since prices might vary with time, this is technically semantic validation
if m.GasLimit < minGas {
return xerrors.Errorf("'GasLimit' field cannot be less than the cost of storing a message on chain %d < %d", m.GasLimit, minGas)
}
return nil
}
// EffectiveGasPremium returns the effective gas premium claimable by the miner
// given the supplied base fee. This method is not used anywhere except the Eth API.
//
// Filecoin clamps the gas premium at GasFeeCap - BaseFee, if lower than the
// specified premium. Returns 0 if GasFeeCap is less than BaseFee.
func (m *Message) EffectiveGasPremium(baseFee abi.TokenAmount) abi.TokenAmount {
available := big.Sub(m.GasFeeCap, baseFee)
// It's possible that storage providers may include messages with gasFeeCap less than the baseFee
// In such cases, their reward should be viewed as zero
if available.LessThan(big.NewInt(0)) {
available = big.NewInt(0)
}
if big.Cmp(m.GasPremium, available) <= 0 {
return m.GasPremium
}
return available
}
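// exampleEffectiveGasPremium is a hypothetical worked example of the clamping
// rule described above: with GasFeeCap = 200, BaseFee = 150 and GasPremium =
// 80 (all in attoFIL/gas), only 50 is available above the base fee, so the
// effective premium is clamped from 80 down to 50.
func exampleEffectiveGasPremium() abi.TokenAmount {
	m := &Message{
		GasFeeCap:  abi.NewTokenAmount(200),
		GasPremium: abi.NewTokenAmount(80),
	}
	return m.EffectiveGasPremium(abi.NewTokenAmount(150)) // 50
}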
const TestGasLimit = 100e6
//go:build gofuzz
// +build gofuzz
package types
import "bytes"
func FuzzMessage(data []byte) int {
var msg Message
err := msg.UnmarshalCBOR(bytes.NewReader(data))
if err != nil {
return 0
}
reData, err := msg.Serialize()
if err != nil {
panic(err) // ok
}
var msg2 Message
err = msg2.UnmarshalCBOR(bytes.NewReader(reData))
if err != nil {
panic(err) // ok
}
reData2, err := msg2.Serialize()
if err != nil {
panic(err) // ok
}
if !bytes.Equal(reData, reData2) {
panic("reencoding not equal") // ok
}
return 1
}
package types
import (
"bytes"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-state-types/exitcode"
)
type MessageReceiptVersion byte
const (
// MessageReceiptV0 refers to pre FIP-0049 receipts.
MessageReceiptV0 MessageReceiptVersion = 0
// MessageReceiptV1 refers to post FIP-0049 receipts.
MessageReceiptV1 MessageReceiptVersion = 1
)
const EventAMTBitwidth = 5
type MessageReceipt struct {
version MessageReceiptVersion
ExitCode exitcode.ExitCode
Return []byte
GasUsed int64
EventsRoot *cid.Cid // Root of Event AMT with bitwidth = EventAMTBitwidth
}
// NewMessageReceiptV0 creates a new pre FIP-0049 receipt with no capability to
// convey events.
func NewMessageReceiptV0(exitcode exitcode.ExitCode, ret []byte, gasUsed int64) MessageReceipt {
return MessageReceipt{
version: MessageReceiptV0,
ExitCode: exitcode,
Return: ret,
GasUsed: gasUsed,
}
}
// NewMessageReceiptV1 creates a new post FIP-0049 receipt with the ability to
// convey events.
func NewMessageReceiptV1(exitcode exitcode.ExitCode, ret []byte, gasUsed int64, eventsRoot *cid.Cid) MessageReceipt {
return MessageReceipt{
version: MessageReceiptV1,
ExitCode: exitcode,
Return: ret,
GasUsed: gasUsed,
EventsRoot: eventsRoot,
}
}
func (mr *MessageReceipt) Version() MessageReceiptVersion {
return mr.version
}
func (mr *MessageReceipt) Equals(o *MessageReceipt) bool {
return mr.version == o.version && mr.ExitCode == o.ExitCode && bytes.Equal(mr.Return, o.Return) && mr.GasUsed == o.GasUsed &&
(mr.EventsRoot == o.EventsRoot || (mr.EventsRoot != nil && o.EventsRoot != nil && *mr.EventsRoot == *o.EventsRoot))
}
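// exampleMessageReceipts is a hypothetical sketch contrasting the two receipt
// versions: V0 predates FIP-0049 and cannot carry an events root, while V1
// adds the optional EventsRoot field. Exit code, return bytes and gas are
// placeholders.
func exampleMessageReceipts(eventsRoot *cid.Cid) (MessageReceipt, MessageReceipt) {
	v0 := NewMessageReceiptV0(exitcode.Ok, []byte("ret"), 12345)
	v1 := NewMessageReceiptV1(exitcode.Ok, []byte("ret"), 12345, eventsRoot)
	// The two receipts are not Equal: they differ in version even though the
	// remaining fields match.
	return v0, v1
}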
package types
import (
"fmt"
"io"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/cbor"
"github.com/filecoin-project/go-state-types/exitcode"
)
// This file contains custom CBOR serde logic to deal with the new versioned
// MessageReceipt resulting from the introduction of actor events (FIP-0049).
type messageReceiptV0 struct{ *MessageReceipt }
type messageReceiptV1 struct{ *MessageReceipt }
func (mr *MessageReceipt) MarshalCBOR(w io.Writer) error {
if mr == nil {
_, err := w.Write(cbg.CborNull)
return err
}
var m cbor.Marshaler
switch mr.version {
case MessageReceiptV0:
m = &messageReceiptV0{mr}
case MessageReceiptV1:
m = &messageReceiptV1{mr}
default:
return xerrors.Errorf("invalid message receipt version: %d", mr.version)
}
return m.MarshalCBOR(w)
}
func (mr *MessageReceipt) UnmarshalCBOR(r io.Reader) (err error) {
*mr = MessageReceipt{}
cr := cbg.NewCborReader(r)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if maj != cbg.MajArray {
return fmt.Errorf("cbor input should be of type array")
}
var u cbor.Unmarshaler
switch extra {
case 3:
mr.version = MessageReceiptV0
u = &messageReceiptV0{mr}
case 4:
mr.version = MessageReceiptV1
u = &messageReceiptV1{mr}
default:
return fmt.Errorf("cbor input had wrong number of fields")
}
// Ok to pass a CBOR reader since cbg.NewCborReader will return itself when
// already a CBOR reader.
return u.UnmarshalCBOR(cr)
}
var lengthBufAMessageReceiptV0 = []byte{131}
func (t *messageReceiptV0) MarshalCBOR(w io.Writer) error {
// eliding null check since nulls were already handled in the dispatcher
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufAMessageReceiptV0); err != nil {
return err
}
// t.ExitCode (exitcode.ExitCode) (int64)
if t.ExitCode >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ExitCode)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.ExitCode-1)); err != nil {
return err
}
}
// t.Return ([]uint8) (slice)
if len(t.Return) > cbg.ByteArrayMaxLen {
return xerrors.Errorf("Byte array in field t.Return was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Return))); err != nil {
return err
}
if _, err := cw.Write(t.Return[:]); err != nil {
return err
}
// t.GasUsed (int64) (int64)
if t.GasUsed >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.GasUsed)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.GasUsed-1)); err != nil {
return err
}
}
return nil
}
func (t *messageReceiptV0) UnmarshalCBOR(r io.Reader) (err error) {
cr := cbg.NewCborReader(r)
// t.ExitCode (exitcode.ExitCode) (int64)
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.ExitCode = exitcode.ExitCode(extraI)
}
// t.Return ([]uint8) (slice)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
if extra > cbg.ByteArrayMaxLen {
return fmt.Errorf("t.Return: byte array too large (%d)", extra)
}
if maj != cbg.MajByteString {
return fmt.Errorf("expected byte array")
}
if extra > 0 {
t.Return = make([]uint8, extra)
}
if _, err := io.ReadFull(cr, t.Return[:]); err != nil {
return err
}
// t.GasUsed (int64) (int64)
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.GasUsed = extraI
}
return nil
}
var lengthBufBMessageReceiptV1 = []byte{132}
func (t *messageReceiptV1) MarshalCBOR(w io.Writer) error {
// eliding null check since nulls were already handled in the dispatcher
cw := cbg.NewCborWriter(w)
if _, err := cw.Write(lengthBufBMessageReceiptV1); err != nil {
return err
}
// t.ExitCode (exitcode.ExitCode) (int64)
if t.ExitCode >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.ExitCode)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.ExitCode-1)); err != nil {
return err
}
}
// t.Return ([]uint8) (slice)
if len(t.Return) > cbg.ByteArrayMaxLen {
return xerrors.Errorf("Byte array in field t.Return was too long")
}
if err := cw.WriteMajorTypeHeader(cbg.MajByteString, uint64(len(t.Return))); err != nil {
return err
}
if _, err := cw.Write(t.Return[:]); err != nil {
return err
}
// t.GasUsed (int64) (int64)
if t.GasUsed >= 0 {
if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.GasUsed)); err != nil {
return err
}
} else {
if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.GasUsed-1)); err != nil {
return err
}
}
// t.EventsRoot (cid.Cid) (struct)
if t.EventsRoot == nil {
if _, err := cw.Write(cbg.CborNull); err != nil {
return err
}
} else {
if err := cbg.WriteCid(cw, *t.EventsRoot); err != nil {
return xerrors.Errorf("failed to write cid field t.EventsRoot: %w", err)
}
}
return nil
}
func (t *messageReceiptV1) UnmarshalCBOR(r io.Reader) (err error) {
cr := cbg.NewCborReader(r)
// t.ExitCode (exitcode.ExitCode) (int64)
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.ExitCode = exitcode.ExitCode(extraI)
}
// t.Return ([]uint8) (slice)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
if extra > cbg.ByteArrayMaxLen {
return fmt.Errorf("t.Return: byte array too large (%d)", extra)
}
if maj != cbg.MajByteString {
return fmt.Errorf("expected byte array")
}
if extra > 0 {
t.Return = make([]uint8, extra)
}
if _, err := io.ReadFull(cr, t.Return[:]); err != nil {
return err
}
// t.GasUsed (int64) (int64)
{
maj, extra, err := cr.ReadHeader()
var extraI int64
if err != nil {
return err
}
switch maj {
case cbg.MajUnsignedInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 positive overflow")
}
case cbg.MajNegativeInt:
extraI = int64(extra)
if extraI < 0 {
return fmt.Errorf("int64 negative overflow")
}
extraI = -1 - extraI
default:
return fmt.Errorf("wrong type for int64 field: %d", maj)
}
t.GasUsed = extraI
}
// t.EventsRoot (cid.Cid) (struct)
{
b, err := cr.ReadByte()
if err != nil {
return err
}
if b != cbg.CborNull[0] {
if err := cr.UnreadByte(); err != nil {
return err
}
c, err := cbg.ReadCid(cr)
if err != nil {
return xerrors.Errorf("failed to read cid field t.EventsRoot: %w", err)
}
t.EventsRoot = &c
}
}
return nil
}
package types
import (
"time"
"github.com/filecoin-project/go-address"
)
type MpoolConfig struct {
PriorityAddrs []address.Address
SizeLimitHigh int
SizeLimitLow int
ReplaceByFeeRatio Percent
PruneCooldown time.Duration
GasLimitOverestimation float64
}
func (mc *MpoolConfig) Clone() *MpoolConfig {
r := new(MpoolConfig)
*r = *mc
return r
}
package types
import (
"fmt"
"math"
"strconv"
"golang.org/x/xerrors"
)
// Percent stores a signed percentage as an int64. When converted to a string (or json), it's stored
// as a decimal with two places (e.g., 100% -> 1.00).
type Percent int64
func (p Percent) String() string {
abs := p
sign := ""
if abs < 0 {
abs = -abs
sign = "-"
}
return fmt.Sprintf(`%s%d.%02d`, sign, abs/100, abs%100)
}
func (p Percent) MarshalJSON() ([]byte, error) {
return []byte(p.String()), nil
}
func (p *Percent) UnmarshalJSON(b []byte) error {
flt, err := strconv.ParseFloat(string(b)+"e2", 64)
if err != nil {
return xerrors.Errorf("unable to parse ratio %s: %w", string(b), err)
}
if math.Trunc(flt) != flt {
return xerrors.Errorf("ratio may only have two decimals: %s", string(b))
}
*p = Percent(flt)
return nil
}
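// examplePercent is a hypothetical sketch of the fixed-point convention:
// Percent(125) represents 125% and renders as "1.25"; unmarshaling the JSON
// number 1.25 recovers Percent(125).
func examplePercent() (string, Percent, error) {
	p := Percent(125)
	var parsed Percent
	if err := parsed.UnmarshalJSON([]byte("1.25")); err != nil {
		return "", 0, err
	}
	return p.String(), parsed, nil // "1.25", Percent(125)
}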
package types
import (
"bytes"
"encoding/json"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/crypto"
)
func (sm *SignedMessage) ToStorageBlock() (block.Block, error) {
if sm.Signature.Type == crypto.SigTypeBLS {
return sm.Message.ToStorageBlock()
}
data, err := sm.Serialize()
if err != nil {
return nil, err
}
c, err := abi.CidBuilder.Sum(data)
if err != nil {
return nil, err
}
return block.NewBlockWithCid(data, c)
}
func (sm *SignedMessage) Cid() cid.Cid {
if sm.Signature.Type == crypto.SigTypeBLS {
return sm.Message.Cid()
}
sb, err := sm.ToStorageBlock()
if err != nil {
panic(err)
}
return sb.Cid()
}
type SignedMessage struct {
Message Message
Signature crypto.Signature
}
func DecodeSignedMessage(data []byte) (*SignedMessage, error) {
var msg SignedMessage
if err := msg.UnmarshalCBOR(bytes.NewReader(data)); err != nil {
return nil, err
}
return &msg, nil
}
func (sm *SignedMessage) Serialize() ([]byte, error) {
buf := new(bytes.Buffer)
if err := sm.MarshalCBOR(buf); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
type smCid struct {
*RawSignedMessage
CID cid.Cid
}
type RawSignedMessage SignedMessage
func (sm *SignedMessage) MarshalJSON() ([]byte, error) {
return json.Marshal(&smCid{
RawSignedMessage: (*RawSignedMessage)(sm),
CID: sm.Cid(),
})
}
func (sm *SignedMessage) ChainLength() int {
var ser []byte
var err error
if sm.Signature.Type == crypto.SigTypeBLS {
// BLS chain message length doesn't include signature
ser, err = sm.Message.Serialize()
} else {
ser, err = sm.Serialize()
}
if err != nil {
panic(err)
}
return len(ser)
}
func (sm *SignedMessage) Size() int {
serdata, err := sm.Serialize()
if err != nil {
log.Errorf("serializing message failed: %s", err)
return 0
}
return len(serdata)
}
func (sm *SignedMessage) VMMessage() *Message {
return &sm.Message
}
package types
import (
"bytes"
"encoding/json"
"fmt"
"io"
"slices"
"sort"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log/v2"
cbg "github.com/whyrusleeping/cbor-gen"
"golang.org/x/crypto/blake2b"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
)
var log = logging.Logger("types")
type TipSet struct {
cids []cid.Cid
blks []*BlockHeader
height abi.ChainEpoch
}
type ExpTipSet struct {
Cids []cid.Cid
Blocks []*BlockHeader
Height abi.ChainEpoch
}
func (ts *TipSet) MarshalJSON() ([]byte, error) {
// Why not just export the fields? Because the struct already has methods with
// the same names.
return json.Marshal(ExpTipSet{
Cids: ts.cids,
Blocks: ts.blks,
Height: ts.height,
})
}
func (ts *TipSet) UnmarshalJSON(b []byte) error {
var ets ExpTipSet
if err := json.Unmarshal(b, &ets); err != nil {
return err
}
ots, err := NewTipSet(ets.Blocks)
if err != nil {
return err
}
*ts = *ots
return nil
}
func (ts *TipSet) MarshalCBOR(w io.Writer) error {
if ts == nil {
_, err := w.Write(cbg.CborNull)
return err
}
return (&ExpTipSet{
Cids: ts.cids,
Blocks: ts.blks,
Height: ts.height,
}).MarshalCBOR(w)
}
func (ts *TipSet) UnmarshalCBOR(r io.Reader) error {
var ets ExpTipSet
if err := ets.UnmarshalCBOR(r); err != nil {
return err
}
ots, err := NewTipSet(ets.Blocks)
if err != nil {
return err
}
*ts = *ots
return nil
}
func tipsetSortFunc(blks []*BlockHeader) func(i, j int) bool {
return func(i, j int) bool {
ti := blks[i].LastTicket()
tj := blks[j].LastTicket()
if ti.Equals(tj) {
log.Warnf("blocks have same ticket (%s %s)", blks[i].Miner, blks[j].Miner)
return bytes.Compare(blks[i].Cid().Bytes(), blks[j].Cid().Bytes()) < 0
}
return ti.Less(tj)
}
}
// NewTipSet checks:
// - A tipset is composed of at least one block. (Because of our variable
// number of blocks per tipset, determined by randomness, we do not impose
// an upper limit.)
// - All blocks have the same height.
// - All blocks have the same parents (same number of them and matching CIDs).
func NewTipSet(blks []*BlockHeader) (*TipSet, error) {
if len(blks) == 0 {
return nil, xerrors.Errorf("NewTipSet called with zero length array of blocks")
}
sort.Slice(blks, tipsetSortFunc(blks))
var ts TipSet
ts.cids = []cid.Cid{blks[0].Cid()}
ts.blks = blks
for _, b := range blks[1:] {
if b.Height != blks[0].Height {
return nil, fmt.Errorf("cannot create tipset with mismatching heights")
}
if len(blks[0].Parents) != len(b.Parents) {
return nil, fmt.Errorf("cannot create tipset with mismatching number of parents")
}
for i, cid := range b.Parents {
if cid != blks[0].Parents[i] {
return nil, fmt.Errorf("cannot create tipset with mismatching parents")
}
}
ts.cids = append(ts.cids, b.Cid())
}
ts.height = blks[0].Height
return &ts, nil
}
func (ts *TipSet) Cids() []cid.Cid {
return ts.cids
}
func (ts *TipSet) Key() TipSetKey {
if ts == nil {
return EmptyTSK
}
return NewTipSetKey(ts.cids...)
}
func (ts *TipSet) Height() abi.ChainEpoch {
return ts.height
}
func (ts *TipSet) Parents() TipSetKey {
return NewTipSetKey(ts.blks[0].Parents...)
}
func (ts *TipSet) Blocks() []*BlockHeader {
return ts.blks
}
func (ts *TipSet) Equals(ots *TipSet) bool {
if ts == nil && ots == nil {
return true
}
if ts == nil || ots == nil {
return false
}
if ts.height != ots.height {
return false
}
return slices.Equal(ts.cids, ots.cids)
}
func (t *Ticket) Less(o *Ticket) bool {
tDigest := blake2b.Sum256(t.VRFProof)
oDigest := blake2b.Sum256(o.VRFProof)
return bytes.Compare(tDigest[:], oDigest[:]) < 0
}
func (ts *TipSet) MinTicket() *Ticket {
return ts.MinTicketBlock().Ticket
}
func (ts *TipSet) MinTimestamp() uint64 {
if ts == nil {
return 0
}
blks := ts.Blocks()
// TODO::FVM @vyzo @magik Null rounds shouldn't ever be represented as
// tipsets with no blocks; Null-round generally means that the tipset at
// that epoch doesn't exist - and the next tipset that does exist links
// straight to first epoch with blocks (@raulk agrees -- this is odd)
if len(blks) == 0 {
// null rounds make things crash -- it is threaded in every fvm instantiation
return 0
}
minTs := blks[0].Timestamp
for _, bh := range blks[1:] {
if bh.Timestamp < minTs {
minTs = bh.Timestamp
}
}
return minTs
}
func (ts *TipSet) MinTicketBlock() *BlockHeader {
blks := ts.Blocks()
min := blks[0]
for _, b := range blks[1:] {
if b.LastTicket().Less(min.LastTicket()) {
min = b
}
}
return min
}
func (ts *TipSet) ParentMessageReceipts() cid.Cid {
return ts.blks[0].ParentMessageReceipts
}
func (ts *TipSet) ParentState() cid.Cid {
return ts.blks[0].ParentStateRoot
}
func (ts *TipSet) ParentWeight() BigInt {
return ts.blks[0].ParentWeight
}
func (ts *TipSet) Contains(oc cid.Cid) bool {
return slices.Contains(ts.cids, oc)
}
func (ts *TipSet) IsChildOf(parent *TipSet) bool {
return CidArrsEqual(ts.Parents().Cids(), parent.Cids()) &&
// FIXME: The height check might go beyond what is meant by
// "parent", but many parts of the code currently rely on the
// tipset's height for their processing logic, so the check cannot
// be dropped yet.
ts.height > parent.height
}
func (ts *TipSet) String() string {
return fmt.Sprintf("%v", ts.cids)
}
package types
import (
"bytes"
"encoding/json"
"fmt"
"io"
"strings"
block "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
typegen "github.com/whyrusleeping/cbor-gen"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
)
var EmptyTSK = TipSetKey{}
// The length of a block header CID in bytes.
var blockHeaderCIDLen int
func init() {
// hash a large string of zeros so we don't estimate based on inlined CIDs.
var buf [256]byte
c, err := abi.CidBuilder.Sum(buf[:])
if err != nil {
panic(err)
}
blockHeaderCIDLen = len(c.Bytes())
}
// A TipSetKey is an immutable collection of CIDs forming a unique key for a tipset.
// The CIDs are assumed to be distinct and in canonical order. Two keys with the same
// CIDs in a different order are not considered equal.
// TipSetKey is a lightweight value type, and may be compared for equality with ==.
type TipSetKey struct {
// The internal representation is a concatenation of the bytes of the CIDs, which are
// self-describing, wrapped as a string.
// These gymnastics make a TipSetKey usable as a map key.
// The empty key has value "".
value string
}
// NewTipSetKey builds a new key from a slice of CIDs.
// The CIDs are assumed to be ordered correctly.
func NewTipSetKey(cids ...cid.Cid) TipSetKey {
encoded := encodeKey(cids)
return TipSetKey{string(encoded)}
}
// TipSetKeyFromBytes wraps an encoded key, validating correct decoding.
func TipSetKeyFromBytes(encoded []byte) (TipSetKey, error) {
_, err := decodeKey(encoded)
if err != nil {
return EmptyTSK, xerrors.Errorf("decoding tipset key: %w", err)
}
return TipSetKey{string(encoded)}, nil
}
// Cids returns a slice of the CIDs comprising this key.
func (k TipSetKey) Cids() []cid.Cid {
cids, err := decodeKey([]byte(k.value))
if err != nil {
panic("invalid tipset key: " + err.Error())
}
return cids
}
// String() returns a human-readable representation of the key.
func (k TipSetKey) String() string {
b := strings.Builder{}
b.WriteString("{")
cids := k.Cids()
for i, c := range cids {
b.WriteString(c.String())
if i < len(cids)-1 {
b.WriteString(",")
}
}
b.WriteString("}")
return b.String()
}
// Bytes returns a binary representation of the key.
func (k TipSetKey) Bytes() []byte {
return []byte(k.value)
}
func (k TipSetKey) MarshalJSON() ([]byte, error) {
return json.Marshal(k.Cids())
}
func (k *TipSetKey) UnmarshalJSON(b []byte) error {
var cids []cid.Cid
if err := json.Unmarshal(b, &cids); err != nil {
return err
}
k.value = string(encodeKey(cids))
return nil
}
func (k TipSetKey) Cid() (cid.Cid, error) {
blk, err := k.ToStorageBlock()
if err != nil {
return cid.Cid{}, err
}
return blk.Cid(), nil
}
func (k TipSetKey) ToStorageBlock() (block.Block, error) {
buf := new(bytes.Buffer)
if err := k.MarshalCBOR(buf); err != nil {
log.Errorf("failed to marshal ts key as CBOR: %s", k)
}
cid, err := abi.CidBuilder.Sum(buf.Bytes())
if err != nil {
return nil, err
}
return block.NewBlockWithCid(buf.Bytes(), cid)
}
func (k TipSetKey) MarshalCBOR(writer io.Writer) error {
if err := typegen.WriteMajorTypeHeader(writer, typegen.MajByteString, uint64(len(k.Bytes()))); err != nil {
return err
}
_, err := writer.Write(k.Bytes())
return err
}
func (k *TipSetKey) UnmarshalCBOR(reader io.Reader) error {
cr := typegen.NewCborReader(reader)
maj, extra, err := cr.ReadHeader()
if err != nil {
return err
}
defer func() {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
}()
if extra > typegen.ByteArrayMaxLen {
return fmt.Errorf("t.Binary: byte array too large (%d)", extra)
}
if maj != typegen.MajByteString {
return fmt.Errorf("expected byte array")
}
b := make([]uint8, extra)
if _, err := io.ReadFull(cr, b); err != nil {
return err
}
*k, err = TipSetKeyFromBytes(b)
return err
}
func (k TipSetKey) IsEmpty() bool {
return len(k.value) == 0
}
func encodeKey(cids []cid.Cid) []byte {
buffer := new(bytes.Buffer)
for _, c := range cids {
// bytes.Buffer.Write() err is documented to be always nil.
_, _ = buffer.Write(c.Bytes())
}
return buffer.Bytes()
}
func decodeKey(encoded []byte) ([]cid.Cid, error) {
// To avoid reallocation of the underlying array, estimate the number of CIDs to be extracted
// by dividing the encoded length by the expected CID length.
estimatedCount := len(encoded) / blockHeaderCIDLen
cids := make([]cid.Cid, 0, estimatedCount)
nextIdx := 0
for nextIdx < len(encoded) {
nr, c, err := cid.CidFromBytes(encoded[nextIdx:])
if err != nil {
return nil, err
}
cids = append(cids, c)
nextIdx += nr
}
return cids, nil
}
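// exampleTipSetKeyRoundTrip is a hypothetical sketch of the encoding scheme
// above: a key is just the concatenated bytes of its CIDs, so encoding and
// re-decoding must reproduce the same CIDs in the same order. The CIDs are
// placeholders derived from arbitrary bytes.
func exampleTipSetKeyRoundTrip() (TipSetKey, error) {
	c1, err := abi.CidBuilder.Sum([]byte("block-1"))
	if err != nil {
		return EmptyTSK, err
	}
	c2, err := abi.CidBuilder.Sum([]byte("block-2"))
	if err != nil {
		return EmptyTSK, err
	}
	key := NewTipSetKey(c1, c2)
	// TipSetKeyFromBytes re-validates that the concatenated bytes decode back
	// into well-formed CIDs; the result equals key.
	return TipSetKeyFromBytes(key.Bytes())
}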
var _ typegen.CBORMarshaler = &TipSetKey{}
var _ typegen.CBORUnmarshaler = &TipSetKey{}
package types
import (
"golang.org/x/xerrors"
"github.com/filecoin-project/go-state-types/abi"
)
var (
// TipSetTags represents the predefined set of tags for tipsets. The supported
// tags are:
// - Latest: the most recent tipset in the chain with the heaviest weight.
// - Finalized: the most recent tipset considered final by the node.
// - Safe: the most recent tipset between Finalized and Latest - build.SafeHeightDistance.
// If the tipset at the safe height is null, the first non-nil parent tipset is returned.
//
// See TipSetTag.
TipSetTags = struct {
Latest TipSetTag
Finalized TipSetTag
Safe TipSetTag
}{
Latest: TipSetTag("latest"),
Finalized: TipSetTag("finalized"),
Safe: TipSetTag("safe"),
}
// TipSetSelectors represents the predefined set of selectors for tipsets.
//
// See TipSetSelector.
TipSetSelectors = struct {
Latest TipSetSelector
Finalized TipSetSelector
Safe TipSetSelector
Height func(abi.ChainEpoch, bool, *TipSetAnchor) TipSetSelector
Key func(TipSetKey) TipSetSelector
}{
Latest: TipSetSelector{Tag: &TipSetTags.Latest},
Finalized: TipSetSelector{Tag: &TipSetTags.Finalized},
Safe: TipSetSelector{Tag: &TipSetTags.Safe},
Height: func(height abi.ChainEpoch, previous bool, anchor *TipSetAnchor) TipSetSelector {
return TipSetSelector{Height: &TipSetHeight{At: &height, Previous: previous, Anchor: anchor}}
},
Key: func(key TipSetKey) TipSetSelector { return TipSetSelector{Key: &key} },
}
// TipSetAnchors represents the predefined set of anchors for tipsets.
//
// See TipSetAnchor.
TipSetAnchors = struct {
Latest *TipSetAnchor
Finalized *TipSetAnchor
Safe *TipSetAnchor
Key func(TipSetKey) *TipSetAnchor
}{
Latest: &TipSetAnchor{Tag: &TipSetTags.Latest},
Finalized: &TipSetAnchor{Tag: &TipSetTags.Finalized},
Safe: &TipSetAnchor{Tag: &TipSetTags.Safe},
Key: func(key TipSetKey) *TipSetAnchor { return &TipSetAnchor{Key: &key} },
}
)
// TipSetTag is a string that represents a pointer to a tipset.
// See TipSetSelector.
type TipSetTag string
// TipSetSelector captures the selection criteria for a tipset.
//
// The supported criterion for selection is one of the following:
// - Key: the tipset key, see TipSetKey.
// - Height: the tipset height with an optional fallback to non-null parent, see TipSetHeight.
// - Tag: the tipset tag, one of "latest", "finalized" or "safe", see TipSetTags.
//
// At most, one such criterion can be specified at a time. Otherwise, the
// criterion is considered to be invalid. See Validate.
//
// Experimental: This API is experimental and may change without notice.
type TipSetSelector struct {
Key *TipSetKey `json:"key,omitempty"`
Height *TipSetHeight `json:"height,omitempty"`
Tag *TipSetTag `json:"tag,omitempty"`
}
// Validate ensures that the TipSetSelector is valid. It checks that only one of
// the selection criteria is specified. If no criteria are specified, it returns
// nil, indicating that the default selection criteria should be used as defined
// by the Lotus API Specification.
func (tss TipSetSelector) Validate() error {
var criteria int
if tss.Key != nil {
criteria++
}
if tss.Tag != nil {
criteria++
}
if tss.Height != nil {
criteria++
if err := tss.Height.Validate(); err != nil {
return xerrors.Errorf("validating tipset height: %w", err)
}
}
if criteria != 1 {
return xerrors.Errorf("exactly one tipset selection criteria must be specified, found: %v", criteria)
}
return nil
}
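// exampleTipSetSelectors is a hypothetical sketch of the three selection
// criteria: by tag, by height (anchored here to the finalized tipset), and by
// key. Exactly one criterion may be set per selector, which Validate enforces;
// the height value is a placeholder.
func exampleTipSetSelectors(key TipSetKey) error {
	selectors := []TipSetSelector{
		TipSetSelectors.Finalized,
		TipSetSelectors.Height(123, true, TipSetAnchors.Finalized),
		TipSetSelectors.Key(key),
	}
	for _, s := range selectors {
		if err := s.Validate(); err != nil {
			return err
		}
	}
	return nil
}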
// TipSetHeight is a criterion that selects a tipset At given height anchored to
// a given parent tipset.
//
// In a case where the tipset at given height is null, and Previous is true,
// it'll select the previous non-null tipset instead. Otherwise, it returns the
// null tipset at the given height.
//
// The Anchor may optionally be specified as TipSetTag, or TipSetKey. If
// specified, the selected tipset is guaranteed to be a child of the tipset
// specified by the anchor at the given height. Otherwise, the "finalized" TipSetTag
// is used as the Anchor.
//
// Experimental: This API is experimental and may change without notice.
type TipSetHeight struct {
At *abi.ChainEpoch `json:"at,omitempty"`
Previous bool `json:"previous,omitempty"`
Anchor *TipSetAnchor `json:"anchor,omitempty"`
}
// Validate ensures that the TipSetHeight is valid. It checks that the At epoch
// is specified and is not negative, and that the Anchor is valid.
//
// A zero-valued height (epoch 0) is considered to be valid.
func (tsh TipSetHeight) Validate() error {
if tsh.At == nil {
return xerrors.New("invalid tipset height: at epoch must be specified")
}
if *tsh.At < 0 {
return xerrors.New("invalid tipset height: epoch cannot be less than zero")
}
return tsh.Anchor.Validate()
}
// TipSetAnchor represents a tipset in the chain that can be used as an anchor
// for selecting a tipset. The anchor may be specified as a TipSetTag or a
// TipSetKey but not both. Defaults to TipSetTag "finalized" if neither are
// specified.
//
// See TipSetHeight.
//
// Experimental: This API is experimental and may change without notice.
type TipSetAnchor struct {
// TODO: We might want to rename the term "anchor" to "parent" if they're
// conceptually interchangeable. Because, it is easier to reuse a term that
// already exist compared to teaching people a new one. For now we'll keep it as
// "anchor" to keep consistent with the internal API design discussions. We will
// revisit the terminology here as the new API groups are added, namely
// StateSearchMsg.
Key *TipSetKey `json:"key,omitempty"`
Tag *TipSetTag `json:"tag,omitempty"`
}
// Validate ensures that the TipSetAnchor is valid. It checks that at most one
// of TipSetKey or TipSetTag is specified. Otherwise, it returns an error.
//
// Note that a nil or a zero-valued anchor is valid, and is considered to be
// equivalent to the default anchor, which is the tipset tagged as "finalized".
func (tsa *TipSetAnchor) Validate() error {
if tsa == nil {
// An unspecified Anchor is valid, because it's an optional field, and falls back
// to whatever the API decides the default to be.
return nil
}
if tsa.Key != nil && tsa.Tag != nil {
return xerrors.New("invalid tipset anchor: at most one of key or tag must be specified")
}
// Zero-valued anchor is valid, and considered to be an equivalent to whatever
// the API decides the default to be.
return nil
}