Merge remote-tracking branch 'origin/master' into blocksync-refactor

Łukasz Magiera 2020-09-09 19:19:05 +02:00
commit cb3b0ab2bb
21 changed files with 265 additions and 85 deletions

.github/CODEOWNERS (vendored, normal file, +15 lines)
View File

@@ -0,0 +1,15 @@
## filecoin-project/lotus CODEOWNERS
## Refer to https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners.
##
## These users or groups will be automatically assigned as reviewers every time
## a PR is submitted that modifies code in the specified locations.
##
## The Lotus repo configuration requires that at least ONE codeowner approves
## the PR before merging.
### Global owners.
* @magik6k @whyrusleeping
### Conformance testing.
conformance/ @raulk
extern/test-vectors @raulk

View File

@@ -2,6 +2,7 @@ package api
import (
"context"
"fmt"
"time"
"github.com/filecoin-project/specs-actors/actors/runtime/proof"
@@ -709,8 +710,28 @@ const (
StageMessages
StageSyncComplete
StageSyncErrored
StageFetchingMessages
)
func (v SyncStateStage) String() string {
switch v {
case StageHeaders:
return "header sync"
case StagePersistHeaders:
return "persisting headers"
case StageMessages:
return "message sync"
case StageSyncComplete:
return "complete"
case StageSyncErrored:
return "error"
case StageFetchingMessages:
return "fetching messages"
default:
return fmt.Sprintf("<unknown: %d>", v)
}
}
type MpoolChange int
const (

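Because SyncStateStage now implements fmt.Stringer via the String method added above, callers can format a stage value directly with %s or %v, which is what lets the CLI and debug hunks further down drop chain.SyncStageString. A minimal sketch from a client package (using the typed iota constants in the api const block above):

	fmt.Printf("stage: %s\n", api.StageFetchingMessages) // prints "stage: fetching messages"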
View File

@@ -25,7 +25,7 @@ func buildType() string {
}
// BuildVersion is the local build version, set by build system
const BuildVersion = "0.6.1"
const BuildVersion = "0.6.2-rc1"
func UserVersion() string {
return BuildVersion + buildType() + CurrentCommit

View File

@@ -27,7 +27,11 @@ func (mp *MessagePool) republishPendingMessages() error {
mp.curTsLk.Unlock()
return xerrors.Errorf("computing basefee: %w", err)
}
baseFeeLowerBound := types.BigDiv(baseFee, baseFeeLowerBoundFactor)
if baseFeeLowerBound.LessThan(minimumBaseFee) {
baseFeeLowerBound = minimumBaseFee
}
pending := make(map[address.Address]map[uint64]*types.SignedMessage)
mp.lk.Lock()

View File

@@ -3,6 +3,7 @@ package messagepool
import (
"context"
"math/big"
"math/rand"
"sort"
"time"
@@ -304,6 +305,69 @@ tailLoop:
log.Infow("pack tail chains done", "took", dt)
}
// if we have gasLimit to spare, pick some random (non-negative) chains to fill the block
// we pick randomly so that we minimize the probability of duplication among all miners
startRandom := time.Now()
if gasLimit >= minGas {
shuffleChains(chains)
for _, chain := range chains {
// have we filled the block
if gasLimit < minGas {
break
}
// has it been merged or invalidated?
if chain.merged || !chain.valid {
continue
}
// is it negative?
if !allowNegativeChains(curTs.Height()) && chain.gasPerf < 0 {
continue
}
// compute the dependencies that must be merged and the gas limit including deps
chainGasLimit := chain.gasLimit
depGasLimit := int64(0)
var chainDeps []*msgChain
for curChain := chain.prev; curChain != nil && !curChain.merged; curChain = curChain.prev {
chainDeps = append(chainDeps, curChain)
chainGasLimit += curChain.gasLimit
depGasLimit += curChain.gasLimit
}
// do the deps fit? if the deps won't fit, invalidate the chain
if depGasLimit > gasLimit {
chain.Invalidate()
continue
}
// do they fit as is? if not, trim to make it fit if possible
if chainGasLimit > gasLimit {
chain.Trim(gasLimit-depGasLimit, mp, baseFee, allowNegativeChains(curTs.Height()))
if !chain.valid {
continue
}
}
// include it together with all dependencies
for i := len(chainDeps) - 1; i >= 0; i-- {
curChain := chainDeps[i]
curChain.merged = true
result = append(result, curChain.msgs...)
}
chain.merged = true
result = append(result, chain.msgs...)
gasLimit -= chainGasLimit
}
}
if dt := time.Since(startRandom); dt > time.Millisecond {
log.Infow("pack random tail chains done", "took", dt)
}
return result, nil
}
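To make the trimming arithmetic in the tail-fill loop above concrete (hypothetical numbers): with 100 gas units of block space left, a chain whose own gasLimit is 40 and whose unmerged dependencies total depGasLimit = 80 gives chainGasLimit = 120. The dependencies alone fit (80 <= 100), so the chain is not invalidated, but the combined chain does not fit, so Trim is called with 100 - 80 = 20, cutting the chain down to the gas that actually remains once its dependencies are packed.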
@@ -852,3 +916,10 @@ func (mc *msgChain) BeforeEffective(other *msgChain) bool {
(mc.effPerf == other.effPerf && mc.gasPerf > other.gasPerf) ||
(mc.effPerf == other.effPerf && mc.gasPerf == other.gasPerf && mc.gasReward.Cmp(other.gasReward) > 0)
}
func shuffleChains(lst []*msgChain) {
for i := range lst {
j := rand.Intn(i + 1)
lst[i], lst[j] = lst[j], lst[i]
}
}
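shuffleChains above is an in-place Fisher-Yates shuffle over the global math/rand source, so every permutation of the candidate chains is equally likely. As a sketch only (not what the pool uses, and assuming the same package and math/rand import), the standard library's rand.Shuffle yields the same uniform permutation:

	func shuffleChainsAlt(lst []*msgChain) {
		// rand.Shuffle runs Fisher-Yates from the far end; the distribution is identical
		rand.Shuffle(len(lst), func(i, j int) { lst[i], lst[j] = lst[j], lst[i] })
	}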

View File

@@ -17,14 +17,14 @@ import (
"golang.org/x/xerrors"
)
var ForksAtHeight = map[abi.ChainEpoch]func(context.Context, *StateManager, types.StateTree) error{
var ForksAtHeight = map[abi.ChainEpoch]func(context.Context, *StateManager, types.StateTree, *types.TipSet) error{
build.UpgradeBreezeHeight: UpgradeFaucetBurnRecovery,
}
func (sm *StateManager) handleStateForks(ctx context.Context, st types.StateTree, height abi.ChainEpoch) (err error) {
func (sm *StateManager) handleStateForks(ctx context.Context, st types.StateTree, height abi.ChainEpoch, ts *types.TipSet) (err error) {
f, ok := ForksAtHeight[height]
if ok {
err := f(ctx, sm, st)
err := f(ctx, sm, st, ts)
if err != nil {
return err
}
@@ -66,7 +66,7 @@ func doTransfer(tree types.StateTree, from, to address.Address, amt abi.TokenAmo
return nil
}
func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, tree types.StateTree) error {
func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, tree types.StateTree, ts *types.TipSet) error {
// Some initial parameters
FundsForMiners := types.FromFil(1_000_000)
LookbackEpoch := abi.ChainEpoch(32000)
@@ -91,7 +91,7 @@ func UpgradeFaucetBurnRecovery(ctx context.Context, sm *StateManager, tree types
}
// Grab lookback state for account checks
lbts, err := sm.ChainStore().GetTipsetByHeight(ctx, LookbackEpoch, nil, false)
lbts, err := sm.ChainStore().GetTipsetByHeight(ctx, LookbackEpoch, ts, false)
if err != nil {
return xerrors.Errorf("failed to get tipset at lookback height: %w", err)
}

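Threading the tipset through the fork-handler signature lets UpgradeFaucetBurnRecovery anchor its lookback on the chain actually being validated: GetTipsetByHeight now receives ts instead of nil (nil resolves against the node's current heaviest tipset). The test hunk below is updated to the new four-argument signature.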
View File

@@ -119,7 +119,7 @@ func TestForkHeightTriggers(t *testing.T) {
t.Fatal(err)
}
stmgr.ForksAtHeight[testForkHeight] = func(ctx context.Context, sm *StateManager, st types.StateTree) error {
stmgr.ForksAtHeight[testForkHeight] = func(ctx context.Context, sm *StateManager, st types.StateTree, ts *types.TipSet) error {
cst := cbor.NewCborStore(sm.ChainStore().Blockstore())
act, err := st.GetActor(taddr)

View File

@@ -147,7 +147,7 @@ func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (c
type ExecCallback func(cid.Cid, *types.Message, *vm.ApplyRet) error
func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, cb ExecCallback, baseFee abi.TokenAmount) (cid.Cid, cid.Cid, error) {
func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, cb ExecCallback, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) {
vmopt := &vm.VMOpts{
StateBase: pstate,
@@ -201,7 +201,7 @@ func (sm *StateManager) ApplyBlocks(ctx context.Context, parentEpoch abi.ChainEp
for i := parentEpoch; i < epoch; i++ {
// handle state forks
err = sm.handleStateForks(ctx, vmi.StateTree(), i)
err = sm.handleStateForks(ctx, vmi.StateTree(), i, ts)
if err != nil {
return cid.Undef, cid.Undef, xerrors.Errorf("error handling state forks: %w", err)
}
@@ -350,7 +350,7 @@ func (sm *StateManager) computeTipSetState(ctx context.Context, ts *types.TipSet
baseFee := blks[0].ParentBaseFee
return sm.ApplyBlocks(ctx, parentEpoch, pstate, blkmsgs, blks[0].Height, r, cb, baseFee)
return sm.ApplyBlocks(ctx, parentEpoch, pstate, blkmsgs, blks[0].Height, r, cb, baseFee, ts)
}
func (sm *StateManager) parentState(ts *types.TipSet) cid.Cid {

View File

@@ -456,7 +456,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch,
for i := ts.Height(); i < height; i++ {
// handle state forks
err = sm.handleStateForks(ctx, vmi.StateTree(), i)
err = sm.handleStateForks(ctx, vmi.StateTree(), i, ts)
if err != nil {
return cid.Undef, nil, xerrors.Errorf("error handling state forks: %w", err)
}

View File

@@ -1439,6 +1439,7 @@ func (syncer *Syncer) syncMessagesAndCheckState(ctx context.Context, headers []*
// fills out each of the given tipsets with messages and calls the callback with it
func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipSet, cb func(context.Context, *store.FullTipSet) error) error {
ss := extractSyncState(ctx)
ctx, span := trace.StartSpan(ctx, "iterFullTipsets")
defer span.End()
@@ -1466,6 +1467,7 @@ mainLoop:
nextI := (i + 1) - batchSize // want to fetch batchSize values, 'i' points to last one we want to fetch, so it's 'inclusive' of our request, thus we need to add one to our request start index
ss.SetStage(api.StageFetchingMessages)
var bstout []*exchange.CompactedMessages
for len(bstout) < batchSize {
next := headers[nextI]
@@ -1485,6 +1487,7 @@ mainLoop:
bstout = append(bstout, bstips...)
nextI += len(bstips)
}
ss.SetStage(api.StageMessages)
for bsi := 0; bsi < len(bstout); bsi++ {
// temp storage so we don't persist data we don't want to

View File

@@ -1,7 +1,6 @@
package chain
import (
"fmt"
"sync"
"time"
@@ -12,23 +11,6 @@ import (
"github.com/filecoin-project/lotus/chain/types"
)
func SyncStageString(v api.SyncStateStage) string {
switch v {
case api.StageHeaders:
return "header sync"
case api.StagePersistHeaders:
return "persisting headers"
case api.StageMessages:
return "message sync"
case api.StageSyncComplete:
return "complete"
case api.StageSyncErrored:
return "error"
default:
return fmt.Sprintf("<unknown: %d>", v)
}
}
type SyncerState struct {
lk sync.Mutex
Target *types.TipSet

View File

@@ -8,12 +8,17 @@ import (
"strings"
"text/tabwriter"
"github.com/libp2p/go-libp2p-core/peer"
protocol "github.com/libp2p/go-libp2p-core/protocol"
"github.com/dustin/go-humanize"
"github.com/urfave/cli/v2"
"golang.org/x/xerrors"
"github.com/libp2p/go-libp2p-core/peer"
protocol "github.com/libp2p/go-libp2p-core/protocol"
"github.com/multiformats/go-multiaddr"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/lib/addrutil"
)
@@ -141,7 +146,7 @@ var NetListen = &cli.Command{
var netConnect = &cli.Command{
Name: "connect",
Usage: "Connect to a peer",
ArgsUsage: "[peerMultiaddr]",
ArgsUsage: "[peerMultiaddr|minerActorAddress]",
Action: func(cctx *cli.Context) error {
api, closer, err := GetAPI(cctx)
if err != nil {
@@ -152,7 +157,43 @@ var netConnect = &cli.Command{
pis, err := addrutil.ParseAddresses(ctx, cctx.Args().Slice())
if err != nil {
return err
a, perr := address.NewFromString(cctx.Args().First())
if perr != nil {
return err
}
na, fc, err := GetFullNodeAPI(cctx)
if err != nil {
return err
}
defer fc()
mi, err := na.StateMinerInfo(ctx, a, types.EmptyTSK)
if err != nil {
return xerrors.Errorf("getting miner info: %w", err)
}
if mi.PeerId == nil {
return xerrors.Errorf("no PeerID for miner")
}
multiaddrs := make([]multiaddr.Multiaddr, 0, len(mi.Multiaddrs))
for i, a := range mi.Multiaddrs {
maddr, err := multiaddr.NewMultiaddrBytes(a)
if err != nil {
log.Warnf("parsing multiaddr %d (%x): %s", i, a, err)
continue
}
multiaddrs = append(multiaddrs, maddr)
}
pi := peer.AddrInfo{
ID: *mi.PeerId,
Addrs: multiaddrs,
}
fmt.Printf("%s -> %s\n", a, pi)
pis = append(pis, pi)
}
for _, pi := range pis {

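With this hunk, net connect accepts a miner actor address as well as a raw multiaddr: when the argument fails to parse as an address list, it is reinterpreted as an actor address, the miner's info is fetched via StateMinerInfo, and its advertised multiaddrs are dialed. So something like lotus net connect f01234 (f01234 being a placeholder miner ID, not from this diff) resolves the miner's PeerID and addresses before connecting.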
View File

@@ -11,7 +11,6 @@ import (
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain"
)
var syncCmd = &cli.Command{
@@ -61,7 +60,7 @@ var syncStatusCmd = &cli.Command{
fmt.Printf("\tBase:\t%s\n", base)
fmt.Printf("\tTarget:\t%s (%d)\n", target, theight)
fmt.Printf("\tHeight diff:\t%d\n", heightDiff)
fmt.Printf("\tStage: %s\n", chain.SyncStageString(ss.Stage))
fmt.Printf("\tStage: %s\n", ss.Stage)
fmt.Printf("\tHeight: %d\n", ss.Height)
if ss.End.IsZero() {
if !ss.Start.IsZero() {
@@ -186,7 +185,7 @@ func SyncWait(ctx context.Context, napi api.FullNode) error {
theight = ss.Target.Height()
}
fmt.Printf("\r\x1b[2KWorker %d: Target Height: %d\tTarget: %s\tState: %s\tHeight: %d", working, theight, target, chain.SyncStageString(ss.Stage), ss.Height)
fmt.Printf("\r\x1b[2KWorker %d: Target Height: %d\tTarget: %s\tState: %s\tHeight: %d", working, theight, target, ss.Stage, ss.Height)
if time.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs) {
fmt.Println("\nDone!")

View File

@@ -102,7 +102,7 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, preroot
messages = append(messages, msg)
results = append(results, ret)
return nil
}, tipset.BaseFee)
}, tipset.BaseFee, nil)
if err != nil {
return nil, err

View File

@@ -410,7 +410,7 @@ func (sb *Sealer) ReadPiece(ctx context.Context, writer io.Writer, sector abi.Se
return false, xerrors.Errorf("closing partial file: %w", err)
}
return false, nil
return true, nil
}
func (sb *Sealer) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (out storage.PreCommit1Out, err error) {

View File

@@ -217,16 +217,11 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
return xerrors.Errorf("read piece: checking for already existing unsealed sector: %w", err)
}
var readOk bool
var selector WorkerSelector
if len(best) == 0 { // new
selector = newAllocSelector(m.index, stores.FTUnsealed, stores.PathSealing)
} else { // append to existing
selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)
}
var readOk bool
if len(best) > 0 {
// There is an unsealed sector, see if we can read from it
selector = newExistingSelector(m.index, sector, stores.FTUnsealed, false)
@@ -257,6 +252,9 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
return nil
}
if unsealed == cid.Undef {
return xerrors.Errorf("cannot unseal piece (sector: %d, offset: %d size: %d) - unsealed cid is undefined", sector, offset, size)
}
err = m.sched.Schedule(ctx, sector, sealtasks.TTUnseal, selector, unsealFetch, func(ctx context.Context, w Worker) error {
return w.UnsealPiece(ctx, sector, offset, size, ticket, unsealed)
})
@@ -274,7 +272,7 @@ func (m *Manager) ReadPiece(ctx context.Context, sink io.Writer, sector abi.Sect
return xerrors.Errorf("reading piece from sealed sector: %w", err)
}
if readOk {
if !readOk {
return xerrors.Errorf("failed to read unsealed piece")
}

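Read together with the sealer hunk above: Sealer.ReadPiece now reports success by returning true once the requested range has been copied out (the old code returned false even on the success path), so the manager flips its check and errors only when readOk is still false after reading. The new cid.Undef guard additionally refuses to schedule an unseal when the unsealed CID is unknown.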
View File

@@ -161,7 +161,7 @@ func (c *ClientNodeAdapter) ValidatePublishedDeal(ctx context.Context, deal stor
pubmsg, err := c.cs.GetMessage(*deal.PublishMessage)
if err != nil {
return 0, xerrors.Errorf("getting deal pubsish message: %w", err)
return 0, xerrors.Errorf("getting deal publish message: %w", err)
}
mi, err := stmgr.StateMinerInfo(ctx, c.sm, c.cs.GetHeaviestTipSet(), deal.Proposal.Provider)

View File

@@ -104,6 +104,10 @@ func (hs *Service) HandleStream(s inet.Stream) {
build.Clock.Sleep(time.Millisecond * 300)
}
if hs.pmgr != nil {
hs.pmgr.AddFilecoinPeer(s.Conn().RemotePeer())
}
ts, err := hs.syncer.FetchTipSet(context.Background(), s.Conn().RemotePeer(), types.NewTipSetKey(hmsg.HeaviestTipSet...))
if err != nil {
log.Errorf("failed to fetch tipset from peer during hello: %+v", err)
@@ -117,9 +121,6 @@ func (hs *Service) HandleStream(s inet.Stream) {
log.Infof("Got new tipset through Hello: %s from %s", ts.Cids(), s.Conn().RemotePeer())
hs.syncer.InformNewHead(s.Conn().RemotePeer(), ts)
}
if hs.pmgr != nil {
hs.pmgr.AddFilecoinPeer(s.Conn().RemotePeer())
}
}

View File

@@ -6,12 +6,8 @@ import (
"math/rand"
"sort"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/messagepool"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"go.uber.org/fx"
"golang.org/x/xerrors"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
@@ -19,8 +15,12 @@ import (
"github.com/filecoin-project/go-state-types/exitcode"
"github.com/filecoin-project/specs-actors/actors/builtin"
"go.uber.org/fx"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/messagepool"
"github.com/filecoin-project/lotus/chain/stmgr"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
)
type GasAPI struct {
@@ -50,6 +50,35 @@ func (a *GasAPI) GasEstimateFeeCap(ctx context.Context, msg *types.Message, maxq
return out, nil
}
type gasMeta struct {
price big.Int
limit int64
}
func medianGasPremium(prices []gasMeta, blocks int) abi.TokenAmount {
sort.Slice(prices, func(i, j int) bool {
// sort desc by price
return prices[i].price.GreaterThan(prices[j].price)
})
at := build.BlockGasTarget * int64(blocks) / 2
prev1, prev2 := big.Zero(), big.Zero()
for _, price := range prices {
prev1, prev2 = price.price, prev1
at -= price.limit
if at < 0 {
break
}
}
premium := prev1
if prev2.Sign() != 0 {
premium = big.Div(types.BigAdd(prev1, prev2), types.NewInt(2))
}
return premium
}
func (a *GasAPI) GasEstimateGasPremium(ctx context.Context, nblocksincl uint64,
sender address.Address, gaslimit int64, _ types.TipSetKey) (types.BigInt, error) {
@@ -57,11 +86,6 @@ func (a *GasAPI) GasEstimateGasPremium(ctx context.Context, nblocksincl uint64,
nblocksincl = 1
}
type gasMeta struct {
price big.Int
limit int64
}
var prices []gasMeta
var blocks int
@@ -92,25 +116,7 @@ func (a *GasAPI) GasEstimateGasPremium(ctx context.Context, nblocksincl uint64,
ts = pts
}
sort.Slice(prices, func(i, j int) bool {
// sort desc by price
return prices[i].price.GreaterThan(prices[j].price)
})
at := build.BlockGasTarget * int64(blocks) / 2
prev1, prev2 := big.Zero(), big.Zero()
for _, price := range prices {
prev1, prev2 = price.price, prev1
at -= price.limit
if at > 0 {
continue
}
}
premium := prev1
if prev2.Sign() != 0 {
premium = big.Div(types.BigAdd(prev1, prev2), types.NewInt(2))
}
premium := medianGasPremium(prices, blocks)
if types.BigCmp(premium, types.NewInt(MinGasPremium)) < 0 {
switch nblocksincl {

View File

@@ -0,0 +1,40 @@
package full
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
)
func TestMedian(t *testing.T) {
require.Equal(t, types.NewInt(5), medianGasPremium([]gasMeta{
{big.NewInt(5), build.BlockGasTarget},
}, 1))
require.Equal(t, types.NewInt(10), medianGasPremium([]gasMeta{
{big.NewInt(5), build.BlockGasTarget},
{big.NewInt(10), build.BlockGasTarget},
}, 1))
require.Equal(t, types.NewInt(15), medianGasPremium([]gasMeta{
{big.NewInt(10), build.BlockGasTarget / 2},
{big.NewInt(20), build.BlockGasTarget / 2},
}, 1))
require.Equal(t, types.NewInt(25), medianGasPremium([]gasMeta{
{big.NewInt(10), build.BlockGasTarget / 2},
{big.NewInt(20), build.BlockGasTarget / 2},
{big.NewInt(30), build.BlockGasTarget / 2},
}, 1))
require.Equal(t, types.NewInt(15), medianGasPremium([]gasMeta{
{big.NewInt(10), build.BlockGasTarget / 2},
{big.NewInt(20), build.BlockGasTarget / 2},
{big.NewInt(30), build.BlockGasTarget / 2},
}, 2))
}
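Tracing the final (blocks = 2) case through medianGasPremium as a sanity check: the gas budget is BlockGasTarget * 2 / 2 = BlockGasTarget. Sorted descending, the entries are 30, 20, 10, each with limit BlockGasTarget/2. After 30, at = BlockGasTarget/2; after 20, at = 0, which is not yet negative; after 10, at goes negative and the loop stops with prev1 = 10 and prev2 = 20, so the returned premium is (10 + 20) / 2 = 15, matching the assertion.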

View File

@@ -14,7 +14,6 @@ import (
"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/client"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain"
"github.com/filecoin-project/lotus/chain/store"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/node/repo"
@@ -72,7 +71,7 @@ sync_complete:
"target_height", w.Target.Height(),
"height", w.Height,
"error", w.Message,
"stage", chain.SyncStageString(w.Stage),
"stage", w.Stage.String(),
)
} else {
log.Infow(
@@ -82,7 +81,7 @@ sync_complete:
"target", w.Target.Key(),
"target_height", w.Target.Height(),
"height", w.Height,
"stage", chain.SyncStageString(w.Stage),
"stage", w.Stage.String(),
)
}