Merge remote-tracking branch 'origin/master' into next

Łukasz Magiera 2020-07-06 11:38:19 +02:00
commit 4b0ae7ef9a
30 changed files with 343 additions and 224 deletions

View File

@@ -21,7 +21,7 @@ A clear and concise description of what you expected to happen.
 **Screenshots**
 If applicable, add screenshots to help explain your problem.
-**Version (run `lotus --version`):**
+**Version (run `lotus version`):**
 **Additional context**
 Add any other context about the problem here.

View File

@@ -379,6 +379,7 @@ type DealInfo struct {
 type MsgLookup struct {
     Receipt types.MessageReceipt
+    ReturnDec interface{}
     // TODO: This should probably a tipsetkey?
     TipSet *types.TipSet
 }

View File

@@ -61,7 +61,7 @@ func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport
         defer close(done)
         for atomic.LoadInt64(&mine) == 1 {
             time.Sleep(blocktime)
-            if err := sn[0].MineOne(ctx, func(bool) {}); err != nil {
+            if err := sn[0].MineOne(ctx, func(bool, error) {}); err != nil {
                 t.Error(err)
             }
         }
@@ -99,7 +99,7 @@ func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) {
         defer close(done)
         for atomic.LoadInt64(&mine) == 1 {
             time.Sleep(blocktime)
-            if err := sn[0].MineOne(ctx, func(bool) {}); err != nil {
+            if err := sn[0].MineOne(ctx, func(bool, error) {}); err != nil {
                 t.Error(err)
             }
         }

View File

@@ -34,7 +34,7 @@ func (ts *testSuite) testMining(t *testing.T) {
     require.NoError(t, err)
     <-newHeads
-    err = sn[0].MineOne(ctx, func(bool) {})
+    err = sn[0].MineOne(ctx, func(bool, error) {})
     require.NoError(t, err)
     <-newHeads
@@ -62,7 +62,7 @@ func (ts *testSuite) testMiningReal(t *testing.T) {
     require.NoError(t, err)
     <-newHeads
-    err = sn[0].MineOne(ctx, func(bool) {})
+    err = sn[0].MineOne(ctx, func(bool, error) {})
     require.NoError(t, err)
     <-newHeads
@@ -71,7 +71,7 @@ func (ts *testSuite) testMiningReal(t *testing.T) {
     require.NoError(t, err)
     require.Equal(t, abi.ChainEpoch(1), h2.Height())
-    err = sn[0].MineOne(ctx, func(bool) {})
+    err = sn[0].MineOne(ctx, func(bool, error) {})
     require.NoError(t, err)
     <-newHeads
@@ -132,7 +132,7 @@ func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExpo
         prevExpect := 0
         for atomic.LoadInt32(&mine) != 0 {
             wait := make(chan int, 2)
-            mdone := func(mined bool) {
+            mdone := func(mined bool, err error) {
                 go func() {
                     n := 0
                     if mined {

View File

@@ -18,7 +18,7 @@ type TestNode struct {
 type TestStorageNode struct {
     api.StorageMiner
-    MineOne func(context.Context, func(bool)) error
+    MineOne func(context.Context, func(bool, error)) error
 }

 var PresealGenesis = -1
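
The `MineOne` callback used by these test harnesses gains an `error` parameter, so a round that fails outright can be told apart from a round that simply did not win a block. A minimal, self-contained sketch of a callback matching the new `func(bool, error)` shape; the counters and names here are illustrative only, not part of the diff:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// doneFn mirrors the new MineOne callback shape: func(mined bool, err error).
type doneFn func(bool, error)

func main() {
	var rounds, wins, failures int64

	// Record the outcome of each mining round the callback is told about.
	var onDone doneFn = func(mined bool, err error) {
		atomic.AddInt64(&rounds, 1)
		switch {
		case err != nil:
			atomic.AddInt64(&failures, 1) // the round errored out
		case mined:
			atomic.AddInt64(&wins, 1) // a block was produced this round
		}
	}

	// Simulate a few rounds the way MineOne would report them.
	onDone(true, nil)
	onDone(false, nil)
	onDone(false, fmt.Errorf("simulated mining failure"))

	fmt.Printf("rounds=%d wins=%d failures=%d\n", rounds, wins, failures)
}
```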

View File

@@ -43,7 +43,7 @@ func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSect
         defer close(done)
         for mine {
             time.Sleep(blocktime)
-            if err := sn[0].MineOne(ctx, func(bool) {}); err != nil {
+            if err := sn[0].MineOne(ctx, func(bool, error) {}); err != nil {
                 t.Error(err)
             }
         }
@@ -123,7 +123,7 @@ func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSector
         defer close(done)
         for mine {
             time.Sleep(blocktime)
-            if err := sn[0].MineOne(ctx, func(bool) {}); err != nil {
+            if err := sn[0].MineOne(ctx, func(bool, error) {}); err != nil {
                 t.Error(err)
             }
         }

View File

@@ -4,6 +4,7 @@ import (
     "bytes"
     "context"
     "os"
+    "reflect"

     cid "github.com/ipfs/go-cid"
     blockstore "github.com/ipfs/go-ipfs-blockstore"
@@ -18,10 +19,16 @@ import (
     "github.com/filecoin-project/specs-actors/actors/abi"
     "github.com/filecoin-project/specs-actors/actors/abi/big"
     "github.com/filecoin-project/specs-actors/actors/builtin"
+    "github.com/filecoin-project/specs-actors/actors/builtin/account"
+    "github.com/filecoin-project/specs-actors/actors/builtin/cron"
     init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
     "github.com/filecoin-project/specs-actors/actors/builtin/market"
     "github.com/filecoin-project/specs-actors/actors/builtin/miner"
+    "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
+    "github.com/filecoin-project/specs-actors/actors/builtin/paych"
     "github.com/filecoin-project/specs-actors/actors/builtin/power"
+    "github.com/filecoin-project/specs-actors/actors/builtin/reward"
+    "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
     "github.com/filecoin-project/specs-actors/actors/crypto"
     "github.com/filecoin-project/specs-actors/actors/util/adt"
@@ -619,3 +626,60 @@ func (sm *StateManager) CirculatingSupply(ctx context.Context, ts *types.TipSet)
     return rt.TotalFilCircSupply(), nil
 }
+
+type methodMeta struct {
+    Name string
+
+    Params reflect.Type
+    Ret    reflect.Type
+}
+
+var MethodsMap = map[cid.Cid][]methodMeta{}
+
+func init() {
+    cidToMethods := map[cid.Cid][2]interface{}{
+        // builtin.SystemActorCodeID: {builtin.MethodsSystem, system.Actor{} }- apparently it doesn't have methods
+        builtin.InitActorCodeID:             {builtin.MethodsInit, init_.Actor{}},
+        builtin.CronActorCodeID:             {builtin.MethodsCron, cron.Actor{}},
+        builtin.AccountActorCodeID:          {builtin.MethodsAccount, account.Actor{}},
+        builtin.StoragePowerActorCodeID:     {builtin.MethodsPower, power.Actor{}},
+        builtin.StorageMinerActorCodeID:     {builtin.MethodsMiner, miner.Actor{}},
+        builtin.StorageMarketActorCodeID:    {builtin.MethodsMarket, market.Actor{}},
+        builtin.PaymentChannelActorCodeID:   {builtin.MethodsPaych, paych.Actor{}},
+        builtin.MultisigActorCodeID:         {builtin.MethodsMultisig, multisig.Actor{}},
+        builtin.RewardActorCodeID:           {builtin.MethodsReward, reward.Actor{}},
+        builtin.VerifiedRegistryActorCodeID: {builtin.MethodsVerifiedRegistry, verifreg.Actor{}},
+    }
+
+    for c, m := range cidToMethods {
+        rt := reflect.TypeOf(m[0])
+        nf := rt.NumField()
+
+        MethodsMap[c] = append(MethodsMap[c], methodMeta{
+            Name:   "Send",
+            Params: reflect.TypeOf(new(adt.EmptyValue)),
+            Ret:    reflect.TypeOf(new(adt.EmptyValue)),
+        })
+
+        exports := m[1].(abi.Invokee).Exports()
+        for i := 0; i < nf; i++ {
+            export := reflect.TypeOf(exports[i+1])
+
+            MethodsMap[c] = append(MethodsMap[c], methodMeta{
+                Name:   rt.Field(i).Name,
+                Params: export.In(1),
+                Ret:    export.Out(0),
+            })
+        }
+    }
+}
+
+func GetReturnType(ctx context.Context, sm *StateManager, to address.Address, method abi.MethodNum, ts *types.TipSet) (cbg.CBORUnmarshaler, error) {
+    act, err := sm.GetActor(to, ts)
+    if err != nil {
+        return nil, err
+    }
+
+    m := MethodsMap[act.Code][method]
+    return reflect.New(m.Ret.Elem()).Interface().(cbg.CBORUnmarshaler), nil
+}
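
This hunk moves the actor-method metadata out of the CLI and into `stmgr`: `MethodsMap` is keyed by actor code CID and indexed by method number (slot 0 is the implicit `Send`, then the actor's exports in order), and each entry records the method name plus its params and return types as `reflect.Type`s. Below is a minimal sketch of how a caller can use it to decode raw CBOR params, mirroring what `jsonParams` in `cli/state.go` now does; the `decodeParams` helper is illustrative and not part of this commit, and it assumes the specs-actors/lotus module versions used here:

```go
package main

import (
	"bytes"
	"fmt"
	"reflect"

	"github.com/filecoin-project/specs-actors/actors/abi"
	"github.com/filecoin-project/specs-actors/actors/builtin"
	cid "github.com/ipfs/go-cid"
	cbg "github.com/whyrusleeping/cbor-gen"

	"github.com/filecoin-project/lotus/chain/stmgr"
)

// decodeParams turns raw CBOR message params into the concrete specs-actors
// type for a given actor code / method number, using stmgr.MethodsMap.
func decodeParams(code cid.Cid, method abi.MethodNum, raw []byte) (cbg.CBORUnmarshaler, error) {
	meta := stmgr.MethodsMap[code][method]
	// meta.Params is a pointer type, e.g. *market.PublishStorageDealsParams.
	params := reflect.New(meta.Params.Elem()).Interface().(cbg.CBORUnmarshaler)
	if err := params.UnmarshalCBOR(bytes.NewReader(raw)); err != nil {
		return nil, err
	}
	return params, nil
}

func main() {
	// Look up the metadata for the market actor's PublishStorageDeals method.
	meta := stmgr.MethodsMap[builtin.StorageMarketActorCodeID][builtin.MethodsMarket.PublishStorageDeals]
	fmt.Println(meta.Name, meta.Params, meta.Ret)
}
```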

View File

@@ -23,72 +23,20 @@ import (
     "github.com/filecoin-project/go-address"
     "github.com/filecoin-project/specs-actors/actors/abi"
     "github.com/filecoin-project/specs-actors/actors/builtin"
-    "github.com/filecoin-project/specs-actors/actors/builtin/account"
-    "github.com/filecoin-project/specs-actors/actors/builtin/cron"
-    init_ "github.com/filecoin-project/specs-actors/actors/builtin/init"
     "github.com/filecoin-project/specs-actors/actors/builtin/market"
     miner2 "github.com/filecoin-project/specs-actors/actors/builtin/miner"
     "github.com/filecoin-project/specs-actors/actors/builtin/multisig"
     "github.com/filecoin-project/specs-actors/actors/builtin/paych"
     "github.com/filecoin-project/specs-actors/actors/builtin/power"
-    "github.com/filecoin-project/specs-actors/actors/builtin/reward"
-    "github.com/filecoin-project/specs-actors/actors/builtin/verifreg"
     "github.com/filecoin-project/specs-actors/actors/runtime/exitcode"
-    "github.com/filecoin-project/specs-actors/actors/util/adt"

     "github.com/filecoin-project/lotus/api"
     "github.com/filecoin-project/lotus/build"
+    "github.com/filecoin-project/lotus/chain/stmgr"
     "github.com/filecoin-project/lotus/chain/types"
     "github.com/filecoin-project/lotus/miner"
 )
-
-type methodMeta struct {
-    Name string
-
-    params reflect.Type
-    ret    reflect.Type
-}
-
-var methods = map[cid.Cid][]methodMeta{}
-
-func init() {
-    cidToMethods := map[cid.Cid][2]interface{}{
-        // builtin.SystemActorCodeID: {builtin.MethodsSystem, system.Actor{} }- apparently it doesn't have methods
-        builtin.InitActorCodeID:             {builtin.MethodsInit, init_.Actor{}},
-        builtin.CronActorCodeID:             {builtin.MethodsCron, cron.Actor{}},
-        builtin.AccountActorCodeID:          {builtin.MethodsAccount, account.Actor{}},
-        builtin.StoragePowerActorCodeID:     {builtin.MethodsPower, power.Actor{}},
-        builtin.StorageMinerActorCodeID:     {builtin.MethodsMiner, miner2.Actor{}},
-        builtin.StorageMarketActorCodeID:    {builtin.MethodsMarket, market.Actor{}},
-        builtin.PaymentChannelActorCodeID:   {builtin.MethodsPaych, paych.Actor{}},
-        builtin.MultisigActorCodeID:         {builtin.MethodsMultisig, multisig.Actor{}},
-        builtin.RewardActorCodeID:           {builtin.MethodsReward, reward.Actor{}},
-        builtin.VerifiedRegistryActorCodeID: {builtin.MethodsVerifiedRegistry, verifreg.Actor{}},
-    }
-
-    for c, m := range cidToMethods {
-        rt := reflect.TypeOf(m[0])
-        nf := rt.NumField()
-
-        methods[c] = append(methods[c], methodMeta{
-            Name:   "Send",
-            params: reflect.TypeOf(new(adt.EmptyValue)),
-            ret:    reflect.TypeOf(new(adt.EmptyValue)),
-        })
-
-        exports := m[1].(abi.Invokee).Exports()
-        for i := 0; i < nf; i++ {
-            export := reflect.TypeOf(exports[i+1])
-
-            methods[c] = append(methods[c], methodMeta{
-                Name:   rt.Field(i).Name,
-                params: export.In(1),
-                ret:    export.Out(0),
-            })
-        }
-    }
-}

 var stateCmd = &cli.Command{
     Name:  "state",
     Usage: "Interact with and query filecoin chain state",
@@ -1191,7 +1139,7 @@ func codeStr(c cid.Cid) string {
 }

 func getMethod(code cid.Cid, method abi.MethodNum) string {
-    return methods[code][method].Name
+    return stmgr.MethodsMap[code][method].Name
 }

 func toFil(f types.BigInt) types.FIL {
@@ -1222,7 +1170,7 @@ func sumGas(changes []*types.GasTrace) types.GasTrace {
 }

 func jsonParams(code cid.Cid, method abi.MethodNum, params []byte) (string, error) {
-    re := reflect.New(methods[code][method].params.Elem())
+    re := reflect.New(stmgr.MethodsMap[code][method].Params.Elem())
     p := re.Interface().(cbg.CBORUnmarshaler)
     if err := p.UnmarshalCBOR(bytes.NewReader(params)); err != nil {
         return "", err
@@ -1233,7 +1181,7 @@ func jsonParams(code cid.Cid, method abi.MethodNum, params []byte) (string, erro
 }

 func jsonReturn(code cid.Cid, method abi.MethodNum, ret []byte) (string, error) {
-    re := reflect.New(methods[code][method].ret.Elem())
+    re := reflect.New(stmgr.MethodsMap[code][method].Ret.Elem())
     p := re.Interface().(cbg.CBORUnmarshaler)
     if err := p.UnmarshalCBOR(bytes.NewReader(ret)); err != nil {
         return "", err

View File

@@ -4,9 +4,13 @@ import (
     "bytes"
     "context"
     "fmt"
+    "html/template"
+    "io"
+    "io/ioutil"
     "net"
     "net/http"
     "os"
+    "sort"
     "strconv"
     "time"
@@ -28,12 +32,47 @@ import (
     "github.com/filecoin-project/lotus/chain/actors"
     "github.com/filecoin-project/lotus/chain/types"
     lcli "github.com/filecoin-project/lotus/cli"
+    "github.com/filecoin-project/specs-actors/actors/builtin/miner"
 )

 var log = logging.Logger("main")

 var sendPerRequest, _ = types.ParseFIL("50")

+var supportedSectors struct {
+    SectorSizes []struct {
+        Name    string
+        Value   uint64
+        Default bool
+    }
+}
+
+func init() {
+    for supportedSector, _ := range miner.SupportedProofTypes {
+        sectorSize, err := supportedSector.SectorSize()
+        if err != nil {
+            panic(err)
+        }
+
+        supportedSectors.SectorSizes = append(supportedSectors.SectorSizes, struct {
+            Name    string
+            Value   uint64
+            Default bool
+        }{
+            Name:    sectorSize.ShortString(),
+            Value:   uint64(sectorSize),
+            Default: false,
+        })
+    }
+
+    sort.Slice(supportedSectors.SectorSizes[:], func(i, j int) bool {
+        return supportedSectors.SectorSizes[i].Value < supportedSectors.SectorSizes[j].Value
+    })
+
+    supportedSectors.SectorSizes[0].Default = true
+}
+
 func main() {
     logging.SetLogLevel("*", "INFO")
@@ -125,6 +164,7 @@ var runCmd = &cli.Command{
     }

     http.Handle("/", http.FileServer(rice.MustFindBox("site").HTTPBox()))
+    http.HandleFunc("/miner.html", h.minerhtml)
     http.HandleFunc("/send", h.send)
     http.HandleFunc("/mkminer", h.mkminer)
     http.HandleFunc("/msgwait", h.msgwait)
@@ -153,6 +193,37 @@ type handler struct {
     defaultMinerPeer peer.ID
 }

+func (h *handler) minerhtml(w http.ResponseWriter, r *http.Request) {
+    f, err := rice.MustFindBox("site").Open("_miner.html")
+    if err != nil {
+        w.WriteHeader(500)
+        _, _ = w.Write([]byte(err.Error()))
+        return
+    }
+
+    tmpl, err := ioutil.ReadAll(f)
+    if err != nil {
+        w.WriteHeader(500)
+        _, _ = w.Write([]byte(err.Error()))
+        return
+    }
+
+    var executedTmpl bytes.Buffer
+
+    t, err := template.New("miner.html").Parse(string(tmpl))
+    if err := t.Execute(&executedTmpl, supportedSectors); err != nil {
+        w.WriteHeader(500)
+        _, _ = w.Write([]byte(err.Error()))
+        return
+    }
+
+    if _, err := io.Copy(w, &executedTmpl); err != nil {
+        log.Errorf("failed to write template to string %s", err)
+    }
+
+    return
+}
+
 func (h *handler) send(w http.ResponseWriter, r *http.Request) {
     to, err := address.NewFromString(r.FormValue("address"))
     if err != nil {

View File

@@ -15,8 +15,9 @@
     <span>Enter owner/worker address:</span>
     <input type='text' name='address' style="width: 300px" placeholder="t3...">
     <select name="sectorSize">
-      <option selected value="34359738368">32GiB sectors</option>
-      <option value="68719476736">64GiB sectors</option>
+      {{range .SectorSizes}}
+      <option {{if .Default}}selected{{end}} value="{{ .Value }}">{{ .Name }}</option>
+      {{end}}
     </select>
     <button type='submit'>Create Miner</button>
 </form>

View File

@@ -186,7 +186,7 @@ func presealSector(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, sid abi.Sector
 }

 func presealSectorFake(sbfs *basicfs.Provider, sid abi.SectorID, spt abi.RegisteredSealProof, ssize abi.SectorSize) (*genesis.PreSeal, error) {
-    paths, done, err := sbfs.AcquireSector(context.TODO(), sid, 0, stores.FTSealed | stores.FTCache, true)
+    paths, done, err := sbfs.AcquireSector(context.TODO(), sid, 0, stores.FTSealed|stores.FTCache, true)
     if err != nil {
         return nil, xerrors.Errorf("acquire unsealed sector: %w", err)
     }

View File

@@ -121,8 +121,7 @@ var keyinfoImportCmd = &cli.Command{
             fmt.Printf("%s\n", peerid.String())
             break
-        case wallet.KTSecp256k1:
-        case wallet.KTBLS:
+        case wallet.KTSecp256k1, wallet.KTBLS:
             w, err := wallet.NewWallet(keystore)
             if err != nil {
                 return err
@@ -223,8 +222,7 @@ var keyinfoInfoCmd = &cli.Command{
             kio.PublicKey = base64.StdEncoding.EncodeToString(pkBytes)
             break
-        case wallet.KTSecp256k1:
-        case wallet.KTBLS:
+        case wallet.KTSecp256k1, wallet.KTBLS:
             kio.Type = keyInfo.Type

             key, err := wallet.NewKey(keyInfo)
@@ -311,8 +309,7 @@ var keyinfoNewCmd = &cli.Command{
             keyInfo = ki
             break
-        case wallet.KTSecp256k1:
-        case wallet.KTBLS:
+        case wallet.KTSecp256k1, wallet.KTBLS:
             key, err := wallet.GenerateKey(wallet.ActSigType(keyType))
             if err != nil {
                 return err
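
These three hunks fix the same bug: Go `switch` cases do not fall through, so the old empty `case wallet.KTSecp256k1:` clause followed by `case wallet.KTBLS:` meant secp256k1 keys matched a clause that did nothing, and only BLS keys ever reached the shared body. Listing both key types in one case clause runs the body for either. A small standalone illustration of the difference; the key-type strings below are placeholders, not lotus code:

```go
package main

import "fmt"

// classify mimics the fixed pattern: one case clause listing both key types.
func classify(keyType string) string {
	switch keyType {
	case "secp256k1", "bls": // both kinds share the body below
		return "wallet signing key"
	case "libp2p-host":
		return "peer identity key"
	default:
		return "unknown"
	}
}

func main() {
	// With the old pattern
	//   case "secp256k1":
	//   case "bls":
	//       ... body ...
	// "secp256k1" matches the first, empty clause and the body never runs,
	// because Go switch cases do not fall through implicitly.
	fmt.Println(classify("secp256k1")) // wallet signing key
	fmt.Println(classify("bls"))       // wallet signing key
}
```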

View File

@@ -1,6 +1,6 @@
-# Stats
+# lotus-stats

-Stats is a small tool to push chain information into influxdb
+`lotus-stats` is a small tool to push chain information into InfluxDB.

 ## Setup
@@ -14,13 +14,13 @@ INFLUX_PASS=""
 ## Usage

-Stats will be default look in `~/.lotus` to connect to a running daemon and resume collecting stats from last record block height.
+`lotus-stats` will by default look in `~/.lotus` to connect to a running daemon and resume collecting stats from the last recorded block height.

-For other usage see `./stats --help`
+For other usage see `./lotus-stats --help`

 ```
-go build -o stats *.go
-. env.stats && ./stats
+go build -o lotus-stats *.go
+. env.stats && ./lotus-stats
 ```

cmd/lotus-stats/main.go (new file, 72 lines)
View File

@@ -0,0 +1,72 @@
+package main
+
+import (
+    "context"
+    "flag"
+    "os"
+
+    "github.com/filecoin-project/lotus/tools/stats"
+
+    logging "github.com/ipfs/go-log/v2"
+)
+
+var log = logging.Logger("stats")
+
+const (
+    influxAddrEnvVar = "INFLUX_ADDR"
+    influxUserEnvVar = "INFLUX_USER"
+    influxPassEnvVar = "INFLUX_PASS"
+)
+
+func main() {
+    var repo string = "~/.lotus"
+    var database string = "lotus"
+    var reset bool = false
+    var nosync bool = false
+    var height int64 = 0
+    var headlag int = 3
+
+    flag.StringVar(&repo, "repo", repo, "lotus repo path")
+    flag.StringVar(&database, "database", database, "influx database")
+    flag.Int64Var(&height, "height", height, "block height to start syncing from (0 will resume)")
+    flag.IntVar(&headlag, "head-lag", headlag, "number of head events to hold to protect against small reorgs")
+    flag.BoolVar(&reset, "reset", reset, "truncate database before starting stats gathering")
+    flag.BoolVar(&nosync, "nosync", nosync, "skip waiting for sync")
+
+    flag.Parse()
+
+    ctx := context.Background()
+
+    influx, err := stats.InfluxClient(os.Getenv(influxAddrEnvVar), os.Getenv(influxUserEnvVar), os.Getenv(influxPassEnvVar))
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    if reset {
+        if err := stats.ResetDatabase(influx, database); err != nil {
+            log.Fatal(err)
+        }
+    }
+
+    if !reset && height == 0 {
+        h, err := stats.GetLastRecordedHeight(influx, database)
+        if err != nil {
+            log.Info(err)
+        }
+
+        height = h
+    }
+
+    api, closer, err := stats.GetFullNodeAPI(repo)
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer closer()
+
+    if !nosync {
+        if err := stats.WaitForSyncComplete(ctx, api); err != nil {
+            log.Fatal(err)
+        }
+    }
+
+    stats.Collect(ctx, api, influx, database, height, headlag)
+}

View File

@@ -76,6 +76,7 @@ var provingFaultsCmd = &cli.Command{
         }
         if len(faults) == 0 {
             fmt.Println("no faulty sectors")
+            return nil
         }

         head, err := api.ChainHead(ctx)
         if err != nil {

View File

@@ -28,7 +28,7 @@ import (
 var log = logging.Logger("miner")

 // returns a callback reporting whether we mined a blocks in this round
-type waitFunc func(ctx context.Context, baseTime uint64) (func(bool), error)
+type waitFunc func(ctx context.Context, baseTime uint64) (func(bool, error), error)

 func NewMiner(api api.FullNode, epp gen.WinningPoStProver, addr address.Address) *Miner {
     arc, err := lru.NewARC(10000)
@@ -40,12 +40,12 @@ func NewMiner(api api.FullNode, epp gen.WinningPoStProver, addr address.Address)
         api:     api,
         epp:     epp,
         address: addr,
-        waitFunc: func(ctx context.Context, baseTime uint64) (func(bool), error) {
+        waitFunc: func(ctx context.Context, baseTime uint64) (func(bool, error), error) {
             // Wait around for half the block time in case other parents come in
             deadline := baseTime + build.PropagationDelaySecs
             time.Sleep(time.Until(time.Unix(int64(deadline), 0)))
-            return func(bool) {}, nil
+            return func(bool, error) {}, nil
         },
         minedBlockHeights: arc,
     }
@@ -158,11 +158,12 @@ func (m *Miner) mine(ctx context.Context) {
         if err != nil {
             log.Errorf("mining block failed: %+v", err)
             m.niceSleep(time.Second)
+            onDone(false, err)
             continue
         }
         lastBase = *base

-        onDone(b != nil)
+        onDone(b != nil, nil)

         if b != nil {
             btime := time.Unix(int64(b.Header.Timestamp), 0)

View File

@@ -9,7 +9,7 @@ import (
     lru "github.com/hashicorp/golang-lru"
 )

-func NewTestMiner(nextCh <-chan func(bool), addr address.Address) func(api.FullNode, gen.WinningPoStProver) *Miner {
+func NewTestMiner(nextCh <-chan func(bool, error), addr address.Address) func(api.FullNode, gen.WinningPoStProver) *Miner {
     return func(api api.FullNode, epp gen.WinningPoStProver) *Miner {
         arc, err := lru.NewARC(10000)
         if err != nil {
@@ -31,8 +31,8 @@ func NewTestMiner(nextCh <-chan func(bool), addr address.Address) func(api.FullN
     }
 }

-func chanWaiter(next <-chan func(bool)) func(ctx context.Context, _ uint64) (func(bool), error) {
-    return func(ctx context.Context, _ uint64) (func(bool), error) {
+func chanWaiter(next <-chan func(bool, error)) func(ctx context.Context, _ uint64) (func(bool, error), error) {
+    return func(ctx context.Context, _ uint64) (func(bool, error), error) {
         select {
         case <-ctx.Done():
             return nil, ctx.Err()

View File

@@ -362,8 +362,30 @@ func (a *StateAPI) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence uin
         return nil, err
     }

+    var returndec interface{}
+    if recpt.ExitCode == 0 && len(recpt.Return) > 0 {
+        cmsg, err := a.Chain.GetCMessage(msg)
+        if err != nil {
+            return nil, xerrors.Errorf("failed to load message after successful receipt search: %w", err)
+        }
+
+        vmsg := cmsg.VMMessage()
+
+        t, err := stmgr.GetReturnType(ctx, a.StateManager, vmsg.To, vmsg.Method, ts)
+        if err != nil {
+            return nil, xerrors.Errorf("failed to get return type: %w", err)
+        }
+
+        if err := t.UnmarshalCBOR(bytes.NewReader(recpt.Return)); err != nil {
+            return nil, err
+        }
+
+        returndec = t
+    }
+
     return &api.MsgLookup{
         Receipt: *recpt,
+        ReturnDec: returndec,
         TipSet:  ts,
     }, nil
 }
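
With this change, `StateWaitMsg` decodes the return bytes of a successful receipt via `stmgr.GetReturnType` and exposes the typed value as `MsgLookup.ReturnDec`, so API clients no longer have to unmarshal raw CBOR themselves. A sketch of how a client might use it, assuming `node` is an already-connected FullNode API client, `msgCid` is a previously pushed `CreateMiner` message, and the confidence value of 5 is arbitrary:

```go
package main

import (
	"context"
	"fmt"

	cid "github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api"
	"github.com/filecoin-project/specs-actors/actors/builtin/power"
)

// waitForCreateMiner shows the new ReturnDec field in action. node and msgCid
// are placeholders supplied by the caller.
func waitForCreateMiner(ctx context.Context, node api.FullNode, msgCid cid.Cid) error {
	lookup, err := node.StateWaitMsg(ctx, msgCid, 5) // 5 = confidence epochs (illustrative)
	if err != nil {
		return err
	}
	if lookup.Receipt.ExitCode != 0 {
		return fmt.Errorf("message failed with exit code %d", lookup.Receipt.ExitCode)
	}
	// ReturnDec is only populated for successful receipts with a non-empty return
	// value; it carries the decoded specs-actors return type instead of raw CBOR.
	if ret, ok := lookup.ReturnDec.(*power.CreateMinerReturn); ok {
		fmt.Println("new miner actor:", ret.IDAddress)
	}
	return nil
}
```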

View File

@@ -116,7 +116,7 @@ func testStorageNode(ctx context.Context, t *testing.T, waddr address.Address, a
     // start node
     var minerapi api.StorageMiner

-    mineBlock := make(chan func(bool))
+    mineBlock := make(chan func(bool, error))
     // TODO: use stop
     _, err = node.New(ctx,
         node.StorageMiner(&minerapi),
@@ -141,7 +141,7 @@ func testStorageNode(ctx context.Context, t *testing.T, waddr address.Address, a
     err = minerapi.NetConnect(ctx, remoteAddrs)
     require.NoError(t, err)*/

-    mineOne := func(ctx context.Context, cb func(bool)) error {
+    mineOne := func(ctx context.Context, cb func(bool, error)) error {
         select {
         case mineBlock <- cb:
             return nil

tools/stats/collect.go (new file, 63 lines)
View File

@@ -0,0 +1,63 @@
+package stats
+
+import (
+    "context"
+    "time"
+
+    "github.com/filecoin-project/lotus/api"
+    "github.com/filecoin-project/specs-actors/actors/abi"
+    client "github.com/influxdata/influxdb1-client/v2"
+)
+
+func Collect(ctx context.Context, api api.FullNode, influx client.Client, database string, height int64, headlag int) {
+    tipsetsCh, err := GetTips(ctx, api, abi.ChainEpoch(height), headlag)
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    wq := NewInfluxWriteQueue(ctx, influx)
+    defer wq.Close()
+
+    for tipset := range tipsetsCh {
+        log.Infow("Collect stats", "height", tipset.Height())
+
+        pl := NewPointList()
+        height := tipset.Height()
+
+        if err := RecordTipsetPoints(ctx, api, pl, tipset); err != nil {
+            log.Warnw("Failed to record tipset", "height", height, "error", err)
+            continue
+        }
+
+        if err := RecordTipsetMessagesPoints(ctx, api, pl, tipset); err != nil {
+            log.Warnw("Failed to record messages", "height", height, "error", err)
+            continue
+        }
+
+        if err := RecordTipsetStatePoints(ctx, api, pl, tipset); err != nil {
+            log.Warnw("Failed to record state", "height", height, "error", err)
+            continue
+        }
+
+        // Instead of having to pass around a bunch of generic stuff we want for each point
+        // we will just add them at the end.
+        tsTimestamp := time.Unix(int64(tipset.MinTimestamp()), int64(0))
+
+        nb, err := InfluxNewBatch()
+        if err != nil {
+            log.Fatal(err)
+        }
+
+        for _, pt := range pl.Points() {
+            pt.SetTime(tsTimestamp)
+            nb.AddPoint(NewPointFrom(pt))
+        }
+
+        nb.SetDatabase(database)
+
+        log.Infow("Adding points", "count", len(nb.Points()), "height", tipset.Height())
+
+        wq.AddBatch(nb)
+    }
+}

View File

@@ -1,4 +1,4 @@
-package main
+package stats

 import (
     "container/list"

View File

@@ -1,4 +1,4 @@
-package main
+package stats

 import (
     "testing"

View File

@@ -1,126 +0,0 @@
-package main
-
-import (
-    "context"
-    "flag"
-    "os"
-    "time"
-
-    "github.com/filecoin-project/specs-actors/actors/abi"
-
-    logging "github.com/ipfs/go-log/v2"
-)
-
-var log = logging.Logger("stats")
-
-const (
-    INFLUX_ADDR = "INFLUX_ADDR"
-    INFLUX_USER = "INFLUX_USER"
-    INFLUX_PASS = "INFLUX_PASS"
-)
-
-func main() {
-    var repo string = "~/.lotus"
-    var database string = "lotus"
-    var reset bool = false
-    var nosync bool = false
-    var height int64 = 0
-    var headlag int = 3
-
-    flag.StringVar(&repo, "repo", repo, "lotus repo path")
-    flag.StringVar(&database, "database", database, "influx database")
-    flag.Int64Var(&height, "height", height, "block height to start syncing from (0 will resume)")
-    flag.IntVar(&headlag, "head-lag", headlag, "number of head events to hold to protect against small reorgs")
-    flag.BoolVar(&reset, "reset", reset, "truncate database before starting stats gathering")
-    flag.BoolVar(&nosync, "nosync", nosync, "skip waiting for sync")
-
-    flag.Parse()
-
-    influxAddr := os.Getenv(INFLUX_ADDR)
-    influxUser := os.Getenv(INFLUX_USER)
-    influxPass := os.Getenv(INFLUX_PASS)
-
-    ctx := context.Background()
-
-    influx, err := InfluxClient(influxAddr, influxUser, influxPass)
-    if err != nil {
-        log.Fatal(err)
-    }
-
-    if reset {
-        if err := ResetDatabase(influx, database); err != nil {
-            log.Fatal(err)
-        }
-    }
-
-    if !reset && height == 0 {
-        h, err := GetLastRecordedHeight(influx, database)
-        if err != nil {
-            log.Info(err)
-        }
-
-        height = h
-    }
-
-    api, closer, err := GetFullNodeAPI(repo)
-    if err != nil {
-        log.Fatal(err)
-    }
-    defer closer()
-
-    if !nosync {
-        if err := WaitForSyncComplete(ctx, api); err != nil {
-            log.Fatal(err)
-        }
-    }
-
-    tipsetsCh, err := GetTips(ctx, api, abi.ChainEpoch(height), headlag)
-    if err != nil {
-        log.Fatal(err)
-    }
-
-    wq := NewInfluxWriteQueue(ctx, influx)
-    defer wq.Close()
-
-    for tipset := range tipsetsCh {
-        log.Infow("Collect stats", "height", tipset.Height())
-
-        pl := NewPointList()
-        height := tipset.Height()
-
-        if err := RecordTipsetPoints(ctx, api, pl, tipset); err != nil {
-            log.Warnw("Failed to record tipset", "height", height, "error", err)
-            continue
-        }
-
-        if err := RecordTipsetMessagesPoints(ctx, api, pl, tipset); err != nil {
-            log.Warnw("Failed to record messages", "height", height, "error", err)
-            continue
-        }
-
-        if err := RecordTipsetStatePoints(ctx, api, pl, tipset); err != nil {
-            log.Warnw("Failed to record state", "height", height, "error", err)
-            continue
-        }
-
-        // Instead of having to pass around a bunch of generic stuff we want for each point
-        // we will just add them at the end.
-        tsTimestamp := time.Unix(int64(tipset.MinTimestamp()), int64(0))
-
-        nb, err := InfluxNewBatch()
-        if err != nil {
-            log.Fatal(err)
-        }
-
-        for _, pt := range pl.Points() {
-            pt.SetTime(tsTimestamp)
-            nb.AddPoint(NewPointFrom(pt))
-        }
-
-        nb.SetDatabase(database)
-
-        log.Infow("Adding points", "count", len(nb.Points()), "height", tipset.Height())
-
-        wq.AddBatch(nb)
-    }
-}

View File

@@ -1,4 +1,4 @@
-package main
+package stats

 import (
     "bytes"
@@ -25,8 +25,12 @@ import (
     _ "github.com/influxdata/influxdb1-client"
     models "github.com/influxdata/influxdb1-client/models"
     client "github.com/influxdata/influxdb1-client/v2"
+    logging "github.com/ipfs/go-log/v2"
 )

+var log = logging.Logger("stats")
+
 type PointList struct {
     points []models.Point
 }

View File

@@ -1,4 +1,4 @@
-package main
+package stats

 import (
     "context"