Merge branch 'master' into fix/master-merge

* master:
  fix(chainwatch): Parallel reward persistence; Tighten rpc logging
  fix(chainwatch): Remove --front switch
  feat(chainwatch): Add miner index on top_miners_by_base_reward view
  fix(chainwatch): Backoff processor when no work exists to process
  fix(log): Move metrics and stage updates to debug; Add --log-level switch
  fix(chainwatch): Correct index name on state_height view
  feat(chainwatch): Capture base_block_reward per epoch; Top miner by reward view
  SwapSigner API methods
  refactor: remove unused code
  refactor: wire up new processor and syncer
  refactor: implement processor and syncer

Conflicts:
  Makefile
  cmd/lotus-chainwatch/storage.go
  cmd/lotus-chainwatch/sync.go
This commit is contained in:
commit d30e120608

Makefile: 1 changed line
@@ -158,7 +158,6 @@ BINS+=lotus-fountain

 lotus-chainwatch:
 	rm -f lotus-chainwatch
 	go build -o lotus-chainwatch ./cmd/lotus-chainwatch
-	go run github.com/GeertJohan/go.rice/rice append --exec lotus-chainwatch -i ./cmd/lotus-chainwatch -i ./build
 .PHONY: lotus-chainwatch
 BINS+=lotus-chainwatch
@@ -335,6 +335,18 @@ type FullNode interface {

 	// It takes the following params: <multisig address>, <proposed message ID>, <recipient address>, <value to transfer>,
 	// <sender address of the cancel msg>, <method to call in the proposed message>, <params to include in the proposed message>
 	MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error)
+	// MsigSwapPropose proposes swapping 2 signers in the multisig
+	// It takes the following params: <multisig address>, <sender address of the propose msg>,
+	// <old signer>, <new signer>
+	MsigSwapPropose(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error)
+	// MsigSwapApprove approves a previously proposed SwapSigner
+	// It takes the following params: <multisig address>, <sender address of the approve msg>, <proposed message ID>,
+	// <proposer address>, <old signer>, <new signer>
+	MsigSwapApprove(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error)
+	// MsigSwapCancel cancels a previously proposed SwapSigner message
+	// It takes the following params: <multisig address>, <sender address of the cancel msg>, <proposed message ID>,
+	// <old signer>, <new signer>
+	MsigSwapCancel(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error)

 	MarketEnsureAvailable(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error)
 	// MarketFreeBalance
@@ -169,6 +169,9 @@ type FullNodeStruct struct {

 		MsigPropose func(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"`
 		MsigApprove func(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"`
 		MsigCancel func(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"`
+		MsigSwapPropose func(context.Context, address.Address, address.Address, address.Address, address.Address) (cid.Cid, error) `perm:"sign"`
+		MsigSwapApprove func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address, address.Address) (cid.Cid, error) `perm:"sign"`
+		MsigSwapCancel func(context.Context, address.Address, address.Address, uint64, address.Address, address.Address) (cid.Cid, error) `perm:"sign"`

 		MarketEnsureAvailable func(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error) `perm:"sign"`

@@ -741,6 +744,18 @@ func (c *FullNodeStruct) MsigCancel(ctx context.Context, msig address.Address, t
 	return c.Internal.MsigCancel(ctx, msig, txID, to, amt, src, method, params)
 }

+func (c *FullNodeStruct) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
+	return c.Internal.MsigSwapPropose(ctx, msig, src, oldAdd, newAdd)
+}
+
+func (c *FullNodeStruct) MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
+	return c.Internal.MsigSwapApprove(ctx, msig, src, txID, proposer, oldAdd, newAdd)
+}
+
+func (c *FullNodeStruct) MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
+	return c.Internal.MsigSwapCancel(ctx, msig, src, txID, oldAdd, newAdd)
+}
+
 func (c *FullNodeStruct) MarketEnsureAvailable(ctx context.Context, addr, wallet address.Address, amt types.BigInt) (cid.Cid, error) {
 	return c.Internal.MarketEnsureAvailable(ctx, addr, wallet, amt)
 }
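The three new methods are wired through FullNodeStruct as plain pass-throughs to the underlying node implementation. As a rough sketch of how a client could drive the whole swap flow against this API (the function and all identifiers below are illustrative, and the multisig transaction ID is assumed to be known, e.g. from inspecting the wallet):

package main

import (
	"context"

	"github.com/filecoin-project/go-address"
	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/api"
)

// swapSigner proposes replacing oldSigner with newSigner in the multisig
// wallet msig, then approves that proposal from a second signer. txID
// identifies the pending multisig transaction created by the proposal.
func swapSigner(ctx context.Context, node api.FullNode, msig, proposer, approver, oldSigner, newSigner address.Address, txID uint64) (cid.Cid, error) {
	if _, err := node.MsigSwapPropose(ctx, msig, proposer, oldSigner, newSigner); err != nil {
		return cid.Undef, err
	}
	return node.MsigSwapApprove(ctx, msig, approver, txID, proposer, oldSigner, newSigner)
}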
cli/multisig.go: 252 changed lines
@@ -31,17 +31,14 @@ import (

 var multisigCmd = &cli.Command{
 	Name:  "msig",
 	Usage: "Interact with a multisig wallet",
-	Flags: []cli.Flag{
-		&cli.StringFlag{
-			Name:  "source",
-			Usage: "specify the account to send propose from",
-		},
-	},
 	Subcommands: []*cli.Command{
 		msigCreateCmd,
 		msigInspectCmd,
 		msigProposeCmd,
 		msigApproveCmd,
+		msigSwapProposeCmd,
+		msigSwapApproveCmd,
+		msigSwapCancelCmd,
 	},
 }
@@ -277,7 +274,7 @@ var msigProposeCmd = &cli.Command{

 	ArgsUsage: "[multisigAddress destinationAddress value <methodId methodParams> (optional)]",
 	Flags: []cli.Flag{
 		&cli.StringFlag{
-			Name:  "source",
+			Name:  "from",
 			Usage: "account to send the propose message from",
 		},
 	},
@@ -329,8 +326,8 @@ var msigProposeCmd = &cli.Command{
 		}

 		var from address.Address
-		if cctx.IsSet("source") {
-			f, err := address.NewFromString(cctx.String("source"))
+		if cctx.IsSet("from") {
+			f, err := address.NewFromString(cctx.String("from"))
 			if err != nil {
 				return err
 			}
@@ -376,7 +373,7 @@ var msigApproveCmd = &cli.Command{

 	ArgsUsage: "[multisigAddress messageId proposerAddress destination value <methodId methodParams> (optional)]",
 	Flags: []cli.Flag{
 		&cli.StringFlag{
-			Name:  "source",
+			Name:  "from",
 			Usage: "account to send the approve message from",
 		},
 	},
@@ -445,8 +442,8 @@ var msigApproveCmd = &cli.Command{
 		}

 		var from address.Address
-		if cctx.IsSet("source") {
-			f, err := address.NewFromString(cctx.String("source"))
+		if cctx.IsSet("from") {
+			f, err := address.NewFromString(cctx.String("from"))
 			if err != nil {
 				return err
 			}
@@ -478,3 +475,234 @@ var msigApproveCmd = &cli.Command{
 		return nil
 	},
 }

var msigSwapProposeCmd = &cli.Command{
	Name:      "swap-propose",
	Usage:     "Propose to swap signers",
	ArgsUsage: "[multisigAddress oldAddress newAddress]",
	Flags: []cli.Flag{
		&cli.StringFlag{
			Name:  "from",
			Usage: "account to send the propose message from",
		},
	},
	Action: func(cctx *cli.Context) error {
		api, closer, err := GetFullNodeAPI(cctx)
		if err != nil {
			return err
		}
		defer closer()
		ctx := ReqContext(cctx)

		if cctx.Args().Len() != 3 {
			return fmt.Errorf("must pass multisig address, old signer address, new signer address")
		}

		msig, err := address.NewFromString(cctx.Args().Get(0))
		if err != nil {
			return err
		}

		oldAdd, err := address.NewFromString(cctx.Args().Get(1))
		if err != nil {
			return err
		}

		newAdd, err := address.NewFromString(cctx.Args().Get(2))
		if err != nil {
			return err
		}

		var from address.Address
		if cctx.IsSet("from") {
			f, err := address.NewFromString(cctx.String("from"))
			if err != nil {
				return err
			}
			from = f
		} else {
			defaddr, err := api.WalletDefaultAddress(ctx)
			if err != nil {
				return err
			}
			from = defaddr
		}

		msgCid, err := api.MsigSwapPropose(ctx, msig, from, oldAdd, newAdd)
		if err != nil {
			return err
		}

		fmt.Println("sent swap proposal in message: ", msgCid)

		wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence)
		if err != nil {
			return err
		}

		if wait.Receipt.ExitCode != 0 {
			return fmt.Errorf("swap proposal returned exit %d", wait.Receipt.ExitCode)
		}

		return nil
	},
}

var msigSwapApproveCmd = &cli.Command{
	Name:      "swap-approve",
	Usage:     "Approve a message to swap signers",
	ArgsUsage: "[multisigAddress proposerAddress txId oldAddress newAddress]",
	Flags: []cli.Flag{
		&cli.StringFlag{
			Name:  "from",
			Usage: "account to send the approve message from",
		},
	},
	Action: func(cctx *cli.Context) error {
		api, closer, err := GetFullNodeAPI(cctx)
		if err != nil {
			return err
		}
		defer closer()
		ctx := ReqContext(cctx)

		if cctx.Args().Len() != 5 {
			return fmt.Errorf("must pass multisig address, proposer address, transaction id, old signer address, new signer address")
		}

		msig, err := address.NewFromString(cctx.Args().Get(0))
		if err != nil {
			return err
		}

		prop, err := address.NewFromString(cctx.Args().Get(1))
		if err != nil {
			return err
		}

		txid, err := strconv.ParseUint(cctx.Args().Get(2), 10, 64)
		if err != nil {
			return err
		}

		oldAdd, err := address.NewFromString(cctx.Args().Get(3))
		if err != nil {
			return err
		}

		newAdd, err := address.NewFromString(cctx.Args().Get(4))
		if err != nil {
			return err
		}

		var from address.Address
		if cctx.IsSet("from") {
			f, err := address.NewFromString(cctx.String("from"))
			if err != nil {
				return err
			}
			from = f
		} else {
			defaddr, err := api.WalletDefaultAddress(ctx)
			if err != nil {
				return err
			}
			from = defaddr
		}

		msgCid, err := api.MsigSwapApprove(ctx, msig, from, txid, prop, oldAdd, newAdd)
		if err != nil {
			return err
		}

		fmt.Println("sent swap approval in message: ", msgCid)

		wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence)
		if err != nil {
			return err
		}

		if wait.Receipt.ExitCode != 0 {
			return fmt.Errorf("swap approval returned exit %d", wait.Receipt.ExitCode)
		}

		return nil
	},
}

var msigSwapCancelCmd = &cli.Command{
	Name:      "swap-cancel",
	Usage:     "Cancel a message to swap signers",
	ArgsUsage: "[multisigAddress txId oldAddress newAddress]",
	Flags: []cli.Flag{
		&cli.StringFlag{
			Name:  "from",
			Usage: "account to send the cancel message from",
		},
	},
	Action: func(cctx *cli.Context) error {
		api, closer, err := GetFullNodeAPI(cctx)
		if err != nil {
			return err
		}
		defer closer()
		ctx := ReqContext(cctx)

		if cctx.Args().Len() != 4 {
			return fmt.Errorf("must pass multisig address, transaction id, old signer address, new signer address")
		}

		msig, err := address.NewFromString(cctx.Args().Get(0))
		if err != nil {
			return err
		}

		txid, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64)
		if err != nil {
			return err
		}

		oldAdd, err := address.NewFromString(cctx.Args().Get(2))
		if err != nil {
			return err
		}

		newAdd, err := address.NewFromString(cctx.Args().Get(3))
		if err != nil {
			return err
		}

		var from address.Address
		if cctx.IsSet("from") {
			f, err := address.NewFromString(cctx.String("from"))
			if err != nil {
				return err
			}
			from = f
		} else {
			defaddr, err := api.WalletDefaultAddress(ctx)
			if err != nil {
				return err
			}
			from = defaddr
		}

		msgCid, err := api.MsigSwapCancel(ctx, msig, from, txid, oldAdd, newAdd)
		if err != nil {
			return err
		}

		fmt.Println("sent swap cancellation in message: ", msgCid)

		wait, err := api.StateWaitMsg(ctx, msgCid, build.MessageConfidence)
		if err != nil {
			return err
		}

		if wait.Receipt.ExitCode != 0 {
			return fmt.Errorf("swap cancellation returned exit %d", wait.Receipt.ExitCode)
		}

		return nil
	},
}
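Together these give lotus msig a complete swap workflow from the command line. Illustrative invocations (all addresses and the transaction id are placeholders; the txId is the id of the pending transaction created by swap-propose):

  lotus msig swap-propose <multisigAddress> <oldAddress> <newAddress>
  lotus msig swap-approve <multisigAddress> <proposerAddress> <txId> <oldAddress> <newAddress>
  lotus msig swap-cancel <multisigAddress> <txId> <oldAddress> <newAddress>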
@@ -14,7 +14,7 @@ var sendCmd = &cli.Command{

 	ArgsUsage: "[targetAddress] [amount]",
 	Flags: []cli.Flag{
 		&cli.StringFlag{
-			Name:  "source",
+			Name:  "from",
 			Usage: "optionally specify the account to send funds from",
 		},
 		&cli.StringFlag{

@@ -52,7 +52,7 @@ var sendCmd = &cli.Command{
 		}

 		var fromAddr address.Address
-		if from := cctx.String("source"); from == "" {
+		if from := cctx.String("from"); from == "" {
 			defaddr, err := api.WalletDefaultAddress(ctx)
 			if err != nil {
 				return err
@@ -1,12 +1,15 @@
 package main

 import (
	"database/sql"
	"fmt"
	"hash/crc32"
	"strconv"

	"github.com/ipfs/go-cid"
	logging "github.com/ipfs/go-log/v2"
	"github.com/urfave/cli/v2"
	"golang.org/x/xerrors"
 )

 var dotCmd = &cli.Command{

@@ -14,10 +17,24 @@ var dotCmd = &cli.Command{
 	Usage:     "generate dot graphs",
 	ArgsUsage: "<minHeight> <toseeHeight>",
 	Action: func(cctx *cli.Context) error {
-		st, err := openStorage(cctx.String("db"))
+		ll := cctx.String("log-level")
+		if err := logging.SetLogLevel("*", ll); err != nil {
+			return err
+		}
+
+		db, err := sql.Open("postgres", cctx.String("db"))
 		if err != nil {
 			return err
 		}
+		defer func() {
+			if err := db.Close(); err != nil {
+				log.Errorw("Failed to close database", "error", err)
+			}
+		}()
+
+		if err := db.Ping(); err != nil {
+			return xerrors.Errorf("Database failed to respond to ping (is it online?): %w", err)
+		}

 		minH, err := strconv.ParseInt(cctx.Args().Get(0), 10, 32)
 		if err != nil {

@@ -29,7 +46,7 @@ var dotCmd = &cli.Command{
 		}
 		maxH := minH + tosee

-		res, err := st.db.Query(`select block, parent, b.miner, b.height, p.height from block_parents
+		res, err := db.Query(`select block, parent, b.miner, b.height, p.height from block_parents
 inner join blocks b on block_parents.block = b.cid
 inner join blocks p on block_parents.parent = p.cid
 where b.height > $1 and b.height < $2`, minH, maxH)

@@ -40,7 +57,10 @@ where b.height > $1 and b.height < $2`, minH, maxH)

 	fmt.Println("digraph D {")

-	hl := st.hasList()
+	hl, err := syncedBlocks(db)
+	if err != nil {
+		log.Fatal(err)
+	}

 	for res.Next() {
 		var block, parent, miner string

@@ -85,3 +105,27 @@ where b.height > $1 and b.height < $2`, minH, maxH)
 		return nil
 	},
 }
+
+func syncedBlocks(db *sql.DB) (map[cid.Cid]struct{}, error) {
+	// Collect the CIDs of every block chainwatch has recorded as synced.
+	rws, err := db.Query(`select cid FROM blocks_synced`)
+	if err != nil {
+		return nil, xerrors.Errorf("Failed to query blocks_synced: %w", err)
+	}
+	out := map[cid.Cid]struct{}{}
+
+	for rws.Next() {
+		var c string
+		if err := rws.Scan(&c); err != nil {
+			return nil, xerrors.Errorf("Failed to scan blocks_synced: %w", err)
+		}
+
+		ci, err := cid.Parse(c)
+		if err != nil {
+			return nil, xerrors.Errorf("Failed to parse blocks_synced: %w", err)
+		}
+
+		out[ci] = struct{}{}
+	}
+	return out, nil
+}
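With openStorage gone, dot talks to Postgres directly and can run standalone against any chainwatch database. A plausible invocation (the connection string and heights are placeholders), piping the digraph output through Graphviz:

  lotus-chainwatch --db="postgres://user:pass@localhost/chainwatch?sslmode=disable" dot 100 50 | dot -Tsvg -o chain.svg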
@@ -1,34 +1,22 @@
 package main

 import (
-	"fmt"
-	"net/http"
-	_ "net/http/pprof"
 	"os"

-	"github.com/filecoin-project/lotus/build"
 	logging "github.com/ipfs/go-log/v2"
 	"github.com/urfave/cli/v2"
-	"golang.org/x/xerrors"

+	"github.com/filecoin-project/lotus/build"
 	lcli "github.com/filecoin-project/lotus/cli"
 )

 var log = logging.Logger("chainwatch")

 func main() {
-	_ = logging.SetLogLevel("*", "INFO")
-	if err := logging.SetLogLevel("rpc", "error"); err != nil {
-		panic(err)
+	if err := logging.SetLogLevel("*", "info"); err != nil {
+		log.Fatal(err)
 	}

+	log.Info("Starting chainwatch")
+
+	local := []*cli.Command{
+		runCmd,
+		dotCmd,
+	}
+
 	app := &cli.App{
 		Name:  "lotus-chainwatch",
 		Usage: "Devnet token distribution utility",

@@ -44,69 +32,19 @@ func main() {
 			EnvVars: []string{"LOTUS_DB"},
 			Value:   "",
 		},
+		&cli.StringFlag{
+			Name:    "log-level",
+			EnvVars: []string{"GOLOG_LOG_LEVEL"},
+			Value:   "info",
+		},
 		},
-		Commands: []*cli.Command{
-			dotCmd,
-			runCmd,
-		},
+		Commands: local,
 	}

 	if err := app.Run(os.Args); err != nil {
-		log.Warnf("%+v", err)
-		os.Exit(1)
+		log.Fatal(err)
 	}
 }
-
-var runCmd = &cli.Command{
-	Name:  "run",
-	Usage: "Start lotus chainwatch",
-	Flags: []cli.Flag{
-		&cli.StringFlag{
-			Name:  "front",
-			Value: "127.0.0.1:8418",
-		},
-		&cli.IntFlag{
-			Name:  "max-batch",
-			Value: 1000,
-		},
-	},
-	Action: func(cctx *cli.Context) error {
-		api, closer, err := lcli.GetFullNodeAPI(cctx)
-		if err != nil {
-			return err
-		}
-		defer closer()
-		ctx := lcli.ReqContext(cctx)
-
-		v, err := api.Version(ctx)
-		if err != nil {
-			return err
-		}
-
-		log.Infof("Remote version: %s", v.Version)
-
-		maxBatch := cctx.Int("max-batch")
-
-		st, err := openStorage(cctx.String("db"))
-		if err != nil {
-			return err
-		}
-		defer st.close() //nolint:errcheck
-
-		runSyncer(ctx, api, st, maxBatch)
-
-		h, err := newHandler(api, st)
-		if err != nil {
-			return xerrors.Errorf("handler setup: %w", err)
-		}
-
-		http.Handle("/", h)
-
-		fmt.Printf("Open http://%s\n", cctx.String("front"))
-
-		go func() {
-			<-ctx.Done()
-			os.Exit(0)
-		}()
-
-		return http.ListenAndServe(cctx.String("front"), nil)
-	},
-}
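main is now just flag parsing plus command dispatch; the syncer and processor wiring, along with the old HTTP frontend duties, move into the new runCmd elsewhere in this refactor. An illustrative way to start the watcher (the connection string is a placeholder; LOTUS_DB and GOLOG_LOG_LEVEL back the --db and --log-level flags):

  LOTUS_DB="postgres://user:pass@localhost/chainwatch?sslmode=disable" lotus-chainwatch --log-level=debug run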
@@ -1,60 +0,0 @@
-package main
-
-import (
-	"context"
-	"time"
-
-	"github.com/ipfs/go-cid"
-
-	aapi "github.com/filecoin-project/lotus/api"
-	"github.com/filecoin-project/lotus/chain/types"
-)
-
-func subMpool(ctx context.Context, api aapi.FullNode, st *storage) {
-	sub, err := api.MpoolSub(ctx)
-	if err != nil {
-		return
-	}
-
-	for {
-		var updates []aapi.MpoolUpdate
-
-		select {
-		case update := <-sub:
-			updates = append(updates, update)
-		case <-ctx.Done():
-			return
-		}
-
-	loop:
-		for {
-			time.Sleep(10 * time.Millisecond)
-			select {
-			case update := <-sub:
-				updates = append(updates, update)
-			default:
-				break loop
-			}
-		}
-
-		msgs := map[cid.Cid]*types.Message{}
-		for _, v := range updates {
-			if v.Type != aapi.MpoolAdd {
-				continue
-			}
-
-			msgs[v.Message.Message.Cid()] = &v.Message.Message
-		}
-
-		log.Debugf("Processing %d mpool updates", len(msgs))
-
-		err := st.storeMessages(msgs)
-		if err != nil {
-			log.Error(err)
-		}
-
-		if err := st.storeMpoolInclusions(updates); err != nil {
-			log.Error(err)
-		}
-	}
-}
cmd/lotus-chainwatch/processor/common_actors.go: new file, 299 lines

@@ -0,0 +1,299 @@
package processor

import (
	"bytes"
	"context"
	"time"

	"golang.org/x/sync/errgroup"
	"golang.org/x/xerrors"

	"github.com/ipfs/go-cid"
	typegen "github.com/whyrusleeping/cbor-gen"

	"github.com/filecoin-project/go-address"
	"github.com/filecoin-project/specs-actors/actors/builtin"
	_init "github.com/filecoin-project/specs-actors/actors/builtin/init"
	"github.com/filecoin-project/specs-actors/actors/util/adt"

	"github.com/filecoin-project/lotus/chain/types"
	cw_util "github.com/filecoin-project/lotus/cmd/lotus-chainwatch/util"
)

func (p *Processor) setupCommonActors() error {
	tx, err := p.db.Begin()
	if err != nil {
		return err
	}

	if _, err := tx.Exec(`
create table if not exists id_address_map
(
	id text not null,
	address text not null,
	constraint id_address_map_pk
		primary key (id, address)
);

create unique index if not exists id_address_map_id_uindex
	on id_address_map (id);

create unique index if not exists id_address_map_address_uindex
	on id_address_map (address);

create table if not exists actors
(
	id text not null
		constraint id_address_map_actors_id_fk
			references id_address_map (id),
	code text not null,
	head text not null,
	nonce int not null,
	balance text not null,
	stateroot text
);

create index if not exists actors_id_index
	on actors (id);

create index if not exists id_address_map_address_index
	on id_address_map (address);

create index if not exists id_address_map_id_index
	on id_address_map (id);

create or replace function actor_tips(epoch bigint)
	returns table (id text,
			code text,
			head text,
			nonce int,
			balance text,
			stateroot text,
			height bigint,
			parentstateroot text) as
$body$
	select distinct on (id) * from actors
		inner join state_heights sh on sh.parentstateroot = stateroot
	where height < $1
	order by id, height desc;
$body$ language sql;

create table if not exists actor_states
(
	head text not null,
	code text not null,
	state json not null
);

create unique index if not exists actor_states_head_code_uindex
	on actor_states (head, code);

create index if not exists actor_states_head_index
	on actor_states (head);

create index if not exists actor_states_code_head_index
	on actor_states (head, code);
`); err != nil {
		return err
	}

	return tx.Commit()
}

func (p *Processor) HandleCommonActorsChanges(ctx context.Context, actors map[cid.Cid]ActorTips) error {
	if err := p.storeActorAddresses(ctx, actors); err != nil {
		return err
	}

	grp, _ := errgroup.WithContext(ctx)

	grp.Go(func() error {
		if err := p.storeActorHeads(actors); err != nil {
			return err
		}
		return nil
	})

	grp.Go(func() error {
		if err := p.storeActorStates(actors); err != nil {
			return err
		}
		return nil
	})

	return grp.Wait()
}

func (p Processor) storeActorAddresses(ctx context.Context, actors map[cid.Cid]ActorTips) error {
	start := time.Now()
	defer func() {
		log.Debugw("Stored Actor Addresses", "duration", time.Since(start).String())
	}()

	addressToID := map[address.Address]address.Address{}
	// HACK until genesis storage is figured out:
	addressToID[builtin.SystemActorAddr] = builtin.SystemActorAddr
	addressToID[builtin.InitActorAddr] = builtin.InitActorAddr
	addressToID[builtin.RewardActorAddr] = builtin.RewardActorAddr
	addressToID[builtin.CronActorAddr] = builtin.CronActorAddr
	addressToID[builtin.StoragePowerActorAddr] = builtin.StoragePowerActorAddr
	addressToID[builtin.StorageMarketActorAddr] = builtin.StorageMarketActorAddr
	addressToID[builtin.VerifiedRegistryActorAddr] = builtin.VerifiedRegistryActorAddr
	addressToID[builtin.BurntFundsActorAddr] = builtin.BurntFundsActorAddr
	initActor, err := p.node.StateGetActor(ctx, builtin.InitActorAddr, types.EmptyTSK)
	if err != nil {
		return err
	}

	initActorRaw, err := p.node.ChainReadObj(ctx, initActor.Head)
	if err != nil {
		return err
	}

	var initActorState _init.State
	if err := initActorState.UnmarshalCBOR(bytes.NewReader(initActorRaw)); err != nil {
		return err
	}
	ctxStore := cw_util.NewAPIIpldStore(ctx, p.node)
	addrMap, err := adt.AsMap(ctxStore, initActorState.AddressMap)
	if err != nil {
		return err
	}
	// gross..
	var actorID typegen.CborInt
	if err := addrMap.ForEach(&actorID, func(key string) error {
		longAddr, err := address.NewFromBytes([]byte(key))
		if err != nil {
			return err
		}
		shortAddr, err := address.NewIDAddress(uint64(actorID))
		if err != nil {
			return err
		}
		addressToID[longAddr] = shortAddr
		return nil
	}); err != nil {
		return err
	}
	tx, err := p.db.Begin()
	if err != nil {
		return err
	}

	if _, err := tx.Exec(`
create temp table iam (like id_address_map excluding constraints) on commit drop;
`); err != nil {
		return xerrors.Errorf("prep temp: %w", err)
	}

	stmt, err := tx.Prepare(`copy iam (id, address) from STDIN `)
	if err != nil {
		return err
	}

	for a, i := range addressToID {
		if i == address.Undef {
			continue
		}
		if _, err := stmt.Exec(
			i.String(),
			a.String(),
		); err != nil {
			return err
		}
	}
	if err := stmt.Close(); err != nil {
		return err
	}

	if _, err := tx.Exec(`insert into id_address_map select * from iam on conflict do nothing `); err != nil {
		return xerrors.Errorf("actor put: %w", err)
	}

	return tx.Commit()
}

func (p *Processor) storeActorHeads(actors map[cid.Cid]ActorTips) error {
	start := time.Now()
	defer func() {
		log.Debugw("Stored Actor Heads", "duration", time.Since(start).String())
	}()
	// Basic
	tx, err := p.db.Begin()
	if err != nil {
		return err
	}
	if _, err := tx.Exec(`
create temp table a (like actors excluding constraints) on commit drop;
`); err != nil {
		return xerrors.Errorf("prep temp: %w", err)
	}

	stmt, err := tx.Prepare(`copy a (id, code, head, nonce, balance, stateroot) from stdin `)
	if err != nil {
		return err
	}

	for code, actTips := range actors {
		for _, actorInfo := range actTips {
			for _, a := range actorInfo {
				if _, err := stmt.Exec(a.addr.String(), code.String(), a.act.Head.String(), a.act.Nonce, a.act.Balance.String(), a.stateroot.String()); err != nil {
					return err
				}
			}
		}
	}

	if err := stmt.Close(); err != nil {
		return err
	}

	if _, err := tx.Exec(`insert into actors select * from a on conflict do nothing `); err != nil {
		return xerrors.Errorf("actor put: %w", err)
	}

	return tx.Commit()
}

func (p *Processor) storeActorStates(actors map[cid.Cid]ActorTips) error {
	start := time.Now()
	defer func() {
		log.Debugw("Stored Actor States", "duration", time.Since(start).String())
	}()
	// States
	tx, err := p.db.Begin()
	if err != nil {
		return err
	}
	if _, err := tx.Exec(`
create temp table a (like actor_states excluding constraints) on commit drop;
`); err != nil {
		return xerrors.Errorf("prep temp: %w", err)
	}

	stmt, err := tx.Prepare(`copy a (head, code, state) from stdin `)
	if err != nil {
		return err
	}

	for code, actTips := range actors {
		for _, actorInfo := range actTips {
			for _, a := range actorInfo {
				if _, err := stmt.Exec(a.act.Head.String(), code.String(), a.state); err != nil {
					return err
				}
			}
		}
	}

	if err := stmt.Close(); err != nil {
		return err
	}

	if _, err := tx.Exec(`insert into actor_states select * from a on conflict do nothing `); err != nil {
		return xerrors.Errorf("actor put: %w", err)
	}

	return tx.Commit()
}
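All three store helpers above share one Postgres bulk-upsert idiom: COPY rows into a session-local temp table created with create temp table ... on commit drop, then insert ... on conflict do nothing into the real table. A minimal standalone sketch of that idiom, assuming the lib/pq driver (which accepts COPY ... FROM STDIN as a prepared statement inside a transaction); the table and columns mirror id_address_map purely for illustration:

package main

import (
	"database/sql"

	_ "github.com/lib/pq" // registers the "postgres" driver
)

// bulkUpsert stages rows in a temp table so the COPY never hits the real
// table's constraints, then moves them over, skipping duplicates.
func bulkUpsert(db *sql.DB, rows map[string]string) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	if _, err := tx.Exec(`create temp table tmp (like id_address_map excluding constraints) on commit drop`); err != nil {
		return err
	}
	stmt, err := tx.Prepare(`copy tmp (id, address) from stdin`)
	if err != nil {
		return err
	}
	for id, addr := range rows {
		if _, err := stmt.Exec(id, addr); err != nil {
			return err
		}
	}
	if err := stmt.Close(); err != nil { // Close flushes the COPY stream
		return err
	}
	if _, err := tx.Exec(`insert into id_address_map select * from tmp on conflict do nothing`); err != nil {
		return err
	}
	return tx.Commit()
}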
cmd/lotus-chainwatch/processor/market.go: new file, 301 lines

@@ -0,0 +1,301 @@
package processor

import (
	"context"
	"strconv"
	"time"

	"golang.org/x/sync/errgroup"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/lotus/chain/events/state"
)

func (p *Processor) setupMarket() error {
	tx, err := p.db.Begin()
	if err != nil {
		return err
	}

	if _, err := tx.Exec(`
create table if not exists market_deal_proposals
(
	deal_id bigint not null,

	state_root text not null,

	piece_cid text not null,
	padded_piece_size bigint not null,
	unpadded_piece_size bigint not null,
	is_verified bool not null,

	client_id text not null,
	provider_id text not null,

	start_epoch bigint not null,
	end_epoch bigint not null,
	slashed_epoch bigint,
	storage_price_per_epoch text not null,

	provider_collateral text not null,
	client_collateral text not null,

	constraint market_deal_proposal_pk
		primary key (deal_id)
);

create table if not exists market_deal_states
(
	deal_id bigint not null,

	sector_start_epoch bigint not null,
	last_update_epoch bigint not null,
	slash_epoch bigint not null,

	state_root text not null,

	unique (deal_id, sector_start_epoch, last_update_epoch, slash_epoch),

	constraint market_deal_states_pk
		primary key (deal_id, state_root)
);
`); err != nil {
		return err
	}

	return tx.Commit()
}

type marketActorInfo struct {
	common actorInfo
}

func (p *Processor) HandleMarketChanges(ctx context.Context, marketTips ActorTips) error {
	marketChanges, err := p.processMarket(ctx, marketTips)
	if err != nil {
		log.Fatalw("Failed to process market actors", "error", err)
	}

	if err := p.persistMarket(ctx, marketChanges); err != nil {
		log.Fatalw("Failed to persist market actors", "error", err)
	}

	if err := p.updateMarket(ctx, marketChanges); err != nil {
		log.Fatalw("Failed to update market actors", "error", err)
	}
	return nil
}

func (p *Processor) processMarket(ctx context.Context, marketTips ActorTips) ([]marketActorInfo, error) {
	start := time.Now()
	defer func() {
		log.Debugw("Processed Market", "duration", time.Since(start).String())
	}()

	var out []marketActorInfo
	for _, markets := range marketTips {
		for _, mt := range markets {
			// NB: here is where we can extract the market state when we need it.
			out = append(out, marketActorInfo{common: mt})
		}
	}
	return out, nil
}

func (p *Processor) persistMarket(ctx context.Context, info []marketActorInfo) error {
	start := time.Now()
	defer func() {
		log.Debugw("Persisted Market", "duration", time.Since(start).String())
	}()

	grp, ctx := errgroup.WithContext(ctx)

	grp.Go(func() error {
		if err := p.storeMarketActorDealProposals(ctx, info); err != nil {
			return xerrors.Errorf("Failed to store market deal proposals: %w", err)
		}
		return nil
	})

	grp.Go(func() error {
		if err := p.storeMarketActorDealStates(info); err != nil {
			return xerrors.Errorf("Failed to store market deal states: %w", err)
		}
		return nil
	})

	return grp.Wait()
}

func (p *Processor) updateMarket(ctx context.Context, info []marketActorInfo) error {
	if err := p.updateMarketActorDealProposals(ctx, info); err != nil {
		return xerrors.Errorf("Failed to update market info: %w", err)
	}
	return nil
}

func (p *Processor) storeMarketActorDealStates(marketTips []marketActorInfo) error {
	start := time.Now()
	defer func() {
		log.Debugw("Stored Market Deal States", "duration", time.Since(start).String())
	}()
	tx, err := p.db.Begin()
	if err != nil {
		return err
	}
	if _, err := tx.Exec(`create temp table mds (like market_deal_states excluding constraints) on commit drop;`); err != nil {
		return err
	}
	stmt, err := tx.Prepare(`copy mds (deal_id, sector_start_epoch, last_update_epoch, slash_epoch, state_root) from STDIN`)
	if err != nil {
		return err
	}
	for _, mt := range marketTips {
		dealStates, err := p.node.StateMarketDeals(context.TODO(), mt.common.tsKey)
		if err != nil {
			return err
		}

		for dealID, ds := range dealStates {
			id, err := strconv.ParseUint(dealID, 10, 64)
			if err != nil {
				return err
			}

			if _, err := stmt.Exec(
				id,
				ds.State.SectorStartEpoch,
				ds.State.LastUpdatedEpoch,
				ds.State.SlashEpoch,
				mt.common.stateroot.String(),
			); err != nil {
				return err
			}
		}
	}
	if err := stmt.Close(); err != nil {
		return err
	}

	if _, err := tx.Exec(`insert into market_deal_states select * from mds on conflict do nothing`); err != nil {
		return err
	}

	return tx.Commit()
}

func (p *Processor) storeMarketActorDealProposals(ctx context.Context, marketTips []marketActorInfo) error {
	start := time.Now()
	defer func() {
		log.Debugw("Stored Market Deal Proposals", "duration", time.Since(start).String())
	}()
	tx, err := p.db.Begin()
	if err != nil {
		return err
	}

	if _, err := tx.Exec(`create temp table mdp (like market_deal_proposals excluding constraints) on commit drop;`); err != nil {
		return xerrors.Errorf("prep temp: %w", err)
	}

	stmt, err := tx.Prepare(`copy mdp (deal_id, state_root, piece_cid, padded_piece_size, unpadded_piece_size, is_verified, client_id, provider_id, start_epoch, end_epoch, slashed_epoch, storage_price_per_epoch, provider_collateral, client_collateral) from STDIN`)
	if err != nil {
		return err
	}

	// insert in sorted order (lowest height -> highest height) since dealid is pk of table.
	for _, mt := range marketTips {
		dealStates, err := p.node.StateMarketDeals(ctx, mt.common.tsKey)
		if err != nil {
			return err
		}

		for dealID, ds := range dealStates {
			id, err := strconv.ParseUint(dealID, 10, 64)
			if err != nil {
				return err
			}

			if _, err := stmt.Exec(
				id,
				mt.common.stateroot.String(),
				ds.Proposal.PieceCID.String(),
				ds.Proposal.PieceSize,
				ds.Proposal.PieceSize.Unpadded(),
				ds.Proposal.VerifiedDeal,
				ds.Proposal.Client.String(),
				ds.Proposal.Provider.String(),
				ds.Proposal.StartEpoch,
				ds.Proposal.EndEpoch,
				nil, // slashed_epoch
				ds.Proposal.StoragePricePerEpoch.String(),
				ds.Proposal.ProviderCollateral.String(),
				ds.Proposal.ClientCollateral.String(),
			); err != nil {
				return err
			}
		}
	}
	if err := stmt.Close(); err != nil {
		return err
	}
	if _, err := tx.Exec(`insert into market_deal_proposals select * from mdp on conflict do nothing`); err != nil {
		return err
	}

	return tx.Commit()
}

func (p *Processor) updateMarketActorDealProposals(ctx context.Context, marketTip []marketActorInfo) error {
	start := time.Now()
	defer func() {
		log.Debugw("Updated Market Deal Proposals", "duration", time.Since(start).String())
	}()
	pred := state.NewStatePredicates(p.node)

	tx, err := p.db.Begin()
	if err != nil {
		return err
	}

	stmt, err := tx.Prepare(`update market_deal_proposals set slashed_epoch=$1 where deal_id=$2`)
	if err != nil {
		return err
	}

	for _, mt := range marketTip {
		stateDiff := pred.OnStorageMarketActorChanged(pred.OnDealStateChanged(pred.OnDealStateAmtChanged()))

		changed, val, err := stateDiff(ctx, mt.common.parentTsKey, mt.common.tsKey)
		if err != nil {
			log.Warnw("error getting market deal state diff", "error", err)
		}
		if !changed {
			continue
		}
		changes, ok := val.(*state.MarketDealStateChanges)
		if !ok {
			return xerrors.Errorf("Unknown type returned by Deal State AMT predicate: %T", val)
		}

		for _, modified := range changes.Modified {
			if modified.From.SlashEpoch != modified.To.SlashEpoch {
				if _, err := stmt.Exec(modified.To.SlashEpoch, modified.ID); err != nil {
					return err
				}
			}
		}
	}

	if err := stmt.Close(); err != nil {
		return err
	}

	return tx.Commit()
}
cmd/lotus-chainwatch/processor/messages.go: new file, 316 lines

@@ -0,0 +1,316 @@
package processor

import (
	"context"
	"sync"
	"time"

	"golang.org/x/sync/errgroup"
	"golang.org/x/xerrors"

	"github.com/ipfs/go-cid"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/lib/parmap"
)

func (p *Processor) setupMessages() error {
	tx, err := p.db.Begin()
	if err != nil {
		return err
	}

	if _, err := tx.Exec(`
create table if not exists messages
(
	cid text not null
		constraint messages_pk
			primary key,
	"from" text not null,
	"to" text not null,
	nonce bigint not null,
	value text not null,
	gasprice bigint not null,
	gaslimit bigint not null,
	method bigint,
	params bytea
);

create unique index if not exists messages_cid_uindex
	on messages (cid);

create index if not exists messages_from_index
	on messages ("from");

create index if not exists messages_to_index
	on messages ("to");

create table if not exists block_messages
(
	block text not null
		constraint blocks_block_cids_cid_fk
			references block_cids (cid),
	message text not null,
	constraint block_messages_pk
		primary key (block, message)
);

create table if not exists mpool_messages
(
	msg text not null
		constraint mpool_messages_pk
			primary key
		constraint mpool_messages_messages_cid_fk
			references messages,
	add_ts int not null
);

create unique index if not exists mpool_messages_msg_uindex
	on mpool_messages (msg);

create table if not exists receipts
(
	msg text not null,
	state text not null,
	idx int not null,
	exit int not null,
	gas_used int not null,
	return bytea,
	constraint receipts_pk
		primary key (msg, state)
);

create index if not exists receipts_msg_state_index
	on receipts (msg, state);
`); err != nil {
		return err
	}

	return tx.Commit()
}

func (p *Processor) HandleMessageChanges(ctx context.Context, blocks map[cid.Cid]*types.BlockHeader) error {
	if err := p.persistMessagesAndReceipts(ctx, blocks); err != nil {
		return err
	}
	return nil
}

func (p *Processor) persistMessagesAndReceipts(ctx context.Context, blocks map[cid.Cid]*types.BlockHeader) error {
	messages, inclusions := p.fetchMessages(ctx, blocks)
	receipts := p.fetchParentReceipts(ctx, blocks)

	grp, _ := errgroup.WithContext(ctx)

	grp.Go(func() error {
		return p.storeMessages(messages)
	})

	grp.Go(func() error {
		return p.storeMsgInclusions(inclusions)
	})

	grp.Go(func() error {
		return p.storeReceipts(receipts)
	})

	return grp.Wait()
}

func (p *Processor) storeReceipts(recs map[mrec]*types.MessageReceipt) error {
	start := time.Now()
	defer func() {
		log.Debugw("Persisted Receipts", "duration", time.Since(start).String())
	}()
	tx, err := p.db.Begin()
	if err != nil {
		return err
	}

	if _, err := tx.Exec(`
create temp table recs (like receipts excluding constraints) on commit drop;
`); err != nil {
		return xerrors.Errorf("prep temp: %w", err)
	}

	stmt, err := tx.Prepare(`copy recs (msg, state, idx, exit, gas_used, return) from stdin `)
	if err != nil {
		return err
	}

	for c, m := range recs {
		if _, err := stmt.Exec(
			c.msg.String(),
			c.state.String(),
			c.idx,
			m.ExitCode,
			m.GasUsed,
			m.Return,
		); err != nil {
			return err
		}
	}
	if err := stmt.Close(); err != nil {
		return err
	}

	if _, err := tx.Exec(`insert into receipts select * from recs on conflict do nothing `); err != nil {
		return xerrors.Errorf("actor put: %w", err)
	}

	return tx.Commit()
}

func (p *Processor) storeMsgInclusions(incls map[cid.Cid][]cid.Cid) error {
	start := time.Now()
	defer func() {
		log.Debugw("Persisted Message Inclusions", "duration", time.Since(start).String())
	}()
	tx, err := p.db.Begin()
	if err != nil {
		return err
	}

	if _, err := tx.Exec(`
create temp table mi (like block_messages excluding constraints) on commit drop;
`); err != nil {
		return xerrors.Errorf("prep temp: %w", err)
	}

	stmt, err := tx.Prepare(`copy mi (block, message) from STDIN `)
	if err != nil {
		return err
	}

	for b, msgs := range incls {
		for _, msg := range msgs {
			if _, err := stmt.Exec(
				b.String(),
				msg.String(),
			); err != nil {
				return err
			}
		}
	}
	if err := stmt.Close(); err != nil {
		return err
	}

	if _, err := tx.Exec(`insert into block_messages select * from mi on conflict do nothing `); err != nil {
		return xerrors.Errorf("actor put: %w", err)
	}

	return tx.Commit()
}

func (p *Processor) storeMessages(msgs map[cid.Cid]*types.Message) error {
	start := time.Now()
	defer func() {
		log.Debugw("Persisted Messages", "duration", time.Since(start).String())
	}()
	tx, err := p.db.Begin()
	if err != nil {
		return err
	}

	if _, err := tx.Exec(`
create temp table msgs (like messages excluding constraints) on commit drop;
`); err != nil {
		return xerrors.Errorf("prep temp: %w", err)
	}

	stmt, err := tx.Prepare(`copy msgs (cid, "from", "to", nonce, "value", gasprice, gaslimit, method, params) from stdin `)
	if err != nil {
		return err
	}

	for c, m := range msgs {
		if _, err := stmt.Exec(
			c.String(),
			m.From.String(),
			m.To.String(),
			m.Nonce,
			m.Value.String(),
			m.GasPrice.String(),
			m.GasLimit,
			m.Method,
			m.Params,
		); err != nil {
			return err
		}
	}
	if err := stmt.Close(); err != nil {
		return err
	}

	if _, err := tx.Exec(`insert into messages select * from msgs on conflict do nothing `); err != nil {
		return xerrors.Errorf("actor put: %w", err)
	}

	return tx.Commit()
}

func (p *Processor) fetchMessages(ctx context.Context, blocks map[cid.Cid]*types.BlockHeader) (map[cid.Cid]*types.Message, map[cid.Cid][]cid.Cid) {
	var lk sync.Mutex
	messages := map[cid.Cid]*types.Message{}
	inclusions := map[cid.Cid][]cid.Cid{} // block -> msgs

	parmap.Par(50, parmap.MapArr(blocks), func(header *types.BlockHeader) {
		msgs, err := p.node.ChainGetBlockMessages(ctx, header.Cid())
		if err != nil {
			panic(err)
		}

		vmm := make([]*types.Message, 0, len(msgs.Cids))
		for _, m := range msgs.BlsMessages {
			vmm = append(vmm, m)
		}

		for _, m := range msgs.SecpkMessages {
			vmm = append(vmm, &m.Message)
		}

		lk.Lock()
		for _, message := range vmm {
			messages[message.Cid()] = message
			inclusions[header.Cid()] = append(inclusions[header.Cid()], message.Cid())
		}
		lk.Unlock()
	})

	return messages, inclusions
}

type mrec struct {
	msg   cid.Cid
	state cid.Cid
	idx   int
}

func (p *Processor) fetchParentReceipts(ctx context.Context, toSync map[cid.Cid]*types.BlockHeader) map[mrec]*types.MessageReceipt {
	var lk sync.Mutex
	out := map[mrec]*types.MessageReceipt{}

	parmap.Par(50, parmap.MapArr(toSync), func(header *types.BlockHeader) {
		recs, err := p.node.ChainGetParentReceipts(ctx, header.Cid())
		if err != nil {
			panic(err)
		}
		msgs, err := p.node.ChainGetParentMessages(ctx, header.Cid())
		if err != nil {
			panic(err)
		}

		lk.Lock()
		for i, r := range recs {
			out[mrec{
				msg:   msgs[i].Cid,
				state: header.ParentStateRoot,
				idx:   i,
			}] = r
		}
		lk.Unlock()
	})

	return out
}
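Note that receipts are keyed by (msg, state) rather than by message CID alone: the same message can execute under different parent state roots as tipsets reorganize, and each execution gets its own row. A hypothetical lookup against the schema created above (the helper and its query are illustrative):

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/lib/pq" // registers the "postgres" driver
)

// receiptsFor prints every recorded execution of one message; the same CID
// can appear once per parent state root it was applied under.
func receiptsFor(db *sql.DB, msgCid string) error {
	rows, err := db.Query(`select state, exit, gas_used from receipts where msg = $1`, msgCid)
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		var state string
		var exit, gasUsed int64
		if err := rows.Scan(&state, &exit, &gasUsed); err != nil {
			return err
		}
		fmt.Printf("state=%s exit=%d gas_used=%d\n", state, exit, gasUsed)
	}
	return rows.Err()
}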
cmd/lotus-chainwatch/processor/miner.go: new file, 640 lines

@@ -0,0 +1,640 @@
|
||||
package processor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/filecoin-project/lotus/chain/events/state"
|
||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/libp2p/go-libp2p-core/peer"
|
||||
|
||||
"github.com/filecoin-project/specs-actors/actors/abi/big"
|
||||
"github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||
"github.com/filecoin-project/specs-actors/actors/builtin/power"
|
||||
"github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
cw_util "github.com/filecoin-project/lotus/cmd/lotus-chainwatch/util"
|
||||
)
|
||||
|
||||
func (p *Processor) setupMiners() error {
|
||||
tx, err := p.db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := tx.Exec(`
|
||||
create table if not exists miner_sectors
|
||||
(
|
||||
miner_id text not null,
|
||||
sector_id bigint not null,
|
||||
|
||||
activation_epoch bigint not null,
|
||||
expiration_epoch bigint not null,
|
||||
termination_epoch bigint,
|
||||
|
||||
deal_weight text not null,
|
||||
verified_deal_weight text not null,
|
||||
seal_cid text not null,
|
||||
seal_rand_epoch bigint not null,
|
||||
constraint miner_sectors_pk
|
||||
primary key (miner_id, sector_id)
|
||||
);
|
||||
|
||||
create index if not exists miner_sectors_miner_sectorid_index
|
||||
on miner_sectors (miner_id, sector_id);
|
||||
|
||||
create table if not exists miner_info
|
||||
(
|
||||
miner_id text not null,
|
||||
owner_addr text not null,
|
||||
worker_addr text not null,
|
||||
peer_id text,
|
||||
sector_size text not null,
|
||||
|
||||
precommit_deposits text not null,
|
||||
locked_funds text not null,
|
||||
next_deadline_process_faults bigint not null,
|
||||
constraint miner_info_pk
|
||||
primary key (miner_id)
|
||||
);
|
||||
|
||||
/*
|
||||
* captures miner-specific power state for any given stateroot
|
||||
*/
|
||||
create table if not exists miner_power
|
||||
(
|
||||
miner_id text not null,
|
||||
state_root text not null,
|
||||
raw_bytes_power text not null,
|
||||
quality_adjusted_power text not null,
|
||||
constraint miner_power_pk
|
||||
primary key (miner_id, state_root)
|
||||
);
|
||||
|
||||
/* used to tell when a miners sectors (proven-not-yet-expired) changed if the miner_sectors_cid's are different a new sector was added or removed (terminated/expired) */
|
||||
create table if not exists miner_sectors_heads
|
||||
(
|
||||
miner_id text not null,
|
||||
miner_sectors_cid text not null,
|
||||
|
||||
state_root text not null,
|
||||
|
||||
constraint miner_sectors_heads_pk
|
||||
primary key (miner_id,miner_sectors_cid)
|
||||
|
||||
);
|
||||
|
||||
DO $$
|
||||
BEGIN
|
||||
IF NOT EXISTS (SELECT 1 FROM pg_type WHERE typname = 'miner_sector_event_type') THEN
|
||||
CREATE TYPE miner_sector_event_type AS ENUM
|
||||
(
|
||||
'ADDED','EXTENDED', 'EXPIRED', 'TERMINATED'
|
||||
);
|
||||
END IF;
|
||||
END$$;
|
||||
|
||||
create table if not exists miner_sector_events
|
||||
(
|
||||
miner_id text not null,
|
||||
sector_id bigint not null,
|
||||
state_root text not null,
|
||||
event miner_sector_event_type not null,
|
||||
|
||||
constraint miner_sector_events_pk
|
||||
primary key (sector_id, event, miner_id, state_root)
|
||||
)
|
||||
`); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
type minerActorInfo struct {
|
||||
common actorInfo
|
||||
|
||||
state miner.State
|
||||
|
||||
// tracked by power actor
|
||||
rawPower big.Int
|
||||
qalPower big.Int
|
||||
}
|
||||
|
||||
type sectorUpdate struct {
|
||||
terminationEpoch abi.ChainEpoch
|
||||
terminated bool
|
||||
|
||||
expirationEpoch abi.ChainEpoch
|
||||
|
||||
sectorID abi.SectorNumber
|
||||
minerID address.Address
|
||||
}
|
||||
|
||||
func (p *Processor) HandleMinerChanges(ctx context.Context, minerTips ActorTips) error {
|
||||
minerChanges, err := p.processMiners(ctx, minerTips)
|
||||
if err != nil {
|
||||
log.Fatalw("Failed to process miner actors", "error", err)
|
||||
}
|
||||
|
||||
if err := p.persistMiners(ctx, minerChanges); err != nil {
|
||||
log.Fatalw("Failed to persist miner actors", "error", err)
|
||||
}
|
||||
|
||||
if err := p.updateMiners(ctx, minerChanges); err != nil {
|
||||
log.Fatalw("Failed to update miner actors", "error", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Processor) processMiners(ctx context.Context, minerTips map[types.TipSetKey][]actorInfo) ([]minerActorInfo, error) {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
log.Debugw("Processed Miners", "duration", time.Since(start).String())
|
||||
}()
|
||||
|
||||
var out []minerActorInfo
|
||||
// TODO add parallel calls if this becomes slow
|
||||
for tipset, miners := range minerTips {
|
||||
// get the power actors claims map
|
||||
minersClaims, err := getPowerActorClaimsMap(ctx, p.node, tipset)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get miner raw and quality power
|
||||
for _, act := range miners {
|
||||
var mi minerActorInfo
|
||||
mi.common = act
|
||||
|
||||
var claim power.Claim
|
||||
// get miner claim from power actors claim map and store if found, else the miner had no claim at
|
||||
// this tipset
|
||||
found, err := minersClaims.Get(adt.AddrKey(act.addr), &claim)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if found {
|
||||
mi.qalPower = claim.QualityAdjPower
|
||||
mi.rawPower = claim.RawBytePower
|
||||
}
|
||||
|
||||
// Get the miner state info
|
||||
astb, err := p.node.ChainReadObj(ctx, act.act.Head)
|
||||
if err != nil {
|
||||
log.Warnw("failed to find miner actor state", "address", act.addr, "error", err)
|
||||
continue
|
||||
}
|
||||
if err := mi.state.UnmarshalCBOR(bytes.NewReader(astb)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out = append(out, mi)
|
||||
}
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (p *Processor) persistMiners(ctx context.Context, miners []minerActorInfo) error {
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
log.Debugw("Persisted Miners", "duration", time.Since(start).String())
|
||||
}()
|
||||
|
||||
grp, _ := errgroup.WithContext(ctx)
|
||||
|
||||
grp.Go(func() error {
|
||||
if err := p.storeMinersActorState(miners); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
grp.Go(func() error {
|
||||
if err := p.storeMinersPower(miners); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
grp.Go(func() error {
|
||||
if err := p.storeMinersSectorState(miners); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
grp.Go(func() error {
|
||||
if err := p.storeMinersSectorHeads(miners); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
return grp.Wait()
|
||||
}

func (p *Processor) storeMinersActorState(miners []minerActorInfo) error {
    start := time.Now()
    defer func() {
        log.Debugw("Stored Miners Actor State", "duration", time.Since(start).String())
    }()

    tx, err := p.db.Begin()
    if err != nil {
        return err
    }

    if _, err := tx.Exec(`create temp table mi (like miner_info excluding constraints) on commit drop;`); err != nil {
        return xerrors.Errorf("prep temp: %w", err)
    }

    stmt, err := tx.Prepare(`copy mi (miner_id, owner_addr, worker_addr, peer_id, sector_size, precommit_deposits, locked_funds, next_deadline_process_faults) from STDIN`)
    if err != nil {
        return err
    }
    for _, m := range miners {
        var pid string
        if len(m.state.Info.PeerId) != 0 {
            peerid, err := peer.IDFromBytes(m.state.Info.PeerId)
            if err != nil {
                // this should "never happen", but if it does we should still store info about the miner.
                log.Warnw("failed to decode peerID", "peerID (bytes)", m.state.Info.PeerId, "miner", m.common.addr, "tipset", m.common.tsKey.String())
            } else {
                pid = peerid.String()
            }
        }
        if _, err := stmt.Exec(
            m.common.addr.String(),
            m.state.Info.Owner.String(),
            m.state.Info.Worker.String(),
            pid,
            m.state.Info.SectorSize.ShortString(),
            m.state.PreCommitDeposits.String(),
            m.state.LockedFunds.String(),
            m.state.NextDeadlineToProcessFaults,
        ); err != nil {
            log.Errorw("failed to store miner state", "state", m.state, "info", m.state.Info, "error", err)
            return xerrors.Errorf("failed to store miner state: %w", err)
        }
    }
    if err := stmt.Close(); err != nil {
        return err
    }

    if _, err := tx.Exec(`insert into miner_info select * from mi on conflict do nothing`); err != nil {
        return xerrors.Errorf("actor put: %w", err)
    }

    return tx.Commit()
}
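
The `copy ... from STDIN` statements in this file rely on lib/pq's COPY protocol support. With pq imported, pq.CopyIn builds the same statement from a table name and column list, which avoids hand-writing the column tuple; note that a COPY statement buffers rows and is flushed by a final empty Exec before Close. A hedged sketch of the equivalent prepare:

    stmt, err := tx.Prepare(pq.CopyIn("mi",
        "miner_id", "owner_addr", "worker_addr", "peer_id",
        "sector_size", "precommit_deposits", "locked_funds",
        "next_deadline_process_faults",
    ))
    if err != nil {
        return err
    }
    // ... one stmt.Exec(fields...) per miner, as above ...
    if _, err := stmt.Exec(); err != nil { // the empty Exec flushes the buffered COPY data
        return err
    }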

func (p *Processor) storeMinersPower(miners []minerActorInfo) error {
    start := time.Now()
    defer func() {
        log.Debugw("Stored Miners Power", "duration", time.Since(start).String())
    }()

    tx, err := p.db.Begin()
    if err != nil {
        return xerrors.Errorf("begin miner_power tx: %w", err)
    }

    if _, err := tx.Exec(`create temp table mp (like miner_power excluding constraints) on commit drop`); err != nil {
        return xerrors.Errorf("prep miner_power temp: %w", err)
    }

    stmt, err := tx.Prepare(`copy mp (miner_id, state_root, raw_bytes_power, quality_adjusted_power) from STDIN`)
    if err != nil {
        return xerrors.Errorf("prepare tmp miner_power: %w", err)
    }

    for _, m := range miners {
        if _, err := stmt.Exec(
            m.common.addr.String(),
            m.common.stateroot.String(),
            m.rawPower.String(),
            m.qalPower.String(),
        ); err != nil {
            log.Errorw("failed to store miner power", "miner", m.common.addr, "stateroot", m.common.stateroot, "error", err)
        }
    }

    if err := stmt.Close(); err != nil {
        return xerrors.Errorf("close prepared miner_power: %w", err)
    }

    if _, err := tx.Exec(`insert into miner_power select * from mp on conflict do nothing`); err != nil {
        return xerrors.Errorf("insert miner_power from tmp: %w", err)
    }

    if err := tx.Commit(); err != nil {
        return xerrors.Errorf("commit miner_power tx: %w", err)
    }

    return nil
}

func (p *Processor) storeMinersSectorState(miners []minerActorInfo) error {
    start := time.Now()
    defer func() {
        log.Debugw("Stored Miners Sector State", "duration", time.Since(start).String())
    }()

    tx, err := p.db.Begin()
    if err != nil {
        return err
    }

    if _, err := tx.Exec(`create temp table ms (like miner_sectors excluding constraints) on commit drop;`); err != nil {
        return xerrors.Errorf("prep temp: %w", err)
    }

    stmt, err := tx.Prepare(`copy ms (miner_id, sector_id, activation_epoch, expiration_epoch, deal_weight, verified_deal_weight, seal_cid, seal_rand_epoch) from STDIN`)
    if err != nil {
        return err
    }

    grp, ctx := errgroup.WithContext(context.TODO())
    for _, m := range miners {
        m := m
        grp.Go(func() error {
            sectors, err := p.node.StateMinerSectors(ctx, m.common.addr, nil, true, m.common.tsKey)
            if err != nil {
                log.Debugw("Failed to load sectors", "tipset", m.common.tsKey.String(), "miner", m.common.addr.String(), "error", err)
            }

            for _, sector := range sectors {
                if _, err := stmt.Exec(
                    m.common.addr.String(),
                    uint64(sector.ID),
                    int64(sector.Info.ActivationEpoch),
                    int64(sector.Info.Info.Expiration),
                    sector.Info.DealWeight.String(),
                    sector.Info.VerifiedDealWeight.String(),
                    sector.Info.Info.SealedCID.String(),
                    int64(sector.Info.Info.SealRandEpoch),
                ); err != nil {
                    return err
                }
            }
            return nil
        })
    }

    if err := grp.Wait(); err != nil {
        return err
    }

    if err := stmt.Close(); err != nil {
        return err
    }

    if _, err := tx.Exec(`insert into miner_sectors select * from ms on conflict do nothing`); err != nil {
        return xerrors.Errorf("actor put: %w", err)
    }

    return tx.Commit()
}
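
Two things are worth noting about the fan-out above: the prepared COPY statement is shared across goroutines, which database/sql permits (*sql.Stmt is documented as safe for concurrent use), and a StateMinerSectors failure is only logged at debug level, so that miner silently contributes no rows. If dropping sectors is not intended, the error could be propagated instead, e.g.:

    sectors, err := p.node.StateMinerSectors(ctx, m.common.addr, nil, true, m.common.tsKey)
    if err != nil {
        return xerrors.Errorf("loading sectors for %s: %w", m.common.addr, err)
    }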

func (p *Processor) storeMinersSectorHeads(miners []minerActorInfo) error {
    start := time.Now()
    defer func() {
        log.Debugw("Stored Miners Sector Heads", "duration", time.Since(start).String())
    }()

    tx, err := p.db.Begin()
    if err != nil {
        return err
    }

    if _, err := tx.Exec(`create temp table msh (like miner_sectors_heads excluding constraints) on commit drop;`); err != nil {
        return xerrors.Errorf("prep temp: %w", err)
    }

    stmt, err := tx.Prepare(`copy msh (miner_id, miner_sectors_cid, state_root) from STDIN`)
    if err != nil {
        return err
    }

    for _, m := range miners {
        if _, err := stmt.Exec(
            m.common.addr.String(),
            m.state.Sectors.String(),
            m.common.stateroot.String(),
        ); err != nil {
            log.Errorw("failed to store miners sectors head", "state", m.state, "info", m.state.Info, "error", err)
            return err
        }
    }
    if err := stmt.Close(); err != nil {
        return err
    }

    if _, err := tx.Exec(`insert into miner_sectors_heads select * from msh on conflict do nothing`); err != nil {
        return xerrors.Errorf("actor put: %w", err)
    }

    return tx.Commit()
}

func (p *Processor) updateMiners(ctx context.Context, miners []minerActorInfo) error {
    // TODO when/if there is more than one update operation here, use an errgroup as is done in persistMiners
    if err := p.updateMinersSectors(ctx, miners); err != nil {
        return err
    }
    return nil
}

func (p *Processor) updateMinersSectors(ctx context.Context, miners []minerActorInfo) error {
    log.Debugw("Updating Miners Sectors", "#miners", len(miners))
    start := time.Now()
    defer func() {
        log.Debugw("Updated Miners Sectors", "duration", time.Since(start).String())
    }()

    pred := state.NewStatePredicates(p.node)

    eventTx, err := p.db.Begin()
    if err != nil {
        return err
    }

    if _, err := eventTx.Exec(`create temp table mse (like miner_sector_events excluding constraints) on commit drop;`); err != nil {
        return xerrors.Errorf("prep temp: %w", err)
    }

    eventStmt, err := eventTx.Prepare(`copy mse (sector_id, event, miner_id, state_root) from STDIN`)
    if err != nil {
        return err
    }

    var updateWg sync.WaitGroup
    updateWg.Add(1)
    sectorUpdatesCh := make(chan sectorUpdate)
    var sectorUpdates []sectorUpdate
    go func() {
        for u := range sectorUpdatesCh {
            sectorUpdates = append(sectorUpdates, u)
        }
        updateWg.Done()
    }()

    minerGrp, ctx := errgroup.WithContext(ctx)
    // NOTE: complete is incremented from several goroutines without synchronization,
    // so the count is only a rough progress indicator.
    complete := 0
    for _, m := range miners {
        m := m
        minerGrp.Go(func() error {
            // special case genesis miners
            sectorDiffFn := pred.OnMinerActorChange(m.common.addr, pred.OnMinerSectorChange())
            changed, val, err := sectorDiffFn(ctx, m.common.parentTsKey, m.common.tsKey)
            if err != nil {
                if strings.Contains(err.Error(), "address not found") {
                    return nil
                }
                log.Errorw("error getting miner sector diff", "miner", m.common.addr, "error", err)
                return err
            }
            if !changed {
                complete++
                return nil
            }
            changes, ok := val.(*state.MinerSectorChanges)
            if !ok {
                log.Fatalw("Developer Error")
            }
            log.Debugw("sector changes for miner", "miner", m.common.addr.String(), "Added", len(changes.Added), "Extended", len(changes.Extended), "Removed", len(changes.Removed), "oldState", m.common.parentTsKey, "newState", m.common.tsKey)

            for _, extended := range changes.Extended {
                if _, err := eventStmt.Exec(extended.To.Info.SectorNumber, "EXTENDED", m.common.addr.String(), m.common.stateroot.String()); err != nil {
                    return err
                }
                sectorUpdatesCh <- sectorUpdate{
                    terminationEpoch: 0,
                    terminated:       false,
                    expirationEpoch:  extended.To.Info.Expiration,
                    sectorID:         extended.From.Info.SectorNumber,
                    minerID:          m.common.addr,
                }

                log.Debugw("sector extended", "miner", m.common.addr.String(), "sector", extended.To.Info.SectorNumber, "old", extended.From.Info.Expiration, "new", extended.To.Info.Expiration)
            }
            curTs, err := p.node.ChainGetTipSet(ctx, m.common.tsKey)
            if err != nil {
                return err
            }

            for _, removed := range changes.Removed {
                log.Debugw("removed", "miner", m.common.addr)
                // decide if they were terminated or expired
                if removed.Info.Expiration > curTs.Height() {
                    if _, err := eventStmt.Exec(removed.Info.SectorNumber, "TERMINATED", m.common.addr.String(), m.common.stateroot.String()); err != nil {
                        return err
                    }
                    log.Debugw("sector terminated", "miner", m.common.addr.String(), "sector", removed.Info.SectorNumber, "sectorExpiration", removed.Info.Expiration, "terminationEpoch", curTs.Height())
                    sectorUpdatesCh <- sectorUpdate{
                        terminationEpoch: curTs.Height(),
                        terminated:       true,
                        expirationEpoch:  removed.Info.Expiration,
                        sectorID:         removed.Info.SectorNumber,
                        minerID:          m.common.addr,
                    }
                }
                if _, err := eventStmt.Exec(removed.Info.SectorNumber, "EXPIRED", m.common.addr.String(), m.common.stateroot.String()); err != nil {
                    return err
                }
                log.Debugw("sector removed", "miner", m.common.addr.String(), "sector", removed.Info.SectorNumber, "sectorExpiration", removed.Info.Expiration, "currEpoch", curTs.Height())
            }

            for _, added := range changes.Added {
                if _, err := eventStmt.Exec(added.Info.SectorNumber, "ADDED", m.common.addr.String(), m.common.stateroot.String()); err != nil {
                    return err
                }
            }
            complete++
            log.Debugw("Update Done", "complete", complete, "added", len(changes.Added), "removed", len(changes.Removed), "modified", len(changes.Extended))
            return nil
        })
    }
    if err := minerGrp.Wait(); err != nil {
        return err
    }
    close(sectorUpdatesCh)
    // wait for the update channel to be drained
    updateWg.Wait()

    if err := eventStmt.Close(); err != nil {
        return err
    }

    if _, err := eventTx.Exec(`insert into miner_sector_events select * from mse on conflict do nothing`); err != nil {
        return xerrors.Errorf("actor put: %w", err)
    }

    if err := eventTx.Commit(); err != nil {
        return err
    }

    updateTx, err := p.db.Begin()
    if err != nil {
        return err
    }

    updateStmt, err := updateTx.Prepare(`UPDATE miner_sectors SET termination_epoch=$1, expiration_epoch=$2 WHERE miner_id=$3 AND sector_id=$4`)
    if err != nil {
        return err
    }

    for _, update := range sectorUpdates {
        if update.terminated {
            if _, err := updateStmt.Exec(update.terminationEpoch, update.expirationEpoch, update.minerID.String(), update.sectorID); err != nil {
                return err
            }
        } else {
            if _, err := updateStmt.Exec(nil, update.expirationEpoch, update.minerID.String(), update.sectorID); err != nil {
                return err
            }
        }
    }

    if err := updateStmt.Close(); err != nil {
        return err
    }

    return updateTx.Commit()
}

// load the power actor state claims as an adt.Map at the tipset `ts`.
func getPowerActorClaimsMap(ctx context.Context, api api.FullNode, ts types.TipSetKey) (*adt.Map, error) {
    powerActor, err := api.StateGetActor(ctx, builtin.StoragePowerActorAddr, ts)
    if err != nil {
        return nil, err
    }

    powerRaw, err := api.ChainReadObj(ctx, powerActor.Head)
    if err != nil {
        return nil, err
    }

    var powerActorState power.State
    if err := powerActorState.UnmarshalCBOR(bytes.NewReader(powerRaw)); err != nil {
        return nil, fmt.Errorf("failed to unmarshal power actor state: %w", err)
    }

    s := cw_util.NewAPIIpldStore(ctx, api)
    return adt.AsMap(s, powerActorState.Claims)
}
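
Beyond the point lookups in processMiners, the returned map supports full iteration over every claim in the power actor's HAMT. A usage sketch, assuming the same adt/power imports; the HAMT key is the serialized miner address, decoded here with address.NewFromBytes:

    claims, err := getPowerActorClaimsMap(ctx, node, ts)
    if err != nil {
        return err
    }
    var claim power.Claim
    // visit every miner's claim in the map
    if err := claims.ForEach(&claim, func(k string) error {
        maddr, err := address.NewFromBytes([]byte(k))
        if err != nil {
            return err
        }
        log.Debugw("claim", "miner", maddr, "raw", claim.RawBytePower, "qa", claim.QualityAdjPower)
        return nil
    }); err != nil {
        return err
    }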
103
cmd/lotus-chainwatch/processor/mpool.go
Normal file
@ -0,0 +1,103 @@

package processor

import (
    "context"
    "time"

    "golang.org/x/xerrors"

    "github.com/ipfs/go-cid"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/chain/types"
)

func (p *Processor) subMpool(ctx context.Context) {
    sub, err := p.node.MpoolSub(ctx)
    if err != nil {
        return
    }

    for {
        var updates []api.MpoolUpdate

        select {
        case update := <-sub:
            updates = append(updates, update)
        case <-ctx.Done():
            return
        }

    loop:
        for {
            time.Sleep(10 * time.Millisecond)
            select {
            case update := <-sub:
                updates = append(updates, update)
            default:
                break loop
            }
        }

        msgs := map[cid.Cid]*types.Message{}
        for _, v := range updates {
            if v.Type != api.MpoolAdd {
                continue
            }

            msgs[v.Message.Message.Cid()] = &v.Message.Message
        }

        log.Debugf("Processing %d mpool updates", len(msgs))

        err := p.storeMessages(msgs)
        if err != nil {
            log.Error(err)
        }

        if err := p.storeMpoolInclusions(updates); err != nil {
            log.Error(err)
        }
    }
}
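
The labeled inner loop coalesces mpool updates: it sleeps 10ms, drains whatever is queued, and repeats until a pass finds nothing, so storage writes happen in batches rather than per message. The same effect can be had with a fixed collection window and a timer instead of sleep-polling; a sketch (the semantics differ slightly: this bounds the batch at 10ms total rather than requiring a 10ms quiet period):

    timer := time.NewTimer(10 * time.Millisecond)
    collect:
    for {
        select {
        case update := <-sub:
            updates = append(updates, update)
        case <-timer.C:
            break collect
        }
    }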

func (p *Processor) storeMpoolInclusions(msgs []api.MpoolUpdate) error {
    tx, err := p.db.Begin()
    if err != nil {
        return err
    }

    if _, err := tx.Exec(`
        create temp table mi (like mpool_messages excluding constraints) on commit drop;
    `); err != nil {
        return xerrors.Errorf("prep temp: %w", err)
    }

    stmt, err := tx.Prepare(`copy mi (msg, add_ts) from stdin`)
    if err != nil {
        return err
    }

    for _, msg := range msgs {
        if msg.Type != api.MpoolAdd {
            continue
        }

        if _, err := stmt.Exec(
            msg.Message.Message.Cid().String(),
            time.Now().Unix(),
        ); err != nil {
            return err
        }
    }

    if err := stmt.Close(); err != nil {
        return err
    }

    if _, err := tx.Exec(`insert into mpool_messages select * from mi on conflict do nothing`); err != nil {
        return xerrors.Errorf("actor put: %w", err)
    }

    return tx.Commit()
}
332
cmd/lotus-chainwatch/processor/processor.go
Normal file
@ -0,0 +1,332 @@

package processor

import (
    "context"
    "database/sql"
    "encoding/json"
    "sync"
    "time"

    "golang.org/x/sync/errgroup"
    "golang.org/x/xerrors"

    "github.com/filecoin-project/go-address"
    "github.com/ipfs/go-cid"
    logging "github.com/ipfs/go-log/v2"

    "github.com/filecoin-project/specs-actors/actors/abi"
    "github.com/filecoin-project/specs-actors/actors/builtin"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/lib/parmap"
)

var log = logging.Logger("processor")

type Processor struct {
    db *sql.DB

    node api.FullNode

    // number of blocks processed at a time
    batch int
}

type ActorTips map[types.TipSetKey][]actorInfo

type actorInfo struct {
    act types.Actor

    stateroot cid.Cid
    height    abi.ChainEpoch // so that we can walk the actor changes in chronological order.

    tsKey       types.TipSetKey
    parentTsKey types.TipSetKey

    addr  address.Address
    state string
}

func NewProcessor(db *sql.DB, node api.FullNode, batch int) *Processor {
    return &Processor{
        db:    db,
        node:  node,
        batch: batch,
    }
}

func (p *Processor) setupSchemas() error {
    if err := p.setupMarket(); err != nil {
        return err
    }

    if err := p.setupMiners(); err != nil {
        return err
    }

    if err := p.setupRewards(); err != nil {
        return err
    }

    if err := p.setupMessages(); err != nil {
        return err
    }

    if err := p.setupCommonActors(); err != nil {
        return err
    }

    return nil
}

func (p *Processor) Start(ctx context.Context) {
    log.Debug("Starting Processor")

    if err := p.setupSchemas(); err != nil {
        log.Fatalw("Failed to setup processor", "error", err)
    }

    go p.subMpool(ctx)

    // main processor loop
    go func() {
        for {
            select {
            case <-ctx.Done():
                log.Debugw("Stopping Processor...")
                return
            default:
                toProcess, err := p.unprocessedBlocks(ctx, p.batch)
                if err != nil {
                    log.Fatalw("Failed to get unprocessed blocks", "error", err)
                }

                if len(toProcess) == 0 {
                    log.Debugw("No unprocessed blocks. Wait then try again...")
                    time.Sleep(time.Second * 10)
                    continue
                }

                // TODO special case genesis state handling here to avoid all the special cases that will be
                // needed for it elsewhere before doing "normal" processing.

                actorChanges, err := p.collectActorChanges(ctx, toProcess)
                if err != nil {
                    log.Fatalw("Failed to collect actor changes", "error", err)
                }

                grp, ctx := errgroup.WithContext(ctx)

                grp.Go(func() error {
                    if err := p.HandleMarketChanges(ctx, actorChanges[builtin.StorageMarketActorCodeID]); err != nil {
                        return xerrors.Errorf("Failed to handle market changes: %w", err)
                    }
                    return nil
                })

                grp.Go(func() error {
                    if err := p.HandleMinerChanges(ctx, actorChanges[builtin.StorageMinerActorCodeID]); err != nil {
                        return xerrors.Errorf("Failed to handle miner changes: %w", err)
                    }
                    return nil
                })

                grp.Go(func() error {
                    if err := p.HandleRewardChanges(ctx, actorChanges[builtin.RewardActorCodeID]); err != nil {
                        return xerrors.Errorf("Failed to handle reward changes: %w", err)
                    }
                    return nil
                })

                grp.Go(func() error {
                    if err := p.HandleMessageChanges(ctx, toProcess); err != nil {
                        return xerrors.Errorf("Failed to handle message changes: %w", err)
                    }
                    return nil
                })

                grp.Go(func() error {
                    if err := p.HandleCommonActorsChanges(ctx, actorChanges); err != nil {
                        return xerrors.Errorf("Failed to handle common actor changes: %w", err)
                    }
                    return nil
                })

                if err := grp.Wait(); err != nil {
                    log.Errorw("Failed to handle actor changes...retrying", "error", err)
                    continue
                }

                if err := p.markBlocksProcessed(ctx, toProcess); err != nil {
                    log.Fatalw("Failed to mark blocks as processed", "error", err)
                }

                if err := p.refreshViews(); err != nil {
                    log.Errorw("Failed to refresh views", "error", err)
                }
            }
        }
    }()
}

func (p *Processor) refreshViews() error {
    if _, err := p.db.Exec(`refresh materialized view state_heights`); err != nil {
        return err
    }

    return nil
}

func (p *Processor) collectActorChanges(ctx context.Context, toProcess map[cid.Cid]*types.BlockHeader) (map[cid.Cid]ActorTips, error) {
    start := time.Now()
    defer func() {
        log.Debugw("Collected Actor Changes", "duration", time.Since(start).String())
    }()
    // ActorCode -> tipset -> []actorInfo
    out := map[cid.Cid]ActorTips{}
    var outMu sync.Mutex

    // map of addresses to changed actors
    var changes map[string]types.Actor
    actorsSeen := map[cid.Cid]struct{}{}

    // collect all actor state that has changed between block headers
    paDone := 0
    parmap.Par(50, parmap.MapArr(toProcess), func(bh *types.BlockHeader) {
        paDone++
        if paDone%100 == 0 {
            log.Debugw("Collecting actor changes", "done", paDone, "percent", (paDone*100)/len(toProcess))
        }

        pts, err := p.node.ChainGetTipSet(ctx, types.NewTipSetKey(bh.Parents...))
        if err != nil {
            panic(err)
        }

        // collect all actors that had state changes between the blockheader parent-state and its grandparent-state.
        // TODO: changes will contain deleted actors; this causes needless processing further down the pipeline,
        // consider a separate strategy for deleted actors
        changes, err = p.node.StateChangedActors(ctx, pts.ParentState(), bh.ParentStateRoot)
        if err != nil {
            panic(err)
        }

        // record the state of all actors that have changed
        for a, act := range changes {
            act := act
            a := a

            addr, err := address.NewFromString(a)
            if err != nil {
                panic(err)
            }

            ast, err := p.node.StateReadState(ctx, addr, pts.Key())
            if err != nil {
                panic(err)
            }

            // TODO look here for an empty state, maybe that's a sign the actor was deleted?

            state, err := json.Marshal(ast.State)
            if err != nil {
                panic(err)
            }

            outMu.Lock()
            if _, ok := actorsSeen[act.Head]; !ok {
                _, ok := out[act.Code]
                if !ok {
                    out[act.Code] = map[types.TipSetKey][]actorInfo{}
                }
                out[act.Code][pts.Key()] = append(out[act.Code][pts.Key()], actorInfo{
                    act:         act,
                    stateroot:   bh.ParentStateRoot,
                    height:      bh.Height,
                    tsKey:       pts.Key(),
                    parentTsKey: pts.Parents(),
                    addr:        addr,
                    state:       string(state),
                })
            }
            actorsSeen[act.Head] = struct{}{}
            outMu.Unlock()
        }
    })
    return out, nil
}
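
Because parmap.Par offers no way for a worker to return an error, failures above escalate to panic and take down the whole process. If cancelling the batch is preferable to crashing, the same fan-out can be expressed with errgroup; a sketch, where collectChangesForBlock is a hypothetical extraction of the closure body that returns errors instead of panicking:

    grp, ctx := errgroup.WithContext(ctx)
    for _, bh := range parmap.MapArr(toProcess) {
        bh := bh // capture the loop variable for the goroutine
        grp.Go(func() error {
            return p.collectChangesForBlock(ctx, bh)
        })
    }
    if err := grp.Wait(); err != nil {
        return nil, err
    }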

func (p *Processor) unprocessedBlocks(ctx context.Context, batch int) (map[cid.Cid]*types.BlockHeader, error) {
    start := time.Now()
    defer func() {
        log.Debugw("Gathered Blocks to process", "duration", time.Since(start).String())
    }()
    rows, err := p.db.Query(`
        with toProcess as (
            select blocks.cid, blocks.height, rank() over (order by height) as rnk
            from blocks
            left join blocks_synced bs on blocks.cid = bs.cid
            where bs.processed_at is null and blocks.height > 0
        )
        select cid
        from toProcess
        where rnk <= $1
    `, batch)
    if err != nil {
        return nil, xerrors.Errorf("Failed to query for unprocessed blocks: %w", err)
    }
    out := map[cid.Cid]*types.BlockHeader{}

    // TODO consider parallel execution here for getting the blocks from the api as is done in fetchMessages()
    for rows.Next() {
        if rows.Err() != nil {
            return nil, rows.Err()
        }
        var c string
        if err := rows.Scan(&c); err != nil {
            return nil, xerrors.Errorf("Failed to scan unprocessed blocks: %w", err)
        }
        ci, err := cid.Parse(c)
        if err != nil {
            return nil, xerrors.Errorf("Failed to parse unprocessed blocks: %w", err)
        }
        bh, err := p.node.ChainGetBlock(ctx, ci)
        if err != nil {
            // this is a pretty serious issue.
            return nil, xerrors.Errorf("Failed to get block header %s: %w", ci.String(), err)
        }
        out[ci] = bh
    }
    return out, rows.Close()
}
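
The CTE ranks unprocessed blocks by height and keeps the lowest `$1` of them, so each batch starts at the oldest unsynced height. Since the rank() has no PARTITION BY, an ORDER BY/LIMIT form selects the same batch, except that `rnk <= $1` can return extra rows when heights tie at the cutoff while LIMIT cannot; a hypothetical alternative query:

    const unprocessedBlocksQuery = `
        select blocks.cid
        from blocks
        left join blocks_synced bs on blocks.cid = bs.cid
        where bs.processed_at is null and blocks.height > 0
        order by blocks.height
        limit $1
    `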

func (p *Processor) markBlocksProcessed(ctx context.Context, processed map[cid.Cid]*types.BlockHeader) error {
    start := time.Now()
    defer func() {
        log.Debugw("Marked blocks as Processed", "duration", time.Since(start).String())
    }()
    tx, err := p.db.Begin()
    if err != nil {
        return err
    }

    processedAt := time.Now().Unix()
    stmt, err := tx.Prepare(`update blocks_synced set processed_at=$1 where cid=$2`)
    if err != nil {
        return err
    }

    for c := range processed {
        if _, err := stmt.Exec(processedAt, c.String()); err != nil {
            return err
        }
    }

    if err := stmt.Close(); err != nil {
        return err
    }

    return tx.Commit()
}
231
cmd/lotus-chainwatch/processor/reward.go
Normal file
@ -0,0 +1,231 @@

package processor

import (
    "bytes"
    "context"
    "time"

    "golang.org/x/sync/errgroup"
    "golang.org/x/xerrors"

    "github.com/filecoin-project/lotus/build"
    "github.com/filecoin-project/specs-actors/actors/abi/big"
    "github.com/filecoin-project/specs-actors/actors/builtin"
    "github.com/filecoin-project/specs-actors/actors/builtin/reward"
)

type rewardActorInfo struct {
    common actorInfo

    // expected power in bytes during this epoch
    baselinePower big.Int

    // base reward in attofil for each block found during this epoch
    baseBlockReward big.Int
}

func (p *Processor) setupRewards() error {
    tx, err := p.db.Begin()
    if err != nil {
        return err
    }

    if _, err := tx.Exec(`
/*
 * captures base block reward per miner per state root and does not
 * include penalties or gas reward
 */
create table if not exists base_block_rewards
(
    state_root text not null
        constraint block_rewards_pk
            primary key,
    base_block_reward numeric not null
);

/* captures chain-specific power state for any given stateroot */
create table if not exists chain_power
(
    state_root text not null
        constraint chain_power_pk
            primary key,
    baseline_power text not null
);

create materialized view if not exists top_miners_by_base_reward as
    with total_rewards_by_miner as (
        select
            b.miner,
            sum(bbr.base_block_reward) as total_reward
        from blocks b
        inner join base_block_rewards bbr on b.parentstateroot = bbr.state_root
        group by 1
    ) select
        rank() over (order by total_reward desc),
        miner,
        total_reward
    from total_rewards_by_miner
    group by 2, 3;

create index if not exists top_miners_by_base_reward_miner_index
    on top_miners_by_base_reward (miner);
`); err != nil {
        return err
    }

    return tx.Commit()
}

func (p *Processor) HandleRewardChanges(ctx context.Context, rewardTips ActorTips) error {
    rewardChanges, err := p.processRewardActors(ctx, rewardTips)
    if err != nil {
        log.Fatalw("Failed to process reward actors", "error", err)
    }

    if err := p.persistRewardActors(ctx, rewardChanges); err != nil {
        return err
    }

    return nil
}

func (p *Processor) processRewardActors(ctx context.Context, rewardTips ActorTips) ([]rewardActorInfo, error) {
    start := time.Now()
    defer func() {
        log.Debugw("Processed Reward Actors", "duration", time.Since(start).String())
    }()

    var out []rewardActorInfo
    for tipset, rewards := range rewardTips {
        for _, act := range rewards {
            var rw rewardActorInfo
            rw.common = act

            // get reward actor states at each tipset once for all updates
            rewardActor, err := p.node.StateGetActor(ctx, builtin.RewardActorAddr, tipset)
            if err != nil {
                return nil, xerrors.Errorf("get reward state (@ %s): %w", rw.common.stateroot.String(), err)
            }

            rewardStateRaw, err := p.node.ChainReadObj(ctx, rewardActor.Head)
            if err != nil {
                return nil, xerrors.Errorf("read state obj (@ %s): %w", rw.common.stateroot.String(), err)
            }

            var rewardActorState reward.State
            if err := rewardActorState.UnmarshalCBOR(bytes.NewReader(rewardStateRaw)); err != nil {
                return nil, xerrors.Errorf("unmarshal state (@ %s): %w", rw.common.stateroot.String(), err)
            }

            rw.baseBlockReward = rewardActorState.LastPerEpochReward
            rw.baselinePower = rewardActorState.BaselinePower
            out = append(out, rw)
        }
    }
    return out, nil
}

func (p *Processor) persistRewardActors(ctx context.Context, rewards []rewardActorInfo) error {
    start := time.Now()
    defer func() {
        log.Debugw("Persisted Reward Actors", "duration", time.Since(start).String())
    }()

    grp, ctx := errgroup.WithContext(ctx)

    grp.Go(func() error {
        if err := p.storeChainPower(rewards); err != nil {
            return err
        }
        return nil
    })

    grp.Go(func() error {
        if err := p.storeBaseBlockReward(rewards); err != nil {
            return err
        }
        return nil
    })

    return grp.Wait()
}

func (p *Processor) storeChainPower(rewards []rewardActorInfo) error {
    tx, err := p.db.Begin()
    if err != nil {
        return xerrors.Errorf("begin chain_power tx: %w", err)
    }

    if _, err := tx.Exec(`create temp table cp (like chain_power excluding constraints) on commit drop`); err != nil {
        return xerrors.Errorf("prep chain_power temp: %w", err)
    }

    stmt, err := tx.Prepare(`copy cp (state_root, baseline_power) from STDIN`)
    if err != nil {
        return xerrors.Errorf("prepare tmp chain_power: %w", err)
    }

    for _, rewardState := range rewards {
        if _, err := stmt.Exec(
            rewardState.common.stateroot.String(),
            rewardState.baselinePower.String(),
        ); err != nil {
            log.Errorw("failed to store chain power", "state_root", rewardState.common.stateroot, "error", err)
        }
    }

    if err := stmt.Close(); err != nil {
        return xerrors.Errorf("close prepared chain_power: %w", err)
    }

    if _, err := tx.Exec(`insert into chain_power select * from cp on conflict do nothing`); err != nil {
        return xerrors.Errorf("insert chain_power from tmp: %w", err)
    }

    if err := tx.Commit(); err != nil {
        return xerrors.Errorf("commit chain_power tx: %w", err)
    }

    return nil
}

func (p *Processor) storeBaseBlockReward(rewards []rewardActorInfo) error {
    tx, err := p.db.Begin()
    if err != nil {
        return xerrors.Errorf("begin base_block_reward tx: %w", err)
    }

    if _, err := tx.Exec(`create temp table bbr (like base_block_rewards excluding constraints) on commit drop`); err != nil {
        return xerrors.Errorf("prep base_block_reward temp: %w", err)
    }

    stmt, err := tx.Prepare(`copy bbr (state_root, base_block_reward) from STDIN`)
    if err != nil {
        return xerrors.Errorf("prepare tmp base_block_reward: %w", err)
    }

    for _, rewardState := range rewards {
        baseBlockReward := big.Div(rewardState.baseBlockReward, big.NewIntUnsigned(build.BlocksPerEpoch))
        if _, err := stmt.Exec(
            rewardState.common.stateroot.String(),
            baseBlockReward.String(),
        ); err != nil {
            log.Errorw("failed to store base block reward", "state_root", rewardState.common.stateroot, "error", err)
        }
    }

    if err := stmt.Close(); err != nil {
        return xerrors.Errorf("close prepared base_block_reward: %w", err)
    }

    if _, err := tx.Exec(`insert into base_block_rewards select * from bbr on conflict do nothing`); err != nil {
        return xerrors.Errorf("insert base_block_reward from tmp: %w", err)
    }

    if err := tx.Commit(); err != nil {
        return xerrors.Errorf("commit base_block_reward tx: %w", err)
    }

    return nil
}
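
LastPerEpochReward is the total reward minted for an epoch, so dividing by build.BlocksPerEpoch (the expected number of winning blocks per epoch) yields the per-block base reward stored above. A worked example, assuming 5 blocks per epoch and a 100 FIL epoch reward (1 FIL = 10^18 attoFIL):

    epochReward := big.Mul(big.NewInt(100), big.NewInt(1e18)) // 100 FIL in attoFIL
    perBlock := big.Div(epochReward, big.NewIntUnsigned(5))
    fmt.Println(perBlock.String()) // 20000000000000000000, i.e. 20 FIL per block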
77
cmd/lotus-chainwatch/run.go
Normal file
@ -0,0 +1,77 @@

package main

import (
    "database/sql"
    "os"

    _ "github.com/lib/pq"

    lcli "github.com/filecoin-project/lotus/cli"
    logging "github.com/ipfs/go-log/v2"
    "github.com/urfave/cli/v2"
    "golang.org/x/xerrors"

    "github.com/filecoin-project/lotus/cmd/lotus-chainwatch/processor"
    "github.com/filecoin-project/lotus/cmd/lotus-chainwatch/syncer"
)

var runCmd = &cli.Command{
    Name:  "run",
    Usage: "Start lotus chainwatch",
    Flags: []cli.Flag{
        &cli.IntFlag{
            Name:  "max-batch",
            Value: 1000,
        },
    },
    Action: func(cctx *cli.Context) error {
        ll := cctx.String("log-level")
        if err := logging.SetLogLevel("*", ll); err != nil {
            return err
        }
        if err := logging.SetLogLevel("rpc", "error"); err != nil {
            return err
        }

        api, closer, err := lcli.GetFullNodeAPI(cctx)
        if err != nil {
            return err
        }
        defer closer()
        ctx := lcli.ReqContext(cctx)

        v, err := api.Version(ctx)
        if err != nil {
            return err
        }

        log.Infof("Remote version: %s", v.Version)

        maxBatch := cctx.Int("max-batch")

        db, err := sql.Open("postgres", cctx.String("db"))
        if err != nil {
            return err
        }
        defer func() {
            if err := db.Close(); err != nil {
                log.Errorw("Failed to close database", "error", err)
            }
        }()

        if err := db.Ping(); err != nil {
            return xerrors.Errorf("Database failed to respond to ping (is it online?): %w", err)
        }
        db.SetMaxOpenConns(1350)

        sync := syncer.NewSyncer(db, api)
        sync.Start(ctx)

        proc := processor.NewProcessor(db, api, maxBatch)
        proc.Start(ctx)

        <-ctx.Done()
        os.Exit(0)
        return nil
    },
}
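
The action reads two flags (`db`, `log-level`) that the command itself does not declare, so they are presumably registered as global flags in main.go (not shown in this diff). Under that assumption, a typical invocation might look like:

    lotus-chainwatch --db="postgres://user:password@localhost:5432/chainwatch?sslmode=disable" --log-level=info run --max-batch=500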
@ -1,61 +0,0 @@

<!DOCTYPE html>
<html>
<head>
    <title>Lotus ChainWatch</title>
    <link rel="stylesheet" type="text/css" href="main.css">
</head>
<body>
{{$cid := param "cid"}}

<div class="Index">
    <div class="Index-header">
        <div>
            <span>Lotus ChainWatch - Wallets</span>
        </div>
    </div>
    <div class="Index-nodes">
        <div class="Index-node">
            <div>Miner: {{index (strings "blocks" "miner" "cid=$1" $cid) 0}}</div>
            <div>Parents:</div>
            <div>
                {{range strings "block_parents" "parent" "block=$1" $cid}}
                    {{$parent := .}}
                    <a href="block.html?cid={{$parent}}">{{. | substr 54 62}}</a>
                {{end}}
            </div>
            <div>Messages:</div>
            <table>
                {{range strings "block_messages" "message" "block=$1" $cid}}
                    {{$msg := .}}
                    <tr>
                        <td><a href="message.html?cid={{$msg}}">{{$msg | substr 54 62}}</a></td>
                        <td>
                            {{$from := qstr "select \"from\" from messages where cid=$1" $msg}}
                            {{$nonce := qstr "select nonce from messages where cid=$1" $msg}}
                            <a href="key.html?w={{$from}}">{{$from}}</a> (N:{{$nonce}})
                        </td>
                        <td>-></td>
                        <td>
                            {{$to := qstr "select \"to\" from messages where cid=$1" $msg}}
                            <a href="key.html?w={{$to}}">{{$to}}</a>
                        </td>
                        <td>
                            Method:<b>{{qstr "select method from messages where cid=$1" $msg}}</b>
                        </td>
                        {{$rec := qstrs `select r.exit, r.gas_used from messages
                            inner join block_messages bm on messages.cid = bm.message
                            inner join blocks b on bm.block = b.cid
                            inner join block_parents bp on b.cid = bp.parent
                            inner join blocks chd on bp.block = chd.cid
                            inner join receipts r on messages.cid = r.msg and chd.parentStateRoot = r.state
                            where messages.cid=$1 and b.cid=$2` 2 $msg $cid}}
                        <td>exit:<b>{{index $rec 0}}</b></td>
                        <td>gasUsed:<b>{{index $rec 1}}</b></td>
                    </tr>
                {{end}}
            </table>
        </div>
    </div>
</div>
</body>
</html>
@ -1,43 +0,0 @@

<!DOCTYPE html>
<html>
<head>
    <title>Lotus ChainWatch</title>
    <link rel="stylesheet" type="text/css" href="main.css">
</head>
<body>
{{$start := param "start" | parseInt}}

<div class="Index">
    <div class="Index-header">
        <div>
            <span>Lotus ChainWatch - Wallets</span>
        </div>
    </div>
    <div class="Index-nodes">
        <div class="Index-node">
            <table>
                {{range pageDown $start 50}}
                    <tr>
                        <td>
                            {{$h := .}}
                            {{$h}};
                        </td>
                        <td>
                            <b>{{qstr `select count(distinct block_messages.message) from block_messages
                                inner join blocks b on block_messages.block = b.cid
                                where b.height = $1` $h}}</b> Msgs
                        </td>
                        <td>
                            {{range strings "blocks" "cid" "height = $1" $h}}
                                <a href="block.html?cid={{.}}">{{. | substr 54 62}}</a>
                            {{end}}
                        </td>
                    </tr>
                {{end}}
            </table>
            <a href="blocks.html?start={{sub $start 50}}">Next 50</a>
        </div>
    </div>
</div>
</body>
</html>
@ -1,37 +0,0 @@

<!DOCTYPE html>
<html>
<head>
    <title>Lotus ChainWatch</title>
    <link rel="stylesheet" type="text/css" href="main.css">
</head>
<body>
<div class="Index">
    <div class="Index-header">
        <div>
            <span>Lotus ChainWatch</span>
        </div>
    </div>
    <div class="Index-nodes">
        <div class="Index-node">
            <b>{{countCol "actors" "id"}}</b> Actors;
            <b>{{countCol "miner_heads" "addr"}}</b> Miners;
            <b>{{netPower "slashed_at = 0" | sizeStr}}</b> Power
            (<b>{{netPower "" | sizeStr}}</b> Total;
            <b>{{netPower "slashed_at > 0" | sizeStr}}</b> Slashed)
        </div>
        <div class="Index-node">
            {{count "messages"}} Messages; {{count "actors"}} state changes
        </div>
        <div class="Index-node">
            {{count "id_address_map" "id != address"}} <a href="keys.html">Keys</a>;
            E% FIL in wallets; F% FIL in miners; M% in market; %G Other actors; %H FIL in treasury
        </div>
        <div class="Index-node">
            {{$maxH := queryNum "select max(height) from blocks inner join blocks_synced bs on blocks.cid = bs.cid"}}

            {{count "blocks"}} <a href="blocks.html?start={{$maxH}}">Blocks</a>; Current Height: {{$maxH}};
        </div>
    </div>
</div>
</body>
</html>
@ -1,40 +0,0 @@

<!DOCTYPE html>
<html>
<head>
    <title>Lotus ChainWatch</title>
    <link rel="stylesheet" type="text/css" href="main.css">
</head>
<body>
{{$wallet := param "w"}}

<div class="Index">
    <div class="Index-header">
        <div>
            <span>Lotus ChainWatch - Wallet {{$wallet}}</span>
        </div>
    </div>
    <div class="Index-nodes">
        <div class="Index-node">
            Balance: {{queryNum "select balance from actors inner join id_address_map m on m.address = $1 where actors.id = m.id order by nonce desc limit 1" $wallet }}
        </div>
        <div class="Index-node">
            Messages:
            <table>
                <tr><td>Dir</td><td>Peer</td><td>Nonce</td><td>Value</td><td>Block</td><td>Mpool Wait</td></tr>
                {{ range messages "\"from\" = $1 or \"to\" = $1" $wallet}}
                    <tr>
                        {{ if eq .From.String $wallet }}
                            <td>To</td><td><a href="key.html?w={{.To.String}}">{{.To.String}}</a></td>
                        {{else}}
                            <td>From</td><td><a href="key.html?w={{.From.String}}">{{.From.String}}</a></td>
                        {{end}}
                        <td>{{.Nonce}}</td>
                        <td>{{.Value}}</td>
                    </tr>
                {{end}}
            </table>
        </div>
    </div>
</div>
</body>
</html>
@ -1,28 +0,0 @@

<!DOCTYPE html>
<html>
<head>
    <title>Lotus ChainWatch</title>
    <link rel="stylesheet" type="text/css" href="main.css">
</head>
<body>
<div class="Index">
    <div class="Index-header">
        <div>
            <span>Lotus ChainWatch - Wallets</span>
        </div>
    </div>
    <div class="Index-nodes">
        <div class="Index-node">
            {{range strings "id_address_map" "address" "address != id"}}
                {{$addr := .}}
                <div>
                    <a href="key.html?w={{$addr}}">{{$addr}}</a>
                    <span><b>{{qstr "select count(distinct cid) from messages where \"from\"=$1" $addr}}</b> outmsgs;</span>
                    <span><b>{{qstr "select count(distinct cid) from messages where \"to\"=$1" $addr}}</b> inmsgs</span>
                </div>
            {{end}}
        </div>
    </div>
</div>
</body>
</html>
@ -1,66 +0,0 @@

body {
    font-family: 'monospace';
    background: #1f1f1f;
    color: #f0f0f0;
    padding: 0;
    margin: 0;
}

b {
    color: #aff;
}

.Index {
    width: 100vw;
    height: 100vh;
    background: #1a1a1a;
    color: #f0f0f0;
    font-family: monospace;
    overflow: auto;

    display: grid;
    grid-template-columns: auto 80vw auto;
    grid-template-rows: 3em auto auto auto;
    grid-template-areas:
        "header header header header"
        ". . . ."
        ". main main ."
        ". main main ."
        ". main main ."
        ". main main ."
        ". main main ."
        ". . . .";
}

.Index-header {
    background: #2a2a2a;
    grid-area: header;
}

.Index-Index-header > div {
    padding-left: 0.7em;
    padding-top: 0.7em;
}

.Index-nodes {
    grid-area: main;
    background: #2a2a2a;
}

.Index-node {
    margin: 5px;
    padding: 15px;
    background: #1f1f1f;
}

a:link {
    color: #50f020;
}

a:visited {
    color: #50f020;
}

a:hover {
    color: #30a00a;
}
File diff suppressed because it is too large
@ -1,748 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"container/list"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/filecoin-project/go-address"
|
||||
"github.com/ipfs/go-cid"
|
||||
cbg "github.com/whyrusleeping/cbor-gen"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/filecoin-project/specs-actors/actors/abi"
|
||||
"github.com/filecoin-project/specs-actors/actors/abi/big"
|
||||
"github.com/filecoin-project/specs-actors/actors/builtin"
|
||||
"github.com/filecoin-project/specs-actors/actors/builtin/market"
|
||||
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
||||
"github.com/filecoin-project/specs-actors/actors/builtin/power"
|
||||
"github.com/filecoin-project/specs-actors/actors/builtin/reward"
|
||||
"github.com/filecoin-project/specs-actors/actors/util/adt"
|
||||
|
||||
"github.com/filecoin-project/lotus/api"
|
||||
"github.com/filecoin-project/lotus/chain/store"
|
||||
"github.com/filecoin-project/lotus/chain/types"
|
||||
parmap "github.com/filecoin-project/lotus/lib/parmap"
|
||||
)
|
||||
|
||||
func runSyncer(ctx context.Context, api api.FullNode, st *storage, maxBatch int) {
|
||||
notifs, err := api.ChainNotify(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
go func() {
|
||||
for notif := range notifs {
|
||||
for _, change := range notif {
|
||||
switch change.Type {
|
||||
case store.HCCurrent:
|
||||
fallthrough
|
||||
case store.HCApply:
|
||||
syncHead(ctx, api, st, change.Val, maxBatch)
|
||||
case store.HCRevert:
|
||||
log.Warnf("revert todo")
|
||||
}
|
||||
|
||||
if change.Type == store.HCCurrent {
|
||||
go subMpool(ctx, api, st)
|
||||
go subBlocks(ctx, api, st)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
type rewardStateInfo struct {
|
||||
stateroot cid.Cid
|
||||
baselinePower big.Int
|
||||
}
|
||||
|
||||
type minerStateInfo struct {
|
||||
// common
|
||||
addr address.Address
|
||||
act types.Actor
|
||||
stateroot cid.Cid
|
||||
|
||||
// calculating changes
|
||||
tsKey types.TipSetKey
|
||||
parentTsKey types.TipSetKey
|
||||
|
||||
// miner specific
|
||||
state miner.State
|
||||
info *miner.MinerInfo
|
||||
|
||||
// tracked by power actor
|
||||
rawPower big.Int
|
||||
qalPower big.Int
|
||||
ssize uint64
|
||||
psize uint64
|
||||
}
|
||||
|
||||
type marketStateInfo struct {
|
||||
// common
|
||||
act types.Actor
|
||||
stateroot cid.Cid
|
||||
|
||||
// calculating changes
|
||||
// calculating changes
|
||||
tsKey types.TipSetKey
|
||||
parentTsKey types.TipSetKey
|
||||
|
||||
// market actor specific
|
||||
state market.State
|
||||
}
|
||||
|
||||
type actorInfo struct {
|
||||
stateroot cid.Cid
|
||||
tsKey types.TipSetKey
|
||||
parentTsKey types.TipSetKey
|
||||
state string
|
||||
}
|
||||
|
||||
type tipsetKeyHeight struct {
|
||||
height abi.ChainEpoch
|
||||
tsKey types.TipSetKey
|
||||
}
|
||||
|
||||
func syncHead(ctx context.Context, api api.FullNode, st *storage, headTs *types.TipSet, maxBatch int) {
|
||||
var alk sync.Mutex
|
||||
|
||||
log.Infof("Getting synced block list")
|
||||
|
||||
hazlist := st.hasList()
|
||||
|
||||
log.Infof("Getting headers / actors")
|
||||
|
||||
// global list of all blocks that need to be synced
|
||||
allToSync := map[cid.Cid]*types.BlockHeader{}
|
||||
// a stack
|
||||
toVisit := list.New()
|
||||
|
||||
for _, header := range headTs.Blocks() {
|
||||
toVisit.PushBack(header)
|
||||
}
|
||||
|
||||
// TODO consider making a db query to check where syncing left off at in the case of a restart and avoid reprocessing
|
||||
// those entries, or write value to file on shutdown
|
||||
// walk the entire chain starting from headTS
|
||||
for toVisit.Len() > 0 {
|
||||
bh := toVisit.Remove(toVisit.Back()).(*types.BlockHeader)
|
||||
_, has := hazlist[bh.Cid()]
|
||||
if _, seen := allToSync[bh.Cid()]; seen || has {
|
||||
continue
|
||||
}
|
||||
|
||||
allToSync[bh.Cid()] = bh
|
||||
if len(allToSync)%500 == 10 {
|
||||
log.Debugf("to visit: (%d) %s @%d", len(allToSync), bh.Cid(), bh.Height)
|
||||
}
|
||||
|
||||
if len(bh.Parents) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
pts, err := api.ChainGetTipSet(ctx, types.NewTipSetKey(bh.Parents...))
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, header := range pts.Blocks() {
|
||||
toVisit.PushBack(header)
|
||||
}
|
||||
}
|
||||
|
||||
// Main worker loop, this loop runs until all tipse from headTS to genesis have been processed.
|
||||
for len(allToSync) > 0 {
|
||||
// first map is addresses -> common actors states (head, code, balance, nonce)
|
||||
// second map common actor states -> chain state (tipset, stateroot) & unique actor state (deserialization of their head CID) represented as json.
|
||||
actors := map[address.Address]map[types.Actor]actorInfo{}
|
||||
|
||||
// map of actor public key address to ID address
|
||||
addressToID := map[address.Address]address.Address{}
|
||||
minH := abi.ChainEpoch(math.MaxInt64)
|
||||
|
||||
// find the blockheader with the lowest height
|
||||
for _, header := range allToSync {
|
||||
if header.Height < minH {
|
||||
minH = header.Height
|
||||
}
|
||||
}
|
||||
|
||||
// toSync maps block cids to their headers and contains all block headers that will be synced in this batch
|
||||
// `maxBatch` is a tunable parameter to control how many blocks we sync per iteration.
|
||||
toSync := map[cid.Cid]*types.BlockHeader{}
|
||||
for c, header := range allToSync {
|
||||
if header.Height < minH+abi.ChainEpoch(maxBatch) {
|
||||
toSync[c] = header
|
||||
addressToID[header.Miner] = address.Undef
|
||||
}
|
||||
}
|
||||
// remove everything we are syncing this round from the global list of blocks to sync
|
||||
for c := range toSync {
|
||||
delete(allToSync, c)
|
||||
}
|
||||
|
||||
log.Infow("Starting Sync", "height", minH, "numBlocks", len(toSync), "maxBatch", maxBatch)
|
||||
|
||||
// relate tipset keys to height so they may be processed in ascending order.
|
||||
var tipHeights []tipsetKeyHeight
|
||||
tipsSeen := make(map[types.TipSetKey]struct{})
|
||||
// map of addresses to changed actors
|
||||
var changes map[string]types.Actor
|
||||
// collect all actor state that has changes between block headers
|
||||
paDone := 0
|
||||
parmap.Par(50, parmap.MapArr(toSync), func(bh *types.BlockHeader) {
|
||||
paDone++
|
||||
if paDone%100 == 0 {
|
||||
log.Infof("pa: %d %d%%", paDone, (paDone*100)/len(toSync))
|
||||
}
|
||||
|
||||
if len(bh.Parents) == 0 { // genesis case
|
||||
genesisTs, _ := types.NewTipSet([]*types.BlockHeader{bh})
|
||||
st.genesisTs = genesisTs
|
||||
|
||||
aadrs, err := api.StateListActors(ctx, genesisTs.Key())
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
// TODO suspicious there is not a lot to be gained by doing this in parallel since the genesis state
|
||||
// is unlikely to contain a lot of actors, why not for loop here?
|
||||
parmap.Par(50, aadrs, func(addr address.Address) {
|
||||
act, err := api.StateGetActor(ctx, addr, genesisTs.Key())
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
ast, err := api.StateReadState(ctx, addr, genesisTs.Key())
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return
|
||||
}
|
||||
state, err := json.Marshal(ast.State)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
alk.Lock()
|
||||
_, ok := actors[addr]
|
||||
if !ok {
|
||||
actors[addr] = map[types.Actor]actorInfo{}
|
||||
}
|
||||
actors[addr][*act] = actorInfo{
|
||||
stateroot: bh.ParentStateRoot,
|
||||
tsKey: genesisTs.Key(),
|
||||
parentTsKey: genesisTs.Key(),
|
||||
state: string(state),
|
||||
}
|
||||
addressToID[addr] = address.Undef
|
||||
alk.Unlock()
|
||||
})
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
pts, err := api.ChainGetTipSet(ctx, types.NewTipSetKey(bh.Parents...))
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
// TODO Does this return actors that have been deleted between states?
|
||||
// collect all actors that had state changes between the blockheader parent-state and its grandparent-state.
|
||||
changes, err = api.StateChangedActors(ctx, pts.ParentState(), bh.ParentStateRoot)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
// record the state of all actors that have changed
|
||||
for a, act := range changes {
|
||||
act := act
|
||||
|
||||
addr, err := address.NewFromString(a)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
ast, err := api.StateReadState(ctx, addr, pts.Key())
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
state, err := json.Marshal(ast.State)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
alk.Lock()
|
||||
_, ok := actors[addr]
|
||||
if !ok {
|
||||
actors[addr] = map[types.Actor]actorInfo{}
|
||||
}
|
||||
// a change occurred for the actor with address `addr` and state `act` at tipset `pts`.
|
||||
actors[addr][act] = actorInfo{
|
||||
stateroot: bh.ParentStateRoot,
|
||||
state: string(state),
|
||||
tsKey: pts.Key(),
|
||||
parentTsKey: pts.Parents(),
|
||||
}
|
||||
addressToID[addr] = address.Undef
|
||||
if _, ok := tipsSeen[pts.Key()]; !ok {
|
||||
tipHeights = append(tipHeights, tipsetKeyHeight{
|
||||
height: pts.Height(),
|
||||
tsKey: pts.Key(),
|
||||
})
|
||||
}
|
||||
tipsSeen[pts.Key()] = struct{}{}
|
||||
alk.Unlock()
|
||||
}
|
||||
})
|
||||
// sort tipHeights in ascending order.
|
||||
sort.Slice(tipHeights, func(i, j int) bool {
|
||||
return tipHeights[i].height < tipHeights[j].height
|
||||
})
|
||||
|
||||
// map of tipset to reward state
|
||||
rewardTips := make(map[types.TipSetKey]*rewardStateInfo, len(changes))
|
||||
// map of tipset to all miners that had a head-change at that tipset.
|
||||
minerTips := make(map[types.TipSetKey][]*minerStateInfo, len(changes))
|
||||
// heads we've seen, im being paranoid
|
||||
headsSeen := make(map[cid.Cid]struct{}, len(actors))
|
||||
|
||||
log.Infof("Getting messages")
|
||||
|
||||
msgs, incls := fetchMessages(ctx, api, toSync)
|
||||
|
||||
log.Infof("Resolving addresses")
|
||||
|
||||
for _, message := range msgs {
|
||||
addressToID[message.To] = address.Undef
|
||||
addressToID[message.From] = address.Undef
|
||||
}
|
||||
|
||||
parmap.Par(50, parmap.KMapArr(addressToID), func(addr address.Address) {
|
||||
// FIXME: cannot use EmptyTSK here since actorID's can change during reorgs, need to use the corresponding tipset.
|
||||
// TODO: figure out a way to get the corresponding tipset...
|
||||
raddr, err := api.StateLookupID(ctx, addr, types.EmptyTSK)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
return
|
||||
}
|
||||
alk.Lock()
|
||||
addressToID[addr] = raddr
|
||||
alk.Unlock()
|
||||
})
|
||||
|
||||
log.Infof("Getting actor change info")
|
||||
|
||||
// highly likely that the market actor will change at every epoch
|
||||
marketActorChanges := make(map[types.TipSetKey]*marketStateInfo, len(changes))
|
||||
|
||||
minerChanges := 0
|
||||
for addr, m := range actors {
|
||||
for actor, c := range m {
|
||||
// only want actors with head change events
|
||||
if _, found := headsSeen[actor.Head]; found {
|
||||
continue
|
||||
}
|
||||
headsSeen[actor.Head] = struct{}{}
|
||||
|
||||
switch actor.Code {
|
||||
case builtin.StorageMarketActorCodeID:
|
||||
marketActorChanges[c.tsKey] = &marketStateInfo{
|
||||
act: actor,
|
||||
stateroot: c.stateroot,
|
||||
tsKey: c.tsKey,
|
||||
parentTsKey: c.parentTsKey,
|
||||
state: market.State{},
|
||||
}
|
||||
case builtin.StorageMinerActorCodeID:
|
||||
minerChanges++
|
||||
|
||||
minerTips[c.tsKey] = append(minerTips[c.tsKey], &minerStateInfo{
|
||||
addr: addr,
|
||||
act: actor,
|
||||
stateroot: c.stateroot,
|
||||
|
||||
tsKey: c.tsKey,
|
||||
parentTsKey: c.parentTsKey,
|
||||
|
||||
state: miner.State{},
|
||||
info: nil,
|
||||
|
||||
rawPower: big.Zero(),
|
||||
qalPower: big.Zero(),
|
||||
})
|
||||
// reward actor
|
||||
case builtin.RewardActorCodeID:
|
||||
rewardTips[c.tsKey] = &rewardStateInfo{
|
||||
stateroot: c.stateroot,
|
||||
baselinePower: big.Zero(),
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}

rewardProcessingStartedAt := time.Now()
parmap.Par(50, parmap.KVMapArr(rewardTips), func(it func() (types.TipSetKey, *rewardStateInfo)) {
    tsKey, rewardInfo := it()
    // get reward actor states at each tipset once for all updates
    rewardActor, err := api.StateGetActor(ctx, builtin.RewardActorAddr, tsKey)
    if err != nil {
        log.Error(xerrors.Errorf("get reward state (@ %s): %w", rewardInfo.stateroot.String(), err))
        return
    }

    rewardStateRaw, err := api.ChainReadObj(ctx, rewardActor.Head)
    if err != nil {
        log.Error(xerrors.Errorf("read state obj (@ %s): %w", rewardInfo.stateroot.String(), err))
        return
    }

    var rewardActorState reward.State
    if err := rewardActorState.UnmarshalCBOR(bytes.NewReader(rewardStateRaw)); err != nil {
        log.Error(xerrors.Errorf("unmarshal state (@ %s): %w", rewardInfo.stateroot.String(), err))
        return
    }

    panic("TODO")
    //rewardInfo.baselinePower = rewardActorState.BaselinePower
})
log.Infow("Completed Reward Processing", "duration", time.Since(rewardProcessingStartedAt).String(), "processed", len(rewardTips))

minerProcessingStartedAt := time.Now()
log.Infow("Processing miners", "numTips", len(minerTips), "numMinerChanges", minerChanges)
// extract the power actor state at each tipset, loop over all miners that changed at said tipset and extract their
// claims from the power actor state. This ensures we only fetch the power actor's state once for each tipset.
parmap.Par(50, parmap.KVMapArr(minerTips), func(it func() (types.TipSetKey, []*minerStateInfo)) {
    tsKey, minerInfo := it()

    // get the power actor's claims map
    mp, err := getPowerActorClaimsMap(ctx, api, tsKey)
    if err != nil {
        log.Error(err)
        return
    }
    // Get miner raw and quality power
    for _, mi := range minerInfo {
        var claim power.Claim
        // get the miner's claim from the power actor's claims map and store it if found; otherwise the miner
        // had no claim at this tipset
        found, err := mp.Get(adt.AddrKey(mi.addr), &claim)
        if err != nil {
            log.Error(err)
        }
        if found {
            mi.qalPower = claim.QualityAdjPower
            mi.rawPower = claim.RawBytePower
        }

        // Get the miner state info
        astb, err := api.ChainReadObj(ctx, mi.act.Head)
        if err != nil {
            log.Error(err)
            return
        }
        if err := mi.state.UnmarshalCBOR(bytes.NewReader(astb)); err != nil {
            log.Error(err)
            return
        }
        mi.info, err = mi.state.GetInfo(&apiIpldStore{ctx, api})
        if err != nil {
            log.Error(err)
            return
        }
    }

    // TODO: get the sector count
    // FIXME: this is returning a lot of "address not found" errors, which is strange given that StateChangedActors
    // returns all actors that had a state change at tipset `k.tsKey`; maybe it's returning deleted miners too?
    /*
        sszs, err := api.StateMinerSectorCount(ctx, k.addr, k.tsKey)
        if err != nil {
            info.psize = 0
            info.ssize = 0
        } else {
            info.psize = sszs.Pset
            info.ssize = sszs.Sset
        }
    */
})
log.Infow("Completed Miner Processing", "duration", time.Since(minerProcessingStartedAt).String(), "processed", minerChanges)

log.Info("Getting market actor info")
// TODO: consider taking the min of the array length and using that for the concurrency param, e.g.:
// concurrency := math.Min(len(marketActorChanges), 50)
parmap.Par(50, parmap.MapArr(marketActorChanges), func(mrktInfo *marketStateInfo) {
    astb, err := api.ChainReadObj(ctx, mrktInfo.act.Head)
    if err != nil {
        log.Error(err)
        return
    }
    if err := mrktInfo.state.UnmarshalCBOR(bytes.NewReader(astb)); err != nil {
        log.Error(err)
        return
    }
})

log.Info("Getting receipts")

receipts := fetchParentReceipts(ctx, api, toSync)

log.Info("Storing headers")

if err := st.storeHeaders(toSync, true); err != nil {
    log.Errorf("%+v", err)
    return
}

log.Info("Storing address mapping")

if err := st.storeAddressMap(addressToID); err != nil {
    log.Error(err)
    return
}

log.Info("Storing actors")
if err := st.storeActors(actors); err != nil {
    log.Error(err)
    return
}

chainPowerStartedAt := time.Now()
if err := st.storeChainPower(rewardTips); err != nil {
    log.Error(err)
}
log.Infow("Stored chain power", "duration", time.Since(chainPowerStartedAt).String())

log.Info("Storing miners")
if err := st.storeMiners(minerTips); err != nil {
    log.Error(err)
    return
}

minerPowerStartedAt := time.Now()
if err := st.storeMinerPower(minerTips); err != nil {
    log.Error(err)
}
log.Infow("Stored miner power", "duration", time.Since(minerPowerStartedAt).String())

sectorStart := time.Now()
if err := st.storeSectors(minerTips, api); err != nil {
    log.Error(err)
    return
}
log.Infow("Stored miner sectors", "duration", time.Since(sectorStart).String())

log.Info("Storing miner sectors heads")
if err := st.storeMinerSectorsHeads(minerTips, api); err != nil {
    log.Error(err)
    return
}

log.Info("Updating miner sectors heads")
if err := st.updateMinerSectors(minerTips, api); err != nil {
    log.Error(err)
    return
}

log.Info("Storing market actor deal proposal info")
if err := st.storeMarketActorDealProposals(marketActorChanges, tipHeights, api); err != nil {
    log.Error(err)
    return
}

log.Info("Storing market actor deal state info")
if err := st.storeMarketActorDealStates(marketActorChanges, tipHeights, api); err != nil {
    log.Error(err)
    return
}

log.Info("Updating market actor deal proposal info")
if err := st.updateMarketActorDealProposals(marketActorChanges, tipHeights, api); err != nil {
    log.Error(err)
    return
}

log.Infof("Storing messages")

if err := st.storeMessages(msgs); err != nil {
    log.Error(err)
    return
}

log.Info("Storing message inclusions")

if err := st.storeMsgInclusions(incls); err != nil {
    log.Error(err)
    return
}

log.Infof("Storing parent receipts")

if err := st.storeReceipts(receipts); err != nil {
    log.Error(err)
    return
}
log.Infof("Sync stage done")
}

log.Infof("Get deals")

// TODO: incremental, gather expired
deals, err := api.StateMarketDeals(ctx, headTs.Key())
if err != nil {
    log.Error(err)
    return
}

log.Infof("Store deals")

if err := st.storeDeals(deals); err != nil {
    log.Error(err)
    return
}

log.Infof("Refresh views")

if err := st.refreshViews(); err != nil {
    log.Error(err)
    return
}

log.Infof("Sync done")
}

func fetchMessages(ctx context.Context, api api.FullNode, toSync map[cid.Cid]*types.BlockHeader) (map[cid.Cid]*types.Message, map[cid.Cid][]cid.Cid) {
    var lk sync.Mutex
    messages := map[cid.Cid]*types.Message{}
    inclusions := map[cid.Cid][]cid.Cid{} // block -> msgs

    parmap.Par(50, parmap.MapArr(toSync), func(header *types.BlockHeader) {
        msgs, err := api.ChainGetBlockMessages(ctx, header.Cid())
        if err != nil {
            log.Error(err)
            return
        }

        vmm := make([]*types.Message, 0, len(msgs.Cids))
        for _, m := range msgs.BlsMessages {
            vmm = append(vmm, m)
        }

        for _, m := range msgs.SecpkMessages {
            vmm = append(vmm, &m.Message)
        }

        lk.Lock()
        for _, message := range vmm {
            messages[message.Cid()] = message
            inclusions[header.Cid()] = append(inclusions[header.Cid()], message.Cid())
        }
        lk.Unlock()
    })

    return messages, inclusions
}
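
The closure above mutates two shared maps, so every write happens under lk. A minimal stand-in for parmap.Par, assuming only the bounded-concurrency, block-until-done semantics relied on here, could look like:

func par(concurrency int, hdrs []*types.BlockHeader, fn func(*types.BlockHeader)) {
    sem := make(chan struct{}, concurrency) // bounds the number of in-flight goroutines
    var wg sync.WaitGroup
    for _, h := range hdrs {
        wg.Add(1)
        sem <- struct{}{}
        go func(h *types.BlockHeader) {
            defer wg.Done()
            defer func() { <-sem }()
            fn(h)
        }(h)
    }
    wg.Wait() // return only after every closure has finished
}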

type mrec struct {
    msg   cid.Cid
    state cid.Cid
    idx   int
}

func fetchParentReceipts(ctx context.Context, api api.FullNode, toSync map[cid.Cid]*types.BlockHeader) map[mrec]*types.MessageReceipt {
    var lk sync.Mutex
    out := map[mrec]*types.MessageReceipt{}

    parmap.Par(50, parmap.MapArr(toSync), func(header *types.BlockHeader) {
        recs, err := api.ChainGetParentReceipts(ctx, header.Cid())
        if err != nil {
            log.Error(err)
            return
        }
        msgs, err := api.ChainGetParentMessages(ctx, header.Cid())
        if err != nil {
            log.Error(err)
            return
        }

        lk.Lock()
        for i, r := range recs {
            out[mrec{
                msg:   msgs[i].Cid,
                state: header.ParentStateRoot,
                idx:   i,
            }] = r
        }
        lk.Unlock()
    })

    return out
}
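
The indexing in the loop relies on ChainGetParentReceipts and ChainGetParentMessages returning parallel slices, so recs[i] is the receipt for msgs[i]. A defensive length check, sketched here as a possible addition rather than part of the code above, would make that assumption explicit:

// hypothetical guard before the lk.Lock() section
if len(recs) != len(msgs) {
    log.Errorw("receipt/message count mismatch", "block", header.Cid(), "receipts", len(recs), "messages", len(msgs))
    return
}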

// load the power actor's state Claims as an adt.Map at the tipset `ts`.
func getPowerActorClaimsMap(ctx context.Context, api api.FullNode, ts types.TipSetKey) (*adt.Map, error) {
    powerActor, err := api.StateGetActor(ctx, builtin.StoragePowerActorAddr, ts)
    if err != nil {
        return nil, err
    }

    powerRaw, err := api.ChainReadObj(ctx, powerActor.Head)
    if err != nil {
        return nil, err
    }

    var powerActorState power.State
    if err := powerActorState.UnmarshalCBOR(bytes.NewReader(powerRaw)); err != nil {
        return nil, fmt.Errorf("failed to unmarshal power actor state: %w", err)
    }

    s := &apiIpldStore{ctx, api}
    return adt.AsMap(s, powerActorState.Claims)
}
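
Besides point lookups with Get, as in the miner loop above, the returned map can be walked in full. A sketch, assuming adt.Map's ForEach(out, fn) signature where keys are raw miner-address bytes:

var claim power.Claim
if err := mp.ForEach(&claim, func(k string) error {
    maddr, err := address.NewFromBytes([]byte(k)) // claims are keyed by address bytes via adt.AddrKey
    if err != nil {
        return err
    }
    log.Infow("claim", "miner", maddr, "raw", claim.RawBytePower, "qa", claim.QualityAdjPower)
    return nil
}); err != nil {
    log.Error(err)
}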

// required for AMT and HAMT access
// TODO: extract this to a common location in lotus and reuse the code
type apiIpldStore struct {
    ctx context.Context
    api api.FullNode
}

func (ht *apiIpldStore) Context() context.Context {
    return ht.ctx
}

func (ht *apiIpldStore) Get(ctx context.Context, c cid.Cid, out interface{}) error {
    raw, err := ht.api.ChainReadObj(ctx, c)
    if err != nil {
        return err
    }

    cu, ok := out.(cbg.CBORUnmarshaler)
    if ok {
        if err := cu.UnmarshalCBOR(bytes.NewReader(raw)); err != nil {
            return err
        }
        return nil
    }
    return fmt.Errorf("object does not implement CBORUnmarshaler: %T", out)
}

func (ht *apiIpldStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) {
    return cid.Undef, fmt.Errorf("Put is not implemented on apiIpldStore")
}

@@ -1,25 +1,24 @@
package main
package syncer

import (
    "context"
    "time"

    "github.com/filecoin-project/lotus/chain/types"
    "github.com/ipfs/go-cid"

    aapi "github.com/filecoin-project/lotus/api"
)

func subBlocks(ctx context.Context, api aapi.FullNode, st *storage) {
    sub, err := api.SyncIncomingBlocks(ctx)
func (s *Syncer) subBlocks(ctx context.Context) {
    sub, err := s.node.SyncIncomingBlocks(ctx)
    if err != nil {
        log.Error(err)
        return
    }

    for bh := range sub {
        err := st.storeHeaders(map[cid.Cid]*types.BlockHeader{
        err := s.storeHeaders(map[cid.Cid]*types.BlockHeader{
            bh.Cid(): bh,
        }, false)
        }, false, time.Now())
        if err != nil {
            log.Errorf("%+v", err)
        }

cmd/lotus-chainwatch/syncer/sync.go (new file, 446 lines)
@@ -0,0 +1,446 @@
package syncer

import (
    "container/list"
    "context"
    "database/sql"
    "sync"
    "time"

    "golang.org/x/xerrors"

    "github.com/ipfs/go-cid"
    logging "github.com/ipfs/go-log/v2"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/chain/store"
    "github.com/filecoin-project/lotus/chain/types"
)

var log = logging.Logger("syncer")

type Syncer struct {
    db *sql.DB

    headerLk sync.Mutex
    node     api.FullNode
}

func NewSyncer(db *sql.DB, node api.FullNode) *Syncer {
    return &Syncer{
        db:   db,
        node: node,
    }
}

func (s *Syncer) setupSchemas() error {
    tx, err := s.db.Begin()
    if err != nil {
        return err
    }

    if _, err := tx.Exec(`
create table if not exists block_cids
(
    cid text not null
        constraint block_cids_pk
            primary key
);

create unique index if not exists block_cids_cid_uindex
    on block_cids (cid);

create table if not exists blocks_synced
(
    cid text not null
        constraint blocks_synced_pk
            primary key
        constraint blocks_block_cids_cid_fk
            references block_cids (cid),
    synced_at int not null,
    processed_at int
);

create unique index if not exists blocks_synced_cid_uindex
    on blocks_synced (cid, processed_at);

create table if not exists block_parents
(
    block text not null
        constraint blocks_block_cids_cid_fk
            references block_cids (cid),
    parent text not null
);

create unique index if not exists block_parents_block_parent_uindex
    on block_parents (block, parent);

create table if not exists drand_entries
(
    round bigint not null
        constraint drand_entries_pk
            primary key,
    data bytea not null
);
create unique index if not exists drand_entries_round_uindex
    on drand_entries (round);

create table if not exists block_drand_entries
(
    round bigint not null
        constraint block_drand_entries_drand_entries_round_fk
            references drand_entries (round),
    block text not null
        constraint blocks_block_cids_cid_fk
            references block_cids (cid)
);
create unique index if not exists block_drand_entries_round_uindex
    on block_drand_entries (round, block);

create table if not exists blocks
(
    cid text not null
        constraint blocks_pk
            primary key
        constraint blocks_block_cids_cid_fk
            references block_cids (cid),
    parentWeight numeric not null,
    parentStateRoot text not null,
    height bigint not null,
    miner text not null,
    timestamp bigint not null,
    ticket bytea not null,
    eprof bytea,
    forksig bigint not null
);

create unique index if not exists block_cid_uindex
    on blocks (cid, height);

create materialized view if not exists state_heights
    as select distinct height, parentstateroot from blocks;

create index if not exists state_heights_height_index
    on state_heights (height);

create index if not exists state_heights_parentstateroot_index
    on state_heights (parentstateroot);
`); err != nil {
        return err
    }

    return tx.Commit()
}
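
The blocks_synced table drives the syncer/processor hand-off: synced_at is set when a header lands, and processed_at stays null until later stages complete. An illustrative query in that spirit (not part of this commit) for picking up pending work:

select bs.cid, b.height
from blocks_synced bs
join blocks b on b.cid = bs.cid
where bs.processed_at is null
order by b.height
limit 1000;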

func (s *Syncer) Start(ctx context.Context) {
    log.Debug("Starting Syncer")

    if err := s.setupSchemas(); err != nil {
        log.Fatal(err)
    }

    // doing the initial sync here lets us avoid the HCCurrent case in the switch
    head, err := s.node.ChainHead(ctx)
    if err != nil {
        log.Fatalw("Failed to get chain head from lotus", "error", err)
    }

    unsynced, err := s.unsyncedBlocks(ctx, head, time.Unix(0, 0))
    if err != nil {
        log.Fatalw("failed to gather unsynced blocks", "error", err)
    }

    if err := s.storeHeaders(unsynced, true, time.Now()); err != nil {
        log.Fatalw("failed to store unsynced blocks", "error", err)
    }

    // continue to keep the block headers table up to date.
    notifs, err := s.node.ChainNotify(ctx)
    if err != nil {
        log.Fatal(err)
    }

    lastSynced := time.Now()
    go func() {
        for notif := range notifs {
            for _, change := range notif {
                switch change.Type {
                case store.HCApply:
                    unsynced, err := s.unsyncedBlocks(ctx, change.Val, lastSynced)
                    if err != nil {
                        log.Errorw("failed to gather unsynced blocks", "error", err)
                    }

                    if len(unsynced) == 0 {
                        continue
                    }

                    if err := s.storeHeaders(unsynced, true, lastSynced); err != nil {
                        // this is pretty bad; we need some kind of retry here.
                        // for now just log an error and the blocks will be attempted again on the next notification
                        log.Errorw("failed to store unsynced blocks", "error", err)
                    }

                    lastSynced = time.Now()
                case store.HCRevert:
                    log.Debug("revert todo")
                }
            }
        }
    }()
}
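
Wiring the Syncer up is a one-liner once a database handle and a FullNode client exist. A minimal sketch, with the connection string and the node variable as placeholders:

// assumes: node is an api.FullNode obtained from the lotus RPC client elsewhere,
// and the lib/pq driver is registered via the usual blank import `_ "github.com/lib/pq"`.
db, err := sql.Open("postgres", "postgres://user:pass@localhost:5432/chainwatch?sslmode=disable") // placeholder DSN
if err != nil {
    log.Fatal(err)
}
syncer.NewSyncer(db, node).Start(ctx) // returns quickly; the ChainNotify loop runs in a goroutine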

func (s *Syncer) unsyncedBlocks(ctx context.Context, head *types.TipSet, since time.Time) (map[cid.Cid]*types.BlockHeader, error) {
    // get a list of blocks we have already synced in the past 3 mins. This ensures we aren't returning the entire
    // table every time.
    lookback := since.Add(-(time.Minute * 3))
    log.Debugw("Gathering unsynced blocks", "since", lookback.String())
    hasList, err := s.syncedBlocks(lookback)
    if err != nil {
        return nil, err
    }

    // build a list of blocks that we have not synced.
    toVisit := list.New()
    for _, header := range head.Blocks() {
        toVisit.PushBack(header)
    }

    toSync := map[cid.Cid]*types.BlockHeader{}

    for toVisit.Len() > 0 {
        bh := toVisit.Remove(toVisit.Back()).(*types.BlockHeader)
        _, has := hasList[bh.Cid()]
        if _, seen := toSync[bh.Cid()]; seen || has {
            continue
        }

        toSync[bh.Cid()] = bh
        if len(toSync)%500 == 10 {
            log.Debugw("To visit", "toVisit", toVisit.Len(), "toSync", len(toSync), "current_height", bh.Height)
        }

        if len(bh.Parents) == 0 {
            continue
        }

        pts, err := s.node.ChainGetTipSet(ctx, types.NewTipSetKey(bh.Parents...))
        if err != nil {
            log.Error(err)
            continue
        }

        for _, header := range pts.Blocks() {
            toVisit.PushBack(header)
        }
    }
    log.Debugw("Gathered unsynced blocks", "count", len(toSync))
    return toSync, nil
}
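
The loop is a depth-first walk from the head back through parent tipsets, pruned at genesis (no parents), at blocks already collected, and at anything the database recorded after the lookback cut-off. A worked example of that cut-off:

// since = 12:00:00 → lookback = since.Add(-(time.Minute * 3)) = 11:57:00.
// syncedBlocks(lookback) returns every cid with synced_at > 11:57:00, so blocks
// stored in the last three minutes stop the walk early instead of re-walking
// the whole chain on every head change.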

func (s *Syncer) syncedBlocks(timestamp time.Time) (map[cid.Cid]struct{}, error) {
    // timestamp is used to return a configurable amount of rows based on when they were last added.
    rws, err := s.db.Query(`select cid FROM blocks_synced where synced_at > $1`, timestamp.Unix())
    if err != nil {
        return nil, xerrors.Errorf("Failed to query blocks_synced: %w", err)
    }
    out := map[cid.Cid]struct{}{}

    for rws.Next() {
        var c string
        if err := rws.Scan(&c); err != nil {
            return nil, xerrors.Errorf("Failed to scan blocks_synced: %w", err)
        }

        ci, err := cid.Parse(c)
        if err != nil {
            return nil, xerrors.Errorf("Failed to parse blocks_synced: %w", err)
        }

        out[ci] = struct{}{}
    }
    return out, nil
}

func (s *Syncer) storeHeaders(bhs map[cid.Cid]*types.BlockHeader, sync bool, timestamp time.Time) error {
    s.headerLk.Lock()
    defer s.headerLk.Unlock()
    if len(bhs) == 0 {
        return nil
    }
    log.Debugw("Storing Headers", "count", len(bhs))

    tx, err := s.db.Begin()
    if err != nil {
        return xerrors.Errorf("begin: %w", err)
    }

    if _, err := tx.Exec(`
create temp table bc (like block_cids excluding constraints) on commit drop;
create temp table de (like drand_entries excluding constraints) on commit drop;
create temp table bde (like block_drand_entries excluding constraints) on commit drop;
create temp table tbp (like block_parents excluding constraints) on commit drop;
create temp table bs (like blocks_synced excluding constraints) on commit drop;
create temp table b (like blocks excluding constraints) on commit drop;
`); err != nil {
        return xerrors.Errorf("prep temp: %w", err)
    }

    {
        stmt, err := tx.Prepare(`copy bc (cid) from STDIN`)
        if err != nil {
            return err
        }

        for _, bh := range bhs {
            if _, err := stmt.Exec(bh.Cid().String()); err != nil {
                log.Error(err)
            }
        }

        if err := stmt.Close(); err != nil {
            return err
        }

        if _, err := tx.Exec(`insert into block_cids select * from bc on conflict do nothing `); err != nil {
            return xerrors.Errorf("block cids put: %w", err)
        }
    }

    {
        stmt, err := tx.Prepare(`copy de (round, data) from STDIN`)
        if err != nil {
            return err
        }

        for _, bh := range bhs {
            for _, ent := range bh.BeaconEntries {
                if _, err := stmt.Exec(ent.Round, ent.Data); err != nil {
                    log.Error(err)
                }
            }
        }

        if err := stmt.Close(); err != nil {
            return err
        }

        if _, err := tx.Exec(`insert into drand_entries select * from de on conflict do nothing `); err != nil {
            return xerrors.Errorf("drand entries put: %w", err)
        }
    }

    {
        stmt, err := tx.Prepare(`copy bde (round, block) from STDIN`)
        if err != nil {
            return err
        }

        for _, bh := range bhs {
            for _, ent := range bh.BeaconEntries {
                if _, err := stmt.Exec(ent.Round, bh.Cid().String()); err != nil {
                    log.Error(err)
                }
            }
        }

        if err := stmt.Close(); err != nil {
            return err
        }

        if _, err := tx.Exec(`insert into block_drand_entries select * from bde on conflict do nothing `); err != nil {
            return xerrors.Errorf("block drand entries put: %w", err)
        }
    }

    {
        stmt, err := tx.Prepare(`copy tbp (block, parent) from STDIN`)
        if err != nil {
            return err
        }

        for _, bh := range bhs {
            for _, parent := range bh.Parents {
                if _, err := stmt.Exec(bh.Cid().String(), parent.String()); err != nil {
                    log.Error(err)
                }
            }
        }

        if err := stmt.Close(); err != nil {
            return err
        }

        if _, err := tx.Exec(`insert into block_parents select * from tbp on conflict do nothing `); err != nil {
            return xerrors.Errorf("parent put: %w", err)
        }
    }

    if sync {
        stmt, err := tx.Prepare(`copy bs (cid, synced_at) from stdin `)
        if err != nil {
            return err
        }

        for _, bh := range bhs {
            if _, err := stmt.Exec(bh.Cid().String(), timestamp.Unix()); err != nil {
                log.Error(err)
            }
        }

        if err := stmt.Close(); err != nil {
            return err
        }

        if _, err := tx.Exec(`insert into blocks_synced select * from bs on conflict do nothing `); err != nil {
            return xerrors.Errorf("synced put: %w", err)
        }
    }

    stmt2, err := tx.Prepare(`copy b (cid, parentWeight, parentStateRoot, height, miner, "timestamp", ticket, eprof, forksig) from stdin`)
    if err != nil {
        return err
    }

    for _, bh := range bhs {
        var eprof interface{}
        if bh.ElectionProof != nil {
            eprof = bh.ElectionProof.VRFProof
        }

        if bh.Ticket == nil {
            log.Warnf("got a block with nil ticket")

            bh.Ticket = &types.Ticket{
                VRFProof: []byte{},
            }
        }

        if _, err := stmt2.Exec(
            bh.Cid().String(),
            bh.ParentWeight.String(),
            bh.ParentStateRoot.String(),
            bh.Height,
            bh.Miner.String(),
            bh.Timestamp,
            bh.Ticket.VRFProof,
            eprof,
            bh.ForkSignaling); err != nil {
            log.Error(err)
        }
    }

    if err := stmt2.Close(); err != nil {
        return xerrors.Errorf("s2 close: %w", err)
    }

    if _, err := tx.Exec(`insert into blocks select * from b on conflict do nothing `); err != nil {
        return xerrors.Errorf("blk put: %w", err)
    }

    return tx.Commit()
}
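
Each section above follows the same bulk-load shape: COPY rows into a temp table, then a single insert ... on conflict do nothing into the real table, so re-synced headers stay idempotent. With lib/pq, preparing a raw `copy ... from STDIN` statement is equivalent to the documented pq.CopyIn helper; a sketch of the same block_cids stage using it:

stmt, err := tx.Prepare(pq.CopyIn("bc", "cid")) // generates `COPY "bc" ("cid") FROM STDIN`
if err != nil {
    return err
}
for _, bh := range bhs {
    if _, err := stmt.Exec(bh.Cid().String()); err != nil {
        return err
    }
}
if _, err := stmt.Exec(); err != nil { // a final no-argument Exec flushes the COPY buffer
    return err
}
if err := stmt.Close(); err != nil {
    return err
}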

@@ -1,350 +0,0 @@
package main

import (
    "fmt"
    "html/template"
    "net/http"
    "os"
    "path/filepath"
    "strconv"

    rice "github.com/GeertJohan/go.rice"
    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/specs-actors/actors/abi"
    "github.com/filecoin-project/specs-actors/actors/abi/big"
    "github.com/ipfs/go-cid"
    "golang.org/x/xerrors"

    "github.com/filecoin-project/lotus/api"
    "github.com/filecoin-project/lotus/chain/types"
)

type handler struct {
    api    api.FullNode
    st     *storage
    site   *rice.Box
    assets http.Handler

    templates map[string]*template.Template
}

func newHandler(api api.FullNode, st *storage) (*handler, error) {
    h := &handler{
        api:  api,
        st:   st,
        site: rice.MustFindBox("site"),

        templates: map[string]*template.Template{},
    }
    h.assets = http.FileServer(h.site.HTTPBox())

    funcs := template.FuncMap{
        "count":    h.count,
        "countCol": h.countCol,
        "sum":      h.sum,
        "netPower": h.netPower,
        "queryNum": h.queryNum,
        "sizeStr":  sizeStr,
        "strings":  h.strings,
        "qstr":     h.qstr,
        "qstrs":    h.qstrs,
        "messages": h.messages,

        "pageDown": pageDown,
        "parseInt": func(s string) (int, error) { i, e := strconv.ParseInt(s, 10, 64); return int(i), e },
        "substr":   func(i, j int, s string) string { return s[i:j] },
        "sub":      func(a, b int) int { return a - b }, // TODO: really not builtin?

        "param": func(string) string { return "" }, // replaced in request handler
    }

    base := template.New("")

    base.Funcs(funcs)

    return h, h.site.Walk("", func(path string, info os.FileInfo, err error) error {
        if filepath.Ext(path) != ".html" {
            return nil
        }
        if err != nil {
            return err
        }
        log.Info(path)

        h.templates["/"+path], err = base.New(path).Parse(h.site.MustString(path))
        return err
    })
}

func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    h, err := newHandler(h.api, h.st) // for faster dev
    if err != nil {
        log.Error(err)
        return
    }

    p := r.URL.Path
    if p == "/" {
        p = "/index.html"
    }

    t, ok := h.templates[p]
    if !ok {
        h.assets.ServeHTTP(w, r)
        return
    }

    t, err = t.Clone()
    if err != nil {
        log.Error(err)
        return
    }

    t.Funcs(map[string]interface{}{
        "param": r.FormValue,
    })

    if err := t.Execute(w, nil); err != nil {
        log.Errorf("%+v", err)
        return
    }

    log.Info(r.URL.Path)
}

// Template funcs

func (h *handler) count(table string, filters ...string) (int, error) {
    // explicitly not caring about sql injection too much, this doesn't take user input
    filts := ""
    if len(filters) > 0 {
        filts = " where "
        for _, filter := range filters {
            filts += filter + " and "
        }
        filts = filts[:len(filts)-5]
    }

    var c int
    err := h.st.db.QueryRow("select count(1) from " + table + filts).Scan(&c)
    if err != nil {
        return 0, err
    }

    return c, nil
}

func (h *handler) countCol(table string, col string, filters ...string) (int, error) {
    // explicitly not caring about sql injection too much, this doesn't take user input
    filts := ""
    if len(filters) > 0 {
        filts = " where "
        for _, filter := range filters {
            filts += filter + " and "
        }
        filts = filts[:len(filts)-5]
    }

    var c int
    err := h.st.db.QueryRow("select count(distinct " + col + ") from " + table + filts).Scan(&c)
    if err != nil {
        return 0, err
    }

    return c, nil
}

func (h *handler) sum(table string, col string) (types.BigInt, error) {
    return h.queryNum("select sum(cast(" + col + " as bigint)) from " + table)
}

func (h *handler) netPower(slashFilt string) (types.BigInt, error) {
    if slashFilt != "" {
        slashFilt = " where " + slashFilt
    }
    return h.queryNum(`select sum(power) from (select distinct on (addr) power, slashed_at from miner_heads
        inner join blocks b on miner_heads.stateroot = b.parentStateRoot
        order by addr, height desc) as p` + slashFilt)
}

func (h *handler) queryNum(q string, p ...interface{}) (types.BigInt, error) {
    // explicitly not caring about sql injection too much, this doesn't take user input
    var c string
    err := h.st.db.QueryRow(q, p...).Scan(&c)
    if err != nil {
        log.Error("qnum ", q, p, err)
        return types.NewInt(0), err
    }

    i := types.NewInt(0)
    _, ok := i.SetString(c, 10)
    if !ok {
        return types.NewInt(0), xerrors.New("num parse error: " + c)
    }
    return i, nil
}

var units = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB"}

func sizeStr(size types.BigInt) string {
    size = types.BigMul(size, types.NewInt(100))
    i := 0
    for types.BigCmp(size, types.NewInt(102400)) >= 0 && i < len(units)-1 {
        size = types.BigDiv(size, types.NewInt(1024))
        i++
    }
    return fmt.Sprintf("%s.%s %s", types.BigDiv(size, types.NewInt(100)), types.BigMod(size, types.NewInt(100)), units[i])
}
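
sizeStr scales the value by 100 up front so two decimal digits survive the integer divisions. Worked example:

// sizeStr(types.NewInt(1536)):
//   1536 × 100 = 153600
//   153600 ≥ 102400 → ÷1024 = 150, i = 1 ("KiB"); 150 < 102400 → stop
//   output: 150 ÷ 100 = 1, 150 mod 100 = 50 → "1.50 KiB"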

func (h *handler) strings(table string, col string, filter string, args ...interface{}) (out []string, err error) {
    if len(filter) > 0 {
        filter = " where " + filter
    }
    log.Info("strings qstr ", "select "+col+" from "+table+filter, args)
    rws, err := h.st.db.Query("select "+col+" from "+table+filter, args...)
    if err != nil {
        return nil, err
    }
    for rws.Next() {
        var r string
        if err := rws.Scan(&r); err != nil {
            return nil, err
        }
        out = append(out, r)
    }

    return
}

func (h *handler) qstr(q string, p ...interface{}) (string, error) {
    // explicitly not caring about sql injection too much, this doesn't take user input
    r, err := h.qstrs(q, 1, p...)
    if err != nil {
        return "", err
    }
    return r[0], nil
}

func (h *handler) qstrs(q string, n int, p ...interface{}) ([]string, error) {
    // explicitly not caring about sql injection too much, this doesn't take user input
    c := make([]string, n)
    ia := make([]interface{}, n)
    for i := range c {
        ia[i] = &c[i]
    }
    err := h.st.db.QueryRow(q, p...).Scan(ia...)
    if err != nil {
        log.Error("qnum ", q, p, err)
        return nil, err
    }

    return c, nil
}

type sbig types.BigInt

func (bi *sbig) Scan(value interface{}) error {
    switch value := value.(type) {
    case string:
        i, ok := big.NewInt(0).SetString(value, 10)
        if !ok {
            if value == "<nil>" {
                return nil
            }
            return xerrors.Errorf("failed to parse bigint string: '%s'", value)
        }

        bi.Int = i

        return nil
    case int64:
        bi.Int = big.NewInt(value).Int
        return nil
    default:
        return xerrors.Errorf("non-string types unsupported: %T", value)
    }
}

type Message struct {
    To   address.Address
    From address.Address

    Nonce uint64

    Value sbig

    GasPrice sbig
    GasLimit int64

    Method abi.MethodNum
    Params []byte
}

func (h *handler) messages(filter string, args ...interface{}) (out []types.Message, err error) {
    if len(filter) > 0 {
        filter = " where " + filter
    }

    log.Info("select * from messages " + filter)

    rws, err := h.st.db.Query("select * from messages "+filter, args...)
    if err != nil {
        return nil, err
    }
    for rws.Next() {
        var r Message
        var cs string

        if err := rws.Scan(
            &cs,
            &r.From,
            &r.To,
            &r.Nonce,
            &r.Value,
            &r.GasPrice,
            &r.GasLimit,
            &r.Method,
            &r.Params,
        ); err != nil {
            return nil, err
        }

        c, err := cid.Parse(cs)
        if err != nil {
            return nil, err
        }
        tr := types.Message{
            To:       r.To,
            From:     r.From,
            Nonce:    r.Nonce,
            Value:    types.BigInt(r.Value),
            GasPrice: types.BigInt(r.GasPrice),
            GasLimit: r.GasLimit,
            Method:   r.Method,
            Params:   r.Params,
        }
        if c != tr.Cid() {
            log.Warn("msg cid doesn't match")
        }

        out = append(out, tr)
    }

    return
}

func pageDown(base, n int) []int {
    out := make([]int, n)
    for i := range out {
        out[i] = base - i
    }

    return out
}

var _ http.Handler = &handler{}
cmd/lotus-chainwatch/util/contextStore.go (new file, 51 lines)
@@ -0,0 +1,51 @@
package util

import (
    "bytes"
    "context"
    "fmt"

    "github.com/ipfs/go-cid"
    cbg "github.com/whyrusleeping/cbor-gen"

    "github.com/filecoin-project/lotus/api"
)

// TODO: extract this to a common location in lotus and reuse the code

// APIIpldStore is required for AMT and HAMT access.
type APIIpldStore struct {
    ctx context.Context
    api api.FullNode
}

func NewAPIIpldStore(ctx context.Context, api api.FullNode) *APIIpldStore {
    return &APIIpldStore{
        ctx: ctx,
        api: api,
    }
}

func (ht *APIIpldStore) Context() context.Context {
    return ht.ctx
}

func (ht *APIIpldStore) Get(ctx context.Context, c cid.Cid, out interface{}) error {
    raw, err := ht.api.ChainReadObj(ctx, c)
    if err != nil {
        return err
    }

    cu, ok := out.(cbg.CBORUnmarshaler)
    if ok {
        if err := cu.UnmarshalCBOR(bytes.NewReader(raw)); err != nil {
            return err
        }
        return nil
    }
    return fmt.Errorf("object does not implement CBORUnmarshaler: %T", out)
}

func (ht *APIIpldStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) {
    return cid.Undef, fmt.Errorf("Put is not implemented on APIIpldStore")
}
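
This read-only store is what lets chainwatch hand chain state to the specs-actors HAMT/AMT helpers without a local blockstore. A usage sketch, where node and powerState stand in for values obtained elsewhere:

store := util.NewAPIIpldStore(ctx, node)           // node: an api.FullNode client
claims, err := adt.AsMap(store, powerState.Claims) // powerState: a decoded power.State
if err != nil {
    log.Error(err)
}
_ = claims // lookups and iteration now resolve objects over the node's RPC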
go.mod
@@ -74,7 +74,7 @@ require (
github.com/ipld/go-car v0.1.1-0.20200526133713-1c7508d55aae
github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e
github.com/kelseyhightower/envconfig v1.4.0
github.com/lib/pq v1.2.0
github.com/lib/pq v1.7.0
github.com/libp2p/go-eventbus v0.2.1
github.com/libp2p/go-libp2p v0.10.0
github.com/libp2p/go-libp2p-connmgr v0.2.4
@@ -119,6 +119,7 @@ require (
go.uber.org/multierr v1.5.0
go.uber.org/zap v1.15.0
go4.org v0.0.0-20190313082347-94abd6928b1d // indirect
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980
golang.org/x/time v0.0.0-20191024005414-555d28b269f0
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
go.sum
@@ -699,8 +699,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.7.0 h1:h93mCPfUSkaul3Ka/VG8uZdmW1uMHDGxzu0NWHuJmHY=
github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ=
github.com/libp2p/go-addr-util v0.0.2 h1:7cWK5cdA5x72jX0g8iLrQWm5TRJZ6CzGdPEhWj7plWU=
github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E=
@@ -2,6 +2,7 @@ package full

import (
    "context"
    "github.com/filecoin-project/specs-actors/actors/abi/big"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/lotus/api"
@@ -135,6 +136,33 @@ func (a *MsigAPI) MsigPropose(ctx context.Context, msig address.Address, to addr
    return smsg.Cid(), nil
}

func (a *MsigAPI) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
    enc, actErr := serializeSwapParams(oldAdd, newAdd)
    if actErr != nil {
        return cid.Undef, actErr
    }

    return a.MsigPropose(ctx, msig, msig, big.Zero(), src, uint64(builtin.MethodsMultisig.SwapSigner), enc)
}

func (a *MsigAPI) MsigSwapApprove(ctx context.Context, msig address.Address, src address.Address, txID uint64, proposer address.Address, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
    enc, actErr := serializeSwapParams(oldAdd, newAdd)
    if actErr != nil {
        return cid.Undef, actErr
    }

    return a.MsigApprove(ctx, msig, txID, proposer, msig, big.Zero(), src, uint64(builtin.MethodsMultisig.SwapSigner), enc)
}

func (a *MsigAPI) MsigSwapCancel(ctx context.Context, msig address.Address, src address.Address, txID uint64, oldAdd address.Address, newAdd address.Address) (cid.Cid, error) {
    enc, actErr := serializeSwapParams(oldAdd, newAdd)
    if actErr != nil {
        return cid.Undef, actErr
    }

    return a.MsigCancel(ctx, msig, txID, msig, big.Zero(), src, uint64(builtin.MethodsMultisig.SwapSigner), enc)
}

func (a *MsigAPI) MsigApprove(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) {
    return a.msigApproveOrCancel(ctx, api.MsigApprove, msig, txID, proposer, to, amt, src, method, params)
}
@@ -223,3 +251,15 @@ func (a *MsigAPI) msigApproveOrCancel(ctx context.Context, operation api.MsigPro

    return smsg.Cid(), nil
}

func serializeSwapParams(old address.Address, new address.Address) ([]byte, error) {
    enc, actErr := actors.SerializeParams(&samsig.SwapSignerParams{
        From: old,
        To:   new,
    })
    if actErr != nil {
        return nil, actErr
    }

    return enc, nil
}
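
Putting the new API methods together, a signer swap is a propose/approve round trip against the multisig. A sketch, where the addresses and the transaction ID are placeholders:

pcid, err := node.MsigSwapPropose(ctx, msigAddr, proposerAddr, oldSigner, newSigner)
if err != nil {
    log.Fatal(err)
}
// wait for pcid to land and read the proposal's transaction ID from its on-chain
// return value (txID below), then a second signer approves:
acid, err := node.MsigSwapApprove(ctx, msigAddr, approverAddr, txID, proposerAddr, oldSigner, newSigner)
_ = acid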