2019-12-06 14:06:42 +00:00
|
|
|
package vm
|
|
|
|
|
|
|
|
import (
|
2020-04-10 15:14:43 +00:00
|
|
|
"bytes"
|
2020-02-26 22:54:34 +00:00
|
|
|
"context"
|
|
|
|
"fmt"
|
2020-06-05 17:47:49 +00:00
|
|
|
goruntime "runtime"
|
2020-05-28 00:06:29 +00:00
|
|
|
"sync"
|
2020-02-26 22:54:34 +00:00
|
|
|
|
2020-01-13 20:47:27 +00:00
|
|
|
"github.com/filecoin-project/go-address"
|
2020-02-18 07:15:30 +00:00
|
|
|
"github.com/ipfs/go-cid"
|
2020-04-10 15:14:43 +00:00
|
|
|
cbor "github.com/ipfs/go-ipld-cbor"
|
2020-04-16 17:17:56 +00:00
|
|
|
"github.com/minio/blake2b-simd"
|
2020-02-18 07:15:30 +00:00
|
|
|
mh "github.com/multiformats/go-multihash"
|
|
|
|
"golang.org/x/xerrors"
|
2020-03-26 19:34:38 +00:00
|
|
|
|
2020-04-10 15:14:43 +00:00
|
|
|
"github.com/filecoin-project/lotus/chain/state"
|
|
|
|
"github.com/filecoin-project/lotus/chain/types"
|
2020-07-01 11:47:40 +00:00
|
|
|
"github.com/filecoin-project/lotus/lib/adtutil"
|
2020-04-10 15:14:43 +00:00
|
|
|
"github.com/filecoin-project/lotus/lib/sigs"
|
2020-03-26 19:34:38 +00:00
|
|
|
"github.com/filecoin-project/specs-actors/actors/abi"
|
2020-04-10 20:22:24 +00:00
|
|
|
"github.com/filecoin-project/specs-actors/actors/builtin/miner"
|
2020-03-26 19:34:38 +00:00
|
|
|
"github.com/filecoin-project/specs-actors/actors/crypto"
|
|
|
|
"github.com/filecoin-project/specs-actors/actors/runtime"
|
|
|
|
|
2020-03-27 23:00:21 +00:00
|
|
|
"github.com/filecoin-project/sector-storage/ffiwrapper"
|
2019-12-06 14:06:42 +00:00
|
|
|
)
|
|
|
|
|
func init() {
	// Register the filecoin multihash code so CIDs using it can be
	// parsed/printed by go-multihash throughout the VM.
	mh.Codes[0xf104] = "filecoin"
}
|
|
|
|
|
2019-12-06 14:06:42 +00:00
|
|
|
// Actual type is defined in chain/types/vmcontext.go because the VMContext interface is there
|
|
|
|
|
// SyscallBuilder constructs a runtime.Syscalls implementation bound to a
// specific invocation context, state tree, and IPLD store.
type SyscallBuilder func(ctx context.Context, cstate *state.StateTree, cst cbor.IpldStore) runtime.Syscalls
|
|
|
|
|
|
|
|
func Syscalls(verifier ffiwrapper.Verifier) SyscallBuilder {
|
|
|
|
return func(ctx context.Context, cstate *state.StateTree, cst cbor.IpldStore) runtime.Syscalls {
|
|
|
|
return &syscallShim{
|
|
|
|
ctx: ctx,
|
|
|
|
|
|
|
|
cstate: cstate,
|
|
|
|
cst: cst,
|
|
|
|
|
|
|
|
verifier: verifier,
|
|
|
|
}
|
|
|
|
}
|
2020-02-18 07:15:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// syscallShim implements runtime.Syscalls for the VM, bridging actor
// syscall requests to chain state and proof verification.
type syscallShim struct {
	ctx context.Context

	// cstate is the state tree used to resolve actors/addresses.
	cstate *state.StateTree
	// cst is the IPLD store used to load actor state objects.
	cst cbor.IpldStore
	// verifier performs seal/PoSt proof verification.
	verifier ffiwrapper.Verifier
}
|
|
|
|
|
2020-06-15 16:30:49 +00:00
|
|
|
func (ss *syscallShim) ComputeUnsealedSectorCID(st abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) {
|
2020-02-23 00:47:47 +00:00
|
|
|
var sum abi.PaddedPieceSize
|
2020-02-18 07:15:30 +00:00
|
|
|
for _, p := range pieces {
|
2020-02-23 00:47:47 +00:00
|
|
|
sum += p.Size
|
2020-02-18 07:15:30 +00:00
|
|
|
}
|
|
|
|
|
2020-03-26 02:50:56 +00:00
|
|
|
commd, err := ffiwrapper.GenerateUnsealedCID(st, pieces)
|
2020-02-18 07:15:30 +00:00
|
|
|
if err != nil {
|
2020-02-23 00:47:47 +00:00
|
|
|
log.Errorf("generate data commitment failed: %s", err)
|
2020-02-18 07:15:30 +00:00
|
|
|
return cid.Undef, err
|
|
|
|
}
|
|
|
|
|
2020-02-27 00:42:39 +00:00
|
|
|
return commd, nil
|
2020-02-18 07:15:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func (ss *syscallShim) HashBlake2b(data []byte) [32]byte {
|
2020-04-16 17:17:56 +00:00
|
|
|
return blake2b.Sum256(data)
|
2020-02-18 07:15:30 +00:00
|
|
|
}
|
|
|
|
|
2020-04-10 15:14:43 +00:00
|
|
|
// Checks validity of the submitted consensus fault with the two block headers needed to prove the fault
|
|
|
|
// and an optional extra one to check common ancestry (as needed).
|
|
|
|
// Note that the blocks are ordered: the method requires a.Epoch() <= b.Epoch().
|
2020-04-16 16:41:45 +00:00
|
|
|
func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime.ConsensusFault, error) {
|
2020-04-10 15:14:43 +00:00
|
|
|
// Note that block syntax is not validated. Any validly signed block will be accepted pursuant to the below conditions.
|
|
|
|
// Whether or not it could ever have been accepted in a chain is not checked/does not matter here.
|
|
|
|
// for that reason when checking block parent relationships, rather than instantiating a Tipset to do so
|
|
|
|
// (which runs a syntactic check), we do it directly on the CIDs.
|
|
|
|
|
|
|
|
// (0) cheap preliminary checks
|
|
|
|
|
|
|
|
// can blocks be decoded properly?
|
2020-04-10 20:22:24 +00:00
|
|
|
var blockA, blockB types.BlockHeader
|
2020-04-10 15:14:43 +00:00
|
|
|
if decodeErr := blockA.UnmarshalCBOR(bytes.NewReader(a)); decodeErr != nil {
|
2020-04-10 20:22:24 +00:00
|
|
|
return nil, xerrors.Errorf("cannot decode first block header: %w", decodeErr)
|
2020-04-10 15:14:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if decodeErr := blockB.UnmarshalCBOR(bytes.NewReader(b)); decodeErr != nil {
|
2020-04-10 20:22:24 +00:00
|
|
|
return nil, xerrors.Errorf("cannot decode second block header: %f", decodeErr)
|
2020-04-10 15:14:43 +00:00
|
|
|
}
|
|
|
|
|
2020-07-20 13:29:07 +00:00
|
|
|
// are blocks the same?
|
|
|
|
if blockA.Cid().Equals(blockB.Cid()) {
|
|
|
|
return nil, fmt.Errorf("no consensus fault: submitted blocks are the same")
|
|
|
|
}
|
|
|
|
|
2020-04-10 15:14:43 +00:00
|
|
|
// (1) check conditions necessary to any consensus fault
|
|
|
|
|
|
|
|
// were blocks mined by same miner?
|
|
|
|
if blockA.Miner != blockB.Miner {
|
|
|
|
return nil, fmt.Errorf("no consensus fault: blocks not mined by same miner")
|
|
|
|
}
|
|
|
|
|
|
|
|
// block a must be earlier or equal to block b, epoch wise (ie at least as early in the chain).
|
|
|
|
if blockB.Height < blockA.Height {
|
|
|
|
return nil, fmt.Errorf("first block must not be of higher height than second")
|
|
|
|
}
|
|
|
|
|
2020-04-10 20:22:24 +00:00
|
|
|
// (2) check for the consensus faults themselves
|
2020-04-10 15:14:43 +00:00
|
|
|
var consensusFault *runtime.ConsensusFault
|
|
|
|
|
|
|
|
// (a) double-fork mining fault
|
|
|
|
if blockA.Height == blockB.Height {
|
|
|
|
consensusFault = &runtime.ConsensusFault{
|
|
|
|
Target: blockA.Miner,
|
|
|
|
Epoch: blockB.Height,
|
|
|
|
Type: runtime.ConsensusFaultDoubleForkMining,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// (b) time-offset mining fault
|
|
|
|
// strictly speaking no need to compare heights based on double fork mining check above,
|
|
|
|
// but at same height this would be a different fault.
|
2020-06-11 15:52:44 +00:00
|
|
|
if types.CidArrsEqual(blockA.Parents, blockB.Parents) && blockA.Height != blockB.Height {
|
2020-04-10 15:14:43 +00:00
|
|
|
consensusFault = &runtime.ConsensusFault{
|
|
|
|
Target: blockA.Miner,
|
|
|
|
Epoch: blockB.Height,
|
|
|
|
Type: runtime.ConsensusFaultTimeOffsetMining,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// (c) parent-grinding fault
|
2020-04-10 20:22:24 +00:00
|
|
|
// Here extra is the "witness", a third block that shows the connection between A and B as
|
|
|
|
// A's sibling and B's parent.
|
2020-04-10 15:14:43 +00:00
|
|
|
// Specifically, since A is of lower height, it must be that B was mined omitting A from its tipset
|
2020-04-10 20:22:24 +00:00
|
|
|
var blockC types.BlockHeader
|
|
|
|
if len(extra) > 0 {
|
|
|
|
if decodeErr := blockC.UnmarshalCBOR(bytes.NewReader(extra)); decodeErr != nil {
|
|
|
|
return nil, xerrors.Errorf("cannot decode extra: %w", decodeErr)
|
|
|
|
}
|
|
|
|
|
2020-04-16 19:52:49 +00:00
|
|
|
if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height &&
|
2020-04-10 20:34:04 +00:00
|
|
|
types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) {
|
2020-04-10 20:22:24 +00:00
|
|
|
consensusFault = &runtime.ConsensusFault{
|
|
|
|
Target: blockA.Miner,
|
|
|
|
Epoch: blockB.Height,
|
|
|
|
Type: runtime.ConsensusFaultParentGrinding,
|
|
|
|
}
|
2020-04-10 15:14:43 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-10 20:22:24 +00:00
|
|
|
// (3) return if no consensus fault by now
|
|
|
|
if consensusFault == nil {
|
2020-07-07 05:29:16 +00:00
|
|
|
return nil, xerrors.Errorf("no consensus fault detected")
|
2020-04-10 20:22:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// else
|
|
|
|
// (4) expensive final checks
|
2020-04-10 15:14:43 +00:00
|
|
|
|
|
|
|
// check blocks are properly signed by their respective miner
|
|
|
|
// note we do not need to check extra's: it is a parent to block b
|
|
|
|
// which itself is signed, so it was willingly included by the miner
|
|
|
|
if sigErr := ss.VerifyBlockSig(&blockA); sigErr != nil {
|
2020-04-10 20:22:24 +00:00
|
|
|
return nil, xerrors.Errorf("cannot verify first block sig: %w", sigErr)
|
2020-04-10 15:14:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if sigErr := ss.VerifyBlockSig(&blockB); sigErr != nil {
|
2020-06-11 15:52:44 +00:00
|
|
|
return nil, xerrors.Errorf("cannot verify second block sig: %w", sigErr)
|
2020-04-10 15:14:43 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return consensusFault, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (ss *syscallShim) VerifyBlockSig(blk *types.BlockHeader) error {
|
|
|
|
|
2020-04-10 20:22:24 +00:00
|
|
|
// get appropriate miner actor
|
|
|
|
act, err := ss.cstate.GetActor(blk.Miner)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2020-04-10 15:14:43 +00:00
|
|
|
|
2020-04-10 20:22:24 +00:00
|
|
|
// use that to get the miner state
|
|
|
|
var mas miner.State
|
|
|
|
if err = ss.cst.Get(ss.ctx, act.Head, &mas); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-07-01 11:47:40 +00:00
|
|
|
info, err := mas.GetInfo(adtutil.NewStore(ss.ctx, ss.cst))
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-04-10 20:22:24 +00:00
|
|
|
// and use to get resolved workerKey
|
2020-07-01 11:47:40 +00:00
|
|
|
waddr, err := ResolveToKeyAddr(ss.cstate, ss.cst, info.Worker)
|
2020-04-10 20:22:24 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2020-04-10 15:14:43 +00:00
|
|
|
|
2020-06-02 14:29:39 +00:00
|
|
|
if err := sigs.CheckBlockSignature(ss.ctx, blk, waddr); err != nil {
|
2020-04-10 15:14:43 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
2020-02-18 07:15:30 +00:00
|
|
|
}
|
|
|
|
|
2020-04-10 12:19:06 +00:00
|
|
|
func (ss *syscallShim) VerifyPoSt(proof abi.WindowPoStVerifyInfo) error {
|
|
|
|
ok, err := ss.verifier.VerifyWindowPoSt(context.TODO(), proof)
|
2020-02-26 22:54:34 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("proof was invalid")
|
|
|
|
}
|
|
|
|
return nil
|
2020-02-18 07:15:30 +00:00
|
|
|
}
|
|
|
|
|
2020-02-26 22:54:34 +00:00
|
|
|
func (ss *syscallShim) VerifySeal(info abi.SealVerifyInfo) error {
|
2020-02-18 07:15:30 +00:00
|
|
|
//_, span := trace.StartSpan(ctx, "ValidatePoRep")
|
|
|
|
//defer span.End()
|
|
|
|
|
|
|
|
miner, err := address.NewIDAddress(uint64(info.Miner))
|
|
|
|
if err != nil {
|
2020-02-27 00:42:39 +00:00
|
|
|
return xerrors.Errorf("weirdly failed to construct address: %w", err)
|
2020-02-18 07:15:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ticket := []byte(info.Randomness)
|
2020-05-27 20:53:20 +00:00
|
|
|
proof := info.Proof
|
2020-02-18 07:15:30 +00:00
|
|
|
seed := []byte(info.InteractiveRandomness)
|
|
|
|
|
2020-05-22 16:26:14 +00:00
|
|
|
log.Debugf("Verif r:%x; d:%x; m:%s; t:%x; s:%x; N:%d; p:%x", info.SealedCID, info.UnsealedCID, miner, ticket, seed, info.SectorID.Number, proof)
|
2020-02-23 20:32:14 +00:00
|
|
|
|
2020-02-18 07:15:30 +00:00
|
|
|
//func(ctx context.Context, maddr address.Address, ssize abi.SectorSize, commD, commR, ticket, proof, seed []byte, sectorID abi.SectorNumber)
|
2020-02-27 00:42:39 +00:00
|
|
|
ok, err := ss.verifier.VerifySeal(info)
|
2020-02-18 07:15:30 +00:00
|
|
|
if err != nil {
|
2020-02-26 22:54:34 +00:00
|
|
|
return xerrors.Errorf("failed to validate PoRep: %w", err)
|
|
|
|
}
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("invalid proof")
|
2020-02-18 07:15:30 +00:00
|
|
|
}
|
|
|
|
|
2020-02-26 22:54:34 +00:00
|
|
|
return nil
|
2020-02-18 07:15:30 +00:00
|
|
|
}
|
|
|
|
|
2020-02-26 22:54:34 +00:00
|
|
|
func (ss *syscallShim) VerifySignature(sig crypto.Signature, addr address.Address, input []byte) error {
|
2020-05-05 19:26:34 +00:00
|
|
|
// TODO: in genesis setup, we are currently faking signatures
|
|
|
|
|
|
|
|
kaddr, err := ResolveToKeyAddr(ss.cstate, ss.cst, addr)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2019-12-06 14:06:42 +00:00
|
|
|
}
|
2020-05-05 19:26:34 +00:00
|
|
|
|
|
|
|
return sigs.Verify(&sig, kaddr, input)
|
2019-12-06 14:06:42 +00:00
|
|
|
}
|
2020-05-28 00:06:29 +00:00
|
|
|
|
2020-06-15 23:05:29 +00:00
|
|
|
// BatchSealVerifyParallelism bounds the number of concurrent seal
// verifications run by BatchVerifySeals; it defaults to the CPU count.
var BatchSealVerifyParallelism = goruntime.NumCPU()
|
|
|
|
|
2020-05-28 00:06:29 +00:00
|
|
|
func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]abi.SealVerifyInfo) (map[address.Address][]bool, error) {
|
|
|
|
out := make(map[address.Address][]bool)
|
|
|
|
|
2020-06-15 23:05:29 +00:00
|
|
|
sema := make(chan struct{}, BatchSealVerifyParallelism)
|
2020-06-05 17:47:49 +00:00
|
|
|
|
2020-05-28 00:06:29 +00:00
|
|
|
var wg sync.WaitGroup
|
|
|
|
for addr, seals := range inp {
|
|
|
|
results := make([]bool, len(seals))
|
|
|
|
out[addr] = results
|
|
|
|
|
|
|
|
for i, s := range seals {
|
|
|
|
wg.Add(1)
|
|
|
|
go func(ma address.Address, ix int, svi abi.SealVerifyInfo, res []bool) {
|
|
|
|
defer wg.Done()
|
2020-06-05 17:47:49 +00:00
|
|
|
sema <- struct{}{}
|
|
|
|
|
2020-05-28 00:06:29 +00:00
|
|
|
if err := ss.VerifySeal(svi); err != nil {
|
|
|
|
log.Warnw("seal verify in batch failed", "miner", ma, "index", ix, "err", err)
|
|
|
|
res[ix] = false
|
|
|
|
} else {
|
|
|
|
res[ix] = true
|
|
|
|
}
|
2020-06-05 17:47:49 +00:00
|
|
|
|
|
|
|
<-sema
|
2020-05-28 00:06:29 +00:00
|
|
|
}(addr, i, s, results)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
wg.Wait()
|
|
|
|
|
|
|
|
return out, nil
|
|
|
|
}
|