fixed bugs in consistent broadcast integration
commit d1a4f1dc50
parent 1dadff303c
@@ -53,15 +53,16 @@ type ConsistentBCast struct {
 	m map[abi.ChainEpoch]*bcastDict
 }
 
-func newBcastDict(delay time.Duration) *bcastDict {
+func newBcastDict() *bcastDict {
 	return &bcastDict{new(sync.Map)}
 }
 
 // TODO: What if the VRFProof is already small?? We don't need the CID. Useless computation.
 func BCastKey(bh *types.BlockHeader) (multihash.Multihash, error) {
-	proof := bh.Ticket.VRFProof
-	binary.PutVarint(proof, int64(bh.Height))
-	return multihash.Sum(proof, multihash.SHA2_256, -1)
+	k := make([]byte, len(bh.Ticket.VRFProof))
+	copy(k, bh.Ticket.VRFProof)
+	binary.PutVarint(k, int64(bh.Height))
+	return multihash.Sum(k, multihash.SHA2_256, -1)
 }
 
 func NewConsistentBCast(delay time.Duration) *ConsistentBCast {
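The BCastKey change above fixes a slice-aliasing bug: binary.PutVarint wrote the varint-encoded height directly into bh.Ticket.VRFProof, mutating the block header it was only supposed to read. A minimal standalone sketch of the pitfall (hypothetical helper names, not part of the commit):

package main

import (
	"encoding/binary"
	"fmt"
)

// keyUnsafe mirrors the old code: binary.PutVarint writes the encoded
// height into the slice it is handed, and since a slice parameter
// aliases the caller's backing array, the original proof is clobbered.
func keyUnsafe(proof []byte, height int64) []byte {
	binary.PutVarint(proof, height)
	return proof
}

// keySafe mirrors the fix: copy the proof into a fresh buffer first,
// so the caller's bytes are left untouched.
func keySafe(proof []byte, height int64) []byte {
	k := make([]byte, len(proof))
	copy(k, proof)
	binary.PutVarint(k, height)
	return k
}

func main() {
	proof := []byte{0xAA, 0xBB, 0xCC, 0xDD}
	_ = keySafe(proof, 42)
	fmt.Printf("after keySafe:   %x\n", proof) // aabbccdd (untouched)
	_ = keyUnsafe(proof, 42)
	fmt.Printf("after keyUnsafe: %x\n", proof) // 54bbccdd (first byte overwritten)
}

Note that even the fixed version overwrites the leading bytes of the copied proof with the height varint rather than appending it; the fix only protects the caller's data.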
@@ -93,7 +94,7 @@ func (cb *ConsistentBCast) RcvBlock(ctx context.Context, blk *types.BlockMsg) error {
 	cb.lk.Lock()
 	bcastDict, ok := cb.m[blk.Header.Height]
 	if !ok {
-		bcastDict = newBcastDict(cb.delay)
+		bcastDict = newBcastDict()
 		cb.m[blk.Header.Height] = bcastDict
 	}
 	cb.lk.Unlock()
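This call site is updated to match the new zero-argument constructor: the delay no longer lives in each per-epoch dictionary but on ConsistentBCast itself, where it is applied when the timeout context is created (next hunk). A sketch of the resulting shape, with the field name assumed:

// bcastDict after the change: just a concurrent map from the BCastKey
// multihash to *blksInfo, with no per-dictionary delay. (Field name
// assumed; the literal &bcastDict{new(sync.Map)} above implies a
// single *sync.Map field.)
type bcastDict struct {
	m *sync.Map
}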
@@ -116,7 +117,7 @@ func (cb *ConsistentBCast) RcvBlock(ctx context.Context, blk *types.BlockMsg) error {
 		return nil
 	}
 
-	ctx, cancel := context.WithTimeout(ctx, cb.delay*time.Second)
+	ctx, cancel := context.WithTimeout(ctx, cb.delay)
 	bcastDict.store(key, &blksInfo{ctx, cancel, []cid.Cid{blkCid}})
 	return nil
 }
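This hunk fixes a double unit conversion: cb.delay is already a time.Duration (it is passed into NewConsistentBCast as one), so multiplying it by time.Second scales it a second time and inflates the timeout by a factor of one billion. A runnable illustration:

package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 6 * time.Second // like cb.delay: already a time.Duration

	// Buggy: a Duration is a count of nanoseconds, so multiplying by
	// time.Second (1e9) blows the timeout up by a factor of one billion.
	fmt.Println(delay * time.Second) // 1666666h40m0s
	// Fixed: pass the Duration through unchanged.
	fmt.Println(delay) // 6s
}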
@@ -44,7 +44,7 @@ var msgCidPrefix = cid.Prefix{
 	MhLength: 32,
 }
 
-func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *chain.Syncer, bs bserv.BlockService, cmgr connmgr.ConnManager) {
+func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, self peer.ID, s *chain.Syncer, bs bserv.BlockService, cmgr connmgr.ConnManager) {
 	// Timeout after (block time + propagation delay). This is useless at
 	// this point.
 	timeout := time.Duration(build.BlockDelaySecs+build.PropagationDelaySecs) * time.Second
@@ -107,13 +107,19 @@ func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *chain.Syncer, bs bserv.BlockService, cmgr connmgr.ConnManager) {
 			log.Warnw("received block with large delay from miner", "block", blk.Cid(), "delay", delay, "miner", blk.Header.Miner)
 		}
 
-		if err := cb.WaitForDelivery(blk.Header); err != nil {
-			log.Errorf("couldn't deliver block to syncer over pubsub: %s; source: %s", err, src)
-			return
+		// When we propose a new block ourselves, the proposed block also gets here through SyncSubmitBlock.
+		// If we are the block proposer, we don't need to wait for delivery: we know the block is honest.
+		if src != self {
+			log.Infof("Waiting for consistent broadcast of block in height: %v", blk.Header.Height)
+			if err := cb.WaitForDelivery(blk.Header); err != nil {
+				log.Errorf("couldn't deliver block to syncer over pubsub: %s; source: %s", err, src)
+				return
+			}
 		}
 
 		// Garbage collect the broadcast state
 		cb.GarbageCollect(blk.Header.Height)
+		log.Infof("Block in height %v delivered successfully", blk.Header.Height)
 
 		if s.InformNewBlock(msg.ReceivedFrom, &types.FullBlock{
 			Header: blk.Header,
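The new guard relies on gossipsub delivering locally published messages back to their publisher with the source set to the node's own peer ID; that is why HandleIncomingBlocks now takes self. A minimal sketch of the decision (hypothetical helper, not part of the commit; assumes the current go-libp2p import path):

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
)

// shouldWaitForDelivery mirrors the guard above: a block whose pubsub
// source is our own peer ID came back to us via SyncSubmitBlock, so
// the consistent-broadcast wait can be skipped for it.
func shouldWaitForDelivery(src, self peer.ID) bool {
	return src != self
}

func main() {
	self := peer.ID("self-peer")
	fmt.Println(shouldWaitForDelivery(peer.ID("other-peer"), self)) // true: wait
	fmt.Println(shouldWaitForDelivery(self, self))                  // false: our own block
}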
@@ -166,7 +166,7 @@ func HandleIncomingBlocks(mctx helpers.MetricsCtx,
 		panic(err)
 	}
 
-	go sub.HandleIncomingBlocks(ctx, blocksub, s, bserv, h.ConnManager())
+	go sub.HandleIncomingBlocks(ctx, blocksub, h.ID(), s, bserv, h.ConnManager())
 }
 
 func HandleIncomingMessages(mctx helpers.MetricsCtx, lc fx.Lifecycle, ps *pubsub.PubSub, stmgr *stmgr.StateManager, mpool *messagepool.MessagePool, h host.Host, nn dtypes.NetworkName, bootstrapper dtypes.Bootstrapper) {
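Finally, the wiring passes the host's own identity, h.ID(), as the new self argument, giving the subscriber the peer ID that gossipsub will report for locally published blocks. A minimal sketch of obtaining that ID, assuming a recent go-libp2p:

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p"
)

func main() {
	// A throwaway host: h.ID() is the same identity that pubsub
	// reports as the source of messages this node publishes itself.
	h, err := libp2p.New()
	if err != nil {
		panic(err)
	}
	defer h.Close()
	fmt.Println("self peer ID:", h.ID())
}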