Cleanup many lint warnings
Signed-off-by: Jakub Sztandera <kubuxu@protocol.ai>
parent 6986204657
commit d6615b6286
@@ -22,13 +22,23 @@ issues:
     - "func name will be used as test\\.Test.* by other packages, and that stutters; consider calling this"
     - "Potential file inclusion via variable"
     - "should have( a package)? comment"
+    - "Error return value of `logging.SetLogLevel` is not checked"

   exclude-use-default: false
   exclude-rules:
+    - path: lotuspond
+      linters:
+        - errcheck
+
     - path: node/modules/lp2p
       linters:
         - golint
-    - path: ".*_test.go"
+    - path: build/params_.*\.go
+      linters:
+        - golint
+
+    - path: .*_test.go
       linters:
         - gosec
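The new excludes cover warnings that the rest of this commit instead handles case by case in the code. As a purely illustrative sketch (not part of the commit), these are the two idioms the changeset uses when an error return is deliberately ignored rather than excluded in the config:

package main

import (
	"fmt"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "lint-demo")
	if err != nil {
		fmt.Println("create:", err)
		return
	}
	// A nolint directive marks a deliberate ignore for errcheck.
	defer f.Close() //nolint:errcheck
	// The blank identifier makes the discard explicit instead of silent.
	_ = os.Remove(f.Name())
}

Either form satisfies errcheck; the nolint comment documents intent at the call site, while the blank assignment keeps the discard visible to readers.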
@@ -12,7 +12,7 @@ import (
 "github.com/filecoin-project/lotus/build"
 )

-type WorkerApi interface {
+type WorkerAPI interface {
 Version(context.Context) (build.Version, error)
 // TODO: Info() (name, ...) ?
@@ -31,7 +31,7 @@ func PermissionedFullAPI(a api.FullNode) api.FullNode {
 return &out
 }

-func PermissionedWorkerAPI(a api.WorkerApi) api.WorkerApi {
+func PermissionedWorkerAPI(a api.WorkerAPI) api.WorkerAPI {
 var out WorkerStruct
 auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal)
 return &out
@@ -865,4 +865,4 @@ func (w *WorkerStruct) Closing(ctx context.Context) (<-chan struct{}, error) {
 var _ api.Common = &CommonStruct{}
 var _ api.FullNode = &FullNodeStruct{}
 var _ api.StorageMiner = &StorageMinerStruct{}
-var _ api.WorkerApi = &WorkerStruct{}
+var _ api.WorkerAPI = &WorkerStruct{}
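The WorkerApi to WorkerAPI rename follows golint's initialism rule. The `var _ api.WorkerAPI = &WorkerStruct{}` line it touches is a compile-time interface assertion, sketched below with illustrative stand-in types (not the real lotus API surface):

package main

import "fmt"

// golint prefers WorkerAPI over WorkerApi for initialisms.
type WorkerAPI interface {
	Version() (string, error)
}

type WorkerStruct struct{}

func (w *WorkerStruct) Version() (string, error) { return "v0-example", nil }

// Compile-time assertion: the build fails if WorkerStruct ever stops
// implementing WorkerAPI, which is why the rename has to touch this line too.
var _ WorkerAPI = &WorkerStruct{}

func main() {
	v, _ := (&WorkerStruct{}).Version()
	fmt.Println(v)
}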
@@ -48,7 +48,7 @@ func NewStorageMinerRPC(addr string, requestHeader http.Header) (api.StorageMine
 return &res, closer, err
 }

-func NewWorkerRPC(addr string, requestHeader http.Header) (api.WorkerApi, jsonrpc.ClientCloser, error) {
+func NewWorkerRPC(addr string, requestHeader http.Header) (api.WorkerAPI, jsonrpc.ClientCloser, error) {
 var res apistruct.WorkerStruct
 closer, err := jsonrpc.NewMergeClient(addr, "Filecoin",
 []interface{}{
@@ -320,6 +320,7 @@ func main() {
 })

 for _, g := range groupslice {
+g := g
 fmt.Printf("## %s\n", g.GroupName)
 fmt.Printf("%s\n\n", g.Header)
@@ -193,7 +193,7 @@ func testRetrieval(t *testing.T, ctx context.Context, err error, client *impl.Fu
 if err != nil {
 t.Fatal(err)
 }
-defer os.RemoveAll(rpath)
+defer os.RemoveAll(rpath) //nolint:errcheck

 caddr, err := client.WalletDefaultAddress(ctx)
 if err != nil {
@@ -7,6 +7,7 @@ import (
 )

 // TODO: check if this exists anywhere else

 type MultiaddrSlice []ma.Multiaddr

 func (m *MultiaddrSlice) UnmarshalJSON(raw []byte) (err error) {
@@ -2,6 +2,6 @@ package build

 import rice "github.com/GeertJohan/go.rice"

-func ParametersJson() []byte {
+func ParametersJSON() []byte {
 return rice.MustFindBox("proof-params").MustBytes("parameters.json")
 }
@@ -33,6 +33,7 @@ func (ve Version) EqMajorMinor(v2 Version) bool {
 // APIVersion is a semver version of the rpc api exposed
 var APIVersion Version = newVer(0, 3, 0)

+//nolint:varcheck
 const (
 majorMask = 0xff0000
 minorMask = 0xffff00
@@ -50,6 +50,7 @@ func Newf(retCode exitcode.ExitCode, format string, args ...interface{}) ActorEr
 }

 // todo: bit hacky

 func NewfSkip(skip int, retCode exitcode.ExitCode, format string, args ...interface{}) ActorError {
 if retCode == 0 {
 return &actorError{
@@ -91,7 +91,7 @@ func (bss *BlockSyncService) HandleStream(s inet.Stream) {
 ctx, span := trace.StartSpan(context.Background(), "blocksync.HandleStream")
 defer span.End()

-defer s.Close()
+defer s.Close() //nolint:errcheck

 var req BlockSyncRequest
 if err := cborutil.ReadCborRPC(bufio.NewReader(s), &req); err != nil {
@@ -107,7 +107,7 @@ func (bss *BlockSyncService) HandleStream(s inet.Stream) {
 }

 writeDeadline := 60 * time.Second
-s.SetDeadline(time.Now().Add(writeDeadline))
+_ = s.SetDeadline(time.Now().Add(writeDeadline))
 if err := cborutil.WriteCborRPC(s, resp); err != nil {
 log.Warnw("failed to write back response for handle stream", "err", err, "peer", s.Conn().RemotePeer())
 return
@@ -283,14 +283,14 @@ func (bs *BlockSync) fetchBlocksBlockSync(ctx context.Context, p peer.ID, req *B
 bs.RemovePeer(p)
 return nil, xerrors.Errorf("failed to open stream to peer: %w", err)
 }
-s.SetWriteDeadline(time.Now().Add(5 * time.Second))
+_ = s.SetWriteDeadline(time.Now().Add(5 * time.Second))

 if err := cborutil.WriteCborRPC(s, req); err != nil {
-s.SetWriteDeadline(time.Time{})
+_ = s.SetWriteDeadline(time.Time{})
 bs.syncPeers.logFailure(p, time.Since(start))
 return nil, err
 }
-s.SetWriteDeadline(time.Time{})
+_ = s.SetWriteDeadline(time.Time{})

 var res BlockSyncResponse
 r := incrt.New(s, 50<<10, 5*time.Second)
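Deadline errors on a stream are rarely actionable at these call sites, so the fix assigns the results to the blank identifier instead of adding error paths. A minimal, self-contained sketch of the same shape on a plain net.Conn (an analogy only; the real code operates on libp2p streams):

package main

import (
	"fmt"
	"net"
	"time"
)

// Discarding the deadline error with `_ =` keeps errcheck satisfied while
// leaving the call visible at the call site.
func writeWithDeadline(c net.Conn, payload []byte) error {
	_ = c.SetWriteDeadline(time.Now().Add(5 * time.Second))
	_, err := c.Write(payload)
	_ = c.SetWriteDeadline(time.Time{}) // clear the deadline again
	return err
}

func main() {
	a, b := net.Pipe()
	go func() {
		buf := make([]byte, 5)
		_, _ = b.Read(buf)
		_ = b.Close()
	}()
	fmt.Println("write err:", writeWithDeadline(a, []byte("hello")))
	_ = a.Close()
}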
@@ -67,7 +67,7 @@ func NewEvents(ctx context.Context, api eventApi) *Events {
 heightEvents: heightEvents{
 tsc: tsc,
 ctx: ctx,
-gcConfidence: abi.ChainEpoch(gcConfidence),
+gcConfidence: gcConfidence,

 heightTriggers: map[uint64]*heightHandler{},
 htTriggerHeights: map[abi.ChainEpoch][]uint64{},
@@ -58,6 +58,8 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid

 for i, m := range miners {
 // Create miner through power actor
+i := i
+m := m

 spt, err := ffiwrapper.SealProofTypeFromSectorSize(m.SectorSize)
 if err != nil {
@@ -154,6 +156,7 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid

 // Commit sectors
 for pi, preseal := range m.Sectors {
+preseal := preseal
 // TODO: Maybe check seal (Can just be snark inputs, doesn't go into the genesis file)

 // check deals, get dealWeight
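The added `i := i`, `m := m` and `preseal := preseal` lines re-declare the range variables inside the loop body. Before Go 1.22 a range loop reuses one variable for every iteration, so taking its address or capturing it in a closure is flagged by scopelint; rebinding gives each iteration its own copy. A standalone sketch of the failure mode this avoids (unrelated to the genesis code, goroutines used only to make the capture visible):

package main

import (
	"fmt"
	"sync"
)

func main() {
	miners := []string{"t01000", "t01001", "t01002"}

	var wg sync.WaitGroup
	for i, m := range miners {
		i := i // rebind: each closure gets its own copy
		m := m // (pre-Go 1.22 range variables are shared across iterations)
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Printf("miner %d: %s\n", i, m)
		}()
	}
	wg.Wait()
}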
@@ -255,12 +255,15 @@ func TestStateTreeConsistency(t *testing.T) {
 }

 for i, a := range addrs {
-st.SetActor(a, &types.Actor{
+err := st.SetActor(a, &types.Actor{
 Code: randomCid,
 Head: randomCid,
 Balance: types.NewInt(uint64(10000 + i)),
 Nonce: uint64(1000 - i),
 })
+if err != nil {
+t.Fatalf("while setting actor: %+v", err)
+}
 }

 root, err := st.Flush(context.TODO())
@@ -382,8 +382,8 @@ func (sm *StateManager) LoadActorState(ctx context.Context, a address.Address, o
 cst := cbor.NewCborStore(sm.cs.Blockstore())
 if err := cst.Get(ctx, act.Head, out); err != nil {
 var r cbg.Deferred
-cst.Get(ctx, act.Head, &r)
-fmt.Printf("badhead %x\n", r.Raw)
+_ = cst.Get(ctx, act.Head, &r)
+log.Errorw("bad actor head", "error", err, "raw", r.Raw, "address", a)

 return nil, err
 }
@@ -895,11 +895,17 @@ func DrawRandomness(rbase []byte, pers crypto.DomainSeparationTag, round abi.Cha
 return nil, xerrors.Errorf("deriving randomness: %w", err)
 }
 VRFDigest := blake2b.Sum256(rbase)
-h.Write(VRFDigest[:])
+_, err := h.Write(VRFDigest[:])
+if err != nil {
+return nil, xerrors.Errorf("hashing VRFDigest: %w", err)
+}
 if err := binary.Write(h, binary.BigEndian, round); err != nil {
 return nil, xerrors.Errorf("deriving randomness: %w", err)
 }
-h.Write(entropy)
+_, err = h.Write(entropy)
+if err != nil {
+return nil, xerrors.Errorf("hashing entropy: %w", err)
+}

 return h.Sum(nil), nil
 }
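errcheck flags the bare h.Write calls even though hash.Hash writers are documented never to return an error, so the change checks and wraps them anyway. A self-contained sketch of the resulting shape, using golang.org/x/crypto/blake2b purely for illustration (the lotus code uses its own blake2b dependency and abi/crypto types):

package main

import (
	"encoding/binary"
	"fmt"

	"golang.org/x/crypto/blake2b"
)

// drawRandomness mirrors the checked-write shape: every Write on the hasher
// is checked, even though hash.Hash's Write never returns an error.
func drawRandomness(base, entropy []byte, round int64) ([]byte, error) {
	h, err := blake2b.New256(nil)
	if err != nil {
		return nil, fmt.Errorf("creating hasher: %w", err)
	}
	digest := blake2b.Sum256(base)
	if _, err := h.Write(digest[:]); err != nil {
		return nil, fmt.Errorf("hashing digest: %w", err)
	}
	if err := binary.Write(h, binary.BigEndian, round); err != nil {
		return nil, fmt.Errorf("deriving randomness: %w", err)
	}
	if _, err := h.Write(entropy); err != nil {
		return nil, fmt.Errorf("hashing entropy: %w", err)
	}
	return h.Sum(nil), nil
}

func main() {
	out, err := drawRandomness([]byte("base"), []byte("entropy"), 42)
	fmt.Println(len(out), err)
}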
@@ -417,7 +417,7 @@ func TestSyncBadTimestamp(t *testing.T) {
 a1 := tu.mineOnBlock(base, 0, nil, false, true)

 tu.g.Timestamper = nil
-tu.g.ResyncBankerNonce(a1.TipSet())
+require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))

 fmt.Println("After mine bad block!")
 tu.printHeads()
@@ -479,7 +479,7 @@ func TestSyncFork(t *testing.T) {
 a := tu.mineOnBlock(a1, p1, []int{0}, true, false)
 a = tu.mineOnBlock(a, p1, []int{0}, true, false)

-tu.g.ResyncBankerNonce(a1.TipSet())
+require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet()))
 // chain B will now be heaviest
 b := tu.mineOnBlock(base, p2, []int{1}, true, false)
 b = tu.mineOnBlock(b, p2, []int{1}, true, false)
@@ -4,13 +4,14 @@ import (
 "bytes"
 "encoding/hex"
 "fmt"
-"github.com/filecoin-project/specs-actors/actors/abi"
 "reflect"
 "testing"

-"github.com/filecoin-project/go-address"
-"github.com/filecoin-project/specs-actors/actors/crypto"
 cid "github.com/ipfs/go-cid"
+
+"github.com/filecoin-project/go-address"
+"github.com/filecoin-project/specs-actors/actors/abi"
+"github.com/filecoin-project/specs-actors/actors/crypto"
 )

 func testBlockHeader(t testing.TB) *BlockHeader {
@@ -7,6 +7,7 @@ import (
 "os"

 "github.com/filecoin-project/go-address"
+"golang.org/x/xerrors"

 "github.com/filecoin-project/lotus/chain/gen"
 "github.com/filecoin-project/lotus/chain/types"
@@ -173,14 +174,18 @@ func WriteJsonToFile(fname string, obj interface{}) error {
 if err != nil {
 return err
 }
-defer fi.Close()
+defer fi.Close() //nolint:errcheck

 out, err := json.MarshalIndent(obj, "", " ")
 if err != nil {
 return err
 }

-fi.Write(out)
+_, err = fi.Write(out)
+if err != nil {
+return xerrors.Errorf("writing json: %w", err)
+}
+
 return nil
 }
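The important part of this hunk is that a failed fi.Write now aborts instead of silently producing a truncated file. A hedged sketch with a hypothetical helper of the same shape (not the lotus function):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// writeJSONToFile is a hypothetical helper with the same shape as the
// function in the diff: Write errors are propagated, not dropped.
func writeJSONToFile(fname string, obj interface{}) error {
	fi, err := os.Create(fname)
	if err != nil {
		return err
	}
	defer fi.Close() //nolint:errcheck

	out, err := json.MarshalIndent(obj, "", "  ")
	if err != nil {
		return err
	}
	if _, err := fi.Write(out); err != nil {
		return fmt.Errorf("writing json: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(writeJSONToFile(os.DevNull, map[string]int{"height": 42}))
}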
@@ -5,6 +5,7 @@ import (
 "context"
 "encoding/binary"
 "fmt"

 "github.com/filecoin-project/go-address"
 "github.com/filecoin-project/specs-actors/actors/abi"
 "github.com/filecoin-project/specs-actors/actors/abi/big"
@@ -182,7 +183,7 @@ func (rs *Runtime) ValidateImmediateCallerAcceptAny() {
 func (rs *Runtime) CurrentBalance() abi.TokenAmount {
 b, err := rs.GetBalance(rs.Message().Receiver())
 if err != nil {
-rs.Abortf(exitcode.ExitCode(err.RetCode()), "get current balance: %v", err)
+rs.Abortf(err.RetCode(), "get current balance: %v", err)
 }
 return b
 }
@@ -354,13 +355,13 @@ func (rs *Runtime) Send(to address.Address, method abi.MethodNum, m vmr.CBORMars
 params = buf.Bytes()
 }

-ret, err := rs.internalSend(rs.Message().Receiver(), to, method, types.BigInt(value), params)
+ret, err := rs.internalSend(rs.Message().Receiver(), to, method, value, params)
 if err != nil {
 if err.IsFatal() {
 panic(err)
 }
 log.Warnf("vmctx send failed: to: %s, method: %d: ret: %d, err: %s", to, method, ret, err)
-return nil, exitcode.ExitCode(err.RetCode())
+return nil, err.RetCode()
 }
 return &dumbWrapperType{ret}, 0
 }
@@ -399,7 +400,7 @@ func (rt *Runtime) internalSend(from, to address.Address, method abi.MethodNum,
 }

 mr := types.MessageReceipt{
-ExitCode: exitcode.ExitCode(aerrors.RetCode(errSend)),
+ExitCode: aerrors.RetCode(errSend),
 Return: ret,
 GasUsed: 0,
 }
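This and the surrounding hunks drop the exitcode.ExitCode(...) wrappers around RetCode values, which suggests those helpers already return that type, making the conversions redundant (the kind of thing unconvert and golint flag). A small sketch, with illustrative names rather than the real aerrors API, of why a typed return value removes the call-site conversions:

package main

import "fmt"

// ExitCode plays the role of exitcode.ExitCode in this sketch.
type ExitCode int64

type actorError struct{ retCode ExitCode }

// Returning the named type directly means callers never need
// ExitCode(err.RetCode())-style conversions.
func (e *actorError) RetCode() ExitCode { return e.retCode }

func main() {
	err := &actorError{retCode: 16}
	var code ExitCode = err.RetCode() // no conversion required
	fmt.Println(code)
}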
@@ -431,6 +432,7 @@ type shimStateHandle struct {

 func (ssh *shimStateHandle) Create(obj vmr.CBORMarshaler) {
 c := ssh.rs.Put(obj)
+// TODO: handle error below
 ssh.rs.stateCommit(EmptyObjectCid, c)
 }
@@ -460,6 +462,7 @@ func (ssh *shimStateHandle) Transaction(obj vmr.CBORer, f func() interface{}) in

 c := ssh.rs.Put(obj)

+// TODO: handle error below
 ssh.rs.stateCommit(baseState, c)

 return out
@@ -225,7 +225,7 @@ func (ss *syscallShim) VerifySeal(info abi.SealVerifyInfo) error {
 }

 ticket := []byte(info.Randomness)
-proof := []byte(info.Proof)
+proof := info.Proof
 seed := []byte(info.InteractiveRandomness)

 log.Debugf("Verif r:%x; d:%x; m:%s; t:%x; s:%x; N:%d; p:%x", info.SealedCID, info.UnsealedCID, miner, ticket, seed, info.SectorID.Number, proof)
@@ -41,6 +41,7 @@ func init() {
 func TestChainValidationMessageSuite(t *testing.T) {
 f := factory.NewFactories()
 for _, testCase := range suites.MessageTestCases() {
+testCase := testCase
 if TestSuiteSkipper.Skip(testCase) {
 continue
 }
@@ -53,6 +54,7 @@ func TestChainValidationMessageSuite(t *testing.T) {
 func TestChainValidationTipSetSuite(t *testing.T) {
 f := factory.NewFactories()
 for _, testCase := range suites.TipSetTestCases() {
+testCase := testCase
 if TestSuiteSkipper.Skip(testCase) {
 continue
 }
@@ -246,7 +246,7 @@ func (vm *VM) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*Ap
 ret, actorErr, rt := vm.send(ctx, msg, nil, 0)
 return &ApplyRet{
 MessageReceipt: types.MessageReceipt{
-ExitCode: exitcode.ExitCode(aerrors.RetCode(actorErr)),
+ExitCode: aerrors.RetCode(actorErr),
 Return: ret,
 GasUsed: 0,
 },
@@ -415,7 +415,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet,

 return &ApplyRet{
 MessageReceipt: types.MessageReceipt{
-ExitCode: exitcode.ExitCode(errcode),
+ExitCode: errcode,
 Return: ret,
 GasUsed: gasUsed,
 },
@@ -12,8 +12,8 @@ import (

 "github.com/filecoin-project/go-address"

-_ "github.com/filecoin-project/lotus/lib/sigs/bls"
-_ "github.com/filecoin-project/lotus/lib/sigs/secp"
+_ "github.com/filecoin-project/lotus/lib/sigs/bls" // enable bls signatures
+_ "github.com/filecoin-project/lotus/lib/sigs/secp" // enable secp signatures

 "github.com/filecoin-project/lotus/chain/types"
 "github.com/filecoin-project/lotus/lib/sigs"
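The underscore imports exist only for their init side effects, registering the BLS and secp256k1 signature backends, and golint wants a comment explaining any blank import. A generic, self-contained sketch of that register-on-import pattern (the registry and names below are hypothetical):

package main

import "fmt"

// registry is a stand-in for the sigs package's table of signature backends.
var registry = map[string]func([]byte) bool{}

// Register is what a backend package would call from its init().
func Register(name string, verify func([]byte) bool) {
	registry[name] = verify
}

func init() {
	// Stand-in for `_ "lib/sigs/bls" // enable bls signatures`: importing the
	// backend runs its init, which registers the implementation.
	Register("bls", func(b []byte) bool { return len(b) > 0 })
}

func main() {
	fmt.Println("registered backends:", len(registry))
}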
@@ -804,7 +804,12 @@ var chainExportCmd = &cli.Command{
 if err != nil {
 return err
 }
-defer fi.Close()
+defer func() {
+err := fi.Close()
+if err != nil {
+fmt.Printf("error closing output file: %+v", err)
+}
+}()

 ts, err := LoadTipSet(ctx, cctx, api)
 if err != nil {
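For a file being written, an ignored Close can hide lost data, so the export command now closes inside a deferred closure and at least reports the failure. A minimal sketch of the pattern (illustrative file and data only):

package main

import (
	"fmt"
	"os"
)

func writeReport(path string, data []byte) error {
	fi, err := os.Create(path)
	if err != nil {
		return err
	}
	defer func() {
		// Report rather than ignore: a failed Close on a written file
		// can mean the data never reached disk.
		if cerr := fi.Close(); cerr != nil {
			fmt.Printf("error closing output file: %+v\n", cerr)
		}
	}()

	_, err = fi.Write(data)
	return err
}

func main() {
	fmt.Println(writeReport(os.DevNull, []byte("chain export placeholder")))
}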
@@ -206,7 +206,10 @@ var msigInspectCmd = &cli.Command{
 tx := pending[txid]
 fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%d\t%x\n", txid, state(tx), tx.To, types.FIL(tx.Value), tx.Method, tx.Params)
 }
-w.Flush()
+if err := w.Flush(); err != nil {
+return xerrors.Errorf("flushing ouput: %+v", err)
+}
+
 }

 return nil
@@ -24,7 +24,7 @@ var fetchParamCmd = &cli.Command{
 return err
 }
 sectorSize := uint64(sectorSizeInt)
-err = paramfetch.GetParams(build.ParametersJson(), sectorSize)
+err = paramfetch.GetParams(build.ParametersJSON(), sectorSize)
 if err != nil {
 return xerrors.Errorf("fetching proof parameters: %w", err)
 }
@@ -370,7 +370,7 @@ var paychVoucherSubmitCmd = &cli.Command{
 return fmt.Errorf("message execution failed (exit code %d)", mwait.Receipt.ExitCode)
 }

-fmt.Println("channel updated succesfully")
+fmt.Println("channel updated successfully")

 return nil
 },
@@ -53,8 +53,7 @@ var runCmd = &cli.Command{
 defer closer()
 ctx := lcli.ReqContext(cctx)

-sendSmallFundsTxs(ctx, api, addr, 5)
-return nil
+return sendSmallFundsTxs(ctx, api, addr, 5)
 },
 }
@@ -55,7 +55,7 @@ var importBenchCmd = &cli.Command{
 if err != nil {
 return err
 }
-defer cfi.Close()
+defer cfi.Close() //nolint:errcheck // read only file

 tdir, err := ioutil.TempDir("", "lotus-import-bench")
 if err != nil {
@@ -80,7 +80,7 @@ var importBenchCmd = &cli.Command{
 if err != nil {
 return err
 }
-defer prof.Close()
+defer prof.Close() //nolint:errcheck

 if err := pprof.StartCPUProfile(prof); err != nil {
 return err
@@ -146,7 +146,7 @@ var importBenchCmd = &cli.Command{
 if err != nil {
 return err
 }
-defer ibj.Close()
+defer ibj.Close() //nolint:errcheck

 if err := json.NewEncoder(ibj).Encode(out); err != nil {
 return err
@@ -141,7 +141,10 @@ var sealBenchCmd = &cli.Command{
 },
 Action: func(c *cli.Context) error {
 if c.Bool("no-gpu") {
-os.Setenv("BELLMAN_NO_GPU", "1")
+err := os.Setenv("BELLMAN_NO_GPU", "1")
+if err != nil {
+return xerrors.Errorf("setting no-gpu flag: %w", err)
+}
 }

 robench := c.String("benchmark-existing-sectorbuilder")
@@ -154,7 +157,10 @@ var sealBenchCmd = &cli.Command{
 return err
 }

-os.MkdirAll(sdir, 0775)
+err = os.MkdirAll(sdir, 0775)
+if err != nil {
+return xerrors.Errorf("creating sectorbuilder dir: %w", err)
+}

 tsdir, err := ioutil.TempDir(sdir, "bench")
 if err != nil {
@@ -209,7 +215,7 @@ var sealBenchCmd = &cli.Command{

 // Only fetch parameters if actually needed
 if !c.Bool("skip-commit2") {
-if err := paramfetch.GetParams(build.ParametersJson(), uint64(sectorSize)); err != nil {
+if err := paramfetch.GetParams(build.ParametersJSON(), uint64(sectorSize)); err != nil {
 return xerrors.Errorf("getting params: %w", err)
 }
 }
@@ -599,7 +605,10 @@ var proveCmd = &cli.Command{
 },
 Action: func(c *cli.Context) error {
 if c.Bool("no-gpu") {
-os.Setenv("BELLMAN_NO_GPU", "1")
+err := os.Setenv("BELLMAN_NO_GPU", "1")
+if err != nil {
+return xerrors.Errorf("setting no-gpu flag: %w", err)
+}
 }

 if !c.Args().Present() {
@@ -616,7 +625,7 @@ var proveCmd = &cli.Command{
 return xerrors.Errorf("unmarshalling input file: %w", err)
 }

-if err := paramfetch.GetParams(build.ParametersJson(), c2in.SectorSize); err != nil {
+if err := paramfetch.GetParams(build.ParametersJSON(), c2in.SectorSize); err != nil {
 return xerrors.Errorf("getting params: %w", err)
 }
@@ -86,7 +86,7 @@ var runCmd = &cli.Command{
 if err != nil {
 return err
 }
-defer st.close()
+defer st.close() //nolint:errcheck

 runSyncer(ctx, api, st, maxBatch)
@@ -198,6 +198,8 @@ func syncHead(ctx context.Context, api api.FullNode, st *storage, ts *types.TipS
 }

 for a, act := range changes {
+act := act
+
 addr, err := address.NewFromString(a)
 if err != nil {
 log.Error(err)
@@ -157,7 +157,7 @@ func (h *handler) send(w http.ResponseWriter, r *http.Request) {
 to, err := address.NewFromString(r.FormValue("address"))
 if err != nil {
 w.WriteHeader(400)
-w.Write([]byte(err.Error()))
+_, _ = w.Write([]byte(err.Error()))
 return
 }
@@ -204,11 +204,11 @@ func (h *handler) send(w http.ResponseWriter, r *http.Request) {
 })
 if err != nil {
 w.WriteHeader(400)
-w.Write([]byte(err.Error()))
+_, _ = w.Write([]byte(err.Error()))
 return
 }

-w.Write([]byte(smsg.Cid().String()))
+_, _ = w.Write([]byte(smsg.Cid().String()))
 }

 func (h *handler) mkminer(w http.ResponseWriter, r *http.Request) {
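Once an HTTP handler has begun responding there is usually nothing useful to do if ResponseWriter.Write fails (the client has typically gone away), so the errors are discarded explicitly with `_, _ =`. A small runnable sketch of the same handler shape (hypothetical endpoint, not the pond faucet handler):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func sendHandler(w http.ResponseWriter, r *http.Request) {
	addr := r.FormValue("address")
	if addr == "" {
		w.WriteHeader(400)
		// Nothing sensible to do if the write fails; discard explicitly.
		_, _ = w.Write([]byte("missing address"))
		return
	}
	_, _ = w.Write([]byte("ok: " + addr))
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(sendHandler))
	defer srv.Close()
	resp, err := http.Get(srv.URL + "?address=t01000")
	if err == nil {
		fmt.Println("status:", resp.StatusCode)
		_ = resp.Body.Close()
	}
}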
@@ -79,7 +79,7 @@ var watchHeadCmd = &cli.Command{
 &cli.IntFlag{
 Name: "api-retries",
 Value: 8,
-Usage: "number of API retry attemps",
+Usage: "number of API retry attempts",
 },
 },
 Action: func(c *cli.Context) error {
@@ -26,7 +26,7 @@ import (
 lcli "github.com/filecoin-project/lotus/cli"
 "github.com/filecoin-project/lotus/lib/lotuslog"
 "github.com/filecoin-project/lotus/node/repo"
-"github.com/filecoin-project/sector-storage"
+sectorstorage "github.com/filecoin-project/sector-storage"
 "github.com/filecoin-project/sector-storage/sealtasks"
 "github.com/filecoin-project/sector-storage/stores"
 )
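golint cannot infer the package identifier from an import path ending in sector-storage (the package is sectorstorage), so the import gains an explicit alias. For illustration only, the same aliasing syntax with standard-library packages:

package main

import (
	crand "crypto/rand"
	"fmt"
	mrand "math/rand"
)

func main() {
	// Explicit aliases make the package identifier unambiguous at call
	// sites, the same reason sector-storage is imported as sectorstorage.
	buf := make([]byte, 4)
	if _, err := crand.Read(buf); err != nil {
		fmt.Println("crypto/rand:", err)
		return
	}
	fmt.Println(buf, mrand.Intn(100))
}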
@@ -107,7 +107,9 @@ var runCmd = &cli.Command{
 },
 Action: func(cctx *cli.Context) error {
 if !cctx.Bool("enable-gpu-proving") {
-os.Setenv("BELLMAN_NO_GPU", "true")
+if err := os.Setenv("BELLMAN_NO_GPU", "true"); err != nil {
+return xerrors.Errorf("could not set no-gpu env: %+v", err)
+}
 }

 if cctx.String("address") == "" {
@@ -146,7 +148,7 @@ var runCmd = &cli.Command{
 }

 if cctx.Bool("commit") {
-if err := paramfetch.GetParams(build.ParametersJson(), uint64(ssize)); err != nil {
+if err := paramfetch.GetParams(build.ParametersJSON(), uint64(ssize)); err != nil {
 return xerrors.Errorf("get params: %w", err)
 }
 }
@@ -145,7 +145,6 @@ var aggregateManifestsCmd = &cli.Command{
 if err != nil {
 return err
 }
-defer fi.Close()
 var val map[string]genesis.Miner
 if err := json.NewDecoder(fi).Decode(&val); err != nil {
 return err
@@ -6,7 +6,6 @@ import (
 "encoding/hex"
 "encoding/json"
 "fmt"
-"github.com/minio/blake2b-simd"
 "io/ioutil"
 "os"
 "path/filepath"
@@ -15,6 +14,7 @@ import (
 logging "github.com/ipfs/go-log/v2"
 ic "github.com/libp2p/go-libp2p-core/crypto"
 "github.com/libp2p/go-libp2p-core/peer"
+"github.com/minio/blake2b-simd"
 "golang.org/x/xerrors"

 "github.com/filecoin-project/go-address"
@@ -48,7 +48,7 @@ func PreSeal(maddr address.Address, pt abi.RegisteredProof, offset abi.SectorNum
 SealProofType: spt,
 }

-if err := os.MkdirAll(sbroot, 0775); err != nil {
+if err := os.MkdirAll(sbroot, 0775); err != nil { //golint:gosec
 return nil, nil, err
 }
@@ -34,7 +34,7 @@ var importCarCmd = &cli.Command{
 if err != nil {
 return err
 }
-defer lr.Close()
+defer lr.Close() //nolint:errcheck

 cf := cctx.Args().Get(0)
 f, err := os.OpenFile(cf, os.O_RDONLY, 0664)
@@ -24,7 +24,7 @@ var fetchParamCmd = &cli.Command{
 return err
 }
 sectorSize := uint64(sectorSizeInt)
-err = paramfetch.GetParams(build.ParametersJson(), sectorSize)
+err = paramfetch.GetParams(build.ParametersJSON(), sectorSize)
 if err != nil {
 return xerrors.Errorf("fetching proof parameters: %w", err)
 }
@@ -127,7 +127,7 @@ var initCmd = &cli.Command{
 }

 log.Info("Checking proof parameters")
-if err := paramfetch.GetParams(build.ParametersJson(), uint64(ssize)); err != nil {
+if err := paramfetch.GetParams(build.ParametersJSON(), uint64(ssize)); err != nil {
 return xerrors.Errorf("fetching proof parameters: %w", err)
 }
@@ -389,7 +389,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api lapi.FullNode,
 if err != nil {
 return err
 }
-defer lr.Close()
+defer lr.Close() //notlint:errcheck

 log.Info("Initializing libp2p identity")
@@ -134,7 +134,7 @@ var DaemonCmd = &cli.Command{
 return xerrors.Errorf("repo init error: %w", err)
 }

-if err := paramfetch.GetParams(build.ParametersJson(), 0); err != nil {
+if err := paramfetch.GetParams(build.ParametersJSON(), 0); err != nil {
 return xerrors.Errorf("fetching proof parameters: %w", err)
 }
@@ -269,7 +269,7 @@ func ImportChain(r repo.Repo, fname string) error {
 if err != nil {
 return err
 }
-defer lr.Close()
+defer lr.Close() //nolint:errcheck

 ds, err := lr.Datastore("/blocks")
 if err != nil {
@@ -85,14 +85,14 @@ func handleImport(a *impl.FullNodeAPI) func(w http.ResponseWriter, r *http.Reque
 }
 if !auth.HasPerm(r.Context(), nil, apistruct.PermWrite) {
 w.WriteHeader(401)
-json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"})
+_ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"})
 return
 }

 c, err := a.ClientImportLocal(r.Context(), r.Body)
 if err != nil {
 w.WriteHeader(500)
-json.NewEncoder(w).Encode(struct{ Error string }{err.Error()})
+_ = json.NewEncoder(w).Encode(struct{ Error string }{err.Error()})
 return
 }
 w.WriteHeader(200)
|
154
extern/rleplus/internal/bitvector.go
vendored
154
extern/rleplus/internal/bitvector.go
vendored
@ -1,154 +0,0 @@
|
|||||||
package bitvector
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"log"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrOutOfRange - the index passed is out of range for the BitVector
|
|
||||||
ErrOutOfRange = errors.New("index out of range")
|
|
||||||
)
|
|
||||||
|
|
||||||
// BitNumbering indicates the ordering of bits, either
|
|
||||||
// least-significant bit in position 0, or most-significant bit
|
|
||||||
// in position 0.
|
|
||||||
//
|
|
||||||
// It it used in 3 ways with BitVector:
|
|
||||||
// 1. Ordering of bits within the Buf []byte structure
|
|
||||||
// 2. What order to add bits when using Extend()
|
|
||||||
// 3. What order to read bits when using Take()
|
|
||||||
//
|
|
||||||
// https://en.wikipedia.org/wiki/Bit_numbering
|
|
||||||
type BitNumbering int
|
|
||||||
|
|
||||||
const (
|
|
||||||
// LSB0 - bit ordering starts with the low-order bit
|
|
||||||
LSB0 BitNumbering = iota
|
|
||||||
|
|
||||||
// MSB0 - bit ordering starts with the high-order bit
|
|
||||||
MSB0
|
|
||||||
)
|
|
||||||
|
|
||||||
// BitVector is used to manipulate ordered collections of bits
|
|
||||||
type BitVector struct {
|
|
||||||
Buf []byte
|
|
||||||
|
|
||||||
// BytePacking is the bit ordering within bytes
|
|
||||||
BytePacking BitNumbering
|
|
||||||
|
|
||||||
// Len is the logical number of bits in the vector.
|
|
||||||
// The last byte in Buf may have undefined bits if Len is not a multiple of 8
|
|
||||||
Len uint
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBitVector constructs a new BitVector from a slice of bytes.
|
|
||||||
//
|
|
||||||
// The bytePacking parameter is required to know how to interpret the bit ordering within the bytes.
|
|
||||||
func NewBitVector(buf []byte, bytePacking BitNumbering) *BitVector {
|
|
||||||
return &BitVector{
|
|
||||||
BytePacking: bytePacking,
|
|
||||||
Buf: buf,
|
|
||||||
Len: uint(len(buf) * 8),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Push adds a single bit to the BitVector.
|
|
||||||
//
|
|
||||||
// Although it takes a byte, only the low-order bit is used, so just use 0 or 1.
|
|
||||||
func (v *BitVector) Push(val byte) {
|
|
||||||
if v.Len%8 == 0 {
|
|
||||||
v.Buf = append(v.Buf, 0)
|
|
||||||
}
|
|
||||||
lastIdx := v.Len / 8
|
|
||||||
|
|
||||||
switch v.BytePacking {
|
|
||||||
case LSB0:
|
|
||||||
v.Buf[lastIdx] |= (val & 1) << (v.Len % 8)
|
|
||||||
default:
|
|
||||||
v.Buf[lastIdx] |= (val & 1) << (7 - (v.Len % 8))
|
|
||||||
}
|
|
||||||
|
|
||||||
v.Len++
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns a single bit as a byte -- either 0 or 1
|
|
||||||
func (v *BitVector) Get(idx uint) (byte, error) {
|
|
||||||
if idx >= v.Len {
|
|
||||||
return 0, ErrOutOfRange
|
|
||||||
}
|
|
||||||
blockIdx := idx / 8
|
|
||||||
|
|
||||||
switch v.BytePacking {
|
|
||||||
case LSB0:
|
|
||||||
return v.Buf[blockIdx] >> (idx % 8) & 1, nil
|
|
||||||
default:
|
|
||||||
return v.Buf[blockIdx] >> (7 - idx%8) & 1, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Extend adds up to 8 bits to the receiver
|
|
||||||
//
|
|
||||||
// Given a byte b == 0b11010101
|
|
||||||
// v.Extend(b, 4, LSB0) would add < 1, 0, 1, 0 >
|
|
||||||
// v.Extend(b, 4, MSB0) would add < 1, 1, 0, 1 >
|
|
||||||
//
|
|
||||||
// Panics if count is out of range
|
|
||||||
func (v *BitVector) Extend(val byte, count uint, order BitNumbering) {
|
|
||||||
if count > 8 {
|
|
||||||
log.Panicf("invalid count")
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := uint(0); i < count; i++ {
|
|
||||||
switch order {
|
|
||||||
case LSB0:
|
|
||||||
v.Push((val >> i) & 1)
|
|
||||||
default:
|
|
||||||
v.Push((val >> (7 - i)) & 1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Take reads up to 8 bits at the given index.
|
|
||||||
//
|
|
||||||
// Given a BitVector < 1, 1, 0, 1, 0, 1, 0, 1 >
|
|
||||||
// v.Take(0, 4, LSB0) would return 0b00001011
|
|
||||||
// v.Take(0, 4, MSB0) would return 0b11010000
|
|
||||||
//
|
|
||||||
// Panics if count is out of range
|
|
||||||
func (v *BitVector) Take(index uint, count uint, order BitNumbering) (out byte) {
|
|
||||||
if count > 8 {
|
|
||||||
log.Panicf("invalid count")
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := uint(0); i < count; i++ {
|
|
||||||
val, _ := v.Get(index + i)
|
|
||||||
|
|
||||||
switch order {
|
|
||||||
case LSB0:
|
|
||||||
out |= val << i
|
|
||||||
default:
|
|
||||||
out |= val << (7 - i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Iterator returns a function, which when invoked, returns the number
|
|
||||||
// of bits requested, and increments an internal cursor.
|
|
||||||
//
|
|
||||||
// When the end of the BitVector is reached, it returns zeroes indefinitely
|
|
||||||
//
|
|
||||||
// Panics if count is out of range
|
|
||||||
func (v *BitVector) Iterator(order BitNumbering) func(uint) byte {
|
|
||||||
cursor := uint(0)
|
|
||||||
return func(count uint) (out byte) {
|
|
||||||
if count > 8 {
|
|
||||||
log.Panicf("invalid count")
|
|
||||||
}
|
|
||||||
|
|
||||||
out = v.Take(cursor, count, order)
|
|
||||||
cursor += count
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
136
extern/rleplus/internal/bitvector_test.go
vendored
136
extern/rleplus/internal/bitvector_test.go
vendored
@ -1,136 +0,0 @@
|
|||||||
package bitvector_test
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
|
|
||||||
bitvector "github.com/filecoin-project/lotus/extern/rleplus/internal"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestBitVector(t *testing.T) {
|
|
||||||
t.Run("zero value", func(t *testing.T) {
|
|
||||||
var v bitvector.BitVector
|
|
||||||
|
|
||||||
assert.Equal(t, bitvector.LSB0, v.BytePacking)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("Push", func(t *testing.T) {
|
|
||||||
// MSB0 bit numbering
|
|
||||||
v := bitvector.BitVector{BytePacking: bitvector.MSB0}
|
|
||||||
v.Push(1)
|
|
||||||
v.Push(0)
|
|
||||||
v.Push(1)
|
|
||||||
v.Push(1)
|
|
||||||
|
|
||||||
assert.Equal(t, byte(176), v.Buf[0])
|
|
||||||
|
|
||||||
// LSB0 bit numbering
|
|
||||||
v = bitvector.BitVector{BytePacking: bitvector.LSB0}
|
|
||||||
v.Push(1)
|
|
||||||
v.Push(0)
|
|
||||||
v.Push(1)
|
|
||||||
v.Push(1)
|
|
||||||
|
|
||||||
assert.Equal(t, byte(13), v.Buf[0])
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("Get", func(t *testing.T) {
|
|
||||||
bits := []byte{1, 0, 1, 1, 0, 0, 1, 0}
|
|
||||||
|
|
||||||
for _, numbering := range []bitvector.BitNumbering{bitvector.MSB0, bitvector.LSB0} {
|
|
||||||
v := bitvector.BitVector{BytePacking: numbering}
|
|
||||||
|
|
||||||
for _, bit := range bits {
|
|
||||||
v.Push(bit)
|
|
||||||
}
|
|
||||||
|
|
||||||
for idx, expected := range bits {
|
|
||||||
actual, _ := v.Get(uint(idx))
|
|
||||||
assert.Equal(t, expected, actual)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("Extend", func(t *testing.T) {
|
|
||||||
val := byte(171) // 0b10101011
|
|
||||||
|
|
||||||
var v bitvector.BitVector
|
|
||||||
|
|
||||||
// MSB0 bit numbering
|
|
||||||
v = bitvector.BitVector{}
|
|
||||||
v.Extend(val, 4, bitvector.MSB0)
|
|
||||||
assertBitVector(t, []byte{1, 0, 1, 0}, v)
|
|
||||||
v.Extend(val, 5, bitvector.MSB0)
|
|
||||||
assertBitVector(t, []byte{1, 0, 1, 0, 1, 0, 1, 0, 1}, v)
|
|
||||||
|
|
||||||
// LSB0 bit numbering
|
|
||||||
v = bitvector.BitVector{}
|
|
||||||
v.Extend(val, 4, bitvector.LSB0)
|
|
||||||
assertBitVector(t, []byte{1, 1, 0, 1}, v)
|
|
||||||
v.Extend(val, 5, bitvector.LSB0)
|
|
||||||
assertBitVector(t, []byte{1, 1, 0, 1, 1, 1, 0, 1, 0}, v)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("invalid counts to Take/Extend/Iterator cause panics", func(t *testing.T) {
|
|
||||||
v := bitvector.BitVector{BytePacking: bitvector.LSB0}
|
|
||||||
|
|
||||||
assert.Panics(t, func() { v.Extend(0xff, 9, bitvector.LSB0) })
|
|
||||||
|
|
||||||
assert.Panics(t, func() { v.Take(0, 9, bitvector.LSB0) })
|
|
||||||
|
|
||||||
next := v.Iterator(bitvector.LSB0)
|
|
||||||
assert.Panics(t, func() { next(9) })
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("Take", func(t *testing.T) {
|
|
||||||
var v bitvector.BitVector
|
|
||||||
|
|
||||||
bits := []byte{1, 0, 1, 0, 1, 0, 1, 1}
|
|
||||||
for _, bit := range bits {
|
|
||||||
v.Push(bit)
|
|
||||||
}
|
|
||||||
|
|
||||||
assert.Equal(t, byte(176), v.Take(4, 4, bitvector.MSB0))
|
|
||||||
assert.Equal(t, byte(13), v.Take(4, 4, bitvector.LSB0))
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("Iterator", func(t *testing.T) {
|
|
||||||
var buf []byte
|
|
||||||
|
|
||||||
// make a bitvector of 256 sample bits
|
|
||||||
for i := 0; i < 32; i++ {
|
|
||||||
buf = append(buf, 128+32)
|
|
||||||
}
|
|
||||||
|
|
||||||
v := bitvector.NewBitVector(buf, bitvector.LSB0)
|
|
||||||
|
|
||||||
next := v.Iterator(bitvector.LSB0)
|
|
||||||
|
|
||||||
// compare to Get()
|
|
||||||
for i := uint(0); i < v.Len; i++ {
|
|
||||||
expected, _ := v.Get(i)
|
|
||||||
assert.Equal(t, expected, next(1))
|
|
||||||
}
|
|
||||||
|
|
||||||
// out of range should return zero
|
|
||||||
assert.Equal(t, byte(0), next(1))
|
|
||||||
assert.Equal(t, byte(0), next(8))
|
|
||||||
|
|
||||||
// compare to Take()
|
|
||||||
next = v.Iterator(bitvector.LSB0)
|
|
||||||
assert.Equal(t, next(5), v.Take(0, 5, bitvector.LSB0))
|
|
||||||
assert.Equal(t, next(8), v.Take(5, 8, bitvector.LSB0))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Note: When using this helper assertion, expectedBits should *only* be 0s and 1s.
|
|
||||||
func assertBitVector(t *testing.T, expectedBits []byte, actual bitvector.BitVector) {
|
|
||||||
assert.Equal(t, uint(len(expectedBits)), actual.Len)
|
|
||||||
|
|
||||||
for idx, bit := range expectedBits {
|
|
||||||
actualBit, err := actual.Get(uint(idx))
|
|
||||||
assert.NoError(t, err)
|
|
||||||
assert.Equal(t, bit, actualBit)
|
|
||||||
}
|
|
||||||
}
|
|
204
extern/rleplus/rleplus.go
vendored
204
extern/rleplus/rleplus.go
vendored
@ -1,204 +0,0 @@
|
|||||||
package rleplus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/binary"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"sort"
|
|
||||||
|
|
||||||
bitvector "github.com/filecoin-project/lotus/extern/rleplus/internal"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Version is the 2 lowest bits of this constant
|
|
||||||
const Version = 0
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrRunLengthTooLarge - data implies a run-length which isn't supported
|
|
||||||
ErrRunLengthTooLarge = fmt.Errorf("run length too large for RLE+ version %d", Version)
|
|
||||||
|
|
||||||
// ErrDecode - invalid encoding for this version
|
|
||||||
ErrDecode = fmt.Errorf("invalid encoding for RLE+ version %d", Version)
|
|
||||||
|
|
||||||
// ErrWrongVersion - wrong version of RLE+
|
|
||||||
ErrWrongVersion = errors.New("invalid RLE+ version")
|
|
||||||
)
|
|
||||||
|
|
||||||
// Encode returns the RLE+ representation of the provided integers.
|
|
||||||
// Also returned is the number of bits required by this encoding,
|
|
||||||
// which is not necessarily on a byte boundary.
|
|
||||||
//
|
|
||||||
// The RLE+ spec is here: https://github.com/filecoin-project/specs/blob/master/data-structures.md#rle-bitset-encoding
|
|
||||||
// and is described by the BNF Grammar:
|
|
||||||
//
|
|
||||||
// <encoding> ::= <header> <blocks>
|
|
||||||
// <header> ::= <version> <bit>
|
|
||||||
// <version> ::= "00"
|
|
||||||
// <blocks> ::= <block> <blocks> | ""
|
|
||||||
// <block> ::= <block_single> | <block_short> | <block_long>
|
|
||||||
// <block_single> ::= "1"
|
|
||||||
// <block_short> ::= "01" <bit> <bit> <bit> <bit>
|
|
||||||
// <block_long> ::= "00" <unsigned_varint>
|
|
||||||
// <bit> ::= "0" | "1"
|
|
||||||
//
|
|
||||||
// Filecoin specific:
|
|
||||||
// The encoding is returned as a []byte, each byte packed starting with the low-order bit (LSB0)
|
|
||||||
func Encode(ints []uint64) ([]byte, uint, error) {
|
|
||||||
v := bitvector.BitVector{BytePacking: bitvector.LSB0}
|
|
||||||
firstBit, runs := RunLengths(ints)
|
|
||||||
|
|
||||||
// Add version header
|
|
||||||
v.Extend(Version, 2, bitvector.LSB0)
|
|
||||||
|
|
||||||
v.Push(firstBit)
|
|
||||||
|
|
||||||
for _, run := range runs {
|
|
||||||
switch {
|
|
||||||
case run == 1:
|
|
||||||
v.Push(1)
|
|
||||||
case run < 16:
|
|
||||||
v.Push(0)
|
|
||||||
v.Push(1)
|
|
||||||
v.Extend(byte(run), 4, bitvector.LSB0)
|
|
||||||
case run >= 16:
|
|
||||||
v.Push(0)
|
|
||||||
v.Push(0)
|
|
||||||
// 10 bytes needed to encode MaxUint64
|
|
||||||
buf := make([]byte, 10)
|
|
||||||
numBytes := binary.PutUvarint(buf, run)
|
|
||||||
for i := 0; i < numBytes; i++ {
|
|
||||||
v.Extend(buf[i], 8, bitvector.LSB0)
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return nil, 0, ErrRunLengthTooLarge
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return v.Buf, v.Len, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode returns integers represented by the given RLE+ encoding
|
|
||||||
//
|
|
||||||
// The length of the encoding is not specified. It is inferred by
|
|
||||||
// reading zeroes from the (possibly depleted) BitVector, by virtue
|
|
||||||
// of the behavior of BitVector.Take() returning 0 when the end of
|
|
||||||
// the BitVector has been reached. This has the downside of not
|
|
||||||
// being able to detect corrupt encodings.
|
|
||||||
//
|
|
||||||
// The passed []byte should be packed in LSB0 bit numbering
|
|
||||||
func Decode(buf []byte) (ints []uint64, err error) {
|
|
||||||
if len(buf) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
v := bitvector.NewBitVector(buf, bitvector.LSB0)
|
|
||||||
take := v.Iterator(bitvector.LSB0)
|
|
||||||
|
|
||||||
// Read version and check
|
|
||||||
// Version check
|
|
||||||
ver := take(2)
|
|
||||||
if ver != Version {
|
|
||||||
return nil, ErrWrongVersion
|
|
||||||
}
|
|
||||||
|
|
||||||
curIdx := uint64(0)
|
|
||||||
curBit := take(1)
|
|
||||||
var runLength int
|
|
||||||
done := false
|
|
||||||
|
|
||||||
for done == false {
|
|
||||||
y := take(1)
|
|
||||||
switch y {
|
|
||||||
case 1:
|
|
||||||
runLength = 1
|
|
||||||
case 0:
|
|
||||||
val := take(1)
|
|
||||||
|
|
||||||
if val == 1 {
|
|
||||||
// short block
|
|
||||||
runLength = int(take(4))
|
|
||||||
} else {
|
|
||||||
// long block
|
|
||||||
var buf []byte
|
|
||||||
for {
|
|
||||||
b := take(8)
|
|
||||||
buf = append(buf, b)
|
|
||||||
|
|
||||||
if b&0x80 == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
// 10 bytes is required to store math.MaxUint64 in a uvarint
|
|
||||||
if len(buf) > 10 {
|
|
||||||
return nil, ErrDecode
|
|
||||||
}
|
|
||||||
}
|
|
||||||
x, _ := binary.Uvarint(buf)
|
|
||||||
|
|
||||||
if x == 0 {
|
|
||||||
done = true
|
|
||||||
}
|
|
||||||
runLength = int(x)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if curBit == 1 {
|
|
||||||
for j := 0; j < runLength; j++ {
|
|
||||||
ints = append(ints, curIdx+uint64(j))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
curIdx += uint64(runLength)
|
|
||||||
curBit = 1 - curBit
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// RunLengths transforms integers into its bit-set-run-length representation.
|
|
||||||
//
|
|
||||||
// A set of unsigned integers { 0, 2, 4, 5, 6 } can be thought of as
|
|
||||||
// indices into a bitset { 1, 0, 1, 0, 1, 1, 1 } where bitset[index] == 1.
|
|
||||||
//
|
|
||||||
// The bit set run lengths of this set would then be { 1, 1, 1, 1, 3 },
|
|
||||||
// representing lengths of runs alternating between 1 and 0, starting
|
|
||||||
// with a first bit of 1.
|
|
||||||
//
|
|
||||||
// Duplicated numbers are ignored.
|
|
||||||
//
|
|
||||||
// This is a helper function for Encode()
|
|
||||||
func RunLengths(ints []uint64) (firstBit byte, runs []uint64) {
|
|
||||||
if len(ints) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sort our incoming numbers
|
|
||||||
sort.Slice(ints, func(i, j int) bool { return ints[i] < ints[j] })
|
|
||||||
|
|
||||||
prev := ints[0]
|
|
||||||
|
|
||||||
// Initialize our return value
|
|
||||||
if prev == 0 {
|
|
||||||
firstBit = 1
|
|
||||||
}
|
|
||||||
|
|
||||||
if firstBit == 0 {
|
|
||||||
// first run of zeroes
|
|
||||||
runs = append(runs, prev)
|
|
||||||
}
|
|
||||||
runs = append(runs, 1)
|
|
||||||
|
|
||||||
for _, cur := range ints[1:] {
|
|
||||||
delta := cur - prev
|
|
||||||
switch {
|
|
||||||
case delta == 1:
|
|
||||||
runs[len(runs)-1]++
|
|
||||||
case delta > 1:
|
|
||||||
// add run of zeroes if there is a gap
|
|
||||||
runs = append(runs, delta-1)
|
|
||||||
runs = append(runs, 1)
|
|
||||||
default:
|
|
||||||
// repeated number?
|
|
||||||
}
|
|
||||||
prev = cur
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
213
extern/rleplus/rleplus_test.go
vendored
213
extern/rleplus/rleplus_test.go
vendored
@ -1,213 +0,0 @@
package rleplus_test

import (
    "fmt"
    "math"
    "sort"
    "testing"

    "github.com/filecoin-project/lotus/extern/rleplus"
    bitvector "github.com/filecoin-project/lotus/extern/rleplus/internal"
    "gotest.tools/assert"
)

func TestRleplus(t *testing.T) {

    t.Run("Encode", func(t *testing.T) {
        // Encode an intset
        ints := []uint64{
            // run of 1
            0,
            // gap of 1
            // run of 1
            2,
            // gap of 1
            // run of 3
            4, 5, 6,
            // gap of 4
            // run of 17
            11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
        }

        expectedBits := []byte{
            0, 0, // version
            1, // first bit
            1, // run of 1
            1, // gap of 1
            1, // run of 1
            1, // gap of 1
            0, 1, 1, 1, 0, 0, // run of 3
            0, 1, 0, 0, 1, 0, // gap of 4

            // run of 17 < 0 0 (varint) >
            0, 0,
            1, 0, 0, 0, 1, 0, 0, 0,
        }

        v := bitvector.BitVector{}
        for _, bit := range expectedBits {
            v.Push(bit)
        }
        actualBytes, _, err := rleplus.Encode(ints)
        assert.NilError(t, err)

        assert.Equal(t, len(v.Buf), len(actualBytes))
        for idx, expected := range v.Buf {
            assert.Equal(
                t,
                fmt.Sprintf("%08b", expected),
                fmt.Sprintf("%08b", actualBytes[idx]),
            )
        }
    })

    t.Run("Encode allows all runs sizes possible uint64", func(t *testing.T) {
        // create a run of math.MaxUint64
        ints := []uint64{math.MaxUint64}

        // There would be 64 bits(1) for the UvarInt, totally 9 bytes.
        expected := []byte{0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x20}
        encodeBytes, _, err := rleplus.Encode(ints)
        assert.NilError(t, err)
        for idx, v := range encodeBytes {
            assert.Equal(
                t,
                fmt.Sprintf("%8b", v),
                fmt.Sprintf("%8b", expected[idx]),
            )
        }
    })

    t.Run("Encode for some big numbers", func(t *testing.T) {
        // create a run of math.MaxUint64
        ints := make([]uint64, 1024)

        // ints {2^63 .. 2^63+1023}
        for i := uint64(0); i < 1024; i++ {
            ints[i] = uint64(1)<<63 + i
        }

        expected := []byte{0x00, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x30, 0x00, 0x40, 0x04}
        encodeBytes, _, err := rleplus.Encode(ints)
        assert.NilError(t, err)
        for idx, v := range encodeBytes {
            // fmt.Println(v, expected[idx])
            assert.Equal(
                t,
                fmt.Sprintf("%8b", v),
                fmt.Sprintf("%8b", expected[idx]),
            )
        }
    })

    t.Run("Decode", func(t *testing.T) {
        testCases := [][]uint64{
            {},
            {1},
            {0},
            {0, 1, 2, 3},
            {
                // run of 1
                0,
                // gap of 1
                // run of 1
                2,
                // gap of 1
                // run of 3
                4, 5, 6,
                // gap of 4
                // run of 17
                11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
            },
        }

        for _, tc := range testCases {
            encoded, _, err := rleplus.Encode(tc)
            assert.NilError(t, err)

            result, err := rleplus.Decode(encoded)
            assert.NilError(t, err)

            sort.Slice(tc, func(i, j int) bool { return tc[i] < tc[j] })
            sort.Slice(result, func(i, j int) bool { return result[i] < result[j] })

            assert.Equal(t, len(tc), len(result))

            for idx, expected := range tc {
                assert.Equal(t, expected, result[idx])
            }
        }
    })

    t.Run("Decode version check", func(t *testing.T) {
        _, err := rleplus.Decode([]byte{0xff})
        assert.Error(t, err, "invalid RLE+ version")
    })

    t.Run("Decode returns an error with a bad encoding", func(t *testing.T) {
        // create an encoding with a buffer with a run which is too long
        _, err := rleplus.Decode([]byte{0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff})
        assert.Error(t, err, "invalid encoding for RLE+ version 0")
    })

    t.Run("outputs same as reference implementation", func(t *testing.T) {
        // Encoding bitvec![LittleEndian; 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        // in the Rust reference implementation gives an encoding of [223, 145, 136, 0] (without version field)
        // The bit vector is equivalent to the integer set { 0, 2, 4, 5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27 }

        // This is the above reference output with a version header "00" manually added
        referenceEncoding := []byte{124, 71, 34, 2}

        expectedNumbers := []uint64{0, 2, 4, 5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}

        encoded, _, err := rleplus.Encode(expectedNumbers)
        assert.NilError(t, err)

        // Our encoded bytes are the same as the ref bytes
        assert.Equal(t, len(referenceEncoding), len(encoded))
        for idx, expected := range referenceEncoding {
            assert.Equal(t, expected, encoded[idx])
        }

        decoded, err := rleplus.Decode(referenceEncoding)
        assert.NilError(t, err)

        // Our decoded integers are the same as expected
        sort.Slice(decoded, func(i, j int) bool { return decoded[i] < decoded[j] })
        assert.Equal(t, len(expectedNumbers), len(decoded))
        for idx, expected := range expectedNumbers {
            assert.Equal(t, expected, decoded[idx])
        }
    })

    t.Run("RunLengths", func(t *testing.T) {
        testCases := []struct {
            ints  []uint64
            first byte
            runs  []uint64
        }{
            // empty
            {},

            // leading with ones
            {[]uint64{0}, 1, []uint64{1}},
            {[]uint64{0, 1}, 1, []uint64{2}},
            {[]uint64{0, 0xffffffff, 0xffffffff + 1}, 1, []uint64{1, 0xffffffff - 1, 2}},

            // leading with zeroes
            {[]uint64{1}, 0, []uint64{1, 1}},
            {[]uint64{2}, 0, []uint64{2, 1}},
            {[]uint64{10, 11, 13, 20}, 0, []uint64{10, 2, 1, 1, 6, 1}},
            {[]uint64{10, 11, 11, 13, 20, 10, 11, 13, 20}, 0, []uint64{10, 2, 1, 1, 6, 1}},
        }

        for _, testCase := range testCases {
            first, runs := rleplus.RunLengths(testCase.ints)
            assert.Equal(t, testCase.first, first)
            assert.Equal(t, len(testCase.runs), len(runs))
            for idx, runLength := range testCase.runs {
                assert.Equal(t, runLength, runs[idx])
            }
        }
    })
}
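The removed tests above also document the package's public surface: Encode takes a set of uint64s and returns the RLE+ bytes (plus a second value the tests ignore), and Decode reverses it. A minimal round-trip sketch against that pre-removal API is shown below; the import path only exists before this commit, and the meaning of Encode's second return value is not asserted here.

package main

import (
    "fmt"
    "log"

    "github.com/filecoin-project/lotus/extern/rleplus"
)

func main() {
    // An integer set in the style of the removed Encode test: runs of ones separated by gaps.
    ints := []uint64{0, 2, 4, 5, 6, 11, 12, 13}

    encoded, _, err := rleplus.Encode(ints) // second return value ignored, as in the removed tests
    if err != nil {
        log.Fatal(err)
    }

    decoded, err := rleplus.Decode(encoded)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(decoded) // the same set as ints; the removed tests sort before comparing
}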
@ -28,7 +28,7 @@ type PreSeal struct {
 type Miner struct {
     Owner  address.Address
     Worker address.Address
-    PeerId peer.ID
+    PeerId peer.ID //nolint:golint

     MarketBalance abi.TokenAmount
     PowerBalance  abi.TokenAmount
@ -11,7 +11,7 @@ import (
     madns "github.com/multiformats/go-multiaddr-dns"
 )

-// parseAddresses is a function that takes in a slice of string peer addresses
+// ParseAddresses is a function that takes in a slice of string peer addresses
 // (multiaddr + peerid) and returns a slice of properly constructed peers
 func ParseAddresses(ctx context.Context, addrs []string) ([]peer.AddrInfo, error) {
     // resolve addresses
@ -57,7 +57,7 @@ func (crt *incrt) Read(buf []byte) (int, error) {

     n, err := crt.rd.Read(buf)

-    crt.rd.SetReadDeadline(time.Time{})
+    _ = crt.rd.SetReadDeadline(time.Time{})
     if err == nil {
         dur := now().Sub(start)
         crt.wait -= dur
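Most errcheck fixes in this commit use one of two patterns: explicitly discarding the error with a blank assignment, or leaving the call as-is and annotating it for the linter. A small illustrative sketch of both, using net.Conn purely as a stand-in type rather than anything from the code above:

package main

import (
    "net"
    "time"
)

// quietPatterns shows the two ways this commit silences errcheck on calls whose
// errors are intentionally ignored. conn is only an illustrative value.
func quietPatterns(conn net.Conn) {
    // Pattern 1: explicitly discard the error with a blank assignment.
    _ = conn.SetReadDeadline(time.Now().Add(10 * time.Second))

    // Pattern 2: keep the call unchanged and annotate it for the linter.
    defer conn.Close() //nolint:errcheck
}

func main() {}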
@ -6,6 +6,7 @@ import (
     logging "github.com/ipfs/go-log/v2"
 )

+//nolint:gosec
 func SetupLogLevels() {
     if _, set := os.LookupEnv("GOLOG_LOG_LEVEL"); !set {
         logging.SetLogLevel("*", "INFO")
@ -5,7 +5,7 @@ import (
     "github.com/filecoin-project/specs-actors/actors/abi"
     "github.com/filecoin-project/specs-actors/actors/abi/big"
     "github.com/filecoin-project/specs-actors/actors/builtin/market"
-    peer "github.com/libp2p/go-libp2p-peer"
+    peer "github.com/libp2p/go-libp2p-core/peer"

     "github.com/filecoin-project/go-address"
     "github.com/filecoin-project/go-fil-markets/storagemarket"
@ -2,12 +2,13 @@ package config

 import (
     "encoding/json"
-    "github.com/filecoin-project/sector-storage/stores"
     "io"
     "io/ioutil"
     "os"

     "golang.org/x/xerrors"
+
+    "github.com/filecoin-project/sector-storage/stores"
 )

 func StorageFromFile(path string, def *stores.StorageConfig) (*stores.StorageConfig, error) {
@ -64,7 +64,7 @@ func (hs *Service) HandleStream(s inet.Stream) {
     var hmsg HelloMessage
     if err := cborutil.ReadCborRPC(s, &hmsg); err != nil {
         log.Infow("failed to read hello message, disconnecting", "error", err)
-        s.Conn().Close()
+        _ = s.Conn().Close()
         return
     }
     arrived := time.Now()
@ -76,11 +76,11 @@ func (hs *Service) HandleStream(s inet.Stream) {

     if hmsg.GenesisHash != hs.syncer.Genesis.Cids()[0] {
         log.Warnf("other peer has different genesis! (%s)", hmsg.GenesisHash)
-        s.Conn().Close()
+        _ = s.Conn().Close()
         return
     }
     go func() {
-        defer s.Close()
+        defer s.Close() //nolint:errcheck

         sent := time.Now()
         msg := &LatencyMessage{
@ -152,10 +152,10 @@ func (hs *Service) SayHello(ctx context.Context, pid peer.ID) error {
     }

     go func() {
-        defer s.Close()
+        defer s.Close() //nolint:errcheck

         lmsg := &LatencyMessage{}
-        s.SetReadDeadline(time.Now().Add(10 * time.Second))
+        _ = s.SetReadDeadline(time.Now().Add(10 * time.Second))
         err := cborutil.ReadCborRPC(s, lmsg)
         if err != nil {
             log.Infow("reading latency message", "error", err)
@ -427,7 +427,7 @@ func (a *API) ClientGenCar(ctx context.Context, ref api.FileRef, outputPath stri
         return err
     }

-    defer bufferedDS.Remove(ctx, c)
+    defer bufferedDS.Remove(ctx, c) //nolint:errcheck
     ssb := builder.NewSelectorSpecBuilder(basicnode.Style.Any)

     // entire DAG selector
@ -435,7 +435,7 @@ func (a *API) ClientGenCar(ctx context.Context, ref api.FileRef, outputPath stri
         ssb.ExploreAll(ssb.ExploreRecursiveEdge())).Node()

     f, err := os.Create(outputPath)
-    defer f.Close()
+    defer f.Close() //nolint:errcheck
     if err != nil {
         return err
     }
@ -425,7 +425,7 @@ func (a *ChainAPI) ChainExport(ctx context.Context, tsk types.TipSetKey) (<-chan
     r, w := io.Pipe()
     out := make(chan []byte)
     go func() {
-        defer w.Close()
+        defer w.Close() //nolint:errcheck // it is a pipe
         if err := a.Chain.Export(ctx, ts, w); err != nil {
             log.Errorf("chain export call failed: %s", err)
             return
@ -17,7 +17,7 @@ import (
 )

 type remoteWorker struct {
-    api.WorkerApi
+    api.WorkerAPI
     closer jsonrpc.ClientCloser
 }

@ -34,7 +34,7 @@ func RecordValidator(ps peerstore.Peerstore) record.Validator {
     }
 }

-const JWTSecretName = "auth-jwt-private"
+const JWTSecretName = "auth-jwt-private" //nolint:gosec

 type jwtPayload struct {
     Allow []auth.Permission
@ -18,7 +18,7 @@ func AddrFilters(filters []string) func() (opts Libp2pOpts, err error) {
         if err != nil {
             return opts, fmt.Errorf("incorrectly formatted address filter in config: %s", s)
         }
-        opts.Opts = append(opts.Opts, libp2p.FilterAddresses(f))
+        opts.Opts = append(opts.Opts, libp2p.FilterAddresses(f)) //golint:staticcheck
     }
     return opts, nil
 }
@ -72,7 +72,7 @@ func GetParams(sbc *ffiwrapper.Config) error {
         return err
     }

-    if err := paramfetch.GetParams(build.ParametersJson(), uint64(ssize)); err != nil {
+    if err := paramfetch.GetParams(build.ParametersJSON(), uint64(ssize)); err != nil {
         return xerrors.Errorf("fetching proof parameters: %w", err)
     }

@ -167,12 +167,10 @@ func StorageMiner(mctx helpers.MetricsCtx, lc fx.Lifecycle, api lapi.FullNode, h
 func HandleRetrieval(host host.Host, lc fx.Lifecycle, m retrievalmarket.RetrievalProvider) {
     lc.Append(fx.Hook{
         OnStart: func(context.Context) error {
-            m.Start()
-            return nil
+            return m.Start()
         },
         OnStop: func(context.Context) error {
-            m.Stop()
-            return nil
+            return m.Stop()
         },
     })
 }
@ -182,12 +180,10 @@ func HandleDeals(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, h sto

     lc.Append(fx.Hook{
         OnStart: func(context.Context) error {
-            h.Start(ctx)
-            return nil
+            return h.Start(ctx)
         },
         OnStop: func(context.Context) error {
-            h.Stop()
-            return nil
+            return h.Stop()
         },
     })
 }
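The two lifecycle hunks above stop swallowing the component's error and return it from the fx hook instead, so fx can abort startup or shutdown when the component fails. A minimal sketch of the same hook shape, with a made-up component type standing in for the real providers:

package main

import (
    "context"

    "go.uber.org/fx"
)

// fakeProvider stands in for the retrieval provider / deal handler; it is not
// part of the real code, only an illustration of the hook shape.
type fakeProvider struct{}

func (fakeProvider) Start() error { return nil }
func (fakeProvider) Stop() error  { return nil }

func hook(m fakeProvider) fx.Hook {
    return fx.Hook{
        // Returning the error lets fx abort startup/shutdown on failure,
        // instead of discarding it with `m.Start(); return nil`.
        OnStart: func(context.Context) error { return m.Start() },
        OnStop:  func(context.Context) error { return m.Stop() },
    }
}

func main() {
    _ = hook(fakeProvider{})
}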
@ -84,9 +84,11 @@ func testStorageNode(ctx context.Context, t *testing.T, waddr address.Address, a

     nic := storedcounter.New(ds, datastore.NewKey(modules.StorageCounterDSPrefix))
     for i := 0; i < nGenesisPreseals; i++ {
-        nic.Next()
+        _, err := nic.Next()
+        require.NoError(t, err)
     }
-    nic.Next()
+    _, err = nic.Next()
+    require.NoError(t, err)

     err = lr.Close()
     require.NoError(t, err)
@ -3,7 +3,6 @@ package repo
 import (
     "encoding/json"
     "fmt"
-    "github.com/filecoin-project/sector-storage/stores"
     "io"
     "io/ioutil"
     "os"
@ -21,6 +20,8 @@ import (
     "github.com/multiformats/go-multiaddr"
     "golang.org/x/xerrors"

+    "github.com/filecoin-project/sector-storage/stores"
+
     "github.com/filecoin-project/lotus/chain/types"
     "github.com/filecoin-project/lotus/node/config"
 )
@ -115,7 +115,7 @@ func (ps *Store) ListChannels() ([]address.Address, error) {
     if err != nil {
         return nil, err
     }
-    defer res.Close()
+    defer res.Close() //nolint:errcheck

     var out []address.Address
     for {
@ -144,7 +144,7 @@ func (ps *Store) findChan(filter func(*ChannelInfo) bool) (address.Address, erro
     if err != nil {
         return address.Undef, err
     }
-    defer res.Close()
+    defer res.Close() //nolint:errcheck

     var ci ChannelInfo

@ -107,14 +107,13 @@ func (m *Miner) Run(ctx context.Context) error {
     pcp := sealing.NewBasicPreCommitPolicy(adaptedAPI, 10000000, md.PeriodStart%miner.WPoStProvingPeriod)
     m.sealing = sealing.New(adaptedAPI, NewEventsAdapter(evts), m.maddr, m.ds, m.sealer, m.sc, m.verif, &pcp)

-    go m.sealing.Run(ctx)
+    go m.sealing.Run(ctx) //nolint:errcheck // logged intside the function

     return nil
 }

 func (m *Miner) Stop(ctx context.Context) error {
-    defer m.sealing.Stop(ctx)
-    return nil
+    return m.sealing.Stop(ctx)
 }

 func (m *Miner) runPreflightChecks(ctx context.Context) error {
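The goroutine above keeps a //nolint:errcheck annotation because the error is already logged inside the function it starts. An alternative pattern, sketched here with generic names rather than the miner's actual types, is a small wrapper that logs the background error explicitly instead of dropping it:

package main

import (
    "context"
    "errors"
    "log"
    "time"
)

// runLogged wraps a fire-and-forget call so its error is handled (here just
// logged) instead of being discarded behind a nolint annotation.
func runLogged(ctx context.Context, f func(context.Context) error) {
    go func() {
        if err := f(ctx); err != nil {
            log.Printf("background task failed: %s", err)
        }
    }()
}

func main() {
    runLogged(context.Background(), func(context.Context) error {
        return errors.New("example failure")
    })
    time.Sleep(100 * time.Millisecond) // crude wait so the example's log line appears
}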
|
Loading…
Reference in New Issue
Block a user