Merge remote-tracking branch 'origin/master' into next

Łukasz Magiera 2020-06-16 12:04:29 +02:00
commit ab4c11c096
8 changed files with 85 additions and 47 deletions


@ -45,6 +45,10 @@ var log = logging.Logger("gen")
const msgsPerBlock = 20
var ValidWpostForTesting = []abi.PoStProof{{
ProofBytes: []byte("valid proof"),
}}
type ChainGen struct {
msgsPerBlock int
@ -533,9 +537,7 @@ func (wpp *wppProvider) GenerateCandidates(ctx context.Context, _ abi.PoStRandom
}
func (wpp *wppProvider) ComputeProof(context.Context, []abi.SectorInfo, abi.PoStRandomness) ([]abi.PoStProof, error) {
return []abi.PoStProof{{
ProofBytes: []byte("valid proof"),
}}, nil
return ValidWpostForTesting, nil
}
func IsRoundWinner(ctx context.Context, ts *types.TipSet, round abi.ChainEpoch,


@ -47,6 +47,7 @@ import (
var log = logging.Logger("chainstore")
var chainHeadKey = dstore.NewKey("head")
var blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation")
// ReorgNotifee represents a callback that gets called upon reorgs.
type ReorgNotifee func(rev, app []*types.TipSet) error
@ -217,6 +218,22 @@ func (cs *ChainStore) SubscribeHeadChanges(f ReorgNotifee) {
cs.reorgNotifeeCh <- f
}
func (cs *ChainStore) IsBlockValidated(ctx context.Context, blkid cid.Cid) (bool, error) {
key := blockValidationCacheKeyPrefix.Instance(blkid.String())
return cs.ds.Has(key)
}
func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error {
key := blockValidationCacheKeyPrefix.Instance(blkid.String())
if err := cs.ds.Put(key, []byte{0}); err != nil {
return xerrors.Errorf("cache block validation: %w", err)
}
return nil
}
func (cs *ChainStore) SetGenesis(b *types.BlockHeader) error {
ts, err := types.NewTipSet([]*types.BlockHeader{b})
if err != nil {


@ -528,7 +528,24 @@ func blockSanityChecks(h *types.BlockHeader) error {
}
// ValidateBlock should match up with 'Semantical Validation' in validation.md in the spec
func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) error {
func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (err error) {
defer func() {
// b.Cid() could panic for empty blocks that are used in tests.
if rerr := recover(); rerr != nil {
err = xerrors.Errorf("validate block panic: %w", rerr)
return
}
}()
isValidated, err := syncer.store.IsBlockValidated(ctx, b.Cid())
if err != nil {
return xerrors.Errorf("check block validation cache %s: %w", b.Cid(), err)
}
if isValidated {
return nil
}
validationStart := time.Now()
defer func() {
dur := time.Since(validationStart)
@ -649,7 +666,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) err
return xerrors.Errorf("could not draw randomness: %w", err)
}
if err := gen.VerifyVRF(ctx, waddr, vrfBase, h.ElectionProof.VRFProof); err != nil {
if err := VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.ElectionProof.VRFProof); err != nil {
return xerrors.Errorf("validating block election proof failed: %w", err)
}
@ -710,7 +727,7 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) err
return xerrors.Errorf("failed to compute vrf base for ticket: %w", err)
}
err = gen.VerifyVRF(ctx, waddr, vrfBase, h.Ticket.VRFProof)
err = VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.Ticket.VRFProof)
if err != nil {
return xerrors.Errorf("validating block tickets failed: %w", err)
}
@ -758,7 +775,11 @@ func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) err
}
}
return merr
if err := syncer.store.MarkBlockAsValidated(ctx, b.Cid()); err != nil {
return xerrors.Errorf("caching block validation %s: %w", b.Cid(), err)
}
return nil
}
func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error {
@ -1326,12 +1347,12 @@ func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet) error
return nil
}
func VerifyElectionPoStVRF(ctx context.Context, evrf []byte, rand []byte, worker address.Address) error {
if err := gen.VerifyVRF(ctx, worker, rand, evrf); err != nil {
return xerrors.Errorf("failed to verify post_randomness vrf: %w", err)
func VerifyElectionPoStVRF(ctx context.Context, worker address.Address, rand []byte, evrf []byte) error {
if build.InsecurePoStValidation {
return nil
} else {
return gen.VerifyVRF(ctx, worker, rand, evrf)
}
return nil
}
func (syncer *Syncer) State() []SyncerState {


@ -113,7 +113,12 @@ var walletBalance = &cli.Command{
return err
}
fmt.Printf("%s\n", types.FIL(balance))
if balance.Equals(types.NewInt(0)) {
fmt.Printf("%s (warning: may display 0 if chain sync in progress)\n", types.FIL(balance))
} else {
fmt.Printf("%s\n", types.FIL(balance))
}
return nil
},
}


@ -72,7 +72,7 @@ func init() {
uts := head.MinTimestamp() + uint64(build.BlockDelay)
nheight := head.Height() + 1
blk, err := api.MinerCreateBlock(ctx, &lapi.BlockTemplate{
addr, head.Key(), ticket, nil, nil, msgs, nheight, uts, nil,
addr, head.Key(), ticket, &types.ElectionProof{}, nil, msgs, nheight, uts, gen.ValidWpostForTesting,
})
if err != nil {
return xerrors.Errorf("creating block: %w", err)


@ -16,6 +16,8 @@ The setup below is a minimal example for sealing 32 GiB sectors on Lotus:
Note that 1GB sectors don't require specs as high as this, but they are likely to be removed as we improve the performance of 32GB sector sealing.
For the first part of the sealing process, AMD CPUs are __highly recommended__, because of the `Intel SHA Extensions` instruction set that has been available there since the `Zen` microarchitecture. Hence, AMD CPUs seem to perform much better on the testnet than other CPUs. Contrary to what the name implies, this extended instruction set is not available on recent Intel desktop/server chips.
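On Linux you can quickly confirm whether a CPU exposes these extensions by looking for the `sha_ni` flag (a minimal check using standard tools, not specific to Lotus):

```sh
# Prints "sha_ni" if the CPU advertises the SHA extensions, prints nothing otherwise
grep -m1 -o 'sha_ni' /proc/cpuinfo
```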
## Testnet discoveries
- If you only have 128GiB of RAM, enabling 256GB of **NVMe** swap on an SSD will help you avoid out-of-memory issues while mining (see the sketch below).
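One way to set up such a swap file on Linux is sketched below, assuming the NVMe SSD is mounted at `/mnt/nvme` (adjust the path and size to your system):

```sh
sudo fallocate -l 256G /mnt/nvme/swapfile   # reserve 256GB on the NVMe drive
sudo chmod 600 /mnt/nvme/swapfile           # permissions required by swapon
sudo mkswap /mnt/nvme/swapfile              # format the file as swap
sudo swapon /mnt/nvme/swapfile              # enable it for the current session
```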
@ -33,7 +35,7 @@ GPUs are a must for getting **block rewards**. Here are a few that have been con
## Testing other GPUs
If you want to test a GPU that is not explicitly supported, use the following global**environment variable**:
If you want to test a GPU that is not explicitly supported, use the following global **environment variable**:
```sh
BELLMAN_CUSTOM_GPU="<NAME>:<NUMBER_OF_CORES>"


@ -1,6 +1,6 @@
# Lotus Seal Worker
The **Lotus Seal Worker** is an extra process that can offload heavy processing tasks from your **Lotus Storage Miner**. It can be run on the same machine as your `lotus-storage-miner`, or on another machine communicating over a fast network.
The **Lotus Seal Worker** is an extra process that can offload heavy processing tasks from your **Lotus Storage Miner**. The sealing process automatically runs in the **Lotus Storage Miner** process, but you can use the Seal Worker on another machine communicating over a fast network to free up resources on the machine running the mining process.
## Note: Using the Lotus Seal Worker from China
@ -12,39 +12,13 @@ IPFS_GATEWAY="https://proof-parameters.s3.cn-south-1.jdcloud-oss.com/ipfs/"
## Get Started
Make sure that the `lotus-seal-worker` is installed by running:
Make sure that the `lotus-seal-worker` is compiled and installed by running:
```sh
make lotus-seal-worker
```
## Running Alongside Storage Miner
You may wish to run the **Lotus Seal Worker** on the same computer as the **Lotus Storage Miner**. This allows you to easily set the process priority of the sealing tasks to be lower than the priority of your more important storage miner process.
To do this, simply run `lotus-seal-worker run`, and the seal worker will automatically pick up the correct authentication tokens from the `LOTUS_STORAGE_PATH` miner repository.
To check that the **Lotus Seal Worker** is properly connected to your storage miner, run `lotus-storage-miner info` and check that the remote worker count has increased.
```sh
why@computer ~/lotus> lotus-storage-miner workers list
Worker 0, host computer
CPU: [ ] 0 core(s) in use
RAM: [|||||||||||||||||| ] 28% 18.1 GiB/62.7 GiB
VMEM: [|||||||||||||||||| ] 28% 18.1 GiB/62.7 GiB
GPU: GeForce RTX 2080, not used
Worker 1, host computer
CPU: [ ] 0 core(s) in use
RAM: [|||||||||||||||||| ] 28% 18.1 GiB/62.7 GiB
VMEM: [|||||||||||||||||| ] 28% 18.1 GiB/62.7 GiB
GPU: GeForce RTX 2080, not used
```
## Running Over the Network
Warning: This setup is a little more complex than running it locally.
To use an entirely separate computer for sealing tasks, you will want to run the `lotus-seal-worker` on a separate machine, connected to your **Lotus Storage Miner** via the local area network.
## Setting up the Storage Miner
First, you will need to ensure your `lotus-storage-miner`'s API is accessible over the network.
@ -62,7 +36,7 @@ To make your node accessible over the local area network, you will need to deter
A more permissive and less secure option is to change it to `0.0.0.0`. This will allow anyone who can connect to your computer on that port to access the [API](https://docs.lotu.sh/en+api). They will still need an auth token.
`RemoteListenAddress` must be set to an address which other nodes on your network will be able to reach
`RemoteListenAddress` must be set to an address which other nodes on your network will be able to reach.
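For illustration, the relevant entries in the miner's `config.toml` could look like the following sketch; `192.168.2.10:2345` is a placeholder LAN address and the exact values depend on your network:

```toml
[API]
  # Listen on all interfaces so machines on the LAN can reach the miner API
  ListenAddress = "/ip4/0.0.0.0/tcp/2345/http"
  # Address that remote workers will use to reach this miner
  RemoteListenAddress = "192.168.2.10:2345"
```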
Next, you will need to [create an authentication token](https://docs.lotu.sh/en+api-scripting-support#generate-a-jwt-46). All Lotus APIs require authentication tokens to keep your processes secure against attackers attempting to make unauthenticated requests to them.
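The linked page describes the details; on the miner this typically comes down to a single command along these lines (assuming the `auth create-token` subcommand covered there):

```sh
lotus-storage-miner auth create-token --perm admin
```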
@ -73,9 +47,11 @@ On the machine that will run `lotus-seal-worker`, set the `STORAGE_API_INFO` env
Once this is set, run:
```sh
lotus-seal-worker run
lotus-seal-worker run --address 192.168.2.10:2345
```
Replace `192.168.2.10:2345` with the proper IP and port.
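As an example, the environment variable could be set like this before starting the worker; the token and address are placeholders, and the `token:multiaddress` layout is an assumption here:

```sh
# Substitute the token you created and your miner's API address
export STORAGE_API_INFO="<auth token>:/ip4/192.168.2.10/tcp/2345/http"
```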
To check that the **Lotus Seal Worker** is connected to your **Lotus Storage Miner**, run `lotus-storage-miner workers list` and check that the remote worker count has increased.
```sh
@ -92,3 +68,14 @@ Worker 1, host othercomputer
VMEM: [|||||||||||||| ] 23% 14 GiB/62.7 GiB
GPU: GeForce RTX 2080, not used
```
### Running locally for manually managing process priority
You can also run the **Lotus Seal Worker** on the same machine as your **Lotus Storage Miner**, so you can manually manage the process priority.
To do so, you first have to __disable all seal task types__ in the miner config. This is important to prevent conflicts between the two processes.
You can then run the seal worker on your local loopback interface:
```sh
lotus-seal-worker run --address 127.0.0.1:2345
```
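With both processes on the same machine you can then lower the worker's scheduling priority yourself, for example with the standard `nice` utility (one possible approach, not something the Lotus tooling mandates):

```sh
nice -n 19 lotus-seal-worker run --address 127.0.0.1:2345
```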


@ -40,7 +40,11 @@ func (a *WalletAPI) WalletBalance(ctx context.Context, addr address.Address) (ty
}
func (a *WalletAPI) WalletSign(ctx context.Context, k address.Address, msg []byte) (*crypto.Signature, error) {
return a.Wallet.Sign(ctx, k, msg)
keyAddr, err := a.StateManager.ResolveToKeyAddress(ctx, k, nil)
if err != nil {
return nil, xerrors.Errorf("failed to resolve ID address: %w", keyAddr)
}
return a.Wallet.Sign(ctx, keyAddr, msg)
}
func (a *WalletAPI) WalletSignMessage(ctx context.Context, k address.Address, msg *types.Message) (*types.SignedMessage, error) {