Compare commits: ae2cc67f96 ... 5de49e53bc

19 commits:

5de49e53bc
10f7b6ed5f
c2dd67403b
1b6bffd339
914a65cea6
46992f0fd2
cee77aa415
aa76a45086
15f5f4745d
10877d2e66
73783fcdfb
560826d5c6
e461e672ea
e2712a9157
1735a6f308
75c7f16765
2e372edde4
ff21559244
0a29aa0b6f
@@ -542,6 +542,7 @@ Lotus-workers can now be built to leverage the SupraSeal C2 sealing optimization
- fix: lotus-provider: lotus-provider msg sending ([filecoin-project/lotus#11480](https://github.com/filecoin-project/lotus/pull/11480))
- fix: lotus-provider: Fix winning PoSt ([filecoin-project/lotus#11483](https://github.com/filecoin-project/lotus/pull/11483))
- chore: fix: sql Scan cannot write to an object ([filecoin-project/lotus#11487](https://github.com/filecoin-project/lotus/pull/11487))
- fix: Exclude reverted events in `eth_getLogs` results [filecoin-project/lotus#11318](https://github.com/filecoin-project/lotus/pull/11318)

## Dependencies
- deps: update go-libp2p to v0.28.1 ([filecoin-project/lotus#10998](https://github.com/filecoin-project/lotus/pull/10998))
@@ -335,7 +335,7 @@ type FullNode interface {
WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) //perm:read
// WalletDefaultAddress returns the address marked as default in the wallet.
WalletDefaultAddress(context.Context) (address.Address, error) //perm:write
// WalletSetDefault marks the given address as as the default one.
// WalletSetDefault marks the given address as the default one.
WalletSetDefault(context.Context, address.Address) error //perm:write
// WalletExport returns the private key of an address in the wallet.
WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin
@@ -293,7 +293,7 @@ type FullNode interface {
WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) (bool, error) //perm:read
// WalletDefaultAddress returns the address marked as default in the wallet.
WalletDefaultAddress(context.Context) (address.Address, error) //perm:write
// WalletSetDefault marks the given address as as the default one.
// WalletSetDefault marks the given address as the default one.
WalletSetDefault(context.Context, address.Address) error //perm:write
// WalletExport returns the private key of an address in the wallet.
WalletExport(context.Context, address.Address) (*types.KeyInfo, error) //perm:admin
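The same one-word doc fix appears in both FullNode interface definitions above. As a hedged illustration of the methods being documented, and not code from this changeset, a client might exercise them like this; the endpoint, token, and use of the RPC client helper are assumptions:

```go
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/filecoin-project/lotus/api/client"
)

func main() {
	ctx := context.Background()

	// Assumed local endpoint and JWT; a real setup reads both from the node repo.
	full, closer, err := client.NewFullNodeRPCV1(ctx, "ws://127.0.0.1:1234/rpc/v1",
		http.Header{"Authorization": []string{"Bearer <write-token>"}})
	if err != nil {
		panic(err)
	}
	defer closer()

	// WalletDefaultAddress (perm:write) returns the address marked as default.
	def, err := full.WalletDefaultAddress(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("default wallet address:", def)

	// WalletSetDefault (perm:write) marks the given address as the default one.
	if err := full.WalletSetDefault(ctx, def); err != nil {
		panic(err)
	}
}
```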
@@ -145,10 +145,10 @@ func readEmbeddedBuiltinActorsMetadata(bundle string) ([]*BuiltinActorsMetadata,
)

if !strings.HasPrefix(bundle, "v") {
return nil, xerrors.Errorf("bundle bundle '%q' doesn't start with a 'v'", bundle)
return nil, xerrors.Errorf("bundle '%q' doesn't start with a 'v'", bundle)
}
if !strings.HasSuffix(bundle, archiveExt) {
return nil, xerrors.Errorf("bundle bundle '%q' doesn't end with '%s'", bundle, archiveExt)
return nil, xerrors.Errorf("bundle '%q' doesn't end with '%s'", bundle, archiveExt)
}
version, err := strconv.ParseInt(bundle[1:len(bundle)-len(archiveExt)], 10, 0)
if err != nil {
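The hunk above only removes a duplicated word in the error strings, but the checks around it also document the expected bundle-name shape. Below is a small self-contained sketch of that shape; the archiveExt value is purely hypothetical, since the real constant is defined outside this hunk:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// archiveExt is a stand-in for illustration only; the real constant is not shown in this diff.
const archiveExt = ".tar.zst"

// bundleVersion mirrors the checks above: names must look like "v<N><archiveExt>".
func bundleVersion(bundle string) (int64, error) {
	if !strings.HasPrefix(bundle, "v") {
		return 0, fmt.Errorf("bundle '%q' doesn't start with a 'v'", bundle)
	}
	if !strings.HasSuffix(bundle, archiveExt) {
		return 0, fmt.Errorf("bundle '%q' doesn't end with '%s'", bundle, archiveExt)
	}
	return strconv.ParseInt(bundle[1:len(bundle)-len(archiveExt)], 10, 0)
}

func main() {
	v, err := bundleVersion("v13" + archiveExt)
	fmt.Println(v, err) // 13 <nil>
}
```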
@@ -26382,7 +26382,7 @@
{
"name": "Filecoin.WalletSetDefault",
"description": "```go\nfunc (s *FullNodeStruct) WalletSetDefault(p0 context.Context, p1 address.Address) error {\n\tif s.Internal.WalletSetDefault == nil {\n\t\treturn ErrNotSupported\n\t}\n\treturn s.Internal.WalletSetDefault(p0, p1)\n}\n```",
"summary": "WalletSetDefault marks the given address as as the default one.\n",
"summary": "WalletSetDefault marks the given address as the default one.\n",
"paramStructure": "by-position",
"params": [
{
@@ -37,7 +37,11 @@ func BuildTypeString() string {
}

// BuildVersion is the local build version
<<<<<<< HEAD
const BuildVersion = "1.26.3"
=======
const BuildVersion = "1.27.0-dev"
>>>>>>> tags/v1.27.0-rc1

func UserVersion() string {
if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" {
@@ -26,7 +26,7 @@ var pragmas = []string{
"PRAGMA temp_store = memory",
"PRAGMA mmap_size = 30000000000",
"PRAGMA page_size = 32768",
"PRAGMA auto_vacuum = NONE",
"PRAGMA auto_vacuum = NONE", // not useful until we implement GC
"PRAGMA automatic_index = OFF",
"PRAGMA journal_mode = WAL",
"PRAGMA read_uncommitted = ON",
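For orientation only, a hedged sketch of how a pragma list like the one above is typically applied when the index database is opened. The DSN mirrors the sql.Open call shown later in this diff; the function name is illustrative and the sketch assumes the surrounding file's imports (database/sql, the sqlite3 driver, xerrors):

```go
// openEventDB opens the SQLite-backed event index and applies the pragmas above.
// Sketch only: error handling and lifecycle differ in the real implementation.
func openEventDB(ctx context.Context, path string) (*sql.DB, error) {
	db, err := sql.Open("sqlite3", path+"?mode=rwc")
	if err != nil {
		return nil, xerrors.Errorf("open sqlite3 database: %w", err)
	}
	for _, pragma := range pragmas {
		if _, err := db.ExecContext(ctx, pragma); err != nil {
			_ = db.Close()
			return nil, xerrors.Errorf("exec pragma %q: %w", pragma, err)
		}
	}
	return db, nil
}
```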
@@ -45,8 +45,10 @@ var ddls = []string{
reverted INTEGER NOT NULL
)`,

createIndexEventHeightTipsetKey,
createIndexEventEmitterAddr,
createIndexEventTipsetKeyCid,
createIndexEventHeight,
createIndexEventReverted,

`CREATE TABLE IF NOT EXISTS event_entry (
event_id INTEGER,

@@ -57,7 +59,9 @@ var ddls = []string{
value BLOB NOT NULL
)`,

createIndexEventEntryKey,
createIndexEventEntryIndexedKey,
createIndexEventEntryCodecValue,
createIndexEventEntryEventId,

// metadata containing version of schema
`CREATE TABLE IF NOT EXISTS _meta (

@@ -67,6 +71,7 @@ var ddls = []string{
`INSERT OR IGNORE INTO _meta (version) VALUES (1)`,
`INSERT OR IGNORE INTO _meta (version) VALUES (2)`,
`INSERT OR IGNORE INTO _meta (version) VALUES (3)`,
`INSERT OR IGNORE INTO _meta (version) VALUES (4)`,
}

var (

@@ -74,7 +79,7 @@ var (
)

const (
schemaVersion = 3
schemaVersion = 4

eventExists = `SELECT MAX(id) FROM event WHERE height=? AND tipset_key=? AND tipset_key_cid=? AND emitter_addr=? AND event_index=? AND message_cid=? AND message_index=?`
insertEvent = `INSERT OR IGNORE INTO event(height, tipset_key, tipset_key_cid, emitter_addr, event_index, message_cid, message_index, reverted) VALUES(?, ?, ?, ?, ?, ?, ?, ?)`

@@ -82,9 +87,14 @@ const (
revertEventsInTipset = `UPDATE event SET reverted=true WHERE height=? AND tipset_key=?`
restoreEvent = `UPDATE event SET reverted=false WHERE height=? AND tipset_key=? AND tipset_key_cid=? AND emitter_addr=? AND event_index=? AND message_cid=? AND message_index=?`

createIndexEventHeightTipsetKey = `CREATE INDEX IF NOT EXISTS height_tipset_key ON event (height,tipset_key)`
createIndexEventEmitterAddr = `CREATE INDEX IF NOT EXISTS event_emitter_addr ON event (emitter_addr)`
createIndexEventEntryKey = `CREATE INDEX IF NOT EXISTS event_entry_key_index ON event_entry (key)`
createIndexEventTipsetKeyCid = `CREATE INDEX IF NOT EXISTS event_tipset_key_cid ON event (tipset_key_cid);`
createIndexEventHeight = `CREATE INDEX IF NOT EXISTS event_height ON event (height);`
createIndexEventReverted = `CREATE INDEX IF NOT EXISTS event_reverted ON event (reverted);`

createIndexEventEntryIndexedKey = `CREATE INDEX IF NOT EXISTS event_entry_indexed_key ON event_entry (indexed, key);`
createIndexEventEntryCodecValue = `CREATE INDEX IF NOT EXISTS event_entry_codec_value ON event_entry (codec, value);`
createIndexEventEntryEventId = `CREATE INDEX IF NOT EXISTS event_entry_event_id ON event_entry(event_id);`
)

type EventIndex struct {
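The new index constants above exist to serve the event filter's read patterns (see the migrateToVersion4 doc comment later in this diff). Purely as an illustrative sketch of such a pattern, and not the exact SQL lotus generates, an eth_getLogs-style lookup over this schema could look roughly like this:

```go
// exampleEventFilterQuery is an assumed, simplified query shape: select
// non-reverted events in a height range whose entries match an indexed
// key/codec/value triple. Column names come from the DDL and index
// definitions above; the real filter code composes its SQL dynamically.
const exampleEventFilterQuery = `
SELECT e.id, e.height, e.tipset_key_cid, e.emitter_addr, e.event_index, e.message_cid
FROM event e
JOIN event_entry ee ON ee.event_id = e.id
WHERE e.reverted = 0
  AND e.height BETWEEN ? AND ?
  AND ee.indexed = 1 AND ee.key = ? AND ee.codec = ? AND ee.value = ?`
```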
@@ -237,7 +247,7 @@ func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.C
if err != nil {
return xerrors.Errorf("rows affected: %w", err)
}
log.Infof("cleaned up %d entries that had deleted events\n", nrRowsAffected)
log.Infof("Cleaned up %d entries that had deleted events\n", nrRowsAffected)

// drop the temporary indices after the migration
_, err = tx.ExecContext(ctx, "DROP INDEX IF EXISTS tmp_tipset_key_cid")

@@ -249,11 +259,9 @@ func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.C
return xerrors.Errorf("drop index tmp_height_tipset_key_cid: %w", err)
}

// create the final index on event.height and event.tipset_key
_, err = tx.ExecContext(ctx, createIndexEventHeightTipsetKey)
if err != nil {
return xerrors.Errorf("create index height_tipset_key: %w", err)
}
// original v2 migration introduced an index:
// CREATE INDEX IF NOT EXISTS height_tipset_key ON event (height,tipset_key)
// which has subsequently been removed in v4, so it's omitted here

// increment the schema version to 2 in _meta table.
_, err = tx.ExecContext(ctx, "INSERT OR IGNORE INTO _meta (version) VALUES (2)")

@@ -266,20 +274,7 @@ func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.C
return xerrors.Errorf("commit transaction: %w", err)
}

// during the migration, we have likely increased the WAL size a lot, so lets do some
// simple DB administration to free up space (VACUUM followed by truncating the WAL file)
// as this would be a good time to do it when no other writes are happening
log.Infof("Performing DB vacuum and wal checkpointing to free up space after the migration")
_, err = ei.db.ExecContext(ctx, "VACUUM")
if err != nil {
log.Warnf("error vacuuming database: %s", err)
}
_, err = ei.db.ExecContext(ctx, "PRAGMA wal_checkpoint(TRUNCATE)")
if err != nil {
log.Warnf("error checkpointing wal: %s", err)
}

log.Infof("Successfully migrated events to version 2 in %s", time.Since(now))
log.Infof("Successfully migrated event index from version 1 to version 2 in %s", time.Since(now))

return nil
}
@@ -301,11 +296,9 @@ func (ei *EventIndex) migrateToVersion3(ctx context.Context) error {
return xerrors.Errorf("create index event_emitter_addr: %w", err)
}

// create index on event_entry.key index.
_, err = tx.ExecContext(ctx, createIndexEventEntryKey)
if err != nil {
return xerrors.Errorf("create index event_entry_key_index: %w", err)
}
// original v3 migration introduced an index:
// CREATE INDEX IF NOT EXISTS event_entry_key_index ON event_entry (key)
// which has subsequently been removed in v4, so it's omitted here

// increment the schema version to 3 in _meta table.
_, err = tx.ExecContext(ctx, "INSERT OR IGNORE INTO _meta (version) VALUES (3)")
@@ -317,10 +310,82 @@ func (ei *EventIndex) migrateToVersion3(ctx context.Context) error {
if err != nil {
return xerrors.Errorf("commit transaction: %w", err)
}
log.Infof("Successfully migrated events to version 3 in %s", time.Since(now))
log.Infof("Successfully migrated event index from version 2 to version 3 in %s", time.Since(now))
return nil
}

// migrateToVersion4 migrates the schema from version 3 to version 4 by adjusting indexes to match
// the query patterns of the event filter.
//
// First it drops indexes introduced in previous migrations:
// 1. the index on the event.height and event.tipset_key columns
// 2. the index on the event_entry.key column
//
// And then creating the following indices:
// 1. an index on the event.tipset_key_cid column
// 2. an index on the event.height column
// 3. an index on the event.reverted column
// 4. an index on the event_entry.indexed and event_entry.key columns
// 5. an index on the event_entry.codec and event_entry.value columns
// 6. an index on the event_entry.event_id column
func (ei *EventIndex) migrateToVersion4(ctx context.Context) error {
now := time.Now()

tx, err := ei.db.BeginTx(ctx, nil)
if err != nil {
return xerrors.Errorf("begin transaction: %w", err)
}
defer func() { _ = tx.Rollback() }()

for _, create := range []struct {
desc string
query string
}{
{"drop index height_tipset_key", "DROP INDEX IF EXISTS height_tipset_key;"},
{"drop index event_entry_key_index", "DROP INDEX IF EXISTS event_entry_key_index;"},
{"create index event_tipset_key_cid", createIndexEventTipsetKeyCid},
{"create index event_height", createIndexEventHeight},
{"create index event_reverted", createIndexEventReverted},
{"create index event_entry_indexed_key", createIndexEventEntryIndexedKey},
{"create index event_entry_codec_value", createIndexEventEntryCodecValue},
{"create index event_entry_event_id", createIndexEventEntryEventId},
} {
_, err = tx.ExecContext(ctx, create.query)
if err != nil {
return xerrors.Errorf("%s: %w", create.desc, err)
}
}

if _, err = tx.Exec("INSERT OR IGNORE INTO _meta (version) VALUES (4)"); err != nil {
return xerrors.Errorf("increment _meta version: %w", err)
}

err = tx.Commit()
if err != nil {
return xerrors.Errorf("commit transaction: %w", err)
}

ei.vacuumDBAndCheckpointWAL(ctx)

log.Infof("Successfully migrated event index from version 3 to version 4 in %s", time.Since(now))
return nil
}

func (ei *EventIndex) vacuumDBAndCheckpointWAL(ctx context.Context) {
// During the large migrations, we have likely increased the WAL size a lot, so lets do some
// simple DB administration to free up space (VACUUM followed by truncating the WAL file)
// as this would be a good time to do it when no other writes are happening.
log.Infof("Performing DB vacuum and wal checkpointing to free up space after the migration")
_, err := ei.db.ExecContext(ctx, "VACUUM")
if err != nil {
log.Warnf("error vacuuming database: %s", err)
}
_, err = ei.db.ExecContext(ctx, "PRAGMA wal_checkpoint(TRUNCATE)")
if err != nil {
log.Warnf("error checkpointing wal: %s", err)
}
}

func NewEventIndex(ctx context.Context, path string, chainStore *store.ChainStore) (*EventIndex, error) {
db, err := sql.Open("sqlite3", path+"?mode=rwc")
if err != nil {
@@ -358,25 +423,35 @@ func NewEventIndex(ctx context.Context, path string, chainStore *store.ChainStor
}

if version == 1 {
log.Infof("upgrading event index from version 1 to version 2")
log.Infof("Upgrading event index from version 1 to version 2")
err = eventIndex.migrateToVersion2(ctx, chainStore)
if err != nil {
_ = db.Close()
return nil, xerrors.Errorf("could not migrate sql data to version 2: %w", err)
return nil, xerrors.Errorf("could not migrate event index schema from version 1 to version 2: %w", err)
}
version = 2
}

if version == 2 {
log.Infof("upgrading event index from version 2 to version 3")
log.Infof("Upgrading event index from version 2 to version 3")
err = eventIndex.migrateToVersion3(ctx)
if err != nil {
_ = db.Close()
return nil, xerrors.Errorf("could not migrate sql data to version 2: %w", err)
return nil, xerrors.Errorf("could not migrate event index schema from version 2 to version 3: %w", err)
}
version = 3
}

if version == 3 {
log.Infof("Upgrading event index from version 3 to version 4")
err = eventIndex.migrateToVersion4(ctx)
if err != nil {
_ = db.Close()
return nil, xerrors.Errorf("could not migrate event index schema from version 3 to version 4: %w", err)
}
version = 4
}

if version != schemaVersion {
_ = db.Close()
return nil, xerrors.Errorf("invalid database version: got %d, expected %d", version, schemaVersion)
@@ -407,7 +482,7 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever
// rollback the transaction (a no-op if the transaction was already committed)
defer func() { _ = tx.Rollback() }()

// lets handle the revert case first, since its simpler and we can simply mark all events events in this tipset as reverted and return
// lets handle the revert case first, since its simpler and we can simply mark all events in this tipset as reverted and return
if revert {
_, err = tx.Stmt(ei.stmtRevertEventsInTipset).Exec(te.msgTs.Height(), te.msgTs.Key().Bytes())
if err != nil {
@@ -400,7 +400,7 @@ tailLoop:
continue tailLoop
}

// the merge loop ended after processing all the chains and we we probably have still
// the merge loop ended after processing all the chains and we probably have still
// gas to spare; end the loop.
break
}
@@ -52,7 +52,7 @@ func TestChainCheckpoint(t *testing.T) {
head = cs.GetHeaviestTipSet()
require.True(t, head.Equals(checkpoint))

// Let the second miner miner mine a fork
// Let the second miner mine a fork
last = checkpointParents
for i := 0; i < 4; i++ {
ts, err := cg.NextTipSetFromMiners(last, cg.Miners[1:], 0)
@@ -14,8 +14,8 @@ import (

"github.com/google/uuid"
logging "github.com/ipfs/go-log/v2"
"github.com/jackc/pgx/v5"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/yugabyte/pgx/v5"
"golang.org/x/xerrors"

"github.com/filecoin-project/go-address"
@@ -3,6 +3,7 @@ package seal
import (
"bytes"
"context"
"strings"

"github.com/ipfs/go-cid"
"golang.org/x/xerrors"

@@ -111,6 +112,15 @@ func (p *PoRepTask) Do(taskID harmonytask.TaskID, stillOwned func() bool) (done

proof, err := p.sc.PoRepSnark(ctx, sr, sealed, unsealed, sectorParams.TicketValue, abi.InteractiveSealRandomness(rand))
if err != nil {
end, err := p.recoverErrors(ctx, sectorParams.SpID, sectorParams.SectorNumber, err)
if err != nil {
return false, xerrors.Errorf("recover errors: %w", err)
}
if end {
// done, but the error handling has stored a different than success state
return true, nil
}

return false, xerrors.Errorf("failed to compute seal proof: %w", err)
}

@@ -161,4 +171,46 @@ func (p *PoRepTask) Adder(taskFunc harmonytask.AddTaskFunc) {
p.sp.pollers[pollerPoRep].Set(taskFunc)
}

func (p *PoRepTask) recoverErrors(ctx context.Context, spid, snum int64, cerr error) (end bool, err error) {
const (
// rust-fil-proofs error strings
// https://github.com/filecoin-project/rust-fil-proofs/blob/3f018b51b6327b135830899d237a7ba181942d7e/storage-proofs-porep/src/stacked/vanilla/proof.rs#L454C1-L463
errstrInvalidCommD = "Invalid comm_d detected at challenge_index"
errstrInvalidCommR = "Invalid comm_r detected at challenge_index"
errstrInvalidEncoding = "Invalid encoding proof generated at layer"
)

if cerr == nil {
return false, xerrors.Errorf("nil error")
}

switch {
case strings.Contains(cerr.Error(), errstrInvalidCommD):
fallthrough
case strings.Contains(cerr.Error(), errstrInvalidCommR):
// todo: it might be more optimal to just retry the Trees compute first.
// Invalid CommD/R likely indicates a problem with the data computed in that step
// For now for simplicity just retry the whole thing
fallthrough
case strings.Contains(cerr.Error(), errstrInvalidEncoding):
n, err := p.db.Exec(ctx, `UPDATE sectors_sdr_pipeline
SET after_porep = FALSE, after_sdr = FALSE, after_tree_d = FALSE,
after_tree_r = FALSE, after_tree_c = FALSE
WHERE sp_id = $1 AND sector_number = $2`,
spid, snum)
if err != nil {
return false, xerrors.Errorf("store sdr success: updating pipeline: %w", err)
}
if n != 1 {
return false, xerrors.Errorf("store sdr success: updated %d rows", n)
}

return true, nil

default:
// if end is false the original error will be returned by the caller
return false, nil
}
}

var _ harmonytask.TaskInterface = &PoRepTask{}
@@ -7594,7 +7594,7 @@ Inputs:
Response: `"f01234"`

### WalletSetDefault
WalletSetDefault marks the given address as as the default one.
WalletSetDefault marks the given address as the default one.


Perms: write

@@ -9284,7 +9284,7 @@ Inputs:
Response: `"f01234"`

### WalletSetDefault
WalletSetDefault marks the given address as as the default one.
WalletSetDefault marks the given address as the default one.


Perms: write
extern/filecoin-ffi (vendored)
@@ -1 +1 @@
Subproject commit b715c9403faf919e95fdc702cd651e842f18d890
Subproject commit ed08caaf8778e1b6def83efd37fce41574214353
go.mod
@@ -107,7 +107,6 @@ require (
github.com/ipni/go-libipni v0.0.8
github.com/ipni/index-provider v0.12.0
github.com/jackc/pgerrcode v0.0.0-20220416144525-469b46aa5efa
github.com/jackc/pgx/v5 v5.4.1
github.com/kelseyhightower/envconfig v1.4.0
github.com/koalacxr/quantile v0.0.1
github.com/libp2p/go-buffer-pool v0.1.0

@@ -251,6 +250,7 @@ require (
github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect
github.com/jackc/pgx/v5 v5.4.1 // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c // indirect
go.sum
@@ -305,6 +305,7 @@ github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38a
github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 h1:ZNJ9tEG5bE72vBWYiuh5bkxJVM3ViHNOmQ7qew9n6RE=
github.com/filecoin-project/go-amt-ipld/v3 v3.1.0/go.mod h1:UjM2QhDFrrjD5s1CdnkJkat4ga+LqZBZgTMniypABRo=
github.com/filecoin-project/go-amt-ipld/v4 v4.0.0/go.mod h1:gF053YQ4BIpzTNDoEwHZas7U3oAwncDVGvOHyY8oDpE=
github.com/filecoin-project/go-amt-ipld/v4 v4.2.0/go.mod h1:0eDVF7pROvxrsxvLJx+SJZXqRaXXcEPUcgb/rG0zGU4=
github.com/filecoin-project/go-amt-ipld/v4 v4.3.0 h1:bY42N1gR0DqrLMCKUPzX1VhYVgXaETQm0Um4ohvyEP8=
github.com/filecoin-project/go-amt-ipld/v4 v4.3.0/go.mod h1:39Ep/yBbF6xN94WevLG9qSbglBJepHa5zeEbAE1pYsc=
github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM=

@@ -354,7 +355,7 @@ github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go
github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g=
github.com/filecoin-project/go-state-types v0.1.6/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q=
github.com/filecoin-project/go-state-types v0.1.10/go.mod h1:UwGVoMsULoCK+bWjEdd/xLCvLAQFBC7EDT477SKml+Q=
github.com/filecoin-project/go-state-types v0.11.2-0.20230712101859-8f37624fa540/go.mod h1:SyNPwTsU7I22gL2r0OAPcImvLoTVfgRwdK/Y5rR1zz8=
github.com/filecoin-project/go-state-types v0.13.1/go.mod h1:cHpOPup9H1g2T29dKHAjC2sc7/Ef5ypjuW9A3I+e9yY=
github.com/filecoin-project/go-state-types v0.13.3 h1:9JPkC0E6HDtfHbaOIrFiCDzT/Z0jRTb9En4Y4Ci/b3w=
github.com/filecoin-project/go-state-types v0.13.3/go.mod h1:cHpOPup9H1g2T29dKHAjC2sc7/Ef5ypjuW9A3I+e9yY=
github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig=

@@ -953,6 +954,7 @@ github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6K
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/koalacxr/quantile v0.0.1 h1:wAW+SQ286Erny9wOjVww96t8ws+x5Zj6AKHDULUK+o0=

@@ -1681,6 +1683,7 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:f
github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
github.com/whyrusleeping/cbor-gen v0.0.0-20210303213153-67a261a1d291/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
github.com/whyrusleeping/cbor-gen v0.0.0-20220323183124-98fa8256a799/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ=
github.com/whyrusleeping/cbor-gen v0.1.0/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so=
github.com/whyrusleeping/cbor-gen v0.1.1 h1:eKfcJIoxivjMtwfCfmJAqSF56MHcWqyIScXwaC1VBgw=
github.com/whyrusleeping/cbor-gen v0.1.1/go.mod h1:pM99HXyEbSQHcosHc0iW7YFmwnscr+t9Te4ibko05so=
github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E=

@@ -2065,6 +2068,7 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220708085239-5a0f0661e09d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -95,7 +95,7 @@ func (a *Alerting) update(at AlertType, message interface{}, upd func(Alert, jso
}{
AlertError: err.Error(),
})
log.Errorw("marshaling marshaling error failed", "type", at, "error", err)
log.Errorw("marshaling error failed", "type", at, "error", err)
}

a.alerts[at] = upd(alert, rawMsg)
@@ -1,4 +1,5 @@
/* Used for webui clusterMachineSummary */
-- NOTE: This index is changed in 20240420-web-task-indexes.sql
CREATE INDEX harmony_task_history_work_index
ON harmony_task_history (completed_by_host_and_port ASC, name ASC, result ASC, work_end DESC);

lib/harmony/harmonydb/sql/20240420-web-task-indexes.sql (new file)
@@ -0,0 +1,9 @@
DROP INDEX harmony_task_history_work_index;

/*
This structure improves clusterMachineSummary query better than the old version,
while at the same time also being usable by clusterTaskHistorySummary (which wasn't
the case with the old index).
*/
create index harmony_task_history_work_index
on harmony_task_history (work_end desc, completed_by_host_and_port asc, name asc, result asc);
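The migration's comment names two web UI queries that are not part of this diff. Purely as assumed illustrations of why the index now leads with work_end, queries of roughly these shapes can be served by it:

```go
// Assumed query shapes only; the real clusterMachineSummary and
// clusterTaskHistorySummary SQL lives elsewhere in lotus.
const (
	// clusterMachineSummary-style aggregation over recent task history.
	exampleMachineSummary = `
		SELECT completed_by_host_and_port, name, result, COUNT(*)
		FROM harmony_task_history
		WHERE work_end > NOW() - INTERVAL '1 day'
		GROUP BY completed_by_host_and_port, name, result`

	// clusterTaskHistorySummary-style listing of the most recent task runs.
	exampleTaskHistorySummary = `
		SELECT name, result, work_end, completed_by_host_and_port
		FROM harmony_task_history
		ORDER BY work_end DESC
		LIMIT 100`
)
```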
@@ -4,7 +4,7 @@ import (
"errors"
"time"

"github.com/jackc/pgx/v5"
"github.com/yugabyte/pgx/v5"

"github.com/filecoin-project/lotus/lib/harmony/harmonydb"
"github.com/filecoin-project/lotus/lib/passcall"
@@ -190,11 +190,11 @@ var (
RcmgrAllowPeer = stats.Int64("rcmgr/allow_peer", "Number of allowed peer connections", stats.UnitDimensionless)
RcmgrBlockPeer = stats.Int64("rcmgr/block_peer", "Number of blocked peer connections", stats.UnitDimensionless)
RcmgrAllowProto = stats.Int64("rcmgr/allow_proto", "Number of allowed streams attached to a protocol", stats.UnitDimensionless)
RcmgrBlockProto = stats.Int64("rcmgr/block_proto", "Number of blocked blocked streams attached to a protocol", stats.UnitDimensionless)
RcmgrBlockProtoPeer = stats.Int64("rcmgr/block_proto", "Number of blocked blocked streams attached to a protocol for a specific peer", stats.UnitDimensionless)
RcmgrBlockProto = stats.Int64("rcmgr/block_proto", "Number of blocked streams attached to a protocol", stats.UnitDimensionless)
RcmgrBlockProtoPeer = stats.Int64("rcmgr/block_proto", "Number of blocked streams attached to a protocol for a specific peer", stats.UnitDimensionless)
RcmgrAllowSvc = stats.Int64("rcmgr/allow_svc", "Number of allowed streams attached to a service", stats.UnitDimensionless)
RcmgrBlockSvc = stats.Int64("rcmgr/block_svc", "Number of blocked blocked streams attached to a service", stats.UnitDimensionless)
RcmgrBlockSvcPeer = stats.Int64("rcmgr/block_svc", "Number of blocked blocked streams attached to a service for a specific peer", stats.UnitDimensionless)
RcmgrBlockSvc = stats.Int64("rcmgr/block_svc", "Number of blocked streams attached to a service", stats.UnitDimensionless)
RcmgrBlockSvcPeer = stats.Int64("rcmgr/block_svc", "Number of blocked streams attached to a service for a specific peer", stats.UnitDimensionless)
RcmgrAllowMem = stats.Int64("rcmgr/allow_mem", "Number of allowed memory reservations", stats.UnitDimensionless)
RcmgrBlockMem = stats.Int64("rcmgr/block_mem", "Number of blocked memory reservations", stats.UnitDimensionless)
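These measures use the standard OpenCensus stats API, so, as a generic illustration rather than lotus-specific code (and assuming this file's go.opencensus.io/stats and context imports), incrementing one of the blocked-stream counters looks like:

```go
// recordBlockedProtoStream records a single blocked-stream event against the
// RcmgrBlockProto measure; any tags (e.g. the protocol ID) would be attached
// to ctx by the caller.
func recordBlockedProtoStream(ctx context.Context) {
	stats.Record(ctx, RcmgrBlockProto.M(1))
}
```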
@@ -723,7 +723,7 @@ func (dbi *DBIndex) StorageBestAlloc(ctx context.Context, allocate storiface.Sec
FROM storage_path
WHERE available >= $1
and NOW()-($2 * INTERVAL '1 second') < last_heartbeat
and heartbeat_err = ''
and heartbeat_err IS NULL
and (($3 and can_seal = TRUE) or ($4 and can_store = TRUE))
order by (available::numeric * weight) desc`,
spaceReq,
@@ -174,6 +174,7 @@ func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector Sec
nv, err := m.Api.StateNetworkVersion(ctx.Context(), ts.Key())
if err != nil {
log.Errorf("failed to get network version: %+v", err)
return ctx.Send(SectorSubmitReplicaUpdateFailed{})
}

pams, deals, err := m.processPieces(ctx.Context(), sector, nv >= network.Version22)