wip in-memory journal.

Raúl Kripalani 2020-07-17 14:22:37 +01:00
parent 6092854ece
commit 695f6cfe45
13 changed files with 643 additions and 168 deletions

View File

@@ -26,7 +26,6 @@ type heightEvents struct {
 }

 func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error {
 	ctx, span := trace.StartSpan(e.ctx, "events.HeightHeadChange")
 	defer span.End()
 	span.AddAttributes(trace.Int64Attribute("endHeight", int64(app[0].Height())))
@@ -144,12 +143,11 @@ func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error {
 }

 // ChainAt invokes the specified `HeightHandler` when the chain reaches the
 // specified height+confidence threshold. If the chain is rolled-back under the
 // specified height, `RevertHandler` will be called.
 //
 // ts passed to handlers is the tipset at the specified, or above, if lower tipsets were null
 func (e *heightEvents) ChainAt(hnd HeightHandler, rev RevertHandler, confidence int, h abi.ChainEpoch) error {
 	e.lk.Lock() // Tricky locking, check your locks if you modify this function!
 	bestH := e.tsc.best().Height()
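For reference, a rough usage sketch of the ChainAt contract documented above. Assumptions not in this diff: an *events.Events instance (the exported wrapper that embeds heightEvents) wired to a running node, and handler signatures matching this package's HeightHandler/RevertHandler types; the helper itself is hypothetical.

package example

import (
	"context"

	"github.com/filecoin-project/specs-actors/actors/abi"

	"github.com/filecoin-project/lotus/chain/events"
	"github.com/filecoin-project/lotus/chain/types"
)

// awaitEpoch is a hypothetical helper: it asks `ev` to call back once the chain
// is `confidence` epochs past `target`, or if the chain reverts below it.
func awaitEpoch(ev *events.Events, target abi.ChainEpoch, confidence int, hit chan<- *types.TipSet) error {
	return ev.ChainAt(func(ctx context.Context, ts *types.TipSet, curH abi.ChainEpoch) error {
		// ts is the tipset at the target height, or above it if lower tipsets were null.
		hit <- ts
		return nil
	}, func(ctx context.Context, ts *types.TipSet) error {
		// the chain was rolled back under the target height.
		return nil
	}, confidence, target)
}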

View File

@@ -85,9 +85,10 @@ type ChainStore struct {
 	tsCache *lru.ARCCache

 	vmcalls runtime.Syscalls
+	journal journal.Journal
 }

-func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls runtime.Syscalls) *ChainStore {
+func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls runtime.Syscalls, journal journal.Journal) *ChainStore {
 	c, _ := lru.NewARC(2048)
 	tsc, _ := lru.NewARC(4096)
 	cs := &ChainStore{
@@ -98,6 +99,7 @@ func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls runtime.Sys
 		mmCache: c,
 		tsCache: tsc,
 		vmcalls: vmcalls,
+		journal: journal,
 	}

 	ci := NewChainIndex(cs.LoadTipSet)
@@ -326,12 +328,13 @@ func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNo
 					continue
 				}

-				journal.Add("sync", map[string]interface{}{
-					"op":    "headChange",
-					"from":  r.old.Key(),
-					"to":    r.new.Key(),
-					"rev":   len(revert),
-					"apply": len(apply),
+				cs.journal.AddEntry(journal.EventType{"sync", "head_change"}, map[string]interface{}{
+					"from":        r.old.Key(),
+					"from_height": r.old.Height(),
+					"to":          r.new.Key(),
+					"to_height":   r.new.Height(),
+					"rev_cnt":     len(revert),
+					"apply_cnt":   len(apply),
 				})

 				// reverse the apply array
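The commit also introduces a typed HeadChangeEvt (see journal/events.go below) whose JSON tags mirror these map keys. A sketch, not part of this diff, of the same call emitting the typed payload instead of the ad-hoc map:

	// Hypothetical follow-up: emit the typed event instead of a map.
	cs.journal.AddEntry(journal.EventType{System: "sync", Event: "head_change"}, journal.HeadChangeEvt{
		From:        r.old.Key(),
		FromHeight:  r.old.Height(),
		To:          r.new.Key(),
		ToHeight:    r.new.Height(),
		RevertCount: len(revert),
		ApplyCount:  len(apply),
	})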

View File

@ -37,6 +37,7 @@ import (
var log = logging.Logger("main") var log = logging.Logger("main")
const FlagWorkerRepo = "worker-repo" const FlagWorkerRepo = "worker-repo"
// TODO remove after deprecation period // TODO remove after deprecation period
const FlagWorkerRepoDeprecation = "workerrepo" const FlagWorkerRepoDeprecation = "workerrepo"

View File

@ -22,6 +22,7 @@ import (
var log = logging.Logger("main") var log = logging.Logger("main")
const FlagMinerRepo = "miner-repo" const FlagMinerRepo = "miner-repo"
// TODO remove after deprecation period // TODO remove after deprecation period
const FlagMinerRepoDeprecation = "storagerepo" const FlagMinerRepoDeprecation = "storagerepo"

journal/events.go Normal file (+16)
View File

@@ -0,0 +1,16 @@
package journal
import (
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/filecoin-project/lotus/chain/types"
)
type HeadChangeEvt struct {
From types.TipSetKey `json:"from"`
FromHeight abi.ChainEpoch `json:"from_height"`
To types.TipSetKey `json:"to"`
ToHeight abi.ChainEpoch `json:"to_height"`
RevertCount int `json:"rev_cnt"`
ApplyCount int `json:"apply_cnt"`
}

journal/filesystem.go Normal file (+131)
View File

@@ -0,0 +1,131 @@
package journal
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"sync"
"time"
logging "github.com/ipfs/go-log"
"go.uber.org/fx"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/node/repo"
)
var log = logging.Logger("journal")
// fsJournal is a basic journal backed by files on a filesystem.
type fsJournal struct {
disabledTracker
dir string
sizeLimit int64
lk sync.Mutex
fi *os.File
fSize int64
incoming chan *Entry
closing chan struct{}
}
// OpenFSJournal constructs a rolling filesystem journal, with a default
// per-file size limit of 1GiB.
func OpenFSJournal(lr repo.LockedRepo, lc fx.Lifecycle, disabled []EventType) (Journal, error) {
dir := filepath.Join(lr.Path(), "journal")
if err := os.MkdirAll(dir, 0755); err != nil {
return nil, fmt.Errorf("failed to mk directory %s for file journal: %w", dir, err)
}
f := &fsJournal{
disabledTracker: newDisabledTracker(disabled),
dir: dir,
sizeLimit: 1 << 30,
incoming: make(chan *Entry, 32),
closing: make(chan struct{}),
}
if err := f.rollJournalFile(); err != nil {
return nil, err
}
lc.Append(fx.Hook{
OnStop: func(_ context.Context) error { return f.Close() },
})
go f.runLoop()
return f, nil
}
func (f *fsJournal) AddEntry(evtType EventType, obj interface{}) {
je := &Entry{
EventType: evtType,
Timestamp: build.Clock.Now(),
Data: obj,
}
select {
case f.incoming <- je:
case <-f.closing:
log.Warnw("journal closed but tried to log event", "entry", je)
}
}
func (f *fsJournal) Close() error {
close(f.closing)
return nil
}
func (f *fsJournal) putEntry(je *Entry) error {
b, err := json.Marshal(je)
if err != nil {
return err
}
n, err := f.fi.Write(append(b, '\n'))
if err != nil {
return err
}
f.fSize += int64(n)
if f.fSize >= f.sizeLimit {
_ = f.rollJournalFile()
}
return nil
}
func (f *fsJournal) rollJournalFile() error {
if f.fi != nil {
_ = f.fi.Close()
}
nfi, err := os.Create(filepath.Join(f.dir, fmt.Sprintf("lotus-journal-%s.ndjson", build.Clock.Now().Format(time.RFC3339))))
if err != nil {
return xerrors.Errorf("failed to open journal file: %w", err)
}
f.fi = nfi
f.fSize = 0
return nil
}
func (f *fsJournal) runLoop() {
for {
select {
case je := <-f.incoming:
if err := f.putEntry(je); err != nil {
log.Errorw("failed to write out journal entry", "entry", je, "err", err)
}
case <-f.closing:
_ = f.fi.Close()
return
}
}
}
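Since putEntry writes one json.Marshal'd Entry per line, each journal file is NDJSON. A minimal sketch of what a single line would look like for the head-change event, assuming the typed HeadChangeEvt payload and the Entry layout from journal/types.go below (the embedded EventType promotes its System/Event fields; concrete values are illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/filecoin-project/lotus/chain/types"
	"github.com/filecoin-project/lotus/journal"
)

func main() {
	e := journal.Entry{
		EventType: journal.EventType{System: "sync", Event: "head_change"},
		Timestamp: time.Now(),
		Data: journal.HeadChangeEvt{
			From:       types.EmptyTSK,
			FromHeight: 41,
			To:         types.EmptyTSK,
			ToHeight:   42,
			ApplyCount: 1,
		},
	}
	b, _ := json.Marshal(e)
	fmt.Println(string(b))
	// Roughly, with the TipSetKey values elided:
	// {"System":"sync","Event":"head_change","Timestamp":"...","Data":{"from":...,"from_height":41,"to":...,"to_height":42,"rev_cnt":0,"apply_cnt":1}}
}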

View File

@@ -1,148 +0,0 @@
package journal
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"sync"
"time"
logging "github.com/ipfs/go-log"
"golang.org/x/xerrors"
"github.com/filecoin-project/lotus/build"
)
func InitializeSystemJournal(dir string) error {
if err := os.MkdirAll(dir, 0755); err != nil {
return err
}
j, err := OpenFSJournal(dir)
if err != nil {
return err
}
currentJournal = j
return nil
}
func Add(sys string, val interface{}) {
if currentJournal == nil {
log.Warn("no journal configured")
return
}
currentJournal.AddEntry(sys, val)
}
var log = logging.Logger("journal")
var currentJournal Journal
type Journal interface {
AddEntry(system string, obj interface{})
Close() error
}
// fsJournal is a basic journal backed by files on a filesystem
type fsJournal struct {
fi *os.File
fSize int64
lk sync.Mutex
journalDir string
incoming chan *JournalEntry
journalSizeLimit int64
closing chan struct{}
}
func OpenFSJournal(dir string) (*fsJournal, error) {
fsj := &fsJournal{
journalDir: dir,
incoming: make(chan *JournalEntry, 32),
journalSizeLimit: 1 << 30,
closing: make(chan struct{}),
}
if err := fsj.rollJournalFile(); err != nil {
return nil, err
}
go fsj.runLoop()
return fsj, nil
}
type JournalEntry struct {
System string
Timestamp time.Time
Val interface{}
}
func (fsj *fsJournal) putEntry(je *JournalEntry) error {
b, err := json.Marshal(je)
if err != nil {
return err
}
n, err := fsj.fi.Write(append(b, '\n'))
if err != nil {
return err
}
fsj.fSize += int64(n)
if fsj.fSize >= fsj.journalSizeLimit {
fsj.rollJournalFile()
}
return nil
}
func (fsj *fsJournal) rollJournalFile() error {
if fsj.fi != nil {
fsj.fi.Close()
}
nfi, err := os.Create(filepath.Join(fsj.journalDir, fmt.Sprintf("lotus-journal-%s.ndjson", build.Clock.Now().Format(time.RFC3339))))
if err != nil {
return xerrors.Errorf("failed to open journal file: %w", err)
}
fsj.fi = nfi
fsj.fSize = 0
return nil
}
func (fsj *fsJournal) runLoop() {
for {
select {
case je := <-fsj.incoming:
if err := fsj.putEntry(je); err != nil {
log.Errorw("failed to write out journal entry", "entry", je, "err", err)
}
case <-fsj.closing:
fsj.fi.Close()
return
}
}
}
func (fsj *fsJournal) AddEntry(system string, obj interface{}) {
je := &JournalEntry{
System: system,
Timestamp: build.Clock.Now(),
Val: obj,
}
select {
case fsj.incoming <- je:
case <-fsj.closing:
log.Warnw("journal closed but tried to log event", "entry", je)
}
}
func (fsj *fsJournal) Close() error {
close(fsj.closing)
return nil
}

journal/memory.go Normal file (+239)
View File

@@ -0,0 +1,239 @@
package journal
import (
"context"
"sync/atomic"
"go.uber.org/fx"
"github.com/filecoin-project/lotus/build"
)
// Control messages.
type (
clearCtrl struct{}
addObserverCtrl struct {
observer *observer
replay bool
}
rmObserverCtrl *observer
getEntriesCtrl chan []*Entry
)
type MemJournal struct {
disabledTracker
entries []*Entry
index map[string]map[string][]*Entry
observers []observer
incomingCh chan *Entry
controlCh chan interface{}
state int32 // guarded by atomic; 0=closed, 1=running.
closed chan struct{}
}
var _ Journal = (*MemJournal)(nil)
type observer struct {
accept map[EventType]struct{}
ch chan *Entry
}
func (o *observer) dispatch(entry *Entry) {
if o.accept == nil {
o.ch <- entry
}
if _, ok := o.accept[entry.EventType]; ok {
o.ch <- entry
}
}
func NewMemoryJournal(lc fx.Lifecycle, disabled []EventType) *MemJournal {
m := &MemJournal{
disabledTracker: newDisabledTracker(disabled),
index: make(map[string]map[string][]*Entry, 16),
observers: make([]observer, 0, 16),
incomingCh: make(chan *Entry, 256),
controlCh: make(chan interface{}, 16),
state: 1,
closed: make(chan struct{}),
}
lc.Append(fx.Hook{
OnStop: func(_ context.Context) error { return m.Close() },
})
go m.process()
return m
}
func (m *MemJournal) AddEntry(evtType EventType, obj interface{}) {
entry := &Entry{
EventType: evtType,
Timestamp: build.Clock.Now(),
Data: obj,
}
select {
case m.incomingCh <- entry:
case <-m.closed:
}
}
func (m *MemJournal) Close() error {
if !atomic.CompareAndSwapInt32(&m.state, 1, 0) {
// already closed.
return nil
}
close(m.closed)
return nil
}
func (m *MemJournal) Clear() {
select {
case m.controlCh <- clearCtrl{}:
case <-m.closed:
}
}
// Observe starts observing events that are recorded in the MemJournal, and
// returns a channel where new events will be sent. When replay is true, all
// entries that have been recorded prior to the observer being registered will
// be replayed. To restrict the event types that are sent to this observer, use
// the include argument. If no include set is passed, the observer will receive
// all event types.
func (m *MemJournal) Observe(ctx context.Context, replay bool, include ...EventType) <-chan *Entry {
var acc map[EventType]struct{}
if include != nil {
acc = make(map[EventType]struct{}, 16)
for _, et := range include {
acc[et] = struct{}{}
}
}
ch := make(chan *Entry, 256)
o := &observer{
accept: acc,
ch: ch,
}
// watch the context, and fire the "remove observer" control message upon
// cancellation.
go func() {
<-ctx.Done()
select {
case m.controlCh <- rmObserverCtrl(o):
case <-m.closed:
}
}()
select {
case m.controlCh <- addObserverCtrl{o, replay}:
case <-m.closed:
// we are already stopped.
close(ch)
}
return ch
}
// Entries gets a snapshot of stored entries.
func (m *MemJournal) Entries() []*Entry {
ch := make(chan []*Entry)
m.controlCh <- getEntriesCtrl(ch)
return <-ch
}
func (m *MemJournal) process() {
processCtrlMsg := func(message interface{}) {
switch msg := message.(type) {
case addObserverCtrl:
// adding an observer.
m.observers = append(m.observers, *msg.observer)
if msg.replay {
// replay all existing entries.
for _, e := range m.entries {
msg.observer.dispatch(e)
}
}
case rmObserverCtrl:
// removing an observer; find the observer, close its channel.
// then discard it from our list by replacing it with the last
// observer and reslicing.
for i, o := range m.observers {
if o.ch == msg.ch {
close(o.ch)
m.observers[i] = m.observers[len(m.observers)-1]
m.observers = m.observers[:len(m.observers)-1]
}
}
case clearCtrl:
m.entries = m.entries[0:0]
// carry over system and event names; they are unlikely to change;
// just reslice the entry slices, so we are not thrashing memory.
for _, events := range m.index {
for ev := range events {
events[ev] = events[ev][0:0]
}
}
case getEntriesCtrl:
cpy := make([]*Entry, len(m.entries))
copy(cpy, m.entries)
msg <- cpy
close(msg)
}
}
processClose := func() {
m.entries = nil
m.index = make(map[string]map[string][]*Entry, 16)
for _, o := range m.observers {
close(o.ch)
}
m.observers = nil
}
for {
// Drain all control messages first!
select {
case msg := <-m.controlCh:
processCtrlMsg(msg)
continue
case <-m.closed:
processClose()
return
default:
}
// Now consume and pipe messages.
select {
case entry := <-m.incomingCh:
m.entries = append(m.entries, entry)
events := m.index[entry.System]
if events == nil {
events = make(map[string][]*Entry, 16)
m.index[entry.System] = events
}
entries := events[entry.Event]
events[entry.Event] = append(entries, entry)
for _, o := range m.observers {
o.dispatch(entry)
}
case msg := <-m.controlCh:
processCtrlMsg(msg)
continue
case <-m.closed:
processClose()
return
}
}
}

journal/memory_test.go Normal file (+178)
View File

@@ -0,0 +1,178 @@
package journal
import (
"context"
"fmt"
"testing"
"time"
"github.com/filecoin-project/lotus/build"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/raulk/clock"
"github.com/stretchr/testify/require"
"go.uber.org/fx/fxtest"
)
func TestMemJournal_AddEntry(t *testing.T) {
lc := fxtest.NewLifecycle(t)
defer lc.RequireStop()
clk := clock.NewMock()
build.Clock = clk
journal := NewMemoryJournal(lc, nil)
addEntries(journal, 100)
require.Eventually(t, func() bool { return len(journal.Entries()) == 100 }, 1*time.Second, 100*time.Millisecond)
entries := journal.Entries()
cnt := make(map[string]int, 10)
for i, e := range entries {
require.EqualValues(t, "spaceship", e.System)
require.Equal(t, HeadChangeEvt{
From: types.TipSetKey{},
FromHeight: abi.ChainEpoch(i),
To: types.TipSetKey{},
ToHeight: abi.ChainEpoch(i),
RevertCount: i,
ApplyCount: i,
}, e.Data)
require.Equal(t, build.Clock.Now(), e.Timestamp)
cnt[e.Event]++
}
// we received 10 entries of each event type.
for _, c := range cnt {
require.Equal(t, 10, c)
}
}
func TestMemJournal_Close(t *testing.T) {
lc := fxtest.NewLifecycle(t)
defer lc.RequireStop()
journal := NewMemoryJournal(lc, nil)
addEntries(journal, 100)
require.Eventually(t, func() bool { return len(journal.Entries()) == 100 }, 1*time.Second, 100*time.Millisecond)
o1 := journal.Observe(context.TODO(), false)
o2 := journal.Observe(context.TODO(), false)
o3 := journal.Observe(context.TODO(), false)
time.Sleep(500 * time.Millisecond)
// Close the journal.
require.NoError(t, journal.Close())
time.Sleep(500 * time.Millisecond)
NextChannel:
for _, ch := range []<-chan *Entry{o1, o2, o3} {
for {
select {
case _, more := <-ch:
if more {
// keep consuming
} else {
continue NextChannel
}
default:
t.Fatal("nothing more to consume, and channel is not closed")
}
}
}
}
func TestMemJournal_Clear(t *testing.T) {
lc := fxtest.NewLifecycle(t)
defer lc.RequireStop()
journal := NewMemoryJournal(lc, nil)
addEntries(journal, 100)
require.Eventually(t, func() bool { return len(journal.Entries()) == 100 }, 1*time.Second, 100*time.Millisecond)
journal.Clear()
require.Empty(t, journal.Entries())
require.Empty(t, journal.Entries())
require.Empty(t, journal.Entries())
}
func TestMemJournal_Observe(t *testing.T) {
lc := fxtest.NewLifecycle(t)
defer lc.RequireStop()
journal := NewMemoryJournal(lc, nil)
addEntries(journal, 100)
require.Eventually(t, func() bool { return len(journal.Entries()) == 100 }, 1*time.Second, 100*time.Millisecond)
o1 := journal.Observe(context.TODO(), false, EventType{"spaceship", "wheezing-1"})
o2 := journal.Observe(context.TODO(), true, EventType{"spaceship", "wheezing-1"}, EventType{"spaceship", "wheezing-2"})
o3 := journal.Observe(context.TODO(), true)
time.Sleep(1 * time.Second)
require.Len(t, o1, 0) // no replay
require.Len(t, o2, 20) // replay with include set
require.Len(t, o3, 100) // replay with no include set (all entries)
// add another 100 entries and assert what the observers have seen.
addEntries(journal, 100)
require.Eventually(t, func() bool { return len(journal.Entries()) == 200 }, 1*time.Second, 100*time.Millisecond)
// note: we're able to queue items because the observer channel buffer size is 256.
require.Len(t, o1, 10) // should have 0 old entries + 10 new entries
require.Len(t, o2, 40) // should have 20 old entries + 20 new entries
require.Len(t, o3, 200) // should have 100 old entries + 100 new entries
}
func TestMemJournal_ObserverCancellation(t *testing.T) {
lc := fxtest.NewLifecycle(t)
defer lc.RequireStop()
journal := NewMemoryJournal(lc, nil)
ctx, cancel := context.WithCancel(context.TODO())
o1 := journal.Observe(ctx, false)
o2 := journal.Observe(context.TODO(), false)
addEntries(journal, 100)
require.Eventually(t, func() bool { return len(journal.Entries()) == 100 }, 1*time.Second, 100*time.Millisecond)
// all observers have received the 100 entries.
require.Len(t, o1, 100)
require.Len(t, o2, 100)
// cancel o1's context.
cancel()
time.Sleep(500 * time.Millisecond)
// add 50 new entries
addEntries(journal, 50)
require.Eventually(t, func() bool { return len(journal.Entries()) == 150 }, 1*time.Second, 100*time.Millisecond)
require.Len(t, o1, 100) // has not moved.
require.Len(t, o2, 150) // should have 100 old entries + 50 new entries
}
func addEntries(journal *MemJournal, count int) {
for i := 0; i < count; i++ {
eventIdx := i % 10
journal.AddEntry(EventType{"spaceship", fmt.Sprintf("wheezing-%d", eventIdx)}, HeadChangeEvt{
From: types.TipSetKey{},
FromHeight: abi.ChainEpoch(i),
To: types.TipSetKey{},
ToHeight: abi.ChainEpoch(i),
RevertCount: i,
ApplyCount: i,
})
}
}
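The tests above assert channel buffer lengths; the consumption side of Observe is simply a range over the returned channel, which ends when the observer is removed (context cancellation) or the journal closes. A test-style sketch under those assumptions, reusing this file's imports and the addEntries helper:

func TestMemJournal_ObserveConsume(t *testing.T) {
	lc := fxtest.NewLifecycle(t)
	defer lc.RequireStop()

	journal := NewMemoryJournal(lc, nil)

	ctx, cancel := context.WithCancel(context.TODO())
	defer cancel()

	// replay=true and a single-type include set; the returned channel is closed
	// when the journal shuts down, which ends the range loop below.
	ch := journal.Observe(ctx, true, EventType{"spaceship", "wheezing-1"})

	done := make(chan int)
	go func() {
		seen := 0
		for range ch {
			seen++
		}
		done <- seen
	}()

	addEntries(journal, 100)
	require.Eventually(t, func() bool { return len(journal.Entries()) == 100 }, 1*time.Second, 100*time.Millisecond)

	require.NoError(t, journal.Close())
	require.Equal(t, 10, <-done) // 10 of the 100 entries carry the "wheezing-1" event name.
}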

journal/types.go Normal file (+60)
View File

@@ -0,0 +1,60 @@
package journal
import "time"
// EventType represents the signature of an event.
type EventType struct {
System string
Event string
}
// Journal represents an audit trail of system actions.
//
// Every entry is tagged with a timestamp, a system name, and an event name.
// The supplied data can be any type, as long as it is JSON serializable,
// including structs, map[string]interface{}, or primitive types.
//
// For cleanliness and type safety, we recommend using typed events. See the
// *Evt struct types in this package for more info.
type Journal interface {
// IsEnabled allows components to check if a given event type is enabled.
// All event types are enabled by default, and specific event types can only
// be disabled at construction time. Components are advised to check whether
// the event types they record are enabled as early as possible, and to hold
// on to the answer throughout the lifetime of the process.
IsEnabled(evtType EventType) bool
// AddEntry adds an entry to this journal. See godocs on the Journal type
// for more info.
AddEntry(evtType EventType, data interface{})
// Close closes this journal for further writing.
Close() error
}
// Entry represents a journal entry.
//
// See godocs on Journal for more information.
type Entry struct {
EventType
Timestamp time.Time
Data interface{}
}
// disabledTracker is an embeddable mixin that takes care of tracking disabled
// event types.
type disabledTracker map[EventType]struct{}
func newDisabledTracker(disabled []EventType) disabledTracker {
dis := make(map[EventType]struct{}, len(disabled))
for _, et := range disabled {
dis[et] = struct{}{}
}
return dis
}
func (d disabledTracker) IsEnabled(evtType EventType) bool {
_, ok := d[evtType]
return !ok
}
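Tying the interface together: the IsEnabled contract above suggests resolving the answer once and caching it inside the component. A minimal sketch under that assumption; the component and the "miner"/"sector_sealed" event type are hypothetical, not part of this commit:

package example

import "github.com/filecoin-project/lotus/journal"

// SectorSealedEvt is a hypothetical typed payload, following the *Evt convention.
type SectorSealedEvt struct {
	Sector uint64 `json:"sector"`
}

// sectorEvents records sealing events, resolving IsEnabled once at construction.
type sectorEvents struct {
	j       journal.Journal
	evtType journal.EventType
	enabled bool
}

func newSectorEvents(j journal.Journal) *sectorEvents {
	et := journal.EventType{System: "miner", Event: "sector_sealed"}
	return &sectorEvents{j: j, evtType: et, enabled: j.IsEnabled(et)}
}

func (s *sectorEvents) recordSealed(sector uint64) {
	if !s.enabled {
		return // event type disabled at construction time; skip the work entirely.
	}
	s.j.AddEntry(s.evtType, SectorSealedEvt{Sector: sector})
}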

View File

@@ -41,6 +41,7 @@ import (
 	"github.com/filecoin-project/lotus/chain/types"
 	"github.com/filecoin-project/lotus/chain/vm"
 	"github.com/filecoin-project/lotus/chain/wallet"
+	"github.com/filecoin-project/lotus/journal"
 	"github.com/filecoin-project/lotus/lib/peermgr"
 	_ "github.com/filecoin-project/lotus/lib/sigs/bls"
 	_ "github.com/filecoin-project/lotus/lib/sigs/secp"
@@ -119,7 +120,7 @@ const (
 	ExtractApiKey
 	HeadMetricsKey
 	RunPeerTaggerKey
-	JournalKey
+	InitJournalKey

 	SetApiEndpointKey
@@ -151,8 +152,9 @@ func defaults() []Option {
 		Override(new(record.Validator), modules.RecordValidator),
 		Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(false)),
 		Override(new(dtypes.ShutdownChan), make(chan struct{})),
-		Override(JournalKey, modules.SetupJournal),
+		Override(new(journal.Journal), journal.OpenFSJournal),
+		Override(InitJournalKey, func(j *journal.Journal) { /* forces the creation of the journal at startup */ }),

 		// Filecoin modules
 	}
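With the journal now provided through the DI container behind the journal.Journal interface, an alternative implementation could be swapped in through the same override mechanism. A hedged sketch, not part of this commit, assuming the options plumbing accepts the constructor as-is:

		// e.g. in a test harness: back the journal with the in-memory implementation.
		Override(new(journal.Journal), journal.NewMemoryJournal),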

View File

@@ -6,7 +6,6 @@ import (
 	"errors"
 	"io"
 	"io/ioutil"
-	"path/filepath"

 	"github.com/gbrlsnchs/jwt/v3"
 	logging "github.com/ipfs/go-log/v2"
@@ -19,7 +18,6 @@ import (
 	"github.com/filecoin-project/lotus/api/apistruct"
 	"github.com/filecoin-project/lotus/build"
 	"github.com/filecoin-project/lotus/chain/types"
-	"github.com/filecoin-project/lotus/journal"
 	"github.com/filecoin-project/lotus/lib/addrutil"
 	"github.com/filecoin-project/lotus/node/modules/dtypes"
 	"github.com/filecoin-project/lotus/node/repo"
@@ -95,7 +93,3 @@ func BuiltinBootstrap() (dtypes.BootstrapPeers, error) {
 func DrandBootstrap() (dtypes.DrandBootstrap, error) {
 	return build.DrandBootstrap()
 }
-
-func SetupJournal(lr repo.LockedRepo) error {
-	return journal.InitializeSystemJournal(filepath.Join(lr.Path(), "journal"))
-}

View File

@@ -256,7 +256,7 @@ func (s *WindowPoStScheduler) checkNextFaults(ctx context.Context, deadline uint
 		Method:   builtin.MethodsMiner.DeclareFaults,
 		Params:   enc,
 		Value:    types.NewInt(0), // TODO: Is there a fee?
 		GasLimit: 100_000_000, // i dont know help
 		GasPrice: types.NewInt(2),
 	}
} }