feat: curioweb: Sector info page (#11846)
* feat: curioweb: Sector info page
* Address review

parent afa9032833
commit d7d849cf20
@@ -4,10 +4,18 @@ import (
    "context"
    "time"

    lru "github.com/hashicorp/golang-lru/v2"
    blocks "github.com/ipfs/go-block-format"

    "github.com/filecoin-project/lotus/api/client"
    "github.com/filecoin-project/lotus/blockstore"
    "github.com/filecoin-project/lotus/chain/store"
    cliutil "github.com/filecoin-project/lotus/cli/util"
    "github.com/filecoin-project/lotus/lib/must"
)

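// ChainBlockCache is an LRU cache of chain blocks (4096 entries, keyed by multihash)
// that backs the read-cached blockstore wrapped around the API blockstore below.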
var ChainBlockCache = must.One(lru.New[blockstore.MhString, blocks.Block](4096))

func (a *app) watchRpc() {
    ticker := time.NewTicker(watchInterval)
    for {
@@ -84,6 +92,7 @@ func (a *app) updateRpc(ctx context.Context) error {
        }()

        a.workingApi = v1api
        a.stor = store.ActorStore(ctx, blockstore.NewReadCachedBlockstore(blockstore.NewAPIBlockstore(a.workingApi), ChainBlockCache))
    }
}
@@ -41,6 +41,9 @@ func Routes(r *mux.Router, deps *deps.Deps) error {

    // node info page
    r.HandleFunc("/node/{id}", a.nodeInfo)

    // sector info page
    r.HandleFunc("/sector/{sp}/{id}", a.sectorInfo)
    return nil
}
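With the handler below, a sector page is addressed by SP address and sector number. Using illustrative values, and assuming the router is mounted under /hapi (as the DETAILS link added to the pipeline_porep_sectors template further down suggests), the page for sector 42 of f01234 would be fetched as:

    GET /hapi/sector/f01234/42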
@@ -8,15 +8,23 @@ import (
    "os"
    "sort"
    "strconv"
    "strings"
    "sync"
    "text/template"
    "time"

    "github.com/gorilla/mux"
    "github.com/samber/lo"
    "golang.org/x/xerrors"

    "github.com/filecoin-project/go-address"

    "github.com/filecoin-project/lotus/api/v1api"
    "github.com/filecoin-project/lotus/chain/actors/adt"
    "github.com/filecoin-project/lotus/lib/harmony/harmonydb"
    "github.com/filecoin-project/lotus/lib/must"
    "github.com/filecoin-project/lotus/storage/paths"
    "github.com/filecoin-project/lotus/storage/sealer/storiface"
)

type app struct {
@@ -25,6 +33,7 @@ type app struct {

    rpcInfoLk  sync.Mutex
    workingApi v1api.FullNode
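    // stor is an actor store backed by the read-cached API blockstore; it is set in updateRpc.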
    stor       adt.Store

    actorInfoLk sync.Mutex
    actorInfos  []actorInfo
@@ -128,6 +137,253 @@ func (a *app) nodeInfo(writer http.ResponseWriter, request *http.Request) {
    a.executePageTemplate(writer, "node_info", "Node Info", mi)
}

func (a *app) sectorInfo(w http.ResponseWriter, r *http.Request) {
    params := mux.Vars(r)

    id, ok := params["id"]
    if !ok {
        http.Error(w, "missing id", http.StatusBadRequest)
        return
    }

    intid, err := strconv.ParseInt(id, 10, 64)
    if err != nil {
        http.Error(w, "invalid id", http.StatusBadRequest)
        return
    }

    sp, ok := params["sp"]
    if !ok {
        http.Error(w, "missing sp", http.StatusBadRequest)
        return
    }

    maddr, err := address.NewFromString(sp)
    if err != nil {
        http.Error(w, "invalid sp", http.StatusBadRequest)
        return
    }

    spid, err := address.IDFromAddress(maddr)
    if err != nil {
        http.Error(w, "invalid sp", http.StatusBadRequest)
        return
    }

    ctx := r.Context()
    var tasks []PipelineTask

    err = a.db.Select(ctx, &tasks, `SELECT
        sp_id, sector_number,
        create_time,
        task_id_sdr, after_sdr,
        task_id_tree_d, after_tree_d,
        task_id_tree_c, after_tree_c,
        task_id_tree_r, after_tree_r,
        task_id_precommit_msg, after_precommit_msg,
        after_precommit_msg_success, seed_epoch,
        task_id_porep, porep_proof, after_porep,
        task_id_finalize, after_finalize,
        task_id_move_storage, after_move_storage,
        task_id_commit_msg, after_commit_msg,
        after_commit_msg_success,
        failed, failed_reason
    FROM sectors_sdr_pipeline WHERE sp_id = $1 AND sector_number = $2`, spid, intid)
    if err != nil {
        http.Error(w, xerrors.Errorf("failed to fetch pipeline task info: %w", err).Error(), http.StatusInternalServerError)
        return
    }

    if len(tasks) == 0 {
        http.Error(w, "sector not found", http.StatusInternalServerError)
        return
    }

    head, err := a.workingApi.ChainHead(ctx)
    if err != nil {
        http.Error(w, xerrors.Errorf("failed to fetch chain head: %w", err).Error(), http.StatusInternalServerError)
        return
    }
    epoch := head.Height()

    mbf, err := a.getMinerBitfields(ctx, maddr, a.stor)
    if err != nil {
        http.Error(w, xerrors.Errorf("failed to load miner bitfields: %w", err).Error(), http.StatusInternalServerError)
        return
    }

    task := tasks[0]

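    // afterSeed reports whether the sector's PoRep seed epoch has already been reached at the current chain head.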
    afterSeed := task.SeedEpoch != nil && *task.SeedEpoch <= int64(epoch)

    var sectorLocations []struct {
        CanSeal, CanStore bool
        FileType          storiface.SectorFileType `db:"sector_filetype"`
        StorageID         string                   `db:"storage_id"`
        Urls              string                   `db:"urls"`
    }

    err = a.db.Select(ctx, &sectorLocations, `SELECT p.can_seal, p.can_store, l.sector_filetype, l.storage_id, p.urls FROM sector_location l
        JOIN storage_path p ON l.storage_id = p.storage_id
        WHERE l.sector_num = $1 and l.miner_id = $2 ORDER BY p.can_seal, p.can_store, l.storage_id`, intid, spid)
    if err != nil {
        http.Error(w, xerrors.Errorf("failed to fetch sector locations: %w", err).Error(), http.StatusInternalServerError)
        return
    }

    type fileLocations struct {
        StorageID string
        Urls      []string
    }

    type locationTable struct {
        PathType        *string
        PathTypeRowSpan int

        FileType        *string
        FileTypeRowSpan int

        Locations []fileLocations
    }
    locs := []locationTable{}

    for i, loc := range sectorLocations {
        loc := loc

        urlList := strings.Split(loc.Urls, paths.URLSeparator)

        fLoc := fileLocations{
            StorageID: loc.StorageID,
            Urls:      urlList,
        }

        var pathTypeStr *string
        var fileTypeStr *string
        pathTypeRowSpan := 1
        fileTypeRowSpan := 1

        pathType := "None"
        if loc.CanSeal && loc.CanStore {
            pathType = "Seal/Store"
        } else if loc.CanSeal {
            pathType = "Seal"
        } else if loc.CanStore {
            pathType = "Store"
        }
        pathTypeStr = &pathType

        fileType := loc.FileType.String()
        fileTypeStr = &fileType

        if i > 0 {
            prevNonNilPathTypeLoc := i - 1
            for prevNonNilPathTypeLoc > 0 && locs[prevNonNilPathTypeLoc].PathType == nil {
                prevNonNilPathTypeLoc--
            }
            if *locs[prevNonNilPathTypeLoc].PathType == *pathTypeStr {
                pathTypeRowSpan = 0
                pathTypeStr = nil
                locs[prevNonNilPathTypeLoc].PathTypeRowSpan++
                // only if we have extended path type we may need to extend file type

                prevNonNilFileTypeLoc := i - 1
                for prevNonNilFileTypeLoc > 0 && locs[prevNonNilFileTypeLoc].FileType == nil {
                    prevNonNilFileTypeLoc--
                }
                if *locs[prevNonNilFileTypeLoc].FileType == *fileTypeStr {
                    fileTypeRowSpan = 0
                    fileTypeStr = nil
                    locs[prevNonNilFileTypeLoc].FileTypeRowSpan++
                }
            }
        }

        locTable := locationTable{
            PathType:        pathTypeStr,
            PathTypeRowSpan: pathTypeRowSpan,
            FileType:        fileTypeStr,
            FileTypeRowSpan: fileTypeRowSpan,
            Locations:       []fileLocations{fLoc},
        }

        locs = append(locs, locTable)

    }
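    // Illustration of the rowspan merge above, with hypothetical rows: given three
    // locations (Seal/Store, "sealed"), (Seal/Store, "sealed"), (Seal/Store, "cache"),
    // the loop yields
    //   locs[0]: PathType "Seal/Store" (rowspan 3), FileType "sealed" (rowspan 2)
    //   locs[1]: PathType nil (rowspan 0),          FileType nil (rowspan 0)
    //   locs[2]: PathType nil (rowspan 0),          FileType "cache" (rowspan 1)
    // so the page template can render consecutive identical path/file type cells as
    // single merged cells.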

    // TaskIDs
    taskIDs := map[int64]struct{}{}
    var htasks []taskSummary
    {
        // get non-nil task IDs
        appendNonNil := func(id *int64) {
            if id != nil {
                taskIDs[*id] = struct{}{}
            }
        }
        appendNonNil(task.TaskSDR)
        appendNonNil(task.TaskTreeD)
        appendNonNil(task.TaskTreeC)
        appendNonNil(task.TaskTreeR)
        appendNonNil(task.TaskPrecommitMsg)
        appendNonNil(task.TaskPoRep)
        appendNonNil(task.TaskFinalize)
        appendNonNil(task.TaskMoveStorage)
        appendNonNil(task.TaskCommitMsg)

        if len(taskIDs) > 0 {
            ids := lo.Keys(taskIDs)

            var dbtasks []struct {
                OwnerID     *string   `db:"owner_id"`
                HostAndPort *string   `db:"host_and_port"`
                TaskID      int64     `db:"id"`
                Name        string    `db:"name"`
                UpdateTime  time.Time `db:"update_time"`
            }
            err = a.db.Select(ctx, &dbtasks, `SELECT t.owner_id, hm.host_and_port, t.id, t.name, t.update_time FROM harmony_task t LEFT JOIN curio.harmony_machines hm ON hm.id = t.owner_id WHERE t.id = ANY($1)`, ids)
            if err != nil {
                http.Error(w, fmt.Sprintf("failed to fetch task names: %v", err), http.StatusInternalServerError)
                return
            }

            for _, tn := range dbtasks {
                htasks = append(htasks, taskSummary{
                    Name:        tn.Name,
                    SincePosted: time.Since(tn.UpdateTime.Local()).Round(time.Second).String(),
                    Owner:       tn.HostAndPort,
                    OwnerID:     tn.OwnerID,
                    ID:          tn.TaskID,
                })
            }
        }
    }

    mi := struct {
        SectorNumber  int64
        PipelinePoRep sectorListEntry

        Locations []locationTable
        Tasks     []taskSummary
    }{
        SectorNumber: intid,
        PipelinePoRep: sectorListEntry{
            PipelineTask: tasks[0],
            AfterSeed:    afterSeed,

            ChainAlloc:    must.One(mbf.alloc.IsSet(uint64(task.SectorNumber))),
            ChainSector:   must.One(mbf.sectorSet.IsSet(uint64(task.SectorNumber))),
            ChainActive:   must.One(mbf.active.IsSet(uint64(task.SectorNumber))),
            ChainUnproven: must.One(mbf.unproven.IsSet(uint64(task.SectorNumber))),
            ChainFaulty:   must.One(mbf.faulty.IsSet(uint64(task.SectorNumber))),
        },

        Locations: locs,
        Tasks:     htasks,
    }

    a.executePageTemplate(w, "sector_info", "Sector Info", mi)
}

var templateDev = os.Getenv("LOTUS_WEB_DEV") == "1"

func (a *app) executeTemplate(w http.ResponseWriter, name string, data interface{}) {
@@ -1,29 +1,22 @@
package hapi

import (
    "context"
    "net/http"
    "time"

    lru "github.com/hashicorp/golang-lru/v2"
    blocks "github.com/ipfs/go-block-format"
    "golang.org/x/xerrors"

    "github.com/filecoin-project/go-address"
    "github.com/filecoin-project/go-bitfield"

    "github.com/filecoin-project/lotus/blockstore"
    "github.com/filecoin-project/lotus/chain/actors/adt"
    "github.com/filecoin-project/lotus/chain/actors/builtin/miner"
    "github.com/filecoin-project/lotus/chain/store"
    "github.com/filecoin-project/lotus/chain/types"
    "github.com/filecoin-project/lotus/lib/must"
)

var ChainBlockCache = must.One(lru.New[blockstore.MhString, blocks.Block](4096))

func (a *app) pipelinePorepSectors(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()

    type PipelineTask struct {
type PipelineTask struct {
    SpID         int64 `db:"sp_id"`
    SectorNumber int64 `db:"sector_number"`

@@ -64,7 +57,24 @@ func (a *app) pipelinePorepSectors(w http.ResponseWriter, r *http.Request) {

    Failed       bool   `db:"failed"`
    FailedReason string `db:"failed_reason"`
}
}

type sectorListEntry struct {
    PipelineTask

    Address    address.Address
    CreateTime string
    AfterSeed  bool

    ChainAlloc, ChainSector, ChainActive, ChainUnproven, ChainFaulty bool
}

type minerBitfields struct {
    alloc, sectorSet, active, unproven, faulty bitfield.BitField
}

func (a *app) pipelinePorepSectors(w http.ResponseWriter, r *http.Request) {
    ctx := r.Context()

    var tasks []PipelineTask

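Across these hunks, PipelineTask, sectorListEntry and minerBitfields move from function-local declarations inside pipelinePorepSectors to package scope, so the new sectorInfo handler can reuse the same types.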
@@ -89,16 +99,6 @@ func (a *app) pipelinePorepSectors(w http.ResponseWriter, r *http.Request) {
        return
    }

    type sectorListEntry struct {
        PipelineTask

        Address    address.Address
        CreateTime string
        AfterSeed  bool

        ChainAlloc, ChainSector, ChainActive, ChainUnproven, ChainFaulty bool
    }

    head, err := a.workingApi.ChainHead(ctx)
    if err != nil {
        http.Error(w, xerrors.Errorf("failed to fetch chain head: %w", err).Error(), http.StatusInternalServerError)
@@ -106,11 +106,6 @@ func (a *app) pipelinePorepSectors(w http.ResponseWriter, r *http.Request) {
    }
    epoch := head.Height()

    stor := store.ActorStore(ctx, blockstore.NewReadCachedBlockstore(blockstore.NewAPIBlockstore(a.workingApi), ChainBlockCache))

    type minerBitfields struct {
        alloc, sectorSet, active, unproven, faulty bitfield.BitField
    }
    minerBitfieldCache := map[address.Address]minerBitfields{}

    sectorList := make([]sectorListEntry, 0, len(tasks))
@@ -127,55 +122,11 @@ func (a *app) pipelinePorepSectors(w http.ResponseWriter, r *http.Request) {

        mbf, ok := minerBitfieldCache[addr]
        if !ok {
            act, err := a.workingApi.StateGetActor(ctx, addr, types.EmptyTSK)
            mbf, err := a.getMinerBitfields(ctx, addr, a.stor)
            if err != nil {
                http.Error(w, xerrors.Errorf("failed to load actor: %w", err).Error(), http.StatusInternalServerError)
                http.Error(w, xerrors.Errorf("failed to load miner bitfields: %w", err).Error(), http.StatusInternalServerError)
                return
            }

            mas, err := miner.Load(stor, act)
            if err != nil {
                http.Error(w, xerrors.Errorf("failed to load miner actor: %w", err).Error(), http.StatusInternalServerError)
                return
            }

            activeSectors, err := miner.AllPartSectors(mas, miner.Partition.ActiveSectors)
            if err != nil {
                http.Error(w, xerrors.Errorf("failed to load active sectors: %w", err).Error(), http.StatusInternalServerError)
                return
            }

            allSectors, err := miner.AllPartSectors(mas, miner.Partition.AllSectors)
            if err != nil {
                http.Error(w, xerrors.Errorf("failed to load all sectors: %w", err).Error(), http.StatusInternalServerError)
                return
            }

            unproved, err := miner.AllPartSectors(mas, miner.Partition.UnprovenSectors)
            if err != nil {
                http.Error(w, xerrors.Errorf("failed to load unproven sectors: %w", err).Error(), http.StatusInternalServerError)
                return
            }

            faulty, err := miner.AllPartSectors(mas, miner.Partition.FaultySectors)
            if err != nil {
                http.Error(w, xerrors.Errorf("failed to load faulty sectors: %w", err).Error(), http.StatusInternalServerError)
                return
            }

            alloc, err := mas.GetAllocatedSectors()
            if err != nil {
                http.Error(w, xerrors.Errorf("failed to load allocated sectors: %w", err).Error(), http.StatusInternalServerError)
                return
            }

            mbf = minerBitfields{
                alloc:     *alloc,
                sectorSet: allSectors,
                active:    activeSectors,
                unproven:  unproved,
                faulty:    faulty,
            }
            minerBitfieldCache[addr] = mbf
        }

@@ -197,3 +148,48 @@ func (a *app) pipelinePorepSectors(w http.ResponseWriter, r *http.Request) {

    a.executeTemplate(w, "pipeline_porep_sectors", sectorList)
}

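// getMinerBitfields loads the miner actor for addr and returns its allocated, committed (all),
// active, unproven and faulty sector bitfields; it is shared by the PoRep pipeline list above
// and the new sector info page.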
func (a *app) getMinerBitfields(ctx context.Context, addr address.Address, stor adt.Store) (minerBitfields, error) {
    act, err := a.workingApi.StateGetActor(ctx, addr, types.EmptyTSK)
    if err != nil {
        return minerBitfields{}, xerrors.Errorf("failed to load actor: %w", err)
    }

    mas, err := miner.Load(stor, act)
    if err != nil {
        return minerBitfields{}, xerrors.Errorf("failed to load miner actor: %w", err)
    }

    activeSectors, err := miner.AllPartSectors(mas, miner.Partition.ActiveSectors)
    if err != nil {
        return minerBitfields{}, xerrors.Errorf("failed to load active sectors: %w", err)
    }

    allSectors, err := miner.AllPartSectors(mas, miner.Partition.AllSectors)
    if err != nil {
        return minerBitfields{}, xerrors.Errorf("failed to load all sectors: %w", err)
    }

    unproved, err := miner.AllPartSectors(mas, miner.Partition.UnprovenSectors)
    if err != nil {
        return minerBitfields{}, xerrors.Errorf("failed to load unproven sectors: %w", err)
    }

    faulty, err := miner.AllPartSectors(mas, miner.Partition.FaultySectors)
    if err != nil {
        return minerBitfields{}, xerrors.Errorf("failed to load faulty sectors: %w", err)
    }

    alloc, err := mas.GetAllocatedSectors()
    if err != nil {
        return minerBitfields{}, xerrors.Errorf("failed to load allocated sectors: %w", err)
    }

    return minerBitfields{
        alloc:     *alloc,
        sectorSet: allSectors,
        active:    activeSectors,
        unproven:  unproved,
        faulty:    faulty,
    }, nil
}
@@ -1,9 +1,4 @@
{{define "pipeline_porep_sectors"}}
    {{range .}}
    <tr>
        <td>{{.Address}}</td>
        <td rowspan="2">{{.CreateTime}}</td>
        <td rowspan="2">
{{define "sector_porep_state"}}
    <table class="porep-state">
        <tbody>
        <tr>
@@ -124,9 +119,18 @@
        </td>
        </tbody>
    </table>
{{end}}

{{define "pipeline_porep_sectors"}}
    {{range .}}
    <tr>
        <td>{{.Address}}</td>
        <td rowspan="2">{{.CreateTime}}</td>
        <td rowspan="2">
            {{template "sector_porep_state" .}}
        </td>
        <td rowspan="2">
            <a href="#">DETAILS</a>
            <a href="/hapi/sector/{{.Address}}/{{.SectorNumber}}">DETAILS</a>
        </td>
    </tr>
    <tr>
@@ -68,3 +68,53 @@ a:hover {
        color: deeppink;
    }
}

/* Path: curiosrc/web/hapi/web/pipeline_porep_sectors.gohtml */
.porep-pipeline-table, .porep-state {
    color: #d0d0d0;
}

.porep-pipeline-table td, .porep-pipeline-table th {
    border-left: none;
    border-collapse: collapse;
}

.porep-pipeline-table tr:nth-child(odd) {
    border-top: 6px solid #999999;

}

.porep-pipeline-table tr:first-child, .porep-pipeline-table tr:first-child {
    border-top: none;
}

.porep-state {
    border-collapse: collapse;
}

.porep-state td, .porep-state th {
    border-left: 1px solid #f0f0f0;
    border-right: 1px solid #f0f0f0;

    padding: 1px 5px;

    text-align: center;
    font-size: 0.7em;
}

.porep-state tr {
    border-top: 1px solid #f0f0f0;
}
.porep-state tr:first-child {
    border-top: none;
}

.pipeline-active {
    background-color: #303060;
}
.pipeline-success {
    background-color: #306030;
}
.pipeline-failed {
    background-color: #603030;
}
@@ -5,57 +5,6 @@
    <script type="module" src="chain-connectivity.js"></script>
    <link rel="stylesheet" href="main.css">
    <link rel='stylesheet' href='https://cdn.jsdelivr.net/npm/hack-font@3.3.0/build/web/hack-subset.css'>
    <style>
        .porep-pipeline-table, .porep-state {
            color: #d0d0d0;
        }

        .porep-pipeline-table td, .porep-pipeline-table th {
            border-left: none;
            border-collapse: collapse;
        }

        .porep-pipeline-table tr:nth-child(odd) {
            border-top: 6px solid #999999;

        }

        .porep-pipeline-table tr:first-child, .porep-pipeline-table tr:first-child {
            border-top: none;
        }

        .porep-state {
            border-collapse: collapse;
        }

        .porep-state td, .porep-state th {
            border-left: 1px solid #f0f0f0;
            border-right: 1px solid #f0f0f0;

            padding: 1px 5px;

            text-align: center;
            font-size: 0.7em;
        }

        .porep-state tr {
            border-top: 1px solid #f0f0f0;
        }
        .porep-state tr:first-child {
            border-top: none;
        }

        .pipeline-active {
            background-color: #303060;
        }
        .pipeline-success {
            background-color: #306030;
        }
        .pipeline-failed {
            background-color: #603030;
        }

    </style>
</head>
<body>
<div class="app-head">
@@ -26,6 +26,8 @@ import (

const NoMinerFilter = abi.ActorID(0)

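// URLSeparator separates the individual URLs stored in the storage_path.urls column.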
const URLSeparator = ","

var errAlreadyLocked = errors.New("already locked")

type DBIndex struct {
@@ -197,13 +199,13 @@ func (dbi *DBIndex) StorageAttach(ctx context.Context, si storiface.StorageInfo,
    if storageId.Valid {
        var currUrls []string
        if urls.Valid {
            currUrls = strings.Split(urls.String, ",")
            currUrls = strings.Split(urls.String, URLSeparator)
        }
        currUrls = union(currUrls, si.URLs)

        _, err = tx.Exec(
            "UPDATE storage_path set urls=$1, weight=$2, max_storage=$3, can_seal=$4, can_store=$5, groups=$6, allow_to=$7, allow_types=$8, deny_types=$9, allow_miners=$10, deny_miners=$11, last_heartbeat=NOW() WHERE storage_id=$12",
            strings.Join(currUrls, ","),
            strings.Join(currUrls, URLSeparator),
            si.Weight,
            si.MaxStorage,
            si.CanSeal,
@@ -277,7 +279,7 @@ func (dbi *DBIndex) StorageDetach(ctx context.Context, id storiface.ID, url stri
    }

    if len(modUrls) > 0 {
        newUrls := strings.Join(modUrls, ",")
        newUrls := strings.Join(modUrls, URLSeparator)
        _, err := dbi.harmonyDB.Exec(ctx, "UPDATE storage_path set urls=$1 WHERE storage_id=$2", newUrls, id)
        if err != nil {
            return err