forked from cerc-io/plugeth
dashboard: remove the dashboard (#20279)
This removes the dashboard project. The dashboard was an experimental browser UI for geth which displayed metrics and chain information in real time. We are removing it because it has marginal utility and nobody on the team can maintain it. Removing the dashboard removes a lot of dependency code and shaves 6 MB off the geth binary size.
This commit is contained in:
parent
987648b0ad
commit
afe0b65405
@ -28,7 +28,6 @@ import (
|
||||
cli "gopkg.in/urfave/cli.v1"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/dashboard"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/node"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
@ -75,11 +74,10 @@ type ethstatsConfig struct {
|
||||
}
|
||||
|
||||
type gethConfig struct {
|
||||
Eth eth.Config
|
||||
Shh whisper.Config
|
||||
Node node.Config
|
||||
Ethstats ethstatsConfig
|
||||
Dashboard dashboard.Config
|
||||
Eth eth.Config
|
||||
Shh whisper.Config
|
||||
Node node.Config
|
||||
Ethstats ethstatsConfig
|
||||
}
|
||||
|
||||
func loadConfig(file string, cfg *gethConfig) error {
|
||||
@ -110,10 +108,9 @@ func defaultNodeConfig() node.Config {
|
||||
func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
|
||||
// Load defaults.
|
||||
cfg := gethConfig{
|
||||
Eth: eth.DefaultConfig,
|
||||
Shh: whisper.DefaultConfig,
|
||||
Node: defaultNodeConfig(),
|
||||
Dashboard: dashboard.DefaultConfig,
|
||||
Eth: eth.DefaultConfig,
|
||||
Shh: whisper.DefaultConfig,
|
||||
Node: defaultNodeConfig(),
|
||||
}
|
||||
|
||||
// Load config file.
|
||||
@ -134,7 +131,6 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) {
|
||||
cfg.Ethstats.URL = ctx.GlobalString(utils.EthStatsURLFlag.Name)
|
||||
}
|
||||
utils.SetShhConfig(ctx, stack, &cfg.Shh)
|
||||
utils.SetDashboardConfig(ctx, &cfg.Dashboard)
|
||||
|
||||
return stack, cfg
|
||||
}
|
||||
@ -179,12 +175,6 @@ func makeFullNode(ctx *cli.Context) *node.Node {
|
||||
if cfg.Ethstats.URL != "" {
|
||||
utils.RegisterEthStatsService(stack, cfg.Ethstats.URL)
|
||||
}
|
||||
|
||||
// Add dashboard daemon if requested. This should be the last registered service
|
||||
// in order to be able to collect information about the other services.
|
||||
if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
|
||||
utils.RegisterDashboardService(stack, &cfg.Dashboard, gitCommit)
|
||||
}
|
||||
return stack
|
||||
}
|
||||
|
||||
|
@ -70,10 +70,6 @@ var (
|
||||
utils.NoUSBFlag,
|
||||
utils.SmartCardDaemonPathFlag,
|
||||
utils.OverrideIstanbulFlag,
|
||||
utils.DashboardEnabledFlag,
|
||||
utils.DashboardAddrFlag,
|
||||
utils.DashboardPortFlag,
|
||||
utils.DashboardRefreshFlag,
|
||||
utils.EthashCacheDirFlag,
|
||||
utils.EthashCachesInMemoryFlag,
|
||||
utils.EthashCachesOnDiskFlag,
|
||||
@ -236,16 +232,8 @@ func init() {
|
||||
app.Flags = append(app.Flags, metricsFlags...)
|
||||
|
||||
app.Before = func(ctx *cli.Context) error {
|
||||
logdir := ""
|
||||
if ctx.GlobalBool(utils.DashboardEnabledFlag.Name) {
|
||||
logdir = (&node.Config{DataDir: utils.MakeDataDir(ctx)}).ResolvePath("logs")
|
||||
}
|
||||
if err := debug.Setup(ctx, logdir); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return debug.Setup(ctx, "")
|
||||
}
|
||||
|
||||
app.After = func(ctx *cli.Context) error {
|
||||
debug.Exit()
|
||||
console.Stdin.Close() // Resets terminal mode.
|
||||
|
@ -22,8 +22,6 @@ import (
|
||||
"io"
|
||||
"sort"
|
||||
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/cmd/utils"
|
||||
"github.com/ethereum/go-ethereum/internal/debug"
|
||||
cli "gopkg.in/urfave/cli.v1"
|
||||
@ -116,16 +114,6 @@ var AppHelpFlagGroups = []flagGroup{
|
||||
utils.EthashDatasetsOnDiskFlag,
|
||||
},
|
||||
},
|
||||
//{
|
||||
// Name: "DASHBOARD",
|
||||
// Flags: []cli.Flag{
|
||||
// utils.DashboardEnabledFlag,
|
||||
// utils.DashboardAddrFlag,
|
||||
// utils.DashboardPortFlag,
|
||||
// utils.DashboardRefreshFlag,
|
||||
// utils.DashboardAssetsFlag,
|
||||
// },
|
||||
//},
|
||||
{
|
||||
Name: "TRANSACTION POOL",
|
||||
Flags: []cli.Flag{
|
||||
@ -324,9 +312,6 @@ func init() {
|
||||
var uncategorized []cli.Flag
|
||||
for _, flag := range data.(*cli.App).Flags {
|
||||
if _, ok := categorized[flag.String()]; !ok {
|
||||
if strings.HasPrefix(flag.GetName(), "dashboard") {
|
||||
continue
|
||||
}
|
||||
uncategorized = append(uncategorized, flag)
|
||||
}
|
||||
}
|
||||
|
@ -42,7 +42,6 @@ import (
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/core/vm"
|
||||
"github.com/ethereum/go-ethereum/crypto"
|
||||
"github.com/ethereum/go-ethereum/dashboard"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/eth/downloader"
|
||||
"github.com/ethereum/go-ethereum/eth/gasprice"
|
||||
@ -272,26 +271,6 @@ var (
|
||||
Name: "ulc.onlyannounce",
|
||||
Usage: "Ultra light server sends announcements only",
|
||||
}
|
||||
// Dashboard settings
|
||||
DashboardEnabledFlag = cli.BoolFlag{
|
||||
Name: "dashboard",
|
||||
Usage: "Enable the dashboard",
|
||||
}
|
||||
DashboardAddrFlag = cli.StringFlag{
|
||||
Name: "dashboard.addr",
|
||||
Usage: "Dashboard listening interface",
|
||||
Value: dashboard.DefaultConfig.Host,
|
||||
}
|
||||
DashboardPortFlag = cli.IntFlag{
|
||||
Name: "dashboard.host",
|
||||
Usage: "Dashboard listening port",
|
||||
Value: dashboard.DefaultConfig.Port,
|
||||
}
|
||||
DashboardRefreshFlag = cli.DurationFlag{
|
||||
Name: "dashboard.refresh",
|
||||
Usage: "Dashboard metrics collection refresh rate",
|
||||
Value: dashboard.DefaultConfig.Refresh,
|
||||
}
|
||||
// Ethash settings
|
||||
EthashCacheDirFlag = DirectoryFlag{
|
||||
Name: "ethash.cachedir",
|
||||
@ -1530,13 +1509,6 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
|
||||
}
|
||||
}
|
||||
|
||||
// SetDashboardConfig applies dashboard related command line flags to the config.
|
||||
func SetDashboardConfig(ctx *cli.Context, cfg *dashboard.Config) {
|
||||
cfg.Host = ctx.GlobalString(DashboardAddrFlag.Name)
|
||||
cfg.Port = ctx.GlobalInt(DashboardPortFlag.Name)
|
||||
cfg.Refresh = ctx.GlobalDuration(DashboardRefreshFlag.Name)
|
||||
}
|
||||
|
||||
// RegisterEthService adds an Ethereum client to the stack.
|
||||
func RegisterEthService(stack *node.Node, cfg *eth.Config) {
|
||||
var err error
|
||||
@ -1559,22 +1531,6 @@ func RegisterEthService(stack *node.Node, cfg *eth.Config) {
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterDashboardService adds a dashboard to the stack.
|
||||
func RegisterDashboardService(stack *node.Node, cfg *dashboard.Config, commit string) {
|
||||
err := stack.Register(func(ctx *node.ServiceContext) (node.Service, error) {
|
||||
var (
|
||||
ethServ *eth.Ethereum
|
||||
lesServ *les.LightEthereum
|
||||
)
|
||||
_ = ctx.Service(ðServ)
|
||||
_ = ctx.Service(&lesServ)
|
||||
return dashboard.New(cfg, ethServ, lesServ, commit, ctx.ResolvePath("logs")), nil
|
||||
})
|
||||
if err != nil {
|
||||
Fatalf("Failed to register the dashboard service: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterShhService configures Whisper and adds it to the given node.
|
||||
func RegisterShhService(stack *node.Node, cfg *whisper.Config) {
|
||||
if err := stack.Register(func(n *node.ServiceContext) (node.Service, error) {
|
||||
|
@ -1,58 +0,0 @@
|
||||
## Go Ethereum Dashboard
|
||||
|
||||
The dashboard is a data visualizer integrated into geth, intended to collect and visualize useful information of an Ethereum node. It consists of two parts:
|
||||
|
||||
* The client visualizes the collected data.
|
||||
* The server collects the data, and updates the clients.
|
||||
|
||||
The client's UI uses [React][React] with JSX syntax, which is validated by the [ESLint][ESLint] linter mostly according to the [Airbnb React/JSX Style Guide][Airbnb]. The style is defined in the `.eslintrc` configuration file. The resources are bundled into a single `bundle.js` file using [Webpack][Webpack], which relies on the `webpack.config.js`. The bundled file is referenced from `dashboard.html` and takes part in the `assets.go` too. The necessary dependencies for the module bundler are gathered by [Node.js][Node.js].
|
||||
|
||||
### Development and bundling
|
||||
|
||||
As the dashboard depends on certain NPM packages (which are not included in the `go-ethereum` repo), these need to be installed first:
|
||||
|
||||
```
|
||||
$ (cd dashboard/assets && yarn install && yarn flow)
|
||||
```
|
||||
|
||||
Normally the dashboard assets are bundled into Geth via `go-bindata` to avoid external dependencies. Rebuilding Geth after each UI modification however is not feasible from a developer perspective. Instead, we can run `yarn dev` to watch for file system changes and refresh the browser automatically.
|
||||
|
||||
```
|
||||
$ geth --dashboard --vmodule=dashboard=5
|
||||
$ (cd dashboard/assets && yarn dev)
|
||||
```
|
||||
|
||||
To bundle up the final UI into Geth, run `go generate`:
|
||||
|
||||
```
|
||||
$ (cd dashboard && go generate)
|
||||
```
|
||||
|
||||
### Static type checking
|
||||
|
||||
Since JavaScript doesn't provide type safety, [Flow][Flow] is used to check types. These are only useful during development, so at the end of the process Babel will strip them.
|
||||
|
||||
To take advantage of static type checking, your IDE needs to be prepared for it. In case of [Atom][Atom] a configuration guide can be found [here][Atom config]: Install the [Nuclide][Nuclide] package for Flow support, making sure it installs all of its support packages by enabling `Install Recommended Packages on Startup`, and set the path of the `flow-bin` which were installed previously by `yarn`.
|
||||
|
||||
For more IDE support install the `linter-eslint` package too, which finds the `.eslintrc` file, and provides real-time linting. Atom warns, that these two packages are incompatible, but they seem to work well together. For third-party library errors and auto-completion [flow-typed][flow-typed] is used.
|
||||
|
||||
### Have fun
|
||||
|
||||
[Webpack][Webpack] offers handy tools for visualizing the bundle's dependency tree and space usage.
|
||||
|
||||
* Generate the bundle's profile running `yarn stats`
|
||||
* For the _dependency tree_ go to [Webpack Analyze][WA], and import `stats.json`
|
||||
* For the _space usage_ go to [Webpack Visualizer][WV], and import `stats.json`
|
||||
|
||||
[React]: https://reactjs.org/
|
||||
[ESLint]: https://eslint.org/
|
||||
[Airbnb]: https://github.com/airbnb/javascript/tree/master/react
|
||||
[Webpack]: https://webpack.github.io/
|
||||
[WA]: https://webpack.github.io/analyse/
|
||||
[WV]: https://chrisbateman.github.io/webpack-visualizer/
|
||||
[Node.js]: https://nodejs.org/en/
|
||||
[Flow]: https://flow.org/
|
||||
[Atom]: https://atom.io/
|
||||
[Atom config]: https://medium.com/@fastphrase/integrating-flow-into-a-react-project-fbbc2f130eed
|
||||
[Nuclide]: https://nuclide.io/docs/quick-start/getting-started/
|
||||
[flow-typed]: https://github.com/flowtype/flow-typed
|
39051
dashboard/assets.go
39051
dashboard/assets.go
File diff suppressed because one or more lines are too long
@ -1,3 +0,0 @@
|
||||
node_modules/* #ignored by default
|
||||
flow-typed/*
|
||||
bundle.js
|
@ -1,81 +0,0 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// React syntax style mostly according to https://github.com/airbnb/javascript/tree/master/react
|
||||
{
|
||||
"env": {
|
||||
"browser": true,
|
||||
"node": true,
|
||||
"es6": true
|
||||
},
|
||||
"parser": "babel-eslint",
|
||||
"parserOptions": {
|
||||
"sourceType": "module",
|
||||
"ecmaVersion": 6,
|
||||
"ecmaFeatures": {
|
||||
"jsx": true
|
||||
}
|
||||
},
|
||||
"extends": [
|
||||
"eslint:recommended",
|
||||
"airbnb",
|
||||
"plugin:flowtype/recommended",
|
||||
"plugin:react/recommended"
|
||||
],
|
||||
"plugins": [
|
||||
"flowtype",
|
||||
"react"
|
||||
],
|
||||
"rules": {
|
||||
"no-tabs": "off",
|
||||
"indent": ["error", "tab"],
|
||||
"react/jsx-indent": ["error", "tab"],
|
||||
"react/jsx-indent-props": ["error", "tab"],
|
||||
"react/prefer-stateless-function": "off",
|
||||
"react/destructuring-assignment": ["error", "always", {"ignoreClassFields": true}],
|
||||
"jsx-quotes": ["error", "prefer-single"],
|
||||
"no-plusplus": "off",
|
||||
"no-console": ["error", { "allow": ["error"] }],
|
||||
// Specifies the maximum length of a line.
|
||||
"max-len": ["warn", 120, 2, {
|
||||
"ignoreUrls": true,
|
||||
"ignoreComments": false,
|
||||
"ignoreRegExpLiterals": true,
|
||||
"ignoreStrings": true,
|
||||
"ignoreTemplateLiterals": true
|
||||
}],
|
||||
// Enforces consistent spacing between keys and values in object literal properties.
|
||||
"key-spacing": ["error", {"align": {
|
||||
"beforeColon": false,
|
||||
"afterColon": true,
|
||||
"on": "value"
|
||||
}}],
|
||||
// Prohibits padding inside curly braces.
|
||||
"object-curly-spacing": ["error", "never"],
|
||||
"no-use-before-define": "off", // message types
|
||||
"default-case": "off"
|
||||
},
|
||||
"settings": {
|
||||
"import/resolver": {
|
||||
"node": {
|
||||
"paths": ["components"] // import './components/Component' -> import 'Component'
|
||||
}
|
||||
},
|
||||
"flowtype": {
|
||||
"onlyFilesWithFlowAnnotation": true
|
||||
}
|
||||
}
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
[ignore]
|
||||
<PROJECT_ROOT>/node_modules/material-ui/.*\.js\.flow
|
||||
|
||||
[libs]
|
||||
<PROJECT_ROOT>/flow-typed/
|
||||
node_modules/jss/flow-typed
|
||||
|
||||
[options]
|
||||
include_warnings=true
|
||||
module.system.node.resolve_dirname=node_modules
|
||||
module.system.node.resolve_dirname=components
|
@ -1,92 +0,0 @@
|
||||
// @flow
|
||||
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import {faHome, faLink, faGlobeEurope, faTachometerAlt, faList} from '@fortawesome/free-solid-svg-icons';
|
||||
import {faCreditCard} from '@fortawesome/free-regular-svg-icons';
|
||||
|
||||
type ProvidedMenuProp = {|title: string, icon: string|};
|
||||
const menuSkeletons: Array<{|id: string, menu: ProvidedMenuProp|}> = [
|
||||
{
|
||||
id: 'home',
|
||||
menu: {
|
||||
title: 'Home',
|
||||
icon: faHome,
|
||||
},
|
||||
}, {
|
||||
id: 'chain',
|
||||
menu: {
|
||||
title: 'Chain',
|
||||
icon: faLink,
|
||||
},
|
||||
}, {
|
||||
id: 'txpool',
|
||||
menu: {
|
||||
title: 'TxPool',
|
||||
icon: faCreditCard,
|
||||
},
|
||||
}, {
|
||||
id: 'network',
|
||||
menu: {
|
||||
title: 'Network',
|
||||
icon: faGlobeEurope,
|
||||
},
|
||||
}, {
|
||||
id: 'system',
|
||||
menu: {
|
||||
title: 'System',
|
||||
icon: faTachometerAlt,
|
||||
},
|
||||
}, {
|
||||
id: 'logs',
|
||||
menu: {
|
||||
title: 'Logs',
|
||||
icon: faList,
|
||||
},
|
||||
},
|
||||
];
|
||||
export type MenuProp = {|...ProvidedMenuProp, id: string|};
|
||||
// The sidebar menu and the main content are rendered based on these elements.
|
||||
// Using the id is circumstantial in some cases, so it is better to insert it also as a value.
|
||||
// This way the mistyping is prevented.
|
||||
export const MENU: Map<string, {...MenuProp}> = new Map(menuSkeletons.map(({id, menu}) => ([id, {id, ...menu}])));
|
||||
|
||||
export const DURATION = 200;
|
||||
|
||||
export const chartStrokeWidth = 0.2;
|
||||
|
||||
export const styles = {
|
||||
light: {
|
||||
color: 'rgba(255, 255, 255, 0.54)',
|
||||
},
|
||||
};
|
||||
|
||||
// unit contains the units for the bytePlotter.
|
||||
export const unit = ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'];
|
||||
|
||||
// simplifyBytes returns the simplified version of the given value followed by the unit.
|
||||
export const simplifyBytes = (x: number) => {
|
||||
let i = 0;
|
||||
for (; x > 1024 && i < 8; i++) {
|
||||
x /= 1024;
|
||||
}
|
||||
return x.toFixed(2).toString().concat(' ', unit[i], 'B');
|
||||
};
|
||||
|
||||
// hues contains predefined colors for gradient stop colors.
|
||||
export const hues = ['#00FF00', '#FFFF00', '#FF7F00', '#FF0000'];
|
||||
export const hueScale = [0, 2048, 102400, 2097152];
|
@ -1,63 +0,0 @@
|
||||
// @flow
|
||||
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import React, {Component} from 'react';
|
||||
|
||||
import SideBar from './SideBar';
|
||||
import Main from './Main';
|
||||
import type {Content} from '../types/content';
|
||||
|
||||
// styles contains the constant styles of the component.
|
||||
const styles = {
|
||||
body: {
|
||||
display: 'flex',
|
||||
width: '100%',
|
||||
height: '92%',
|
||||
},
|
||||
};
|
||||
|
||||
export type Props = {
|
||||
opened: boolean,
|
||||
changeContent: string => void,
|
||||
active: string,
|
||||
content: Content,
|
||||
shouldUpdate: Object,
|
||||
send: string => void,
|
||||
};
|
||||
|
||||
// Body renders the body of the dashboard.
|
||||
class Body extends Component<Props> {
|
||||
render() {
|
||||
return (
|
||||
<div style={styles.body}>
|
||||
<SideBar
|
||||
opened={this.props.opened}
|
||||
changeContent={this.props.changeContent}
|
||||
/>
|
||||
<Main
|
||||
active={this.props.active}
|
||||
content={this.props.content}
|
||||
shouldUpdate={this.props.shouldUpdate}
|
||||
send={this.props.send}
|
||||
/>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
export default Body;
|
@ -1,53 +0,0 @@
|
||||
// @flow
|
||||
|
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import React, {Component} from 'react';
|
||||
import type {Chain as ChainType} from '../types/content';
|
||||
|
||||
export const inserter = () => (update: ChainType, prev: ChainType) => {
|
||||
if (!update.currentBlock) {
|
||||
return;
|
||||
}
|
||||
if (!prev.currentBlock) {
|
||||
prev.currentBlock = {};
|
||||
}
|
||||
prev.currentBlock.number = update.currentBlock.number;
|
||||
prev.currentBlock.timestamp = update.currentBlock.timestamp;
|
||||
return prev;
|
||||
};
|
||||
|
||||
// styles contains the constant styles of the component.
|
||||
const styles = {};
|
||||
|
||||
// themeStyles returns the styles generated from the theme for the component.
|
||||
const themeStyles = theme => ({});
|
||||
|
||||
export type Props = {
|
||||
content: Content,
|
||||
};
|
||||
|
||||
type State = {};
|
||||
|
||||
// Logs renders the log page.
|
||||
class Chain extends Component<Props, State> {
|
||||
render() {
|
||||
return <></>;
|
||||
}
|
||||
}
|
||||
|
||||
export default Chain;
|
@ -1,57 +0,0 @@
|
||||
// @flow
|
||||
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import React, {Component} from 'react';
|
||||
import type {ChildrenArray} from 'react';
|
||||
|
||||
import Grid from '@material-ui/core/Grid';
|
||||
|
||||
// styles contains the constant styles of the component.
|
||||
const styles = {
|
||||
container: {
|
||||
flexWrap: 'nowrap',
|
||||
height: '100%',
|
||||
maxWidth: '100%',
|
||||
margin: 0,
|
||||
},
|
||||
item: {
|
||||
flex: 1,
|
||||
padding: 0,
|
||||
},
|
||||
};
|
||||
|
||||
export type Props = {
|
||||
children: ChildrenArray<React$Element<any>>,
|
||||
};
|
||||
|
||||
// ChartRow renders a row of equally sized responsive charts.
|
||||
class ChartRow extends Component<Props> {
|
||||
render() {
|
||||
return (
|
||||
<Grid container direction='row' style={styles.container} justify='space-between'>
|
||||
{React.Children.map(this.props.children, child => (
|
||||
<Grid item xs style={styles.item}>
|
||||
{child}
|
||||
</Grid>
|
||||
))}
|
||||
</Grid>
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
export default ChartRow;
|
@ -1,84 +0,0 @@
|
||||
// @flow
|
||||
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import React, {Component} from 'react';
|
||||
|
||||
import Typography from '@material-ui/core/Typography';
|
||||
import {styles, simplifyBytes} from '../common';
|
||||
|
||||
// multiplier multiplies a number by another.
|
||||
export const multiplier = <T>(by: number = 1) => (x: number) => x * by;
|
||||
|
||||
// percentPlotter renders a tooltip, which displays the value of the payload followed by a percent sign.
|
||||
export const percentPlotter = <T>(text: string, mapper: (T => T) = multiplier(1)) => (payload: T) => {
|
||||
const p = mapper(payload);
|
||||
if (typeof p !== 'number') {
|
||||
return null;
|
||||
}
|
||||
return (
|
||||
<Typography type='caption' color='inherit'>
|
||||
<span style={styles.light}>{text}</span> {p.toFixed(2)} %
|
||||
</Typography>
|
||||
);
|
||||
};
|
||||
|
||||
// bytePlotter renders a tooltip, which displays the payload as a byte value.
|
||||
export const bytePlotter = <T>(text: string, mapper: (T => T) = multiplier(1)) => (payload: T) => {
|
||||
const p = mapper(payload);
|
||||
if (typeof p !== 'number') {
|
||||
return null;
|
||||
}
|
||||
return (
|
||||
<Typography type='caption' color='inherit'>
|
||||
<span style={styles.light}>{text}</span> {simplifyBytes(p)}
|
||||
</Typography>
|
||||
);
|
||||
};
|
||||
|
||||
// bytePlotter renders a tooltip, which displays the payload as a byte value followed by '/s'.
|
||||
export const bytePerSecPlotter = <T>(text: string, mapper: (T => T) = multiplier(1)) => (payload: T) => {
|
||||
const p = mapper(payload);
|
||||
if (typeof p !== 'number') {
|
||||
return null;
|
||||
}
|
||||
return (
|
||||
<Typography type='caption' color='inherit'>
|
||||
<span style={styles.light}>{text}</span>
|
||||
{simplifyBytes(p)}/s
|
||||
</Typography>
|
||||
);
|
||||
};
|
||||
|
||||
export type Props = {
|
||||
active: boolean,
|
||||
payload: Object,
|
||||
tooltip: <T>(text: string, mapper?: T => T) => (payload: mixed) => null | React$Element<any>,
|
||||
};
|
||||
|
||||
// CustomTooltip takes a tooltip function, and uses it to plot the active value of the chart.
|
||||
class CustomTooltip extends Component<Props> {
|
||||
render() {
|
||||
const {active, payload, tooltip} = this.props;
|
||||
if (!active || typeof tooltip !== 'function' || !Array.isArray(payload) || payload.length < 1) {
|
||||
return null;
|
||||
}
|
||||
return tooltip(payload[0].value);
|
||||
}
|
||||
}
|
||||
|
||||
export default CustomTooltip;
|
@ -1,268 +0,0 @@
|
||||
// @flow
|
||||
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import React, {Component} from 'react';
|
||||
import {hot} from 'react-hot-loader';
|
||||
|
||||
import withStyles from '@material-ui/core/styles/withStyles';
|
||||
|
||||
import Header from 'Header';
|
||||
import Body from 'Body';
|
||||
import {inserter as logInserter, SAME} from 'Logs';
|
||||
import {inserter as peerInserter} from 'Network';
|
||||
import {inserter as chainInserter} from 'Chain';
|
||||
import {MENU} from '../common';
|
||||
import type {Content} from '../types/content';
|
||||
|
||||
// deepUpdate updates an object corresponding to the given update data, which has
|
||||
// the shape of the same structure as the original object. updater also has the same
|
||||
// structure, except that it contains functions where the original data needs to be
|
||||
// updated. These functions are used to handle the update.
|
||||
//
|
||||
// Since the messages have the same shape as the state content, this approach allows
|
||||
// the generalization of the message handling. The only necessary thing is to set a
|
||||
// handler function for every path of the state in order to maximize the flexibility
|
||||
// of the update.
|
||||
const deepUpdate = (updater: Object, update: Object, prev: Object): $Shape<Content> => {
|
||||
if (typeof update === 'undefined') {
|
||||
return prev;
|
||||
}
|
||||
if (typeof updater === 'function') {
|
||||
return updater(update, prev);
|
||||
}
|
||||
const updated = {};
|
||||
Object.keys(prev).forEach((key) => {
|
||||
updated[key] = deepUpdate(updater[key], update[key], prev[key]);
|
||||
});
|
||||
|
||||
return updated;
|
||||
};
|
||||
|
||||
// shouldUpdate returns the structure of a message. It is used to prevent unnecessary render
|
||||
// method triggerings. In the affected component's shouldComponentUpdate method it can be checked
|
||||
// whether the involved data was changed or not by checking the message structure.
|
||||
//
|
||||
// We could return the message itself too, but it's safer not to give access to it.
|
||||
const shouldUpdate = (updater: Object, msg: Object) => {
|
||||
const su = {};
|
||||
Object.keys(msg).forEach((key) => {
|
||||
su[key] = typeof updater[key] !== 'function' ? shouldUpdate(updater[key], msg[key]) : true;
|
||||
});
|
||||
|
||||
return su;
|
||||
};
|
||||
|
||||
// replacer is a state updater function, which replaces the original data.
|
||||
const replacer = <T>(update: T) => update;
|
||||
|
||||
// appender is a state updater function, which appends the update data to the
|
||||
// existing data. limit defines the maximum allowed size of the created array,
|
||||
// mapper maps the update data.
|
||||
const appender = <T>(limit: number, mapper = replacer) => (update: Array<T>, prev: Array<T>) => [
|
||||
...prev,
|
||||
...update.map(sample => mapper(sample)),
|
||||
].slice(-limit);
|
||||
|
||||
// defaultContent returns the initial value of the state content. Needs to be a function in order to
|
||||
// instantiate the object again, because it is used by the state, and isn't automatically cleaned
|
||||
// when a new connection is established. The state is mutated during the update in order to avoid
|
||||
// the execution of unnecessary operations (e.g. copy of the log array).
|
||||
const defaultContent: () => Content = () => ({
|
||||
general: {
|
||||
commit: null,
|
||||
version: null,
|
||||
genesis: '',
|
||||
},
|
||||
home: {},
|
||||
chain: {
|
||||
currentBlock: {
|
||||
number: 0,
|
||||
timestamp: 0,
|
||||
},
|
||||
},
|
||||
txpool: {},
|
||||
network: {
|
||||
peers: {
|
||||
bundles: {},
|
||||
},
|
||||
diff: [],
|
||||
activePeerCount: 0,
|
||||
},
|
||||
system: {
|
||||
activeMemory: [],
|
||||
virtualMemory: [],
|
||||
networkIngress: [],
|
||||
networkEgress: [],
|
||||
processCPU: [],
|
||||
systemCPU: [],
|
||||
diskRead: [],
|
||||
diskWrite: [],
|
||||
},
|
||||
logs: {
|
||||
chunks: [],
|
||||
endTop: false,
|
||||
endBottom: true,
|
||||
topChanged: SAME,
|
||||
bottomChanged: SAME,
|
||||
},
|
||||
});
|
||||
|
||||
// updaters contains the state updater functions for each path of the state.
|
||||
//
|
||||
// TODO (kurkomisi): Define a tricky type which embraces the content and the updaters.
|
||||
const updaters = {
|
||||
general: {
|
||||
version: replacer,
|
||||
commit: replacer,
|
||||
genesis: replacer,
|
||||
},
|
||||
home: null,
|
||||
chain: chainInserter(),
|
||||
txpool: null,
|
||||
network: peerInserter(200),
|
||||
system: {
|
||||
activeMemory: appender(200),
|
||||
virtualMemory: appender(200),
|
||||
networkIngress: appender(200),
|
||||
networkEgress: appender(200),
|
||||
processCPU: appender(200),
|
||||
systemCPU: appender(200),
|
||||
diskRead: appender(200),
|
||||
diskWrite: appender(200),
|
||||
},
|
||||
logs: logInserter(5),
|
||||
};
|
||||
|
||||
// styles contains the constant styles of the component.
|
||||
const styles = {
|
||||
dashboard: {
|
||||
display: 'flex',
|
||||
flexFlow: 'column',
|
||||
width: '100%',
|
||||
height: '100%',
|
||||
zIndex: 1,
|
||||
overflow: 'hidden',
|
||||
},
|
||||
};
|
||||
|
||||
// themeStyles returns the styles generated from the theme for the component.
|
||||
const themeStyles: Object = (theme: Object) => ({
|
||||
dashboard: {
|
||||
background: theme.palette.background.default,
|
||||
},
|
||||
});
|
||||
|
||||
export type Props = {
|
||||
classes: Object, // injected by withStyles()
|
||||
};
|
||||
|
||||
type State = {
|
||||
active: string, // active menu
|
||||
sideBar: boolean, // true if the sidebar is opened
|
||||
content: Content, // the visualized data
|
||||
shouldUpdate: Object, // labels for the components, which need to re-render based on the incoming message
|
||||
server: ?WebSocket,
|
||||
};
|
||||
|
||||
// Dashboard is the main component, which renders the whole page, makes connection with the server and
|
||||
// listens for messages. When there is an incoming message, updates the page's content correspondingly.
|
||||
class Dashboard extends Component<Props, State> {
|
||||
constructor(props: Props) {
|
||||
super(props);
|
||||
this.state = {
|
||||
active: MENU.get('home').id,
|
||||
sideBar: true,
|
||||
content: defaultContent(),
|
||||
shouldUpdate: {},
|
||||
server: null,
|
||||
};
|
||||
}
|
||||
|
||||
// componentDidMount initiates the establishment of the first websocket connection after the component is rendered.
|
||||
componentDidMount() {
|
||||
this.reconnect();
|
||||
}
|
||||
|
||||
// reconnect establishes a websocket connection with the server, listens for incoming messages
|
||||
// and tries to reconnect on connection loss.
|
||||
reconnect = () => {
|
||||
const host = process.env.NODE_ENV === 'production' ? window.location.host : 'localhost:8080';
|
||||
const server = new WebSocket(`${((window.location.protocol === 'https:') ? 'wss://' : 'ws://')}${host}/api`);
|
||||
server.onopen = () => {
|
||||
this.setState({content: defaultContent(), shouldUpdate: {}, server});
|
||||
};
|
||||
server.onmessage = (event) => {
|
||||
const msg: $Shape<Content> = JSON.parse(event.data);
|
||||
if (!msg) {
|
||||
console.error(`Incoming message is ${msg}`);
|
||||
return;
|
||||
}
|
||||
this.update(msg);
|
||||
};
|
||||
server.onclose = () => {
|
||||
this.setState({server: null});
|
||||
setTimeout(this.reconnect, 3000);
|
||||
};
|
||||
};
|
||||
|
||||
// send sends a message to the server, which can be accessed only through this function for safety reasons.
|
||||
send = (msg: string) => {
|
||||
if (this.state.server != null) {
|
||||
this.state.server.send(msg);
|
||||
}
|
||||
};
|
||||
|
||||
// update updates the content corresponding to the incoming message.
|
||||
update = (msg: $Shape<Content>) => {
|
||||
this.setState(prevState => ({
|
||||
content: deepUpdate(updaters, msg, prevState.content),
|
||||
shouldUpdate: shouldUpdate(updaters, msg),
|
||||
}));
|
||||
};
|
||||
|
||||
// changeContent sets the active label, which is used at the content rendering.
|
||||
changeContent = (newActive: string) => {
|
||||
this.setState(prevState => (prevState.active !== newActive ? {active: newActive} : {}));
|
||||
};
|
||||
|
||||
// switchSideBar opens or closes the sidebar's state.
|
||||
switchSideBar = () => {
|
||||
this.setState(prevState => ({sideBar: !prevState.sideBar}));
|
||||
};
|
||||
|
||||
render() {
|
||||
return (
|
||||
<div className={this.props.classes.dashboard} style={styles.dashboard}>
|
||||
<Header
|
||||
switchSideBar={this.switchSideBar}
|
||||
content={this.state.content}
|
||||
/>
|
||||
<Body
|
||||
opened={this.state.sideBar}
|
||||
changeContent={this.changeContent}
|
||||
active={this.state.active}
|
||||
content={this.state.content}
|
||||
shouldUpdate={this.state.shouldUpdate}
|
||||
send={this.send}
|
||||
/>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
export default hot(module)(withStyles(themeStyles)(Dashboard));
|
@ -1,234 +0,0 @@
|
||||
// @flow
|
||||
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import React, {Component} from 'react';
|
||||
|
||||
import withStyles from '@material-ui/core/styles/withStyles';
|
||||
import Typography from '@material-ui/core/Typography';
|
||||
import Grid from '@material-ui/core/Grid';
|
||||
import ResponsiveContainer from 'recharts/es6/component/ResponsiveContainer';
|
||||
import AreaChart from 'recharts/es6/chart/AreaChart';
|
||||
import Area from 'recharts/es6/cartesian/Area';
|
||||
import ReferenceLine from 'recharts/es6/cartesian/ReferenceLine';
|
||||
import Label from 'recharts/es6/component/Label';
|
||||
import Tooltip from 'recharts/es6/component/Tooltip';
|
||||
|
||||
import ChartRow from 'ChartRow';
|
||||
import CustomTooltip, {bytePlotter, bytePerSecPlotter, percentPlotter, multiplier} from 'CustomTooltip';
|
||||
import {chartStrokeWidth, styles as commonStyles} from '../common';
|
||||
import type {General, System} from '../types/content';
|
||||
import {FontAwesomeIcon} from "@fortawesome/react-fontawesome";
|
||||
import {faNetworkWired} from "@fortawesome/free-solid-svg-icons";
|
||||
import Toolbar from "@material-ui/core/Toolbar";
|
||||
|
||||
const FOOTER_SYNC_ID = 'footerSyncId';
|
||||
|
||||
const CPU = 'cpu';
|
||||
const MEMORY = 'memory';
|
||||
const DISK = 'disk';
|
||||
const TRAFFIC = 'traffic';
|
||||
|
||||
const TOP = 'Top';
|
||||
const BOTTOM = 'Bottom';
|
||||
|
||||
const cpuLabelTop = 'Process load';
|
||||
const cpuLabelBottom = 'System load';
|
||||
const memoryLabelTop = 'Active memory';
|
||||
const memoryLabelBottom = 'Virtual memory';
|
||||
const diskLabelTop = 'Disk read';
|
||||
const diskLabelBottom = 'Disk write';
|
||||
const trafficLabelTop = 'Download';
|
||||
const trafficLabelBottom = 'Upload';
|
||||
|
||||
// styles contains the constant styles of the component.
|
||||
const styles = {
|
||||
footer: {
|
||||
maxWidth: '100%',
|
||||
flexWrap: 'nowrap',
|
||||
margin: 0,
|
||||
},
|
||||
chartRowWrapper: {
|
||||
height: '100%',
|
||||
padding: 0,
|
||||
},
|
||||
doubleChartWrapper: {
|
||||
height: '100%',
|
||||
width: '99%',
|
||||
},
|
||||
link: {
|
||||
color: 'inherit',
|
||||
textDecoration: 'none',
|
||||
},
|
||||
};
|
||||
|
||||
// themeStyles returns the styles generated from the theme for the component.
|
||||
const themeStyles: Object = (theme: Object) => ({
|
||||
footer: {
|
||||
backgroundColor: theme.palette.grey[900],
|
||||
color: theme.palette.getContrastText(theme.palette.grey[900]),
|
||||
zIndex: theme.zIndex.appBar,
|
||||
height: theme.spacing.unit * 10,
|
||||
},
|
||||
});
|
||||
|
||||
export type Props = {
|
||||
classes: Object, // injected by withStyles()
|
||||
theme: Object,
|
||||
general: General,
|
||||
system: System,
|
||||
shouldUpdate: Object,
|
||||
};
|
||||
|
||||
type State = {};
|
||||
|
||||
// Footer renders the footer of the dashboard.
|
||||
class Footer extends Component<Props, State> {
|
||||
shouldComponentUpdate(nextProps: Readonly<Props>, nextState: Readonly<State>, nextContext: any) {
|
||||
return typeof nextProps.shouldUpdate.general !== 'undefined' || typeof nextProps.shouldUpdate.system !== 'undefined';
|
||||
}
|
||||
|
||||
// halfHeightChart renders an area chart with half of the height of its parent.
|
||||
halfHeightChart = (chartProps, tooltip, areaProps, label, position) => (
|
||||
<ResponsiveContainer width='100%' height='50%'>
|
||||
<AreaChart {...chartProps}>
|
||||
{!tooltip || (<Tooltip cursor={false} content={<CustomTooltip tooltip={tooltip} />} />)}
|
||||
<Area isAnimationActive={false} strokeWidth={chartStrokeWidth} type='monotone' {...areaProps} />
|
||||
<ReferenceLine x={0} strokeWidth={0}>
|
||||
<Label fill={areaProps.fill} value={label} position={position} />
|
||||
</ReferenceLine>
|
||||
</AreaChart>
|
||||
</ResponsiveContainer>
|
||||
);
|
||||
|
||||
// doubleChart renders a pair of charts separated by the baseline.
|
||||
doubleChart = (syncId, chartKey, topChart, bottomChart) => {
|
||||
if (!Array.isArray(topChart.data) || !Array.isArray(bottomChart.data)) {
|
||||
return null;
|
||||
}
|
||||
const topDefault = topChart.default || 0;
|
||||
const bottomDefault = bottomChart.default || 0;
|
||||
const topKey = `${chartKey}${TOP}`;
|
||||
const bottomKey = `${chartKey}${BOTTOM}`;
|
||||
const topColor = '#8884d8';
|
||||
const bottomColor = '#82ca9d';
|
||||
|
||||
return (
|
||||
<div style={styles.doubleChartWrapper}>
|
||||
{this.halfHeightChart(
|
||||
{
|
||||
syncId,
|
||||
data: topChart.data.map(({value}) => ({[topKey]: value || topDefault})),
|
||||
margin: {top: 5, right: 5, bottom: 0, left: 5},
|
||||
},
|
||||
topChart.tooltip,
|
||||
{dataKey: topKey, stroke: topColor, fill: topColor},
|
||||
topChart.label,
|
||||
'insideBottomLeft',
|
||||
)}
|
||||
{this.halfHeightChart(
|
||||
{
|
||||
syncId,
|
||||
data: bottomChart.data.map(({value}) => ({[bottomKey]: -value || -bottomDefault})),
|
||||
margin: {top: 0, right: 5, bottom: 5, left: 5},
|
||||
},
|
||||
bottomChart.tooltip,
|
||||
{dataKey: bottomKey, stroke: bottomColor, fill: bottomColor},
|
||||
bottomChart.label,
|
||||
'insideTopLeft',
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
render() {
|
||||
const {general, system} = this.props;
|
||||
let network = '';
|
||||
switch (general.genesis) {
|
||||
case '0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3':
|
||||
network = 'main';
|
||||
break;
|
||||
case '0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d':
|
||||
network = 'ropsten';
|
||||
break;
|
||||
case '0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177':
|
||||
network = 'rinkeby';
|
||||
break;
|
||||
case '0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a':
|
||||
network = 'görli';
|
||||
break;
|
||||
default:
|
||||
network = `unknown (${general.genesis.substring(0, 8)})`;
|
||||
}
|
||||
|
||||
return (
|
||||
<Grid container className={this.props.classes.footer} direction='row' alignItems='center' style={styles.footer}>
|
||||
<Grid item xs style={styles.chartRowWrapper}>
|
||||
<ChartRow>
|
||||
{this.doubleChart(
|
||||
FOOTER_SYNC_ID,
|
||||
CPU,
|
||||
{data: system.processCPU, tooltip: percentPlotter(cpuLabelTop), label: cpuLabelTop},
|
||||
{data: system.systemCPU, tooltip: percentPlotter(cpuLabelBottom, multiplier(-1)), label: cpuLabelBottom},
|
||||
)}
|
||||
{this.doubleChart(
|
||||
FOOTER_SYNC_ID,
|
||||
MEMORY,
|
||||
{data: system.activeMemory, tooltip: bytePlotter(memoryLabelTop), label: memoryLabelTop},
|
||||
{data: system.virtualMemory, tooltip: bytePlotter(memoryLabelBottom, multiplier(-1)), label: memoryLabelBottom},
|
||||
)}
|
||||
{this.doubleChart(
|
||||
FOOTER_SYNC_ID,
|
||||
DISK,
|
||||
{data: system.diskRead, tooltip: bytePerSecPlotter(diskLabelTop), label: diskLabelTop},
|
||||
{data: system.diskWrite, tooltip: bytePerSecPlotter(diskLabelBottom, multiplier(-1)), label: diskLabelBottom},
|
||||
)}
|
||||
{this.doubleChart(
|
||||
FOOTER_SYNC_ID,
|
||||
TRAFFIC,
|
||||
{data: system.networkIngress, tooltip: bytePerSecPlotter(trafficLabelTop), label: trafficLabelTop},
|
||||
{data: system.networkEgress, tooltip: bytePerSecPlotter(trafficLabelBottom, multiplier(-1)), label: trafficLabelBottom},
|
||||
)}
|
||||
</ChartRow>
|
||||
</Grid>
|
||||
<Grid item>
|
||||
<Typography type='caption' color='inherit'>
|
||||
<span style={commonStyles.light}>Geth</span> {general.version}
|
||||
</Typography>
|
||||
{general.commit && (
|
||||
<Typography type='caption' color='inherit'>
|
||||
<span style={commonStyles.light}>{'Commit '}</span>
|
||||
<a
|
||||
href={`https://github.com/ethereum/go-ethereum/commit/${general.commit}`}
|
||||
target='_blank'
|
||||
rel='noopener noreferrer'
|
||||
style={styles.link}
|
||||
>
|
||||
{general.commit.substring(0, 8)}
|
||||
</a>
|
||||
</Typography>
|
||||
)}
|
||||
<Typography style={styles.headerText}>
|
||||
<span style={commonStyles.light}>Network</span> {network}
|
||||
</Typography>
|
||||
</Grid>
|
||||
</Grid>
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
export default withStyles(themeStyles)(Footer);
|
@ -1,135 +0,0 @@
|
||||
// @flow
|
||||
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import React, {Component} from 'react';
|
||||
|
||||
import withStyles from '@material-ui/core/styles/withStyles';
|
||||
import AppBar from '@material-ui/core/AppBar';
|
||||
import Toolbar from '@material-ui/core/Toolbar';
|
||||
import IconButton from '@material-ui/core/IconButton';
|
||||
import {FontAwesomeIcon} from '@fortawesome/react-fontawesome';
|
||||
import {faBars, faSortAmountUp, faClock, faUsers, faSync} from '@fortawesome/free-solid-svg-icons';
|
||||
import Typography from '@material-ui/core/Typography';
|
||||
import type {Content} from '../types/content';
|
||||
|
||||
|
||||
const magnitude = [31536000, 604800, 86400, 3600, 60, 1];
|
||||
const label = ['y', 'w', 'd', 'h', 'm', 's'];
|
||||
|
||||
// styles contains the constant styles of the component.
|
||||
const styles = {
|
||||
header: {
|
||||
height: '8%',
|
||||
},
|
||||
headerText: {
|
||||
marginRight: 15,
|
||||
},
|
||||
toolbar: {
|
||||
height: '100%',
|
||||
minHeight: 'unset',
|
||||
},
|
||||
};
|
||||
|
||||
// themeStyles returns the styles generated from the theme for the component.
|
||||
const themeStyles = (theme: Object) => ({
|
||||
header: {
|
||||
backgroundColor: theme.palette.grey[900],
|
||||
color: theme.palette.getContrastText(theme.palette.grey[900]),
|
||||
zIndex: theme.zIndex.appBar,
|
||||
},
|
||||
toolbar: {
|
||||
paddingLeft: theme.spacing.unit,
|
||||
paddingRight: theme.spacing.unit,
|
||||
},
|
||||
title: {
|
||||
paddingLeft: theme.spacing.unit,
|
||||
fontSize: 3 * theme.spacing.unit,
|
||||
flex: 1,
|
||||
},
|
||||
});
|
||||
|
||||
export type Props = {
|
||||
classes: Object, // injected by withStyles()
|
||||
switchSideBar: () => void,
|
||||
content: Content,
|
||||
networkID: number,
|
||||
};
|
||||
|
||||
type State = {
|
||||
since: string,
|
||||
}
|
||||
// Header renders the header of the dashboard.
|
||||
class Header extends Component<Props, State> {
|
||||
constructor(props) {
|
||||
super(props);
|
||||
this.state = {since: ''};
|
||||
}
|
||||
|
||||
componentDidMount() {
|
||||
this.interval = setInterval(() => this.setState(() => {
|
||||
// time (seconds) since last block.
|
||||
let timeDiff = Math.floor((Date.now() - this.props.content.chain.currentBlock.timestamp * 1000) / 1000);
|
||||
let since = '';
|
||||
let i = 0;
|
||||
for (; i < magnitude.length && timeDiff < magnitude[i]; i++);
|
||||
for (let j = 2; i < magnitude.length && j > 0; j--, i++) {
|
||||
const t = Math.floor(timeDiff / magnitude[i]);
|
||||
if (t > 0) {
|
||||
since += `${t}${label[i]} `;
|
||||
timeDiff %= magnitude[i];
|
||||
}
|
||||
}
|
||||
if (since === '') {
|
||||
since = 'now';
|
||||
}
|
||||
this.setState({since: since});
|
||||
}), 1000);
|
||||
}
|
||||
|
||||
componentWillUnmount() {
|
||||
clearInterval(this.interval);
|
||||
}
|
||||
|
||||
render() {
|
||||
const {classes} = this.props;
|
||||
|
||||
return (
|
||||
<AppBar position='static' className={classes.header} style={styles.header}>
|
||||
<Toolbar className={classes.toolbar} style={styles.toolbar}>
|
||||
<IconButton onClick={this.props.switchSideBar}>
|
||||
<FontAwesomeIcon icon={faBars} />
|
||||
</IconButton>
|
||||
<Typography type='title' color='inherit' noWrap className={classes.title}>
|
||||
Go Ethereum Dashboard
|
||||
</Typography>
|
||||
<Typography style={styles.headerText}>
|
||||
<FontAwesomeIcon icon={faSortAmountUp} /> {this.props.content.chain.currentBlock.number}
|
||||
</Typography>
|
||||
<Typography style={styles.headerText}>
|
||||
<FontAwesomeIcon icon={faClock} /> {this.state.since}
|
||||
</Typography>
|
||||
<Typography style={styles.headerText}>
|
||||
<FontAwesomeIcon icon={faUsers} /> {this.props.content.network.activePeerCount}
|
||||
</Typography>
|
||||
</Toolbar>
|
||||
</AppBar>
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
export default withStyles(themeStyles)(Header);
|
@ -1,327 +0,0 @@
|
||||
// @flow
|
||||
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import React, {Component} from 'react';
|
||||
|
||||
import List from '@material-ui/core/List';
|
||||
import ListItem from '@material-ui/core/ListItem';
|
||||
import escapeHtml from 'escape-html';
|
||||
import type {Record, Content, LogsMessage, Logs as LogsType} from '../types/content';
|
||||
|
||||
// requestBand says how wide is the top/bottom zone, eg. 0.1 means 10% of the container height.
|
||||
const requestBand = 0.05;
|
||||
|
||||
// fieldPadding is a global map with maximum field value lengths seen until now
|
||||
// to allow padding log contexts in a bit smarter way.
|
||||
const fieldPadding = new Map();
|
||||
|
||||
// createChunk creates an HTML formatted object, which displays the given array similarly to
|
||||
// the server side terminal.
|
||||
const createChunk = (records: Array<Record>) => {
|
||||
let content = '';
|
||||
records.forEach((record) => {
|
||||
const {t, ctx} = record;
|
||||
let {lvl, msg} = record;
|
||||
let color = '#ce3c23';
|
||||
switch (lvl) {
|
||||
case 'trace':
|
||||
case 'trce':
|
||||
lvl = 'TRACE';
|
||||
color = '#3465a4';
|
||||
break;
|
||||
case 'debug':
|
||||
case 'dbug':
|
||||
lvl = 'DEBUG';
|
||||
color = '#3d989b';
|
||||
break;
|
||||
case 'info':
|
||||
lvl = 'INFO ';
|
||||
color = '#4c8f0f';
|
||||
break;
|
||||
case 'warn':
|
||||
lvl = 'WARN ';
|
||||
color = '#b79a22';
|
||||
break;
|
||||
case 'error':
|
||||
case 'eror':
|
||||
lvl = 'ERROR';
|
||||
color = '#754b70';
|
||||
break;
|
||||
case 'crit':
|
||||
lvl = 'CRIT ';
|
||||
color = '#ce3c23';
|
||||
break;
|
||||
default:
|
||||
lvl = '';
|
||||
}
|
||||
const time = new Date(t);
|
||||
if (lvl === '' || !(time instanceof Date) || isNaN(time) || typeof msg !== 'string' || !Array.isArray(ctx)) {
|
||||
content += '<span style="color:#ce3c23">Invalid log record</span><br />';
|
||||
return;
|
||||
}
|
||||
if (ctx.length > 0) {
|
||||
msg += ' '.repeat(Math.max(40 - msg.length, 0));
|
||||
}
|
||||
const month = `0${time.getMonth() + 1}`.slice(-2);
|
||||
const date = `0${time.getDate()}`.slice(-2);
|
||||
const hours = `0${time.getHours()}`.slice(-2);
|
||||
const minutes = `0${time.getMinutes()}`.slice(-2);
|
||||
const seconds = `0${time.getSeconds()}`.slice(-2);
|
||||
content += `<span style="color:${color}">${lvl}</span>[${month}-${date}|${hours}:${minutes}:${seconds}] ${msg}`;
|
||||
|
||||
for (let i = 0; i < ctx.length; i += 2) {
|
||||
const key = escapeHtml(ctx[i]);
|
||||
const val = escapeHtml(ctx[i + 1]);
|
||||
let padding = fieldPadding.get(key);
|
||||
if (typeof padding !== 'number' || padding < val.length) {
|
||||
padding = val.length;
|
||||
fieldPadding.set(key, padding);
|
||||
}
|
||||
let p = '';
|
||||
if (i < ctx.length - 2) {
|
||||
p = ' '.repeat(padding - val.length);
|
||||
}
|
||||
content += ` <span style="color:${color}">${key}</span>=${val}${p}`;
|
||||
}
|
||||
content += '<br />';
|
||||
});
|
||||
return content;
|
||||
};
|
||||
|
||||
// ADDED, SAME and REMOVED are used to track the change of the log chunk array.
|
||||
// The scroll position is set using these values.
|
||||
export const ADDED = 1;
|
||||
export const SAME = 0;
|
||||
export const REMOVED = -1;
|
||||
|
||||
// inserter is a state updater function for the main component, which inserts the new log chunk into the chunk array.
|
||||
// limit is the maximum length of the chunk array, used in order to prevent the browser from OOM.
|
||||
export const inserter = (limit: number) => (update: LogsMessage, prev: LogsType) => {
|
||||
prev.topChanged = SAME;
|
||||
prev.bottomChanged = SAME;
|
||||
if (!Array.isArray(update.chunk) || update.chunk.length < 1) {
|
||||
return prev;
|
||||
}
|
||||
if (!Array.isArray(prev.chunks)) {
|
||||
prev.chunks = [];
|
||||
}
|
||||
const content = createChunk(update.chunk);
|
||||
if (!update.source) {
|
||||
// In case of stream chunk.
|
||||
if (!prev.endBottom) {
|
||||
return prev;
|
||||
}
|
||||
if (prev.chunks.length < 1) {
|
||||
// This should never happen, because the first chunk is always a non-stream chunk.
|
||||
return [{content, name: '00000000000000.log'}];
|
||||
}
|
||||
prev.chunks[prev.chunks.length - 1].content += content;
|
||||
prev.bottomChanged = ADDED;
|
||||
return prev;
|
||||
}
|
||||
const chunk = {
|
||||
content,
|
||||
name: update.source.name,
|
||||
};
|
||||
if (prev.chunks.length > 0 && update.source.name < prev.chunks[0].name) {
|
||||
if (update.source.last) {
|
||||
prev.endTop = true;
|
||||
}
|
||||
if (prev.chunks.length >= limit) {
|
||||
prev.endBottom = false;
|
||||
prev.chunks.splice(limit - 1, prev.chunks.length - limit + 1);
|
||||
prev.bottomChanged = REMOVED;
|
||||
}
|
||||
prev.chunks = [chunk, ...prev.chunks];
|
||||
prev.topChanged = ADDED;
|
||||
return prev;
|
||||
}
|
||||
if (update.source.last) {
|
||||
prev.endBottom = true;
|
||||
}
|
||||
if (prev.chunks.length >= limit) {
|
||||
prev.endTop = false;
|
||||
prev.chunks.splice(0, prev.chunks.length - limit + 1);
|
||||
prev.topChanged = REMOVED;
|
||||
}
|
||||
prev.chunks = [...prev.chunks, chunk];
|
||||
prev.bottomChanged = ADDED;
|
||||
return prev;
|
||||
};
|
||||
|
||||
// styles contains the constant styles of the component.
|
||||
const styles = {
|
||||
logListItem: {
|
||||
padding: 0,
|
||||
lineHeight: 1.231,
|
||||
},
|
||||
logChunk: {
|
||||
color: 'white',
|
||||
fontFamily: 'monospace',
|
||||
whiteSpace: 'nowrap',
|
||||
width: 0,
|
||||
},
|
||||
waitMsg: {
|
||||
textAlign: 'center',
|
||||
color: 'white',
|
||||
fontFamily: 'monospace',
|
||||
},
|
||||
};
|
||||
|
||||
export type Props = {
|
||||
container: Object,
|
||||
content: Content,
|
||||
shouldUpdate: Object,
|
||||
send: string => void,
|
||||
};
|
||||
|
||||
type State = {
|
||||
requestAllowed: boolean,
|
||||
};
|
||||
|
||||
// Logs renders the log page.
|
||||
class Logs extends Component<Props, State> {
|
||||
constructor(props: Props) {
|
||||
super(props);
|
||||
this.content = React.createRef();
|
||||
this.state = {
|
||||
requestAllowed: true,
|
||||
};
|
||||
}
|
||||
|
||||
componentDidMount() {
|
||||
const {container} = this.props;
|
||||
if (typeof container === 'undefined') {
|
||||
return;
|
||||
}
|
||||
container.scrollTop = container.scrollHeight - container.clientHeight;
|
||||
const {logs} = this.props.content;
|
||||
if (typeof this.content === 'undefined' || logs.chunks.length < 1) {
|
||||
return;
|
||||
}
|
||||
if (this.content.clientHeight < container.clientHeight && !logs.endTop) {
|
||||
this.sendRequest(logs.chunks[0].name, true);
|
||||
}
|
||||
}
|
||||
|
||||
// onScroll is triggered by the parent component's scroll event, and sends requests if the scroll position is
|
||||
// at the top or at the bottom.
|
||||
onScroll = () => {
|
||||
if (!this.state.requestAllowed || typeof this.content === 'undefined') {
|
||||
return;
|
||||
}
|
||||
const {logs} = this.props.content;
|
||||
if (logs.chunks.length < 1) {
|
||||
return;
|
||||
}
|
||||
if (this.atTop() && !logs.endTop) {
|
||||
this.sendRequest(logs.chunks[0].name, true);
|
||||
} else if (this.atBottom() && !logs.endBottom) {
|
||||
this.sendRequest(logs.chunks[logs.chunks.length - 1].name, false);
|
||||
}
|
||||
};
|
||||
|
||||
sendRequest = (name: string, past: boolean) => {
|
||||
this.setState({requestAllowed: false});
|
||||
this.props.send(JSON.stringify({
|
||||
Logs: {
|
||||
Name: name,
|
||||
Past: past,
|
||||
},
|
||||
}));
|
||||
};
|
||||
|
||||
// atTop checks if the scroll position it at the top of the container.
|
||||
atTop = () => this.props.container.scrollTop <= this.props.container.scrollHeight * requestBand;
|
||||
|
||||
// atBottom checks if the scroll position it at the bottom of the container.
|
||||
atBottom = () => {
|
||||
const {container} = this.props;
|
||||
return container.scrollHeight - container.scrollTop
|
||||
<= container.clientHeight + container.scrollHeight * requestBand;
|
||||
};
|
||||
|
||||
// beforeUpdate is called by the parent component, saves the previous scroll position
|
||||
// and the height of the first log chunk, which can be deleted during the insertion.
|
||||
beforeUpdate = () => {
|
||||
let firstHeight = 0;
|
||||
const chunkList = this.content.children[1];
|
||||
if (chunkList && chunkList.children[0]) {
|
||||
firstHeight = chunkList.children[0].clientHeight;
|
||||
}
|
||||
return {
|
||||
scrollTop: this.props.container.scrollTop,
|
||||
firstHeight,
|
||||
};
|
||||
};
|
||||
|
||||
// didUpdate is called by the parent component, which provides the container. Sends the first request if the
|
||||
// visible part of the container isn't full, and resets the scroll position in order to avoid jumping when a
|
||||
// chunk is inserted or removed.
|
||||
didUpdate = (prevProps, prevState, snapshot) => {
|
||||
if (typeof this.props.shouldUpdate.logs === 'undefined' || typeof this.content === 'undefined' || snapshot === null) {
|
||||
return;
|
||||
}
|
||||
const {logs} = this.props.content;
|
||||
const {container} = this.props;
|
||||
if (typeof container === 'undefined' || logs.chunks.length < 1) {
|
||||
return;
|
||||
}
|
||||
if (this.content.clientHeight < container.clientHeight) {
|
||||
// Only enters here at the beginning, when there aren't enough logs to fill the container
|
||||
// and the scroll bar doesn't appear.
|
||||
if (!logs.endTop) {
|
||||
this.sendRequest(logs.chunks[0].name, true);
|
||||
}
|
||||
return;
|
||||
}
|
||||
let {scrollTop} = snapshot;
|
||||
if (logs.topChanged === ADDED) {
|
||||
// It would be safer to use a ref to the list, but ref doesn't work well with HOCs.
|
||||
scrollTop += this.content.children[1].children[0].clientHeight;
|
||||
} else if (logs.bottomChanged === ADDED) {
|
||||
if (logs.topChanged === REMOVED) {
|
||||
scrollTop -= snapshot.firstHeight;
|
||||
} else if (this.atBottom() && logs.endBottom) {
|
||||
scrollTop = container.scrollHeight - container.clientHeight;
|
||||
}
|
||||
}
|
||||
container.scrollTop = scrollTop;
|
||||
this.setState({requestAllowed: true});
|
||||
};
|
||||
|
||||
render() {
|
||||
return (
|
||||
<div ref={(ref) => { this.content = ref; }}>
|
||||
<div style={styles.waitMsg}>
|
||||
{this.props.content.logs.endTop ? 'No more logs.' : 'Waiting for server...'}
|
||||
</div>
|
||||
<List>
|
||||
{this.props.content.logs.chunks.map((c, index) => (
|
||||
<ListItem style={styles.logListItem} key={index}>
|
||||
<div style={styles.logChunk} dangerouslySetInnerHTML={{__html: c.content}} />
|
||||
</ListItem>
|
||||
))}
|
||||
</List>
|
||||
{this.props.content.logs.endBottom || <div style={styles.waitMsg}>Waiting for server...</div>}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
export default Logs;
|
@ -1,147 +0,0 @@
|
||||
// @flow
|
||||
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import React, {Component} from 'react';
|
||||
|
||||
import withStyles from '@material-ui/core/styles/withStyles';
|
||||
|
||||
import Chain from 'Chain';
|
||||
import Network from 'Network';
|
||||
import Logs from 'Logs';
|
||||
import Footer from 'Footer';
|
||||
import {MENU} from '../common';
|
||||
import type {Content} from '../types/content';
|
||||
|
||||
// styles contains the constant styles of the component.
|
||||
const styles = {
|
||||
wrapper: {
|
||||
display: 'flex',
|
||||
flexDirection: 'column',
|
||||
width: '100%',
|
||||
},
|
||||
content: {
|
||||
flex: 1,
|
||||
overflow: 'auto',
|
||||
},
|
||||
};
|
||||
|
||||
// themeStyles returns the styles generated from the theme for the component.
|
||||
const themeStyles = theme => ({
|
||||
content: {
|
||||
backgroundColor: theme.palette.background.default,
|
||||
padding: theme.spacing.unit * 3,
|
||||
},
|
||||
});
|
||||
|
||||
export type Props = {
|
||||
classes: Object,
|
||||
active: string,
|
||||
content: Content,
|
||||
shouldUpdate: Object,
|
||||
send: string => void,
|
||||
};
|
||||
|
||||
type State = {};
|
||||
|
||||
// Main renders the chosen content.
|
||||
class Main extends Component<Props, State> {
|
||||
constructor(props) {
|
||||
super(props);
|
||||
this.container = React.createRef();
|
||||
this.content = React.createRef();
|
||||
}
|
||||
|
||||
componentDidUpdate(prevProps, prevState, snapshot) {
|
||||
if (this.content && typeof this.content.didUpdate === 'function') {
|
||||
this.content.didUpdate(prevProps, prevState, snapshot);
|
||||
}
|
||||
}
|
||||
|
||||
onScroll = () => {
|
||||
if (this.content && typeof this.content.onScroll === 'function') {
|
||||
this.content.onScroll();
|
||||
}
|
||||
};
|
||||
|
||||
getSnapshotBeforeUpdate(prevProps: Readonly<P>, prevState: Readonly<S>) {
|
||||
if (this.content && typeof this.content.beforeUpdate === 'function') {
|
||||
return this.content.beforeUpdate();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
render() {
|
||||
const {
|
||||
classes, active, content, shouldUpdate,
|
||||
} = this.props;
|
||||
|
||||
let children = null;
|
||||
switch (active) {
|
||||
case MENU.get('home').id:
|
||||
children = <div>Work in progress.</div>;
|
||||
break;
|
||||
case MENU.get('chain').id:
|
||||
children = <Chain
|
||||
content={this.props.content.chain}
|
||||
/>;
|
||||
break;
|
||||
case MENU.get('txpool').id:
|
||||
children = <div>Work in progress.</div>;
|
||||
break;
|
||||
case MENU.get('network').id:
|
||||
children = <Network
|
||||
content={this.props.content.network}
|
||||
container={this.container}
|
||||
/>;
|
||||
break;
|
||||
case MENU.get('system').id:
|
||||
children = <div>Work in progress.</div>;
|
||||
break;
|
||||
case MENU.get('logs').id:
|
||||
children = (
|
||||
<Logs
|
||||
ref={(ref) => { this.content = ref; }}
|
||||
container={this.container}
|
||||
send={this.props.send}
|
||||
content={this.props.content}
|
||||
shouldUpdate={shouldUpdate}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<div style={styles.wrapper}>
|
||||
<div
|
||||
className={classes.content}
|
||||
style={styles.content}
|
||||
ref={(ref) => { this.container = ref; }}
|
||||
onScroll={this.onScroll}
|
||||
>
|
||||
{children}
|
||||
</div>
|
||||
<Footer
|
||||
general={content.general}
|
||||
system={content.system}
|
||||
shouldUpdate={shouldUpdate}
|
||||
/>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
export default withStyles(themeStyles)(Main);
|
@ -1,858 +0,0 @@
|
||||
// @flow
|
||||
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import React, {Component} from 'react';
|
||||
|
||||
import withStyles from '@material-ui/core/styles/withStyles';
|
||||
import Table from '@material-ui/core/Table';
|
||||
import TableHead from '@material-ui/core/TableHead';
|
||||
import TableBody from '@material-ui/core/TableBody';
|
||||
import TableRow from '@material-ui/core/TableRow';
|
||||
import TableCell from '@material-ui/core/TableCell';
|
||||
import Grid from '@material-ui/core/Grid/Grid';
|
||||
import Typography from '@material-ui/core/Typography';
|
||||
import {AreaChart, Area, Tooltip, YAxis} from 'recharts';
|
||||
import {FontAwesomeIcon} from '@fortawesome/react-fontawesome';
|
||||
import {faCircle as fasCircle} from '@fortawesome/free-solid-svg-icons'; // More icons at fontawesome.com/icons
|
||||
import {faCircle as farCircle, faClipboard as farClipboard} from '@fortawesome/free-regular-svg-icons';
|
||||
import convert from 'color-convert';
|
||||
import {Scrollbars} from 'react-custom-scrollbars';
|
||||
|
||||
import CustomTooltip, {bytePlotter, multiplier} from 'CustomTooltip';
|
||||
import type {Network as NetworkType, PeerEvent} from '../types/content';
|
||||
import {chartStrokeWidth, hues, hueScale} from '../common';
|
||||
|
||||
// Peer chart dimensions.
|
||||
const trafficChartHeight = 15;
|
||||
const trafficChartWidth = 200;
|
||||
|
||||
// attemptSeparator separates the peer connection attempts
|
||||
// such as the peers from the addresses with more attempts
|
||||
// go to the beginning of the table, and the rest go to the end.
|
||||
const attemptSeparator = 9;
|
||||
|
||||
// setMaxIngress adjusts the peer chart's gradient values based on the given value.
|
||||
const setMaxIngress = (peer, value) => {
|
||||
peer.maxIngress = value;
|
||||
peer.ingressGradient = [];
|
||||
peer.ingressGradient.push({offset: hueScale[0], color: hues[0]});
|
||||
let i = 1;
|
||||
for (; i < hues.length && value > hueScale[i]; i++) {
|
||||
peer.ingressGradient.push({offset: Math.floor(hueScale[i] * 100 / value), color: hues[i]});
|
||||
}
|
||||
i--;
|
||||
if (i < hues.length - 1) {
|
||||
// Usually the maximum value gets between two points on the predefined
|
||||
// color scale (e.g. 123KB is somewhere between 100KB (#FFFF00) and
|
||||
// 1MB (#FF0000)), and the charts need to be comparable by the colors,
|
||||
// so we have to calculate the last hue using the maximum value and the
|
||||
// surrounding hues in order to avoid the uniformity of the top colors
|
||||
// on the charts. For this reason the two hues are translated into the
|
||||
// CIELAB color space, and the top color will be their weighted average
|
||||
// (CIELAB is perceptually uniform, meaning that any point on the line
|
||||
// between two pure color points is also a pure color, so the weighted
|
||||
// average will not lose from the saturation).
|
||||
//
|
||||
// In case the maximum value is greater than the biggest predefined
|
||||
// scale value, the top of the chart will have uniform color.
|
||||
const lastHue = convert.hex.lab(hues[i]);
|
||||
const proportion = (value - hueScale[i]) * 100 / (hueScale[i + 1] - hueScale[i]);
|
||||
convert.hex.lab(hues[i + 1]).forEach((val, j) => {
|
||||
lastHue[j] = (lastHue[j] * proportion + val * (100 - proportion)) / 100;
|
||||
});
|
||||
peer.ingressGradient.push({offset: 100, color: `#${convert.lab.hex(lastHue)}`});
|
||||
}
|
||||
};
|
||||
|
||||
// setMaxEgress adjusts the peer chart's gradient values based on the given value.
|
||||
// In case of the egress the chart is upside down, so the gradients need to be
|
||||
// calculated inversely compared to the ingress.
|
||||
const setMaxEgress = (peer, value) => {
|
||||
peer.maxEgress = value;
|
||||
peer.egressGradient = [];
|
||||
peer.egressGradient.push({offset: 100 - hueScale[0], color: hues[0]});
|
||||
let i = 1;
|
||||
for (; i < hues.length && value > hueScale[i]; i++) {
|
||||
peer.egressGradient.unshift({offset: 100 - Math.floor(hueScale[i] * 100 / value), color: hues[i]});
|
||||
}
|
||||
i--;
|
||||
if (i < hues.length - 1) {
|
||||
// Calculate the last hue.
|
||||
const lastHue = convert.hex.lab(hues[i]);
|
||||
const proportion = (value - hueScale[i]) * 100 / (hueScale[i + 1] - hueScale[i]);
|
||||
convert.hex.lab(hues[i + 1]).forEach((val, j) => {
|
||||
lastHue[j] = (lastHue[j] * proportion + val * (100 - proportion)) / 100;
|
||||
});
|
||||
peer.egressGradient.unshift({offset: 0, color: `#${convert.lab.hex(lastHue)}`});
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
// setIngressChartAttributes searches for the maximum value of the ingress
|
||||
// samples, and adjusts the peer chart's gradient values accordingly.
|
||||
const setIngressChartAttributes = (peer) => {
|
||||
let max = 0;
|
||||
peer.ingress.forEach(({value}) => {
|
||||
if (value > max) {
|
||||
max = value;
|
||||
}
|
||||
});
|
||||
setMaxIngress(peer, max);
|
||||
};
|
||||
|
||||
// setEgressChartAttributes searches for the maximum value of the egress
|
||||
// samples, and adjusts the peer chart's gradient values accordingly.
|
||||
const setEgressChartAttributes = (peer) => {
|
||||
let max = 0;
|
||||
peer.egress.forEach(({value}) => {
|
||||
if (value > max) {
|
||||
max = value;
|
||||
}
|
||||
});
|
||||
setMaxEgress(peer, max);
|
||||
};
|
||||
|
||||
// shortName adds some heuristics to the node name in order to make it look meaningful.
|
||||
const shortName = (name: string) => {
|
||||
const parts = name.split('/');
|
||||
if (parts[0].substring(0, 'parity'.length).toLowerCase() === 'parity') {
|
||||
// Merge Parity and Parity-Ethereum under the same name.
|
||||
parts[0] = 'Parity';
|
||||
}
|
||||
if (parts.length < 2) {
|
||||
console.error('Incorrect node name', name);
|
||||
return parts[0];
|
||||
}
|
||||
const versionRE = RegExp(/^v?\d+\.\d+\.\d+.*/);
|
||||
// Drop optional custom identifier.
|
||||
if (!versionRE.test(parts[1])) {
|
||||
if (parts.length < 3 || !versionRE.test(parts[2])) {
|
||||
console.error('Incorrect node name', name);
|
||||
return parts[0];
|
||||
}
|
||||
parts[1] = parts[2];
|
||||
}
|
||||
// Cutting anything from the version after the first - or +.
|
||||
parts[1] = parts[1].split('-')[0].split('+')[0];
|
||||
return `${parts[0]}/${parts[1]}`;
|
||||
};
|
||||
|
||||
// shortLocation returns a shortened version of the given location object.
|
||||
const shortLocation = (location: Object) => {
|
||||
if (!location) {
|
||||
return '';
|
||||
}
|
||||
return `${location.city ? `${location.city}/` : ''}${location.country ? location.country : ''}`;
|
||||
};
|
||||
|
||||
// protocol returns a shortened version of the eth protocol values.
|
||||
const protocol = (p: Object) => {
|
||||
if (!p) {
|
||||
return '';
|
||||
}
|
||||
if (typeof p === 'string') {
|
||||
return p;
|
||||
}
|
||||
if (!(p instanceof Object)) {
|
||||
console.error('Wrong protocol type', p, typeof p);
|
||||
return '';
|
||||
}
|
||||
if (!p.hasOwnProperty('version') || !p.hasOwnProperty('difficulty') || !p.hasOwnProperty('head')) {
|
||||
console.error('Missing protocol attributes', p);
|
||||
return '';
|
||||
}
|
||||
return `h=${p.head.substring(0, 10)} v=${p.version} td=${p.difficulty}`;
|
||||
};
|
||||
|
||||
// inserter is a state updater function for the main component, which handles the peers.
|
||||
export const inserter = (sampleLimit: number) => (update: NetworkType, prev: NetworkType) => {
|
||||
// The first message contains the metered peer history.
|
||||
if (update.peers && update.peers.bundles) {
|
||||
prev.peers = update.peers;
|
||||
Object.values(prev.peers.bundles).forEach((bundle) => {
|
||||
if (bundle.knownPeers) {
|
||||
Object.values(bundle.knownPeers).forEach((peer) => {
|
||||
if (!peer.maxIngress) {
|
||||
setIngressChartAttributes(peer);
|
||||
}
|
||||
if (!peer.maxEgress) {
|
||||
setEgressChartAttributes(peer);
|
||||
}
|
||||
if (!peer.name) {
|
||||
peer.name = '';
|
||||
peer.shortName = '';
|
||||
} else if (!peer.shortName) {
|
||||
peer.shortName = shortName(peer.name);
|
||||
}
|
||||
if (!peer.enode) {
|
||||
peer.enode = '';
|
||||
}
|
||||
if (!peer.protocols) {
|
||||
peer.protocols = {};
|
||||
}
|
||||
peer.eth = protocol(peer.protocols.eth);
|
||||
peer.les = protocol(peer.protocols.les);
|
||||
});
|
||||
}
|
||||
bundle.shortLocation = shortLocation(bundle.location);
|
||||
});
|
||||
}
|
||||
if (Array.isArray(update.diff)) {
|
||||
update.diff.forEach((event: PeerEvent) => {
|
||||
if (!event.addr) {
|
||||
console.error('Peer event without TCP address', event);
|
||||
return;
|
||||
}
|
||||
switch (event.remove) {
|
||||
case 'bundle': {
|
||||
delete prev.peers.bundles[event.addr];
|
||||
return;
|
||||
}
|
||||
case 'known': {
|
||||
if (!event.enode) {
|
||||
console.error('Remove known peer event without node URL', event.addr);
|
||||
return;
|
||||
}
|
||||
const bundle = prev.peers.bundles[event.addr];
|
||||
if (!bundle || !bundle.knownPeers || !bundle.knownPeers[event.enode]) {
|
||||
console.error('No known peer to remove', event.addr, event.enode);
|
||||
return;
|
||||
}
|
||||
delete bundle.knownPeers[event.enode];
|
||||
return;
|
||||
}
|
||||
}
|
||||
if (!prev.peers.bundles[event.addr]) {
|
||||
prev.peers.bundles[event.addr] = {
|
||||
location: {
|
||||
country: '',
|
||||
city: '',
|
||||
latitude: 0,
|
||||
longitude: 0,
|
||||
},
|
||||
shortLocation: '',
|
||||
knownPeers: {},
|
||||
attempts: 0,
|
||||
};
|
||||
}
|
||||
const bundle = prev.peers.bundles[event.addr];
|
||||
if (event.location) {
|
||||
bundle.location = event.location;
|
||||
bundle.shortLocation = shortLocation(bundle.location);
|
||||
return;
|
||||
}
|
||||
if (!event.enode) {
|
||||
bundle.attempts++;
|
||||
return;
|
||||
}
|
||||
if (!bundle.knownPeers) {
|
||||
bundle.knownPeers = {};
|
||||
}
|
||||
if (!bundle.knownPeers[event.enode]) {
|
||||
bundle.knownPeers[event.enode] = {
|
||||
connected: [],
|
||||
disconnected: [],
|
||||
ingress: [],
|
||||
egress: [],
|
||||
active: false,
|
||||
name: '',
|
||||
shortName: '',
|
||||
enode: '',
|
||||
protocols: {},
|
||||
eth: '',
|
||||
les: '',
|
||||
};
|
||||
}
|
||||
const peer = bundle.knownPeers[event.enode];
|
||||
if (event.name) {
|
||||
peer.name = event.name;
|
||||
peer.shortName = shortName(event.name);
|
||||
}
|
||||
if (event.enode) {
|
||||
peer.enode = event.enode;
|
||||
}
|
||||
if (event.protocols) {
|
||||
peer.protocols = event.protocols;
|
||||
peer.eth = protocol(peer.protocols.eth);
|
||||
peer.les = protocol(peer.protocols.les);
|
||||
}
|
||||
if (!peer.maxIngress) {
|
||||
setIngressChartAttributes(peer);
|
||||
}
|
||||
if (!peer.maxEgress) {
|
||||
setEgressChartAttributes(peer);
|
||||
}
|
||||
if (event.connected) {
|
||||
if (!peer.connected) {
|
||||
console.warn('peer.connected should exist');
|
||||
peer.connected = [];
|
||||
}
|
||||
peer.connected.push(event.connected);
|
||||
}
|
||||
if (event.disconnected) {
|
||||
if (!peer.disconnected) {
|
||||
console.warn('peer.disconnected should exist');
|
||||
peer.disconnected = [];
|
||||
}
|
||||
peer.disconnected.push(event.disconnected);
|
||||
}
|
||||
switch (event.activity) {
|
||||
case 'active':
|
||||
peer.active = true;
|
||||
break;
|
||||
case 'inactive':
|
||||
peer.active = false;
|
||||
break;
|
||||
}
|
||||
if (Array.isArray(event.ingress) && Array.isArray(event.egress)) {
|
||||
if (event.ingress.length !== event.egress.length) {
|
||||
console.error('Different traffic sample length', event);
|
||||
return;
|
||||
}
|
||||
// Check if there is a new maximum value, and reset the colors in case.
|
||||
let maxIngress = peer.maxIngress;
|
||||
event.ingress.forEach(({value}) => {
|
||||
if (value > maxIngress) {
|
||||
maxIngress = value;
|
||||
}
|
||||
});
|
||||
if (maxIngress > peer.maxIngress) {
|
||||
setMaxIngress(peer, maxIngress);
|
||||
}
|
||||
// Push the new values.
|
||||
peer.ingress.splice(peer.ingress.length, 0, ...event.ingress);
|
||||
const ingressDiff = peer.ingress.length - sampleLimit;
|
||||
if (ingressDiff > 0) {
|
||||
// Check if the maximum value is in the beginning.
|
||||
let i = 0;
|
||||
while (i < ingressDiff && peer.ingress[i].value < peer.maxIngress) {
|
||||
i++;
|
||||
}
|
||||
// Remove the old values from the beginning.
|
||||
peer.ingress.splice(0, ingressDiff);
|
||||
if (i < ingressDiff) {
|
||||
// Reset the colors if the maximum value leaves the chart.
|
||||
setIngressChartAttributes(peer);
|
||||
}
|
||||
}
|
||||
// Check if there is a new maximum value, and reset the colors in case.
|
||||
let maxEgress = peer.maxEgress;
|
||||
event.egress.forEach(({value}) => {
|
||||
if (value > maxEgress) {
|
||||
maxEgress = value;
|
||||
}
|
||||
});
|
||||
if (maxEgress > peer.maxEgress) {
|
||||
setMaxEgress(peer, maxEgress);
|
||||
}
|
||||
// Push the new values.
|
||||
peer.egress.splice(peer.egress.length, 0, ...event.egress);
|
||||
const egressDiff = peer.egress.length - sampleLimit;
|
||||
if (egressDiff > 0) {
|
||||
// Check if the maximum value is in the beginning.
|
||||
let i = 0;
|
||||
while (i < egressDiff && peer.egress[i].value < peer.maxEgress) {
|
||||
i++;
|
||||
}
|
||||
// Remove the old values from the beginning.
|
||||
peer.egress.splice(0, egressDiff);
|
||||
if (i < egressDiff) {
|
||||
// Reset the colors if the maximum value leaves the chart.
|
||||
setEgressChartAttributes(peer);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
prev.activePeerCount = 0;
|
||||
Object.entries(prev.peers.bundles).forEach(([addr, bundle]) => {
|
||||
if (!bundle.knownPeers || Object.keys(bundle.knownPeers).length < 1) {
|
||||
return;
|
||||
}
|
||||
Object.entries(bundle.knownPeers).forEach(([enode, peer]) => {
|
||||
if (peer.active === true) {
|
||||
prev.activePeerCount++;
|
||||
}
|
||||
});
|
||||
});
|
||||
return prev;
|
||||
};
|
||||
|
||||
// styles contains the constant styles of the component.
|
||||
const styles = {
|
||||
title: {
|
||||
marginLeft: 5,
|
||||
},
|
||||
table: {
|
||||
borderCollapse: 'unset',
|
||||
padding: 5,
|
||||
},
|
||||
tableHead: {
|
||||
height: 'auto',
|
||||
},
|
||||
tableRow: {
|
||||
height: 'auto',
|
||||
},
|
||||
tableCell: {
|
||||
paddingTop: 0,
|
||||
paddingRight: 5,
|
||||
paddingBottom: 0,
|
||||
paddingLeft: 5,
|
||||
border: 'none',
|
||||
fontFamily: 'monospace',
|
||||
fontSize: 10,
|
||||
},
|
||||
content: {
|
||||
height: '800px',
|
||||
},
|
||||
};
|
||||
|
||||
// themeStyles returns the styles generated from the theme for the component.
|
||||
const themeStyles = theme => ({
|
||||
title: {
|
||||
color: theme.palette.common.white,
|
||||
},
|
||||
table: {
|
||||
background: theme.palette.grey[900],
|
||||
},
|
||||
});
|
||||
|
||||
// limitedWidthStyle returns a style object which cuts the long text with three dots.
|
||||
const limitedWidthStyle = (width) => {
|
||||
return {
|
||||
textOverflow: 'ellipsis',
|
||||
maxWidth: width,
|
||||
overflow: 'hidden',
|
||||
whiteSpace: 'nowrap',
|
||||
};
|
||||
};
|
||||
|
||||
export type Props = {
|
||||
classes: Object, // injected by withStyles()
|
||||
container: Object,
|
||||
content: NetworkType,
|
||||
shouldUpdate: Object,
|
||||
};
|
||||
|
||||
type State = {};
|
||||
|
||||
// Network renders the network page.
|
||||
class Network extends Component<Props, State> {
|
||||
componentDidMount() {
|
||||
const {container} = this.props;
|
||||
if (typeof container === 'undefined') {
|
||||
return;
|
||||
}
|
||||
container.scrollTop = 0;
|
||||
}
|
||||
|
||||
formatTime = (t: string) => {
|
||||
const time = new Date(t);
|
||||
if (isNaN(time)) {
|
||||
return '';
|
||||
}
|
||||
const month = `0${time.getMonth() + 1}`.slice(-2);
|
||||
const date = `0${time.getDate()}`.slice(-2);
|
||||
const hours = `0${time.getHours()}`.slice(-2);
|
||||
const minutes = `0${time.getMinutes()}`.slice(-2);
|
||||
const seconds = `0${time.getSeconds()}`.slice(-2);
|
||||
return `${month}/${date}/${hours}:${minutes}:${seconds}`;
|
||||
};
|
||||
|
||||
copyToClipboard = (text: string) => (event) => {
|
||||
event.preventDefault();
|
||||
navigator.clipboard.writeText(text).then(() => {}, () => {
|
||||
console.error("Failed to copy", text);
|
||||
});
|
||||
};
|
||||
|
||||
lesList = () => {
|
||||
const list = [];
|
||||
Object.values(this.props.content.peers.bundles).forEach((bundle) => {
|
||||
if (!bundle.knownPeers || Object.keys(bundle.knownPeers).length < 1) {
|
||||
return;
|
||||
}
|
||||
Object.entries(bundle.knownPeers).forEach(([enode, peer]) => {
|
||||
if (peer.les === '' || peer.eth !== '') {
|
||||
return;
|
||||
}
|
||||
list.push({enode, name: peer.name, location: bundle.location, protocols: peer.protocols});
|
||||
});
|
||||
});
|
||||
return list;
|
||||
};
|
||||
|
||||
ethList = () => {
|
||||
const list = [];
|
||||
Object.values(this.props.content.peers.bundles).forEach((bundle) => {
|
||||
if (!bundle.knownPeers || Object.keys(bundle.knownPeers).length < 1) {
|
||||
return;
|
||||
}
|
||||
Object.entries(bundle.knownPeers).forEach(([enode, peer]) => {
|
||||
if (peer.eth === '' && peer.les !== '') {
|
||||
return;
|
||||
}
|
||||
list.push({enode, name: peer.name, location: bundle.location, protocols: peer.protocols});
|
||||
});
|
||||
});
|
||||
return list;
|
||||
};
|
||||
|
||||
attemptList = () => {
|
||||
const list = [];
|
||||
Object.entries(this.props.content.peers.bundles).forEach(([addr, bundle]) => {
|
||||
if (!bundle.attempts) {
|
||||
return;
|
||||
}
|
||||
list.push({addr, location: bundle.location, attempts: bundle.attempts});
|
||||
});
|
||||
return list;
|
||||
};
|
||||
|
||||
knownPeerTableRow = (addr, enode, bundle, peer, showTraffic, proto) => {
|
||||
const ingressValues = peer.ingress.map(({value}) => ({ingress: value || 0.001}));
|
||||
const egressValues = peer.egress.map(({value}) => ({egress: -value || -0.001}));
|
||||
return (
|
||||
<TableRow key={`known_${addr}_${enode}`} style={styles.tableRow}>
|
||||
<TableCell style={styles.tableCell}>
|
||||
{peer.active
|
||||
? <FontAwesomeIcon icon={fasCircle} color='green' />
|
||||
: <FontAwesomeIcon icon={farCircle} />
|
||||
}
|
||||
</TableCell>
|
||||
<TableCell
|
||||
style={{
|
||||
cursor: 'copy',
|
||||
...styles.tableCell,
|
||||
...limitedWidthStyle(80),
|
||||
}}
|
||||
onClick={this.copyToClipboard(enode)}
|
||||
>
|
||||
{enode.substring(8)}
|
||||
</TableCell>
|
||||
<TableCell
|
||||
style={{
|
||||
cursor: 'copy',
|
||||
...styles.tableCell,
|
||||
...limitedWidthStyle(80),
|
||||
}}
|
||||
onClick={this.copyToClipboard(peer.name)}
|
||||
>
|
||||
{peer.shortName}
|
||||
</TableCell>
|
||||
<TableCell
|
||||
style={{
|
||||
cursor: 'copy',
|
||||
...styles.tableCell,
|
||||
...limitedWidthStyle(100),
|
||||
}}
|
||||
onClick={this.copyToClipboard(JSON.stringify(bundle.location))}
|
||||
>
|
||||
{bundle.shortLocation}
|
||||
</TableCell>
|
||||
<TableCell style={styles.tableCell}>
|
||||
{showTraffic ? (
|
||||
<>
|
||||
<AreaChart
|
||||
width={trafficChartWidth}
|
||||
height={trafficChartHeight}
|
||||
data={ingressValues}
|
||||
margin={{top: 5, right: 5, bottom: 0, left: 5}}
|
||||
syncId={`peerIngress_${addr}_${enode}`}
|
||||
>
|
||||
<defs>
|
||||
<linearGradient id={`ingressGradient_${addr}_${enode}`} x1='0' y1='1' x2='0' y2='0'>
|
||||
{peer.ingressGradient
|
||||
&& peer.ingressGradient.map(({offset, color}, i) => (
|
||||
<stop
|
||||
key={`ingressStop_${addr}_${enode}_${i}`}
|
||||
offset={`${offset}%`}
|
||||
stopColor={color}
|
||||
/>
|
||||
))}
|
||||
</linearGradient>
|
||||
</defs>
|
||||
<Tooltip cursor={false} content={<CustomTooltip tooltip={bytePlotter('Download')} />} />
|
||||
<YAxis hide scale='sqrt' domain={[0.001, dataMax => Math.max(dataMax, 0)]} />
|
||||
<Area
|
||||
dataKey='ingress'
|
||||
isAnimationActive={false}
|
||||
type='monotone'
|
||||
fill={`url(#ingressGradient_${addr}_${enode})`}
|
||||
stroke={peer.ingressGradient[peer.ingressGradient.length - 1].color}
|
||||
strokeWidth={chartStrokeWidth}
|
||||
/>
|
||||
</AreaChart>
|
||||
<AreaChart
|
||||
width={trafficChartWidth}
|
||||
height={trafficChartHeight}
|
||||
data={egressValues}
|
||||
margin={{top: 0, right: 5, bottom: 5, left: 5}}
|
||||
syncId={`peerIngress_${addr}_${enode}`}
|
||||
>
|
||||
<defs>
|
||||
<linearGradient id={`egressGradient_${addr}_${enode}`} x1='0' y1='1' x2='0' y2='0'>
|
||||
{peer.egressGradient
|
||||
&& peer.egressGradient.map(({offset, color}, i) => (
|
||||
<stop
|
||||
key={`egressStop_${addr}_${enode}_${i}`}
|
||||
offset={`${offset}%`}
|
||||
stopColor={color}
|
||||
/>
|
||||
))}
|
||||
</linearGradient>
|
||||
</defs>
|
||||
<Tooltip cursor={false} content={<CustomTooltip tooltip={bytePlotter('Upload', multiplier(-1))} />} />
|
||||
<YAxis hide scale='sqrt' domain={[dataMin => Math.min(dataMin, 0), -0.001]} />
|
||||
<Area
|
||||
dataKey='egress'
|
||||
isAnimationActive={false}
|
||||
type='monotone'
|
||||
fill={`url(#egressGradient_${addr}_${enode})`}
|
||||
stroke={peer.egressGradient[0].color}
|
||||
strokeWidth={chartStrokeWidth}
|
||||
/>
|
||||
</AreaChart>
|
||||
</>
|
||||
) : null}
|
||||
</TableCell>
|
||||
{typeof proto === 'object' ? (
|
||||
<>
|
||||
<TableCell
|
||||
style={{
|
||||
cursor: 'copy',
|
||||
...styles.tableCell,
|
||||
...limitedWidthStyle(80),
|
||||
}}
|
||||
onClick={this.copyToClipboard(JSON.stringify(proto.head))}
|
||||
>
|
||||
{proto.head}
|
||||
</TableCell>
|
||||
<TableCell
|
||||
style={{
|
||||
cursor: 'copy',
|
||||
...styles.tableCell,
|
||||
}}
|
||||
onClick={this.copyToClipboard(JSON.stringify(proto.difficulty))}
|
||||
>
|
||||
{proto.difficulty}
|
||||
</TableCell>
|
||||
<TableCell
|
||||
style={{
|
||||
cursor: 'copy',
|
||||
...styles.tableCell,
|
||||
}}
|
||||
onClick={this.copyToClipboard(JSON.stringify(proto.version))}
|
||||
>
|
||||
{proto.version}
|
||||
</TableCell>
|
||||
</>
|
||||
) : null }
|
||||
</TableRow>
|
||||
);
|
||||
};
|
||||
|
||||
connectionAttemptTableRow = (addr, bundle) => (
|
||||
<TableRow key={`attempt_${addr}`} style={styles.tableRow}>
|
||||
<TableCell
|
||||
style={{cursor: 'copy', ...styles.tableCell}}
|
||||
onClick={this.copyToClipboard(addr)}
|
||||
>
|
||||
{addr}
|
||||
</TableCell>
|
||||
<TableCell
|
||||
style={{cursor: 'copy', ...limitedWidthStyle(100), ...styles.tableCell}}
|
||||
onClick={this.copyToClipboard(JSON.stringify(bundle.location))}
|
||||
>
|
||||
{bundle.shortLocation}
|
||||
</TableCell>
|
||||
<TableCell style={styles.tableCell}>
|
||||
{bundle.attempts}
|
||||
</TableCell>
|
||||
</TableRow>
|
||||
);
|
||||
|
||||
render() {
|
||||
const {classes} = this.props;
|
||||
return (
|
||||
<Grid container direction='row' spacing={3}>
|
||||
<Grid item style={{width: '40%'}}>
|
||||
<div className={classes.table} style={styles.table}>
|
||||
<Typography variant='subtitle1' gutterBottom className={classes.title} style={styles.title}>
|
||||
Full peers
|
||||
<FontAwesomeIcon
|
||||
icon={farClipboard}
|
||||
onClick={this.copyToClipboard(JSON.stringify(this.ethList()))}
|
||||
style={{float: 'right'}}
|
||||
/>
|
||||
</Typography>
|
||||
<Scrollbars style={styles.content}>
|
||||
<Table>
|
||||
<TableHead style={styles.tableHead}>
|
||||
<TableRow style={styles.tableRow}>
|
||||
<TableCell style={styles.tableCell} />
|
||||
<TableCell style={styles.tableCell}>Node URL</TableCell>
|
||||
<TableCell style={styles.tableCell}>Name</TableCell>
|
||||
<TableCell style={styles.tableCell}>Location</TableCell>
|
||||
<TableCell style={styles.tableCell}>Traffic</TableCell>
|
||||
<TableCell style={styles.tableCell}>Head</TableCell>
|
||||
<TableCell style={styles.tableCell}>TD</TableCell>
|
||||
<TableCell style={styles.tableCell}>V</TableCell>
|
||||
</TableRow>
|
||||
</TableHead>
|
||||
<TableBody>
|
||||
{Object.entries(this.props.content.peers.bundles).map(([addr, bundle]) => {
|
||||
if (!bundle.knownPeers || Object.keys(bundle.knownPeers).length < 1) {
|
||||
return null;
|
||||
}
|
||||
return Object.entries(bundle.knownPeers).map(([enode, peer]) => {
|
||||
if (peer.active === false) {
|
||||
return null;
|
||||
}
|
||||
if (peer.eth === '' && peer.les !== '') {
|
||||
return null;
|
||||
}
|
||||
return this.knownPeerTableRow(addr, enode, bundle, peer, true, peer.protocols.eth);
|
||||
});
|
||||
})}
|
||||
</TableBody>
|
||||
<TableBody>
|
||||
{Object.entries(this.props.content.peers.bundles).map(([addr, bundle]) => {
|
||||
if (!bundle.knownPeers || Object.keys(bundle.knownPeers).length < 1) {
|
||||
return null;
|
||||
}
|
||||
return Object.entries(bundle.knownPeers).map(([enode, peer]) => {
|
||||
if (peer.active === true) {
|
||||
return null;
|
||||
}
|
||||
if (peer.eth === '' && peer.les !== '') {
|
||||
return null;
|
||||
}
|
||||
return this.knownPeerTableRow(addr, enode, bundle, peer, false, peer.protocols.eth);
|
||||
});
|
||||
})}
|
||||
</TableBody>
|
||||
</Table>
|
||||
</Scrollbars>
|
||||
</div>
|
||||
</Grid>
|
||||
<Grid item style={{width: '40%'}}>
|
||||
<div className={classes.table} style={styles.table}>
|
||||
<Typography variant='subtitle1' gutterBottom className={classes.title} style={styles.title}>
|
||||
Light peers
|
||||
<FontAwesomeIcon
|
||||
icon={farClipboard}
|
||||
onClick={this.copyToClipboard(JSON.stringify(this.lesList()))}
|
||||
style={{float: 'right'}}
|
||||
/>
|
||||
</Typography>
|
||||
<Scrollbars style={styles.content}>
|
||||
<Table>
|
||||
<TableHead style={styles.tableHead}>
|
||||
<TableRow style={styles.tableRow}>
|
||||
<TableCell style={styles.tableCell} />
|
||||
<TableCell style={styles.tableCell}>Node URL</TableCell>
|
||||
<TableCell style={styles.tableCell}>Name</TableCell>
|
||||
<TableCell style={styles.tableCell}>Location</TableCell>
|
||||
<TableCell style={styles.tableCell}>Traffic</TableCell>
|
||||
<TableCell style={styles.tableCell}>Head</TableCell>
|
||||
<TableCell style={styles.tableCell}>TD</TableCell>
|
||||
<TableCell style={styles.tableCell}>V</TableCell>
|
||||
</TableRow>
|
||||
</TableHead>
|
||||
<TableBody>
|
||||
{Object.entries(this.props.content.peers.bundles).map(([addr, bundle]) => {
|
||||
if (!bundle.knownPeers || Object.keys(bundle.knownPeers).length < 1) {
|
||||
return null;
|
||||
}
|
||||
return Object.entries(bundle.knownPeers).map(([enode, peer]) => {
|
||||
if (peer.active === false) {
|
||||
return null;
|
||||
}
|
||||
if (peer.les === '' || peer.eth !== '') {
|
||||
return null;
|
||||
}
|
||||
return this.knownPeerTableRow(addr, enode, bundle, peer, true, peer.protocols.les);
|
||||
});
|
||||
})}
|
||||
</TableBody>
|
||||
<TableBody>
|
||||
{Object.entries(this.props.content.peers.bundles).map(([addr, bundle]) => {
|
||||
if (!bundle.knownPeers || Object.keys(bundle.knownPeers).length < 1) {
|
||||
return null;
|
||||
}
|
||||
return Object.entries(bundle.knownPeers).map(([enode, peer]) => {
|
||||
if (peer.active === true) {
|
||||
return null;
|
||||
}
|
||||
if (peer.les === '' || peer.eth !== '') {
|
||||
return null;
|
||||
}
|
||||
return this.knownPeerTableRow(addr, enode, bundle, peer, false, peer.protocols.les);
|
||||
});
|
||||
})}
|
||||
</TableBody>
|
||||
</Table>
|
||||
</Scrollbars>
|
||||
</div>
|
||||
</Grid>
|
||||
<Grid item xs>
|
||||
<div className={classes.table} style={styles.table}>
|
||||
<Typography variant='subtitle1' gutterBottom className={classes.title} style={styles.title}>
|
||||
Connection attempts
|
||||
<FontAwesomeIcon
|
||||
icon={farClipboard}
|
||||
onClick={this.copyToClipboard(JSON.stringify(this.attemptList()))}
|
||||
style={{float: 'right'}}
|
||||
/>
|
||||
</Typography>
|
||||
<Scrollbars style={styles.content}>
|
||||
<Table>
|
||||
<TableHead style={styles.tableHead}>
|
||||
<TableRow style={styles.tableRow}>
|
||||
<TableCell style={styles.tableCell}>TCP address</TableCell>
|
||||
<TableCell style={styles.tableCell}>Location</TableCell>
|
||||
<TableCell style={styles.tableCell}>Nr</TableCell>
|
||||
</TableRow>
|
||||
</TableHead>
|
||||
<TableBody>
|
||||
{Object.entries(this.props.content.peers.bundles).map(([addr, bundle]) => {
|
||||
if (!bundle.attempts || bundle.attempts <= attemptSeparator) {
|
||||
return null;
|
||||
}
|
||||
return this.connectionAttemptTableRow(addr, bundle);
|
||||
})}
|
||||
</TableBody>
|
||||
<TableBody>
|
||||
{Object.entries(this.props.content.peers.bundles).map(([addr, bundle]) => {
|
||||
if (!bundle.attempts || bundle.attempts < 1 || bundle.attempts > attemptSeparator) {
|
||||
return null;
|
||||
}
|
||||
return this.connectionAttemptTableRow(addr, bundle);
|
||||
})}
|
||||
</TableBody>
|
||||
</Table>
|
||||
</Scrollbars>
|
||||
</div>
|
||||
</Grid>
|
||||
</Grid>
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
export default withStyles(themeStyles)(Network);
|
@ -1,123 +0,0 @@
|
||||
// @flow
|
||||
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import React, {Component} from 'react';
|
||||
|
||||
import withStyles from '@material-ui/core/styles/withStyles';
|
||||
import List from '@material-ui/core/List';
|
||||
import ListItem from '@material-ui/core/ListItem';
|
||||
import ListItemIcon from '@material-ui/core/ListItemIcon';
|
||||
import ListItemText from '@material-ui/core/ListItemText';
|
||||
import Icon from '@material-ui/core/Icon';
|
||||
import Transition from 'react-transition-group/Transition';
|
||||
import {FontAwesomeIcon} from '@fortawesome/react-fontawesome';
|
||||
|
||||
import {MENU, DURATION} from '../common';
|
||||
|
||||
// styles contains the constant styles of the component.
|
||||
const styles = {
|
||||
menu: {
|
||||
default: {
|
||||
transition: `margin-left ${DURATION}ms`,
|
||||
},
|
||||
transition: {
|
||||
entered: {marginLeft: -200},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
// themeStyles returns the styles generated from the theme for the component.
|
||||
const themeStyles = theme => ({
|
||||
list: {
|
||||
background: theme.palette.grey[900],
|
||||
},
|
||||
listItem: {
|
||||
minWidth: theme.spacing(7),
|
||||
color: theme.palette.common.white,
|
||||
},
|
||||
icon: {
|
||||
fontSize: theme.spacing(3),
|
||||
overflow: 'unset',
|
||||
},
|
||||
});
|
||||
|
||||
export type Props = {
|
||||
classes: Object, // injected by withStyles()
|
||||
opened: boolean,
|
||||
changeContent: string => void,
|
||||
};
|
||||
|
||||
type State = {}
|
||||
|
||||
// SideBar renders the sidebar of the dashboard.
|
||||
class SideBar extends Component<Props, State> {
|
||||
shouldComponentUpdate(nextProps: Readonly<Props>, nextState: Readonly<State>, nextContext: any) {
|
||||
return nextProps.opened !== this.props.opened;
|
||||
}
|
||||
|
||||
// clickOn returns a click event handler function for the given menu item.
|
||||
clickOn = menu => (event) => {
|
||||
event.preventDefault();
|
||||
this.props.changeContent(menu);
|
||||
};
|
||||
|
||||
// menuItems returns the menu items corresponding to the sidebar state.
|
||||
menuItems = (transitionState) => {
|
||||
const {classes} = this.props;
|
||||
const children = [];
|
||||
MENU.forEach((menu) => {
|
||||
children.push((
|
||||
<ListItem button key={menu.id} onClick={this.clickOn(menu.id)} className={classes.listItem}>
|
||||
<ListItemIcon>
|
||||
<Icon className={classes.icon}>
|
||||
<FontAwesomeIcon icon={menu.icon} />
|
||||
</Icon>
|
||||
</ListItemIcon>
|
||||
<ListItemText
|
||||
primary={menu.title}
|
||||
style={{
|
||||
...styles.menu.default,
|
||||
...styles.menu.transition[transitionState],
|
||||
padding: 0,
|
||||
}}
|
||||
/>
|
||||
</ListItem>
|
||||
));
|
||||
});
|
||||
return children;
|
||||
};
|
||||
|
||||
// menu renders the list of the menu items.
|
||||
menu = (transitionState: Object) => (
|
||||
<div className={this.props.classes.list}>
|
||||
<List>
|
||||
{this.menuItems(transitionState)}
|
||||
</List>
|
||||
</div>
|
||||
);
|
||||
|
||||
render() {
|
||||
return (
|
||||
<Transition mountOnEnter in={this.props.opened} timeout={{enter: DURATION}}>
|
||||
{this.menu}
|
||||
</Transition>
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
export default withStyles(themeStyles)(SideBar);
|
@ -1,23 +0,0 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// fa-only-woff-loader removes the .eot, .ttf, .svg dependencies of the FontAwesome library,
|
||||
// because they produce unused extra blobs.
|
||||
module.exports = content => content
|
||||
.replace(/src.*url(?!.*url.*(\.eot)).*(\.eot)[^;]*;/, '')
|
||||
.replace(/url(?!.*url.*(\.eot)).*(\.eot)[^,]*,/, '')
|
||||
.replace(/url(?!.*url.*(\.ttf)).*(\.ttf)[^,]*,/, '')
|
||||
.replace(/,[^,]*url(?!.*url.*(\.svg)).*(\.svg)[^;]*;/, ';');
|
@ -1,26 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en" style="height: 100%">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
|
||||
<title>Go Ethereum Dashboard</title>
|
||||
<link rel="shortcut icon" type="image/ico" href="https://ethereum.org/favicon.ico" />
|
||||
<style>
|
||||
::-webkit-scrollbar {
|
||||
width: 16px;
|
||||
}
|
||||
::-webkit-scrollbar-thumb {
|
||||
background: #212121;
|
||||
}
|
||||
::-webkit-scrollbar-corner {
|
||||
background: transparent;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body style="height: 100%; margin: 0">
|
||||
<div id="dashboard" style="height: 100%"></div>
|
||||
<script type="text/javascript" src="bundle.js"></script>
|
||||
</body>
|
||||
</html>
|
@ -1,44 +0,0 @@
|
||||
// @flow
|
||||
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import React from 'react';
|
||||
import {render} from 'react-dom';
|
||||
|
||||
import MuiThemeProvider from '@material-ui/core/styles/MuiThemeProvider';
|
||||
import createMuiTheme from '@material-ui/core/styles/createMuiTheme';
|
||||
|
||||
import Dashboard from './components/Dashboard';
|
||||
|
||||
const theme: Object = createMuiTheme({
|
||||
// typography: {
|
||||
// useNextVariants: true,
|
||||
// },
|
||||
palette: {
|
||||
type: 'dark',
|
||||
},
|
||||
});
|
||||
const dashboard = document.getElementById('dashboard');
|
||||
if (dashboard) {
|
||||
// Renders the whole dashboard.
|
||||
render(
|
||||
<MuiThemeProvider theme={theme}>
|
||||
<Dashboard />
|
||||
</MuiThemeProvider>,
|
||||
dashboard,
|
||||
);
|
||||
}
|
@ -1,67 +0,0 @@
|
||||
{
|
||||
"private": true,
|
||||
"dependencies": {
|
||||
"@babel/core": "7.4.5",
|
||||
"@babel/plugin-proposal-class-properties": "7.4.4",
|
||||
"@babel/plugin-proposal-function-bind": "7.2.0",
|
||||
"@babel/plugin-transform-flow-strip-types": "7.4.4",
|
||||
"@babel/preset-env": "7.4.5",
|
||||
"@babel/preset-react": "^7.0.0",
|
||||
"@babel/preset-stage-0": "^7.0.0",
|
||||
"@fortawesome/fontawesome-free-regular": "^5.0.13",
|
||||
"@fortawesome/fontawesome-svg-core": "1.2.18",
|
||||
"@fortawesome/free-regular-svg-icons": "5.8.2",
|
||||
"@fortawesome/free-solid-svg-icons": "5.8.2",
|
||||
"@fortawesome/react-fontawesome": "^0.1.4",
|
||||
"@material-ui/core": "4.0.1",
|
||||
"@material-ui/icons": "4.0.1",
|
||||
"babel-eslint": "10.0.1",
|
||||
"babel-loader": "8.0.6",
|
||||
"classnames": "^2.2.6",
|
||||
"color-convert": "^2.0.0",
|
||||
"css-loader": "2.1.1",
|
||||
"escape-html": "^1.0.3",
|
||||
"eslint": "5.16.0",
|
||||
"eslint-config-airbnb": "^17.0.0",
|
||||
"eslint-loader": "2.1.2",
|
||||
"eslint-plugin-flowtype": "3.9.1",
|
||||
"eslint-plugin-import": "2.17.3",
|
||||
"eslint-plugin-jsx-a11y": "6.2.1",
|
||||
"eslint-plugin-node": "9.1.0",
|
||||
"eslint-plugin-promise": "4.1.1",
|
||||
"eslint-plugin-react": "7.13.0",
|
||||
"file-loader": "3.0.1",
|
||||
"flow-bin": "0.98.1",
|
||||
"flow-bin-loader": "^1.0.3",
|
||||
"flow-typed": "2.5.2",
|
||||
"js-beautify": "1.10.0",
|
||||
"path": "^0.12.7",
|
||||
"react": "16.8.6",
|
||||
"react-custom-scrollbars": "^4.2.1",
|
||||
"react-dom": "16.8.6",
|
||||
"react-hot-loader": "4.8.8",
|
||||
"react-scrollbar": "0.5.6",
|
||||
"react-transition-group": "4.0.1",
|
||||
"recharts": "1.6.2",
|
||||
"style-loader": "0.23.1",
|
||||
"terser-webpack-plugin": "1.3.0",
|
||||
"url": "^0.11.0",
|
||||
"url-loader": "1.1.2",
|
||||
"webpack": "4.32.2",
|
||||
"webpack-cli": "3.3.2",
|
||||
"webpack-dashboard": "3.0.7",
|
||||
"webpack-dev-server": "3.4.1",
|
||||
"webpack-merge": "4.2.1"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "webpack --config webpack.config.prod.js",
|
||||
"stats": "webpack --config webpack.config.prod.js --profile --json > stats.json",
|
||||
"dev": "webpack-dev-server --open --config webpack.config.dev.js",
|
||||
"dash": "webpack-dashboard -- yarn dev",
|
||||
"install-flow": "flow-typed install",
|
||||
"flow": "flow status --show-all-errors",
|
||||
"eslint": "eslint **/*"
|
||||
},
|
||||
"sideEffects": false,
|
||||
"license": "LGPL-3.0-or-later"
|
||||
}
|
@ -1,145 +0,0 @@
|
||||
// @flow
|
||||
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
export type Content = {
|
||||
general: General,
|
||||
home: Home,
|
||||
chain: Chain,
|
||||
txpool: TxPool,
|
||||
network: Network,
|
||||
system: System,
|
||||
logs: Logs,
|
||||
};
|
||||
|
||||
export type ChartEntries = Array<ChartEntry>;
|
||||
|
||||
export type ChartEntry = {
|
||||
value: number,
|
||||
};
|
||||
|
||||
export type General = {
|
||||
version: ?string,
|
||||
commit: ?string,
|
||||
genesis: ?string,
|
||||
};
|
||||
|
||||
export type Home = {
|
||||
/* TODO (kurkomisi) */
|
||||
};
|
||||
|
||||
export type Chain = {
|
||||
currentBlock: Block,
|
||||
};
|
||||
|
||||
export type Block = {
|
||||
number: number,
|
||||
timestamp: number,
|
||||
}
|
||||
|
||||
export type TxPool = {
|
||||
/* TODO (kurkomisi) */
|
||||
};
|
||||
|
||||
export type Network = {
|
||||
peers: Peers,
|
||||
diff: Array<PeerEvent>,
|
||||
activePeerCount: number,
|
||||
};
|
||||
|
||||
export type PeerEvent = {
|
||||
name: string,
|
||||
addr: string,
|
||||
enode: string,
|
||||
protocols: {[string]: Object},
|
||||
remove: string,
|
||||
location: GeoLocation,
|
||||
connected: Date,
|
||||
disconnected: Date,
|
||||
ingress: ChartEntries,
|
||||
egress: ChartEntries,
|
||||
activity: string,
|
||||
};
|
||||
|
||||
export type Peers = {
|
||||
bundles: {[string]: PeerBundle},
|
||||
};
|
||||
|
||||
export type PeerBundle = {
|
||||
location: GeoLocation,
|
||||
knownPeers: {[string]: KnownPeer},
|
||||
attempts: number,
|
||||
};
|
||||
|
||||
export type KnownPeer = {
|
||||
connected: Array<Date>,
|
||||
disconnected: Array<Date>,
|
||||
ingress: Array<ChartEntries>,
|
||||
egress: Array<ChartEntries>,
|
||||
name: string,
|
||||
enode: string,
|
||||
protocols: {[string]: Object},
|
||||
active: boolean,
|
||||
};
|
||||
|
||||
export type GeoLocation = {
|
||||
country: string,
|
||||
city: string,
|
||||
latitude: number,
|
||||
longitude: number,
|
||||
};
|
||||
|
||||
export type System = {
|
||||
activeMemory: ChartEntries,
|
||||
virtualMemory: ChartEntries,
|
||||
networkIngress: ChartEntries,
|
||||
networkEgress: ChartEntries,
|
||||
processCPU: ChartEntries,
|
||||
systemCPU: ChartEntries,
|
||||
diskRead: ChartEntries,
|
||||
diskWrite: ChartEntries,
|
||||
};
|
||||
|
||||
export type Record = {
|
||||
t: string,
|
||||
lvl: Object,
|
||||
msg: string,
|
||||
ctx: Array<string>
|
||||
};
|
||||
|
||||
export type Chunk = {
|
||||
content: string,
|
||||
name: string,
|
||||
};
|
||||
|
||||
export type Logs = {
|
||||
chunks: Array<Chunk>,
|
||||
endTop: boolean,
|
||||
endBottom: boolean,
|
||||
topChanged: number,
|
||||
bottomChanged: number,
|
||||
};
|
||||
|
||||
export type LogsMessage = {
|
||||
source: ?LogFile,
|
||||
chunk: Array<Record>,
|
||||
};
|
||||
|
||||
export type LogFile = {
|
||||
name: string,
|
||||
last: string,
|
||||
};
|
@ -1,85 +0,0 @@
|
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
const path = require('path');
|
||||
|
||||
module.exports = {
|
||||
target: 'web',
|
||||
entry: {
|
||||
bundle: './index',
|
||||
},
|
||||
output: {
|
||||
filename: '[name].js',
|
||||
path: path.resolve(__dirname, ''),
|
||||
sourceMapFilename: '[file].map',
|
||||
},
|
||||
resolve: {
|
||||
modules: [
|
||||
'node_modules',
|
||||
path.resolve(__dirname, 'components'), // import './components/Component' -> import 'Component'
|
||||
],
|
||||
extensions: ['.js', '.jsx'],
|
||||
},
|
||||
module: {
|
||||
rules: [
|
||||
{
|
||||
test: /\.jsx$/, // regexp for JSX files
|
||||
exclude: /node_modules/,
|
||||
use: [ // order: from bottom to top
|
||||
{
|
||||
loader: 'babel-loader',
|
||||
options: {
|
||||
presets: [ // order: from bottom to top
|
||||
'@babel/env',
|
||||
'@babel/react',
|
||||
],
|
||||
plugins: [ // order: from top to bottom
|
||||
'@babel/proposal-function-bind', // instead of stage 0
|
||||
'@babel/proposal-class-properties', // static defaultProps
|
||||
'@babel/transform-flow-strip-types',
|
||||
'react-hot-loader/babel',
|
||||
],
|
||||
},
|
||||
},
|
||||
// 'eslint-loader', // show errors in the console
|
||||
],
|
||||
},
|
||||
{
|
||||
test: /\.css$/,
|
||||
oneOf: [
|
||||
{
|
||||
test: /font-awesome/,
|
||||
use: [
|
||||
'style-loader',
|
||||
'css-loader',
|
||||
path.resolve(__dirname, './fa-only-woff-loader.js'),
|
||||
],
|
||||
},
|
||||
{
|
||||
use: [
|
||||
'style-loader',
|
||||
'css-loader',
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
test: /\.woff2?$/, // font-awesome icons
|
||||
use: 'url-loader',
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
@ -1,35 +0,0 @@
|
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
const webpack = require('webpack');
|
||||
const merge = require('webpack-merge');
|
||||
const WebpackDashboard = require('webpack-dashboard/plugin');
|
||||
const common = require('./webpack.config.common.js');
|
||||
|
||||
module.exports = merge(common, {
|
||||
mode: 'development',
|
||||
plugins: [
|
||||
new WebpackDashboard(),
|
||||
new webpack.HotModuleReplacementPlugin(),
|
||||
],
|
||||
// devtool: 'eval',
|
||||
devtool: 'source-map',
|
||||
devServer: {
|
||||
port: 8081,
|
||||
hot: true,
|
||||
compress: true,
|
||||
},
|
||||
});
|
@ -1,41 +0,0 @@
|
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
const TerserPlugin = require('terser-webpack-plugin');
|
||||
const merge = require('webpack-merge');
|
||||
const common = require('./webpack.config.common.js');
|
||||
|
||||
module.exports = merge(common, {
|
||||
mode: 'production',
|
||||
devtool: 'source-map',
|
||||
optimization: {
|
||||
minimize: true,
|
||||
namedModules: true, // Module names instead of numbers - resolves the large diff problem.
|
||||
minimizer: [
|
||||
new TerserPlugin({
|
||||
cache: true,
|
||||
parallel: true,
|
||||
sourceMap: true,
|
||||
terserOptions: {
|
||||
output: {
|
||||
comments: false,
|
||||
beautify: true,
|
||||
},
|
||||
},
|
||||
}),
|
||||
],
|
||||
},
|
||||
});
|
File diff suppressed because it is too large
Load Diff
@ -1,77 +0,0 @@
|
||||
package dashboard
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/core"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
type block struct {
|
||||
Number int64 `json:"number,omitempty"`
|
||||
Time uint64 `json:"timestamp,omitempty"`
|
||||
}
|
||||
|
||||
func (db *Dashboard) collectChainData() {
|
||||
defer db.wg.Done()
|
||||
|
||||
var (
|
||||
currentBlock *block
|
||||
chainCh chan core.ChainHeadEvent
|
||||
chainSub event.Subscription
|
||||
)
|
||||
switch {
|
||||
case db.ethServ != nil:
|
||||
chain := db.ethServ.BlockChain()
|
||||
currentBlock = &block{
|
||||
Number: chain.CurrentHeader().Number.Int64(),
|
||||
Time: chain.CurrentHeader().Time,
|
||||
}
|
||||
chainCh = make(chan core.ChainHeadEvent)
|
||||
chainSub = chain.SubscribeChainHeadEvent(chainCh)
|
||||
case db.lesServ != nil:
|
||||
chain := db.lesServ.BlockChain()
|
||||
currentBlock = &block{
|
||||
Number: chain.CurrentHeader().Number.Int64(),
|
||||
Time: chain.CurrentHeader().Time,
|
||||
}
|
||||
chainCh = make(chan core.ChainHeadEvent)
|
||||
chainSub = chain.SubscribeChainHeadEvent(chainCh)
|
||||
default:
|
||||
errc := <-db.quit
|
||||
errc <- nil
|
||||
return
|
||||
}
|
||||
defer chainSub.Unsubscribe()
|
||||
|
||||
db.chainLock.Lock()
|
||||
db.history.Chain = &ChainMessage{
|
||||
CurrentBlock: currentBlock,
|
||||
}
|
||||
db.chainLock.Unlock()
|
||||
db.sendToAll(&Message{Chain: &ChainMessage{CurrentBlock: currentBlock}})
|
||||
|
||||
for {
|
||||
select {
|
||||
case e := <-chainCh:
|
||||
currentBlock := &block{
|
||||
Number: e.Block.Number().Int64(),
|
||||
Time: e.Block.Time(),
|
||||
}
|
||||
db.chainLock.Lock()
|
||||
db.history.Chain = &ChainMessage{
|
||||
CurrentBlock: currentBlock,
|
||||
}
|
||||
db.chainLock.Unlock()
|
||||
|
||||
db.sendToAll(&Message{Chain: &ChainMessage{CurrentBlock: currentBlock}})
|
||||
case err := <-chainSub.Err():
|
||||
log.Warn("Chain subscription error", "err", err)
|
||||
errc := <-db.quit
|
||||
errc <- nil
|
||||
return
|
||||
case errc := <-db.quit:
|
||||
errc <- nil
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
@ -1,41 +0,0 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package dashboard
|
||||
|
||||
import "time"
|
||||
|
||||
// DefaultConfig contains default settings for the dashboard.
|
||||
var DefaultConfig = Config{
|
||||
Host: "localhost",
|
||||
Port: 8080,
|
||||
Refresh: 5 * time.Second,
|
||||
}
|
||||
|
||||
// Config contains the configuration parameters of the dashboard.
|
||||
type Config struct {
|
||||
// Host is the host interface on which to start the dashboard server. If this
|
||||
// field is empty, no dashboard will be started.
|
||||
Host string `toml:",omitempty"`
|
||||
|
||||
// Port is the TCP port number on which to start the dashboard server. The
|
||||
// default zero value is/ valid and will pick a port number randomly (useful
|
||||
// for ephemeral nodes).
|
||||
Port int `toml:",omitempty"`
|
||||
|
||||
// Refresh is the refresh rate of the data updates, the chartEntry will be collected this often.
|
||||
Refresh time.Duration `toml:",omitempty"`
|
||||
}
|
@ -1,35 +0,0 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// +build !windows
|
||||
|
||||
package dashboard
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
)
|
||||
|
||||
// getProcessCPUTime retrieves the process' CPU time since program startup.
|
||||
func getProcessCPUTime() float64 {
|
||||
var usage syscall.Rusage
|
||||
if err := syscall.Getrusage(syscall.RUSAGE_SELF, &usage); err != nil {
|
||||
log.Warn("Failed to retrieve CPU time", "err", err)
|
||||
return 0
|
||||
}
|
||||
return float64(usage.Utime.Sec+usage.Stime.Sec) + float64(usage.Utime.Usec+usage.Stime.Usec)/1000000
|
||||
}
|
@ -1,23 +0,0 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package dashboard
|
||||
|
||||
// getProcessCPUTime returns 0 on Windows as there is no system call to resolve
|
||||
// the actual process' CPU time.
|
||||
func getProcessCPUTime() float64 {
|
||||
return 0
|
||||
}
|
@ -1,312 +0,0 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package dashboard
|
||||
|
||||
//go:generate yarn --cwd ./assets install
|
||||
//go:generate yarn --cwd ./assets build
|
||||
//go:generate yarn --cwd ./assets js-beautify -f bundle.js.map -r -w 1
|
||||
//go:generate go-bindata -nometadata -o assets.go -prefix assets -nocompress -pkg dashboard assets/index.html assets/bundle.js assets/bundle.js.map
|
||||
//go:generate sh -c "sed 's#var _bundleJs#//nolint:misspell\\\n&#' assets.go > assets.go.tmp && mv assets.go.tmp assets.go"
|
||||
//go:generate sh -c "sed 's#var _bundleJsMap#//nolint:misspell\\\n&#' assets.go > assets.go.tmp && mv assets.go.tmp assets.go"
|
||||
//go:generate sh -c "sed 's#var _indexHtml#//nolint:misspell\\\n&#' assets.go > assets.go.tmp && mv assets.go.tmp assets.go"
|
||||
//go:generate gofmt -w -s assets.go
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/eth"
|
||||
"github.com/ethereum/go-ethereum/event"
|
||||
"github.com/ethereum/go-ethereum/les"
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
"github.com/ethereum/go-ethereum/params"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
"github.com/mohae/deepcopy"
|
||||
"golang.org/x/net/websocket"
|
||||
)
|
||||
|
||||
const (
|
||||
sampleLimit = 200 // Maximum number of data samples
|
||||
dataCollectorCount = 4
|
||||
)
|
||||
|
||||
// Dashboard contains the dashboard internals.
|
||||
type Dashboard struct {
|
||||
config *Config // Configuration values for the dashboard
|
||||
|
||||
listener net.Listener // Network listener listening for dashboard clients
|
||||
conns map[uint32]*client // Currently live websocket connections
|
||||
nextConnID uint32 // Next connection id
|
||||
|
||||
history *Message // Stored historical data
|
||||
|
||||
lock sync.Mutex // Lock protecting the dashboard's internals
|
||||
chainLock sync.RWMutex // Lock protecting the stored blockchain data
|
||||
sysLock sync.RWMutex // Lock protecting the stored system data
|
||||
peerLock sync.RWMutex // Lock protecting the stored peer data
|
||||
logLock sync.RWMutex // Lock protecting the stored log data
|
||||
|
||||
geodb *geoDB // geoip database instance for IP to geographical information conversions
|
||||
logdir string // Directory containing the log files
|
||||
|
||||
quit chan chan error // Channel used for graceful exit
|
||||
wg sync.WaitGroup // Wait group used to close the data collector threads
|
||||
|
||||
peerCh chan p2p.MeteredPeerEvent // Peer event channel.
|
||||
subPeer event.Subscription // Peer event subscription.
|
||||
|
||||
ethServ *eth.Ethereum // Ethereum object serving internals.
|
||||
lesServ *les.LightEthereum // LightEthereum object serving internals.
|
||||
}
|
||||
|
||||
// client represents active websocket connection with a remote browser.
|
||||
type client struct {
|
||||
conn *websocket.Conn // Particular live websocket connection
|
||||
msg chan *Message // Message queue for the update messages
|
||||
logger log.Logger // Logger for the particular live websocket connection
|
||||
}
|
||||
|
||||
// New creates a new dashboard instance with the given configuration.
|
||||
func New(config *Config, ethServ *eth.Ethereum, lesServ *les.LightEthereum, commit string, logdir string) *Dashboard {
|
||||
// There is a data race between the network layer and the dashboard, which
|
||||
// can cause some lost peer events, therefore some peers might not appear
|
||||
// on the dashboard.
|
||||
// In order to solve this problem, the peer event subscription is registered
|
||||
// here, before the network layer starts.
|
||||
peerCh := make(chan p2p.MeteredPeerEvent, p2p.MeteredPeerLimit)
|
||||
versionMeta := ""
|
||||
if len(params.VersionMeta) > 0 {
|
||||
versionMeta = fmt.Sprintf(" (%s)", params.VersionMeta)
|
||||
}
|
||||
var genesis common.Hash
|
||||
if ethServ != nil {
|
||||
genesis = ethServ.BlockChain().Genesis().Hash()
|
||||
} else if lesServ != nil {
|
||||
genesis = lesServ.BlockChain().Genesis().Hash()
|
||||
}
|
||||
return &Dashboard{
|
||||
conns: make(map[uint32]*client),
|
||||
config: config,
|
||||
quit: make(chan chan error),
|
||||
history: &Message{
|
||||
General: &GeneralMessage{
|
||||
Commit: commit,
|
||||
Version: fmt.Sprintf("v%d.%d.%d%s", params.VersionMajor, params.VersionMinor, params.VersionPatch, versionMeta),
|
||||
Genesis: genesis,
|
||||
},
|
||||
System: &SystemMessage{
|
||||
ActiveMemory: emptyChartEntries(sampleLimit),
|
||||
VirtualMemory: emptyChartEntries(sampleLimit),
|
||||
NetworkIngress: emptyChartEntries(sampleLimit),
|
||||
NetworkEgress: emptyChartEntries(sampleLimit),
|
||||
ProcessCPU: emptyChartEntries(sampleLimit),
|
||||
SystemCPU: emptyChartEntries(sampleLimit),
|
||||
DiskRead: emptyChartEntries(sampleLimit),
|
||||
DiskWrite: emptyChartEntries(sampleLimit),
|
||||
},
|
||||
},
|
||||
logdir: logdir,
|
||||
peerCh: peerCh,
|
||||
subPeer: p2p.SubscribeMeteredPeerEvent(peerCh),
|
||||
ethServ: ethServ,
|
||||
lesServ: lesServ,
|
||||
}
|
||||
}
|
||||
|
||||
// emptyChartEntries returns a ChartEntry array containing limit number of empty samples.
|
||||
func emptyChartEntries(limit int) ChartEntries {
|
||||
ce := make(ChartEntries, limit)
|
||||
for i := 0; i < limit; i++ {
|
||||
ce[i] = new(ChartEntry)
|
||||
}
|
||||
return ce
|
||||
}
|
||||
|
||||
// Protocols implements the node.Service interface.
|
||||
func (db *Dashboard) Protocols() []p2p.Protocol { return nil }
|
||||
|
||||
// APIs implements the node.Service interface.
|
||||
func (db *Dashboard) APIs() []rpc.API { return nil }
|
||||
|
||||
// Start starts the data collection thread and the listening server of the dashboard.
|
||||
// Implements the node.Service interface.
|
||||
func (db *Dashboard) Start(server *p2p.Server) error {
|
||||
log.Info("Starting dashboard", "url", fmt.Sprintf("http://%s:%d", db.config.Host, db.config.Port))
|
||||
|
||||
db.wg.Add(dataCollectorCount)
|
||||
go db.collectChainData()
|
||||
go db.collectSystemData()
|
||||
go db.streamLogs()
|
||||
go db.collectPeerData()
|
||||
|
||||
http.HandleFunc("/", db.webHandler)
|
||||
http.Handle("/api", websocket.Handler(db.apiHandler))
|
||||
|
||||
listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", db.config.Host, db.config.Port))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
db.listener = listener
|
||||
|
||||
go func() {
|
||||
if err := http.Serve(listener, nil); err != http.ErrServerClosed {
|
||||
log.Warn("Could not accept incoming HTTP connections", "err", err)
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops the data collection thread and the connection listener of the dashboard.
|
||||
// Implements the node.Service interface.
|
||||
func (db *Dashboard) Stop() error {
|
||||
// Close the connection listener.
|
||||
var errs []error
|
||||
if err := db.listener.Close(); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
// Close the collectors.
|
||||
errc := make(chan error, dataCollectorCount)
|
||||
for i := 0; i < dataCollectorCount; i++ {
|
||||
db.quit <- errc
|
||||
if err := <-errc; err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
// Close the connections.
|
||||
db.lock.Lock()
|
||||
for _, c := range db.conns {
|
||||
if err := c.conn.Close(); err != nil {
|
||||
c.logger.Warn("Failed to close connection", "err", err)
|
||||
}
|
||||
}
|
||||
db.lock.Unlock()
|
||||
|
||||
// Wait until every goroutine terminates.
|
||||
db.wg.Wait()
|
||||
log.Info("Dashboard stopped")
|
||||
|
||||
var err error
|
||||
if len(errs) > 0 {
|
||||
err = fmt.Errorf("%v", errs)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// webHandler handles all non-api requests, simply flattening and returning the dashboard website.
|
||||
func (db *Dashboard) webHandler(w http.ResponseWriter, r *http.Request) {
|
||||
log.Debug("Request", "URL", r.URL)
|
||||
|
||||
path := r.URL.String()
|
||||
if path == "/" {
|
||||
path = "/index.html"
|
||||
}
|
||||
blob, err := Asset(path[1:])
|
||||
if err != nil {
|
||||
log.Warn("Failed to load the asset", "path", path, "err", err)
|
||||
http.Error(w, "not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
w.Write(blob)
|
||||
}
|
||||
|
||||
// apiHandler handles requests for the dashboard.
|
||||
func (db *Dashboard) apiHandler(conn *websocket.Conn) {
|
||||
id := atomic.AddUint32(&db.nextConnID, 1)
|
||||
client := &client{
|
||||
conn: conn,
|
||||
msg: make(chan *Message, 128),
|
||||
logger: log.New("id", id),
|
||||
}
|
||||
done := make(chan struct{})
|
||||
|
||||
// Start listening for messages to send.
|
||||
db.wg.Add(1)
|
||||
go func() {
|
||||
defer db.wg.Done()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-done:
|
||||
return
|
||||
case msg := <-client.msg:
|
||||
if err := websocket.JSON.Send(client.conn, msg); err != nil {
|
||||
client.logger.Warn("Failed to send the message", "msg", msg, "err", err)
|
||||
client.conn.Close()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Send the past data.
|
||||
db.chainLock.RLock()
|
||||
db.sysLock.RLock()
|
||||
db.peerLock.RLock()
|
||||
db.logLock.RLock()
|
||||
|
||||
h := deepcopy.Copy(db.history).(*Message)
|
||||
|
||||
db.chainLock.RUnlock()
|
||||
db.sysLock.RUnlock()
|
||||
db.peerLock.RUnlock()
|
||||
db.logLock.RUnlock()
|
||||
|
||||
// Start tracking the connection and drop at connection loss.
|
||||
db.lock.Lock()
|
||||
client.msg <- h
|
||||
db.conns[id] = client
|
||||
db.lock.Unlock()
|
||||
defer func() {
|
||||
db.lock.Lock()
|
||||
delete(db.conns, id)
|
||||
db.lock.Unlock()
|
||||
}()
|
||||
for {
|
||||
r := new(Request)
|
||||
if err := websocket.JSON.Receive(conn, r); err != nil {
|
||||
if err != io.EOF {
|
||||
client.logger.Warn("Failed to receive request", "err", err)
|
||||
}
|
||||
close(done)
|
||||
return
|
||||
}
|
||||
if r.Logs != nil {
|
||||
db.handleLogRequest(r.Logs, client)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// sendToAll sends the given message to the active dashboards.
|
||||
func (db *Dashboard) sendToAll(msg *Message) {
|
||||
db.lock.Lock()
|
||||
for _, c := range db.conns {
|
||||
select {
|
||||
case c.msg <- msg:
|
||||
default:
|
||||
c.conn.Close()
|
||||
}
|
||||
}
|
||||
db.lock.Unlock()
|
||||
}
|
@ -1,98 +0,0 @@
|
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package dashboard
|
||||
|
||||
import (
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/apilayer/freegeoip"
|
||||
)
|
||||
|
||||
// geoDBInfo contains all the geographical information we could extract based on an IP
|
||||
// address.
|
||||
type geoDBInfo struct {
|
||||
Country struct {
|
||||
Names struct {
|
||||
English string `maxminddb:"en" json:"en,omitempty"`
|
||||
} `maxminddb:"names" json:"names,omitempty"`
|
||||
} `maxminddb:"country" json:"country,omitempty"`
|
||||
City struct {
|
||||
Names struct {
|
||||
English string `maxminddb:"en" json:"en,omitempty"`
|
||||
} `maxminddb:"names" json:"names,omitempty"`
|
||||
} `maxminddb:"city" json:"city,omitempty"`
|
||||
Location struct {
|
||||
Latitude float64 `maxminddb:"latitude" json:"latitude,omitempty"`
|
||||
Longitude float64 `maxminddb:"longitude" json:"longitude,omitempty"`
|
||||
} `maxminddb:"location" json:"location,omitempty"`
|
||||
}
|
||||
|
||||
// geoLocation contains geographical information.
|
||||
type geoLocation struct {
|
||||
Country string `json:"country,omitempty"`
|
||||
City string `json:"city,omitempty"`
|
||||
Latitude float64 `json:"latitude,omitempty"`
|
||||
Longitude float64 `json:"longitude,omitempty"`
|
||||
}
|
||||
|
||||
// geoDB represents a geoip database that can be queried for IP to geographical
|
||||
// information conversions.
|
||||
type geoDB struct {
|
||||
geodb *freegeoip.DB
|
||||
}
|
||||
|
||||
// Open creates a new geoip database with an up-to-date database from the internet.
|
||||
func openGeoDB() (*geoDB, error) {
|
||||
// Initiate a geoip database to cross reference locations
|
||||
db, err := freegeoip.OpenURL(freegeoip.MaxMindDB, 24*time.Hour, time.Hour)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Wait until the database is updated to the latest data
|
||||
select {
|
||||
case <-db.NotifyOpen():
|
||||
case err := <-db.NotifyError():
|
||||
return nil, err
|
||||
}
|
||||
// Assemble and return our custom wrapper
|
||||
return &geoDB{geodb: db}, nil
|
||||
}
|
||||
|
||||
// Close terminates the database background updater.
|
||||
func (db *geoDB) close() error {
|
||||
db.geodb.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Lookup converts an IP address to a geographical location.
|
||||
func (db *geoDB) lookup(ip net.IP) *geoDBInfo {
|
||||
result := new(geoDBInfo)
|
||||
db.geodb.Lookup(ip, result)
|
||||
return result
|
||||
}
|
||||
|
||||
// Location retrieves the geographical location of the given IP address.
|
||||
func (db *geoDB) location(ip string) *geoLocation {
|
||||
location := db.lookup(net.ParseIP(ip))
|
||||
return &geoLocation{
|
||||
Country: location.Country.Names.English,
|
||||
City: location.City.Names.English,
|
||||
Latitude: location.Location.Latitude,
|
||||
Longitude: location.Location.Longitude,
|
||||
}
|
||||
}
|
288
dashboard/log.go
288
dashboard/log.go
@ -1,288 +0,0 @@
|
||||
// Copyright 2018 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package dashboard
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/mohae/deepcopy"
|
||||
"github.com/rjeczalik/notify"
|
||||
)
|
||||
|
||||
var emptyChunk = json.RawMessage("[]")
|
||||
|
||||
// prepLogs creates a JSON array from the given log record buffer.
|
||||
// Returns the prepared array and the position of the last '\n'
|
||||
// character in the original buffer, or -1 if it doesn't contain any.
|
||||
func prepLogs(buf []byte) (json.RawMessage, int) {
|
||||
b := make(json.RawMessage, 1, len(buf)+1)
|
||||
b[0] = '['
|
||||
b = append(b, buf...)
|
||||
last := -1
|
||||
for i := 1; i < len(b); i++ {
|
||||
if b[i] == '\n' {
|
||||
b[i] = ','
|
||||
last = i
|
||||
}
|
||||
}
|
||||
if last < 0 {
|
||||
return emptyChunk, -1
|
||||
}
|
||||
b[last] = ']'
|
||||
return b[:last+1], last - 1
|
||||
}
|
||||
|
||||
// handleLogRequest searches for the log file specified by the timestamp of the
|
||||
// request, creates a JSON array out of it and sends it to the requesting client.
|
||||
func (db *Dashboard) handleLogRequest(r *LogsRequest, c *client) {
|
||||
files, err := ioutil.ReadDir(db.logdir)
|
||||
if err != nil {
|
||||
log.Warn("Failed to open logdir", "path", db.logdir, "err", err)
|
||||
return
|
||||
}
|
||||
re := regexp.MustCompile(`\.log$`)
|
||||
fileNames := make([]string, 0, len(files))
|
||||
for _, f := range files {
|
||||
if f.Mode().IsRegular() && re.MatchString(f.Name()) {
|
||||
fileNames = append(fileNames, f.Name())
|
||||
}
|
||||
}
|
||||
if len(fileNames) < 1 {
|
||||
log.Warn("No log files in logdir", "path", db.logdir)
|
||||
return
|
||||
}
|
||||
idx := sort.Search(len(fileNames), func(idx int) bool {
|
||||
// Returns the smallest index such as fileNames[idx] >= r.Name,
|
||||
// if there is no such index, returns n.
|
||||
return fileNames[idx] >= r.Name
|
||||
})
|
||||
|
||||
switch {
|
||||
case idx < 0:
|
||||
return
|
||||
case idx == 0 && r.Past:
|
||||
return
|
||||
case idx >= len(fileNames):
|
||||
return
|
||||
case r.Past:
|
||||
idx--
|
||||
case idx == len(fileNames)-1 && fileNames[idx] == r.Name:
|
||||
return
|
||||
case idx == len(fileNames)-1 || (idx == len(fileNames)-2 && fileNames[idx] == r.Name):
|
||||
// The last file is continuously updated, and its chunks are streamed,
|
||||
// so in order to avoid log record duplication on the client side, it is
|
||||
// handled differently. Its actual content is always saved in the history.
|
||||
db.logLock.RLock()
|
||||
if db.history.Logs != nil {
|
||||
c.msg <- &Message{
|
||||
Logs: deepcopy.Copy(db.history.Logs).(*LogsMessage),
|
||||
}
|
||||
}
|
||||
db.logLock.RUnlock()
|
||||
return
|
||||
case fileNames[idx] == r.Name:
|
||||
idx++
|
||||
}
|
||||
|
||||
path := filepath.Join(db.logdir, fileNames[idx])
|
||||
var buf []byte
|
||||
if buf, err = ioutil.ReadFile(path); err != nil {
|
||||
log.Warn("Failed to read file", "path", path, "err", err)
|
||||
return
|
||||
}
|
||||
chunk, end := prepLogs(buf)
|
||||
if end < 0 {
|
||||
log.Warn("The file doesn't contain valid logs", "path", path)
|
||||
return
|
||||
}
|
||||
c.msg <- &Message{
|
||||
Logs: &LogsMessage{
|
||||
Source: &LogFile{
|
||||
Name: fileNames[idx],
|
||||
Last: r.Past && idx == 0,
|
||||
},
|
||||
Chunk: chunk,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// streamLogs watches the file system, and when the logger writes
|
||||
// the new log records into the files, picks them up, then makes
|
||||
// JSON array out of them and sends them to the clients.
|
||||
func (db *Dashboard) streamLogs() {
|
||||
defer db.wg.Done()
|
||||
var (
|
||||
err error
|
||||
errc chan error
|
||||
)
|
||||
defer func() {
|
||||
if errc == nil {
|
||||
errc = <-db.quit
|
||||
}
|
||||
errc <- err
|
||||
}()
|
||||
|
||||
files, err := ioutil.ReadDir(db.logdir)
|
||||
if err != nil {
|
||||
log.Warn("Failed to open logdir", "path", db.logdir, "err", err)
|
||||
return
|
||||
}
|
||||
var (
|
||||
opened *os.File // File descriptor for the opened active log file.
|
||||
buf []byte // Contains the recently written log chunks, which are not sent to the clients yet.
|
||||
)
|
||||
|
||||
// The log records are always written into the last file in alphabetical order, because of the timestamp.
|
||||
re := regexp.MustCompile(`\.log$`)
|
||||
i := len(files) - 1
|
||||
for i >= 0 && (!files[i].Mode().IsRegular() || !re.MatchString(files[i].Name())) {
|
||||
i--
|
||||
}
|
||||
if i < 0 {
|
||||
log.Warn("No log files in logdir", "path", db.logdir)
|
||||
return
|
||||
}
|
||||
if opened, err = os.OpenFile(filepath.Join(db.logdir, files[i].Name()), os.O_RDONLY, 0600); err != nil {
|
||||
log.Warn("Failed to open file", "name", files[i].Name(), "err", err)
|
||||
return
|
||||
}
|
||||
defer opened.Close() // Close the lastly opened file.
|
||||
fi, err := opened.Stat()
|
||||
if err != nil {
|
||||
log.Warn("Problem with file", "name", opened.Name(), "err", err)
|
||||
return
|
||||
}
|
||||
db.logLock.Lock()
|
||||
db.history.Logs = &LogsMessage{
|
||||
Source: &LogFile{
|
||||
Name: fi.Name(),
|
||||
Last: true,
|
||||
},
|
||||
Chunk: emptyChunk,
|
||||
}
|
||||
db.logLock.Unlock()
|
||||
|
||||
watcher := make(chan notify.EventInfo, 10)
|
||||
if err := notify.Watch(db.logdir, watcher, notify.Create); err != nil {
|
||||
log.Warn("Failed to create file system watcher", "err", err)
|
||||
return
|
||||
}
|
||||
defer notify.Stop(watcher)
|
||||
|
||||
ticker := time.NewTicker(db.config.Refresh)
|
||||
defer ticker.Stop()
|
||||
|
||||
loop:
|
||||
for err == nil || errc == nil {
|
||||
select {
|
||||
case event := <-watcher:
|
||||
// Make sure that new log file was created.
|
||||
if !re.Match([]byte(event.Path())) {
|
||||
break
|
||||
}
|
||||
if opened == nil {
|
||||
log.Warn("The last log file is not opened")
|
||||
break loop
|
||||
}
|
||||
// The new log file's name is always greater,
|
||||
// because it is created using the actual log record's time.
|
||||
if opened.Name() >= event.Path() {
|
||||
break
|
||||
}
|
||||
// Read the rest of the previously opened file.
|
||||
chunk, err := ioutil.ReadAll(opened)
|
||||
if err != nil {
|
||||
log.Warn("Failed to read file", "name", opened.Name(), "err", err)
|
||||
break loop
|
||||
}
|
||||
buf = append(buf, chunk...)
|
||||
opened.Close()
|
||||
|
||||
if chunk, last := prepLogs(buf); last >= 0 {
|
||||
// Send the rest of the previously opened file.
|
||||
db.sendToAll(&Message{
|
||||
Logs: &LogsMessage{
|
||||
Chunk: chunk,
|
||||
},
|
||||
})
|
||||
}
|
||||
if opened, err = os.OpenFile(event.Path(), os.O_RDONLY, 0644); err != nil {
|
||||
log.Warn("Failed to open file", "name", event.Path(), "err", err)
|
||||
break loop
|
||||
}
|
||||
buf = buf[:0]
|
||||
|
||||
// Change the last file in the history.
|
||||
fi, err := opened.Stat()
|
||||
if err != nil {
|
||||
log.Warn("Problem with file", "name", opened.Name(), "err", err)
|
||||
break loop
|
||||
}
|
||||
db.logLock.Lock()
|
||||
db.history.Logs.Source.Name = fi.Name()
|
||||
db.history.Logs.Chunk = emptyChunk
|
||||
db.logLock.Unlock()
|
||||
case <-ticker.C: // Send log updates to the client.
|
||||
if opened == nil {
|
||||
log.Warn("The last log file is not opened")
|
||||
break loop
|
||||
}
|
||||
// Read the new logs created since the last read.
|
||||
chunk, err := ioutil.ReadAll(opened)
|
||||
if err != nil {
|
||||
log.Warn("Failed to read file", "name", opened.Name(), "err", err)
|
||||
break loop
|
||||
}
|
||||
b := append(buf, chunk...)
|
||||
|
||||
chunk, last := prepLogs(b)
|
||||
if last < 0 {
|
||||
break
|
||||
}
|
||||
// Only keep the invalid part of the buffer, which can be valid after the next read.
|
||||
buf = b[last+1:]
|
||||
|
||||
var l *LogsMessage
|
||||
// Update the history.
|
||||
db.logLock.Lock()
|
||||
if bytes.Equal(db.history.Logs.Chunk, emptyChunk) {
|
||||
db.history.Logs.Chunk = chunk
|
||||
l = deepcopy.Copy(db.history.Logs).(*LogsMessage)
|
||||
} else {
|
||||
b = make([]byte, len(db.history.Logs.Chunk)+len(chunk)-1)
|
||||
copy(b, db.history.Logs.Chunk)
|
||||
b[len(db.history.Logs.Chunk)-1] = ','
|
||||
copy(b[len(db.history.Logs.Chunk):], chunk[1:])
|
||||
db.history.Logs.Chunk = b
|
||||
l = &LogsMessage{Chunk: chunk}
|
||||
}
|
||||
db.logLock.Unlock()
|
||||
|
||||
db.sendToAll(&Message{Logs: l})
|
||||
case errc = <-db.quit:
|
||||
break loop
|
||||
}
|
||||
}
|
||||
}
|
@ -1,99 +0,0 @@
|
||||
// Copyright 2017 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package dashboard
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
type Message struct {
|
||||
General *GeneralMessage `json:"general,omitempty"`
|
||||
Home *HomeMessage `json:"home,omitempty"`
|
||||
Chain *ChainMessage `json:"chain,omitempty"`
|
||||
TxPool *TxPoolMessage `json:"txpool,omitempty"`
|
||||
Network *NetworkMessage `json:"network,omitempty"`
|
||||
System *SystemMessage `json:"system,omitempty"`
|
||||
Logs *LogsMessage `json:"logs,omitempty"`
|
||||
}
|
||||
|
||||
type ChartEntries []*ChartEntry
|
||||
|
||||
type ChartEntry struct {
|
||||
Value float64 `json:"value"`
|
||||
}
|
||||
|
||||
type GeneralMessage struct {
|
||||
Version string `json:"version,omitempty"`
|
||||
Commit string `json:"commit,omitempty"`
|
||||
Genesis common.Hash `json:"genesis,omitempty"`
|
||||
}
|
||||
|
||||
type HomeMessage struct {
|
||||
/* TODO (kurkomisi) */
|
||||
}
|
||||
|
||||
type ChainMessage struct {
|
||||
CurrentBlock *block `json:"currentBlock,omitempty"`
|
||||
}
|
||||
|
||||
type TxPoolMessage struct {
|
||||
/* TODO (kurkomisi) */
|
||||
}
|
||||
|
||||
// NetworkMessage contains information about the peers
|
||||
// organized based on their IP address and node ID.
|
||||
type NetworkMessage struct {
|
||||
Peers *peerContainer `json:"peers,omitempty"` // Peer tree.
|
||||
Diff []*peerEvent `json:"diff,omitempty"` // Events that change the peer tree.
|
||||
}
|
||||
|
||||
// SystemMessage contains the metered system data samples.
|
||||
type SystemMessage struct {
|
||||
ActiveMemory ChartEntries `json:"activeMemory,omitempty"`
|
||||
VirtualMemory ChartEntries `json:"virtualMemory,omitempty"`
|
||||
NetworkIngress ChartEntries `json:"networkIngress,omitempty"`
|
||||
NetworkEgress ChartEntries `json:"networkEgress,omitempty"`
|
||||
ProcessCPU ChartEntries `json:"processCPU,omitempty"`
|
||||
SystemCPU ChartEntries `json:"systemCPU,omitempty"`
|
||||
DiskRead ChartEntries `json:"diskRead,omitempty"`
|
||||
DiskWrite ChartEntries `json:"diskWrite,omitempty"`
|
||||
}
|
||||
|
||||
// LogsMessage wraps up a log chunk. If 'Source' isn't present, the chunk is a stream chunk.
|
||||
type LogsMessage struct {
|
||||
Source *LogFile `json:"source,omitempty"` // Attributes of the log file.
|
||||
Chunk json.RawMessage `json:"chunk"` // Contains log records.
|
||||
}
|
||||
|
||||
// LogFile contains the attributes of a log file.
|
||||
type LogFile struct {
|
||||
Name string `json:"name"` // The name of the file.
|
||||
Last bool `json:"last"` // Denotes if the actual log file is the last one in the directory.
|
||||
}
|
||||
|
||||
// Request represents the client request.
|
||||
type Request struct {
|
||||
Logs *LogsRequest `json:"logs,omitempty"`
|
||||
}
|
||||
|
||||
// LogsRequest contains the attributes of the log file the client wants to receive.
|
||||
type LogsRequest struct {
|
||||
Name string `json:"name"` // The request handler searches for log file based on this file name.
|
||||
Past bool `json:"past"` // Denotes whether the client wants the previous or the next file.
|
||||
}
|
@ -1,530 +0,0 @@
|
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package dashboard
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
|
||||
"github.com/ethereum/go-ethereum/log"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
)
|
||||
|
||||
const (
|
||||
knownPeerLimit = 100 // Maximum number of stored peers, which successfully made the handshake.
|
||||
|
||||
// eventLimit is the maximum number of the dashboard's custom peer events,
|
||||
// that are collected between two metering period and sent to the clients
|
||||
// as one message.
|
||||
// TODO (kurkomisi): Limit the number of events.
|
||||
eventLimit = knownPeerLimit << 2
|
||||
)
|
||||
|
||||
// peerContainer contains information about the node's peers. This data structure
|
||||
// maintains the metered peer data based on the different behaviours of the peers.
|
||||
//
|
||||
// Every peer has an IP address, and the peers that manage to make the handshake
|
||||
// (known peers) have node IDs too. There can appear more peers with the same IP,
|
||||
// therefore the peer container data structure is a tree consisting of a map of
|
||||
// maps, where the first key groups the peers by IP, while the second one groups
|
||||
// them by the node ID. The known peers can be active if their connection is still
|
||||
// open, or inactive otherwise. The peers failing before the handshake (unknown
|
||||
// peers) only have IP addresses, so their connection attempts are stored as part
|
||||
// of the value of the outer map.
|
||||
//
|
||||
// Another criteria is to limit the number of metered peers so that
|
||||
// they don't fill the memory. The selection order is based on the
|
||||
// peers activity: the peers that are inactive for the longest time
|
||||
// are thrown first. For the selection a fifo list is used which is
|
||||
// linked to the bottom of the peer tree in a way that every activity
|
||||
// of the peer pushes the peer to the end of the list, so the inactive
|
||||
// ones come to the front. When a peer has some activity, it is removed
|
||||
// from and reinserted into the list. When the length of the list reaches
|
||||
// the limit, the first element is removed from the list, as well as from
|
||||
// the tree.
|
||||
//
|
||||
// The active peers have priority over the inactive ones, therefore
|
||||
// they have their own list. The separation makes it sure that the
|
||||
// inactive peers are always removed before the active ones.
|
||||
//
|
||||
// The peers that don't manage to make handshake are not inserted into the list,
|
||||
// only their connection attempts are appended to the array belonging to their IP.
|
||||
// In order to keep the fifo principle, a super array contains the order of the
|
||||
// attempts, and when the overall count reaches the limit, the earliest attempt is
|
||||
// removed from the beginning of its array.
|
||||
//
|
||||
// This data structure makes it possible to marshal the peer
|
||||
// history simply by passing it to the JSON marshaler.
|
||||
type peerContainer struct {
|
||||
// Bundles is the outer map using the peer's IP address as key.
|
||||
Bundles map[string]*peerBundle `json:"bundles,omitempty"`
|
||||
|
||||
activeCount int // Number of the still connected peers
|
||||
|
||||
// inactivePeers contains the peers with closed connection in chronological order.
|
||||
inactivePeers *list.List
|
||||
|
||||
// geodb is the geoip database used to retrieve the peers' geographical location.
|
||||
geodb *geoDB
|
||||
}
|
||||
|
||||
// newPeerContainer returns a new instance of the peer container.
|
||||
func newPeerContainer(geodb *geoDB) *peerContainer {
|
||||
return &peerContainer{
|
||||
Bundles: make(map[string]*peerBundle),
|
||||
inactivePeers: list.New(),
|
||||
geodb: geodb,
|
||||
}
|
||||
}
|
||||
|
||||
// bundle inserts a new peer bundle into the map, if the peer belonging
|
||||
// to the given IP wasn't metered so far. In this case retrieves the location of
|
||||
// the IP address from the database and creates a corresponding peer event.
|
||||
// Returns the bundle belonging to the given IP and the events occurring during
|
||||
// the initialization.
|
||||
func (pc *peerContainer) bundle(addr string) (*peerBundle, []*peerEvent) {
|
||||
var events []*peerEvent
|
||||
if _, ok := pc.Bundles[addr]; !ok {
|
||||
i := strings.IndexByte(addr, ':')
|
||||
if i < 0 {
|
||||
i = len(addr)
|
||||
}
|
||||
location := pc.geodb.location(addr[:i])
|
||||
events = append(events, &peerEvent{
|
||||
Addr: addr,
|
||||
Location: location,
|
||||
})
|
||||
pc.Bundles[addr] = &peerBundle{
|
||||
Location: location,
|
||||
KnownPeers: make(map[string]*knownPeer),
|
||||
}
|
||||
}
|
||||
return pc.Bundles[addr], events
|
||||
}
|
||||
|
||||
// extendKnown handles the events of the successfully connected peers.
|
||||
// Returns the events occurring during the extension.
|
||||
func (pc *peerContainer) extendKnown(event *peerEvent) []*peerEvent {
|
||||
bundle, events := pc.bundle(event.Addr)
|
||||
peer, peerEvents := bundle.knownPeer(event.Addr, event.Enode)
|
||||
events = append(events, peerEvents...)
|
||||
// Append the connect and the disconnect events to
|
||||
// the corresponding arrays keeping the limit.
|
||||
switch {
|
||||
case event.Connected != nil: // Handshake succeeded
|
||||
peer.Connected = append(peer.Connected, event.Connected)
|
||||
if first := len(peer.Connected) - sampleLimit; first > 0 {
|
||||
peer.Connected = peer.Connected[first:]
|
||||
}
|
||||
if event.peer == nil {
|
||||
log.Warn("Peer handshake succeeded event without peer instance", "addr", event.Addr, "enode", event.Enode)
|
||||
}
|
||||
peer.peer = event.peer
|
||||
info := event.peer.Info()
|
||||
peer.Name = info.Name
|
||||
peer.Protocols = info.Protocols
|
||||
peer.Active = true
|
||||
e := &peerEvent{
|
||||
Activity: Active,
|
||||
Name: info.Name,
|
||||
Addr: peer.addr,
|
||||
Enode: peer.enode,
|
||||
Protocols: peer.Protocols,
|
||||
}
|
||||
events = append(events, e)
|
||||
pc.activeCount++
|
||||
if peer.listElement != nil {
|
||||
_ = pc.inactivePeers.Remove(peer.listElement)
|
||||
peer.listElement = nil
|
||||
}
|
||||
case event.Disconnected != nil: // Peer disconnected
|
||||
peer.Disconnected = append(peer.Disconnected, event.Disconnected)
|
||||
if first := len(peer.Disconnected) - sampleLimit; first > 0 {
|
||||
peer.Disconnected = peer.Disconnected[first:]
|
||||
}
|
||||
peer.Active = false
|
||||
events = append(events, &peerEvent{
|
||||
Activity: Inactive,
|
||||
Addr: peer.addr,
|
||||
Enode: peer.enode,
|
||||
})
|
||||
pc.activeCount--
|
||||
if peer.listElement != nil {
|
||||
// If the peer is already in the list, remove and reinsert it.
|
||||
_ = pc.inactivePeers.Remove(peer.listElement)
|
||||
}
|
||||
// Insert the peer into the list.
|
||||
peer.listElement = pc.inactivePeers.PushBack(peer)
|
||||
default:
|
||||
log.Warn("Unexpected known peer event", "event", *event)
|
||||
}
|
||||
for pc.inactivePeers.Len() > 0 && pc.activeCount+pc.inactivePeers.Len() > knownPeerLimit {
|
||||
// While the count of the known peers is greater than the limit,
|
||||
// remove the first element from the inactive peer list and from the map.
|
||||
if removedPeer, ok := pc.inactivePeers.Remove(pc.inactivePeers.Front()).(*knownPeer); ok {
|
||||
events = append(events, pc.removeKnown(removedPeer.addr, removedPeer.enode)...)
|
||||
} else {
|
||||
log.Warn("Failed to parse the removed peer")
|
||||
}
|
||||
}
|
||||
if pc.activeCount > knownPeerLimit {
|
||||
log.Warn("Number of active peers is greater than the limit")
|
||||
}
|
||||
return events
|
||||
}
|
||||
|
||||
// peerBundle contains the peers belonging to a given IP address.
|
||||
type peerBundle struct {
|
||||
// Location contains the geographical location based on the bundle's IP address.
|
||||
Location *geoLocation `json:"location,omitempty"`
|
||||
|
||||
// KnownPeers is the inner map of the metered peer
|
||||
// maintainer data structure using the node ID as key.
|
||||
KnownPeers map[string]*knownPeer `json:"knownPeers,omitempty"`
|
||||
|
||||
// Attempts contains the count of the failed connection
|
||||
// attempts of the peers belonging to a given IP address.
|
||||
Attempts uint `json:"attempts,omitempty"`
|
||||
}
|
||||
|
||||
// removeKnown removes the known peer belonging to the
|
||||
// given IP address and node ID from the peer tree.
|
||||
func (pc *peerContainer) removeKnown(addr, enode string) (events []*peerEvent) {
|
||||
// TODO (kurkomisi): Remove peers that don't have traffic samples anymore.
|
||||
if bundle, ok := pc.Bundles[addr]; ok {
|
||||
if _, ok := bundle.KnownPeers[enode]; ok {
|
||||
events = append(events, &peerEvent{
|
||||
Remove: RemoveKnown,
|
||||
Addr: addr,
|
||||
Enode: enode,
|
||||
})
|
||||
delete(bundle.KnownPeers, enode)
|
||||
} else {
|
||||
log.Warn("No peer to remove", "addr", addr, "enode", enode)
|
||||
}
|
||||
if len(bundle.KnownPeers) < 1 && bundle.Attempts < 1 {
|
||||
events = append(events, &peerEvent{
|
||||
Remove: RemoveBundle,
|
||||
Addr: addr,
|
||||
})
|
||||
delete(pc.Bundles, addr)
|
||||
}
|
||||
} else {
|
||||
log.Warn("No bundle to remove", "addr", addr)
|
||||
}
|
||||
return events
|
||||
}
|
||||
|
||||
// knownPeer inserts a new peer into the map, if the peer belonging
|
||||
// to the given IP address and node ID wasn't metered so far. Returns the peer
|
||||
// belonging to the given IP and ID as well as the events occurring during the
|
||||
// initialization.
|
||||
func (bundle *peerBundle) knownPeer(addr, enode string) (*knownPeer, []*peerEvent) {
|
||||
var events []*peerEvent
|
||||
if _, ok := bundle.KnownPeers[enode]; !ok {
|
||||
ingress := emptyChartEntries(sampleLimit)
|
||||
egress := emptyChartEntries(sampleLimit)
|
||||
events = append(events, &peerEvent{
|
||||
Addr: addr,
|
||||
Enode: enode,
|
||||
Ingress: append([]*ChartEntry{}, ingress...),
|
||||
Egress: append([]*ChartEntry{}, egress...),
|
||||
})
|
||||
bundle.KnownPeers[enode] = &knownPeer{
|
||||
addr: addr,
|
||||
enode: enode,
|
||||
Ingress: ingress,
|
||||
Egress: egress,
|
||||
}
|
||||
}
|
||||
return bundle.KnownPeers[enode], events
|
||||
}
|
||||
|
||||
// knownPeer contains the metered data of a particular peer.
|
||||
type knownPeer struct {
|
||||
// Connected contains the timestamps of the peer's connection events.
|
||||
Connected []*time.Time `json:"connected,omitempty"`
|
||||
|
||||
// Disconnected contains the timestamps of the peer's disconnection events.
|
||||
Disconnected []*time.Time `json:"disconnected,omitempty"`
|
||||
|
||||
// Ingress and Egress contain the peer's traffic samples, which are collected
|
||||
// periodically from the metrics registry.
|
||||
//
|
||||
// A peer can connect multiple times, and we want to visualize the time
|
||||
// passed between two connections, so after the first connection a 0 value
|
||||
// is appended to the traffic arrays even if the peer is inactive until the
|
||||
// peer is removed.
|
||||
Ingress ChartEntries `json:"ingress,omitempty"`
|
||||
Egress ChartEntries `json:"egress,omitempty"`
|
||||
|
||||
Name string `json:"name,omitempty"` // Name of the node, including client type, version, OS, custom data
|
||||
Enode string `json:"enode,omitempty"` // Node URL
|
||||
Protocols map[string]interface{} `json:"protocols,omitempty"` // Sub-protocol specific metadata fields
|
||||
|
||||
Active bool `json:"active"` // Denotes if the peer is still connected.
|
||||
|
||||
listElement *list.Element // Pointer to the peer element in the list.
|
||||
addr, enode string // The IP and the ID by which the peer can be accessed in the tree.
|
||||
prevIngress float64
|
||||
prevEgress float64
|
||||
|
||||
peer *p2p.Peer // Connected remote node instance
|
||||
}
|
||||
|
||||
type RemovedPeerType string
|
||||
type ActivityType string
|
||||
|
||||
const (
|
||||
RemoveKnown RemovedPeerType = "known"
|
||||
RemoveBundle RemovedPeerType = "bundle"
|
||||
|
||||
Active ActivityType = "active"
|
||||
Inactive ActivityType = "inactive"
|
||||
)
|
||||
|
||||
// peerEvent contains the attributes of a peer event.
|
||||
type peerEvent struct {
|
||||
Name string `json:"name,omitempty"` // Name of the node, including client type, version, OS, custom data
|
||||
Addr string `json:"addr,omitempty"` // TCP address of the peer.
|
||||
Enode string `json:"enode,omitempty"` // Node URL
|
||||
Protocols map[string]interface{} `json:"protocols,omitempty"` // Sub-protocol specific metadata fields
|
||||
Remove RemovedPeerType `json:"remove,omitempty"` // Type of the peer that is to be removed.
|
||||
Location *geoLocation `json:"location,omitempty"` // Geographical location of the peer.
|
||||
Connected *time.Time `json:"connected,omitempty"` // Timestamp of the connection moment.
|
||||
Disconnected *time.Time `json:"disconnected,omitempty"` // Timestamp of the disonnection moment.
|
||||
Ingress ChartEntries `json:"ingress,omitempty"` // Ingress samples.
|
||||
Egress ChartEntries `json:"egress,omitempty"` // Egress samples.
|
||||
Activity ActivityType `json:"activity,omitempty"` // Connection status change.
|
||||
|
||||
peer *p2p.Peer // Connected remote node instance.
|
||||
}
|
||||
|
||||
// trafficMap is a container for the periodically collected peer traffic.
|
||||
type trafficMap map[string]map[string]float64
|
||||
|
||||
// insert inserts a new value to the traffic map. Overwrites
|
||||
// the value at the given ip and id if that already exists.
|
||||
func (m *trafficMap) insert(ip, id string, val float64) {
|
||||
if _, ok := (*m)[ip]; !ok {
|
||||
(*m)[ip] = make(map[string]float64)
|
||||
}
|
||||
(*m)[ip][id] = val
|
||||
}
|
||||
|
||||
// collectPeerData gathers data about the peers and sends it to the clients.
|
||||
func (db *Dashboard) collectPeerData() {
|
||||
defer db.wg.Done()
|
||||
|
||||
// Open the geodb database for IP to geographical information conversions.
|
||||
var err error
|
||||
db.geodb, err = openGeoDB()
|
||||
if err != nil {
|
||||
log.Warn("Failed to open geodb", "err", err)
|
||||
errc := <-db.quit
|
||||
errc <- nil
|
||||
return
|
||||
}
|
||||
defer db.geodb.close()
|
||||
|
||||
ticker := time.NewTicker(db.config.Refresh)
|
||||
defer ticker.Stop()
|
||||
|
||||
type registryFunc func(name string, i interface{})
|
||||
type collectorFunc func(traffic *trafficMap) registryFunc
|
||||
|
||||
// trafficCollector generates a function that can be passed to
|
||||
// the prefixed peer registry in order to collect the metered
|
||||
// traffic data from each peer meter.
|
||||
trafficCollector := func(prefix string) collectorFunc {
|
||||
// This part makes is possible to collect the
|
||||
// traffic data into a map from outside.
|
||||
return func(traffic *trafficMap) registryFunc {
|
||||
// The function which can be passed to the registry.
|
||||
return func(name string, i interface{}) {
|
||||
if m, ok := i.(metrics.Meter); ok {
|
||||
enode := strings.TrimPrefix(name, prefix)
|
||||
if addr := strings.Split(enode, "@"); len(addr) == 2 {
|
||||
traffic.insert(addr[1], enode, float64(m.Count()))
|
||||
} else {
|
||||
log.Warn("Invalid enode", "enode", enode)
|
||||
}
|
||||
} else {
|
||||
log.Warn("Invalid meter type", "name", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
collectIngress := trafficCollector(p2p.MetricsInboundTraffic + "/")
|
||||
collectEgress := trafficCollector(p2p.MetricsOutboundTraffic + "/")
|
||||
|
||||
peers := newPeerContainer(db.geodb)
|
||||
db.peerLock.Lock()
|
||||
db.history.Network = &NetworkMessage{
|
||||
Peers: peers,
|
||||
}
|
||||
db.peerLock.Unlock()
|
||||
|
||||
// newPeerEvents contains peer events, which trigger operations that
|
||||
// will be executed on the peer tree after a metering period.
|
||||
newPeerEvents := make([]*peerEvent, 0, eventLimit)
|
||||
ingress, egress := new(trafficMap), new(trafficMap)
|
||||
*ingress, *egress = make(trafficMap), make(trafficMap)
|
||||
|
||||
defer db.subPeer.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case event := <-db.peerCh:
|
||||
now := time.Now()
|
||||
switch event.Type {
|
||||
case p2p.PeerHandshakeFailed:
|
||||
connected := now.Add(-event.Elapsed)
|
||||
newPeerEvents = append(newPeerEvents, &peerEvent{
|
||||
Addr: event.Addr,
|
||||
Connected: &connected,
|
||||
Disconnected: &now,
|
||||
})
|
||||
case p2p.PeerHandshakeSucceeded:
|
||||
connected := now.Add(-event.Elapsed)
|
||||
newPeerEvents = append(newPeerEvents, &peerEvent{
|
||||
Addr: event.Addr,
|
||||
Enode: event.Peer.Node().String(),
|
||||
peer: event.Peer,
|
||||
Connected: &connected,
|
||||
})
|
||||
case p2p.PeerDisconnected:
|
||||
addr, enode := event.Addr, event.Peer.Node().String()
|
||||
newPeerEvents = append(newPeerEvents, &peerEvent{
|
||||
Addr: addr,
|
||||
Enode: enode,
|
||||
Disconnected: &now,
|
||||
})
|
||||
// The disconnect event comes with the last metered traffic count,
|
||||
// because after the disconnection the peer's meter is removed
|
||||
// from the registry. It can happen, that between two metering
|
||||
// period the same peer disconnects multiple times, and appending
|
||||
// all the samples to the traffic arrays would shift the metering,
|
||||
// so only the last metering is stored, overwriting the previous one.
|
||||
ingress.insert(addr, enode, float64(event.Ingress))
|
||||
egress.insert(addr, enode, float64(event.Egress))
|
||||
default:
|
||||
log.Error("Unknown metered peer event type", "type", event.Type)
|
||||
}
|
||||
case <-ticker.C:
|
||||
// Collect the traffic samples from the registry.
|
||||
p2p.PeerIngressRegistry.Each(collectIngress(ingress))
|
||||
p2p.PeerEgressRegistry.Each(collectEgress(egress))
|
||||
|
||||
// Protect 'peers', because it is part of the history.
|
||||
db.peerLock.Lock()
|
||||
|
||||
var diff []*peerEvent
|
||||
for i := 0; i < len(newPeerEvents); i++ {
|
||||
if newPeerEvents[i].Addr == "" {
|
||||
log.Warn("Peer event without IP", "event", *newPeerEvents[i])
|
||||
continue
|
||||
}
|
||||
diff = append(diff, newPeerEvents[i])
|
||||
// There are two main branches of peer events coming from the event
|
||||
// feed, one belongs to the known peers, one to the unknown peers.
|
||||
// If the event has node ID, it belongs to a known peer, otherwise
|
||||
// to an unknown one, which is considered as connection attempt.
|
||||
//
|
||||
// The extension can produce additional peer events, such
|
||||
// as remove, location and initial samples events.
|
||||
if newPeerEvents[i].Enode == "" {
|
||||
bundle, events := peers.bundle(newPeerEvents[i].Addr)
|
||||
bundle.Attempts++
|
||||
diff = append(diff, events...)
|
||||
continue
|
||||
}
|
||||
diff = append(diff, peers.extendKnown(newPeerEvents[i])...)
|
||||
}
|
||||
// Update the peer tree using the traffic maps.
|
||||
for addr, bundle := range peers.Bundles {
|
||||
for enode, peer := range bundle.KnownPeers {
|
||||
// Value is 0 if the traffic map doesn't have the
|
||||
// entry corresponding to the given IP and ID.
|
||||
curIngress, curEgress := (*ingress)[addr][enode], (*egress)[addr][enode]
|
||||
deltaIngress, deltaEgress := curIngress, curEgress
|
||||
if deltaIngress >= peer.prevIngress {
|
||||
deltaIngress -= peer.prevIngress
|
||||
}
|
||||
if deltaEgress >= peer.prevEgress {
|
||||
deltaEgress -= peer.prevEgress
|
||||
}
|
||||
peer.prevIngress, peer.prevEgress = curIngress, curEgress
|
||||
i := &ChartEntry{
|
||||
Value: deltaIngress,
|
||||
}
|
||||
e := &ChartEntry{
|
||||
Value: deltaEgress,
|
||||
}
|
||||
peer.Ingress = append(peer.Ingress, i)
|
||||
peer.Egress = append(peer.Egress, e)
|
||||
if first := len(peer.Ingress) - sampleLimit; first > 0 {
|
||||
peer.Ingress = peer.Ingress[first:]
|
||||
}
|
||||
if first := len(peer.Egress) - sampleLimit; first > 0 {
|
||||
peer.Egress = peer.Egress[first:]
|
||||
}
|
||||
// Creating the traffic sample events.
|
||||
diff = append(diff, &peerEvent{
|
||||
Addr: addr,
|
||||
Enode: enode,
|
||||
Ingress: ChartEntries{i},
|
||||
Egress: ChartEntries{e},
|
||||
})
|
||||
if peer.peer != nil {
|
||||
info := peer.peer.Info()
|
||||
if !reflect.DeepEqual(peer.Protocols, info.Protocols) {
|
||||
peer.Protocols = info.Protocols
|
||||
diff = append(diff, &peerEvent{
|
||||
Addr: addr,
|
||||
Enode: enode,
|
||||
Protocols: peer.Protocols,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
db.peerLock.Unlock()
|
||||
|
||||
if len(diff) > 0 {
|
||||
db.sendToAll(&Message{Network: &NetworkMessage{
|
||||
Diff: diff,
|
||||
}})
|
||||
}
|
||||
// Clear the traffic maps, and the event array,
|
||||
// prepare them for the next metering.
|
||||
*ingress, *egress = make(trafficMap), make(trafficMap)
|
||||
newPeerEvents = newPeerEvents[:0]
|
||||
case err := <-db.subPeer.Err():
|
||||
log.Warn("Peer subscription error", "err", err)
|
||||
errc := <-db.quit
|
||||
errc <- nil
|
||||
return
|
||||
case errc := <-db.quit:
|
||||
errc <- nil
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
@ -1,146 +0,0 @@
|
||||
// Copyright 2019 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package dashboard
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/elastic/gosigar"
|
||||
"github.com/ethereum/go-ethereum/metrics"
|
||||
"github.com/ethereum/go-ethereum/p2p"
|
||||
)
|
||||
|
||||
// meterCollector returns a function, which retrieves the count of a specific meter.
|
||||
func meterCollector(name string) func() int64 {
|
||||
if meter := metrics.Get(name); meter != nil {
|
||||
m := meter.(metrics.Meter)
|
||||
return func() int64 {
|
||||
return m.Count()
|
||||
}
|
||||
}
|
||||
return func() int64 {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// collectSystemData gathers data about the system and sends it to the clients.
|
||||
func (db *Dashboard) collectSystemData() {
|
||||
defer db.wg.Done()
|
||||
|
||||
systemCPUUsage := gosigar.Cpu{}
|
||||
systemCPUUsage.Get()
|
||||
var (
|
||||
mem runtime.MemStats
|
||||
|
||||
collectNetworkIngress = meterCollector(p2p.MetricsInboundTraffic)
|
||||
collectNetworkEgress = meterCollector(p2p.MetricsOutboundTraffic)
|
||||
collectDiskRead = meterCollector("eth/db/chaindata/disk/read")
|
||||
collectDiskWrite = meterCollector("eth/db/chaindata/disk/write")
|
||||
|
||||
prevNetworkIngress = collectNetworkIngress()
|
||||
prevNetworkEgress = collectNetworkEgress()
|
||||
prevProcessCPUTime = getProcessCPUTime()
|
||||
prevSystemCPUUsage = systemCPUUsage
|
||||
prevDiskRead = collectDiskRead()
|
||||
prevDiskWrite = collectDiskWrite()
|
||||
|
||||
frequency = float64(db.config.Refresh / time.Second)
|
||||
numCPU = float64(runtime.NumCPU())
|
||||
)
|
||||
|
||||
for {
|
||||
select {
|
||||
case errc := <-db.quit:
|
||||
errc <- nil
|
||||
return
|
||||
case <-time.After(db.config.Refresh):
|
||||
systemCPUUsage.Get()
|
||||
var (
|
||||
curNetworkIngress = collectNetworkIngress()
|
||||
curNetworkEgress = collectNetworkEgress()
|
||||
curProcessCPUTime = getProcessCPUTime()
|
||||
curSystemCPUUsage = systemCPUUsage
|
||||
curDiskRead = collectDiskRead()
|
||||
curDiskWrite = collectDiskWrite()
|
||||
|
||||
deltaNetworkIngress = float64(curNetworkIngress - prevNetworkIngress)
|
||||
deltaNetworkEgress = float64(curNetworkEgress - prevNetworkEgress)
|
||||
deltaProcessCPUTime = curProcessCPUTime - prevProcessCPUTime
|
||||
deltaSystemCPUUsage = curSystemCPUUsage.Delta(prevSystemCPUUsage)
|
||||
deltaDiskRead = curDiskRead - prevDiskRead
|
||||
deltaDiskWrite = curDiskWrite - prevDiskWrite
|
||||
)
|
||||
prevNetworkIngress = curNetworkIngress
|
||||
prevNetworkEgress = curNetworkEgress
|
||||
prevProcessCPUTime = curProcessCPUTime
|
||||
prevSystemCPUUsage = curSystemCPUUsage
|
||||
prevDiskRead = curDiskRead
|
||||
prevDiskWrite = curDiskWrite
|
||||
|
||||
runtime.ReadMemStats(&mem)
|
||||
activeMemory := &ChartEntry{
|
||||
Value: float64(mem.Alloc) / frequency,
|
||||
}
|
||||
virtualMemory := &ChartEntry{
|
||||
Value: float64(mem.Sys) / frequency,
|
||||
}
|
||||
networkIngress := &ChartEntry{
|
||||
Value: deltaNetworkIngress / frequency,
|
||||
}
|
||||
networkEgress := &ChartEntry{
|
||||
Value: deltaNetworkEgress / frequency,
|
||||
}
|
||||
processCPU := &ChartEntry{
|
||||
Value: deltaProcessCPUTime / frequency / numCPU * 100,
|
||||
}
|
||||
systemCPU := &ChartEntry{
|
||||
Value: float64(deltaSystemCPUUsage.Sys+deltaSystemCPUUsage.User) / frequency / numCPU,
|
||||
}
|
||||
diskRead := &ChartEntry{
|
||||
Value: float64(deltaDiskRead) / frequency,
|
||||
}
|
||||
diskWrite := &ChartEntry{
|
||||
Value: float64(deltaDiskWrite) / frequency,
|
||||
}
|
||||
db.sysLock.Lock()
|
||||
sys := db.history.System
|
||||
sys.ActiveMemory = append(sys.ActiveMemory[1:], activeMemory)
|
||||
sys.VirtualMemory = append(sys.VirtualMemory[1:], virtualMemory)
|
||||
sys.NetworkIngress = append(sys.NetworkIngress[1:], networkIngress)
|
||||
sys.NetworkEgress = append(sys.NetworkEgress[1:], networkEgress)
|
||||
sys.ProcessCPU = append(sys.ProcessCPU[1:], processCPU)
|
||||
sys.SystemCPU = append(sys.SystemCPU[1:], systemCPU)
|
||||
sys.DiskRead = append(sys.DiskRead[1:], diskRead)
|
||||
sys.DiskWrite = append(sys.DiskWrite[1:], diskWrite)
|
||||
db.sysLock.Unlock()
|
||||
|
||||
db.sendToAll(&Message{
|
||||
System: &SystemMessage{
|
||||
ActiveMemory: ChartEntries{activeMemory},
|
||||
VirtualMemory: ChartEntries{virtualMemory},
|
||||
NetworkIngress: ChartEntries{networkIngress},
|
||||
NetworkEgress: ChartEntries{networkEgress},
|
||||
ProcessCPU: ChartEntries{processCPU},
|
||||
SystemCPU: ChartEntries{systemCPU},
|
||||
DiskRead: ChartEntries{diskRead},
|
||||
DiskWrite: ChartEntries{diskWrite},
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
@ -27,7 +27,7 @@ var Enabled = false
|
||||
var EnabledExpensive = false
|
||||
|
||||
// enablerFlags is the CLI flag names to use to enable metrics collections.
|
||||
var enablerFlags = []string{"metrics", "dashboard"}
|
||||
var enablerFlags = []string{"metrics"}
|
||||
|
||||
// expensiveEnablerFlags is the CLI flag names to use to enable metrics collections.
|
||||
var expensiveEnablerFlags = []string{"metrics.expensive"}
|
||||
|
11
vendor/github.com/apilayer/freegeoip/AUTHORS
generated
vendored
11
vendor/github.com/apilayer/freegeoip/AUTHORS
generated
vendored
@ -1,11 +0,0 @@
|
||||
# This is the official list of freegeoip authors for copyright purposes.
|
||||
# This file is distinct from the CONTRIBUTORS file.
|
||||
#
|
||||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
#
|
||||
# The email address is not required for organizations.
|
||||
#
|
||||
# Please keep the list sorted.
|
||||
|
||||
Alexandre Fiori <fiorix@gmail.com>
|
22
vendor/github.com/apilayer/freegeoip/CONTRIBUTORS
generated
vendored
22
vendor/github.com/apilayer/freegeoip/CONTRIBUTORS
generated
vendored
@ -1,22 +0,0 @@
|
||||
# This is the official list of freegeoip contributors for copyright purposes.
|
||||
# This file is distinct from the AUTHORS file.
|
||||
#
|
||||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
#
|
||||
# Please keep the list sorted.
|
||||
#
|
||||
# Use the following command to generate the list:
|
||||
#
|
||||
# git shortlog -se | awk '{print $2 " " $3 " " $4}'
|
||||
#
|
||||
# The email address is not required for organizations.
|
||||
|
||||
Alex Goretoy <alex@goretoy.com>
|
||||
Gleicon Moraes <gleicon@gmail.com>
|
||||
Leandro Pereira <leandro@hardinfo.org>
|
||||
Lucas Fontes <lxfontes@gmail.com>
|
||||
Matthias Nehlsen <matthias.nehlsen@gmail.com>
|
||||
Melchi <melchi.si@gmail.com>
|
||||
Nick Muerdter <stuff@nickm.org>
|
||||
Vladimir Agafonkin <agafonkin@gmail.com>
|
25
vendor/github.com/apilayer/freegeoip/Dockerfile
generated
vendored
25
vendor/github.com/apilayer/freegeoip/Dockerfile
generated
vendored
@ -1,25 +0,0 @@
|
||||
FROM golang:1.9
|
||||
|
||||
COPY cmd/freegeoip/public /var/www
|
||||
|
||||
ADD . /go/src/github.com/apilayer/freegeoip
|
||||
RUN \
|
||||
cd /go/src/github.com/apilayer/freegeoip/cmd/freegeoip && \
|
||||
go get -d && go install && \
|
||||
apt-get update && apt-get install -y libcap2-bin && \
|
||||
setcap cap_net_bind_service=+ep /go/bin/freegeoip && \
|
||||
apt-get clean && rm -rf /var/lib/apt/lists/* && \
|
||||
useradd -ms /bin/bash freegeoip
|
||||
|
||||
USER freegeoip
|
||||
ENTRYPOINT ["/go/bin/freegeoip"]
|
||||
|
||||
EXPOSE 8080
|
||||
|
||||
# CMD instructions:
|
||||
# Add "-use-x-forwarded-for" if your server is behind a reverse proxy
|
||||
# Add "-public", "/var/www" to enable the web front-end
|
||||
# Add "-internal-server", "8888" to enable the pprof+metrics server
|
||||
#
|
||||
# Example:
|
||||
# CMD ["-use-x-forwarded-for", "-public", "/var/www", "-internal-server", "8888"]
|
55
vendor/github.com/apilayer/freegeoip/HISTORY.md
generated
vendored
55
vendor/github.com/apilayer/freegeoip/HISTORY.md
generated
vendored
@ -1,55 +0,0 @@
|
||||
# History of freegeoip.net
|
||||
|
||||
The freegeoip software is the result of a web server research project that
|
||||
started in 2009, written in Python and hosted on
|
||||
[Google App Engine](http://appengine.google.com). It was rapidly adopted by
|
||||
many developers around the world due to its simplistic and straightforward
|
||||
HTTP API, causing the free account on GAE to exceed its quota every day
|
||||
after few hours of operation.
|
||||
|
||||
A year later freegeoip 1.0 was released, and the freegeoip.net domain
|
||||
moved over to its own server infrastructure. The software was rewritten
|
||||
using the [Cyclone](http://cyclone.io) web framework, backed by
|
||||
[Twisted](http://twistedmatrix.com) and [PyPy](http://pypy.org) in
|
||||
production. That's when the first database management tool was created,
|
||||
a script that would download many pieces of information from the Internet
|
||||
to create the IP database, an sqlite flat file used by the server.
|
||||
|
||||
This version of the Python server shipped with a much better front-end as
|
||||
well, but still as a server-side rendered template inherited from the GAE
|
||||
version. It was only circa 2011 that freegeoip got its first standalone
|
||||
front-end based on jQuery, and is when Twitter bootstrap was first used.
|
||||
|
||||
Python played an important role in the early life of freegeoip and
|
||||
allowed the service to grow and evolve fast. It provided a lot of
|
||||
flexibility in building and maintaining the IP database using multiple
|
||||
sources of data. This version of the server lasted until 2013, when
|
||||
it was once again rewritten from scratch, this time in Go. The database
|
||||
tool, however, remained intact.
|
||||
|
||||
In 2013 the Go version was released as freegeoip 2.0 and this version
|
||||
had many iterations. The first versions of the server written in Go were
|
||||
very rustic, practically a verbatim transcription of the Python server.
|
||||
Took a while until it started looking more like common Go code, and to
|
||||
have tests.
|
||||
|
||||
Another important change that shipped with v2 was a front-end based on
|
||||
AngularJS, but still mixed with some jQuery. The Google map in the front
|
||||
page was made optional to put more focus on the HTTP API. The popularity
|
||||
of freegeoip has increased considerably over the years of 2013 and 2014,
|
||||
calling for more.
|
||||
|
||||
Enter freegeoip 3.0, an evolution of the Go server. The foundation of
|
||||
freegeoip, which is the IP database and HTTP API, now lives in a Go
|
||||
package that other developers can leverage. The freegeoip web server is
|
||||
built on this package making its code cleaner, the server faster,
|
||||
and requires zero maintenance for the IP database. The server downloads
|
||||
the file from MaxMind and keep it up to date in background.
|
||||
|
||||
This and other changes make it very Docker friendly.
|
||||
|
||||
The front-end has been trimmed down to a single index.html file that loads
|
||||
CSS and JS from CDNs on the internet. The JS part is based on AngularJS
|
||||
and handles the search request and response of the public site. The
|
||||
optional map has become a link to Google Maps following the lat/long
|
||||
of the query results.
|
27
vendor/github.com/apilayer/freegeoip/LICENSE
generated
vendored
27
vendor/github.com/apilayer/freegeoip/LICENSE
generated
vendored
@ -1,27 +0,0 @@
|
||||
Copyright (c) 2009 The freegeoip authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* The names of authors or contributors may NOT be used to endorse or
|
||||
promote products derived from this software without specific prior
|
||||
written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
1
vendor/github.com/apilayer/freegeoip/Procfile
generated
vendored
1
vendor/github.com/apilayer/freegeoip/Procfile
generated
vendored
@ -1 +0,0 @@
|
||||
web: freegeoip -http :${PORT} -use-x-forwarded-for -public /app/cmd/freegeoip/public -quota-backend map -quota-max 10000
|
259
vendor/github.com/apilayer/freegeoip/README.md
generated
vendored
259
vendor/github.com/apilayer/freegeoip/README.md
generated
vendored
@ -1,259 +0,0 @@
|
||||
![freegeoip ipstack](https://raw.githubusercontent.com/apilayer/freegeoip/master/freegeo-warning.png)
|
||||
|
||||
# freegeoip - Important Announcement
|
||||
|
||||
*[The old freegeoip API is now deprecated and will be discontinued on July 1st, 2018]*
|
||||
|
||||
Launched more than 6 years ago, the freegeoip.net API has grown into one of the biggest and most widely used APIs for IP to location services worldwide. The API is used by thousands of developers, SMBs and large corporations around the globe and is currently handling more than 2 billion requests per day. After years of operation and the API remaining almost unchanged, today we announce the complete re-launch of freegeoip into a faster, more advanced and more scalable API service called ipstack (https://ipstack.com). All users that wish to continue using our IP to location service will be required to sign up to obtain a free API access key and perform a few simple changes to their integration. While the new API offers the ability to return data in the same structure as the old freegeoip API, the new API structure offers various options of delivering much more advanced data for IP Addresses.
|
||||
|
||||
## Required Changes to Legacy Integrations (freegeoip.net/json/xml)
|
||||
|
||||
As of March 31 2018 the old freegeoip API is deprecated and a completely re-designed API is now accessible at http://api.ipstack.com. While the new API offers the same capabilities as the old one and also has the option of returning data in the legacy format, the API URL has now changed and all users are required to sign up for a free API Access Key to use the service.
|
||||
|
||||
1. Get a free ipstack Account and Access Key
|
||||
|
||||
Head over to https://ipstack.com and follow the instructions to create your account and obtain your access token. If you only need basic IP to Geolocation data and do not require more than 10,000 requests per month, you can use the free account. If you'd like more advanced features or more requests than included in the free account you will need to choose one of the paid options. You can find an overview of all available plans at https://ipstack.com/product
|
||||
|
||||
2. Integrate the new API URL
|
||||
|
||||
The new API comes with a completely new endpoint (api.ipstack.com) and requires you to append your API Access Key to the URL as a GET parameter. For complete integration instructions, please head over to the API Documentation at https://ipstack.com/documentation. While the new API offers a completely reworked response structure with many additional data points, we also offer the option to receive results in the old freegeoip.net format in JSON or XML.
|
||||
|
||||
To receive your API results in the old freegeoip format, please simply append &legacy=1 to the new API URL.
|
||||
|
||||
JSON Example: http://api.ipstack.com/186.116.207.169?access_key=YOUR_ACCESS_KEY&output=json&legacy=1
|
||||
|
||||
XML Example: http://api.ipstack.com/186.116.207.169?access_key=YOUR_ACCESS_KEY&output=xml&legacy=1
|
||||
|
||||
## New features with ipstack
|
||||
While the new ipstack service now runs on a commercial/freemium model, we have worked hard at building a faster, more scalable, and more advanced IP to location API product. You can read more about all the new features by navigating to https://ipstack.com, but here's a list of the most important changes and additions:
|
||||
|
||||
- We're still free for basic usage
|
||||
|
||||
While we now offer paid / premium options for our more advanced users, our core product and IP to Country/Region/City product is still completely free of charge for up to 10,000 requests per month. If you need more advanced data or more requests, you can choose one of the paid plans listed at https://ipstack.com/product
|
||||
|
||||
- Batch Requests
|
||||
|
||||
Need to validate more than 1 IP Address in a single API Call? Our new Bulk Lookup Feature (available on our paid plans) allows you to geolocate up to 50 IP Addresses in a single API Call.
|
||||
|
||||
- Much more Data
|
||||
|
||||
While the old freegeoip API was limited to provide only the most basic IP to location data, our new API provides more than 20 additional data points including Language, Time Zone, Current Time, Currencies, Connection & ASN Information, and much more. To learn more about all the data points available, please head over to the ipstack website.
|
||||
|
||||
- Security & Fraud Prevention Tools
|
||||
|
||||
Do you want to prevent fraudulent traffic from arriving at your website or from abusing your service? Easily spot malicious / proxy / VPN traffic by using our new Security Module, which outputs a lot of valuable security information about an IP Address.
|
||||
|
||||
Next Steps
|
||||
|
||||
- Deprecation of the old API
|
||||
|
||||
While we want to keep the disruption to our current users as minimal as possible, we are planning to shut the old API down on July 1st, 2018. This should give all users enough time to adapt to changes, and should we still see high volumes of traffic going to the old API by that date, we may decide to extend it further. In any case, we highly recommend you switch to the new API as soon as possible. We will keep you posted here about any changes to the planned shutdown date.
|
||||
|
||||
- Any Questions? Please get in touch!
|
||||
|
||||
It's very important to ensure a smooth transition to ipstack for all freegeoip API users. If you are a developer that has published a plugin/addon that includes the legacy API, we recommend you get in touch with us and also share this announcement with your users. If you have any questions about the transition or the new API, please get in touch with us at support@ipstack.com
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# freegeoip - Deprecated Documentation
|
||||
|
||||
[![Deploy](https://www.herokucdn.com/deploy/button.svg)](https://heroku.com/deploy)
|
||||
|
||||
This is the source code of the freegeoip software. It contains both the web server that empowers freegeoip.net, and a package for the [Go](http://golang.org) programming language that enables any web server to support IP geolocation with a simple and clean API.
|
||||
|
||||
See http://en.wikipedia.org/wiki/Geolocation for details about geolocation.
|
||||
|
||||
Developers looking for the Go API can skip to the [Package freegeoip](#packagefreegeoip) section below.
|
||||
|
||||
## Running
|
||||
|
||||
This section is for people who desire to run the freegeoip web server on their own infrastructure. The easiest and most generic way of doing this is by using Docker. All examples below use Docker.
|
||||
|
||||
### Docker
|
||||
|
||||
#### Install Docker
|
||||
|
||||
Docker has [install instructions for many platforms](https://docs.docker.com/engine/installation/),
|
||||
including
|
||||
- [Ubuntu](https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/)
|
||||
- [CentOS](https://docs.docker.com/engine/installation/linux/docker-ce/centos/)
|
||||
- [Mac](https://docs.docker.com/docker-for-mac/install/)
|
||||
|
||||
#### Run the API in a container
|
||||
|
||||
```bash
|
||||
docker run --restart=always -p 8080:8080 -d apilayer/freegeoip
|
||||
```
|
||||
|
||||
#### Test
|
||||
|
||||
```bash
|
||||
curl localhost:8080/json/1.2.3.4
|
||||
# => {"ip":"1.2.3.4","country_code":"US","country_name":"United States", # ...
|
||||
```
|
||||
|
||||
### Other Linux, OS X, FreeBSD, and Windows
|
||||
|
||||
There are [pre-compiled binaries](https://github.com/apilayer/freegeoip/releases) available.
|
||||
|
||||
### Production configuration
|
||||
|
||||
For production workloads you may want to use different configuration for the freegeoip web server, for example:
|
||||
|
||||
* Enabling the "internal server" for collecting metrics and profiling/tracing the freegeoip web server on demand
|
||||
* Monitoring the internal server using [Prometheus](https://prometheus.io), or exporting your metrics to [New Relic](https://newrelic.com)
|
||||
* Serving the freegeoip API over HTTPS (TLS) using your own certificates, or provisioned automatically using [LetsEncrypt.org](https://letsencrypt.org)
|
||||
* Configuring [HSTS](https://en.wikipedia.org/wiki/HTTP_Strict_Transport_Security) to restrict your browser clients to always use HTTPS
|
||||
* Configuring the read and write timeouts to avoid stale clients consuming server resources
|
||||
* Configuring the freegeoip web server to read the client IP (for logs, etc) from the X-Forwarded-For header when running behind a reverse proxy
|
||||
* Configuring [CORS](https://en.wikipedia.org/wiki/Cross-origin_resource_sharing) to restrict access to your API to specific domains
|
||||
* Configuring a specific endpoint path prefix other than the default "/" (thus /json, /xml, /csv) to serve the API alongside other APIs on the same host
|
||||
* Optimizing your round trips by enabling [TCP Fast Open](https://en.wikipedia.org/wiki/TCP_Fast_Open) on your OS and the freegeoip web server
|
||||
* Setting up usage limits (quotas) for your clients (per client IP) based on requests per time interval; we support various backends such as in-memory map (for single instance), or redis or memcache for distributed deployments
|
||||
* Serve the default [GeoLite2 City](http://dev.maxmind.com/geoip/geoip2/geolite2/) free database that is downloaded and updated automatically in background on a configurable schedule, or
|
||||
* Serve the commercial [GeoIP2 City](https://www.maxmind.com/en/geoip2-city) database from MaxMind, either as a local file that you provide and update periodically (so the server can reload it), or configured to be downloaded periodically using your API key
|
||||
|
||||
See the [Server Options](#serveroptions) section below for more information on configuring the server.
|
||||
|
||||
For automation, check out the [freegeoip chef cookbook](https://supermarket.chef.io/cookbooks/freegeoip) or the (legacy) [Ansible Playbook](./cmd/freegeoip/ansible-playbook) for Ubuntu 14.04 LTS.
|
||||
|
||||
<a name="serveroptions">
|
||||
|
||||
### Server Options
|
||||
|
||||
To see all the available options, use the `-help` option:
|
||||
|
||||
```bash
|
||||
docker run --rm -it apilayer/freegeoip -help
|
||||
```
|
||||
|
||||
If you're using LetsEncrypt.org to provision your TLS certificates, you have to listen for HTTPS on port 443. Following is an example of the server listening on 3 different ports: metrics + pprof (8888), http (80), and https (443):
|
||||
|
||||
```bash
|
||||
docker run -p 8888:8888 -p 80:8080 -p 443:8443 -d apilayer/freegeoip \
|
||||
-internal-server=:8888 \
|
||||
-http=:8080 \
|
||||
-https=:8443 \
|
||||
-hsts=max-age=31536000 \
|
||||
-letsencrypt \
|
||||
-letsencrypt-hosts=myfancydomain.io
|
||||
```
|
||||
|
||||
You can configure the freegeiop web server via command line flags or environment variables. The names of environment variables are the same for command line flags, but prefixed with FREEGEOIP, all upperscase, separated by underscores. If you want to use environment variables instead:
|
||||
|
||||
```bash
|
||||
$ cat prod.env
|
||||
FREEGEOIP_INTERNAL_SERVER=:8888
|
||||
FREEGEOIP_HTTP=:8080
|
||||
FREEGEOIP_HTTPS=:8443
|
||||
FREEGEOIP_HSTS=max-age=31536000
|
||||
FREEGEOIP_LETSENCRYPT=true
|
||||
FREEGEOIP_LETSENCRYPT_HOSTS=myfancydomain.io
|
||||
|
||||
$ docker run --env-file=prod.env -p 8888:8888 -p 80:8080 -p 443:8443 -d apilayer/freegeoip
|
||||
```
|
||||
|
||||
By default, HTTP/2 is enabled over HTTPS. You can disable by passing the `-http2=false` flag.
|
||||
|
||||
Also, the Docker image of freegeoip does not provide the web page from freegeiop.net, it only provides the API. If you want to serve that page, you can pass the `-public=/var/www` parameter in the command line. You can also tell Docker to mount that directory as a volume on the host machine and have it serve your own page, using Docker's `-v` parameter.
|
||||
|
||||
If the freegeoip web server is running behind a reverse proxy or load balancer, you have to run it passing the `-use-x-forwarded-for` parameter and provide the `X-Forwarded-For` HTTP header in all requests. This is for the freegeoip web server be able to log the client IP, and to perform geolocation lookups when an IP is not provided to the API, e.g. `/json/` (uses client IP) vs `/json/1.2.3.4`.
|
||||
|
||||
## Database
|
||||
|
||||
The current implementation uses the free [GeoLite2 City](http://dev.maxmind.com/geoip/geoip2/geolite2/) database from MaxMind.
|
||||
|
||||
In the past we had databases from other providers, and at some point even our own database comprised of data from different sources. This means it might change in the future.
|
||||
|
||||
If you have purchased the commercial database from MaxMind, you can point the freegeoip web server or (Go API, for dev) to the URL containing the file, or local file, and the server will use it.
|
||||
|
||||
In case of files on disk, you can replace the file with a newer version and the freegeoip web server will reload it automatically in background. If instead of a file you use a URL (the default), we periodically check the URL in background to see if there's a new database version available, then download the reload it automatically.
|
||||
|
||||
All responses from the freegeiop API contain the date that the database was downloaded in the X-Database-Date HTTP header.
|
||||
|
||||
## API
|
||||
|
||||
The freegeoip API is served by endpoints that encode the response in different formats.
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
curl freegeoip.net/json/
|
||||
```
|
||||
|
||||
Returns the geolocation information of your own IP address, the source IP address of the connection.
|
||||
|
||||
You can pass a different IP or hostname. For example, to lookup the geolocation of `github.com` the server resolves the name first, then uses the first IP address available, which might be IPv4 or IPv6:
|
||||
|
||||
```bash
|
||||
curl freegeoip.net/json/github.com
|
||||
```
|
||||
|
||||
Same semantics are available for the `/xml/{ip}` and `/csv/{ip}` endpoints.
|
||||
|
||||
JSON responses can be encoded as JSONP, by adding the `callback` parameter:
|
||||
|
||||
```bash
|
||||
curl freegeoip.net/json/?callback=foobar
|
||||
```
|
||||
|
||||
The callback parameter is ignored on all other endpoints.
|
||||
|
||||
## Metrics and profiling
|
||||
|
||||
The freegeoip web server can provide metrics about its usage, and also supports runtime profiling and tracing.
|
||||
|
||||
Both are disabled by default, but can be enabled by passing the `-internal-server` parameter in the command line. Metrics are generated for [Prometheus](http://prometheus.io) and can be queried at `/metrics` even with curl.
|
||||
|
||||
HTTP pprof is available at `/debug/pprof` and the examples from the [pprof](https://golang.org/pkg/net/http/pprof/) package documentation should work on the freegeiop web server.
|
||||
|
||||
<a name="packagefreegeoip">
|
||||
|
||||
## Package freegeoip
|
||||
|
||||
The freegeoip package for the Go programming language provides two APIs:
|
||||
|
||||
- A database API that requires zero maintenance of the IP database;
|
||||
- A geolocation `http.Handler` that can be used/served by any http server.
|
||||
|
||||
tl;dr if all you want is code then see the `example_test.go` file.
|
||||
|
||||
Otherwise check out the godoc reference.
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/apilayer/freegeoip?status.svg)](https://godoc.org/github.com/apilayer/freegeoip)
|
||||
[![Build Status](https://secure.travis-ci.org/apilayer/freegeoip.png)](http://travis-ci.org/apilayer/freegeoip)
|
||||
[![GoReportCard](https://goreportcard.com/badge/github.com/apilayer/freegeoip)](https://goreportcard.com/report/github.com/apilayer/freegeoip)
|
||||
|
||||
### Features
|
||||
|
||||
- Zero maintenance
|
||||
|
||||
The DB object alone can download an IP database file from the internet and service lookups to your program right away. It will auto-update the file in background and always magically work.
|
||||
|
||||
- DevOps friendly
|
||||
|
||||
If you do care about the database and have the commercial version of the MaxMind database, you can update the database file with your program running and the DB object will load it in background. You can focus on your stuff.
|
||||
|
||||
- Extensible
|
||||
|
||||
Besides the database part, the package provides an `http.Handler` object that you can add to your HTTP server to service IP geolocation lookups with the same simplistic API of freegeoip.net. There's also an interface for crafting your own HTTP responses encoded in any format.
|
||||
|
||||
### Install
|
||||
|
||||
Download the package:
|
||||
|
||||
go get -d github.com/apilayer/freegeoip/...
|
||||
|
||||
Install the web server:
|
||||
|
||||
go install github.com/apilayer/freegeoip/cmd/freegeoip
|
||||
|
||||
Test coverage is quite good, and test code may help you find the stuff you need.
|
7
vendor/github.com/apilayer/freegeoip/app.json
generated
vendored
7
vendor/github.com/apilayer/freegeoip/app.json
generated
vendored
@ -1,7 +0,0 @@
|
||||
{
|
||||
"name": "freegeoip",
|
||||
"description": "IP geolocation web server",
|
||||
"website": "https://github.com/apilayer/freegeoip",
|
||||
"success_url": "/",
|
||||
"keywords": ["golang", "geoip", "api"]
|
||||
}
|
453
vendor/github.com/apilayer/freegeoip/db.go
generated
vendored
453
vendor/github.com/apilayer/freegeoip/db.go
generated
vendored
@ -1,453 +0,0 @@
|
||||
// Copyright 2009 The freegeoip authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
package freegeoip
|
||||
|
||||
import (
|
||||
"compress/gzip"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/howeyc/fsnotify"
|
||||
"github.com/oschwald/maxminddb-golang"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrUnavailable may be returned by DB.Lookup when the database
|
||||
// points to a URL and is not yet available because it's being
|
||||
// downloaded in background.
|
||||
ErrUnavailable = errors.New("no database available")
|
||||
|
||||
// Local cached copy of a database downloaded from a URL.
|
||||
defaultDB = filepath.Join(os.TempDir(), "freegeoip", "db.gz")
|
||||
|
||||
// MaxMindDB is the URL of the free MaxMind GeoLite2 database.
|
||||
MaxMindDB = "http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.mmdb.gz"
|
||||
)
|
||||
|
||||
// DB is the IP geolocation database.
|
||||
type DB struct {
|
||||
file string // Database file name.
|
||||
checksum string // MD5 of the unzipped database file
|
||||
reader *maxminddb.Reader // Actual db object.
|
||||
notifyQuit chan struct{} // Stop auto-update and watch goroutines.
|
||||
notifyOpen chan string // Notify when a db file is open.
|
||||
notifyError chan error // Notify when an error occurs.
|
||||
notifyInfo chan string // Notify random actions for logging
|
||||
closed bool // Mark this db as closed.
|
||||
lastUpdated time.Time // Last time the db was updated.
|
||||
mu sync.RWMutex // Protects all the above.
|
||||
|
||||
updateInterval time.Duration // Update interval.
|
||||
maxRetryInterval time.Duration // Max retry interval in case of failure.
|
||||
}
|
||||
|
||||
// Open creates and initializes a DB from a local file.
|
||||
//
|
||||
// The database file is monitored by fsnotify and automatically
|
||||
// reloads when the file is updated or overwritten.
|
||||
func Open(dsn string) (*DB, error) {
|
||||
db := &DB{
|
||||
file: dsn,
|
||||
notifyQuit: make(chan struct{}),
|
||||
notifyOpen: make(chan string, 1),
|
||||
notifyError: make(chan error, 1),
|
||||
notifyInfo: make(chan string, 1),
|
||||
}
|
||||
err := db.openFile()
|
||||
if err != nil {
|
||||
db.Close()
|
||||
return nil, err
|
||||
}
|
||||
err = db.watchFile()
|
||||
if err != nil {
|
||||
db.Close()
|
||||
return nil, fmt.Errorf("fsnotify failed for %s: %s", dsn, err)
|
||||
}
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// MaxMindUpdateURL generates the URL for MaxMind paid databases.
|
||||
func MaxMindUpdateURL(hostname, productID, userID, licenseKey string) (string, error) {
|
||||
limiter := func(r io.Reader) *io.LimitedReader {
|
||||
return &io.LimitedReader{R: r, N: 1 << 30}
|
||||
}
|
||||
baseurl := "https://" + hostname + "/app/"
|
||||
// Get the file name for the product ID.
|
||||
u := baseurl + "update_getfilename?product_id=" + productID
|
||||
resp, err := http.Get(u)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
md5hash := md5.New()
|
||||
_, err = io.Copy(md5hash, limiter(resp.Body))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
sum := md5hash.Sum(nil)
|
||||
hexdigest1 := hex.EncodeToString(sum[:])
|
||||
// Get our client IP address.
|
||||
resp, err = http.Get(baseurl + "update_getipaddr")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
md5hash = md5.New()
|
||||
io.WriteString(md5hash, licenseKey)
|
||||
_, err = io.Copy(md5hash, limiter(resp.Body))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
sum = md5hash.Sum(nil)
|
||||
hexdigest2 := hex.EncodeToString(sum[:])
|
||||
// Generate the URL.
|
||||
params := url.Values{
|
||||
"db_md5": {hexdigest1},
|
||||
"challenge_md5": {hexdigest2},
|
||||
"user_id": {userID},
|
||||
"edition_id": {productID},
|
||||
}
|
||||
u = baseurl + "update_secure?" + params.Encode()
|
||||
return u, nil
|
||||
}
|
||||
|
||||
// OpenURL creates and initializes a DB from a URL.
|
||||
// It automatically downloads and updates the file in background, and
|
||||
// keeps a local copy on $TMPDIR.
|
||||
func OpenURL(url string, updateInterval, maxRetryInterval time.Duration) (*DB, error) {
|
||||
db := &DB{
|
||||
file: defaultDB,
|
||||
notifyQuit: make(chan struct{}),
|
||||
notifyOpen: make(chan string, 1),
|
||||
notifyError: make(chan error, 1),
|
||||
notifyInfo: make(chan string, 1),
|
||||
updateInterval: updateInterval,
|
||||
maxRetryInterval: maxRetryInterval,
|
||||
}
|
||||
db.openFile() // Optional, might fail.
|
||||
go db.autoUpdate(url)
|
||||
err := db.watchFile()
|
||||
if err != nil {
|
||||
db.Close()
|
||||
return nil, fmt.Errorf("fsnotify failed for %s: %s", db.file, err)
|
||||
}
|
||||
return db, nil
|
||||
}
|
||||
|
||||
func (db *DB) watchFile() error {
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dbdir, err := db.makeDir()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go db.watchEvents(watcher)
|
||||
return watcher.Watch(dbdir)
|
||||
}
|
||||
|
||||
func (db *DB) watchEvents(watcher *fsnotify.Watcher) {
|
||||
for {
|
||||
select {
|
||||
case ev := <-watcher.Event:
|
||||
if ev.Name == db.file && (ev.IsCreate() || ev.IsModify()) {
|
||||
db.openFile()
|
||||
}
|
||||
case <-watcher.Error:
|
||||
case <-db.notifyQuit:
|
||||
watcher.Close()
|
||||
return
|
||||
}
|
||||
time.Sleep(time.Second) // Suppress high-rate events.
|
||||
}
|
||||
}
|
||||
|
||||
func (db *DB) openFile() error {
|
||||
reader, checksum, err := db.newReader(db.file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stat, err := os.Stat(db.file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
db.setReader(reader, stat.ModTime(), checksum)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *DB) newReader(dbfile string) (*maxminddb.Reader, string, error) {
|
||||
f, err := os.Open(dbfile)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
defer f.Close()
|
||||
gzf, err := gzip.NewReader(f)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
defer gzf.Close()
|
||||
b, err := ioutil.ReadAll(gzf)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
checksum := fmt.Sprintf("%x", md5.Sum(b))
|
||||
mmdb, err := maxminddb.FromBytes(b)
|
||||
return mmdb, checksum, err
|
||||
}
|
||||
|
||||
func (db *DB) setReader(reader *maxminddb.Reader, modtime time.Time, checksum string) {
|
||||
db.mu.Lock()
|
||||
defer db.mu.Unlock()
|
||||
if db.closed {
|
||||
reader.Close()
|
||||
return
|
||||
}
|
||||
if db.reader != nil {
|
||||
db.reader.Close()
|
||||
}
|
||||
db.reader = reader
|
||||
db.lastUpdated = modtime.UTC()
|
||||
db.checksum = checksum
|
||||
select {
|
||||
case db.notifyOpen <- db.file:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (db *DB) autoUpdate(url string) {
|
||||
backoff := time.Second
|
||||
for {
|
||||
db.sendInfo("starting update")
|
||||
err := db.runUpdate(url)
|
||||
if err != nil {
|
||||
bs := backoff.Seconds()
|
||||
ms := db.maxRetryInterval.Seconds()
|
||||
backoff = time.Duration(math.Min(bs*math.E, ms)) * time.Second
|
||||
db.sendError(fmt.Errorf("download failed (will retry in %s): %s", backoff, err))
|
||||
} else {
|
||||
backoff = db.updateInterval
|
||||
}
|
||||
db.sendInfo("finished update")
|
||||
select {
|
||||
case <-db.notifyQuit:
|
||||
return
|
||||
case <-time.After(backoff):
|
||||
// Sleep till time for the next update attempt.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (db *DB) runUpdate(url string) error {
|
||||
yes, err := db.needUpdate(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !yes {
|
||||
return nil
|
||||
}
|
||||
tmpfile, err := db.download(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = db.renameFile(tmpfile)
|
||||
if err != nil {
|
||||
// Cleanup the tempfile if renaming failed.
|
||||
os.RemoveAll(tmpfile)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (db *DB) needUpdate(url string) (bool, error) {
|
||||
stat, err := os.Stat(db.file)
|
||||
if err != nil {
|
||||
return true, nil // Local db is missing, must be downloaded.
|
||||
}
|
||||
|
||||
resp, err := http.Head(url)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Check X-Database-MD5 if it exists
|
||||
headerMd5 := resp.Header.Get("X-Database-MD5")
|
||||
if len(headerMd5) > 0 && db.checksum != headerMd5 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if stat.Size() != resp.ContentLength {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (db *DB) download(url string) (tmpfile string, err error) {
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
tmpfile = filepath.Join(os.TempDir(),
|
||||
fmt.Sprintf("_freegeoip.%d.db.gz", time.Now().UnixNano()))
|
||||
f, err := os.Create(tmpfile)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
_, err = io.Copy(f, resp.Body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return tmpfile, nil
|
||||
}
|
||||
|
||||
func (db *DB) makeDir() (dbdir string, err error) {
|
||||
dbdir = filepath.Dir(db.file)
|
||||
_, err = os.Stat(dbdir)
|
||||
if err != nil {
|
||||
err = os.MkdirAll(dbdir, 0755)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
return dbdir, nil
|
||||
}
|
||||
|
||||
func (db *DB) renameFile(name string) error {
|
||||
os.Rename(db.file, db.file+".bak") // Optional, might fail.
|
||||
_, err := db.makeDir()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Rename(name, db.file)
|
||||
}
|
||||
|
||||
// Date returns the UTC date the database file was last modified.
|
||||
// If no database file has been opened the behaviour of Date is undefined.
|
||||
func (db *DB) Date() time.Time {
|
||||
db.mu.RLock()
|
||||
defer db.mu.RUnlock()
|
||||
return db.lastUpdated
|
||||
}
|
||||
|
||||
// NotifyClose returns a channel that is closed when the database is closed.
|
||||
func (db *DB) NotifyClose() <-chan struct{} {
|
||||
return db.notifyQuit
|
||||
}
|
||||
|
||||
// NotifyOpen returns a channel that notifies when a new database is
|
||||
// loaded or reloaded. This can be used to monitor background updates
|
||||
// when the DB points to a URL.
|
||||
func (db *DB) NotifyOpen() (filename <-chan string) {
|
||||
return db.notifyOpen
|
||||
}
|
||||
|
||||
// NotifyError returns a channel that notifies when an error occurs
|
||||
// while downloading or reloading a DB that points to a URL.
|
||||
func (db *DB) NotifyError() (errChan <-chan error) {
|
||||
return db.notifyError
|
||||
}
|
||||
|
||||
// NotifyInfo returns a channel that notifies informational messages
|
||||
// while downloading or reloading.
|
||||
func (db *DB) NotifyInfo() <-chan string {
|
||||
return db.notifyInfo
|
||||
}
|
||||
|
||||
func (db *DB) sendError(err error) {
|
||||
db.mu.RLock()
|
||||
defer db.mu.RUnlock()
|
||||
if db.closed {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case db.notifyError <- err:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (db *DB) sendInfo(message string) {
|
||||
db.mu.RLock()
|
||||
defer db.mu.RUnlock()
|
||||
if db.closed {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case db.notifyInfo <- message:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
// Lookup performs a database lookup of the given IP address, and stores
|
||||
// the response into the result value. The result value must be a struct
|
||||
// with specific fields and tags as described here:
|
||||
// https://godoc.org/github.com/oschwald/maxminddb-golang#Reader.Lookup
|
||||
//
|
||||
// See the DefaultQuery for an example of the result struct.
|
||||
func (db *DB) Lookup(addr net.IP, result interface{}) error {
|
||||
db.mu.RLock()
|
||||
defer db.mu.RUnlock()
|
||||
if db.reader != nil {
|
||||
return db.reader.Lookup(addr, result)
|
||||
}
|
||||
return ErrUnavailable
|
||||
}
|
||||
|
||||
// DefaultQuery is the default query used for database lookups.
|
||||
type DefaultQuery struct {
|
||||
Continent struct {
|
||||
Names map[string]string `maxminddb:"names"`
|
||||
} `maxminddb:"continent"`
|
||||
Country struct {
|
||||
ISOCode string `maxminddb:"iso_code"`
|
||||
Names map[string]string `maxminddb:"names"`
|
||||
} `maxminddb:"country"`
|
||||
Region []struct {
|
||||
ISOCode string `maxminddb:"iso_code"`
|
||||
Names map[string]string `maxminddb:"names"`
|
||||
} `maxminddb:"subdivisions"`
|
||||
City struct {
|
||||
Names map[string]string `maxminddb:"names"`
|
||||
} `maxminddb:"city"`
|
||||
Location struct {
|
||||
Latitude float64 `maxminddb:"latitude"`
|
||||
Longitude float64 `maxminddb:"longitude"`
|
||||
MetroCode uint `maxminddb:"metro_code"`
|
||||
TimeZone string `maxminddb:"time_zone"`
|
||||
} `maxminddb:"location"`
|
||||
Postal struct {
|
||||
Code string `maxminddb:"code"`
|
||||
} `maxminddb:"postal"`
|
||||
}
|
||||
|
||||
// Close closes the database.
|
||||
func (db *DB) Close() {
|
||||
db.mu.Lock()
|
||||
defer db.mu.Unlock()
|
||||
if !db.closed {
|
||||
db.closed = true
|
||||
close(db.notifyQuit)
|
||||
close(db.notifyOpen)
|
||||
close(db.notifyError)
|
||||
close(db.notifyInfo)
|
||||
}
|
||||
if db.reader != nil {
|
||||
db.reader.Close()
|
||||
db.reader = nil
|
||||
}
|
||||
}
|
14
vendor/github.com/apilayer/freegeoip/doc.go
generated
vendored
14
vendor/github.com/apilayer/freegeoip/doc.go
generated
vendored
@ -1,14 +0,0 @@
|
||||
// Copyright 2009 The freegeoip authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
// Package freegeoip provides an API for searching the geolocation of IP
|
||||
// addresses. It uses a database that can be either a local file or a
|
||||
// remote resource from a URL.
|
||||
//
|
||||
// Local databases are monitored by fsnotify and reloaded when the file is
|
||||
// either updated or overwritten.
|
||||
//
|
||||
// Remote databases are automatically downloaded and updated in background
|
||||
// so you can focus on using the API and not managing the database.
|
||||
package freegeoip
|
BIN
vendor/github.com/apilayer/freegeoip/freegeo-warning.png
generated
vendored
BIN
vendor/github.com/apilayer/freegeoip/freegeo-warning.png
generated
vendored
Binary file not shown.
Before Width: | Height: | Size: 14 KiB |
28
vendor/github.com/howeyc/fsnotify/AUTHORS
generated
vendored
28
vendor/github.com/howeyc/fsnotify/AUTHORS
generated
vendored
@ -1,28 +0,0 @@
|
||||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
# You can update this list using the following command:
|
||||
#
|
||||
# $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Adrien Bustany <adrien@bustany.org>
|
||||
Caleb Spare <cespare@gmail.com>
|
||||
Case Nelson <case@teammating.com>
|
||||
Chris Howey <howeyc@gmail.com> <chris@howey.me>
|
||||
Christoffer Buchholz <christoffer.buchholz@gmail.com>
|
||||
Dave Cheney <dave@cheney.net>
|
||||
Francisco Souza <f@souza.cc>
|
||||
John C Barstow
|
||||
Kelvin Fo <vmirage@gmail.com>
|
||||
Nathan Youngman <git@nathany.com>
|
||||
Paul Hammond <paul@paulhammond.org>
|
||||
Pursuit92 <JoshChase@techpursuit.net>
|
||||
Rob Figueiredo <robfig@gmail.com>
|
||||
Travis Cline <travis.cline@gmail.com>
|
||||
Tudor Golubenco <tudor.g@gmail.com>
|
||||
bronze1man <bronze1man@gmail.com>
|
||||
debrando <denis.brandolini@gmail.com>
|
||||
henrikedwards <henrik.edwards@gmail.com>
|
160
vendor/github.com/howeyc/fsnotify/CHANGELOG.md
generated
vendored
160
vendor/github.com/howeyc/fsnotify/CHANGELOG.md
generated
vendored
@ -1,160 +0,0 @@
|
||||
# Changelog
|
||||
|
||||
## v0.9.0 / 2014-01-17
|
||||
|
||||
* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
|
||||
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
|
||||
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
|
||||
|
||||
## v0.8.12 / 2013-11-13
|
||||
|
||||
* [API] Remove FD_SET and friends from Linux adapter
|
||||
|
||||
## v0.8.11 / 2013-11-02
|
||||
|
||||
* [Doc] Add Changelog [#72][] (thanks @nathany)
|
||||
* [Doc] Spotlight and double modify events on OS X [#62][] (reported by @paulhammond)
|
||||
|
||||
## v0.8.10 / 2013-10-19
|
||||
|
||||
* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
|
||||
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
|
||||
* [Doc] specify OS-specific limits in README (thanks @debrando)
|
||||
|
||||
## v0.8.9 / 2013-09-08
|
||||
|
||||
* [Doc] Contributing (thanks @nathany)
|
||||
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
|
||||
* [Doc] GoCI badge in README (Linux only) [#60][]
|
||||
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)
|
||||
|
||||
## v0.8.8 / 2013-06-17
|
||||
|
||||
* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
|
||||
|
||||
## v0.8.7 / 2013-06-03
|
||||
|
||||
* [API] Make syscall flags internal
|
||||
* [Fix] inotify: ignore event changes
|
||||
* [Fix] race in symlink test [#45][] (reported by @srid)
|
||||
* [Fix] tests on Windows
|
||||
* lower case error messages
|
||||
|
||||
## v0.8.6 / 2013-05-23
|
||||
|
||||
* kqueue: Use EVT_ONLY flag on Darwin
|
||||
* [Doc] Update README with full example
|
||||
|
||||
## v0.8.5 / 2013-05-09
|
||||
|
||||
* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
|
||||
|
||||
## v0.8.4 / 2013-04-07
|
||||
|
||||
* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
|
||||
|
||||
## v0.8.3 / 2013-03-13
|
||||
|
||||
* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin)
|
||||
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
|
||||
|
||||
## v0.8.2 / 2013-02-07
|
||||
|
||||
* [Doc] add Authors
|
||||
* [Fix] fix data races for map access [#29][] (thanks @fsouza)
|
||||
|
||||
## v0.8.1 / 2013-01-09
|
||||
|
||||
* [Fix] Windows path separators
|
||||
* [Doc] BSD License
|
||||
|
||||
## v0.8.0 / 2012-11-09
|
||||
|
||||
* kqueue: directory watching improvements (thanks @vmirage)
|
||||
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
|
||||
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
|
||||
|
||||
## v0.7.4 / 2012-10-09
|
||||
|
||||
* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
|
||||
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
|
||||
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
|
||||
* [Fix] kqueue: modify after recreation of file
|
||||
|
||||
## v0.7.3 / 2012-09-27
|
||||
|
||||
* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
|
||||
* [Fix] kqueue: no longer get duplicate CREATE events
|
||||
|
||||
## v0.7.2 / 2012-09-01
|
||||
|
||||
* kqueue: events for created directories
|
||||
|
||||
## v0.7.1 / 2012-07-14
|
||||
|
||||
* [Fix] for renaming files
|
||||
|
||||
## v0.7.0 / 2012-07-02
|
||||
|
||||
* [Feature] FSNotify flags
|
||||
* [Fix] inotify: Added file name back to event path
|
||||
|
||||
## v0.6.0 / 2012-06-06
|
||||
|
||||
* kqueue: watch files after directory created (thanks @tmc)
|
||||
|
||||
## v0.5.1 / 2012-05-22
|
||||
|
||||
* [Fix] inotify: remove all watches before Close()
|
||||
|
||||
## v0.5.0 / 2012-05-03
|
||||
|
||||
* [API] kqueue: return errors during watch instead of sending over channel
|
||||
* kqueue: match symlink behavior on Linux
|
||||
* inotify: add `DELETE_SELF` (requested by @taralx)
|
||||
* [Fix] kqueue: handle EINTR (reported by @robfig)
|
||||
* [Doc] Godoc example [#1][] (thanks @davecheney)
|
||||
|
||||
## v0.4.0 / 2012-03-30
|
||||
|
||||
* Go 1 released: build with go tool
|
||||
* [Feature] Windows support using winfsnotify
|
||||
* Windows does not have attribute change notifications
|
||||
* Roll attribute notifications into IsModify
|
||||
|
||||
## v0.3.0 / 2012-02-19
|
||||
|
||||
* kqueue: add files when watch directory
|
||||
|
||||
## v0.2.0 / 2011-12-30
|
||||
|
||||
* update to latest Go weekly code
|
||||
|
||||
## v0.1.0 / 2011-10-19
|
||||
|
||||
* kqueue: add watch on file creation to match inotify
|
||||
* kqueue: create file event
|
||||
* inotify: ignore `IN_IGNORED` events
|
||||
* event String()
|
||||
* linux: common FileEvent functions
|
||||
* initial commit
|
||||
|
||||
[#79]: https://github.com/howeyc/fsnotify/pull/79
|
||||
[#77]: https://github.com/howeyc/fsnotify/pull/77
|
||||
[#72]: https://github.com/howeyc/fsnotify/issues/72
|
||||
[#71]: https://github.com/howeyc/fsnotify/issues/71
|
||||
[#70]: https://github.com/howeyc/fsnotify/issues/70
|
||||
[#63]: https://github.com/howeyc/fsnotify/issues/63
|
||||
[#62]: https://github.com/howeyc/fsnotify/issues/62
|
||||
[#60]: https://github.com/howeyc/fsnotify/issues/60
|
||||
[#59]: https://github.com/howeyc/fsnotify/issues/59
|
||||
[#49]: https://github.com/howeyc/fsnotify/issues/49
|
||||
[#45]: https://github.com/howeyc/fsnotify/issues/45
|
||||
[#40]: https://github.com/howeyc/fsnotify/issues/40
|
||||
[#36]: https://github.com/howeyc/fsnotify/issues/36
|
||||
[#33]: https://github.com/howeyc/fsnotify/issues/33
|
||||
[#29]: https://github.com/howeyc/fsnotify/issues/29
|
||||
[#25]: https://github.com/howeyc/fsnotify/issues/25
|
||||
[#24]: https://github.com/howeyc/fsnotify/issues/24
|
||||
[#21]: https://github.com/howeyc/fsnotify/issues/21
|
||||
[#1]: https://github.com/howeyc/fsnotify/issues/1
|
7
vendor/github.com/howeyc/fsnotify/CONTRIBUTING.md
generated
vendored
7
vendor/github.com/howeyc/fsnotify/CONTRIBUTING.md
generated
vendored
@ -1,7 +0,0 @@
|
||||
# Contributing
|
||||
|
||||
## Moving Notice
|
||||
|
||||
There is a fork being actively developed with a new API in preparation for the Go Standard Library:
|
||||
[github.com/go-fsnotify/fsnotify](https://github.com/go-fsnotify/fsnotify)
|
||||
|
28
vendor/github.com/howeyc/fsnotify/LICENSE
generated
vendored
28
vendor/github.com/howeyc/fsnotify/LICENSE
generated
vendored
@ -1,28 +0,0 @@
|
||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
Copyright (c) 2012 fsnotify Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
93
vendor/github.com/howeyc/fsnotify/README.md
generated
vendored
93
vendor/github.com/howeyc/fsnotify/README.md
generated
vendored
@ -1,93 +0,0 @@
|
||||
# File system notifications for Go
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/howeyc/fsnotify?status.png)](http://godoc.org/github.com/howeyc/fsnotify)
|
||||
|
||||
Cross platform: Windows, Linux, BSD and OS X.
|
||||
|
||||
## Moving Notice
|
||||
|
||||
There is a fork being actively developed with a new API in preparation for the Go Standard Library:
|
||||
[github.com/go-fsnotify/fsnotify](https://github.com/go-fsnotify/fsnotify)
|
||||
|
||||
## Example:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/howeyc/fsnotify"
|
||||
)
|
||||
|
||||
func main() {
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
done := make(chan bool)
|
||||
|
||||
// Process events
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case ev := <-watcher.Event:
|
||||
log.Println("event:", ev)
|
||||
case err := <-watcher.Error:
|
||||
log.Println("error:", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
err = watcher.Watch("testDir")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Hang so program doesn't exit
|
||||
<-done
|
||||
|
||||
/* ... do stuff ... */
|
||||
watcher.Close()
|
||||
}
|
||||
```
|
||||
|
||||
For each event:
|
||||
* Name
|
||||
* IsCreate()
|
||||
* IsDelete()
|
||||
* IsModify()
|
||||
* IsRename()
|
||||
|
||||
## FAQ
|
||||
|
||||
**When a file is moved to another directory is it still being watched?**
|
||||
|
||||
No (it shouldn't be, unless you are watching where it was moved to).
|
||||
|
||||
**When I watch a directory, are all subdirectories watched as well?**
|
||||
|
||||
No, you must add watches for any directory you want to watch (a recursive watcher is in the works [#56][]).
|
||||
|
||||
**Do I have to watch the Error and Event channels in a separate goroutine?**
|
||||
|
||||
As of now, yes. Looking into making this single-thread friendly (see [#7][])
|
||||
|
||||
**Why am I receiving multiple events for the same file on OS X?**
|
||||
|
||||
Spotlight indexing on OS X can result in multiple events (see [#62][]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#54][]).
|
||||
|
||||
**How many files can be watched at once?**
|
||||
|
||||
There are OS-specific limits as to how many watches can be created:
|
||||
* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit,
|
||||
reaching this limit results in a "no space left on device" error.
|
||||
* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
|
||||
|
||||
|
||||
[#62]: https://github.com/howeyc/fsnotify/issues/62
|
||||
[#56]: https://github.com/howeyc/fsnotify/issues/56
|
||||
[#54]: https://github.com/howeyc/fsnotify/issues/54
|
||||
[#7]: https://github.com/howeyc/fsnotify/issues/7
|
||||
|
111
vendor/github.com/howeyc/fsnotify/fsnotify.go
generated
vendored
111
vendor/github.com/howeyc/fsnotify/fsnotify.go
generated
vendored
@ -1,111 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package fsnotify implements file system notification.
|
||||
package fsnotify
|
||||
|
||||
import "fmt"
|
||||
|
||||
const (
|
||||
FSN_CREATE = 1
|
||||
FSN_MODIFY = 2
|
||||
FSN_DELETE = 4
|
||||
FSN_RENAME = 8
|
||||
|
||||
FSN_ALL = FSN_MODIFY | FSN_DELETE | FSN_RENAME | FSN_CREATE
|
||||
)
|
||||
|
||||
// Purge events from interal chan to external chan if passes filter
|
||||
func (w *Watcher) purgeEvents() {
|
||||
for ev := range w.internalEvent {
|
||||
sendEvent := false
|
||||
w.fsnmut.Lock()
|
||||
fsnFlags := w.fsnFlags[ev.Name]
|
||||
w.fsnmut.Unlock()
|
||||
|
||||
if (fsnFlags&FSN_CREATE == FSN_CREATE) && ev.IsCreate() {
|
||||
sendEvent = true
|
||||
}
|
||||
|
||||
if (fsnFlags&FSN_MODIFY == FSN_MODIFY) && ev.IsModify() {
|
||||
sendEvent = true
|
||||
}
|
||||
|
||||
if (fsnFlags&FSN_DELETE == FSN_DELETE) && ev.IsDelete() {
|
||||
sendEvent = true
|
||||
}
|
||||
|
||||
if (fsnFlags&FSN_RENAME == FSN_RENAME) && ev.IsRename() {
|
||||
sendEvent = true
|
||||
}
|
||||
|
||||
if sendEvent {
|
||||
w.Event <- ev
|
||||
}
|
||||
|
||||
// If there's no file, then no more events for user
|
||||
// BSD must keep watch for internal use (watches DELETEs to keep track
|
||||
// what files exist for create events)
|
||||
if ev.IsDelete() {
|
||||
w.fsnmut.Lock()
|
||||
delete(w.fsnFlags, ev.Name)
|
||||
w.fsnmut.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
close(w.Event)
|
||||
}
|
||||
|
||||
// Watch a given file path
|
||||
func (w *Watcher) Watch(path string) error {
|
||||
return w.WatchFlags(path, FSN_ALL)
|
||||
}
|
||||
|
||||
// Watch a given file path for a particular set of notifications (FSN_MODIFY etc.)
|
||||
func (w *Watcher) WatchFlags(path string, flags uint32) error {
|
||||
w.fsnmut.Lock()
|
||||
w.fsnFlags[path] = flags
|
||||
w.fsnmut.Unlock()
|
||||
return w.watch(path)
|
||||
}
|
||||
|
||||
// Remove a watch on a file
|
||||
func (w *Watcher) RemoveWatch(path string) error {
|
||||
w.fsnmut.Lock()
|
||||
delete(w.fsnFlags, path)
|
||||
w.fsnmut.Unlock()
|
||||
return w.removeWatch(path)
|
||||
}
|
||||
|
||||
// String formats the event e in the form
|
||||
// "filename: DELETE|MODIFY|..."
|
||||
func (e *FileEvent) String() string {
|
||||
var events string = ""
|
||||
|
||||
if e.IsCreate() {
|
||||
events += "|" + "CREATE"
|
||||
}
|
||||
|
||||
if e.IsDelete() {
|
||||
events += "|" + "DELETE"
|
||||
}
|
||||
|
||||
if e.IsModify() {
|
||||
events += "|" + "MODIFY"
|
||||
}
|
||||
|
||||
if e.IsRename() {
|
||||
events += "|" + "RENAME"
|
||||
}
|
||||
|
||||
if e.IsAttrib() {
|
||||
events += "|" + "ATTRIB"
|
||||
}
|
||||
|
||||
if len(events) > 0 {
|
||||
events = events[1:]
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%q: %s", e.Name, events)
|
||||
}
|
496
vendor/github.com/howeyc/fsnotify/fsnotify_bsd.go
generated
vendored
496
vendor/github.com/howeyc/fsnotify/fsnotify_bsd.go
generated
vendored
@ -1,496 +0,0 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build freebsd openbsd netbsd dragonfly darwin
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const (
|
||||
// Flags (from <sys/event.h>)
|
||||
sys_NOTE_DELETE = 0x0001 /* vnode was removed */
|
||||
sys_NOTE_WRITE = 0x0002 /* data contents changed */
|
||||
sys_NOTE_EXTEND = 0x0004 /* size increased */
|
||||
sys_NOTE_ATTRIB = 0x0008 /* attributes changed */
|
||||
sys_NOTE_LINK = 0x0010 /* link count changed */
|
||||
sys_NOTE_RENAME = 0x0020 /* vnode was renamed */
|
||||
sys_NOTE_REVOKE = 0x0040 /* vnode access was revoked */
|
||||
|
||||
// Watch all events
|
||||
sys_NOTE_ALLEVENTS = sys_NOTE_DELETE | sys_NOTE_WRITE | sys_NOTE_ATTRIB | sys_NOTE_RENAME
|
||||
|
||||
// Block for 100 ms on each call to kevent
|
||||
keventWaitTime = 100e6
|
||||
)
|
||||
|
||||
type FileEvent struct {
|
||||
mask uint32 // Mask of events
|
||||
Name string // File name (optional)
|
||||
create bool // set by fsnotify package if found new file
|
||||
}
|
||||
|
||||
// IsCreate reports whether the FileEvent was triggered by a creation
|
||||
func (e *FileEvent) IsCreate() bool { return e.create }
|
||||
|
||||
// IsDelete reports whether the FileEvent was triggered by a delete
|
||||
func (e *FileEvent) IsDelete() bool { return (e.mask & sys_NOTE_DELETE) == sys_NOTE_DELETE }
|
||||
|
||||
// IsModify reports whether the FileEvent was triggered by a file modification
|
||||
func (e *FileEvent) IsModify() bool {
|
||||
return ((e.mask&sys_NOTE_WRITE) == sys_NOTE_WRITE || (e.mask&sys_NOTE_ATTRIB) == sys_NOTE_ATTRIB)
|
||||
}
|
||||
|
||||
// IsRename reports whether the FileEvent was triggered by a change name
|
||||
func (e *FileEvent) IsRename() bool { return (e.mask & sys_NOTE_RENAME) == sys_NOTE_RENAME }
|
||||
|
||||
// IsAttrib reports whether the FileEvent was triggered by a change in the file metadata.
|
||||
func (e *FileEvent) IsAttrib() bool {
|
||||
return (e.mask & sys_NOTE_ATTRIB) == sys_NOTE_ATTRIB
|
||||
}
|
||||
|
||||
type Watcher struct {
|
||||
mu sync.Mutex // Mutex for the Watcher itself.
|
||||
kq int // File descriptor (as returned by the kqueue() syscall)
|
||||
watches map[string]int // Map of watched file descriptors (key: path)
|
||||
wmut sync.Mutex // Protects access to watches.
|
||||
fsnFlags map[string]uint32 // Map of watched files to flags used for filter
|
||||
fsnmut sync.Mutex // Protects access to fsnFlags.
|
||||
enFlags map[string]uint32 // Map of watched files to evfilt note flags used in kqueue
|
||||
enmut sync.Mutex // Protects access to enFlags.
|
||||
paths map[int]string // Map of watched paths (key: watch descriptor)
|
||||
finfo map[int]os.FileInfo // Map of file information (isDir, isReg; key: watch descriptor)
|
||||
pmut sync.Mutex // Protects access to paths and finfo.
|
||||
fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events)
|
||||
femut sync.Mutex // Protects access to fileExists.
|
||||
externalWatches map[string]bool // Map of watches added by user of the library.
|
||||
ewmut sync.Mutex // Protects access to externalWatches.
|
||||
Error chan error // Errors are sent on this channel
|
||||
internalEvent chan *FileEvent // Events are queued on this channel
|
||||
Event chan *FileEvent // Events are returned on this channel
|
||||
done chan bool // Channel for sending a "quit message" to the reader goroutine
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
}
|
||||
|
||||
// NewWatcher creates and returns a new kevent instance using kqueue(2)
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
fd, errno := syscall.Kqueue()
|
||||
if fd == -1 {
|
||||
return nil, os.NewSyscallError("kqueue", errno)
|
||||
}
|
||||
w := &Watcher{
|
||||
kq: fd,
|
||||
watches: make(map[string]int),
|
||||
fsnFlags: make(map[string]uint32),
|
||||
enFlags: make(map[string]uint32),
|
||||
paths: make(map[int]string),
|
||||
finfo: make(map[int]os.FileInfo),
|
||||
fileExists: make(map[string]bool),
|
||||
externalWatches: make(map[string]bool),
|
||||
internalEvent: make(chan *FileEvent),
|
||||
Event: make(chan *FileEvent),
|
||||
Error: make(chan error),
|
||||
done: make(chan bool, 1),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
go w.purgeEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Close closes a kevent watcher instance
|
||||
// It sends a message to the reader goroutine to quit and removes all watches
|
||||
// associated with the kevent instance
|
||||
func (w *Watcher) Close() error {
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
w.mu.Unlock()
|
||||
|
||||
// Send "quit" message to the reader goroutine
|
||||
w.done <- true
|
||||
w.wmut.Lock()
|
||||
ws := w.watches
|
||||
w.wmut.Unlock()
|
||||
for path := range ws {
|
||||
w.removeWatch(path)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddWatch adds path to the watched file set.
|
||||
// The flags are interpreted as described in kevent(2).
|
||||
func (w *Watcher) addWatch(path string, flags uint32) error {
|
||||
w.mu.Lock()
|
||||
if w.isClosed {
|
||||
w.mu.Unlock()
|
||||
return errors.New("kevent instance already closed")
|
||||
}
|
||||
w.mu.Unlock()
|
||||
|
||||
watchDir := false
|
||||
|
||||
w.wmut.Lock()
|
||||
watchfd, found := w.watches[path]
|
||||
w.wmut.Unlock()
|
||||
if !found {
|
||||
fi, errstat := os.Lstat(path)
|
||||
if errstat != nil {
|
||||
return errstat
|
||||
}
|
||||
|
||||
// don't watch socket
|
||||
if fi.Mode()&os.ModeSocket == os.ModeSocket {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Follow Symlinks
|
||||
// Unfortunately, Linux can add bogus symlinks to watch list without
|
||||
// issue, and Windows can't do symlinks period (AFAIK). To maintain
|
||||
// consistency, we will act like everything is fine. There will simply
|
||||
// be no file events for broken symlinks.
|
||||
// Hence the returns of nil on errors.
|
||||
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
|
||||
path, err := filepath.EvalSymlinks(path)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
fi, errstat = os.Lstat(path)
|
||||
if errstat != nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
fd, errno := syscall.Open(path, open_FLAGS, 0700)
|
||||
if fd == -1 {
|
||||
return errno
|
||||
}
|
||||
watchfd = fd
|
||||
|
||||
w.wmut.Lock()
|
||||
w.watches[path] = watchfd
|
||||
w.wmut.Unlock()
|
||||
|
||||
w.pmut.Lock()
|
||||
w.paths[watchfd] = path
|
||||
w.finfo[watchfd] = fi
|
||||
w.pmut.Unlock()
|
||||
}
|
||||
// Watch the directory if it has not been watched before.
|
||||
w.pmut.Lock()
|
||||
w.enmut.Lock()
|
||||
if w.finfo[watchfd].IsDir() &&
|
||||
(flags&sys_NOTE_WRITE) == sys_NOTE_WRITE &&
|
||||
(!found || (w.enFlags[path]&sys_NOTE_WRITE) != sys_NOTE_WRITE) {
|
||||
watchDir = true
|
||||
}
|
||||
w.enmut.Unlock()
|
||||
w.pmut.Unlock()
|
||||
|
||||
w.enmut.Lock()
|
||||
w.enFlags[path] = flags
|
||||
w.enmut.Unlock()
|
||||
|
||||
var kbuf [1]syscall.Kevent_t
|
||||
watchEntry := &kbuf[0]
|
||||
watchEntry.Fflags = flags
|
||||
syscall.SetKevent(watchEntry, watchfd, syscall.EVFILT_VNODE, syscall.EV_ADD|syscall.EV_CLEAR)
|
||||
entryFlags := watchEntry.Flags
|
||||
success, errno := syscall.Kevent(w.kq, kbuf[:], nil, nil)
|
||||
if success == -1 {
|
||||
return errno
|
||||
} else if (entryFlags & syscall.EV_ERROR) == syscall.EV_ERROR {
|
||||
return errors.New("kevent add error")
|
||||
}
|
||||
|
||||
if watchDir {
|
||||
errdir := w.watchDirectoryFiles(path)
|
||||
if errdir != nil {
|
||||
return errdir
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Watch adds path to the watched file set, watching all events.
|
||||
func (w *Watcher) watch(path string) error {
|
||||
w.ewmut.Lock()
|
||||
w.externalWatches[path] = true
|
||||
w.ewmut.Unlock()
|
||||
return w.addWatch(path, sys_NOTE_ALLEVENTS)
|
||||
}
|
||||
|
||||
// RemoveWatch removes path from the watched file set.
|
||||
func (w *Watcher) removeWatch(path string) error {
|
||||
w.wmut.Lock()
|
||||
watchfd, ok := w.watches[path]
|
||||
w.wmut.Unlock()
|
||||
if !ok {
|
||||
return errors.New(fmt.Sprintf("can't remove non-existent kevent watch for: %s", path))
|
||||
}
|
||||
var kbuf [1]syscall.Kevent_t
|
||||
watchEntry := &kbuf[0]
|
||||
syscall.SetKevent(watchEntry, watchfd, syscall.EVFILT_VNODE, syscall.EV_DELETE)
|
||||
entryFlags := watchEntry.Flags
|
||||
success, errno := syscall.Kevent(w.kq, kbuf[:], nil, nil)
|
||||
if success == -1 {
|
||||
return os.NewSyscallError("kevent_rm_watch", errno)
|
||||
} else if (entryFlags & syscall.EV_ERROR) == syscall.EV_ERROR {
|
||||
return errors.New("kevent rm error")
|
||||
}
|
||||
syscall.Close(watchfd)
|
||||
w.wmut.Lock()
|
||||
delete(w.watches, path)
|
||||
w.wmut.Unlock()
|
||||
w.enmut.Lock()
|
||||
delete(w.enFlags, path)
|
||||
w.enmut.Unlock()
|
||||
w.pmut.Lock()
|
||||
delete(w.paths, watchfd)
|
||||
fInfo := w.finfo[watchfd]
|
||||
delete(w.finfo, watchfd)
|
||||
w.pmut.Unlock()
|
||||
|
||||
// Find all watched paths that are in this directory that are not external.
|
||||
if fInfo.IsDir() {
|
||||
var pathsToRemove []string
|
||||
w.pmut.Lock()
|
||||
for _, wpath := range w.paths {
|
||||
wdir, _ := filepath.Split(wpath)
|
||||
if filepath.Clean(wdir) == filepath.Clean(path) {
|
||||
w.ewmut.Lock()
|
||||
if !w.externalWatches[wpath] {
|
||||
pathsToRemove = append(pathsToRemove, wpath)
|
||||
}
|
||||
w.ewmut.Unlock()
|
||||
}
|
||||
}
|
||||
w.pmut.Unlock()
|
||||
for _, p := range pathsToRemove {
|
||||
// Since these are internal, not much sense in propagating error
|
||||
// to the user, as that will just confuse them with an error about
|
||||
// a path they did not explicitly watch themselves.
|
||||
w.removeWatch(p)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// readEvents reads from the kqueue file descriptor, converts the
|
||||
// received events into Event objects and sends them via the Event channel
|
||||
func (w *Watcher) readEvents() {
|
||||
var (
|
||||
eventbuf [10]syscall.Kevent_t // Event buffer
|
||||
events []syscall.Kevent_t // Received events
|
||||
twait *syscall.Timespec // Time to block waiting for events
|
||||
n int // Number of events returned from kevent
|
||||
errno error // Syscall errno
|
||||
)
|
||||
events = eventbuf[0:0]
|
||||
twait = new(syscall.Timespec)
|
||||
*twait = syscall.NsecToTimespec(keventWaitTime)
|
||||
|
||||
for {
|
||||
// See if there is a message on the "done" channel
|
||||
var done bool
|
||||
select {
|
||||
case done = <-w.done:
|
||||
default:
|
||||
}
|
||||
|
||||
// If "done" message is received
|
||||
if done {
|
||||
errno := syscall.Close(w.kq)
|
||||
if errno != nil {
|
||||
w.Error <- os.NewSyscallError("close", errno)
|
||||
}
|
||||
close(w.internalEvent)
|
||||
close(w.Error)
|
||||
return
|
||||
}
|
||||
|
||||
// Get new events
|
||||
if len(events) == 0 {
|
||||
n, errno = syscall.Kevent(w.kq, nil, eventbuf[:], twait)
|
||||
|
||||
// EINTR is okay, basically the syscall was interrupted before
|
||||
// timeout expired.
|
||||
if errno != nil && errno != syscall.EINTR {
|
||||
w.Error <- os.NewSyscallError("kevent", errno)
|
||||
continue
|
||||
}
|
||||
|
||||
// Received some events
|
||||
if n > 0 {
|
||||
events = eventbuf[0:n]
|
||||
}
|
||||
}
|
||||
|
||||
// Flush the events we received to the events channel
|
||||
for len(events) > 0 {
|
||||
fileEvent := new(FileEvent)
|
||||
watchEvent := &events[0]
|
||||
fileEvent.mask = uint32(watchEvent.Fflags)
|
||||
w.pmut.Lock()
|
||||
fileEvent.Name = w.paths[int(watchEvent.Ident)]
|
||||
fileInfo := w.finfo[int(watchEvent.Ident)]
|
||||
w.pmut.Unlock()
|
||||
if fileInfo != nil && fileInfo.IsDir() && !fileEvent.IsDelete() {
|
||||
// Double check to make sure the directory exist. This can happen when
|
||||
// we do a rm -fr on a recursively watched folders and we receive a
|
||||
// modification event first but the folder has been deleted and later
|
||||
// receive the delete event
|
||||
if _, err := os.Lstat(fileEvent.Name); os.IsNotExist(err) {
|
||||
// mark is as delete event
|
||||
fileEvent.mask |= sys_NOTE_DELETE
|
||||
}
|
||||
}
|
||||
|
||||
if fileInfo != nil && fileInfo.IsDir() && fileEvent.IsModify() && !fileEvent.IsDelete() {
|
||||
w.sendDirectoryChangeEvents(fileEvent.Name)
|
||||
} else {
|
||||
// Send the event on the events channel
|
||||
w.internalEvent <- fileEvent
|
||||
}
|
||||
|
||||
// Move to next event
|
||||
events = events[1:]
|
||||
|
||||
if fileEvent.IsRename() {
|
||||
w.removeWatch(fileEvent.Name)
|
||||
w.femut.Lock()
|
||||
delete(w.fileExists, fileEvent.Name)
|
||||
w.femut.Unlock()
|
||||
}
|
||||
if fileEvent.IsDelete() {
|
||||
w.removeWatch(fileEvent.Name)
|
||||
w.femut.Lock()
|
||||
delete(w.fileExists, fileEvent.Name)
|
||||
w.femut.Unlock()
|
||||
|
||||
// Look for a file that may have overwritten this
|
||||
// (ie mv f1 f2 will delete f2 then create f2)
|
||||
fileDir, _ := filepath.Split(fileEvent.Name)
|
||||
fileDir = filepath.Clean(fileDir)
|
||||
w.wmut.Lock()
|
||||
_, found := w.watches[fileDir]
|
||||
w.wmut.Unlock()
|
||||
if found {
|
||||
// make sure the directory exist before we watch for changes. When we
|
||||
// do a recursive watch and perform rm -fr, the parent directory might
|
||||
// have gone missing, ignore the missing directory and let the
|
||||
// upcoming delete event remove the watch form the parent folder
|
||||
if _, err := os.Lstat(fileDir); !os.IsNotExist(err) {
|
||||
w.sendDirectoryChangeEvents(fileDir)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Watcher) watchDirectoryFiles(dirPath string) error {
|
||||
// Get all files
|
||||
files, err := ioutil.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Search for new files
|
||||
for _, fileInfo := range files {
|
||||
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||
|
||||
// Inherit fsnFlags from parent directory
|
||||
w.fsnmut.Lock()
|
||||
if flags, found := w.fsnFlags[dirPath]; found {
|
||||
w.fsnFlags[filePath] = flags
|
||||
} else {
|
||||
w.fsnFlags[filePath] = FSN_ALL
|
||||
}
|
||||
w.fsnmut.Unlock()
|
||||
|
||||
if fileInfo.IsDir() == false {
|
||||
// Watch file to mimic linux fsnotify
|
||||
e := w.addWatch(filePath, sys_NOTE_ALLEVENTS)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
} else {
|
||||
// If the user is currently watching directory
|
||||
// we want to preserve the flags used
|
||||
w.enmut.Lock()
|
||||
currFlags, found := w.enFlags[filePath]
|
||||
w.enmut.Unlock()
|
||||
var newFlags uint32 = sys_NOTE_DELETE
|
||||
if found {
|
||||
newFlags |= currFlags
|
||||
}
|
||||
|
||||
// Linux gives deletes if not explicitly watching
|
||||
e := w.addWatch(filePath, newFlags)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
}
|
||||
w.femut.Lock()
|
||||
w.fileExists[filePath] = true
|
||||
w.femut.Unlock()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// sendDirectoryEvents searches the directory for newly created files
|
||||
// and sends them over the event channel. This functionality is to have
|
||||
// the BSD version of fsnotify match linux fsnotify which provides a
|
||||
// create event for files created in a watched directory.
|
||||
func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
|
||||
// Get all files
|
||||
files, err := ioutil.ReadDir(dirPath)
|
||||
if err != nil {
|
||||
w.Error <- err
|
||||
}
|
||||
|
||||
// Search for new files
|
||||
for _, fileInfo := range files {
|
||||
filePath := filepath.Join(dirPath, fileInfo.Name())
|
||||
w.femut.Lock()
|
||||
_, doesExist := w.fileExists[filePath]
|
||||
w.femut.Unlock()
|
||||
if !doesExist {
|
||||
// Inherit fsnFlags from parent directory
|
||||
w.fsnmut.Lock()
|
||||
if flags, found := w.fsnFlags[dirPath]; found {
|
||||
w.fsnFlags[filePath] = flags
|
||||
} else {
|
||||
w.fsnFlags[filePath] = FSN_ALL
|
||||
}
|
||||
w.fsnmut.Unlock()
|
||||
|
||||
// Send create event
|
||||
fileEvent := new(FileEvent)
|
||||
fileEvent.Name = filePath
|
||||
fileEvent.create = true
|
||||
w.internalEvent <- fileEvent
|
||||
}
|
||||
w.femut.Lock()
|
||||
w.fileExists[filePath] = true
|
||||
w.femut.Unlock()
|
||||
}
|
||||
w.watchDirectoryFiles(dirPath)
|
||||
}
|
304
vendor/github.com/howeyc/fsnotify/fsnotify_linux.go
generated
vendored
304
vendor/github.com/howeyc/fsnotify/fsnotify_linux.go
generated
vendored
@ -1,304 +0,0 @@
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
// Options for inotify_init() are not exported
|
||||
// sys_IN_CLOEXEC uint32 = syscall.IN_CLOEXEC
|
||||
// sys_IN_NONBLOCK uint32 = syscall.IN_NONBLOCK
|
||||
|
||||
// Options for AddWatch
|
||||
sys_IN_DONT_FOLLOW uint32 = syscall.IN_DONT_FOLLOW
|
||||
sys_IN_ONESHOT uint32 = syscall.IN_ONESHOT
|
||||
sys_IN_ONLYDIR uint32 = syscall.IN_ONLYDIR
|
||||
|
||||
// The "sys_IN_MASK_ADD" option is not exported, as AddWatch
|
||||
// adds it automatically, if there is already a watch for the given path
|
||||
// sys_IN_MASK_ADD uint32 = syscall.IN_MASK_ADD
|
||||
|
||||
// Events
|
||||
sys_IN_ACCESS uint32 = syscall.IN_ACCESS
|
||||
sys_IN_ALL_EVENTS uint32 = syscall.IN_ALL_EVENTS
|
||||
sys_IN_ATTRIB uint32 = syscall.IN_ATTRIB
|
||||
sys_IN_CLOSE uint32 = syscall.IN_CLOSE
|
||||
sys_IN_CLOSE_NOWRITE uint32 = syscall.IN_CLOSE_NOWRITE
|
||||
sys_IN_CLOSE_WRITE uint32 = syscall.IN_CLOSE_WRITE
|
||||
sys_IN_CREATE uint32 = syscall.IN_CREATE
|
||||
sys_IN_DELETE uint32 = syscall.IN_DELETE
|
||||
sys_IN_DELETE_SELF uint32 = syscall.IN_DELETE_SELF
|
||||
sys_IN_MODIFY uint32 = syscall.IN_MODIFY
|
||||
sys_IN_MOVE uint32 = syscall.IN_MOVE
|
||||
sys_IN_MOVED_FROM uint32 = syscall.IN_MOVED_FROM
|
||||
sys_IN_MOVED_TO uint32 = syscall.IN_MOVED_TO
|
||||
sys_IN_MOVE_SELF uint32 = syscall.IN_MOVE_SELF
|
||||
sys_IN_OPEN uint32 = syscall.IN_OPEN
|
||||
|
||||
sys_AGNOSTIC_EVENTS = sys_IN_MOVED_TO | sys_IN_MOVED_FROM | sys_IN_CREATE | sys_IN_ATTRIB | sys_IN_MODIFY | sys_IN_MOVE_SELF | sys_IN_DELETE | sys_IN_DELETE_SELF
|
||||
|
||||
// Special events
|
||||
sys_IN_ISDIR uint32 = syscall.IN_ISDIR
|
||||
sys_IN_IGNORED uint32 = syscall.IN_IGNORED
|
||||
sys_IN_Q_OVERFLOW uint32 = syscall.IN_Q_OVERFLOW
|
||||
sys_IN_UNMOUNT uint32 = syscall.IN_UNMOUNT
|
||||
)
|
||||
|
||||
type FileEvent struct {
|
||||
mask uint32 // Mask of events
|
||||
cookie uint32 // Unique cookie associating related events (for rename(2))
|
||||
Name string // File name (optional)
|
||||
}
|
||||
|
||||
// IsCreate reports whether the FileEvent was triggered by a creation
|
||||
func (e *FileEvent) IsCreate() bool {
|
||||
return (e.mask&sys_IN_CREATE) == sys_IN_CREATE || (e.mask&sys_IN_MOVED_TO) == sys_IN_MOVED_TO
|
||||
}
|
||||
|
||||
// IsDelete reports whether the FileEvent was triggered by a delete
|
||||
func (e *FileEvent) IsDelete() bool {
|
||||
return (e.mask&sys_IN_DELETE_SELF) == sys_IN_DELETE_SELF || (e.mask&sys_IN_DELETE) == sys_IN_DELETE
|
||||
}
|
||||
|
||||
// IsModify reports whether the FileEvent was triggered by a file modification or attribute change
|
||||
func (e *FileEvent) IsModify() bool {
|
||||
return ((e.mask&sys_IN_MODIFY) == sys_IN_MODIFY || (e.mask&sys_IN_ATTRIB) == sys_IN_ATTRIB)
|
||||
}
|
||||
|
||||
// IsRename reports whether the FileEvent was triggered by a change name
|
||||
func (e *FileEvent) IsRename() bool {
|
||||
return ((e.mask&sys_IN_MOVE_SELF) == sys_IN_MOVE_SELF || (e.mask&sys_IN_MOVED_FROM) == sys_IN_MOVED_FROM)
|
||||
}
|
||||
|
||||
// IsAttrib reports whether the FileEvent was triggered by a change in the file metadata.
|
||||
func (e *FileEvent) IsAttrib() bool {
|
||||
return (e.mask & sys_IN_ATTRIB) == sys_IN_ATTRIB
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
|
||||
flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
|
||||
}
|
||||
|
||||
type Watcher struct {
|
||||
mu sync.Mutex // Map access
|
||||
fd int // File descriptor (as returned by the inotify_init() syscall)
|
||||
watches map[string]*watch // Map of inotify watches (key: path)
|
||||
fsnFlags map[string]uint32 // Map of watched files to flags used for filter
|
||||
fsnmut sync.Mutex // Protects access to fsnFlags.
|
||||
paths map[int]string // Map of watched paths (key: watch descriptor)
|
||||
Error chan error // Errors are sent on this channel
|
||||
internalEvent chan *FileEvent // Events are queued on this channel
|
||||
Event chan *FileEvent // Events are returned on this channel
|
||||
done chan bool // Channel for sending a "quit message" to the reader goroutine
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
}
|
||||
|
||||
// NewWatcher creates and returns a new inotify instance using inotify_init(2)
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
fd, errno := syscall.InotifyInit()
|
||||
if fd == -1 {
|
||||
return nil, os.NewSyscallError("inotify_init", errno)
|
||||
}
|
||||
w := &Watcher{
|
||||
fd: fd,
|
||||
watches: make(map[string]*watch),
|
||||
fsnFlags: make(map[string]uint32),
|
||||
paths: make(map[int]string),
|
||||
internalEvent: make(chan *FileEvent),
|
||||
Event: make(chan *FileEvent),
|
||||
Error: make(chan error),
|
||||
done: make(chan bool, 1),
|
||||
}
|
||||
|
||||
go w.readEvents()
|
||||
go w.purgeEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Close closes an inotify watcher instance
|
||||
// It sends a message to the reader goroutine to quit and removes all watches
|
||||
// associated with the inotify instance
|
||||
func (w *Watcher) Close() error {
|
||||
if w.isClosed {
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
|
||||
// Remove all watches
|
||||
for path := range w.watches {
|
||||
w.RemoveWatch(path)
|
||||
}
|
||||
|
||||
// Send "quit" message to the reader goroutine
|
||||
w.done <- true
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddWatch adds path to the watched file set.
|
||||
// The flags are interpreted as described in inotify_add_watch(2).
|
||||
func (w *Watcher) addWatch(path string, flags uint32) error {
|
||||
if w.isClosed {
|
||||
return errors.New("inotify instance already closed")
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
watchEntry, found := w.watches[path]
|
||||
w.mu.Unlock()
|
||||
if found {
|
||||
watchEntry.flags |= flags
|
||||
flags |= syscall.IN_MASK_ADD
|
||||
}
|
||||
wd, errno := syscall.InotifyAddWatch(w.fd, path, flags)
|
||||
if wd == -1 {
|
||||
return errno
|
||||
}
|
||||
|
||||
w.mu.Lock()
|
||||
w.watches[path] = &watch{wd: uint32(wd), flags: flags}
|
||||
w.paths[wd] = path
|
||||
w.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Watch adds path to the watched file set, watching all events.
|
||||
func (w *Watcher) watch(path string) error {
|
||||
return w.addWatch(path, sys_AGNOSTIC_EVENTS)
|
||||
}
|
||||
|
||||
// RemoveWatch removes path from the watched file set.
|
||||
func (w *Watcher) removeWatch(path string) error {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
watch, ok := w.watches[path]
|
||||
if !ok {
|
||||
return errors.New(fmt.Sprintf("can't remove non-existent inotify watch for: %s", path))
|
||||
}
|
||||
success, errno := syscall.InotifyRmWatch(w.fd, watch.wd)
|
||||
if success == -1 {
|
||||
return os.NewSyscallError("inotify_rm_watch", errno)
|
||||
}
|
||||
delete(w.watches, path)
|
||||
return nil
|
||||
}
|
||||
|
||||
// readEvents reads from the inotify file descriptor, converts the
|
||||
// received events into Event objects and sends them via the Event channel
|
||||
func (w *Watcher) readEvents() {
|
||||
var (
|
||||
buf [syscall.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
|
||||
n int // Number of bytes read with read()
|
||||
errno error // Syscall errno
|
||||
)
|
||||
|
||||
for {
|
||||
// See if there is a message on the "done" channel
|
||||
select {
|
||||
case <-w.done:
|
||||
syscall.Close(w.fd)
|
||||
close(w.internalEvent)
|
||||
close(w.Error)
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
n, errno = syscall.Read(w.fd, buf[:])
|
||||
|
||||
// If EOF is received
|
||||
if n == 0 {
|
||||
syscall.Close(w.fd)
|
||||
close(w.internalEvent)
|
||||
close(w.Error)
|
||||
return
|
||||
}
|
||||
|
||||
if n < 0 {
|
||||
w.Error <- os.NewSyscallError("read", errno)
|
||||
continue
|
||||
}
|
||||
if n < syscall.SizeofInotifyEvent {
|
||||
w.Error <- errors.New("inotify: short read in readEvents()")
|
||||
continue
|
||||
}
|
||||
|
||||
var offset uint32 = 0
|
||||
// We don't know how many events we just read into the buffer
|
||||
// While the offset points to at least one whole event...
|
||||
for offset <= uint32(n-syscall.SizeofInotifyEvent) {
|
||||
// Point "raw" to the event in the buffer
|
||||
raw := (*syscall.InotifyEvent)(unsafe.Pointer(&buf[offset]))
|
||||
event := new(FileEvent)
|
||||
event.mask = uint32(raw.Mask)
|
||||
event.cookie = uint32(raw.Cookie)
|
||||
nameLen := uint32(raw.Len)
|
||||
// If the event happened to the watched directory or the watched file, the kernel
|
||||
// doesn't append the filename to the event, but we would like to always fill the
|
||||
// the "Name" field with a valid filename. We retrieve the path of the watch from
|
||||
// the "paths" map.
|
||||
w.mu.Lock()
|
||||
event.Name = w.paths[int(raw.Wd)]
|
||||
w.mu.Unlock()
|
||||
watchedName := event.Name
|
||||
if nameLen > 0 {
|
||||
// Point "bytes" at the first byte of the filename
|
||||
bytes := (*[syscall.PathMax]byte)(unsafe.Pointer(&buf[offset+syscall.SizeofInotifyEvent]))
|
||||
// The filename is padded with NUL bytes. TrimRight() gets rid of those.
|
||||
event.Name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
|
||||
}
|
||||
|
||||
// Send the events that are not ignored on the events channel
|
||||
if !event.ignoreLinux() {
|
||||
// Setup FSNotify flags (inherit from directory watch)
|
||||
w.fsnmut.Lock()
|
||||
if _, fsnFound := w.fsnFlags[event.Name]; !fsnFound {
|
||||
if fsnFlags, watchFound := w.fsnFlags[watchedName]; watchFound {
|
||||
w.fsnFlags[event.Name] = fsnFlags
|
||||
} else {
|
||||
w.fsnFlags[event.Name] = FSN_ALL
|
||||
}
|
||||
}
|
||||
w.fsnmut.Unlock()
|
||||
|
||||
w.internalEvent <- event
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
offset += syscall.SizeofInotifyEvent + nameLen
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Certain types of events can be "ignored" and not sent over the Event
|
||||
// channel. Such as events marked ignore by the kernel, or MODIFY events
|
||||
// against files that do not exist.
|
||||
func (e *FileEvent) ignoreLinux() bool {
|
||||
// Ignore anything the inotify API says to ignore
|
||||
if e.mask&sys_IN_IGNORED == sys_IN_IGNORED {
|
||||
return true
|
||||
}
|
||||
|
||||
// If the event is not a DELETE or RENAME, the file must exist.
|
||||
// Otherwise the event is ignored.
|
||||
// *Note*: this was put in place because it was seen that a MODIFY
|
||||
// event was sent after the DELETE. This ignores that MODIFY and
|
||||
// assumes a DELETE will come or has come if the file doesn't exist.
|
||||
if !(e.IsDelete() || e.IsRename()) {
|
||||
_, statErr := os.Lstat(e.Name)
|
||||
return os.IsNotExist(statErr)
|
||||
}
|
||||
return false
|
||||
}
|
11
vendor/github.com/howeyc/fsnotify/fsnotify_open_bsd.go
generated
vendored
11
vendor/github.com/howeyc/fsnotify/fsnotify_open_bsd.go
generated
vendored
@ -1,11 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build freebsd openbsd netbsd dragonfly
|
||||
|
||||
package fsnotify
|
||||
|
||||
import "syscall"
|
||||
|
||||
const open_FLAGS = syscall.O_NONBLOCK | syscall.O_RDONLY
|
11
vendor/github.com/howeyc/fsnotify/fsnotify_open_darwin.go
generated
vendored
11
vendor/github.com/howeyc/fsnotify/fsnotify_open_darwin.go
generated
vendored
@ -1,11 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin
|
||||
|
||||
package fsnotify
|
||||
|
||||
import "syscall"
|
||||
|
||||
const open_FLAGS = syscall.O_EVTONLY
|
598
vendor/github.com/howeyc/fsnotify/fsnotify_windows.go
generated
vendored
598
vendor/github.com/howeyc/fsnotify/fsnotify_windows.go
generated
vendored
@ -1,598 +0,0 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build windows
|
||||
|
||||
package fsnotify
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sync"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
// Options for AddWatch
|
||||
sys_FS_ONESHOT = 0x80000000
|
||||
sys_FS_ONLYDIR = 0x1000000
|
||||
|
||||
// Events
|
||||
sys_FS_ACCESS = 0x1
|
||||
sys_FS_ALL_EVENTS = 0xfff
|
||||
sys_FS_ATTRIB = 0x4
|
||||
sys_FS_CLOSE = 0x18
|
||||
sys_FS_CREATE = 0x100
|
||||
sys_FS_DELETE = 0x200
|
||||
sys_FS_DELETE_SELF = 0x400
|
||||
sys_FS_MODIFY = 0x2
|
||||
sys_FS_MOVE = 0xc0
|
||||
sys_FS_MOVED_FROM = 0x40
|
||||
sys_FS_MOVED_TO = 0x80
|
||||
sys_FS_MOVE_SELF = 0x800
|
||||
|
||||
// Special events
|
||||
sys_FS_IGNORED = 0x8000
|
||||
sys_FS_Q_OVERFLOW = 0x4000
|
||||
)
|
||||
|
||||
const (
|
||||
// TODO(nj): Use syscall.ERROR_MORE_DATA from ztypes_windows in Go 1.3+
|
||||
sys_ERROR_MORE_DATA syscall.Errno = 234
|
||||
)
|
||||
|
||||
// Event is the type of the notification messages
|
||||
// received on the watcher's Event channel.
|
||||
type FileEvent struct {
|
||||
mask uint32 // Mask of events
|
||||
cookie uint32 // Unique cookie associating related events (for rename)
|
||||
Name string // File name (optional)
|
||||
}
|
||||
|
||||
// IsCreate reports whether the FileEvent was triggered by a creation
|
||||
func (e *FileEvent) IsCreate() bool { return (e.mask & sys_FS_CREATE) == sys_FS_CREATE }
|
||||
|
||||
// IsDelete reports whether the FileEvent was triggered by a delete
|
||||
func (e *FileEvent) IsDelete() bool {
|
||||
return ((e.mask&sys_FS_DELETE) == sys_FS_DELETE || (e.mask&sys_FS_DELETE_SELF) == sys_FS_DELETE_SELF)
|
||||
}
|
||||
|
||||
// IsModify reports whether the FileEvent was triggered by a file modification or attribute change
|
||||
func (e *FileEvent) IsModify() bool {
|
||||
return ((e.mask&sys_FS_MODIFY) == sys_FS_MODIFY || (e.mask&sys_FS_ATTRIB) == sys_FS_ATTRIB)
|
||||
}
|
||||
|
||||
// IsRename reports whether the FileEvent was triggered by a change name
|
||||
func (e *FileEvent) IsRename() bool {
|
||||
return ((e.mask&sys_FS_MOVE) == sys_FS_MOVE || (e.mask&sys_FS_MOVE_SELF) == sys_FS_MOVE_SELF || (e.mask&sys_FS_MOVED_FROM) == sys_FS_MOVED_FROM || (e.mask&sys_FS_MOVED_TO) == sys_FS_MOVED_TO)
|
||||
}
|
||||
|
||||
// IsAttrib reports whether the FileEvent was triggered by a change in the file metadata.
|
||||
func (e *FileEvent) IsAttrib() bool {
|
||||
return (e.mask & sys_FS_ATTRIB) == sys_FS_ATTRIB
|
||||
}
|
||||
|
||||
const (
|
||||
opAddWatch = iota
|
||||
opRemoveWatch
|
||||
)
|
||||
|
||||
const (
|
||||
provisional uint64 = 1 << (32 + iota)
|
||||
)
|
||||
|
||||
type input struct {
|
||||
op int
|
||||
path string
|
||||
flags uint32
|
||||
reply chan error
|
||||
}
|
||||
|
||||
type inode struct {
|
||||
handle syscall.Handle
|
||||
volume uint32
|
||||
index uint64
|
||||
}
|
||||
|
||||
type watch struct {
|
||||
ov syscall.Overlapped
|
||||
ino *inode // i-number
|
||||
path string // Directory path
|
||||
mask uint64 // Directory itself is being watched with these notify flags
|
||||
names map[string]uint64 // Map of names being watched and their notify flags
|
||||
rename string // Remembers the old name while renaming a file
|
||||
buf [4096]byte
|
||||
}
|
||||
|
||||
type indexMap map[uint64]*watch
|
||||
type watchMap map[uint32]indexMap
|
||||
|
||||
// A Watcher waits for and receives event notifications
|
||||
// for a specific set of files and directories.
|
||||
type Watcher struct {
|
||||
mu sync.Mutex // Map access
|
||||
port syscall.Handle // Handle to completion port
|
||||
watches watchMap // Map of watches (key: i-number)
|
||||
fsnFlags map[string]uint32 // Map of watched files to flags used for filter
|
||||
fsnmut sync.Mutex // Protects access to fsnFlags.
|
||||
input chan *input // Inputs to the reader are sent on this channel
|
||||
internalEvent chan *FileEvent // Events are queued on this channel
|
||||
Event chan *FileEvent // Events are returned on this channel
|
||||
Error chan error // Errors are sent on this channel
|
||||
isClosed bool // Set to true when Close() is first called
|
||||
quit chan chan<- error
|
||||
cookie uint32
|
||||
}
|
||||
|
||||
// NewWatcher creates and returns a Watcher.
|
||||
func NewWatcher() (*Watcher, error) {
|
||||
port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
|
||||
if e != nil {
|
||||
return nil, os.NewSyscallError("CreateIoCompletionPort", e)
|
||||
}
|
||||
w := &Watcher{
|
||||
port: port,
|
||||
watches: make(watchMap),
|
||||
fsnFlags: make(map[string]uint32),
|
||||
input: make(chan *input, 1),
|
||||
Event: make(chan *FileEvent, 50),
|
||||
internalEvent: make(chan *FileEvent),
|
||||
Error: make(chan error),
|
||||
quit: make(chan chan<- error, 1),
|
||||
}
|
||||
go w.readEvents()
|
||||
go w.purgeEvents()
|
||||
return w, nil
|
||||
}
|
||||
|
||||
// Close closes a Watcher.
|
||||
// It sends a message to the reader goroutine to quit and removes all watches
|
||||
// associated with the watcher.
|
||||
func (w *Watcher) Close() error {
|
||||
if w.isClosed {
|
||||
return nil
|
||||
}
|
||||
w.isClosed = true
|
||||
|
||||
// Send "quit" message to the reader goroutine
|
||||
ch := make(chan error)
|
||||
w.quit <- ch
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-ch
|
||||
}
|
||||
|
||||
// AddWatch adds path to the watched file set.
|
||||
func (w *Watcher) AddWatch(path string, flags uint32) error {
|
||||
if w.isClosed {
|
||||
return errors.New("watcher already closed")
|
||||
}
|
||||
in := &input{
|
||||
op: opAddWatch,
|
||||
path: filepath.Clean(path),
|
||||
flags: flags,
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
// Watch adds path to the watched file set, watching all events.
|
||||
func (w *Watcher) watch(path string) error {
|
||||
return w.AddWatch(path, sys_FS_ALL_EVENTS)
|
||||
}
|
||||
|
||||
// RemoveWatch removes path from the watched file set.
|
||||
func (w *Watcher) removeWatch(path string) error {
|
||||
in := &input{
|
||||
op: opRemoveWatch,
|
||||
path: filepath.Clean(path),
|
||||
reply: make(chan error),
|
||||
}
|
||||
w.input <- in
|
||||
if err := w.wakeupReader(); err != nil {
|
||||
return err
|
||||
}
|
||||
return <-in.reply
|
||||
}
|
||||
|
||||
func (w *Watcher) wakeupReader() error {
|
||||
e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
|
||||
if e != nil {
|
||||
return os.NewSyscallError("PostQueuedCompletionStatus", e)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getDir(pathname string) (dir string, err error) {
|
||||
attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
|
||||
if e != nil {
|
||||
return "", os.NewSyscallError("GetFileAttributes", e)
|
||||
}
|
||||
if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
|
||||
dir = pathname
|
||||
} else {
|
||||
dir, _ = filepath.Split(pathname)
|
||||
dir = filepath.Clean(dir)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getIno(path string) (ino *inode, err error) {
|
||||
h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
|
||||
syscall.FILE_LIST_DIRECTORY,
|
||||
syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
|
||||
nil, syscall.OPEN_EXISTING,
|
||||
syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
|
||||
if e != nil {
|
||||
return nil, os.NewSyscallError("CreateFile", e)
|
||||
}
|
||||
var fi syscall.ByHandleFileInformation
|
||||
if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
|
||||
syscall.CloseHandle(h)
|
||||
return nil, os.NewSyscallError("GetFileInformationByHandle", e)
|
||||
}
|
||||
ino = &inode{
|
||||
handle: h,
|
||||
volume: fi.VolumeSerialNumber,
|
||||
index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
|
||||
}
|
||||
return ino, nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) get(ino *inode) *watch {
|
||||
if i := m[ino.volume]; i != nil {
|
||||
return i[ino.index]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (m watchMap) set(ino *inode, watch *watch) {
|
||||
i := m[ino.volume]
|
||||
if i == nil {
|
||||
i = make(indexMap)
|
||||
m[ino.volume] = i
|
||||
}
|
||||
i[ino.index] = watch
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) addWatch(pathname string, flags uint64) error {
|
||||
dir, err := getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if flags&sys_FS_ONLYDIR != 0 && pathname != dir {
|
||||
return nil
|
||||
}
|
||||
ino, err := getIno(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.mu.Lock()
|
||||
watchEntry := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
if watchEntry == nil {
|
||||
if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
|
||||
syscall.CloseHandle(ino.handle)
|
||||
return os.NewSyscallError("CreateIoCompletionPort", e)
|
||||
}
|
||||
watchEntry = &watch{
|
||||
ino: ino,
|
||||
path: dir,
|
||||
names: make(map[string]uint64),
|
||||
}
|
||||
w.mu.Lock()
|
||||
w.watches.set(ino, watchEntry)
|
||||
w.mu.Unlock()
|
||||
flags |= provisional
|
||||
} else {
|
||||
syscall.CloseHandle(ino.handle)
|
||||
}
|
||||
if pathname == dir {
|
||||
watchEntry.mask |= flags
|
||||
} else {
|
||||
watchEntry.names[filepath.Base(pathname)] |= flags
|
||||
}
|
||||
if err = w.startRead(watchEntry); err != nil {
|
||||
return err
|
||||
}
|
||||
if pathname == dir {
|
||||
watchEntry.mask &= ^provisional
|
||||
} else {
|
||||
watchEntry.names[filepath.Base(pathname)] &= ^provisional
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) remWatch(pathname string) error {
|
||||
dir, err := getDir(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ino, err := getIno(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.mu.Lock()
|
||||
watch := w.watches.get(ino)
|
||||
w.mu.Unlock()
|
||||
if watch == nil {
|
||||
return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
|
||||
}
|
||||
if pathname == dir {
|
||||
w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED)
|
||||
watch.mask = 0
|
||||
} else {
|
||||
name := filepath.Base(pathname)
|
||||
w.sendEvent(watch.path+"\\"+name, watch.names[name]&sys_FS_IGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
return w.startRead(watch)
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) deleteWatch(watch *watch) {
|
||||
for name, mask := range watch.names {
|
||||
if mask&provisional == 0 {
|
||||
w.sendEvent(watch.path+"\\"+name, mask&sys_FS_IGNORED)
|
||||
}
|
||||
delete(watch.names, name)
|
||||
}
|
||||
if watch.mask != 0 {
|
||||
if watch.mask&provisional == 0 {
|
||||
w.sendEvent(watch.path, watch.mask&sys_FS_IGNORED)
|
||||
}
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Must run within the I/O thread.
|
||||
func (w *Watcher) startRead(watch *watch) error {
|
||||
if e := syscall.CancelIo(watch.ino.handle); e != nil {
|
||||
w.Error <- os.NewSyscallError("CancelIo", e)
|
||||
w.deleteWatch(watch)
|
||||
}
|
||||
mask := toWindowsFlags(watch.mask)
|
||||
for _, m := range watch.names {
|
||||
mask |= toWindowsFlags(m)
|
||||
}
|
||||
if mask == 0 {
|
||||
if e := syscall.CloseHandle(watch.ino.handle); e != nil {
|
||||
w.Error <- os.NewSyscallError("CloseHandle", e)
|
||||
}
|
||||
w.mu.Lock()
|
||||
delete(w.watches[watch.ino.volume], watch.ino.index)
|
||||
w.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
|
||||
uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
|
||||
if e != nil {
|
||||
err := os.NewSyscallError("ReadDirectoryChanges", e)
|
||||
if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
|
||||
// Watched directory was probably removed
|
||||
if w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF) {
|
||||
if watch.mask&sys_FS_ONESHOT != 0 {
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
err = nil
|
||||
}
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// readEvents reads from the I/O completion port, converts the
|
||||
// received events into Event objects and sends them via the Event channel.
|
||||
// Entry point to the I/O thread.
|
||||
func (w *Watcher) readEvents() {
|
||||
var (
|
||||
n, key uint32
|
||||
ov *syscall.Overlapped
|
||||
)
|
||||
runtime.LockOSThread()
|
||||
|
||||
for {
|
||||
e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
|
||||
watch := (*watch)(unsafe.Pointer(ov))
|
||||
|
||||
if watch == nil {
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
w.mu.Lock()
|
||||
var indexes []indexMap
|
||||
for _, index := range w.watches {
|
||||
indexes = append(indexes, index)
|
||||
}
|
||||
w.mu.Unlock()
|
||||
for _, index := range indexes {
|
||||
for _, watch := range index {
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
}
|
||||
}
|
||||
var err error
|
||||
if e := syscall.CloseHandle(w.port); e != nil {
|
||||
err = os.NewSyscallError("CloseHandle", e)
|
||||
}
|
||||
close(w.internalEvent)
|
||||
close(w.Error)
|
||||
ch <- err
|
||||
return
|
||||
case in := <-w.input:
|
||||
switch in.op {
|
||||
case opAddWatch:
|
||||
in.reply <- w.addWatch(in.path, uint64(in.flags))
|
||||
case opRemoveWatch:
|
||||
in.reply <- w.remWatch(in.path)
|
||||
}
|
||||
default:
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch e {
|
||||
case sys_ERROR_MORE_DATA:
|
||||
if watch == nil {
|
||||
w.Error <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
|
||||
} else {
|
||||
// The i/o succeeded but the buffer is full.
|
||||
// In theory we should be building up a full packet.
|
||||
// In practice we can get away with just carrying on.
|
||||
n = uint32(unsafe.Sizeof(watch.buf))
|
||||
}
|
||||
case syscall.ERROR_ACCESS_DENIED:
|
||||
// Watched directory was probably removed
|
||||
w.sendEvent(watch.path, watch.mask&sys_FS_DELETE_SELF)
|
||||
w.deleteWatch(watch)
|
||||
w.startRead(watch)
|
||||
continue
|
||||
case syscall.ERROR_OPERATION_ABORTED:
|
||||
// CancelIo was called on this handle
|
||||
continue
|
||||
default:
|
||||
w.Error <- os.NewSyscallError("GetQueuedCompletionPort", e)
|
||||
continue
|
||||
case nil:
|
||||
}
|
||||
|
||||
var offset uint32
|
||||
for {
|
||||
if n == 0 {
|
||||
w.internalEvent <- &FileEvent{mask: sys_FS_Q_OVERFLOW}
|
||||
w.Error <- errors.New("short read in readEvents()")
|
||||
break
|
||||
}
|
||||
|
||||
// Point "raw" to the event in the buffer
|
||||
raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
|
||||
buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
|
||||
name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
|
||||
fullname := watch.path + "\\" + name
|
||||
|
||||
var mask uint64
|
||||
switch raw.Action {
|
||||
case syscall.FILE_ACTION_REMOVED:
|
||||
mask = sys_FS_DELETE_SELF
|
||||
case syscall.FILE_ACTION_MODIFIED:
|
||||
mask = sys_FS_MODIFY
|
||||
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
watch.rename = name
|
||||
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
if watch.names[watch.rename] != 0 {
|
||||
watch.names[name] |= watch.names[watch.rename]
|
||||
delete(watch.names, watch.rename)
|
||||
mask = sys_FS_MOVE_SELF
|
||||
}
|
||||
}
|
||||
|
||||
sendNameEvent := func() {
|
||||
if w.sendEvent(fullname, watch.names[name]&mask) {
|
||||
if watch.names[name]&sys_FS_ONESHOT != 0 {
|
||||
delete(watch.names, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
sendNameEvent()
|
||||
}
|
||||
if raw.Action == syscall.FILE_ACTION_REMOVED {
|
||||
w.sendEvent(fullname, watch.names[name]&sys_FS_IGNORED)
|
||||
delete(watch.names, name)
|
||||
}
|
||||
if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
|
||||
if watch.mask&sys_FS_ONESHOT != 0 {
|
||||
watch.mask = 0
|
||||
}
|
||||
}
|
||||
if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
|
||||
fullname = watch.path + "\\" + watch.rename
|
||||
sendNameEvent()
|
||||
}
|
||||
|
||||
// Move to the next event in the buffer
|
||||
if raw.NextEntryOffset == 0 {
|
||||
break
|
||||
}
|
||||
offset += raw.NextEntryOffset
|
||||
|
||||
// Error!
|
||||
if offset >= n {
|
||||
w.Error <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err := w.startRead(watch); err != nil {
|
||||
w.Error <- err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Watcher) sendEvent(name string, mask uint64) bool {
|
||||
if mask == 0 {
|
||||
return false
|
||||
}
|
||||
event := &FileEvent{mask: uint32(mask), Name: name}
|
||||
if mask&sys_FS_MOVE != 0 {
|
||||
if mask&sys_FS_MOVED_FROM != 0 {
|
||||
w.cookie++
|
||||
}
|
||||
event.cookie = w.cookie
|
||||
}
|
||||
select {
|
||||
case ch := <-w.quit:
|
||||
w.quit <- ch
|
||||
case w.Event <- event:
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func toWindowsFlags(mask uint64) uint32 {
|
||||
var m uint32
|
||||
if mask&sys_FS_ACCESS != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
|
||||
}
|
||||
if mask&sys_FS_MODIFY != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
|
||||
}
|
||||
if mask&sys_FS_ATTRIB != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
|
||||
}
|
||||
if mask&(sys_FS_MOVE|sys_FS_CREATE|sys_FS_DELETE) != 0 {
|
||||
m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func toFSnotifyFlags(action uint32) uint64 {
|
||||
switch action {
|
||||
case syscall.FILE_ACTION_ADDED:
|
||||
return sys_FS_CREATE
|
||||
case syscall.FILE_ACTION_REMOVED:
|
||||
return sys_FS_DELETE
|
||||
case syscall.FILE_ACTION_MODIFIED:
|
||||
return sys_FS_MODIFY
|
||||
case syscall.FILE_ACTION_RENAMED_OLD_NAME:
|
||||
return sys_FS_MOVED_FROM
|
||||
case syscall.FILE_ACTION_RENAMED_NEW_NAME:
|
||||
return sys_FS_MOVED_TO
|
||||
}
|
||||
return 0
|
||||
}
|
21
vendor/github.com/mohae/deepcopy/LICENSE
generated
vendored
21
vendor/github.com/mohae/deepcopy/LICENSE
generated
vendored
@ -1,21 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Joel
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
8
vendor/github.com/mohae/deepcopy/README.md
generated
vendored
8
vendor/github.com/mohae/deepcopy/README.md
generated
vendored
@ -1,8 +0,0 @@
|
||||
deepCopy
|
||||
========
|
||||
[![GoDoc](https://godoc.org/github.com/mohae/deepcopy?status.svg)](https://godoc.org/github.com/mohae/deepcopy)[![Build Status](https://travis-ci.org/mohae/deepcopy.png)](https://travis-ci.org/mohae/deepcopy)
|
||||
|
||||
DeepCopy makes deep copies of things: unexported field values are not copied.
|
||||
|
||||
## Usage
|
||||
cpy := deepcopy.Copy(orig)
|
125
vendor/github.com/mohae/deepcopy/deepcopy.go
generated
vendored
125
vendor/github.com/mohae/deepcopy/deepcopy.go
generated
vendored
@ -1,125 +0,0 @@
|
||||
// deepcopy makes deep copies of things. A standard copy will copy the
|
||||
// pointers: deep copy copies the values pointed to. Unexported field
|
||||
// values are not copied.
|
||||
//
|
||||
// Copyright (c)2014-2016, Joel Scoble (github.com/mohae), all rights reserved.
|
||||
// License: MIT, for more details check the included LICENSE file.
|
||||
package deepcopy
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Interface for delegating copy process to type
|
||||
type Interface interface {
|
||||
DeepCopy() interface{}
|
||||
}
|
||||
|
||||
// Iface is an alias to Copy; this exists for backwards compatibility reasons.
|
||||
func Iface(iface interface{}) interface{} {
|
||||
return Copy(iface)
|
||||
}
|
||||
|
||||
// Copy creates a deep copy of whatever is passed to it and returns the copy
|
||||
// in an interface{}. The returned value will need to be asserted to the
|
||||
// correct type.
|
||||
func Copy(src interface{}) interface{} {
|
||||
if src == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Make the interface a reflect.Value
|
||||
original := reflect.ValueOf(src)
|
||||
|
||||
// Make a copy of the same type as the original.
|
||||
cpy := reflect.New(original.Type()).Elem()
|
||||
|
||||
// Recursively copy the original.
|
||||
copyRecursive(original, cpy)
|
||||
|
||||
// Return the copy as an interface.
|
||||
return cpy.Interface()
|
||||
}
|
||||
|
||||
// copyRecursive does the actual copying of the interface. It currently has
|
||||
// limited support for what it can handle. Add as needed.
|
||||
func copyRecursive(original, cpy reflect.Value) {
|
||||
// check for implement deepcopy.Interface
|
||||
if original.CanInterface() {
|
||||
if copier, ok := original.Interface().(Interface); ok {
|
||||
cpy.Set(reflect.ValueOf(copier.DeepCopy()))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// handle according to original's Kind
|
||||
switch original.Kind() {
|
||||
case reflect.Ptr:
|
||||
// Get the actual value being pointed to.
|
||||
originalValue := original.Elem()
|
||||
|
||||
// if it isn't valid, return.
|
||||
if !originalValue.IsValid() {
|
||||
return
|
||||
}
|
||||
cpy.Set(reflect.New(originalValue.Type()))
|
||||
copyRecursive(originalValue, cpy.Elem())
|
||||
|
||||
case reflect.Interface:
|
||||
// If this is a nil, don't do anything
|
||||
if original.IsNil() {
|
||||
return
|
||||
}
|
||||
// Get the value for the interface, not the pointer.
|
||||
originalValue := original.Elem()
|
||||
|
||||
// Get the value by calling Elem().
|
||||
copyValue := reflect.New(originalValue.Type()).Elem()
|
||||
copyRecursive(originalValue, copyValue)
|
||||
cpy.Set(copyValue)
|
||||
|
||||
case reflect.Struct:
|
||||
t, ok := original.Interface().(time.Time)
|
||||
if ok {
|
||||
cpy.Set(reflect.ValueOf(t))
|
||||
return
|
||||
}
|
||||
// Go through each field of the struct and copy it.
|
||||
for i := 0; i < original.NumField(); i++ {
|
||||
// The Type's StructField for a given field is checked to see if StructField.PkgPath
|
||||
// is set to determine if the field is exported or not because CanSet() returns false
|
||||
// for settable fields. I'm not sure why. -mohae
|
||||
if original.Type().Field(i).PkgPath != "" {
|
||||
continue
|
||||
}
|
||||
copyRecursive(original.Field(i), cpy.Field(i))
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
if original.IsNil() {
|
||||
return
|
||||
}
|
||||
// Make a new slice and copy each element.
|
||||
cpy.Set(reflect.MakeSlice(original.Type(), original.Len(), original.Cap()))
|
||||
for i := 0; i < original.Len(); i++ {
|
||||
copyRecursive(original.Index(i), cpy.Index(i))
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
if original.IsNil() {
|
||||
return
|
||||
}
|
||||
cpy.Set(reflect.MakeMap(original.Type()))
|
||||
for _, key := range original.MapKeys() {
|
||||
originalValue := original.MapIndex(key)
|
||||
copyValue := reflect.New(originalValue.Type()).Elem()
|
||||
copyRecursive(originalValue, copyValue)
|
||||
copyKey := Copy(key.Interface())
|
||||
cpy.SetMapIndex(reflect.ValueOf(copyKey), copyValue)
|
||||
}
|
||||
|
||||
default:
|
||||
cpy.Set(original)
|
||||
}
|
||||
}
|
15
vendor/github.com/oschwald/maxminddb-golang/LICENSE
generated
vendored
15
vendor/github.com/oschwald/maxminddb-golang/LICENSE
generated
vendored
@ -1,15 +0,0 @@
|
||||
ISC License
|
||||
|
||||
Copyright (c) 2015, Gregory J. Oschwald <oschwald@gmail.com>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
|
||||
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
|
||||
AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
|
||||
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
|
||||
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
|
||||
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
PERFORMANCE OF THIS SOFTWARE.
|
38
vendor/github.com/oschwald/maxminddb-golang/README.md
generated
vendored
38
vendor/github.com/oschwald/maxminddb-golang/README.md
generated
vendored
@ -1,38 +0,0 @@
|
||||
# MaxMind DB Reader for Go #
|
||||
|
||||
[![Build Status](https://travis-ci.org/oschwald/maxminddb-golang.png?branch=master)](https://travis-ci.org/oschwald/maxminddb-golang)
|
||||
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/4j2f9oep8nnfrmov/branch/master?svg=true)](https://ci.appveyor.com/project/oschwald/maxminddb-golang/branch/master)
|
||||
[![GoDoc](https://godoc.org/github.com/oschwald/maxminddb-golang?status.png)](https://godoc.org/github.com/oschwald/maxminddb-golang)
|
||||
|
||||
This is a Go reader for the MaxMind DB format. Although this can be used to
|
||||
read [GeoLite2](http://dev.maxmind.com/geoip/geoip2/geolite2/) and
|
||||
[GeoIP2](https://www.maxmind.com/en/geoip2-databases) databases,
|
||||
[geoip2](https://github.com/oschwald/geoip2-golang) provides a higher-level
|
||||
API for doing so.
|
||||
|
||||
This is not an official MaxMind API.
|
||||
|
||||
## Installation ##
|
||||
|
||||
```
|
||||
go get github.com/oschwald/maxminddb-golang
|
||||
```
|
||||
|
||||
## Usage ##
|
||||
|
||||
[See GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) for
|
||||
documentation and examples.
|
||||
|
||||
## Examples ##
|
||||
|
||||
See [GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) or
|
||||
`example_test.go` for examples.
|
||||
|
||||
## Contributing ##
|
||||
|
||||
Contributions welcome! Please fork the repository and open a pull request
|
||||
with your changes.
|
||||
|
||||
## License ##
|
||||
|
||||
This is free software, licensed under the ISC License.
|
19
vendor/github.com/oschwald/maxminddb-golang/appveyor.yml
generated
vendored
19
vendor/github.com/oschwald/maxminddb-golang/appveyor.yml
generated
vendored
@ -1,19 +0,0 @@
|
||||
version: "{build}"
|
||||
|
||||
os: Windows Server 2012 R2
|
||||
|
||||
clone_folder: c:\gopath\src\github.com\oschwald\maxminddb-golang
|
||||
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
|
||||
install:
|
||||
- echo %PATH%
|
||||
- echo %GOPATH%
|
||||
- git submodule update --init --recursive
|
||||
- go version
|
||||
- go env
|
||||
- go get -v -t ./...
|
||||
|
||||
build_script:
|
||||
- go test -v ./...
|
721
vendor/github.com/oschwald/maxminddb-golang/decoder.go
generated
vendored
721
vendor/github.com/oschwald/maxminddb-golang/decoder.go
generated
vendored
@ -1,721 +0,0 @@
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type decoder struct {
|
||||
buffer []byte
|
||||
}
|
||||
|
||||
type dataType int
|
||||
|
||||
const (
|
||||
_Extended dataType = iota
|
||||
_Pointer
|
||||
_String
|
||||
_Float64
|
||||
_Bytes
|
||||
_Uint16
|
||||
_Uint32
|
||||
_Map
|
||||
_Int32
|
||||
_Uint64
|
||||
_Uint128
|
||||
_Slice
|
||||
_Container
|
||||
_Marker
|
||||
_Bool
|
||||
_Float32
|
||||
)
|
||||
|
||||
const (
|
||||
// This is the value used in libmaxminddb
|
||||
maximumDataStructureDepth = 512
|
||||
)
|
||||
|
||||
func (d *decoder) decode(offset uint, result reflect.Value, depth int) (uint, error) {
|
||||
if depth > maximumDataStructureDepth {
|
||||
return 0, newInvalidDatabaseError("exceeded maximum data structure depth; database is likely corrupt")
|
||||
}
|
||||
typeNum, size, newOffset, err := d.decodeCtrlData(offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if typeNum != _Pointer && result.Kind() == reflect.Uintptr {
|
||||
result.Set(reflect.ValueOf(uintptr(offset)))
|
||||
return d.nextValueOffset(offset, 1)
|
||||
}
|
||||
return d.decodeFromType(typeNum, size, newOffset, result, depth+1)
|
||||
}
|
||||
|
||||
func (d *decoder) decodeCtrlData(offset uint) (dataType, uint, uint, error) {
|
||||
newOffset := offset + 1
|
||||
if offset >= uint(len(d.buffer)) {
|
||||
return 0, 0, 0, newOffsetError()
|
||||
}
|
||||
ctrlByte := d.buffer[offset]
|
||||
|
||||
typeNum := dataType(ctrlByte >> 5)
|
||||
if typeNum == _Extended {
|
||||
if newOffset >= uint(len(d.buffer)) {
|
||||
return 0, 0, 0, newOffsetError()
|
||||
}
|
||||
typeNum = dataType(d.buffer[newOffset] + 7)
|
||||
newOffset++
|
||||
}
|
||||
|
||||
var size uint
|
||||
size, newOffset, err := d.sizeFromCtrlByte(ctrlByte, newOffset, typeNum)
|
||||
return typeNum, size, newOffset, err
|
||||
}
|
||||
|
||||
func (d *decoder) sizeFromCtrlByte(ctrlByte byte, offset uint, typeNum dataType) (uint, uint, error) {
|
||||
size := uint(ctrlByte & 0x1f)
|
||||
if typeNum == _Extended {
|
||||
return size, offset, nil
|
||||
}
|
||||
|
||||
var bytesToRead uint
|
||||
if size < 29 {
|
||||
return size, offset, nil
|
||||
}
|
||||
|
||||
bytesToRead = size - 28
|
||||
newOffset := offset + bytesToRead
|
||||
if newOffset > uint(len(d.buffer)) {
|
||||
return 0, 0, newOffsetError()
|
||||
}
|
||||
if size == 29 {
|
||||
return 29 + uint(d.buffer[offset]), offset + 1, nil
|
||||
}
|
||||
|
||||
sizeBytes := d.buffer[offset:newOffset]
|
||||
|
||||
switch {
|
||||
case size == 30:
|
||||
size = 285 + uintFromBytes(0, sizeBytes)
|
||||
case size > 30:
|
||||
size = uintFromBytes(0, sizeBytes) + 65821
|
||||
}
|
||||
return size, newOffset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeFromType(
|
||||
dtype dataType,
|
||||
size uint,
|
||||
offset uint,
|
||||
result reflect.Value,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
result = d.indirect(result)
|
||||
|
||||
// For these types, size has a special meaning
|
||||
switch dtype {
|
||||
case _Bool:
|
||||
return d.unmarshalBool(size, offset, result)
|
||||
case _Map:
|
||||
return d.unmarshalMap(size, offset, result, depth)
|
||||
case _Pointer:
|
||||
return d.unmarshalPointer(size, offset, result, depth)
|
||||
case _Slice:
|
||||
return d.unmarshalSlice(size, offset, result, depth)
|
||||
}
|
||||
|
||||
// For the remaining types, size is the byte size
|
||||
if offset+size > uint(len(d.buffer)) {
|
||||
return 0, newOffsetError()
|
||||
}
|
||||
switch dtype {
|
||||
case _Bytes:
|
||||
return d.unmarshalBytes(size, offset, result)
|
||||
case _Float32:
|
||||
return d.unmarshalFloat32(size, offset, result)
|
||||
case _Float64:
|
||||
return d.unmarshalFloat64(size, offset, result)
|
||||
case _Int32:
|
||||
return d.unmarshalInt32(size, offset, result)
|
||||
case _String:
|
||||
return d.unmarshalString(size, offset, result)
|
||||
case _Uint16:
|
||||
return d.unmarshalUint(size, offset, result, 16)
|
||||
case _Uint32:
|
||||
return d.unmarshalUint(size, offset, result, 32)
|
||||
case _Uint64:
|
||||
return d.unmarshalUint(size, offset, result, 64)
|
||||
case _Uint128:
|
||||
return d.unmarshalUint128(size, offset, result)
|
||||
default:
|
||||
return 0, newInvalidDatabaseError("unknown type: %d", dtype)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalBool(size uint, offset uint, result reflect.Value) (uint, error) {
|
||||
if size > 1 {
|
||||
return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (bool size of %v)", size)
|
||||
}
|
||||
value, newOffset, err := d.decodeBool(size, offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
switch result.Kind() {
|
||||
case reflect.Bool:
|
||||
result.SetBool(value)
|
||||
return newOffset, nil
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
// indirect follows pointers and create values as necessary. This is
|
||||
// heavily based on encoding/json as my original version had a subtle
|
||||
// bug. This method should be considered to be licensed under
|
||||
// https://golang.org/LICENSE
|
||||
func (d *decoder) indirect(result reflect.Value) reflect.Value {
|
||||
for {
|
||||
// Load value from interface, but only if the result will be
|
||||
// usefully addressable.
|
||||
if result.Kind() == reflect.Interface && !result.IsNil() {
|
||||
e := result.Elem()
|
||||
if e.Kind() == reflect.Ptr && !e.IsNil() {
|
||||
result = e
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if result.Kind() != reflect.Ptr {
|
||||
break
|
||||
}
|
||||
|
||||
if result.IsNil() {
|
||||
result.Set(reflect.New(result.Type().Elem()))
|
||||
}
|
||||
result = result.Elem()
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
var sliceType = reflect.TypeOf([]byte{})
|
||||
|
||||
func (d *decoder) unmarshalBytes(size uint, offset uint, result reflect.Value) (uint, error) {
|
||||
value, newOffset, err := d.decodeBytes(size, offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
switch result.Kind() {
|
||||
case reflect.Slice:
|
||||
if result.Type() == sliceType {
|
||||
result.SetBytes(value)
|
||||
return newOffset, nil
|
||||
}
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalFloat32(size uint, offset uint, result reflect.Value) (uint, error) {
|
||||
if size != 4 {
|
||||
return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (float32 size of %v)", size)
|
||||
}
|
||||
value, newOffset, err := d.decodeFloat32(size, offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
switch result.Kind() {
|
||||
case reflect.Float32, reflect.Float64:
|
||||
result.SetFloat(float64(value))
|
||||
return newOffset, nil
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalFloat64(size uint, offset uint, result reflect.Value) (uint, error) {
|
||||
|
||||
if size != 8 {
|
||||
return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (float 64 size of %v)", size)
|
||||
}
|
||||
value, newOffset, err := d.decodeFloat64(size, offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
switch result.Kind() {
|
||||
case reflect.Float32, reflect.Float64:
|
||||
if result.OverflowFloat(value) {
|
||||
return 0, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
result.SetFloat(value)
|
||||
return newOffset, nil
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalInt32(size uint, offset uint, result reflect.Value) (uint, error) {
|
||||
if size > 4 {
|
||||
return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (int32 size of %v)", size)
|
||||
}
|
||||
value, newOffset, err := d.decodeInt(size, offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
switch result.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
n := int64(value)
|
||||
if !result.OverflowInt(n) {
|
||||
result.SetInt(n)
|
||||
return newOffset, nil
|
||||
}
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
n := uint64(value)
|
||||
if !result.OverflowUint(n) {
|
||||
result.SetUint(n)
|
||||
return newOffset, nil
|
||||
}
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalMap(
|
||||
size uint,
|
||||
offset uint,
|
||||
result reflect.Value,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
result = d.indirect(result)
|
||||
switch result.Kind() {
|
||||
default:
|
||||
return 0, newUnmarshalTypeError("map", result.Type())
|
||||
case reflect.Struct:
|
||||
return d.decodeStruct(size, offset, result, depth)
|
||||
case reflect.Map:
|
||||
return d.decodeMap(size, offset, result, depth)
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
rv := reflect.ValueOf(make(map[string]interface{}, size))
|
||||
newOffset, err := d.decodeMap(size, offset, rv, depth)
|
||||
result.Set(rv)
|
||||
return newOffset, err
|
||||
}
|
||||
return 0, newUnmarshalTypeError("map", result.Type())
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalPointer(size uint, offset uint, result reflect.Value, depth int) (uint, error) {
|
||||
pointer, newOffset, err := d.decodePointer(size, offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
_, err = d.decode(pointer, result, depth)
|
||||
return newOffset, err
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalSlice(
|
||||
size uint,
|
||||
offset uint,
|
||||
result reflect.Value,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
switch result.Kind() {
|
||||
case reflect.Slice:
|
||||
return d.decodeSlice(size, offset, result, depth)
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
a := []interface{}{}
|
||||
rv := reflect.ValueOf(&a).Elem()
|
||||
newOffset, err := d.decodeSlice(size, offset, rv, depth)
|
||||
result.Set(rv)
|
||||
return newOffset, err
|
||||
}
|
||||
}
|
||||
return 0, newUnmarshalTypeError("array", result.Type())
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalString(size uint, offset uint, result reflect.Value) (uint, error) {
|
||||
value, newOffset, err := d.decodeString(size, offset)
|
||||
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
switch result.Kind() {
|
||||
case reflect.String:
|
||||
result.SetString(value)
|
||||
return newOffset, nil
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
|
||||
}
|
||||
|
||||
func (d *decoder) unmarshalUint(size uint, offset uint, result reflect.Value, uintType uint) (uint, error) {
|
||||
if size > uintType/8 {
|
||||
return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (uint%v size of %v)", uintType, size)
|
||||
}
|
||||
|
||||
value, newOffset, err := d.decodeUint(size, offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
switch result.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
n := int64(value)
|
||||
if !result.OverflowInt(n) {
|
||||
result.SetInt(n)
|
||||
return newOffset, nil
|
||||
}
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
if !result.OverflowUint(value) {
|
||||
result.SetUint(value)
|
||||
return newOffset, nil
|
||||
}
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
var bigIntType = reflect.TypeOf(big.Int{})
|
||||
|
||||
func (d *decoder) unmarshalUint128(size uint, offset uint, result reflect.Value) (uint, error) {
|
||||
if size > 16 {
|
||||
return 0, newInvalidDatabaseError("the MaxMind DB file's data section contains bad data (uint128 size of %v)", size)
|
||||
}
|
||||
value, newOffset, err := d.decodeUint128(size, offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
switch result.Kind() {
|
||||
case reflect.Struct:
|
||||
if result.Type() == bigIntType {
|
||||
result.Set(reflect.ValueOf(*value))
|
||||
return newOffset, nil
|
||||
}
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
func (d *decoder) decodeBool(size uint, offset uint) (bool, uint, error) {
|
||||
return size != 0, offset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeBytes(size uint, offset uint) ([]byte, uint, error) {
|
||||
newOffset := offset + size
|
||||
bytes := make([]byte, size)
|
||||
copy(bytes, d.buffer[offset:newOffset])
|
||||
return bytes, newOffset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeFloat64(size uint, offset uint) (float64, uint, error) {
|
||||
newOffset := offset + size
|
||||
bits := binary.BigEndian.Uint64(d.buffer[offset:newOffset])
|
||||
return math.Float64frombits(bits), newOffset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeFloat32(size uint, offset uint) (float32, uint, error) {
|
||||
newOffset := offset + size
|
||||
bits := binary.BigEndian.Uint32(d.buffer[offset:newOffset])
|
||||
return math.Float32frombits(bits), newOffset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeInt(size uint, offset uint) (int, uint, error) {
|
||||
newOffset := offset + size
|
||||
var val int32
|
||||
for _, b := range d.buffer[offset:newOffset] {
|
||||
val = (val << 8) | int32(b)
|
||||
}
|
||||
return int(val), newOffset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeMap(
|
||||
size uint,
|
||||
offset uint,
|
||||
result reflect.Value,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
if result.IsNil() {
|
||||
result.Set(reflect.MakeMap(result.Type()))
|
||||
}
|
||||
|
||||
for i := uint(0); i < size; i++ {
|
||||
var key []byte
|
||||
var err error
|
||||
key, offset, err = d.decodeKey(offset)
|
||||
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
value := reflect.New(result.Type().Elem())
|
||||
offset, err = d.decode(offset, value, depth)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
result.SetMapIndex(reflect.ValueOf(string(key)), value.Elem())
|
||||
}
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodePointer(
|
||||
size uint,
|
||||
offset uint,
|
||||
) (uint, uint, error) {
|
||||
pointerSize := ((size >> 3) & 0x3) + 1
|
||||
newOffset := offset + pointerSize
|
||||
if newOffset > uint(len(d.buffer)) {
|
||||
return 0, 0, newOffsetError()
|
||||
}
|
||||
pointerBytes := d.buffer[offset:newOffset]
|
||||
var prefix uint
|
||||
if pointerSize == 4 {
|
||||
prefix = 0
|
||||
} else {
|
||||
prefix = uint(size & 0x7)
|
||||
}
|
||||
unpacked := uintFromBytes(prefix, pointerBytes)
|
||||
|
||||
var pointerValueOffset uint
|
||||
switch pointerSize {
|
||||
case 1:
|
||||
pointerValueOffset = 0
|
||||
case 2:
|
||||
pointerValueOffset = 2048
|
||||
case 3:
|
||||
pointerValueOffset = 526336
|
||||
case 4:
|
||||
pointerValueOffset = 0
|
||||
}
|
||||
|
||||
pointer := unpacked + pointerValueOffset
|
||||
|
||||
return pointer, newOffset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeSlice(
|
||||
size uint,
|
||||
offset uint,
|
||||
result reflect.Value,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
result.Set(reflect.MakeSlice(result.Type(), int(size), int(size)))
|
||||
for i := 0; i < int(size); i++ {
|
||||
var err error
|
||||
offset, err = d.decode(offset, result.Index(i), depth)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeString(size uint, offset uint) (string, uint, error) {
|
||||
newOffset := offset + size
|
||||
return string(d.buffer[offset:newOffset]), newOffset, nil
|
||||
}
|
||||
|
||||
type fieldsType struct {
|
||||
namedFields map[string]int
|
||||
anonymousFields []int
|
||||
}
|
||||
|
||||
var (
|
||||
fieldMap = map[reflect.Type]*fieldsType{}
|
||||
fieldMapMu sync.RWMutex
|
||||
)
|
||||
|
||||
func (d *decoder) decodeStruct(
|
||||
size uint,
|
||||
offset uint,
|
||||
result reflect.Value,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
resultType := result.Type()
|
||||
|
||||
fieldMapMu.RLock()
|
||||
fields, ok := fieldMap[resultType]
|
||||
fieldMapMu.RUnlock()
|
||||
if !ok {
|
||||
numFields := resultType.NumField()
|
||||
namedFields := make(map[string]int, numFields)
|
||||
var anonymous []int
|
||||
for i := 0; i < numFields; i++ {
|
||||
field := resultType.Field(i)
|
||||
|
||||
fieldName := field.Name
|
||||
if tag := field.Tag.Get("maxminddb"); tag != "" {
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
fieldName = tag
|
||||
}
|
||||
if field.Anonymous {
|
||||
anonymous = append(anonymous, i)
|
||||
continue
|
||||
}
|
||||
namedFields[fieldName] = i
|
||||
}
|
||||
fieldMapMu.Lock()
|
||||
fields = &fieldsType{namedFields, anonymous}
|
||||
fieldMap[resultType] = fields
|
||||
fieldMapMu.Unlock()
|
||||
}
|
||||
|
||||
// This fills in embedded structs
|
||||
for _, i := range fields.anonymousFields {
|
||||
_, err := d.unmarshalMap(size, offset, result.Field(i), depth)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
// This handles named fields
|
||||
for i := uint(0); i < size; i++ {
|
||||
var (
|
||||
err error
|
||||
key []byte
|
||||
)
|
||||
key, offset, err = d.decodeKey(offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
// The string() does not create a copy due to this compiler
|
||||
// optimization: https://github.com/golang/go/issues/3512
|
||||
j, ok := fields.namedFields[string(key)]
|
||||
if !ok {
|
||||
offset, err = d.nextValueOffset(offset, 1)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
offset, err = d.decode(offset, result.Field(j), depth)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeUint(size uint, offset uint) (uint64, uint, error) {
|
||||
newOffset := offset + size
|
||||
bytes := d.buffer[offset:newOffset]
|
||||
|
||||
var val uint64
|
||||
for _, b := range bytes {
|
||||
val = (val << 8) | uint64(b)
|
||||
}
|
||||
return val, newOffset, nil
|
||||
}
|
||||
|
||||
func (d *decoder) decodeUint128(size uint, offset uint) (*big.Int, uint, error) {
|
||||
newOffset := offset + size
|
||||
val := new(big.Int)
|
||||
val.SetBytes(d.buffer[offset:newOffset])
|
||||
|
||||
return val, newOffset, nil
|
||||
}
|
||||
|
||||
func uintFromBytes(prefix uint, uintBytes []byte) uint {
|
||||
val := prefix
|
||||
for _, b := range uintBytes {
|
||||
val = (val << 8) | uint(b)
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// decodeKey decodes a map key into []byte slice. We use a []byte so that we
|
||||
// can take advantage of https://github.com/golang/go/issues/3512 to avoid
|
||||
// copying the bytes when decoding a struct. Previously, we achieved this by
|
||||
// using unsafe.
|
||||
func (d *decoder) decodeKey(offset uint) ([]byte, uint, error) {
|
||||
typeNum, size, dataOffset, err := d.decodeCtrlData(offset)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if typeNum == _Pointer {
|
||||
pointer, ptrOffset, err := d.decodePointer(size, dataOffset)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
key, _, err := d.decodeKey(pointer)
|
||||
return key, ptrOffset, err
|
||||
}
|
||||
if typeNum != _String {
|
||||
return nil, 0, newInvalidDatabaseError("unexpected type when decoding string: %v", typeNum)
|
||||
}
|
||||
newOffset := dataOffset + size
|
||||
if newOffset > uint(len(d.buffer)) {
|
||||
return nil, 0, newOffsetError()
|
||||
}
|
||||
return d.buffer[dataOffset:newOffset], newOffset, nil
|
||||
}
|
||||
|
||||
// This function is used to skip ahead to the next value without decoding
|
||||
// the one at the offset passed in. The size bits have different meanings for
|
||||
// different data types
|
||||
func (d *decoder) nextValueOffset(offset uint, numberToSkip uint) (uint, error) {
|
||||
if numberToSkip == 0 {
|
||||
return offset, nil
|
||||
}
|
||||
typeNum, size, offset, err := d.decodeCtrlData(offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
switch typeNum {
|
||||
case _Pointer:
|
||||
_, offset, err = d.decodePointer(size, offset)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
case _Map:
|
||||
numberToSkip += 2 * size
|
||||
case _Slice:
|
||||
numberToSkip += size
|
||||
case _Bool:
|
||||
default:
|
||||
offset += size
|
||||
}
|
||||
return d.nextValueOffset(offset, numberToSkip-1)
|
||||
}
|
42
vendor/github.com/oschwald/maxminddb-golang/errors.go
generated
vendored
42
vendor/github.com/oschwald/maxminddb-golang/errors.go
generated
vendored
@ -1,42 +0,0 @@
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// InvalidDatabaseError is returned when the database contains invalid data
// and cannot be parsed.
type InvalidDatabaseError struct {
	message string
}

// newOffsetError reports an attempt to read past the end of the database.
func newOffsetError() InvalidDatabaseError {
	return InvalidDatabaseError{message: "unexpected end of database"}
}

// newInvalidDatabaseError builds an InvalidDatabaseError from a printf-style
// format string and arguments.
func newInvalidDatabaseError(format string, args ...interface{}) InvalidDatabaseError {
	return InvalidDatabaseError{message: fmt.Sprintf(format, args...)}
}

// Error implements the error interface.
func (e InvalidDatabaseError) Error() string {
	return e.message
}
|
||||
|
||||
// UnmarshalTypeError is returned when the value in the database cannot be
|
||||
// assigned to the specified data type.
|
||||
type UnmarshalTypeError struct {
|
||||
Value string // stringified copy of the database value that caused the error
|
||||
Type reflect.Type // type of the value that could not be assign to
|
||||
}
|
||||
|
||||
func newUnmarshalTypeError(value interface{}, rType reflect.Type) UnmarshalTypeError {
|
||||
return UnmarshalTypeError{
|
||||
Value: fmt.Sprintf("%v", value),
|
||||
Type: rType,
|
||||
}
|
||||
}
|
||||
|
||||
func (e UnmarshalTypeError) Error() string {
|
||||
return fmt.Sprintf("maxminddb: cannot unmarshal %s into type %s", e.Value, e.Type.String())
|
||||
}
|
15
vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go
generated
vendored
15
vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go
generated
vendored
@ -1,15 +0,0 @@
|
||||
// +build !windows,!appengine
|
||||
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// mmap maps length bytes of the file referenced by fd into memory as a
// read-only, shared mapping and returns the mapped byte slice.
func mmap(fd int, length int) (data []byte, err error) {
	return unix.Mmap(fd, 0, length, unix.PROT_READ, unix.MAP_SHARED)
}

// munmap releases a mapping previously created by mmap.
func munmap(b []byte) (err error) {
	return unix.Munmap(b)
}
|
85
vendor/github.com/oschwald/maxminddb-golang/mmap_windows.go
generated
vendored
85
vendor/github.com/oschwald/maxminddb-golang/mmap_windows.go
generated
vendored
@ -1,85 +0,0 @@
|
||||
// +build windows,!appengine
|
||||
|
||||
package maxminddb
|
||||
|
||||
// Windows support largely borrowed from mmap-go.
|
||||
//
|
||||
// Copyright 2011 Evan Shaw. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"reflect"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// memoryMap is a []byte whose backing storage is a Windows file-mapping
// view rather than Go-allocated memory.
type memoryMap []byte

// Windows
// handleLock guards handleMap, which remembers the file-mapping handle for
// each mapped base address so munmap can close it later.
var handleLock sync.Mutex
var handleMap = map[uintptr]windows.Handle{}

// mmap maps length bytes of the file referenced by fd into memory as a
// read-only view and returns it as a byte slice.
func mmap(fd int, length int) (data []byte, err error) {
	h, errno := windows.CreateFileMapping(windows.Handle(fd), nil,
		uint32(windows.PAGE_READONLY), 0, uint32(length), nil)
	if h == 0 {
		return nil, os.NewSyscallError("CreateFileMapping", errno)
	}

	addr, errno := windows.MapViewOfFile(h, uint32(windows.FILE_MAP_READ), 0,
		0, uintptr(length))
	if addr == 0 {
		return nil, os.NewSyscallError("MapViewOfFile", errno)
	}
	// Remember the mapping handle so munmap can find and close it.
	handleLock.Lock()
	handleMap[addr] = h
	handleLock.Unlock()

	// Point a slice header directly at the mapped view.
	m := memoryMap{}
	dh := m.header()
	dh.Data = addr
	dh.Len = length
	dh.Cap = dh.Len

	return m, nil
}

// header exposes the slice header so mmap/munmap can manipulate the
// backing pointer directly.
func (m *memoryMap) header() *reflect.SliceHeader {
	return (*reflect.SliceHeader)(unsafe.Pointer(m))
}

// flush writes any dirty pages of the view back to disk. os.NewSyscallError
// returns nil when the underlying call succeeded.
func flush(addr, len uintptr) error {
	errno := windows.FlushViewOfFile(addr, len)
	return os.NewSyscallError("FlushViewOfFile", errno)
}

// munmap unmaps the view at b's base address and closes the file-mapping
// handle recorded by mmap.
func munmap(b []byte) (err error) {
	m := memoryMap(b)
	dh := m.header()

	addr := dh.Data
	length := uintptr(dh.Len)

	// NOTE(review): the flush error is deliberately ignored here; the
	// mapping is opened read-only above, so there should be nothing to
	// write back — confirm before changing.
	flush(addr, length)
	err = windows.UnmapViewOfFile(addr)
	if err != nil {
		return err
	}

	handleLock.Lock()
	defer handleLock.Unlock()
	handle, ok := handleMap[addr]
	if !ok {
		// should be impossible; we would've errored above
		return errors.New("unknown base address")
	}
	delete(handleMap, addr)

	e := windows.CloseHandle(windows.Handle(handle))
	return os.NewSyscallError("CloseHandle", e)
}
|
259
vendor/github.com/oschwald/maxminddb-golang/reader.go
generated
vendored
259
vendor/github.com/oschwald/maxminddb-golang/reader.go
generated
vendored
@ -1,259 +0,0 @@
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
const (
	// NotFound is returned by LookupOffset when a matched root record offset
	// cannot be found.
	NotFound = ^uintptr(0)

	// dataSectionSeparatorSize is the number of bytes separating the search
	// tree from the data section.
	dataSectionSeparatorSize = 16
)

// metadataStartMarker marks the beginning of the metadata section near the
// end of a MaxMind DB file; FromBytes locates it by searching backwards.
var metadataStartMarker = []byte("\xAB\xCD\xEFMaxMind.com")

// Reader holds the data corresponding to the MaxMind DB file. Its only public
// field is Metadata, which contains the metadata from the MaxMind DB file.
type Reader struct {
	hasMappedFile bool // whether buffer is an OS memory mapping (set by Open)
	buffer []byte // the entire database file
	decoder decoder // decoder positioned over the data section only
	Metadata Metadata
	ipv4Start uint // node at which IPv4 lookups start within an IPv6 tree
}

// Metadata holds the metadata decoded from the MaxMind DB file. In particular
// it has the format version, the build time as Unix epoch time, the database
// type and description, the IP version supported, and a slice of the natural
// languages included.
type Metadata struct {
	BinaryFormatMajorVersion uint `maxminddb:"binary_format_major_version"`
	BinaryFormatMinorVersion uint `maxminddb:"binary_format_minor_version"`
	BuildEpoch uint `maxminddb:"build_epoch"`
	DatabaseType string `maxminddb:"database_type"`
	Description map[string]string `maxminddb:"description"`
	IPVersion uint `maxminddb:"ip_version"`
	Languages []string `maxminddb:"languages"`
	NodeCount uint `maxminddb:"node_count"`
	RecordSize uint `maxminddb:"record_size"`
}
|
||||
|
||||
// FromBytes takes a byte slice corresponding to a MaxMind DB file and returns
|
||||
// a Reader structure or an error.
|
||||
func FromBytes(buffer []byte) (*Reader, error) {
|
||||
metadataStart := bytes.LastIndex(buffer, metadataStartMarker)
|
||||
|
||||
if metadataStart == -1 {
|
||||
return nil, newInvalidDatabaseError("error opening database: invalid MaxMind DB file")
|
||||
}
|
||||
|
||||
metadataStart += len(metadataStartMarker)
|
||||
metadataDecoder := decoder{buffer[metadataStart:]}
|
||||
|
||||
var metadata Metadata
|
||||
|
||||
rvMetdata := reflect.ValueOf(&metadata)
|
||||
_, err := metadataDecoder.decode(0, rvMetdata, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
searchTreeSize := metadata.NodeCount * metadata.RecordSize / 4
|
||||
dataSectionStart := searchTreeSize + dataSectionSeparatorSize
|
||||
dataSectionEnd := uint(metadataStart - len(metadataStartMarker))
|
||||
if dataSectionStart > dataSectionEnd {
|
||||
return nil, newInvalidDatabaseError("the MaxMind DB contains invalid metadata")
|
||||
}
|
||||
d := decoder{
|
||||
buffer[searchTreeSize+dataSectionSeparatorSize : metadataStart-len(metadataStartMarker)],
|
||||
}
|
||||
|
||||
reader := &Reader{
|
||||
buffer: buffer,
|
||||
decoder: d,
|
||||
Metadata: metadata,
|
||||
ipv4Start: 0,
|
||||
}
|
||||
|
||||
reader.ipv4Start, err = reader.startNode()
|
||||
|
||||
return reader, err
|
||||
}
|
||||
|
||||
func (r *Reader) startNode() (uint, error) {
|
||||
if r.Metadata.IPVersion != 6 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
nodeCount := r.Metadata.NodeCount
|
||||
|
||||
node := uint(0)
|
||||
var err error
|
||||
for i := 0; i < 96 && node < nodeCount; i++ {
|
||||
node, err = r.readNode(node, 0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return node, err
|
||||
}
|
||||
|
||||
// Lookup takes an IP address as a net.IP structure and a pointer to the
|
||||
// result value to Decode into.
|
||||
func (r *Reader) Lookup(ipAddress net.IP, result interface{}) error {
|
||||
if r.buffer == nil {
|
||||
return errors.New("cannot call Lookup on a closed database")
|
||||
}
|
||||
pointer, err := r.lookupPointer(ipAddress)
|
||||
if pointer == 0 || err != nil {
|
||||
return err
|
||||
}
|
||||
return r.retrieveData(pointer, result)
|
||||
}
|
||||
|
||||
// LookupOffset maps an argument net.IP to a corresponding record offset in the
|
||||
// database. NotFound is returned if no such record is found, and a record may
|
||||
// otherwise be extracted by passing the returned offset to Decode. LookupOffset
|
||||
// is an advanced API, which exists to provide clients with a means to cache
|
||||
// previously-decoded records.
|
||||
func (r *Reader) LookupOffset(ipAddress net.IP) (uintptr, error) {
|
||||
if r.buffer == nil {
|
||||
return 0, errors.New("cannot call LookupOffset on a closed database")
|
||||
}
|
||||
pointer, err := r.lookupPointer(ipAddress)
|
||||
if pointer == 0 || err != nil {
|
||||
return NotFound, err
|
||||
}
|
||||
return r.resolveDataPointer(pointer)
|
||||
}
|
||||
|
||||
// Decode the record at |offset| into |result|. The result value pointed to
// must be a data value that corresponds to a record in the database. This may
// include a struct representation of the data, a map capable of holding the
// data or an empty interface{} value.
//
// If result is a pointer to a struct, the struct need not include a field
// for every value that may be in the database. If a field is not present in
// the structure, the decoder will not decode that field, reducing the time
// required to decode the record.
//
// As a special case, a struct field of type uintptr will be used to capture
// the offset of the value. Decode may later be used to extract the stored
// value from the offset. MaxMind DBs are highly normalized: for example in
// the City database, all records of the same country will reference a
// single representative record for that country. This uintptr behavior allows
// clients to leverage this normalization in their own sub-record caching.
//
// Decode returns an error if the Reader has already been closed.
func (r *Reader) Decode(offset uintptr, result interface{}) error {
	if r.buffer == nil {
		return errors.New("cannot call Decode on a closed database")
	}
	return r.decode(offset, result)
}
|
||||
|
||||
func (r *Reader) decode(offset uintptr, result interface{}) error {
|
||||
rv := reflect.ValueOf(result)
|
||||
if rv.Kind() != reflect.Ptr || rv.IsNil() {
|
||||
return errors.New("result param must be a pointer")
|
||||
}
|
||||
|
||||
_, err := r.decoder.decode(uint(offset), reflect.ValueOf(result), 0)
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *Reader) lookupPointer(ipAddress net.IP) (uint, error) {
|
||||
if ipAddress == nil {
|
||||
return 0, errors.New("ipAddress passed to Lookup cannot be nil")
|
||||
}
|
||||
|
||||
ipV4Address := ipAddress.To4()
|
||||
if ipV4Address != nil {
|
||||
ipAddress = ipV4Address
|
||||
}
|
||||
if len(ipAddress) == 16 && r.Metadata.IPVersion == 4 {
|
||||
return 0, fmt.Errorf("error looking up '%s': you attempted to look up an IPv6 address in an IPv4-only database", ipAddress.String())
|
||||
}
|
||||
|
||||
return r.findAddressInTree(ipAddress)
|
||||
}
|
||||
|
||||
func (r *Reader) findAddressInTree(ipAddress net.IP) (uint, error) {
|
||||
|
||||
bitCount := uint(len(ipAddress) * 8)
|
||||
|
||||
var node uint
|
||||
if bitCount == 32 {
|
||||
node = r.ipv4Start
|
||||
}
|
||||
|
||||
nodeCount := r.Metadata.NodeCount
|
||||
|
||||
for i := uint(0); i < bitCount && node < nodeCount; i++ {
|
||||
bit := uint(1) & (uint(ipAddress[i>>3]) >> (7 - (i % 8)))
|
||||
|
||||
var err error
|
||||
node, err = r.readNode(node, bit)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
if node == nodeCount {
|
||||
// Record is empty
|
||||
return 0, nil
|
||||
} else if node > nodeCount {
|
||||
return node, nil
|
||||
}
|
||||
|
||||
return 0, newInvalidDatabaseError("invalid node in search tree")
|
||||
}
|
||||
|
||||
// readNode returns the record value at the given index (0 = left, 1 = right)
// of search-tree node nodeNumber. Records are packed at 24, 28, or 32 bits
// depending on Metadata.RecordSize.
func (r *Reader) readNode(nodeNumber uint, index uint) (uint, error) {
	RecordSize := r.Metadata.RecordSize

	// Each node holds two records, so a node occupies RecordSize/4 bytes.
	baseOffset := nodeNumber * RecordSize / 4

	var nodeBytes []byte
	var prefix uint
	switch RecordSize {
	case 24:
		offset := baseOffset + index*3
		nodeBytes = r.buffer[offset : offset+3]
	case 28:
		// The middle (4th) byte of the 7-byte node carries an extra
		// nibble for each record: the high nibble for index 0, the low
		// nibble for index 1.
		prefix = uint(r.buffer[baseOffset+3])
		if index != 0 {
			prefix &= 0x0F
		} else {
			prefix = (0xF0 & prefix) >> 4
		}
		offset := baseOffset + index*4
		nodeBytes = r.buffer[offset : offset+3]
	case 32:
		offset := baseOffset + index*4
		nodeBytes = r.buffer[offset : offset+4]
	default:
		return 0, newInvalidDatabaseError("unknown record size: %d", RecordSize)
	}
	return uintFromBytes(prefix, nodeBytes), nil
}
|
||||
|
||||
// retrieveData resolves a search-tree pointer to a data-section offset and
// decodes the record found there into result.
func (r *Reader) retrieveData(pointer uint, result interface{}) error {
	offset, err := r.resolveDataPointer(pointer)
	if err != nil {
		return err
	}
	return r.decode(offset, result)
}
|
||||
|
||||
func (r *Reader) resolveDataPointer(pointer uint) (uintptr, error) {
|
||||
var resolved = uintptr(pointer - r.Metadata.NodeCount - dataSectionSeparatorSize)
|
||||
|
||||
if resolved > uintptr(len(r.buffer)) {
|
||||
return 0, newInvalidDatabaseError("the MaxMind DB file's search tree is corrupt")
|
||||
}
|
||||
return resolved, nil
|
||||
}
|
28
vendor/github.com/oschwald/maxminddb-golang/reader_appengine.go
generated
vendored
28
vendor/github.com/oschwald/maxminddb-golang/reader_appengine.go
generated
vendored
@ -1,28 +0,0 @@
|
||||
// +build appengine
|
||||
|
||||
package maxminddb
|
||||
|
||||
import "io/ioutil"
|
||||
|
||||
// Open takes a string path to a MaxMind DB file and returns a Reader
|
||||
// structure or an error. The database file is opened using a memory map,
|
||||
// except on Google App Engine where mmap is not supported; there the database
|
||||
// is loaded into memory. Use the Close method on the Reader object to return
|
||||
// the resources to the system.
|
||||
func Open(file string) (*Reader, error) {
|
||||
bytes, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return FromBytes(bytes)
|
||||
}
|
||||
|
||||
// Close unmaps the database file from virtual memory and returns the
|
||||
// resources to the system. If called on a Reader opened using FromBytes
|
||||
// or Open on Google App Engine, this method sets the underlying buffer
|
||||
// to nil, returning the resources to the system.
|
||||
func (r *Reader) Close() error {
|
||||
r.buffer = nil
|
||||
return nil
|
||||
}
|
63
vendor/github.com/oschwald/maxminddb-golang/reader_other.go
generated
vendored
63
vendor/github.com/oschwald/maxminddb-golang/reader_other.go
generated
vendored
@ -1,63 +0,0 @@
|
||||
// +build !appengine
|
||||
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// Open takes a string path to a MaxMind DB file and returns a Reader
|
||||
// structure or an error. The database file is opened using a memory map,
|
||||
// except on Google App Engine where mmap is not supported; there the database
|
||||
// is loaded into memory. Use the Close method on the Reader object to return
|
||||
// the resources to the system.
|
||||
func Open(file string) (*Reader, error) {
|
||||
mapFile, err := os.Open(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if rerr := mapFile.Close(); rerr != nil {
|
||||
err = rerr
|
||||
}
|
||||
}()
|
||||
|
||||
stats, err := mapFile.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fileSize := int(stats.Size())
|
||||
mmap, err := mmap(int(mapFile.Fd()), fileSize)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reader, err := FromBytes(mmap)
|
||||
if err != nil {
|
||||
if err2 := munmap(mmap); err2 != nil {
|
||||
// failing to unmap the file is probably the more severe error
|
||||
return nil, err2
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reader.hasMappedFile = true
|
||||
runtime.SetFinalizer(reader, (*Reader).Close)
|
||||
return reader, err
|
||||
}
|
||||
|
||||
// Close unmaps the database file from virtual memory and returns the
|
||||
// resources to the system. If called on a Reader opened using FromBytes
|
||||
// or Open on Google App Engine, this method does nothing.
|
||||
func (r *Reader) Close() error {
|
||||
var err error
|
||||
if r.hasMappedFile {
|
||||
runtime.SetFinalizer(r, nil)
|
||||
r.hasMappedFile = false
|
||||
err = munmap(r.buffer)
|
||||
}
|
||||
r.buffer = nil
|
||||
return err
|
||||
}
|
108
vendor/github.com/oschwald/maxminddb-golang/traverse.go
generated
vendored
108
vendor/github.com/oschwald/maxminddb-golang/traverse.go
generated
vendored
@ -1,108 +0,0 @@
|
||||
package maxminddb
|
||||
|
||||
import "net"
|
||||
|
||||
// Internal structure used to keep track of nodes we still need to visit.
type netNode struct {
	ip net.IP // network address accumulated so far
	bit uint // number of prefix bits fixed so far
	pointer uint // search-tree node index or data pointer
}

// Networks represents a set of subnets that we are iterating over.
type Networks struct {
	reader *Reader
	nodes []netNode // Nodes we still have to visit.
	lastNode netNode // node for the most recent successful Next
	err error // first error encountered during traversal, surfaced by Err
}
|
||||
|
||||
// Networks returns an iterator that can be used to traverse all networks in
|
||||
// the database.
|
||||
//
|
||||
// Please note that a MaxMind DB may map IPv4 networks into several locations
|
||||
// in in an IPv6 database. This iterator will iterate over all of these
|
||||
// locations separately.
|
||||
func (r *Reader) Networks() *Networks {
|
||||
s := 4
|
||||
if r.Metadata.IPVersion == 6 {
|
||||
s = 16
|
||||
}
|
||||
return &Networks{
|
||||
reader: r,
|
||||
nodes: []netNode{
|
||||
{
|
||||
ip: make(net.IP, s),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Next prepares the next network for reading with the Network method. It
// returns true if there is another network to be processed and false if there
// are no more networks or if there is an error.
func (n *Networks) Next() bool {
	// Depth-first traversal of the search tree using an explicit stack
	// (n.nodes): pop a node, descend its left children, pushing each right
	// child for later.
	for len(n.nodes) > 0 {
		node := n.nodes[len(n.nodes)-1]
		n.nodes = n.nodes[:len(n.nodes)-1]

		for {
			if node.pointer < n.reader.Metadata.NodeCount {
				// Interior node: build the right-branch address by
				// setting the current bit, push it, then continue
				// down the left branch.
				ipRight := make(net.IP, len(node.ip))
				copy(ipRight, node.ip)
				if len(ipRight) <= int(node.bit>>3) {
					n.err = newInvalidDatabaseError(
						"invalid search tree at %v/%v", ipRight, node.bit)
					return false
				}
				ipRight[node.bit>>3] |= 1 << (7 - (node.bit % 8))

				rightPointer, err := n.reader.readNode(node.pointer, 1)
				if err != nil {
					n.err = err
					return false
				}

				node.bit++
				n.nodes = append(n.nodes, netNode{
					pointer: rightPointer,
					ip:      ipRight,
					bit:     node.bit,
				})

				node.pointer, err = n.reader.readNode(node.pointer, 0)
				if err != nil {
					n.err = err
					return false
				}

			} else if node.pointer > n.reader.Metadata.NodeCount {
				// Data pointer: this node is a network with a record.
				n.lastNode = node
				return true
			} else {
				// pointer == NodeCount: empty record; abandon this
				// branch and pop the next stacked node.
				break
			}
		}
	}

	return false
}
|
||||
|
||||
// Network returns the current network or an error if there is a problem
|
||||
// decoding the data for the network. It takes a pointer to a result value to
|
||||
// decode the network's data into.
|
||||
func (n *Networks) Network(result interface{}) (*net.IPNet, error) {
|
||||
if err := n.reader.retrieveData(n.lastNode.pointer, result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &net.IPNet{
|
||||
IP: n.lastNode.ip,
|
||||
Mask: net.CIDRMask(int(n.lastNode.bit), len(n.lastNode.ip)*8),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Err returns an error, if any, that was encountered during iteration.
// It is set by Next, which returns false when a traversal error occurs.
func (n *Networks) Err() error {
	return n.err
}
|
185
vendor/github.com/oschwald/maxminddb-golang/verifier.go
generated
vendored
185
vendor/github.com/oschwald/maxminddb-golang/verifier.go
generated
vendored
@ -1,185 +0,0 @@
|
||||
package maxminddb
|
||||
|
||||
import "reflect"
|
||||
|
||||
type verifier struct {
|
||||
reader *Reader
|
||||
}
|
||||
|
||||
// Verify checks that the database is valid. It validates the search tree,
|
||||
// the data section, and the metadata section. This verifier is stricter than
|
||||
// the specification and may return errors on databases that are readable.
|
||||
func (r *Reader) Verify() error {
|
||||
v := verifier{r}
|
||||
if err := v.verifyMetadata(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return v.verifyDatabase()
|
||||
}
|
||||
|
||||
// verifyMetadata checks each metadata field against the values the MaxMind
// DB format allows, returning a descriptive error for the first field that
// fails. Checks run in a fixed order, so earlier failures mask later ones.
func (v *verifier) verifyMetadata() error {
	metadata := v.reader.Metadata

	if metadata.BinaryFormatMajorVersion != 2 {
		return testError(
			"binary_format_major_version",
			2,
			metadata.BinaryFormatMajorVersion,
		)
	}

	if metadata.BinaryFormatMinorVersion != 0 {
		return testError(
			"binary_format_minor_version",
			0,
			metadata.BinaryFormatMinorVersion,
		)
	}

	if metadata.DatabaseType == "" {
		return testError(
			"database_type",
			"non-empty string",
			metadata.DatabaseType,
		)
	}

	if len(metadata.Description) == 0 {
		return testError(
			"description",
			"non-empty slice",
			metadata.Description,
		)
	}

	if metadata.IPVersion != 4 && metadata.IPVersion != 6 {
		return testError(
			"ip_version",
			"4 or 6",
			metadata.IPVersion,
		)
	}

	// Only these three record widths exist in the format.
	if metadata.RecordSize != 24 &&
		metadata.RecordSize != 28 &&
		metadata.RecordSize != 32 {
		return testError(
			"record_size",
			"24, 28, or 32",
			metadata.RecordSize,
		)
	}

	if metadata.NodeCount == 0 {
		return testError(
			"node_count",
			"positive integer",
			metadata.NodeCount,
		)
	}
	return nil
}
|
||||
|
||||
func (v *verifier) verifyDatabase() error {
|
||||
offsets, err := v.verifySearchTree()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := v.verifyDataSectionSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return v.verifyDataSection(offsets)
|
||||
}
|
||||
|
||||
func (v *verifier) verifySearchTree() (map[uint]bool, error) {
|
||||
offsets := make(map[uint]bool)
|
||||
|
||||
it := v.reader.Networks()
|
||||
for it.Next() {
|
||||
offset, err := v.reader.resolveDataPointer(it.lastNode.pointer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
offsets[uint(offset)] = true
|
||||
}
|
||||
if err := it.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return offsets, nil
|
||||
}
|
||||
|
||||
func (v *verifier) verifyDataSectionSeparator() error {
|
||||
separatorStart := v.reader.Metadata.NodeCount * v.reader.Metadata.RecordSize / 4
|
||||
|
||||
separator := v.reader.buffer[separatorStart : separatorStart+dataSectionSeparatorSize]
|
||||
|
||||
for _, b := range separator {
|
||||
if b != 0 {
|
||||
return newInvalidDatabaseError("unexpected byte in data separator: %v", separator)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyDataSection decodes every value in the data section sequentially and
// checks that the values exactly match the offsets referenced by the search
// tree: no undecodable bytes, nothing unreferenced, and no tree pointers
// that never appear as a decoded value. The offsets map is consumed.
func (v *verifier) verifyDataSection(offsets map[uint]bool) error {
	pointerCount := len(offsets)

	decoder := v.reader.decoder

	var offset uint
	bufferLen := uint(len(decoder.buffer))
	for offset < bufferLen {
		var data interface{}
		rv := reflect.ValueOf(&data)
		newOffset, err := decoder.decode(offset, rv, 0)
		if err != nil {
			return newInvalidDatabaseError("received decoding error (%v) at offset of %v", err, offset)
		}
		// A non-advancing decode would loop forever; treat it as corrupt.
		if newOffset <= offset {
			return newInvalidDatabaseError("data section offset unexpectedly went from %v to %v", offset, newOffset)
		}

		pointer := offset

		// Every decoded value must be referenced by the search tree.
		if _, ok := offsets[pointer]; ok {
			delete(offsets, pointer)
		} else {
			return newInvalidDatabaseError("found data (%v) at %v that the search tree does not point to", data, pointer)
		}

		offset = newOffset
	}

	if offset != bufferLen {
		return newInvalidDatabaseError(
			"unexpected data at the end of the data section (last offset: %v, end: %v)",
			offset,
			bufferLen,
		)
	}

	// Anything left over was referenced by the tree but never decoded.
	if len(offsets) != 0 {
		return newInvalidDatabaseError(
			"found %v pointers (of %v) in the search tree that we did not see in the data section",
			len(offsets),
			pointerCount,
		)
	}
	return nil
}
|
||||
|
||||
func testError(
|
||||
field string,
|
||||
expected interface{},
|
||||
actual interface{},
|
||||
) error {
|
||||
return newInvalidDatabaseError(
|
||||
"%v - Expected: %v Actual: %v",
|
||||
field,
|
||||
expected,
|
||||
actual,
|
||||
)
|
||||
}
|
24
vendor/vendor.json
vendored
24
vendor/vendor.json
vendored
@ -32,12 +32,6 @@
|
||||
"revision": "e24eb225f15679bbe54f91bfa7da3b00e59b9768",
|
||||
"revisionTime": "2019-02-18T06:46:05Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "hp2pna9yEn9hemIjc7asalxL2Qs=",
|
||||
"path": "github.com/apilayer/freegeoip",
|
||||
"revision": "3f942d1392f6439bda0f67b3c650ce468ebdba8e",
|
||||
"revisionTime": "2018-07-02T11:14:01Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "USkefO0g1U9mr+8hagv3fpSkrxg=",
|
||||
"path": "github.com/aristanetworks/goarista/monotime",
|
||||
@ -176,12 +170,6 @@
|
||||
"revision": "0a025b7e63adc15a622f29b0b2c4c3848243bbf6",
|
||||
"revisionTime": "2016-08-13T22:13:03Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "ZxzYc1JwJ3U6kZbw/KGuPko5lSY=",
|
||||
"path": "github.com/howeyc/fsnotify",
|
||||
"revision": "f0c08ee9c60704c1879025f2ae0ff3e000082c13",
|
||||
"revisionTime": "2015-10-03T19:46:02Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "f55gR+6YClh0i/FOhdy66SOUiwY=",
|
||||
"path": "github.com/huin/goupnp",
|
||||
@ -286,12 +274,6 @@
|
||||
"revision": "ce7b0b5c7b45a81508558cd1dba6bb1e4ddb51bb",
|
||||
"revisionTime": "2018-04-08T05:53:51Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "2jsbDTvwxafPp7FJjJ8IIFlTLjs=",
|
||||
"path": "github.com/mohae/deepcopy",
|
||||
"revision": "c48cc78d482608239f6c4c92a4abd87eb8761c90",
|
||||
"revisionTime": "2017-09-29T03:49:55Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "FYM/8R2CqS6PSNAoKl6X5gNJ20A=",
|
||||
"path": "github.com/naoina/toml",
|
||||
@ -328,12 +310,6 @@
|
||||
"revision": "bd9c3193394760d98b2fa6ebb2291f0cd1d06a7d",
|
||||
"revisionTime": "2018-06-06T20:41:48Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "a1WxG0wMDGFnjojQghwu1i1SDhk=",
|
||||
"path": "github.com/oschwald/maxminddb-golang",
|
||||
"revision": "277d39ecb83edd90f26a1fb450ab7e710faa203f",
|
||||
"revisionTime": "2018-08-19T23:01:43Z"
|
||||
},
|
||||
{
|
||||
"checksumSHA1": "Se195FlZ160eaEk/uVx4KdTPSxU=",
|
||||
"path": "github.com/pborman/uuid",
|
||||
|
Loading…
Reference in New Issue
Block a user