forked from cerc-io/plugeth
accounts/abi/bind, cmd/abigen: Go API generator around an EVM ABI
parent 75c86f8646
commit 72826bb5ad

8	Godeps/Godeps.json	(generated)
@@ -286,6 +286,14 @@
 		{
 			"ImportPath": "golang.org/x/text/transform",
 			"Rev": "09761194ac5034a97b2bfad4f5b896b0ac350b3e"
 		},
+		{
+			"ImportPath": "golang.org/x/tools/go/ast/astutil",
+			"Rev": "758728c4b28cfbac299730969ef8f655c4761283"
+		},
+		{
+			"ImportPath": "golang.org/x/tools/imports",
+			"Rev": "758728c4b28cfbac299730969ef8f655c4761283"
+		},
 		{
 			"ImportPath": "gopkg.in/check.v1",
 			"Rev": "4f90aeace3a26ad7021961c297b22c42160c7b25"
27	Godeps/_workspace/src/golang.org/x/tools/LICENSE	(generated, vendored, new file)
@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22	Godeps/_workspace/src/golang.org/x/tools/PATENTS	(generated, vendored, new file)
@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)

"This implementation" means the copyrightable works distributed by
Google as part of the Go project.

Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
624	Godeps/_workspace/src/golang.org/x/tools/go/ast/astutil/enclosing.go	(generated, vendored, new file)
@@ -0,0 +1,624 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package astutil
|
||||
|
||||
// This file defines utilities for working with source positions.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// PathEnclosingInterval returns the node that encloses the source
|
||||
// interval [start, end), and all its ancestors up to the AST root.
|
||||
//
|
||||
// The definition of "enclosing" used by this function considers
|
||||
// additional whitespace abutting a node to be enclosed by it.
|
||||
// In this example:
|
||||
//
|
||||
// z := x + y // add them
|
||||
// <-A->
|
||||
// <----B----->
|
||||
//
|
||||
// the ast.BinaryExpr(+) node is considered to enclose interval B
|
||||
// even though its [Pos()..End()) is actually only interval A.
|
||||
// This behaviour makes user interfaces more tolerant of imperfect
|
||||
// input.
|
||||
//
|
||||
// This function treats tokens as nodes, though they are not included
|
||||
// in the result. e.g. PathEnclosingInterval("+") returns the
|
||||
// enclosing ast.BinaryExpr("x + y").
|
||||
//
|
||||
// If start==end, the 1-char interval following start is used instead.
|
||||
//
|
||||
// The 'exact' result is true if the interval contains only path[0]
|
||||
// and perhaps some adjacent whitespace. It is false if the interval
|
||||
// overlaps multiple children of path[0], or if it contains only
|
||||
// interior whitespace of path[0].
|
||||
// In this example:
|
||||
//
|
||||
// z := x + y // add them
|
||||
// <--C--> <---E-->
|
||||
// ^
|
||||
// D
|
||||
//
|
||||
// intervals C, D and E are inexact. C is contained by the
|
||||
// z-assignment statement, because it spans three of its children (:=,
|
||||
// x, +). So too is the 1-char interval D, because it contains only
|
||||
// interior whitespace of the assignment. E is considered interior
|
||||
// whitespace of the BlockStmt containing the assignment.
|
||||
//
|
||||
// Precondition: [start, end) both lie within the same file as root.
|
||||
// TODO(adonovan): return (nil, false) in this case and remove precond.
|
||||
// Requires FileSet; see loader.tokenFileContainsPos.
|
||||
//
|
||||
// Postcondition: path is never nil; it always contains at least 'root'.
|
||||
//
|
||||
func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) {
|
||||
// fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging
|
||||
|
||||
// Precondition: node.[Pos..End) and adjoining whitespace contain [start, end).
|
||||
var visit func(node ast.Node) bool
|
||||
visit = func(node ast.Node) bool {
|
||||
path = append(path, node)
|
||||
|
||||
nodePos := node.Pos()
|
||||
nodeEnd := node.End()
|
||||
|
||||
// fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging
|
||||
|
||||
// Intersect [start, end) with interval of node.
|
||||
if start < nodePos {
|
||||
start = nodePos
|
||||
}
|
||||
if end > nodeEnd {
|
||||
end = nodeEnd
|
||||
}
|
||||
|
||||
// Find sole child that contains [start, end).
|
||||
children := childrenOf(node)
|
||||
l := len(children)
|
||||
for i, child := range children {
|
||||
// [childPos, childEnd) is unaugmented interval of child.
|
||||
childPos := child.Pos()
|
||||
childEnd := child.End()
|
||||
|
||||
// [augPos, augEnd) is whitespace-augmented interval of child.
|
||||
augPos := childPos
|
||||
augEnd := childEnd
|
||||
if i > 0 {
|
||||
augPos = children[i-1].End() // start of preceding whitespace
|
||||
}
|
||||
if i < l-1 {
|
||||
nextChildPos := children[i+1].Pos()
|
||||
// Does [start, end) lie between child and next child?
|
||||
if start >= augEnd && end <= nextChildPos {
|
||||
return false // inexact match
|
||||
}
|
||||
augEnd = nextChildPos // end of following whitespace
|
||||
}
|
||||
|
||||
// fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n",
|
||||
// i, augPos, augEnd, start, end) // debugging
|
||||
|
||||
// Does augmented child strictly contain [start, end)?
|
||||
if augPos <= start && end <= augEnd {
|
||||
_, isToken := child.(tokenNode)
|
||||
return isToken || visit(child)
|
||||
}
|
||||
|
||||
// Does [start, end) overlap multiple children?
|
||||
// i.e. left-augmented child contains start
|
||||
// but LR-augmented child does not contain end.
|
||||
if start < childEnd && end > augEnd {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// No single child contained [start, end),
|
||||
// so node is the result. Is it exact?
|
||||
|
||||
// (It's tempting to put this condition before the
|
||||
// child loop, but it gives the wrong result in the
|
||||
// case where a node (e.g. ExprStmt) and its sole
|
||||
// child have equal intervals.)
|
||||
if start == nodePos && end == nodeEnd {
|
||||
return true // exact match
|
||||
}
|
||||
|
||||
return false // inexact: overlaps multiple children
|
||||
}
|
||||
|
||||
if start > end {
|
||||
start, end = end, start
|
||||
}
|
||||
|
||||
if start < root.End() && end > root.Pos() {
|
||||
if start == end {
|
||||
end = start + 1 // empty interval => interval of size 1
|
||||
}
|
||||
exact = visit(root)
|
||||
|
||||
// Reverse the path:
|
||||
for i, l := 0, len(path); i < l/2; i++ {
|
||||
path[i], path[l-1-i] = path[l-1-i], path[i]
|
||||
}
|
||||
} else {
|
||||
// Selection lies within whitespace preceding the
|
||||
// first (or following the last) declaration in the file.
|
||||
// The result nonetheless always includes the ast.File.
|
||||
path = append(path, root)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// tokenNode is a dummy implementation of ast.Node for a single token.
|
||||
// They are used transiently by PathEnclosingInterval but never escape
|
||||
// this package.
|
||||
//
|
||||
type tokenNode struct {
|
||||
pos token.Pos
|
||||
end token.Pos
|
||||
}
|
||||
|
||||
func (n tokenNode) Pos() token.Pos {
|
||||
return n.pos
|
||||
}
|
||||
|
||||
func (n tokenNode) End() token.Pos {
|
||||
return n.end
|
||||
}
|
||||
|
||||
func tok(pos token.Pos, len int) ast.Node {
|
||||
return tokenNode{pos, pos + token.Pos(len)}
|
||||
}
|
||||
|
||||
// childrenOf returns the direct non-nil children of ast.Node n.
|
||||
// It may include fake ast.Node implementations for bare tokens.
|
||||
// it is not safe to call (e.g.) ast.Walk on such nodes.
|
||||
//
|
||||
func childrenOf(n ast.Node) []ast.Node {
|
||||
var children []ast.Node
|
||||
|
||||
// First add nodes for all true subtrees.
|
||||
ast.Inspect(n, func(node ast.Node) bool {
|
||||
if node == n { // push n
|
||||
return true // recur
|
||||
}
|
||||
if node != nil { // push child
|
||||
children = append(children, node)
|
||||
}
|
||||
return false // no recursion
|
||||
})
|
||||
|
||||
// Then add fake Nodes for bare tokens.
|
||||
switch n := n.(type) {
|
||||
case *ast.ArrayType:
|
||||
children = append(children,
|
||||
tok(n.Lbrack, len("[")),
|
||||
tok(n.Elt.End(), len("]")))
|
||||
|
||||
case *ast.AssignStmt:
|
||||
children = append(children,
|
||||
tok(n.TokPos, len(n.Tok.String())))
|
||||
|
||||
case *ast.BasicLit:
|
||||
children = append(children,
|
||||
tok(n.ValuePos, len(n.Value)))
|
||||
|
||||
case *ast.BinaryExpr:
|
||||
children = append(children, tok(n.OpPos, len(n.Op.String())))
|
||||
|
||||
case *ast.BlockStmt:
|
||||
children = append(children,
|
||||
tok(n.Lbrace, len("{")),
|
||||
tok(n.Rbrace, len("}")))
|
||||
|
||||
case *ast.BranchStmt:
|
||||
children = append(children,
|
||||
tok(n.TokPos, len(n.Tok.String())))
|
||||
|
||||
case *ast.CallExpr:
|
||||
children = append(children,
|
||||
tok(n.Lparen, len("(")),
|
||||
tok(n.Rparen, len(")")))
|
||||
if n.Ellipsis != 0 {
|
||||
children = append(children, tok(n.Ellipsis, len("...")))
|
||||
}
|
||||
|
||||
case *ast.CaseClause:
|
||||
if n.List == nil {
|
||||
children = append(children,
|
||||
tok(n.Case, len("default")))
|
||||
} else {
|
||||
children = append(children,
|
||||
tok(n.Case, len("case")))
|
||||
}
|
||||
children = append(children, tok(n.Colon, len(":")))
|
||||
|
||||
case *ast.ChanType:
|
||||
switch n.Dir {
|
||||
case ast.RECV:
|
||||
children = append(children, tok(n.Begin, len("<-chan")))
|
||||
case ast.SEND:
|
||||
children = append(children, tok(n.Begin, len("chan<-")))
|
||||
case ast.RECV | ast.SEND:
|
||||
children = append(children, tok(n.Begin, len("chan")))
|
||||
}
|
||||
|
||||
case *ast.CommClause:
|
||||
if n.Comm == nil {
|
||||
children = append(children,
|
||||
tok(n.Case, len("default")))
|
||||
} else {
|
||||
children = append(children,
|
||||
tok(n.Case, len("case")))
|
||||
}
|
||||
children = append(children, tok(n.Colon, len(":")))
|
||||
|
||||
case *ast.Comment:
|
||||
// nop
|
||||
|
||||
case *ast.CommentGroup:
|
||||
// nop
|
||||
|
||||
case *ast.CompositeLit:
|
||||
children = append(children,
|
||||
tok(n.Lbrace, len("{")),
|
||||
tok(n.Rbrace, len("{")))
|
||||
|
||||
case *ast.DeclStmt:
|
||||
// nop
|
||||
|
||||
case *ast.DeferStmt:
|
||||
children = append(children,
|
||||
tok(n.Defer, len("defer")))
|
||||
|
||||
case *ast.Ellipsis:
|
||||
children = append(children,
|
||||
tok(n.Ellipsis, len("...")))
|
||||
|
||||
case *ast.EmptyStmt:
|
||||
// nop
|
||||
|
||||
case *ast.ExprStmt:
|
||||
// nop
|
||||
|
||||
case *ast.Field:
|
||||
// TODO(adonovan): Field.{Doc,Comment,Tag}?
|
||||
|
||||
case *ast.FieldList:
|
||||
children = append(children,
|
||||
tok(n.Opening, len("(")),
|
||||
tok(n.Closing, len(")")))
|
||||
|
||||
case *ast.File:
|
||||
// TODO test: Doc
|
||||
children = append(children,
|
||||
tok(n.Package, len("package")))
|
||||
|
||||
case *ast.ForStmt:
|
||||
children = append(children,
|
||||
tok(n.For, len("for")))
|
||||
|
||||
case *ast.FuncDecl:
|
||||
// TODO(adonovan): FuncDecl.Comment?
|
||||
|
||||
// Uniquely, FuncDecl breaks the invariant that
|
||||
// preorder traversal yields tokens in lexical order:
|
||||
// in fact, FuncDecl.Recv precedes FuncDecl.Type.Func.
|
||||
//
|
||||
// As a workaround, we inline the case for FuncType
|
||||
// here and order things correctly.
|
||||
//
|
||||
children = nil // discard ast.Walk(FuncDecl) info subtrees
|
||||
children = append(children, tok(n.Type.Func, len("func")))
|
||||
if n.Recv != nil {
|
||||
children = append(children, n.Recv)
|
||||
}
|
||||
children = append(children, n.Name)
|
||||
if n.Type.Params != nil {
|
||||
children = append(children, n.Type.Params)
|
||||
}
|
||||
if n.Type.Results != nil {
|
||||
children = append(children, n.Type.Results)
|
||||
}
|
||||
if n.Body != nil {
|
||||
children = append(children, n.Body)
|
||||
}
|
||||
|
||||
case *ast.FuncLit:
|
||||
// nop
|
||||
|
||||
case *ast.FuncType:
|
||||
if n.Func != 0 {
|
||||
children = append(children,
|
||||
tok(n.Func, len("func")))
|
||||
}
|
||||
|
||||
case *ast.GenDecl:
|
||||
children = append(children,
|
||||
tok(n.TokPos, len(n.Tok.String())))
|
||||
if n.Lparen != 0 {
|
||||
children = append(children,
|
||||
tok(n.Lparen, len("(")),
|
||||
tok(n.Rparen, len(")")))
|
||||
}
|
||||
|
||||
case *ast.GoStmt:
|
||||
children = append(children,
|
||||
tok(n.Go, len("go")))
|
||||
|
||||
case *ast.Ident:
|
||||
children = append(children,
|
||||
tok(n.NamePos, len(n.Name)))
|
||||
|
||||
case *ast.IfStmt:
|
||||
children = append(children,
|
||||
tok(n.If, len("if")))
|
||||
|
||||
case *ast.ImportSpec:
|
||||
// TODO(adonovan): ImportSpec.{Doc,EndPos}?
|
||||
|
||||
case *ast.IncDecStmt:
|
||||
children = append(children,
|
||||
tok(n.TokPos, len(n.Tok.String())))
|
||||
|
||||
case *ast.IndexExpr:
|
||||
children = append(children,
|
||||
tok(n.Lbrack, len("{")),
|
||||
tok(n.Rbrack, len("}")))
|
||||
|
||||
case *ast.InterfaceType:
|
||||
children = append(children,
|
||||
tok(n.Interface, len("interface")))
|
||||
|
||||
case *ast.KeyValueExpr:
|
||||
children = append(children,
|
||||
tok(n.Colon, len(":")))
|
||||
|
||||
case *ast.LabeledStmt:
|
||||
children = append(children,
|
||||
tok(n.Colon, len(":")))
|
||||
|
||||
case *ast.MapType:
|
||||
children = append(children,
|
||||
tok(n.Map, len("map")))
|
||||
|
||||
case *ast.ParenExpr:
|
||||
children = append(children,
|
||||
tok(n.Lparen, len("(")),
|
||||
tok(n.Rparen, len(")")))
|
||||
|
||||
case *ast.RangeStmt:
|
||||
children = append(children,
|
||||
tok(n.For, len("for")),
|
||||
tok(n.TokPos, len(n.Tok.String())))
|
||||
|
||||
case *ast.ReturnStmt:
|
||||
children = append(children,
|
||||
tok(n.Return, len("return")))
|
||||
|
||||
case *ast.SelectStmt:
|
||||
children = append(children,
|
||||
tok(n.Select, len("select")))
|
||||
|
||||
case *ast.SelectorExpr:
|
||||
// nop
|
||||
|
||||
case *ast.SendStmt:
|
||||
children = append(children,
|
||||
tok(n.Arrow, len("<-")))
|
||||
|
||||
case *ast.SliceExpr:
|
||||
children = append(children,
|
||||
tok(n.Lbrack, len("[")),
|
||||
tok(n.Rbrack, len("]")))
|
||||
|
||||
case *ast.StarExpr:
|
||||
children = append(children, tok(n.Star, len("*")))
|
||||
|
||||
case *ast.StructType:
|
||||
children = append(children, tok(n.Struct, len("struct")))
|
||||
|
||||
case *ast.SwitchStmt:
|
||||
children = append(children, tok(n.Switch, len("switch")))
|
||||
|
||||
case *ast.TypeAssertExpr:
|
||||
children = append(children,
|
||||
tok(n.Lparen-1, len(".")),
|
||||
tok(n.Lparen, len("(")),
|
||||
tok(n.Rparen, len(")")))
|
||||
|
||||
case *ast.TypeSpec:
|
||||
// TODO(adonovan): TypeSpec.{Doc,Comment}?
|
||||
|
||||
case *ast.TypeSwitchStmt:
|
||||
children = append(children, tok(n.Switch, len("switch")))
|
||||
|
||||
case *ast.UnaryExpr:
|
||||
children = append(children, tok(n.OpPos, len(n.Op.String())))
|
||||
|
||||
case *ast.ValueSpec:
|
||||
// TODO(adonovan): ValueSpec.{Doc,Comment}?
|
||||
|
||||
case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt:
|
||||
// nop
|
||||
}
|
||||
|
||||
// TODO(adonovan): opt: merge the logic of ast.Inspect() into
|
||||
// the switch above so we can make interleaved callbacks for
|
||||
// both Nodes and Tokens in the right order and avoid the need
|
||||
// to sort.
|
||||
sort.Sort(byPos(children))
|
||||
|
||||
return children
|
||||
}
|
||||
|
||||
type byPos []ast.Node
|
||||
|
||||
func (sl byPos) Len() int {
|
||||
return len(sl)
|
||||
}
|
||||
func (sl byPos) Less(i, j int) bool {
|
||||
return sl[i].Pos() < sl[j].Pos()
|
||||
}
|
||||
func (sl byPos) Swap(i, j int) {
|
||||
sl[i], sl[j] = sl[j], sl[i]
|
||||
}
|
||||
|
||||
// NodeDescription returns a description of the concrete type of n suitable
|
||||
// for a user interface.
|
||||
//
|
||||
// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident,
|
||||
// StarExpr) we could be much more specific given the path to the AST
|
||||
// root. Perhaps we should do that.
|
||||
//
|
||||
func NodeDescription(n ast.Node) string {
|
||||
switch n := n.(type) {
|
||||
case *ast.ArrayType:
|
||||
return "array type"
|
||||
case *ast.AssignStmt:
|
||||
return "assignment"
|
||||
case *ast.BadDecl:
|
||||
return "bad declaration"
|
||||
case *ast.BadExpr:
|
||||
return "bad expression"
|
||||
case *ast.BadStmt:
|
||||
return "bad statement"
|
||||
case *ast.BasicLit:
|
||||
return "basic literal"
|
||||
case *ast.BinaryExpr:
|
||||
return fmt.Sprintf("binary %s operation", n.Op)
|
||||
case *ast.BlockStmt:
|
||||
return "block"
|
||||
case *ast.BranchStmt:
|
||||
switch n.Tok {
|
||||
case token.BREAK:
|
||||
return "break statement"
|
||||
case token.CONTINUE:
|
||||
return "continue statement"
|
||||
case token.GOTO:
|
||||
return "goto statement"
|
||||
case token.FALLTHROUGH:
|
||||
return "fall-through statement"
|
||||
}
|
||||
case *ast.CallExpr:
|
||||
return "function call (or conversion)"
|
||||
case *ast.CaseClause:
|
||||
return "case clause"
|
||||
case *ast.ChanType:
|
||||
return "channel type"
|
||||
case *ast.CommClause:
|
||||
return "communication clause"
|
||||
case *ast.Comment:
|
||||
return "comment"
|
||||
case *ast.CommentGroup:
|
||||
return "comment group"
|
||||
case *ast.CompositeLit:
|
||||
return "composite literal"
|
||||
case *ast.DeclStmt:
|
||||
return NodeDescription(n.Decl) + " statement"
|
||||
case *ast.DeferStmt:
|
||||
return "defer statement"
|
||||
case *ast.Ellipsis:
|
||||
return "ellipsis"
|
||||
case *ast.EmptyStmt:
|
||||
return "empty statement"
|
||||
case *ast.ExprStmt:
|
||||
return "expression statement"
|
||||
case *ast.Field:
|
||||
// Can be any of these:
|
||||
// struct {x, y int} -- struct field(s)
|
||||
// struct {T} -- anon struct field
|
||||
// interface {I} -- interface embedding
|
||||
// interface {f()} -- interface method
|
||||
// func (A) func(B) C -- receiver, param(s), result(s)
|
||||
return "field/method/parameter"
|
||||
case *ast.FieldList:
|
||||
return "field/method/parameter list"
|
||||
case *ast.File:
|
||||
return "source file"
|
||||
case *ast.ForStmt:
|
||||
return "for loop"
|
||||
case *ast.FuncDecl:
|
||||
return "function declaration"
|
||||
case *ast.FuncLit:
|
||||
return "function literal"
|
||||
case *ast.FuncType:
|
||||
return "function type"
|
||||
case *ast.GenDecl:
|
||||
switch n.Tok {
|
||||
case token.IMPORT:
|
||||
return "import declaration"
|
||||
case token.CONST:
|
||||
return "constant declaration"
|
||||
case token.TYPE:
|
||||
return "type declaration"
|
||||
case token.VAR:
|
||||
return "variable declaration"
|
||||
}
|
||||
case *ast.GoStmt:
|
||||
return "go statement"
|
||||
case *ast.Ident:
|
||||
return "identifier"
|
||||
case *ast.IfStmt:
|
||||
return "if statement"
|
||||
case *ast.ImportSpec:
|
||||
return "import specification"
|
||||
case *ast.IncDecStmt:
|
||||
if n.Tok == token.INC {
|
||||
return "increment statement"
|
||||
}
|
||||
return "decrement statement"
|
||||
case *ast.IndexExpr:
|
||||
return "index expression"
|
||||
case *ast.InterfaceType:
|
||||
return "interface type"
|
||||
case *ast.KeyValueExpr:
|
||||
return "key/value association"
|
||||
case *ast.LabeledStmt:
|
||||
return "statement label"
|
||||
case *ast.MapType:
|
||||
return "map type"
|
||||
case *ast.Package:
|
||||
return "package"
|
||||
case *ast.ParenExpr:
|
||||
return "parenthesized " + NodeDescription(n.X)
|
||||
case *ast.RangeStmt:
|
||||
return "range loop"
|
||||
case *ast.ReturnStmt:
|
||||
return "return statement"
|
||||
case *ast.SelectStmt:
|
||||
return "select statement"
|
||||
case *ast.SelectorExpr:
|
||||
return "selector"
|
||||
case *ast.SendStmt:
|
||||
return "channel send"
|
||||
case *ast.SliceExpr:
|
||||
return "slice expression"
|
||||
case *ast.StarExpr:
|
||||
return "*-operation" // load/store expr or pointer type
|
||||
case *ast.StructType:
|
||||
return "struct type"
|
||||
case *ast.SwitchStmt:
|
||||
return "switch statement"
|
||||
case *ast.TypeAssertExpr:
|
||||
return "type assertion"
|
||||
case *ast.TypeSpec:
|
||||
return "type specification"
|
||||
case *ast.TypeSwitchStmt:
|
||||
return "type switch"
|
||||
case *ast.UnaryExpr:
|
||||
return fmt.Sprintf("unary %s operation", n.Op)
|
||||
case *ast.ValueSpec:
|
||||
return "value specification"
|
||||
|
||||
}
|
||||
panic(fmt.Sprintf("unexpected node type: %T", n))
|
||||
}
|
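A hedged usage sketch (not part of the vendored diff): the exported PathEnclosingInterval and NodeDescription functions above can be driven as shown below. The source snippet and the byte offset are illustrative assumptions chosen for this example.

package main

import (
    "fmt"
    "go/parser"
    "go/token"

    "golang.org/x/tools/go/ast/astutil"
)

func main() {
    src := "package p\n\nfunc f() int { return 1 + 2 }\n"
    fset := token.NewFileSet()
    file, err := parser.ParseFile(fset, "example.go", src, 0)
    if err != nil {
        panic(err)
    }

    // Select the "+" token inside "1 + 2"; the offset is derived from the
    // (assumed) snippet above.
    tf := fset.File(file.Pos())
    start := tf.Pos(len("package p\n\nfunc f() int { return 1 "))
    end := start + 1

    path, exact := astutil.PathEnclosingInterval(file, start, end)
    fmt.Println("exact:", exact)
    for _, n := range path {
        // Innermost first: "binary + operation", "return statement", "block", ...
        fmt.Println(astutil.NodeDescription(n))
    }
}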
376	Godeps/_workspace/src/golang.org/x/tools/go/ast/astutil/imports.go	(generated, vendored, new file)
@@ -0,0 +1,376 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package astutil contains common utilities for working with the Go AST.
|
||||
package astutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// AddImport adds the import path to the file f, if absent.
|
||||
func AddImport(fset *token.FileSet, f *ast.File, ipath string) (added bool) {
|
||||
return AddNamedImport(fset, f, "", ipath)
|
||||
}
|
||||
|
||||
// AddNamedImport adds the import path to the file f, if absent.
|
||||
// If name is not empty, it is used to rename the import.
|
||||
//
|
||||
// For example, calling
|
||||
// AddNamedImport(fset, f, "pathpkg", "path")
|
||||
// adds
|
||||
// import pathpkg "path"
|
||||
func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added bool) {
|
||||
if imports(f, ipath) {
|
||||
return false
|
||||
}
|
||||
|
||||
newImport := &ast.ImportSpec{
|
||||
Path: &ast.BasicLit{
|
||||
Kind: token.STRING,
|
||||
Value: strconv.Quote(ipath),
|
||||
},
|
||||
}
|
||||
if name != "" {
|
||||
newImport.Name = &ast.Ident{Name: name}
|
||||
}
|
||||
|
||||
// Find an import decl to add to.
|
||||
// The goal is to find an existing import
|
||||
// whose import path has the longest shared
|
||||
// prefix with ipath.
|
||||
var (
|
||||
bestMatch = -1 // length of longest shared prefix
|
||||
lastImport = -1 // index in f.Decls of the file's final import decl
|
||||
impDecl *ast.GenDecl // import decl containing the best match
|
||||
impIndex = -1 // spec index in impDecl containing the best match
|
||||
)
|
||||
for i, decl := range f.Decls {
|
||||
gen, ok := decl.(*ast.GenDecl)
|
||||
if ok && gen.Tok == token.IMPORT {
|
||||
lastImport = i
|
||||
// Do not add to import "C", to avoid disrupting the
|
||||
// association with its doc comment, breaking cgo.
|
||||
if declImports(gen, "C") {
|
||||
continue
|
||||
}
|
||||
|
||||
// Match an empty import decl if that's all that is available.
|
||||
if len(gen.Specs) == 0 && bestMatch == -1 {
|
||||
impDecl = gen
|
||||
}
|
||||
|
||||
// Compute longest shared prefix with imports in this group.
|
||||
for j, spec := range gen.Specs {
|
||||
impspec := spec.(*ast.ImportSpec)
|
||||
n := matchLen(importPath(impspec), ipath)
|
||||
if n > bestMatch {
|
||||
bestMatch = n
|
||||
impDecl = gen
|
||||
impIndex = j
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If no import decl found, add one after the last import.
|
||||
if impDecl == nil {
|
||||
impDecl = &ast.GenDecl{
|
||||
Tok: token.IMPORT,
|
||||
}
|
||||
if lastImport >= 0 {
|
||||
impDecl.TokPos = f.Decls[lastImport].End()
|
||||
} else {
|
||||
// There are no existing imports.
|
||||
// Our new import goes after the package declaration and after
|
||||
// the comment, if any, that starts on the same line as the
|
||||
// package declaration.
|
||||
impDecl.TokPos = f.Package
|
||||
|
||||
file := fset.File(f.Package)
|
||||
pkgLine := file.Line(f.Package)
|
||||
for _, c := range f.Comments {
|
||||
if file.Line(c.Pos()) > pkgLine {
|
||||
break
|
||||
}
|
||||
impDecl.TokPos = c.End()
|
||||
}
|
||||
}
|
||||
f.Decls = append(f.Decls, nil)
|
||||
copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:])
|
||||
f.Decls[lastImport+1] = impDecl
|
||||
}
|
||||
|
||||
// Insert new import at insertAt.
|
||||
insertAt := 0
|
||||
if impIndex >= 0 {
|
||||
// insert after the found import
|
||||
insertAt = impIndex + 1
|
||||
}
|
||||
impDecl.Specs = append(impDecl.Specs, nil)
|
||||
copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:])
|
||||
impDecl.Specs[insertAt] = newImport
|
||||
pos := impDecl.Pos()
|
||||
if insertAt > 0 {
|
||||
// If there is a comment after an existing import, preserve the comment
|
||||
// position by adding the new import after the comment.
|
||||
if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil {
|
||||
pos = spec.Comment.End()
|
||||
} else {
|
||||
// Assign same position as the previous import,
|
||||
// so that the sorter sees it as being in the same block.
|
||||
pos = impDecl.Specs[insertAt-1].Pos()
|
||||
}
|
||||
}
|
||||
if newImport.Name != nil {
|
||||
newImport.Name.NamePos = pos
|
||||
}
|
||||
newImport.Path.ValuePos = pos
|
||||
newImport.EndPos = pos
|
||||
|
||||
// Clean up parens. impDecl contains at least one spec.
|
||||
if len(impDecl.Specs) == 1 {
|
||||
// Remove unneeded parens.
|
||||
impDecl.Lparen = token.NoPos
|
||||
} else if !impDecl.Lparen.IsValid() {
|
||||
// impDecl needs parens added.
|
||||
impDecl.Lparen = impDecl.Specs[0].Pos()
|
||||
}
|
||||
|
||||
f.Imports = append(f.Imports, newImport)
|
||||
return true
|
||||
}
|
||||
|
||||
// DeleteImport deletes the import path from the file f, if present.
|
||||
func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) {
|
||||
return DeleteNamedImport(fset, f, "", path)
|
||||
}
|
||||
|
||||
// DeleteNamedImport deletes the import with the given name and path from the file f, if present.
|
||||
func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) {
|
||||
var delspecs []*ast.ImportSpec
|
||||
|
||||
// Find the import nodes that import path, if any.
|
||||
for i := 0; i < len(f.Decls); i++ {
|
||||
decl := f.Decls[i]
|
||||
gen, ok := decl.(*ast.GenDecl)
|
||||
if !ok || gen.Tok != token.IMPORT {
|
||||
continue
|
||||
}
|
||||
for j := 0; j < len(gen.Specs); j++ {
|
||||
spec := gen.Specs[j]
|
||||
impspec := spec.(*ast.ImportSpec)
|
||||
if impspec.Name == nil && name != "" {
|
||||
continue
|
||||
}
|
||||
if impspec.Name != nil && impspec.Name.Name != name {
|
||||
continue
|
||||
}
|
||||
if importPath(impspec) != path {
|
||||
continue
|
||||
}
|
||||
|
||||
// We found an import spec that imports path.
|
||||
// Delete it.
|
||||
delspecs = append(delspecs, impspec)
|
||||
deleted = true
|
||||
copy(gen.Specs[j:], gen.Specs[j+1:])
|
||||
gen.Specs = gen.Specs[:len(gen.Specs)-1]
|
||||
|
||||
// If this was the last import spec in this decl,
|
||||
// delete the decl, too.
|
||||
if len(gen.Specs) == 0 {
|
||||
copy(f.Decls[i:], f.Decls[i+1:])
|
||||
f.Decls = f.Decls[:len(f.Decls)-1]
|
||||
i--
|
||||
break
|
||||
} else if len(gen.Specs) == 1 {
|
||||
gen.Lparen = token.NoPos // drop parens
|
||||
}
|
||||
if j > 0 {
|
||||
lastImpspec := gen.Specs[j-1].(*ast.ImportSpec)
|
||||
lastLine := fset.Position(lastImpspec.Path.ValuePos).Line
|
||||
line := fset.Position(impspec.Path.ValuePos).Line
|
||||
|
||||
// We deleted an entry but now there may be
|
||||
// a blank line-sized hole where the import was.
|
||||
if line-lastLine > 1 {
|
||||
// There was a blank line immediately preceding the deleted import,
|
||||
// so there's no need to close the hole.
|
||||
// Do nothing.
|
||||
} else {
|
||||
// There was no blank line. Close the hole.
|
||||
fset.File(gen.Rparen).MergeLine(line)
|
||||
}
|
||||
}
|
||||
j--
|
||||
}
|
||||
}
|
||||
|
||||
// Delete them from f.Imports.
|
||||
for i := 0; i < len(f.Imports); i++ {
|
||||
imp := f.Imports[i]
|
||||
for j, del := range delspecs {
|
||||
if imp == del {
|
||||
copy(f.Imports[i:], f.Imports[i+1:])
|
||||
f.Imports = f.Imports[:len(f.Imports)-1]
|
||||
copy(delspecs[j:], delspecs[j+1:])
|
||||
delspecs = delspecs[:len(delspecs)-1]
|
||||
i--
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(delspecs) > 0 {
|
||||
panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs))
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// RewriteImport rewrites any import of path oldPath to path newPath.
|
||||
func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) {
|
||||
for _, imp := range f.Imports {
|
||||
if importPath(imp) == oldPath {
|
||||
rewrote = true
|
||||
// record old End, because the default is to compute
|
||||
// it using the length of imp.Path.Value.
|
||||
imp.EndPos = imp.End()
|
||||
imp.Path.Value = strconv.Quote(newPath)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// UsesImport reports whether a given import is used.
|
||||
func UsesImport(f *ast.File, path string) (used bool) {
|
||||
spec := importSpec(f, path)
|
||||
if spec == nil {
|
||||
return
|
||||
}
|
||||
|
||||
name := spec.Name.String()
|
||||
switch name {
|
||||
case "<nil>":
|
||||
// If the package name is not explicitly specified,
|
||||
// make an educated guess. This is not guaranteed to be correct.
|
||||
lastSlash := strings.LastIndex(path, "/")
|
||||
if lastSlash == -1 {
|
||||
name = path
|
||||
} else {
|
||||
name = path[lastSlash+1:]
|
||||
}
|
||||
case "_", ".":
|
||||
// Not sure if this import is used - err on the side of caution.
|
||||
return true
|
||||
}
|
||||
|
||||
ast.Walk(visitFn(func(n ast.Node) {
|
||||
sel, ok := n.(*ast.SelectorExpr)
|
||||
if ok && isTopName(sel.X, name) {
|
||||
used = true
|
||||
}
|
||||
}), f)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
type visitFn func(node ast.Node)
|
||||
|
||||
func (fn visitFn) Visit(node ast.Node) ast.Visitor {
|
||||
fn(node)
|
||||
return fn
|
||||
}
|
||||
|
||||
// imports returns true if f imports path.
|
||||
func imports(f *ast.File, path string) bool {
|
||||
return importSpec(f, path) != nil
|
||||
}
|
||||
|
||||
// importSpec returns the import spec if f imports path,
|
||||
// or nil otherwise.
|
||||
func importSpec(f *ast.File, path string) *ast.ImportSpec {
|
||||
for _, s := range f.Imports {
|
||||
if importPath(s) == path {
|
||||
return s
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// importPath returns the unquoted import path of s,
|
||||
// or "" if the path is not properly quoted.
|
||||
func importPath(s *ast.ImportSpec) string {
|
||||
t, err := strconv.Unquote(s.Path.Value)
|
||||
if err == nil {
|
||||
return t
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// declImports reports whether gen contains an import of path.
|
||||
func declImports(gen *ast.GenDecl, path string) bool {
|
||||
if gen.Tok != token.IMPORT {
|
||||
return false
|
||||
}
|
||||
for _, spec := range gen.Specs {
|
||||
impspec := spec.(*ast.ImportSpec)
|
||||
if importPath(impspec) == path {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// matchLen returns the length of the longest path segment prefix shared by x and y.
|
||||
func matchLen(x, y string) int {
|
||||
n := 0
|
||||
for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ {
|
||||
if x[i] == '/' {
|
||||
n++
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// isTopName returns true if n is a top-level unresolved identifier with the given name.
|
||||
func isTopName(n ast.Expr, name string) bool {
|
||||
id, ok := n.(*ast.Ident)
|
||||
return ok && id.Name == name && id.Obj == nil
|
||||
}
|
||||
|
||||
// Imports returns the file imports grouped by paragraph.
|
||||
func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec {
|
||||
var groups [][]*ast.ImportSpec
|
||||
|
||||
for _, decl := range f.Decls {
|
||||
genDecl, ok := decl.(*ast.GenDecl)
|
||||
if !ok || genDecl.Tok != token.IMPORT {
|
||||
break
|
||||
}
|
||||
|
||||
group := []*ast.ImportSpec{}
|
||||
|
||||
var lastLine int
|
||||
for _, spec := range genDecl.Specs {
|
||||
importSpec := spec.(*ast.ImportSpec)
|
||||
pos := importSpec.Path.ValuePos
|
||||
line := fset.Position(pos).Line
|
||||
if lastLine > 0 && pos > 0 && line-lastLine > 1 {
|
||||
groups = append(groups, group)
|
||||
group = []*ast.ImportSpec{}
|
||||
}
|
||||
group = append(group, importSpec)
|
||||
lastLine = line
|
||||
}
|
||||
groups = append(groups, group)
|
||||
}
|
||||
|
||||
return groups
|
||||
}
|
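A hedged sketch (not part of the diff) of the import-rewriting helpers defined above, using the signatures shown in this vendored copy; the input source is an illustrative assumption.

package main

import (
    "fmt"
    "go/format"
    "go/parser"
    "go/token"
    "os"

    "golang.org/x/tools/go/ast/astutil"
)

func main() {
    src := "package p\n\nimport \"os\"\n\nvar _ = os.Args\n"
    fset := token.NewFileSet()
    f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
    if err != nil {
        panic(err)
    }

    astutil.AddNamedImport(fset, f, "pathpkg", "path") // becomes: import pathpkg "path"
    astutil.AddImport(fset, f, "fmt")
    fmt.Println(astutil.UsesImport(f, "os")) // reports whether "os" is still referenced

    for _, group := range astutil.Imports(fset, f) { // import specs grouped by blank-line paragraphs
        for _, spec := range group {
            fmt.Println(spec.Path.Value)
        }
    }

    if err := format.Node(os.Stdout, fset, f); err != nil { // print the rewritten file
        panic(err)
    }
}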
14	Godeps/_workspace/src/golang.org/x/tools/go/ast/astutil/util.go	(generated, vendored, new file)
@@ -0,0 +1,14 @@
package astutil

import "go/ast"

// Unparen returns e with any enclosing parentheses stripped.
func Unparen(e ast.Expr) ast.Expr {
    for {
        p, ok := e.(*ast.ParenExpr)
        if !ok {
            return e
        }
        e = p.X
    }
}
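A minimal sketch (not part of the diff) of Unparen, assuming a throwaway expression parsed with go/parser:

package main

import (
    "fmt"
    "go/parser"

    "golang.org/x/tools/go/ast/astutil"
)

func main() {
    expr, err := parser.ParseExpr("((x + y))")
    if err != nil {
        panic(err)
    }
    // Both ParenExprs are stripped, leaving the *ast.BinaryExpr.
    fmt.Printf("%T\n", astutil.Unparen(expr))
}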
419	Godeps/_workspace/src/golang.org/x/tools/imports/fix.go	(generated, vendored, new file)
@@ -0,0 +1,419 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package imports
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/build"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/tools/go/ast/astutil"
|
||||
)
|
||||
|
||||
// importToGroup is a list of functions which map from an import path to
|
||||
// a group number.
|
||||
var importToGroup = []func(importPath string) (num int, ok bool){
|
||||
func(importPath string) (num int, ok bool) {
|
||||
if strings.HasPrefix(importPath, "appengine") {
|
||||
return 2, true
|
||||
}
|
||||
return
|
||||
},
|
||||
func(importPath string) (num int, ok bool) {
|
||||
if strings.Contains(importPath, ".") {
|
||||
return 1, true
|
||||
}
|
||||
return
|
||||
},
|
||||
}
|
||||
|
||||
func importGroup(importPath string) int {
|
||||
for _, fn := range importToGroup {
|
||||
if n, ok := fn(importPath); ok {
|
||||
return n
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func fixImports(fset *token.FileSet, f *ast.File, filename string) (added []string, err error) {
|
||||
// refs are a set of possible package references currently unsatisfied by imports.
|
||||
// first key: either base package (e.g. "fmt") or renamed package
|
||||
// second key: referenced package symbol (e.g. "Println")
|
||||
refs := make(map[string]map[string]bool)
|
||||
|
||||
// decls are the current package imports. key is base package or renamed package.
|
||||
decls := make(map[string]*ast.ImportSpec)
|
||||
|
||||
// collect potential uses of packages.
|
||||
var visitor visitFn
|
||||
visitor = visitFn(func(node ast.Node) ast.Visitor {
|
||||
if node == nil {
|
||||
return visitor
|
||||
}
|
||||
switch v := node.(type) {
|
||||
case *ast.ImportSpec:
|
||||
if v.Name != nil {
|
||||
decls[v.Name.Name] = v
|
||||
} else {
|
||||
local := importPathToName(strings.Trim(v.Path.Value, `\"`))
|
||||
decls[local] = v
|
||||
}
|
||||
case *ast.SelectorExpr:
|
||||
xident, ok := v.X.(*ast.Ident)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
if xident.Obj != nil {
|
||||
// if the parser can resolve it, it's not a package ref
|
||||
break
|
||||
}
|
||||
pkgName := xident.Name
|
||||
if refs[pkgName] == nil {
|
||||
refs[pkgName] = make(map[string]bool)
|
||||
}
|
||||
if decls[pkgName] == nil {
|
||||
refs[pkgName][v.Sel.Name] = true
|
||||
}
|
||||
}
|
||||
return visitor
|
||||
})
|
||||
ast.Walk(visitor, f)
|
||||
|
||||
// Nil out any unused ImportSpecs, to be removed in following passes
|
||||
unusedImport := map[string]string{}
|
||||
for pkg, is := range decls {
|
||||
if refs[pkg] == nil && pkg != "_" && pkg != "." {
|
||||
name := ""
|
||||
if is.Name != nil {
|
||||
name = is.Name.Name
|
||||
}
|
||||
unusedImport[strings.Trim(is.Path.Value, `"`)] = name
|
||||
}
|
||||
}
|
||||
for ipath, name := range unusedImport {
|
||||
if ipath == "C" {
|
||||
// Don't remove cgo stuff.
|
||||
continue
|
||||
}
|
||||
astutil.DeleteNamedImport(fset, f, name, ipath)
|
||||
}
|
||||
|
||||
// Search for imports matching potential package references.
|
||||
searches := 0
|
||||
type result struct {
|
||||
ipath string
|
||||
name string
|
||||
err error
|
||||
}
|
||||
results := make(chan result)
|
||||
for pkgName, symbols := range refs {
|
||||
if len(symbols) == 0 {
|
||||
continue // skip over packages already imported
|
||||
}
|
||||
go func(pkgName string, symbols map[string]bool) {
|
||||
ipath, rename, err := findImport(pkgName, symbols, filename)
|
||||
r := result{ipath: ipath, err: err}
|
||||
if rename {
|
||||
r.name = pkgName
|
||||
}
|
||||
results <- r
|
||||
}(pkgName, symbols)
|
||||
searches++
|
||||
}
|
||||
for i := 0; i < searches; i++ {
|
||||
result := <-results
|
||||
if result.err != nil {
|
||||
return nil, result.err
|
||||
}
|
||||
if result.ipath != "" {
|
||||
if result.name != "" {
|
||||
astutil.AddNamedImport(fset, f, result.name, result.ipath)
|
||||
} else {
|
||||
astutil.AddImport(fset, f, result.ipath)
|
||||
}
|
||||
added = append(added, result.ipath)
|
||||
}
|
||||
}
|
||||
|
||||
return added, nil
|
||||
}
|
||||
|
||||
// importPathToName returns the package name for the given import path.
|
||||
var importPathToName = importPathToNameGoPath
|
||||
|
||||
// importPathToNameBasic assumes the package name is the base of import path.
|
||||
func importPathToNameBasic(importPath string) (packageName string) {
|
||||
return path.Base(importPath)
|
||||
}
|
||||
|
||||
// importPathToNameGoPath finds out the actual package name, as declared in its .go files.
|
||||
// If there's a problem, it falls back to using importPathToNameBasic.
|
||||
func importPathToNameGoPath(importPath string) (packageName string) {
|
||||
if buildPkg, err := build.Import(importPath, "", 0); err == nil {
|
||||
return buildPkg.Name
|
||||
} else {
|
||||
return importPathToNameBasic(importPath)
|
||||
}
|
||||
}
|
||||
|
||||
type pkg struct {
|
||||
importpath string // full pkg import path, e.g. "net/http"
|
||||
dir string // absolute file path to pkg directory e.g. "/usr/lib/go/src/fmt"
|
||||
}
|
||||
|
||||
var pkgIndexOnce sync.Once
|
||||
|
||||
var pkgIndex struct {
|
||||
sync.Mutex
|
||||
m map[string][]pkg // shortname => []pkg, e.g "http" => "net/http"
|
||||
}
|
||||
|
||||
// gate is a semaphore for limiting concurrency.
|
||||
type gate chan struct{}
|
||||
|
||||
func (g gate) enter() { g <- struct{}{} }
|
||||
func (g gate) leave() { <-g }
|
||||
|
||||
// fsgate protects the OS & filesystem from too much concurrency.
|
||||
// Too much disk I/O -> too many threads -> swapping and bad scheduling.
|
||||
var fsgate = make(gate, 8)
|
||||
|
||||
func loadPkgIndex() {
|
||||
pkgIndex.Lock()
|
||||
pkgIndex.m = make(map[string][]pkg)
|
||||
pkgIndex.Unlock()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for _, path := range build.Default.SrcDirs() {
|
||||
fsgate.enter()
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
fsgate.leave()
|
||||
fmt.Fprint(os.Stderr, err)
|
||||
continue
|
||||
}
|
||||
children, err := f.Readdir(-1)
|
||||
f.Close()
|
||||
fsgate.leave()
|
||||
if err != nil {
|
||||
fmt.Fprint(os.Stderr, err)
|
||||
continue
|
||||
}
|
||||
for _, child := range children {
|
||||
if child.IsDir() {
|
||||
wg.Add(1)
|
||||
go func(path, name string) {
|
||||
defer wg.Done()
|
||||
loadPkg(&wg, path, name)
|
||||
}(path, child.Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func loadPkg(wg *sync.WaitGroup, root, pkgrelpath string) {
|
||||
importpath := filepath.ToSlash(pkgrelpath)
|
||||
dir := filepath.Join(root, importpath)
|
||||
|
||||
fsgate.enter()
|
||||
defer fsgate.leave()
|
||||
pkgDir, err := os.Open(dir)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
children, err := pkgDir.Readdir(-1)
|
||||
pkgDir.Close()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// hasGo tracks whether a directory actually appears to be a
|
||||
// Go source code directory. If $GOPATH == $HOME, and
|
||||
// $HOME/src has lots of other large non-Go projects in it,
|
||||
// then the calls to importPathToName below can be expensive.
|
||||
hasGo := false
|
||||
for _, child := range children {
|
||||
// Avoid .foo, _foo, and testdata directory trees.
|
||||
name := child.Name()
|
||||
if name == "" || name[0] == '.' || name[0] == '_' || name == "testdata" {
|
||||
continue
|
||||
}
|
||||
if strings.HasSuffix(name, ".go") {
|
||||
hasGo = true
|
||||
}
|
||||
if child.IsDir() {
|
||||
wg.Add(1)
|
||||
go func(root, name string) {
|
||||
defer wg.Done()
|
||||
loadPkg(wg, root, name)
|
||||
}(root, filepath.Join(importpath, name))
|
||||
}
|
||||
}
|
||||
if hasGo {
|
||||
shortName := importPathToName(importpath)
|
||||
pkgIndex.Lock()
|
||||
pkgIndex.m[shortName] = append(pkgIndex.m[shortName], pkg{
|
||||
importpath: importpath,
|
||||
dir: dir,
|
||||
})
|
||||
pkgIndex.Unlock()
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// loadExports returns a list exports for a package.
|
||||
var loadExports = loadExportsGoPath
|
||||
|
||||
func loadExportsGoPath(dir string) map[string]bool {
|
||||
exports := make(map[string]bool)
|
||||
buildPkg, err := build.ImportDir(dir, 0)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "no buildable Go source files in") {
|
||||
return nil
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "could not import %q: %v\n", dir, err)
|
||||
return nil
|
||||
}
|
||||
fset := token.NewFileSet()
|
||||
for _, files := range [...][]string{buildPkg.GoFiles, buildPkg.CgoFiles} {
|
||||
for _, file := range files {
|
||||
f, err := parser.ParseFile(fset, filepath.Join(dir, file), nil, 0)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "could not parse %q: %v\n", file, err)
|
||||
continue
|
||||
}
|
||||
for name := range f.Scope.Objects {
|
||||
if ast.IsExported(name) {
|
||||
exports[name] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return exports
|
||||
}
|
||||
|
||||
// findImport searches for a package with the given symbols.
|
||||
// If no package is found, findImport returns "".
|
||||
// Declared as a variable rather than a function so goimports can be easily
|
||||
// extended by adding a file with an init function.
|
||||
var findImport = findImportGoPath
|
||||
|
||||
func findImportGoPath(pkgName string, symbols map[string]bool, filename string) (string, bool, error) {
|
||||
// Fast path for the standard library.
|
||||
// In the common case we hopefully never have to scan the GOPATH, which can
|
||||
// be slow with moving disks.
|
||||
if pkg, rename, ok := findImportStdlib(pkgName, symbols); ok {
|
||||
return pkg, rename, nil
|
||||
}
|
||||
|
||||
// TODO(sameer): look at the import lines for other Go files in the
|
||||
// local directory, since the user is likely to import the same packages
|
||||
// in the current Go file. Return rename=true when the other Go files
|
||||
// use a renamed package that's also used in the current file.
|
||||
|
||||
pkgIndexOnce.Do(loadPkgIndex)
|
||||
|
||||
// Collect exports for packages with matching names.
|
||||
var (
|
||||
wg sync.WaitGroup
|
||||
mu sync.Mutex
|
||||
shortest string
|
||||
)
|
||||
pkgIndex.Lock()
|
||||
for _, pkg := range pkgIndex.m[pkgName] {
|
||||
if !canUse(filename, pkg.dir) {
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
go func(importpath, dir string) {
|
||||
defer wg.Done()
|
||||
exports := loadExports(dir)
|
||||
if exports == nil {
|
||||
return
|
||||
}
|
||||
// If it doesn't have the right symbols, stop.
|
||||
for symbol := range symbols {
|
||||
if !exports[symbol] {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Devendorize for use in import statement.
|
||||
if i := strings.LastIndex(importpath, "/vendor/"); i >= 0 {
|
||||
importpath = importpath[i+len("/vendor/"):]
|
||||
} else if strings.HasPrefix(importpath, "vendor/") {
|
||||
importpath = importpath[len("vendor/"):]
|
||||
}
|
||||
|
||||
// Save as the answer.
|
||||
// If there are multiple candidates, the shortest wins,
|
||||
// to prefer "bytes" over "github.com/foo/bytes".
|
||||
mu.Lock()
|
||||
if shortest == "" || len(importpath) < len(shortest) || len(importpath) == len(shortest) && importpath < shortest {
|
||||
shortest = importpath
|
||||
}
|
||||
mu.Unlock()
|
||||
}(pkg.importpath, pkg.dir)
|
||||
}
|
||||
pkgIndex.Unlock()
|
||||
wg.Wait()
|
||||
|
||||
return shortest, false, nil
|
||||
}
|
||||
|
||||
func canUse(filename, dir string) bool {
|
||||
dirSlash := filepath.ToSlash(dir)
|
||||
if !strings.Contains(dirSlash, "/vendor/") && !strings.Contains(dirSlash, "/internal/") && !strings.HasSuffix(dirSlash, "/internal") {
|
||||
return true
|
||||
}
|
||||
// Vendor or internal directory only visible from children of parent.
|
||||
// That means the path from the current directory to the target directory
|
||||
// can contain ../vendor or ../internal but not ../foo/vendor or ../foo/internal
|
||||
// or bar/vendor or bar/internal.
|
||||
// After stripping all the leading ../, the only okay place to see vendor or internal
|
||||
// is at the very beginning of the path.
|
||||
abs, err := filepath.Abs(filename)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
rel, err := filepath.Rel(abs, dir)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
relSlash := filepath.ToSlash(rel)
|
||||
if i := strings.LastIndex(relSlash, "../"); i >= 0 {
|
||||
relSlash = relSlash[i+len("../"):]
|
||||
}
|
||||
return !strings.Contains(relSlash, "/vendor/") && !strings.Contains(relSlash, "/internal/") && !strings.HasSuffix(relSlash, "/internal")
|
||||
}
|
||||
|
||||
type visitFn func(node ast.Node) ast.Visitor
|
||||
|
||||
func (fn visitFn) Visit(node ast.Node) ast.Visitor {
|
||||
return fn(node)
|
||||
}
|
||||
|
||||
func findImportStdlib(shortPkg string, symbols map[string]bool) (importPath string, rename, ok bool) {
|
||||
for symbol := range symbols {
|
||||
path := stdlib[shortPkg+"."+symbol]
|
||||
if path == "" {
|
||||
return "", false, false
|
||||
}
|
||||
if importPath != "" && importPath != path {
|
||||
// Ambiguous. Symbols pointed to different things.
|
||||
return "", false, false
|
||||
}
|
||||
importPath = path
|
||||
}
|
||||
return importPath, false, importPath != ""
|
||||
}
|
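To make the grouping order above concrete, here is a standalone restatement (not part of the diff, and not the package's own API) of the importToGroup heuristic: group 0 is the standard library, group 1 is any path containing a dot, group 2 is appengine. Process in imports.go below inserts a blank line whenever the group number changes between adjacent imports; the sample paths are illustrative.

package main

import (
    "fmt"
    "strings"
)

// group mirrors importGroup/importToGroup from fix.go above.
func group(importPath string) int {
    switch {
    case strings.HasPrefix(importPath, "appengine"):
        return 2
    case strings.Contains(importPath, "."):
        return 1
    default:
        return 0
    }
}

func main() {
    for _, p := range []string{"fmt", "net/http", "github.com/ethereum/go-ethereum/accounts/abi", "appengine/datastore"} {
        fmt.Println(group(p), p)
    }
}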
283	Godeps/_workspace/src/golang.org/x/tools/imports/imports.go	(generated, vendored, new file)
@@ -0,0 +1,283 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package imports implements a Go pretty-printer (like package "go/format")
|
||||
// that also adds or removes import statements as necessary.
|
||||
package imports
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/format"
|
||||
"go/parser"
|
||||
"go/printer"
|
||||
"go/token"
|
||||
"io"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/go/ast/astutil"
|
||||
)
|
||||
|
||||
// Options specifies options for processing files.
|
||||
type Options struct {
|
||||
Fragment bool // Accept fragment of a source file (no package statement)
|
||||
AllErrors bool // Report all errors (not just the first 10 on different lines)
|
||||
|
||||
Comments bool // Print comments (true if nil *Options provided)
|
||||
TabIndent bool // Use tabs for indent (true if nil *Options provided)
|
||||
TabWidth int // Tab width (8 if nil *Options provided)
|
||||
}
|
||||
|
||||
// Process formats and adjusts imports for the provided file.
|
||||
// If opt is nil the defaults are used.
|
||||
//
|
||||
// Note that filename's directory influences which imports can be chosen,
|
||||
// so it is important that filename be accurate.
|
||||
// To process data ``as if'' it were in filename, pass the data as a non-nil src.
|
||||
func Process(filename string, src []byte, opt *Options) ([]byte, error) {
|
||||
if opt == nil {
|
||||
opt = &Options{Comments: true, TabIndent: true, TabWidth: 8}
|
||||
}
|
||||
|
||||
fileSet := token.NewFileSet()
|
||||
file, adjust, err := parse(fileSet, filename, src, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = fixImports(fileSet, file, filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sortImports(fileSet, file)
|
||||
imps := astutil.Imports(fileSet, file)
|
||||
|
||||
var spacesBefore []string // import paths we need spaces before
|
||||
for _, impSection := range imps {
|
||||
// Within each block of contiguous imports, see if any
|
||||
// import lines are in different group numbers. If so,
|
||||
// we'll need to put a space between them so it's
|
||||
// compatible with gofmt.
|
||||
lastGroup := -1
|
||||
for _, importSpec := range impSection {
|
||||
importPath, _ := strconv.Unquote(importSpec.Path.Value)
|
||||
groupNum := importGroup(importPath)
|
||||
if groupNum != lastGroup && lastGroup != -1 {
|
||||
spacesBefore = append(spacesBefore, importPath)
|
||||
}
|
||||
lastGroup = groupNum
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
printerMode := printer.UseSpaces
|
||||
if opt.TabIndent {
|
||||
printerMode |= printer.TabIndent
|
||||
}
|
||||
printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth}
|
||||
|
||||
var buf bytes.Buffer
|
||||
err = printConfig.Fprint(&buf, fileSet, file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out := buf.Bytes()
|
||||
if adjust != nil {
|
||||
out = adjust(src, out)
|
||||
}
|
||||
if len(spacesBefore) > 0 {
|
||||
out = addImportSpaces(bytes.NewReader(out), spacesBefore)
|
||||
}
|
||||
|
||||
out, err = format.Source(out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// parse parses src, which was read from filename,
|
||||
// as a Go source file or statement list.
|
||||
func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) {
|
||||
parserMode := parser.Mode(0)
|
||||
if opt.Comments {
|
||||
parserMode |= parser.ParseComments
|
||||
}
|
||||
if opt.AllErrors {
|
||||
parserMode |= parser.AllErrors
|
||||
}
|
||||
|
||||
// Try as whole source file.
|
||||
file, err := parser.ParseFile(fset, filename, src, parserMode)
|
||||
if err == nil {
|
||||
return file, nil, nil
|
||||
}
|
||||
// If the error is that the source file didn't begin with a
|
||||
// package line and we accept fragmented input, fall through to
|
||||
// try as a source fragment. Stop and return on any other error.
|
||||
if !opt.Fragment || !strings.Contains(err.Error(), "expected 'package'") {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// If this is a declaration list, make it a source file
|
||||
// by inserting a package clause.
|
||||
// Insert using a ;, not a newline, so that the line numbers
|
||||
// in psrc match the ones in src.
|
||||
psrc := append([]byte("package main;"), src...)
|
||||
file, err = parser.ParseFile(fset, filename, psrc, parserMode)
|
||||
if err == nil {
|
||||
// If a main function exists, we will assume this is a main
|
||||
// package and leave the file.
|
||||
if containsMainFunc(file) {
|
||||
return file, nil, nil
|
||||
}
|
||||
|
||||
adjust := func(orig, src []byte) []byte {
|
||||
// Remove the package clause.
|
||||
// Gofmt has turned the ; into a \n.
|
||||
src = src[len("package main\n"):]
|
||||
return matchSpace(orig, src)
|
||||
}
|
||||
return file, adjust, nil
|
||||
}
|
||||
// If the error is that the source file didn't begin with a
|
||||
// declaration, fall through to try as a statement list.
|
||||
// Stop and return on any other error.
|
||||
if !strings.Contains(err.Error(), "expected declaration") {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// If this is a statement list, make it a source file
|
||||
// by inserting a package clause and turning the list
|
||||
// into a function body. This handles expressions too.
|
||||
// Insert using a ;, not a newline, so that the line numbers
|
||||
// in fsrc match the ones in src.
|
||||
fsrc := append(append([]byte("package p; func _() {"), src...), '}')
|
||||
file, err = parser.ParseFile(fset, filename, fsrc, parserMode)
|
||||
if err == nil {
|
||||
adjust := func(orig, src []byte) []byte {
|
||||
// Remove the wrapping.
|
||||
// Gofmt has turned the ; into a \n\n.
|
||||
src = src[len("package p\n\nfunc _() {"):]
|
||||
src = src[:len(src)-len("}\n")]
|
||||
// Gofmt has also indented the function body one level.
|
||||
// Remove that indent.
|
||||
src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1)
|
||||
return matchSpace(orig, src)
|
||||
}
|
||||
return file, adjust, nil
|
||||
}
|
||||
|
||||
// Failed, and out of options.
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// containsMainFunc checks if a file contains a function declaration with the
|
||||
// function signature 'func main()'
|
||||
func containsMainFunc(file *ast.File) bool {
|
||||
for _, decl := range file.Decls {
|
||||
if f, ok := decl.(*ast.FuncDecl); ok {
|
||||
if f.Name.Name != "main" {
|
||||
continue
|
||||
}
|
||||
|
||||
if len(f.Type.Params.List) != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if f.Type.Results != nil && len(f.Type.Results.List) != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func cutSpace(b []byte) (before, middle, after []byte) {
|
||||
i := 0
|
||||
for i < len(b) && (b[i] == ' ' || b[i] == '\t' || b[i] == '\n') {
|
||||
i++
|
||||
}
|
||||
j := len(b)
|
||||
for j > 0 && (b[j-1] == ' ' || b[j-1] == '\t' || b[j-1] == '\n') {
|
||||
j--
|
||||
}
|
||||
if i <= j {
|
||||
return b[:i], b[i:j], b[j:]
|
||||
}
|
||||
return nil, nil, b[j:]
|
||||
}
|
||||
|
||||
// matchSpace reformats src to use the same space context as orig.
|
||||
// 1) If orig begins with blank lines, matchSpace inserts them at the beginning of src.
|
||||
// 2) matchSpace copies the indentation of the first non-blank line in orig
|
||||
// to every non-blank line in src.
|
||||
// 3) matchSpace copies the trailing space from orig and uses it in place
|
||||
// of src's trailing space.
|
||||
func matchSpace(orig []byte, src []byte) []byte {
|
||||
before, _, after := cutSpace(orig)
|
||||
i := bytes.LastIndex(before, []byte{'\n'})
|
||||
before, indent := before[:i+1], before[i+1:]
|
||||
|
||||
_, src, _ = cutSpace(src)
|
||||
|
||||
var b bytes.Buffer
|
||||
b.Write(before)
|
||||
for len(src) > 0 {
|
||||
line := src
|
||||
if i := bytes.IndexByte(line, '\n'); i >= 0 {
|
||||
line, src = line[:i+1], line[i+1:]
|
||||
} else {
|
||||
src = nil
|
||||
}
|
||||
if len(line) > 0 && line[0] != '\n' { // not blank
|
||||
b.Write(indent)
|
||||
}
|
||||
b.Write(line)
|
||||
}
|
||||
b.Write(after)
|
||||
return b.Bytes()
|
||||
}
|
||||
|
||||
var impLine = regexp.MustCompile(`^\s+(?:[\w\.]+\s+)?"(.+)"`)
|
||||
|
||||
func addImportSpaces(r io.Reader, breaks []string) []byte {
|
||||
var out bytes.Buffer
|
||||
sc := bufio.NewScanner(r)
|
||||
inImports := false
|
||||
done := false
|
||||
for sc.Scan() {
|
||||
s := sc.Text()
|
||||
|
||||
if !inImports && !done && strings.HasPrefix(s, "import") {
|
||||
inImports = true
|
||||
}
|
||||
if inImports && (strings.HasPrefix(s, "var") ||
|
||||
strings.HasPrefix(s, "func") ||
|
||||
strings.HasPrefix(s, "const") ||
|
||||
strings.HasPrefix(s, "type")) {
|
||||
done = true
|
||||
inImports = false
|
||||
}
|
||||
if inImports && len(breaks) > 0 {
|
||||
if m := impLine.FindStringSubmatch(s); m != nil {
|
||||
if m[1] == string(breaks[0]) {
|
||||
out.WriteByte('\n')
|
||||
breaks = breaks[1:]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintln(&out, s)
|
||||
}
|
||||
return out.Bytes()
|
||||
}
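The exported entry point of this vendored package, imports.Process, is what the bind package added later in this commit calls to gofmt its generated source and resolve the import block. A minimal sketch of driving it on an in-memory fragment (the source snippet itself is made up for illustration):

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/imports"
)

func main() {
	// A fragment whose "fmt" import is missing; Process adds it and gofmts
	// the result. An empty filename is fine for in-memory sources, and a
	// nil *Options selects the package defaults.
	src := []byte("package demo\n\nfunc greet() { fmt.Println(\"hello\") }\n")

	out, err := imports.Process("", src, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", out)
}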
|
173
Godeps/_workspace/src/golang.org/x/tools/imports/mkindex.go
generated
vendored
Normal file
@ -0,0 +1,173 @@
|
||||
// +build ignore
|
||||
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Command mkindex creates the file "pkgindex.go" containing an index of the Go
|
||||
// standard library. The file is intended to be built as part of the imports
|
||||
// package, so that the package may be used in environments where a GOROOT is
|
||||
// not available (such as App Engine).
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/build"
|
||||
"go/format"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
pkgIndex = make(map[string][]pkg)
|
||||
exports = make(map[string]map[string]bool)
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Don't use GOPATH.
|
||||
ctx := build.Default
|
||||
ctx.GOPATH = ""
|
||||
|
||||
// Populate pkgIndex global from GOROOT.
|
||||
for _, path := range ctx.SrcDirs() {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
log.Print(err)
|
||||
continue
|
||||
}
|
||||
children, err := f.Readdir(-1)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
log.Print(err)
|
||||
continue
|
||||
}
|
||||
for _, child := range children {
|
||||
if child.IsDir() {
|
||||
loadPkg(path, child.Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
// Populate exports global.
|
||||
for _, ps := range pkgIndex {
|
||||
for _, p := range ps {
|
||||
e := loadExports(p.dir)
|
||||
if e != nil {
|
||||
exports[p.dir] = e
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Construct source file.
|
||||
var buf bytes.Buffer
|
||||
fmt.Fprint(&buf, pkgIndexHead)
|
||||
fmt.Fprintf(&buf, "var pkgIndexMaster = %#v\n", pkgIndex)
|
||||
fmt.Fprintf(&buf, "var exportsMaster = %#v\n", exports)
|
||||
src := buf.Bytes()
|
||||
|
||||
// Replace main.pkg type name with pkg.
|
||||
src = bytes.Replace(src, []byte("main.pkg"), []byte("pkg"), -1)
|
||||
// Replace actual GOROOT with "/go".
|
||||
src = bytes.Replace(src, []byte(ctx.GOROOT), []byte("/go"), -1)
|
||||
// Add some line wrapping.
|
||||
src = bytes.Replace(src, []byte("}, "), []byte("},\n"), -1)
|
||||
src = bytes.Replace(src, []byte("true, "), []byte("true,\n"), -1)
|
||||
|
||||
var err error
|
||||
src, err = format.Source(src)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// Write out source file.
|
||||
err = ioutil.WriteFile("pkgindex.go", src, 0644)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
const pkgIndexHead = `package imports
|
||||
|
||||
func init() {
|
||||
pkgIndexOnce.Do(func() {
|
||||
pkgIndex.m = pkgIndexMaster
|
||||
})
|
||||
loadExports = func(dir string) map[string]bool {
|
||||
return exportsMaster[dir]
|
||||
}
|
||||
}
|
||||
`
|
||||
|
||||
type pkg struct {
|
||||
importpath string // full pkg import path, e.g. "net/http"
|
||||
dir string // absolute file path to pkg directory e.g. "/usr/lib/go/src/fmt"
|
||||
}
|
||||
|
||||
var fset = token.NewFileSet()
|
||||
|
||||
func loadPkg(root, importpath string) {
|
||||
shortName := path.Base(importpath)
|
||||
if shortName == "testdata" {
|
||||
return
|
||||
}
|
||||
|
||||
dir := filepath.Join(root, importpath)
|
||||
pkgIndex[shortName] = append(pkgIndex[shortName], pkg{
|
||||
importpath: importpath,
|
||||
dir: dir,
|
||||
})
|
||||
|
||||
pkgDir, err := os.Open(dir)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
children, err := pkgDir.Readdir(-1)
|
||||
pkgDir.Close()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, child := range children {
|
||||
name := child.Name()
|
||||
if name == "" {
|
||||
continue
|
||||
}
|
||||
if c := name[0]; c == '.' || ('0' <= c && c <= '9') {
|
||||
continue
|
||||
}
|
||||
if child.IsDir() {
|
||||
loadPkg(root, filepath.Join(importpath, name))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func loadExports(dir string) map[string]bool {
|
||||
exports := make(map[string]bool)
|
||||
buildPkg, err := build.ImportDir(dir, 0)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "no buildable Go source files in") {
|
||||
return nil
|
||||
}
|
||||
log.Printf("could not import %q: %v", dir, err)
|
||||
return nil
|
||||
}
|
||||
for _, file := range buildPkg.GoFiles {
|
||||
f, err := parser.ParseFile(fset, filepath.Join(dir, file), nil, 0)
|
||||
if err != nil {
|
||||
log.Printf("could not parse %q: %v", file, err)
|
||||
continue
|
||||
}
|
||||
for name := range f.Scope.Objects {
|
||||
if ast.IsExported(name) {
|
||||
exports[name] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
return exports
|
||||
}
|
93
Godeps/_workspace/src/golang.org/x/tools/imports/mkstdlib.go
generated
vendored
Normal file
@ -0,0 +1,93 @@
|
||||
// +build ignore
|
||||
|
||||
// mkstdlib generates the zstdlib.go file, containing the Go standard
|
||||
// library API symbols. It's baked into the binary to avoid scanning
|
||||
// GOPATH in the common case.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func mustOpen(name string) io.Reader {
|
||||
f, err := os.Open(name)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
func api(base string) string {
|
||||
return filepath.Join(os.Getenv("GOROOT"), "api", base)
|
||||
}
|
||||
|
||||
var sym = regexp.MustCompile(`^pkg (\S+).*?, (?:var|func|type|const) ([A-Z]\w*)`)
|
||||
|
||||
func main() {
|
||||
var buf bytes.Buffer
|
||||
outf := func(format string, args ...interface{}) {
|
||||
fmt.Fprintf(&buf, format, args...)
|
||||
}
|
||||
outf("// AUTO-GENERATED BY mkstdlib.go\n\n")
|
||||
outf("package imports\n")
|
||||
outf("var stdlib = map[string]string{\n")
|
||||
f := io.MultiReader(
|
||||
mustOpen(api("go1.txt")),
|
||||
mustOpen(api("go1.1.txt")),
|
||||
mustOpen(api("go1.2.txt")),
|
||||
mustOpen(api("go1.3.txt")),
|
||||
mustOpen(api("go1.4.txt")),
|
||||
mustOpen(api("go1.5.txt")),
|
||||
)
|
||||
sc := bufio.NewScanner(f)
|
||||
fullImport := map[string]string{} // "zip.NewReader" => "archive/zip"
|
||||
ambiguous := map[string]bool{}
|
||||
var keys []string
|
||||
for sc.Scan() {
|
||||
l := sc.Text()
|
||||
has := func(v string) bool { return strings.Contains(l, v) }
|
||||
if has("struct, ") || has("interface, ") || has(", method (") {
|
||||
continue
|
||||
}
|
||||
if m := sym.FindStringSubmatch(l); m != nil {
|
||||
full := m[1]
|
||||
key := path.Base(full) + "." + m[2]
|
||||
if exist, ok := fullImport[key]; ok {
|
||||
if exist != full {
|
||||
ambiguous[key] = true
|
||||
}
|
||||
} else {
|
||||
fullImport[key] = full
|
||||
keys = append(keys, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := sc.Err(); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
for _, key := range keys {
|
||||
if ambiguous[key] {
|
||||
outf("\t// %q is ambiguous\n", key)
|
||||
} else {
|
||||
outf("\t%q: %q,\n", key, fullImport[key])
|
||||
}
|
||||
}
|
||||
outf("}\n")
|
||||
fmtbuf, err := format.Source(buf.Bytes())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
os.Stdout.Write(fmtbuf)
|
||||
}
|
212
Godeps/_workspace/src/golang.org/x/tools/imports/sortimports.go
generated
vendored
Normal file
@ -0,0 +1,212 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Hacked up copy of go/ast/import.go
|
||||
|
||||
package imports
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"sort"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// sortImports sorts runs of consecutive import lines in import blocks in f.
|
||||
// It also removes duplicate imports when it is possible to do so without data loss.
|
||||
func sortImports(fset *token.FileSet, f *ast.File) {
|
||||
for i, d := range f.Decls {
|
||||
d, ok := d.(*ast.GenDecl)
|
||||
if !ok || d.Tok != token.IMPORT {
|
||||
// Not an import declaration, so we're done.
|
||||
// Imports are always first.
|
||||
break
|
||||
}
|
||||
|
||||
if len(d.Specs) == 0 {
|
||||
// Empty import block, remove it.
|
||||
f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
|
||||
}
|
||||
|
||||
if !d.Lparen.IsValid() {
|
||||
// Not a block: sorted by default.
|
||||
continue
|
||||
}
|
||||
|
||||
// Identify and sort runs of specs on successive lines.
|
||||
i := 0
|
||||
specs := d.Specs[:0]
|
||||
for j, s := range d.Specs {
|
||||
if j > i && fset.Position(s.Pos()).Line > 1+fset.Position(d.Specs[j-1].End()).Line {
|
||||
// j begins a new run. End this one.
|
||||
specs = append(specs, sortSpecs(fset, f, d.Specs[i:j])...)
|
||||
i = j
|
||||
}
|
||||
}
|
||||
specs = append(specs, sortSpecs(fset, f, d.Specs[i:])...)
|
||||
d.Specs = specs
|
||||
|
||||
// Deduping can leave a blank line before the rparen; clean that up.
|
||||
if len(d.Specs) > 0 {
|
||||
lastSpec := d.Specs[len(d.Specs)-1]
|
||||
lastLine := fset.Position(lastSpec.Pos()).Line
|
||||
if rParenLine := fset.Position(d.Rparen).Line; rParenLine > lastLine+1 {
|
||||
fset.File(d.Rparen).MergeLine(rParenLine - 1)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func importPath(s ast.Spec) string {
|
||||
t, err := strconv.Unquote(s.(*ast.ImportSpec).Path.Value)
|
||||
if err == nil {
|
||||
return t
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func importName(s ast.Spec) string {
|
||||
n := s.(*ast.ImportSpec).Name
|
||||
if n == nil {
|
||||
return ""
|
||||
}
|
||||
return n.Name
|
||||
}
|
||||
|
||||
func importComment(s ast.Spec) string {
|
||||
c := s.(*ast.ImportSpec).Comment
|
||||
if c == nil {
|
||||
return ""
|
||||
}
|
||||
return c.Text()
|
||||
}
|
||||
|
||||
// collapse indicates whether prev may be removed, leaving only next.
|
||||
func collapse(prev, next ast.Spec) bool {
|
||||
if importPath(next) != importPath(prev) || importName(next) != importName(prev) {
|
||||
return false
|
||||
}
|
||||
return prev.(*ast.ImportSpec).Comment == nil
|
||||
}
|
||||
|
||||
type posSpan struct {
|
||||
Start token.Pos
|
||||
End token.Pos
|
||||
}
|
||||
|
||||
func sortSpecs(fset *token.FileSet, f *ast.File, specs []ast.Spec) []ast.Spec {
|
||||
// Can't short-circuit here even if specs are already sorted,
|
||||
// since they might yet need deduplication.
|
||||
// A lone import, however, may be safely ignored.
|
||||
if len(specs) <= 1 {
|
||||
return specs
|
||||
}
|
||||
|
||||
// Record positions for specs.
|
||||
pos := make([]posSpan, len(specs))
|
||||
for i, s := range specs {
|
||||
pos[i] = posSpan{s.Pos(), s.End()}
|
||||
}
|
||||
|
||||
// Identify comments in this range.
|
||||
// Any comment from pos[0].Start to the final line counts.
|
||||
lastLine := fset.Position(pos[len(pos)-1].End).Line
|
||||
cstart := len(f.Comments)
|
||||
cend := len(f.Comments)
|
||||
for i, g := range f.Comments {
|
||||
if g.Pos() < pos[0].Start {
|
||||
continue
|
||||
}
|
||||
if i < cstart {
|
||||
cstart = i
|
||||
}
|
||||
if fset.Position(g.End()).Line > lastLine {
|
||||
cend = i
|
||||
break
|
||||
}
|
||||
}
|
||||
comments := f.Comments[cstart:cend]
|
||||
|
||||
// Assign each comment to the import spec preceding it.
|
||||
importComment := map[*ast.ImportSpec][]*ast.CommentGroup{}
|
||||
specIndex := 0
|
||||
for _, g := range comments {
|
||||
for specIndex+1 < len(specs) && pos[specIndex+1].Start <= g.Pos() {
|
||||
specIndex++
|
||||
}
|
||||
s := specs[specIndex].(*ast.ImportSpec)
|
||||
importComment[s] = append(importComment[s], g)
|
||||
}
|
||||
|
||||
// Sort the import specs by import path.
|
||||
// Remove duplicates, when possible without data loss.
|
||||
// Reassign the import paths to have the same position sequence.
|
||||
// Reassign each comment to abut the end of its spec.
|
||||
// Sort the comments by new position.
|
||||
sort.Sort(byImportSpec(specs))
|
||||
|
||||
// Dedup. Thanks to our sorting, we can just consider
|
||||
// adjacent pairs of imports.
|
||||
deduped := specs[:0]
|
||||
for i, s := range specs {
|
||||
if i == len(specs)-1 || !collapse(s, specs[i+1]) {
|
||||
deduped = append(deduped, s)
|
||||
} else {
|
||||
p := s.Pos()
|
||||
fset.File(p).MergeLine(fset.Position(p).Line)
|
||||
}
|
||||
}
|
||||
specs = deduped
|
||||
|
||||
// Fix up comment positions
|
||||
for i, s := range specs {
|
||||
s := s.(*ast.ImportSpec)
|
||||
if s.Name != nil {
|
||||
s.Name.NamePos = pos[i].Start
|
||||
}
|
||||
s.Path.ValuePos = pos[i].Start
|
||||
s.EndPos = pos[i].End
|
||||
for _, g := range importComment[s] {
|
||||
for _, c := range g.List {
|
||||
c.Slash = pos[i].End
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(byCommentPos(comments))
|
||||
|
||||
return specs
|
||||
}
|
||||
|
||||
type byImportSpec []ast.Spec // slice of *ast.ImportSpec
|
||||
|
||||
func (x byImportSpec) Len() int { return len(x) }
|
||||
func (x byImportSpec) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
func (x byImportSpec) Less(i, j int) bool {
|
||||
ipath := importPath(x[i])
|
||||
jpath := importPath(x[j])
|
||||
|
||||
igroup := importGroup(ipath)
|
||||
jgroup := importGroup(jpath)
|
||||
if igroup != jgroup {
|
||||
return igroup < jgroup
|
||||
}
|
||||
|
||||
if ipath != jpath {
|
||||
return ipath < jpath
|
||||
}
|
||||
iname := importName(x[i])
|
||||
jname := importName(x[j])
|
||||
|
||||
if iname != jname {
|
||||
return iname < jname
|
||||
}
|
||||
return importComment(x[i]) < importComment(x[j])
|
||||
}
|
||||
|
||||
type byCommentPos []*ast.CommentGroup
|
||||
|
||||
func (x byCommentPos) Len() int { return len(x) }
|
||||
func (x byCommentPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
func (x byCommentPos) Less(i, j int) bool { return x[i].Pos() < x[j].Pos() }
|
9038
Godeps/_workspace/src/golang.org/x/tools/imports/zstdlib.go
generated
vendored
Normal file
File diff suppressed because it is too large
@ -26,11 +26,6 @@ import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
)
|
||||
|
||||
// Executer is an executer method for performing state executions. It takes one
|
||||
// argument which is the input data and expects output data to be returned as
|
||||
// multiple 32 byte word length concatenated slice
|
||||
type Executer func(datain []byte) []byte
|
||||
|
||||
// The ABI holds information about a contract's context and available
|
||||
// invokable methods. It will allow you to type check function calls and
|
||||
// packs data accordingly.
|
||||
@ -169,21 +164,6 @@ func toGoType(i int, t Argument, output []byte) (interface{}, error) {
|
||||
return nil, fmt.Errorf("abi: unknown type %v", t.Type.T)
|
||||
}
|
||||
|
||||
// Call will unmarshal the output of the call in v. It will return an error if
|
||||
// invalid type is given or if the output is too short to conform to the ABI
|
||||
// spec.
|
||||
//
|
||||
// Call supports all of the available types and accepts a struct or an interface
|
||||
// slice if the return is a tuple.
|
||||
func (abi ABI) Call(executer Executer, v interface{}, name string, args ...interface{}) error {
|
||||
callData, err := abi.Pack(name, args...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return abi.unmarshal(v, name, executer(callData))
|
||||
}
|
||||
|
||||
// these variables are used to determine certain types during type assertion for
|
||||
// assignment.
|
||||
var (
|
||||
@ -193,8 +173,8 @@ var (
|
||||
r_byte = reflect.TypeOf(byte(0))
|
||||
)
|
||||
|
||||
// unmarshal output in v according to the abi specification
|
||||
func (abi ABI) unmarshal(v interface{}, name string, output []byte) error {
|
||||
// Unpack output in v according to the abi specification
|
||||
func (abi ABI) Unpack(v interface{}, name string, output []byte) error {
|
||||
var method = abi.Methods[name]
|
||||
|
||||
if len(output) == 0 {
|
||||
|
@ -579,7 +579,7 @@ func TestMultiReturnWithStruct(t *testing.T) {
|
||||
Int *big.Int
|
||||
String string
|
||||
}
|
||||
err = abi.unmarshal(&inter, "multi", buff.Bytes())
|
||||
err = abi.Unpack(&inter, "multi", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@ -597,7 +597,7 @@ func TestMultiReturnWithStruct(t *testing.T) {
|
||||
Int *big.Int
|
||||
}
|
||||
|
||||
err = abi.unmarshal(&reversed, "multi", buff.Bytes())
|
||||
err = abi.Unpack(&reversed, "multi", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@ -629,7 +629,7 @@ func TestMultiReturnWithSlice(t *testing.T) {
|
||||
buff.Write(common.RightPadBytes([]byte(stringOut), 32))
|
||||
|
||||
var inter []interface{}
|
||||
err = abi.unmarshal(&inter, "multi", buff.Bytes())
|
||||
err = abi.Unpack(&inter, "multi", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@ -661,13 +661,13 @@ func TestMarshalArrays(t *testing.T) {
|
||||
output := common.LeftPadBytes([]byte{1}, 32)
|
||||
|
||||
var bytes10 [10]byte
|
||||
err = abi.unmarshal(&bytes10, "bytes32", output)
|
||||
err = abi.Unpack(&bytes10, "bytes32", output)
|
||||
if err == nil || err.Error() != "abi: cannot unmarshal src (len=32) in to dst (len=10)" {
|
||||
t.Error("expected error or bytes32 not be assignable to bytes10:", err)
|
||||
}
|
||||
|
||||
var bytes32 [32]byte
|
||||
err = abi.unmarshal(&bytes32, "bytes32", output)
|
||||
err = abi.Unpack(&bytes32, "bytes32", output)
|
||||
if err != nil {
|
||||
t.Error("didn't expect error:", err)
|
||||
}
|
||||
@ -681,13 +681,13 @@ func TestMarshalArrays(t *testing.T) {
|
||||
)
|
||||
|
||||
var b10 B10
|
||||
err = abi.unmarshal(&b10, "bytes32", output)
|
||||
err = abi.Unpack(&b10, "bytes32", output)
|
||||
if err == nil || err.Error() != "abi: cannot unmarshal src (len=32) in to dst (len=10)" {
|
||||
t.Error("expected error or bytes32 not be assignable to bytes10:", err)
|
||||
}
|
||||
|
||||
var b32 B32
|
||||
err = abi.unmarshal(&b32, "bytes32", output)
|
||||
err = abi.Unpack(&b32, "bytes32", output)
|
||||
if err != nil {
|
||||
t.Error("didn't expect error:", err)
|
||||
}
|
||||
@ -697,7 +697,7 @@ func TestMarshalArrays(t *testing.T) {
|
||||
|
||||
output[10] = 1
|
||||
var shortAssignLong [32]byte
|
||||
err = abi.unmarshal(&shortAssignLong, "bytes10", output)
|
||||
err = abi.Unpack(&shortAssignLong, "bytes10", output)
|
||||
if err != nil {
|
||||
t.Error("didn't expect error:", err)
|
||||
}
|
||||
@ -722,7 +722,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
|
||||
// marshal int
|
||||
var Int *big.Int
|
||||
err = abi.unmarshal(&Int, "int", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
err = abi.Unpack(&Int, "int", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@ -733,7 +733,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
|
||||
// marshal bool
|
||||
var Bool bool
|
||||
err = abi.unmarshal(&Bool, "bool", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
err = abi.Unpack(&Bool, "bool", common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000001"))
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@ -750,7 +750,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
buff.Write(bytesOut)
|
||||
|
||||
var Bytes []byte
|
||||
err = abi.unmarshal(&Bytes, "bytes", buff.Bytes())
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@ -766,7 +766,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
bytesOut = common.RightPadBytes([]byte("hello"), 64)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
err = abi.unmarshal(&Bytes, "bytes", buff.Bytes())
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@ -782,7 +782,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
bytesOut = common.RightPadBytes([]byte("hello"), 63)
|
||||
buff.Write(bytesOut)
|
||||
|
||||
err = abi.unmarshal(&Bytes, "bytes", buff.Bytes())
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@ -792,7 +792,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
}
|
||||
|
||||
// marshal dynamic bytes output empty
|
||||
err = abi.unmarshal(&Bytes, "bytes", nil)
|
||||
err = abi.Unpack(&Bytes, "bytes", nil)
|
||||
if err == nil {
|
||||
t.Error("expected error")
|
||||
}
|
||||
@ -803,7 +803,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000005"))
|
||||
buff.Write(common.RightPadBytes([]byte("hello"), 32))
|
||||
|
||||
err = abi.unmarshal(&Bytes, "bytes", buff.Bytes())
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@ -817,7 +817,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
buff.Write(common.RightPadBytes([]byte("hello"), 32))
|
||||
|
||||
var hash common.Hash
|
||||
err = abi.unmarshal(&hash, "fixed", buff.Bytes())
|
||||
err = abi.Unpack(&hash, "fixed", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
@ -830,12 +830,12 @@ func TestUnmarshal(t *testing.T) {
|
||||
// marshal error
|
||||
buff.Reset()
|
||||
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000020"))
|
||||
err = abi.unmarshal(&Bytes, "bytes", buff.Bytes())
|
||||
err = abi.Unpack(&Bytes, "bytes", buff.Bytes())
|
||||
if err == nil {
|
||||
t.Error("expected error")
|
||||
}
|
||||
|
||||
err = abi.unmarshal(&Bytes, "multi", make([]byte, 64))
|
||||
err = abi.Unpack(&Bytes, "multi", make([]byte, 64))
|
||||
if err == nil {
|
||||
t.Error("expected error")
|
||||
}
|
||||
@ -850,7 +850,7 @@ func TestUnmarshal(t *testing.T) {
|
||||
buff.Write(bytesOut)
|
||||
|
||||
var out []interface{}
|
||||
err = abi.unmarshal(&out, "mixedBytes", buff.Bytes())
|
||||
err = abi.Unpack(&out, "mixedBytes", buff.Bytes())
|
||||
if err != nil {
|
||||
t.Fatal("didn't expect error:", err)
|
||||
}
|
||||
|
247
accounts/abi/bind/backend.go
Normal file
@ -0,0 +1,247 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bind
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/ethereum/go-ethereum/rlp"
|
||||
"github.com/ethereum/go-ethereum/rpc"
|
||||
)
|
||||
|
||||
// ContractCaller defines the methods needed to allow operating with a contract on a read
|
||||
// only basis.
|
||||
type ContractCaller interface {
|
||||
// ContractCall executes an Ethereum contract call with the specified data as
|
||||
// the input.
|
||||
ContractCall(contract common.Address, data []byte) ([]byte, error)
|
||||
}
|
||||
|
||||
// ContractTransactor defines the methods needed to allow operating with a contract
|
||||
// on a write only basis. Besides the transacting method, the remainder are helpers
|
||||
// used when the user does not provide some needed values, but rather leaves it up
|
||||
// to the transactor to decide.
|
||||
type ContractTransactor interface {
|
||||
// AccountNonce retrieves the current pending nonce associated with an account.
|
||||
AccountNonce(account common.Address) (uint64, error)
|
||||
|
||||
// GasPrice retrieves the currently suggested gas price to allow a timely execution
|
||||
// of a transaction.
|
||||
GasPrice() (*big.Int, error)
|
||||
|
||||
// GasLimit tries to estimate the gas needed to execute a specific transaction.
|
||||
GasLimit(sender, contract common.Address, value *big.Int, data []byte) (*big.Int, error)
|
||||
|
||||
// SendTransaction injects the transaction into the pending pool for execution.
|
||||
SendTransaction(*types.Transaction) error
|
||||
}
|
||||
|
||||
// ContractBackend defines the methods needed to allow operating with a contract
|
||||
// on a read-write basis.
|
||||
type ContractBackend interface {
|
||||
ContractCaller
|
||||
ContractTransactor
|
||||
}
|
||||
|
||||
// nilBackend implements bind.ContractBackend, but panics on any method call.
|
||||
// Its sole purpose is to support the binding tests to construct the generated
|
||||
// wrappers without calling any methods on them.
|
||||
type nilBackend struct{}
|
||||
|
||||
func (*nilBackend) ContractCall(common.Address, []byte) ([]byte, error) { panic("not implemented") }
|
||||
func (*nilBackend) SendTransaction(*types.Transaction) error { panic("not implemented") }
|
||||
func (*nilBackend) AccountNonce(common.Address) (uint64, error) { panic("not implemented") }
|
||||
func (*nilBackend) GasPrice() (*big.Int, error) { panic("not implemented") }
|
||||
func (*nilBackend) GasLimit(common.Address, common.Address, *big.Int, []byte) (*big.Int, error) {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// Helper backend for internal tests. Will panic on any invocation!
|
||||
var NilBackend = new(nilBackend)
|
||||
|
||||
// rpcBackend implements bind.ContractBackend, and acts as the data provider to
|
||||
// Ethereum contracts bound to Go structs. It uses an RPC connection to delegate
|
||||
// all its functionality.
|
||||
//
|
||||
// Note: The current implementation is a blocking one. This should be replaced
|
||||
// by a proper async version when a real RPC client is created.
|
||||
type rpcBackend struct {
|
||||
client rpc.Client // RPC client connection to interact with an API server
|
||||
autoid uint32 // ID number to use for the next API request
|
||||
lock sync.Mutex // Singleton access until we get to request multiplexing
|
||||
}
|
||||
|
||||
// NewRPCBackend creates a new binding backend to an RPC provider that can be
|
||||
// used to interact with remote contracts.
|
||||
func NewRPCBackend(client rpc.Client) ContractBackend {
|
||||
return &rpcBackend{
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
// request is a JSON RPC request package assembled internally from the client
|
||||
// method calls.
|
||||
type request struct {
|
||||
JsonRpc string `json:"jsonrpc"` // Version of the JSON RPC protocol, always set to 2.0
|
||||
Id int `json:"id"` // Auto incrementing ID number for this request
|
||||
Method string `json:"method"` // Remote procedure name to invoke on the server
|
||||
Params []interface{} `json:"params"` // List of parameters to pass through (keep types simple)
|
||||
}
|
||||
|
||||
// response is a JSON RPC response package sent back from the API server.
|
||||
type response struct {
|
||||
JsonRpc string `json:"jsonrpc"` // Version of the JSON RPC protocol, always set to 2.0
|
||||
Id int `json:"id"` // Auto incrementing ID number for this request
|
||||
Error json.RawMessage `json:"error"` // Any error returned by the remote side
|
||||
Result json.RawMessage `json:"result"` // Whatever the remote side sends us in reply
|
||||
}
|
||||
|
||||
// request forwards an API request to the RPC server, and parses the response.
|
||||
//
|
||||
// This is currently painfully non-concurrent, but it will have to do until we
|
||||
// find the time for niceties like this :P
|
||||
func (backend *rpcBackend) request(method string, params []interface{}) (json.RawMessage, error) {
|
||||
backend.lock.Lock()
|
||||
defer backend.lock.Unlock()
|
||||
|
||||
// Ugly hack to serialize an empty list properly
|
||||
if params == nil {
|
||||
params = []interface{}{}
|
||||
}
|
||||
// Assemble the request object
|
||||
req := &request{
|
||||
JsonRpc: "2.0",
|
||||
Id: int(atomic.AddUint32(&backend.autoid, 1)),
|
||||
Method: method,
|
||||
Params: params,
|
||||
}
|
||||
if err := backend.client.Send(req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res := new(response)
|
||||
if err := backend.client.Recv(res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(res.Error) > 0 {
|
||||
return nil, fmt.Errorf("remote error: %s", string(res.Error))
|
||||
}
|
||||
return res.Result, nil
|
||||
}
|
||||
|
||||
// ContractCall implements ContractCaller.ContractCall, delegating the execution of
|
||||
// a contract call to the remote node, returning the reply for local processing.
|
||||
func (b *rpcBackend) ContractCall(contract common.Address, data []byte) ([]byte, error) {
|
||||
// Pack up the request into an RPC argument
|
||||
args := struct {
|
||||
To common.Address `json:"to"`
|
||||
Data string `json:"data"`
|
||||
}{
|
||||
To: contract,
|
||||
Data: common.ToHex(data),
|
||||
}
|
||||
// Execute the RPC call and retrieve the response
|
||||
res, err := b.request("eth_call", []interface{}{args, "pending"})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var hex string
|
||||
if err := json.Unmarshal(res, &hex); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Convert the response back to a Go byte slice and return
|
||||
return common.FromHex(hex), nil
|
||||
}
|
||||
|
||||
// AccountNonce implements ContractTransactor.AccountNonce, delegating the
|
||||
// current account nonce retrieval to the remote node.
|
||||
func (b *rpcBackend) AccountNonce(account common.Address) (uint64, error) {
|
||||
res, err := b.request("eth_getTransactionCount", []interface{}{account.Hex(), "pending"})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
var hex string
|
||||
if err := json.Unmarshal(res, &hex); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return new(big.Int).SetBytes(common.FromHex(hex)).Uint64(), nil
|
||||
}
|
||||
|
||||
// GasPrice implements ContractTransactor.GasPrice, delegating the gas price
|
||||
// oracle request to the remote node.
|
||||
func (b *rpcBackend) GasPrice() (*big.Int, error) {
|
||||
res, err := b.request("eth_gasPrice", nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var hex string
|
||||
if err := json.Unmarshal(res, &hex); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return new(big.Int).SetBytes(common.FromHex(hex)), nil
|
||||
}
|
||||
|
||||
// GasLimit implements ContractTransactor.GasLimit, delegating the gas estimation
|
||||
// to the remote node.
|
||||
func (b *rpcBackend) GasLimit(sender, contract common.Address, value *big.Int, data []byte) (*big.Int, error) {
|
||||
// Pack up the request into an RPC argument
|
||||
args := struct {
|
||||
From common.Address `json:"from"`
|
||||
To common.Address `json:"to"`
|
||||
Value *rpc.HexNumber `json:"value"`
|
||||
Data string `json:"data"`
|
||||
}{
|
||||
From: sender,
|
||||
To: contract,
|
||||
Data: common.ToHex(data),
|
||||
Value: rpc.NewHexNumber(value),
|
||||
}
|
||||
// Execute the RPC call and retrieve the response
|
||||
res, err := b.request("eth_estimateGas", []interface{}{args})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var hex string
|
||||
if err := json.Unmarshal(res, &hex); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Convert the response back to a Go byte slice and return
|
||||
return new(big.Int).SetBytes(common.FromHex(hex)), nil
|
||||
}
|
||||
|
||||
// SendTransaction implements ContractTransactor.SendTransaction, delegating the raw
|
||||
// transaction injection to the remote node.
|
||||
func (b *rpcBackend) SendTransaction(tx *types.Transaction) error {
|
||||
data, err := rlp.EncodeToBytes(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
res, err := b.request("eth_sendRawTransaction", []interface{}{common.ToHex(data)})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var hex string
|
||||
if err := json.Unmarshal(res, &hex); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
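Because ContractBackend is simply the union of ContractCaller and ContractTransactor, callers can plug in their own implementation instead of the RPC one above. The sketch below is not part of this commit; fixedBackend and its canned return values are invented, purely to show a minimal offline backend that could drive generated bindings in tests:

package bind

import (
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
)

// fixedBackend is a hypothetical offline backend serving canned data, so that
// generated bindings can be exercised without a live node.
type fixedBackend struct {
	callResult []byte // returned verbatim by ContractCall
}

// Compile time check that the stub satisfies the combined interface.
var _ ContractBackend = (*fixedBackend)(nil)

func (b *fixedBackend) ContractCall(contract common.Address, data []byte) ([]byte, error) {
	return b.callResult, nil
}

func (b *fixedBackend) AccountNonce(account common.Address) (uint64, error) {
	return 0, nil
}

func (b *fixedBackend) GasPrice() (*big.Int, error) {
	return big.NewInt(1), nil
}

func (b *fixedBackend) GasLimit(sender, contract common.Address, value *big.Int, data []byte) (*big.Int, error) {
	return big.NewInt(100000), nil
}

func (b *fixedBackend) SendTransaction(tx *types.Transaction) error {
	return nil
}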
|
130
accounts/abi/bind/base.go
Normal file
@ -0,0 +1,130 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package bind
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
)
|
||||
|
||||
// SignerFn is a signer function callback when a contract requires a method to
|
||||
// sign the transaction before submission.
|
||||
type SignerFn func(common.Address, *types.Transaction) (*types.Transaction, error)
|
||||
|
||||
// AuthOpts is the authorization data required to create a valid Ethereum transaction.
|
||||
type AuthOpts struct {
|
||||
Account common.Address // Ethereum account to send the transaction from
|
||||
Nonce *big.Int // Nonce to use for the transaction execution (nil = use pending state)
|
||||
Signer SignerFn // Method to use for signing the transaction (mandatory)
|
||||
|
||||
Value *big.Int // Funds to transfer along with the transaction (nil = 0 = no funds)
|
||||
GasPrice *big.Int // Gas price to use for the transaction execution (nil = gas price oracle)
|
||||
GasLimit *big.Int // Gas limit to set for the transaction execution (nil = estimate + 10%)
|
||||
}
|
||||
|
||||
// BoundContract is the base wrapper object that reflects a contract on the
|
||||
// Ethereum network. It contains a collection of methods that are used by the
|
||||
// higher level contract bindings to operate.
|
||||
type BoundContract struct {
|
||||
address common.Address // Deployment address of the contract on the Ethereum blockchain
|
||||
abi abi.ABI // Reflect based ABI to access the correct Ethereum methods
|
||||
caller ContractCaller // Read interface to interact with the blockchain
|
||||
transactor ContractTransactor // Write interface to interact with the blockchain
|
||||
}
|
||||
|
||||
// NewBoundContract creates a low level contract interface through which calls
|
||||
// and transactions may be made.
|
||||
func NewBoundContract(address common.Address, abi abi.ABI, caller ContractCaller, transactor ContractTransactor) *BoundContract {
|
||||
return &BoundContract{
|
||||
address: address,
|
||||
abi: abi,
|
||||
caller: caller,
|
||||
transactor: transactor,
|
||||
}
|
||||
}
|
||||
|
||||
// Call invokes the (constant) contract method with params as input values and
|
||||
// sets the output to result. The result type might be a single field for simple
|
||||
// returns, a slice of interfaces for anonymous returns and a struct for named
|
||||
// returns.
|
||||
func (c *BoundContract) Call(result interface{}, method string, params ...interface{}) error {
|
||||
input, err := c.abi.Pack(method, params...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
output, err := c.caller.ContractCall(c.address, input)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.abi.Unpack(result, method, output)
|
||||
}
|
||||
|
||||
// Transact invokes the (paid) contract method with params as input values and
|
||||
// value as the fund transfer to the contract.
|
||||
func (c *BoundContract) Transact(opts *AuthOpts, method string, params ...interface{}) (*types.Transaction, error) {
|
||||
input, err := c.abi.Pack(method, params...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Ensure a valid value field and resolve the account nonce
|
||||
value := opts.Value
|
||||
if value == nil {
|
||||
value = new(big.Int)
|
||||
}
|
||||
nonce := uint64(0)
|
||||
if opts.Nonce == nil {
|
||||
nonce, err = c.transactor.AccountNonce(opts.Account)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to retrieve account nonce: %v", err)
|
||||
}
|
||||
} else {
|
||||
nonce = opts.Nonce.Uint64()
|
||||
}
|
||||
// Figure out the gas allowance and gas price values
|
||||
gasPrice := opts.GasPrice
|
||||
if gasPrice == nil {
|
||||
gasPrice, err = c.transactor.GasPrice()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to suggest gas price: %v", err)
|
||||
}
|
||||
}
|
||||
gasLimit := opts.GasLimit
|
||||
if gasLimit == nil {
|
||||
gasLimit, err = c.transactor.GasLimit(opts.Account, c.address, value, input)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to estimate gas needed: %v", err)
|
||||
}
|
||||
}
|
||||
// Create the transaction, sign it and schedule it for execution
|
||||
rawTx := types.NewTransaction(nonce, c.address, value, gasLimit, gasPrice, input)
|
||||
if opts.Signer == nil {
|
||||
return nil, errors.New("no signer to authorize the transaction with")
|
||||
}
|
||||
signedTx, err := opts.Signer(opts.Account, rawTx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := c.transactor.SendTransaction(signedTx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return signedTx, nil
|
||||
}
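A rough usage sketch of the wrapper above, assuming the caller already holds a ContractBackend and a SignerFn wired to an unlocked account; the two method ABI, the contract address and the Demo helper are placeholders, not part of this commit:

package demo

import (
	"fmt"
	"log"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
)

// Demo drives a BoundContract directly; generated bindings do the same thing
// behind compile time checked method signatures.
func Demo(backend bind.ContractBackend, signer bind.SignerFn, owner common.Address) {
	// Hypothetical contract: get() constant returns(uint256), set(uint256).
	const jsonABI = `[
		{"constant":true,"inputs":[],"name":"get","outputs":[{"name":"value","type":"uint256"}],"type":"function"},
		{"constant":false,"inputs":[{"name":"value","type":"uint256"}],"name":"set","outputs":[],"type":"function"}
	]`
	parsed, err := abi.JSON(strings.NewReader(jsonABI))
	if err != nil {
		log.Fatal(err)
	}
	address := common.HexToAddress("0x0000000000000000000000000000000000000001") // placeholder
	contract := bind.NewBoundContract(address, parsed, backend, backend)

	// Constant method: the output is unpacked straight into the result pointer.
	var value *big.Int
	if err := contract.Call(&value, "get"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("current value:", value)

	// Mutator method: nonce, gas price and gas limit are resolved on demand
	// because they are left nil in the options.
	opts := &bind.AuthOpts{Account: owner, Signer: signer}
	if _, err := contract.Transact(opts, "set", big.NewInt(42)); err != nil {
		log.Fatal(err)
	}
}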
|
317
accounts/abi/bind/bind.go
Normal file
@ -0,0 +1,317 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Package bind generates Ethereum contract Go bindings.
|
||||
package bind
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi"
|
||||
"golang.org/x/tools/imports"
|
||||
)
|
||||
|
||||
// Bind generates a Go wrapper around a contract ABI. This wrapper isn't meant
|
||||
// to be used as is in client code, but rather as an intermediate struct which
|
||||
// enforces compile time type safety and naming conventions as opposed to having to
|
||||
// manually maintain hard coded strings that break at runtime.
|
||||
func Bind(jsonABI string, pkg string, kind string) (string, error) {
|
||||
// Parse the actual ABI to generate the binding for
|
||||
abi, err := abi.JSON(strings.NewReader(jsonABI))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
// Generate the contract type, fields and methods
|
||||
code := new(bytes.Buffer)
|
||||
kind = strings.ToUpper(kind[:1]) + kind[1:]
|
||||
fmt.Fprintf(code, "%s\n", bindContract(kind, jsonABI))
|
||||
|
||||
methods := make([]string, 0, len(abi.Methods))
|
||||
for name := range abi.Methods {
|
||||
methods = append(methods, name)
|
||||
}
|
||||
sort.Strings(methods)
|
||||
|
||||
for _, method := range methods {
|
||||
fmt.Fprintf(code, "%s\n", bindMethod(kind, abi.Methods[method]))
|
||||
}
|
||||
// Format the code with goimports and return
|
||||
buffer := new(bytes.Buffer)
|
||||
|
||||
fmt.Fprintf(buffer, "package %s\n\n", pkg)
|
||||
fmt.Fprintf(buffer, "%s\n\n", string(code.Bytes()))
|
||||
|
||||
blob, err := imports.Process("", buffer.Bytes(), nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(blob), nil
|
||||
}
|
||||
|
||||
// bindContract generates the basic wrapper code for interacting with an Ethereum
|
||||
// contract via the abi package. All contract methods will call into the generic
|
||||
// ones generated here.
|
||||
func bindContract(kind string, abi string) string {
|
||||
code := ""
|
||||
|
||||
// Generate the hard coded ABI used for Ethereum interaction
|
||||
code += fmt.Sprintf("// Ethereum ABI used to generate the binding from.\nconst %sABI = `%s`\n\n", kind, strings.TrimSpace(abi))
|
||||
|
||||
// Generate the Go struct with all the maintenance fields
|
||||
code += fmt.Sprintf("// %s is an auto generated Go binding around an Ethereum contract.\n", kind)
|
||||
code += fmt.Sprintf("type %s struct {\n", kind)
|
||||
code += fmt.Sprintf(" %sCaller // Read-only binding to the contract\n", kind)
|
||||
code += fmt.Sprintf(" %sTransactor // Write-only binding to the contract\n", kind)
|
||||
code += fmt.Sprintf("}\n\n")
|
||||
|
||||
code += fmt.Sprintf("// %sCaller is an auto generated read-only Go binding around an Ethereum contract.\n", kind)
|
||||
code += fmt.Sprintf("type %sCaller struct {\n", kind)
|
||||
code += fmt.Sprintf(" common *common%s // Contract binding common to callers and transactors\n", kind)
|
||||
code += fmt.Sprintf("}\n\n")
|
||||
|
||||
code += fmt.Sprintf("// %sTransactor is an auto generated write-only Go binding around an Ethereum contract.\n", kind)
|
||||
code += fmt.Sprintf("type %sTransactor struct {\n", kind)
|
||||
code += fmt.Sprintf(" common *common%s // Contract binding common to callers and transactors\n", kind)
|
||||
code += fmt.Sprintf("}\n\n")
|
||||
|
||||
code += fmt.Sprintf("// common%s is an auto generated Go binding around an Ethereum contract.\n", kind)
|
||||
code += fmt.Sprintf("type common%s struct {\n", kind)
|
||||
code += fmt.Sprintf(" contract *bind.BoundContract // Generic contract wrapper for the low level calls\n")
|
||||
code += fmt.Sprintf("}\n\n")
|
||||
|
||||
// Generate the constructor to create a bound contract
|
||||
code += fmt.Sprintf("// New%s creates a new instance of %s, bound to a specific deployed contract.\n", kind, kind)
|
||||
code += fmt.Sprintf("func New%s(address common.Address, backend bind.ContractBackend) (*%s, error) {\n", kind, kind)
|
||||
code += fmt.Sprintf(" common, err := newCommon%s(address, backend.(bind.ContractCaller), backend.(bind.ContractTransactor))\n", kind)
|
||||
code += fmt.Sprintf(" if err != nil {\n")
|
||||
code += fmt.Sprintf(" return nil, err\n")
|
||||
code += fmt.Sprintf(" }\n")
|
||||
code += fmt.Sprintf(" return &%s{%sCaller: %sCaller{common: common}, %sTransactor: %sTransactor{common: common}}, nil\n", kind, kind, kind, kind, kind)
|
||||
code += fmt.Sprintf("}\n\n")
|
||||
|
||||
code += fmt.Sprintf("// New%sCaller creates a new read-only instance of %s, bound to a specific deployed contract.\n", kind, kind)
|
||||
code += fmt.Sprintf("func New%sCaller(address common.Address, caller bind.ContractCaller) (*%sCaller, error) {\n", kind, kind)
|
||||
code += fmt.Sprintf(" common, err := newCommon%s(address, caller, nil)\n", kind)
|
||||
code += fmt.Sprintf(" if err != nil {\n")
|
||||
code += fmt.Sprintf(" return nil, err\n")
|
||||
code += fmt.Sprintf(" }\n")
|
||||
code += fmt.Sprintf(" return &%sCaller{common: common}, nil\n", kind)
|
||||
code += fmt.Sprintf("}\n\n")
|
||||
|
||||
code += fmt.Sprintf("// New%sTransactor creates a new write-only instance of %s, bound to a specific deployed contract.\n", kind, kind)
|
||||
code += fmt.Sprintf("func New%sTransactor(address common.Address, transactor bind.ContractTransactor) (*%sTransactor, error) {\n", kind, kind)
|
||||
code += fmt.Sprintf(" common, err := newCommon%s(address, nil, transactor)\n", kind)
|
||||
code += fmt.Sprintf(" if err != nil {\n")
|
||||
code += fmt.Sprintf(" return nil, err\n")
|
||||
code += fmt.Sprintf(" }\n")
|
||||
code += fmt.Sprintf(" return &%sTransactor{common: common}, nil\n", kind)
|
||||
code += fmt.Sprintf("}\n\n")
|
||||
|
||||
code += fmt.Sprintf("// newCommon%s creates an internal instance of %s, bound to a specific deployed contract.\n", kind, kind)
|
||||
code += fmt.Sprintf("func newCommon%s(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor) (*common%s, error) {\n", kind, kind)
|
||||
code += fmt.Sprintf(" parsed, err := abi.JSON(strings.NewReader(%sABI))\n", kind)
|
||||
code += fmt.Sprintf(" if err != nil {\n")
|
||||
code += fmt.Sprintf(" return nil, err\n")
|
||||
code += fmt.Sprintf(" }\n")
|
||||
code += fmt.Sprintf(" return &common%s{\n", kind)
|
||||
code += fmt.Sprintf(" contract: bind.NewBoundContract(address, parsed, caller, transactor),\n")
|
||||
code += fmt.Sprintf(" }, nil\n")
|
||||
code += fmt.Sprintf("}")
|
||||
|
||||
return code
|
||||
}
|
||||
|
||||
// bindMethod generates a Go wrapper method (caller or transactor flavored) for a single contract method.
|
||||
func bindMethod(kind string, method abi.Method) string {
|
||||
var (
|
||||
name = strings.ToUpper(method.Name[:1]) + method.Name[1:]
|
||||
prologue = new(bytes.Buffer)
|
||||
)
|
||||
// Generate the argument and return list for the function
|
||||
args := make([]string, 0, len(method.Inputs))
|
||||
for i, arg := range method.Inputs {
|
||||
param := arg.Name
|
||||
if param == "" {
|
||||
param = fmt.Sprintf("arg%d", i)
|
||||
}
|
||||
args = append(args, fmt.Sprintf("%s %s", param, bindType(arg.Type)))
|
||||
}
|
||||
returns, _ := bindReturn(prologue, name, method.Outputs)
|
||||
|
||||
// Generate the docs to help with coding against the binding
|
||||
callTypeDoc := "free data retrieval call"
|
||||
if !method.Const {
|
||||
callTypeDoc = "paid mutator transaction"
|
||||
}
|
||||
docs := fmt.Sprintf("// %s is a %s binding the contract method 0x%x.\n", name, callTypeDoc, method.Id())
|
||||
docs += fmt.Sprintf("// \n")
|
||||
docs += fmt.Sprintf("// Solidity: %s", strings.TrimPrefix(method.String(), "function "))
|
||||
|
||||
// Generate the method itself for both the read/write version and the combo too
|
||||
code := fmt.Sprintf("%s\n", prologue)
|
||||
if method.Const {
|
||||
code += fmt.Sprintf("%s\nfunc (_%s *%sCaller) %s(%s) (%s) {\n%s\n}\n", docs, kind, kind, name, strings.Join(args, ","), strings.Join(returns, ","), bindCallBody(kind, method.Name, args, returns))
|
||||
} else {
|
||||
args = append([]string{"auth *bind.AuthOpts"}, args...)
|
||||
code += fmt.Sprintf("%s\nfunc (_%s *%sTransactor) %s(%s) (*types.Transaction, error) {\n%s\n}\n", docs, kind, kind, name, strings.Join(args, ","), bindTransactionBody(kind, method.Name, args))
|
||||
}
|
||||
return code
|
||||
}
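To make the string assembly above concrete: for a hypothetical constant method stats() with named outputs (count uint256, name string), bound under a placeholder type Token, the generator would emit roughly the following (an illustrative reconstruction after goimports formatting, not verbatim output; the 4 byte method id and the exact Solidity doc line are approximated):

// StatsResult is the result of the Stats invocation.
type StatsResult struct {
	Count *big.Int
	Name  string
}

// Stats is a free data retrieval call binding the contract method 0x… (id elided).
//
// Solidity: stats() constant returns(count uint256, name string)
func (_Token *TokenCaller) Stats() (*StatsResult, error) {
	var (
		ret0 = new(StatsResult)
	)
	err := _Token.common.contract.Call(ret0, "stats")
	return ret0, err
}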
|
||||
|
||||
// bindType converts a Solidity type to a Go one. Since there is no clear mapping
|
||||
// from all Solidity types to Go ones (e.g. uint17), those that cannot be exactly
|
||||
// mapped will use an upscaled type (e.g. *big.Int).
|
||||
func bindType(kind abi.Type) string {
|
||||
stringKind := kind.String()
|
||||
|
||||
switch {
|
||||
case stringKind == "address":
|
||||
return "common.Address"
|
||||
|
||||
case stringKind == "hash":
|
||||
return "common.Hash"
|
||||
|
||||
case strings.HasPrefix(stringKind, "bytes"):
|
||||
if stringKind == "bytes" {
|
||||
return "[]byte"
|
||||
}
|
||||
return fmt.Sprintf("[%s]byte", stringKind[5:])
|
||||
|
||||
case strings.HasPrefix(stringKind, "int"):
|
||||
switch stringKind[3:] {
|
||||
case "8", "16", "32", "64":
|
||||
return stringKind
|
||||
}
|
||||
return "*big.Int"
|
||||
|
||||
case strings.HasPrefix(stringKind, "uint"):
|
||||
switch stringKind[4:] {
|
||||
case "8", "16", "32", "64":
|
||||
return stringKind
|
||||
}
|
||||
return "*big.Int"
|
||||
|
||||
default:
|
||||
return stringKind
|
||||
}
|
||||
}
|
||||
|
||||
// bindReturn creates the list of return parameters for a method invocation. If
|
||||
// all the fields of the return type are named, and there is more than one value
|
||||
// being returned, the returns are wrapped in a result struct.
|
||||
func bindReturn(prologue *bytes.Buffer, method string, outputs []abi.Argument) ([]string, string) {
|
||||
// Generate the anonymous return list for when a struct is not needed/possible
|
||||
var (
|
||||
returns = make([]string, 0, len(outputs)+1)
|
||||
anonymous = false
|
||||
)
|
||||
for _, ret := range outputs {
|
||||
returns = append(returns, bindType(ret.Type))
|
||||
if ret.Name == "" {
|
||||
anonymous = true
|
||||
}
|
||||
}
|
||||
if anonymous || len(returns) < 2 {
|
||||
returns = append(returns, "error")
|
||||
return returns, ""
|
||||
}
|
||||
// If the returns are named and numerous, wrap in a result struct
|
||||
wrapper, impl := bindReturnStruct(method, outputs)
|
||||
prologue.WriteString(impl + "\n")
|
||||
return []string{"*" + wrapper, "error"}, wrapper
|
||||
}
|
||||
|
||||
// bindReturnStruct creates a Go structure with the specified fields to be used
|
||||
// as the return type from a method call.
|
||||
func bindReturnStruct(method string, returns []abi.Argument) (string, string) {
|
||||
fields := make([]string, 0, len(returns))
|
||||
for _, ret := range returns {
|
||||
fields = append(fields, fmt.Sprintf("%s %s", strings.ToUpper(ret.Name[:1])+ret.Name[1:], bindType(ret.Type)))
|
||||
}
|
||||
kind := fmt.Sprintf("%sResult", method)
|
||||
docs := fmt.Sprintf("// %s is the result of the %s invocation.", kind, method)
|
||||
|
||||
return kind, fmt.Sprintf("%s\ntype %s struct {\n%s\n}", docs, kind, strings.Join(fields, "\n"))
|
||||
}
|
||||
|
||||
// bindCallBody creates the Go code to declare a batch of return values, invoke
|
||||
// an Ethereum method call with the requested parameters, parse the binary output
|
||||
// into the return values and return them.
|
||||
func bindCallBody(kind string, method string, params []string, returns []string) string {
|
||||
body := ""
|
||||
|
||||
// Allocate memory for each of the return values
|
||||
rets := make([]string, 0, len(returns)-1)
|
||||
if len(returns) > 1 {
|
||||
body += "var ("
|
||||
for i, kind := range returns[:len(returns)-1] { // Omit the final error
|
||||
name := fmt.Sprintf("ret%d", i)
|
||||
|
||||
rets = append(rets, name)
|
||||
body += fmt.Sprintf("%s = new(%s)\n", name, strings.TrimPrefix(kind, "*"))
|
||||
}
|
||||
body += ")\n"
|
||||
}
|
||||
// Assemble a single collector variable for the result ABI initialization
|
||||
result := strings.Join(rets, ",")
|
||||
if len(returns) > 2 {
|
||||
result = "[]interface{}{" + result + "}"
|
||||
}
|
||||
// Extract the parameter list into a flat variable name list
|
||||
inputs := make([]string, len(params))
|
||||
for i, param := range params {
|
||||
inputs[i] = strings.Split(param, " ")[0]
|
||||
}
|
||||
input := ""
|
||||
if len(inputs) > 0 {
|
||||
input = "," + strings.Join(inputs, ",")
|
||||
}
|
||||
// Request executing the contract call and return the results with the errors
|
||||
body += fmt.Sprintf("err := _%s.common.contract.Call(%s, \"%s\" %s)\n", kind, result, method, input)
|
||||
|
||||
outs := make([]string, 0, len(returns))
|
||||
for i, ret := range returns[:len(returns)-1] { // Handle the final error separately
|
||||
if strings.HasPrefix(ret, "*") {
|
||||
outs = append(outs, rets[i])
|
||||
} else {
|
||||
outs = append(outs, "*"+rets[i])
|
||||
}
|
||||
}
|
||||
outs = append(outs, "err")
|
||||
|
||||
body += fmt.Sprintf("return %s", strings.Join(outs, ","))
|
||||
|
||||
return body
|
||||
}
|
||||
|
||||
// bindTransactionBody creates the Go code to invoke an Ethereum transaction call
|
||||
// with the requested parameters, and return the assembled transaction object.
|
||||
func bindTransactionBody(kind string, method string, params []string) string {
|
||||
// Extract the parameter list into a flat variable name list
|
||||
inputs := make([]string, len(params)-1) // Omit the auth options
|
||||
for i, param := range params[1:] {
|
||||
inputs[i] = strings.Split(param, " ")[0]
|
||||
}
|
||||
input := ""
|
||||
if len(inputs) > 0 {
|
||||
input = "," + strings.Join(inputs, ",")
|
||||
}
|
||||
// Request executing the contract call and return the results with the errors
|
||||
return fmt.Sprintf("return _%s.common.contract.Transact(auth, \"%s\" %s)", kind, method, input)
|
||||
}
|
140
accounts/abi/bind/bind_test.go
Normal file
File diff suppressed because one or more lines are too long
71
cmd/abigen/main.go
Normal file
@ -0,0 +1,71 @@
|
||||
// Copyright 2016 The go-ethereum Authors
|
||||
// This file is part of the go-ethereum library.
|
||||
//
|
||||
// The go-ethereum library is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Lesser General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// The go-ethereum library is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Lesser General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Lesser General Public License
|
||||
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/ethereum/go-ethereum/accounts/abi/bind"
|
||||
)
|
||||
|
||||
var (
|
||||
abiFlag = flag.String("abi", "", "Path to the Ethereum contract ABI json to bind")
|
||||
pkgFlag = flag.String("pkg", "", "Go package name to generate the binding into")
|
||||
typFlag = flag.String("type", "", "Go struct name for the binding (default = package name)")
|
||||
outFlag = flag.String("out", "", "Output path for the generated binding")
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Parse and validate the command line flags
|
||||
flag.Parse()
|
||||
|
||||
if *abiFlag == "" {
|
||||
fmt.Printf("No contract ABI path specified (--abi)\n")
|
||||
os.Exit(-1)
|
||||
}
|
||||
if *pkgFlag == "" {
|
||||
fmt.Printf("No destination Go package specified (--pkg)\n")
|
||||
os.Exit(-1)
|
||||
}
|
||||
// Generate the contract binding
|
||||
in, err := ioutil.ReadFile(*abiFlag)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to read input ABI: %v\n", err)
|
||||
os.Exit(-1)
|
||||
}
|
||||
kind := *typFlag
|
||||
if kind == "" {
|
||||
kind = *pkgFlag
|
||||
}
|
||||
code, err := bind.Bind(string(in), *pkgFlag, kind)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to generate ABI binding: %v\n", err)
|
||||
os.Exit(-1)
|
||||
}
|
||||
// Either flush it out to a file or display on the standard output
|
||||
if *outFlag == "" {
|
||||
fmt.Printf("%s\n", code)
|
||||
return
|
||||
}
|
||||
if err := ioutil.WriteFile(*outFlag, []byte(code), 0600); err != nil {
|
||||
fmt.Printf("Failed to write ABI binding: %v\n", err)
|
||||
os.Exit(-1)
|
||||
}
|
||||
}
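Once the command is built and on PATH, the generator slots naturally into go generate; in the sketch below the package name, binding type and token.abi path are placeholders for whatever contract is being bound:

// Package token holds the contract binding emitted by the go:generate
// directive below; token.abi is a placeholder path to the contract ABI JSON.
package token

//go:generate abigen --abi token.abi --pkg token --type Token --out token_binding.go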
|
@ -689,7 +689,7 @@ func (s *PublicBlockChainAPI) Call(args CallArgs, blockNr rpc.BlockNumber) (stri
|
||||
|
||||
// EstimateGas returns an estimate of the amount of gas needed to execute the given transaction.
|
||||
func (s *PublicBlockChainAPI) EstimateGas(args CallArgs) (*rpc.HexNumber, error) {
|
||||
_, gas, err := s.doCall(args, rpc.LatestBlockNumber)
|
||||
_, gas, err := s.doCall(args, rpc.PendingBlockNumber)
|
||||
return rpc.NewHexNumber(gas), err
|
||||
}
|
||||
|
||||
|