diff --git a/Gopkg.lock b/Gopkg.lock
index cca9a777..87b3d5f2 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -225,6 +225,12 @@
revision = "acdc4509485b587f5e675510c4f2c63e90ff68a8"
version = "v1.1.0"
+[[projects]]
+ name = "github.com/philhofer/fwd"
+ packages = ["."]
+ revision = "bb6d471dc95d4fe11e432687f8b70ff496cf3136"
+ version = "v1.0.0"
+
[[projects]]
name = "github.com/rjeczalik/notify"
packages = ["."]
@@ -295,6 +301,12 @@
]
revision = "adf24ef3f94bd13ec4163060b21a5678f22b429b"
+[[projects]]
+ name = "github.com/tinylib/msgp"
+ packages = ["msgp"]
+ revision = "b2b6a672cf1e5b90748f79b8b81fc8c5cf0571a1"
+ version = "1.0.2"
+
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
@@ -326,7 +338,10 @@
[[projects]]
branch = "master"
name = "golang.org/x/sys"
- packages = ["unix"]
+ packages = [
+ "unix",
+ "windows"
+ ]
revision = "a0f4589a76f1f83070cb9e5613809e1d07b97c13"
[[projects]]
@@ -366,6 +381,17 @@
]
revision = "8cc4e8a6f4841aa92a8683fca47bc5d64b58875b"
+[[projects]]
+ name = "gopkg.in/DataDog/dd-trace-go.v1"
+ packages = [
+ "ddtrace",
+ "ddtrace/ext",
+ "ddtrace/internal",
+ "ddtrace/tracer"
+ ]
+ revision = "8efc9a798f2db99a9e00c7e57f45fc13611214e0"
+ version = "v1.2.3"
+
[[projects]]
name = "gopkg.in/fatih/set.v0"
packages = ["."]
@@ -393,6 +419,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
- inputs-digest = "2122d1d04fbc67daf75d751e144926e73f0fd838d51711cccf9604eabb46b95b"
+ inputs-digest = "7a913c984013e026536456baa75bd95e261bbb0d294b7de77785819ac182b465"
solver-name = "gps-cdcl"
solver-version = 1
diff --git a/README.md b/README.md
index d00eccdc..99b6867b 100644
--- a/README.md
+++ b/README.md
@@ -17,6 +17,7 @@ Vulcanize DB is a set of tools that make it easier for developers to write appli
## Installation
`go get github.com/vulcanize/vulcanizedb`
+`go get gopkg.in/DataDog/dd-trace-go.v1/ddtrace`
## Setting up the Database
1. Install Postgres
@@ -27,6 +28,9 @@ Vulcanize DB is a set of tools that make it easier for developers to write appli
* See below for configuring additional environments
+## Creating a migration file (up and down)
+1. `./script/create_migrate create_bite_table` (generates paired up and down SQL files in `db/migrations`)
+
## Configuration
- To use a local Ethereum node, copy `environments/public.toml.example` to
`environments/public.toml` and update the `ipcPath` and `levelDbPath`.
diff --git a/cmd/root.go b/cmd/root.go
index c0336edc..e2715f62 100644
--- a/cmd/root.go
+++ b/cmd/root.go
@@ -70,6 +70,7 @@ func init() {
rootCmd.PersistentFlags().String("database-password", "", "database password")
rootCmd.PersistentFlags().String("client-ipcPath", "", "location of geth.ipc file")
rootCmd.PersistentFlags().String("client-levelDbPath", "", "location of levelDb chaindata")
+	rootCmd.PersistentFlags().String("datadog-name", "vulcanize-test", "Datadog service name")
viper.BindPFlag("database.name", rootCmd.PersistentFlags().Lookup("database-name"))
viper.BindPFlag("database.port", rootCmd.PersistentFlags().Lookup("database-port"))
@@ -78,6 +79,7 @@ func init() {
viper.BindPFlag("database.password", rootCmd.PersistentFlags().Lookup("database-password"))
viper.BindPFlag("client.ipcPath", rootCmd.PersistentFlags().Lookup("client-ipcPath"))
viper.BindPFlag("client.levelDbPath", rootCmd.PersistentFlags().Lookup("client-levelDbPath"))
+ viper.BindPFlag("datadog.name", rootCmd.PersistentFlags().Lookup("datadog-name"))
}
func initConfig() {
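
Reviewer note: the new `datadog-name` flag follows the same cobra/viper pattern as the existing database and client flags. A minimal sketch of the resulting precedence, assuming standard cobra/viper behavior (an explicit flag beats a config-file entry, which beats the flag default):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

func main() {
	cmd := &cobra.Command{Use: "example"}
	cmd.PersistentFlags().String("datadog-name", "vulcanize-test", "Datadog service name")
	// After binding, viper.GetString("datadog.name") resolves to the flag value;
	// a [datadog] name = "..." entry in the loaded TOML overrides the default.
	viper.BindPFlag("datadog.name", cmd.PersistentFlags().Lookup("datadog-name"))

	fmt.Println(viper.GetString("datadog.name")) // "vulcanize-test" unless overridden
}
```
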
diff --git a/db/migrations/1532468319_create_flip_kick_table.up.sql b/db/migrations/1532468319_create_flip_kick_table.up.sql
index ad4f672a..123008d5 100644
--- a/db/migrations/1532468319_create_flip_kick_table.up.sql
+++ b/db/migrations/1532468319_create_flip_kick_table.up.sql
@@ -2,15 +2,11 @@ CREATE TABLE maker.flip_kick (
db_id SERIAL PRIMARY KEY,
header_id INTEGER NOT NULL REFERENCES headers (id) ON DELETE CASCADE,
id NUMERIC NOT NULL UNIQUE,
- mom VARCHAR,
- vat VARCHAR,
- ilk VARCHAR,
lot NUMERIC,
bid NUMERIC,
- guy VARCHAR,
gal VARCHAR,
"end" TIMESTAMP WITH TIME ZONE,
- era TIMESTAMP WITH TIME ZONE,
- lad VARCHAR,
- tab NUMERIC
+ urn VARCHAR,
+ tab NUMERIC,
+ raw_log JSONB
);
diff --git a/db/migrations/1533844125_create_frob_table.up.sql b/db/migrations/1533844125_create_frob_table.up.sql
index 9c17a51a..78a0fdb0 100644
--- a/db/migrations/1533844125_create_frob_table.up.sql
+++ b/db/migrations/1533844125_create_frob_table.up.sql
@@ -1,13 +1,14 @@
CREATE TABLE maker.frob (
id SERIAL PRIMARY KEY,
header_id INTEGER NOT NULL REFERENCES headers (id) ON DELETE CASCADE,
- tx_idx INTEGER,
ilk bytea,
- lad bytea,
+ urn bytea,
dink NUMERIC,
dart NUMERIC,
ink NUMERIC,
art NUMERIC,
- iart NUMERIC,
+ iart NUMERIC,
+  tx_idx    INTEGER NOT NULL,
+ raw_log JSONB,
UNIQUE (header_id, tx_idx)
);
\ No newline at end of file
diff --git a/db/migrations/1534193915_add_raw_log_column_to_flip_kick.down.sql b/db/migrations/1534193915_add_raw_log_column_to_flip_kick.down.sql
deleted file mode 100644
index 6e303b91..00000000
--- a/db/migrations/1534193915_add_raw_log_column_to_flip_kick.down.sql
+++ /dev/null
@@ -1,2 +0,0 @@
-ALTER TABLE maker.flip_kick
- DROP COLUMN raw_log;
diff --git a/db/migrations/1534193915_add_raw_log_column_to_flip_kick.up.sql b/db/migrations/1534193915_add_raw_log_column_to_flip_kick.up.sql
deleted file mode 100644
index a7c6cc8f..00000000
--- a/db/migrations/1534193915_add_raw_log_column_to_flip_kick.up.sql
+++ /dev/null
@@ -1,2 +0,0 @@
-ALTER TABLE maker.flip_kick
- ADD COLUMN raw_log json;
diff --git a/db/migrations/1534295712_create_tend_table.up.sql b/db/migrations/1534295712_create_tend_table.up.sql
index d88c2efb..62ce46ea 100644
--- a/db/migrations/1534295712_create_tend_table.up.sql
+++ b/db/migrations/1534295712_create_tend_table.up.sql
@@ -1,12 +1,11 @@
CREATE TABLE maker.tend (
db_id SERIAL PRIMARY KEY,
header_id INTEGER NOT NULL REFERENCES headers (id) ON DELETE CASCADE,
- id NUMERIC NOT NULL UNIQUE,
+ bid_id NUMERIC NOT NULL UNIQUE,
lot NUMERIC,
bid NUMERIC,
- guy BYTEA,
+ guy VARCHAR,
tic NUMERIC,
- era TIMESTAMP WITH TIME ZONE,
    tx_idx     INTEGER NOT NULL,
raw_log JSONB
);
diff --git a/db/migrations/1534295713_create_bite_table.down.sql b/db/migrations/1534295713_create_bite_table.down.sql
new file mode 100644
index 00000000..1ca07801
--- /dev/null
+++ b/db/migrations/1534295713_create_bite_table.down.sql
@@ -0,0 +1 @@
+DROP TABLE maker.bite;
diff --git a/db/migrations/1534295713_create_bite_table.up.sql b/db/migrations/1534295713_create_bite_table.up.sql
new file mode 100644
index 00000000..5cc7041f
--- /dev/null
+++ b/db/migrations/1534295713_create_bite_table.up.sql
@@ -0,0 +1,14 @@
+CREATE TABLE maker.bite (
+ id SERIAL PRIMARY KEY,
+ header_id INTEGER NOT NULL REFERENCES headers (id) ON DELETE CASCADE,
+ ilk bytea,
+ lad bytea,
+ ink VARCHAR,
+ art VARCHAR,
+ iArt VARCHAR,
+ tab NUMERIC,
+ flip VARCHAR,
+  tx_idx    INTEGER NOT NULL,
+ raw_log JSONB,
+ UNIQUE (header_id, tx_idx)
+);
\ No newline at end of file
diff --git a/db/migrations/1534455274_remove_mom_field_from_flip_kick.down.sql b/db/migrations/1534455274_remove_mom_field_from_flip_kick.down.sql
deleted file mode 100644
index 8fdcef96..00000000
--- a/db/migrations/1534455274_remove_mom_field_from_flip_kick.down.sql
+++ /dev/null
@@ -1,2 +0,0 @@
-ALTER TABLE maker.flip_kick
- ADD COLUMN mom VARCHAR;
diff --git a/db/migrations/1534455274_remove_mom_field_from_flip_kick.up.sql b/db/migrations/1534455274_remove_mom_field_from_flip_kick.up.sql
deleted file mode 100644
index 9ea57ff7..00000000
--- a/db/migrations/1534455274_remove_mom_field_from_flip_kick.up.sql
+++ /dev/null
@@ -1,2 +0,0 @@
-ALTER TABLE maker.flip_kick
- DROP COLUMN mom;
\ No newline at end of file
diff --git a/db/migrations/1534799167_create_dent_table.down.sql b/db/migrations/1534799167_create_dent_table.down.sql
new file mode 100644
index 00000000..91bf017b
--- /dev/null
+++ b/db/migrations/1534799167_create_dent_table.down.sql
@@ -0,0 +1 @@
+DROP TABLE maker.dent;
\ No newline at end of file
diff --git a/db/migrations/1534799167_create_dent_table.up.sql b/db/migrations/1534799167_create_dent_table.up.sql
new file mode 100644
index 00000000..eeb5c2b2
--- /dev/null
+++ b/db/migrations/1534799167_create_dent_table.up.sql
@@ -0,0 +1,11 @@
+CREATE TABLE maker.dent (
+ db_id SERIAL PRIMARY KEY,
+ header_id INTEGER NOT NULL REFERENCES headers (id) ON DELETE CASCADE,
+ bid_id NUMERIC NOT NULL UNIQUE,
+ lot NUMERIC,
+ bid NUMERIC,
+ guy BYTEA,
+ tic NUMERIC,
+  tx_idx    INTEGER NOT NULL,
+ raw_log JSONB
+);
diff --git a/db/migrations/1535667935_create_pit_file_table.down.sql b/db/migrations/1535667935_create_pit_file_table.down.sql
new file mode 100644
index 00000000..f08e0913
--- /dev/null
+++ b/db/migrations/1535667935_create_pit_file_table.down.sql
@@ -0,0 +1,3 @@
+DROP TABLE maker.pit_file_ilk;
+DROP TABLE maker.pit_file_stability_fee;
+DROP TABLE maker.pit_file_debt_ceiling;
\ No newline at end of file
diff --git a/db/migrations/1535667935_create_pit_file_table.up.sql b/db/migrations/1535667935_create_pit_file_table.up.sql
new file mode 100644
index 00000000..a86fd681
--- /dev/null
+++ b/db/migrations/1535667935_create_pit_file_table.up.sql
@@ -0,0 +1,30 @@
+CREATE TABLE maker.pit_file_ilk (
+ id SERIAL PRIMARY KEY,
+ header_id INTEGER NOT NULL REFERENCES headers (id) ON DELETE CASCADE,
+ ilk TEXT,
+ what TEXT,
+ data NUMERIC,
+  tx_idx    INTEGER NOT NULL,
+ raw_log JSONB,
+ UNIQUE (header_id, tx_idx)
+);
+
+CREATE TABLE maker.pit_file_stability_fee (
+ id SERIAL PRIMARY KEY,
+ header_id INTEGER NOT NULL REFERENCES headers (id) ON DELETE CASCADE,
+ what TEXT,
+ data TEXT,
+ tx_idx INTEGER NOT NULL,
+ raw_log JSONB,
+ UNIQUE (header_id, tx_idx)
+);
+
+CREATE TABLE maker.pit_file_debt_ceiling (
+ id SERIAL PRIMARY KEY,
+ header_id INTEGER NOT NULL REFERENCES headers (id) ON DELETE CASCADE,
+ what TEXT,
+ data NUMERIC,
+ tx_idx INTEGER NOT NULL,
+ raw_log JSONB,
+ UNIQUE (header_id, tx_idx)
+);
\ No newline at end of file
diff --git a/db/migrations/1536267596_create_vat_init_table.down.sql b/db/migrations/1536267596_create_vat_init_table.down.sql
new file mode 100644
index 00000000..49367e7c
--- /dev/null
+++ b/db/migrations/1536267596_create_vat_init_table.down.sql
@@ -0,0 +1 @@
+DROP TABLE maker.vat_init;
\ No newline at end of file
diff --git a/db/migrations/1536267596_create_vat_init_table.up.sql b/db/migrations/1536267596_create_vat_init_table.up.sql
new file mode 100644
index 00000000..f48ed523
--- /dev/null
+++ b/db/migrations/1536267596_create_vat_init_table.up.sql
@@ -0,0 +1,8 @@
+CREATE TABLE maker.vat_init (
+ id SERIAL PRIMARY KEY,
+ header_id INTEGER NOT NULL REFERENCES headers (id) ON DELETE CASCADE,
+ ilk TEXT,
+  tx_idx   INTEGER NOT NULL,
+ raw_log JSONB,
+ UNIQUE (header_id, tx_idx)
+);
\ No newline at end of file
diff --git a/db/schema.sql b/db/schema.sql
index 069be85b..afc35cc9 100644
--- a/db/schema.sql
+++ b/db/schema.sql
@@ -56,6 +56,82 @@ SET default_tablespace = '';
SET default_with_oids = false;
+--
+-- Name: bite; Type: TABLE; Schema: maker; Owner: -
+--
+
+CREATE TABLE maker.bite (
+ id integer NOT NULL,
+ header_id integer NOT NULL,
+ ilk bytea,
+ lad bytea,
+ ink character varying,
+ art character varying,
+ iart character varying,
+ tab numeric,
+ flip character varying,
+ tx_idx integer NOT NULL,
+ raw_log jsonb
+);
+
+
+--
+-- Name: bite_id_seq; Type: SEQUENCE; Schema: maker; Owner: -
+--
+
+CREATE SEQUENCE maker.bite_id_seq
+ AS integer
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+--
+-- Name: bite_id_seq; Type: SEQUENCE OWNED BY; Schema: maker; Owner: -
+--
+
+ALTER SEQUENCE maker.bite_id_seq OWNED BY maker.bite.id;
+
+
+--
+-- Name: dent; Type: TABLE; Schema: maker; Owner: -
+--
+
+CREATE TABLE maker.dent (
+ db_id integer NOT NULL,
+ header_id integer NOT NULL,
+ bid_id numeric NOT NULL,
+ lot numeric,
+ bid numeric,
+ guy bytea,
+ tic numeric,
+ tx_idx integer NOT NULL,
+ raw_log jsonb
+);
+
+
+--
+-- Name: dent_db_id_seq; Type: SEQUENCE; Schema: maker; Owner: -
+--
+
+CREATE SEQUENCE maker.dent_db_id_seq
+ AS integer
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+--
+-- Name: dent_db_id_seq; Type: SEQUENCE OWNED BY; Schema: maker; Owner: -
+--
+
+ALTER SEQUENCE maker.dent_db_id_seq OWNED BY maker.dent.db_id;
+
+
--
-- Name: flip_kick; Type: TABLE; Schema: maker; Owner: -
--
@@ -64,17 +140,13 @@ CREATE TABLE maker.flip_kick (
db_id integer NOT NULL,
header_id integer NOT NULL,
id numeric NOT NULL,
- vat character varying,
- ilk character varying,
lot numeric,
bid numeric,
- guy character varying,
gal character varying,
"end" timestamp with time zone,
- era timestamp with time zone,
- lad character varying,
+ urn character varying,
tab numeric,
- raw_log json
+ raw_log jsonb
);
@@ -105,14 +177,15 @@ ALTER SEQUENCE maker.flip_kick_db_id_seq OWNED BY maker.flip_kick.db_id;
CREATE TABLE maker.frob (
id integer NOT NULL,
header_id integer NOT NULL,
- tx_idx integer,
ilk bytea,
- lad bytea,
+ urn bytea,
dink numeric,
dart numeric,
ink numeric,
art numeric,
- iart numeric
+ iart numeric,
+ tx_idx integer NOT NULL,
+ raw_log jsonb
);
@@ -136,6 +209,109 @@ CREATE SEQUENCE maker.frob_id_seq
ALTER SEQUENCE maker.frob_id_seq OWNED BY maker.frob.id;
+--
+-- Name: pit_file_debt_ceiling; Type: TABLE; Schema: maker; Owner: -
+--
+
+CREATE TABLE maker.pit_file_debt_ceiling (
+ id integer NOT NULL,
+ header_id integer NOT NULL,
+ what text,
+ data numeric,
+ tx_idx integer NOT NULL,
+ raw_log jsonb
+);
+
+
+--
+-- Name: pit_file_debt_ceiling_id_seq; Type: SEQUENCE; Schema: maker; Owner: -
+--
+
+CREATE SEQUENCE maker.pit_file_debt_ceiling_id_seq
+ AS integer
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+--
+-- Name: pit_file_debt_ceiling_id_seq; Type: SEQUENCE OWNED BY; Schema: maker; Owner: -
+--
+
+ALTER SEQUENCE maker.pit_file_debt_ceiling_id_seq OWNED BY maker.pit_file_debt_ceiling.id;
+
+
+--
+-- Name: pit_file_ilk; Type: TABLE; Schema: maker; Owner: -
+--
+
+CREATE TABLE maker.pit_file_ilk (
+ id integer NOT NULL,
+ header_id integer NOT NULL,
+ ilk text,
+ what text,
+ data numeric,
+ tx_idx integer NOT NULL,
+ raw_log jsonb
+);
+
+
+--
+-- Name: pit_file_ilk_id_seq; Type: SEQUENCE; Schema: maker; Owner: -
+--
+
+CREATE SEQUENCE maker.pit_file_ilk_id_seq
+ AS integer
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+--
+-- Name: pit_file_ilk_id_seq; Type: SEQUENCE OWNED BY; Schema: maker; Owner: -
+--
+
+ALTER SEQUENCE maker.pit_file_ilk_id_seq OWNED BY maker.pit_file_ilk.id;
+
+
+--
+-- Name: pit_file_stability_fee; Type: TABLE; Schema: maker; Owner: -
+--
+
+CREATE TABLE maker.pit_file_stability_fee (
+ id integer NOT NULL,
+ header_id integer NOT NULL,
+ what text,
+ data text,
+ tx_idx integer NOT NULL,
+ raw_log jsonb
+);
+
+
+--
+-- Name: pit_file_stability_fee_id_seq; Type: SEQUENCE; Schema: maker; Owner: -
+--
+
+CREATE SEQUENCE maker.pit_file_stability_fee_id_seq
+ AS integer
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+--
+-- Name: pit_file_stability_fee_id_seq; Type: SEQUENCE OWNED BY; Schema: maker; Owner: -
+--
+
+ALTER SEQUENCE maker.pit_file_stability_fee_id_seq OWNED BY maker.pit_file_stability_fee.id;
+
+
--
-- Name: price_feeds; Type: TABLE; Schema: maker; Owner: -
--
@@ -177,12 +353,11 @@ ALTER SEQUENCE maker.price_feeds_id_seq OWNED BY maker.price_feeds.id;
CREATE TABLE maker.tend (
db_id integer NOT NULL,
header_id integer NOT NULL,
- id numeric NOT NULL,
+ bid_id numeric NOT NULL,
lot numeric,
bid numeric,
- guy bytea,
+ guy character varying,
tic numeric,
- era timestamp with time zone,
tx_idx integer NOT NULL,
raw_log jsonb
);
@@ -208,6 +383,39 @@ CREATE SEQUENCE maker.tend_db_id_seq
ALTER SEQUENCE maker.tend_db_id_seq OWNED BY maker.tend.db_id;
+--
+-- Name: vat_init; Type: TABLE; Schema: maker; Owner: -
+--
+
+CREATE TABLE maker.vat_init (
+ id integer NOT NULL,
+ header_id integer NOT NULL,
+ ilk text,
+ tx_idx integer NOT NULL,
+ raw_log jsonb
+);
+
+
+--
+-- Name: vat_init_id_seq; Type: SEQUENCE; Schema: maker; Owner: -
+--
+
+CREATE SEQUENCE maker.vat_init_id_seq
+ AS integer
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+
+--
+-- Name: vat_init_id_seq; Type: SEQUENCE OWNED BY; Schema: maker; Owner: -
+--
+
+ALTER SEQUENCE maker.vat_init_id_seq OWNED BY maker.vat_init.id;
+
+
--
-- Name: logs; Type: TABLE; Schema: public; Owner: -
--
@@ -580,6 +788,20 @@ CREATE VIEW public.watched_event_logs AS
WHERE ((((log_filters.topic0)::text = (logs.topic0)::text) OR (log_filters.topic0 IS NULL)) AND (((log_filters.topic1)::text = (logs.topic1)::text) OR (log_filters.topic1 IS NULL)) AND (((log_filters.topic2)::text = (logs.topic2)::text) OR (log_filters.topic2 IS NULL)) AND (((log_filters.topic3)::text = (logs.topic3)::text) OR (log_filters.topic3 IS NULL)));
+--
+-- Name: bite id; Type: DEFAULT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.bite ALTER COLUMN id SET DEFAULT nextval('maker.bite_id_seq'::regclass);
+
+
+--
+-- Name: dent db_id; Type: DEFAULT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.dent ALTER COLUMN db_id SET DEFAULT nextval('maker.dent_db_id_seq'::regclass);
+
+
--
-- Name: flip_kick db_id; Type: DEFAULT; Schema: maker; Owner: -
--
@@ -594,6 +816,27 @@ ALTER TABLE ONLY maker.flip_kick ALTER COLUMN db_id SET DEFAULT nextval('maker.f
ALTER TABLE ONLY maker.frob ALTER COLUMN id SET DEFAULT nextval('maker.frob_id_seq'::regclass);
+--
+-- Name: pit_file_debt_ceiling id; Type: DEFAULT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.pit_file_debt_ceiling ALTER COLUMN id SET DEFAULT nextval('maker.pit_file_debt_ceiling_id_seq'::regclass);
+
+
+--
+-- Name: pit_file_ilk id; Type: DEFAULT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.pit_file_ilk ALTER COLUMN id SET DEFAULT nextval('maker.pit_file_ilk_id_seq'::regclass);
+
+
+--
+-- Name: pit_file_stability_fee id; Type: DEFAULT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.pit_file_stability_fee ALTER COLUMN id SET DEFAULT nextval('maker.pit_file_stability_fee_id_seq'::regclass);
+
+
--
-- Name: price_feeds id; Type: DEFAULT; Schema: maker; Owner: -
--
@@ -608,6 +851,13 @@ ALTER TABLE ONLY maker.price_feeds ALTER COLUMN id SET DEFAULT nextval('maker.pr
ALTER TABLE ONLY maker.tend ALTER COLUMN db_id SET DEFAULT nextval('maker.tend_db_id_seq'::regclass);
+--
+-- Name: vat_init id; Type: DEFAULT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.vat_init ALTER COLUMN id SET DEFAULT nextval('maker.vat_init_id_seq'::regclass);
+
+
--
-- Name: blocks id; Type: DEFAULT; Schema: public; Owner: -
--
@@ -671,6 +921,38 @@ ALTER TABLE ONLY public.transactions ALTER COLUMN id SET DEFAULT nextval('public
ALTER TABLE ONLY public.watched_contracts ALTER COLUMN contract_id SET DEFAULT nextval('public.watched_contracts_contract_id_seq'::regclass);
+--
+-- Name: bite bite_header_id_tx_idx_key; Type: CONSTRAINT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.bite
+ ADD CONSTRAINT bite_header_id_tx_idx_key UNIQUE (header_id, tx_idx);
+
+
+--
+-- Name: bite bite_pkey; Type: CONSTRAINT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.bite
+ ADD CONSTRAINT bite_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: dent dent_bid_id_key; Type: CONSTRAINT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.dent
+ ADD CONSTRAINT dent_bid_id_key UNIQUE (bid_id);
+
+
+--
+-- Name: dent dent_pkey; Type: CONSTRAINT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.dent
+ ADD CONSTRAINT dent_pkey PRIMARY KEY (db_id);
+
+
--
-- Name: flip_kick flip_kick_id_key; Type: CONSTRAINT; Schema: maker; Owner: -
--
@@ -703,6 +985,54 @@ ALTER TABLE ONLY maker.frob
ADD CONSTRAINT frob_pkey PRIMARY KEY (id);
+--
+-- Name: pit_file_debt_ceiling pit_file_debt_ceiling_header_id_tx_idx_key; Type: CONSTRAINT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.pit_file_debt_ceiling
+ ADD CONSTRAINT pit_file_debt_ceiling_header_id_tx_idx_key UNIQUE (header_id, tx_idx);
+
+
+--
+-- Name: pit_file_debt_ceiling pit_file_debt_ceiling_pkey; Type: CONSTRAINT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.pit_file_debt_ceiling
+ ADD CONSTRAINT pit_file_debt_ceiling_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: pit_file_ilk pit_file_ilk_header_id_tx_idx_key; Type: CONSTRAINT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.pit_file_ilk
+ ADD CONSTRAINT pit_file_ilk_header_id_tx_idx_key UNIQUE (header_id, tx_idx);
+
+
+--
+-- Name: pit_file_ilk pit_file_ilk_pkey; Type: CONSTRAINT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.pit_file_ilk
+ ADD CONSTRAINT pit_file_ilk_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: pit_file_stability_fee pit_file_stability_fee_header_id_tx_idx_key; Type: CONSTRAINT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.pit_file_stability_fee
+ ADD CONSTRAINT pit_file_stability_fee_header_id_tx_idx_key UNIQUE (header_id, tx_idx);
+
+
+--
+-- Name: pit_file_stability_fee pit_file_stability_fee_pkey; Type: CONSTRAINT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.pit_file_stability_fee
+ ADD CONSTRAINT pit_file_stability_fee_pkey PRIMARY KEY (id);
+
+
--
-- Name: price_feeds price_feeds_header_id_medianizer_address_tx_idx_key; Type: CONSTRAINT; Schema: maker; Owner: -
--
@@ -720,11 +1050,11 @@ ALTER TABLE ONLY maker.price_feeds
--
--- Name: tend tend_id_key; Type: CONSTRAINT; Schema: maker; Owner: -
+-- Name: tend tend_bid_id_key; Type: CONSTRAINT; Schema: maker; Owner: -
--
ALTER TABLE ONLY maker.tend
- ADD CONSTRAINT tend_id_key UNIQUE (id);
+ ADD CONSTRAINT tend_bid_id_key UNIQUE (bid_id);
--
@@ -735,6 +1065,22 @@ ALTER TABLE ONLY maker.tend
ADD CONSTRAINT tend_pkey PRIMARY KEY (db_id);
+--
+-- Name: vat_init vat_init_header_id_tx_idx_key; Type: CONSTRAINT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.vat_init
+ ADD CONSTRAINT vat_init_header_id_tx_idx_key UNIQUE (header_id, tx_idx);
+
+
+--
+-- Name: vat_init vat_init_pkey; Type: CONSTRAINT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.vat_init
+ ADD CONSTRAINT vat_init_pkey PRIMARY KEY (id);
+
+
--
-- Name: blocks blocks_pkey; Type: CONSTRAINT; Schema: public; Owner: -
--
@@ -873,6 +1219,22 @@ CREATE INDEX tx_to_index ON public.transactions USING btree (tx_to);
CREATE TRIGGER notify_pricefeeds AFTER INSERT ON maker.price_feeds FOR EACH ROW EXECUTE PROCEDURE public.notify_pricefeed();
+--
+-- Name: bite bite_header_id_fkey; Type: FK CONSTRAINT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.bite
+ ADD CONSTRAINT bite_header_id_fkey FOREIGN KEY (header_id) REFERENCES public.headers(id) ON DELETE CASCADE;
+
+
+--
+-- Name: dent dent_header_id_fkey; Type: FK CONSTRAINT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.dent
+ ADD CONSTRAINT dent_header_id_fkey FOREIGN KEY (header_id) REFERENCES public.headers(id) ON DELETE CASCADE;
+
+
--
-- Name: flip_kick flip_kick_header_id_fkey; Type: FK CONSTRAINT; Schema: maker; Owner: -
--
@@ -897,6 +1259,30 @@ ALTER TABLE ONLY maker.price_feeds
ADD CONSTRAINT headers_fk FOREIGN KEY (header_id) REFERENCES public.headers(id) ON DELETE CASCADE;
+--
+-- Name: pit_file_debt_ceiling pit_file_debt_ceiling_header_id_fkey; Type: FK CONSTRAINT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.pit_file_debt_ceiling
+ ADD CONSTRAINT pit_file_debt_ceiling_header_id_fkey FOREIGN KEY (header_id) REFERENCES public.headers(id) ON DELETE CASCADE;
+
+
+--
+-- Name: pit_file_ilk pit_file_ilk_header_id_fkey; Type: FK CONSTRAINT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.pit_file_ilk
+ ADD CONSTRAINT pit_file_ilk_header_id_fkey FOREIGN KEY (header_id) REFERENCES public.headers(id) ON DELETE CASCADE;
+
+
+--
+-- Name: pit_file_stability_fee pit_file_stability_fee_header_id_fkey; Type: FK CONSTRAINT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.pit_file_stability_fee
+ ADD CONSTRAINT pit_file_stability_fee_header_id_fkey FOREIGN KEY (header_id) REFERENCES public.headers(id) ON DELETE CASCADE;
+
+
--
-- Name: tend tend_header_id_fkey; Type: FK CONSTRAINT; Schema: maker; Owner: -
--
@@ -905,6 +1291,14 @@ ALTER TABLE ONLY maker.tend
ADD CONSTRAINT tend_header_id_fkey FOREIGN KEY (header_id) REFERENCES public.headers(id) ON DELETE CASCADE;
+--
+-- Name: vat_init vat_init_header_id_fkey; Type: FK CONSTRAINT; Schema: maker; Owner: -
+--
+
+ALTER TABLE ONLY maker.vat_init
+ ADD CONSTRAINT vat_init_header_id_fkey FOREIGN KEY (header_id) REFERENCES public.headers(id) ON DELETE CASCADE;
+
+
--
-- Name: transactions blocks_fk; Type: FK CONSTRAINT; Schema: public; Owner: -
--
diff --git a/environments/staging.toml b/environments/staging.toml
index 2dc3941e..80d92b0b 100644
--- a/environments/staging.toml
+++ b/environments/staging.toml
@@ -7,3 +7,6 @@ port = 5432
[client]
ipcPath = "/mnt/geth.ipc"
+
+[datadog]
+name = "maker_vdb_staging"
\ No newline at end of file
diff --git a/main.go b/main.go
index a27eb869..4ff854f7 100644
--- a/main.go
+++ b/main.go
@@ -2,8 +2,15 @@ package main
import (
"github.com/vulcanize/vulcanizedb/cmd"
+
+ "github.com/spf13/viper"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
)
func main() {
+	tracer.Start(tracer.WithServiceName(viper.GetString("datadog.name")))
+	defer tracer.Stop()
+
	cmd.Execute()
}
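
Reviewer note: with `tracer.Stop()` deferred immediately after `tracer.Start(...)`, the tracer is flushed when `main` returns. Once started, individual operations can be traced with spans; a hedged sketch using the dd-trace-go.v1 `tracer` API (the operation name is illustrative):

```go
// Anywhere that runs after tracer.Start:
span := tracer.StartSpan("transformer.execute")
defer span.Finish()
// ... traced work here ...
```
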
diff --git a/pkg/transformers/bite/bite_suite_test.go b/pkg/transformers/bite/bite_suite_test.go
new file mode 100644
index 00000000..e5a93b26
--- /dev/null
+++ b/pkg/transformers/bite/bite_suite_test.go
@@ -0,0 +1,33 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bite_test
+
+import (
+	"io/ioutil"
+	"log"
+	"testing"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+func TestBite(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Bite Suite")
+}
+
+var _ = BeforeSuite(func() {
+ log.SetOutput(ioutil.Discard)
+})
diff --git a/pkg/transformers/bite/config.go b/pkg/transformers/bite/config.go
new file mode 100644
index 00000000..8266d62a
--- /dev/null
+++ b/pkg/transformers/bite/config.go
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2018 Vulcanize
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package bite
+
+import (
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+)
+
+var BiteConfig = shared.TransformerConfig{
+ ContractAddress: shared.CatContractAddress,
+ ContractAbi: shared.CatABI,
+ Topics: []string{shared.BiteSignature},
+ StartingBlockNumber: 0,
+ EndingBlockNumber: 100,
+}
diff --git a/pkg/transformers/bite/converter.go b/pkg/transformers/bite/converter.go
new file mode 100644
index 00000000..20b0904d
--- /dev/null
+++ b/pkg/transformers/bite/converter.go
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2018 Vulcanize
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package bite
+
+import (
+ "encoding/json"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/vulcanize/vulcanizedb/pkg/geth"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+)
+
+type Converter interface {
+ ToEntity(contractAddress string, contractAbi string, ethLog types.Log) (BiteEntity, error)
+	ToModel(entity BiteEntity) (BiteModel, error)
+}
+
+type BiteConverter struct{}
+
+func (BiteConverter) ToEntity(contractAddress string, contractAbi string, ethLog types.Log) (BiteEntity, error) {
+ entity := BiteEntity{}
+ address := common.HexToAddress(contractAddress)
+ abi, err := geth.ParseAbi(contractAbi)
+ if err != nil {
+ return entity, err
+ }
+
+ contract := bind.NewBoundContract(address, abi, nil, nil, nil)
+
+ err = contract.UnpackLog(&entity, "Bite", ethLog)
+ if err != nil {
+ return entity, err
+ }
+
+ entity.Raw = ethLog
+ entity.TransactionIndex = ethLog.TxIndex
+
+ return entity, nil
+}
+
+func (converter BiteConverter) ToModel(entity BiteEntity) (BiteModel, error) {
+ id := entity.Id.String()
+ ilk := entity.Ilk[:]
+ lad := entity.Lad[:]
+ ink := entity.Ink.String()
+ art := entity.Art.String()
+ iArt := entity.IArt.String()
+ tab := entity.Tab.String()
+ flip := entity.Flip.String()
+ txIdx := entity.TransactionIndex
+	rawLogJson, err := json.Marshal(entity.Raw)
+	if err != nil {
+		return BiteModel{}, err
+	}
+	rawLogString := string(rawLogJson)
+
+ return BiteModel{
+ Id: shared.ConvertNilToEmptyString(id),
+ Ilk: ilk,
+ Lad: lad,
+ Ink: shared.ConvertNilToEmptyString(ink),
+ Art: shared.ConvertNilToEmptyString(art),
+ IArt: shared.ConvertNilToEmptyString(iArt),
+ Tab: shared.ConvertNilToEmptyString(tab),
+ Flip: shared.ConvertNilToEmptyString(flip),
+ TransactionIndex: txIdx,
+ Raw: rawLogString,
+ }, nil
+}
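
Reviewer note: the converter above is a two-step pipeline: `ToEntity` unpacks the raw log against the Cat ABI via a bound contract, then `ToModel` stringifies the numeric fields and serializes the original log for the `raw_log` JSONB column. A minimal usage sketch, assuming the `bite` and `shared` packages from this PR are imported:

```go
func convertBiteLog(ethLog types.Log) (bite.BiteModel, error) {
	converter := bite.BiteConverter{}
	entity, err := converter.ToEntity(shared.CatContractAddress, shared.CatABI, ethLog)
	if err != nil {
		return bite.BiteModel{}, err
	}
	// ToModel guards each stringified big.Int field with ConvertNilToEmptyString
	// and marshals the raw log to JSON.
	return converter.ToModel(entity)
}
```
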
diff --git a/pkg/transformers/bite/converter_test.go b/pkg/transformers/bite/converter_test.go
new file mode 100644
index 00000000..c351cb87
--- /dev/null
+++ b/pkg/transformers/bite/converter_test.go
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2018 Vulcanize
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package bite_test
+
+import (
+ "encoding/json"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/bite"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+)
+
+var _ = Describe("Bite Converter", func() {
+ var converter = bite.BiteConverter{}
+
+ Describe("ToEntity", func() {
+ It("converts an eth log to a bite entity", func() {
+ entity, err := converter.ToEntity(shared.CatContractAddress, shared.CatABI, test_data.EthBiteLog)
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(entity.Ilk).To(Equal(test_data.BiteEntity.Ilk))
+ Expect(entity.Lad).To(Equal(test_data.BiteEntity.Lad))
+ Expect(entity.Ink).To(Equal(test_data.BiteEntity.Ink))
+ Expect(entity.Art).To(Equal(test_data.BiteEntity.Art))
+ Expect(entity.Tab).To(Equal(test_data.BiteEntity.Tab))
+ Expect(entity.Flip).To(Equal(test_data.BiteEntity.Flip))
+ Expect(entity.IArt).To(Equal(test_data.BiteEntity.IArt))
+ Expect(entity.TransactionIndex).To(Equal(test_data.BiteEntity.TransactionIndex))
+ Expect(entity.Raw).To(Equal(test_data.BiteEntity.Raw))
+ })
+
+ It("returns an error if converting log to entity fails", func() {
+ _, err := converter.ToEntity(shared.CatContractAddress, "error abi", test_data.EthBiteLog)
+
+ Expect(err).To(HaveOccurred())
+ })
+ })
+
+ Describe("ToModel", func() {
+ var emptyEntity = bite.BiteEntity{}
+
+ BeforeEach(func() {
+ emptyEntity.Id = big.NewInt(1)
+ })
+
+ It("converts an Entity to a Model", func() {
+ model, err := converter.ToModel(test_data.BiteEntity)
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(model).To(Equal(test_data.BiteModel))
+ Expect(model.TransactionIndex).To(Equal(test_data.BiteModel.TransactionIndex))
+ })
+
+ It("handles nil values", func() {
+ emptyLog, err := json.Marshal(types.Log{})
+ Expect(err).NotTo(HaveOccurred())
+ expectedModel := bite.BiteModel{
+ Id: "1",
+ Ilk: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ Lad: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ Ink: "",
+ Art: "",
+ IArt: "",
+ Tab: "",
+ Flip: "",
+ TransactionIndex: 0,
+ Raw: string(emptyLog),
+ }
+ model, err := converter.ToModel(emptyEntity)
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(model).To(Equal(expectedModel))
+ })
+ })
+})
diff --git a/pkg/transformers/tend/entity.go b/pkg/transformers/bite/entity.go
similarity index 76%
rename from pkg/transformers/tend/entity.go
rename to pkg/transformers/bite/entity.go
index 526ecc9b..489aba0e 100644
--- a/pkg/transformers/tend/entity.go
+++ b/pkg/transformers/bite/entity.go
@@ -12,22 +12,22 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package tend
+package bite
import (
- "math/big"
-
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
+ "math/big"
)
-type TendEntity struct {
+type BiteEntity struct {
Id *big.Int
- Lot *big.Int
- Bid *big.Int
- Guy common.Address
- Tic *big.Int
- Era *big.Int
+ Ilk [32]byte
+ Lad [32]byte
+ Ink *big.Int
+ Art *big.Int
+ Tab *big.Int
+ Flip *big.Int
+ IArt *big.Int
TransactionIndex uint
Raw types.Log
}
diff --git a/pkg/transformers/bite/integration_test.go b/pkg/transformers/bite/integration_test.go
new file mode 100644
index 00000000..34419f3a
--- /dev/null
+++ b/pkg/transformers/bite/integration_test.go
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2018 Vulcanize
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package bite_test
+
+import (
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethclient"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/ethereum/go-ethereum/rpc"
+ "github.com/vulcanize/vulcanizedb/pkg/geth"
+ "github.com/vulcanize/vulcanizedb/pkg/geth/client"
+ rpc2 "github.com/vulcanize/vulcanizedb/pkg/geth/converters/rpc"
+ "github.com/vulcanize/vulcanizedb/pkg/geth/node"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/bite"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+ "github.com/vulcanize/vulcanizedb/test_config"
+)
+
+var _ = Describe("Integration tests", func() {
+ XIt("Fetches bite event logs from a local test chain", func() {
+ ipcPath := test_config.TestClient.IPCPath
+
+ rawRpcClient, err := rpc.Dial(ipcPath)
+ Expect(err).NotTo(HaveOccurred())
+
+ rpcClient := client.NewRpcClient(rawRpcClient, ipcPath)
+ ethClient := ethclient.NewClient(rawRpcClient)
+ blockChainClient := client.NewEthClient(ethClient)
+ realNode := node.MakeNode(rpcClient)
+ transactionConverter := rpc2.NewRpcTransactionConverter(ethClient)
+ realBlockChain := geth.NewBlockChain(blockChainClient, realNode, transactionConverter)
+ realFetcher := shared.NewFetcher(realBlockChain)
+ topic0 := common.HexToHash(shared.BiteSignature)
+ topics := [][]common.Hash{{topic0}}
+
+ result, err := realFetcher.FetchLogs(shared.CatContractAddress, topics, int64(26))
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(len(result) > 0).To(BeTrue())
+ Expect(result[0].Address).To(Equal(common.HexToAddress(shared.CatContractAddress)))
+ Expect(result[0].TxHash).To(Equal(test_data.EthBiteLog.TxHash))
+ Expect(result[0].BlockNumber).To(Equal(test_data.EthBiteLog.BlockNumber))
+ Expect(result[0].Topics).To(Equal(test_data.EthBiteLog.Topics))
+ Expect(result[0].Index).To(Equal(test_data.EthBiteLog.Index))
+ })
+
+ It("unpacks an event log", func() {
+ address := common.HexToAddress(shared.CatContractAddress)
+ abi, err := geth.ParseAbi(shared.CatABI)
+ Expect(err).NotTo(HaveOccurred())
+
+ contract := bind.NewBoundContract(address, abi, nil, nil, nil)
+ entity := &bite.BiteEntity{}
+
+ var eventLog = test_data.EthBiteLog
+
+ err = contract.UnpackLog(entity, "Bite", eventLog)
+ Expect(err).NotTo(HaveOccurred())
+
+ expectedEntity := test_data.BiteEntity
+ Expect(entity.Art).To(Equal(expectedEntity.Art))
+ Expect(entity.Ilk).To(Equal(expectedEntity.Ilk))
+ Expect(entity.Ink).To(Equal(expectedEntity.Ink))
+ })
+})
diff --git a/pkg/transformers/bite/model.go b/pkg/transformers/bite/model.go
new file mode 100644
index 00000000..5e58f998
--- /dev/null
+++ b/pkg/transformers/bite/model.go
@@ -0,0 +1,28 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bite
+
+type BiteModel struct {
+ Id string
+ Ilk []byte
+ Lad []byte
+ Ink string
+ Art string
+ IArt string
+ Tab string
+ Flip string
+ TransactionIndex uint `db:"tx_idx"`
+ Raw string `db:"raw_log"`
+}
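
Reviewer note: the `db` struct tags above drive sqlx column mapping; only `tx_idx` and `raw_log` need explicit tags, since the other field names already match their columns. A read-back sketch mirroring the repository tests (`db` and `headerID` assumed in scope):

```go
var dbBite bite.BiteModel
err := db.Get(&dbBite,
	`SELECT id, ilk, lad, ink, art, iart, tab, flip, tx_idx, raw_log
	   FROM maker.bite WHERE header_id = $1`, headerID)
```
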
diff --git a/pkg/transformers/bite/repository.go b/pkg/transformers/bite/repository.go
new file mode 100644
index 00000000..8db052e6
--- /dev/null
+++ b/pkg/transformers/bite/repository.go
@@ -0,0 +1,63 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bite
+
+import (
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
+)
+
+type Repository interface {
+ Create(headerID int64, model BiteModel) error
+ MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error)
+}
+
+type BiteRepository struct {
+ db *postgres.DB
+}
+
+func NewBiteRepository(db *postgres.DB) Repository {
+ return BiteRepository{db: db}
+}
+
+func (repository BiteRepository) Create(headerID int64, model BiteModel) error {
+ _, err := repository.db.Exec(
+		`INSERT INTO maker.bite (header_id, id, ilk, lad, ink, art, iart, tab, flip, tx_idx, raw_log)
+ VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)`,
+ headerID, model.Id, model.Ilk, model.Lad, model.Ink, model.Art, model.IArt, model.Tab, model.Flip, model.TransactionIndex, model.Raw,
+ )
+
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (repository BiteRepository) MissingHeaders(startingBlockNumber int64, endingBlockNumber int64) ([]core.Header, error) {
+ var result []core.Header
+ err := repository.db.Select(
+ &result,
+ `SELECT headers.id, headers.block_number FROM headers
+ LEFT JOIN maker.bite on headers.id = header_id
+               WHERE header_id IS NULL
+ AND headers.block_number >= $1
+ AND headers.block_number <= $2
+ AND headers.eth_node_fingerprint = $3`,
+ startingBlockNumber,
+ endingBlockNumber,
+ repository.db.Node.ID,
+ )
+ return result, err
+}
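
Reviewer note: `MissingHeaders` is the backfill driver; it returns only headers from this node's fingerprint that have no `maker.bite` row yet, so repeated runs converge. A hedged sketch of the intended call pattern, assuming `log`, `bite`, and `postgres` imports (the block range values are illustrative):

```go
func backfillBites(db *postgres.DB) error {
	repo := bite.NewBiteRepository(db)
	headers, err := repo.MissingHeaders(0, 100)
	if err != nil {
		return err
	}
	for _, header := range headers {
		// Fetch and convert the Bite logs for this block, then repo.Create;
		// the UNIQUE (header_id, tx_idx) constraint rejects accidental replays.
		log.Println("backfilling bite events for block", header.BlockNumber)
	}
	return nil
}
```
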
diff --git a/pkg/transformers/bite/repository_test.go b/pkg/transformers/bite/repository_test.go
new file mode 100644
index 00000000..dd26d89b
--- /dev/null
+++ b/pkg/transformers/bite/repository_test.go
@@ -0,0 +1,135 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bite_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "database/sql"
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/bite"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+ "github.com/vulcanize/vulcanizedb/test_config"
+)
+
+var _ = Describe("Bite repository", func() {
+ Describe("Create", func() {
+ It("persists a bite record", func() {
+ node := core.Node{}
+ db := test_config.NewTestDB(node)
+ test_config.CleanTestDB(db)
+ headerRepository := repositories.NewHeaderRepository(db)
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{})
+ Expect(err).NotTo(HaveOccurred())
+ biteRepository := bite.NewBiteRepository(db)
+
+ err = biteRepository.Create(headerID, test_data.BiteModel)
+
+ Expect(err).NotTo(HaveOccurred())
+ var dbBite bite.BiteModel
+ err = db.Get(&dbBite, `SELECT id, ilk, lad, ink, art, tab, flip, tx_idx, raw_log FROM maker.bite WHERE header_id = $1`, headerID)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(dbBite.Id).To(Equal(test_data.BiteModel.Id))
+ Expect(dbBite.Ilk).To(Equal(test_data.BiteModel.Ilk))
+ Expect(dbBite.Lad).To(Equal(test_data.BiteModel.Lad))
+ Expect(dbBite.Art).To(Equal(test_data.BiteModel.Art))
+ Expect(dbBite.Tab).To(Equal(test_data.BiteModel.Tab))
+ Expect(dbBite.Flip).To(Equal(test_data.BiteModel.Flip))
+ Expect(dbBite.TransactionIndex).To(Equal(test_data.BiteModel.TransactionIndex))
+ Expect(dbBite.Raw).To(MatchJSON(test_data.BiteModel.Raw))
+ })
+
+ It("does not duplicate bite events", func() {
+ node := core.Node{}
+ db := test_config.NewTestDB(node)
+ test_config.CleanTestDB(db)
+ headerRepository := repositories.NewHeaderRepository(db)
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{})
+ Expect(err).NotTo(HaveOccurred())
+ biteRepository := bite.NewBiteRepository(db)
+ err = biteRepository.Create(headerID, test_data.BiteModel)
+ Expect(err).NotTo(HaveOccurred())
+
+ var anotherBiteModel = bite.BiteModel{
+ Id: "11",
+ Ilk: test_data.BiteModel.Ilk,
+ Lad: test_data.BiteModel.Lad,
+ Ink: test_data.BiteModel.Ink,
+ Art: test_data.BiteModel.Art,
+ Tab: test_data.BiteModel.Tab,
+ Flip: test_data.BiteModel.Flip,
+ IArt: test_data.BiteModel.IArt,
+ TransactionIndex: test_data.BiteModel.TransactionIndex,
+ Raw: test_data.BiteModel.Raw,
+ }
+
+ err = biteRepository.Create(headerID, anotherBiteModel)
+
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(ContainSubstring("pq: duplicate key value violates unique constraint"))
+ })
+
+ It("removes bite if corresponding header is deleted", func() {
+ node := core.Node{}
+ db := test_config.NewTestDB(node)
+ test_config.CleanTestDB(db)
+ headerRepository := repositories.NewHeaderRepository(db)
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{})
+ Expect(err).NotTo(HaveOccurred())
+ biteRepository := bite.NewBiteRepository(db)
+ err = biteRepository.Create(headerID, test_data.BiteModel)
+ Expect(err).NotTo(HaveOccurred())
+
+ _, err = db.Exec(`DELETE FROM headers WHERE id = $1`, headerID)
+
+ Expect(err).NotTo(HaveOccurred())
+ var dbBite bite.BiteModel
+ err = db.Get(&dbBite, `SELECT id, ilk, lad, ink, art, tab, flip, tx_idx, raw_log FROM maker.bite WHERE header_id = $1`, headerID)
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(sql.ErrNoRows))
+ })
+ })
+
+ Describe("MissingHeaders", func() {
+ It("returns headers with no associated bite event", func() {
+ node := core.Node{}
+ db := test_config.NewTestDB(node)
+ test_config.CleanTestDB(db)
+ headerRepository := repositories.NewHeaderRepository(db)
+ startingBlockNumber := int64(1)
+ biteBlockNumber := int64(2)
+ endingBlockNumber := int64(3)
+ blockNumbers := []int64{startingBlockNumber, biteBlockNumber, endingBlockNumber, endingBlockNumber + 1}
+ var headerIDs []int64
+ for _, n := range blockNumbers {
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{BlockNumber: n})
+ headerIDs = append(headerIDs, headerID)
+ Expect(err).NotTo(HaveOccurred())
+ }
+ biteRepository := bite.NewBiteRepository(db)
+ err := biteRepository.Create(headerIDs[1], test_data.BiteModel)
+ Expect(err).NotTo(HaveOccurred())
+
+ headers, err := biteRepository.MissingHeaders(startingBlockNumber, endingBlockNumber)
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(headers)).To(Equal(2))
+ Expect(headers[0].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber)))
+ Expect(headers[1].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber)))
+ })
+ })
+})
diff --git a/pkg/transformers/bite/transformer.go b/pkg/transformers/bite/transformer.go
new file mode 100644
index 00000000..7a68311a
--- /dev/null
+++ b/pkg/transformers/bite/transformer.go
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2018 Vulcanize
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package bite
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+ "log"
+)
+
+type BiteTransformer struct {
+ Repository Repository
+ Fetcher shared.LogFetcher
+ Converter Converter
+ Config shared.TransformerConfig
+}
+
+type BiteTransformerInitializer struct {
+ Config shared.TransformerConfig
+}
+
+func (i BiteTransformerInitializer) NewBiteTransformer(db *postgres.DB, blockChain core.BlockChain) shared.Transformer {
+ fetcher := shared.NewFetcher(blockChain)
+ repository := NewBiteRepository(db)
+ transformer := BiteTransformer{
+ Fetcher: fetcher,
+ Repository: repository,
+ Converter: BiteConverter{},
+ Config: i.Config,
+ }
+
+ return transformer
+}
+
+func (b BiteTransformer) Execute() error {
+ config := b.Config
+ topics := [][]common.Hash{{common.HexToHash(shared.BiteSignature)}}
+
+ missingHeaders, err := b.Repository.MissingHeaders(config.StartingBlockNumber, config.EndingBlockNumber)
+ if err != nil {
+ log.Println("Error fetching missing headers:", err)
+ return err
+ }
+
+ for _, header := range missingHeaders {
+ ethLogs, err := b.Fetcher.FetchLogs(config.ContractAddress, topics, header.BlockNumber)
+ if err != nil {
+ log.Println("Error fetching matching logs:", err)
+ return err
+ }
+
+ for _, ethLog := range ethLogs {
+			entity, err := b.Converter.ToEntity(config.ContractAddress, config.ContractAbi, ethLog)
+			if err != nil {
+				log.Println("Error converting log to entity:", err)
+				return err
+			}
+
+			model, err := b.Converter.ToModel(entity)
+			if err != nil {
+				log.Println("Error converting entity to model:", err)
+				return err
+			}
+
+ err = b.Repository.Create(header.Id, model)
+ if err != nil {
+ log.Println("Error persisting bite record:", err)
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (b BiteTransformer) SetConfig(config shared.TransformerConfig) {
+ b.Config = config
+}
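
Reviewer note: a wiring sketch for the initializer above, assuming a connected `*postgres.DB` and a `core.BlockChain` as constructed elsewhere in this repo:

```go
func runBiteTransformer(db *postgres.DB, blockChain core.BlockChain) {
	initializer := bite.BiteTransformerInitializer{Config: bite.BiteConfig}
	transformer := initializer.NewBiteTransformer(db, blockChain)
	if err := transformer.Execute(); err != nil {
		log.Println("bite transformer failed:", err)
	}
}
```
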
diff --git a/pkg/transformers/bite/transfromer_test.go b/pkg/transformers/bite/transfromer_test.go
new file mode 100644
index 00000000..21e43fbb
--- /dev/null
+++ b/pkg/transformers/bite/transfromer_test.go
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2018 Vulcanize
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package bite_test
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/fakes"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/bite"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data/mocks"
+ bite_mocks "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data/mocks/bite"
+ "math/rand"
+)
+
+var _ = Describe("Bite Transformer", func() {
+ var repository bite_mocks.MockBiteRepository
+ var fetcher mocks.MockLogFetcher
+ var converter bite_mocks.MockBiteConverter
+ var transformer bite.BiteTransformer
+ var blockNumber1 = rand.Int63()
+ var blockNumber2 = rand.Int63()
+ var testConfig shared.TransformerConfig
+
+ BeforeEach(func() {
+ repository = bite_mocks.MockBiteRepository{}
+ fetcher = mocks.MockLogFetcher{}
+ converter = bite_mocks.MockBiteConverter{}
+
+ transformer = bite.BiteTransformer{
+ Repository: &repository,
+ Fetcher: &fetcher,
+ Converter: &converter,
+ Config: bite.BiteConfig,
+ }
+
+ testConfig = shared.TransformerConfig{
+ ContractAddress: "0x12345",
+ ContractAbi: "test abi",
+ Topics: []string{shared.BiteSignature},
+ StartingBlockNumber: blockNumber1,
+ EndingBlockNumber: blockNumber2,
+ }
+ transformer.SetConfig(testConfig)
+ })
+
+ It("gets missing headers for blocks in the configured range", func() {
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(repository.PassedStartingBlockNumber).To(Equal(bite.BiteConfig.StartingBlockNumber))
+ Expect(repository.PassedEndingBlockNumber).To(Equal(bite.BiteConfig.EndingBlockNumber))
+ })
+
+ It("returns an error if it fails to get missing headers", func() {
+ repository.SetMissingHeadersErr(fakes.FakeError)
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ })
+
+ It("fetches eth logs for each missing header", func() {
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: blockNumber1}, {BlockNumber: blockNumber2}})
+ expectedTopics := [][]common.Hash{{common.HexToHash(shared.BiteSignature)}}
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(fetcher.FetchedBlocks).To(Equal([]int64{blockNumber1, blockNumber2}))
+ Expect(fetcher.FetchedTopics).To(Equal(expectedTopics))
+ Expect(fetcher.FetchedContractAddress).To(Equal(bite.BiteConfig.ContractAddress))
+ })
+
+ It("returns an error if fetching logs fails", func() {
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
+ fetcher.SetFetcherError(fakes.FakeError)
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(fakes.FakeError))
+ })
+
+ It("converts an eth log to an Entity", func() {
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
+ fetcher.SetFetchedLogs([]types.Log{test_data.EthBiteLog})
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(converter.ConverterContract).To(Equal(bite.BiteConfig.ContractAddress))
+ Expect(converter.ConverterAbi).To(Equal(bite.BiteConfig.ContractAbi))
+ Expect(converter.LogsToConvert).To(Equal([]types.Log{test_data.EthBiteLog}))
+ })
+
+ It("returns an error if converter fails", func() {
+ headerId := int64(1)
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: blockNumber1, Id: headerId}})
+ fetcher.SetFetchedLogs([]types.Log{test_data.EthBiteLog})
+ converter.SetConverterError(fakes.FakeError)
+
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(fakes.FakeError))
+ })
+
+ It("persists the bite record", func() {
+ headerId := int64(1)
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: blockNumber1, Id: headerId}})
+ fetcher.SetFetchedLogs([]types.Log{test_data.EthBiteLog})
+
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(repository.PassedHeaderID).To(Equal(headerId))
+ Expect(repository.PassedBiteModel).To(Equal(test_data.BiteModel))
+ })
+
+ It("returns error if persisting bite record fails", func() {
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: blockNumber1}})
+ fetcher.SetFetchedLogs([]types.Log{test_data.EthBiteLog})
+ repository.SetCreateError(fakes.FakeError)
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(fakes.FakeError))
+ })
+})
diff --git a/pkg/transformers/dent/config.go b/pkg/transformers/dent/config.go
new file mode 100644
index 00000000..cd15c67f
--- /dev/null
+++ b/pkg/transformers/dent/config.go
@@ -0,0 +1,25 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dent
+
+import "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+
+var DentConfig = shared.TransformerConfig{
+ ContractAddress: shared.FlipperContractAddress,
+ ContractAbi: shared.FlipperABI,
+ Topics: []string{shared.DentFunctionSignature},
+ StartingBlockNumber: 0,
+ EndingBlockNumber: 100,
+}
diff --git a/pkg/transformers/dent/converter.go b/pkg/transformers/dent/converter.go
new file mode 100644
index 00000000..ad487d48
--- /dev/null
+++ b/pkg/transformers/dent/converter.go
@@ -0,0 +1,87 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dent
+
+import (
+ "encoding/json"
+ "errors"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+type Converter interface {
+ Convert(contractAddress string, contractAbi string, ethLog types.Log) (DentModel, error)
+}
+
+type DentConverter struct{}
+
+func NewDentConverter() DentConverter {
+ return DentConverter{}
+}
+
+func (c DentConverter) Convert(contractAddress, contractAbi string, ethLog types.Log) (DentModel, error) {
+ err := validateLog(ethLog)
+ if err != nil {
+ return DentModel{}, err
+ }
+
+ bidId := ethLog.Topics[2].Big()
+ lot := ethLog.Topics[3].Big().String()
+ bidValue := getBidValue(ethLog)
+ guy := common.HexToAddress(ethLog.Topics[1].Hex()).String()
+ tic := "0"
+ // TODO: the tic value will likely need to be added to an emitted event,
+ // at which point this conversion will need to be updated.
+
+ transactionIndex := ethLog.TxIndex
+
+ raw, err := json.Marshal(ethLog)
+ if err != nil {
+ return DentModel{}, err
+ }
+
+ return DentModel{
+ BidId: bidId.String(),
+ Lot: lot,
+ Bid: bidValue,
+ Guy: guy,
+ Tic: tic,
+ TransactionIndex: transactionIndex,
+ Raw: raw,
+ }, nil
+}
+
+func validateLog(ethLog types.Log) error {
+ if len(ethLog.Data) == 0 {
+ return errors.New("dent log data is empty")
+ }
+
+ if len(ethLog.Topics) < 4 {
+ return errors.New("dent log does not contain expected topics")
+ }
+
+ return nil
+}
+
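+// getBidValue extracts the final 32-byte word of the log's data field, which
+// holds the bid amount. validateLog only guarantees the data is non-empty, so
+// a full final word is assumed to be present.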
+func getBidValue(ethLog types.Log) string {
+ itemByteLength := 32
+ lastDataItemStartIndex := len(ethLog.Data) - itemByteLength
+ lastItem := ethLog.Data[lastDataItemStartIndex:]
+ lastValue := big.NewInt(0).SetBytes(lastItem)
+
+ return lastValue.String()
+}
diff --git a/pkg/transformers/dent/converter_test.go b/pkg/transformers/dent/converter_test.go
new file mode 100644
index 00000000..fbecd4f3
--- /dev/null
+++ b/pkg/transformers/dent/converter_test.go
@@ -0,0 +1,58 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dent_test
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/dent"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+)
+
+var _ = Describe("Dent Converter", func() {
+ var converter dent.DentConverter
+
+ BeforeEach(func() {
+ converter = dent.NewDentConverter()
+ })
+
+ It("converts an eth log to a db model", func() {
+ model, err := converter.Convert(shared.FlipperContractAddress, shared.FlipperABI, test_data.DentLog)
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(model).To(Equal(test_data.DentModel))
+ })
+
+ It("returns an error if the expected amount of topics aren't in the log", func() {
+ invalidLog := test_data.DentLog
+ invalidLog.Topics = []common.Hash{}
+ model, err := converter.Convert(shared.FlipperContractAddress, shared.FlipperABI, invalidLog)
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError("dent log does not contain expected topics"))
+ Expect(model).To(Equal(dent.DentModel{}))
+ })
+
+ It("returns an error if the log data is empty", func() {
+ emptyDataLog := test_data.DentLog
+ emptyDataLog.Data = []byte{}
+ model, err := converter.Convert(shared.FlipperContractAddress, shared.FlipperABI, emptyDataLog)
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError("dent log data is empty"))
+ Expect(model).To(Equal(dent.DentModel{}))
+ })
+})
diff --git a/pkg/transformers/dent/dent_suite_test.go b/pkg/transformers/dent/dent_suite_test.go
new file mode 100644
index 00000000..40c7e0c5
--- /dev/null
+++ b/pkg/transformers/dent/dent_suite_test.go
@@ -0,0 +1,19 @@
+package dent_test
+
+import (
+ "io/ioutil"
+ "log"
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+func TestDent(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Dent Suite")
+}
+
+var _ = BeforeSuite(func() {
+ log.SetOutput(ioutil.Discard)
+})
diff --git a/pkg/transformers/dent/model.go b/pkg/transformers/dent/model.go
new file mode 100644
index 00000000..8ee86de3
--- /dev/null
+++ b/pkg/transformers/dent/model.go
@@ -0,0 +1,27 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dent
+
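+// DentModel mirrors a row in the maker.dent table; the db tags map fields to
+// their column names.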
+type DentModel struct {
+ BidId string `db:"bid_id"`
+ Lot string
+ Bid string
+ Guy string
+ Tic string
+ TransactionIndex uint `db:"tx_idx"`
+ Raw []byte `db:"raw_log"`
+}
diff --git a/pkg/transformers/dent/repository.go b/pkg/transformers/dent/repository.go
new file mode 100644
index 00000000..47678c66
--- /dev/null
+++ b/pkg/transformers/dent/repository.go
@@ -0,0 +1,66 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dent
+
+import (
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
+)
+
+type Repository interface {
+ Create(headerId int64, model DentModel) error
+ MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error)
+}
+
+type DentRepository struct {
+ db *postgres.DB
+}
+
+func NewDentRepository(database *postgres.DB) DentRepository {
+ return DentRepository{db: database}
+}
+
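+// Create persists a dent record for the given header. Inserting the same
+// event twice for a header violates the table's unique constraint.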
+func (r DentRepository) Create(headerId int64, model DentModel) error {
+ _, err := r.db.Exec(
+ `INSERT INTO maker.dent (header_id, bid_id, lot, bid, guy, tic, tx_idx, raw_log)
+ VALUES($1, $2, $3, $4, $5, $6, $7, $8)`,
+ headerId, model.BidId, model.Lot, model.Bid, model.Guy, model.Tic, model.TransactionIndex, model.Raw,
+ )
+
+ return err
+}
+
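+// MissingHeaders returns headers synced by this node within the given block
+// range that do not yet have a corresponding dent record.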
+func (r DentRepository) MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error) {
+ var missingHeaders []core.Header
+
+ err := r.db.Select(
+ &missingHeaders,
+ `SELECT headers.id, headers.block_number FROM headers
+ LEFT JOIN maker.dent on headers.id = header_id
+ WHERE header_id IS NULL
+ AND headers.block_number >= $1
+ AND headers.block_number <= $2
+ AND headers.eth_node_fingerprint = $3`,
+ startingBlockNumber,
+ endingBlockNumber,
+ r.db.Node.ID,
+ )
+
+ return missingHeaders, err
+}
diff --git a/pkg/transformers/dent/repository_test.go b/pkg/transformers/dent/repository_test.go
new file mode 100644
index 00000000..6d286187
--- /dev/null
+++ b/pkg/transformers/dent/repository_test.go
@@ -0,0 +1,154 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dent_test
+
+import (
+ "math/rand"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/dent"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+ "github.com/vulcanize/vulcanizedb/test_config"
+)
+
+var _ = Describe("Dent Repository", func() {
+ var node core.Node
+ var db *postgres.DB
+ var dentRepository dent.DentRepository
+ var headerRepository repositories.HeaderRepository
+ var headerId int64
+ var err error
+
+ BeforeEach(func() {
+ node = test_config.NewTestNode()
+ db = test_config.NewTestDB(node)
+ test_config.CleanTestDB(db)
+ dentRepository = dent.NewDentRepository(db)
+ headerRepository = repositories.NewHeaderRepository(db)
+ })
+
+ Describe("Create", func() {
+ BeforeEach(func() {
+ headerId, err = headerRepository.CreateOrUpdateHeader(core.Header{})
+ Expect(err).NotTo(HaveOccurred())
+
+ err = dentRepository.Create(headerId, test_data.DentModel)
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("persists a dent record", func() {
+ var count int
+ db.QueryRow(`SELECT count(*) FROM maker.dent`).Scan(&count)
+ Expect(count).To(Equal(1))
+
+ var dbResult dent.DentModel
+ err = db.Get(&dbResult, `SELECT bid_id, lot, bid, guy, tic, tx_idx, raw_log FROM maker.dent WHERE header_id = $1`, headerId)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(dbResult.BidId).To(Equal(test_data.DentModel.BidId))
+ Expect(dbResult.Lot).To(Equal(test_data.DentModel.Lot))
+ Expect(dbResult.Bid).To(Equal(test_data.DentModel.Bid))
+ Expect(dbResult.Guy).To(Equal(test_data.DentModel.Guy))
+ Expect(dbResult.Tic).To(Equal(test_data.DentModel.Tic))
+ Expect(dbResult.TransactionIndex).To(Equal(test_data.DentModel.TransactionIndex))
+ Expect(dbResult.Raw).To(MatchJSON(test_data.DentModel.Raw))
+ })
+
+ It("returns an error if inserting a dent record fails", func() {
+ err = dentRepository.Create(headerId, test_data.DentModel)
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(ContainSubstring("pq: duplicate key value violates unique constraint"))
+ })
+
+ It("deletes the tend record if its corresponding header record is deleted", func() {
+ var count int
+ err = db.QueryRow(`SELECT count(*) from maker.dent`).Scan(&count)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(count).To(Equal(1))
+
+ _, err = db.Exec(`DELETE FROM headers where id = $1`, headerId)
+ Expect(err).NotTo(HaveOccurred())
+
+ err = db.QueryRow(`SELECT count(*) from maker.dent`).Scan(&count)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(count).To(Equal(0))
+ })
+ })
+
+ Describe("MissingHeaders", func() {
+ var dentBlockNumber int64
+ var startingBlockNumber int64
+ var endingBlockNumber int64
+ var blockNumbers []int64
+
+ BeforeEach(func() {
+ dentBlockNumber = rand.Int63()
+ startingBlockNumber = dentBlockNumber - 1
+ endingBlockNumber = dentBlockNumber + 1
+ outOfRangeBlockNumber := dentBlockNumber + 2
+
+ blockNumbers = []int64{startingBlockNumber, dentBlockNumber, endingBlockNumber, outOfRangeBlockNumber}
+
+ var headerIds []int64
+ for _, number := range blockNumbers {
+ headerId, err := headerRepository.CreateOrUpdateHeader(core.Header{BlockNumber: number})
+ Expect(err).NotTo(HaveOccurred())
+ headerIds = append(headerIds, headerId)
+ }
+
+ err = dentRepository.Create(headerIds[1], test_data.DentModel)
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("returns header records that don't have a corresponding dents", func() {
+ missingHeaders, err := dentRepository.MissingHeaders(startingBlockNumber, endingBlockNumber)
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(missingHeaders)).To(Equal(2))
+ Expect(missingHeaders[0].BlockNumber).To(Equal(startingBlockNumber))
+ Expect(missingHeaders[1].BlockNumber).To(Equal(endingBlockNumber))
+ })
+
+ It("only returns missing headers for the given node", func() {
+ node2 := core.Node{}
+ db2 := test_config.NewTestDB(node2)
+ dentRepository2 := dent.NewDentRepository(db2)
+ headerRepository2 := repositories.NewHeaderRepository(db2)
+ var node2HeaderIds []int64
+ for _, number := range blockNumbers {
+ id, err := headerRepository2.CreateOrUpdateHeader(core.Header{BlockNumber: number})
+ node2HeaderIds = append(node2HeaderIds, id)
+ Expect(err).NotTo(HaveOccurred())
+ }
+
+ missingHeadersNode1, err := dentRepository.MissingHeaders(startingBlockNumber, endingBlockNumber)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(missingHeadersNode1)).To(Equal(2))
+ Expect(missingHeadersNode1[0].BlockNumber).To(Equal(startingBlockNumber))
+ Expect(missingHeadersNode1[1].BlockNumber).To(Equal(endingBlockNumber))
+
+ missingHeadersNode2, err := dentRepository2.MissingHeaders(startingBlockNumber, endingBlockNumber)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(missingHeadersNode2)).To(Equal(3))
+ Expect(missingHeadersNode2[0].BlockNumber).To(Equal(startingBlockNumber))
+ Expect(missingHeadersNode2[1].BlockNumber).To(Equal(dentBlockNumber))
+ Expect(missingHeadersNode2[2].BlockNumber).To(Equal(endingBlockNumber))
+ })
+ })
+})
diff --git a/pkg/transformers/dent/transformer.go b/pkg/transformers/dent/transformer.go
new file mode 100644
index 00000000..035f5eb1
--- /dev/null
+++ b/pkg/transformers/dent/transformer.go
@@ -0,0 +1,86 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dent
+
+import (
+ "log"
+
+ "github.com/ethereum/go-ethereum/common"
+
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+)
+
+type DentTransformer struct {
+ Config shared.TransformerConfig
+ Converter Converter
+ Fetcher shared.LogFetcher
+ Repository Repository
+}
+
+type DentTransformerInitializer struct {
+ Config shared.TransformerConfig
+}
+
+func (i DentTransformerInitializer) NewDentTransformer(db *postgres.DB, blockChain core.BlockChain) shared.Transformer {
+ converter := NewDentConverter()
+ fetcher := shared.NewFetcher(blockChain)
+ repository := NewDentRepository(db)
+ return DentTransformer{
+ Config: i.Config,
+ Converter: converter,
+ Fetcher: fetcher,
+ Repository: repository,
+ }
+}
+
+func (t DentTransformer) Execute() error {
+ config := t.Config
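+ // Fetch only logs whose first topic matches the dent signature.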
+ topics := [][]common.Hash{{common.HexToHash(shared.DentFunctionSignature)}}
+ headers, err := t.Repository.MissingHeaders(config.StartingBlockNumber, config.EndingBlockNumber)
+ if err != nil {
+ log.Println("Error fetching missing headers:", err)
+ return err
+ }
+
+ for _, header := range headers {
+ ethLogs, err := t.Fetcher.FetchLogs(config.ContractAddress, topics, header.BlockNumber)
+
+ if err != nil {
+ log.Println("Error fetching dent logs:", err)
+ return err
+ }
+
+ for _, ethLog := range ethLogs {
+ model, err := t.Converter.Convert(config.ContractAddress, config.ContractAbi, ethLog)
+
+ if err != nil {
+ log.Println("Error converting dent log", err)
+ return err
+ }
+
+ err = t.Repository.Create(header.Id, model)
+
+ if err != nil {
+ log.Println("Error persisting dent record", err)
+ return err
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/pkg/transformers/dent/transformer_test.go b/pkg/transformers/dent/transformer_test.go
new file mode 100644
index 00000000..01969c17
--- /dev/null
+++ b/pkg/transformers/dent/transformer_test.go
@@ -0,0 +1,131 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dent_test
+
+import (
+ "math/rand"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/fakes"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/dent"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data/mocks"
+ dent_mocks "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data/mocks/dent"
+)
+
+var _ = Describe("DentTransformer", func() {
+ var config = dent.DentConfig
+ var dentRepository dent_mocks.MockDentRepository
+ var fetcher mocks.MockLogFetcher
+ var converter dent_mocks.MockDentConverter
+ var transformer dent.DentTransformer
+
+ BeforeEach(func() {
+ dentRepository = dent_mocks.MockDentRepository{}
+ fetcher = mocks.MockLogFetcher{}
+ converter = dent_mocks.MockDentConverter{}
+ transformer = dent.DentTransformer{
+ Repository: &dentRepository,
+ Config: config,
+ Fetcher: &fetcher,
+ Converter: &converter,
+ }
+ })
+
+ It("gets missing headers", func() {
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(dentRepository.PassedStartingBlockNumber).To(Equal(config.StartingBlockNumber))
+ Expect(dentRepository.PassedEndingBlockNumber).To(Equal(config.EndingBlockNumber))
+ })
+
+ It("returns an error if fetching the missing headers fails", func() {
+ dentRepository.SetMissingHeadersError(fakes.FakeError)
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ })
+
+ It("fetches logs for each missing header", func() {
+ header1 := core.Header{BlockNumber: rand.Int63()}
+ header2 := core.Header{BlockNumber: rand.Int63()}
+ dentRepository.SetMissingHeaders([]core.Header{header1, header2})
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(fetcher.FetchedContractAddress).To(Equal(config.ContractAddress))
+ expectedTopics := [][]common.Hash{{common.HexToHash(shared.DentFunctionSignature)}}
+ Expect(fetcher.FetchedTopics).To(Equal(expectedTopics))
+ Expect(fetcher.FetchedBlocks).To(Equal([]int64{header1.BlockNumber, header2.BlockNumber}))
+ })
+
+ It("returns an error if fetching logs fails", func() {
+ dentRepository.SetMissingHeaders([]core.Header{{}})
+ fetcher.SetFetcherError(fakes.FakeError)
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(fakes.FakeError))
+ })
+
+ It("converts each eth log to a Model", func() {
+ dentRepository.SetMissingHeaders([]core.Header{{}})
+ fetcher.SetFetchedLogs([]types.Log{test_data.DentLog})
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(converter.PassedContractAddress).To(Equal(config.ContractAddress))
+ Expect(converter.PassedContractAbi).To(Equal(config.ContractAbi))
+ Expect(converter.LogsToConvert).To(Equal([]types.Log{test_data.DentLog}))
+ })
+
+ It("returns an error if converting the eth log fails", func() {
+ dentRepository.SetMissingHeaders([]core.Header{{}})
+ fetcher.SetFetchedLogs([]types.Log{test_data.DentLog})
+ converter.SetConverterError(fakes.FakeError)
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(fakes.FakeError))
+ })
+
+ It("persists each model as a Dent record", func() {
+ header1 := core.Header{Id: rand.Int63()}
+ header2 := core.Header{Id: rand.Int63()}
+ dentRepository.SetMissingHeaders([]core.Header{header1, header2})
+ fetcher.SetFetchedLogs([]types.Log{test_data.DentLog})
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(dentRepository.PassedDentModels).To(Equal([]dent.DentModel{test_data.DentModel, test_data.DentModel}))
+ Expect(dentRepository.PassedHeaderIds).To(Equal([]int64{header1.Id, header2.Id}))
+ })
+
+ It("returns an error if persisting dent record fails", func() {
+ dentRepository.SetMissingHeaders([]core.Header{{}})
+ dentRepository.SetCreateError(fakes.FakeError)
+ fetcher.SetFetchedLogs([]types.Log{test_data.DentLog})
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ })
+})
diff --git a/pkg/transformers/flip_kick/config.go b/pkg/transformers/flip_kick/config.go
index 6798d061..c5e1448f 100644
--- a/pkg/transformers/flip_kick/config.go
+++ b/pkg/transformers/flip_kick/config.go
@@ -17,7 +17,7 @@ package flip_kick
import "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
var FlipKickConfig = shared.TransformerConfig{
- ContractAddresses: "0x08cb6176addcca2e1d1ffe21bee464b72ee4cd8d", //this is a temporary address deployed locally
+ ContractAddress: shared.FlipperContractAddress,
ContractAbi: shared.FlipperABI,
Topics: []string{shared.FlipKickSignature},
StartingBlockNumber: 0,
diff --git a/pkg/transformers/flip_kick/constants.go b/pkg/transformers/flip_kick/constants.go
deleted file mode 100644
index 617f9287..00000000
--- a/pkg/transformers/flip_kick/constants.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2018 Vulcanize
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package flip_kick
-
-var FlipKickSignature = "0x8828a22eb6a18623309ad55592866c4b077989e9e8a25e1b85f9bf6f7282520f"
-var FlipperABI = "[{\"constant\":true,\"inputs\":[],\"name\":\"era\",\"outputs\":[{\"name\":\"\",\"type\":\"uint48\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"lad\",\"type\":\"address\"},{\"name\":\"gal\",\"type\":\"address\"},{\"name\":\"tab\",\"type\":\"uint256\"},{\"name\":\"lot\",\"type\":\"uint256\"},{\"name\":\"bid\",\"type\":\"uint256\"}],\"name\":\"kick\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"vat\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"bids\",\"outputs\":[{\"name\":\"bid\",\"type\":\"uint256\"},{\"name\":\"lot\",\"type\":\"uint256\"},{\"name\":\"guy\",\"type\":\"address\"},{\"name\":\"tic\",\"type\":\"uint48\"},{\"name\":\"end\",\"type\":\"uint48\"},{\"name\":\"lad\",\"type\":\"address\"},{\"name\":\"gal\",\"type\":\"address\"},{\"name\":\"tab\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"id\",\"type\":\"uint256\"},{\"name\":\"lot\",\"type\":\"uint256\"},{\"name\":\"bid\",\"type\":\"uint256\"}],\"name\":\"tend\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"ttl\",\"outputs\":[{\"name\":\"\",\"type\":\"uint48\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"id\",\"type\":\"uint256\"},{\"name\":\"lot\",\"type\":\"uint256\"},{\"name\":\"bid\",\"type\":\"uint256\"}],\"name\":\"dent\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"beg\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"ilk\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"deal\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"tau\",\"outputs\":[{\"name\":\"\",\"type\":\"uint48\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"kicks\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"tick\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"name\":\"vat_\",\"type\":\"address\"},{\"name\":\"ilk_\",\"type\":\"bytes32\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"src\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"dst\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"wad\",\"type\":\"int256\"}],\"name\":\"Move\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"src\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"dst\",\"type\":\"address\"},{\"indexed\":false,
\"name\":\"wad\",\"type\":\"int256\"},{\"indexed\":false,\"name\":\"act\",\"type\":\"bytes32\"}],\"name\":\"Push\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"ilk\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"what\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"risk\",\"type\":\"int256\"}],\"name\":\"FileIlk\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"what\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"addr\",\"type\":\"address\"}],\"name\":\"FileAddr\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"what\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"risk\",\"type\":\"int256\"}],\"name\":\"FileInt\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"what\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"risk\",\"type\":\"uint256\"}],\"name\":\"FileUint\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"ilk\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"guy\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"gem\",\"type\":\"int256\"},{\"indexed\":false,\"name\":\"dink\",\"type\":\"int256\"},{\"indexed\":false,\"name\":\"dart\",\"type\":\"int256\"},{\"indexed\":false,\"name\":\"ink\",\"type\":\"int256\"},{\"indexed\":false,\"name\":\"art\",\"type\":\"int256\"},{\"indexed\":false,\"name\":\"era\",\"type\":\"uint48\"}],\"name\":\"Frob\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"ilk\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"guy\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"gem\",\"type\":\"int256\"},{\"indexed\":false,\"name\":\"ink\",\"type\":\"int256\"},{\"indexed\":false,\"name\":\"art\",\"type\":\"int256\"},{\"indexed\":false,\"name\":\"era\",\"type\":\"uint48\"},{\"indexed\":false,\"name\":\"tab\",\"type\":\"int256\"},{\"indexed\":false,\"name\":\"flip\",\"type\":\"uint256\"}],\"name\":\"Bite\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"ilk\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"lad\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"wad\",\"type\":\"int256\"},{\"indexed\":false,\"name\":\"gem\",\"type\":\"int256\"}],\"name\":\"Slip\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"vat\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"ilk\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"lot\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"bid\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"guy\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"gal\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"end\",\"type\":\"uint48\"},{\"indexed\":false,\"name\":\"era\",\"type\":\"uint48\"},{\"indexed\":false,\"name\":\"lad\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"tab\",\"type\":\"uint256\"}],\"name\":\"FlipKick\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"pie\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"gem\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"lot\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"bid\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"guy\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"vow\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"end\",\"type\":\"uint48\"},{\"indexed\":false
,\"name\":\"era\",\"type\":\"uint48\"}],\"name\":\"FlopKick\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"pie\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"gem\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"lot\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"bid\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"guy\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"gal\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"end\",\"type\":\"uint48\"},{\"indexed\":false,\"name\":\"era\",\"type\":\"uint48\"}],\"name\":\"FlapKick\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"lot\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"bid\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"guy\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"tic\",\"type\":\"uint48\"},{\"indexed\":false,\"name\":\"era\",\"type\":\"uint48\"}],\"name\":\"Tend\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"lot\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"bid\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"guy\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"tic\",\"type\":\"uint48\"},{\"indexed\":false,\"name\":\"era\",\"type\":\"uint48\"}],\"name\":\"Dent\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"era\",\"type\":\"uint48\"}],\"name\":\"Deal\",\"type\":\"event\"}]"
diff --git a/pkg/transformers/flip_kick/converter.go b/pkg/transformers/flip_kick/converter.go
index a1f2b1bc..abf1feba 100644
--- a/pkg/transformers/flip_kick/converter.go
+++ b/pkg/transformers/flip_kick/converter.go
@@ -17,7 +17,6 @@ package flip_kick
import (
"encoding/json"
"errors"
- "strings"
"time"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
@@ -25,7 +24,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/vulcanize/vulcanizedb/pkg/geth"
- "github.com/vulcanize/vulcanizedb/pkg/transformers/utilities"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
)
type Converter interface {
@@ -45,7 +44,7 @@ func (FlipKickConverter) ToEntity(contractAddress string, contractAbi string, et
contract := bind.NewBoundContract(address, abi, nil, nil, nil)
- err = contract.UnpackLog(entity, "FlipKick", ethLog)
+ err = contract.UnpackLog(entity, "Kick", ethLog)
if err != nil {
return entity, err
}
@@ -54,25 +53,18 @@ func (FlipKickConverter) ToEntity(contractAddress string, contractAbi string, et
}
func (FlipKickConverter) ToModel(flipKick FlipKickEntity) (FlipKickModel, error) {
- //TODO: Confirm if the following values can be/ever will be nil
-
if flipKick.Id == nil {
return FlipKickModel{}, errors.New("FlipKick log ID cannot be nil.")
}
id := flipKick.Id.String()
- vat := strings.ToLower(flipKick.Vat.String())
- ilk := strings.ToLower(common.ToHex(flipKick.Ilk[:]))
- lot := utilities.ConvertNilToEmptyString(flipKick.Lot.String())
- bid := utilities.ConvertNilToEmptyString(flipKick.Bid.String())
- guy := strings.ToLower(flipKick.Guy.String())
- gal := strings.ToLower(flipKick.Gal.String())
- endValue := utilities.ConvertNilToZeroTimeValue(flipKick.End)
+ lot := shared.ConvertNilToEmptyString(flipKick.Lot.String())
+ bid := shared.ConvertNilToEmptyString(flipKick.Bid.String())
+ gal := flipKick.Gal.String()
+ endValue := shared.ConvertNilToZeroTimeValue(flipKick.End)
end := time.Unix(endValue, 0)
- eraValue := utilities.ConvertNilToZeroTimeValue(flipKick.Era)
- era := time.Unix(eraValue, 0)
- lad := strings.ToLower(flipKick.Lad.String())
- tab := utilities.ConvertNilToEmptyString(flipKick.Tab.String())
+ urn := common.BytesToAddress(flipKick.Urn[:common.AddressLength]).String()
+ tab := shared.ConvertNilToEmptyString(flipKick.Tab.String())
rawLogJson, err := json.Marshal(flipKick.Raw)
if err != nil {
return FlipKickModel{}, err
@@ -81,15 +73,11 @@ func (FlipKickConverter) ToModel(flipKick FlipKickEntity) (FlipKickModel, error)
return FlipKickModel{
Id: id,
- Vat: vat,
- Ilk: ilk,
Lot: lot,
Bid: bid,
- Guy: guy,
Gal: gal,
End: end,
- Era: era,
- Lad: lad,
+ Urn: urn,
Tab: tab,
Raw: rawLogString,
}, nil
diff --git a/pkg/transformers/flip_kick/converter_test.go b/pkg/transformers/flip_kick/converter_test.go
index a75dcd02..579f0d1e 100644
--- a/pkg/transformers/flip_kick/converter_test.go
+++ b/pkg/transformers/flip_kick/converter_test.go
@@ -33,25 +33,21 @@ var _ = Describe("FlipKick Converter", func() {
Describe("ToEntity", func() {
It("converts an Eth Log to a FlipKickEntity", func() {
- entity, err := converter.ToEntity(test_data.FlipAddress, shared.FlipperABI, test_data.EthFlipKickLog)
+ entity, err := converter.ToEntity(shared.FlipperContractAddress, shared.FlipperABI, test_data.EthFlipKickLog)
Expect(err).NotTo(HaveOccurred())
Expect(entity.Id).To(Equal(test_data.FlipKickEntity.Id))
- Expect(entity.Vat).To(Equal(test_data.FlipKickEntity.Vat))
- Expect(entity.Ilk).To(Equal(test_data.FlipKickEntity.Ilk))
Expect(entity.Lot).To(Equal(test_data.FlipKickEntity.Lot))
Expect(entity.Bid).To(Equal(test_data.FlipKickEntity.Bid))
- Expect(entity.Guy).To(Equal(test_data.FlipKickEntity.Guy))
Expect(entity.Gal).To(Equal(test_data.FlipKickEntity.Gal))
Expect(entity.End).To(Equal(test_data.FlipKickEntity.End))
- Expect(entity.Era).To(Equal(test_data.FlipKickEntity.Era))
- Expect(entity.Lad).To(Equal(test_data.FlipKickEntity.Lad))
+ Expect(entity.Urn).To(Equal(test_data.FlipKickEntity.Urn))
Expect(entity.Tab).To(Equal(test_data.FlipKickEntity.Tab))
Expect(entity.Raw).To(Equal(test_data.FlipKickEntity.Raw))
})
It("returns an error if converting log to entity fails", func() {
- _, err := converter.ToEntity(test_data.FlipAddress, "error abi", test_data.EthFlipKickLog)
+ _, err := converter.ToEntity(shared.FlipperContractAddress, "error abi", test_data.EthFlipKickLog)
Expect(err).To(HaveOccurred())
})
@@ -59,7 +55,6 @@ var _ = Describe("FlipKick Converter", func() {
Describe("ToModel", func() {
var emptyAddressHex = "0x0000000000000000000000000000000000000000"
- var emptyByteArrayHex = "0x0000000000000000000000000000000000000000000000000000000000000000"
var emptyString = ""
var emptyTime = time.Unix(0, 0)
var emptyEntity = flip_kick.FlipKickEntity{}
@@ -87,15 +82,11 @@ var _ = Describe("FlipKick Converter", func() {
Expect(err).NotTo(HaveOccurred())
Expect(model.Id).To(Equal("1"))
- Expect(model.Vat).To(Equal(emptyAddressHex))
- Expect(model.Ilk).To(Equal(emptyByteArrayHex))
Expect(model.Lot).To(Equal(emptyString))
Expect(model.Bid).To(Equal(emptyString))
- Expect(model.Guy).To(Equal(emptyAddressHex))
Expect(model.Gal).To(Equal(emptyAddressHex))
Expect(model.End).To(Equal(emptyTime))
- Expect(model.Era).To(Equal(emptyTime))
- Expect(model.Lad).To(Equal(emptyAddressHex))
+ Expect(model.Urn).To(Equal(emptyAddressHex))
Expect(model.Tab).To(Equal(emptyString))
Expect(model.Raw).To(Equal(emptyRawLog))
})
diff --git a/pkg/transformers/flip_kick/entity.go b/pkg/transformers/flip_kick/entity.go
index 0d78ecd8..b1b43ad3 100644
--- a/pkg/transformers/flip_kick/entity.go
+++ b/pkg/transformers/flip_kick/entity.go
@@ -23,15 +23,11 @@ import (
type FlipKickEntity struct {
Id *big.Int
- Vat common.Address
- Ilk [32]byte
Lot *big.Int
Bid *big.Int
- Guy common.Address
Gal common.Address
End *big.Int
- Era *big.Int
- Lad common.Address
+ Urn [32]byte
Tab *big.Int
Raw types.Log
}
diff --git a/pkg/transformers/flip_kick/integration_test.go b/pkg/transformers/flip_kick/integration_test.go
index 911f7e10..e5141d12 100644
--- a/pkg/transformers/flip_kick/integration_test.go
+++ b/pkg/transformers/flip_kick/integration_test.go
@@ -17,51 +17,18 @@ package flip_kick_test
import (
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/ethclient"
- "github.com/ethereum/go-ethereum/rpc"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/geth"
- "github.com/vulcanize/vulcanizedb/pkg/geth/client"
- rpc2 "github.com/vulcanize/vulcanizedb/pkg/geth/converters/rpc"
- "github.com/vulcanize/vulcanizedb/pkg/geth/node"
"github.com/vulcanize/vulcanizedb/pkg/transformers/flip_kick"
"github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
"github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
- "github.com/vulcanize/vulcanizedb/test_config"
)
var _ = Describe("Integration tests", func() {
- XIt("Fetches FlipKickEntity event logs from a local test chain", func() {
- ipcPath := test_config.TestClient.IPCPath
-
- rawRpcClient, err := rpc.Dial(ipcPath)
- Expect(err).NotTo(HaveOccurred())
-
- rpcClient := client.NewRpcClient(rawRpcClient, ipcPath)
- ethClient := ethclient.NewClient(rawRpcClient)
- blockChainClient := client.NewEthClient(ethClient)
- realNode := node.MakeNode(rpcClient)
- transactionConverter := rpc2.NewRpcTransactionConverter(ethClient)
- realBlockChain := geth.NewBlockChain(blockChainClient, realNode, transactionConverter)
- realFetcher := shared.NewFetcher(realBlockChain)
- topic0 := common.HexToHash(shared.FlipKickSignature)
- topics := [][]common.Hash{{topic0}}
-
- result, err := realFetcher.FetchLogs(test_data.FlipAddress, topics, test_data.FlipKickBlockNumber)
- Expect(err).NotTo(HaveOccurred())
-
- Expect(len(result) > 0).To(BeTrue())
- Expect(result[0].Address).To(Equal(test_data.EthFlipKickLog.Address))
- Expect(result[0].TxHash).To(Equal(test_data.EthFlipKickLog.TxHash))
- Expect(result[0].BlockNumber).To(Equal(test_data.EthFlipKickLog.BlockNumber))
- Expect(result[0].Topics).To(Equal(test_data.EthFlipKickLog.Topics))
- Expect(result[0].Index).To(Equal(test_data.EthFlipKickLog.Index))
- })
-
It("unpacks an event log", func() {
- address := common.HexToAddress(test_data.FlipAddress)
+ address := common.HexToAddress(shared.FlipperContractAddress)
abi, err := geth.ParseAbi(shared.FlipperABI)
Expect(err).NotTo(HaveOccurred())
@@ -70,20 +37,16 @@ var _ = Describe("Integration tests", func() {
var eventLog = test_data.EthFlipKickLog
- err = contract.UnpackLog(entity, "FlipKick", eventLog)
+ err = contract.UnpackLog(entity, "Kick", eventLog)
Expect(err).NotTo(HaveOccurred())
expectedEntity := test_data.FlipKickEntity
Expect(entity.Id).To(Equal(expectedEntity.Id))
- Expect(entity.Vat).To(Equal(expectedEntity.Vat))
- Expect(entity.Ilk).To(Equal(expectedEntity.Ilk))
Expect(entity.Lot).To(Equal(expectedEntity.Lot))
Expect(entity.Bid).To(Equal(expectedEntity.Bid))
- Expect(entity.Guy).To(Equal(expectedEntity.Guy))
Expect(entity.Gal).To(Equal(expectedEntity.Gal))
Expect(entity.End).To(Equal(expectedEntity.End))
- Expect(entity.Era).To(Equal(expectedEntity.Era))
- Expect(entity.Lad).To(Equal(expectedEntity.Lad))
+ Expect(entity.Urn).To(Equal(expectedEntity.Urn))
Expect(entity.Tab).To(Equal(expectedEntity.Tab))
})
})
diff --git a/pkg/transformers/flip_kick/model.go b/pkg/transformers/flip_kick/model.go
index d66c73f4..0904ee22 100644
--- a/pkg/transformers/flip_kick/model.go
+++ b/pkg/transformers/flip_kick/model.go
@@ -18,15 +18,11 @@ import "time"
type FlipKickModel struct {
Id string
- Vat string
- Ilk string
Lot string
Bid string
- Guy string
Gal string
End time.Time
- Era time.Time
- Lad string
+ Urn string
Tab string
Raw string `db:"raw_log"`
}
diff --git a/pkg/transformers/flip_kick/repository.go b/pkg/transformers/flip_kick/repository.go
index 60443d29..14b20fa4 100644
--- a/pkg/transformers/flip_kick/repository.go
+++ b/pkg/transformers/flip_kick/repository.go
@@ -35,9 +35,9 @@ func NewFlipKickRepository(db *postgres.DB) FlipKickRepository {
}
func (fkr FlipKickRepository) Create(headerId int64, flipKick FlipKickModel) error {
_, err := fkr.DB.Exec(
- `INSERT into maker.flip_kick (header_id, id, vat, ilk, lot, bid, guy, gal, "end", era, lad, tab, raw_log)
- VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)`,
- headerId, flipKick.Id, flipKick.Vat, flipKick.Ilk, flipKick.Lot, flipKick.Bid, flipKick.Guy, flipKick.Gal, flipKick.End, flipKick.Era, flipKick.Lad, flipKick.Tab, flipKick.Raw,
+ `INSERT INTO maker.flip_kick (header_id, id, lot, bid, gal, "end", urn, tab, raw_log)
+ VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9)`,
+ headerId, flipKick.Id, flipKick.Lot, flipKick.Bid, flipKick.Gal, flipKick.End, flipKick.Urn, flipKick.Tab, flipKick.Raw,
)
if err != nil {
diff --git a/pkg/transformers/flip_kick/repository_test.go b/pkg/transformers/flip_kick/repository_test.go
index cc50e1ce..b2998f5a 100644
--- a/pkg/transformers/flip_kick/repository_test.go
+++ b/pkg/transformers/flip_kick/repository_test.go
@@ -65,17 +65,13 @@ var _ = Describe("FlipKick Repository", func() {
Expect(err).NotTo(HaveOccurred())
Expect(dbResult.HeaderId).To(Equal(headerId))
Expect(dbResult.Id).To(Equal(flipKick.Id))
- Expect(dbResult.Vat).To(Equal(flipKick.Vat))
- Expect(dbResult.Ilk).To(Equal(flipKick.Ilk))
Expect(dbResult.Lot).To(Equal(flipKick.Lot))
Expect(dbResult.Bid).To(Equal(flipKick.Bid))
- Expect(dbResult.Guy).To(Equal(flipKick.Guy))
Expect(dbResult.Gal).To(Equal(flipKick.Gal))
Expect(dbResult.End.Equal(flipKick.End)).To(BeTrue())
- Expect(dbResult.Era.Equal(flipKick.Era)).To(BeTrue())
- Expect(dbResult.Lad).To(Equal(flipKick.Lad))
+ Expect(dbResult.Urn).To(Equal(flipKick.Urn))
Expect(dbResult.Tab).To(Equal(flipKick.Tab))
- Expect(dbResult.Raw).To(Equal(flipKick.Raw))
+ Expect(dbResult.Raw).To(MatchJSON(flipKick.Raw))
})
It("returns an error if inserting the flip_kick record fails", func() {
diff --git a/pkg/transformers/flip_kick/transformer.go b/pkg/transformers/flip_kick/transformer.go
index 0f700b57..a7331073 100644
--- a/pkg/transformers/flip_kick/transformer.go
+++ b/pkg/transformers/flip_kick/transformer.go
@@ -91,13 +91,13 @@ func (fkt FlipKickTransformer) Execute() error {
log.Printf("Fetching event logs for %d headers \n", len(headers))
var resultingErrors []error
for _, header := range headers {
- ethLogs, err := fkt.Fetcher.FetchLogs(config.ContractAddresses, topics, header.BlockNumber)
+ ethLogs, err := fkt.Fetcher.FetchLogs(config.ContractAddress, topics, header.BlockNumber)
if err != nil {
resultingErrors = append(resultingErrors, newTransformerError(err, header.BlockNumber, FetcherError))
}
for _, ethLog := range ethLogs {
- entity, err := fkt.Converter.ToEntity(config.ContractAddresses, config.ContractAbi, ethLog)
+ entity, err := fkt.Converter.ToEntity(config.ContractAddress, config.ContractAbi, ethLog)
if err != nil {
resultingErrors = append(resultingErrors, newTransformerError(err, header.BlockNumber, LogToEntityError))
}
diff --git a/pkg/transformers/flip_kick/transformer_test.go b/pkg/transformers/flip_kick/transformer_test.go
index 0f14665a..7e4350df 100644
--- a/pkg/transformers/flip_kick/transformer_test.go
+++ b/pkg/transformers/flip_kick/transformer_test.go
@@ -54,7 +54,7 @@ var _ = Describe("FlipKick Transformer", func() {
startingBlockNumber := rand.Int63()
testConfig = shared.TransformerConfig{
- ContractAddresses: "0x12345",
+ ContractAddress: "0x12345",
ContractAbi: "test abi",
Topics: []string{shared.FlipKickSignature},
StartingBlockNumber: startingBlockNumber,
@@ -83,7 +83,7 @@ var _ = Describe("FlipKick Transformer", func() {
err := transformer.Execute()
Expect(err).NotTo(HaveOccurred())
- Expect(fetcher.FetchedContractAddress).To(Equal(testConfig.ContractAddresses))
+ Expect(fetcher.FetchedContractAddress).To(Equal(testConfig.ContractAddress))
Expect(fetcher.FetchedTopics).To(Equal(expectedTopics))
Expect(fetcher.FetchedBlocks).To(Equal([]int64{blockNumber}))
})
@@ -100,7 +100,7 @@ var _ = Describe("FlipKick Transformer", func() {
err := transformer.Execute()
Expect(err).NotTo(HaveOccurred())
- Expect(converter.ConverterContract).To(Equal(testConfig.ContractAddresses))
+ Expect(converter.ConverterContract).To(Equal(testConfig.ContractAddress))
Expect(converter.ConverterAbi).To(Equal(testConfig.ContractAbi))
Expect(converter.LogsToConvert).To(Equal(logs))
Expect(converter.EntitiesToConvert).To(Equal([]flip_kick.FlipKickEntity{test_data.FlipKickEntity}))
diff --git a/pkg/transformers/frob/config.go b/pkg/transformers/frob/config.go
index cd824f06..8c934544 100644
--- a/pkg/transformers/frob/config.go
+++ b/pkg/transformers/frob/config.go
@@ -17,9 +17,9 @@ package frob
import "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
var FrobConfig = shared.TransformerConfig{
- ContractAddresses: "0xff3f2400f1600f3f493a9a92704a29b96795af1a", //this is a temporary address deployed locally
- ContractAbi: FrobABI,
- Topics: []string{FrobEventSignature},
+ ContractAddress: shared.PitContractAddress,
+ ContractAbi: shared.PitABI,
+ Topics: []string{shared.FrobSignature},
StartingBlockNumber: 0,
EndingBlockNumber: 100,
}
diff --git a/pkg/transformers/frob/constants.go b/pkg/transformers/frob/constants.go
deleted file mode 100644
index 7b8e2e8a..00000000
--- a/pkg/transformers/frob/constants.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2018 Vulcanize
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package frob
-
-var (
- FrobABI = `[{"constant":true,"inputs":[],"name":"vat","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"live","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"drip","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"Line","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"wards","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"","type":"bytes32"}],"name":"ilks","outputs":[{"name":"spot","type":"uint256"},{"name":"line","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"inputs":[{"name":"vat_","type":"address"}],"payable":false,"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"name":"ilk","type":"bytes32"},{"indexed":true,"name":"lad","type":"bytes32"},{"indexed":false,"name":"ink","type":"uint256"},{"indexed":false,"name":"art","type":"uint256"},{"indexed":false,"name":"dink","type":"int256"},{"indexed":false,"name":"dart","type":"int256"},{"indexed":false,"name":"iArt","type":"uint256"}],"name":"Frob","type":"event"},{"anonymous":true,"inputs":[{"indexed":true,"name":"sig","type":"bytes4"},{"indexed":true,"name":"guy","type":"address"},{"indexed":true,"name":"foo","type":"bytes32"},{"indexed":true,"name":"bar","type":"bytes32"},{"indexed":false,"name":"wad","type":"uint256"},{"indexed":false,"name":"fax","type":"bytes"}],"name":"LogNote","type":"event"},{"constant":false,"inputs":[{"name":"guy","type":"address"}],"name":"rely","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"guy","type":"address"}],"name":"deny","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"ilk","type":"bytes32"},{"name":"what","type":"bytes32"},{"name":"data","type":"uint256"}],"name":"file","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"what","type":"bytes32"},{"name":"data","type":"uint256"}],"name":"file","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"what","type":"bytes32"},{"name":"data","type":"address"}],"name":"file","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"ilk","type":"bytes32"},{"name":"dink","type":"int256"},{"name":"dart","type":"int256"}],"name":"frob","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"}]`
- FrobEventSignature = "0x6cedf1d3a466a3d6bab04887b1642177bf6dbf1daa737c2e8f639cd0b020d9d0"
-)
diff --git a/pkg/transformers/frob/converter.go b/pkg/transformers/frob/converter.go
index 42af3e25..c35cdba5 100644
--- a/pkg/transformers/frob/converter.go
+++ b/pkg/transformers/frob/converter.go
@@ -19,16 +19,16 @@ import (
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
+ "encoding/json"
"github.com/vulcanize/vulcanizedb/pkg/geth"
)
type Converter interface {
ToEntity(contractAddress string, contractAbi string, ethLog types.Log) (FrobEntity, error)
- ToModel(flipKick FrobEntity) FrobModel
+ ToModel(frob FrobEntity) (FrobModel, error)
}
-type FrobConverter struct {
-}
+type FrobConverter struct{}
func (FrobConverter) ToEntity(contractAddress string, contractAbi string, ethLog types.Log) (FrobEntity, error) {
entity := FrobEntity{}
@@ -39,17 +39,25 @@ func (FrobConverter) ToEntity(contractAddress string, contractAbi string, ethLog
}
contract := bind.NewBoundContract(address, abi, nil, nil, nil)
err = contract.UnpackLog(&entity, "Frob", ethLog)
+ entity.TransactionIndex = ethLog.TxIndex
+ entity.Raw = ethLog
return entity, err
}
-func (FrobConverter) ToModel(frob FrobEntity) FrobModel {
- return FrobModel{
- Ilk: frob.Ilk[:],
- Lad: frob.Lad[:],
- Dink: frob.Dink.String(),
- Dart: frob.Dart.String(),
- Ink: frob.Ink.String(),
- Art: frob.Art.String(),
- IArt: frob.IArt.String(),
+func (FrobConverter) ToModel(frob FrobEntity) (FrobModel, error) {
+ rawLog, err := json.Marshal(frob.Raw)
+ if err != nil {
+ return FrobModel{}, err
}
+ return FrobModel{
+ Ilk: frob.Ilk[:],
+ Urn: frob.Urn[:],
+ Ink: frob.Ink.String(),
+ Art: frob.Art.String(),
+ Dink: frob.Dink.String(),
+ Dart: frob.Dart.String(),
+ IArt: frob.IArt.String(),
+ TransactionIndex: frob.TransactionIndex,
+ Raw: rawLog,
+ }, nil
}
diff --git a/pkg/transformers/frob/converter_test.go b/pkg/transformers/frob/converter_test.go
index 29279946..9c190fdc 100644
--- a/pkg/transformers/frob/converter_test.go
+++ b/pkg/transformers/frob/converter_test.go
@@ -19,6 +19,7 @@ import (
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/transformers/frob"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
"github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
)
@@ -26,7 +27,7 @@ var _ = Describe("Frob converter", func() {
It("converts a log to an entity", func() {
converter := frob.FrobConverter{}
- entity, err := converter.ToEntity(test_data.TemporaryFrobAddress, frob.FrobABI, test_data.EthFrobLog)
+ entity, err := converter.ToEntity(shared.PitContractAddress, shared.PitABI, test_data.EthFrobLog)
Expect(err).NotTo(HaveOccurred())
Expect(entity).To(Equal(test_data.FrobEntity))
@@ -35,8 +36,9 @@ var _ = Describe("Frob converter", func() {
It("converts an entity to a model", func() {
converter := frob.FrobConverter{}
- model := converter.ToModel(test_data.FrobEntity)
+ model, err := converter.ToModel(test_data.FrobEntity)
+ Expect(err).NotTo(HaveOccurred())
Expect(model).To(Equal(test_data.FrobModel))
})
})
diff --git a/pkg/transformers/frob/entity.go b/pkg/transformers/frob/entity.go
index 0010d558..c150219e 100644
--- a/pkg/transformers/frob/entity.go
+++ b/pkg/transformers/frob/entity.go
@@ -16,14 +16,18 @@ package frob
import (
"math/big"
+
+ "github.com/ethereum/go-ethereum/core/types"
)
type FrobEntity struct {
- Ilk [32]byte
- Lad [32]byte
- Ink *big.Int
- Art *big.Int
- Dink *big.Int
- Dart *big.Int
- IArt *big.Int
+ Ilk [32]byte
+ Urn [32]byte
+ Ink *big.Int
+ Art *big.Int
+ Dink *big.Int
+ Dart *big.Int
+ IArt *big.Int
+ TransactionIndex uint
+ Raw types.Log
}
diff --git a/pkg/transformers/frob/integration_test.go b/pkg/transformers/frob/integration_test.go
index 0f4c6988..02d47968 100644
--- a/pkg/transformers/frob/integration_test.go
+++ b/pkg/transformers/frob/integration_test.go
@@ -46,14 +46,14 @@ var _ = Describe("Integration tests", func() {
transactionConverter := vRpc.NewRpcTransactionConverter(ethClient)
realBlockChain := geth.NewBlockChain(blockChainClient, realNode, transactionConverter)
realFetcher := shared.NewFetcher(realBlockChain)
- topic0 := common.HexToHash(frob.FrobEventSignature)
+ topic0 := common.HexToHash(shared.FrobSignature)
topics := [][]common.Hash{{topic0}}
- result, err := realFetcher.FetchLogs(test_data.TemporaryFrobAddress, topics, int64(12))
+ result, err := realFetcher.FetchLogs(shared.PitContractAddress, topics, int64(12))
Expect(err).NotTo(HaveOccurred())
Expect(len(result) > 0).To(BeTrue())
- Expect(result[0].Address).To(Equal(common.HexToAddress(test_data.TemporaryFrobAddress)))
+ Expect(result[0].Address).To(Equal(common.HexToAddress(shared.PitContractAddress)))
Expect(result[0].TxHash).To(Equal(test_data.EthFrobLog.TxHash))
Expect(result[0].BlockNumber).To(Equal(test_data.EthFrobLog.BlockNumber))
Expect(result[0].Topics).To(Equal(test_data.EthFrobLog.Topics))
@@ -61,8 +61,8 @@ var _ = Describe("Integration tests", func() {
})
It("unpacks an event log", func() {
- address := common.HexToAddress(test_data.TemporaryFrobAddress)
- abi, err := geth.ParseAbi(frob.FrobABI)
+ address := common.HexToAddress(shared.PitContractAddress)
+ abi, err := geth.ParseAbi(shared.PitABI)
Expect(err).NotTo(HaveOccurred())
contract := bind.NewBoundContract(address, abi, nil, nil, nil)
@@ -78,6 +78,6 @@ var _ = Describe("Integration tests", func() {
Expect(entity.IArt).To(Equal(expectedEntity.IArt))
Expect(entity.Ilk).To(Equal(expectedEntity.Ilk))
Expect(entity.Ink).To(Equal(expectedEntity.Ink))
- Expect(entity.Lad).To(Equal(expectedEntity.Lad))
+ Expect(entity.Urn).To(Equal(expectedEntity.Urn))
})
})
diff --git a/pkg/transformers/frob/model.go b/pkg/transformers/frob/model.go
index 90a1b3eb..566fe8e3 100644
--- a/pkg/transformers/frob/model.go
+++ b/pkg/transformers/frob/model.go
@@ -15,11 +15,13 @@
package frob
type FrobModel struct {
- Ilk []byte
- Lad []byte
- Ink string
- Art string
- Dink string
- Dart string
- IArt string
+ Ilk []byte
+ Urn []byte
+ Ink string
+ Art string
+ Dink string
+ Dart string
+ IArt string
+ TransactionIndex uint `db:"tx_idx"`
+ Raw []byte `db:"raw_log"`
}
diff --git a/pkg/transformers/frob/repository.go b/pkg/transformers/frob/repository.go
index 0d9bb922..f2d60b31 100644
--- a/pkg/transformers/frob/repository.go
+++ b/pkg/transformers/frob/repository.go
@@ -20,7 +20,7 @@ import (
)
type Repository interface {
- Create(headerID int64, transactionIndex uint, model FrobModel) error
+ Create(headerID int64, model FrobModel) error
MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error)
}
@@ -32,10 +32,10 @@ func NewFrobRepository(db *postgres.DB) FrobRepository {
return FrobRepository{db: db}
}
-func (repository FrobRepository) Create(headerID int64, transactionIndex uint, model FrobModel) error {
- _, err := repository.db.Exec(`INSERT INTO maker.frob (header_id, tx_idx, art, dart, dink, iart, ilk, ink, lad)
- VALUES($1, $2, $3::NUMERIC, $4::NUMERIC, $5::NUMERIC, $6::NUMERIC, $7, $8::NUMERIC, $9)`,
- headerID, transactionIndex, model.Art, model.Dart, model.Dink, model.IArt, model.Ilk, model.Ink, model.Lad)
+func (repository FrobRepository) Create(headerID int64, model FrobModel) error {
+ _, err := repository.db.Exec(`INSERT INTO maker.frob (header_id, art, dart, dink, iart, ilk, ink, urn, raw_log, tx_idx)
+ VALUES($1, $2::NUMERIC, $3::NUMERIC, $4::NUMERIC, $5::NUMERIC, $6, $7::NUMERIC, $8, $9, $10)`,
+ headerID, model.Art, model.Dart, model.Dink, model.IArt, model.Ilk, model.Ink, model.Urn, model.Raw, model.TransactionIndex)
return err
}
diff --git a/pkg/transformers/frob/repository_test.go b/pkg/transformers/frob/repository_test.go
index cd7378f2..bfba9eed 100644
--- a/pkg/transformers/frob/repository_test.go
+++ b/pkg/transformers/frob/repository_test.go
@@ -30,56 +30,61 @@ import (
var _ = Describe("Frob repository", func() {
Describe("Create", func() {
It("adds a frob", func() {
- node := core.Node{}
- db := test_config.NewTestDB(node)
+ db := test_config.NewTestDB(core.Node{})
test_config.CleanTestDB(db)
headerRepository := repositories.NewHeaderRepository(db)
headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{})
Expect(err).NotTo(HaveOccurred())
frobRepository := frob.NewFrobRepository(db)
- err = frobRepository.Create(headerID, 123, test_data.FrobModel)
+ err = frobRepository.Create(headerID, test_data.FrobModel)
Expect(err).NotTo(HaveOccurred())
var dbFrob frob.FrobModel
- err = db.Get(&dbFrob, `SELECT art, dart, dink, iart, ilk, ink, lad FROM maker.frob WHERE header_id = $1`, headerID)
+ err = db.Get(&dbFrob, `SELECT art, dart, dink, iart, ilk, ink, urn, tx_idx, raw_log FROM maker.frob WHERE header_id = $1`, headerID)
Expect(err).NotTo(HaveOccurred())
- Expect(dbFrob).To(Equal(test_data.FrobModel))
+ Expect(dbFrob.Ilk).To(Equal(test_data.FrobModel.Ilk))
+ Expect(dbFrob.Urn).To(Equal(test_data.FrobModel.Urn))
+ Expect(dbFrob.Ink).To(Equal(test_data.FrobModel.Ink))
+ Expect(dbFrob.Art).To(Equal(test_data.FrobModel.Art))
+ Expect(dbFrob.Dink).To(Equal(test_data.FrobModel.Dink))
+ Expect(dbFrob.Dart).To(Equal(test_data.FrobModel.Dart))
+ Expect(dbFrob.IArt).To(Equal(test_data.FrobModel.IArt))
+ Expect(dbFrob.TransactionIndex).To(Equal(test_data.FrobModel.TransactionIndex))
+ Expect(dbFrob.Raw).To(MatchJSON(test_data.FrobModel.Raw))
})
It("does not duplicate frob events", func() {
- node := core.Node{}
- db := test_config.NewTestDB(node)
+ db := test_config.NewTestDB(core.Node{})
test_config.CleanTestDB(db)
headerRepository := repositories.NewHeaderRepository(db)
headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{})
Expect(err).NotTo(HaveOccurred())
frobRepository := frob.NewFrobRepository(db)
- err = frobRepository.Create(headerID, 123, test_data.FrobModel)
+ err = frobRepository.Create(headerID, test_data.FrobModel)
Expect(err).NotTo(HaveOccurred())
- err = frobRepository.Create(headerID, 123, test_data.FrobModel)
+ err = frobRepository.Create(headerID, test_data.FrobModel)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("pq: duplicate key value violates unique constraint"))
})
It("removes frob if corresponding header is deleted", func() {
- node := core.Node{}
- db := test_config.NewTestDB(node)
+ db := test_config.NewTestDB(core.Node{})
test_config.CleanTestDB(db)
headerRepository := repositories.NewHeaderRepository(db)
headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{})
Expect(err).NotTo(HaveOccurred())
frobRepository := frob.NewFrobRepository(db)
- err = frobRepository.Create(headerID, 123, test_data.FrobModel)
+ err = frobRepository.Create(headerID, test_data.FrobModel)
Expect(err).NotTo(HaveOccurred())
_, err = db.Exec(`DELETE FROM headers WHERE id = $1`, headerID)
Expect(err).NotTo(HaveOccurred())
var dbFrob frob.FrobModel
- err = db.Get(&dbFrob, `SELECT art, iart, ilk, ink, lad FROM maker.frob WHERE header_id = $1`, headerID)
+ err = db.Get(&dbFrob, `SELECT art, iart, ilk, ink, urn, tx_idx, raw_log FROM maker.frob WHERE header_id = $1`, headerID)
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(sql.ErrNoRows))
})
@@ -87,8 +92,7 @@ var _ = Describe("Frob repository", func() {
Describe("MissingHeaders", func() {
It("returns headers with no associated frob event", func() {
- node := core.Node{}
- db := test_config.NewTestDB(node)
+ db := test_config.NewTestDB(core.Node{})
test_config.CleanTestDB(db)
headerRepository := repositories.NewHeaderRepository(db)
startingBlockNumber := int64(1)
@@ -102,7 +106,7 @@ var _ = Describe("Frob repository", func() {
Expect(err).NotTo(HaveOccurred())
}
frobRepository := frob.NewFrobRepository(db)
- err := frobRepository.Create(headerIDs[1], 123, test_data.FrobModel)
+ err := frobRepository.Create(headerIDs[1], test_data.FrobModel)
Expect(err).NotTo(HaveOccurred())
headers, err := frobRepository.MissingHeaders(startingBlockNumber, endingBlockNumber)
@@ -114,13 +118,11 @@ var _ = Describe("Frob repository", func() {
})
It("only returns headers associated with the current node", func() {
- nodeOne := core.Node{}
- db := test_config.NewTestDB(nodeOne)
+ db := test_config.NewTestDB(core.Node{})
test_config.CleanTestDB(db)
blockNumbers := []int64{1, 2, 3}
headerRepository := repositories.NewHeaderRepository(db)
- nodeTwo := core.Node{ID: "second"}
- dbTwo := test_config.NewTestDB(nodeTwo)
+ dbTwo := test_config.NewTestDB(core.Node{ID: "second"})
headerRepositoryTwo := repositories.NewHeaderRepository(dbTwo)
var headerIDs []int64
for _, n := range blockNumbers {
@@ -132,7 +134,7 @@ var _ = Describe("Frob repository", func() {
}
frobRepository := frob.NewFrobRepository(db)
frobRepositoryTwo := frob.NewFrobRepository(dbTwo)
- err := frobRepository.Create(headerIDs[0], 0, test_data.FrobModel)
+ err := frobRepository.Create(headerIDs[0], test_data.FrobModel)
Expect(err).NotTo(HaveOccurred())
nodeOneMissingHeaders, err := frobRepository.MissingHeaders(blockNumbers[0], blockNumbers[len(blockNumbers)-1])
diff --git a/pkg/transformers/frob/transformer.go b/pkg/transformers/frob/transformer.go
index f72d2b08..3460bac2 100644
--- a/pkg/transformers/frob/transformer.go
+++ b/pkg/transformers/frob/transformer.go
@@ -51,18 +51,21 @@ func (transformer FrobTransformer) Execute() error {
return err
}
for _, header := range missingHeaders {
- topics := [][]common.Hash{{common.HexToHash(FrobEventSignature)}}
- matchingLogs, err := transformer.Fetcher.FetchLogs(FrobConfig.ContractAddresses, topics, header.BlockNumber)
+ topics := [][]common.Hash{{common.HexToHash(shared.FrobSignature)}}
+ matchingLogs, err := transformer.Fetcher.FetchLogs(FrobConfig.ContractAddress, topics, header.BlockNumber)
if err != nil {
return err
}
for _, log := range matchingLogs {
- entity, err := transformer.Converter.ToEntity(FrobConfig.ContractAddresses, FrobConfig.ContractAbi, log)
+ entity, err := transformer.Converter.ToEntity(FrobConfig.ContractAddress, FrobConfig.ContractAbi, log)
if err != nil {
return err
}
- model := transformer.Converter.ToModel(entity)
- err = transformer.Repository.Create(header.Id, log.TxIndex, model)
+ model, err := transformer.Converter.ToModel(entity)
+ if err != nil {
+ return err
+ }
+ err = transformer.Repository.Create(header.Id, model)
if err != nil {
return err
}
diff --git a/pkg/transformers/frob/transformer_test.go b/pkg/transformers/frob/transformer_test.go
index ca7dfe31..58d4348c 100644
--- a/pkg/transformers/frob/transformer_test.go
+++ b/pkg/transformers/frob/transformer_test.go
@@ -23,6 +23,7 @@ import (
"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/fakes"
"github.com/vulcanize/vulcanizedb/pkg/transformers/frob"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
"github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
"github.com/vulcanize/vulcanizedb/pkg/transformers/test_data/mocks"
frob_mocks "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data/mocks/frob"
@@ -74,8 +75,8 @@ var _ = Describe("Frob transformer", func() {
Expect(err).NotTo(HaveOccurred())
Expect(fetcher.FetchedBlocks).To(Equal([]int64{1, 2}))
- Expect(fetcher.FetchedContractAddress).To(Equal(frob.FrobConfig.ContractAddresses))
- Expect(fetcher.FetchedTopics).To(Equal([][]common.Hash{{common.HexToHash(frob.FrobEventSignature)}}))
+ Expect(fetcher.FetchedContractAddress).To(Equal(frob.FrobConfig.ContractAddress))
+ Expect(fetcher.FetchedTopics).To(Equal([][]common.Hash{{common.HexToHash(shared.FrobSignature)}}))
})
It("returns error if fetcher returns error", func() {
@@ -95,7 +96,7 @@ var _ = Describe("Frob transformer", func() {
Expect(err).To(MatchError(fakes.FakeError))
})
- It("converts matching logs", func() {
+ It("converts matching logs to entity", func() {
converter := &frob_mocks.MockFrobConverter{}
fetcher := &mocks.MockLogFetcher{}
fetcher.SetFetchedLogs([]types.Log{test_data.EthFrobLog})
@@ -110,15 +111,51 @@ var _ = Describe("Frob transformer", func() {
err := transformer.Execute()
Expect(err).NotTo(HaveOccurred())
- Expect(converter.PassedContractAddress).To(Equal(frob.FrobConfig.ContractAddresses))
+ Expect(converter.PassedContractAddress).To(Equal(frob.FrobConfig.ContractAddress))
Expect(converter.PassedContractABI).To(Equal(frob.FrobConfig.ContractAbi))
Expect(converter.PassedLog).To(Equal(test_data.EthFrobLog))
+ })
+
+ It("returns error if converting to entity returns error", func() {
+ converter := &frob_mocks.MockFrobConverter{}
+ converter.SetToEntityError(fakes.FakeError)
+ fetcher := &mocks.MockLogFetcher{}
+ fetcher.SetFetchedLogs([]types.Log{test_data.EthFrobLog})
+ repository := &frob_mocks.MockFrobRepository{}
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
+ transformer := frob.FrobTransformer{
+ Fetcher: fetcher,
+ Converter: converter,
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(fakes.FakeError))
+ })
+
+ It("converts frob entity to model", func() {
+ converter := &frob_mocks.MockFrobConverter{}
+ fetcher := &mocks.MockLogFetcher{}
+ fetcher.SetFetchedLogs([]types.Log{test_data.EthFrobLog})
+ repository := &frob_mocks.MockFrobRepository{}
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
+ transformer := frob.FrobTransformer{
+ Fetcher: fetcher,
+ Converter: converter,
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
Expect(converter.PassedEntity).To(Equal(test_data.FrobEntity))
})
- It("returns error if converter returns error", func() {
+ It("returns error if converting to model returns error", func() {
converter := &frob_mocks.MockFrobConverter{}
- converter.SetConverterError(fakes.FakeError)
+ converter.SetToModelError(fakes.FakeError)
fetcher := &mocks.MockLogFetcher{}
fetcher.SetFetchedLogs([]types.Log{test_data.EthFrobLog})
repository := &frob_mocks.MockFrobRepository{}
@@ -152,7 +189,6 @@ var _ = Describe("Frob transformer", func() {
Expect(err).NotTo(HaveOccurred())
Expect(repository.PassedHeaderID).To(Equal(fakeHeader.Id))
- Expect(repository.PassedTransactionIndex).To(Equal(test_data.EthFrobLog.TxIndex))
Expect(repository.PassedFrobModel).To(Equal(test_data.FrobModel))
})
diff --git a/pkg/transformers/pit_file/config.go b/pkg/transformers/pit_file/config.go
new file mode 100644
index 00000000..c596b3fb
--- /dev/null
+++ b/pkg/transformers/pit_file/config.go
@@ -0,0 +1,29 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pit_file
+
+import (
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+)
+
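+// PitFileConfig holds the pit contract address and ABI, the file event
+// signatures to watch, and the block range to check for missing headers.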
+var PitFileConfig = shared.TransformerConfig{
+ ContractAddress: shared.PitContractAddress,
+ ContractAbi: shared.PitABI,
+ Topics: []string{shared.PitFileIlkSignature, shared.PitFileDebtCeilingSignature, shared.PitFileStabilityFeeSignature},
+ StartingBlockNumber: 0,
+ EndingBlockNumber: 100,
+}
diff --git a/pkg/transformers/pit_file/debt_ceiling/converter.go b/pkg/transformers/pit_file/debt_ceiling/converter.go
new file mode 100644
index 00000000..58595ab4
--- /dev/null
+++ b/pkg/transformers/pit_file/debt_ceiling/converter.go
@@ -0,0 +1,63 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package debt_ceiling
+
+import (
+ "encoding/json"
+ "math/big"
+
+ "errors"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+)
+
+type Converter interface {
+ ToModel(ethLog types.Log) (PitFileDebtCeilingModel, error)
+}
+
+type PitFileDebtCeilingConverter struct{}
+
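+// ToModel converts a raw pit file debt ceiling log into a model, reading what
+// from topic 1 and data from the last DataItemLength bytes of the log data.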
+func (PitFileDebtCeilingConverter) ToModel(ethLog types.Log) (PitFileDebtCeilingModel, error) {
+ err := verifyLog(ethLog)
+ if err != nil {
+ return PitFileDebtCeilingModel{}, err
+ }
+ what := common.HexToAddress(ethLog.Topics[1].String()).String()
+ dataBytes := ethLog.Data[len(ethLog.Data)-shared.DataItemLength:]
+ data := big.NewInt(0).SetBytes(dataBytes).String()
+
+ raw, err := json.Marshal(ethLog)
+ return PitFileDebtCeilingModel{
+ What: what,
+ Data: data,
+ TransactionIndex: ethLog.TxIndex,
+ Raw: raw,
+ }, err
+}
+
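+// verifyLog rejects logs with too few topics or too little data, which would
+// otherwise cause an out-of-range panic in ToModel.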
+func verifyLog(log types.Log) error {
+ if len(log.Topics) < 2 {
+ return errors.New("log missing topics")
+ }
+ if len(log.Data) < shared.DataItemLength {
+ return errors.New("log missing data")
+ }
+ return nil
+}
diff --git a/pkg/transformers/pit_file/debt_ceiling/converter_test.go b/pkg/transformers/pit_file/debt_ceiling/converter_test.go
new file mode 100644
index 00000000..0fe07f40
--- /dev/null
+++ b/pkg/transformers/pit_file/debt_ceiling/converter_test.go
@@ -0,0 +1,58 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package debt_ceiling_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file/debt_ceiling"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+)
+
+var _ = Describe("", func() {
+ It("returns err if log is missing topics", func() {
+ converter := debt_ceiling.PitFileDebtCeilingConverter{}
+ badLog := types.Log{
+ Data: []byte{1, 1, 1, 1, 1},
+ }
+
+ _, err := converter.ToModel(badLog)
+
+ Expect(err).To(HaveOccurred())
+ })
+
+ It("returns err if log is missing data", func() {
+ converter := debt_ceiling.PitFileDebtCeilingConverter{}
+ badLog := types.Log{
+ Topics: []common.Hash{{}, {}, {}, {}},
+ }
+
+ _, err := converter.ToModel(badLog)
+
+ Expect(err).To(HaveOccurred())
+ })
+
+ It("converts a log to an model", func() {
+ converter := debt_ceiling.PitFileDebtCeilingConverter{}
+
+ model, err := converter.ToModel(test_data.EthPitFileDebtCeilingLog)
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(model).To(Equal(test_data.PitFileDebtCeilingModel))
+ })
+})
diff --git a/pkg/transformers/pit_file/debt_ceiling/debt_ceiling_suite_test.go b/pkg/transformers/pit_file/debt_ceiling/debt_ceiling_suite_test.go
new file mode 100644
index 00000000..c1ebd0bc
--- /dev/null
+++ b/pkg/transformers/pit_file/debt_ceiling/debt_ceiling_suite_test.go
@@ -0,0 +1,27 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package debt_ceiling_test
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+func TestDebtCeiling(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "DebtCeiling Suite")
+}
diff --git a/pkg/transformers/pit_file/debt_ceiling/model.go b/pkg/transformers/pit_file/debt_ceiling/model.go
new file mode 100644
index 00000000..4879ff5b
--- /dev/null
+++ b/pkg/transformers/pit_file/debt_ceiling/model.go
@@ -0,0 +1,24 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package debt_ceiling
+
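+// PitFileDebtCeilingModel mirrors a row in maker.pit_file_debt_ceiling; db
+// tags map TransactionIndex and Raw to the tx_idx and raw_log columns.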
+type PitFileDebtCeilingModel struct {
+ What string
+ Data string
+ TransactionIndex uint `db:"tx_idx"`
+ Raw []byte `db:"raw_log"`
+}
diff --git a/pkg/transformers/pit_file/debt_ceiling/repository.go b/pkg/transformers/pit_file/debt_ceiling/repository.go
new file mode 100644
index 00000000..81ddf35f
--- /dev/null
+++ b/pkg/transformers/pit_file/debt_ceiling/repository.go
@@ -0,0 +1,66 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package debt_ceiling
+
+import (
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
+)
+
+type Repository interface {
+ Create(headerID int64, model PitFileDebtCeilingModel) error
+ MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error)
+}
+
+type PitFileDebtCeilingRepository struct {
+ db *postgres.DB
+}
+
+func NewPitFileDebtCeilingRepository(db *postgres.DB) PitFileDebtCeilingRepository {
+ return PitFileDebtCeilingRepository{
+ db: db,
+ }
+}
+
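+// Create inserts a debt ceiling event for the given header, casting data to
+// NUMERIC since it may exceed the range of native integer columns.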
+func (repository PitFileDebtCeilingRepository) Create(headerID int64, model PitFileDebtCeilingModel) error {
+ _, err := repository.db.Exec(
+ `INSERT into maker.pit_file_debt_ceiling (header_id, what, data, tx_idx, raw_log)
+ VALUES($1, $2, $3::NUMERIC, $4, $5)`,
+ headerID, model.What, model.Data, model.TransactionIndex, model.Raw,
+ )
+ return err
+}
+
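+// MissingHeaders returns headers in the given block range, for this node,
+// that have no associated pit file debt ceiling event.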
+func (repository PitFileDebtCeilingRepository) MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error) {
+ var result []core.Header
+ err := repository.db.Select(
+ &result,
+ `SELECT headers.id, headers.block_number FROM headers
+ LEFT JOIN maker.pit_file_debt_ceiling on headers.id = header_id
+ WHERE header_id ISNULL
+ AND headers.block_number >= $1
+ AND headers.block_number <= $2
+ AND headers.eth_node_fingerprint = $3`,
+ startingBlockNumber,
+ endingBlockNumber,
+ repository.db.Node.ID,
+ )
+
+ return result, err
+}
diff --git a/pkg/transformers/pit_file/debt_ceiling/repository_test.go b/pkg/transformers/pit_file/debt_ceiling/repository_test.go
new file mode 100644
index 00000000..17782f7e
--- /dev/null
+++ b/pkg/transformers/pit_file/debt_ceiling/repository_test.go
@@ -0,0 +1,144 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package debt_ceiling_test
+
+import (
+ "database/sql"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file/debt_ceiling"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+ "github.com/vulcanize/vulcanizedb/test_config"
+)
+
+var _ = Describe("", func() {
+ Describe("Create", func() {
+ It("adds a pit file debt ceiling event", func() {
+ db := test_config.NewTestDB(core.Node{})
+ test_config.CleanTestDB(db)
+ headerRepository := repositories.NewHeaderRepository(db)
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{})
+ Expect(err).NotTo(HaveOccurred())
+ pitFileRepository := debt_ceiling.NewPitFileDebtCeilingRepository(db)
+
+ err = pitFileRepository.Create(headerID, test_data.PitFileDebtCeilingModel)
+
+ Expect(err).NotTo(HaveOccurred())
+ var dbPitFile debt_ceiling.PitFileDebtCeilingModel
+ err = db.Get(&dbPitFile, `SELECT what, data, tx_idx, raw_log FROM maker.pit_file_debt_ceiling WHERE header_id = $1`, headerID)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(dbPitFile.What).To(Equal(test_data.PitFileDebtCeilingModel.What))
+ Expect(dbPitFile.Data).To(Equal(test_data.PitFileDebtCeilingModel.Data))
+ Expect(dbPitFile.TransactionIndex).To(Equal(test_data.PitFileDebtCeilingModel.TransactionIndex))
+ Expect(dbPitFile.Raw).To(MatchJSON(test_data.PitFileDebtCeilingModel.Raw))
+ })
+
+ It("does not duplicate pit file events", func() {
+ db := test_config.NewTestDB(core.Node{})
+ test_config.CleanTestDB(db)
+ headerRepository := repositories.NewHeaderRepository(db)
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{})
+ Expect(err).NotTo(HaveOccurred())
+ pitFileRepository := debt_ceiling.NewPitFileDebtCeilingRepository(db)
+ err = pitFileRepository.Create(headerID, test_data.PitFileDebtCeilingModel)
+ Expect(err).NotTo(HaveOccurred())
+
+ err = pitFileRepository.Create(headerID, test_data.PitFileDebtCeilingModel)
+
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(ContainSubstring("pq: duplicate key value violates unique constraint"))
+ })
+
+ It("removes pit file if corresponding header is deleted", func() {
+ db := test_config.NewTestDB(core.Node{})
+ test_config.CleanTestDB(db)
+ headerRepository := repositories.NewHeaderRepository(db)
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{})
+ Expect(err).NotTo(HaveOccurred())
+ pitFileRepository := debt_ceiling.NewPitFileDebtCeilingRepository(db)
+ err = pitFileRepository.Create(headerID, test_data.PitFileDebtCeilingModel)
+ Expect(err).NotTo(HaveOccurred())
+
+ _, err = db.Exec(`DELETE FROM headers WHERE id = $1`, headerID)
+
+ Expect(err).NotTo(HaveOccurred())
+ var dbPitFile debt_ceiling.PitFileDebtCeilingModel
+ err = db.Get(&dbPitFile, `SELECT what, data, tx_idx, raw_log FROM maker.pit_file_debt_ceiling WHERE header_id = $1`, headerID)
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(sql.ErrNoRows))
+ })
+ })
+
+ Describe("MissingHeaders", func() {
+ It("returns headers with no associated pit file event", func() {
+ db := test_config.NewTestDB(core.Node{})
+ test_config.CleanTestDB(db)
+ headerRepository := repositories.NewHeaderRepository(db)
+ startingBlockNumber := int64(1)
+ pitFileBlockNumber := int64(2)
+ endingBlockNumber := int64(3)
+ blockNumbers := []int64{startingBlockNumber, pitFileBlockNumber, endingBlockNumber, endingBlockNumber + 1}
+ var headerIDs []int64
+ for _, n := range blockNumbers {
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{BlockNumber: n})
+ headerIDs = append(headerIDs, headerID)
+ Expect(err).NotTo(HaveOccurred())
+ }
+ pitFileRepository := debt_ceiling.NewPitFileDebtCeilingRepository(db)
+ err := pitFileRepository.Create(headerIDs[1], test_data.PitFileDebtCeilingModel)
+ Expect(err).NotTo(HaveOccurred())
+
+ headers, err := pitFileRepository.MissingHeaders(startingBlockNumber, endingBlockNumber)
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(headers)).To(Equal(2))
+ Expect(headers[0].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber)))
+ Expect(headers[1].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber)))
+ })
+
+ It("only returns headers associated with the current node", func() {
+ db := test_config.NewTestDB(core.Node{})
+ test_config.CleanTestDB(db)
+ blockNumbers := []int64{1, 2, 3}
+ headerRepository := repositories.NewHeaderRepository(db)
+ dbTwo := test_config.NewTestDB(core.Node{ID: "second"})
+ headerRepositoryTwo := repositories.NewHeaderRepository(dbTwo)
+ var headerIDs []int64
+ for _, n := range blockNumbers {
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{BlockNumber: n})
+ Expect(err).NotTo(HaveOccurred())
+ headerIDs = append(headerIDs, headerID)
+ _, err = headerRepositoryTwo.CreateOrUpdateHeader(core.Header{BlockNumber: n})
+ Expect(err).NotTo(HaveOccurred())
+ }
+ pitFileRepository := debt_ceiling.NewPitFileDebtCeilingRepository(db)
+ pitFileRepositoryTwo := debt_ceiling.NewPitFileDebtCeilingRepository(dbTwo)
+ err := pitFileRepository.Create(headerIDs[0], test_data.PitFileDebtCeilingModel)
+ Expect(err).NotTo(HaveOccurred())
+
+ nodeOneMissingHeaders, err := pitFileRepository.MissingHeaders(blockNumbers[0], blockNumbers[len(blockNumbers)-1])
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(nodeOneMissingHeaders)).To(Equal(len(blockNumbers) - 1))
+
+ nodeTwoMissingHeaders, err := pitFileRepositoryTwo.MissingHeaders(blockNumbers[0], blockNumbers[len(blockNumbers)-1])
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(nodeTwoMissingHeaders)).To(Equal(len(blockNumbers)))
+ })
+ })
+})
diff --git a/pkg/transformers/pit_file/debt_ceiling/transformer.go b/pkg/transformers/pit_file/debt_ceiling/transformer.go
new file mode 100644
index 00000000..7b6dfdd4
--- /dev/null
+++ b/pkg/transformers/pit_file/debt_ceiling/transformer.go
@@ -0,0 +1,76 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package debt_ceiling
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+)
+
+type PitFileDebtCeilingTransformer struct {
+ Config shared.TransformerConfig
+ Converter Converter
+ Fetcher shared.LogFetcher
+ Repository Repository
+}
+
+type PitFileDebtCeilingTransformerInitializer struct {
+ Config shared.TransformerConfig
+}
+
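+// NewPitFileDebtCeilingTransformer builds a transformer from the initializer's
+// config plus the default converter, fetcher, and repository.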
+func (initializer PitFileDebtCeilingTransformerInitializer) NewPitFileDebtCeilingTransformer(db *postgres.DB, blockChain core.BlockChain) shared.Transformer {
+ converter := PitFileDebtCeilingConverter{}
+ fetcher := shared.NewFetcher(blockChain)
+ repository := NewPitFileDebtCeilingRepository(db)
+ return PitFileDebtCeilingTransformer{
+ Config: initializer.Config,
+ Converter: converter,
+ Fetcher: fetcher,
+ Repository: repository,
+ }
+}
+
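+// Execute fetches debt ceiling logs for each missing header, converts them to
+// models, and persists the results, returning on the first error.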
+func (transformer PitFileDebtCeilingTransformer) Execute() error {
+ missingHeaders, err := transformer.Repository.MissingHeaders(transformer.Config.StartingBlockNumber, transformer.Config.EndingBlockNumber)
+ if err != nil {
+ return err
+ }
+ for _, header := range missingHeaders {
+ topics := [][]common.Hash{{common.HexToHash(shared.PitFileDebtCeilingSignature)}}
+ matchingLogs, err := transformer.Fetcher.FetchLogs(pit_file.PitFileConfig.ContractAddress, topics, header.BlockNumber)
+ if err != nil {
+ return err
+ }
+ for _, log := range matchingLogs {
+ model, err := transformer.Converter.ToModel(log)
+ if err != nil {
+ return err
+ }
+ err = transformer.Repository.Create(header.Id, model)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/pkg/transformers/pit_file/debt_ceiling/transformer_test.go b/pkg/transformers/pit_file/debt_ceiling/transformer_test.go
new file mode 100644
index 00000000..9b441fd8
--- /dev/null
+++ b/pkg/transformers/pit_file/debt_ceiling/transformer_test.go
@@ -0,0 +1,175 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package debt_ceiling_test
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/fakes"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file/debt_ceiling"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data/mocks"
+ debt_ceiling_mocks "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data/mocks/pit_file/debt_ceiling"
+)
+
+var _ = Describe("", func() {
+ It("gets missing headers for block numbers specified in config", func() {
+ repository := &debt_ceiling_mocks.MockPitFileDebtCeilingRepository{}
+ transformer := debt_ceiling.PitFileDebtCeilingTransformer{
+ Config: pit_file.PitFileConfig,
+ Fetcher: &mocks.MockLogFetcher{},
+ Converter: &debt_ceiling_mocks.MockPitFileDebtCeilingConverter{},
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(repository.PassedStartingBlockNumber).To(Equal(pit_file.PitFileConfig.StartingBlockNumber))
+ Expect(repository.PassedEndingBlockNumber).To(Equal(pit_file.PitFileConfig.EndingBlockNumber))
+ })
+
+ It("returns error if repository returns error for missing headers", func() {
+ repository := &debt_ceiling_mocks.MockPitFileDebtCeilingRepository{}
+ repository.SetMissingHeadersErr(fakes.FakeError)
+ transformer := debt_ceiling.PitFileDebtCeilingTransformer{
+ Fetcher: &mocks.MockLogFetcher{},
+ Converter: &debt_ceiling_mocks.MockPitFileDebtCeilingConverter{},
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(fakes.FakeError))
+ })
+
+ It("fetches logs for missing headers", func() {
+ fetcher := &mocks.MockLogFetcher{}
+ repository := &debt_ceiling_mocks.MockPitFileDebtCeilingRepository{}
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}, {BlockNumber: 2}})
+ transformer := debt_ceiling.PitFileDebtCeilingTransformer{
+ Fetcher: fetcher,
+ Converter: &debt_ceiling_mocks.MockPitFileDebtCeilingConverter{},
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(fetcher.FetchedBlocks).To(Equal([]int64{1, 2}))
+ Expect(fetcher.FetchedContractAddress).To(Equal(pit_file.PitFileConfig.ContractAddress))
+ Expect(fetcher.FetchedTopics).To(Equal([][]common.Hash{{common.HexToHash(shared.PitFileDebtCeilingSignature)}}))
+ })
+
+ It("returns error if fetcher returns error", func() {
+ fetcher := &mocks.MockLogFetcher{}
+ fetcher.SetFetcherError(fakes.FakeError)
+ repository := &debt_ceiling_mocks.MockPitFileDebtCeilingRepository{}
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
+ transformer := debt_ceiling.PitFileDebtCeilingTransformer{
+ Fetcher: fetcher,
+ Converter: &debt_ceiling_mocks.MockPitFileDebtCeilingConverter{},
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(fakes.FakeError))
+ })
+
+ It("converts matching logs", func() {
+ converter := &debt_ceiling_mocks.MockPitFileDebtCeilingConverter{}
+ fetcher := &mocks.MockLogFetcher{}
+ fetcher.SetFetchedLogs([]types.Log{test_data.EthPitFileDebtCeilingLog})
+ repository := &debt_ceiling_mocks.MockPitFileDebtCeilingRepository{}
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
+ transformer := debt_ceiling.PitFileDebtCeilingTransformer{
+ Fetcher: fetcher,
+ Converter: converter,
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(converter.PassedLog).To(Equal(test_data.EthPitFileDebtCeilingLog))
+ })
+
+ It("returns error if converter returns error", func() {
+ converter := &debt_ceiling_mocks.MockPitFileDebtCeilingConverter{}
+ converter.SetConverterError(fakes.FakeError)
+ fetcher := &mocks.MockLogFetcher{}
+ fetcher.SetFetchedLogs([]types.Log{test_data.EthPitFileDebtCeilingLog})
+ repository := &debt_ceiling_mocks.MockPitFileDebtCeilingRepository{}
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
+ transformer := debt_ceiling.PitFileDebtCeilingTransformer{
+ Fetcher: fetcher,
+ Converter: converter,
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(fakes.FakeError))
+ })
+
+ It("persists pit file model", func() {
+ converter := &debt_ceiling_mocks.MockPitFileDebtCeilingConverter{}
+ fetcher := &mocks.MockLogFetcher{}
+ fetcher.SetFetchedLogs([]types.Log{test_data.EthPitFileDebtCeilingLog})
+ repository := &debt_ceiling_mocks.MockPitFileDebtCeilingRepository{}
+ fakeHeader := core.Header{BlockNumber: 1, Id: 2}
+ repository.SetMissingHeaders([]core.Header{fakeHeader})
+ transformer := debt_ceiling.PitFileDebtCeilingTransformer{
+ Fetcher: fetcher,
+ Converter: converter,
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(repository.PassedHeaderID).To(Equal(fakeHeader.Id))
+ Expect(repository.PassedModel).To(Equal(test_data.PitFileDebtCeilingModel))
+ })
+
+ It("returns error if repository returns error for create", func() {
+ converter := &debt_ceiling_mocks.MockPitFileDebtCeilingConverter{}
+ fetcher := &mocks.MockLogFetcher{}
+ fetcher.SetFetchedLogs([]types.Log{test_data.EthPitFileDebtCeilingLog})
+ repository := &debt_ceiling_mocks.MockPitFileDebtCeilingRepository{}
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1, Id: 2}})
+ repository.SetCreateError(fakes.FakeError)
+ transformer := debt_ceiling.PitFileDebtCeilingTransformer{
+ Fetcher: fetcher,
+ Converter: converter,
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(fakes.FakeError))
+ })
+})
diff --git a/pkg/transformers/pit_file/ilk/converter.go b/pkg/transformers/pit_file/ilk/converter.go
new file mode 100644
index 00000000..9bee67e8
--- /dev/null
+++ b/pkg/transformers/pit_file/ilk/converter.go
@@ -0,0 +1,65 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ilk
+
+import (
+ "bytes"
+ "encoding/json"
+ "math/big"
+
+ "errors"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+)
+
+type Converter interface {
+ ToModel(ethLog types.Log) (PitFileIlkModel, error)
+}
+
+type PitFileIlkConverter struct{}
+
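+// ToModel converts a raw pit file ilk log into a model, trimming the
+// zero-padded ilk and what topics to strings and decoding the trailing data bytes.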
+func (PitFileIlkConverter) ToModel(ethLog types.Log) (PitFileIlkModel, error) {
+ err := verifyLog(ethLog)
+ if err != nil {
+ return PitFileIlkModel{}, err
+ }
+ ilk := string(bytes.Trim(ethLog.Topics[2].Bytes(), "\x00"))
+ what := string(bytes.Trim(ethLog.Topics[3].Bytes(), "\x00"))
+ riskBytes := ethLog.Data[len(ethLog.Data)-shared.DataItemLength:]
+ risk := big.NewInt(0).SetBytes(riskBytes).String()
+
+ raw, err := json.Marshal(ethLog)
+ return PitFileIlkModel{
+ Ilk: ilk,
+ What: what,
+ Data: risk,
+ TransactionIndex: ethLog.TxIndex,
+ Raw: raw,
+ }, err
+}
+
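+// verifyLog rejects logs with too few topics or too little data, which would
+// otherwise cause an out-of-range panic in ToModel.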
+func verifyLog(log types.Log) error {
+ if len(log.Topics) < 4 {
+ return errors.New("log missing topics")
+ }
+ if len(log.Data) < shared.DataItemLength {
+ return errors.New("log missing data")
+ }
+ return nil
+}
diff --git a/pkg/transformers/pit_file/ilk/converter_test.go b/pkg/transformers/pit_file/ilk/converter_test.go
new file mode 100644
index 00000000..38e5ffe0
--- /dev/null
+++ b/pkg/transformers/pit_file/ilk/converter_test.go
@@ -0,0 +1,58 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ilk_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file/ilk"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+)
+
+var _ = Describe("Pit file ilk converter", func() {
+ It("returns err if log is missing topics", func() {
+ converter := ilk.PitFileIlkConverter{}
+ badLog := types.Log{
+ Data: []byte{1, 1, 1, 1, 1},
+ }
+
+ _, err := converter.ToModel(badLog)
+
+ Expect(err).To(HaveOccurred())
+ })
+
+ It("returns err if log is missing data", func() {
+ converter := ilk.PitFileIlkConverter{}
+ badLog := types.Log{
+ Topics: []common.Hash{{}, {}, {}, {}},
+ }
+
+ _, err := converter.ToModel(badLog)
+
+ Expect(err).To(HaveOccurred())
+ })
+
+ It("converts a log to an model", func() {
+ converter := ilk.PitFileIlkConverter{}
+
+ model, err := converter.ToModel(test_data.EthPitFileIlkLog)
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(model).To(Equal(test_data.PitFileIlkModel))
+ })
+})
diff --git a/pkg/transformers/pit_file/ilk/ilk_suite_test.go b/pkg/transformers/pit_file/ilk/ilk_suite_test.go
new file mode 100644
index 00000000..0cd56e44
--- /dev/null
+++ b/pkg/transformers/pit_file/ilk/ilk_suite_test.go
@@ -0,0 +1,27 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ilk_test
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+func TestIlk(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Ilk Suite")
+}
diff --git a/pkg/transformers/pit_file/ilk/model.go b/pkg/transformers/pit_file/ilk/model.go
new file mode 100644
index 00000000..442a239e
--- /dev/null
+++ b/pkg/transformers/pit_file/ilk/model.go
@@ -0,0 +1,25 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ilk
+
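+// PitFileIlkModel mirrors a row in maker.pit_file_ilk; db tags map
+// TransactionIndex and Raw to the tx_idx and raw_log columns.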
+type PitFileIlkModel struct {
+ Ilk string
+ What string
+ Data string
+ TransactionIndex uint `db:"tx_idx"`
+ Raw []byte `db:"raw_log"`
+}
diff --git a/pkg/transformers/pit_file/ilk/repository.go b/pkg/transformers/pit_file/ilk/repository.go
new file mode 100644
index 00000000..8c9374f8
--- /dev/null
+++ b/pkg/transformers/pit_file/ilk/repository.go
@@ -0,0 +1,66 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ilk
+
+import (
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
+)
+
+type Repository interface {
+ Create(headerID int64, model PitFileIlkModel) error
+ MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error)
+}
+
+type PitFileIlkRepository struct {
+ db *postgres.DB
+}
+
+func NewPitFileIlkRepository(db *postgres.DB) PitFileIlkRepository {
+ return PitFileIlkRepository{
+ db: db,
+ }
+}
+
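+// Create inserts a pit file ilk event for the given header, casting data to
+// NUMERIC since it may exceed the range of native integer columns.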
+func (repository PitFileIlkRepository) Create(headerID int64, model PitFileIlkModel) error {
+ _, err := repository.db.Exec(
+ `INSERT into maker.pit_file_ilk (header_id, ilk, what, data, tx_idx, raw_log)
+ VALUES($1, $2, $3, $4::NUMERIC, $5, $6)`,
+ headerID, model.Ilk, model.What, model.Data, model.TransactionIndex, model.Raw,
+ )
+ return err
+}
+
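+// MissingHeaders returns headers in the given block range, for this node,
+// that have no associated pit file ilk event.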
+func (repository PitFileIlkRepository) MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error) {
+ var result []core.Header
+ err := repository.db.Select(
+ &result,
+ `SELECT headers.id, headers.block_number FROM headers
+ LEFT JOIN maker.pit_file_ilk on headers.id = header_id
+ WHERE header_id ISNULL
+ AND headers.block_number >= $1
+ AND headers.block_number <= $2
+ AND headers.eth_node_fingerprint = $3`,
+ startingBlockNumber,
+ endingBlockNumber,
+ repository.db.Node.ID,
+ )
+
+ return result, err
+}
diff --git a/pkg/transformers/pit_file/ilk/repository_test.go b/pkg/transformers/pit_file/ilk/repository_test.go
new file mode 100644
index 00000000..cbc77b61
--- /dev/null
+++ b/pkg/transformers/pit_file/ilk/repository_test.go
@@ -0,0 +1,145 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ilk_test
+
+import (
+ "database/sql"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file/ilk"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+ "github.com/vulcanize/vulcanizedb/test_config"
+)
+
+var _ = Describe("Pit file ilk repository", func() {
+ Describe("Create", func() {
+ It("adds a pit file ilk event", func() {
+ db := test_config.NewTestDB(core.Node{})
+ test_config.CleanTestDB(db)
+ headerRepository := repositories.NewHeaderRepository(db)
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{})
+ Expect(err).NotTo(HaveOccurred())
+ pitFileRepository := ilk.NewPitFileIlkRepository(db)
+
+ err = pitFileRepository.Create(headerID, test_data.PitFileIlkModel)
+
+ Expect(err).NotTo(HaveOccurred())
+ var dbPitFile ilk.PitFileIlkModel
+ err = db.Get(&dbPitFile, `SELECT ilk, what, data, tx_idx, raw_log FROM maker.pit_file_ilk WHERE header_id = $1`, headerID)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(dbPitFile.Ilk).To(Equal(test_data.PitFileIlkModel.Ilk))
+ Expect(dbPitFile.What).To(Equal(test_data.PitFileIlkModel.What))
+ Expect(dbPitFile.Data).To(Equal(test_data.PitFileIlkModel.Data))
+ Expect(dbPitFile.TransactionIndex).To(Equal(test_data.PitFileIlkModel.TransactionIndex))
+ Expect(dbPitFile.Raw).To(MatchJSON(test_data.PitFileIlkModel.Raw))
+ })
+
+ It("does not duplicate pit file ilk events", func() {
+ db := test_config.NewTestDB(core.Node{})
+ test_config.CleanTestDB(db)
+ headerRepository := repositories.NewHeaderRepository(db)
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{})
+ Expect(err).NotTo(HaveOccurred())
+ pitFileRepository := ilk.NewPitFileIlkRepository(db)
+ err = pitFileRepository.Create(headerID, test_data.PitFileIlkModel)
+ Expect(err).NotTo(HaveOccurred())
+
+ err = pitFileRepository.Create(headerID, test_data.PitFileIlkModel)
+
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(ContainSubstring("pq: duplicate key value violates unique constraint"))
+ })
+
+ It("removes pit file ilk if corresponding header is deleted", func() {
+ db := test_config.NewTestDB(core.Node{})
+ test_config.CleanTestDB(db)
+ headerRepository := repositories.NewHeaderRepository(db)
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{})
+ Expect(err).NotTo(HaveOccurred())
+ pitFileRepository := ilk.NewPitFileIlkRepository(db)
+ err = pitFileRepository.Create(headerID, test_data.PitFileIlkModel)
+ Expect(err).NotTo(HaveOccurred())
+
+ _, err = db.Exec(`DELETE FROM headers WHERE id = $1`, headerID)
+
+ Expect(err).NotTo(HaveOccurred())
+ var dbPitFile ilk.PitFileIlkModel
+ err = db.Get(&dbPitFile, `SELECT ilk, what, data, tx_idx, raw_log FROM maker.pit_file_ilk WHERE header_id = $1`, headerID)
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(sql.ErrNoRows))
+ })
+ })
+
+ Describe("MissingHeaders", func() {
+ It("returns headers with no associated pit file ilk event", func() {
+ db := test_config.NewTestDB(core.Node{})
+ test_config.CleanTestDB(db)
+ headerRepository := repositories.NewHeaderRepository(db)
+ startingBlockNumber := int64(1)
+ pitFileBlockNumber := int64(2)
+ endingBlockNumber := int64(3)
+ blockNumbers := []int64{startingBlockNumber, pitFileBlockNumber, endingBlockNumber, endingBlockNumber + 1}
+ var headerIDs []int64
+ for _, n := range blockNumbers {
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{BlockNumber: n})
+ headerIDs = append(headerIDs, headerID)
+ Expect(err).NotTo(HaveOccurred())
+ }
+ pitFileRepository := ilk.NewPitFileIlkRepository(db)
+ err := pitFileRepository.Create(headerIDs[1], test_data.PitFileIlkModel)
+ Expect(err).NotTo(HaveOccurred())
+
+ headers, err := pitFileRepository.MissingHeaders(startingBlockNumber, endingBlockNumber)
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(headers)).To(Equal(2))
+ Expect(headers[0].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber)))
+ Expect(headers[1].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber)))
+ })
+
+ It("only returns headers associated with the current node", func() {
+ db := test_config.NewTestDB(core.Node{})
+ test_config.CleanTestDB(db)
+ blockNumbers := []int64{1, 2, 3}
+ headerRepository := repositories.NewHeaderRepository(db)
+ dbTwo := test_config.NewTestDB(core.Node{ID: "second"})
+ headerRepositoryTwo := repositories.NewHeaderRepository(dbTwo)
+ var headerIDs []int64
+ for _, n := range blockNumbers {
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{BlockNumber: n})
+ Expect(err).NotTo(HaveOccurred())
+ headerIDs = append(headerIDs, headerID)
+ _, err = headerRepositoryTwo.CreateOrUpdateHeader(core.Header{BlockNumber: n})
+ Expect(err).NotTo(HaveOccurred())
+ }
+ pitFileRepository := ilk.NewPitFileIlkRepository(db)
+ pitFileRepositoryTwo := ilk.NewPitFileIlkRepository(dbTwo)
+ err := pitFileRepository.Create(headerIDs[0], test_data.PitFileIlkModel)
+ Expect(err).NotTo(HaveOccurred())
+
+ nodeOneMissingHeaders, err := pitFileRepository.MissingHeaders(blockNumbers[0], blockNumbers[len(blockNumbers)-1])
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(nodeOneMissingHeaders)).To(Equal(len(blockNumbers) - 1))
+
+ nodeTwoMissingHeaders, err := pitFileRepositoryTwo.MissingHeaders(blockNumbers[0], blockNumbers[len(blockNumbers)-1])
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(nodeTwoMissingHeaders)).To(Equal(len(blockNumbers)))
+ })
+ })
+})
diff --git a/pkg/transformers/pit_file/ilk/transformer.go b/pkg/transformers/pit_file/ilk/transformer.go
new file mode 100644
index 00000000..e5e5a54a
--- /dev/null
+++ b/pkg/transformers/pit_file/ilk/transformer.go
@@ -0,0 +1,76 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ilk
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+)
+
+type PitFileIlkTransformer struct {
+ Config shared.TransformerConfig
+ Converter Converter
+ Fetcher shared.LogFetcher
+ Repository Repository
+}
+
+type PitFileIlkTransformerInitializer struct {
+ Config shared.TransformerConfig
+}
+
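+// NewPitFileIlkTransformer builds a transformer from the initializer's config
+// plus the default converter, fetcher, and repository.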
+func (initializer PitFileIlkTransformerInitializer) NewPitFileIlkTransformer(db *postgres.DB, blockChain core.BlockChain) shared.Transformer {
+ converter := PitFileIlkConverter{}
+ fetcher := shared.NewFetcher(blockChain)
+ repository := NewPitFileIlkRepository(db)
+ return PitFileIlkTransformer{
+ Config: initializer.Config,
+ Converter: converter,
+ Fetcher: fetcher,
+ Repository: repository,
+ }
+}
+
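+// Execute fetches pit file ilk logs for each missing header, converts them to
+// models, and persists the results, returning on the first error.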
+func (transformer PitFileIlkTransformer) Execute() error {
+ missingHeaders, err := transformer.Repository.MissingHeaders(transformer.Config.StartingBlockNumber, transformer.Config.EndingBlockNumber)
+ if err != nil {
+ return err
+ }
+ for _, header := range missingHeaders {
+ topics := [][]common.Hash{{common.HexToHash(shared.PitFileIlkSignature)}}
+ matchingLogs, err := transformer.Fetcher.FetchLogs(pit_file.PitFileConfig.ContractAddress, topics, header.BlockNumber)
+ if err != nil {
+ return err
+ }
+ for _, log := range matchingLogs {
+ model, err := transformer.Converter.ToModel(log)
+ if err != nil {
+ return err
+ }
+ err = transformer.Repository.Create(header.Id, model)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/pkg/transformers/pit_file/ilk/transformer_test.go b/pkg/transformers/pit_file/ilk/transformer_test.go
new file mode 100644
index 00000000..5d5769c2
--- /dev/null
+++ b/pkg/transformers/pit_file/ilk/transformer_test.go
@@ -0,0 +1,175 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ilk_test
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/fakes"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file/ilk"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data/mocks"
+ pit_file_ilk_mocks "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data/mocks/pit_file/ilk"
+)
+
+var _ = Describe("Pit file ilk transformer", func() {
+ It("gets missing headers for block numbers specified in config", func() {
+ repository := &pit_file_ilk_mocks.MockPitFileIlkRepository{}
+ transformer := ilk.PitFileIlkTransformer{
+ Config: pit_file.PitFileConfig,
+ Fetcher: &mocks.MockLogFetcher{},
+ Converter: &pit_file_ilk_mocks.MockPitFileIlkConverter{},
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(repository.PassedStartingBlockNumber).To(Equal(pit_file.PitFileConfig.StartingBlockNumber))
+ Expect(repository.PassedEndingBlockNumber).To(Equal(pit_file.PitFileConfig.EndingBlockNumber))
+ })
+
+ It("returns error if repository returns error for missing headers", func() {
+ repository := &pit_file_ilk_mocks.MockPitFileIlkRepository{}
+ repository.SetMissingHeadersErr(fakes.FakeError)
+ transformer := ilk.PitFileIlkTransformer{
+ Fetcher: &mocks.MockLogFetcher{},
+ Converter: &pit_file_ilk_mocks.MockPitFileIlkConverter{},
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(fakes.FakeError))
+ })
+
+ It("fetches logs for missing headers", func() {
+ fetcher := &mocks.MockLogFetcher{}
+ repository := &pit_file_ilk_mocks.MockPitFileIlkRepository{}
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}, {BlockNumber: 2}})
+ transformer := ilk.PitFileIlkTransformer{
+ Fetcher: fetcher,
+ Converter: &pit_file_ilk_mocks.MockPitFileIlkConverter{},
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(fetcher.FetchedBlocks).To(Equal([]int64{1, 2}))
+ Expect(fetcher.FetchedContractAddress).To(Equal(pit_file.PitFileConfig.ContractAddress))
+ Expect(fetcher.FetchedTopics).To(Equal([][]common.Hash{{common.HexToHash(shared.PitFileIlkSignature)}}))
+ })
+
+ It("returns error if fetcher returns error", func() {
+ fetcher := &mocks.MockLogFetcher{}
+ fetcher.SetFetcherError(fakes.FakeError)
+ repository := &pit_file_ilk_mocks.MockPitFileIlkRepository{}
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
+ transformer := ilk.PitFileIlkTransformer{
+ Fetcher: fetcher,
+ Converter: &pit_file_ilk_mocks.MockPitFileIlkConverter{},
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(fakes.FakeError))
+ })
+
+ It("converts matching logs", func() {
+ converter := &pit_file_ilk_mocks.MockPitFileIlkConverter{}
+ fetcher := &mocks.MockLogFetcher{}
+ fetcher.SetFetchedLogs([]types.Log{test_data.EthPitFileIlkLog})
+ repository := &pit_file_ilk_mocks.MockPitFileIlkRepository{}
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
+ transformer := ilk.PitFileIlkTransformer{
+ Fetcher: fetcher,
+ Converter: converter,
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(converter.PassedLog).To(Equal(test_data.EthPitFileIlkLog))
+ })
+
+ It("returns error if converter returns error", func() {
+ converter := &pit_file_ilk_mocks.MockPitFileIlkConverter{}
+ converter.SetConverterError(fakes.FakeError)
+ fetcher := &mocks.MockLogFetcher{}
+ fetcher.SetFetchedLogs([]types.Log{test_data.EthPitFileIlkLog})
+ repository := &pit_file_ilk_mocks.MockPitFileIlkRepository{}
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
+ transformer := ilk.PitFileIlkTransformer{
+ Fetcher: fetcher,
+ Converter: converter,
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(fakes.FakeError))
+ })
+
+ It("persists pit file model", func() {
+ converter := &pit_file_ilk_mocks.MockPitFileIlkConverter{}
+ fetcher := &mocks.MockLogFetcher{}
+ fetcher.SetFetchedLogs([]types.Log{test_data.EthPitFileIlkLog})
+ repository := &pit_file_ilk_mocks.MockPitFileIlkRepository{}
+ fakeHeader := core.Header{BlockNumber: 1, Id: 2}
+ repository.SetMissingHeaders([]core.Header{fakeHeader})
+ transformer := ilk.PitFileIlkTransformer{
+ Fetcher: fetcher,
+ Converter: converter,
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(repository.PassedHeaderID).To(Equal(fakeHeader.Id))
+ Expect(repository.PassedModel).To(Equal(test_data.PitFileIlkModel))
+ })
+
+ It("returns error if repository returns error for create", func() {
+ converter := &pit_file_ilk_mocks.MockPitFileIlkConverter{}
+ fetcher := &mocks.MockLogFetcher{}
+ fetcher.SetFetchedLogs([]types.Log{test_data.EthPitFileIlkLog})
+ repository := &pit_file_ilk_mocks.MockPitFileIlkRepository{}
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1, Id: 2}})
+ repository.SetCreateError(fakes.FakeError)
+ transformer := ilk.PitFileIlkTransformer{
+ Fetcher: fetcher,
+ Converter: converter,
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(fakes.FakeError))
+ })
+})
diff --git a/pkg/transformers/pit_file/stability_fee/converter.go b/pkg/transformers/pit_file/stability_fee/converter.go
new file mode 100644
index 00000000..d78d6b2a
--- /dev/null
+++ b/pkg/transformers/pit_file/stability_fee/converter.go
@@ -0,0 +1,54 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stability_fee
+
+import (
+ "bytes"
+ "encoding/json"
+
+ "errors"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
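+// Converter abstracts log-to-model conversion so tests can substitute a mock implementation.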
+type Converter interface {
+ ToModel(ethLog types.Log) (PitFileStabilityFeeModel, error)
+}
+
+type PitFileStabilityFeeConverter struct{}
+
+func (PitFileStabilityFeeConverter) ToModel(ethLog types.Log) (PitFileStabilityFeeModel, error) {
+ err := verifyLog(ethLog)
+ if err != nil {
+ return PitFileStabilityFeeModel{}, err
+ }
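+ // Topics[2] carries the bytes32 "what" argument, null-padded; Topics[1] carries the address argument.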
+ what := string(bytes.Trim(ethLog.Topics[2].Bytes(), "\x00"))
+ data := common.HexToAddress(ethLog.Topics[1].String()).Hex()
+
+ raw, err := json.Marshal(ethLog)
+ return PitFileStabilityFeeModel{
+ What: what,
+ Data: data,
+ TransactionIndex: ethLog.TxIndex,
+ Raw: raw,
+ }, err
+}
+
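+// verifyLog guards against out-of-range topic access when a malformed log arrives with too few topics.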
+func verifyLog(log types.Log) error {
+ if len(log.Topics) < 3 {
+ return errors.New("log missing topics")
+ }
+ return nil
+}
diff --git a/pkg/transformers/pit_file/stability_fee/converter_test.go b/pkg/transformers/pit_file/stability_fee/converter_test.go
new file mode 100644
index 00000000..f91cf927
--- /dev/null
+++ b/pkg/transformers/pit_file/stability_fee/converter_test.go
@@ -0,0 +1,44 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stability_fee_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file/stability_fee"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+)
+
+var _ = Describe("Pit file stability fee converter", func() {
+ It("returns err if log is missing topics", func() {
+ converter := stability_fee.PitFileStabilityFeeConverter{}
+ badLog := types.Log{}
+
+ _, err := converter.ToModel(badLog)
+
+ Expect(err).To(HaveOccurred())
+ })
+
+ It("converts a log to an model", func() {
+ converter := stability_fee.PitFileStabilityFeeConverter{}
+
+ model, err := converter.ToModel(test_data.EthPitFileStabilityFeeLog)
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(model).To(Equal(test_data.PitFileStabilityFeeModel))
+ })
+})
diff --git a/pkg/transformers/pit_file/stability_fee/model.go b/pkg/transformers/pit_file/stability_fee/model.go
new file mode 100644
index 00000000..f303d575
--- /dev/null
+++ b/pkg/transformers/pit_file/stability_fee/model.go
@@ -0,0 +1,22 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stability_fee
+
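+// PitFileStabilityFeeModel mirrors a row in maker.pit_file_stability_fee; the db tags map fields to column names.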
+type PitFileStabilityFeeModel struct {
+ What string
+ Data string
+ TransactionIndex uint `db:"tx_idx"`
+ Raw []byte `db:"raw_log"`
+}
diff --git a/pkg/transformers/pit_file/stability_fee/repository.go b/pkg/transformers/pit_file/stability_fee/repository.go
new file mode 100644
index 00000000..51376a9d
--- /dev/null
+++ b/pkg/transformers/pit_file/stability_fee/repository.go
@@ -0,0 +1,62 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stability_fee
+
+import (
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
+)
+
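+// Repository persists converted events and reports which headers in a block range still lack one.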
+type Repository interface {
+ Create(headerID int64, model PitFileStabilityFeeModel) error
+ MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error)
+}
+
+type PitFileStabilityFeeRepository struct {
+ db *postgres.DB
+}
+
+func NewPitFileStabilityFeeRepository(db *postgres.DB) PitFileStabilityFeeRepository {
+ return PitFileStabilityFeeRepository{
+ db: db,
+ }
+}
+
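+// Create inserts a single event row; duplicate inserts for the same header are rejected by a unique constraint (exercised in the repository tests).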
+func (repository PitFileStabilityFeeRepository) Create(headerID int64, model PitFileStabilityFeeModel) error {
+ _, err := repository.db.Exec(
+ `INSERT INTO maker.pit_file_stability_fee (header_id, what, data, tx_idx, raw_log)
+ VALUES($1, $2, $3, $4, $5)`,
+ headerID, model.What, model.Data, model.TransactionIndex, model.Raw,
+ )
+ return err
+}
+
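+// MissingHeaders returns headers in [startingBlockNumber, endingBlockNumber] with no associated event, scoped to the current node's fingerprint so separate nodes can sync independently.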
+func (repository PitFileStabilityFeeRepository) MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error) {
+ var result []core.Header
+ err := repository.db.Select(
+ &result,
+ `SELECT headers.id, headers.block_number FROM headers
+ LEFT JOIN maker.pit_file_stability_fee ON headers.id = header_id
+ WHERE header_id IS NULL
+ AND headers.block_number >= $1
+ AND headers.block_number <= $2
+ AND headers.eth_node_fingerprint = $3`,
+ startingBlockNumber,
+ endingBlockNumber,
+ repository.db.Node.ID,
+ )
+
+ return result, err
+}
diff --git a/pkg/transformers/pit_file/stability_fee/repository_test.go b/pkg/transformers/pit_file/stability_fee/repository_test.go
new file mode 100644
index 00000000..edc9f12e
--- /dev/null
+++ b/pkg/transformers/pit_file/stability_fee/repository_test.go
@@ -0,0 +1,144 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stability_fee_test
+
+import (
+ "database/sql"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file/stability_fee"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+ "github.com/vulcanize/vulcanizedb/test_config"
+)
+
+var _ = Describe("", func() {
+ Describe("Create", func() {
+ It("adds a pit file stability fee event", func() {
+ db := test_config.NewTestDB(core.Node{})
+ test_config.CleanTestDB(db)
+ headerRepository := repositories.NewHeaderRepository(db)
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{})
+ Expect(err).NotTo(HaveOccurred())
+ pitFileRepository := stability_fee.NewPitFileStabilityFeeRepository(db)
+
+ err = pitFileRepository.Create(headerID, test_data.PitFileStabilityFeeModel)
+
+ Expect(err).NotTo(HaveOccurred())
+ var dbPitFile stability_fee.PitFileStabilityFeeModel
+ err = db.Get(&dbPitFile, `SELECT what, data, tx_idx, raw_log FROM maker.pit_file_stability_fee WHERE header_id = $1`, headerID)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(dbPitFile.What).To(Equal(test_data.PitFileStabilityFeeModel.What))
+ Expect(dbPitFile.Data).To(Equal(test_data.PitFileStabilityFeeModel.Data))
+ Expect(dbPitFile.TransactionIndex).To(Equal(test_data.PitFileStabilityFeeModel.TransactionIndex))
+ Expect(dbPitFile.Raw).To(MatchJSON(test_data.PitFileStabilityFeeModel.Raw))
+ })
+
+ It("does not duplicate pit file events", func() {
+ db := test_config.NewTestDB(core.Node{})
+ test_config.CleanTestDB(db)
+ headerRepository := repositories.NewHeaderRepository(db)
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{})
+ Expect(err).NotTo(HaveOccurred())
+ pitFileRepository := stability_fee.NewPitFileStabilityFeeRepository(db)
+ err = pitFileRepository.Create(headerID, test_data.PitFileStabilityFeeModel)
+ Expect(err).NotTo(HaveOccurred())
+
+ err = pitFileRepository.Create(headerID, test_data.PitFileStabilityFeeModel)
+
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(ContainSubstring("pq: duplicate key value violates unique constraint"))
+ })
+
+ It("removes pit file if corresponding header is deleted", func() {
+ db := test_config.NewTestDB(core.Node{})
+ test_config.CleanTestDB(db)
+ headerRepository := repositories.NewHeaderRepository(db)
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{})
+ Expect(err).NotTo(HaveOccurred())
+ pitFileRepository := stability_fee.NewPitFileStabilityFeeRepository(db)
+ err = pitFileRepository.Create(headerID, test_data.PitFileStabilityFeeModel)
+ Expect(err).NotTo(HaveOccurred())
+
+ _, err = db.Exec(`DELETE FROM headers WHERE id = $1`, headerID)
+
+ Expect(err).NotTo(HaveOccurred())
+ var dbPitFile stability_fee.PitFileStabilityFeeModel
+ err = db.Get(&dbPitFile, `SELECT what, data, tx_idx, raw_log FROM maker.pit_file_stability_fee WHERE header_id = $1`, headerID)
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(sql.ErrNoRows))
+ })
+ })
+
+ Describe("MissingHeaders", func() {
+ It("returns headers with no associated pit file event", func() {
+ db := test_config.NewTestDB(core.Node{})
+ test_config.CleanTestDB(db)
+ headerRepository := repositories.NewHeaderRepository(db)
+ startingBlockNumber := int64(1)
+ pitFileBlockNumber := int64(2)
+ endingBlockNumber := int64(3)
+ blockNumbers := []int64{startingBlockNumber, pitFileBlockNumber, endingBlockNumber, endingBlockNumber + 1}
+ var headerIDs []int64
+ for _, n := range blockNumbers {
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{BlockNumber: n})
+ headerIDs = append(headerIDs, headerID)
+ Expect(err).NotTo(HaveOccurred())
+ }
+ pitFileRepository := stability_fee.NewPitFileStabilityFeeRepository(db)
+ err := pitFileRepository.Create(headerIDs[1], test_data.PitFileStabilityFeeModel)
+ Expect(err).NotTo(HaveOccurred())
+
+ headers, err := pitFileRepository.MissingHeaders(startingBlockNumber, endingBlockNumber)
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(headers)).To(Equal(2))
+ Expect(headers[0].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber)))
+ Expect(headers[1].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber)))
+ })
+
+ It("only returns headers associated with the current node", func() {
+ db := test_config.NewTestDB(core.Node{})
+ test_config.CleanTestDB(db)
+ blockNumbers := []int64{1, 2, 3}
+ headerRepository := repositories.NewHeaderRepository(db)
+ dbTwo := test_config.NewTestDB(core.Node{ID: "second"})
+ headerRepositoryTwo := repositories.NewHeaderRepository(dbTwo)
+ var headerIDs []int64
+ for _, n := range blockNumbers {
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{BlockNumber: n})
+ Expect(err).NotTo(HaveOccurred())
+ headerIDs = append(headerIDs, headerID)
+ _, err = headerRepositoryTwo.CreateOrUpdateHeader(core.Header{BlockNumber: n})
+ Expect(err).NotTo(HaveOccurred())
+ }
+ pitFileRepository := stability_fee.NewPitFileStabilityFeeRepository(db)
+ pitFileRepositoryTwo := stability_fee.NewPitFileStabilityFeeRepository(dbTwo)
+ err := pitFileRepository.Create(headerIDs[0], test_data.PitFileStabilityFeeModel)
+ Expect(err).NotTo(HaveOccurred())
+
+ nodeOneMissingHeaders, err := pitFileRepository.MissingHeaders(blockNumbers[0], blockNumbers[len(blockNumbers)-1])
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(nodeOneMissingHeaders)).To(Equal(len(blockNumbers) - 1))
+
+ nodeTwoMissingHeaders, err := pitFileRepositoryTwo.MissingHeaders(blockNumbers[0], blockNumbers[len(blockNumbers)-1])
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(nodeTwoMissingHeaders)).To(Equal(len(blockNumbers)))
+ })
+ })
+})
diff --git a/pkg/transformers/pit_file/stability_fee/stability_fee_suite_test.go b/pkg/transformers/pit_file/stability_fee/stability_fee_suite_test.go
new file mode 100644
index 00000000..acaae864
--- /dev/null
+++ b/pkg/transformers/pit_file/stability_fee/stability_fee_suite_test.go
@@ -0,0 +1,27 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stability_fee_test
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+func TestStabilityFee(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "StabilityFee Suite")
+}
diff --git a/pkg/transformers/pit_file/stability_fee/transformer.go b/pkg/transformers/pit_file/stability_fee/transformer.go
new file mode 100644
index 00000000..16fa1f37
--- /dev/null
+++ b/pkg/transformers/pit_file/stability_fee/transformer.go
@@ -0,0 +1,72 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stability_fee
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+)
+
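+// PitFileStabilityFeeTransformer holds interface-typed collaborators so each can be mocked in tests.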
+type PitFileStabilityFeeTransformer struct {
+ Config shared.TransformerConfig
+ Converter Converter
+ Fetcher shared.LogFetcher
+ Repository Repository
+}
+
+type PitFileStabilityFeeTransformerInitializer struct {
+ Config shared.TransformerConfig
+}
+
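+// NewPitFileStabilityFeeTransformer wires the concrete fetcher, converter, and repository behind the shared Transformer interface.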
+func (initializer PitFileStabilityFeeTransformerInitializer) NewPitFileStabilityFeeTransformer(db *postgres.DB, blockChain core.BlockChain) shared.Transformer {
+ converter := PitFileStabilityFeeConverter{}
+ fetcher := shared.NewFetcher(blockChain)
+ repository := NewPitFileStabilityFeeRepository(db)
+ return PitFileStabilityFeeTransformer{
+ Config: initializer.Config,
+ Converter: converter,
+ Fetcher: fetcher,
+ Repository: repository,
+ }
+}
+
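+// Execute finds headers missing a stability fee event, fetches logs matching the event signature at each header's block, converts them, and persists the results.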
+func (transformer PitFileStabilityFeeTransformer) Execute() error {
+ missingHeaders, err := transformer.Repository.MissingHeaders(transformer.Config.StartingBlockNumber, transformer.Config.EndingBlockNumber)
+ if err != nil {
+ return err
+ }
+ for _, header := range missingHeaders {
+ topics := [][]common.Hash{{common.HexToHash(shared.PitFileStabilityFeeSignature)}}
+ matchingLogs, err := transformer.Fetcher.FetchLogs(pit_file.PitFileConfig.ContractAddress, topics, header.BlockNumber)
+ if err != nil {
+ return err
+ }
+ for _, log := range matchingLogs {
+ model, err := transformer.Converter.ToModel(log)
+ if err != nil {
+ return err
+ }
+ err = transformer.Repository.Create(header.Id, model)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/pkg/transformers/pit_file/stability_fee/transformer_test.go b/pkg/transformers/pit_file/stability_fee/transformer_test.go
new file mode 100644
index 00000000..7106c3dd
--- /dev/null
+++ b/pkg/transformers/pit_file/stability_fee/transformer_test.go
@@ -0,0 +1,175 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stability_fee_test
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/fakes"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file/stability_fee"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data/mocks"
+ stability_fee_mocks "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data/mocks/pit_file/stability_fee"
+)
+
+var _ = Describe("", func() {
+ It("gets missing headers for block numbers specified in config", func() {
+ repository := &stability_fee_mocks.MockPitFileStabilityFeeRepository{}
+ transformer := stability_fee.PitFileStabilityFeeTransformer{
+ Config: pit_file.PitFileConfig,
+ Fetcher: &mocks.MockLogFetcher{},
+ Converter: &stability_fee_mocks.MockPitFileStabilityFeeConverter{},
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(repository.PassedStartingBlockNumber).To(Equal(pit_file.PitFileConfig.StartingBlockNumber))
+ Expect(repository.PassedEndingBlockNumber).To(Equal(pit_file.PitFileConfig.EndingBlockNumber))
+ })
+
+ It("returns error if repository returns error for missing headers", func() {
+ repository := &stability_fee_mocks.MockPitFileStabilityFeeRepository{}
+ repository.SetMissingHeadersErr(fakes.FakeError)
+ transformer := stability_fee.PitFileStabilityFeeTransformer{
+ Fetcher: &mocks.MockLogFetcher{},
+ Converter: &stability_fee_mocks.MockPitFileStabilityFeeConverter{},
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(fakes.FakeError))
+ })
+
+ It("fetches logs for missing headers", func() {
+ fetcher := &mocks.MockLogFetcher{}
+ repository := &stability_fee_mocks.MockPitFileStabilityFeeRepository{}
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}, {BlockNumber: 2}})
+ transformer := stability_fee.PitFileStabilityFeeTransformer{
+ Fetcher: fetcher,
+ Converter: &stability_fee_mocks.MockPitFileStabilityFeeConverter{},
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(fetcher.FetchedBlocks).To(Equal([]int64{1, 2}))
+ Expect(fetcher.FetchedContractAddress).To(Equal(pit_file.PitFileConfig.ContractAddress))
+ Expect(fetcher.FetchedTopics).To(Equal([][]common.Hash{{common.HexToHash(shared.PitFileStabilityFeeSignature)}}))
+ })
+
+ It("returns error if fetcher returns error", func() {
+ fetcher := &mocks.MockLogFetcher{}
+ fetcher.SetFetcherError(fakes.FakeError)
+ repository := &stability_fee_mocks.MockPitFileStabilityFeeRepository{}
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
+ transformer := stability_fee.PitFileStabilityFeeTransformer{
+ Fetcher: fetcher,
+ Converter: &stability_fee_mocks.MockPitFileStabilityFeeConverter{},
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(fakes.FakeError))
+ })
+
+ It("converts matching logs", func() {
+ converter := &stability_fee_mocks.MockPitFileStabilityFeeConverter{}
+ fetcher := &mocks.MockLogFetcher{}
+ fetcher.SetFetchedLogs([]types.Log{test_data.EthPitFileStabilityFeeLog})
+ repository := &stability_fee_mocks.MockPitFileStabilityFeeRepository{}
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
+ transformer := stability_fee.PitFileStabilityFeeTransformer{
+ Fetcher: fetcher,
+ Converter: converter,
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(converter.PassedLog).To(Equal(test_data.EthPitFileStabilityFeeLog))
+ })
+
+ It("returns error if converter returns error", func() {
+ converter := &stability_fee_mocks.MockPitFileStabilityFeeConverter{}
+ converter.SetConverterError(fakes.FakeError)
+ fetcher := &mocks.MockLogFetcher{}
+ fetcher.SetFetchedLogs([]types.Log{test_data.EthPitFileStabilityFeeLog})
+ repository := &stability_fee_mocks.MockPitFileStabilityFeeRepository{}
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
+ transformer := stability_fee.PitFileStabilityFeeTransformer{
+ Fetcher: fetcher,
+ Converter: converter,
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(fakes.FakeError))
+ })
+
+ It("persists pit file model", func() {
+ converter := &stability_fee_mocks.MockPitFileStabilityFeeConverter{}
+ fetcher := &mocks.MockLogFetcher{}
+ fetcher.SetFetchedLogs([]types.Log{test_data.EthPitFileStabilityFeeLog})
+ repository := &stability_fee_mocks.MockPitFileStabilityFeeRepository{}
+ fakeHeader := core.Header{BlockNumber: 1, Id: 2}
+ repository.SetMissingHeaders([]core.Header{fakeHeader})
+ transformer := stability_fee.PitFileStabilityFeeTransformer{
+ Fetcher: fetcher,
+ Converter: converter,
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(repository.PassedHeaderID).To(Equal(fakeHeader.Id))
+ Expect(repository.PassedModel).To(Equal(test_data.PitFileStabilityFeeModel))
+ })
+
+ It("returns error if repository returns error for create", func() {
+ converter := &stability_fee_mocks.MockPitFileStabilityFeeConverter{}
+ fetcher := &mocks.MockLogFetcher{}
+ fetcher.SetFetchedLogs([]types.Log{test_data.EthPitFileStabilityFeeLog})
+ repository := &stability_fee_mocks.MockPitFileStabilityFeeRepository{}
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1, Id: 2}})
+ repository.SetCreateError(fakes.FakeError)
+ transformer := stability_fee.PitFileStabilityFeeTransformer{
+ Fetcher: fetcher,
+ Converter: converter,
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(fakes.FakeError))
+ })
+})
diff --git a/pkg/transformers/price_feeds/config.go b/pkg/transformers/price_feeds/config.go
index ddeabef3..b468d0ec 100644
--- a/pkg/transformers/price_feeds/config.go
+++ b/pkg/transformers/price_feeds/config.go
@@ -14,11 +14,9 @@
package price_feeds
-var (
- PepAddress = "0x99041F808D598B782D5a3e498681C2452A31da08"
- PipAddress = "0x729D19f657BD0614b4985Cf1D82531c67569197B"
- RepAddress = "0xF5f94b7F9De14D43112e713835BCef2d55b76c1C"
-)
+import "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+
type IPriceFeedConfig struct {
ContractAddresses []string
@@ -28,9 +26,9 @@ type IPriceFeedConfig struct {
var PriceFeedConfig = IPriceFeedConfig{
ContractAddresses: []string{
- PepAddress,
- PipAddress,
- RepAddress,
+ shared.PepContractAddress,
+ shared.PipContractAddress,
+ shared.RepContractAddress,
},
StartingBlockNumber: 0,
EndingBlockNumber: 100,
diff --git a/pkg/transformers/price_feeds/constants.go b/pkg/transformers/price_feeds/constants.go
index 846adf74..9a407c42 100644
--- a/pkg/transformers/price_feeds/constants.go
+++ b/pkg/transformers/price_feeds/constants.go
@@ -22,7 +22,5 @@ import (
var (
ErrNoMatchingLog = errors.New("no matching log")
Ether = big.NewFloat(1e18)
- LogValueTopic0 = "0x296ba4ca62c6c21c95e828080cb8aec7481b71390585605300a8a76f9e95b527"
- MedianizerABI = `[{"constant":false,"inputs":[{"name":"owner_","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"","type":"bytes32"}],"name":"poke","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[],"name":"poke","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"compute","outputs":[{"name":"","type":"bytes32"},{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"wat","type":"address"}],"name":"set","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"wat","type":"address"}],"name":"unset","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"indexes","outputs":[{"name":"","type":"bytes12"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"next","outputs":[{"name":"","type":"bytes12"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"read","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"peek","outputs":[{"name":"","type":"bytes32"},{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"","type":"bytes12"}],"name":"values","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"min_","type":"uint96"}],"name":"setMin","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"authority_","type":"address"}],"name":"setAuthority","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[],"name":"void","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"pos","type":"bytes12"},{"name":"wat","type":"address"}],"name":"set","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"authority","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"pos","type":"bytes12"}],"name":"unset","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"next_","type":"bytes12"}],"name":"setNext","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"min","outputs":[{"name":"","type":"uint96"}],"payable":false,"stateMutability":"view","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"val","type":"bytes32"}],"name":"LogValue","type":"event"},{"anonymous":true,"inputs":[{"indexed":true,"name":"sig","type":"bytes4"},{"indexed":true,"name":"guy","type":"address"},{"indexed":true,"name":"foo","type":"bytes32"},{"indexed":true,"name":"bar","type":"bytes32"},{"indexed":false,"name":"wad","type":"uint256"},{"indexed":false,"name":"fax","type":"bytes"}],"name":"LogNote","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"authority","type":"address"}],"name":"LogSetAuthority","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"owner","type":"address"}],"name":"LogSetOwner","type":"event"}]]`
Ray = big.NewFloat(1e27)
)
diff --git a/pkg/transformers/price_feeds/converter_test.go b/pkg/transformers/price_feeds/converter_test.go
index 0c309477..89111265 100644
--- a/pkg/transformers/price_feeds/converter_test.go
+++ b/pkg/transformers/price_feeds/converter_test.go
@@ -21,6 +21,7 @@ import (
. "github.com/onsi/gomega"
"github.com/vulcanize/vulcanizedb/pkg/transformers/price_feeds"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
)
var _ = Describe("Price feed converter", func() {
@@ -31,7 +32,7 @@ var _ = Describe("Price feed converter", func() {
// https://etherscan.io/tx/0xa51a50a2adbfba4e2ab3d72dfd67a21c769f1bc8d2b180663a15500a56cde58f
log := types.Log{
Address: medianizerAddress,
- Topics: []common.Hash{common.HexToHash(price_feeds.LogValueTopic0)},
+ Topics: []common.Hash{common.HexToHash(shared.LogValueSignature)},
Data: common.FromHex("00000000000000000000000000000000000000000000001486f658319fb0c100"),
BlockNumber: blockNumber,
TxHash: common.HexToHash("0xa51a50a2adbfba4e2ab3d72dfd67a21c769f1bc8d2b180663a15500a56cde58f"),
diff --git a/pkg/transformers/price_feeds/fetcher.go b/pkg/transformers/price_feeds/fetcher.go
index ecc79d4d..480710d2 100644
--- a/pkg/transformers/price_feeds/fetcher.go
+++ b/pkg/transformers/price_feeds/fetcher.go
@@ -15,11 +15,14 @@
package price_feeds
import (
+ "math/big"
+
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
+
"github.com/vulcanize/vulcanizedb/pkg/core"
- "math/big"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
)
type IPriceFeedFetcher interface {
@@ -48,7 +51,7 @@ func (fetcher PriceFeedFetcher) FetchLogValues(blockNumber int64) ([]types.Log,
FromBlock: n,
ToBlock: n,
Addresses: addresses,
- Topics: [][]common.Hash{{common.HexToHash(LogValueTopic0)}},
+ Topics: [][]common.Hash{{common.HexToHash(shared.LogValueSignature)}},
}
return fetcher.blockChain.GetEthLogsWithCustomQuery(query)
}
diff --git a/pkg/transformers/price_feeds/fetcher_test.go b/pkg/transformers/price_feeds/fetcher_test.go
index acb0acfb..e6bb942a 100644
--- a/pkg/transformers/price_feeds/fetcher_test.go
+++ b/pkg/transformers/price_feeds/fetcher_test.go
@@ -25,6 +25,7 @@ import (
"github.com/vulcanize/vulcanizedb/pkg/fakes"
"github.com/vulcanize/vulcanizedb/pkg/transformers/price_feeds"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
)
var _ = Describe("Price fetcher", func() {
@@ -46,7 +47,7 @@ var _ = Describe("Price fetcher", func() {
FromBlock: big.NewInt(blockNumber),
ToBlock: big.NewInt(blockNumber),
Addresses: expectedAddresses,
- Topics: [][]common.Hash{{common.HexToHash(price_feeds.LogValueTopic0)}},
+ Topics: [][]common.Hash{{common.HexToHash(shared.LogValueSignature)}},
}
mockBlockChain.AssertGetEthLogsWithCustomQueryCalledWith(expectedQuery)
})
diff --git a/pkg/transformers/price_feeds/price_update.go b/pkg/transformers/price_feeds/price_update.go
index d052cda5..8b8559d2 100644
--- a/pkg/transformers/price_feeds/price_update.go
+++ b/pkg/transformers/price_feeds/price_update.go
@@ -15,8 +15,9 @@
package price_feeds
import (
- "github.com/ethereum/go-ethereum/common"
"math/big"
+
+ "github.com/ethereum/go-ethereum/common"
)
type LogValueEntity struct {
diff --git a/pkg/transformers/price_feeds/transformer_test.go b/pkg/transformers/price_feeds/transformer_test.go
index 675694c2..263dc80d 100644
--- a/pkg/transformers/price_feeds/transformer_test.go
+++ b/pkg/transformers/price_feeds/transformer_test.go
@@ -16,23 +16,25 @@ package price_feeds_test
import (
"fmt"
+ "math/big"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
+
"github.com/vulcanize/vulcanizedb/pkg/core"
"github.com/vulcanize/vulcanizedb/pkg/fakes"
"github.com/vulcanize/vulcanizedb/pkg/transformers/price_feeds"
- price_feeds2 "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data/mocks/price_feeds"
- "math/big"
+ price_feeds_mocks "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data/mocks/price_feeds"
)
var _ = Describe("Price feed transformer", func() {
It("gets missing headers for price feeds", func() {
- mockRepository := &price_feeds2.MockPriceFeedRepository{}
+ mockRepository := &price_feeds_mocks.MockPriceFeedRepository{}
transformer := price_feeds.PriceFeedTransformer{
Config: price_feeds.PriceFeedConfig,
- Fetcher: &price_feeds2.MockPriceFeedFetcher{},
+ Fetcher: &price_feeds_mocks.MockPriceFeedFetcher{},
Repository: mockRepository,
}
@@ -43,10 +45,10 @@ var _ = Describe("Price feed transformer", func() {
})
It("returns error is missing headers call returns err", func() {
- mockRepository := &price_feeds2.MockPriceFeedRepository{}
+ mockRepository := &price_feeds_mocks.MockPriceFeedRepository{}
mockRepository.SetMissingHeadersErr(fakes.FakeError)
transformer := price_feeds.PriceFeedTransformer{
- Fetcher: &price_feeds2.MockPriceFeedFetcher{},
+ Fetcher: &price_feeds_mocks.MockPriceFeedFetcher{},
Repository: mockRepository,
}
@@ -57,11 +59,11 @@ var _ = Describe("Price feed transformer", func() {
})
It("fetches logs for missing headers", func() {
- mockRepository := &price_feeds2.MockPriceFeedRepository{}
+ mockRepository := &price_feeds_mocks.MockPriceFeedRepository{}
blockNumberOne := int64(1)
blockNumberTwo := int64(2)
mockRepository.SetMissingHeaders([]core.Header{{BlockNumber: blockNumberOne}, {BlockNumber: blockNumberTwo}})
- mockFetcher := &price_feeds2.MockPriceFeedFetcher{}
+ mockFetcher := &price_feeds_mocks.MockPriceFeedFetcher{}
transformer := price_feeds.PriceFeedTransformer{
Fetcher: mockFetcher,
Repository: mockRepository,
@@ -74,9 +76,9 @@ var _ = Describe("Price feed transformer", func() {
})
It("returns err if fetcher returns err", func() {
- mockRepository := &price_feeds2.MockPriceFeedRepository{}
+ mockRepository := &price_feeds_mocks.MockPriceFeedRepository{}
mockRepository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
- mockFetcher := &price_feeds2.MockPriceFeedFetcher{}
+ mockFetcher := &price_feeds_mocks.MockPriceFeedFetcher{}
mockFetcher.SetReturnErr(fakes.FakeError)
transformer := price_feeds.PriceFeedTransformer{
Fetcher: mockFetcher,
@@ -90,10 +92,10 @@ var _ = Describe("Price feed transformer", func() {
})
It("persists model converted from log", func() {
- mockRepository := &price_feeds2.MockPriceFeedRepository{}
+ mockRepository := &price_feeds_mocks.MockPriceFeedRepository{}
headerID := int64(11111)
mockRepository.SetMissingHeaders([]core.Header{{BlockNumber: 1, Id: headerID}})
- mockFetcher := &price_feeds2.MockPriceFeedFetcher{}
+ mockFetcher := &price_feeds_mocks.MockPriceFeedFetcher{}
blockNumber := uint64(22222)
txIndex := uint(33333)
usdValue := int64(44444)
@@ -132,10 +134,10 @@ var _ = Describe("Price feed transformer", func() {
})
It("returns error if creating price feed update returns error", func() {
- mockRepository := &price_feeds2.MockPriceFeedRepository{}
+ mockRepository := &price_feeds_mocks.MockPriceFeedRepository{}
mockRepository.SetMissingHeaders([]core.Header{{BlockNumber: 1, Id: 2}})
mockRepository.SetCreateErr(fakes.FakeError)
- mockFetcher := &price_feeds2.MockPriceFeedFetcher{}
+ mockFetcher := &price_feeds_mocks.MockPriceFeedFetcher{}
mockFetcher.SetReturnLogs([]types.Log{{}})
transformer := price_feeds.PriceFeedTransformer{
Fetcher: mockFetcher,
diff --git a/pkg/transformers/shared/constants.go b/pkg/transformers/shared/constants.go
index 54a5e1d7..1cb63992 100644
--- a/pkg/transformers/shared/constants.go
+++ b/pkg/transformers/shared/constants.go
@@ -15,7 +15,43 @@
package shared
var (
- FlipperABI = "[{\"constant\":true,\"inputs\":[],\"name\":\"era\",\"outputs\":[{\"name\":\"\",\"type\":\"uint48\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"lad\",\"type\":\"address\"},{\"name\":\"gal\",\"type\":\"address\"},{\"name\":\"tab\",\"type\":\"uint256\"},{\"name\":\"lot\",\"type\":\"uint256\"},{\"name\":\"bid\",\"type\":\"uint256\"}],\"name\":\"kick\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"vat\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"bids\",\"outputs\":[{\"name\":\"bid\",\"type\":\"uint256\"},{\"name\":\"lot\",\"type\":\"uint256\"},{\"name\":\"guy\",\"type\":\"address\"},{\"name\":\"tic\",\"type\":\"uint48\"},{\"name\":\"end\",\"type\":\"uint48\"},{\"name\":\"lad\",\"type\":\"address\"},{\"name\":\"gal\",\"type\":\"address\"},{\"name\":\"tab\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"id\",\"type\":\"uint256\"},{\"name\":\"lot\",\"type\":\"uint256\"},{\"name\":\"bid\",\"type\":\"uint256\"}],\"name\":\"tend\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"ttl\",\"outputs\":[{\"name\":\"\",\"type\":\"uint48\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"id\",\"type\":\"uint256\"},{\"name\":\"lot\",\"type\":\"uint256\"},{\"name\":\"bid\",\"type\":\"uint256\"}],\"name\":\"dent\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"beg\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"ilk\",\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"deal\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"tau\",\"outputs\":[{\"name\":\"\",\"type\":\"uint48\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"kicks\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"id\",\"type\":\"uint256\"}],\"name\":\"tick\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"name\":\"vat_\",\"type\":\"address\"},{\"name\":\"ilk_\",\"type\":\"bytes32\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"src\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"dst\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"wad\",\"type\":\"int256\"}],\"name\":\"Move\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"src\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"dst\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"wad\",\"type\":\"int256\"},{\"indexed\":false,\"name\":\"act\",\"type\":\"bytes32\"}],\"name\":\"Push\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"ilk\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"what\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"risk\",\"type\":\"int256\"}],\"name\":\"FileIlk\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"what\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"addr\",\"type\":\"address\"}],\"name\":\"FileAddr\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"what\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"risk\",\"type\":\"int256\"}],\"name\":\"FileInt\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"what\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"risk\",\"type\":\"uint256\"}],\"name\":\"FileUint\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"ilk\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"guy\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"gem\",\"type\":\"int256\"},{\"indexed\":false,\"name\":\"dink\",\"type\":\"int256\"},{\"indexed\":false,\"name\":\"dart\",\"type\":\"int256\"},{\"indexed\":false,\"name\":\"ink\",\"type\":\"int256\"},{\"indexed\":false,\"name\":\"art\",\"type\":\"int256\"},{\"indexed\":false,\"name\":\"era\",\"type\":\"uint48\"}],\"name\":\"Frob\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"ilk\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"guy\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"gem\",\"type\":\"int256\"},{\"indexed\":false,\"name\":\"ink\",\"type\":\"int256\"},{\"indexed\":false,\"name\":\"art\",\"type\":\"int256\"},{\"indexed\":false,\"name\":\"era\",\"type\":\"uint48\"},{\"indexed\":false,\"name\":\"tab\",\"type\":\"int256\"},{\"indexed\":false,\"name\":\"flip\",\"type\":\"uint256\"}],\"name\":\"Bite\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"ilk\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"lad\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"wad\",\"type\":\"int256\"},{\"indexed\":false,\"name\":\"gem\",\"type\":\"int256\"}],\"name\":\"Slip\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"vat\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"ilk\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"lot\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"bid\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"guy\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"gal\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"end\",\"type\":\"uint48\"},{\"indexed\":false,\"name\":\"era\",\"type\":\"uint48\"},{\"indexed\":false,\"name\":\"lad\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"tab\",\"type\":\"uint256\"}],\"name\":\"FlipKick\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"pie\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"gem\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"lot\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"bid\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"guy\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"vow\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"end\",\"type\":\"uint48\"},{\"indexed\":false,\"name\":\"era\",\"type\":\"uint48\"}],\"name\":\"FlopKick\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"pie\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"gem\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"lot\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"bid\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"guy\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"gal\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"end\",\"type\":\"uint48\"},{\"indexed\":false,\"name\":\"era\",\"type\":\"uint48\"}],\"name\":\"FlapKick\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"lot\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"bid\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"guy\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"tic\",\"type\":\"uint48\"},{\"indexed\":false,\"name\":\"era\",\"type\":\"uint48\"}],\"name\":\"Tend\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"lot\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"bid\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"guy\",\"type\":\"address\"},{\"indexed\":false,\"name\":\"tic\",\"type\":\"uint48\"},{\"indexed\":false,\"name\":\"era\",\"type\":\"uint48\"}],\"name\":\"Dent\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"name\":\"id\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"era\",\"type\":\"uint48\"}],\"name\":\"Deal\",\"type\":\"event\"}]"
- FlipKickSignature = "0x8828a22eb6a18623309ad55592866c4b077989e9e8a25e1b85f9bf6f7282520f"
- TendSignature = "0xd4aef477d7912041a69c5b85f2d78b618c76e40a4a92b91122c85ab5b404a64a"
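+ // DataItemLength is the byte length of one EVM word; ABI-encoded log data is packed in 32-byte items.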
+ DataItemLength = 32
+
+ CatABI = "[{\"constant\":true,\"inputs\":[],\"name\":\"vat\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"vow\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"flips\",\"outputs\":[{\"name\":\"ilk\",\"type\":\"bytes32\"},{\"name\":\"lad\",\"type\":\"bytes32\"},{\"name\":\"ink\",\"type\":\"uint256\"},{\"name\":\"tab\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"nflip\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"live\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"\",\"type\":\"address\"}],\"name\":\"wards\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"ilks\",\"outputs\":[{\"name\":\"flip\",\"type\":\"address\"},{\"name\":\"chop\",\"type\":\"uint256\"},{\"name\":\"lump\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"pit\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"name\":\"vat_\",\"type\":\"address\"},{\"name\":\"pit_\",\"type\":\"address\"},{\"name\":\"vow_\",\"type\":\"address\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"ilk\",\"type\":\"bytes32\"},{\"indexed\":true,\"name\":\"lad\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"ink\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"art\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"tab\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"flip\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"iArt\",\"type\":\"uint256\"}],\"name\":\"Bite\",\"type\":\"event\"},{\"anonymous\":true,\"inputs\":[{\"indexed\":true,\"name\":\"sig\",\"type\":\"bytes4\"},{\"indexed\":true,\"name\":\"guy\",\"type\":\"address\"},{\"indexed\":true,\"name\":\"foo\",\"type\":\"bytes32\"},{\"indexed\":true,\"name\":\"bar\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"wad\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"fax\",\"type\":\"bytes\"}],\"name\":\"LogNote\",\"type\":\"event\"},{\"constant\":false,\"inputs\":[{\"name\":\"guy\",\"type\":\"address\"}],\"name\":\"rely\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"guy\",\"type\":\"address\"}],\"name\":\"deny\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"ilk\",\"type\":\"bytes32\"},{\"name\":\"what\",\"type\":\"bytes32\"},{\"name\":\"data\",\"type\":\"uint256\"}],\"name\":\"file\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"ilk\",\"type\":\"bytes32\"},{\"name\":\"what\",\"type\":\"bytes32\"},{\"name\":\"flip\",\"type\":\"address\"}],\"name\":\"file\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"ilk\",\"type\":\"bytes32\"},{\"name\":\"lad\",\"type\":\"bytes32\"}],\"name\":\"bite\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"n\",\"type\":\"uint256\"},{\"name\":\"wad\",\"type\":\"uint256\"}],\"name\":\"flip\",\"outputs\":[{\"name\":\"id\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]"
+ FlipperABI = `[{"constant":true,"inputs":[{"name":"","type":"uint256"}],"name":"bids","outputs":[{"name":"bid","type":"uint256"},{"name":"lot","type":"uint256"},{"name":"guy","type":"address"},{"name":"tic","type":"uint48"},{"name":"end","type":"uint48"},{"name":"urn","type":"bytes32"},{"name":"gal","type":"address"},{"name":"tab","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0x4423c5f1"},{"constant":true,"inputs":[],"name":"ttl","outputs":[{"name":"","type":"uint48"}],"payable":false,"stateMutability":"view","type":"function","signature":"0x4e8b1dd5"},{"constant":true,"inputs":[],"name":"gem","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function","signature":"0x7bd2bea7"},{"constant":true,"inputs":[],"name":"beg","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0x7d780d82"},{"constant":true,"inputs":[],"name":"tau","outputs":[{"name":"","type":"uint48"}],"payable":false,"stateMutability":"view","type":"function","signature":"0xcfc4af55"},{"constant":true,"inputs":[],"name":"kicks","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0xcfdd3302"},{"constant":true,"inputs":[],"name":"dai","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function","signature":"0xf4b9fa75"},{"inputs":[{"name":"dai_","type":"address"},{"name":"gem_","type":"address"}],"payable":false,"stateMutability":"nonpayable","type":"constructor","signature":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"name":"id","type":"uint256"},{"indexed":false,"name":"lot","type":"uint256"},{"indexed":false,"name":"bid","type":"uint256"},{"indexed":false,"name":"gal","type":"address"},{"indexed":false,"name":"end","type":"uint48"},{"indexed":true,"name":"urn","type":"bytes32"},{"indexed":false,"name":"tab","type":"uint256"}],"name":"Kick","type":"event","signature":"0xbac86238bdba81d21995024470425ecb370078fa62b7271b90cf28cbd1e3e87e"},{"anonymous":true,"inputs":[{"indexed":true,"name":"sig","type":"bytes4"},{"indexed":true,"name":"guy","type":"address"},{"indexed":true,"name":"foo","type":"bytes32"},{"indexed":true,"name":"bar","type":"bytes32"},{"indexed":false,"name":"wad","type":"uint256"},{"indexed":false,"name":"fax","type":"bytes"}],"name":"LogNote","type":"event","signature":"0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31"},{"constant":true,"inputs":[],"name":"era","outputs":[{"name":"","type":"uint48"}],"payable":false,"stateMutability":"view","type":"function","signature":"0x143e55e0"},{"constant":false,"inputs":[{"name":"urn","type":"bytes32"},{"name":"gal","type":"address"},{"name":"tab","type":"uint256"},{"name":"lot","type":"uint256"},{"name":"bid","type":"uint256"}],"name":"kick","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0xeae19d9e"},{"constant":false,"inputs":[{"name":"id","type":"uint256"}],"name":"tick","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0xfc7b6aee"},{"constant":false,"inputs":[{"name":"id","type":"uint256"},{"name":"lot","type":"uint256"},{"name":"bid","type":"uint256"}],"name":"tend","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x4b43ed12"},{"constant":false,"inputs":[{"name":"id","type":"uint256"},{"name":"lot","type":"uint256"},{"name":"bid","type":"uint256"}],"name":"dent","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x5ff3a382"},{"constant":false,"inputs":[{"name":"id","type":"uint256"}],"name":"deal","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0xc959c42b"}]`
+ MedianizerABI = `[{"constant":false,"inputs":[{"name":"owner_","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"","type":"bytes32"}],"name":"poke","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[],"name":"poke","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"compute","outputs":[{"name":"","type":"bytes32"},{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"wat","type":"address"}],"name":"set","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"wat","type":"address"}],"name":"unset","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"indexes","outputs":[{"name":"","type":"bytes12"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"next","outputs":[{"name":"","type":"bytes12"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"read","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"peek","outputs":[{"name":"","type":"bytes32"},{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"","type":"bytes12"}],"name":"values","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"min_","type":"uint96"}],"name":"setMin","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"authority_","type":"address"}],"name":"setAuthority","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[],"name":"void","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"pos","type":"bytes12"},{"name":"wat","type":"address"}],"name":"set","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"authority","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"pos","type":"bytes12"}],"name":"unset","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"next_","type":"bytes12"}],"name":"setNext","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"min","outputs":[{"name":"","type":"uint96"}],"payable":false,"stateMutability":"view","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"val","type":"bytes32"}],"name":"LogValue","type":"event"},{"anonymous":true,"inputs":[{"indexed":true,"name":"sig","type":"bytes4"},{"indexed":true,"name":"guy","type":"address"},{"indexed":true,"name":"foo","type":"bytes32"},{"indexed":true,"name":"bar","type":"bytes32"},{"indexed":false,"name":"wad","type":"uint256"},{"indexed":false,"name":"fax","type":"bytes"}],"name":
"LogNote","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"authority","type":"address"}],"name":"LogSetAuthority","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"owner","type":"address"}],"name":"LogSetOwner","type":"event"}]]`
+ PitABI = `[{"constant":true,"inputs":[],"name":"vat","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function","signature":"0x36569e77"},{"constant":true,"inputs":[],"name":"live","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0x957aa58c"},{"constant":true,"inputs":[],"name":"drip","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function","signature":"0x9f678cca"},{"constant":true,"inputs":[],"name":"Line","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0xbabe8a3f"},{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"wards","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0xbf353dbb"},{"constant":true,"inputs":[{"name":"","type":"bytes32"}],"name":"ilks","outputs":[{"name":"spot","type":"uint256"},{"name":"line","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0xd9638d36"},{"inputs":[{"name":"vat_","type":"address"}],"payable":false,"stateMutability":"nonpayable","type":"constructor","signature":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"name":"ilk","type":"bytes32"},{"indexed":true,"name":"urn","type":"bytes32"},{"indexed":false,"name":"ink","type":"uint256"},{"indexed":false,"name":"art","type":"uint256"},{"indexed":false,"name":"dink","type":"int256"},{"indexed":false,"name":"dart","type":"int256"},{"indexed":false,"name":"iArt","type":"uint256"}],"name":"Frob","type":"event","signature":"0xb2afa28318bcc689926b52835d844de174ef8de97e982a85c0199d584920791b"},{"anonymous":true,"inputs":[{"indexed":true,"name":"sig","type":"bytes4"},{"indexed":true,"name":"guy","type":"address"},{"indexed":true,"name":"foo","type":"bytes32"},{"indexed":true,"name":"bar","type":"bytes32"},{"indexed":false,"name":"wad","type":"uint256"},{"indexed":false,"name":"fax","type":"bytes"}],"name":"LogNote","type":"event","signature":"0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31"},{"constant":false,"inputs":[{"name":"guy","type":"address"}],"name":"rely","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x65fae35e"},{"constant":false,"inputs":[{"name":"guy","type":"address"}],"name":"deny","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x9c52a7f1"},{"constant":false,"inputs":[{"name":"ilk","type":"bytes32"},{"name":"what","type":"bytes32"},{"name":"data","type":"uint256"}],"name":"file","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x1a0b287e"},{"constant":false,"inputs":[{"name":"what","type":"bytes32"},{"name":"data","type":"uint256"}],"name":"file","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x29ae8114"},{"constant":false,"inputs":[{"name":"what","type":"bytes32"},{"name":"data","type":"address"}],"name":"file","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0xd4e8be83"},{"constant":false,"inputs":[{"name":"ilk","type":"bytes32"},{"name":"dink","type":"int256"},{"name":"dart","type":"int256"}],"name":"frob","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x5a984ded"}]`
+ VatABI = `[{"constant":true,"inputs":[],"name":"debt","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0x0dca59c1"},{"constant":true,"inputs":[{"name":"","type":"bytes32"},{"name":"","type":"bytes32"}],"name":"urns","outputs":[{"name":"ink","type":"uint256"},{"name":"art","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0x26e27482"},{"constant":true,"inputs":[],"name":"vice","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0x2d61a355"},{"constant":true,"inputs":[{"name":"","type":"bytes32"}],"name":"sin","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0xa60f1d3e"},{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"wards","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0xbf353dbb"},{"constant":true,"inputs":[{"name":"","type":"bytes32"},{"name":"","type":"bytes32"}],"name":"gem","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0xc0912683"},{"constant":true,"inputs":[{"name":"","type":"bytes32"}],"name":"ilks","outputs":[{"name":"take","type":"uint256"},{"name":"rate","type":"uint256"},{"name":"Ink","type":"uint256"},{"name":"Art","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0xd9638d36"},{"constant":true,"inputs":[{"name":"","type":"bytes32"}],"name":"dai","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0xf53e4e69"},{"inputs":[],"payable":false,"stateMutability":"nonpayable","type":"constructor","signature":"constructor"},{"anonymous":true,"inputs":[{"indexed":true,"name":"sig","type":"bytes4"},{"indexed":true,"name":"foo","type":"bytes32"},{"indexed":true,"name":"bar","type":"bytes32"},{"indexed":true,"name":"too","type":"bytes32"},{"indexed":false,"name":"fax","type":"bytes"}],"name":"Note","type":"event","signature":"0x8c2dbbc2b33ffaa77c104b777e574a8a4ff79829dfee8b66f4dc63e3f8067152"},{"constant":false,"inputs":[{"name":"guy","type":"address"}],"name":"rely","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x65fae35e"},{"constant":false,"inputs":[{"name":"guy","type":"address"}],"name":"deny","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x9c52a7f1"},{"constant":false,"inputs":[{"name":"ilk","type":"bytes32"}],"name":"init","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x3b663195"},{"constant":false,"inputs":[{"name":"ilk","type":"bytes32"},{"name":"guy","type":"bytes32"},{"name":"rad","type":"int256"}],"name":"slip","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x42066cbb"},{"constant":false,"inputs":[{"name":"ilk","type":"bytes32"},{"name":"src","type":"bytes32"},{"name":"dst","type":"bytes32"},{"name":"rad","type":"int256"}],"name":"flux","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0xa6e41821"},{"constant":false,"inputs":[{"name":"src","type":"bytes32"},{"name":"dst","type":"bytes32"},{"name":"rad","type":"int256"}],"name":"move","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x78f19470"},{"constant":false,"inputs":[{"name":"i","type":"bytes3
2"},{"name":"u","type":"bytes32"},{"name":"v","type":"bytes32"},{"name":"w","type":"bytes32"},{"name":"dink","type":"int256"},{"name":"dart","type":"int256"}],"name":"tune","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x5dd6471a"},{"constant":false,"inputs":[{"name":"i","type":"bytes32"},{"name":"u","type":"bytes32"},{"name":"v","type":"bytes32"},{"name":"w","type":"bytes32"},{"name":"dink","type":"int256"},{"name":"dart","type":"int256"}],"name":"grab","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x3690ae4c"},{"constant":false,"inputs":[{"name":"u","type":"bytes32"},{"name":"v","type":"bytes32"},{"name":"rad","type":"int256"}],"name":"heal","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x990a5f63"},{"constant":false,"inputs":[{"name":"i","type":"bytes32"},{"name":"u","type":"bytes32"},{"name":"rate","type":"int256"}],"name":"fold","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0xe6a6a64d"},{"constant":false,"inputs":[{"name":"i","type":"bytes32"},{"name":"u","type":"bytes32"},{"name":"take","type":"int256"}],"name":"toll","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x09b7a0b5"}]`
+
+ // temporary addresses from local Ganache deployment
+ CatContractAddress = "0xe0f0fa6982c59d8aa4ae0134bfe048327bd788cacf758b643ca41f055ffce76c"
+ FlipperContractAddress = "0x6b59c42097e2fff7cad96cb08ceefd601081ad9c"
+ PepContractAddress = "0x99041F808D598B782D5a3e498681C2452A31da08"
+ PipContractAddress = "0x729D19f657BD0614b4985Cf1D82531c67569197B"
+ PitContractAddress = "0xff3f2400f1600f3f493a9a92704a29b96795af1a"
+ RepContractAddress = "0xF5f94b7F9De14D43112e713835BCef2d55b76c1C"
+ VatContractAddress = "0x239E6f0AB02713f1F8AA90ebeDeD9FC66Dc96CD6"
+
+ biteMethod = GetSolidityMethodSignature(CatABI, "Bite")
+ dentMethod = GetSolidityMethodSignature(FlipperABI, "dent")
+ flipKickMethod = GetSolidityMethodSignature(FlipperABI, "Kick")
+ frobMethod = GetSolidityMethodSignature(PitABI, "Frob")
+ //TODO: get these pit file method signatures directly from the ABI
+ pitFileDebtCeilingMethod = "file(bytes32,uint256)"
+ pitFileIlkMethod = "file(bytes32,bytes32,uint256)"
+ pitFileStabilityFeeMethod = GetSolidityMethodSignature(PitABI, "file")
+ tendMethod = GetSolidityMethodSignature(FlipperABI, "tend")
+ logValueMethod = GetSolidityMethodSignature(MedianizerABI, "LogValue")
+ vatInitMethod = GetSolidityMethodSignature(VatABI, "init")
+
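+	// Regular events are identified by the full Keccak-256 topic0 hash;
+	// LogNote records by the padded 4-byte selector (see GetLogNoteSignature).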
+ BiteSignature = GetEventSignature(biteMethod)
+ DentFunctionSignature = GetLogNoteSignature(dentMethod)
+ FlipKickSignature = GetEventSignature(flipKickMethod)
+ FrobSignature = GetEventSignature(frobMethod)
+ LogValueSignature = GetEventSignature(logValueMethod)
+ PitFileDebtCeilingSignature = GetLogNoteSignature(pitFileDebtCeilingMethod)
+ PitFileIlkSignature = GetLogNoteSignature(pitFileIlkMethod)
+ PitFileStabilityFeeSignature = GetLogNoteSignature(pitFileStabilityFeeMethod)
+ TendFunctionSignature = GetLogNoteSignature(tendMethod)
+ VatInitSignature = GetLogNoteSignature(vatInitMethod)
)
diff --git a/pkg/transformers/shared/event_signature_generator.go b/pkg/transformers/shared/event_signature_generator.go
new file mode 100644
index 00000000..4d54541d
--- /dev/null
+++ b/pkg/transformers/shared/event_signature_generator.go
@@ -0,0 +1,56 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package shared
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/ethereum/go-ethereum/accounts/abi"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/vulcanize/vulcanizedb/pkg/geth"
+)
+
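+// GetEventSignature returns the hex-encoded Keccak-256 hash of a canonical
+// Solidity event signature; this is the topic0 value emitted for a regular
+// (non-anonymous) event. For example, GetEventSignature("LogValue(bytes32)")
+// returns "0x296ba4ca62c6c21c95e828080cb8aec7481b71390585605300a8a76f9e95b527".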
+func GetEventSignature(solidityMethodSignature string) string {
+ eventSignature := []byte(solidityMethodSignature)
+ hash := crypto.Keccak256Hash(eventSignature)
+ return hash.Hex()
+}
+
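+// GetLogNoteSignature returns the topic0 value for an anonymous LogNote
+// record: the first four bytes of the Keccak-256 hash of the invoked method's
+// signature (its selector), right-padded with zeros to a full 32-byte topic.
+// For example, GetLogNoteSignature("tend(uint256,uint256,uint256)") returns
+// "0x4b43ed1200000000000000000000000000000000000000000000000000000000".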
+func GetLogNoteSignature(solidityMethodSignature string) string {
+ rawSignature := GetEventSignature(solidityMethodSignature)
+ return rawSignature[:10] + "00000000000000000000000000000000000000000000000000000000"
+}
+
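+// GetSolidityMethodSignature looks up a method or event by name in the given
+// ABI and returns its canonical signature, e.g. "tend(uint256,uint256,uint256)".
+// It returns the empty string when the ABI cannot be parsed or the name is not
+// found. The parsed ABI keys methods by name, so only one overload of an
+// overloaded method such as Pit's file is visible here, which is likely why
+// some of the pit file signatures are hard-coded in the shared constants.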
+func GetSolidityMethodSignature(abi, name string) string {
+ parsedAbi, _ := geth.ParseAbi(abi)
+
+ if method, ok := parsedAbi.Methods[name]; ok {
+ return method.Sig()
+ } else if event, ok := parsedAbi.Events[name]; ok {
+ return getEventSignature(event)
+ }
+ return ""
+}
+
+func getEventSignature(event abi.Event) string {
+ types := make([]string, len(event.Inputs))
+ for i, input := range event.Inputs {
+ types[i] = input.Type.String()
+ }
+
+ return fmt.Sprintf("%v(%v)", event.Name, strings.Join(types, ","))
+}
\ No newline at end of file
diff --git a/pkg/transformers/shared/event_signature_generator_test.go b/pkg/transformers/shared/event_signature_generator_test.go
new file mode 100644
index 00000000..251de8d3
--- /dev/null
+++ b/pkg/transformers/shared/event_signature_generator_test.go
@@ -0,0 +1,148 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package shared_test
+
+import (
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+)
+
+var _ = Describe("Event signature generator", func() {
+ Describe("generating non-anonymous event signatures", func() {
+ It("generates bite event signature", func() {
+ expected := "0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"
+ actual := shared.GetEventSignature("Bite(bytes32,bytes32,uint256,uint256,uint256,uint256,uint256)")
+
+ Expect(expected).To(Equal(actual))
+ })
+
+ It("generates frob event signature", func() {
+ expected := "0xb2afa28318bcc689926b52835d844de174ef8de97e982a85c0199d584920791b"
+ actual := shared.GetEventSignature("Frob(bytes32,bytes32,uint256,uint256,int256,int256,uint256)")
+
+ Expect(expected).To(Equal(actual))
+ })
+
+ It("generates flip kick event signature", func() {
+ expected := "0xbac86238bdba81d21995024470425ecb370078fa62b7271b90cf28cbd1e3e87e"
+ actual := shared.GetEventSignature("Kick(uint256,uint256,uint256,address,uint48,bytes32,uint256)")
+
+ Expect(expected).To(Equal(actual))
+ })
+
+ It("generates log value event signature", func() {
+ expected := "0x296ba4ca62c6c21c95e828080cb8aec7481b71390585605300a8a76f9e95b527"
+ actual := shared.GetEventSignature("LogValue(bytes32)")
+
+ Expect(expected).To(Equal(actual))
+ })
+ })
+
+ Describe("generating LogNote event signatures", func() {
+ It("generates flip tend event signature", func() {
+ expected := "0x4b43ed1200000000000000000000000000000000000000000000000000000000"
+ actual := shared.GetLogNoteSignature("tend(uint256,uint256,uint256)")
+
+ Expect(expected).To(Equal(actual))
+ })
+
+ It("generates pit file event signature for overloaded function with three arguments", func() {
+ expected := "0x1a0b287e00000000000000000000000000000000000000000000000000000000"
+ actual := shared.GetLogNoteSignature("file(bytes32,bytes32,uint256)")
+
+ Expect(expected).To(Equal(actual))
+ })
+
+ It("generates pit file event signature for overloaded function with two arguments", func() {
+ expected := "0x29ae811400000000000000000000000000000000000000000000000000000000"
+ actual := shared.GetLogNoteSignature("file(bytes32,uint256)")
+
+ Expect(expected).To(Equal(actual))
+ })
+
+ It("generates pit file event signature for overloaded function with two different arguments", func() {
+ expected := "0xd4e8be8300000000000000000000000000000000000000000000000000000000"
+ actual := shared.GetLogNoteSignature("file(bytes32,address)")
+
+ Expect(expected).To(Equal(actual))
+ })
+ })
+
+ Describe("getting the solidity method/event signature from the abi", func() {
+ Describe("it handles methods", func() {
+ It("gets the flip dent method signature", func() {
+ expected := "dent(uint256,uint256,uint256)"
+ actual := shared.GetSolidityMethodSignature(shared.FlipperABI, "dent")
+
+ Expect(expected).To(Equal(actual))
+ })
+
+ It("gets the flip tend method signature", func() {
+ expected := "tend(uint256,uint256,uint256)"
+ actual := shared.GetSolidityMethodSignature(shared.FlipperABI, "tend")
+
+ Expect(expected).To(Equal(actual))
+ })
+
+ It("gets the pit file deb ceiling method signature", func() {
+ expected := "file(bytes32,address)"
+ actual := shared.GetSolidityMethodSignature(shared.PitABI, "file")
+
+ Expect(expected).To(Equal(actual))
+ })
+
+ It("gets the vat init method signature", func() {
+ expected := "init(bytes32)"
+ actual := shared.GetSolidityMethodSignature(shared.VatABI, "init")
+
+ Expect(expected).To(Equal(actual))
+ })
+
+ })
+
+ Describe("it handles events", func() {
+ It("gets the Bite event signature", func() {
+ expected := "Bite(bytes32,bytes32,uint256,uint256,uint256,uint256,uint256)"
+ actual := shared.GetSolidityMethodSignature(shared.CatABI, "Bite")
+
+ Expect(expected).To(Equal(actual))
+ })
+
+ It("gets the flip Kick event signature", func() {
+ expected := "Kick(uint256,uint256,uint256,address,uint48,bytes32,uint256)"
+ actual := shared.GetSolidityMethodSignature(shared.FlipperABI, "Kick")
+
+ Expect(expected).To(Equal(actual))
+ })
+
+ It("gets the pit frob event signature", func() {
+ expected := "Frob(bytes32,bytes32,uint256,uint256,int256,int256,uint256)"
+ actual := shared.GetSolidityMethodSignature(shared.PitABI, "Frob")
+
+ Expect(expected).To(Equal(actual))
+ })
+
+ It("gets the log value method signature", func() {
+ expected := "LogValue(bytes32)"
+ actual := shared.GetSolidityMethodSignature(shared.MedianizerABI, "LogValue")
+
+ Expect(expected).To(Equal(actual))
+
+ })
+ })
+ })
+})
diff --git a/pkg/transformers/shared/fetcher.go b/pkg/transformers/shared/log_fetcher.go
similarity index 100%
rename from pkg/transformers/shared/fetcher.go
rename to pkg/transformers/shared/log_fetcher.go
diff --git a/pkg/transformers/shared/fetcher_test.go b/pkg/transformers/shared/log_fetcher_test.go
similarity index 100%
rename from pkg/transformers/shared/fetcher_test.go
rename to pkg/transformers/shared/log_fetcher_test.go
diff --git a/pkg/transformers/shared/transformer.go b/pkg/transformers/shared/transformer.go
index 56f8fc40..9632e6cd 100644
--- a/pkg/transformers/shared/transformer.go
+++ b/pkg/transformers/shared/transformer.go
@@ -28,7 +28,7 @@ type Transformer interface {
type TransformerInitializer func(db *postgres.DB, blockChain core.BlockChain) Transformer
type TransformerConfig struct {
- ContractAddresses string
+ ContractAddress string
ContractAbi string
Topics []string
StartingBlockNumber int64
diff --git a/pkg/transformers/utilities/utils.go b/pkg/transformers/shared/utilities.go
similarity index 97%
rename from pkg/transformers/utilities/utils.go
rename to pkg/transformers/shared/utilities.go
index 6d14caa8..7130a0b7 100644
--- a/pkg/transformers/utilities/utils.go
+++ b/pkg/transformers/shared/utilities.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-package utilities
+package shared
import "math/big"
diff --git a/pkg/transformers/tend/config.go b/pkg/transformers/tend/config.go
index bbb9bd92..f8fcc847 100644
--- a/pkg/transformers/tend/config.go
+++ b/pkg/transformers/tend/config.go
@@ -17,9 +17,9 @@ package tend
import "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
var TendConfig = shared.TransformerConfig{
- ContractAddresses: "0x08cb6176addcca2e1d1ffe21bee464b72ee4cd8d", //this is a temporary address deployed locally
+ ContractAddress: shared.FlipperContractAddress,
ContractAbi: shared.FlipperABI,
- Topics: []string{shared.TendSignature},
+ Topics: []string{shared.TendFunctionSignature},
StartingBlockNumber: 0,
EndingBlockNumber: 100,
}
diff --git a/pkg/transformers/tend/converter.go b/pkg/transformers/tend/converter.go
index dbb85485..7be49f53 100644
--- a/pkg/transformers/tend/converter.go
+++ b/pkg/transformers/tend/converter.go
@@ -17,60 +17,67 @@ package tend
import (
"encoding/json"
"errors"
- "time"
+ "math/big"
- "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/vulcanize/vulcanizedb/pkg/geth"
- "github.com/vulcanize/vulcanizedb/pkg/transformers/utilities"
)
type Converter interface {
- ToEntity(contractAddress string, contractAbi string, ethLog types.Log) (TendEntity, error)
- ToModel(entity TendEntity) (TendModel, error)
+ Convert(contractAddress string, contractAbi string, ethLog types.Log) (TendModel, error)
}
type TendConverter struct{}
-func (c TendConverter) ToEntity(contractAddress string, contractAbi string, ethLog types.Log) (TendEntity, error) {
- entity := TendEntity{}
- address := common.HexToAddress(contractAddress)
- abi, err := geth.ParseAbi(contractAbi)
-
- if err != nil {
- return entity, err
- }
-
- contract := bind.NewBoundContract(address, abi, nil, nil, nil)
- err = contract.UnpackLog(&entity, "Tend", ethLog)
- if err != nil {
- return entity, err
- }
- entity.TransactionIndex = ethLog.TxIndex
- entity.Raw = ethLog
- return entity, nil
+func NewTendConverter() TendConverter {
+ return TendConverter{}
}
-func (c TendConverter) ToModel(entity TendEntity) (TendModel, error) {
- if entity.Id == nil {
- return TendModel{}, errors.New("Tend log ID cannot be nil.")
- }
-
- rawJson, err := json.Marshal(entity.Raw)
+func (c TendConverter) Convert(contractAddress string, contractAbi string, ethLog types.Log) (TendModel, error) {
+ err := validateLog(ethLog)
if err != nil {
return TendModel{}, err
}
- era := utilities.ConvertNilToZeroTimeValue(entity.Era)
+
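+	// A tend LogNote stores its arguments in the indexed topics: topics[1]
+	// holds guy, topics[2] the bid id, and topics[3] the lot. The bid value
+	// is not indexed, so it is read from the last 32-byte word of the log
+	// data.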
+ bidId := ethLog.Topics[2].Big()
+ guy := common.HexToAddress(ethLog.Topics[1].Hex()).String()
+ lot := ethLog.Topics[3].Big().String()
+
+ lastDataItemStartIndex := len(ethLog.Data) - 32
+ lastItem := ethLog.Data[lastDataItemStartIndex:]
+ last := big.NewInt(0).SetBytes(lastItem)
+ bidValue := last.String()
+ tic := "0"
+ //TODO: it is likely that the tic value will need to be added to an emitted event,
+ //so this will need to be updated at that point
+ transactionIndex := ethLog.TxIndex
+
+ rawJson, err := json.Marshal(ethLog)
+ if err != nil {
+ return TendModel{}, err
+ }
+ raw := string(rawJson)
+
return TendModel{
- Id: utilities.ConvertNilToEmptyString(entity.Id.String()),
- Lot: utilities.ConvertNilToEmptyString(entity.Lot.String()),
- Bid: utilities.ConvertNilToEmptyString(entity.Bid.String()),
- Guy: entity.Guy[:],
- Tic: utilities.ConvertNilToEmptyString(entity.Tic.String()),
- Era: time.Unix(era, 0),
- TransactionIndex: entity.TransactionIndex,
- Raw: string(rawJson),
+ BidId: bidId.String(),
+ Lot: lot,
+ Bid: bidValue,
+ Guy: guy,
+ Tic: tic,
+ TransactionIndex: transactionIndex,
+ Raw: raw,
}, nil
}
+
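+// validateLog rejects logs that cannot represent a tend LogNote: the data
+// payload must be non-empty (the converter reads its final 32-byte word) and
+// at least four topics are required to recover guy, bid id, and lot.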
+func validateLog(ethLog types.Log) error {
+	if len(ethLog.Data) == 0 {
+ return errors.New("tend log note data is empty")
+ }
+
+ if len(ethLog.Topics) < 4 {
+ return errors.New("tend log does not contain expected topics")
+ }
+
+ return nil
+}
diff --git a/pkg/transformers/tend/converter_test.go b/pkg/transformers/tend/converter_test.go
index f327cdf0..decc5450 100644
--- a/pkg/transformers/tend/converter_test.go
+++ b/pkg/transformers/tend/converter_test.go
@@ -15,14 +15,10 @@
package tend_test
import (
- "encoding/json"
- "math/big"
- "time"
-
- "github.com/ethereum/go-ethereum/core/types"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
+ "github.com/ethereum/go-ethereum/common"
"github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
"github.com/vulcanize/vulcanizedb/pkg/transformers/tend"
"github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
@@ -30,73 +26,35 @@ import (
var _ = Describe("Tend TendConverter", func() {
var converter tend.TendConverter
- var emptyEntity tend.TendEntity
- var testEntity tend.TendEntity
BeforeEach(func() {
- converter = tend.TendConverter{}
- emptyEntity = tend.TendEntity{}
- testEntity = test_data.TendEntity
+ converter = tend.NewTendConverter()
})
- Describe("ToEntity", func() {
- It("converts a log to an entity", func() {
- entity, err := converter.ToEntity(test_data.FlipAddress, shared.FlipperABI, test_data.TendLog)
-
- Expect(err).NotTo(HaveOccurred())
- Expect(entity).To(Equal(testEntity))
- })
-
- It("returns an error if there is a failure in parsing the abi", func() {
- malformedAbi := "bad"
- entity, err := converter.ToEntity(test_data.FlipAddress, malformedAbi, test_data.TendLog)
-
- Expect(err).To(HaveOccurred())
- Expect(err.Error()).To(ContainSubstring("invalid abi"))
- Expect(entity).To(Equal(emptyEntity))
- })
-
- It("returns an error if there is a failure unpacking the log", func() {
- incompleteAbi := "[{}]"
- entity, err := converter.ToEntity(test_data.FlipAddress, incompleteAbi, test_data.TendLog)
-
- Expect(err).To(HaveOccurred())
- Expect(err.Error()).To(ContainSubstring("abi: could not locate"))
- Expect(entity).To(Equal(emptyEntity))
- })
- })
-
- Describe("ToModel", func() {
- It("converts an entity to a model", func() {
- model, err := converter.ToModel(testEntity)
+ Describe("Convert", func() {
+ It("converts an eth log to a db model", func() {
+ model, err := converter.Convert(shared.FlipperContractAddress, shared.FlipperABI, test_data.TendLogNote)
Expect(err).NotTo(HaveOccurred())
Expect(model).To(Equal(test_data.TendModel))
})
- It("handles nil values", func() {
- emptyEntity.Id = big.NewInt(1)
- emptyLog, err := json.Marshal(types.Log{})
- Expect(err).NotTo(HaveOccurred())
- expectedModel := tend.TendModel{
- Id: "1",
- Lot: "",
- Bid: "",
- Guy: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- Tic: "",
- Era: time.Unix(0, 0),
- Raw: string(emptyLog),
- }
- model, err := converter.ToModel(emptyEntity)
-
- Expect(err).NotTo(HaveOccurred())
- Expect(model).To(Equal(expectedModel))
- })
-
- It("returns an error if the log Id is nil", func() {
- model, err := converter.ToModel(emptyEntity)
+ It("returns an error if the log data is empty", func() {
+ emptyDataLog := test_data.TendLogNote
+ emptyDataLog.Data = []byte{}
+ model, err := converter.Convert(shared.FlipperContractAddress, shared.FlipperABI, emptyDataLog)
Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError("tend log note data is empty"))
+ Expect(model).To(Equal(tend.TendModel{}))
+ })
+
+ It("returns an error if the expected amount of topics aren't in the log", func() {
+ invalidLog := test_data.TendLogNote
+ invalidLog.Topics = []common.Hash{}
+ model, err := converter.Convert(shared.FlipperContractAddress, shared.FlipperABI, invalidLog)
+
+ Expect(err).To(MatchError("tend log does not contain expected topics"))
Expect(model).To(Equal(tend.TendModel{}))
})
})
diff --git a/pkg/transformers/tend/integration_test.go b/pkg/transformers/tend/integration_test.go
index f1d8a193..6905b2c8 100644
--- a/pkg/transformers/tend/integration_test.go
+++ b/pkg/transformers/tend/integration_test.go
@@ -15,7 +15,6 @@
package tend_test
import (
- "github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/rpc"
@@ -27,14 +26,13 @@ import (
rpc2 "github.com/vulcanize/vulcanizedb/pkg/geth/converters/rpc"
"github.com/vulcanize/vulcanizedb/pkg/geth/node"
"github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
- "github.com/vulcanize/vulcanizedb/pkg/transformers/tend"
"github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
"github.com/vulcanize/vulcanizedb/test_config"
)
-// These test are pending either being able to emit a Tend event on a Ganache test chain or until the contracts are deployed to Kovan.
-var _ = XDescribe("Integration tests", func() {
- It("Fetches Tend event logs from a local test chain", func() {
+// These tests are marked as pending until the Flip contract is deployed to Kovan.
+var _ = Describe("Integration tests", func() {
+ XIt("Fetches Tend event logs from a local test chain", func() {
ipcPath := test_config.TestClient.IPCPath
rawRpcClient, err := rpc.Dial(ipcPath)
@@ -47,10 +45,10 @@ var _ = XDescribe("Integration tests", func() {
transactionConverter := rpc2.NewRpcTransactionConverter(ethClient)
realBlockChain := geth.NewBlockChain(blockChainClient, realNode, transactionConverter)
realFetcher := shared.NewFetcher(realBlockChain)
- topic0 := common.HexToHash(shared.TendSignature)
+ topic0 := common.HexToHash(shared.TendFunctionSignature)
topics := [][]common.Hash{{topic0}}
- result, err := realFetcher.FetchLogs(test_data.FlipAddress, topics, test_data.FlipKickBlockNumber)
+ result, err := realFetcher.FetchLogs(shared.FlipperContractAddress, topics, test_data.FlipKickBlockNumber)
Expect(err).NotTo(HaveOccurred())
Expect(len(result) > 0).To(BeTrue())
@@ -60,27 +58,4 @@ var _ = XDescribe("Integration tests", func() {
Expect(result[0].Topics).To(Equal(test_data.EthFlipKickLog.Topics))
Expect(result[0].Index).To(Equal(test_data.EthFlipKickLog.Index))
})
-
- It("unpacks an event log", func() {
- address := common.HexToAddress(test_data.FlipAddress)
- abi, err := geth.ParseAbi(shared.FlipperABI)
- Expect(err).NotTo(HaveOccurred())
-
- contract := bind.NewBoundContract(address, abi, nil, nil, nil)
- entity := tend.TendEntity{}
-
- var eventLog = test_data.TendLog
-
- err = contract.UnpackLog(&entity, "Tend", eventLog)
- Expect(err).NotTo(HaveOccurred())
-
- expectedEntity := test_data.TendEntity
- Expect(entity.Id).To(Equal(expectedEntity.Id))
- Expect(entity.Lot).To(Equal(expectedEntity.Lot))
- Expect(entity.Bid).To(Equal(expectedEntity.Bid))
- Expect(entity.Guy).To(Equal(expectedEntity.Guy))
- Expect(entity.Tic).To(Equal(expectedEntity.Tic))
- Expect(entity.Era).To(Equal(expectedEntity.Era))
- Expect(entity.Raw).To(Equal(expectedEntity.Raw))
- })
})
diff --git a/pkg/transformers/tend/model.go b/pkg/transformers/tend/model.go
index 45a51a5a..0361e08c 100644
--- a/pkg/transformers/tend/model.go
+++ b/pkg/transformers/tend/model.go
@@ -14,17 +14,12 @@
package tend
-import (
- "time"
-)
-
type TendModel struct {
- Id string
+ BidId string `db:"bid_id"`
Lot string
Bid string
- Guy []byte
+ Guy string
Tic string
- Era time.Time
TransactionIndex uint `db:"tx_idx"`
Raw string `db:"raw_log"`
}
diff --git a/pkg/transformers/tend/repository.go b/pkg/transformers/tend/repository.go
index 21a5f5ff..75ab3386 100644
--- a/pkg/transformers/tend/repository.go
+++ b/pkg/transformers/tend/repository.go
@@ -34,9 +34,9 @@ func NewTendRepository(db *postgres.DB) TendRepository {
func (r TendRepository) Create(headerId int64, tend TendModel) error {
_, err := r.DB.Exec(
- `INSERT into maker.tend (header_id, id, lot, bid, guy, tic, era, tx_idx, raw_log)
- VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9)`,
- headerId, tend.Id, tend.Lot, tend.Bid, tend.Guy, tend.Tic, tend.Era, tend.TransactionIndex, tend.Raw,
+ `INSERT into maker.tend (header_id, bid_id, lot, bid, guy, tic, tx_idx, raw_log)
+ VALUES($1, $2, $3, $4, $5, $6, $7, $8)`,
+ headerId, tend.BidId, tend.Lot, tend.Bid, tend.Guy, tend.Tic, tend.TransactionIndex, tend.Raw,
)
return err
diff --git a/pkg/transformers/tend/repository_test.go b/pkg/transformers/tend/repository_test.go
index a9bc3e9c..277cc79a 100644
--- a/pkg/transformers/tend/repository_test.go
+++ b/pkg/transformers/tend/repository_test.go
@@ -58,17 +58,16 @@ var _ = Describe("TendRepository", func() {
Expect(count).To(Equal(1))
dbResult := tend.TendModel{}
- err = db.Get(&dbResult, `SELECT id, lot, bid, guy, tic, era, tx_idx, raw_log FROM maker.tend WHERE header_id = $1`, headerId)
+ err = db.Get(&dbResult, `SELECT bid_id, lot, bid, guy, tic, tx_idx, raw_log FROM maker.tend WHERE header_id = $1`, headerId)
Expect(err).NotTo(HaveOccurred())
- Expect(dbResult.Id).To(Equal(test_data.TendModel.Id))
+ Expect(dbResult.BidId).To(Equal(test_data.TendModel.BidId))
Expect(dbResult.Lot).To(Equal(test_data.TendModel.Lot))
Expect(dbResult.Bid).To(Equal(test_data.TendModel.Bid))
Expect(dbResult.Guy).To(Equal(test_data.TendModel.Guy))
Expect(dbResult.Tic).To(Equal(test_data.TendModel.Tic))
- Expect(dbResult.Era.Equal(test_data.TendModel.Era)).To(BeTrue())
Expect(dbResult.TransactionIndex).To(Equal(test_data.TendModel.TransactionIndex))
- Expect(dbResult.Raw).To(MatchJSON(test_data.RawJson))
+ Expect(dbResult.Raw).To(MatchJSON(test_data.RawLogNoteJson))
})
It("returns an error if inserting a tend record fails", func() {
@@ -111,7 +110,7 @@ var _ = Describe("TendRepository", func() {
outOfRangeBlockNumber = tendBlockNumber + 2
})
- It("returns headers for which there isn't an associated flip_kick record", func() {
+ It("returns headers for which there isn't an associated tend record", func() {
var headerIds []int64
for _, number := range []int64{startingBlockNumber, tendBlockNumber, endingBlockNumber, outOfRangeBlockNumber} {
diff --git a/pkg/transformers/tend/transformer.go b/pkg/transformers/tend/transformer.go
index 90a6858d..2f8d3d64 100644
--- a/pkg/transformers/tend/transformer.go
+++ b/pkg/transformers/tend/transformer.go
@@ -36,21 +36,20 @@ type TendTransformerInitializer struct {
}
func (i TendTransformerInitializer) NewTendTransformer(db *postgres.DB, blockChain core.BlockChain) shared.Transformer {
+ converter := NewTendConverter()
fetcher := shared.NewFetcher(blockChain)
repository := NewTendRepository(db)
- transformer := TendTransformer{
+ return TendTransformer{
Fetcher: fetcher,
Repository: repository,
- Converter: TendConverter{},
+ Converter: converter,
Config: i.Config,
}
-
- return transformer
}
func (t TendTransformer) Execute() error {
config := t.Config
- topics := [][]common.Hash{{common.HexToHash(shared.TendSignature)}}
+ topics := [][]common.Hash{{common.HexToHash(shared.TendFunctionSignature)}}
missingHeaders, err := t.Repository.MissingHeaders(config.StartingBlockNumber, config.EndingBlockNumber)
if err != nil {
@@ -59,15 +58,14 @@ func (t TendTransformer) Execute() error {
}
for _, header := range missingHeaders {
- ethLogs, err := t.Fetcher.FetchLogs(config.ContractAddresses, topics, header.BlockNumber)
+ ethLogs, err := t.Fetcher.FetchLogs(config.ContractAddress, topics, header.BlockNumber)
if err != nil {
log.Println("Error fetching matching logs:", err)
return err
}
for _, ethLog := range ethLogs {
- entity, err := t.Converter.ToEntity(config.ContractAddresses, config.ContractAbi, ethLog)
- model, err := t.Converter.ToModel(entity)
+ model, err := t.Converter.Convert(config.ContractAddress, config.ContractAbi, ethLog)
if err != nil {
log.Println("Error converting logs:", err)
return err
diff --git a/pkg/transformers/tend/transformer_test.go b/pkg/transformers/tend/transformer_test.go
index d019fbb8..d3fd3570 100644
--- a/pkg/transformers/tend/transformer_test.go
+++ b/pkg/transformers/tend/transformer_test.go
@@ -69,13 +69,13 @@ var _ = Describe("Tend Transformer", func() {
It("fetches eth logs for each missing header", func() {
repository.SetMissingHeaders([]core.Header{{BlockNumber: blockNumber1}, {BlockNumber: blockNumber2}})
- expectedTopics := [][]common.Hash{{common.HexToHash(shared.TendSignature)}}
+ expectedTopics := [][]common.Hash{{common.HexToHash(shared.TendFunctionSignature)}}
err := transformer.Execute()
Expect(err).NotTo(HaveOccurred())
Expect(fetcher.FetchedBlocks).To(Equal([]int64{blockNumber1, blockNumber2}))
Expect(fetcher.FetchedTopics).To(Equal(expectedTopics))
- Expect(fetcher.FetchedContractAddress).To(Equal(test_data.FlipAddress))
+ Expect(fetcher.FetchedContractAddress).To(Equal(shared.FlipperContractAddress))
})
It("returns an error if fetching logs fails", func() {
@@ -87,40 +87,30 @@ var _ = Describe("Tend Transformer", func() {
Expect(err).To(MatchError(fakes.FakeError))
})
- It("converts an eth log to an Entity", func() {
+ It("converts an eth log to an Model", func() {
repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
- fetcher.SetFetchedLogs([]types.Log{test_data.TendLog})
+ fetcher.SetFetchedLogs([]types.Log{test_data.TendLogNote})
err := transformer.Execute()
Expect(err).NotTo(HaveOccurred())
- Expect(converter.ConverterContract).To(Equal(tend.TendConfig.ContractAddresses))
+ Expect(converter.ConverterContract).To(Equal(tend.TendConfig.ContractAddress))
Expect(converter.ConverterAbi).To(Equal(tend.TendConfig.ContractAbi))
- Expect(converter.LogsToConvert).To(Equal([]types.Log{test_data.TendLog}))
+ Expect(converter.LogsToConvert).To(Equal([]types.Log{test_data.TendLogNote}))
})
It("returns an error if converter fails", func() {
repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
- fetcher.SetFetchedLogs([]types.Log{test_data.TendLog})
+ fetcher.SetFetchedLogs([]types.Log{test_data.TendLogNote})
converter.SetConverterError(fakes.FakeError)
err := transformer.Execute()
-
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(fakes.FakeError))
})
- It("returns an error if converter fails", func() {
- repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
- fetcher.SetFetchedLogs([]types.Log{test_data.TendLog})
- err := transformer.Execute()
-
- Expect(err).NotTo(HaveOccurred())
- Expect(converter.EntitiesToConvert).To(ContainElement(test_data.TendEntity))
- })
-
It("persists the tend record", func() {
headerId := int64(1)
repository.SetMissingHeaders([]core.Header{{BlockNumber: blockNumber1, Id: headerId}})
- fetcher.SetFetchedLogs([]types.Log{test_data.TendLog})
+ fetcher.SetFetchedLogs([]types.Log{test_data.TendLogNote})
err := transformer.Execute()
@@ -131,7 +121,7 @@ var _ = Describe("Tend Transformer", func() {
It("returns error if persisting tend record fails", func() {
repository.SetMissingHeaders([]core.Header{{BlockNumber: blockNumber1}})
- fetcher.SetFetchedLogs([]types.Log{test_data.TendLog})
+ fetcher.SetFetchedLogs([]types.Log{test_data.TendLogNote})
repository.SetCreateError(fakes.FakeError)
err := transformer.Execute()
diff --git a/pkg/transformers/test_data/bite.go b/pkg/transformers/test_data/bite.go
new file mode 100644
index 00000000..7d30aabd
--- /dev/null
+++ b/pkg/transformers/test_data/bite.go
@@ -0,0 +1,88 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package test_data
+
+import (
+ "encoding/json"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/bite"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+ "math/big"
+ "strconv"
+)
+
+var (
+ TemporaryBiteBlockHash = common.HexToHash("0xd130caaccc9203ca63eb149faeb013aed21f0317ce23489c0486da2f9adcd0eb")
+ TemporaryBiteBlockNumber = int64(26)
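+	// TemporaryBiteData packs five 32-byte words holding the values 1 through 5,
+	// matching biteInk, biteArt, biteTab, biteFlip, and biteIArt below.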
+ TemporaryBiteData = "0x00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000005"
+ TemporaryBiteTransaction = "0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42"
+)
+
+var (
+ biteInk = big.NewInt(1)
+ biteArt = big.NewInt(2)
+ biteTab = big.NewInt(3)
+ biteFlip = big.NewInt(4)
+ biteIArt = big.NewInt(5)
+ biteRawJson, _ = json.Marshal(EthBiteLog)
+ biteRawString = string(biteRawJson)
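+	// biteIlk and biteLad are the ASCII strings "fake ilk" and "fake lad"
+	// right-padded to 32 bytes, matching the indexed topics in EthBiteLog.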
+ biteIlk = [32]byte{102, 97, 107, 101, 32, 105, 108, 107, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ biteLad = [32]byte{102, 97, 107, 101, 32, 108, 97, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ biteId = int64(1)
+)
+
+var EthBiteLog = types.Log{
+ Address: common.HexToAddress(shared.CatContractAddress),
+ Topics: []common.Hash{
+ common.HexToHash("0x99b5620489b6ef926d4518936cfec15d305452712b88bd59da2d9c10fb0953e8"),
+ common.HexToHash("0x66616b6520696c6b000000000000000000000000000000000000000000000000"),
+ common.HexToHash("0x66616b65206c6164000000000000000000000000000000000000000000000000"),
+ },
+ Data: hexutil.MustDecode(TemporaryBiteData),
+ BlockNumber: uint64(TemporaryBiteBlockNumber),
+ TxHash: common.HexToHash(TemporaryBiteTransaction),
+ TxIndex: 111,
+ BlockHash: TemporaryBiteBlockHash,
+ Index: 0,
+ Removed: false,
+}
+
+var BiteEntity = bite.BiteEntity{
+ Id: big.NewInt(biteId),
+ Ilk: biteIlk,
+ Lad: biteLad,
+ Ink: biteInk,
+ Art: biteArt,
+ Tab: biteTab,
+ Flip: biteFlip,
+ IArt: biteIArt,
+ TransactionIndex: EthBiteLog.TxIndex,
+ Raw: EthBiteLog,
+}
+
+var BiteModel = bite.BiteModel{
+ Id: strconv.FormatInt(biteId, 10),
+ Ilk: biteIlk[:],
+ Lad: biteLad[:],
+ Ink: biteInk.String(),
+ Art: biteArt.String(),
+ Tab: biteTab.String(),
+ Flip: biteFlip.String(),
+ IArt: biteIArt.String(),
+ TransactionIndex: EthBiteLog.TxIndex,
+ Raw: biteRawString,
+}
diff --git a/pkg/transformers/test_data/constants.go b/pkg/transformers/test_data/constants.go
deleted file mode 100644
index 3c2a10b8..00000000
--- a/pkg/transformers/test_data/constants.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2018 Vulcanize
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package test_data
-
-var (
- FlipAddress = "0x08cb6176addcca2e1d1ffe21bee464b72ee4cd8d"
- FlipKickTransactionHash = "0x6b155a55fd77b751195deeebf7abfd8691ca01ee588817a920f19d5b27f65191"
- FlipKickData = "0x000000000000000000000000000000000000000000000000000000000000000100000000000000000000000038219779a699d67d7e7740b8c8f43d3e2dae218266616b6520696c6b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000064000000000000000000000000000000000000000000000000000000000000000000000000000000000000000064d922894153be9eef7b7218dc565d1d0ce2a09200000000000000000000000007fa9ef6609ca7921112231f8f195138ebba2977000000000000000000000000000000000000000000000000000000005b8063b7000000000000000000000000000000000000000000000000000000005b7729370000000000000000000000007340e006f4135ba6970d43bf43d88dcad4e7a8ca0000000000000000000000000000000000000000000000000000000000000032"
- FlipKickBlockHash = "0x32f8b12023b3a1b4c73f9a46da976931b0355714ada8b8044ebcb2cd295751a9"
- FlipKickBlockNumber = int64(10)
- TendData = "0x00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000064000000000000000000000000000000000000000000000000000000000000003200000000000000000000000064d922894153be9eef7b7218dc565d1d0ce2a0920000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005b6db414"
- TendTransactionHash = "0xadeddf804e0fef88b6145807df063c538c9942df2725a0458a084900c0fbf5e9"
- TendBlockHash = "0xdd6238b841c8cf4d91b05da7540b7f0851176fcc8477cdc4b75c93e28dfe0a88"
- TendBlockNumber = int64(11)
-)
diff --git a/pkg/transformers/test_data/dent.go b/pkg/transformers/test_data/dent.go
new file mode 100644
index 00000000..7fb94a08
--- /dev/null
+++ b/pkg/transformers/test_data/dent.go
@@ -0,0 +1,64 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package test_data
+
+import (
+ "encoding/json"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/dent"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+ "strconv"
+)
+
+var (
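+	// DentData is the dent LogNote's data payload; its dynamic fax field
+	// carries the original dent calldata, beginning with the 0x5ff3a382 selector.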
+ DentData = "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000645ff3a382000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000098a7d9b8314c000000000000000000000000000000000000000000000000000029a2241af62c0000"
+ DentTransactionHash = "0x5a210319fcd31eea5959fedb4a1b20881c21a21976e23ff19dff3b44cc1c71e8"
+ DentBlockHash = "0x105b771e04d7b8516f9291b1f006c46c09cfbff9efa8bc52498b171ff99d28b5"
+ dentBidId = int64(1)
+ dentLot = "11000000000000000000"
+ dentBid = "3000000000000000000"
+ DentTic = "0"
+ dentGuy = "0x64d922894153BE9EEf7b7218dc565d1D0Ce2a092"
+ dentRawJson, _ = json.Marshal(DentLog)
+)
+
+var DentLog = types.Log{
+	Address: common.HexToAddress(shared.FlipperContractAddress),
+ Topics: []common.Hash{
+ common.HexToHash("0x5ff3a38200000000000000000000000000000000000000000000000000000000"),
+ common.HexToHash("0x00000000000000000000000064d922894153be9eef7b7218dc565d1d0ce2a092"),
+ common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
+ common.HexToHash("0x00000000000000000000000000000000000000000000000098a7d9b8314c0000"),
+ },
+ Data: hexutil.MustDecode(DentData),
+ BlockNumber: 15,
+ TxHash: common.HexToHash(DentTransactionHash),
+ TxIndex: 5,
+ BlockHash: common.HexToHash(DentBlockHash),
+ Index: 2,
+ Removed: false,
+}
+
+var DentModel = dent.DentModel{
+ BidId: strconv.FormatInt(dentBidId, 10),
+ Lot: dentLot,
+ Bid: dentBid,
+ Guy: dentGuy,
+ Tic: DentTic,
+ TransactionIndex: DentLog.TxIndex,
+ Raw: dentRawJson,
+}
diff --git a/pkg/transformers/test_data/flip_kick.go b/pkg/transformers/test_data/flip_kick.go
index f253b87d..94b0e0a1 100644
--- a/pkg/transformers/test_data/flip_kick.go
+++ b/pkg/transformers/test_data/flip_kick.go
@@ -27,62 +27,64 @@ import (
"github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
)
-var idString = "1"
-var id, _ = new(big.Int).SetString(idString, 10)
-var vat = "0x38219779a699d67d7e7740b8c8f43d3e2dae2182"
-var ilk = [32]byte{102, 97, 107, 101, 32, 105, 108, 107, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
-var lotString = "100"
-var lot, _ = new(big.Int).SetString(lotString, 10)
-var bidString = "0"
-var bid = new(big.Int).SetBytes([]byte{0})
-var guy = "0x64d922894153be9eef7b7218dc565d1d0ce2a092"
-var gal = "0x07fa9ef6609ca7921112231f8f195138ebba2977"
-var end = int64(1535140791)
-var era = int64(1534535991)
-var lad = "0x7340e006f4135ba6970d43bf43d88dcad4e7a8ca"
-var tabString = "50"
-var tab, _ = new(big.Int).SetString(tabString, 10)
-var rawLogJson, _ = json.Marshal(EthFlipKickLog)
-var rawLogString = string(rawLogJson)
+var (
+ idString = "1"
+ id, _ = new(big.Int).SetString(idString, 10)
+ lotString = "100"
+ lot, _ = new(big.Int).SetString(lotString, 10)
+ bidString = "0"
+ bid = new(big.Int).SetBytes([]byte{0})
+ gal = "0x07Fa9eF6609cA7921112231F8f195138ebbA2977"
+ end = int64(1535991025)
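+	// urn is the address 0x7340e006f4135BA6970D43bf43d88DCAD4e7a8CA left-aligned
+	// in a bytes32, as it appears in the log's indexed urn topic.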
+ urn = [32]byte{115, 64, 224, 6, 244, 19, 91, 166, 151, 13, 67, 191, 67, 216, 141, 202, 212, 231, 168, 202, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ urnString = "0x7340e006f4135BA6970D43bf43d88DCAD4e7a8CA"
+ tabString = "50"
+ tab, _ = new(big.Int).SetString(tabString, 10)
+ rawLogJson, _ = json.Marshal(EthFlipKickLog)
+ rawLogString = string(rawLogJson)
+)
+
+var (
+ flipKickTransactionHash = "0xd11ab35cfb1ad71f790d3dd488cc1a2046080e765b150e8997aa0200947d4a9b"
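+	// flipKickData packs the non-indexed Kick arguments: lot (100), bid (0),
+	// gal, end (1535991025), and tab (50).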
+ flipKickData = "0x0000000000000000000000000000000000000000000000000000000000000064000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007fa9ef6609ca7921112231f8f195138ebba2977000000000000000000000000000000000000000000000000000000005b8d5cf10000000000000000000000000000000000000000000000000000000000000032"
+ flipKickBlockHash = "0x40fcad7863ab4bef421d638b7ad6116e81577f14a62ef847b07f8527944466fd"
+ FlipKickBlockNumber = int64(10)
+)
var EthFlipKickLog = types.Log{
- Address: common.HexToAddress(FlipAddress),
- Topics: []common.Hash{common.HexToHash(shared.FlipKickSignature)},
- Data: hexutil.MustDecode(FlipKickData),
+ Address: common.HexToAddress(shared.FlipperContractAddress),
+ Topics: []common.Hash{
+ common.HexToHash(shared.FlipKickSignature),
+ common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"),
+ common.HexToHash("0x7340e006f4135ba6970d43bf43d88dcad4e7a8ca000000000000000000000000"),
+ },
+ Data: hexutil.MustDecode(flipKickData),
BlockNumber: uint64(FlipKickBlockNumber),
- TxHash: common.HexToHash(FlipKickTransactionHash),
+ TxHash: common.HexToHash(flipKickTransactionHash),
TxIndex: 0,
- BlockHash: common.HexToHash(FlipKickBlockHash),
+ BlockHash: common.HexToHash(flipKickBlockHash),
Index: 0,
Removed: false,
}
var FlipKickEntity = flip_kick.FlipKickEntity{
Id: id,
- Vat: common.HexToAddress(vat),
- Ilk: ilk,
Lot: lot,
Bid: bid,
- Guy: common.HexToAddress(guy),
Gal: common.HexToAddress(gal),
End: big.NewInt(end),
- Era: big.NewInt(era),
- Lad: common.HexToAddress(lad),
+ Urn: urn,
Tab: tab,
Raw: EthFlipKickLog,
}
var FlipKickModel = flip_kick.FlipKickModel{
Id: idString,
- Vat: vat,
- Ilk: "0x" + common.Bytes2Hex(ilk[:]),
Lot: lotString,
Bid: bidString,
- Guy: guy,
Gal: gal,
End: time.Unix(end, 0),
- Era: time.Unix(era, 0),
- Lad: lad,
+ Urn: urnString,
Tab: tabString,
Raw: rawLogString,
}
diff --git a/pkg/transformers/test_data/frob.go b/pkg/transformers/test_data/frob.go
index 19bde57f..37334f4a 100644
--- a/pkg/transformers/test_data/frob.go
+++ b/pkg/transformers/test_data/frob.go
@@ -15,15 +15,16 @@
package test_data
import (
+ "encoding/json"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/vulcanize/vulcanizedb/pkg/transformers/frob"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
"math/big"
)
var (
- TemporaryFrobAddress = "0xff3f2400f1600f3f493a9a92704a29b96795af1a"
TemporaryFrobBlockHash = common.HexToHash("0x67ae45eace52de052a0fc58598974b101733f823fc191329ace7aded9a72b84b")
TemporaryFrobBlockNumber = int64(13)
TemporaryFrobData = "0x000000000000000000000000000000000000000000000000000000000000000f0000000000000000000000000000000000000000000000000000000000000014000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000019"
@@ -39,10 +40,11 @@ var (
frobLad = [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100, 217, 34, 137, 65, 83, 190, 158, 239, 123, 114, 24, 220, 86, 93, 29, 12, 226, 160, 146}
gem, _ = big.NewInt(0).SetString("115792089237316195423570985008687907853269984665640564039457584007913129639926", 10)
ink = big.NewInt(15)
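+	// ilk is the ASCII string "fake ilk" right-padded to 32 bytes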
+ ilk = [32]byte{102, 97, 107, 101, 32, 105, 108, 107, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
)
var EthFrobLog = types.Log{
- Address: common.HexToAddress(TemporaryFrobAddress),
+ Address: common.HexToAddress(shared.PitContractAddress),
Topics: []common.Hash{
common.HexToHash("0xb2afa28318bcc689926b52835d844de174ef8de97e982a85c0199d584920791b"),
common.HexToHash("0x66616b6520696c6b000000000000000000000000000000000000000000000000"),
@@ -58,21 +60,26 @@ var EthFrobLog = types.Log{
}
var FrobEntity = frob.FrobEntity{
- Ilk: ilk,
- Lad: frobLad,
- Dink: dink,
- Dart: dart,
- Ink: ink,
- Art: art,
- IArt: iArt,
+ Ilk: ilk,
+ Urn: frobLad,
+ Ink: ink,
+ Art: art,
+ Dink: dink,
+ Dart: dart,
+ IArt: iArt,
+ TransactionIndex: EthFrobLog.TxIndex,
+ Raw: EthFrobLog,
}
+var rawFrobLog, _ = json.Marshal(EthFrobLog)
var FrobModel = frob.FrobModel{
- Ilk: ilk[:],
- Lad: frobLad[:],
- Dink: dink.String(),
- Dart: dart.String(),
- Ink: ink.String(),
- Art: art.String(),
- IArt: iArt.String(),
+ Ilk: ilk[:],
+ Urn: frobLad[:],
+ Ink: ink.String(),
+ Art: art.String(),
+ Dink: dink.String(),
+ Dart: dart.String(),
+ IArt: iArt.String(),
+ TransactionIndex: EthFrobLog.TxIndex,
+ Raw: rawFrobLog,
}
diff --git a/pkg/transformers/test_data/mocks/bite/converter.go b/pkg/transformers/test_data/mocks/bite/converter.go
new file mode 100644
index 00000000..8d902519
--- /dev/null
+++ b/pkg/transformers/test_data/mocks/bite/converter.go
@@ -0,0 +1,45 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bite
+
+import (
+ "github.com/ethereum/go-ethereum/core/types"
+
+ . "github.com/vulcanize/vulcanizedb/pkg/transformers/bite"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+)
+
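+// MockBiteConverter is a test double for the bite converter: it records the
+// contract address, ABI, and logs it receives and returns canned test_data
+// values along with any error configured via SetConverterError.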
+type MockBiteConverter struct {
+ ConverterContract string
+ ConverterAbi string
+ LogsToConvert []types.Log
+ EntitiesToConvert []BiteEntity
+ ConverterError error
+}
+
+func (mbc *MockBiteConverter) ToEntity(contractAddress string, contractAbi string, ethLog types.Log) (BiteEntity, error) {
+ mbc.ConverterContract = contractAddress
+ mbc.ConverterAbi = contractAbi
+ mbc.LogsToConvert = append(mbc.LogsToConvert, ethLog)
+ return test_data.BiteEntity, mbc.ConverterError
+}
+
+func (mbc *MockBiteConverter) ToModel(entity BiteEntity) (BiteModel, error) {
+ mbc.EntitiesToConvert = append(mbc.EntitiesToConvert, entity)
+ return test_data.BiteModel, mbc.ConverterError
+}
+
+func (mbc *MockBiteConverter) SetConverterError(err error) {
+ mbc.ConverterError = err
+}
diff --git a/pkg/transformers/test_data/mocks/bite/repository.go b/pkg/transformers/test_data/mocks/bite/repository.go
new file mode 100644
index 00000000..12a7e774
--- /dev/null
+++ b/pkg/transformers/test_data/mocks/bite/repository.go
@@ -0,0 +1,55 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package bite
+
+import (
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/bite"
+)
+
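+// MockBiteRepository stubs the bite repository, capturing the arguments
+// passed to Create and MissingHeaders and returning whatever headers or
+// errors the test configures.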
+type MockBiteRepository struct {
+ createError error
+ PassedEndingBlockNumber int64
+ PassedBiteModel bite.BiteModel
+ PassedHeaderID int64
+ PassedStartingBlockNumber int64
+ PassedTransactionIndex uint
+ missingHeaders []core.Header
+ missingHeadersErr error
+}
+
+func (repository *MockBiteRepository) SetCreateError(err error) {
+ repository.createError = err
+}
+
+func (repository *MockBiteRepository) SetMissingHeadersErr(err error) {
+ repository.missingHeadersErr = err
+}
+
+func (repository *MockBiteRepository) SetMissingHeaders(headers []core.Header) {
+ repository.missingHeaders = headers
+}
+
+func (repository *MockBiteRepository) Create(headerID int64, model bite.BiteModel) error {
+ repository.PassedHeaderID = headerID
+ repository.PassedBiteModel = model
+ return repository.createError
+}
+
+func (repository *MockBiteRepository) MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error) {
+ repository.PassedStartingBlockNumber = startingBlockNumber
+ repository.PassedEndingBlockNumber = endingBlockNumber
+ return repository.missingHeaders, repository.missingHeadersErr
+}
diff --git a/pkg/transformers/test_data/mocks/dent/converter.go b/pkg/transformers/test_data/mocks/dent/converter.go
new file mode 100644
index 00000000..648946cd
--- /dev/null
+++ b/pkg/transformers/test_data/mocks/dent/converter.go
@@ -0,0 +1,40 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dent
+
+import (
+ "github.com/ethereum/go-ethereum/core/types"
+
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/dent"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+)
+
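+// MockDentConverter records the inputs passed to Convert and returns the
+// shared test_data.DentModel plus any configured error.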
+type MockDentConverter struct {
+ converterError error
+ PassedContractAddress string
+ PassedContractAbi string
+ LogsToConvert []types.Log
+}
+
+func (c *MockDentConverter) Convert(contractAddress string, contractAbi string, ethLog types.Log) (dent.DentModel, error) {
+ c.PassedContractAddress = contractAddress
+ c.PassedContractAbi = contractAbi
+ c.LogsToConvert = append(c.LogsToConvert, ethLog)
+ return test_data.DentModel, c.converterError
+}
+
+func (c *MockDentConverter) SetConverterError(err error) {
+ c.converterError = err
+}
diff --git a/pkg/transformers/test_data/mocks/dent/repository.go b/pkg/transformers/test_data/mocks/dent/repository.go
new file mode 100644
index 00000000..48dc657e
--- /dev/null
+++ b/pkg/transformers/test_data/mocks/dent/repository.go
@@ -0,0 +1,56 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dent
+
+import (
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/dent"
+)
+
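+// MockDentRepository stubs dent.Repository for transformer tests, recording
+// created models and header IDs and returning configurable headers and errors.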
+type MockDentRepository struct {
+ PassedStartingBlockNumber int64
+ PassedEndingBlockNumber int64
+ PassedDentModels []dent.DentModel
+ PassedHeaderIds []int64
+ missingHeaders []core.Header
+ missingHeadersError error
+ createError error
+}
+
+func (r *MockDentRepository) Create(headerId int64, model dent.DentModel) error {
+ r.PassedHeaderIds = append(r.PassedHeaderIds, headerId)
+ r.PassedDentModels = append(r.PassedDentModels, model)
+
+ return r.createError
+}
+
+func (r *MockDentRepository) MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error) {
+ r.PassedStartingBlockNumber = startingBlockNumber
+ r.PassedEndingBlockNumber = endingBlockNumber
+
+ return r.missingHeaders, r.missingHeadersError
+}
+
+func (r *MockDentRepository) SetMissingHeadersError(err error) {
+ r.missingHeadersError = err
+}
+
+func (r *MockDentRepository) SetMissingHeaders(headers []core.Header) {
+ r.missingHeaders = headers
+}
+
+func (r *MockDentRepository) SetCreateError(err error) {
+ r.createError = err
+}
diff --git a/pkg/transformers/test_data/mocks/flip_kick/converter.go b/pkg/transformers/test_data/mocks/flip_kick/converter.go
index 1a21f29d..0069ea54 100644
--- a/pkg/transformers/test_data/mocks/flip_kick/converter.go
+++ b/pkg/transformers/test_data/mocks/flip_kick/converter.go
@@ -16,6 +16,7 @@ package flip_kick
import (
"github.com/ethereum/go-ethereum/core/types"
+
"github.com/vulcanize/vulcanizedb/pkg/transformers/flip_kick"
"github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
)
diff --git a/pkg/transformers/test_data/mocks/frob/converter.go b/pkg/transformers/test_data/mocks/frob/converter.go
index a290840e..e69a996b 100644
--- a/pkg/transformers/test_data/mocks/frob/converter.go
+++ b/pkg/transformers/test_data/mocks/frob/converter.go
@@ -16,6 +16,7 @@ package frob
import (
"github.com/ethereum/go-ethereum/core/types"
+
"github.com/vulcanize/vulcanizedb/pkg/transformers/frob"
"github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
)
@@ -25,21 +26,26 @@ type MockFrobConverter struct {
PassedContractABI string
PassedLog types.Log
PassedEntity frob.FrobEntity
- converterError error
+ toEntityError error
+ toModelError error
}
-func (converter *MockFrobConverter) SetConverterError(err error) {
- converter.converterError = err
+func (converter *MockFrobConverter) SetToEntityError(err error) {
+ converter.toEntityError = err
+}
+
+func (converter *MockFrobConverter) SetToModelError(err error) {
+ converter.toModelError = err
}
func (converter *MockFrobConverter) ToEntity(contractAddress string, contractAbi string, ethLog types.Log) (frob.FrobEntity, error) {
converter.PassedContractAddress = contractAddress
converter.PassedContractABI = contractAbi
converter.PassedLog = ethLog
- return test_data.FrobEntity, converter.converterError
+ return test_data.FrobEntity, converter.toEntityError
}
-func (converter *MockFrobConverter) ToModel(frobEntity frob.FrobEntity) frob.FrobModel {
+func (converter *MockFrobConverter) ToModel(frobEntity frob.FrobEntity) (frob.FrobModel, error) {
converter.PassedEntity = frobEntity
- return test_data.FrobModel
+ return test_data.FrobModel, converter.toModelError
}
diff --git a/pkg/transformers/test_data/mocks/frob/repository.go b/pkg/transformers/test_data/mocks/frob/repository.go
index 79c37ab5..5358fda6 100644
--- a/pkg/transformers/test_data/mocks/frob/repository.go
+++ b/pkg/transformers/test_data/mocks/frob/repository.go
@@ -25,7 +25,6 @@ type MockFrobRepository struct {
PassedFrobModel frob.FrobModel
PassedHeaderID int64
PassedStartingBlockNumber int64
- PassedTransactionIndex uint
missingHeaders []core.Header
missingHeadersErr error
}
@@ -42,9 +41,8 @@ func (repository *MockFrobRepository) SetMissingHeaders(headers []core.Header) {
repository.missingHeaders = headers
}
-func (repository *MockFrobRepository) Create(headerID int64, transactionIndex uint, model frob.FrobModel) error {
+func (repository *MockFrobRepository) Create(headerID int64, model frob.FrobModel) error {
repository.PassedHeaderID = headerID
- repository.PassedTransactionIndex = transactionIndex
repository.PassedFrobModel = model
return repository.createError
}
diff --git a/pkg/transformers/test_data/mocks/pit_file/debt_ceiling/converter.go b/pkg/transformers/test_data/mocks/pit_file/debt_ceiling/converter.go
new file mode 100644
index 00000000..98aa0d79
--- /dev/null
+++ b/pkg/transformers/test_data/mocks/pit_file/debt_ceiling/converter.go
@@ -0,0 +1,36 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package debt_ceiling
+
+import (
+ "github.com/ethereum/go-ethereum/core/types"
+
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file/debt_ceiling"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+)
+
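+// MockPitFileDebtCeilingConverter captures the log passed to ToModel and
+// returns the canned PitFileDebtCeilingModel plus any configured error.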
+type MockPitFileDebtCeilingConverter struct {
+ converterErr error
+ PassedLog types.Log
+}
+
+func (converter *MockPitFileDebtCeilingConverter) ToModel(ethLog types.Log) (debt_ceiling.PitFileDebtCeilingModel, error) {
+ converter.PassedLog = ethLog
+ return test_data.PitFileDebtCeilingModel, converter.converterErr
+}
+
+func (converter *MockPitFileDebtCeilingConverter) SetConverterError(e error) {
+ converter.converterErr = e
+}
diff --git a/pkg/transformers/test_data/mocks/pit_file/debt_ceiling/repository.go b/pkg/transformers/test_data/mocks/pit_file/debt_ceiling/repository.go
new file mode 100644
index 00000000..949a56e1
--- /dev/null
+++ b/pkg/transformers/test_data/mocks/pit_file/debt_ceiling/repository.go
@@ -0,0 +1,54 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package debt_ceiling
+
+import (
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file/debt_ceiling"
+)
+
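+// MockPitFileDebtCeilingRepository stubs the debt ceiling repository,
+// recording Create and MissingHeaders arguments for later assertions.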
+type MockPitFileDebtCeilingRepository struct {
+ createErr error
+ missingHeaders []core.Header
+ missingHeadersErr error
+ PassedStartingBlockNumber int64
+ PassedEndingBlockNumber int64
+ PassedHeaderID int64
+ PassedModel debt_ceiling.PitFileDebtCeilingModel
+}
+
+func (repository *MockPitFileDebtCeilingRepository) Create(headerID int64, model debt_ceiling.PitFileDebtCeilingModel) error {
+ repository.PassedHeaderID = headerID
+ repository.PassedModel = model
+ return repository.createErr
+}
+
+func (repository *MockPitFileDebtCeilingRepository) MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error) {
+ repository.PassedStartingBlockNumber = startingBlockNumber
+ repository.PassedEndingBlockNumber = endingBlockNumber
+ return repository.missingHeaders, repository.missingHeadersErr
+}
+
+func (repository *MockPitFileDebtCeilingRepository) SetMissingHeadersErr(e error) {
+ repository.missingHeadersErr = e
+}
+
+func (repository *MockPitFileDebtCeilingRepository) SetMissingHeaders(headers []core.Header) {
+ repository.missingHeaders = headers
+}
+
+func (repository *MockPitFileDebtCeilingRepository) SetCreateError(e error) {
+ repository.createErr = e
+}
diff --git a/pkg/transformers/test_data/mocks/pit_file/ilk/converter.go b/pkg/transformers/test_data/mocks/pit_file/ilk/converter.go
new file mode 100644
index 00000000..47da762a
--- /dev/null
+++ b/pkg/transformers/test_data/mocks/pit_file/ilk/converter.go
@@ -0,0 +1,36 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ilk
+
+import (
+ "github.com/ethereum/go-ethereum/core/types"
+
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file/ilk"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+)
+
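+// MockPitFileIlkConverter records the converted log and returns the canned
+// PitFileIlkModel plus any error set via SetConverterError.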
+type MockPitFileIlkConverter struct {
+ PassedLog types.Log
+ converterError error
+}
+
+func (converter *MockPitFileIlkConverter) SetConverterError(err error) {
+ converter.converterError = err
+}
+
+func (converter *MockPitFileIlkConverter) ToModel(ethLog types.Log) (ilk.PitFileIlkModel, error) {
+ converter.PassedLog = ethLog
+ return test_data.PitFileIlkModel, converter.converterError
+}
diff --git a/pkg/transformers/test_data/mocks/pit_file/ilk/repository.go b/pkg/transformers/test_data/mocks/pit_file/ilk/repository.go
new file mode 100644
index 00000000..5b75664a
--- /dev/null
+++ b/pkg/transformers/test_data/mocks/pit_file/ilk/repository.go
@@ -0,0 +1,54 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ilk
+
+import (
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file/ilk"
+)
+
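+// MockPitFileIlkRepository stubs ilk.Repository, capturing the model and
+// header ID passed to Create and the block range passed to MissingHeaders.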
+type MockPitFileIlkRepository struct {
+ createError error
+ PassedEndingBlockNumber int64
+ PassedModel ilk.PitFileIlkModel
+ PassedHeaderID int64
+ PassedStartingBlockNumber int64
+ missingHeaders []core.Header
+ missingHeadersErr error
+}
+
+func (repository *MockPitFileIlkRepository) SetCreateError(err error) {
+ repository.createError = err
+}
+
+func (repository *MockPitFileIlkRepository) SetMissingHeadersErr(err error) {
+ repository.missingHeadersErr = err
+}
+
+func (repository *MockPitFileIlkRepository) SetMissingHeaders(headers []core.Header) {
+ repository.missingHeaders = headers
+}
+
+func (repository *MockPitFileIlkRepository) Create(headerID int64, model ilk.PitFileIlkModel) error {
+ repository.PassedHeaderID = headerID
+ repository.PassedModel = model
+ return repository.createError
+}
+
+func (repository *MockPitFileIlkRepository) MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error) {
+ repository.PassedStartingBlockNumber = startingBlockNumber
+ repository.PassedEndingBlockNumber = endingBlockNumber
+ return repository.missingHeaders, repository.missingHeadersErr
+}
diff --git a/pkg/transformers/test_data/mocks/pit_file/stability_fee/converter.go b/pkg/transformers/test_data/mocks/pit_file/stability_fee/converter.go
new file mode 100644
index 00000000..198cbecb
--- /dev/null
+++ b/pkg/transformers/test_data/mocks/pit_file/stability_fee/converter.go
@@ -0,0 +1,35 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stability_fee
+
+import (
+ "github.com/ethereum/go-ethereum/core/types"
+
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file/stability_fee"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+)
+
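+// MockPitFileStabilityFeeConverter captures the log passed to ToModel and
+// returns the canned PitFileStabilityFeeModel plus any configured error.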
+type MockPitFileStabilityFeeConverter struct {
+ converterErr error
+ PassedLog types.Log
+}
+
+func (converter *MockPitFileStabilityFeeConverter) ToModel(ethLog types.Log) (stability_fee.PitFileStabilityFeeModel, error) {
+ converter.PassedLog = ethLog
+ return test_data.PitFileStabilityFeeModel, converter.converterErr
+}
+
+func (converter *MockPitFileStabilityFeeConverter) SetConverterError(e error) {
+ converter.converterErr = e
+}
diff --git a/pkg/transformers/test_data/mocks/pit_file/stability_fee/repository.go b/pkg/transformers/test_data/mocks/pit_file/stability_fee/repository.go
new file mode 100644
index 00000000..a05f6eee
--- /dev/null
+++ b/pkg/transformers/test_data/mocks/pit_file/stability_fee/repository.go
@@ -0,0 +1,52 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package stability_fee
+
+import (
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file/stability_fee"
+)
+
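+// MockPitFileStabilityFeeRepository stubs the stability fee repository,
+// recording Create and MissingHeaders arguments for later assertions.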
+type MockPitFileStabilityFeeRepository struct {
+ createErr error
+ missingHeaders []core.Header
+ missingHeadersErr error
+ PassedStartingBlockNumber int64
+ PassedEndingBlockNumber int64
+ PassedHeaderID int64
+ PassedModel stability_fee.PitFileStabilityFeeModel
+}
+
+func (repository *MockPitFileStabilityFeeRepository) Create(headerID int64, model stability_fee.PitFileStabilityFeeModel) error {
+ repository.PassedModel = model
+ repository.PassedHeaderID = headerID
+ return repository.createErr
+}
+
+func (repository *MockPitFileStabilityFeeRepository) MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error) {
+ repository.PassedStartingBlockNumber = startingBlockNumber
+ repository.PassedEndingBlockNumber = endingBlockNumber
+ return repository.missingHeaders, repository.missingHeadersErr
+}
+
+func (repository *MockPitFileStabilityFeeRepository) SetMissingHeadersErr(e error) {
+ repository.missingHeadersErr = e
+}
+
+func (repository *MockPitFileStabilityFeeRepository) SetMissingHeaders(headers []core.Header) {
+ repository.missingHeaders = headers
+}
+
+func (repository *MockPitFileStabilityFeeRepository) SetCreateError(e error) {
+ repository.createErr = e
+}
diff --git a/pkg/transformers/test_data/mocks/price_feeds/fetcher.go b/pkg/transformers/test_data/mocks/price_feeds/fetcher.go
index 95ca63d4..951e6aa2 100644
--- a/pkg/transformers/test_data/mocks/price_feeds/fetcher.go
+++ b/pkg/transformers/test_data/mocks/price_feeds/fetcher.go
@@ -1,3 +1,17 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package price_feeds
import (
diff --git a/pkg/transformers/test_data/mocks/price_feeds/repository.go b/pkg/transformers/test_data/mocks/price_feeds/repository.go
index cdf323e0..a689c2bb 100644
--- a/pkg/transformers/test_data/mocks/price_feeds/repository.go
+++ b/pkg/transformers/test_data/mocks/price_feeds/repository.go
@@ -1,3 +1,17 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package price_feeds
import (
diff --git a/pkg/transformers/test_data/mocks/tend/converter.go b/pkg/transformers/test_data/mocks/tend/converter.go
index 89e41e53..8f0ab2b0 100644
--- a/pkg/transformers/test_data/mocks/tend/converter.go
+++ b/pkg/transformers/test_data/mocks/tend/converter.go
@@ -25,19 +25,13 @@ type MockTendConverter struct {
ConverterContract string
ConverterAbi string
LogsToConvert []types.Log
- EntitiesToConvert []tend.TendEntity
ConverterError error
}
-func (c *MockTendConverter) ToEntity(contractAddress string, contractAbi string, ethLog types.Log) (tend.TendEntity, error) {
+func (c *MockTendConverter) Convert(contractAddress string, contractAbi string, ethLog types.Log) (tend.TendModel, error) {
c.ConverterContract = contractAddress
c.ConverterAbi = contractAbi
c.LogsToConvert = append(c.LogsToConvert, ethLog)
- return test_data.TendEntity, c.ConverterError
-}
-
-func (c *MockTendConverter) ToModel(entity tend.TendEntity) (tend.TendModel, error) {
- c.EntitiesToConvert = append(c.EntitiesToConvert, entity)
return test_data.TendModel, c.ConverterError
}
diff --git a/pkg/transformers/test_data/mocks/vat_init/converter.go b/pkg/transformers/test_data/mocks/vat_init/converter.go
new file mode 100644
index 00000000..f90f5ac4
--- /dev/null
+++ b/pkg/transformers/test_data/mocks/vat_init/converter.go
@@ -0,0 +1,36 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vat_init
+
+import (
+ "github.com/ethereum/go-ethereum/core/types"
+
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/vat_init"
+)
+
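+// MockVatInitConverter captures the log passed to ToModel and returns the
+// canned VatInitModel plus any error set via SetConverterError.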
+type MockVatInitConverter struct {
+ converterErr error
+ PassedLog types.Log
+}
+
+func (converter *MockVatInitConverter) ToModel(ethLog types.Log) (vat_init.VatInitModel, error) {
+ converter.PassedLog = ethLog
+ return test_data.VatInitModel, converter.converterErr
+}
+
+func (converter *MockVatInitConverter) SetConverterError(e error) {
+ converter.converterErr = e
+}
diff --git a/pkg/transformers/test_data/mocks/vat_init/repository.go b/pkg/transformers/test_data/mocks/vat_init/repository.go
new file mode 100644
index 00000000..a27c5539
--- /dev/null
+++ b/pkg/transformers/test_data/mocks/vat_init/repository.go
@@ -0,0 +1,54 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vat_init
+
+import (
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/vat_init"
+)
+
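+// MockVatInitRepository stubs vat_init.Repository, recording Create and
+// MissingHeaders arguments and returning configurable headers and errors.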
+type MockVatInitRepository struct {
+ createErr error
+ missingHeaders []core.Header
+ missingHeadersErr error
+ PassedStartingBlockNumber int64
+ PassedEndingBlockNumber int64
+ PassedHeaderID int64
+ PassedModel vat_init.VatInitModel
+}
+
+func (repository *MockVatInitRepository) Create(headerID int64, model vat_init.VatInitModel) error {
+ repository.PassedHeaderID = headerID
+ repository.PassedModel = model
+ return repository.createErr
+}
+
+func (repository *MockVatInitRepository) MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error) {
+ repository.PassedStartingBlockNumber = startingBlockNumber
+ repository.PassedEndingBlockNumber = endingBlockNumber
+ return repository.missingHeaders, repository.missingHeadersErr
+}
+
+func (repository *MockVatInitRepository) SetMissingHeadersErr(e error) {
+ repository.missingHeadersErr = e
+}
+
+func (repository *MockVatInitRepository) SetMissingHeaders(headers []core.Header) {
+ repository.missingHeaders = headers
+}
+
+func (repository *MockVatInitRepository) SetCreateError(e error) {
+ repository.createErr = e
+}
diff --git a/pkg/transformers/test_data/pit_file.go b/pkg/transformers/test_data/pit_file.go
new file mode 100644
index 00000000..8b2daaf8
--- /dev/null
+++ b/pkg/transformers/test_data/pit_file.go
@@ -0,0 +1,105 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package test_data
+
+import (
+ "encoding/json"
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core/types"
+
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file/debt_ceiling"
+ ilk2 "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file/ilk"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file/stability_fee"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+)
+
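+// Sample pit file logs paired with the models they should convert to; these
+// fixtures back the pit file converter and transformer tests.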
+var EthPitFileDebtCeilingLog = types.Log{
+ Address: common.HexToAddress(shared.PitContractAddress),
+ Topics: []common.Hash{
+ common.HexToHash("0x29ae811400000000000000000000000000000000000000000000000000000000"),
+ common.HexToHash("0x00000000000000000000000064d922894153be9eef7b7218dc565d1d0ce2a092"),
+ common.HexToHash("0x6472697000000000000000000000000000000000000000000000000000000000"),
+ common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000007b"),
+ },
+ Data: hexutil.MustDecode("0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000004429ae81146472697000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007b"),
+ BlockNumber: 22,
+ TxHash: common.HexToHash("0xd744878a0b6655e3ba729e1019f56b563b4a16750196b8ad6104f3977db43f42"),
+ TxIndex: 333,
+ BlockHash: common.HexToHash("0xa54d9d99c315bea3dda7256a36e51773ed009a01c0859295c5382d4b83d7eeb9"),
+ Index: 0,
+ Removed: false,
+}
+
+var rawPitFileDebtCeilingLog, _ = json.Marshal(EthPitFileDebtCeilingLog)
+var PitFileDebtCeilingModel = debt_ceiling.PitFileDebtCeilingModel{
+ What: "0x64d922894153BE9EEf7b7218dc565d1D0Ce2a092",
+ Data: big.NewInt(123).String(),
+ TransactionIndex: EthPitFileDebtCeilingLog.TxIndex,
+ Raw: rawPitFileDebtCeilingLog,
+}
+
+var EthPitFileIlkLog = types.Log{
+ Address: common.HexToAddress(shared.PitContractAddress),
+ Topics: []common.Hash{
+ common.HexToHash("0x1a0b287e00000000000000000000000000000000000000000000000000000000"),
+ common.HexToHash("0x0000000000000000000000000f243e26db94b5426032e6dfa6007802dea2a614"),
+ common.HexToHash("0x66616b6520696c6b000000000000000000000000000000000000000000000000"),
+ common.HexToHash("0x73706f7400000000000000000000000000000000000000000000000000000000"),
+ },
+ Data: hexutil.MustDecode("0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000641a0b287e66616b6520696c6b00000000000000000000000000000000000000000000000073706f7400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007b"),
+ BlockNumber: 11,
+ TxHash: common.HexToHash("0x1ba8125f60fa045c85b35df3983bee37db8627fbc32e3442a5cf17c85bb83f09"),
+ TxIndex: 111,
+ BlockHash: common.HexToHash("0x6dc284247c524b22b10a75ef1c9d1709a509208d04c15fa2b675a293db637d21"),
+ Index: 0,
+ Removed: false,
+}
+
+var rawPitFileIlkLog, _ = json.Marshal(EthPitFileIlkLog)
+var PitFileIlkModel = ilk2.PitFileIlkModel{
+ Ilk: "fake ilk",
+ What: "spot",
+ Data: big.NewInt(123).String(),
+ TransactionIndex: EthPitFileIlkLog.TxIndex,
+ Raw: rawPitFileIlkLog,
+}
+
+var EthPitFileStabilityFeeLog = types.Log{
+ Address: common.HexToAddress("0x6b59c42097e2Fff7cad96cb08cEeFd601081aD9c"),
+ Topics: []common.Hash{
+ common.HexToHash("0xd4e8be8300000000000000000000000000000000000000000000000000000000"),
+ common.HexToHash("0x00000000000000000000000064d922894153be9eef7b7218dc565d1d0ce2a092"),
+ common.HexToHash("0x6472697000000000000000000000000000000000000000000000000000000000"),
+ common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
+ },
+ Data: hexutil.MustDecode("0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000044d4e8be8364726970000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
+ BlockNumber: 12,
+ TxHash: common.HexToHash("0x78cdc62316ccf8e31515d09745cc724f557569f01a557d0d09b1066bf7079fd2"),
+ TxIndex: 222,
+ BlockHash: common.HexToHash("0xe3d8e458421533170871b4033f978a3793ef10b7e33a9328a13c09e2fd90208d"),
+ Index: 0,
+ Removed: false,
+}
+
+var rawPitFileStabilityFeeLog, _ = json.Marshal(EthPitFileStabilityFeeLog)
+var PitFileStabilityFeeModel = stability_fee.PitFileStabilityFeeModel{
+ What: "drip",
+ Data: "0x64d922894153BE9EEf7b7218dc565d1D0Ce2a092",
+ TransactionIndex: EthPitFileStabilityFeeLog.TxIndex,
+ Raw: rawPitFileStabilityFeeLog,
+}
diff --git a/pkg/transformers/test_data/tend.go b/pkg/transformers/test_data/tend.go
index 40a191a6..f9f958ca 100644
--- a/pkg/transformers/test_data/tend.go
+++ b/pkg/transformers/test_data/tend.go
@@ -16,9 +16,7 @@ package test_data
import (
"encoding/json"
- "math/big"
"strconv"
- "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
@@ -28,45 +26,41 @@ import (
"github.com/vulcanize/vulcanizedb/pkg/transformers/tend"
)
-var tendLot = big.NewInt(100)
-var tendBid = big.NewInt(50)
-var tendGuy = common.HexToAddress("0x64d922894153be9eef7b7218dc565d1d0ce2a092")
-var tic = new(big.Int).SetBytes([]byte{0})
-var tendEra = big.NewInt(1533916180)
-var RawJson, _ = json.Marshal(TendLog)
-var rawString = string(RawJson)
+var (
+ tendBidId = int64(10)
+ tendLot = "85000000000000000000"
+ tendBid = "1000000000000000000"
+ tendGuy = "0x7d7bEe5fCfD8028cf7b00876C5b1421c800561A6"
+ tendData = "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000644b43ed12000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000049b9ca9a6943400000000000000000000000000000000000000000000000000000de0b6b3a7640000"
+ tendTransactionHash = "0x7909c8793ded2b8348f5db623044fbc26bb7ab78ad5792897abdf68ddc1df63d"
+ tendBlockHash = "0xa8ea87147c0a68daeb6b1d9f8c0937ba975a650809cab80d19c969e8d0df452c"
+ TendTic = "0"
+)
-var TendLog = types.Log{
- Address: common.HexToAddress(FlipAddress),
- Topics: []common.Hash{common.HexToHash(shared.TendSignature)},
- Data: hexutil.MustDecode(TendData),
- BlockNumber: uint64(TendBlockNumber),
- TxHash: common.HexToHash(TendTransactionHash),
- TxIndex: 1,
- BlockHash: common.HexToHash(TendBlockHash),
- Index: 0,
+var TendLogNote = types.Log{
+ Address: common.HexToAddress(shared.FlipperContractAddress),
+ Topics: []common.Hash{
+ common.HexToHash("0x4b43ed1200000000000000000000000000000000000000000000000000000000"), //abbreviated tend function signature
+ common.HexToHash("0x0000000000000000000000007d7bee5fcfd8028cf7b00876c5b1421c800561a6"), //msg caller address
+ common.HexToHash("0x000000000000000000000000000000000000000000000000000000000000000a"), //first param of the function called (i.e. flip kick id)
+ common.HexToHash("0x0000000000000000000000000000000000000000000000049b9ca9a694340000"), //second param of the function called (i.e. lot)
+ },
+ Data: hexutil.MustDecode(tendData),
+ BlockNumber: 11,
+ TxHash: common.HexToHash(tendTransactionHash),
+ TxIndex: 10,
+ BlockHash: common.HexToHash(tendBlockHash),
+ Index: 1,
Removed: false,
}
+var RawLogNoteJson, _ = json.Marshal(TendLogNote)
-var tendId = int64(1)
-var TendEntity = tend.TendEntity{
- Id: big.NewInt(tendId),
+var TendModel = tend.TendModel{
+ BidId: strconv.FormatInt(tendBidId, 10),
Lot: tendLot,
Bid: tendBid,
Guy: tendGuy,
- Tic: tic,
- Era: tendEra,
- TransactionIndex: TendLog.TxIndex,
- Raw: TendLog,
-}
-
-var TendModel = tend.TendModel{
- Id: strconv.FormatInt(tendId, 10),
- Lot: tendLot.String(),
- Bid: tendBid.String(),
- Guy: tendGuy[:],
- Tic: tic.String(),
- Era: time.Unix(tendEra.Int64(), 0),
- TransactionIndex: TendLog.TxIndex,
- Raw: rawString,
+ Tic: TendTic,
+ TransactionIndex: TendLogNote.TxIndex,
+ Raw: string(RawLogNoteJson),
}
diff --git a/pkg/transformers/test_data/vat_init.go b/pkg/transformers/test_data/vat_init.go
new file mode 100644
index 00000000..acb95086
--- /dev/null
+++ b/pkg/transformers/test_data/vat_init.go
@@ -0,0 +1,50 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package test_data
+
+import (
+ "encoding/json"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/core/types"
+
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/vat_init"
+)
+
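+// EthVatInitLog is a sample vat init log note; VatInitModel below is the
+// conversion the vat_init converter is expected to produce from it.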
+var EthVatInitLog = types.Log{
+ Address: common.HexToAddress(shared.VatContractAddress),
+ Topics: []common.Hash{
+ common.HexToHash("0x3b66319500000000000000000000000000000000000000000000000000000000"),
+ common.HexToHash("0x66616b6520696c6b000000000000000000000000000000000000000000000000"),
+ common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
+ common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
+ },
+ Data: hexutil.MustDecode("0x000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000243b66319566616b6520696c6b000000000000000000000000000000000000000000000000"),
+ BlockNumber: 24,
+ TxHash: common.HexToHash("0xe8f39fbb7fea3621f543868f19b1114e305aff6a063a30d32835ff1012526f91"),
+ TxIndex: 7,
+ BlockHash: common.HexToHash("0xe3dd2e05bd8b92833e20ed83e2171bbc06a9ec823232eca1730a807bd8f5edc0"),
+ Index: 0,
+ Removed: false,
+}
+
+var rawVatInitLog, _ = json.Marshal(EthVatInitLog)
+var VatInitModel = vat_init.VatInitModel{
+ Ilk: "fake ilk",
+ TransactionIndex: EthVatInitLog.TxIndex,
+ Raw: rawVatInitLog,
+}
diff --git a/pkg/transformers/transformers.go b/pkg/transformers/transformers.go
index ad53de4a..6ef0888f 100644
--- a/pkg/transformers/transformers.go
+++ b/pkg/transformers/transformers.go
@@ -15,27 +15,44 @@
package transformers
import (
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/bite"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/dent"
"github.com/vulcanize/vulcanizedb/pkg/transformers/flip_kick"
"github.com/vulcanize/vulcanizedb/pkg/transformers/frob"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file/debt_ceiling"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file/ilk"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/pit_file/stability_fee"
"github.com/vulcanize/vulcanizedb/pkg/transformers/price_feeds"
"github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
"github.com/vulcanize/vulcanizedb/pkg/transformers/tend"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/vat_init"
)
func TransformerInitializers() []shared.TransformerInitializer {
- flipKickConfig := flip_kick.FlipKickConfig
- flipKickTransformerInitializer := flip_kick.FlipKickTransformerInitializer{Config: flipKickConfig}
- frobConfig := frob.FrobConfig
- frobTransformerInitializer := frob.FrobTransformerInitializer{Config: frobConfig}
- priceFeedConfig := price_feeds.PriceFeedConfig
- priceFeedTransformerInitializer := price_feeds.PriceFeedTransformerInitializer{Config: priceFeedConfig}
- tendConfig := tend.TendConfig
- tendTransformerInitializer := tend.TendTransformerInitializer{Config: tendConfig}
+ biteTransformerInitializer := bite.BiteTransformerInitializer{Config: bite.BiteConfig}
+ dentTransformerInitializer := dent.DentTransformerInitializer{Config: dent.DentConfig}
+ flipKickTransformerInitializer := flip_kick.FlipKickTransformerInitializer{Config: flip_kick.FlipKickConfig}
+ frobTransformerInitializer := frob.FrobTransformerInitializer{Config: frob.FrobConfig}
+ pitFileConfig := pit_file.PitFileConfig
+ pitFileDebtCeilingTransformerInitializer := debt_ceiling.PitFileDebtCeilingTransformerInitializer{Config: pitFileConfig}
+ pitFileIlkTransformerInitializer := ilk.PitFileIlkTransformerInitializer{Config: pitFileConfig}
+ pitFileStabilityFeeTransformerInitializer := stability_fee.PitFileStabilityFeeTransformerInitializer{Config: pitFileConfig}
+ priceFeedTransformerInitializer := price_feeds.PriceFeedTransformerInitializer{Config: price_feeds.PriceFeedConfig}
+ tendTransformerInitializer := tend.TendTransformerInitializer{Config: tend.TendConfig}
+ vatInitConfig := vat_init.VatInitConfig
+ vatInitTransformerInitializer := vat_init.VatInitTransformerInitializer{Config: vatInitConfig}
return []shared.TransformerInitializer{
+ biteTransformerInitializer.NewBiteTransformer,
+ dentTransformerInitializer.NewDentTransformer,
flipKickTransformerInitializer.NewFlipKickTransformer,
frobTransformerInitializer.NewFrobTransformer,
+ pitFileDebtCeilingTransformerInitializer.NewPitFileDebtCeilingTransformer,
+ pitFileIlkTransformerInitializer.NewPitFileIlkTransformer,
+ pitFileStabilityFeeTransformerInitializer.NewPitFileStabilityFeeTransformer,
priceFeedTransformerInitializer.NewPriceFeedTransformer,
tendTransformerInitializer.NewTendTransformer,
+ vatInitTransformerInitializer.NewVatInitTransformer,
}
}
diff --git a/pkg/transformers/vat_init/config.go b/pkg/transformers/vat_init/config.go
new file mode 100644
index 00000000..846032d3
--- /dev/null
+++ b/pkg/transformers/vat_init/config.go
@@ -0,0 +1,27 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vat_init
+
+import (
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+)
+
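+// VatInitConfig scopes the vat init transformer to the Vat contract, its init
+// log signature, and the configured block range.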
+var VatInitConfig = shared.TransformerConfig{
+ ContractAddress: shared.VatContractAddress,
+ ContractAbi: shared.VatABI,
+ Topics: []string{shared.VatInitSignature},
+ StartingBlockNumber: 0,
+ EndingBlockNumber: 100,
+}
diff --git a/pkg/transformers/vat_init/converter.go b/pkg/transformers/vat_init/converter.go
new file mode 100644
index 00000000..63ecba87
--- /dev/null
+++ b/pkg/transformers/vat_init/converter.go
@@ -0,0 +1,50 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vat_init
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
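+// Converter turns raw vat init logs into VatInitModels.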
+type Converter interface {
+ ToModel(ethLog types.Log) (VatInitModel, error)
+}
+
+type VatInitConverter struct{}
+
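+// ToModel checks that the log has enough topics, reads the ilk from Topics[1]
+// (trimming zero-byte padding), and stores the JSON-encoded log in Raw.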
+func (VatInitConverter) ToModel(ethLog types.Log) (VatInitModel, error) {
+ err := verifyLog(ethLog)
+ if err != nil {
+ return VatInitModel{}, err
+ }
+ ilk := string(bytes.Trim(ethLog.Topics[1].Bytes(), "\x00"))
+ raw, err := json.Marshal(ethLog)
+ return VatInitModel{
+ Ilk: ilk,
+ TransactionIndex: ethLog.TxIndex,
+ Raw: raw,
+ }, err
+}
+
+func verifyLog(log types.Log) error {
+ if len(log.Topics) < 2 {
+ return errors.New("log missing topics")
+ }
+ return nil
+}
diff --git a/pkg/transformers/vat_init/converter_test.go b/pkg/transformers/vat_init/converter_test.go
new file mode 100644
index 00000000..78b4cb7d
--- /dev/null
+++ b/pkg/transformers/vat_init/converter_test.go
@@ -0,0 +1,44 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vat_init_test
+
+import (
+ "github.com/ethereum/go-ethereum/core/types"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/vat_init"
+)
+
+var _ = Describe("Vat init converter", func() {
+ It("returns err if log missing topics", func() {
+ converter := vat_init.VatInitConverter{}
+ badLog := types.Log{}
+
+ _, err := converter.ToModel(badLog)
+
+ Expect(err).To(HaveOccurred())
+ })
+
+ It("converts a log to an model", func() {
+ converter := vat_init.VatInitConverter{}
+
+ model, err := converter.ToModel(test_data.EthVatInitLog)
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(model).To(Equal(test_data.VatInitModel))
+ })
+})
diff --git a/pkg/transformers/vat_init/model.go b/pkg/transformers/vat_init/model.go
new file mode 100644
index 00000000..8528af13
--- /dev/null
+++ b/pkg/transformers/vat_init/model.go
@@ -0,0 +1,21 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vat_init
+
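+// VatInitModel mirrors a row of the maker.vat_init table.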
+type VatInitModel struct {
+ Ilk string
+ TransactionIndex uint `db:"tx_idx"`
+ Raw []byte `db:"raw_log"`
+}
diff --git a/pkg/transformers/vat_init/repository.go b/pkg/transformers/vat_init/repository.go
new file mode 100644
index 00000000..c79861c9
--- /dev/null
+++ b/pkg/transformers/vat_init/repository.go
@@ -0,0 +1,62 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vat_init
+
+import (
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
+)
+
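+// Repository persists vat init models and reports which headers in a block
+// range still lack one.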
+type Repository interface {
+ Create(headerID int64, model VatInitModel) error
+ MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error)
+}
+
+type VatInitRepository struct {
+ db *postgres.DB
+}
+
+func NewVatInitRepository(db *postgres.DB) VatInitRepository {
+ return VatInitRepository{
+ db: db,
+ }
+}
+
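+// Create inserts a vat init model tied to the header row that anchors it.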
+func (repository VatInitRepository) Create(headerID int64, model VatInitModel) error {
+ _, err := repository.db.Exec(
+ `INSERT into maker.vat_init (header_id, ilk, tx_idx, raw_log)
+ VALUES($1, $2, $3, $4)`,
+ headerID, model.Ilk, model.TransactionIndex, model.Raw,
+ )
+ return err
+}
+
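+// MissingHeaders returns headers in the given block range, checked against the
+// current node's fingerprint, that have no vat_init row yet.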
+func (repository VatInitRepository) MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error) {
+ var result []core.Header
+ err := repository.db.Select(
+ &result,
+ `SELECT headers.id, headers.block_number FROM headers
+ LEFT JOIN maker.vat_init on headers.id = header_id
+ WHERE header_id IS NULL
+ AND headers.block_number >= $1
+ AND headers.block_number <= $2
+ AND headers.eth_node_fingerprint = $3`,
+ startingBlockNumber,
+ endingBlockNumber,
+ repository.db.Node.ID,
+ )
+
+ return result, err
+}
diff --git a/pkg/transformers/vat_init/repository_test.go b/pkg/transformers/vat_init/repository_test.go
new file mode 100644
index 00000000..22b9173a
--- /dev/null
+++ b/pkg/transformers/vat_init/repository_test.go
@@ -0,0 +1,143 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vat_init_test
+
+import (
+ "database/sql"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/vat_init"
+ "github.com/vulcanize/vulcanizedb/test_config"
+)
+
+var _ = Describe("", func() {
+ Describe("Create", func() {
+ It("adds a vat event", func() {
+ db := test_config.NewTestDB(core.Node{})
+ test_config.CleanTestDB(db)
+ headerRepository := repositories.NewHeaderRepository(db)
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{})
+ Expect(err).NotTo(HaveOccurred())
+ vatInitRepository := vat_init.NewVatInitRepository(db)
+
+ err = vatInitRepository.Create(headerID, test_data.VatInitModel)
+
+ Expect(err).NotTo(HaveOccurred())
+ var dbVatInit vat_init.VatInitModel
+ err = db.Get(&dbVatInit, `SELECT ilk,tx_idx, raw_log FROM maker.vat_init WHERE header_id = $1`, headerID)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(dbVatInit.Ilk).To(Equal(test_data.VatInitModel.Ilk))
+ Expect(dbVatInit.TransactionIndex).To(Equal(test_data.VatInitModel.TransactionIndex))
+ Expect(dbVatInit.Raw).To(MatchJSON(test_data.VatInitModel.Raw))
+ })
+
+ It("does not duplicate vat events", func() {
+ db := test_config.NewTestDB(core.Node{})
+ test_config.CleanTestDB(db)
+ headerRepository := repositories.NewHeaderRepository(db)
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{})
+ Expect(err).NotTo(HaveOccurred())
+ vatInitRepository := vat_init.NewVatInitRepository(db)
+ err = vatInitRepository.Create(headerID, test_data.VatInitModel)
+ Expect(err).NotTo(HaveOccurred())
+
+ err = vatInitRepository.Create(headerID, test_data.VatInitModel)
+
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(ContainSubstring("pq: duplicate key value violates unique constraint"))
+ })
+
+ It("removes vat if corresponding header is deleted", func() {
+ db := test_config.NewTestDB(core.Node{})
+ test_config.CleanTestDB(db)
+ headerRepository := repositories.NewHeaderRepository(db)
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{})
+ Expect(err).NotTo(HaveOccurred())
+ vatInitRepository := vat_init.NewVatInitRepository(db)
+ err = vatInitRepository.Create(headerID, test_data.VatInitModel)
+ Expect(err).NotTo(HaveOccurred())
+
+ _, err = db.Exec(`DELETE FROM headers WHERE id = $1`, headerID)
+
+ Expect(err).NotTo(HaveOccurred())
+ var dbVatInit vat_init.VatInitModel
+ err = db.Get(&dbVatInit, `SELECT ilk, tx_idx, raw_log FROM maker.vat_init WHERE header_id = $1`, headerID)
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(sql.ErrNoRows))
+ })
+ })
+
+ Describe("MissingHeaders", func() {
+ It("returns headers with no associated vat event", func() {
+ db := test_config.NewTestDB(core.Node{})
+ test_config.CleanTestDB(db)
+ headerRepository := repositories.NewHeaderRepository(db)
+ startingBlockNumber := int64(1)
+ vatInitBlockNumber := int64(2)
+ endingBlockNumber := int64(3)
+ blockNumbers := []int64{startingBlockNumber, vatInitBlockNumber, endingBlockNumber, endingBlockNumber + 1}
+ var headerIDs []int64
+ for _, n := range blockNumbers {
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{BlockNumber: n})
+ headerIDs = append(headerIDs, headerID)
+ Expect(err).NotTo(HaveOccurred())
+ }
+ vatInitRepository := vat_init.NewVatInitRepository(db)
+ err := vatInitRepository.Create(headerIDs[1], test_data.VatInitModel)
+ Expect(err).NotTo(HaveOccurred())
+
+ headers, err := vatInitRepository.MissingHeaders(startingBlockNumber, endingBlockNumber)
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(headers)).To(Equal(2))
+ Expect(headers[0].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber)))
+ Expect(headers[1].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber)))
+ })
+
+ It("only returns headers associated with the current node", func() {
+ db := test_config.NewTestDB(core.Node{})
+ test_config.CleanTestDB(db)
+ blockNumbers := []int64{1, 2, 3}
+ headerRepository := repositories.NewHeaderRepository(db)
+ dbTwo := test_config.NewTestDB(core.Node{ID: "second"})
+ headerRepositoryTwo := repositories.NewHeaderRepository(dbTwo)
+ var headerIDs []int64
+ for _, n := range blockNumbers {
+ headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{BlockNumber: n})
+ Expect(err).NotTo(HaveOccurred())
+ headerIDs = append(headerIDs, headerID)
+ _, err = headerRepositoryTwo.CreateOrUpdateHeader(core.Header{BlockNumber: n})
+ Expect(err).NotTo(HaveOccurred())
+ }
+ vatInitRepository := vat_init.NewVatInitRepository(db)
+ vatInitRepositoryTwo := vat_init.NewVatInitRepository(dbTwo)
+ err := vatInitRepository.Create(headerIDs[0], test_data.VatInitModel)
+ Expect(err).NotTo(HaveOccurred())
+
+ nodeOneMissingHeaders, err := vatInitRepository.MissingHeaders(blockNumbers[0], blockNumbers[len(blockNumbers)-1])
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(nodeOneMissingHeaders)).To(Equal(len(blockNumbers) - 1))
+
+ nodeTwoMissingHeaders, err := vatInitRepositoryTwo.MissingHeaders(blockNumbers[0], blockNumbers[len(blockNumbers)-1])
+ Expect(err).NotTo(HaveOccurred())
+ Expect(len(nodeTwoMissingHeaders)).To(Equal(len(blockNumbers)))
+ })
+ })
+})
diff --git a/pkg/transformers/vat_init/transformer.go b/pkg/transformers/vat_init/transformer.go
new file mode 100644
index 00000000..e626381c
--- /dev/null
+++ b/pkg/transformers/vat_init/transformer.go
@@ -0,0 +1,71 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vat_init
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+)
+
+type VatInitTransformerInitializer struct {
+ Config shared.TransformerConfig
+}
+
+func (initializer VatInitTransformerInitializer) NewVatInitTransformer(db *postgres.DB, blockChain core.BlockChain) shared.Transformer {
+ converter := VatInitConverter{}
+ fetcher := shared.NewFetcher(blockChain)
+ repository := NewVatInitRepository(db)
+ return VatInitTransformer{
+ Config: initializer.Config,
+ Converter: converter,
+ Fetcher: fetcher,
+ Repository: repository,
+ }
+}
+
+type VatInitTransformer struct {
+ Config shared.TransformerConfig
+ Converter Converter
+ Fetcher shared.LogFetcher
+ Repository Repository
+}
+
+func (transformer VatInitTransformer) Execute() error {
+ missingHeaders, err := transformer.Repository.MissingHeaders(transformer.Config.StartingBlockNumber, transformer.Config.EndingBlockNumber)
+ if err != nil {
+ return err
+ }
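+	// fetch, convert, and persist vat init logs for each header in range that is missing one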
+ for _, header := range missingHeaders {
+ topics := [][]common.Hash{{common.HexToHash(shared.VatInitSignature)}}
+ matchingLogs, err := transformer.Fetcher.FetchLogs(VatInitConfig.ContractAddress, topics, header.BlockNumber)
+ if err != nil {
+ return err
+ }
+ for _, log := range matchingLogs {
+ model, err := transformer.Converter.ToModel(log)
+ if err != nil {
+ return err
+ }
+ err = transformer.Repository.Create(header.Id, model)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/pkg/transformers/vat_init/transformer_test.go b/pkg/transformers/vat_init/transformer_test.go
new file mode 100644
index 00000000..081a42ff
--- /dev/null
+++ b/pkg/transformers/vat_init/transformer_test.go
@@ -0,0 +1,174 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vat_init_test
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/vulcanize/vulcanizedb/pkg/core"
+ "github.com/vulcanize/vulcanizedb/pkg/fakes"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data/mocks"
+ vat_init_mocks "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data/mocks/vat_init"
+ "github.com/vulcanize/vulcanizedb/pkg/transformers/vat_init"
+)
+
+var _ = Describe("Vat init transformer", func() {
+ It("gets missing headers for block numbers specified in config", func() {
+ repository := &vat_init_mocks.MockVatInitRepository{}
+ transformer := vat_init.VatInitTransformer{
+ Config: vat_init.VatInitConfig,
+ Fetcher: &mocks.MockLogFetcher{},
+ Converter: &vat_init_mocks.MockVatInitConverter{},
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(repository.PassedStartingBlockNumber).To(Equal(vat_init.VatInitConfig.StartingBlockNumber))
+ Expect(repository.PassedEndingBlockNumber).To(Equal(vat_init.VatInitConfig.EndingBlockNumber))
+ })
+
+ It("returns error if repository returns error for missing headers", func() {
+ repository := &vat_init_mocks.MockVatInitRepository{}
+ repository.SetMissingHeadersErr(fakes.FakeError)
+ transformer := vat_init.VatInitTransformer{
+ Fetcher: &mocks.MockLogFetcher{},
+ Converter: &vat_init_mocks.MockVatInitConverter{},
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(fakes.FakeError))
+ })
+
+ It("fetches logs for missing headers", func() {
+ fetcher := &mocks.MockLogFetcher{}
+ repository := &vat_init_mocks.MockVatInitRepository{}
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}, {BlockNumber: 2}})
+ transformer := vat_init.VatInitTransformer{
+ Fetcher: fetcher,
+ Converter: &vat_init_mocks.MockVatInitConverter{},
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(fetcher.FetchedBlocks).To(Equal([]int64{1, 2}))
+ Expect(fetcher.FetchedContractAddress).To(Equal(vat_init.VatInitConfig.ContractAddress))
+ Expect(fetcher.FetchedTopics).To(Equal([][]common.Hash{{common.HexToHash(shared.VatInitSignature)}}))
+ })
+
+ It("returns error if fetcher returns error", func() {
+ fetcher := &mocks.MockLogFetcher{}
+ fetcher.SetFetcherError(fakes.FakeError)
+ repository := &vat_init_mocks.MockVatInitRepository{}
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
+ transformer := vat_init.VatInitTransformer{
+ Fetcher: fetcher,
+ Converter: &vat_init_mocks.MockVatInitConverter{},
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(fakes.FakeError))
+ })
+
+ It("converts matching logs", func() {
+ converter := &vat_init_mocks.MockVatInitConverter{}
+ fetcher := &mocks.MockLogFetcher{}
+ fetcher.SetFetchedLogs([]types.Log{test_data.EthVatInitLog})
+ repository := &vat_init_mocks.MockVatInitRepository{}
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
+ transformer := vat_init.VatInitTransformer{
+ Fetcher: fetcher,
+ Converter: converter,
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(converter.PassedLog).To(Equal(test_data.EthVatInitLog))
+ })
+
+ It("returns error if converter returns error", func() {
+ converter := &vat_init_mocks.MockVatInitConverter{}
+ converter.SetConverterError(fakes.FakeError)
+ fetcher := &mocks.MockLogFetcher{}
+ fetcher.SetFetchedLogs([]types.Log{test_data.EthVatInitLog})
+ repository := &vat_init_mocks.MockVatInitRepository{}
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
+ transformer := vat_init.VatInitTransformer{
+ Fetcher: fetcher,
+ Converter: converter,
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(fakes.FakeError))
+ })
+
+ It("persists vat init model", func() {
+ converter := &vat_init_mocks.MockVatInitConverter{}
+ fetcher := &mocks.MockLogFetcher{}
+ fetcher.SetFetchedLogs([]types.Log{test_data.EthVatInitLog})
+ repository := &vat_init_mocks.MockVatInitRepository{}
+ fakeHeader := core.Header{BlockNumber: 1, Id: 2}
+ repository.SetMissingHeaders([]core.Header{fakeHeader})
+ transformer := vat_init.VatInitTransformer{
+ Fetcher: fetcher,
+ Converter: converter,
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).NotTo(HaveOccurred())
+ Expect(repository.PassedHeaderID).To(Equal(fakeHeader.Id))
+ Expect(repository.PassedModel).To(Equal(test_data.VatInitModel))
+ })
+
+ It("returns error if repository returns error for create", func() {
+ converter := &vat_init_mocks.MockVatInitConverter{}
+ fetcher := &mocks.MockLogFetcher{}
+ fetcher.SetFetchedLogs([]types.Log{test_data.EthVatInitLog})
+ repository := &vat_init_mocks.MockVatInitRepository{}
+ repository.SetMissingHeaders([]core.Header{{BlockNumber: 1, Id: 2}})
+ repository.SetCreateError(fakes.FakeError)
+ transformer := vat_init.VatInitTransformer{
+ Fetcher: fetcher,
+ Converter: converter,
+ Repository: repository,
+ }
+
+ err := transformer.Execute()
+
+ Expect(err).To(HaveOccurred())
+ Expect(err).To(MatchError(fakes.FakeError))
+ })
+})
diff --git a/pkg/transformers/vat_init/vat_init_suite_test.go b/pkg/transformers/vat_init/vat_init_suite_test.go
new file mode 100644
index 00000000..9fda78dc
--- /dev/null
+++ b/pkg/transformers/vat_init/vat_init_suite_test.go
@@ -0,0 +1,27 @@
+// Copyright 2018 Vulcanize
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package vat_init_test
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+)
+
+func TestVatInit(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "VatInit Suite")
+}
diff --git a/test_config/test_config.go b/test_config/test_config.go
index e6926a82..5e14d6e5 100644
--- a/test_config/test_config.go
+++ b/test_config/test_config.go
@@ -77,6 +77,8 @@ func CleanTestDB(db *postgres.DB) {
db.MustExec("DELETE FROM headers")
db.MustExec("DELETE FROM log_filters")
db.MustExec("DELETE FROM logs")
+ db.MustExec("DELETE FROM maker.pit_file_ilk")
+ db.MustExec("DELETE FROM maker.pit_file_stability_fee")
db.MustExec("DELETE FROM receipts")
db.MustExec("DELETE FROM transactions")
db.MustExec("DELETE FROM watched_contracts")
diff --git a/vendor/github.com/philhofer/fwd/LICENSE.md b/vendor/github.com/philhofer/fwd/LICENSE.md
new file mode 100644
index 00000000..1ac6a81f
--- /dev/null
+++ b/vendor/github.com/philhofer/fwd/LICENSE.md
@@ -0,0 +1,7 @@
+Copyright (c) 2014-2015, Philip Hofer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/philhofer/fwd/README.md b/vendor/github.com/philhofer/fwd/README.md
new file mode 100644
index 00000000..38349af3
--- /dev/null
+++ b/vendor/github.com/philhofer/fwd/README.md
@@ -0,0 +1,315 @@
+
+# fwd
+ import "github.com/philhofer/fwd"
+
+The `fwd` package provides a buffered reader
+and writer. Each has methods that help improve
+the encoding/decoding performance of some binary
+protocols.
+
+The `fwd.Writer` and `fwd.Reader` types provide similar
+functionality to their counterparts in `bufio`, plus
+a few extra utility methods that simplify read-ahead
+and write-ahead. I wrote this package to improve serialization
+performance for http://github.com/tinylib/msgp,
+where it provided about a 2x speedup over `bufio` for certain
+workloads. However, care must be taken to understand the semantics of the
+extra methods provided by this package, as they allow
+the user to access and manipulate the buffer memory
+directly.
+
+The extra methods for `fwd.Reader` are `Peek`, `Skip`
+and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`,
+will re-allocate the read buffer in order to accommodate arbitrarily
+large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes
+in the stream, and uses the `io.Seeker` interface if the underlying
+stream implements it. `(*fwd.Reader).Next` returns a slice pointing
+to the next `n` bytes in the read buffer (like `Peek`), but also
+increments the read position. This allows users to process streams
+in arbitrary block sizes without having to manage appropriately-sized
+slices. Additionally, obviating the need to copy the data from the
+buffer to another location in memory can improve performance dramatically
+in CPU-bound applications.
+
+`fwd.Writer` only has one extra method, `(*fwd.Writer).Next`, which
+returns a slice pointing to the next `n` bytes of the writer, and increments
+the write position by the length of the returned slice. This allows users
+to write directly to the end of the buffer.
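+
+As a quick, hedged illustration of these semantics (this example is an
+editorial addition, not part of the generated documentation), the sketch
+below round-trips a few bytes through a `fwd.Reader` and a `fwd.Writer`.
+Note that the slices returned by `Peek` and `Next` alias the internal
+buffer and are only valid until the next call on the reader.
+
+``` go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/philhofer/fwd"
+)
+
+func main() {
+	r := fwd.NewReader(bytes.NewReader([]byte("hello, fwd!")))
+
+	// Peek at the first 5 bytes without advancing the read position.
+	head, err := r.Peek(5)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("peeked: %q\n", head) // "hello"
+
+	// Next returns the same bytes, but advances the reader;
+	// the slice is only valid until the next reader method call.
+	chunk, err := r.Next(5)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("consumed: %q\n", chunk) // "hello"
+
+	// Writer.Next hands out buffer space to write into directly.
+	var out bytes.Buffer
+	w := fwd.NewWriter(&out)
+	dst, err := w.Next(4)
+	if err != nil {
+		panic(err)
+	}
+	copy(dst, "fwd!")
+	if err := w.Flush(); err != nil {
+		panic(err)
+	}
+	fmt.Println(out.String()) // "fwd!"
+}
+```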
+
+
+
+
+## Constants
+``` go
+const (
+ // DefaultReaderSize is the default size of the read buffer
+ DefaultReaderSize = 2048
+)
+```
+``` go
+const (
+ // DefaultWriterSize is the
+ // default write buffer size.
+ DefaultWriterSize = 2048
+)
+```
+
+
+
+## type Reader
+``` go
+type Reader struct {
+ // contains filtered or unexported fields
+}
+```
+Reader is a buffered look-ahead reader
+
+
+
+
+
+
+
+
+
+### func NewReader
+``` go
+func NewReader(r io.Reader) *Reader
+```
+NewReader returns a new *Reader that reads from 'r'
+
+
+### func NewReaderSize
+``` go
+func NewReaderSize(r io.Reader, n int) *Reader
+```
+NewReaderSize returns a new *Reader that
+reads from 'r' and has a buffer size 'n'
+
+
+
+
+### func (\*Reader) BufferSize
+``` go
+func (r *Reader) BufferSize() int
+```
+BufferSize returns the total size of the buffer
+
+
+
+### func (\*Reader) Buffered
+``` go
+func (r *Reader) Buffered() int
+```
+Buffered returns the number of bytes currently in the buffer
+
+
+
+### func (\*Reader) Next
+``` go
+func (r *Reader) Next(n int) ([]byte, error)
+```
+Next returns the next 'n' bytes in the stream.
+Unlike Peek, Next advances the reader position.
+The returned bytes point to the same
+data as the buffer, so the slice is
+only valid until the next reader method call.
+An EOF is considered an unexpected error.
+If the returned slice is less than the
+length asked for, an error will be returned,
+and the reader position will not be incremented.
+
+
+
+### func (\*Reader) Peek
+``` go
+func (r *Reader) Peek(n int) ([]byte, error)
+```
+Peek returns the next 'n' buffered bytes,
+reading from the underlying reader if necessary.
+It will only return a slice shorter than 'n' bytes
+if it also returns an error. Peek does not advance
+the reader. EOF errors are *not* returned as
+io.ErrUnexpectedEOF.
+
+
+
+### func (\*Reader) Read
+``` go
+func (r *Reader) Read(b []byte) (int, error)
+```
+Read implements `io.Reader`
+
+
+
+### func (\*Reader) ReadByte
+``` go
+func (r *Reader) ReadByte() (byte, error)
+```
+ReadByte implements `io.ByteReader`
+
+
+
+### func (\*Reader) ReadFull
+``` go
+func (r *Reader) ReadFull(b []byte) (int, error)
+```
+ReadFull attempts to read len(b) bytes into
+'b'. It returns the number of bytes read into
+'b', and an error if it does not return len(b).
+EOF is considered an unexpected error.
+
+
+
+### func (\*Reader) Reset
+``` go
+func (r *Reader) Reset(rd io.Reader)
+```
+Reset resets the underlying reader
+and the read buffer.
+
+
+
+### func (\*Reader) Skip
+``` go
+func (r *Reader) Skip(n int) (int, error)
+```
+Skip moves the reader forward 'n' bytes.
+Returns the number of bytes skipped and any
+errors encountered. It is analogous to Seek(n, 1).
+If the underlying reader implements io.Seeker, then
+that method will be used to skip forward.
+
+If the reader encounters
+an EOF before skipping 'n' bytes, it
+returns io.ErrUnexpectedEOF. If the
+underlying reader implements io.Seeker, then
+those rules apply instead. (Many implementations
+will not return `io.EOF` until the next call
+to Read.)
+
+
+
+### func (\*Reader) WriteTo
+``` go
+func (r *Reader) WriteTo(w io.Writer) (int64, error)
+```
+WriteTo implements `io.WriterTo`
+
+
+
+## type Writer
+``` go
+type Writer struct {
+ // contains filtered or unexported fields
+}
+```
+Writer is a buffered writer
+
+
+
+
+
+
+
+
+
+### func NewWriter
+``` go
+func NewWriter(w io.Writer) *Writer
+```
+NewWriter returns a new writer
+that writes to 'w' and has a buffer
+that is `DefaultWriterSize` bytes.
+
+
+### func NewWriterSize
+``` go
+func NewWriterSize(w io.Writer, size int) *Writer
+```
+NewWriterSize returns a new writer
+that writes to 'w' and has a buffer
+that is 'size' bytes.
+
+
+
+
+### func (\*Writer) BufferSize
+``` go
+func (w *Writer) BufferSize() int
+```
+BufferSize returns the maximum size of the buffer.
+
+
+
+### func (\*Writer) Buffered
+``` go
+func (w *Writer) Buffered() int
+```
+Buffered returns the number of buffered bytes
+in the reader.
+
+
+
+### func (\*Writer) Flush
+``` go
+func (w *Writer) Flush() error
+```
+Flush flushes any buffered bytes
+to the underlying writer.
+
+
+
+### func (\*Writer) Next
+``` go
+func (w *Writer) Next(n int) ([]byte, error)
+```
+Next returns the next 'n' free bytes
+in the write buffer, flushing the writer
+as necessary. Next will return `io.ErrShortBuffer`
+if 'n' is greater than the size of the write buffer.
+Calls to 'next' increment the write position by
+the size of the returned buffer.
+
+
+
+### func (\*Writer) ReadFrom
+``` go
+func (w *Writer) ReadFrom(r io.Reader) (int64, error)
+```
+ReadFrom implements `io.ReaderFrom`
+
+
+
+### func (\*Writer) Write
+``` go
+func (w *Writer) Write(p []byte) (int, error)
+```
+Write implements `io.Writer`
+
+
+
+### func (\*Writer) WriteByte
+``` go
+func (w *Writer) WriteByte(b byte) error
+```
+WriteByte implements `io.ByteWriter`
+
+
+
+### func (\*Writer) WriteString
+``` go
+func (w *Writer) WriteString(s string) (int, error)
+```
+WriteString is analogous to Write, but it takes a string.
+
+
+
+
+
+
+
+
+
+- - -
+Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
\ No newline at end of file
diff --git a/vendor/github.com/philhofer/fwd/reader.go b/vendor/github.com/philhofer/fwd/reader.go
new file mode 100644
index 00000000..75be62ab
--- /dev/null
+++ b/vendor/github.com/philhofer/fwd/reader.go
@@ -0,0 +1,383 @@
+// The `fwd` package provides a buffered reader
+// and writer. Each has methods that help improve
+// the encoding/decoding performance of some binary
+// protocols.
+//
+// The `fwd.Writer` and `fwd.Reader` types provide similar
+// functionality to their counterparts in `bufio`, plus
+// a few extra utility methods that simplify read-ahead
+// and write-ahead. I wrote this package to improve serialization
+// performance for http://github.com/tinylib/msgp,
+// where it provided about a 2x speedup over `bufio` for certain
+// workloads. However, care must be taken to understand the semantics of the
+// extra methods provided by this package, as they allow
+// the user to access and manipulate the buffer memory
+// directly.
+//
+// The extra methods for `fwd.Reader` are `Peek`, `Skip`
+// and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`,
+// will re-allocate the read buffer in order to accommodate arbitrarily
+// large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes
+// in the stream, and uses the `io.Seeker` interface if the underlying
+// stream implements it. `(*fwd.Reader).Next` returns a slice pointing
+// to the next `n` bytes in the read buffer (like `Peek`), but also
+// increments the read position. This allows users to process streams
+// in arbitrary block sizes without having to manage appropriately-sized
+// slices. Additionally, obviating the need to copy the data from the
+// buffer to another location in memory can improve performance dramatically
+// in CPU-bound applications.
+//
+// `fwd.Writer` only has one extra method, `(*fwd.Writer).Next`, which
+// returns a slice pointing to the next `n` bytes of the writer, and increments
+// the write position by the length of the returned slice. This allows users
+// to write directly to the end of the buffer.
+//
+package fwd
+
+import "io"
+
+const (
+ // DefaultReaderSize is the default size of the read buffer
+ DefaultReaderSize = 2048
+
+ // minimum read buffer; straight from bufio
+ minReaderSize = 16
+)
+
+// NewReader returns a new *Reader that reads from 'r'
+func NewReader(r io.Reader) *Reader {
+ return NewReaderSize(r, DefaultReaderSize)
+}
+
+// NewReaderSize returns a new *Reader that
+// reads from 'r' and has a buffer size 'n'
+func NewReaderSize(r io.Reader, n int) *Reader {
+ rd := &Reader{
+ r: r,
+ data: make([]byte, 0, max(minReaderSize, n)),
+ }
+ if s, ok := r.(io.Seeker); ok {
+ rd.rs = s
+ }
+ return rd
+}
+
+// Reader is a buffered look-ahead reader
+type Reader struct {
+ r io.Reader // underlying reader
+
+ // data[n:len(data)] is buffered data; data[len(data):cap(data)] is free buffer space
+ data []byte // data
+ n int // read offset
+ state error // last read error
+
+	// if the reader passed to NewReader was
+ // also an io.Seeker, this is non-nil
+ rs io.Seeker
+}
+
+// Reset resets the underlying reader
+// and the read buffer.
+func (r *Reader) Reset(rd io.Reader) {
+ r.r = rd
+ r.data = r.data[0:0]
+ r.n = 0
+ r.state = nil
+ if s, ok := rd.(io.Seeker); ok {
+ r.rs = s
+ } else {
+ r.rs = nil
+ }
+}
+
+// more() does one read on the underlying reader
+func (r *Reader) more() {
+ // move data backwards so that
+ // the read offset is 0; this way
+ // we can supply the maximum number of
+ // bytes to the reader
+ if r.n != 0 {
+ if r.n < len(r.data) {
+ r.data = r.data[:copy(r.data[0:], r.data[r.n:])]
+ } else {
+ r.data = r.data[:0]
+ }
+ r.n = 0
+ }
+ var a int
+ a, r.state = r.r.Read(r.data[len(r.data):cap(r.data)])
+ if a == 0 && r.state == nil {
+ r.state = io.ErrNoProgress
+ return
+ } else if a > 0 && r.state == io.EOF {
+ // discard the io.EOF if we read more than 0 bytes.
+ // the next call to Read should return io.EOF again.
+ r.state = nil
+ }
+ r.data = r.data[:len(r.data)+a]
+}
+
+// pop error
+func (r *Reader) err() (e error) {
+ e, r.state = r.state, nil
+ return
+}
+
+// pop error; EOF -> io.ErrUnexpectedEOF
+func (r *Reader) noEOF() (e error) {
+ e, r.state = r.state, nil
+ if e == io.EOF {
+ e = io.ErrUnexpectedEOF
+ }
+ return
+}
+
+// buffered bytes
+func (r *Reader) buffered() int { return len(r.data) - r.n }
+
+// Buffered returns the number of bytes currently in the buffer
+func (r *Reader) Buffered() int { return len(r.data) - r.n }
+
+// BufferSize returns the total size of the buffer
+func (r *Reader) BufferSize() int { return cap(r.data) }
+
+// Peek returns the next 'n' buffered bytes,
+// reading from the underlying reader if necessary.
+// It will only return a slice shorter than 'n' bytes
+// if it also returns an error. Peek does not advance
+// the reader. EOF errors are *not* returned as
+// io.ErrUnexpectedEOF.
+func (r *Reader) Peek(n int) ([]byte, error) {
+ // in the degenerate case,
+ // we may need to realloc
+ // (the caller asked for more
+ // bytes than the size of the buffer)
+ if cap(r.data) < n {
+ old := r.data[r.n:]
+ r.data = make([]byte, n+r.buffered())
+ r.data = r.data[:copy(r.data, old)]
+ r.n = 0
+ }
+
+ // keep filling until
+ // we hit an error or
+ // read enough bytes
+ for r.buffered() < n && r.state == nil {
+ r.more()
+ }
+
+ // we must have hit an error
+ if r.buffered() < n {
+ return r.data[r.n:], r.err()
+ }
+
+ return r.data[r.n : r.n+n], nil
+}
+
+// Skip moves the reader forward 'n' bytes.
+// Returns the number of bytes skipped and any
+// errors encountered. It is analogous to Seek(n, 1).
+// If the underlying reader implements io.Seeker, then
+// that method will be used to skip forward.
+//
+// If the reader encounters
+// an EOF before skipping 'n' bytes, it
+// returns io.ErrUnexpectedEOF. If the
+// underlying reader implements io.Seeker, then
+// those rules apply instead. (Many implementations
+// will not return `io.EOF` until the next call
+// to Read.)
+func (r *Reader) Skip(n int) (int, error) {
+
+ // fast path
+ if r.buffered() >= n {
+ r.n += n
+ return n, nil
+ }
+
+ // use seeker implementation
+ // if we can
+ if r.rs != nil {
+ return r.skipSeek(n)
+ }
+
+ // loop on filling
+ // and then erasing
+ o := n
+ for r.buffered() < n && r.state == nil {
+ r.more()
+ // we can skip forward
+ // up to r.buffered() bytes
+ step := min(r.buffered(), n)
+ r.n += step
+ n -= step
+ }
+ // at this point, n should be
+ // 0 if everything went smoothly
+ return o - n, r.noEOF()
+}
+
+// Next returns the next 'n' bytes in the stream.
+// Unlike Peek, Next advances the reader position.
+// The returned bytes point to the same
+// data as the buffer, so the slice is
+// only valid until the next reader method call.
+// An EOF is considered an unexpected error.
+// If the returned slice is less than the
+// length asked for, an error will be returned,
+// and the reader position will not be incremented.
+func (r *Reader) Next(n int) ([]byte, error) {
+
+ // in case the buffer is too small
+ if cap(r.data) < n {
+ old := r.data[r.n:]
+ r.data = make([]byte, n+r.buffered())
+ r.data = r.data[:copy(r.data, old)]
+ r.n = 0
+ }
+
+ // fill at least 'n' bytes
+ for r.buffered() < n && r.state == nil {
+ r.more()
+ }
+
+ if r.buffered() < n {
+ return r.data[r.n:], r.noEOF()
+ }
+ out := r.data[r.n : r.n+n]
+ r.n += n
+ return out, nil
+}
+
+// skipSeek uses the io.Seeker to seek forward.
+// only call this function when n > r.buffered()
+func (r *Reader) skipSeek(n int) (int, error) {
+ o := r.buffered()
+ // first, clear buffer
+ n -= o
+ r.n = 0
+ r.data = r.data[:0]
+
+	// then seek forward remaining bytes
+ i, err := r.rs.Seek(int64(n), 1)
+ return int(i) + o, err
+}
+
+// Read implements `io.Reader`
+func (r *Reader) Read(b []byte) (int, error) {
+ // if we have data in the buffer, just
+ // return that.
+ if r.buffered() != 0 {
+ x := copy(b, r.data[r.n:])
+ r.n += x
+ return x, nil
+ }
+ var n int
+ // we have no buffered data; determine
+ // whether or not to buffer or call
+ // the underlying reader directly
+ if len(b) >= cap(r.data) {
+ n, r.state = r.r.Read(b)
+ } else {
+ r.more()
+ n = copy(b, r.data)
+ r.n = n
+ }
+ if n == 0 {
+ return 0, r.err()
+ }
+ return n, nil
+}
+
+// ReadFull attempts to read len(b) bytes into
+// 'b'. It returns the number of bytes read into
+// 'b', and an error if it does not return len(b).
+// EOF is considered an unexpected error.
+func (r *Reader) ReadFull(b []byte) (int, error) {
+ var n int // read into b
+ var nn int // scratch
+ l := len(b)
+	// either read buffered data,
+	// read directly from the underlying
+	// reader, or fetch more buffered data.
+ for n < l && r.state == nil {
+ if r.buffered() != 0 {
+ nn = copy(b[n:], r.data[r.n:])
+ n += nn
+ r.n += nn
+ } else if l-n > cap(r.data) {
+ nn, r.state = r.r.Read(b[n:])
+ n += nn
+ } else {
+ r.more()
+ }
+ }
+ if n < l {
+ return n, r.noEOF()
+ }
+ return n, nil
+}
+
+// ReadByte implements `io.ByteReader`
+func (r *Reader) ReadByte() (byte, error) {
+ for r.buffered() < 1 && r.state == nil {
+ r.more()
+ }
+ if r.buffered() < 1 {
+ return 0, r.err()
+ }
+ b := r.data[r.n]
+ r.n++
+ return b, nil
+}
+
+// WriteTo implements `io.WriterTo`
+func (r *Reader) WriteTo(w io.Writer) (int64, error) {
+ var (
+ i int64
+ ii int
+ err error
+ )
+ // first, clear buffer
+ if r.buffered() > 0 {
+ ii, err = w.Write(r.data[r.n:])
+ i += int64(ii)
+ if err != nil {
+ return i, err
+ }
+ r.data = r.data[0:0]
+ r.n = 0
+ }
+ for r.state == nil {
+ // here we just do
+ // 1:1 reads and writes
+ r.more()
+ if r.buffered() > 0 {
+ ii, err = w.Write(r.data)
+ i += int64(ii)
+ if err != nil {
+ return i, err
+ }
+ r.data = r.data[0:0]
+ r.n = 0
+ }
+ }
+ if r.state != io.EOF {
+ return i, r.err()
+ }
+ return i, nil
+}
+
+func min(a int, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func max(a int, b int) int {
+ if a < b {
+ return b
+ }
+ return a
+}
diff --git a/vendor/github.com/philhofer/fwd/reader_test.go b/vendor/github.com/philhofer/fwd/reader_test.go
new file mode 100644
index 00000000..e96303ad
--- /dev/null
+++ b/vendor/github.com/philhofer/fwd/reader_test.go
@@ -0,0 +1,398 @@
+package fwd
+
+import (
+ "bytes"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "testing"
+ "unsafe"
+)
+
+// partialReader reads into only
+// part of the supplied byte slice
+// from the underlying reader
+type partialReader struct {
+ r io.Reader
+}
+
+func (p partialReader) Read(b []byte) (int, error) {
+ n := max(1, rand.Intn(len(b)))
+ return p.r.Read(b[:n])
+}
+
+func randomBts(sz int) []byte {
+ o := make([]byte, sz)
+ for i := 0; i < len(o); i += 8 {
+ j := (*int64)(unsafe.Pointer(&o[i]))
+ *j = rand.Int63()
+ }
+ return o
+}
+
+func TestRead(t *testing.T) {
+ bts := randomBts(512)
+
+ // make the buffer much
+ // smaller than the underlying
+ // bytes to incur multiple fills
+ rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 128)
+
+ if rd.BufferSize() != cap(rd.data) {
+ t.Errorf("BufferSize() returned %d; should return %d", rd.BufferSize(), cap(rd.data))
+ }
+
+ // starting Buffered() should be 0
+ if rd.Buffered() != 0 {
+ t.Errorf("Buffered() should return 0 at initialization; got %d", rd.Buffered())
+ }
+
+ some := make([]byte, 32)
+ n, err := rd.Read(some)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n == 0 {
+		t.Fatal("read 0 bytes w/ a nil error!")
+ }
+ some = some[:n]
+
+ more := make([]byte, 64)
+ j, err := rd.Read(more)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if j == 0 {
+		t.Fatal("read 0 bytes w/ a nil error")
+ }
+ more = more[:j]
+
+ out, err := ioutil.ReadAll(rd)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ all := append(some, more...)
+ all = append(all, out...)
+
+ if !bytes.Equal(bts, all) {
+		t.Errorf("bytes not equal; %d bytes in and %d bytes out", len(bts), len(all))
+ }
+
+ // test filling out of the underlying reader
+ big := randomBts(1 << 21)
+ rd = NewReaderSize(partialReader{bytes.NewReader(big)}, 2048)
+ buf := make([]byte, 3100)
+
+ n, err = rd.ReadFull(buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != 3100 {
+ t.Errorf("expected 3100 bytes read by ReadFull; got %d", n)
+ }
+ if !bytes.Equal(buf[:n], big[:n]) {
+ t.Error("data parity")
+ }
+ rest := make([]byte, (1<<21)-3100)
+ n, err = io.ReadFull(rd, rest)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != len(rest) {
+ t.Errorf("expected %d bytes read by io.ReadFull; got %d", len(rest), n)
+ }
+ if !bytes.Equal(append(buf, rest...), big) {
+ t.Fatal("data parity")
+ }
+}
+
+func TestReadByte(t *testing.T) {
+ bts := randomBts(512)
+ rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 98)
+
+ var (
+ err error
+ i int
+ b byte
+ )
+
+ // scan through the whole
+ // array byte-by-byte
+ for err != io.EOF {
+ b, err = rd.ReadByte()
+ if err == nil {
+ if b != bts[i] {
+ t.Fatalf("offset %d: %d in; %d out", i, b, bts[i])
+ }
+ }
+ i++
+ }
+ if err != io.EOF {
+ t.Fatal(err)
+ }
+}
+
+func TestSkipNoSeek(t *testing.T) {
+ bts := randomBts(1024)
+ rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 200)
+
+ n, err := rd.Skip(512)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != 512 {
+ t.Fatalf("Skip() returned a nil error, but skipped %d bytes instead of %d", n, 512)
+ }
+
+ var b byte
+ b, err = rd.ReadByte()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if b != bts[512] {
+ t.Fatalf("at index %d: %d in; %d out", 512, bts[512], b)
+ }
+
+ n, err = rd.Skip(10)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != 10 {
+ t.Fatalf("Skip() returned a nil error, but skipped %d bytes instead of %d", n, 10)
+ }
+
+ // now try to skip past the end
+ rd = NewReaderSize(partialReader{bytes.NewReader(bts)}, 200)
+
+ n, err = rd.Skip(2000)
+ if err != io.ErrUnexpectedEOF {
+		t.Fatalf("expected error %q; got %q", io.ErrUnexpectedEOF, err)
+ }
+ if n != 1024 {
+ t.Fatalf("expected to skip only 1024 bytes; skipped %d", n)
+ }
+}
+
+func TestSkipSeek(t *testing.T) {
+ bts := randomBts(1024)
+
+ // bytes.Reader implements io.Seeker
+ rd := NewReaderSize(bytes.NewReader(bts), 200)
+
+ n, err := rd.Skip(512)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != 512 {
+ t.Fatalf("Skip() returned a nil error, but skipped %d bytes instead of %d", n, 512)
+ }
+
+ var b byte
+ b, err = rd.ReadByte()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if b != bts[512] {
+ t.Fatalf("at index %d: %d in; %d out", 512, bts[512], b)
+ }
+
+ n, err = rd.Skip(10)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != 10 {
+ t.Fatalf("Skip() returned a nil error, but skipped %d bytes instead of %d", n, 10)
+ }
+
+ // now try to skip past the end
+ rd.Reset(bytes.NewReader(bts))
+
+ // because of how bytes.Reader
+ // implements Seek, this should
+ // return (2000, nil)
+ n, err = rd.Skip(2000)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != 2000 {
+ t.Fatalf("should have returned %d bytes; returned %d", 2000, n)
+ }
+
+ // the next call to Read()
+ // should return io.EOF
+ n, err = rd.Read([]byte{0, 0, 0})
+ if err != io.EOF {
+ t.Errorf("expected %q; got %q", io.EOF, err)
+ }
+ if n != 0 {
+ t.Errorf("expected 0 bytes read; got %d", n)
+ }
+
+}
+
+func TestPeek(t *testing.T) {
+ bts := randomBts(1024)
+ rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 200)
+
+ // first, a peek < buffer size
+ var (
+ peek []byte
+ err error
+ )
+ peek, err = rd.Peek(100)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(peek) != 100 {
+ t.Fatalf("asked for %d bytes; got %d", 100, len(peek))
+ }
+ if !bytes.Equal(peek, bts[:100]) {
+ t.Fatal("peeked bytes not equal")
+ }
+
+ // now, a peek > buffer size
+ peek, err = rd.Peek(256)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(peek) != 256 {
+		t.Fatalf("asked for %d bytes; got %d", 256, len(peek))
+ }
+ if !bytes.Equal(peek, bts[:256]) {
+ t.Fatal("peeked bytes not equal")
+ }
+
+ // now try to peek past EOF
+ peek, err = rd.Peek(2048)
+ if err != io.EOF {
+ t.Fatalf("expected error %q; got %q", io.EOF, err)
+ }
+ if len(peek) != 1024 {
+ t.Fatalf("expected %d bytes peek-able; got %d", 1024, len(peek))
+ }
+}
+
+func TestNext(t *testing.T) {
+ size := 1024
+ bts := randomBts(size)
+ rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 200)
+
+ chunksize := 256
+ chunks := size / chunksize
+
+ for i := 0; i < chunks; i++ {
+ out, err := rd.Next(chunksize)
+ if err != nil {
+ t.Fatal(err)
+ }
+ start := chunksize * i
+ if !bytes.Equal(bts[start:start+chunksize], out) {
+ t.Fatalf("chunk %d: chunks not equal", i+1)
+ }
+ }
+}
+
+func TestWriteTo(t *testing.T) {
+ bts := randomBts(2048)
+ rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 200)
+
+ // cause the buffer
+ // to fill a little, just
+ // to complicate things
+ rd.Peek(25)
+
+ var out bytes.Buffer
+ n, err := rd.WriteTo(&out)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != 2048 {
+ t.Fatalf("should have written %d bytes; wrote %d", 2048, n)
+ }
+ if !bytes.Equal(out.Bytes(), bts) {
+ t.Fatal("bytes not equal")
+ }
+}
+
+func TestReadFull(t *testing.T) {
+ bts := randomBts(1024)
+ rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 256)
+
+ // try to ReadFull() the whole thing
+ out := make([]byte, 1024)
+ n, err := rd.ReadFull(out)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != 1024 {
+ t.Fatalf("expected to read %d bytes; read %d", 1024, n)
+ }
+ if !bytes.Equal(bts, out) {
+ t.Fatal("bytes not equal")
+ }
+
+ // we've read everything; this should EOF
+ n, err = rd.Read(out)
+ if err != io.EOF {
+ t.Fatalf("expected %q; got %q", io.EOF, err)
+ }
+
+ rd.Reset(partialReader{bytes.NewReader(bts)})
+
+ // now try to read *past* EOF
+ out = make([]byte, 1500)
+ n, err = rd.ReadFull(out)
+ if err != io.ErrUnexpectedEOF {
+		t.Fatalf("expected error %q; got %q", io.ErrUnexpectedEOF, err)
+ }
+ if n != 1024 {
+ t.Fatalf("expected to read %d bytes; read %d", 1024, n)
+ }
+}
+
+type readCounter struct {
+ r io.Reader
+ count int
+}
+
+func (r *readCounter) Read(p []byte) (int, error) {
+ r.count++
+ return r.r.Read(p)
+}
+
+func TestReadFullPerf(t *testing.T) {
+ const size = 1 << 22
+ data := randomBts(size)
+
+ c := readCounter{
+ r: &partialReader{
+ r: bytes.NewReader(data),
+ },
+ }
+
+ r := NewReader(&c)
+
+ const segments = 4
+ out := make([]byte, size/segments)
+
+ for i := 0; i < segments; i++ {
+ // force an unaligned read
+ _, err := r.Peek(5)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ n, err := r.ReadFull(out)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != size/segments {
+ t.Fatalf("read %d bytes, not %d", n, size/segments)
+ }
+ }
+
+ t.Logf("called Read() on the underlying reader %d times to fill %d buffers", c.count, size/r.BufferSize())
+}
diff --git a/vendor/github.com/philhofer/fwd/writer.go b/vendor/github.com/philhofer/fwd/writer.go
new file mode 100644
index 00000000..2dc392a9
--- /dev/null
+++ b/vendor/github.com/philhofer/fwd/writer.go
@@ -0,0 +1,224 @@
+package fwd
+
+import "io"
+
+const (
+ // DefaultWriterSize is the
+ // default write buffer size.
+ DefaultWriterSize = 2048
+
+ minWriterSize = minReaderSize
+)
+
+// Writer is a buffered writer
+type Writer struct {
+ w io.Writer // writer
+	buf []byte // 0:len(buf) is buffered data
+}
+
+// NewWriter returns a new writer
+// that writes to 'w' and has a buffer
+// that is `DefaultWriterSize` bytes.
+func NewWriter(w io.Writer) *Writer {
+ if wr, ok := w.(*Writer); ok {
+ return wr
+ }
+ return &Writer{
+ w: w,
+ buf: make([]byte, 0, DefaultWriterSize),
+ }
+}
+
+// NewWriterSize returns a new writer
+// that writes to 'w' and has a buffer
+// that is 'size' bytes.
+func NewWriterSize(w io.Writer, size int) *Writer {
+ if wr, ok := w.(*Writer); ok && cap(wr.buf) >= size {
+ return wr
+ }
+ return &Writer{
+ w: w,
+ buf: make([]byte, 0, max(size, minWriterSize)),
+ }
+}
+
+// Buffered returns the number of buffered bytes
+// in the writer.
+func (w *Writer) Buffered() int { return len(w.buf) }
+
+// BufferSize returns the maximum size of the buffer.
+func (w *Writer) BufferSize() int { return cap(w.buf) }
+
+// Flush flushes any buffered bytes
+// to the underlying writer.
+func (w *Writer) Flush() error {
+ l := len(w.buf)
+ if l > 0 {
+ n, err := w.w.Write(w.buf)
+
+ // if we didn't write the whole
+ // thing, copy the unwritten
+	// bytes to the beginning of the
+ // buffer.
+ if n < l && n > 0 {
+ w.pushback(n)
+ if err == nil {
+ err = io.ErrShortWrite
+ }
+ }
+ if err != nil {
+ return err
+ }
+ w.buf = w.buf[:0]
+ return nil
+ }
+ return nil
+}
+
+// Write implements `io.Writer`
+func (w *Writer) Write(p []byte) (int, error) {
+ c, l, ln := cap(w.buf), len(w.buf), len(p)
+ avail := c - l
+
+ // requires flush
+ if avail < ln {
+ if err := w.Flush(); err != nil {
+ return 0, err
+ }
+ l = len(w.buf)
+ }
+ // too big to fit in buffer;
+ // write directly to w.w
+ if c < ln {
+ return w.w.Write(p)
+ }
+
+ // grow buf slice; copy; return
+ w.buf = w.buf[:l+ln]
+ return copy(w.buf[l:], p), nil
+}
+
+// WriteString is analogous to Write, but it takes a string.
+func (w *Writer) WriteString(s string) (int, error) {
+ c, l, ln := cap(w.buf), len(w.buf), len(s)
+ avail := c - l
+
+ // requires flush
+ if avail < ln {
+ if err := w.Flush(); err != nil {
+ return 0, err
+ }
+ l = len(w.buf)
+ }
+ // too big to fit in buffer;
+ // write directly to w.w
+ //
+ // yes, this is unsafe. *but*
+ // io.Writer is not allowed
+ // to mutate its input or
+ // maintain a reference to it,
+ // per the spec in package io.
+ //
+ // plus, if the string is really
+ // too big to fit in the buffer, then
+ // creating a copy to write it is
+ // expensive (and, strictly speaking,
+ // unnecessary)
+ if c < ln {
+ return w.w.Write(unsafestr(s))
+ }
+
+ // grow buf slice; copy; return
+ w.buf = w.buf[:l+ln]
+ return copy(w.buf[l:], s), nil
+}
+
+// WriteByte implements `io.ByteWriter`
+func (w *Writer) WriteByte(b byte) error {
+ if len(w.buf) == cap(w.buf) {
+ if err := w.Flush(); err != nil {
+ return err
+ }
+ }
+ w.buf = append(w.buf, b)
+ return nil
+}
+
+// Next returns the next 'n' free bytes
+// in the write buffer, flushing the writer
+// as necessary. Next will return `io.ErrShortBuffer`
+// if 'n' is greater than the size of the write buffer.
+// Calls to 'next' increment the write position by
+// the size of the returned buffer.
+func (w *Writer) Next(n int) ([]byte, error) {
+ c, l := cap(w.buf), len(w.buf)
+ if n > c {
+ return nil, io.ErrShortBuffer
+ }
+ avail := c - l
+ if avail < n {
+ if err := w.Flush(); err != nil {
+ return nil, err
+ }
+ l = len(w.buf)
+ }
+ w.buf = w.buf[:l+n]
+ return w.buf[l:], nil
+}
+
+// take the bytes from w.buf[n:len(w.buf)]
+// and put them at the beginning of w.buf,
+// and resize to the length of the copied segment.
+func (w *Writer) pushback(n int) {
+ w.buf = w.buf[:copy(w.buf, w.buf[n:])]
+}
+
+// ReadFrom implements `io.ReaderFrom`
+func (w *Writer) ReadFrom(r io.Reader) (int64, error) {
+ // anticipatory flush
+ if err := w.Flush(); err != nil {
+ return 0, err
+ }
+
+ w.buf = w.buf[0:cap(w.buf)] // expand buffer
+
+ var nn int64 // written
+ var err error // error
+ var x int // read
+
+ // 1:1 reads and writes
+ for err == nil {
+ x, err = r.Read(w.buf)
+ if x > 0 {
+ n, werr := w.w.Write(w.buf[:x])
+ nn += int64(n)
+
+			if werr != nil {
+ if n < x && n > 0 {
+ w.pushback(n - x)
+ }
+ return nn, werr
+ }
+ if n < x {
+ w.pushback(n - x)
+ return nn, io.ErrShortWrite
+ }
+ } else if err == nil {
+ err = io.ErrNoProgress
+ break
+ }
+ }
+ if err != io.EOF {
+ return nn, err
+ }
+
+ // we only clear here
+ // because we are sure
+ // the writes have
+ // succeeded. otherwise,
+ // we retain the data in case
+ // future writes succeed.
+ w.buf = w.buf[0:0]
+
+ return nn, nil
+}
diff --git a/vendor/github.com/philhofer/fwd/writer_appengine.go b/vendor/github.com/philhofer/fwd/writer_appengine.go
new file mode 100644
index 00000000..e367f393
--- /dev/null
+++ b/vendor/github.com/philhofer/fwd/writer_appengine.go
@@ -0,0 +1,5 @@
+// +build appengine
+
+package fwd
+
+func unsafestr(s string) []byte { return []byte(s) }
diff --git a/vendor/github.com/philhofer/fwd/writer_test.go b/vendor/github.com/philhofer/fwd/writer_test.go
new file mode 100644
index 00000000..3dcf3a5b
--- /dev/null
+++ b/vendor/github.com/philhofer/fwd/writer_test.go
@@ -0,0 +1,239 @@
+package fwd
+
+import (
+ "bytes"
+ "io"
+ "math/rand"
+ "testing"
+)
+
+type chunkedWriter struct {
+ w *Writer
+}
+
+// writes 'p' in randomly-sized chunks
+func (c chunkedWriter) Write(p []byte) (int, error) {
+ l := len(p)
+ n := 0
+ for n < l {
+ amt := max(rand.Intn(l-n), 1) // number of bytes to write; at least 1
+		nn, err := c.w.Write(p[n : n+amt])
+ n += nn
+ if err == nil && nn < amt {
+ err = io.ErrShortWrite
+ }
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+}
+
+// analogous to Write(), but w/ str
+func (c chunkedWriter) WriteString(s string) (int, error) {
+ l := len(s)
+ n := 0
+ for n < l {
+ amt := max(rand.Intn(l-n), 1) // number of bytes to write; at least 1
+		nn, err := c.w.WriteString(s[n : n+amt])
+ n += nn
+ if err == nil && nn < amt {
+ err = io.ErrShortWrite
+ }
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+}
+
+// writes via random calls to Next()
+type nextWriter struct {
+ wr *Writer
+}
+
+func (c nextWriter) Write(p []byte) (int, error) {
+ l := len(p)
+ n := 0
+ for n < l {
+ amt := max(rand.Intn(l-n), 1) // at least 1 byte
+ fwd, err := c.wr.Next(amt) // get next (amt) bytes
+ if err != nil {
+
+ // this may happen occasionally
+ if err == io.ErrShortBuffer {
+ if cap(c.wr.buf) >= amt {
+ panic("bad io.ErrShortBuffer")
+ }
+ continue
+ }
+
+ return n, err
+ }
+ if len(fwd) != amt {
+ panic("bad Next() len")
+ }
+ n += copy(fwd, p[n:])
+ }
+ return n, nil
+}
+
+func TestWrite(t *testing.T) {
+ nbts := 4096
+ bts := randomBts(nbts)
+ var buf bytes.Buffer
+ wr := NewWriterSize(&buf, 512)
+
+ if wr.BufferSize() != 512 {
+ t.Fatalf("expected BufferSize() to be %d; found %d", 512, wr.BufferSize())
+ }
+
+ cwr := chunkedWriter{wr}
+ nb, err := cwr.Write(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if nb != nbts {
+ t.Fatalf("expected to write %d bytes; wrote %d bytes", nbts, nb)
+ }
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if wr.Buffered() != 0 {
+ t.Fatalf("expected 0 buffered bytes; found %d", wr.Buffered())
+ }
+
+ if buf.Len() != nbts {
+ t.Fatalf("wrote %d bytes, but buffer is %d bytes long", nbts, buf.Len())
+ }
+ if !bytes.Equal(bts, buf.Bytes()) {
+ t.Fatal("buf.Bytes() is not the same as the input bytes")
+ }
+}
+
+func TestWriteString(t *testing.T) {
+ nbts := 3998
+ str := string(randomBts(nbts))
+ var buf bytes.Buffer
+ wr := NewWriterSize(&buf, 1137)
+
+ if wr.BufferSize() != 1137 {
+ t.Fatalf("expected BufferSize() to return %d; returned %d", 1137, wr.BufferSize())
+ }
+
+ cwr := chunkedWriter{wr}
+ nb, err := cwr.WriteString(str)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if nb != nbts {
+ t.Fatalf("expected to write %d bytes; wrote %d bytes", nbts, nb)
+ }
+
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if wr.Buffered() != 0 {
+ t.Fatalf("expected 0 buffered bytes; found %d", wr.Buffered())
+ }
+
+ if buf.Len() != nbts {
+		t.Fatalf("wrote %d bytes, but buffer is %d bytes long", nbts, buf.Len())
+ }
+ if buf.String() != str {
+ t.Fatal("buf.String() is not the same as input string")
+ }
+}
+
+func TestWriteByte(t *testing.T) {
+ nbts := 3200
+ bts := randomBts(nbts)
+ var buf bytes.Buffer
+ wr := NewWriter(&buf)
+
+ if wr.BufferSize() != DefaultWriterSize {
+ t.Fatalf("expected BufferSize() to return %d; returned %d", DefaultWriterSize, wr.BufferSize())
+ }
+
+ // write byte-by-byte
+ for _, b := range bts {
+ if err := wr.WriteByte(b); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ err := wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if buf.Len() != nbts {
+ t.Fatalf("expected buf.Len() to be %d; got %d", nbts, buf.Len())
+ }
+
+ if !bytes.Equal(buf.Bytes(), bts) {
+ t.Fatal("buf.Bytes() and input are not equal")
+ }
+}
+
+func TestWriterNext(t *testing.T) {
+ nbts := 1871
+ bts := randomBts(nbts)
+ var buf bytes.Buffer
+ wr := NewWriterSize(&buf, 500)
+ nwr := nextWriter{wr}
+
+ nb, err := nwr.Write(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if nb != nbts {
+ t.Fatalf("expected to write %d bytes; wrote %d", nbts, nb)
+ }
+
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if buf.Len() != nbts {
+ t.Fatalf("expected buf.Len() to be %d; got %d", nbts, buf.Len())
+ }
+
+ if !bytes.Equal(buf.Bytes(), bts) {
+ t.Fatal("buf.Bytes() and input are not equal")
+ }
+}
+
+func TestReadFrom(t *testing.T) {
+ nbts := 2139
+ bts := randomBts(nbts)
+ var buf bytes.Buffer
+ wr := NewWriterSize(&buf, 987)
+
+ rd := partialReader{bytes.NewReader(bts)}
+
+ nb, err := wr.ReadFrom(rd)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if nb != int64(nbts) {
+		t.Fatalf("expected to write %d bytes; wrote %d", nbts, nb)
+ }
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if buf.Len() != nbts {
+ t.Fatalf("expected buf.Len() to be %d; got %d", nbts, buf.Len())
+ }
+ if !bytes.Equal(buf.Bytes(), bts) {
+ t.Fatal("buf.Bytes() and input are not equal")
+ }
+
+}
diff --git a/vendor/github.com/philhofer/fwd/writer_unsafe.go b/vendor/github.com/philhofer/fwd/writer_unsafe.go
new file mode 100644
index 00000000..a0bf453b
--- /dev/null
+++ b/vendor/github.com/philhofer/fwd/writer_unsafe.go
@@ -0,0 +1,18 @@
+// +build !appengine
+
+package fwd
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// unsafe cast string as []byte
+func unsafestr(b string) []byte {
+ l := len(b)
+ return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
+ Len: l,
+ Cap: l,
+ Data: (*reflect.StringHeader)(unsafe.Pointer(&b)).Data,
+ }))
+}
diff --git a/vendor/github.com/tinylib/msgp/.gitignore b/vendor/github.com/tinylib/msgp/.gitignore
new file mode 100644
index 00000000..17f1ccdc
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/.gitignore
@@ -0,0 +1,7 @@
+_generated/generated.go
+_generated/generated_test.go
+_generated/*_gen.go
+_generated/*_gen_test.go
+msgp/defgen_test.go
+msgp/cover.out
+*~
\ No newline at end of file
diff --git a/vendor/github.com/tinylib/msgp/.travis.yml b/vendor/github.com/tinylib/msgp/.travis.yml
new file mode 100644
index 00000000..b9c6a1b6
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/.travis.yml
@@ -0,0 +1,11 @@
+language: go
+
+go:
+ - 1.7
+ - tip
+
+env:
+ - GIMME_ARCH=amd64
+ - GIMME_ARCH=386
+
+script: "make travis"
diff --git a/vendor/github.com/tinylib/msgp/LICENSE b/vendor/github.com/tinylib/msgp/LICENSE
new file mode 100644
index 00000000..14d60424
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/LICENSE
@@ -0,0 +1,8 @@
+Copyright (c) 2014 Philip Hofer
+Portions Copyright (c) 2009 The Go Authors (license at http://golang.org) where indicated
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/tinylib/msgp/Makefile b/vendor/github.com/tinylib/msgp/Makefile
new file mode 100644
index 00000000..81b8b126
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/Makefile
@@ -0,0 +1,55 @@
+
+# NOTE: This Makefile is only necessary if you
+# plan on developing the msgp tool and library.
+# Installation can still be performed with a
+# normal `go install`.
+
+# generated integration test files
+GGEN = ./_generated/generated.go ./_generated/generated_test.go
+# generated unit test files
+MGEN = ./msgp/defgen_test.go
+
+SHELL := /bin/bash
+
+BIN = $(GOBIN)/msgp
+
+.PHONY: clean wipe install get-deps bench all
+
+$(BIN): */*.go
+ @go install ./...
+
+install: $(BIN)
+
+$(GGEN): ./_generated/def.go
+ go generate ./_generated
+
+$(MGEN): ./msgp/defs_test.go
+ go generate ./msgp
+
+test: all
+ go test -v ./msgp
+ go test -v ./_generated
+
+bench: all
+ go test -bench . ./msgp
+ go test -bench . ./_generated
+
+clean:
+ $(RM) $(GGEN) $(MGEN)
+
+wipe: clean
+ $(RM) $(BIN)
+
+get-deps:
+ go get -d -t ./...
+
+all: install $(GGEN) $(MGEN)
+
+# travis CI enters here
+travis:
+ go get -d -t ./...
+ go build -o "$${GOPATH%%:*}/bin/msgp" .
+ go generate ./msgp
+ go generate ./_generated
+ go test ./msgp
+ go test ./_generated
diff --git a/vendor/github.com/tinylib/msgp/README.md b/vendor/github.com/tinylib/msgp/README.md
new file mode 100644
index 00000000..1328ccaf
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/README.md
@@ -0,0 +1,102 @@
+MessagePack Code Generator [![Build Status](https://travis-ci.org/tinylib/msgp.svg?branch=master)](https://travis-ci.org/tinylib/msgp)
+=======
+
+This is a code generation tool and serialization library for [MessagePack](http://msgpack.org). You can read more about MessagePack [in the wiki](http://github.com/tinylib/msgp/wiki), or at [msgpack.org](http://msgpack.org).
+
+### Why?
+
+- Use Go as your schema language
+- Performance
+- [JSON interop](http://godoc.org/github.com/tinylib/msgp/msgp#CopyToJSON)
+- [User-defined extensions](http://github.com/tinylib/msgp/wiki/Using-Extensions)
+- Type safety
+- Encoding flexibility
+
+### Quickstart
+
+In a source file, include the following directive:
+
+```go
+//go:generate msgp
+```
+
+The `msgp` command will generate serialization methods for all exported type declarations in the file.
+
+You can [read more about the code generation options here](http://github.com/tinylib/msgp/wiki/Using-the-Code-Generator).
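+
+For orientation, here is a minimal file ready for generation (an editorial sketch; the `User` type and its tags are invented for illustration):
+
+```go
+// file: user.go
+package models
+
+//go:generate msgp
+
+// User will get MarshalMsg, UnmarshalMsg, EncodeMsg,
+// DecodeMsg, and Msgsize methods generated for it.
+type User struct {
+	ID   int64  `msg:"id"`
+	Name string `msg:"name"`
+}
+```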
+
+### Use
+
+Field names can be set in much the same way as the `encoding/json` package. For example:
+
+```go
+type Person struct {
+ Name string `msg:"name"`
+ Address string `msg:"address"`
+ Age int `msg:"age"`
+ Hidden string `msg:"-"` // this field is ignored
+ unexported bool // this field is also ignored
+}
+```
+
+By default, the code generator will satisfy `msgp.Sizer`, `msgp.Encodable`, `msgp.Decodable`,
+`msgp.Marshaler`, and `msgp.Unmarshaler`. Carefully-designed applications can use these methods to do
+marshalling/unmarshalling with zero heap allocations.
+
+While `msgp.Marshaler` and `msgp.Unmarshaler` are quite similar to the standard library's
+`json.Marshaler` and `json.Unmarshaler`, `msgp.Encodable` and `msgp.Decodable` are useful for
+stream serialization. (`*msgp.Writer` and `*msgp.Reader` are essentially protocol-aware versions
+of `*bufio.Writer` and `*bufio.Reader`, respectively.)
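+
+As a rough sketch of how the generated methods are typically called (an
+editorial addition, assuming the generator has already been run on the
+`Person` type above), the `[]byte`-oriented and stream-oriented paths
+look like this:
+
+```go
+package example
+
+import (
+	"bytes"
+
+	"github.com/tinylib/msgp/msgp"
+)
+
+// roundTrip assumes the Person type above and its generated methods.
+func roundTrip(p *Person) (*Person, error) {
+	// []byte-oriented: MarshalMsg appends to the supplied slice.
+	raw, err := p.MarshalMsg(nil)
+	if err != nil {
+		return nil, err
+	}
+	out := new(Person)
+	if _, err := out.UnmarshalMsg(raw); err != nil {
+		return nil, err
+	}
+
+	// stream-oriented: msgp.Encode and msgp.Decode wrap
+	// *msgp.Writer and *msgp.Reader internally.
+	var buf bytes.Buffer
+	if err := msgp.Encode(&buf, p); err != nil {
+		return nil, err
+	}
+	if err := msgp.Decode(&buf, out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+```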
+
+### Features
+
+ - Extremely fast generated code
+ - Test and benchmark generation
+ - JSON interoperability (see `msgp.CopyToJSON()` and `msgp.UnmarshalAsJSON()`)
+ - Support for complex type declarations
+ - Native support for Go's `time.Time`, `complex64`, and `complex128` types
+ - Generation of both `[]byte`-oriented and `io.Reader/io.Writer`-oriented methods
+ - Support for arbitrary type system extensions
+ - [Preprocessor directives](http://github.com/tinylib/msgp/wiki/Preprocessor-Directives)
+ - File-based dependency model means fast codegen regardless of source tree size.
+
+Consider the following:
+```go
+const Eight = 8
+type MyInt int
+type Data []byte
+
+type Struct struct {
+ Which map[string]*MyInt `msg:"which"`
+ Other Data `msg:"other"`
+ Nums [Eight]float64 `msg:"nums"`
+}
+```
+As long as the declarations of `MyInt` and `Data` are in the same file as `Struct`, the parser will determine that the type information for `MyInt` and `Data` can be passed into the definition of `Struct` before its methods are generated.
+
+#### Extensions
+
+MessagePack supports defining your own types through "extensions," which are just a tuple of
+the data "type" (`int8`) and the raw binary. You [can see a worked example in the wiki.](http://github.com/tinylib/msgp/wiki/Using-Extensions)
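+
+For a flavor of what that looks like (a minimal, editorial sketch; the
+`RGB` type and the type number 17 are invented for illustration), a type
+implements the `msgp.Extension` interface and registers a constructor so
+the decoders can map the extension number back to a concrete type:
+
+```go
+package ext
+
+import "github.com/tinylib/msgp/msgp"
+
+// RGB is a toy extension: three raw bytes.
+type RGB struct{ R, G, B byte }
+
+// ExtensionType returns the application-chosen extension number.
+// (Negative numbers are reserved by the MessagePack spec.)
+func (c *RGB) ExtensionType() int8 { return 17 }
+
+// Len is the number of bytes the payload occupies.
+func (c *RGB) Len() int { return 3 }
+
+// MarshalBinaryTo writes the payload into a slice of length Len().
+func (c *RGB) MarshalBinaryTo(b []byte) error {
+	b[0], b[1], b[2] = c.R, c.G, c.B
+	return nil
+}
+
+// UnmarshalBinary reads the payload back out.
+func (c *RGB) UnmarshalBinary(b []byte) error {
+	c.R, c.G, c.B = b[0], b[1], b[2]
+	return nil
+}
+
+func init() {
+	msgp.RegisterExtension(17, func() msgp.Extension { return new(RGB) })
+}
+```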
+
+### Status
+
+Mostly stable, in that no breaking changes have been made to the `/msgp` library in more than a year. Newer versions
+of the code may generate different code than older versions for performance reasons. I (@philhofer) am aware of a
+number of stability-critical commercial applications that use this code with good results. But, caveat emptor.
+
+You can read more about how `msgp` maps MessagePack types onto Go types [in the wiki](http://github.com/tinylib/msgp/wiki).
+
+Here are some of the known limitations/restrictions:
+
+- Identifiers from outside the processed source file are assumed (optimistically) to satisfy the generator's interfaces. If this isn't the case, your code will fail to compile.
+- Like most serializers, `chan` and `func` fields are ignored, as well as non-exported fields.
+- Encoding of `interface{}` is limited to built-ins or types that have explicit encoding methods.
+- _Maps must have `string` keys._ This is intentional (as it preserves JSON interop.) Although non-string map keys are not forbidden by the MessagePack standard, many serializers impose this restriction. (It also means *any* well-formed `struct` can be de-serialized into a `map[string]interface{}`.) The only exception to this rule is that the deserializers will allow you to read map keys encoded as `bin` types, due to the fact that some legacy encodings permitted this. (However, those values will still be cast to Go `string`s, and they will be converted to `str` types when re-encoded. It is the responsibility of the user to ensure that map keys are UTF-8 safe in this case.) The same rules hold true for JSON translation.
+
+If the output compiles, then there's a pretty good chance things are fine. (Plus, we generate tests for you.) *Please, please, please* file an issue if you think the generator is writing broken code.
+
+### Performance
+
+If you like benchmarks, see [here](http://bravenewgeek.com/so-you-wanna-go-fast/) and [here](https://github.com/alecthomas/go_serialization_benchmarks).
+
+As one might expect, the generated methods that deal with `[]byte` are faster for small objects, but the `io.Reader/Writer` methods are generally more memory-efficient (and, at some point, faster) for large (> 2KB) objects.
diff --git a/vendor/github.com/tinylib/msgp/_generated/def.go b/vendor/github.com/tinylib/msgp/_generated/def.go
new file mode 100644
index 00000000..5579b256
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/_generated/def.go
@@ -0,0 +1,212 @@
+package _generated
+
+import (
+ "github.com/tinylib/msgp/msgp"
+ "os"
+ "time"
+)
+
+//go:generate msgp -o generated.go
+
+// All of the struct
+// definitions in this
+// file are fed to the code
+// generator when `make test` is
+// called, followed by an
+// invocation of `go test -v` in this
+// directory. A simple way of testing
+// a struct definition is
+// by adding it to this file.
+
+type Block [32]byte
+
+// tests edge cases in
+// size-expression compilation.
+type X struct {
+ Values [32]byte // should compile to 32*msgp.ByteSize; encoded as Bin
+ ValuesPtr *[32]byte // check (*)[:] deref
+ More Block // should be identical to the above
+ Others [][32]int32 // should compile to len(x.Others)*32*msgp.Int32Size
+ Matrix [][]int32 // should not optimize
+ ManyFixed []Fixed
+}
+
+// test fixed-size struct
+// size compilation
+type Fixed struct {
+ A float64
+ B bool
+}
+
+type TestType struct {
+ F *float64 `msg:"float"`
+ Els map[string]string `msg:"elements"`
+ Obj struct { // test anonymous struct
+ ValueA string `msg:"value_a"`
+ ValueB []byte `msg:"value_b"`
+ } `msg:"object"`
+ Child *TestType `msg:"child"`
+ Time time.Time `msg:"time"`
+ Any interface{} `msg:"any"`
+ Appended msgp.Raw `msg:"appended"`
+ Num msgp.Number `msg:"num"`
+ Slice1 []string
+ Slice2 []string
+ SlicePtr *[]string
+}
+
+//msgp:tuple Object
+type Object struct {
+ ObjectNo string `msg:"objno"`
+ Slice1 []string `msg:"slice1"`
+ Slice2 []string `msg:"slice2"`
+ MapMap map[string]map[string]string
+}
+
+//msgp:tuple TestBench
+
+type TestBench struct {
+ Name string
+ BirthDay time.Time
+ Phone string
+ Siblings int
+ Spouse bool
+ Money float64
+}
+
+//msgp:tuple TestFast
+
+type TestFast struct {
+ Lat, Long, Alt float64 // test inline decl
+ Data []byte
+}
+
+// Test nested aliases
+type FastAlias TestFast
+type AliasContainer struct {
+ Fast FastAlias
+}
+
+// Test dependency resolution
+type IntA int
+type IntB IntA
+type IntC IntB
+
+type TestHidden struct {
+ A string
+ B []float64
+ Bad func(string) bool // This results in a warning: field "Bad" unsupported
+}
+
+type Embedded struct {
+ *Embedded // test embedded field
+ Children []Embedded
+ PtrChildren []*Embedded
+ Other string
+}
+
+const eight = 8
+
+type Things struct {
+ Cmplx complex64 `msg:"complex"` // test slices
+ Vals []int32 `msg:"values"`
+ Arr [msgp.ExtensionPrefixSize]float64 `msg:"arr"` // test const array and *ast.SelectorExpr as array size
+ Arr2 [4]float64 `msg:"arr2"` // test basic lit array
+ Ext *msgp.RawExtension `msg:"ext,extension"` // test extension
+ Oext msgp.RawExtension `msg:"oext,extension"` // test extension reference
+}
+
+//msgp:shim SpecialID as:[]byte using:toBytes/fromBytes
+
+type SpecialID string
+type TestObj struct{ ID1, ID2 SpecialID }
+
+func toBytes(id SpecialID) []byte { return []byte(string(id)) }
+func fromBytes(id []byte) SpecialID { return SpecialID(string(id)) }
+
+type MyEnum byte
+
+const (
+ A MyEnum = iota
+ B
+ C
+ D
+ invalid
+)
+
+// test shim directive (below)
+
+//msgp:shim MyEnum as:string using:(MyEnum).String/myenumStr
+
+//msgp:shim *os.File as:string using:filetostr/filefromstr
+
+func filetostr(f *os.File) string {
+ return f.Name()
+}
+
+func filefromstr(s string) *os.File {
+ f, _ := os.Open(s)
+ return f
+}
+
+func (m MyEnum) String() string {
+ switch m {
+ case A:
+ return "A"
+ case B:
+ return "B"
+ case C:
+ return "C"
+ case D:
+ return "D"
+ default:
+ return ""
+ }
+}
+
+func myenumStr(s string) MyEnum {
+ switch s {
+ case "A":
+ return A
+ case "B":
+ return B
+ case "C":
+ return C
+ case "D":
+ return D
+ default:
+ return invalid
+ }
+}
+
+// test pass-specific directive
+//msgp:decode ignore Insane
+
+type Insane [3]map[string]struct{ A, B CustomInt }
+
+type Custom struct {
+ Bts CustomBytes `msg:"bts"`
+ Mp map[string]*Embedded `msg:"mp"`
+ Enums []MyEnum `msg:"enums"` // test explicit enum shim
+	Some FileHandle `msg:"file_handle"`
+}
+
+type Files []*os.File
+
+type FileHandle struct {
+ Relevent Files `msg:"files"`
+ Name string `msg:"name"`
+}
+
+type CustomInt int
+type CustomBytes []byte
+
+type Wrapper struct {
+ Tree *Tree
+}
+
+type Tree struct {
+ Children []Tree
+ Element int
+ Parent *Wrapper
+}
diff --git a/vendor/github.com/tinylib/msgp/_generated/gen_test.go b/vendor/github.com/tinylib/msgp/_generated/gen_test.go
new file mode 100644
index 00000000..a89b3be8
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/_generated/gen_test.go
@@ -0,0 +1,150 @@
+package _generated
+
+import (
+ "bytes"
+ "github.com/tinylib/msgp/msgp"
+ "reflect"
+ "testing"
+ "time"
+)
+
+// benchmark encoding a small, "fast" type.
+// the point here is to see how much garbage
+// is generated intrinsically by the encoding/
+// decoding process as opposed to the nature
+// of the struct.
+func BenchmarkFastEncode(b *testing.B) {
+ v := &TestFast{
+ Lat: 40.12398,
+ Long: -41.9082,
+ Alt: 201.08290,
+ Data: []byte("whaaaaargharbl"),
+ }
+ var buf bytes.Buffer
+ msgp.Encode(&buf, v)
+ en := msgp.NewWriter(msgp.Nowhere)
+ b.SetBytes(int64(buf.Len()))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.EncodeMsg(en)
+ }
+ en.Flush()
+}
+
+// benchmark decoding a small, "fast" type.
+// the point here is to see how much garbage
+// is generated intrinsically by the encoding/
+// decoding process as opposed to the nature
+// of the struct.
+func BenchmarkFastDecode(b *testing.B) {
+ v := &TestFast{
+ Lat: 40.12398,
+ Long: -41.9082,
+ Alt: 201.08290,
+ Data: []byte("whaaaaargharbl"),
+ }
+
+ var buf bytes.Buffer
+ msgp.Encode(&buf, v)
+ dc := msgp.NewReader(msgp.NewEndlessReader(buf.Bytes(), b))
+ b.SetBytes(int64(buf.Len()))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.DecodeMsg(dc)
+ }
+}
+
+func (a *TestType) Equal(b *TestType) bool {
+ // compare times, then zero out those
+ // fields, perform a DeepEqual, and restore them
+ ta, tb := a.Time, b.Time
+ if !ta.Equal(tb) {
+ return false
+ }
+ a.Time, b.Time = time.Time{}, time.Time{}
+ ok := reflect.DeepEqual(a, b)
+ a.Time, b.Time = ta, tb
+ return ok
+}
+
+// This covers the following cases:
+// - Recursive types
+// - Non-builtin identifiers (and recursive types)
+// - time.Time
+// - map[string]string
+// - anonymous structs
+//
+func Test1EncodeDecode(t *testing.T) {
+ f := 32.00
+ tt := &TestType{
+ F: &f,
+ Els: map[string]string{
+ "thing_one": "one",
+ "thing_two": "two",
+ },
+ Obj: struct {
+ ValueA string `msg:"value_a"`
+ ValueB []byte `msg:"value_b"`
+ }{
+ ValueA: "here's the first inner value",
+ ValueB: []byte("here's the second inner value"),
+ },
+ Child: nil,
+ Time: time.Now(),
+ Appended: msgp.Raw([]byte{0xc0}), // 'nil'
+ }
+
+ var buf bytes.Buffer
+
+ err := msgp.Encode(&buf, tt)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tnew := new(TestType)
+
+ err = msgp.Decode(&buf, tnew)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if !tt.Equal(tnew) {
+ t.Logf("in: %v", tt)
+ t.Logf("out: %v", tnew)
+ t.Fatal("objects not equal")
+ }
+
+ tanother := new(TestType)
+
+ buf.Reset()
+ msgp.Encode(&buf, tt)
+
+ var left []byte
+ left, err = tanother.UnmarshalMsg(buf.Bytes())
+ if err != nil {
+ t.Error(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left", len(left))
+ }
+
+ if !tt.Equal(tanother) {
+ t.Logf("in: %v", tt)
+ t.Logf("out: %v", tanother)
+ t.Fatal("objects not equal")
+ }
+}
+
+func TestIssue168(t *testing.T) {
+ buf := bytes.Buffer{}
+ test := TestObj{}
+
+ msgp.Encode(&buf, &TestObj{ID1: "1", ID2: "2"})
+ msgp.Decode(&buf, &test)
+
+ if test.ID1 != "1" || test.ID2 != "2" {
+ t.Fatalf("got back %+v", test)
+ }
+}
diff --git a/vendor/github.com/tinylib/msgp/_generated/issue94.go b/vendor/github.com/tinylib/msgp/_generated/issue94.go
new file mode 100644
index 00000000..4384d5d0
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/_generated/issue94.go
@@ -0,0 +1,31 @@
+package _generated
+
+import (
+ "time"
+)
+
+//go:generate msgp
+
+// Issue 94: shims were not propagated recursively,
+// which caused shims that weren't at the top level
+// to be silently ignored.
+//
+// The following line will generate an error after
+// the code is generated if the generated code doesn't
+// have the right identifier in it.
+
+//go:generate ./search.sh $GOFILE timetostr
+
+//msgp:shim time.Time as:string using:timetostr/strtotime
+type T struct {
+ T time.Time
+}
+
+func timetostr(t time.Time) string {
+ return t.Format(time.RFC3339)
+}
+
+func strtotime(s string) time.Time {
+ t, _ := time.Parse(time.RFC3339, s)
+ return t
+}
diff --git a/vendor/github.com/tinylib/msgp/_generated/search.sh b/vendor/github.com/tinylib/msgp/_generated/search.sh
new file mode 100755
index 00000000..aa6d6477
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/_generated/search.sh
@@ -0,0 +1,12 @@
+#! /bin/sh
+
+FILE=$(echo "$1" | sed 's/\.go/_gen.go/')
+echo "searching" "$FILE" "for" "$2"
+grep -q "$2" "$FILE"
+if [ $? -eq 0 ]
+then
+ echo "OK"
+else
+ echo "whoops!"
+ exit 1
+fi
diff --git a/vendor/github.com/tinylib/msgp/gen/decode.go b/vendor/github.com/tinylib/msgp/gen/decode.go
new file mode 100644
index 00000000..f3907601
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/gen/decode.go
@@ -0,0 +1,218 @@
+package gen
+
+import (
+ "io"
+ "strconv"
+)
+
+func decode(w io.Writer) *decodeGen {
+ return &decodeGen{
+ p: printer{w: w},
+ hasfield: false,
+ }
+}
+
+type decodeGen struct {
+ passes
+ p printer
+ hasfield bool
+}
+
+func (d *decodeGen) Method() Method { return Decode }
+
+func (d *decodeGen) needsField() {
+ if d.hasfield {
+ return
+ }
+ d.p.print("\nvar field []byte; _ = field")
+ d.hasfield = true
+}
+
+func (d *decodeGen) Execute(p Elem) error {
+ p = d.applyall(p)
+ if p == nil {
+ return nil
+ }
+ d.hasfield = false
+ if !d.p.ok() {
+ return d.p.err
+ }
+
+ if !IsPrintable(p) {
+ return nil
+ }
+
+ d.p.comment("DecodeMsg implements msgp.Decodable")
+
+ d.p.printf("\nfunc (%s %s) DecodeMsg(dc *msgp.Reader) (err error) {", p.Varname(), methodReceiver(p))
+ next(d, p)
+ d.p.nakedReturn()
+ unsetReceiver(p)
+ return d.p.err
+}
+
+func (d *decodeGen) gStruct(s *Struct) {
+ if !d.p.ok() {
+ return
+ }
+ if s.AsTuple {
+ d.structAsTuple(s)
+ } else {
+ d.structAsMap(s)
+ }
+ return
+}
+
+func (d *decodeGen) assignAndCheck(name string, typ string) {
+ if !d.p.ok() {
+ return
+ }
+ d.p.printf("\n%s, err = dc.Read%s()", name, typ)
+ d.p.print(errcheck)
+}
+
+func (d *decodeGen) structAsTuple(s *Struct) {
+ nfields := len(s.Fields)
+
+ sz := randIdent()
+ d.p.declare(sz, u32)
+ d.assignAndCheck(sz, arrayHeader)
+ d.p.arrayCheck(strconv.Itoa(nfields), sz)
+ for i := range s.Fields {
+ if !d.p.ok() {
+ return
+ }
+ next(d, s.Fields[i].FieldElem)
+ }
+}
+
+func (d *decodeGen) structAsMap(s *Struct) {
+ d.needsField()
+ sz := randIdent()
+ d.p.declare(sz, u32)
+ d.assignAndCheck(sz, mapHeader)
+
+ d.p.printf("\nfor %s > 0 {\n%s--", sz, sz)
+ d.assignAndCheck("field", mapKey)
+ d.p.print("\nswitch msgp.UnsafeString(field) {")
+ for i := range s.Fields {
+ d.p.printf("\ncase \"%s\":", s.Fields[i].FieldTag)
+ next(d, s.Fields[i].FieldElem)
+ if !d.p.ok() {
+ return
+ }
+ }
+ d.p.print("\ndefault:\nerr = dc.Skip()")
+ d.p.print(errcheck)
+ d.p.closeblock() // close switch
+ d.p.closeblock() // close for loop
+}
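+
+// For reference, a sketch of the shape of code structAsMap emits for a
+// struct with a single field tagged "name" (the uint32 identifier is
+// randomized by randIdent; errcheck expands to "if err != nil { return }"):
+//
+//	var field []byte
+//	_ = field
+//	var zabc uint32
+//	zabc, err = dc.ReadMapHeader()
+//	if err != nil { return }
+//	for zabc > 0 {
+//		zabc--
+//		field, err = dc.ReadMapKeyPtr()
+//		if err != nil { return }
+//		switch msgp.UnsafeString(field) {
+//		case "name":
+//			z.Name, err = dc.ReadString()
+//			if err != nil { return }
+//		default:
+//			err = dc.Skip()
+//			if err != nil { return }
+//		}
+//	}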
+
+func (d *decodeGen) gBase(b *BaseElem) {
+ if !d.p.ok() {
+ return
+ }
+
+ // open block for 'tmp'
+ var tmp string
+ if b.Convert {
+ tmp = randIdent()
+ d.p.printf("\n{ var %s %s", tmp, b.BaseType())
+ }
+
+ vname := b.Varname() // e.g. "z.FieldOne"
+ bname := b.BaseName() // e.g. "Float64"
+
+ // handle special cases
+ // for object type.
+ switch b.Value {
+ case Bytes:
+ if b.Convert {
+ d.p.printf("\n%s, err = dc.ReadBytes([]byte(%s))", tmp, vname)
+ } else {
+ d.p.printf("\n%s, err = dc.ReadBytes(%s)", vname, vname)
+ }
+ case IDENT:
+ d.p.printf("\nerr = %s.DecodeMsg(dc)", vname)
+ case Ext:
+ d.p.printf("\nerr = dc.ReadExtension(%s)", vname)
+ default:
+ if b.Convert {
+ d.p.printf("\n%s, err = dc.Read%s()", tmp, bname)
+ } else {
+ d.p.printf("\n%s, err = dc.Read%s()", vname, bname)
+ }
+ }
+
+ // close block for 'tmp'
+ if b.Convert {
+ d.p.printf("\n%s = %s(%s)\n}", vname, b.FromBase(), tmp)
+ }
+
+ d.p.print(errcheck)
+}
+
+func (d *decodeGen) gMap(m *Map) {
+ if !d.p.ok() {
+ return
+ }
+ sz := randIdent()
+
+ // resize or allocate map
+ d.p.declare(sz, u32)
+ d.assignAndCheck(sz, mapHeader)
+ d.p.resizeMap(sz, m)
+
+ // for element in map, read string/value
+ // pair and assign
+ d.p.printf("\nfor %s > 0 {\n%s--", sz, sz)
+ d.p.declare(m.Keyidx, "string")
+ d.p.declare(m.Validx, m.Value.TypeName())
+ d.assignAndCheck(m.Keyidx, stringTyp)
+ next(d, m.Value)
+ d.p.mapAssign(m)
+ d.p.closeblock()
+}
+
+func (d *decodeGen) gSlice(s *Slice) {
+ if !d.p.ok() {
+ return
+ }
+ sz := randIdent()
+ d.p.declare(sz, u32)
+ d.assignAndCheck(sz, arrayHeader)
+ d.p.resizeSlice(sz, s)
+ d.p.rangeBlock(s.Index, s.Varname(), d, s.Els)
+}
+
+func (d *decodeGen) gArray(a *Array) {
+ if !d.p.ok() {
+ return
+ }
+
+ // special case if we have [const]byte
+ if be, ok := a.Els.(*BaseElem); ok && (be.Value == Byte || be.Value == Uint8) {
+ d.p.printf("\nerr = dc.ReadExactBytes((%s)[:])", a.Varname())
+ d.p.print(errcheck)
+ return
+ }
+ sz := randIdent()
+ d.p.declare(sz, u32)
+ d.assignAndCheck(sz, arrayHeader)
+ d.p.arrayCheck(a.Size, sz)
+
+ d.p.rangeBlock(a.Index, a.Varname(), d, a.Els)
+}
+
+func (d *decodeGen) gPtr(p *Ptr) {
+ if !d.p.ok() {
+ return
+ }
+ d.p.print("\nif dc.IsNil() {")
+ d.p.print("\nerr = dc.ReadNil()")
+ d.p.print(errcheck)
+ d.p.printf("\n%s = nil\n} else {", p.Varname())
+ d.p.initPtr(p)
+ next(d, p.Value)
+ d.p.closeblock()
+}
diff --git a/vendor/github.com/tinylib/msgp/gen/elem.go b/vendor/github.com/tinylib/msgp/gen/elem.go
new file mode 100644
index 00000000..719df2e8
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/gen/elem.go
@@ -0,0 +1,598 @@
+package gen
+
+import (
+ "fmt"
+ "math/rand"
+ "strings"
+)
+
+const (
+ idxChars = "abcdefghijlkmnopqrstuvwxyz"
+ idxLen = 3
+)
+
+// generate a random identifier name
+func randIdent() string {
+ bts := make([]byte, idxLen)
+ for i := range bts {
+ bts[i] = idxChars[rand.Intn(len(idxChars))]
+ }
+
+	// Use a `z` prefix so the randomly generated identifier can't collide
+	// with Go keywords or predeclared names (such as `var` and `int`).
+ return "z" + string(bts)
+}
+
+// This code defines the type declaration tree.
+//
+// Consider the following:
+//
+// type Marshaler struct {
+// Thing1 *float64 `msg:"thing1"`
+// Body []byte `msg:"body"`
+// }
+//
+// A parser using this generator as a backend
+// should parse the above into:
+//
+// var val Elem = &Ptr{
+// name: "z",
+// Value: &Struct{
+// Name: "Marshaler",
+// Fields: []StructField{
+// {
+// FieldTag: "thing1",
+// FieldElem: &Ptr{
+// name: "z.Thing1",
+// Value: &BaseElem{
+// name: "*z.Thing1",
+// Value: Float64,
+// Convert: false,
+// },
+// },
+// },
+// {
+// FieldTag: "body",
+// FieldElem: &BaseElem{
+// name: "z.Body",
+// Value: Bytes,
+// Convert: false,
+// },
+// },
+// },
+// },
+// }
+
+// Primitive is one of the
+// base types
+type Primitive uint8
+
+// this is effectively the
+// list of currently available
+// ReadXxxx / WriteXxxx methods.
+const (
+ Invalid Primitive = iota
+ Bytes
+ String
+ Float32
+ Float64
+ Complex64
+ Complex128
+ Uint
+ Uint8
+ Uint16
+ Uint32
+ Uint64
+ Byte
+ Int
+ Int8
+ Int16
+ Int32
+ Int64
+ Bool
+ Intf // interface{}
+ Time // time.Time
+ Ext // extension
+
+ IDENT // IDENT means an unrecognized identifier
+)
+
+// all of the recognized identities
+// that map to primitive types
+var primitives = map[string]Primitive{
+ "[]byte": Bytes,
+ "string": String,
+ "float32": Float32,
+ "float64": Float64,
+ "complex64": Complex64,
+ "complex128": Complex128,
+ "uint": Uint,
+ "uint8": Uint8,
+ "uint16": Uint16,
+ "uint32": Uint32,
+ "uint64": Uint64,
+ "byte": Byte,
+ "int": Int,
+ "int8": Int8,
+ "int16": Int16,
+ "int32": Int32,
+ "int64": Int64,
+ "bool": Bool,
+ "interface{}": Intf,
+ "time.Time": Time,
+ "msgp.Extension": Ext,
+}
+
+// types built into the library
+// that satisfy all of the
+// interfaces.
+var builtins = map[string]struct{}{
+ "msgp.Raw": struct{}{},
+ "msgp.Number": struct{}{},
+}
+
+// common data/methods for every Elem
+type common struct{ vname, alias string }
+
+func (c *common) SetVarname(s string) { c.vname = s }
+func (c *common) Varname() string { return c.vname }
+func (c *common) Alias(typ string) { c.alias = typ }
+func (c *common) hidden() {}
+
+func IsPrintable(e Elem) bool {
+ if be, ok := e.(*BaseElem); ok && !be.Printable() {
+ return false
+ }
+ return true
+}
+
+// Elem is a go type capable of being
+// serialized into MessagePack. It is
+// implemented by *Ptr, *Struct, *Array,
+// *Slice, *Map, and *BaseElem.
+type Elem interface {
+ // SetVarname sets this nodes
+ // variable name and recursively
+ // sets the names of all its children.
+ // In general, this should only be
+ // called on the parent of the tree.
+ SetVarname(s string)
+
+ // Varname returns the variable
+ // name of the element.
+ Varname() string
+
+ // TypeName is the canonical
+ // go type name of the node
+ // e.g. "string", "int", "map[string]float64"
+ // OR the alias name, if it has been set.
+ TypeName() string
+
+ // Alias sets a type (alias) name
+ Alias(typ string)
+
+ // Copy should perform a deep copy of the object
+ Copy() Elem
+
+ // Complexity returns a measure of the
+ // complexity of element (greater than
+ // or equal to 1.)
+ Complexity() int
+
+ hidden()
+}
+
+// Ident returns the *BaseElem that corresponds
+// to the provided identity.
+func Ident(id string) *BaseElem {
+ p, ok := primitives[id]
+ if ok {
+ return &BaseElem{Value: p}
+ }
+ be := &BaseElem{Value: IDENT}
+ be.Alias(id)
+ return be
+}
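+
+// For example, Ident("float64") yields a *BaseElem with Value == Float64,
+// while Ident("MyType") yields Value == IDENT aliased to "MyType", whose
+// methods are assumed to exist and are checked when the output compiles.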
+
+type Array struct {
+ common
+ Index string // index variable name
+ Size string // array size
+ Els Elem // child
+}
+
+func (a *Array) SetVarname(s string) {
+ a.common.SetVarname(s)
+ridx:
+ a.Index = randIdent()
+
+ // try to avoid using the same
+ // index as a parent slice
+ if strings.Contains(a.Varname(), a.Index) {
+ goto ridx
+ }
+
+ a.Els.SetVarname(fmt.Sprintf("%s[%s]", a.Varname(), a.Index))
+}
+
+func (a *Array) TypeName() string {
+ if a.common.alias != "" {
+ return a.common.alias
+ }
+ a.common.Alias(fmt.Sprintf("[%s]%s", a.Size, a.Els.TypeName()))
+ return a.common.alias
+}
+
+func (a *Array) Copy() Elem {
+ b := *a
+ b.Els = a.Els.Copy()
+ return &b
+}
+
+func (a *Array) Complexity() int { return 1 + a.Els.Complexity() }
+
+// Map is a map[string]Elem
+type Map struct {
+ common
+ Keyidx string // key variable name
+ Validx string // value variable name
+ Value Elem // value element
+}
+
+func (m *Map) SetVarname(s string) {
+ m.common.SetVarname(s)
+ridx:
+ m.Keyidx = randIdent()
+ m.Validx = randIdent()
+
+ // just in case
+ if m.Keyidx == m.Validx {
+ goto ridx
+ }
+
+ m.Value.SetVarname(m.Validx)
+}
+
+func (m *Map) TypeName() string {
+ if m.common.alias != "" {
+ return m.common.alias
+ }
+ m.common.Alias("map[string]" + m.Value.TypeName())
+ return m.common.alias
+}
+
+func (m *Map) Copy() Elem {
+ g := *m
+ g.Value = m.Value.Copy()
+ return &g
+}
+
+func (m *Map) Complexity() int { return 2 + m.Value.Complexity() }
+
+type Slice struct {
+ common
+ Index string
+ Els Elem // The type of each element
+}
+
+func (s *Slice) SetVarname(a string) {
+ s.common.SetVarname(a)
+ s.Index = randIdent()
+ varName := s.Varname()
+ if varName[0] == '*' {
+ // Pointer-to-slice requires parenthesis for slicing.
+ varName = "(" + varName + ")"
+ }
+ s.Els.SetVarname(fmt.Sprintf("%s[%s]", varName, s.Index))
+}
+
+func (s *Slice) TypeName() string {
+ if s.common.alias != "" {
+ return s.common.alias
+ }
+ s.common.Alias("[]" + s.Els.TypeName())
+ return s.common.alias
+}
+
+func (s *Slice) Copy() Elem {
+ z := *s
+ z.Els = s.Els.Copy()
+ return &z
+}
+
+func (s *Slice) Complexity() int {
+ return 1 + s.Els.Complexity()
+}
+
+type Ptr struct {
+ common
+ Value Elem
+}
+
+func (s *Ptr) SetVarname(a string) {
+ s.common.SetVarname(a)
+
+ // struct fields are dereferenced
+ // automatically...
+ switch x := s.Value.(type) {
+ case *Struct:
+ // struct fields are automatically dereferenced
+ x.SetVarname(a)
+ return
+
+ case *BaseElem:
+ // identities have pointer receivers
+ if x.Value == IDENT {
+ x.SetVarname(a)
+ } else {
+ x.SetVarname("*" + a)
+ }
+ return
+
+ default:
+ s.Value.SetVarname("*" + a)
+ return
+ }
+}
+
+func (s *Ptr) TypeName() string {
+ if s.common.alias != "" {
+ return s.common.alias
+ }
+ s.common.Alias("*" + s.Value.TypeName())
+ return s.common.alias
+}
+
+func (s *Ptr) Copy() Elem {
+ v := *s
+ v.Value = s.Value.Copy()
+ return &v
+}
+
+func (s *Ptr) Complexity() int { return 1 + s.Value.Complexity() }
+
+func (s *Ptr) Needsinit() bool {
+ if be, ok := s.Value.(*BaseElem); ok && be.needsref {
+ return false
+ }
+ return true
+}
+
+type Struct struct {
+ common
+ Fields []StructField // field list
+ AsTuple bool // write as an array instead of a map
+}
+
+func (s *Struct) TypeName() string {
+ if s.common.alias != "" {
+ return s.common.alias
+ }
+ str := "struct{\n"
+ for i := range s.Fields {
+ str += s.Fields[i].FieldName + " " + s.Fields[i].FieldElem.TypeName() + ";\n"
+ }
+ str += "}"
+ s.common.Alias(str)
+ return s.common.alias
+}
+
+func (s *Struct) SetVarname(a string) {
+ s.common.SetVarname(a)
+ writeStructFields(s.Fields, a)
+}
+
+func (s *Struct) Copy() Elem {
+ g := *s
+ g.Fields = make([]StructField, len(s.Fields))
+ copy(g.Fields, s.Fields)
+ for i := range s.Fields {
+ g.Fields[i].FieldElem = s.Fields[i].FieldElem.Copy()
+ }
+ return &g
+}
+
+func (s *Struct) Complexity() int {
+ c := 1
+ for i := range s.Fields {
+ c += s.Fields[i].FieldElem.Complexity()
+ }
+ return c
+}
+
+type StructField struct {
+ FieldTag string // the string inside the `msg:""` tag
+ FieldName string // the name of the struct field
+ FieldElem Elem // the field type
+}
+
+// BaseElem is an element that
+// can be represented by a primitive
+// MessagePack type.
+type BaseElem struct {
+ common
+ ShimToBase string // shim to base type, or empty
+ ShimFromBase string // shim from base type, or empty
+ Value Primitive // Type of element
+ Convert bool // should we do an explicit conversion?
+ mustinline bool // must inline; not printable
+ needsref bool // needs reference for shim
+}
+
+func (s *BaseElem) Printable() bool { return !s.mustinline }
+
+func (s *BaseElem) Alias(typ string) {
+ s.common.Alias(typ)
+ if s.Value != IDENT {
+ s.Convert = true
+ }
+ if strings.Contains(typ, ".") {
+ s.mustinline = true
+ }
+}
+
+func (s *BaseElem) SetVarname(a string) {
+ // extensions whose parents
+ // are not pointers need to
+ // be explicitly referenced
+ if s.Value == Ext || s.needsref {
+ if strings.HasPrefix(a, "*") {
+ s.common.SetVarname(a[1:])
+ return
+ }
+ s.common.SetVarname("&" + a)
+ return
+ }
+
+ s.common.SetVarname(a)
+}
+
+// TypeName returns the syntactically correct Go
+// type name for the base element.
+func (s *BaseElem) TypeName() string {
+ if s.common.alias != "" {
+ return s.common.alias
+ }
+ s.common.Alias(s.BaseType())
+ return s.common.alias
+}
+
+// ToBase is used when Convert==true as: tmp = {{ToBase}}({{Varname}})
+func (s *BaseElem) ToBase() string {
+ if s.ShimToBase != "" {
+ return s.ShimToBase
+ }
+ return s.BaseType()
+}
+
+// FromBase is used when Convert==true as: {{Varname}} = {{FromBase}}(tmp)
+func (s *BaseElem) FromBase() string {
+ if s.ShimFromBase != "" {
+ return s.ShimFromBase
+ }
+ return s.TypeName()
+}
+
+// BaseName returns the string form of the
+// base type (e.g. Float64, Ident, etc)
+func (s *BaseElem) BaseName() string {
+ // time is a special case;
+ // we strip the package prefix
+ if s.Value == Time {
+ return "Time"
+ }
+ return s.Value.String()
+}
+
+func (s *BaseElem) BaseType() string {
+ switch s.Value {
+ case IDENT:
+ return s.TypeName()
+
+ // exceptions to the naming/capitalization
+ // rule:
+ case Intf:
+ return "interface{}"
+ case Bytes:
+ return "[]byte"
+ case Time:
+ return "time.Time"
+ case Ext:
+ return "msgp.Extension"
+
+ // everything else is base.String() with
+ // the first letter as lowercase
+ default:
+ return strings.ToLower(s.BaseName())
+ }
+}
+
+func (s *BaseElem) Needsref(b bool) {
+ s.needsref = b
+}
+
+func (s *BaseElem) Copy() Elem {
+ g := *s
+ return &g
+}
+
+func (s *BaseElem) Complexity() int {
+ if s.Convert && !s.mustinline {
+ return 2
+ }
+ // we need to return 1 if !printable(),
+ // in order to make sure that stuff gets
+ // inlined appropriately
+ return 1
+}
+
+// Resolved returns whether or not
+// the type of the element is
+// a primitive or a builtin provided
+// by the package.
+func (s *BaseElem) Resolved() bool {
+ if s.Value == IDENT {
+ _, ok := builtins[s.TypeName()]
+ return ok
+ }
+ return true
+}
+
+func (k Primitive) String() string {
+ switch k {
+ case String:
+ return "String"
+ case Bytes:
+ return "Bytes"
+ case Float32:
+ return "Float32"
+ case Float64:
+ return "Float64"
+ case Complex64:
+ return "Complex64"
+ case Complex128:
+ return "Complex128"
+ case Uint:
+ return "Uint"
+ case Uint8:
+ return "Uint8"
+ case Uint16:
+ return "Uint16"
+ case Uint32:
+ return "Uint32"
+ case Uint64:
+ return "Uint64"
+ case Byte:
+ return "Byte"
+ case Int:
+ return "Int"
+ case Int8:
+ return "Int8"
+ case Int16:
+ return "Int16"
+ case Int32:
+ return "Int32"
+ case Int64:
+ return "Int64"
+ case Bool:
+ return "Bool"
+ case Intf:
+ return "Intf"
+ case Time:
+ return "time.Time"
+ case Ext:
+ return "Extension"
+ case IDENT:
+ return "Ident"
+ default:
+ return "INVALID"
+ }
+}
+
+// writeStructFields is a trampoline for writeBase for
+// all of the fields in a struct
+func writeStructFields(s []StructField, name string) {
+ for i := range s {
+ s[i].FieldElem.SetVarname(fmt.Sprintf("%s.%s", name, s[i].FieldName))
+ }
+}
diff --git a/vendor/github.com/tinylib/msgp/gen/encode.go b/vendor/github.com/tinylib/msgp/gen/encode.go
new file mode 100644
index 00000000..a224a594
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/gen/encode.go
@@ -0,0 +1,184 @@
+package gen
+
+import (
+ "fmt"
+ "github.com/tinylib/msgp/msgp"
+ "io"
+)
+
+func encode(w io.Writer) *encodeGen {
+ return &encodeGen{
+ p: printer{w: w},
+ }
+}
+
+type encodeGen struct {
+ passes
+ p printer
+ fuse []byte
+}
+
+func (e *encodeGen) Method() Method { return Encode }
+
+func (e *encodeGen) Apply(dirs []string) error {
+ return nil
+}
+
+func (e *encodeGen) writeAndCheck(typ string, argfmt string, arg interface{}) {
+ e.p.printf("\nerr = en.Write%s(%s)", typ, fmt.Sprintf(argfmt, arg))
+ e.p.print(errcheck)
+}
+
+func (e *encodeGen) fuseHook() {
+ if len(e.fuse) > 0 {
+ e.appendraw(e.fuse)
+ e.fuse = e.fuse[:0]
+ }
+}
+
+func (e *encodeGen) Fuse(b []byte) {
+ if len(e.fuse) > 0 {
+ e.fuse = append(e.fuse, b...)
+ } else {
+ e.fuse = b
+ }
+}
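+
+// Fuse batches the constant bytes of consecutive headers and field names so
+// that fuseHook can flush them as a single en.Append(...) call before the
+// next variable-length write. For example, a two-field struct's map header
+// is the single fixmap byte 0x82, which fuses with the first field-name
+// bytes.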
+
+func (e *encodeGen) Execute(p Elem) error {
+ if !e.p.ok() {
+ return e.p.err
+ }
+ p = e.applyall(p)
+ if p == nil {
+ return nil
+ }
+ if !IsPrintable(p) {
+ return nil
+ }
+
+ e.p.comment("EncodeMsg implements msgp.Encodable")
+
+ e.p.printf("\nfunc (%s %s) EncodeMsg(en *msgp.Writer) (err error) {", p.Varname(), imutMethodReceiver(p))
+ next(e, p)
+ e.p.nakedReturn()
+ return e.p.err
+}
+
+func (e *encodeGen) gStruct(s *Struct) {
+ if !e.p.ok() {
+ return
+ }
+ if s.AsTuple {
+ e.tuple(s)
+ } else {
+ e.structmap(s)
+ }
+ return
+}
+
+func (e *encodeGen) tuple(s *Struct) {
+ nfields := len(s.Fields)
+ data := msgp.AppendArrayHeader(nil, uint32(nfields))
+ e.p.printf("\n// array header, size %d", nfields)
+ e.Fuse(data)
+ for i := range s.Fields {
+ if !e.p.ok() {
+ return
+ }
+ next(e, s.Fields[i].FieldElem)
+ }
+}
+
+func (e *encodeGen) appendraw(bts []byte) {
+ e.p.print("\nerr = en.Append(")
+ for i, b := range bts {
+ if i != 0 {
+ e.p.print(", ")
+ }
+ e.p.printf("0x%x", b)
+ }
+ e.p.print(")\nif err != nil { return err }")
+}
+
+func (e *encodeGen) structmap(s *Struct) {
+ nfields := len(s.Fields)
+ data := msgp.AppendMapHeader(nil, uint32(nfields))
+ e.p.printf("\n// map header, size %d", nfields)
+ e.Fuse(data)
+ for i := range s.Fields {
+ if !e.p.ok() {
+ return
+ }
+ data = msgp.AppendString(nil, s.Fields[i].FieldTag)
+ e.p.printf("\n// write %q", s.Fields[i].FieldTag)
+ e.Fuse(data)
+ next(e, s.Fields[i].FieldElem)
+ }
+}
+
+func (e *encodeGen) gMap(m *Map) {
+ if !e.p.ok() {
+ return
+ }
+ e.fuseHook()
+ vname := m.Varname()
+ e.writeAndCheck(mapHeader, lenAsUint32, vname)
+
+ e.p.printf("\nfor %s, %s := range %s {", m.Keyidx, m.Validx, vname)
+ e.writeAndCheck(stringTyp, literalFmt, m.Keyidx)
+ next(e, m.Value)
+ e.p.closeblock()
+}
+
+func (e *encodeGen) gPtr(s *Ptr) {
+ if !e.p.ok() {
+ return
+ }
+ e.fuseHook()
+ e.p.printf("\nif %s == nil { err = en.WriteNil(); if err != nil { return; } } else {", s.Varname())
+ next(e, s.Value)
+ e.p.closeblock()
+}
+
+func (e *encodeGen) gSlice(s *Slice) {
+ if !e.p.ok() {
+ return
+ }
+ e.fuseHook()
+ e.writeAndCheck(arrayHeader, lenAsUint32, s.Varname())
+ e.p.rangeBlock(s.Index, s.Varname(), e, s.Els)
+}
+
+func (e *encodeGen) gArray(a *Array) {
+ if !e.p.ok() {
+ return
+ }
+ e.fuseHook()
+ // shortcut for [const]byte
+ if be, ok := a.Els.(*BaseElem); ok && (be.Value == Byte || be.Value == Uint8) {
+ e.p.printf("\nerr = en.WriteBytes((%s)[:])", a.Varname())
+ e.p.print(errcheck)
+ return
+ }
+
+ e.writeAndCheck(arrayHeader, literalFmt, a.Size)
+ e.p.rangeBlock(a.Index, a.Varname(), e, a.Els)
+}
+
+func (e *encodeGen) gBase(b *BaseElem) {
+ if !e.p.ok() {
+ return
+ }
+ e.fuseHook()
+ vname := b.Varname()
+ if b.Convert {
+ vname = tobaseConvert(b)
+ }
+
+ if b.Value == IDENT { // unknown identity
+ e.p.printf("\nerr = %s.EncodeMsg(en)", vname)
+ e.p.print(errcheck)
+ } else { // typical case
+ e.writeAndCheck(b.BaseName(), literalFmt, vname)
+ }
+}
diff --git a/vendor/github.com/tinylib/msgp/gen/marshal.go b/vendor/github.com/tinylib/msgp/gen/marshal.go
new file mode 100644
index 00000000..90eccc22
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/gen/marshal.go
@@ -0,0 +1,198 @@
+package gen
+
+import (
+ "fmt"
+ "github.com/tinylib/msgp/msgp"
+ "io"
+)
+
+func marshal(w io.Writer) *marshalGen {
+ return &marshalGen{
+ p: printer{w: w},
+ }
+}
+
+type marshalGen struct {
+ passes
+ p printer
+ fuse []byte
+}
+
+func (m *marshalGen) Method() Method { return Marshal }
+
+func (m *marshalGen) Apply(dirs []string) error {
+ return nil
+}
+
+func (m *marshalGen) Execute(p Elem) error {
+ if !m.p.ok() {
+ return m.p.err
+ }
+ p = m.applyall(p)
+ if p == nil {
+ return nil
+ }
+ if !IsPrintable(p) {
+ return nil
+ }
+
+ m.p.comment("MarshalMsg implements msgp.Marshaler")
+
+ // save the vname before
+ // calling methodReceiver so
+ // that z.Msgsize() is printed correctly
+ c := p.Varname()
+
+ m.p.printf("\nfunc (%s %s) MarshalMsg(b []byte) (o []byte, err error) {", p.Varname(), imutMethodReceiver(p))
+ m.p.printf("\no = msgp.Require(b, %s.Msgsize())", c)
+ next(m, p)
+ m.p.nakedReturn()
+ return m.p.err
+}
+
+func (m *marshalGen) rawAppend(typ string, argfmt string, arg interface{}) {
+ m.p.printf("\no = msgp.Append%s(o, %s)", typ, fmt.Sprintf(argfmt, arg))
+}
+
+func (m *marshalGen) fuseHook() {
+ if len(m.fuse) > 0 {
+ m.rawbytes(m.fuse)
+ m.fuse = m.fuse[:0]
+ }
+}
+
+func (m *marshalGen) Fuse(b []byte) {
+ if len(m.fuse) == 0 {
+ m.fuse = b
+ } else {
+ m.fuse = append(m.fuse, b...)
+ }
+}
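+
+// Same batching trick as in encodeGen: constant header and field-name bytes
+// accumulate in m.fuse and are emitted as one append(o, 0x.., ...) by
+// fuseHook via rawbytes.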
+
+func (m *marshalGen) gStruct(s *Struct) {
+ if !m.p.ok() {
+ return
+ }
+
+ if s.AsTuple {
+ m.tuple(s)
+ } else {
+ m.mapstruct(s)
+ }
+ return
+}
+
+func (m *marshalGen) tuple(s *Struct) {
+ data := make([]byte, 0, 5)
+ data = msgp.AppendArrayHeader(data, uint32(len(s.Fields)))
+ m.p.printf("\n// array header, size %d", len(s.Fields))
+ m.Fuse(data)
+ for i := range s.Fields {
+ if !m.p.ok() {
+ return
+ }
+ next(m, s.Fields[i].FieldElem)
+ }
+}
+
+func (m *marshalGen) mapstruct(s *Struct) {
+ data := make([]byte, 0, 64)
+ data = msgp.AppendMapHeader(data, uint32(len(s.Fields)))
+ m.p.printf("\n// map header, size %d", len(s.Fields))
+ m.Fuse(data)
+ for i := range s.Fields {
+ if !m.p.ok() {
+ return
+ }
+ data = msgp.AppendString(nil, s.Fields[i].FieldTag)
+
+ m.p.printf("\n// string %q", s.Fields[i].FieldTag)
+ m.Fuse(data)
+
+ next(m, s.Fields[i].FieldElem)
+ }
+}
+
+// append raw data
+func (m *marshalGen) rawbytes(bts []byte) {
+ m.p.print("\no = append(o, ")
+ for _, b := range bts {
+ m.p.printf("0x%x,", b)
+ }
+ m.p.print(")")
+}
+
+func (m *marshalGen) gMap(s *Map) {
+ if !m.p.ok() {
+ return
+ }
+ m.fuseHook()
+ vname := s.Varname()
+ m.rawAppend(mapHeader, lenAsUint32, vname)
+ m.p.printf("\nfor %s, %s := range %s {", s.Keyidx, s.Validx, vname)
+ m.rawAppend(stringTyp, literalFmt, s.Keyidx)
+ next(m, s.Value)
+ m.p.closeblock()
+}
+
+func (m *marshalGen) gSlice(s *Slice) {
+ if !m.p.ok() {
+ return
+ }
+ m.fuseHook()
+ vname := s.Varname()
+ m.rawAppend(arrayHeader, lenAsUint32, vname)
+ m.p.rangeBlock(s.Index, vname, m, s.Els)
+}
+
+func (m *marshalGen) gArray(a *Array) {
+ if !m.p.ok() {
+ return
+ }
+ m.fuseHook()
+ if be, ok := a.Els.(*BaseElem); ok && be.Value == Byte {
+ m.rawAppend("Bytes", "(%s)[:]", a.Varname())
+ return
+ }
+
+ m.rawAppend(arrayHeader, literalFmt, a.Size)
+ m.p.rangeBlock(a.Index, a.Varname(), m, a.Els)
+}
+
+func (m *marshalGen) gPtr(p *Ptr) {
+ if !m.p.ok() {
+ return
+ }
+ m.fuseHook()
+ m.p.printf("\nif %s == nil {\no = msgp.AppendNil(o)\n} else {", p.Varname())
+ next(m, p.Value)
+ m.p.closeblock()
+}
+
+func (m *marshalGen) gBase(b *BaseElem) {
+ if !m.p.ok() {
+ return
+ }
+ m.fuseHook()
+ vname := b.Varname()
+
+ if b.Convert {
+ vname = tobaseConvert(b)
+ }
+
+ var echeck bool
+ switch b.Value {
+ case IDENT:
+ echeck = true
+ m.p.printf("\no, err = %s.MarshalMsg(o)", vname)
+ case Intf, Ext:
+ echeck = true
+ m.p.printf("\no, err = msgp.Append%s(o, %s)", b.BaseName(), vname)
+ default:
+ m.rawAppend(b.BaseName(), literalFmt, vname)
+ }
+
+ if echeck {
+ m.p.print(errcheck)
+ }
+}
diff --git a/vendor/github.com/tinylib/msgp/gen/size.go b/vendor/github.com/tinylib/msgp/gen/size.go
new file mode 100644
index 00000000..3e636e47
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/gen/size.go
@@ -0,0 +1,272 @@
+package gen
+
+import (
+ "fmt"
+ "github.com/tinylib/msgp/msgp"
+ "io"
+ "strconv"
+)
+
+type sizeState uint8
+
+const (
+ // need to write "s = ..."
+ assign sizeState = iota
+
+ // need to write "s += ..."
+ add
+
+ // can just append "+ ..."
+ expr
+)
+
+func sizes(w io.Writer) *sizeGen {
+ return &sizeGen{
+ p: printer{w: w},
+ state: assign,
+ }
+}
+
+type sizeGen struct {
+ passes
+ p printer
+ state sizeState
+}
+
+func (s *sizeGen) Method() Method { return Size }
+
+func (s *sizeGen) Apply(dirs []string) error {
+ return nil
+}
+
+func builtinSize(typ string) string {
+ return "msgp." + typ + "Size"
+}
+
+// this lets us chain together addition
+// operations where possible
+func (s *sizeGen) addConstant(sz string) {
+ if !s.p.ok() {
+ return
+ }
+
+ switch s.state {
+ case assign:
+ s.p.print("\ns = " + sz)
+ s.state = expr
+ return
+ case add:
+ s.p.print("\ns += " + sz)
+ s.state = expr
+ return
+ case expr:
+ s.p.print(" + " + sz)
+ return
+ }
+
+ panic("unknown size state")
+}
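+
+// For example, sizing a two-field tuple struct holding a float64 and a bool
+// chains assign -> expr to emit a single statement:
+//
+//	s = 1 + msgp.Float64Size + msgp.BoolSize
+//
+// (1 byte for the array header), rather than three separate additions.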
+
+func (s *sizeGen) Execute(p Elem) error {
+ if !s.p.ok() {
+ return s.p.err
+ }
+ p = s.applyall(p)
+ if p == nil {
+ return nil
+ }
+ if !IsPrintable(p) {
+ return nil
+ }
+
+ s.p.comment("Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message")
+
+ s.p.printf("\nfunc (%s %s) Msgsize() (s int) {", p.Varname(), imutMethodReceiver(p))
+ s.state = assign
+ next(s, p)
+ s.p.nakedReturn()
+ return s.p.err
+}
+
+func (s *sizeGen) gStruct(st *Struct) {
+ if !s.p.ok() {
+ return
+ }
+
+ nfields := uint32(len(st.Fields))
+
+ if st.AsTuple {
+ data := msgp.AppendArrayHeader(nil, nfields)
+ s.addConstant(strconv.Itoa(len(data)))
+ for i := range st.Fields {
+ if !s.p.ok() {
+ return
+ }
+ next(s, st.Fields[i].FieldElem)
+ }
+ } else {
+ data := msgp.AppendMapHeader(nil, nfields)
+ s.addConstant(strconv.Itoa(len(data)))
+ for i := range st.Fields {
+ data = data[:0]
+ data = msgp.AppendString(data, st.Fields[i].FieldTag)
+ s.addConstant(strconv.Itoa(len(data)))
+ next(s, st.Fields[i].FieldElem)
+ }
+ }
+}
+
+func (s *sizeGen) gPtr(p *Ptr) {
+ s.state = add // inner must use add
+ s.p.printf("\nif %s == nil {\ns += msgp.NilSize\n} else {", p.Varname())
+ next(s, p.Value)
+ s.state = add // closing block; reset to add
+ s.p.closeblock()
+}
+
+func (s *sizeGen) gSlice(sl *Slice) {
+ if !s.p.ok() {
+ return
+ }
+
+ s.addConstant(builtinSize(arrayHeader))
+
+ // if the slice's element is a fixed size
+ // (e.g. float64, [32]int, etc.), then
+ // print the length times the element size directly
+ if str, ok := fixedsizeExpr(sl.Els); ok {
+ s.addConstant(fmt.Sprintf("(%s * (%s))", lenExpr(sl), str))
+ return
+ }
+
+ // add inside the range block, and immediately after
+ s.state = add
+ s.p.rangeBlock(sl.Index, sl.Varname(), s, sl.Els)
+ s.state = add
+}
+
+func (s *sizeGen) gArray(a *Array) {
+ if !s.p.ok() {
+ return
+ }
+
+ s.addConstant(builtinSize(arrayHeader))
+
+ // if the array's children are a fixed
+ // size, we can compile an expression
+ // that always represents the array's wire size
+ if str, ok := fixedsizeExpr(a); ok {
+ s.addConstant(str)
+ return
+ }
+
+ s.state = add
+ s.p.rangeBlock(a.Index, a.Varname(), s, a.Els)
+ s.state = add
+}
+
+func (s *sizeGen) gMap(m *Map) {
+ s.addConstant(builtinSize(mapHeader))
+ vn := m.Varname()
+ s.p.printf("\nif %s != nil {", vn)
+ s.p.printf("\nfor %s, %s := range %s {", m.Keyidx, m.Validx, vn)
+ s.p.printf("\n_ = %s", m.Validx) // we may not use the value
+ s.p.printf("\ns += msgp.StringPrefixSize + len(%s)", m.Keyidx)
+ s.state = expr
+ next(s, m.Value)
+ s.p.closeblock()
+ s.p.closeblock()
+ s.state = add
+}
+
+func (s *sizeGen) gBase(b *BaseElem) {
+ if !s.p.ok() {
+ return
+ }
+ s.addConstant(basesizeExpr(b))
+}
+
+// returns "len(slice)"
+func lenExpr(sl *Slice) string {
+ return "len(" + sl.Varname() + ")"
+}
+
+// is a given primitive always the same (max)
+// size on the wire?
+func fixedSize(p Primitive) bool {
+ switch p {
+ case Intf, Ext, IDENT, Bytes, String:
+ return false
+ default:
+ return true
+ }
+}
+
+// strip reference from string
+func stripRef(s string) string {
+ if s[0] == '&' {
+ return s[1:]
+ }
+ return s
+}
+
+// return a fixed-size expression, if possible.
+// only possible for *BaseElem and *Array.
+// returns (expr, ok)
+func fixedsizeExpr(e Elem) (string, bool) {
+ switch e := e.(type) {
+ case *Array:
+ if str, ok := fixedsizeExpr(e.Els); ok {
+ return fmt.Sprintf("(%s * (%s))", e.Size, str), true
+ }
+ case *BaseElem:
+ if fixedSize(e.Value) {
+ return builtinSize(e.BaseName()), true
+ }
+ case *Struct:
+ var str string
+ for _, f := range e.Fields {
+ if fs, ok := fixedsizeExpr(f.FieldElem); ok {
+ if str == "" {
+ str = fs
+ } else {
+ str += "+" + fs
+ }
+ } else {
+ return "", false
+ }
+ }
+ var hdrlen int
+ mhdr := msgp.AppendMapHeader(nil, uint32(len(e.Fields)))
+ hdrlen += len(mhdr)
+ var strbody []byte
+ for _, f := range e.Fields {
+ strbody = msgp.AppendString(strbody[:0], f.FieldTag)
+ hdrlen += len(strbody)
+ }
+ return fmt.Sprintf("%d + %s", hdrlen, str), true
+ }
+ return "", false
+}
+
+// print size expression of a variable name
+func basesizeExpr(b *BaseElem) string {
+ vname := b.Varname()
+ if b.Convert {
+ vname = tobaseConvert(b)
+ }
+ switch b.Value {
+ case Ext:
+ return "msgp.ExtensionPrefixSize + " + stripRef(vname) + ".Len()"
+ case Intf:
+ return "msgp.GuessSize(" + vname + ")"
+ case IDENT:
+ return vname + ".Msgsize()"
+ case Bytes:
+ return "msgp.BytesPrefixSize + len(" + vname + ")"
+ case String:
+ return "msgp.StringPrefixSize + len(" + vname + ")"
+ default:
+ return builtinSize(b.BaseName())
+ }
+}
diff --git a/vendor/github.com/tinylib/msgp/gen/spec.go b/vendor/github.com/tinylib/msgp/gen/spec.go
new file mode 100644
index 00000000..e1968165
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/gen/spec.go
@@ -0,0 +1,376 @@
+package gen
+
+import (
+ "fmt"
+ "io"
+)
+
+const (
+ errcheck = "\nif err != nil { return }"
+ lenAsUint32 = "uint32(len(%s))"
+ literalFmt = "%s"
+ intFmt = "%d"
+ quotedFmt = `"%s"`
+ mapHeader = "MapHeader"
+ arrayHeader = "ArrayHeader"
+ mapKey = "MapKeyPtr"
+ stringTyp = "String"
+ u32 = "uint32"
+)
+
+// Method is a bitfield representing something that the
+// generator knows how to print.
+type Method uint8
+
+// are the bits in 'f' set in 'm'?
+func (m Method) isset(f Method) bool { return (m&f == f) }
+
+// String implements fmt.Stringer
+func (m Method) String() string {
+ switch m {
+ case 0, invalidmeth:
+ return ""
+ case Decode:
+ return "decode"
+ case Encode:
+ return "encode"
+ case Marshal:
+ return "marshal"
+ case Unmarshal:
+ return "unmarshal"
+ case Size:
+ return "size"
+ case Test:
+ return "test"
+ default:
+ // return e.g. "decode+encode+test"
+ modes := [...]Method{Decode, Encode, Marshal, Unmarshal, Size, Test}
+ any := false
+ nm := ""
+ for _, mm := range modes {
+ if m.isset(mm) {
+ if any {
+ nm += "+" + mm.String()
+ } else {
+ nm += mm.String()
+ any = true
+ }
+ }
+ }
+ return nm
+
+ }
+}
+
+func strtoMeth(s string) Method {
+ switch s {
+ case "encode":
+ return Encode
+ case "decode":
+ return Decode
+ case "marshal":
+ return Marshal
+ case "unmarshal":
+ return Unmarshal
+ case "size":
+ return Size
+ case "test":
+ return Test
+ default:
+ return 0
+ }
+}
+
+const (
+ Decode Method = 1 << iota // msgp.Decodable
+ Encode // msgp.Encodable
+ Marshal // msgp.Marshaler
+ Unmarshal // msgp.Unmarshaler
+ Size // msgp.Sizer
+ Test // generate tests
+ invalidmeth // this isn't a method
+ encodetest = Encode | Decode | Test // tests for Encodable and Decodable
+ marshaltest = Marshal | Unmarshal | Test // tests for Marshaler and Unmarshaler
+)
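+
+// For example, the default msgp invocation (-io=true, -marshal=true,
+// -tests=true) requests Encode|Decode|Marshal|Unmarshal|Size|Test, which
+// NewPrinter expands into all seven generators, including the two test
+// generators.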
+
+type Printer struct {
+ gens []generator
+}
+
+func NewPrinter(m Method, out io.Writer, tests io.Writer) *Printer {
+ if m.isset(Test) && tests == nil {
+ panic("cannot print tests with 'nil' tests argument!")
+ }
+ gens := make([]generator, 0, 7)
+ if m.isset(Decode) {
+ gens = append(gens, decode(out))
+ }
+ if m.isset(Encode) {
+ gens = append(gens, encode(out))
+ }
+ if m.isset(Marshal) {
+ gens = append(gens, marshal(out))
+ }
+ if m.isset(Unmarshal) {
+ gens = append(gens, unmarshal(out))
+ }
+ if m.isset(Size) {
+ gens = append(gens, sizes(out))
+ }
+ if m.isset(marshaltest) {
+ gens = append(gens, mtest(tests))
+ }
+ if m.isset(encodetest) {
+ gens = append(gens, etest(tests))
+ }
+ if len(gens) == 0 {
+ panic("NewPrinter called with invalid method flags")
+ }
+ return &Printer{gens: gens}
+}
+
+// TransformPass is a pass that transforms individual
+// elements. (Note that if the returned element is different from
+// the argument, it should not point to the same underlying objects.)
+type TransformPass func(Elem) Elem
+
+// IgnoreTypename is a pass that just ignores
+// types of a given name.
+func IgnoreTypename(name string) TransformPass {
+ return func(e Elem) Elem {
+ if e.TypeName() == name {
+ return nil
+ }
+ return e
+ }
+}
+
+// ApplyDirective applies a directive to a named pass
+// and all of its dependents.
+func (p *Printer) ApplyDirective(pass Method, t TransformPass) {
+ for _, g := range p.gens {
+ if g.Method().isset(pass) {
+ g.Add(t)
+ }
+ }
+}
+
+// Print prints an Elem.
+func (p *Printer) Print(e Elem) error {
+ for _, g := range p.gens {
+ err := g.Execute(e)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// generator is the interface through
+// which code is generated.
+type generator interface {
+ Method() Method
+ Add(p TransformPass)
+ Execute(Elem) error // execute writes the method for the provided object.
+}
+
+type passes []TransformPass
+
+func (p *passes) Add(t TransformPass) {
+ *p = append(*p, t)
+}
+
+func (p *passes) applyall(e Elem) Elem {
+ for _, t := range *p {
+ e = t(e)
+ if e == nil {
+ return nil
+ }
+ }
+ return e
+}
+
+type traversal interface {
+ gMap(*Map)
+ gSlice(*Slice)
+ gArray(*Array)
+ gPtr(*Ptr)
+ gBase(*BaseElem)
+ gStruct(*Struct)
+}
+
+// type-switch dispatch to the correct
+// method given the type of 'e'
+func next(t traversal, e Elem) {
+ switch e := e.(type) {
+ case *Map:
+ t.gMap(e)
+ case *Struct:
+ t.gStruct(e)
+ case *Slice:
+ t.gSlice(e)
+ case *Array:
+ t.gArray(e)
+ case *Ptr:
+ t.gPtr(e)
+ case *BaseElem:
+ t.gBase(e)
+ default:
+ panic("bad element type")
+ }
+}
+
+// possibly-immutable method receiver
+func imutMethodReceiver(p Elem) string {
+ switch e := p.(type) {
+ case *Struct:
+ // TODO(HACK): actually do real math here.
+ if len(e.Fields) <= 3 {
+ for i := range e.Fields {
+ if be, ok := e.Fields[i].FieldElem.(*BaseElem); !ok || (be.Value == IDENT || be.Value == Bytes) {
+ goto nope
+ }
+ }
+ return p.TypeName()
+ }
+ nope:
+ return "*" + p.TypeName()
+
+ // gets dereferenced automatically
+ case *Array:
+ return "*" + p.TypeName()
+
+ // everything else can be
+ // by-value.
+ default:
+ return p.TypeName()
+ }
+}
+
+// if necessary, wraps a type
+// so that its method receiver
+// is of the right type.
+func methodReceiver(p Elem) string {
+ switch p.(type) {
+
+ // structs and arrays are
+ // dereferenced automatically,
+ // so no need to alter varname
+ case *Struct, *Array:
+ return "*" + p.TypeName()
+ // set variable name to
+ // *varname
+ default:
+ p.SetVarname("(*" + p.Varname() + ")")
+ return "*" + p.TypeName()
+ }
+}
+
+func unsetReceiver(p Elem) {
+ switch p.(type) {
+ case *Struct, *Array:
+ default:
+ p.SetVarname("z")
+ }
+}
+
+// shared utility for generators
+type printer struct {
+ w io.Writer
+ err error
+}
+
+// writes "var {{name}} {{typ}};"
+func (p *printer) declare(name string, typ string) {
+ p.printf("\nvar %s %s", name, typ)
+}
+
+// does:
+//
+//	if m == nil && size > 0 {
+// m = make(type, size)
+// } else if len(m) > 0 {
+// for key, _ := range m { delete(m, key) }
+// }
+//
+func (p *printer) resizeMap(size string, m *Map) {
+ vn := m.Varname()
+ if !p.ok() {
+ return
+ }
+ p.printf("\nif %s == nil && %s > 0 {", vn, size)
+ p.printf("\n%s = make(%s, %s)", vn, m.TypeName(), size)
+ p.printf("\n} else if len(%s) > 0 {", vn)
+ p.clearMap(vn)
+ p.closeblock()
+}
+
+// assign key to value based on varnames
+func (p *printer) mapAssign(m *Map) {
+ if !p.ok() {
+ return
+ }
+ p.printf("\n%s[%s] = %s", m.Varname(), m.Keyidx, m.Validx)
+}
+
+// clear map keys
+func (p *printer) clearMap(name string) {
+ p.printf("\nfor key, _ := range %[1]s { delete(%[1]s, key) }", name)
+}
+
+func (p *printer) resizeSlice(size string, s *Slice) {
+ p.printf("\nif cap(%[1]s) >= int(%[2]s) { %[1]s = (%[1]s)[:%[2]s] } else { %[1]s = make(%[3]s, %[2]s) }", s.Varname(), size, s.TypeName())
+}
+
+func (p *printer) arrayCheck(want string, got string) {
+ p.printf("\nif %[1]s != %[2]s { err = msgp.ArrayError{Wanted: %[2]s, Got: %[1]s}; return }", got, want)
+}
+
+func (p *printer) closeblock() { p.print("\n}") }
+
+// does:
+//
+// for idx := range iter {
+// {{generate inner}}
+// }
+//
+func (p *printer) rangeBlock(idx string, iter string, t traversal, inner Elem) {
+ p.printf("\n for %s := range %s {", idx, iter)
+ next(t, inner)
+ p.closeblock()
+}
+
+func (p *printer) nakedReturn() {
+ if p.ok() {
+ p.print("\nreturn\n}\n")
+ }
+}
+
+func (p *printer) comment(s string) {
+ p.print("\n// " + s)
+}
+
+func (p *printer) printf(format string, args ...interface{}) {
+ if p.err == nil {
+ _, p.err = fmt.Fprintf(p.w, format, args...)
+ }
+}
+
+func (p *printer) print(format string) {
+ if p.err == nil {
+ _, p.err = io.WriteString(p.w, format)
+ }
+}
+
+func (p *printer) initPtr(pt *Ptr) {
+ if pt.Needsinit() {
+ vname := pt.Varname()
+ p.printf("\nif %s == nil { %s = new(%s); }", vname, vname, pt.Value.TypeName())
+ }
+}
+
+func (p *printer) ok() bool { return p.err == nil }
+
+func tobaseConvert(b *BaseElem) string {
+ return b.ToBase() + "(" + b.Varname() + ")"
+}
diff --git a/vendor/github.com/tinylib/msgp/gen/testgen.go b/vendor/github.com/tinylib/msgp/gen/testgen.go
new file mode 100644
index 00000000..a0e0114e
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/gen/testgen.go
@@ -0,0 +1,182 @@
+package gen
+
+import (
+ "io"
+ "text/template"
+)
+
+var (
+ marshalTestTempl = template.New("MarshalTest")
+ encodeTestTempl = template.New("EncodeTest")
+)
+
+// TODO(philhofer):
+// for simplicity's sake, right now
+// we can only generate tests for types
+// that can be initialized with the
+// "Type{}" syntax.
+// we should support all the types.
+
+func mtest(w io.Writer) *mtestGen {
+ return &mtestGen{w: w}
+}
+
+type mtestGen struct {
+ passes
+ w io.Writer
+}
+
+func (m *mtestGen) Execute(p Elem) error {
+ p = m.applyall(p)
+ if p != nil && IsPrintable(p) {
+ switch p.(type) {
+ case *Struct, *Array, *Slice, *Map:
+ return marshalTestTempl.Execute(m.w, p)
+ }
+ }
+ return nil
+}
+
+func (m *mtestGen) Method() Method { return marshaltest }
+
+type etestGen struct {
+ passes
+ w io.Writer
+}
+
+func etest(w io.Writer) *etestGen {
+ return &etestGen{w: w}
+}
+
+func (e *etestGen) Execute(p Elem) error {
+ p = e.applyall(p)
+ if p != nil && IsPrintable(p) {
+ switch p.(type) {
+ case *Struct, *Array, *Slice, *Map:
+ return encodeTestTempl.Execute(e.w, p)
+ }
+ }
+ return nil
+}
+
+func (e *etestGen) Method() Method { return encodetest }
+
+func init() {
+ template.Must(marshalTestTempl.Parse(`func TestMarshalUnmarshal{{.TypeName}}(t *testing.T) {
+ v := {{.TypeName}}{}
+ bts, err := v.MarshalMsg(nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func BenchmarkMarshalMsg{{.TypeName}}(b *testing.B) {
+ v := {{.TypeName}}{}
+ b.ReportAllocs()
+ b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		v.MarshalMsg(nil)
+	}
+}
+
+func BenchmarkAppendMsg{{.TypeName}}(b *testing.B) {
+	v := {{.TypeName}}{}
+	bts := make([]byte, 0, v.Msgsize())
+	bts, _ = v.MarshalMsg(bts[0:0])
+	b.SetBytes(int64(len(bts)))
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		bts, _ = v.MarshalMsg(bts[0:0])
+	}
+}
+
+func BenchmarkUnmarshal{{.TypeName}}(b *testing.B) {
+	v := {{.TypeName}}{}
+	bts, _ := v.MarshalMsg(nil)
+	b.ReportAllocs()
+	b.SetBytes(int64(len(bts)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_, err := v.UnmarshalMsg(bts)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}`))
+
+	template.Must(encodeTestTempl.Parse(`func TestEncodeDecode{{.TypeName}}(t *testing.T) {
+	v := {{.TypeName}}{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+
+	m := v.Msgsize()
+	if buf.Len() > m {
+ t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
+ }
+
+ vn := {{.TypeName}}{}
+ err := msgp.Decode(&buf, &vn)
+ if err != nil {
+ t.Error(err)
+ }
+
+ buf.Reset()
+ msgp.Encode(&buf, &v)
+ err = msgp.NewReader(&buf).Skip()
+ if err != nil {
+ t.Error(err)
+ }
+}
+
+func BenchmarkEncode{{.TypeName}}(b *testing.B) {
+ v := {{.TypeName}}{}
+ var buf bytes.Buffer
+ msgp.Encode(&buf, &v)
+ b.SetBytes(int64(buf.Len()))
+ en := msgp.NewWriter(msgp.Nowhere)
+ b.ReportAllocs()
+ b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		v.EncodeMsg(en)
+	}
+	en.Flush()
+}
+
+func BenchmarkDecode{{.TypeName}}(b *testing.B) {
+	v := {{.TypeName}}{}
+	var buf bytes.Buffer
+	msgp.Encode(&buf, &v)
+	b.SetBytes(int64(buf.Len()))
+	rd := msgp.NewEndlessReader(buf.Bytes(), b)
+	dc := msgp.NewReader(rd)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		err := v.DecodeMsg(dc)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}`))
+}
diff --git a/vendor/github.com/tinylib/msgp/gen/unmarshal.go b/vendor/github.com/tinylib/msgp/gen/unmarshal.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/gen/unmarshal.go
+package gen
+
+import (
+	"io"
+	"strconv"
+)
+
+func unmarshal(w io.Writer) *unmarshalGen {
+	return &unmarshalGen{
+		p: printer{w: w},
+	}
+}
+
+type unmarshalGen struct {
+	passes
+	p        printer
+	hasfield bool
+}
+
+func (u *unmarshalGen) Method() Method { return Unmarshal }
+
+func (u *unmarshalGen) needsField() {
+	if u.hasfield {
+		return
+	}
+	u.p.print("\nvar field []byte; _ = field")
+	u.hasfield = true
+}
+
+func (u *unmarshalGen) Execute(p Elem) error {
+	u.hasfield = false
+	if !u.p.ok() {
+		return u.p.err
+	}
+	p = u.applyall(p)
+	if p == nil {
+		return nil
+	}
+	if !IsPrintable(p) {
+		return nil
+	}
+
+	u.p.comment("UnmarshalMsg implements msgp.Unmarshaler")
+
+	u.p.printf("\nfunc (%s %s) UnmarshalMsg(bts []byte) (o []byte, err error) {", p.Varname(), methodReceiver(p))
+	next(u, p)
+	u.p.print("\no = bts")
+	u.p.nakedReturn()
+	unsetReceiver(p)
+	return u.p.err
+}
+
+func (u *unmarshalGen) gStruct(s *Struct) {
+	if !u.p.ok() {
+		return
+	}
+	if s.AsTuple {
+		u.tuple(s)
+	} else {
+		u.mapstruct(s)
+	}
+	return
+}
+
+func (u *unmarshalGen) assignAndCheck(name string, base string) {
+	if !u.p.ok() {
+		return
+	}
+	u.p.printf("\n%s, bts, err = msgp.Read%sBytes(bts)", name, base)
+	u.p.print(errcheck)
+}
+
+func (u *unmarshalGen) tuple(s *Struct) {
+	if !u.p.ok() {
+		return
+	}
+	sz := randIdent()
+	u.p.declare(sz, u32)
+	u.assignAndCheck(sz, arrayHeader)
+	u.p.arrayCheck(strconv.Itoa(len(s.Fields)), sz)
+	for i := range s.Fields {
+		if !u.p.ok() {
+			return
+		}
+		next(u, s.Fields[i].FieldElem)
+	}
+}
+
+func (u *unmarshalGen) mapstruct(s *Struct) {
+	u.needsField()
+	sz := randIdent()
+	u.p.declare(sz, u32)
+	u.assignAndCheck(sz, mapHeader)
+
+	u.p.printf("\nfor %s > 0 {", sz)
+ u.p.printf("\n%s--; field, bts, err = msgp.ReadMapKeyZC(bts)", sz)
+ u.p.print(errcheck)
+ u.p.print("\nswitch msgp.UnsafeString(field) {")
+ for i := range s.Fields {
+ if !u.p.ok() {
+ return
+ }
+ u.p.printf("\ncase \"%s\":", s.Fields[i].FieldTag)
+ next(u, s.Fields[i].FieldElem)
+ }
+ u.p.print("\ndefault:\nbts, err = msgp.Skip(bts)")
+ u.p.print(errcheck)
+ u.p.print("\n}\n}") // close switch and for loop
+}
+
+func (u *unmarshalGen) gBase(b *BaseElem) {
+ if !u.p.ok() {
+ return
+ }
+
+ refname := b.Varname() // assigned to
+ lowered := b.Varname() // passed as argument
+ if b.Convert {
+ // begin 'tmp' block
+ refname = randIdent()
+ lowered = b.ToBase() + "(" + lowered + ")"
+ u.p.printf("\n{\nvar %s %s", refname, b.BaseType())
+ }
+
+ switch b.Value {
+ case Bytes:
+ u.p.printf("\n%s, bts, err = msgp.ReadBytesBytes(bts, %s)", refname, lowered)
+ case Ext:
+ u.p.printf("\nbts, err = msgp.ReadExtensionBytes(bts, %s)", lowered)
+ case IDENT:
+ u.p.printf("\nbts, err = %s.UnmarshalMsg(bts)", lowered)
+ default:
+ u.p.printf("\n%s, bts, err = msgp.Read%sBytes(bts)", refname, b.BaseName())
+ }
+ if b.Convert {
+ // close 'tmp' block
+ u.p.printf("\n%s = %s(%s)\n}", b.Varname(), b.FromBase(), refname)
+ }
+
+ u.p.print(errcheck)
+}
+
+func (u *unmarshalGen) gArray(a *Array) {
+ if !u.p.ok() {
+ return
+ }
+
+ // special case for [const]byte objects
+ // see decode.go for symmetry
+ if be, ok := a.Els.(*BaseElem); ok && be.Value == Byte {
+ u.p.printf("\nbts, err = msgp.ReadExactBytes(bts, (%s)[:])", a.Varname())
+ u.p.print(errcheck)
+ return
+ }
+
+ sz := randIdent()
+ u.p.declare(sz, u32)
+ u.assignAndCheck(sz, arrayHeader)
+ u.p.arrayCheck(a.Size, sz)
+ u.p.rangeBlock(a.Index, a.Varname(), u, a.Els)
+}
+
+func (u *unmarshalGen) gSlice(s *Slice) {
+ if !u.p.ok() {
+ return
+ }
+ sz := randIdent()
+ u.p.declare(sz, u32)
+ u.assignAndCheck(sz, arrayHeader)
+ u.p.resizeSlice(sz, s)
+ u.p.rangeBlock(s.Index, s.Varname(), u, s.Els)
+}
+
+func (u *unmarshalGen) gMap(m *Map) {
+ if !u.p.ok() {
+ return
+ }
+ sz := randIdent()
+ u.p.declare(sz, u32)
+ u.assignAndCheck(sz, mapHeader)
+
+ // allocate or clear map
+ u.p.resizeMap(sz, m)
+
+ // loop and get key,value
+ u.p.printf("\nfor %s > 0 {", sz)
+ u.p.printf("\nvar %s string; var %s %s; %s--", m.Keyidx, m.Validx, m.Value.TypeName(), sz)
+ u.assignAndCheck(m.Keyidx, stringTyp)
+ next(u, m.Value)
+ u.p.mapAssign(m)
+ u.p.closeblock()
+}
+
+func (u *unmarshalGen) gPtr(p *Ptr) {
+ u.p.printf("\nif msgp.IsNil(bts) { bts, err = msgp.ReadNilBytes(bts); if err != nil { return }; %s = nil; } else { ", p.Varname())
+ u.p.initPtr(p)
+ next(u, p.Value)
+ u.p.closeblock()
+}
diff --git a/vendor/github.com/tinylib/msgp/main.go b/vendor/github.com/tinylib/msgp/main.go
new file mode 100644
index 00000000..4369d739
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/main.go
@@ -0,0 +1,119 @@
+// msgp is a code generation tool for
+// creating methods to serialize and de-serialize
+// Go data structures to and from MessagePack.
+//
+// This package is targeted at the `go generate` tool.
+// To use it, include the following directive in a
+// go source file with types requiring source generation:
+//
+// //go:generate msgp
+//
+// The go generate tool should set the proper environment variables for
+// the generator to execute without any command-line flags. However, the
+// following options are supported, if you need them:
+//
+// -o = output file name (default is {input}_gen.go)
+// -file = input file name (or directory; default is $GOFILE, which is set by the `go generate` command)
+// -io = satisfy the `msgp.Decodable` and `msgp.Encodable` interfaces (default is true)
+// -marshal = satisfy the `msgp.Marshaler` and `msgp.Unmarshaler` interfaces (default is true)
+// -tests = generate tests and benchmarks (default is true)
+//
+// For more information, please read README.md and the wiki at github.com/tinylib/msgp.
+//
+package main
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/tinylib/msgp/gen"
+ "github.com/tinylib/msgp/parse"
+ "github.com/tinylib/msgp/printer"
+ "github.com/ttacon/chalk"
+)
+
+var (
+ out = flag.String("o", "", "output file")
+ file = flag.String("file", "", "input file")
+ encode = flag.Bool("io", true, "create Encode and Decode methods")
+ marshal = flag.Bool("marshal", true, "create Marshal and Unmarshal methods")
+ tests = flag.Bool("tests", true, "create tests and benchmarks")
+ unexported = flag.Bool("unexported", false, "also process unexported types")
+)
+
+func main() {
+ flag.Parse()
+
+ // GOFILE is set by go generate
+ if *file == "" {
+ *file = os.Getenv("GOFILE")
+ if *file == "" {
+ fmt.Println(chalk.Red.Color("No file to parse."))
+ os.Exit(1)
+ }
+ }
+
+ var mode gen.Method
+ if *encode {
+ mode |= (gen.Encode | gen.Decode | gen.Size)
+ }
+ if *marshal {
+ mode |= (gen.Marshal | gen.Unmarshal | gen.Size)
+ }
+ if *tests {
+ mode |= gen.Test
+ }
+
+ if mode&^gen.Test == 0 {
+ fmt.Println(chalk.Red.Color("No methods to generate; -io=false && -marshal=false"))
+ os.Exit(1)
+ }
+
+ if err := Run(*file, mode, *unexported); err != nil {
+ fmt.Println(chalk.Red.Color(err.Error()))
+ os.Exit(1)
+ }
+}
+
+// Run writes all methods using the associated file or path, e.g.
+//
+// err := msgp.Run("path/to/myfile.go", gen.Size|gen.Marshal|gen.Unmarshal|gen.Test, false)
+//
+func Run(gofile string, mode gen.Method, unexported bool) error {
+ if mode&^gen.Test == 0 {
+ return nil
+ }
+ fmt.Println(chalk.Magenta.Color("======== MessagePack Code Generator ======="))
+ fmt.Printf(chalk.Magenta.Color(">>> Input: \"%s\"\n"), gofile)
+ fs, err := parse.File(gofile, unexported)
+ if err != nil {
+ return err
+ }
+
+ if len(fs.Identities) == 0 {
+ fmt.Println(chalk.Magenta.Color("No types requiring code generation were found!"))
+ return nil
+ }
+
+ return printer.PrintFile(newFilename(gofile, fs.Package), fs, mode)
+}
+
+// picks a new file name based on input flags and input filename(s).
+func newFilename(old string, pkg string) string {
+ if *out != "" {
+ if pre := strings.TrimPrefix(*out, old); len(pre) > 0 &&
+ !strings.HasSuffix(*out, ".go") {
+ return filepath.Join(old, *out)
+ }
+ return *out
+ }
+
+ if fi, err := os.Stat(old); err == nil && fi.IsDir() {
+ old = filepath.Join(old, pkg)
+ }
+ // new file name is old file name + _gen.go
+ return strings.TrimSuffix(old, ".go") + "_gen.go"
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_linux.go b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go
new file mode 100644
index 00000000..6c6bb37a
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go
@@ -0,0 +1,24 @@
+// +build linux,!appengine
+
+package msgp
+
+import (
+ "os"
+ "syscall"
+)
+
+func adviseRead(mem []byte) {
+ syscall.Madvise(mem, syscall.MADV_SEQUENTIAL|syscall.MADV_WILLNEED)
+}
+
+func adviseWrite(mem []byte) {
+ syscall.Madvise(mem, syscall.MADV_SEQUENTIAL)
+}
+
+func fallocate(f *os.File, sz int64) error {
+ err := syscall.Fallocate(int(f.Fd()), 0, 0, sz)
+ if err == syscall.ENOTSUP {
+ return f.Truncate(sz)
+ }
+ return err
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_other.go b/vendor/github.com/tinylib/msgp/msgp/advise_other.go
new file mode 100644
index 00000000..da65ea54
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/advise_other.go
@@ -0,0 +1,17 @@
+// +build !linux appengine
+
+package msgp
+
+import (
+ "os"
+)
+
+// TODO: darwin, BSD support
+
+func adviseRead(mem []byte) {}
+
+func adviseWrite(mem []byte) {}
+
+func fallocate(f *os.File, sz int64) error {
+ return f.Truncate(sz)
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/appengine.go b/vendor/github.com/tinylib/msgp/msgp/appengine.go
new file mode 100644
index 00000000..bff9e768
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/appengine.go
@@ -0,0 +1,15 @@
+// +build appengine
+
+package msgp
+
+// let's just assume appengine
+// uses 64-bit hardware...
+const smallint = false
+
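+// UnsafeString returns the byte slice as a string.
+// On appengine this is an ordinary copying conversion,
+// so it is always safe.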
+func UnsafeString(b []byte) string {
+ return string(b)
+}
+
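+// UnsafeBytes returns the string as a byte slice.
+// On appengine this is an ordinary copying conversion,
+// so it is always safe.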
+func UnsafeBytes(s string) []byte {
+ return []byte(s)
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/circular.go b/vendor/github.com/tinylib/msgp/msgp/circular.go
new file mode 100644
index 00000000..a0434c7e
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/circular.go
@@ -0,0 +1,39 @@
+package msgp
+
+type timer interface {
+ StartTimer()
+ StopTimer()
+}
+
+// EndlessReader is an io.Reader
+// that loops over the same data
+// endlessly. It is used for benchmarking.
+type EndlessReader struct {
+ tb timer
+ data []byte
+ offset int
+}
+
+// NewEndlessReader returns a new endless reader
+func NewEndlessReader(b []byte, tb timer) *EndlessReader {
+ return &EndlessReader{tb: tb, data: b, offset: 0}
+}
+
+// Read implements io.Reader. In practice, it
+// always returns (len(p), nil), although it
+// fills the supplied slice while the benchmark
+// timer is stopped.
+func (c *EndlessReader) Read(p []byte) (int, error) {
+ c.tb.StopTimer()
+ var n int
+ l := len(p)
+ m := len(c.data)
+ for n < l {
+ nn := copy(p[n:], c.data[c.offset:])
+ n += nn
+ c.offset += nn
+ c.offset %= m
+ }
+ c.tb.StartTimer()
+ return n, nil
+}
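+
+// A typical use in a decoding benchmark might look like this
+// (a sketch; 'data' is a hypothetical encoded buffer):
+//
+//	func BenchmarkDecode(b *testing.B) {
+//		r := NewReader(NewEndlessReader(data, b))
+//		b.SetBytes(int64(len(data)))
+//		for i := 0; i < b.N; i++ {
+//			// decode one object from r per iteration
+//		}
+//	}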
diff --git a/vendor/github.com/tinylib/msgp/msgp/defs.go b/vendor/github.com/tinylib/msgp/msgp/defs.go
new file mode 100644
index 00000000..c634eef1
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/defs.go
@@ -0,0 +1,142 @@
+// Package msgp is the support library for the msgp code generator (http://github.com/tinylib/msgp).
+//
+// This package defines the utilities used by the msgp code generator for encoding and decoding MessagePack
+// from []byte and io.Reader/io.Writer types. Much of this package is devoted to helping the msgp code
+// generator implement the Marshaler/Unmarshaler and Encodable/Decodable interfaces.
+//
+// This package defines four "families" of functions:
+// - AppendXxxx() appends an object to a []byte in MessagePack encoding.
+// - ReadXxxxBytes() reads an object from a []byte and returns the remaining bytes.
+// - (*Writer).WriteXxxx() writes an object to the buffered *Writer type.
+// - (*Reader).ReadXxxx() reads an object from a buffered *Reader type.
+//
+// Once a type has satisfied the `Encodable` and `Decodable` interfaces,
+// it can be written to arbitrary `io.Writer`s and read from arbitrary `io.Reader`s using
+// msgp.Encode(io.Writer, msgp.Encodable)
+// and
+// msgp.Decode(io.Reader, msgp.Decodable)
+//
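+// For example, given a hypothetical msgp-generated type Foo:
+//
+//	var f Foo
+//	err := msgp.Encode(w, &f) // w is any io.Writer
+//	err = msgp.Decode(r, &f)  // r is any io.Reader
+//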
+// There are also methods for converting MessagePack to JSON without
+// an explicit de-serialization step.
+//
+// For additional tips, tricks, and gotchas, please visit
+// the wiki at http://github.com/tinylib/msgp
+package msgp
+
+const last4 = 0x0f
+const first4 = 0xf0
+const last5 = 0x1f
+const first3 = 0xe0
+const last7 = 0x7f
+
+func isfixint(b byte) bool {
+ return b>>7 == 0
+}
+
+func isnfixint(b byte) bool {
+ return b&first3 == mnfixint
+}
+
+func isfixmap(b byte) bool {
+ return b&first4 == mfixmap
+}
+
+func isfixarray(b byte) bool {
+ return b&first4 == mfixarray
+}
+
+func isfixstr(b byte) bool {
+ return b&first3 == mfixstr
+}
+
+func wfixint(u uint8) byte {
+ return u & last7
+}
+
+func rfixint(b byte) uint8 {
+ return b
+}
+
+func wnfixint(i int8) byte {
+ return byte(i) | mnfixint
+}
+
+func rnfixint(b byte) int8 {
+ return int8(b)
+}
+
+func rfixmap(b byte) uint8 {
+ return b & last4
+}
+
+func wfixmap(u uint8) byte {
+ return mfixmap | (u & last4)
+}
+
+func rfixstr(b byte) uint8 {
+ return b & last5
+}
+
+func wfixstr(u uint8) byte {
+ return (u & last5) | mfixstr
+}
+
+func rfixarray(b byte) uint8 {
+ return (b & last4)
+}
+
+func wfixarray(u uint8) byte {
+ return (u & last4) | mfixarray
+}
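+
+// For example, wfixmap(3) packs the fixmap prefix and the element count
+// into a single byte (0x80|0x03 == 0x83), and rfixmap(0x83) recovers 3.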
+
+// These are all the byte
+// prefixes defined by the
+// msgpack standard
+const (
+ // 0XXXXXXX
+ mfixint uint8 = 0x00
+
+ // 111XXXXX
+ mnfixint uint8 = 0xe0
+
+ // 1000XXXX
+ mfixmap uint8 = 0x80
+
+ // 1001XXXX
+ mfixarray uint8 = 0x90
+
+ // 101XXXXX
+ mfixstr uint8 = 0xa0
+
+ mnil uint8 = 0xc0
+ mfalse uint8 = 0xc2
+ mtrue uint8 = 0xc3
+ mbin8 uint8 = 0xc4
+ mbin16 uint8 = 0xc5
+ mbin32 uint8 = 0xc6
+ mext8 uint8 = 0xc7
+ mext16 uint8 = 0xc8
+ mext32 uint8 = 0xc9
+ mfloat32 uint8 = 0xca
+ mfloat64 uint8 = 0xcb
+ muint8 uint8 = 0xcc
+ muint16 uint8 = 0xcd
+ muint32 uint8 = 0xce
+ muint64 uint8 = 0xcf
+ mint8 uint8 = 0xd0
+ mint16 uint8 = 0xd1
+ mint32 uint8 = 0xd2
+ mint64 uint8 = 0xd3
+ mfixext1 uint8 = 0xd4
+ mfixext2 uint8 = 0xd5
+ mfixext4 uint8 = 0xd6
+ mfixext8 uint8 = 0xd7
+ mfixext16 uint8 = 0xd8
+ mstr8 uint8 = 0xd9
+ mstr16 uint8 = 0xda
+ mstr32 uint8 = 0xdb
+ marray16 uint8 = 0xdc
+ marray32 uint8 = 0xdd
+ mmap16 uint8 = 0xde
+ mmap32 uint8 = 0xdf
+)
diff --git a/vendor/github.com/tinylib/msgp/msgp/defs_test.go b/vendor/github.com/tinylib/msgp/msgp/defs_test.go
new file mode 100644
index 00000000..667dfd60
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/defs_test.go
@@ -0,0 +1,12 @@
+package msgp_test
+
+//go:generate msgp -o=defgen_test.go -tests=false
+
+type Blobs []Blob
+
+type Blob struct {
+ Name string `msg:"name"`
+ Float float64 `msg:"float"`
+ Bytes []byte `msg:"bytes"`
+ Amount int64 `msg:"amount"`
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/edit.go b/vendor/github.com/tinylib/msgp/msgp/edit.go
new file mode 100644
index 00000000..41f92986
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/edit.go
@@ -0,0 +1,241 @@
+package msgp
+
+import (
+ "math"
+)
+
+// Locate returns a []byte pointing to the field
+// in a MessagePack map with the provided key. (The returned []byte
+// points to a sub-slice of 'raw'; Locate does no allocations.) If the
+// key doesn't exist in the map, a zero-length []byte will be returned.
+func Locate(key string, raw []byte) []byte {
+ s, n := locate(raw, key)
+ return raw[s:n]
+}
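+
+// For example (a sketch, assuming 'raw' holds an encoded map with a
+// string-valued field "name"):
+//
+//	val := Locate("name", raw)
+//	if len(val) > 0 {
+//		name, _, err := ReadStringBytes(val)
+//		// use name, handle err
+//	}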
+
+// Replace takes a key ("key") in a MessagePack map ("raw"),
+// replaces its value with the one provided, and returns
+// the new []byte. The returned []byte may point to the same
+// memory as "raw". Replace makes no effort to evaluate the validity
+// of the contents of 'val'. It may use up to the full capacity of 'raw'.
+// Replace returns 'nil' if the field doesn't exist or if the object in 'raw'
+// is not a map.
+func Replace(key string, raw []byte, val []byte) []byte {
+ start, end := locate(raw, key)
+ if start == end {
+ return nil
+ }
+ return replace(raw, start, end, val, true)
+}
+
+// CopyReplace works similarly to Replace except that the returned
+// byte slice does not point to the same memory as 'raw'. CopyReplace
+// returns 'nil' if the field doesn't exist or 'raw' isn't a map.
+func CopyReplace(key string, raw []byte, val []byte) []byte {
+ start, end := locate(raw, key)
+ if start == end {
+ return nil
+ }
+ return replace(raw, start, end, val, false)
+}
+
+// Remove removes a key-value pair from 'raw'. It returns
+// 'raw' unchanged if the key didn't exist.
+func Remove(key string, raw []byte) []byte {
+ start, end := locateKV(raw, key)
+ if start == end {
+ return raw
+ }
+ raw = raw[:start+copy(raw[start:], raw[end:])]
+ return resizeMap(raw, -1)
+}
+
+// HasKey returns whether the map in 'raw' has
+// a field with key 'key'.
+func HasKey(key string, raw []byte) bool {
+ sz, bts, err := ReadMapHeaderBytes(raw)
+ if err != nil {
+ return false
+ }
+ var field []byte
+ for i := uint32(0); i < sz; i++ {
+ field, bts, err = ReadStringZC(bts)
+ if err != nil {
+ return false
+ }
+ if UnsafeString(field) == key {
+ return true
+ }
+ }
+ return false
+}
+
+func replace(raw []byte, start int, end int, val []byte, inplace bool) []byte {
+ ll := end - start // length of segment to replace
+ lv := len(val)
+
+ if inplace {
+ extra := lv - ll
+
+ // fastest case: we're doing
+ // a 1:1 replacement
+ if extra == 0 {
+ copy(raw[start:], val)
+ return raw
+
+ } else if extra < 0 {
+ // 'val' smaller than replaced value
+ // copy in place and shift back
+
+ x := copy(raw[start:], val)
+ y := copy(raw[start+x:], raw[end:])
+ return raw[:start+x+y]
+
+ } else if extra < cap(raw)-len(raw) {
+ // 'val' less than (cap-len) extra bytes
+ // copy in place and shift forward
+ raw = raw[0 : len(raw)+extra]
+ // shift end forward
+ copy(raw[end+extra:], raw[end:])
+ copy(raw[start:], val)
+ return raw
+ }
+ }
+
+ // we have to allocate new space
+ out := make([]byte, len(raw)+len(val)-ll)
+ x := copy(out, raw[:start])
+ y := copy(out[x:], val)
+ copy(out[x+y:], raw[end:])
+ return out
+}
+
+// locate does a naive O(n) search for the map key; returns start, end
+// (returns 0,0 on error)
+func locate(raw []byte, key string) (start int, end int) {
+ var (
+ sz uint32
+ bts []byte
+ field []byte
+ err error
+ )
+ sz, bts, err = ReadMapHeaderBytes(raw)
+ if err != nil {
+ return
+ }
+
+ // loop and locate field
+ for i := uint32(0); i < sz; i++ {
+ field, bts, err = ReadStringZC(bts)
+ if err != nil {
+ return 0, 0
+ }
+ if UnsafeString(field) == key {
+ // start location
+ l := len(raw)
+ start = l - len(bts)
+ bts, err = Skip(bts)
+ if err != nil {
+ return 0, 0
+ }
+ end = l - len(bts)
+ return
+ }
+ bts, err = Skip(bts)
+ if err != nil {
+ return 0, 0
+ }
+ }
+ return 0, 0
+}
+
+// locate key AND value
+func locateKV(raw []byte, key string) (start int, end int) {
+ var (
+ sz uint32
+ bts []byte
+ field []byte
+ err error
+ )
+ sz, bts, err = ReadMapHeaderBytes(raw)
+ if err != nil {
+ return 0, 0
+ }
+
+ for i := uint32(0); i < sz; i++ {
+ tmp := len(bts)
+ field, bts, err = ReadStringZC(bts)
+ if err != nil {
+ return 0, 0
+ }
+ if UnsafeString(field) == key {
+ start = len(raw) - tmp
+ bts, err = Skip(bts)
+ if err != nil {
+ return 0, 0
+ }
+ end = len(raw) - len(bts)
+ return
+ }
+ bts, err = Skip(bts)
+ if err != nil {
+ return 0, 0
+ }
+ }
+ return 0, 0
+}
+
+// resizeMap rewrites the map header in 'raw' to reflect a change of 'delta' elements
+func resizeMap(raw []byte, delta int64) []byte {
+ var sz int64
+ switch raw[0] {
+ case mmap16:
+ sz = int64(big.Uint16(raw[1:]))
+ if sz+delta <= math.MaxUint16 {
+ big.PutUint16(raw[1:], uint16(sz+delta))
+ return raw
+ }
+		if cap(raw)-len(raw) >= 2 {
+			raw = raw[0 : len(raw)+2]
+			copy(raw[5:], raw[3:])
+			raw[0] = mmap32 // widen the header byte along with the size field
+			big.PutUint32(raw[1:], uint32(sz+delta))
+			return raw
+		}
+ n := make([]byte, 0, len(raw)+5)
+ n = AppendMapHeader(n, uint32(sz+delta))
+ return append(n, raw[3:]...)
+
+ case mmap32:
+ sz = int64(big.Uint32(raw[1:]))
+ big.PutUint32(raw[1:], uint32(sz+delta))
+ return raw
+
+ default:
+ sz = int64(rfixmap(raw[0]))
+ if sz+delta < 16 {
+ raw[0] = wfixmap(uint8(sz + delta))
+ return raw
+ } else if sz+delta <= math.MaxUint16 {
+ if cap(raw)-len(raw) >= 2 {
+ raw = raw[0 : len(raw)+2]
+ copy(raw[3:], raw[1:])
+ raw[0] = mmap16
+ big.PutUint16(raw[1:], uint16(sz+delta))
+ return raw
+ }
+ n := make([]byte, 0, len(raw)+5)
+ n = AppendMapHeader(n, uint32(sz+delta))
+ return append(n, raw[1:]...)
+ }
+ if cap(raw)-len(raw) >= 4 {
+ raw = raw[0 : len(raw)+4]
+ copy(raw[5:], raw[1:])
+ raw[0] = mmap32
+ big.PutUint32(raw[1:], uint32(sz+delta))
+ return raw
+ }
+ n := make([]byte, 0, len(raw)+5)
+ n = AppendMapHeader(n, uint32(sz+delta))
+ return append(n, raw[1:]...)
+ }
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/edit_test.go b/vendor/github.com/tinylib/msgp/msgp/edit_test.go
new file mode 100644
index 00000000..e33b4e1b
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/edit_test.go
@@ -0,0 +1,200 @@
+package msgp
+
+import (
+ "bytes"
+ "reflect"
+ "testing"
+)
+
+func TestRemove(t *testing.T) {
+ var buf bytes.Buffer
+ w := NewWriter(&buf)
+ w.WriteMapHeader(3)
+ w.WriteString("first")
+ w.WriteFloat64(-3.1)
+ w.WriteString("second")
+ w.WriteString("DELETE ME!!!")
+ w.WriteString("third")
+ w.WriteBytes([]byte("blah"))
+ w.Flush()
+
+ raw := Remove("second", buf.Bytes())
+
+ m, _, err := ReadMapStrIntfBytes(raw, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(m) != 2 {
+ t.Errorf("expected %d fields; found %d", 2, len(m))
+ }
+ if _, ok := m["first"]; !ok {
+ t.Errorf("field %q not found", "first")
+ }
+ if _, ok := m["third"]; !ok {
+ t.Errorf("field %q not found", "third")
+ }
+ if _, ok := m["second"]; ok {
+ t.Errorf("field %q (deleted field) still present", "second")
+ }
+}
+
+func TestLocate(t *testing.T) {
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+ en.WriteMapHeader(2)
+ en.WriteString("thing_one")
+ en.WriteString("value_one")
+ en.WriteString("thing_two")
+ en.WriteFloat64(2.0)
+ en.Flush()
+
+ field := Locate("thing_one", buf.Bytes())
+ if len(field) == 0 {
+ t.Fatal("field not found")
+ }
+
+ if !HasKey("thing_one", buf.Bytes()) {
+ t.Fatal("field not found")
+ }
+
+ var zbuf bytes.Buffer
+ w := NewWriter(&zbuf)
+ w.WriteString("value_one")
+ w.Flush()
+
+ if !bytes.Equal(zbuf.Bytes(), field) {
+ t.Errorf("got %q; wanted %q", field, zbuf.Bytes())
+ }
+
+ zbuf.Reset()
+ w.WriteFloat64(2.0)
+ w.Flush()
+ field = Locate("thing_two", buf.Bytes())
+ if len(field) == 0 {
+ t.Fatal("field not found")
+ }
+ if !bytes.Equal(zbuf.Bytes(), field) {
+ t.Errorf("got %q; wanted %q", field, zbuf.Bytes())
+ }
+
+ field = Locate("nope", buf.Bytes())
+ if len(field) != 0 {
+ t.Fatalf("wanted a zero-length returned slice")
+ }
+
+}
+
+func TestReplace(t *testing.T) {
+ // there are 4 cases that need coverage:
+ // - new value is smaller than old value
+ // - new value is the same size as the old value
+ // - new value is larger than old, but fits within cap(b)
+ // - new value is larger than old, and doesn't fit within cap(b)
+
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+ en.WriteMapHeader(3)
+ en.WriteString("thing_one")
+ en.WriteString("value_one")
+ en.WriteString("thing_two")
+ en.WriteFloat64(2.0)
+ en.WriteString("some_bytes")
+ en.WriteBytes([]byte("here are some bytes"))
+ en.Flush()
+
+ // same-size replacement
+ var fbuf bytes.Buffer
+ w := NewWriter(&fbuf)
+ w.WriteFloat64(4.0)
+ w.Flush()
+
+ // replace 2.0 with 4.0 in field two
+ raw := Replace("thing_two", buf.Bytes(), fbuf.Bytes())
+ if len(raw) == 0 {
+ t.Fatal("field not found")
+ }
+ var err error
+ m := make(map[string]interface{})
+ m, _, err = ReadMapStrIntfBytes(raw, m)
+ if err != nil {
+ t.Logf("%q", raw)
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(m["thing_two"], 4.0) {
+ t.Errorf("wanted %v; got %v", 4.0, m["thing_two"])
+ }
+
+ // smaller-size replacement
+ // replace 2.0 with []byte("hi!")
+ fbuf.Reset()
+ w.WriteBytes([]byte("hi!"))
+ w.Flush()
+ raw = Replace("thing_two", raw, fbuf.Bytes())
+ if len(raw) == 0 {
+ t.Fatal("field not found")
+ }
+
+ m, _, err = ReadMapStrIntfBytes(raw, m)
+ if err != nil {
+ t.Logf("%q", raw)
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(m["thing_two"], []byte("hi!")) {
+ t.Errorf("wanted %v; got %v", []byte("hi!"), m["thing_two"])
+ }
+
+ // larger-size replacement
+ fbuf.Reset()
+ w.WriteBytes([]byte("some even larger bytes than before"))
+ w.Flush()
+ raw = Replace("some_bytes", raw, fbuf.Bytes())
+	if len(raw) == 0 {
+		t.Logf("%q", raw)
+		t.Fatal("field not found")
+	}
+
+ m, _, err = ReadMapStrIntfBytes(raw, m)
+ if err != nil {
+ t.Logf("%q", raw)
+ t.Fatal(err)
+ }
+
+ if !reflect.DeepEqual(m["some_bytes"], []byte("some even larger bytes than before")) {
+		t.Errorf("wanted %v; got %v", []byte("some even larger bytes than before"), m["some_bytes"])
+ }
+
+ // identical in-place replacement
+ field := Locate("some_bytes", raw)
+ newraw := CopyReplace("some_bytes", raw, field)
+
+ if !bytes.Equal(newraw, raw) {
+ t.Logf("in: %q", raw)
+ t.Logf("out: %q", newraw)
+ t.Error("bytes not equal after copyreplace")
+ }
+}
+
+func BenchmarkLocate(b *testing.B) {
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+ en.WriteMapHeader(3)
+ en.WriteString("thing_one")
+ en.WriteString("value_one")
+ en.WriteString("thing_two")
+ en.WriteFloat64(2.0)
+ en.WriteString("thing_three")
+ en.WriteBytes([]byte("hello!"))
+ en.Flush()
+
+ raw := buf.Bytes()
+ // bytes/s will be the number of bytes traversed per unit of time
+ field := Locate("thing_three", raw)
+ b.SetBytes(int64(len(raw) - len(field)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ Locate("thing_three", raw)
+ }
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/elsize.go b/vendor/github.com/tinylib/msgp/msgp/elsize.go
new file mode 100644
index 00000000..95762e7e
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/elsize.go
@@ -0,0 +1,99 @@
+package msgp
+
+// size of every object on the wire,
+// plus type information. gives us
+// constant-time type information
+// for traversing composite objects.
+//
+var sizes = [256]bytespec{
+ mnil: {size: 1, extra: constsize, typ: NilType},
+ mfalse: {size: 1, extra: constsize, typ: BoolType},
+ mtrue: {size: 1, extra: constsize, typ: BoolType},
+ mbin8: {size: 2, extra: extra8, typ: BinType},
+ mbin16: {size: 3, extra: extra16, typ: BinType},
+ mbin32: {size: 5, extra: extra32, typ: BinType},
+ mext8: {size: 3, extra: extra8, typ: ExtensionType},
+ mext16: {size: 4, extra: extra16, typ: ExtensionType},
+ mext32: {size: 6, extra: extra32, typ: ExtensionType},
+ mfloat32: {size: 5, extra: constsize, typ: Float32Type},
+ mfloat64: {size: 9, extra: constsize, typ: Float64Type},
+ muint8: {size: 2, extra: constsize, typ: UintType},
+ muint16: {size: 3, extra: constsize, typ: UintType},
+ muint32: {size: 5, extra: constsize, typ: UintType},
+ muint64: {size: 9, extra: constsize, typ: UintType},
+ mint8: {size: 2, extra: constsize, typ: IntType},
+ mint16: {size: 3, extra: constsize, typ: IntType},
+ mint32: {size: 5, extra: constsize, typ: IntType},
+ mint64: {size: 9, extra: constsize, typ: IntType},
+ mfixext1: {size: 3, extra: constsize, typ: ExtensionType},
+ mfixext2: {size: 4, extra: constsize, typ: ExtensionType},
+ mfixext4: {size: 6, extra: constsize, typ: ExtensionType},
+ mfixext8: {size: 10, extra: constsize, typ: ExtensionType},
+ mfixext16: {size: 18, extra: constsize, typ: ExtensionType},
+ mstr8: {size: 2, extra: extra8, typ: StrType},
+ mstr16: {size: 3, extra: extra16, typ: StrType},
+ mstr32: {size: 5, extra: extra32, typ: StrType},
+ marray16: {size: 3, extra: array16v, typ: ArrayType},
+ marray32: {size: 5, extra: array32v, typ: ArrayType},
+ mmap16: {size: 3, extra: map16v, typ: MapType},
+ mmap32: {size: 5, extra: map32v, typ: MapType},
+}
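+
+// For example, the entry for mbin16 above says: a bin16 object has a
+// 3-byte prefix (the lead byte plus a big-endian uint16 length), that
+// many payload bytes follow, and the value's type is BinType.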
+
+func init() {
+ // set up fixed fields
+
+ // fixint
+ for i := mfixint; i < 0x80; i++ {
+ sizes[i] = bytespec{size: 1, extra: constsize, typ: IntType}
+ }
+
+ // nfixint
+ for i := uint16(mnfixint); i < 0x100; i++ {
+ sizes[uint8(i)] = bytespec{size: 1, extra: constsize, typ: IntType}
+ }
+
+ // fixstr gets constsize,
+ // since the prefix yields the size
+ for i := mfixstr; i < 0xc0; i++ {
+ sizes[i] = bytespec{size: 1 + rfixstr(i), extra: constsize, typ: StrType}
+ }
+
+ // fixmap
+ for i := mfixmap; i < 0x90; i++ {
+ sizes[i] = bytespec{size: 1, extra: varmode(2 * rfixmap(i)), typ: MapType}
+ }
+
+ // fixarray
+ for i := mfixarray; i < 0xa0; i++ {
+ sizes[i] = bytespec{size: 1, extra: varmode(rfixarray(i)), typ: ArrayType}
+ }
+}
+
+// a valid bytespec has
+// non-zero 'size' and
+// non-zero 'typ'
+type bytespec struct {
+ size uint8 // prefix size information
+ extra varmode // extra size information
+ typ Type // type
+ _ byte // makes bytespec 4 bytes (yes, this matters)
+}
+
+// size mode
+// if positive, # elements for composites
+type varmode int8
+
+const (
+ constsize varmode = 0 // constant size (size bytes + uint8(varmode) objects)
+ extra8 = -1 // has uint8(p[1]) extra bytes
+ extra16 = -2 // has be16(p[1:]) extra bytes
+ extra32 = -3 // has be32(p[1:]) extra bytes
+ map16v = -4 // use map16
+ map32v = -5 // use map32
+ array16v = -6 // use array16
+ array32v = -7 // use array32
+)
+
+func getType(v byte) Type {
+ return sizes[v].typ
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/errors.go b/vendor/github.com/tinylib/msgp/msgp/errors.go
new file mode 100644
index 00000000..5c24f271
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/errors.go
@@ -0,0 +1,142 @@
+package msgp
+
+import (
+ "fmt"
+ "reflect"
+)
+
+var (
+ // ErrShortBytes is returned when the
+ // slice being decoded is too short to
+ // contain the contents of the message
+ ErrShortBytes error = errShort{}
+
+ // this error is only returned
+ // if we reach code that should
+ // be unreachable
+ fatal error = errFatal{}
+)
+
+// Error is the interface satisfied
+// by all of the errors that originate
+// from this package.
+type Error interface {
+ error
+
+	// Resumable returns whether or not
+	// decoding can continue after this error:
+	// it reports false when the stream of data
+	// is malformed and the information is unrecoverable.
+ Resumable() bool
+}
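+
+// For example, a caller might resume after a recoverable error (a sketch):
+//
+//	if e, ok := err.(Error); ok && e.Resumable() {
+//		// the stream is still well-formed; decoding may continue
+//	}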
+
+type errShort struct{}
+
+func (e errShort) Error() string { return "msgp: too few bytes left to read object" }
+func (e errShort) Resumable() bool { return false }
+
+type errFatal struct{}
+
+func (f errFatal) Error() string { return "msgp: fatal decoding error (unreachable code)" }
+func (f errFatal) Resumable() bool { return false }
+
+// ArrayError is an error returned
+// when decoding a fix-sized array
+// of the wrong size
+type ArrayError struct {
+ Wanted uint32
+ Got uint32
+}
+
+// Error implements the error interface
+func (a ArrayError) Error() string {
+ return fmt.Sprintf("msgp: wanted array of size %d; got %d", a.Wanted, a.Got)
+}
+
+// Resumable is always 'true' for ArrayErrors
+func (a ArrayError) Resumable() bool { return true }
+
+// IntOverflow is returned when a call
+// would downcast an integer to a type
+// with too few bits to hold its value.
+type IntOverflow struct {
+ Value int64 // the value of the integer
+ FailedBitsize int // the bit size that the int64 could not fit into
+}
+
+// Error implements the error interface
+func (i IntOverflow) Error() string {
+ return fmt.Sprintf("msgp: %d overflows int%d", i.Value, i.FailedBitsize)
+}
+
+// Resumable is always 'true' for overflows
+func (i IntOverflow) Resumable() bool { return true }
+
+// UintOverflow is returned when a call
+// would downcast an unsigned integer to a type
+// with too few bits to hold its value
+type UintOverflow struct {
+ Value uint64 // value of the uint
+ FailedBitsize int // the bit size that couldn't fit the value
+}
+
+// Error implements the error interface
+func (u UintOverflow) Error() string {
+ return fmt.Sprintf("msgp: %d overflows uint%d", u.Value, u.FailedBitsize)
+}
+
+// Resumable is always 'true' for overflows
+func (u UintOverflow) Resumable() bool { return true }
+
+// A TypeError is returned when a particular
+// decoding method is unsuitable for decoding
+// a particular MessagePack value.
+type TypeError struct {
+ Method Type // Type expected by method
+ Encoded Type // Type actually encoded
+}
+
+// Error implements the error interface
+func (t TypeError) Error() string {
+ return fmt.Sprintf("msgp: attempted to decode type %q with method for %q", t.Encoded, t.Method)
+}
+
+// Resumable returns 'true' for TypeErrors
+func (t TypeError) Resumable() bool { return true }
+
+// returns either InvalidPrefixError or
+// TypeError depending on whether or not
+// the prefix is recognized
+func badPrefix(want Type, lead byte) error {
+ t := sizes[lead].typ
+ if t == InvalidType {
+ return InvalidPrefixError(lead)
+ }
+ return TypeError{Method: want, Encoded: t}
+}
+
+// InvalidPrefixError is returned when a bad encoding
+// uses a prefix that is not recognized in the MessagePack standard.
+// This kind of error is unrecoverable.
+type InvalidPrefixError byte
+
+// Error implements the error interface
+func (i InvalidPrefixError) Error() string {
+ return fmt.Sprintf("msgp: unrecognized type prefix 0x%x", byte(i))
+}
+
+// Resumable returns 'false' for InvalidPrefixErrors
+func (i InvalidPrefixError) Resumable() bool { return false }
+
+// ErrUnsupportedType is returned
+// when a bad argument is supplied
+// to a function that takes `interface{}`.
+type ErrUnsupportedType struct {
+ T reflect.Type
+}
+
+// Error implements error
+func (e *ErrUnsupportedType) Error() string { return fmt.Sprintf("msgp: type %q not supported", e.T) }
+
+// Resumable returns 'true' for ErrUnsupportedType
+func (e *ErrUnsupportedType) Resumable() bool { return true }
diff --git a/vendor/github.com/tinylib/msgp/msgp/extension.go b/vendor/github.com/tinylib/msgp/msgp/extension.go
new file mode 100644
index 00000000..588b18f9
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/extension.go
@@ -0,0 +1,548 @@
+package msgp
+
+import (
+ "fmt"
+ "math"
+)
+
+const (
+ // Complex64Extension is the extension number used for complex64
+ Complex64Extension = 3
+
+ // Complex128Extension is the extension number used for complex128
+ Complex128Extension = 4
+
+ // TimeExtension is the extension number used for time.Time
+ TimeExtension = 5
+)
+
+// our extensions live here
+var extensionReg = make(map[int8]func() Extension)
+
+// RegisterExtension registers extensions so that they
+// can be initialized and returned by methods that
+// decode `interface{}` values. This should only
+// be called during initialization. f() should return
+// a newly-initialized zero value of the extension. Keep in
+// mind that extensions 3, 4, and 5 are reserved for
+// complex64, complex128, and time.Time, respectively,
+// and that MessagePack reserves extension types from -127 to -1.
+//
+// For example, if you wanted to register a user-defined struct:
+//
+//	msgp.RegisterExtension(10, func() msgp.Extension { return &MyExtension{} })
+//
+// RegisterExtension will panic if you call it multiple times
+// with the same 'typ' argument, or if you use a reserved
+// type (3, 4, or 5).
+func RegisterExtension(typ int8, f func() Extension) {
+ switch typ {
+ case Complex64Extension, Complex128Extension, TimeExtension:
+ panic(fmt.Sprint("msgp: forbidden extension type:", typ))
+ }
+ if _, ok := extensionReg[typ]; ok {
+ panic(fmt.Sprint("msgp: RegisterExtension() called with typ", typ, "more than once"))
+ }
+ extensionReg[typ] = f
+}
+
+// ExtensionTypeError is an error type returned
+// when there is a mis-match between an extension type
+// and the type encoded on the wire
+type ExtensionTypeError struct {
+ Got int8
+ Want int8
+}
+
+// Error implements the error interface
+func (e ExtensionTypeError) Error() string {
+ return fmt.Sprintf("msgp: error decoding extension: wanted type %d; got type %d", e.Want, e.Got)
+}
+
+// Resumable returns 'true' for ExtensionTypeErrors
+func (e ExtensionTypeError) Resumable() bool { return true }
+
+func errExt(got int8, wanted int8) error {
+ return ExtensionTypeError{Got: got, Want: wanted}
+}
+
+// Extension is the interface fulfilled
+// by types that want to define their
+// own binary encoding.
+type Extension interface {
+ // ExtensionType should return
+	// an int8 that identifies the concrete
+ // type of the extension. (Types <0 are
+ // officially reserved by the MessagePack
+ // specifications.)
+ ExtensionType() int8
+
+ // Len should return the length
+ // of the data to be encoded
+ Len() int
+
+ // MarshalBinaryTo should copy
+ // the data into the supplied slice,
+ // assuming that the slice has length Len()
+ MarshalBinaryTo([]byte) error
+
+ UnmarshalBinary([]byte) error
+}
+
+// RawExtension implements the Extension interface
+type RawExtension struct {
+ Data []byte
+ Type int8
+}
+
+// ExtensionType implements Extension.ExtensionType, and returns r.Type
+func (r *RawExtension) ExtensionType() int8 { return r.Type }
+
+// Len implements Extension.Len, and returns len(r.Data)
+func (r *RawExtension) Len() int { return len(r.Data) }
+
+// MarshalBinaryTo implements Extension.MarshalBinaryTo,
+// and copies r.Data into the supplied slice
+func (r *RawExtension) MarshalBinaryTo(d []byte) error {
+ copy(d, r.Data)
+ return nil
+}
+
+// UnmarshalBinary implements Extension.UnmarshalBinary,
+// and sets r.Data to the contents of the provided slice
+func (r *RawExtension) UnmarshalBinary(b []byte) error {
+ if cap(r.Data) >= len(b) {
+ r.Data = r.Data[0:len(b)]
+ } else {
+ r.Data = make([]byte, len(b))
+ }
+ copy(r.Data, b)
+ return nil
+}
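+
+// For example, a RawExtension round-trip might look like (a sketch):
+//
+//	in := &RawExtension{Type: 10, Data: []byte("payload")}
+//	bts, err := AppendExtension(nil, in)
+//	out := &RawExtension{Type: 10}
+//	_, err = ReadExtensionBytes(bts, out) // out.Data now equals in.Data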
+
+// WriteExtension writes an extension type to the writer
+func (mw *Writer) WriteExtension(e Extension) error {
+ l := e.Len()
+ var err error
+ switch l {
+ case 0:
+ o, err := mw.require(3)
+ if err != nil {
+ return err
+ }
+ mw.buf[o] = mext8
+ mw.buf[o+1] = 0
+ mw.buf[o+2] = byte(e.ExtensionType())
+ case 1:
+ o, err := mw.require(2)
+ if err != nil {
+ return err
+ }
+ mw.buf[o] = mfixext1
+ mw.buf[o+1] = byte(e.ExtensionType())
+ case 2:
+ o, err := mw.require(2)
+ if err != nil {
+ return err
+ }
+ mw.buf[o] = mfixext2
+ mw.buf[o+1] = byte(e.ExtensionType())
+ case 4:
+ o, err := mw.require(2)
+ if err != nil {
+ return err
+ }
+ mw.buf[o] = mfixext4
+ mw.buf[o+1] = byte(e.ExtensionType())
+ case 8:
+ o, err := mw.require(2)
+ if err != nil {
+ return err
+ }
+ mw.buf[o] = mfixext8
+ mw.buf[o+1] = byte(e.ExtensionType())
+ case 16:
+ o, err := mw.require(2)
+ if err != nil {
+ return err
+ }
+ mw.buf[o] = mfixext16
+ mw.buf[o+1] = byte(e.ExtensionType())
+ default:
+ switch {
+ case l < math.MaxUint8:
+ o, err := mw.require(3)
+ if err != nil {
+ return err
+ }
+ mw.buf[o] = mext8
+ mw.buf[o+1] = byte(uint8(l))
+ mw.buf[o+2] = byte(e.ExtensionType())
+ case l < math.MaxUint16:
+ o, err := mw.require(4)
+ if err != nil {
+ return err
+ }
+ mw.buf[o] = mext16
+ big.PutUint16(mw.buf[o+1:], uint16(l))
+ mw.buf[o+3] = byte(e.ExtensionType())
+ default:
+ o, err := mw.require(6)
+ if err != nil {
+ return err
+ }
+ mw.buf[o] = mext32
+ big.PutUint32(mw.buf[o+1:], uint32(l))
+ mw.buf[o+5] = byte(e.ExtensionType())
+ }
+ }
+ // we can only write directly to the
+ // buffer if we're sure that it
+ // fits the object
+ if l <= mw.bufsize() {
+ o, err := mw.require(l)
+ if err != nil {
+ return err
+ }
+ return e.MarshalBinaryTo(mw.buf[o:])
+ }
+ // here we create a new buffer
+ // just large enough for the body
+ // and save it as the write buffer
+ err = mw.flush()
+ if err != nil {
+ return err
+ }
+ buf := make([]byte, l)
+ err = e.MarshalBinaryTo(buf)
+ if err != nil {
+ return err
+ }
+ mw.buf = buf
+ mw.wloc = l
+ return nil
+}
+
+// peek at the extension type, assuming the next
+// kind to be read is Extension
+func (m *Reader) peekExtensionType() (int8, error) {
+ p, err := m.R.Peek(2)
+ if err != nil {
+ return 0, err
+ }
+ spec := sizes[p[0]]
+ if spec.typ != ExtensionType {
+ return 0, badPrefix(ExtensionType, p[0])
+ }
+ if spec.extra == constsize {
+ return int8(p[1]), nil
+ }
+ size := spec.size
+ p, err = m.R.Peek(int(size))
+ if err != nil {
+ return 0, err
+ }
+ return int8(p[size-1]), nil
+}
+
+// peekExtension peeks at the extension encoding type
+// (must guarantee at least 1 byte in 'b')
+func peekExtension(b []byte) (int8, error) {
+ spec := sizes[b[0]]
+ size := spec.size
+ if spec.typ != ExtensionType {
+ return 0, badPrefix(ExtensionType, b[0])
+ }
+ if len(b) < int(size) {
+ return 0, ErrShortBytes
+ }
+ // for fixed extensions,
+ // the type information is in
+ // the second byte
+ if spec.extra == constsize {
+ return int8(b[1]), nil
+ }
+ // otherwise, it's in the last
+ // part of the prefix
+ return int8(b[size-1]), nil
+}
+
+// ReadExtension reads the next object from the reader
+// as an extension. ReadExtension will fail if the next
+// object in the stream is not an extension, or if
+// e.Type() is not the same as the wire type.
+func (m *Reader) ReadExtension(e Extension) (err error) {
+ var p []byte
+ p, err = m.R.Peek(2)
+ if err != nil {
+ return
+ }
+ lead := p[0]
+ var read int
+ var off int
+ switch lead {
+ case mfixext1:
+ if int8(p[1]) != e.ExtensionType() {
+ err = errExt(int8(p[1]), e.ExtensionType())
+ return
+ }
+ p, err = m.R.Peek(3)
+ if err != nil {
+ return
+ }
+ err = e.UnmarshalBinary(p[2:])
+ if err == nil {
+ _, err = m.R.Skip(3)
+ }
+ return
+
+ case mfixext2:
+ if int8(p[1]) != e.ExtensionType() {
+ err = errExt(int8(p[1]), e.ExtensionType())
+ return
+ }
+ p, err = m.R.Peek(4)
+ if err != nil {
+ return
+ }
+ err = e.UnmarshalBinary(p[2:])
+ if err == nil {
+ _, err = m.R.Skip(4)
+ }
+ return
+
+ case mfixext4:
+ if int8(p[1]) != e.ExtensionType() {
+ err = errExt(int8(p[1]), e.ExtensionType())
+ return
+ }
+ p, err = m.R.Peek(6)
+ if err != nil {
+ return
+ }
+ err = e.UnmarshalBinary(p[2:])
+ if err == nil {
+ _, err = m.R.Skip(6)
+ }
+ return
+
+ case mfixext8:
+ if int8(p[1]) != e.ExtensionType() {
+ err = errExt(int8(p[1]), e.ExtensionType())
+ return
+ }
+ p, err = m.R.Peek(10)
+ if err != nil {
+ return
+ }
+ err = e.UnmarshalBinary(p[2:])
+ if err == nil {
+ _, err = m.R.Skip(10)
+ }
+ return
+
+ case mfixext16:
+ if int8(p[1]) != e.ExtensionType() {
+ err = errExt(int8(p[1]), e.ExtensionType())
+ return
+ }
+ p, err = m.R.Peek(18)
+ if err != nil {
+ return
+ }
+ err = e.UnmarshalBinary(p[2:])
+ if err == nil {
+ _, err = m.R.Skip(18)
+ }
+ return
+
+ case mext8:
+ p, err = m.R.Peek(3)
+ if err != nil {
+ return
+ }
+ if int8(p[2]) != e.ExtensionType() {
+ err = errExt(int8(p[2]), e.ExtensionType())
+ return
+ }
+ read = int(uint8(p[1]))
+ off = 3
+
+ case mext16:
+ p, err = m.R.Peek(4)
+ if err != nil {
+ return
+ }
+ if int8(p[3]) != e.ExtensionType() {
+ err = errExt(int8(p[3]), e.ExtensionType())
+ return
+ }
+ read = int(big.Uint16(p[1:]))
+ off = 4
+
+ case mext32:
+ p, err = m.R.Peek(6)
+ if err != nil {
+ return
+ }
+ if int8(p[5]) != e.ExtensionType() {
+ err = errExt(int8(p[5]), e.ExtensionType())
+ return
+ }
+ read = int(big.Uint32(p[1:]))
+ off = 6
+
+ default:
+ err = badPrefix(ExtensionType, lead)
+ return
+ }
+
+ p, err = m.R.Peek(read + off)
+ if err != nil {
+ return
+ }
+ err = e.UnmarshalBinary(p[off:])
+ if err == nil {
+ _, err = m.R.Skip(read + off)
+ }
+ return
+}
+
+// AppendExtension appends a MessagePack extension to the provided slice
+func AppendExtension(b []byte, e Extension) ([]byte, error) {
+ l := e.Len()
+ var o []byte
+ var n int
+ switch l {
+ case 0:
+ o, n = ensure(b, 3)
+ o[n] = mext8
+ o[n+1] = 0
+ o[n+2] = byte(e.ExtensionType())
+ return o[:n+3], nil
+ case 1:
+ o, n = ensure(b, 3)
+ o[n] = mfixext1
+ o[n+1] = byte(e.ExtensionType())
+ n += 2
+ case 2:
+ o, n = ensure(b, 4)
+ o[n] = mfixext2
+ o[n+1] = byte(e.ExtensionType())
+ n += 2
+ case 4:
+ o, n = ensure(b, 6)
+ o[n] = mfixext4
+ o[n+1] = byte(e.ExtensionType())
+ n += 2
+ case 8:
+ o, n = ensure(b, 10)
+ o[n] = mfixext8
+ o[n+1] = byte(e.ExtensionType())
+ n += 2
+ case 16:
+ o, n = ensure(b, 18)
+ o[n] = mfixext16
+ o[n+1] = byte(e.ExtensionType())
+ n += 2
+	default:
+		// only variable-length sizes reach this switch; nesting it
+		// under 'default' keeps the fixext prefixes written above
+		// from being clobbered by the generic ext8/16/32 encodings
+		switch {
+		case l < math.MaxUint8:
+			o, n = ensure(b, l+3)
+			o[n] = mext8
+			o[n+1] = byte(uint8(l))
+			o[n+2] = byte(e.ExtensionType())
+			n += 3
+		case l < math.MaxUint16:
+			o, n = ensure(b, l+4)
+			o[n] = mext16
+			big.PutUint16(o[n+1:], uint16(l))
+			o[n+3] = byte(e.ExtensionType())
+			n += 4
+		default:
+			o, n = ensure(b, l+6)
+			o[n] = mext32
+			big.PutUint32(o[n+1:], uint32(l))
+			o[n+5] = byte(e.ExtensionType())
+			n += 6
+		}
+	}
+ return o, e.MarshalBinaryTo(o[n:])
+}
+
+// ReadExtensionBytes reads an extension from 'b' into 'e'
+// and returns any remaining bytes.
+// Possible errors:
+// - ErrShortBytes ('b' not long enough)
+// - ExtensionTypeError{} (wire type not the same as e.Type())
+// - TypeError{} (next object not an extension)
+// - InvalidPrefixError
+// - An unmarshal error returned from e.UnmarshalBinary
+func ReadExtensionBytes(b []byte, e Extension) ([]byte, error) {
+ l := len(b)
+ if l < 3 {
+ return b, ErrShortBytes
+ }
+ lead := b[0]
+ var (
+ sz int // size of 'data'
+ off int // offset of 'data'
+ typ int8
+ )
+ switch lead {
+ case mfixext1:
+ typ = int8(b[1])
+ sz = 1
+ off = 2
+ case mfixext2:
+ typ = int8(b[1])
+ sz = 2
+ off = 2
+ case mfixext4:
+ typ = int8(b[1])
+ sz = 4
+ off = 2
+ case mfixext8:
+ typ = int8(b[1])
+ sz = 8
+ off = 2
+ case mfixext16:
+ typ = int8(b[1])
+ sz = 16
+ off = 2
+ case mext8:
+ sz = int(uint8(b[1]))
+ typ = int8(b[2])
+ off = 3
+ if sz == 0 {
+ return b[3:], e.UnmarshalBinary(b[3:3])
+ }
+ case mext16:
+ if l < 4 {
+ return b, ErrShortBytes
+ }
+ sz = int(big.Uint16(b[1:]))
+ typ = int8(b[3])
+ off = 4
+ case mext32:
+ if l < 6 {
+ return b, ErrShortBytes
+ }
+ sz = int(big.Uint32(b[1:]))
+ typ = int8(b[5])
+ off = 6
+ default:
+ return b, badPrefix(ExtensionType, lead)
+ }
+
+ if typ != e.ExtensionType() {
+ return b, errExt(typ, e.ExtensionType())
+ }
+
+ // the data of the extension starts
+ // at 'off' and is 'sz' bytes long
+ if len(b[off:]) < sz {
+ return b, ErrShortBytes
+ }
+ tot := off + sz
+ return b[tot:], e.UnmarshalBinary(b[off:tot])
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/extension_test.go b/vendor/github.com/tinylib/msgp/msgp/extension_test.go
new file mode 100644
index 00000000..d46fcfee
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/extension_test.go
@@ -0,0 +1,49 @@
+package msgp
+
+import (
+ "bytes"
+ "math/rand"
+ "testing"
+ "time"
+)
+
+var extSizes = [...]int{0, 1, 2, 4, 8, 16, int(tint8), int(tuint16), int(tuint32)}
+
+func randomExt() RawExtension {
+ e := RawExtension{}
+ e.Type = int8(rand.Int())
+ e.Data = RandBytes(extSizes[rand.Intn(len(extSizes))])
+ return e
+}
+
+func TestReadWriteExtension(t *testing.T) {
+ rand.Seed(time.Now().Unix())
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+ dc := NewReader(&buf)
+
+ for i := 0; i < 25; i++ {
+ buf.Reset()
+ e := randomExt()
+ en.WriteExtension(&e)
+ en.Flush()
+ err := dc.ReadExtension(&e)
+ if err != nil {
+ t.Errorf("error with extension (length %d): %s", len(buf.Bytes()), err)
+ }
+ }
+}
+
+func TestReadWriteExtensionBytes(t *testing.T) {
+ var bts []byte
+ rand.Seed(time.Now().Unix())
+
+ for i := 0; i < 24; i++ {
+ e := randomExt()
+ bts, _ = AppendExtension(bts[0:0], &e)
+ _, err := ReadExtensionBytes(bts, &e)
+ if err != nil {
+ t.Errorf("error with extension (length %d): %s", len(bts), err)
+ }
+ }
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/file.go b/vendor/github.com/tinylib/msgp/msgp/file.go
new file mode 100644
index 00000000..8e7370eb
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/file.go
@@ -0,0 +1,92 @@
+// +build linux darwin dragonfly freebsd netbsd openbsd
+// +build !appengine
+
+package msgp
+
+import (
+ "os"
+ "syscall"
+)
+
+// ReadFile reads a file into 'dst' using
+// a read-only memory mapping. Consequently,
+// the file must be mmap-able, and the
+// Unmarshaler should never write to
+// the source memory. (Methods generated
+// by the msgp tool obey that constraint, but
+// user-defined implementations may not.)
+//
+// Reading and writing through file mappings
+// is only efficient for large files; small
+// files are best read and written using
+// the ordinary streaming interfaces.
+//
+func ReadFile(dst Unmarshaler, file *os.File) error {
+ stat, err := file.Stat()
+ if err != nil {
+ return err
+ }
+ data, err := syscall.Mmap(int(file.Fd()), 0, int(stat.Size()), syscall.PROT_READ, syscall.MAP_SHARED)
+ if err != nil {
+ return err
+ }
+ adviseRead(data)
+ _, err = dst.UnmarshalMsg(data)
+ uerr := syscall.Munmap(data)
+ if err == nil {
+ err = uerr
+ }
+ return err
+}
+
+// MarshalSizer is the combination
+// of the Marshaler and Sizer
+// interfaces.
+type MarshalSizer interface {
+ Marshaler
+ Sizer
+}
+
+// WriteFile writes a file from 'src' using
+// memory mapping. It overwrites the entire
+// contents of the previous file.
+// The mapping size is calculated
+// using the `Msgsize()` method
+// of 'src', so it must produce a result
+// equal to or greater than the actual encoded
+// size of the object. Otherwise,
+// a fault (SIGBUS) will occur.
+//
+// Reading and writing through file mappings
+// is only efficient for large files; small
+// files are best read and written using
+// the ordinary streaming interfaces.
+//
+// NOTE: The performance of this call
+// is highly OS- and filesystem-dependent.
+// Users should take care to test that this
+// performs as expected in a production environment.
+// (Linux users should run a kernel and filesystem
+// that support fallocate(2) for the best results.)
+func WriteFile(src MarshalSizer, file *os.File) error {
+ sz := src.Msgsize()
+ err := fallocate(file, int64(sz))
+ if err != nil {
+ return err
+ }
+ data, err := syscall.Mmap(int(file.Fd()), 0, sz, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED)
+ if err != nil {
+ return err
+ }
+ adviseWrite(data)
+ chunk := data[:0]
+ chunk, err = src.MarshalMsg(chunk)
+ if err != nil {
+ return err
+ }
+ uerr := syscall.Munmap(data)
+ if uerr != nil {
+ return uerr
+ }
+ return file.Truncate(int64(len(chunk)))
+}
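+
+// For example (a sketch; Foo is a hypothetical msgp-generated type
+// satisfying both MarshalSizer and Unmarshaler):
+//
+//	f, _ := os.Create("foo.msgp")
+//	err := WriteFile(&foo, f)
+//	// ... later, read it back ...
+//	err = ReadFile(&foo, f)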
diff --git a/vendor/github.com/tinylib/msgp/msgp/file_port.go b/vendor/github.com/tinylib/msgp/msgp/file_port.go
new file mode 100644
index 00000000..6e654dbd
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/file_port.go
@@ -0,0 +1,47 @@
+// +build windows appengine
+
+package msgp
+
+import (
+ "io/ioutil"
+ "os"
+)
+
+// MarshalSizer is the combination
+// of the Marshaler and Sizer
+// interfaces.
+type MarshalSizer interface {
+ Marshaler
+ Sizer
+}
+
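+// ReadFile reads the contents of 'file' into 'dst'. This is the
+// portable fallback used where memory mapping is unavailable: it
+// streams via DecodeMsg when 'dst' is Decodable, and otherwise
+// reads the whole file and unmarshals it in one step.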
+func ReadFile(dst Unmarshaler, file *os.File) error {
+ if u, ok := dst.(Decodable); ok {
+ return u.DecodeMsg(NewReader(file))
+ }
+
+ data, err := ioutil.ReadAll(file)
+ if err != nil {
+ return err
+ }
+ _, err = dst.UnmarshalMsg(data)
+ return err
+}
+
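+// WriteFile writes 'src' to 'file'. It streams via EncodeMsg when
+// 'src' is Encodable, and otherwise marshals to memory and writes
+// the result in a single call.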
+func WriteFile(src MarshalSizer, file *os.File) error {
+ if e, ok := src.(Encodable); ok {
+ w := NewWriter(file)
+ err := e.EncodeMsg(w)
+ if err == nil {
+ err = w.Flush()
+ }
+ return err
+ }
+
+ raw, err := src.MarshalMsg(nil)
+ if err != nil {
+ return err
+ }
+ _, err = file.Write(raw)
+ return err
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/file_test.go b/vendor/github.com/tinylib/msgp/msgp/file_test.go
new file mode 100644
index 00000000..1cc01cec
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/file_test.go
@@ -0,0 +1,103 @@
+// +build linux darwin dragonfly freebsd netbsd openbsd
+
+package msgp_test
+
+import (
+ "bytes"
+ "crypto/rand"
+ "github.com/tinylib/msgp/msgp"
+ prand "math/rand"
+ "os"
+ "testing"
+)
+
+type rawBytes []byte
+
+func (r rawBytes) MarshalMsg(b []byte) ([]byte, error) {
+ return msgp.AppendBytes(b, []byte(r)), nil
+}
+
+func (r rawBytes) Msgsize() int {
+ return msgp.BytesPrefixSize + len(r)
+}
+
+func (r *rawBytes) UnmarshalMsg(b []byte) ([]byte, error) {
+ tmp, out, err := msgp.ReadBytesBytes(b, (*(*[]byte)(r))[:0])
+ *r = rawBytes(tmp)
+ return out, err
+}
+
+func TestReadWriteFile(t *testing.T) {
+ t.Parallel()
+
+ f, err := os.Create("tmpfile")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() {
+ f.Close()
+ os.Remove("tmpfile")
+ }()
+
+ data := make([]byte, 1024*1024)
+ rand.Read(data)
+
+ err = msgp.WriteFile(rawBytes(data), f)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var out rawBytes
+ f.Seek(0, os.SEEK_SET)
+ err = msgp.ReadFile(&out, f)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal([]byte(out), []byte(data)) {
+ t.Fatal("Input and output not equal.")
+ }
+}
+
+var blobstrings = []string{"", "a string", "a longer string here!"}
+var blobfloats = []float64{0.0, -1.0, 1.0, 3.1415926535}
+var blobints = []int64{0, 1, -1, 80000, 1 << 30}
+var blobbytes = [][]byte{[]byte{}, []byte("hello"), []byte("{\"is_json\":true,\"is_compact\":\"unable to determine\"}")}
+
+func BenchmarkWriteReadFile(b *testing.B) {
+
+ // let's not run out of disk space...
+ if b.N > 10000000 {
+ b.N = 10000000
+ }
+
+ fname := "bench-tmpfile"
+ f, err := os.Create(fname)
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer func(f *os.File, name string) {
+ f.Close()
+ os.Remove(name)
+ }(f, fname)
+
+ data := make(Blobs, b.N)
+
+ for i := range data {
+ data[i].Name = blobstrings[prand.Intn(len(blobstrings))]
+ data[i].Float = blobfloats[prand.Intn(len(blobfloats))]
+ data[i].Amount = blobints[prand.Intn(len(blobints))]
+ data[i].Bytes = blobbytes[prand.Intn(len(blobbytes))]
+ }
+
+ b.SetBytes(int64(data.Msgsize() / b.N))
+ b.ResetTimer()
+ err = msgp.WriteFile(data, f)
+ if err != nil {
+ b.Fatal(err)
+ }
+ err = msgp.ReadFile(&data, f)
+ if err != nil {
+ b.Fatal(err)
+ }
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/floatbench_test.go b/vendor/github.com/tinylib/msgp/msgp/floatbench_test.go
new file mode 100644
index 00000000..575b081b
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/floatbench_test.go
@@ -0,0 +1,25 @@
+package msgp
+
+import (
+ "testing"
+)
+
+func BenchmarkReadWriteFloat32(b *testing.B) {
+ var f float32 = 3.9081
+ bts := AppendFloat32([]byte{}, f)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = AppendFloat32(bts[0:0], f)
+ f, bts, _ = ReadFloat32Bytes(bts)
+ }
+}
+
+func BenchmarkReadWriteFloat64(b *testing.B) {
+ var f float64 = 3.9081
+ bts := AppendFloat64([]byte{}, f)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = AppendFloat64(bts[0:0], f)
+ f, bts, _ = ReadFloat64Bytes(bts)
+ }
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/integers.go b/vendor/github.com/tinylib/msgp/msgp/integers.go
new file mode 100644
index 00000000..f817d775
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/integers.go
@@ -0,0 +1,174 @@
+package msgp
+
+/* ----------------------------------
+ integer encoding utilities
+ (inline-able)
+
+ TODO(tinylib): there are faster,
+ albeit non-portable solutions
+ to the code below. implement
+ byteswap?
+ ---------------------------------- */
+
+func putMint64(b []byte, i int64) {
+ b[0] = mint64
+ b[1] = byte(i >> 56)
+ b[2] = byte(i >> 48)
+ b[3] = byte(i >> 40)
+ b[4] = byte(i >> 32)
+ b[5] = byte(i >> 24)
+ b[6] = byte(i >> 16)
+ b[7] = byte(i >> 8)
+ b[8] = byte(i)
+}
+
+func getMint64(b []byte) int64 {
+ return (int64(b[1]) << 56) | (int64(b[2]) << 48) |
+ (int64(b[3]) << 40) | (int64(b[4]) << 32) |
+ (int64(b[5]) << 24) | (int64(b[6]) << 16) |
+ (int64(b[7]) << 8) | (int64(b[8]))
+}
+
+func putMint32(b []byte, i int32) {
+ b[0] = mint32
+ b[1] = byte(i >> 24)
+ b[2] = byte(i >> 16)
+ b[3] = byte(i >> 8)
+ b[4] = byte(i)
+}
+
+func getMint32(b []byte) int32 {
+ return (int32(b[1]) << 24) | (int32(b[2]) << 16) | (int32(b[3]) << 8) | (int32(b[4]))
+}
+
+func putMint16(b []byte, i int16) {
+ b[0] = mint16
+ b[1] = byte(i >> 8)
+ b[2] = byte(i)
+}
+
+func getMint16(b []byte) (i int16) {
+ return (int16(b[1]) << 8) | int16(b[2])
+}
+
+func putMint8(b []byte, i int8) {
+ b[0] = mint8
+ b[1] = byte(i)
+}
+
+func getMint8(b []byte) (i int8) {
+ return int8(b[1])
+}
+
+func putMuint64(b []byte, u uint64) {
+ b[0] = muint64
+ b[1] = byte(u >> 56)
+ b[2] = byte(u >> 48)
+ b[3] = byte(u >> 40)
+ b[4] = byte(u >> 32)
+ b[5] = byte(u >> 24)
+ b[6] = byte(u >> 16)
+ b[7] = byte(u >> 8)
+ b[8] = byte(u)
+}
+
+func getMuint64(b []byte) uint64 {
+ return (uint64(b[1]) << 56) | (uint64(b[2]) << 48) |
+ (uint64(b[3]) << 40) | (uint64(b[4]) << 32) |
+ (uint64(b[5]) << 24) | (uint64(b[6]) << 16) |
+ (uint64(b[7]) << 8) | (uint64(b[8]))
+}
+
+func putMuint32(b []byte, u uint32) {
+ b[0] = muint32
+ b[1] = byte(u >> 24)
+ b[2] = byte(u >> 16)
+ b[3] = byte(u >> 8)
+ b[4] = byte(u)
+}
+
+func getMuint32(b []byte) uint32 {
+ return (uint32(b[1]) << 24) | (uint32(b[2]) << 16) | (uint32(b[3]) << 8) | (uint32(b[4]))
+}
+
+func putMuint16(b []byte, u uint16) {
+ b[0] = muint16
+ b[1] = byte(u >> 8)
+ b[2] = byte(u)
+}
+
+func getMuint16(b []byte) uint16 {
+ return (uint16(b[1]) << 8) | uint16(b[2])
+}
+
+func putMuint8(b []byte, u uint8) {
+ b[0] = muint8
+ b[1] = byte(u)
+}
+
+func getMuint8(b []byte) uint8 {
+ return uint8(b[1])
+}
+
+func getUnix(b []byte) (sec int64, nsec int32) {
+ sec = (int64(b[0]) << 56) | (int64(b[1]) << 48) |
+ (int64(b[2]) << 40) | (int64(b[3]) << 32) |
+ (int64(b[4]) << 24) | (int64(b[5]) << 16) |
+ (int64(b[6]) << 8) | (int64(b[7]))
+
+ nsec = (int32(b[8]) << 24) | (int32(b[9]) << 16) | (int32(b[10]) << 8) | (int32(b[11]))
+ return
+}
+
+func putUnix(b []byte, sec int64, nsec int32) {
+ b[0] = byte(sec >> 56)
+ b[1] = byte(sec >> 48)
+ b[2] = byte(sec >> 40)
+ b[3] = byte(sec >> 32)
+ b[4] = byte(sec >> 24)
+ b[5] = byte(sec >> 16)
+ b[6] = byte(sec >> 8)
+ b[7] = byte(sec)
+ b[8] = byte(nsec >> 24)
+ b[9] = byte(nsec >> 16)
+ b[10] = byte(nsec >> 8)
+ b[11] = byte(nsec)
+}
+
+/* -----------------------------
+ prefix utilities
+ ----------------------------- */
+
+// write prefix and uint8
+func prefixu8(b []byte, pre byte, sz uint8) {
+ b[0] = pre
+ b[1] = byte(sz)
+}
+
+// write prefix and big-endian uint16
+func prefixu16(b []byte, pre byte, sz uint16) {
+ b[0] = pre
+ b[1] = byte(sz >> 8)
+ b[2] = byte(sz)
+}
+
+// write prefix and big-endian uint32
+func prefixu32(b []byte, pre byte, sz uint32) {
+ b[0] = pre
+ b[1] = byte(sz >> 24)
+ b[2] = byte(sz >> 16)
+ b[3] = byte(sz >> 8)
+ b[4] = byte(sz)
+}
+
+func prefixu64(b []byte, pre byte, sz uint64) {
+ b[0] = pre
+ b[1] = byte(sz >> 56)
+ b[2] = byte(sz >> 48)
+ b[3] = byte(sz >> 40)
+ b[4] = byte(sz >> 32)
+ b[5] = byte(sz >> 24)
+ b[6] = byte(sz >> 16)
+ b[7] = byte(sz >> 8)
+ b[8] = byte(sz)
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/json.go b/vendor/github.com/tinylib/msgp/msgp/json.go
new file mode 100644
index 00000000..4325860a
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/json.go
@@ -0,0 +1,542 @@
+package msgp
+
+import (
+ "bufio"
+ "encoding/base64"
+ "encoding/json"
+ "io"
+ "strconv"
+ "unicode/utf8"
+)
+
+var (
+ null = []byte("null")
+ hex = []byte("0123456789abcdef")
+)
+
+var defuns [_maxtype]func(jsWriter, *Reader) (int, error)
+
+// note: there is an initialization loop if
+// this isn't set up during init()
+func init() {
+ // since none of these functions are inline-able,
+ // there is not much of a penalty to the indirect
+ // call. however, this is best expressed as a jump-table...
+ defuns = [_maxtype]func(jsWriter, *Reader) (int, error){
+ StrType: rwString,
+ BinType: rwBytes,
+ MapType: rwMap,
+ ArrayType: rwArray,
+ Float64Type: rwFloat64,
+ Float32Type: rwFloat32,
+ BoolType: rwBool,
+ IntType: rwInt,
+ UintType: rwUint,
+ NilType: rwNil,
+ ExtensionType: rwExtension,
+ Complex64Type: rwExtension,
+ Complex128Type: rwExtension,
+ TimeType: rwTime,
+ }
+}
+
+// this is the interface
+// used to write json
+type jsWriter interface {
+ io.Writer
+ io.ByteWriter
+ WriteString(string) (int, error)
+}
+
+// CopyToJSON reads MessagePack from 'src' and copies it
+// as JSON to 'dst' until EOF.
+func CopyToJSON(dst io.Writer, src io.Reader) (n int64, err error) {
+ r := NewReader(src)
+ n, err = r.WriteToJSON(dst)
+ freeR(r)
+ return
+}
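+
+// For example (a sketch; 'msgbytes' is any encoded MessagePack buffer):
+//
+//	var out bytes.Buffer
+//	_, err := CopyToJSON(&out, bytes.NewReader(msgbytes))
+//	// out now holds the equivalent JSON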
+
+// WriteToJSON translates MessagePack from 'r' and writes it as
+// JSON to 'w' until the underlying reader returns io.EOF. It returns
+// the number of bytes written, and an error if it stopped before EOF.
+func (r *Reader) WriteToJSON(w io.Writer) (n int64, err error) {
+ var j jsWriter
+ var bf *bufio.Writer
+ if jsw, ok := w.(jsWriter); ok {
+ j = jsw
+ } else {
+ bf = bufio.NewWriter(w)
+ j = bf
+ }
+ var nn int
+ for err == nil {
+ nn, err = rwNext(j, r)
+ n += int64(nn)
+ }
+ if err != io.EOF {
+ if bf != nil {
+ bf.Flush()
+ }
+ return
+ }
+ err = nil
+ if bf != nil {
+ err = bf.Flush()
+ }
+ return
+}
+
+func rwNext(w jsWriter, src *Reader) (int, error) {
+ t, err := src.NextType()
+ if err != nil {
+ return 0, err
+ }
+ return defuns[t](w, src)
+}
+
+func rwMap(dst jsWriter, src *Reader) (n int, err error) {
+ var comma bool
+ var sz uint32
+ var field []byte
+
+ sz, err = src.ReadMapHeader()
+ if err != nil {
+ return
+ }
+
+ if sz == 0 {
+ return dst.WriteString("{}")
+ }
+
+ err = dst.WriteByte('{')
+ if err != nil {
+ return
+ }
+ n++
+ var nn int
+ for i := uint32(0); i < sz; i++ {
+ if comma {
+ err = dst.WriteByte(',')
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ field, err = src.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ nn, err = rwquoted(dst, field)
+ n += nn
+ if err != nil {
+ return
+ }
+
+ err = dst.WriteByte(':')
+ if err != nil {
+ return
+ }
+ n++
+ nn, err = rwNext(dst, src)
+ n += nn
+ if err != nil {
+ return
+ }
+		comma = true
+ }
+
+ err = dst.WriteByte('}')
+ if err != nil {
+ return
+ }
+ n++
+ return
+}
+
+func rwArray(dst jsWriter, src *Reader) (n int, err error) {
+ err = dst.WriteByte('[')
+ if err != nil {
+ return
+ }
+ var sz uint32
+ var nn int
+ sz, err = src.ReadArrayHeader()
+ if err != nil {
+ return
+ }
+ comma := false
+ for i := uint32(0); i < sz; i++ {
+ if comma {
+ err = dst.WriteByte(',')
+ if err != nil {
+ return
+ }
+ n++
+ }
+ nn, err = rwNext(dst, src)
+ n += nn
+ if err != nil {
+ return
+ }
+ comma = true
+ }
+
+ err = dst.WriteByte(']')
+ if err != nil {
+ return
+ }
+ n++
+ return
+}
+
+func rwNil(dst jsWriter, src *Reader) (int, error) {
+ err := src.ReadNil()
+ if err != nil {
+ return 0, err
+ }
+ return dst.Write(null)
+}
+
+func rwFloat32(dst jsWriter, src *Reader) (int, error) {
+ f, err := src.ReadFloat32()
+ if err != nil {
+ return 0, err
+ }
+	src.scratch = strconv.AppendFloat(src.scratch[:0], float64(f), 'f', -1, 32) // bitSize 32: shortest form that round-trips as float32
+ return dst.Write(src.scratch)
+}
+
+func rwFloat64(dst jsWriter, src *Reader) (int, error) {
+ f, err := src.ReadFloat64()
+ if err != nil {
+ return 0, err
+ }
+	src.scratch = strconv.AppendFloat(src.scratch[:0], f, 'f', -1, 64) // bitSize 64: full float64 precision
+ return dst.Write(src.scratch)
+}
+
+func rwInt(dst jsWriter, src *Reader) (int, error) {
+ i, err := src.ReadInt64()
+ if err != nil {
+ return 0, err
+ }
+ src.scratch = strconv.AppendInt(src.scratch[:0], i, 10)
+ return dst.Write(src.scratch)
+}
+
+func rwUint(dst jsWriter, src *Reader) (int, error) {
+ u, err := src.ReadUint64()
+ if err != nil {
+ return 0, err
+ }
+ src.scratch = strconv.AppendUint(src.scratch[:0], u, 10)
+ return dst.Write(src.scratch)
+}
+
+func rwBool(dst jsWriter, src *Reader) (int, error) {
+ b, err := src.ReadBool()
+ if err != nil {
+ return 0, err
+ }
+ if b {
+ return dst.WriteString("true")
+ }
+ return dst.WriteString("false")
+}
+
+func rwTime(dst jsWriter, src *Reader) (int, error) {
+ t, err := src.ReadTime()
+ if err != nil {
+ return 0, err
+ }
+ bts, err := t.MarshalJSON()
+ if err != nil {
+ return 0, err
+ }
+ return dst.Write(bts)
+}
+
+func rwExtension(dst jsWriter, src *Reader) (n int, err error) {
+ et, err := src.peekExtensionType()
+ if err != nil {
+ return 0, err
+ }
+
+ // registered extensions can override
+ // the JSON encoding
+ if j, ok := extensionReg[et]; ok {
+ var bts []byte
+ e := j()
+ err = src.ReadExtension(e)
+ if err != nil {
+ return
+ }
+ bts, err = json.Marshal(e)
+ if err != nil {
+ return
+ }
+ return dst.Write(bts)
+ }
+
+ e := RawExtension{}
+ e.Type = et
+ err = src.ReadExtension(&e)
+ if err != nil {
+ return
+ }
+
+ var nn int
+ err = dst.WriteByte('{')
+ if err != nil {
+ return
+ }
+ n++
+
+	nn, err = dst.WriteString(`"type":`)
+ n += nn
+ if err != nil {
+ return
+ }
+
+ src.scratch = strconv.AppendInt(src.scratch[0:0], int64(e.Type), 10)
+ nn, err = dst.Write(src.scratch)
+ n += nn
+ if err != nil {
+ return
+ }
+
+ nn, err = dst.WriteString(`,"data":"`)
+ n += nn
+ if err != nil {
+ return
+ }
+
+ enc := base64.NewEncoder(base64.StdEncoding, dst)
+
+ nn, err = enc.Write(e.Data)
+ n += nn
+ if err != nil {
+ return
+ }
+ err = enc.Close()
+ if err != nil {
+ return
+ }
+ nn, err = dst.WriteString(`"}`)
+ n += nn
+ return
+}
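+
+// Illustrative note (not part of upstream msgp): for an extension type
+// with no registered decoder, rwExtension emits an object of the shape
+//
+//	{"type":5,"data":"aGVsbG8="}
+//
+// where "data" is the base64-encoded raw payload (here, "hello" with a
+// hypothetical extension type of 5). Registered extensions are rendered
+// with encoding/json instead.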
+
+func rwString(dst jsWriter, src *Reader) (n int, err error) {
+ var p []byte
+ p, err = src.R.Peek(1)
+ if err != nil {
+ return
+ }
+ lead := p[0]
+ var read int
+
+ if isfixstr(lead) {
+ read = int(rfixstr(lead))
+ src.R.Skip(1)
+ goto write
+ }
+
+ switch lead {
+ case mstr8:
+ p, err = src.R.Next(2)
+ if err != nil {
+ return
+ }
+ read = int(uint8(p[1]))
+ case mstr16:
+ p, err = src.R.Next(3)
+ if err != nil {
+ return
+ }
+ read = int(big.Uint16(p[1:]))
+ case mstr32:
+ p, err = src.R.Next(5)
+ if err != nil {
+ return
+ }
+ read = int(big.Uint32(p[1:]))
+ default:
+ err = badPrefix(StrType, lead)
+ return
+ }
+write:
+ p, err = src.R.Next(read)
+ if err != nil {
+ return
+ }
+ n, err = rwquoted(dst, p)
+ return
+}
+
+func rwBytes(dst jsWriter, src *Reader) (n int, err error) {
+ var nn int
+ err = dst.WriteByte('"')
+ if err != nil {
+ return
+ }
+ n++
+ src.scratch, err = src.ReadBytes(src.scratch[:0])
+ if err != nil {
+ return
+ }
+ enc := base64.NewEncoder(base64.StdEncoding, dst)
+ nn, err = enc.Write(src.scratch)
+ n += nn
+ if err != nil {
+ return
+ }
+ err = enc.Close()
+ if err != nil {
+ return
+ }
+ err = dst.WriteByte('"')
+ if err != nil {
+ return
+ }
+ n++
+ return
+}
+
+// Below (c) The Go Authors, 2009-2014
+// Subject to the BSD-style license found at http://golang.org
+//
+// see: encoding/json/encode.go:(*encodeState).stringBytes()
+func rwquoted(dst jsWriter, s []byte) (n int, err error) {
+ var nn int
+ err = dst.WriteByte('"')
+ if err != nil {
+ return
+ }
+ n++
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+ i++
+ continue
+ }
+ if start < i {
+ nn, err = dst.Write(s[start:i])
+ n += nn
+ if err != nil {
+ return
+ }
+ }
+ switch b {
+ case '\\', '"':
+ err = dst.WriteByte('\\')
+ if err != nil {
+ return
+ }
+ n++
+ err = dst.WriteByte(b)
+ if err != nil {
+ return
+ }
+ n++
+ case '\n':
+ err = dst.WriteByte('\\')
+ if err != nil {
+ return
+ }
+ n++
+ err = dst.WriteByte('n')
+ if err != nil {
+ return
+ }
+ n++
+ case '\r':
+ err = dst.WriteByte('\\')
+ if err != nil {
+ return
+ }
+ n++
+ err = dst.WriteByte('r')
+ if err != nil {
+ return
+ }
+ n++
+ default:
+ nn, err = dst.WriteString(`\u00`)
+ n += nn
+ if err != nil {
+ return
+ }
+ err = dst.WriteByte(hex[b>>4])
+ if err != nil {
+ return
+ }
+ n++
+ err = dst.WriteByte(hex[b&0xF])
+ if err != nil {
+ return
+ }
+ n++
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRune(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ nn, err = dst.Write(s[start:i])
+ n += nn
+ if err != nil {
+ return
+ }
+ nn, err = dst.WriteString(`\ufffd`)
+ n += nn
+ if err != nil {
+ return
+ }
+ i += size
+ start = i
+ continue
+ }
+ }
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ nn, err = dst.Write(s[start:i])
+ n += nn
+ if err != nil {
+ return
+ }
+ nn, err = dst.WriteString(`\u202`)
+ n += nn
+ if err != nil {
+ return
+ }
+ err = dst.WriteByte(hex[c&0xF])
+ if err != nil {
+ return
+ }
+ n++
+ }
+ }
+ i += size
+ }
+ if start < len(s) {
+ nn, err = dst.Write(s[start:])
+ n += nn
+ if err != nil {
+ return
+ }
+ }
+ err = dst.WriteByte('"')
+ if err != nil {
+ return
+ }
+ n++
+ return
+}
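+
+// Illustrative note (not part of upstream msgp): rwquoted escapes
+// quotes, backslashes, control characters, and the HTML-sensitive
+// characters <, >, and &, mirroring encoding/json. For example:
+//
+//	rwquoted(dst, []byte("a\"b\nc")) // writes "a\"b\nc"
+//	rwquoted(dst, []byte("<b>"))     // writes "\u003cb\u003e"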
diff --git a/vendor/github.com/tinylib/msgp/msgp/json_bytes.go b/vendor/github.com/tinylib/msgp/msgp/json_bytes.go
new file mode 100644
index 00000000..438caf53
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/json_bytes.go
@@ -0,0 +1,363 @@
+package msgp
+
+import (
+ "bufio"
+ "encoding/base64"
+ "encoding/json"
+ "io"
+ "strconv"
+ "time"
+)
+
+var unfuns [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error)
+
+func init() {
+
+ // NOTE(pmh): this is best expressed as a jump table,
+ // but gc doesn't do that yet. revisit post-go1.5.
+ unfuns = [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error){
+ StrType: rwStringBytes,
+ BinType: rwBytesBytes,
+ MapType: rwMapBytes,
+ ArrayType: rwArrayBytes,
+ Float64Type: rwFloat64Bytes,
+ Float32Type: rwFloat32Bytes,
+ BoolType: rwBoolBytes,
+ IntType: rwIntBytes,
+ UintType: rwUintBytes,
+ NilType: rwNullBytes,
+ ExtensionType: rwExtensionBytes,
+ Complex64Type: rwExtensionBytes,
+ Complex128Type: rwExtensionBytes,
+ TimeType: rwTimeBytes,
+ }
+}
+
+// UnmarshalAsJSON takes raw messagepack and writes
+// it as JSON to 'w'. If an error is returned, the
+// bytes not translated will also be returned. If
+// no errors are encountered, the length of the returned
+// slice will be zero.
+func UnmarshalAsJSON(w io.Writer, msg []byte) ([]byte, error) {
+ var (
+ scratch []byte
+ cast bool
+ dst jsWriter
+ err error
+ )
+ if jsw, ok := w.(jsWriter); ok {
+ dst = jsw
+ cast = true
+ } else {
+ dst = bufio.NewWriterSize(w, 512)
+ }
+ for len(msg) > 0 && err == nil {
+ msg, scratch, err = writeNext(dst, msg, scratch)
+ }
+ if !cast && err == nil {
+ err = dst.(*bufio.Writer).Flush()
+ }
+ return msg, err
+}
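+
+// Illustrative sketch (not part of upstream msgp) of typical usage,
+// assuming 'raw' holds one or more complete MessagePack objects:
+//
+//	var js bytes.Buffer
+//	left, err := UnmarshalAsJSON(&js, raw)
+//	if err != nil {
+//		// 'left' holds the bytes that were not translated
+//	}
+//	fmt.Println(js.String()) // the JSON rendering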
+
+func writeNext(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+ if len(msg) < 1 {
+ return msg, scratch, ErrShortBytes
+ }
+ t := getType(msg[0])
+ if t == InvalidType {
+ return msg, scratch, InvalidPrefixError(msg[0])
+ }
+ if t == ExtensionType {
+ et, err := peekExtension(msg)
+ if err != nil {
+ return nil, scratch, err
+ }
+ if et == TimeExtension {
+ t = TimeType
+ }
+ }
+ return unfuns[t](w, msg, scratch)
+}
+
+func rwArrayBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+ sz, msg, err := ReadArrayHeaderBytes(msg)
+ if err != nil {
+ return msg, scratch, err
+ }
+ err = w.WriteByte('[')
+ if err != nil {
+ return msg, scratch, err
+ }
+ for i := uint32(0); i < sz; i++ {
+ if i != 0 {
+ err = w.WriteByte(',')
+ if err != nil {
+ return msg, scratch, err
+ }
+ }
+ msg, scratch, err = writeNext(w, msg, scratch)
+ if err != nil {
+ return msg, scratch, err
+ }
+ }
+ err = w.WriteByte(']')
+ return msg, scratch, err
+}
+
+func rwMapBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+ sz, msg, err := ReadMapHeaderBytes(msg)
+ if err != nil {
+ return msg, scratch, err
+ }
+ err = w.WriteByte('{')
+ if err != nil {
+ return msg, scratch, err
+ }
+ for i := uint32(0); i < sz; i++ {
+ if i != 0 {
+ err = w.WriteByte(',')
+ if err != nil {
+ return msg, scratch, err
+ }
+ }
+ msg, scratch, err = rwMapKeyBytes(w, msg, scratch)
+ if err != nil {
+ return msg, scratch, err
+ }
+ err = w.WriteByte(':')
+ if err != nil {
+ return msg, scratch, err
+ }
+ msg, scratch, err = writeNext(w, msg, scratch)
+ if err != nil {
+ return msg, scratch, err
+ }
+ }
+ err = w.WriteByte('}')
+ return msg, scratch, err
+}
+
+func rwMapKeyBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+ msg, scratch, err := rwStringBytes(w, msg, scratch)
+ if err != nil {
+ if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType {
+ return rwBytesBytes(w, msg, scratch)
+ }
+ }
+ return msg, scratch, err
+}
+
+func rwStringBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+ str, msg, err := ReadStringZC(msg)
+ if err != nil {
+ return msg, scratch, err
+ }
+ _, err = rwquoted(w, str)
+ return msg, scratch, err
+}
+
+func rwBytesBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+ bts, msg, err := ReadBytesZC(msg)
+ if err != nil {
+ return msg, scratch, err
+ }
+ l := base64.StdEncoding.EncodedLen(len(bts))
+ if cap(scratch) >= l {
+ scratch = scratch[0:l]
+ } else {
+ scratch = make([]byte, l)
+ }
+ base64.StdEncoding.Encode(scratch, bts)
+ err = w.WriteByte('"')
+ if err != nil {
+ return msg, scratch, err
+ }
+ _, err = w.Write(scratch)
+ if err != nil {
+ return msg, scratch, err
+ }
+ err = w.WriteByte('"')
+ return msg, scratch, err
+}
+
+func rwNullBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+ msg, err := ReadNilBytes(msg)
+ if err != nil {
+ return msg, scratch, err
+ }
+ _, err = w.Write(null)
+ return msg, scratch, err
+}
+
+func rwBoolBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+ b, msg, err := ReadBoolBytes(msg)
+ if err != nil {
+ return msg, scratch, err
+ }
+ if b {
+ _, err = w.WriteString("true")
+ return msg, scratch, err
+ }
+ _, err = w.WriteString("false")
+ return msg, scratch, err
+}
+
+func rwIntBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+ i, msg, err := ReadInt64Bytes(msg)
+ if err != nil {
+ return msg, scratch, err
+ }
+ scratch = strconv.AppendInt(scratch[0:0], i, 10)
+ _, err = w.Write(scratch)
+ return msg, scratch, err
+}
+
+func rwUintBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+ u, msg, err := ReadUint64Bytes(msg)
+ if err != nil {
+ return msg, scratch, err
+ }
+ scratch = strconv.AppendUint(scratch[0:0], u, 10)
+ _, err = w.Write(scratch)
+ return msg, scratch, err
+}
+
+func rwFloatBytes(w jsWriter, msg []byte, f64 bool, scratch []byte) ([]byte, []byte, error) {
+ var f float64
+ var err error
+ var sz int
+ if f64 {
+ sz = 64
+ f, msg, err = ReadFloat64Bytes(msg)
+ } else {
+ sz = 32
+ var v float32
+ v, msg, err = ReadFloat32Bytes(msg)
+ f = float64(v)
+ }
+ if err != nil {
+ return msg, scratch, err
+ }
+	scratch = strconv.AppendFloat(scratch[:0], f, 'f', -1, sz)
+ _, err = w.Write(scratch)
+ return msg, scratch, err
+}
+
+func rwFloat32Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+ var f float32
+ var err error
+ f, msg, err = ReadFloat32Bytes(msg)
+ if err != nil {
+ return msg, scratch, err
+ }
+ scratch = strconv.AppendFloat(scratch[:0], float64(f), 'f', -1, 32)
+ _, err = w.Write(scratch)
+ return msg, scratch, err
+}
+
+func rwFloat64Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+ var f float64
+ var err error
+ f, msg, err = ReadFloat64Bytes(msg)
+ if err != nil {
+ return msg, scratch, err
+ }
+ scratch = strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)
+ _, err = w.Write(scratch)
+ return msg, scratch, err
+}
+
+func rwTimeBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+ var t time.Time
+ var err error
+ t, msg, err = ReadTimeBytes(msg)
+ if err != nil {
+ return msg, scratch, err
+ }
+ bts, err := t.MarshalJSON()
+ if err != nil {
+ return msg, scratch, err
+ }
+ _, err = w.Write(bts)
+ return msg, scratch, err
+}
+
+func rwExtensionBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
+ var err error
+ var et int8
+ et, err = peekExtension(msg)
+ if err != nil {
+ return msg, scratch, err
+ }
+
+ // if it's time.Time
+ if et == TimeExtension {
+ var tm time.Time
+ tm, msg, err = ReadTimeBytes(msg)
+ if err != nil {
+ return msg, scratch, err
+ }
+ bts, err := tm.MarshalJSON()
+ if err != nil {
+ return msg, scratch, err
+ }
+ _, err = w.Write(bts)
+ return msg, scratch, err
+ }
+
+ // if the extension is registered,
+ // use its canonical JSON form
+ if f, ok := extensionReg[et]; ok {
+ e := f()
+ msg, err = ReadExtensionBytes(msg, e)
+ if err != nil {
+ return msg, scratch, err
+ }
+ bts, err := json.Marshal(e)
+ if err != nil {
+ return msg, scratch, err
+ }
+ _, err = w.Write(bts)
+ return msg, scratch, err
+ }
+
+ // otherwise, write `{"type": , "data": ""}`
+ r := RawExtension{}
+ r.Type = et
+ msg, err = ReadExtensionBytes(msg, &r)
+ if err != nil {
+ return msg, scratch, err
+ }
+ scratch, err = writeExt(w, r, scratch)
+ return msg, scratch, err
+}
+
+func writeExt(w jsWriter, r RawExtension, scratch []byte) ([]byte, error) {
+ _, err := w.WriteString(`{"type":`)
+ if err != nil {
+ return scratch, err
+ }
+ scratch = strconv.AppendInt(scratch[0:0], int64(r.Type), 10)
+ _, err = w.Write(scratch)
+ if err != nil {
+ return scratch, err
+ }
+ _, err = w.WriteString(`,"data":"`)
+ if err != nil {
+ return scratch, err
+ }
+ l := base64.StdEncoding.EncodedLen(len(r.Data))
+ if cap(scratch) >= l {
+ scratch = scratch[0:l]
+ } else {
+ scratch = make([]byte, l)
+ }
+ base64.StdEncoding.Encode(scratch, r.Data)
+ _, err = w.Write(scratch)
+ if err != nil {
+ return scratch, err
+ }
+ _, err = w.WriteString(`"}`)
+ return scratch, err
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/json_bytes_test.go b/vendor/github.com/tinylib/msgp/msgp/json_bytes_test.go
new file mode 100644
index 00000000..726974ab
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/json_bytes_test.go
@@ -0,0 +1,121 @@
+package msgp
+
+import (
+ "bytes"
+ "encoding/json"
+ "testing"
+ "time"
+)
+
+func TestUnmarshalJSON(t *testing.T) {
+ var buf bytes.Buffer
+ enc := NewWriter(&buf)
+ enc.WriteMapHeader(5)
+
+ enc.WriteString("thing_1")
+ enc.WriteString("a string object")
+
+ enc.WriteString("a_map")
+ enc.WriteMapHeader(2)
+
+ // INNER
+ enc.WriteString("cmplx")
+ enc.WriteComplex64(complex(1.0, 1.0))
+ enc.WriteString("int_b")
+ enc.WriteInt64(-100)
+
+ enc.WriteString("an extension")
+ enc.WriteExtension(&RawExtension{Type: 1, Data: []byte("blaaahhh")})
+
+ enc.WriteString("some bytes")
+ enc.WriteBytes([]byte("here are some bytes"))
+
+ enc.WriteString("now")
+ enc.WriteTime(time.Now())
+
+ enc.Flush()
+
+ var js bytes.Buffer
+ _, err := UnmarshalAsJSON(&js, buf.Bytes())
+ if err != nil {
+ t.Logf("%s", js.Bytes())
+ t.Fatal(err)
+ }
+ mp := make(map[string]interface{})
+ err = json.Unmarshal(js.Bytes(), &mp)
+ if err != nil {
+ t.Log(js.String())
+ t.Fatalf("Error unmarshaling: %s", err)
+ }
+
+ if len(mp) != 5 {
+ t.Errorf("map length should be %d, not %d", 5, len(mp))
+ }
+
+ so, ok := mp["thing_1"]
+ if !ok || so != "a string object" {
+ t.Errorf("expected %q; got %q", "a string object", so)
+ }
+
+ if _, ok := mp["now"]; !ok {
+ t.Error(`"now" field doesn't exist`)
+ }
+
+ c, ok := mp["a_map"]
+ if !ok {
+ t.Error(`"a_map" field doesn't exist`)
+ } else {
+ if m, ok := c.(map[string]interface{}); ok {
+ if _, ok := m["cmplx"]; !ok {
+ t.Error(`"a_map.cmplx" doesn't exist`)
+ }
+ } else {
+ t.Error(`can't type-assert "c" to map[string]interface{}`)
+ }
+
+ }
+
+ t.Logf("JSON: %s", js.Bytes())
+}
+
+func BenchmarkUnmarshalAsJSON(b *testing.B) {
+ var buf bytes.Buffer
+ enc := NewWriter(&buf)
+ enc.WriteMapHeader(4)
+
+ enc.WriteString("thing_1")
+ enc.WriteString("a string object")
+
+ enc.WriteString("a_first_map")
+ enc.WriteMapHeader(2)
+ enc.WriteString("float_a")
+ enc.WriteFloat32(1.0)
+ enc.WriteString("int_b")
+ enc.WriteInt64(-100)
+
+ enc.WriteString("an array")
+ enc.WriteArrayHeader(2)
+ enc.WriteBool(true)
+ enc.WriteUint(2089)
+
+ enc.WriteString("a_second_map")
+ enc.WriteMapStrStr(map[string]string{
+ "internal_one": "blah",
+ "internal_two": "blahhh...",
+ })
+ enc.Flush()
+
+ var js bytes.Buffer
+ bts := buf.Bytes()
+ _, err := UnmarshalAsJSON(&js, bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.SetBytes(int64(len(js.Bytes())))
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ js.Reset()
+ UnmarshalAsJSON(&js, bts)
+ }
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/json_test.go b/vendor/github.com/tinylib/msgp/msgp/json_test.go
new file mode 100644
index 00000000..439d4790
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/json_test.go
@@ -0,0 +1,142 @@
+package msgp
+
+import (
+ "bytes"
+ "encoding/json"
+ "reflect"
+ "testing"
+)
+
+func TestCopyJSON(t *testing.T) {
+ var buf bytes.Buffer
+ enc := NewWriter(&buf)
+ enc.WriteMapHeader(5)
+
+ enc.WriteString("thing_1")
+ enc.WriteString("a string object")
+
+ enc.WriteString("a_map")
+ enc.WriteMapHeader(2)
+ enc.WriteString("float_a")
+ enc.WriteFloat32(1.0)
+ enc.WriteString("int_b")
+ enc.WriteInt64(-100)
+
+ enc.WriteString("some bytes")
+ enc.WriteBytes([]byte("here are some bytes"))
+ enc.WriteString("a bool")
+ enc.WriteBool(true)
+
+ enc.WriteString("a map")
+ enc.WriteMapStrStr(map[string]string{
+ "internal_one": "blah",
+ "internal_two": "blahhh...",
+ })
+ enc.Flush()
+
+ var js bytes.Buffer
+ _, err := CopyToJSON(&js, &buf)
+ if err != nil {
+ t.Fatal(err)
+ }
+ mp := make(map[string]interface{})
+ err = json.Unmarshal(js.Bytes(), &mp)
+ if err != nil {
+ t.Log(js.String())
+ t.Fatalf("Error unmarshaling: %s", err)
+ }
+
+ if len(mp) != 5 {
+		t.Errorf("map length should be %d, not %d", 5, len(mp))
+ }
+
+ so, ok := mp["thing_1"]
+ if !ok || so != "a string object" {
+ t.Errorf("expected %q; got %q", "a string object", so)
+ }
+
+ in, ok := mp["a map"]
+ if !ok {
+ t.Error("no key 'a map'")
+ }
+ if inm, ok := in.(map[string]interface{}); !ok {
+ t.Error("inner map not type-assertable to map[string]interface{}")
+ } else {
+ inm1, ok := inm["internal_one"]
+ if !ok || !reflect.DeepEqual(inm1, "blah") {
+ t.Errorf("inner map field %q should be %q, not %q", "internal_one", "blah", inm1)
+ }
+ }
+}
+
+func BenchmarkCopyToJSON(b *testing.B) {
+ var buf bytes.Buffer
+ enc := NewWriter(&buf)
+ enc.WriteMapHeader(4)
+
+ enc.WriteString("thing_1")
+ enc.WriteString("a string object")
+
+ enc.WriteString("a_first_map")
+ enc.WriteMapHeader(2)
+ enc.WriteString("float_a")
+ enc.WriteFloat32(1.0)
+ enc.WriteString("int_b")
+ enc.WriteInt64(-100)
+
+ enc.WriteString("an array")
+ enc.WriteArrayHeader(2)
+ enc.WriteBool(true)
+ enc.WriteUint(2089)
+
+ enc.WriteString("a_second_map")
+ enc.WriteMapStrStr(map[string]string{
+ "internal_one": "blah",
+ "internal_two": "blahhh...",
+ })
+ enc.Flush()
+
+ var js bytes.Buffer
+ bts := buf.Bytes()
+ _, err := CopyToJSON(&js, &buf)
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.SetBytes(int64(len(js.Bytes())))
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ js.Reset()
+ CopyToJSON(&js, bytes.NewReader(bts))
+ }
+}
+
+func BenchmarkStdlibJSON(b *testing.B) {
+ obj := map[string]interface{}{
+ "thing_1": "a string object",
+ "a_first_map": map[string]interface{}{
+ "float_a": float32(1.0),
+ "float_b": -100,
+ },
+ "an array": []interface{}{
+ "part_A",
+ "part_B",
+ },
+ "a_second_map": map[string]interface{}{
+ "internal_one": "blah",
+ "internal_two": "blahhh...",
+ },
+ }
+ var js bytes.Buffer
+ err := json.NewEncoder(&js).Encode(&obj)
+ if err != nil {
+ b.Fatal(err)
+ }
+ b.SetBytes(int64(len(js.Bytes())))
+ b.ResetTimer()
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ js.Reset()
+ json.NewEncoder(&js).Encode(&obj)
+ }
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/number.go b/vendor/github.com/tinylib/msgp/msgp/number.go
new file mode 100644
index 00000000..ad07ef99
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/number.go
@@ -0,0 +1,267 @@
+package msgp
+
+import (
+ "math"
+ "strconv"
+)
+
+// The portable parts of the Number implementation
+
+// Number can be
+// an int64, uint64, float32,
+// or float64 internally.
+// It can decode itself
+// from any of the native
+// messagepack number types.
+// The zero-value of Number
+// is Int(0). Using the equality
+// operator with Number compares
+// both the type and the value
+// of the number.
+type Number struct {
+ // internally, this
+ // is just a tagged union.
+ // the raw bits of the number
+ // are stored the same way regardless.
+ bits uint64
+ typ Type
+}
+
+// AsInt sets the number to an int64.
+func (n *Number) AsInt(i int64) {
+
+ // we always store int(0)
+ // as {0, InvalidType} in
+ // order to preserve
+ // the behavior of the == operator
+ if i == 0 {
+ n.typ = InvalidType
+ n.bits = 0
+ return
+ }
+
+ n.typ = IntType
+ n.bits = uint64(i)
+}
+
+// AsUint sets the number to a uint64.
+func (n *Number) AsUint(u uint64) {
+ n.typ = UintType
+ n.bits = u
+}
+
+// AsFloat32 sets the value of the number
+// to a float32.
+func (n *Number) AsFloat32(f float32) {
+ n.typ = Float32Type
+ n.bits = uint64(math.Float32bits(f))
+}
+
+// AsFloat64 sets the value of the
+// number to a float64.
+func (n *Number) AsFloat64(f float64) {
+ n.typ = Float64Type
+ n.bits = math.Float64bits(f)
+}
+
+// Int casts the number as an int64, and
+// returns whether or not that was the
+// underlying type.
+func (n *Number) Int() (int64, bool) {
+ return int64(n.bits), n.typ == IntType || n.typ == InvalidType
+}
+
+// Uint casts the number as a uint64, and returns
+// whether or not that was the underlying type.
+func (n *Number) Uint() (uint64, bool) {
+ return n.bits, n.typ == UintType
+}
+
+// Float casts the number to a float64, and
+// returns whether or not that was the underlying
+// type (either a float64 or a float32).
+func (n *Number) Float() (float64, bool) {
+ switch n.typ {
+ case Float32Type:
+ return float64(math.Float32frombits(uint32(n.bits))), true
+ case Float64Type:
+ return math.Float64frombits(n.bits), true
+ default:
+ return 0.0, false
+ }
+}
+
+// Type will return one of:
+// Float64Type, Float32Type, UintType, or IntType.
+func (n *Number) Type() Type {
+ if n.typ == InvalidType {
+ return IntType
+ }
+ return n.typ
+}
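+
+// Illustrative sketch (not part of upstream msgp): because the zero
+// value of Number is Int(0), the accessors behave as follows:
+//
+//	var n Number
+//	i, ok := n.Int()    // i == 0, ok == true
+//	n.AsFloat64(3.5)
+//	f, ok2 := n.Float() // f == 3.5, ok2 == true
+//	_, ok3 := n.Int()   // ok3 == false: the type is now Float64Type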
+
+// DecodeMsg implements msgp.Decodable
+func (n *Number) DecodeMsg(r *Reader) error {
+ typ, err := r.NextType()
+ if err != nil {
+ return err
+ }
+ switch typ {
+ case Float32Type:
+ f, err := r.ReadFloat32()
+ if err != nil {
+ return err
+ }
+ n.AsFloat32(f)
+ return nil
+ case Float64Type:
+ f, err := r.ReadFloat64()
+ if err != nil {
+ return err
+ }
+ n.AsFloat64(f)
+ return nil
+ case IntType:
+ i, err := r.ReadInt64()
+ if err != nil {
+ return err
+ }
+ n.AsInt(i)
+ return nil
+ case UintType:
+ u, err := r.ReadUint64()
+ if err != nil {
+ return err
+ }
+ n.AsUint(u)
+ return nil
+ default:
+ return TypeError{Encoded: typ, Method: IntType}
+ }
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (n *Number) UnmarshalMsg(b []byte) ([]byte, error) {
+ typ := NextType(b)
+ switch typ {
+ case IntType:
+ i, o, err := ReadInt64Bytes(b)
+ if err != nil {
+ return b, err
+ }
+ n.AsInt(i)
+ return o, nil
+ case UintType:
+ u, o, err := ReadUint64Bytes(b)
+ if err != nil {
+ return b, err
+ }
+ n.AsUint(u)
+ return o, nil
+ case Float64Type:
+ f, o, err := ReadFloat64Bytes(b)
+ if err != nil {
+ return b, err
+ }
+ n.AsFloat64(f)
+ return o, nil
+ case Float32Type:
+ f, o, err := ReadFloat32Bytes(b)
+ if err != nil {
+ return b, err
+ }
+ n.AsFloat32(f)
+ return o, nil
+ default:
+ return b, TypeError{Method: IntType, Encoded: typ}
+ }
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (n *Number) MarshalMsg(b []byte) ([]byte, error) {
+ switch n.typ {
+ case IntType:
+ return AppendInt64(b, int64(n.bits)), nil
+ case UintType:
+ return AppendUint64(b, uint64(n.bits)), nil
+ case Float64Type:
+ return AppendFloat64(b, math.Float64frombits(n.bits)), nil
+ case Float32Type:
+ return AppendFloat32(b, math.Float32frombits(uint32(n.bits))), nil
+ default:
+ return AppendInt64(b, 0), nil
+ }
+}
+
+// EncodeMsg implements msgp.Encodable
+func (n *Number) EncodeMsg(w *Writer) error {
+ switch n.typ {
+ case IntType:
+ return w.WriteInt64(int64(n.bits))
+ case UintType:
+ return w.WriteUint64(n.bits)
+ case Float64Type:
+ return w.WriteFloat64(math.Float64frombits(n.bits))
+ case Float32Type:
+ return w.WriteFloat32(math.Float32frombits(uint32(n.bits)))
+ default:
+ return w.WriteInt64(0)
+ }
+}
+
+// Msgsize implements msgp.Sizer
+func (n *Number) Msgsize() int {
+ switch n.typ {
+ case Float32Type:
+ return Float32Size
+ case Float64Type:
+ return Float64Size
+ case IntType:
+ return Int64Size
+ case UintType:
+ return Uint64Size
+ default:
+ return 1 // fixint(0)
+ }
+}
+
+// MarshalJSON implements json.Marshaler
+func (n *Number) MarshalJSON() ([]byte, error) {
+ t := n.Type()
+ if t == InvalidType {
+ return []byte{'0'}, nil
+ }
+ out := make([]byte, 0, 32)
+ switch t {
+ case Float32Type, Float64Type:
+ f, _ := n.Float()
+ return strconv.AppendFloat(out, f, 'f', -1, 64), nil
+ case IntType:
+ i, _ := n.Int()
+ return strconv.AppendInt(out, i, 10), nil
+ case UintType:
+ u, _ := n.Uint()
+ return strconv.AppendUint(out, u, 10), nil
+ default:
+ panic("(*Number).typ is invalid")
+ }
+}
+
+// String implements fmt.Stringer
+func (n *Number) String() string {
+ switch n.typ {
+ case InvalidType:
+ return "0"
+ case Float32Type, Float64Type:
+ f, _ := n.Float()
+ return strconv.FormatFloat(f, 'f', -1, 64)
+ case IntType:
+ i, _ := n.Int()
+ return strconv.FormatInt(i, 10)
+ case UintType:
+ u, _ := n.Uint()
+ return strconv.FormatUint(u, 10)
+ default:
+ panic("(*Number).typ is invalid")
+ }
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/number_test.go b/vendor/github.com/tinylib/msgp/msgp/number_test.go
new file mode 100644
index 00000000..3490647c
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/number_test.go
@@ -0,0 +1,94 @@
+package msgp
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestNumber(t *testing.T) {
+
+ n := Number{}
+
+ if n.Type() != IntType {
+ t.Errorf("expected zero-value type to be %s; got %s", IntType, n.Type())
+ }
+
+ if n.String() != "0" {
+ t.Errorf("expected Number{}.String() to be \"0\" but got %q", n.String())
+ }
+
+ n.AsInt(248)
+ i, ok := n.Int()
+ if !ok || i != 248 || n.Type() != IntType || n.String() != "248" {
+ t.Errorf("%d in; %d out!", 248, i)
+ }
+
+ n.AsFloat64(3.141)
+ f, ok := n.Float()
+ if !ok || f != 3.141 || n.Type() != Float64Type || n.String() != "3.141" {
+ t.Errorf("%f in; %f out!", 3.141, f)
+ }
+
+ n.AsUint(40000)
+ u, ok := n.Uint()
+ if !ok || u != 40000 || n.Type() != UintType || n.String() != "40000" {
+ t.Errorf("%d in; %d out!", 40000, u)
+ }
+
+ nums := []interface{}{
+ float64(3.14159),
+ int64(-29081),
+ uint64(90821983),
+ float32(3.141),
+ }
+
+ var dat []byte
+ var buf bytes.Buffer
+ wr := NewWriter(&buf)
+ for _, n := range nums {
+ dat, _ = AppendIntf(dat, n)
+ wr.WriteIntf(n)
+ }
+ wr.Flush()
+
+ mout := make([]Number, len(nums))
+ dout := make([]Number, len(nums))
+
+ rd := NewReader(&buf)
+ unm := dat
+ for i := range nums {
+ var err error
+ unm, err = mout[i].UnmarshalMsg(unm)
+ if err != nil {
+ t.Fatal("unmarshal error:", err)
+ }
+ err = dout[i].DecodeMsg(rd)
+ if err != nil {
+ t.Fatal("decode error:", err)
+ }
+ if mout[i] != dout[i] {
+ t.Errorf("for %#v, got %#v from unmarshal and %#v from decode", nums[i], mout[i], dout[i])
+ }
+ }
+
+ buf.Reset()
+ var odat []byte
+ for i := range nums {
+ var err error
+ odat, err = mout[i].MarshalMsg(odat)
+ if err != nil {
+ t.Fatal("marshal error:", err)
+ }
+ err = dout[i].EncodeMsg(wr)
+ }
+ wr.Flush()
+
+ if !bytes.Equal(dat, odat) {
+ t.Errorf("marshal: expected output %#v; got %#v", dat, odat)
+ }
+
+ if !bytes.Equal(dat, buf.Bytes()) {
+ t.Errorf("encode: expected output %#v; got %#v", dat, buf.Bytes())
+ }
+
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/raw_test.go b/vendor/github.com/tinylib/msgp/msgp/raw_test.go
new file mode 100644
index 00000000..9f3321f4
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/raw_test.go
@@ -0,0 +1,85 @@
+package msgp
+
+import (
+ "bytes"
+ "testing"
+ "time"
+)
+
+// all standard interfaces
+type allifaces interface {
+ Encodable
+ Decodable
+ Marshaler
+ Unmarshaler
+ Sizer
+}
+
+func TestRaw(t *testing.T) {
+ bts := make([]byte, 0, 512)
+ bts = AppendMapHeader(bts, 3)
+ bts = AppendString(bts, "key_one")
+ bts = AppendFloat64(bts, -1.0)
+ bts = AppendString(bts, "key_two")
+ bts = AppendString(bts, "value_two")
+ bts = AppendString(bts, "key_three")
+ bts = AppendTime(bts, time.Now())
+
+ var r Raw
+
+ // verify that Raw satisfies
+ // the interfaces we want it to
+ var _ allifaces = &r
+
+ // READ TESTS
+
+ extra, err := r.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal("error from UnmarshalMsg:", err)
+ }
+ if len(extra) != 0 {
+ t.Errorf("expected 0 bytes left; found %d", len(extra))
+ }
+ if !bytes.Equal([]byte(r), bts) {
+ t.Fatal("value of raw and input slice are not equal after UnmarshalMsg")
+ }
+
+ r = r[:0]
+
+ var buf bytes.Buffer
+ buf.Write(bts)
+
+ rd := NewReader(&buf)
+
+ err = r.DecodeMsg(rd)
+ if err != nil {
+ t.Fatal("error from DecodeMsg:", err)
+ }
+
+ if !bytes.Equal([]byte(r), bts) {
+ t.Fatal("value of raw and input slice are not equal after DecodeMsg")
+ }
+
+ // WRITE TESTS
+
+ buf.Reset()
+ wr := NewWriter(&buf)
+ err = r.EncodeMsg(wr)
+ if err != nil {
+ t.Fatal("error from EncodeMsg:", err)
+ }
+
+ wr.Flush()
+ if !bytes.Equal(buf.Bytes(), bts) {
+ t.Fatal("value of buf.Bytes() and input slice are not equal after EncodeMsg")
+ }
+
+ var outsl []byte
+ outsl, err = r.MarshalMsg(outsl)
+ if err != nil {
+ t.Fatal("error from MarshalMsg:", err)
+ }
+ if !bytes.Equal(outsl, bts) {
+ t.Fatal("value of output and input of MarshalMsg are not equal.")
+ }
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/read.go b/vendor/github.com/tinylib/msgp/msgp/read.go
new file mode 100644
index 00000000..20cd1ef8
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/read.go
@@ -0,0 +1,1265 @@
+package msgp
+
+import (
+ "io"
+ "math"
+ "sync"
+ "time"
+
+ "github.com/philhofer/fwd"
+)
+
+// where we keep old *Readers
+var readerPool = sync.Pool{New: func() interface{} { return &Reader{} }}
+
+// Type is a MessagePack wire type,
+// including this package's built-in
+// extension types.
+type Type byte
+
+// MessagePack Types
+//
+// The zero value of Type
+// is InvalidType.
+const (
+ InvalidType Type = iota
+
+ // MessagePack built-in types
+
+ StrType
+ BinType
+ MapType
+ ArrayType
+ Float64Type
+ Float32Type
+ BoolType
+ IntType
+ UintType
+ NilType
+ ExtensionType
+
+ // pseudo-types provided
+ // by extensions
+
+ Complex64Type
+ Complex128Type
+ TimeType
+
+ _maxtype
+)
+
+// String implements fmt.Stringer
+func (t Type) String() string {
+ switch t {
+ case StrType:
+ return "str"
+ case BinType:
+ return "bin"
+ case MapType:
+ return "map"
+ case ArrayType:
+ return "array"
+ case Float64Type:
+ return "float64"
+ case Float32Type:
+ return "float32"
+ case BoolType:
+ return "bool"
+ case UintType:
+ return "uint"
+ case IntType:
+ return "int"
+ case ExtensionType:
+ return "ext"
+ case NilType:
+ return "nil"
+ default:
+ return ""
+ }
+}
+
+func freeR(m *Reader) {
+ readerPool.Put(m)
+}
+
+// Unmarshaler is the interface fulfilled
+// by objects that know how to unmarshal
+// themselves from MessagePack.
+// UnmarshalMsg unmarshals the object
+// from binary, returing any leftover
+// bytes and any errors encountered.
+type Unmarshaler interface {
+ UnmarshalMsg([]byte) ([]byte, error)
+}
+
+// Decodable is the interface fulfilled
+// by objects that know how to read
+// themselves from a *Reader.
+type Decodable interface {
+ DecodeMsg(*Reader) error
+}
+
+// Decode decodes 'd' from 'r'.
+func Decode(r io.Reader, d Decodable) error {
+ rd := NewReader(r)
+ err := d.DecodeMsg(rd)
+ freeR(rd)
+ return err
+}
+
+// NewReader returns a *Reader that
+// reads from the provided reader. The
+// reader will be buffered.
+func NewReader(r io.Reader) *Reader {
+ p := readerPool.Get().(*Reader)
+ if p.R == nil {
+ p.R = fwd.NewReader(r)
+ } else {
+ p.R.Reset(r)
+ }
+ return p
+}
+
+// NewReaderSize returns a *Reader with a buffer of the given size.
+// (This is vastly preferable to passing the decoder a reader that is already buffered.)
+func NewReaderSize(r io.Reader, sz int) *Reader {
+ return &Reader{R: fwd.NewReaderSize(r, sz)}
+}
+
+// Reader wraps an io.Reader and provides
+// methods to read MessagePack-encoded values
+// from it. Readers are buffered.
+type Reader struct {
+ // R is the buffered reader
+ // that the Reader uses
+ // to decode MessagePack.
+ // The Reader itself
+ // is stateless; all the
+ // buffering is done
+ // within R.
+ R *fwd.Reader
+ scratch []byte
+}
+
+// Read implements `io.Reader`
+func (m *Reader) Read(p []byte) (int, error) {
+ return m.R.Read(p)
+}
+
+// CopyNext reads the next object from m without decoding it and writes it to w.
+// It avoids unnecessary copies internally.
+func (m *Reader) CopyNext(w io.Writer) (int64, error) {
+ sz, o, err := getNextSize(m.R)
+ if err != nil {
+ return 0, err
+ }
+
+ var n int64
+ // Opportunistic optimization: if we can fit the whole thing in the m.R
+ // buffer, then just get a pointer to that, and pass it to w.Write,
+ // avoiding an allocation.
+ if int(sz) <= m.R.BufferSize() {
+ var nn int
+ var buf []byte
+ buf, err = m.R.Next(int(sz))
+ if err != nil {
+ if err == io.ErrUnexpectedEOF {
+ err = ErrShortBytes
+ }
+ return 0, err
+ }
+ nn, err = w.Write(buf)
+ n += int64(nn)
+ } else {
+ // Fall back to io.CopyN.
+ // May avoid allocating if w is a ReaderFrom (e.g. bytes.Buffer)
+ n, err = io.CopyN(w, m.R, int64(sz))
+ if err == io.ErrUnexpectedEOF {
+ err = ErrShortBytes
+ }
+ }
+ if err != nil {
+ return n, err
+ } else if n < int64(sz) {
+ return n, io.ErrShortWrite
+ }
+
+ // for maps and slices, read elements
+ for x := uintptr(0); x < o; x++ {
+ var n2 int64
+ n2, err = m.CopyNext(w)
+ if err != nil {
+ return n, err
+ }
+ n += n2
+ }
+ return n, nil
+}
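+
+// Illustrative sketch (not part of upstream msgp): CopyNext can split
+// a stream of MessagePack objects without decoding them:
+//
+//	var buf bytes.Buffer
+//	for {
+//		buf.Reset()
+//		if _, err := m.CopyNext(&buf); err != nil {
+//			break // io.EOF once the stream is exhausted
+//		}
+//		handle(buf.Bytes()) // 'handle' is a hypothetical callback
+//	}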
+
+// ReadFull implements `io.ReadFull`
+func (m *Reader) ReadFull(p []byte) (int, error) {
+ return m.R.ReadFull(p)
+}
+
+// Reset resets the underlying reader.
+func (m *Reader) Reset(r io.Reader) { m.R.Reset(r) }
+
+// Buffered returns the number of bytes currently in the read buffer.
+func (m *Reader) Buffered() int { return m.R.Buffered() }
+
+// BufferSize returns the capacity of the read buffer.
+func (m *Reader) BufferSize() int { return m.R.BufferSize() }
+
+// NextType returns the next object type to be decoded.
+func (m *Reader) NextType() (Type, error) {
+ p, err := m.R.Peek(1)
+ if err != nil {
+ return InvalidType, err
+ }
+ t := getType(p[0])
+ if t == InvalidType {
+ return t, InvalidPrefixError(p[0])
+ }
+ if t == ExtensionType {
+ v, err := m.peekExtensionType()
+ if err != nil {
+ return InvalidType, err
+ }
+ switch v {
+ case Complex64Extension:
+ return Complex64Type, nil
+ case Complex128Extension:
+ return Complex128Type, nil
+ case TimeExtension:
+ return TimeType, nil
+ }
+ }
+ return t, nil
+}
+
+// IsNil returns whether or not
+// the next byte is a null messagepack byte
+func (m *Reader) IsNil() bool {
+ p, err := m.R.Peek(1)
+ return err == nil && p[0] == mnil
+}
+
+// getNextSize returns the size of the next object on the wire.
+// returns (obj size, obj elements, error)
+// only maps and arrays have non-zero obj elements
+// for maps and arrays, obj size does not include elements
+//
+// use uintptr b/c it's guaranteed to be large enough
+// to hold whatever we can fit in memory.
+func getNextSize(r *fwd.Reader) (uintptr, uintptr, error) {
+ b, err := r.Peek(1)
+ if err != nil {
+ return 0, 0, err
+ }
+ lead := b[0]
+ spec := &sizes[lead]
+ size, mode := spec.size, spec.extra
+ if size == 0 {
+ return 0, 0, InvalidPrefixError(lead)
+ }
+ if mode >= 0 {
+ return uintptr(size), uintptr(mode), nil
+ }
+ b, err = r.Peek(int(size))
+ if err != nil {
+ return 0, 0, err
+ }
+ switch mode {
+ case extra8:
+ return uintptr(size) + uintptr(b[1]), 0, nil
+ case extra16:
+ return uintptr(size) + uintptr(big.Uint16(b[1:])), 0, nil
+ case extra32:
+ return uintptr(size) + uintptr(big.Uint32(b[1:])), 0, nil
+ case map16v:
+ return uintptr(size), 2 * uintptr(big.Uint16(b[1:])), nil
+ case map32v:
+ return uintptr(size), 2 * uintptr(big.Uint32(b[1:])), nil
+ case array16v:
+ return uintptr(size), uintptr(big.Uint16(b[1:])), nil
+ case array32v:
+ return uintptr(size), uintptr(big.Uint32(b[1:])), nil
+ default:
+ return 0, 0, fatal
+ }
+}
+
+// Skip skips over the next object, regardless of
+// its type. If it is an array or map, the whole array
+// or map will be skipped.
+func (m *Reader) Skip() error {
+ var (
+ v uintptr // bytes
+ o uintptr // objects
+ err error
+ p []byte
+ )
+
+ // we can use the faster
+ // method if we have enough
+ // buffered data
+ if m.R.Buffered() >= 5 {
+ p, err = m.R.Peek(5)
+ if err != nil {
+ return err
+ }
+ v, o, err = getSize(p)
+ if err != nil {
+ return err
+ }
+ } else {
+ v, o, err = getNextSize(m.R)
+ if err != nil {
+ return err
+ }
+ }
+
+ // 'v' is always non-zero
+ // if err == nil
+ _, err = m.R.Skip(int(v))
+ if err != nil {
+ return err
+ }
+
+ // for maps and slices, skip elements
+ for x := uintptr(0); x < o; x++ {
+ err = m.Skip()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
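+
+// Illustrative sketch (not part of upstream msgp): because Skip
+// consumes maps and arrays recursively, it is the easy way to discard
+// fields you do not recognize:
+//
+//	sz, err := m.ReadMapHeader()
+//	for i := uint32(0); err == nil && i < sz; i++ {
+//		if err = m.Skip(); err == nil { // skip the key
+//			err = m.Skip() // skip the value, however nested
+//		}
+//	}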
+
+// ReadMapHeader reads the next object
+// as a map header and returns the size
+// of the map.
+// It will return a TypeError{} if the next
+// object is not a map.
+func (m *Reader) ReadMapHeader() (sz uint32, err error) {
+ var p []byte
+ var lead byte
+ p, err = m.R.Peek(1)
+ if err != nil {
+ return
+ }
+ lead = p[0]
+ if isfixmap(lead) {
+ sz = uint32(rfixmap(lead))
+ _, err = m.R.Skip(1)
+ return
+ }
+ switch lead {
+ case mmap16:
+ p, err = m.R.Next(3)
+ if err != nil {
+ return
+ }
+ sz = uint32(big.Uint16(p[1:]))
+ return
+ case mmap32:
+ p, err = m.R.Next(5)
+ if err != nil {
+ return
+ }
+ sz = big.Uint32(p[1:])
+ return
+ default:
+ err = badPrefix(MapType, lead)
+ return
+ }
+}
+
+// ReadMapKey reads either a 'str' or 'bin' field from
+// the reader and returns the value as a []byte. It uses
+// scratch for storage if it is large enough.
+func (m *Reader) ReadMapKey(scratch []byte) ([]byte, error) {
+ out, err := m.ReadStringAsBytes(scratch)
+ if err != nil {
+ if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType {
+ return m.ReadBytes(scratch)
+ }
+ return nil, err
+ }
+ return out, nil
+}
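+
+// Illustrative sketch (not part of upstream msgp): a typical map
+// decoding loop pairs ReadMapHeader with ReadMapKey, reusing one
+// scratch buffer across iterations:
+//
+//	sz, err := m.ReadMapHeader()
+//	var scratch []byte
+//	for i := uint32(0); err == nil && i < sz; i++ {
+//		scratch, err = m.ReadMapKey(scratch)
+//		if err != nil {
+//			break
+//		}
+//		switch string(scratch) {
+//		case "name":
+//			// read the value with the matching Read* method
+//		default:
+//			err = m.Skip()
+//		}
+//	}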
+
+// ReadMapKeyPtr returns a []byte pointing to the contents
+// of a valid map key. The key cannot be empty, and it
+// must be shorter than the total buffer size of the
+// *Reader. Additionally, the returned slice is only
+// valid until the next *Reader method call. Users
+// should exercise extreme care when using this
+// method; writing into the returned slice may
+// corrupt future reads.
+func (m *Reader) ReadMapKeyPtr() ([]byte, error) {
+ p, err := m.R.Peek(1)
+ if err != nil {
+ return nil, err
+ }
+ lead := p[0]
+ var read int
+ if isfixstr(lead) {
+ read = int(rfixstr(lead))
+ m.R.Skip(1)
+ goto fill
+ }
+ switch lead {
+ case mstr8, mbin8:
+ p, err = m.R.Next(2)
+ if err != nil {
+ return nil, err
+ }
+ read = int(p[1])
+ case mstr16, mbin16:
+ p, err = m.R.Next(3)
+ if err != nil {
+ return nil, err
+ }
+ read = int(big.Uint16(p[1:]))
+ case mstr32, mbin32:
+ p, err = m.R.Next(5)
+ if err != nil {
+ return nil, err
+ }
+ read = int(big.Uint32(p[1:]))
+ default:
+ return nil, badPrefix(StrType, lead)
+ }
+fill:
+ if read == 0 {
+ return nil, ErrShortBytes
+ }
+ return m.R.Next(read)
+}
+
+// ReadArrayHeader reads the next object as an
+// array header and returns the size of the array.
+// It will return a TypeError{} if the next
+// object is not an array.
+func (m *Reader) ReadArrayHeader() (sz uint32, err error) {
+ var lead byte
+ var p []byte
+ p, err = m.R.Peek(1)
+ if err != nil {
+ return
+ }
+ lead = p[0]
+ if isfixarray(lead) {
+ sz = uint32(rfixarray(lead))
+ _, err = m.R.Skip(1)
+ return
+ }
+ switch lead {
+ case marray16:
+ p, err = m.R.Next(3)
+ if err != nil {
+ return
+ }
+ sz = uint32(big.Uint16(p[1:]))
+ return
+
+ case marray32:
+ p, err = m.R.Next(5)
+ if err != nil {
+ return
+ }
+ sz = big.Uint32(p[1:])
+ return
+
+ default:
+ err = badPrefix(ArrayType, lead)
+ return
+ }
+}
+
+// ReadNil reads a 'nil' MessagePack byte from the reader
+func (m *Reader) ReadNil() error {
+ p, err := m.R.Peek(1)
+ if err != nil {
+ return err
+ }
+ if p[0] != mnil {
+ return badPrefix(NilType, p[0])
+ }
+ _, err = m.R.Skip(1)
+ return err
+}
+
+// ReadFloat64 reads a float64 from the reader.
+// (If the value on the wire is encoded as a float32,
+// it will be up-cast to a float64.)
+func (m *Reader) ReadFloat64() (f float64, err error) {
+ var p []byte
+ p, err = m.R.Peek(9)
+ if err != nil {
+		// we'll allow a conversion from float32 to float64,
+ // since we don't lose any precision
+ if err == io.EOF && len(p) > 0 && p[0] == mfloat32 {
+ ef, err := m.ReadFloat32()
+ return float64(ef), err
+ }
+ return
+ }
+ if p[0] != mfloat64 {
+ // see above
+ if p[0] == mfloat32 {
+ ef, err := m.ReadFloat32()
+ return float64(ef), err
+ }
+ err = badPrefix(Float64Type, p[0])
+ return
+ }
+ f = math.Float64frombits(getMuint64(p))
+ _, err = m.R.Skip(9)
+ return
+}
+
+// ReadFloat32 reads a float32 from the reader
+func (m *Reader) ReadFloat32() (f float32, err error) {
+ var p []byte
+ p, err = m.R.Peek(5)
+ if err != nil {
+ return
+ }
+ if p[0] != mfloat32 {
+ err = badPrefix(Float32Type, p[0])
+ return
+ }
+ f = math.Float32frombits(getMuint32(p))
+ _, err = m.R.Skip(5)
+ return
+}
+
+// ReadBool reads a bool from the reader
+func (m *Reader) ReadBool() (b bool, err error) {
+ var p []byte
+ p, err = m.R.Peek(1)
+ if err != nil {
+ return
+ }
+ switch p[0] {
+ case mtrue:
+ b = true
+ case mfalse:
+ default:
+ err = badPrefix(BoolType, p[0])
+ return
+ }
+ _, err = m.R.Skip(1)
+ return
+}
+
+// ReadInt64 reads an int64 from the reader
+func (m *Reader) ReadInt64() (i int64, err error) {
+ var p []byte
+ var lead byte
+ p, err = m.R.Peek(1)
+ if err != nil {
+ return
+ }
+ lead = p[0]
+
+ if isfixint(lead) {
+ i = int64(rfixint(lead))
+ _, err = m.R.Skip(1)
+ return
+ } else if isnfixint(lead) {
+ i = int64(rnfixint(lead))
+ _, err = m.R.Skip(1)
+ return
+ }
+
+ switch lead {
+ case mint8:
+ p, err = m.R.Next(2)
+ if err != nil {
+ return
+ }
+ i = int64(getMint8(p))
+ return
+
+ case mint16:
+ p, err = m.R.Next(3)
+ if err != nil {
+ return
+ }
+ i = int64(getMint16(p))
+ return
+
+ case mint32:
+ p, err = m.R.Next(5)
+ if err != nil {
+ return
+ }
+ i = int64(getMint32(p))
+ return
+
+ case mint64:
+ p, err = m.R.Next(9)
+ if err != nil {
+ return
+ }
+ i = getMint64(p)
+ return
+
+ default:
+ err = badPrefix(IntType, lead)
+ return
+ }
+}
+
+// ReadInt32 reads an int32 from the reader
+func (m *Reader) ReadInt32() (i int32, err error) {
+ var in int64
+ in, err = m.ReadInt64()
+ if in > math.MaxInt32 || in < math.MinInt32 {
+ err = IntOverflow{Value: in, FailedBitsize: 32}
+ return
+ }
+ i = int32(in)
+ return
+}
+
+// ReadInt16 reads an int16 from the reader
+func (m *Reader) ReadInt16() (i int16, err error) {
+ var in int64
+ in, err = m.ReadInt64()
+ if in > math.MaxInt16 || in < math.MinInt16 {
+ err = IntOverflow{Value: in, FailedBitsize: 16}
+ return
+ }
+ i = int16(in)
+ return
+}
+
+// ReadInt8 reads an int8 from the reader
+func (m *Reader) ReadInt8() (i int8, err error) {
+ var in int64
+ in, err = m.ReadInt64()
+ if in > math.MaxInt8 || in < math.MinInt8 {
+ err = IntOverflow{Value: in, FailedBitsize: 8}
+ return
+ }
+ i = int8(in)
+ return
+}
+
+// ReadInt reads an int from the reader
+func (m *Reader) ReadInt() (i int, err error) {
+ if smallint {
+ var in int32
+ in, err = m.ReadInt32()
+ i = int(in)
+ return
+ }
+ var in int64
+ in, err = m.ReadInt64()
+ i = int(in)
+ return
+}
+
+// ReadUint64 reads a uint64 from the reader
+func (m *Reader) ReadUint64() (u uint64, err error) {
+ var p []byte
+ var lead byte
+ p, err = m.R.Peek(1)
+ if err != nil {
+ return
+ }
+ lead = p[0]
+ if isfixint(lead) {
+ u = uint64(rfixint(lead))
+ _, err = m.R.Skip(1)
+ return
+ }
+ switch lead {
+ case muint8:
+ p, err = m.R.Next(2)
+ if err != nil {
+ return
+ }
+ u = uint64(getMuint8(p))
+ return
+
+ case muint16:
+ p, err = m.R.Next(3)
+ if err != nil {
+ return
+ }
+ u = uint64(getMuint16(p))
+ return
+
+ case muint32:
+ p, err = m.R.Next(5)
+ if err != nil {
+ return
+ }
+ u = uint64(getMuint32(p))
+ return
+
+ case muint64:
+ p, err = m.R.Next(9)
+ if err != nil {
+ return
+ }
+ u = getMuint64(p)
+ return
+
+ default:
+ err = badPrefix(UintType, lead)
+ return
+
+ }
+}
+
+// ReadUint32 reads a uint32 from the reader
+func (m *Reader) ReadUint32() (u uint32, err error) {
+ var in uint64
+ in, err = m.ReadUint64()
+ if in > math.MaxUint32 {
+ err = UintOverflow{Value: in, FailedBitsize: 32}
+ return
+ }
+ u = uint32(in)
+ return
+}
+
+// ReadUint16 reads a uint16 from the reader
+func (m *Reader) ReadUint16() (u uint16, err error) {
+ var in uint64
+ in, err = m.ReadUint64()
+ if in > math.MaxUint16 {
+ err = UintOverflow{Value: in, FailedBitsize: 16}
+ return
+ }
+ u = uint16(in)
+ return
+}
+
+// ReadUint8 reads a uint8 from the reader
+func (m *Reader) ReadUint8() (u uint8, err error) {
+ var in uint64
+ in, err = m.ReadUint64()
+ if in > math.MaxUint8 {
+ err = UintOverflow{Value: in, FailedBitsize: 8}
+ return
+ }
+ u = uint8(in)
+ return
+}
+
+// ReadUint reads a uint from the reader
+func (m *Reader) ReadUint() (u uint, err error) {
+ if smallint {
+ var un uint32
+ un, err = m.ReadUint32()
+ u = uint(un)
+ return
+ }
+ var un uint64
+ un, err = m.ReadUint64()
+ u = uint(un)
+ return
+}
+
+// ReadByte is analogous to ReadUint8.
+//
+// NOTE: this is *not* an implementation
+// of io.ByteReader.
+func (m *Reader) ReadByte() (b byte, err error) {
+ var in uint64
+ in, err = m.ReadUint64()
+ if in > math.MaxUint8 {
+ err = UintOverflow{Value: in, FailedBitsize: 8}
+ return
+ }
+ b = byte(in)
+ return
+}
+
+// ReadBytes reads a MessagePack 'bin' object
+// from the reader and returns its value. It may
+// use 'scratch' for storage if it is large enough.
+func (m *Reader) ReadBytes(scratch []byte) (b []byte, err error) {
+ var p []byte
+ var lead byte
+ p, err = m.R.Peek(2)
+ if err != nil {
+ return
+ }
+ lead = p[0]
+ var read int64
+ switch lead {
+ case mbin8:
+ read = int64(p[1])
+ m.R.Skip(2)
+ case mbin16:
+ p, err = m.R.Next(3)
+ if err != nil {
+ return
+ }
+ read = int64(big.Uint16(p[1:]))
+ case mbin32:
+ p, err = m.R.Next(5)
+ if err != nil {
+ return
+ }
+ read = int64(big.Uint32(p[1:]))
+ default:
+ err = badPrefix(BinType, lead)
+ return
+ }
+ if int64(cap(scratch)) < read {
+ b = make([]byte, read)
+ } else {
+ b = scratch[0:read]
+ }
+ _, err = m.R.ReadFull(b)
+ return
+}
+
+// ReadBytesHeader reads the size header
+// of a MessagePack 'bin' object. The user
+// is responsible for dealing with the next
+// 'sz' bytes from the reader in an application-specific
+// way.
+func (m *Reader) ReadBytesHeader() (sz uint32, err error) {
+ var p []byte
+ p, err = m.R.Peek(1)
+ if err != nil {
+ return
+ }
+ switch p[0] {
+ case mbin8:
+ p, err = m.R.Next(2)
+ if err != nil {
+ return
+ }
+ sz = uint32(p[1])
+ return
+ case mbin16:
+ p, err = m.R.Next(3)
+ if err != nil {
+ return
+ }
+ sz = uint32(big.Uint16(p[1:]))
+ return
+ case mbin32:
+ p, err = m.R.Next(5)
+ if err != nil {
+ return
+ }
+ sz = uint32(big.Uint32(p[1:]))
+ return
+ default:
+ err = badPrefix(BinType, p[0])
+ return
+ }
+}
+
+// ReadExactBytes reads a MessagePack 'bin'-encoded
+// object off of the wire into the provided slice. An
+// ArrayError will be returned if the object is not
+// exactly the length of the input slice.
+func (m *Reader) ReadExactBytes(into []byte) error {
+ p, err := m.R.Peek(2)
+ if err != nil {
+ return err
+ }
+ lead := p[0]
+ var read int64 // bytes to read
+ var skip int // prefix size to skip
+ switch lead {
+ case mbin8:
+ read = int64(p[1])
+ skip = 2
+ case mbin16:
+ p, err = m.R.Peek(3)
+ if err != nil {
+ return err
+ }
+ read = int64(big.Uint16(p[1:]))
+ skip = 3
+ case mbin32:
+ p, err = m.R.Peek(5)
+ if err != nil {
+ return err
+ }
+ read = int64(big.Uint32(p[1:]))
+ skip = 5
+ default:
+ return badPrefix(BinType, lead)
+ }
+ if read != int64(len(into)) {
+ return ArrayError{Wanted: uint32(len(into)), Got: uint32(read)}
+ }
+ m.R.Skip(skip)
+ _, err = m.R.ReadFull(into)
+ return err
+}
+
+// ReadStringAsBytes reads a MessagePack 'str' (utf-8) string
+// and returns its value as bytes. It may use 'scratch' for storage
+// if it is large enough.
+func (m *Reader) ReadStringAsBytes(scratch []byte) (b []byte, err error) {
+ var p []byte
+ var lead byte
+ p, err = m.R.Peek(1)
+ if err != nil {
+ return
+ }
+ lead = p[0]
+ var read int64
+
+ if isfixstr(lead) {
+ read = int64(rfixstr(lead))
+ m.R.Skip(1)
+ goto fill
+ }
+
+ switch lead {
+ case mstr8:
+ p, err = m.R.Next(2)
+ if err != nil {
+ return
+ }
+ read = int64(uint8(p[1]))
+ case mstr16:
+ p, err = m.R.Next(3)
+ if err != nil {
+ return
+ }
+ read = int64(big.Uint16(p[1:]))
+ case mstr32:
+ p, err = m.R.Next(5)
+ if err != nil {
+ return
+ }
+ read = int64(big.Uint32(p[1:]))
+ default:
+ err = badPrefix(StrType, lead)
+ return
+ }
+fill:
+ if int64(cap(scratch)) < read {
+ b = make([]byte, read)
+ } else {
+ b = scratch[0:read]
+ }
+ _, err = m.R.ReadFull(b)
+ return
+}
+
+// ReadStringHeader reads a string header
+// off of the wire. The user is then responsible
+// for dealing with the next 'sz' bytes from
+// the reader in an application-specific manner.
+func (m *Reader) ReadStringHeader() (sz uint32, err error) {
+ var p []byte
+ p, err = m.R.Peek(1)
+ if err != nil {
+ return
+ }
+ lead := p[0]
+ if isfixstr(lead) {
+ sz = uint32(rfixstr(lead))
+ m.R.Skip(1)
+ return
+ }
+ switch lead {
+ case mstr8:
+ p, err = m.R.Next(2)
+ if err != nil {
+ return
+ }
+ sz = uint32(p[1])
+ return
+ case mstr16:
+ p, err = m.R.Next(3)
+ if err != nil {
+ return
+ }
+ sz = uint32(big.Uint16(p[1:]))
+ return
+ case mstr32:
+ p, err = m.R.Next(5)
+ if err != nil {
+ return
+ }
+ sz = big.Uint32(p[1:])
+ return
+ default:
+ err = badPrefix(StrType, lead)
+ return
+ }
+}
+
+// ReadString reads a utf-8 string from the reader
+func (m *Reader) ReadString() (s string, err error) {
+ var p []byte
+ var lead byte
+ var read int64
+ p, err = m.R.Peek(1)
+ if err != nil {
+ return
+ }
+ lead = p[0]
+
+ if isfixstr(lead) {
+ read = int64(rfixstr(lead))
+ m.R.Skip(1)
+ goto fill
+ }
+
+ switch lead {
+ case mstr8:
+ p, err = m.R.Next(2)
+ if err != nil {
+ return
+ }
+ read = int64(uint8(p[1]))
+ case mstr16:
+ p, err = m.R.Next(3)
+ if err != nil {
+ return
+ }
+ read = int64(big.Uint16(p[1:]))
+ case mstr32:
+ p, err = m.R.Next(5)
+ if err != nil {
+ return
+ }
+ read = int64(big.Uint32(p[1:]))
+ default:
+ err = badPrefix(StrType, lead)
+ return
+ }
+fill:
+ if read == 0 {
+ s, err = "", nil
+ return
+ }
+ // reading into the memory
+ // that will become the string
+ // itself has vastly superior
+ // worst-case performance, because
+ // the reader buffer doesn't have
+ // to be large enough to hold the string.
+ // the idea here is to make it more
+ // difficult for someone malicious
+ // to cause the system to run out of
+ // memory by sending very large strings.
+ //
+ // NOTE: this works because the argument
+ // passed to (*fwd.Reader).ReadFull escapes
+ // to the heap; its argument may, in turn,
+ // be passed to the underlying reader, and
+ // thus escape analysis *must* conclude that
+ // 'out' escapes.
+ out := make([]byte, read)
+ _, err = m.R.ReadFull(out)
+ if err != nil {
+ return
+ }
+ s = UnsafeString(out)
+ return
+}
+
+// ReadComplex64 reads a complex64 from the reader
+func (m *Reader) ReadComplex64() (f complex64, err error) {
+ var p []byte
+ p, err = m.R.Peek(10)
+ if err != nil {
+ return
+ }
+ if p[0] != mfixext8 {
+ err = badPrefix(Complex64Type, p[0])
+ return
+ }
+ if int8(p[1]) != Complex64Extension {
+ err = errExt(int8(p[1]), Complex64Extension)
+ return
+ }
+ f = complex(math.Float32frombits(big.Uint32(p[2:])),
+ math.Float32frombits(big.Uint32(p[6:])))
+ _, err = m.R.Skip(10)
+ return
+}
+
+// ReadComplex128 reads a complex128 from the reader
+func (m *Reader) ReadComplex128() (f complex128, err error) {
+ var p []byte
+ p, err = m.R.Peek(18)
+ if err != nil {
+ return
+ }
+ if p[0] != mfixext16 {
+ err = badPrefix(Complex128Type, p[0])
+ return
+ }
+ if int8(p[1]) != Complex128Extension {
+ err = errExt(int8(p[1]), Complex128Extension)
+ return
+ }
+ f = complex(math.Float64frombits(big.Uint64(p[2:])),
+ math.Float64frombits(big.Uint64(p[10:])))
+ _, err = m.R.Skip(18)
+ return
+}
+
+// ReadMapStrIntf reads a MessagePack map into a map[string]interface{}.
+// (You must pass a non-nil map into the function.)
+func (m *Reader) ReadMapStrIntf(mp map[string]interface{}) (err error) {
+ var sz uint32
+ sz, err = m.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for key := range mp {
+ delete(mp, key)
+ }
+ for i := uint32(0); i < sz; i++ {
+ var key string
+ var val interface{}
+ key, err = m.ReadString()
+ if err != nil {
+ return
+ }
+ val, err = m.ReadIntf()
+ if err != nil {
+ return
+ }
+ mp[key] = val
+ }
+ return
+}
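+
+// Illustrative sketch (not part of upstream msgp): because
+// ReadMapStrIntf clears the map it is given, one map can be recycled
+// across messages:
+//
+//	mp := make(map[string]interface{})
+//	dec := NewReader(conn) // 'conn' is a hypothetical io.Reader
+//	for dec.ReadMapStrIntf(mp) == nil {
+//		process(mp) // 'process' is a hypothetical consumer
+//	}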
+
+// ReadTime reads a time.Time object from the reader.
+// The returned time's location will be set to time.Local.
+func (m *Reader) ReadTime() (t time.Time, err error) {
+ var p []byte
+ p, err = m.R.Peek(15)
+ if err != nil {
+ return
+ }
+ if p[0] != mext8 || p[1] != 12 {
+ err = badPrefix(TimeType, p[0])
+ return
+ }
+ if int8(p[2]) != TimeExtension {
+ err = errExt(int8(p[2]), TimeExtension)
+ return
+ }
+ sec, nsec := getUnix(p[3:])
+ t = time.Unix(sec, int64(nsec)).Local()
+ _, err = m.R.Skip(15)
+ return
+}
+
+// ReadIntf reads out the next object as a raw interface{}.
+// Arrays are decoded as []interface{}, and maps are decoded
+// as map[string]interface{}. Integers are decoded as int64
+// and unsigned integers are decoded as uint64.
+func (m *Reader) ReadIntf() (i interface{}, err error) {
+ var t Type
+ t, err = m.NextType()
+ if err != nil {
+ return
+ }
+ switch t {
+ case BoolType:
+ i, err = m.ReadBool()
+ return
+
+ case IntType:
+ i, err = m.ReadInt64()
+ return
+
+ case UintType:
+ i, err = m.ReadUint64()
+ return
+
+ case BinType:
+ i, err = m.ReadBytes(nil)
+ return
+
+ case StrType:
+ i, err = m.ReadString()
+ return
+
+ case Complex64Type:
+ i, err = m.ReadComplex64()
+ return
+
+ case Complex128Type:
+ i, err = m.ReadComplex128()
+ return
+
+ case TimeType:
+ i, err = m.ReadTime()
+ return
+
+ case ExtensionType:
+ var t int8
+ t, err = m.peekExtensionType()
+ if err != nil {
+ return
+ }
+ f, ok := extensionReg[t]
+ if ok {
+ e := f()
+ err = m.ReadExtension(e)
+ i = e
+ return
+ }
+ var e RawExtension
+ e.Type = t
+ err = m.ReadExtension(&e)
+ i = &e
+ return
+
+ case MapType:
+ mp := make(map[string]interface{})
+ err = m.ReadMapStrIntf(mp)
+ i = mp
+ return
+
+ case NilType:
+ err = m.ReadNil()
+ i = nil
+ return
+
+ case Float32Type:
+ i, err = m.ReadFloat32()
+ return
+
+ case Float64Type:
+ i, err = m.ReadFloat64()
+ return
+
+ case ArrayType:
+ var sz uint32
+ sz, err = m.ReadArrayHeader()
+
+ if err != nil {
+ return
+ }
+ out := make([]interface{}, int(sz))
+ for j := range out {
+ out[j], err = m.ReadIntf()
+ if err != nil {
+ return
+ }
+ }
+ i = out
+ return
+
+ default:
+ return nil, fatal // unreachable
+ }
+}
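+
+// Illustrative sketch (not part of upstream msgp): when the schema is
+// unknown, ReadIntf yields the concrete types documented above, which
+// callers can dispatch on with a type switch:
+//
+//	v, err := m.ReadIntf()
+//	if err != nil {
+//		return err
+//	}
+//	switch v := v.(type) {
+//	case int64:
+//		// signed integer on the wire
+//	case map[string]interface{}:
+//		// nested map; values are themselves interface{}
+//	}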
diff --git a/vendor/github.com/tinylib/msgp/msgp/read_bytes.go b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go
new file mode 100644
index 00000000..78e466fc
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go
@@ -0,0 +1,1089 @@
+package msgp
+
+import (
+ "bytes"
+ "encoding/binary"
+ "math"
+ "time"
+)
+
+var big = binary.BigEndian
+
+// NextType returns the type of the next
+// object in the slice. If the length
+// of the input is zero, it returns
+// InvalidType.
+func NextType(b []byte) Type {
+ if len(b) == 0 {
+ return InvalidType
+ }
+ spec := sizes[b[0]]
+ t := spec.typ
+ if t == ExtensionType && len(b) > int(spec.size) {
+ var tp int8
+ if spec.extra == constsize {
+ tp = int8(b[1])
+ } else {
+ tp = int8(b[spec.size-1])
+ }
+ switch tp {
+ case TimeExtension:
+ return TimeType
+ case Complex128Extension:
+ return Complex128Type
+ case Complex64Extension:
+ return Complex64Type
+ default:
+ return ExtensionType
+ }
+ }
+ return t
+}
+
+// IsNil returns true if len(b)>0 and
+// the leading byte is a 'nil' MessagePack
+// byte; false otherwise
+func IsNil(b []byte) bool {
+ if len(b) != 0 && b[0] == mnil {
+ return true
+ }
+ return false
+}
+
+// Raw is raw MessagePack.
+// Raw allows you to read and write
+// data without interpreting its contents.
+type Raw []byte
+
+// MarshalMsg implements msgp.Marshaler.
+// It appends the raw contents of 'raw'
+// to the provided byte slice. If 'raw'
+// is 0 bytes, 'nil' will be appended instead.
+func (r Raw) MarshalMsg(b []byte) ([]byte, error) {
+ i := len(r)
+ if i == 0 {
+ return AppendNil(b), nil
+ }
+ o, l := ensure(b, i)
+ copy(o[l:], []byte(r))
+ return o, nil
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler.
+// It sets the contents of *Raw to be the next
+// object in the provided byte slice.
+func (r *Raw) UnmarshalMsg(b []byte) ([]byte, error) {
+ l := len(b)
+ out, err := Skip(b)
+ if err != nil {
+ return b, err
+ }
+ rlen := l - len(out)
+ if cap(*r) < rlen {
+ *r = make(Raw, rlen)
+ } else {
+ *r = (*r)[0:rlen]
+ }
+ copy(*r, b[:rlen])
+ return out, nil
+}
+
+// EncodeMsg implements msgp.Encodable.
+// It writes the raw bytes to the writer.
+// If r is empty, it writes 'nil' instead.
+func (r Raw) EncodeMsg(w *Writer) error {
+ if len(r) == 0 {
+ return w.WriteNil()
+ }
+ _, err := w.Write([]byte(r))
+ return err
+}
+
+// DecodeMsg implements msgp.Decodable.
+// It sets the value of *Raw to be the
+// next object on the wire.
+func (r *Raw) DecodeMsg(f *Reader) error {
+ *r = (*r)[:0]
+ return appendNext(f, (*[]byte)(r))
+}
+
+// Msgsize implements msgp.Sizer
+func (r Raw) Msgsize() int {
+ l := len(r)
+ if l == 0 {
+ return 1 // for 'nil'
+ }
+ return l
+}
+
+func appendNext(f *Reader, d *[]byte) error {
+ amt, o, err := getNextSize(f.R)
+ if err != nil {
+ return err
+ }
+ var i int
+ *d, i = ensure(*d, int(amt))
+ _, err = f.R.ReadFull((*d)[i:])
+ if err != nil {
+ return err
+ }
+ for o > 0 {
+ err = appendNext(f, d)
+ if err != nil {
+ return err
+ }
+ o--
+ }
+ return nil
+}
+
+// MarshalJSON implements json.Marshaler
+func (r *Raw) MarshalJSON() ([]byte, error) {
+ var buf bytes.Buffer
+ _, err := UnmarshalAsJSON(&buf, []byte(*r))
+ return buf.Bytes(), err
+}
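+
+// A minimal usage sketch (illustrative only): Raw captures one whole
+// MessagePack object verbatim, so decoding can be deferred or the bytes
+// re-encoded elsewhere.
+//
+//	var r Raw
+//	rest, err := r.UnmarshalMsg(in) // r now holds exactly one object
+//	if err == nil {
+//		js, _ := r.MarshalJSON() // optionally render the object as JSON
+//		_, _ = rest, js
+//	}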
+
+// ReadMapHeaderBytes reads a map header size
+// from 'b' and returns the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not a map)
+func ReadMapHeaderBytes(b []byte) (sz uint32, o []byte, err error) {
+ l := len(b)
+ if l < 1 {
+ err = ErrShortBytes
+ return
+ }
+
+ lead := b[0]
+ if isfixmap(lead) {
+ sz = uint32(rfixmap(lead))
+ o = b[1:]
+ return
+ }
+
+ switch lead {
+ case mmap16:
+ if l < 3 {
+ err = ErrShortBytes
+ return
+ }
+ sz = uint32(big.Uint16(b[1:]))
+ o = b[3:]
+ return
+
+ case mmap32:
+ if l < 5 {
+ err = ErrShortBytes
+ return
+ }
+ sz = big.Uint32(b[1:])
+ o = b[5:]
+ return
+
+ default:
+ err = badPrefix(MapType, lead)
+ return
+ }
+}
+
+// ReadMapKeyZC attempts to read a map key
+// from 'b' and returns the key bytes and the remaining bytes
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not a str or bin)
+func ReadMapKeyZC(b []byte) ([]byte, []byte, error) {
+	// read into fresh variables so the original 'b' is
+	// preserved for the BinType fallback below
+	o, x, err := ReadStringZC(b)
+	if err != nil {
+		if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType {
+			return ReadBytesZC(b)
+		}
+		return nil, b, err
+	}
+	return o, x, nil
+}
+
+// ReadArrayHeaderBytes attempts to read
+// the array header size off of 'b' and return
+// the size and remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not an array)
+func ReadArrayHeaderBytes(b []byte) (sz uint32, o []byte, err error) {
+ if len(b) < 1 {
+ return 0, nil, ErrShortBytes
+ }
+ lead := b[0]
+ if isfixarray(lead) {
+ sz = uint32(rfixarray(lead))
+ o = b[1:]
+ return
+ }
+
+ switch lead {
+ case marray16:
+ if len(b) < 3 {
+ err = ErrShortBytes
+ return
+ }
+ sz = uint32(big.Uint16(b[1:]))
+ o = b[3:]
+ return
+
+ case marray32:
+ if len(b) < 5 {
+ err = ErrShortBytes
+ return
+ }
+ sz = big.Uint32(b[1:])
+ o = b[5:]
+ return
+
+ default:
+ err = badPrefix(ArrayType, lead)
+ return
+ }
+}
+
+// ReadNilBytes tries to read a "nil" byte
+// off of 'b' and return the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not a 'nil')
+// - InvalidPrefixError
+func ReadNilBytes(b []byte) ([]byte, error) {
+ if len(b) < 1 {
+ return nil, ErrShortBytes
+ }
+ if b[0] != mnil {
+ return b, badPrefix(NilType, b[0])
+ }
+ return b[1:], nil
+}
+
+// ReadFloat64Bytes tries to read a float64
+// from 'b' and return the value and the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not a float64)
+func ReadFloat64Bytes(b []byte) (f float64, o []byte, err error) {
+ if len(b) < 9 {
+ if len(b) >= 5 && b[0] == mfloat32 {
+ var tf float32
+ tf, o, err = ReadFloat32Bytes(b)
+ f = float64(tf)
+ return
+ }
+ err = ErrShortBytes
+ return
+ }
+
+ if b[0] != mfloat64 {
+ if b[0] == mfloat32 {
+ var tf float32
+ tf, o, err = ReadFloat32Bytes(b)
+ f = float64(tf)
+ return
+ }
+ err = badPrefix(Float64Type, b[0])
+ return
+ }
+
+ f = math.Float64frombits(getMuint64(b))
+ o = b[9:]
+ return
+}
+
+// ReadFloat32Bytes tries to read a float32
+// from 'b' and return the value and the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not a float32)
+func ReadFloat32Bytes(b []byte) (f float32, o []byte, err error) {
+ if len(b) < 5 {
+ err = ErrShortBytes
+ return
+ }
+
+ if b[0] != mfloat32 {
+ err = TypeError{Method: Float32Type, Encoded: getType(b[0])}
+ return
+ }
+
+ f = math.Float32frombits(getMuint32(b))
+ o = b[5:]
+ return
+}
+
+// ReadBoolBytes tries to read a bool
+// from 'b' and return the value and the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not a bool)
+func ReadBoolBytes(b []byte) (bool, []byte, error) {
+ if len(b) < 1 {
+ return false, b, ErrShortBytes
+ }
+ switch b[0] {
+ case mtrue:
+ return true, b[1:], nil
+ case mfalse:
+ return false, b[1:], nil
+ default:
+ return false, b, badPrefix(BoolType, b[0])
+ }
+}
+
+// ReadInt64Bytes tries to read an int64
+// from 'b' and return the value and the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not an int)
+func ReadInt64Bytes(b []byte) (i int64, o []byte, err error) {
+ l := len(b)
+ if l < 1 {
+ return 0, nil, ErrShortBytes
+ }
+
+ lead := b[0]
+ if isfixint(lead) {
+ i = int64(rfixint(lead))
+ o = b[1:]
+ return
+ }
+ if isnfixint(lead) {
+ i = int64(rnfixint(lead))
+ o = b[1:]
+ return
+ }
+
+ switch lead {
+ case mint8:
+ if l < 2 {
+ err = ErrShortBytes
+ return
+ }
+ i = int64(getMint8(b))
+ o = b[2:]
+ return
+
+ case mint16:
+ if l < 3 {
+ err = ErrShortBytes
+ return
+ }
+ i = int64(getMint16(b))
+ o = b[3:]
+ return
+
+ case mint32:
+ if l < 5 {
+ err = ErrShortBytes
+ return
+ }
+ i = int64(getMint32(b))
+ o = b[5:]
+ return
+
+ case mint64:
+ if l < 9 {
+ err = ErrShortBytes
+ return
+ }
+ i = getMint64(b)
+ o = b[9:]
+ return
+
+ default:
+ err = badPrefix(IntType, lead)
+ return
+ }
+}
+
+// ReadInt32Bytes tries to read an int32
+// from 'b' and return the value and the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not an int)
+// - IntOverflow{} (value doesn't fit in int32)
+func ReadInt32Bytes(b []byte) (int32, []byte, error) {
+ i, o, err := ReadInt64Bytes(b)
+ if i > math.MaxInt32 || i < math.MinInt32 {
+ return 0, o, IntOverflow{Value: i, FailedBitsize: 32}
+ }
+ return int32(i), o, err
+}
+
+// ReadInt16Bytes tries to read an int16
+// from 'b' and return the value and the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not an int)
+// - IntOverflow{} (value doesn't fit in int16)
+func ReadInt16Bytes(b []byte) (int16, []byte, error) {
+ i, o, err := ReadInt64Bytes(b)
+ if i > math.MaxInt16 || i < math.MinInt16 {
+ return 0, o, IntOverflow{Value: i, FailedBitsize: 16}
+ }
+ return int16(i), o, err
+}
+
+// ReadInt8Bytes tries to read an int8
+// from 'b' and return the value and the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not an int)
+// - IntOverflow{} (value doesn't fit in int8)
+func ReadInt8Bytes(b []byte) (int8, []byte, error) {
+ i, o, err := ReadInt64Bytes(b)
+ if i > math.MaxInt8 || i < math.MinInt8 {
+ return 0, o, IntOverflow{Value: i, FailedBitsize: 8}
+ }
+ return int8(i), o, err
+}
+
+// ReadIntBytes tries to read an int
+// from 'b' and return the value and the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not an int)
+// - IntOverflow{} (value doesn't fit in int; 32-bit platforms only)
+func ReadIntBytes(b []byte) (int, []byte, error) {
+ if smallint {
+ i, b, err := ReadInt32Bytes(b)
+ return int(i), b, err
+ }
+ i, b, err := ReadInt64Bytes(b)
+ return int(i), b, err
+}
+
+// ReadUint64Bytes tries to read a uint64
+// from 'b' and return the value and the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not a uint)
+func ReadUint64Bytes(b []byte) (u uint64, o []byte, err error) {
+ l := len(b)
+ if l < 1 {
+ return 0, nil, ErrShortBytes
+ }
+
+ lead := b[0]
+ if isfixint(lead) {
+ u = uint64(rfixint(lead))
+ o = b[1:]
+ return
+ }
+
+ switch lead {
+ case muint8:
+ if l < 2 {
+ err = ErrShortBytes
+ return
+ }
+ u = uint64(getMuint8(b))
+ o = b[2:]
+ return
+
+ case muint16:
+ if l < 3 {
+ err = ErrShortBytes
+ return
+ }
+ u = uint64(getMuint16(b))
+ o = b[3:]
+ return
+
+ case muint32:
+ if l < 5 {
+ err = ErrShortBytes
+ return
+ }
+ u = uint64(getMuint32(b))
+ o = b[5:]
+ return
+
+ case muint64:
+ if l < 9 {
+ err = ErrShortBytes
+ return
+ }
+ u = getMuint64(b)
+ o = b[9:]
+ return
+
+ default:
+ err = badPrefix(UintType, lead)
+ return
+ }
+}
+
+// ReadUint32Bytes tries to read a uint32
+// from 'b' and return the value and the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not a uint)
+// - UintOverflow{} (value too large for uint32)
+func ReadUint32Bytes(b []byte) (uint32, []byte, error) {
+ v, o, err := ReadUint64Bytes(b)
+ if v > math.MaxUint32 {
+ return 0, nil, UintOverflow{Value: v, FailedBitsize: 32}
+ }
+ return uint32(v), o, err
+}
+
+// ReadUint16Bytes tries to read a uint16
+// from 'b' and return the value and the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not a uint)
+// - UintOverflow{} (value too large for uint16)
+func ReadUint16Bytes(b []byte) (uint16, []byte, error) {
+ v, o, err := ReadUint64Bytes(b)
+ if v > math.MaxUint16 {
+ return 0, nil, UintOverflow{Value: v, FailedBitsize: 16}
+ }
+ return uint16(v), o, err
+}
+
+// ReadUint8Bytes tries to read a uint8
+// from 'b' and return the value and the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not a uint)
+// - UintOverflow{} (value too large for uint8)
+func ReadUint8Bytes(b []byte) (uint8, []byte, error) {
+ v, o, err := ReadUint64Bytes(b)
+ if v > math.MaxUint8 {
+ return 0, nil, UintOverflow{Value: v, FailedBitsize: 8}
+ }
+ return uint8(v), o, err
+}
+
+// ReadUintBytes tries to read a uint
+// from 'b' and return the value and the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not a uint)
+// - UintOverflow{} (value too large for uint; 32-bit platforms only)
+func ReadUintBytes(b []byte) (uint, []byte, error) {
+ if smallint {
+ u, b, err := ReadUint32Bytes(b)
+ return uint(u), b, err
+ }
+ u, b, err := ReadUint64Bytes(b)
+ return uint(u), b, err
+}
+
+// ReadByteBytes is analogous to ReadUint8Bytes
+func ReadByteBytes(b []byte) (byte, []byte, error) {
+ return ReadUint8Bytes(b)
+}
+
+// ReadBytesBytes reads a 'bin' object
+// from 'b' and returns its value and
+// the remaining bytes in 'b'.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not a 'bin' object)
+func ReadBytesBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) {
+ return readBytesBytes(b, scratch, false)
+}
+
+func readBytesBytes(b []byte, scratch []byte, zc bool) (v []byte, o []byte, err error) {
+ l := len(b)
+ if l < 1 {
+ return nil, nil, ErrShortBytes
+ }
+
+ lead := b[0]
+ var read int
+ switch lead {
+ case mbin8:
+ if l < 2 {
+ err = ErrShortBytes
+ return
+ }
+
+ read = int(b[1])
+ b = b[2:]
+
+ case mbin16:
+ if l < 3 {
+ err = ErrShortBytes
+ return
+ }
+ read = int(big.Uint16(b[1:]))
+ b = b[3:]
+
+ case mbin32:
+ if l < 5 {
+ err = ErrShortBytes
+ return
+ }
+ read = int(big.Uint32(b[1:]))
+ b = b[5:]
+
+ default:
+ err = badPrefix(BinType, lead)
+ return
+ }
+
+ if len(b) < read {
+ err = ErrShortBytes
+ return
+ }
+
+ // zero-copy
+ if zc {
+ v = b[0:read]
+ o = b[read:]
+ return
+ }
+
+ if cap(scratch) >= read {
+ v = scratch[0:read]
+ } else {
+ v = make([]byte, read)
+ }
+
+ o = b[copy(v, b):]
+ return
+}
+
+// ReadBytesZC extracts the messagepack-encoded
+// binary field without copying. The returned []byte
+// points to the same memory as the input slice.
+// Possible errors:
+// - ErrShortBytes (b not long enough)
+// - TypeError{} (object not 'bin')
+func ReadBytesZC(b []byte) (v []byte, o []byte, err error) {
+ return readBytesBytes(b, nil, true)
+}
+
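+// ReadExactBytes reads a 'bin' object
+// from 'b' into 'into', which must have
+// exactly the encoded length, and returns
+// the remaining bytes.
+// Possible errors:
+// - ErrShortBytes (too few bytes)
+// - TypeError{} (not a 'bin' object)
+// - ArrayError{} (len(into) != encoded length)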
+func ReadExactBytes(b []byte, into []byte) (o []byte, err error) {
+ l := len(b)
+ if l < 1 {
+ err = ErrShortBytes
+ return
+ }
+
+ lead := b[0]
+ var read uint32
+ var skip int
+ switch lead {
+ case mbin8:
+ if l < 2 {
+ err = ErrShortBytes
+ return
+ }
+
+ read = uint32(b[1])
+ skip = 2
+
+ case mbin16:
+ if l < 3 {
+ err = ErrShortBytes
+ return
+ }
+ read = uint32(big.Uint16(b[1:]))
+ skip = 3
+
+ case mbin32:
+ if l < 5 {
+ err = ErrShortBytes
+ return
+ }
+ read = uint32(big.Uint32(b[1:]))
+ skip = 5
+
+ default:
+ err = badPrefix(BinType, lead)
+ return
+ }
+
+ if read != uint32(len(into)) {
+ err = ArrayError{Wanted: uint32(len(into)), Got: read}
+ return
+ }
+
+ o = b[skip+copy(into, b[skip:]):]
+ return
+}
+
+// ReadStringZC reads a messagepack string field
+// without copying. The returned []byte points
+// to the same memory as the input slice.
+// Possible errors:
+// - ErrShortBytes (b not long enough)
+// - TypeError{} (object not 'str')
+func ReadStringZC(b []byte) (v []byte, o []byte, err error) {
+ l := len(b)
+ if l < 1 {
+ return nil, nil, ErrShortBytes
+ }
+
+ lead := b[0]
+ var read int
+
+ if isfixstr(lead) {
+ read = int(rfixstr(lead))
+ b = b[1:]
+ } else {
+ switch lead {
+ case mstr8:
+ if l < 2 {
+ err = ErrShortBytes
+ return
+ }
+ read = int(b[1])
+ b = b[2:]
+
+ case mstr16:
+ if l < 3 {
+ err = ErrShortBytes
+ return
+ }
+ read = int(big.Uint16(b[1:]))
+ b = b[3:]
+
+ case mstr32:
+ if l < 5 {
+ err = ErrShortBytes
+ return
+ }
+ read = int(big.Uint32(b[1:]))
+ b = b[5:]
+
+ default:
+ err = TypeError{Method: StrType, Encoded: getType(lead)}
+ return
+ }
+ }
+
+ if len(b) < read {
+ err = ErrShortBytes
+ return
+ }
+
+ v = b[0:read]
+ o = b[read:]
+ return
+}
+
+// ReadStringBytes reads a 'str' object
+// from 'b' and returns its value and the
+// remaining bytes in 'b'.
+// Possible errors:
+// - ErrShortBytes (b not long enough)
+// - TypeError{} (not 'str' type)
+// - InvalidPrefixError
+func ReadStringBytes(b []byte) (string, []byte, error) {
+ v, o, err := ReadStringZC(b)
+ return string(v), o, err
+}
+
+// ReadStringAsBytes reads a 'str' object
+// into a slice of bytes. 'v' is the value of
+// the 'str' object, which may reside in memory
+// pointed to by 'scratch'. 'o' is the remaining bytes
+// in 'b'.
+// Possible errors:
+// - ErrShortBytes (b not long enough)
+// - TypeError{} (not 'str' type)
+// - InvalidPrefixError (unknown type marker)
+func ReadStringAsBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) {
+ var tmp []byte
+ tmp, o, err = ReadStringZC(b)
+ v = append(scratch[:0], tmp...)
+ return
+}
+
+// ReadComplex128Bytes reads a complex128
+// extension object from 'b' and returns the
+// remaining bytes.
+// Possible errors:
+// - ErrShortBytes (not enough bytes in 'b')
+// - TypeError{} (object not a complex128)
+// - InvalidPrefixError
+// - ExtensionTypeError{} (object an extension of the correct size, but not a complex128)
+func ReadComplex128Bytes(b []byte) (c complex128, o []byte, err error) {
+ if len(b) < 18 {
+ err = ErrShortBytes
+ return
+ }
+ if b[0] != mfixext16 {
+ err = badPrefix(Complex128Type, b[0])
+ return
+ }
+ if int8(b[1]) != Complex128Extension {
+ err = errExt(int8(b[1]), Complex128Extension)
+ return
+ }
+ c = complex(math.Float64frombits(big.Uint64(b[2:])),
+ math.Float64frombits(big.Uint64(b[10:])))
+ o = b[18:]
+ return
+}
+
+// ReadComplex64Bytes reads a complex64
+// extension object from 'b' and returns the
+// remaining bytes.
+// Possible errors:
+// - ErrShortBytes (not enough bytes in 'b')
+// - TypeError{} (object not a complex64)
+// - ExtensionTypeError{} (object an extension of the correct size, but not a complex64)
+func ReadComplex64Bytes(b []byte) (c complex64, o []byte, err error) {
+ if len(b) < 10 {
+ err = ErrShortBytes
+ return
+ }
+ if b[0] != mfixext8 {
+ err = badPrefix(Complex64Type, b[0])
+ return
+ }
+ if b[1] != Complex64Extension {
+ err = errExt(int8(b[1]), Complex64Extension)
+ return
+ }
+ c = complex(math.Float32frombits(big.Uint32(b[2:])),
+ math.Float32frombits(big.Uint32(b[6:])))
+ o = b[10:]
+ return
+}
+
+// ReadTimeBytes reads a time.Time
+// extension object from 'b' and returns the
+// remaining bytes.
+// Possible errors:
+// - ErrShortBytes (not enough bytes in 'b')
+// - TypeError{} (object not a time.Time)
+// - ExtensionTypeError{} (object an extension of the correct size, but not a time.Time)
+func ReadTimeBytes(b []byte) (t time.Time, o []byte, err error) {
+ if len(b) < 15 {
+ err = ErrShortBytes
+ return
+ }
+ if b[0] != mext8 || b[1] != 12 {
+ err = badPrefix(TimeType, b[0])
+ return
+ }
+ if int8(b[2]) != TimeExtension {
+ err = errExt(int8(b[2]), TimeExtension)
+ return
+ }
+ sec, nsec := getUnix(b[3:])
+ t = time.Unix(sec, int64(nsec)).Local()
+ o = b[15:]
+ return
+}
+
+// ReadMapStrIntfBytes reads a map[string]interface{}
+// out of 'b' and returns the map and remaining bytes.
+// If 'old' is non-nil, the values will be read into that map.
+func ReadMapStrIntfBytes(b []byte, old map[string]interface{}) (v map[string]interface{}, o []byte, err error) {
+ var sz uint32
+ o = b
+ sz, o, err = ReadMapHeaderBytes(o)
+
+ if err != nil {
+ return
+ }
+
+ if old != nil {
+ for key := range old {
+ delete(old, key)
+ }
+ v = old
+ } else {
+ v = make(map[string]interface{}, int(sz))
+ }
+
+ for z := uint32(0); z < sz; z++ {
+ if len(o) < 1 {
+ err = ErrShortBytes
+ return
+ }
+ var key []byte
+ key, o, err = ReadMapKeyZC(o)
+ if err != nil {
+ return
+ }
+ var val interface{}
+ val, o, err = ReadIntfBytes(o)
+ if err != nil {
+ return
+ }
+ v[string(key)] = val
+ }
+ return
+}
+
+// ReadIntfBytes attempts to read
+// the next object out of 'b' as a raw interface{} and
+// return the remaining bytes.
+func ReadIntfBytes(b []byte) (i interface{}, o []byte, err error) {
+ if len(b) < 1 {
+ err = ErrShortBytes
+ return
+ }
+
+ k := NextType(b)
+
+ switch k {
+ case MapType:
+ i, o, err = ReadMapStrIntfBytes(b, nil)
+ return
+
+ case ArrayType:
+ var sz uint32
+ sz, o, err = ReadArrayHeaderBytes(b)
+ if err != nil {
+ return
+ }
+ j := make([]interface{}, int(sz))
+ i = j
+ for d := range j {
+ j[d], o, err = ReadIntfBytes(o)
+ if err != nil {
+ return
+ }
+ }
+ return
+
+ case Float32Type:
+ i, o, err = ReadFloat32Bytes(b)
+ return
+
+ case Float64Type:
+ i, o, err = ReadFloat64Bytes(b)
+ return
+
+ case IntType:
+ i, o, err = ReadInt64Bytes(b)
+ return
+
+ case UintType:
+ i, o, err = ReadUint64Bytes(b)
+ return
+
+ case BoolType:
+ i, o, err = ReadBoolBytes(b)
+ return
+
+ case TimeType:
+ i, o, err = ReadTimeBytes(b)
+ return
+
+ case Complex64Type:
+ i, o, err = ReadComplex64Bytes(b)
+ return
+
+ case Complex128Type:
+ i, o, err = ReadComplex128Bytes(b)
+ return
+
+ case ExtensionType:
+ var t int8
+ t, err = peekExtension(b)
+ if err != nil {
+ return
+ }
+ // use a user-defined extension,
+ // if it's been registered
+ f, ok := extensionReg[t]
+ if ok {
+ e := f()
+ o, err = ReadExtensionBytes(b, e)
+ i = e
+ return
+ }
+ // last resort is a raw extension
+ e := RawExtension{}
+ e.Type = int8(t)
+ o, err = ReadExtensionBytes(b, &e)
+ i = &e
+ return
+
+ case NilType:
+ o, err = ReadNilBytes(b)
+ return
+
+ case BinType:
+ i, o, err = ReadBytesBytes(b, nil)
+ return
+
+ case StrType:
+ i, o, err = ReadStringBytes(b)
+ return
+
+ default:
+ err = InvalidPrefixError(b[0])
+ return
+ }
+}
+
+// Skip skips the next object in 'b' and
+// returns the remaining bytes. If the object
+// is a map or array, all of its elements
+// will be skipped.
+// Possible Errors:
+// - ErrShortBytes (not enough bytes in b)
+// - InvalidPrefixError (bad encoding)
+func Skip(b []byte) ([]byte, error) {
+ sz, asz, err := getSize(b)
+ if err != nil {
+ return b, err
+ }
+ if uintptr(len(b)) < sz {
+ return b, ErrShortBytes
+ }
+ b = b[sz:]
+ for asz > 0 {
+ b, err = Skip(b)
+ if err != nil {
+ return b, err
+ }
+ asz--
+ }
+ return b, nil
+}
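+
+// Usage sketch (illustrative only): Skip pairs naturally with
+// ReadMapHeaderBytes and ReadMapKeyZC when scanning a map for
+// interesting keys while stepping over everything else.
+//
+//	sz, o, err := ReadMapHeaderBytes(b)
+//	for i := uint32(0); err == nil && i < sz; i++ {
+//		_, o, err = ReadMapKeyZC(o) // read (and here ignore) the key
+//		if err == nil {
+//			o, err = Skip(o) // step over the value without decoding it
+//		}
+//	}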
+
+// returns (skip N bytes, skip M objects, error)
+func getSize(b []byte) (uintptr, uintptr, error) {
+ l := len(b)
+ if l == 0 {
+ return 0, 0, ErrShortBytes
+ }
+ lead := b[0]
+ spec := &sizes[lead] // get type information
+ size, mode := spec.size, spec.extra
+ if size == 0 {
+ return 0, 0, InvalidPrefixError(lead)
+ }
+ if mode >= 0 { // fixed composites
+ return uintptr(size), uintptr(mode), nil
+ }
+ if l < int(size) {
+ return 0, 0, ErrShortBytes
+ }
+ switch mode {
+ case extra8:
+ return uintptr(size) + uintptr(b[1]), 0, nil
+ case extra16:
+ return uintptr(size) + uintptr(big.Uint16(b[1:])), 0, nil
+ case extra32:
+ return uintptr(size) + uintptr(big.Uint32(b[1:])), 0, nil
+ case map16v:
+ return uintptr(size), 2 * uintptr(big.Uint16(b[1:])), nil
+ case map32v:
+ return uintptr(size), 2 * uintptr(big.Uint32(b[1:])), nil
+ case array16v:
+ return uintptr(size), uintptr(big.Uint16(b[1:])), nil
+ case array32v:
+ return uintptr(size), uintptr(big.Uint32(b[1:])), nil
+ default:
+ return 0, 0, fatal
+ }
+}
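+
+// Usage sketch (illustrative only): each *Bytes call consumes one object
+// and returns the remainder, so a sequence is decoded by threading the
+// slice through successive calls.
+//
+//	s, rest, err := ReadStringBytes(b) // first object: a string
+//	if err == nil {
+//		var n int64
+//		n, rest, err = ReadInt64Bytes(rest) // second object: an integer
+//		_, _ = s, n
+//	}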
diff --git a/vendor/github.com/tinylib/msgp/msgp/read_bytes_test.go b/vendor/github.com/tinylib/msgp/msgp/read_bytes_test.go
new file mode 100644
index 00000000..0049471b
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/read_bytes_test.go
@@ -0,0 +1,518 @@
+package msgp
+
+import (
+ "bytes"
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestReadMapHeaderBytes(t *testing.T) {
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+
+ tests := []uint32{0, 1, 5, 49082}
+
+ for i, v := range tests {
+ buf.Reset()
+ en.WriteMapHeader(v)
+ en.Flush()
+
+ out, left, err := ReadMapHeaderBytes(buf.Bytes())
+ if err != nil {
+ t.Errorf("test case %d: %s", i, err)
+ }
+
+ if len(left) != 0 {
+ t.Errorf("expected 0 bytes left; found %d", len(left))
+ }
+
+ if out != v {
+ t.Errorf("%d in; %d out", v, out)
+ }
+ }
+}
+
+func BenchmarkReadMapHeaderBytes(b *testing.B) {
+ sizes := []uint32{1, 100, tuint16, tuint32}
+ buf := make([]byte, 0, 5*len(sizes))
+ for _, sz := range sizes {
+ buf = AppendMapHeader(buf, sz)
+ }
+ b.SetBytes(int64(len(buf) / len(sizes)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ o := buf
+ for i := 0; i < b.N; i++ {
+ _, buf, _ = ReadMapHeaderBytes(buf)
+ if len(buf) == 0 {
+ buf = o
+ }
+ }
+}
+
+func TestReadArrayHeaderBytes(t *testing.T) {
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+
+ tests := []uint32{0, 1, 5, 49082}
+
+ for i, v := range tests {
+ buf.Reset()
+ en.WriteArrayHeader(v)
+ en.Flush()
+
+ out, left, err := ReadArrayHeaderBytes(buf.Bytes())
+ if err != nil {
+ t.Errorf("test case %d: %s", i, err)
+ }
+
+ if len(left) != 0 {
+ t.Errorf("expected 0 bytes left; found %d", len(left))
+ }
+
+ if out != v {
+ t.Errorf("%d in; %d out", v, out)
+ }
+ }
+}
+
+func BenchmarkReadArrayHeaderBytes(b *testing.B) {
+ sizes := []uint32{1, 100, tuint16, tuint32}
+ buf := make([]byte, 0, 5*len(sizes))
+ for _, sz := range sizes {
+ buf = AppendArrayHeader(buf, sz)
+ }
+ b.SetBytes(int64(len(buf) / len(sizes)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ o := buf
+ for i := 0; i < b.N; i++ {
+ _, buf, _ = ReadArrayHeaderBytes(buf)
+ if len(buf) == 0 {
+ buf = o
+ }
+ }
+}
+
+func TestReadNilBytes(t *testing.T) {
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+ en.WriteNil()
+ en.Flush()
+
+ left, err := ReadNilBytes(buf.Bytes())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) != 0 {
+ t.Errorf("expected 0 bytes left; found %d", len(left))
+ }
+}
+
+func BenchmarkReadNilByte(b *testing.B) {
+ buf := []byte{mnil}
+ b.SetBytes(1)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ ReadNilBytes(buf)
+ }
+}
+
+func TestReadFloat64Bytes(t *testing.T) {
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+ en.WriteFloat64(3.14159)
+ en.Flush()
+
+ out, left, err := ReadFloat64Bytes(buf.Bytes())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) != 0 {
+ t.Errorf("expected 0 bytes left; found %d", len(left))
+ }
+ if out != 3.14159 {
+ t.Errorf("%f in; %f out", 3.14159, out)
+ }
+}
+
+func BenchmarkReadFloat64Bytes(b *testing.B) {
+ f := float64(3.14159)
+ buf := make([]byte, 0, 9)
+ buf = AppendFloat64(buf, f)
+ b.SetBytes(int64(len(buf)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ ReadFloat64Bytes(buf)
+ }
+}
+
+func TestReadFloat32Bytes(t *testing.T) {
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+ en.WriteFloat32(3.1)
+ en.Flush()
+
+ out, left, err := ReadFloat32Bytes(buf.Bytes())
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) != 0 {
+ t.Errorf("expected 0 bytes left; found %d", len(left))
+ }
+ if out != 3.1 {
+ t.Errorf("%f in; %f out", 3.1, out)
+ }
+}
+
+func BenchmarkReadFloat32Bytes(b *testing.B) {
+ f := float32(3.14159)
+ buf := make([]byte, 0, 5)
+ buf = AppendFloat32(buf, f)
+ b.SetBytes(int64(len(buf)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ ReadFloat32Bytes(buf)
+ }
+}
+
+func TestReadBoolBytes(t *testing.T) {
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+
+ tests := []bool{true, false}
+
+ for i, v := range tests {
+ buf.Reset()
+ en.WriteBool(v)
+ en.Flush()
+ out, left, err := ReadBoolBytes(buf.Bytes())
+
+ if err != nil {
+ t.Errorf("test case %d: %s", i, err)
+ }
+
+ if len(left) != 0 {
+ t.Errorf("expected 0 bytes left; found %d", len(left))
+ }
+
+ if out != v {
+ t.Errorf("%t in; %t out", v, out)
+ }
+ }
+}
+
+func BenchmarkReadBoolBytes(b *testing.B) {
+ buf := []byte{mtrue, mfalse, mtrue, mfalse}
+ b.SetBytes(1)
+ b.ReportAllocs()
+ b.ResetTimer()
+ o := buf
+ for i := 0; i < b.N; i++ {
+ _, buf, _ = ReadBoolBytes(buf)
+ if len(buf) == 0 {
+ buf = o
+ }
+ }
+}
+
+func TestReadInt64Bytes(t *testing.T) {
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+
+ tests := []int64{-5, -30, 0, 1, 127, 300, 40921, 34908219}
+
+ for i, v := range tests {
+ buf.Reset()
+ en.WriteInt64(v)
+ en.Flush()
+ out, left, err := ReadInt64Bytes(buf.Bytes())
+
+ if err != nil {
+ t.Errorf("test case %d: %s", i, err)
+ }
+
+ if len(left) != 0 {
+ t.Errorf("expected 0 bytes left; found %d", len(left))
+ }
+
+ if out != v {
+ t.Errorf("%d in; %d out", v, out)
+ }
+ }
+}
+
+func TestReadUint64Bytes(t *testing.T) {
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+
+ tests := []uint64{0, 1, 127, 300, 40921, 34908219}
+
+ for i, v := range tests {
+ buf.Reset()
+ en.WriteUint64(v)
+ en.Flush()
+ out, left, err := ReadUint64Bytes(buf.Bytes())
+
+ if err != nil {
+ t.Errorf("test case %d: %s", i, err)
+ }
+
+ if len(left) != 0 {
+ t.Errorf("expected 0 bytes left; found %d", len(left))
+ }
+
+ if out != v {
+ t.Errorf("%d in; %d out", v, out)
+ }
+ }
+}
+
+func TestReadBytesBytes(t *testing.T) {
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+
+ tests := [][]byte{[]byte{}, []byte("some bytes"), []byte("some more bytes")}
+ var scratch []byte
+
+ for i, v := range tests {
+ buf.Reset()
+ en.WriteBytes(v)
+ en.Flush()
+ out, left, err := ReadBytesBytes(buf.Bytes(), scratch)
+ if err != nil {
+ t.Errorf("test case %d: %s", i, err)
+ }
+ if len(left) != 0 {
+ t.Errorf("expected 0 bytes left; found %d", len(left))
+ }
+ if !bytes.Equal(out, v) {
+ t.Errorf("%q in; %q out", v, out)
+ }
+ }
+}
+
+func TestReadZCBytes(t *testing.T) {
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+
+ tests := [][]byte{[]byte{}, []byte("some bytes"), []byte("some more bytes")}
+
+ for i, v := range tests {
+ buf.Reset()
+ en.WriteBytes(v)
+ en.Flush()
+ out, left, err := ReadBytesZC(buf.Bytes())
+ if err != nil {
+ t.Errorf("test case %d: %s", i, err)
+ }
+ if len(left) != 0 {
+ t.Errorf("expected 0 bytes left; found %d", len(left))
+ }
+ if !bytes.Equal(out, v) {
+ t.Errorf("%q in; %q out", v, out)
+ }
+ }
+}
+
+func TestReadZCString(t *testing.T) {
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+
+ tests := []string{"", "hello", "here's another string......"}
+
+ for i, v := range tests {
+ buf.Reset()
+ en.WriteString(v)
+ en.Flush()
+
+ out, left, err := ReadStringZC(buf.Bytes())
+ if err != nil {
+ t.Errorf("test case %d: %s", i, err)
+ }
+ if len(left) != 0 {
+ t.Errorf("expected 0 bytes left; found %d", len(left))
+ }
+ if string(out) != v {
+ t.Errorf("%q in; %q out", v, out)
+ }
+ }
+}
+
+func TestReadStringBytes(t *testing.T) {
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+
+ tests := []string{"", "hello", "here's another string......"}
+
+ for i, v := range tests {
+ buf.Reset()
+ en.WriteString(v)
+ en.Flush()
+
+ out, left, err := ReadStringBytes(buf.Bytes())
+ if err != nil {
+ t.Errorf("test case %d: %s", i, err)
+ }
+ if len(left) != 0 {
+ t.Errorf("expected 0 bytes left; found %d", len(left))
+ }
+ if out != v {
+ t.Errorf("%q in; %q out", v, out)
+ }
+ }
+}
+
+func TestReadComplex128Bytes(t *testing.T) {
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+
+ tests := []complex128{complex(0, 0), complex(12.8, 32.0)}
+
+ for i, v := range tests {
+ buf.Reset()
+ en.WriteComplex128(v)
+ en.Flush()
+
+ out, left, err := ReadComplex128Bytes(buf.Bytes())
+ if err != nil {
+ t.Errorf("test case %d: %s", i, err)
+ }
+ if len(left) != 0 {
+ t.Errorf("expected 0 bytes left; found %d", len(left))
+ }
+ if out != v {
+ t.Errorf("%f in; %f out", v, out)
+ }
+ }
+}
+
+func TestReadComplex64Bytes(t *testing.T) {
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+
+ tests := []complex64{complex(0, 0), complex(12.8, 32.0)}
+
+ for i, v := range tests {
+ buf.Reset()
+ en.WriteComplex64(v)
+ en.Flush()
+
+ out, left, err := ReadComplex64Bytes(buf.Bytes())
+ if err != nil {
+ t.Errorf("test case %d: %s", i, err)
+ }
+ if len(left) != 0 {
+ t.Errorf("expected 0 bytes left; found %d", len(left))
+ }
+ if out != v {
+ t.Errorf("%f in; %f out", v, out)
+ }
+ }
+}
+
+func TestReadTimeBytes(t *testing.T) {
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+
+ now := time.Now()
+ en.WriteTime(now)
+ en.Flush()
+ out, left, err := ReadTimeBytes(buf.Bytes())
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if len(left) != 0 {
+ t.Errorf("expected 0 bytes left; found %d", len(left))
+ }
+ if !now.Equal(out) {
+ t.Errorf("%s in; %s out", now, out)
+ }
+}
+
+func BenchmarkReadTimeBytes(b *testing.B) {
+ data := AppendTime(nil, time.Now())
+ b.SetBytes(15)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ ReadTimeBytes(data)
+ }
+}
+
+func TestReadIntfBytes(t *testing.T) {
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+
+ tests := make([]interface{}, 0, 10)
+ tests = append(tests, float64(3.5))
+ tests = append(tests, int64(-49082))
+ tests = append(tests, uint64(34908))
+ tests = append(tests, string("hello!"))
+ tests = append(tests, []byte("blah."))
+ tests = append(tests, map[string]interface{}{
+ "key_one": 3.5,
+ "key_two": "hi.",
+ })
+
+ for i, v := range tests {
+ buf.Reset()
+ if err := en.WriteIntf(v); err != nil {
+ t.Fatal(err)
+ }
+ en.Flush()
+
+ out, left, err := ReadIntfBytes(buf.Bytes())
+ if err != nil {
+ t.Errorf("test case %d: %s", i, err)
+ }
+ if len(left) != 0 {
+ t.Errorf("expected 0 bytes left; found %d", len(left))
+ }
+ if !reflect.DeepEqual(v, out) {
+ t.Errorf("ReadIntf(): %v in; %v out", v, out)
+ }
+ }
+
+}
+
+func BenchmarkSkipBytes(b *testing.B) {
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+ en.WriteMapHeader(6)
+
+ en.WriteString("thing_one")
+ en.WriteString("value_one")
+
+ en.WriteString("thing_two")
+ en.WriteFloat64(3.14159)
+
+ en.WriteString("some_bytes")
+ en.WriteBytes([]byte("nkl4321rqw908vxzpojnlk2314rqew098-s09123rdscasd"))
+
+ en.WriteString("the_time")
+ en.WriteTime(time.Now())
+
+ en.WriteString("what?")
+ en.WriteBool(true)
+
+ en.WriteString("ext")
+ en.WriteExtension(&RawExtension{Type: 55, Data: []byte("raw data!!!")})
+ en.Flush()
+
+ bts := buf.Bytes()
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := Skip(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/read_test.go b/vendor/github.com/tinylib/msgp/msgp/read_test.go
new file mode 100644
index 00000000..8e781c10
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/read_test.go
@@ -0,0 +1,770 @@
+package msgp
+
+import (
+ "bytes"
+ "io"
+ "math"
+ "math/rand"
+ "reflect"
+ "testing"
+ "time"
+)
+
+func TestSanity(t *testing.T) {
+ if !isfixint(0) {
+ t.Fatal("WUT.")
+ }
+}
+
+func TestReadIntf(t *testing.T) {
+ // NOTE: if you include cases
+ // with, say, int32s, the test
+	// will fail, because integers are
+ // always read out as int64, and
+ // unsigned integers as uint64
+
+ var testCases = []interface{}{
+ float64(128.032),
+ float32(9082.092),
+ int64(-40),
+ uint64(9082981),
+ time.Now(),
+ "hello!",
+ []byte("hello!"),
+ map[string]interface{}{
+ "thing-1": "thing-1-value",
+ "thing-2": int64(800),
+ "thing-3": []byte("some inner bytes..."),
+ "thing-4": false,
+ },
+ }
+
+ var buf bytes.Buffer
+ var v interface{}
+ dec := NewReader(&buf)
+ enc := NewWriter(&buf)
+
+ for i, ts := range testCases {
+ buf.Reset()
+ err := enc.WriteIntf(ts)
+ if err != nil {
+ t.Errorf("Test case %d: %s", i, err)
+ continue
+ }
+ err = enc.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+ v, err = dec.ReadIntf()
+ if err != nil {
+ t.Errorf("Test case: %d: %s", i, err)
+ }
+
+ /* for time, use time.Equal instead of reflect.DeepEqual */
+ if tm, ok := v.(time.Time); ok {
+		if !tm.Equal(ts.(time.Time)) {
+ t.Errorf("%v != %v", ts, v)
+ }
+ } else if !reflect.DeepEqual(v, ts) {
+ t.Errorf("%v in; %v out", ts, v)
+ }
+ }
+
+}
+
+func TestReadMapHeader(t *testing.T) {
+ tests := []struct {
+ Sz uint32
+ }{
+ {0},
+ {1},
+ {tuint16},
+ {tuint32},
+ }
+
+ var buf bytes.Buffer
+ var sz uint32
+ var err error
+ wr := NewWriter(&buf)
+ rd := NewReader(&buf)
+ for i, test := range tests {
+ buf.Reset()
+ err = wr.WriteMapHeader(test.Sz)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+ sz, err = rd.ReadMapHeader()
+ if err != nil {
+ t.Errorf("Test case %d: got error %s", i, err)
+ }
+ if sz != test.Sz {
+ t.Errorf("Test case %d: wrote size %d; got size %d", i, test.Sz, sz)
+ }
+ }
+}
+
+func BenchmarkReadMapHeader(b *testing.B) {
+ sizes := []uint32{0, 1, tuint16, tuint32}
+ data := make([]byte, 0, len(sizes)*5)
+ for _, d := range sizes {
+ data = AppendMapHeader(data, d)
+ }
+ rd := NewReader(NewEndlessReader(data, b))
+ b.SetBytes(int64(len(data) / len(sizes)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ rd.ReadMapHeader()
+ }
+}
+
+func TestReadArrayHeader(t *testing.T) {
+ tests := []struct {
+ Sz uint32
+ }{
+ {0},
+ {1},
+ {tuint16},
+ {tuint32},
+ }
+
+ var buf bytes.Buffer
+ var sz uint32
+ var err error
+ wr := NewWriter(&buf)
+ rd := NewReader(&buf)
+ for i, test := range tests {
+ buf.Reset()
+ err = wr.WriteArrayHeader(test.Sz)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+ sz, err = rd.ReadArrayHeader()
+ if err != nil {
+ t.Errorf("Test case %d: got error %s", i, err)
+ }
+ if sz != test.Sz {
+ t.Errorf("Test case %d: wrote size %d; got size %d", i, test.Sz, sz)
+ }
+ }
+}
+
+func BenchmarkReadArrayHeader(b *testing.B) {
+ sizes := []uint32{0, 1, tuint16, tuint32}
+ data := make([]byte, 0, len(sizes)*5)
+ for _, d := range sizes {
+ data = AppendArrayHeader(data, d)
+ }
+ rd := NewReader(NewEndlessReader(data, b))
+ b.ReportAllocs()
+ b.SetBytes(int64(len(data) / len(sizes)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ rd.ReadArrayHeader()
+ }
+}
+
+func TestReadNil(t *testing.T) {
+ var buf bytes.Buffer
+ wr := NewWriter(&buf)
+ rd := NewReader(&buf)
+
+ wr.WriteNil()
+ wr.Flush()
+ err := rd.ReadNil()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func BenchmarkReadNil(b *testing.B) {
+ data := AppendNil(nil)
+ rd := NewReader(NewEndlessReader(data, b))
+ b.ReportAllocs()
+ b.SetBytes(1)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ err := rd.ReadNil()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestReadFloat64(t *testing.T) {
+ var buf bytes.Buffer
+ wr := NewWriter(&buf)
+ rd := NewReader(&buf)
+
+ for i := 0; i < 100; i++ {
+ buf.Reset()
+
+ flt := (rand.Float64() - 0.5) * math.MaxFloat64
+ err := wr.WriteFloat64(flt)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+ out, err := rd.ReadFloat64()
+ if err != nil {
+ t.Errorf("Error reading %f: %s", flt, err)
+ continue
+ }
+
+ if out != flt {
+ t.Errorf("Put in %f but got out %f", flt, out)
+ }
+ }
+}
+
+func BenchmarkReadFloat64(b *testing.B) {
+ fs := []float64{rand.Float64(), rand.Float64(), rand.Float64(), rand.Float64()}
+ data := make([]byte, 0, 9*len(fs))
+ for _, f := range fs {
+ data = AppendFloat64(data, f)
+ }
+ rd := NewReader(NewEndlessReader(data, b))
+ b.SetBytes(9)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := rd.ReadFloat64()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestReadFloat32(t *testing.T) {
+ var buf bytes.Buffer
+ wr := NewWriter(&buf)
+ rd := NewReader(&buf)
+
+ for i := 0; i < 10000; i++ {
+ buf.Reset()
+
+ flt := (rand.Float32() - 0.5) * math.MaxFloat32
+ err := wr.WriteFloat32(flt)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+ out, err := rd.ReadFloat32()
+ if err != nil {
+ t.Errorf("Error reading %f: %s", flt, err)
+ continue
+ }
+
+ if out != flt {
+ t.Errorf("Put in %f but got out %f", flt, out)
+ }
+ }
+}
+
+func BenchmarkReadFloat32(b *testing.B) {
+ fs := []float32{rand.Float32(), rand.Float32(), rand.Float32(), rand.Float32()}
+ data := make([]byte, 0, 5*len(fs))
+ for _, f := range fs {
+ data = AppendFloat32(data, f)
+ }
+ rd := NewReader(NewEndlessReader(data, b))
+ b.SetBytes(5)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := rd.ReadFloat32()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestReadInt64(t *testing.T) {
+ var buf bytes.Buffer
+ wr := NewWriter(&buf)
+ rd := NewReader(&buf)
+
+ ints := []int64{-100000, -5000, -5, 0, 8, 240, int64(tuint16), int64(tuint32), int64(tuint64)}
+
+ for i, num := range ints {
+ buf.Reset()
+
+ err := wr.WriteInt64(num)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+ out, err := rd.ReadInt64()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if out != num {
+ t.Errorf("Test case %d: put %d in and got %d out", i, num, out)
+ }
+ }
+}
+
+func BenchmarkReadInt64(b *testing.B) {
+ is := []int64{0, 1, 65000, rand.Int63()}
+ data := make([]byte, 0, 9*len(is))
+ for _, n := range is {
+ data = AppendInt64(data, n)
+ }
+ rd := NewReader(NewEndlessReader(data, b))
+ b.SetBytes(int64(len(data) / len(is)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := rd.ReadInt64()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestReadUint64(t *testing.T) {
+ var buf bytes.Buffer
+ wr := NewWriter(&buf)
+ rd := NewReader(&buf)
+
+ ints := []uint64{0, 8, 240, uint64(tuint16), uint64(tuint32), uint64(tuint64)}
+
+ for i, num := range ints {
+ buf.Reset()
+
+ err := wr.WriteUint64(num)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+		out, err := rd.ReadUint64()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if out != num {
+ t.Errorf("Test case %d: put %d in and got %d out", i, num, out)
+ }
+ }
+}
+
+func BenchmarkReadUint64(b *testing.B) {
+ us := []uint64{0, 1, 10000, uint64(rand.Uint32() * 4)}
+ data := make([]byte, 0, 9*len(us))
+ for _, n := range us {
+ data = AppendUint64(data, n)
+ }
+ rd := NewReader(NewEndlessReader(data, b))
+ b.SetBytes(int64(len(data) / len(us)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := rd.ReadUint64()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestReadBytes(t *testing.T) {
+ var buf bytes.Buffer
+ wr := NewWriter(&buf)
+ rd := NewReader(&buf)
+
+ sizes := []int{0, 1, 225, int(tuint32)}
+ var scratch []byte
+ for i, size := range sizes {
+ buf.Reset()
+ bts := RandBytes(size)
+
+ err := wr.WriteBytes(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ out, err := rd.ReadBytes(scratch)
+ if err != nil {
+ t.Errorf("test case %d: %s", i, err)
+ continue
+ }
+
+ if !bytes.Equal(bts, out) {
+ t.Errorf("test case %d: Bytes not equal.", i)
+ }
+
+ }
+}
+
+func benchBytes(size uint32, b *testing.B) {
+ data := make([]byte, 0, size+5)
+ data = AppendBytes(data, RandBytes(int(size)))
+
+ rd := NewReader(NewEndlessReader(data, b))
+ b.SetBytes(int64(len(data)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ var scratch []byte
+ var err error
+ for i := 0; i < b.N; i++ {
+ scratch, err = rd.ReadBytes(scratch)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkRead16Bytes(b *testing.B) {
+ benchBytes(16, b)
+}
+
+func BenchmarkRead256Bytes(b *testing.B) {
+ benchBytes(256, b)
+}
+
+// This particular case creates
+// an object larger than the default
+// read buffer size, so it's a decent
+// indicator of worst-case performance.
+func BenchmarkRead2048Bytes(b *testing.B) {
+ benchBytes(2048, b)
+}
+
+func TestReadString(t *testing.T) {
+ var buf bytes.Buffer
+ wr := NewWriter(&buf)
+ rd := NewReader(&buf)
+
+ sizes := []int{0, 1, 225, int(math.MaxUint16 + 5)}
+ for i, size := range sizes {
+ buf.Reset()
+ in := string(RandBytes(size))
+
+ err := wr.WriteString(in)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ out, err := rd.ReadString()
+ if err != nil {
+ t.Errorf("test case %d: %s", i, err)
+ }
+ if out != in {
+ t.Errorf("test case %d: strings not equal.", i)
+ t.Errorf("string (len = %d) in; string (len = %d) out", size, len(out))
+ }
+
+ }
+}
+
+func benchString(size uint32, b *testing.B) {
+ str := string(RandBytes(int(size)))
+ data := make([]byte, 0, len(str)+5)
+ data = AppendString(data, str)
+ rd := NewReader(NewEndlessReader(data, b))
+ b.SetBytes(int64(len(data)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := rd.ReadString()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func benchStringAsBytes(size uint32, b *testing.B) {
+ str := string(RandBytes(int(size)))
+ data := make([]byte, 0, len(str)+5)
+ data = AppendString(data, str)
+ rd := NewReader(NewEndlessReader(data, b))
+ b.SetBytes(int64(len(data)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ var scratch []byte
+ var err error
+ for i := 0; i < b.N; i++ {
+ scratch, err = rd.ReadStringAsBytes(scratch)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func BenchmarkRead16StringAsBytes(b *testing.B) {
+ benchStringAsBytes(16, b)
+}
+
+func BenchmarkRead256StringAsBytes(b *testing.B) {
+ benchStringAsBytes(256, b)
+}
+
+func BenchmarkRead16String(b *testing.B) {
+ benchString(16, b)
+}
+
+func BenchmarkRead256String(b *testing.B) {
+ benchString(256, b)
+}
+
+func TestReadComplex64(t *testing.T) {
+ var buf bytes.Buffer
+ wr := NewWriter(&buf)
+ rd := NewReader(&buf)
+
+ for i := 0; i < 100; i++ {
+ buf.Reset()
+ f := complex(rand.Float32()*math.MaxFloat32, rand.Float32()*math.MaxFloat32)
+
+ wr.WriteComplex64(f)
+ err := wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ out, err := rd.ReadComplex64()
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ if out != f {
+ t.Errorf("Wrote %f; read %f", f, out)
+ }
+
+ }
+}
+
+func BenchmarkReadComplex64(b *testing.B) {
+ f := complex(rand.Float32(), rand.Float32())
+ data := AppendComplex64(nil, f)
+ rd := NewReader(NewEndlessReader(data, b))
+ b.SetBytes(int64(len(data)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := rd.ReadComplex64()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestReadComplex128(t *testing.T) {
+ var buf bytes.Buffer
+ wr := NewWriter(&buf)
+ rd := NewReader(&buf)
+
+ for i := 0; i < 10; i++ {
+ buf.Reset()
+ f := complex(rand.Float64()*math.MaxFloat64, rand.Float64()*math.MaxFloat64)
+
+ wr.WriteComplex128(f)
+ err := wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ out, err := rd.ReadComplex128()
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ if out != f {
+ t.Errorf("Wrote %f; read %f", f, out)
+ }
+
+ }
+}
+
+func BenchmarkReadComplex128(b *testing.B) {
+ f := complex(rand.Float64(), rand.Float64())
+ data := AppendComplex128(nil, f)
+ rd := NewReader(NewEndlessReader(data, b))
+ b.SetBytes(int64(len(data)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := rd.ReadComplex128()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestTime(t *testing.T) {
+ var buf bytes.Buffer
+ now := time.Now()
+ en := NewWriter(&buf)
+ dc := NewReader(&buf)
+
+ err := en.WriteTime(now)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = en.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ out, err := dc.ReadTime()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // check for equivalence
+ if !now.Equal(out) {
+ t.Fatalf("%s in; %s out", now, out)
+ }
+}
+
+func BenchmarkReadTime(b *testing.B) {
+ t := time.Now()
+ data := AppendTime(nil, t)
+ rd := NewReader(NewEndlessReader(data, b))
+ b.SetBytes(int64(len(data)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := rd.ReadTime()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestSkip(t *testing.T) {
+ var buf bytes.Buffer
+ wr := NewWriter(&buf)
+ rd := NewReader(&buf)
+
+ wr.WriteMapHeader(4)
+ wr.WriteString("key_1")
+ wr.WriteBytes([]byte("value_1"))
+ wr.WriteString("key_2")
+ wr.WriteFloat64(2.0)
+ wr.WriteString("key_3")
+ wr.WriteComplex128(3.0i)
+ wr.WriteString("key_4")
+ wr.WriteInt64(49080432189)
+ wr.Flush()
+
+ // this should skip the whole map
+ err := rd.Skip()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tp, err := rd.NextType()
+ if err != io.EOF {
+ t.Errorf("expected %q; got %q", io.EOF, err)
+ t.Errorf("returned type %q", tp)
+ }
+
+}
+
+func BenchmarkSkip(b *testing.B) {
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+ en.WriteMapHeader(6)
+
+ en.WriteString("thing_one")
+ en.WriteString("value_one")
+
+ en.WriteString("thing_two")
+ en.WriteFloat64(3.14159)
+
+ en.WriteString("some_bytes")
+ en.WriteBytes([]byte("nkl4321rqw908vxzpojnlk2314rqew098-s09123rdscasd"))
+
+ en.WriteString("the_time")
+ en.WriteTime(time.Now())
+
+ en.WriteString("what?")
+ en.WriteBool(true)
+
+ en.WriteString("ext")
+ en.WriteExtension(&RawExtension{Type: 55, Data: []byte("raw data!!!")})
+ en.Flush()
+
+ bts := buf.Bytes()
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ rd := NewReader(NewEndlessReader(bts, b))
+ for i := 0; i < b.N; i++ {
+ err := rd.Skip()
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestCopyNext(t *testing.T) {
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+
+ en.WriteMapHeader(6)
+
+ en.WriteString("thing_one")
+ en.WriteString("value_one")
+
+ en.WriteString("thing_two")
+ en.WriteFloat64(3.14159)
+
+ en.WriteString("some_bytes")
+ en.WriteBytes([]byte("nkl4321rqw908vxzpojnlk2314rqew098-s09123rdscasd"))
+
+ en.WriteString("the_time")
+ en.WriteTime(time.Now())
+
+ en.WriteString("what?")
+ en.WriteBool(true)
+
+ en.WriteString("ext")
+ en.WriteExtension(&RawExtension{Type: 55, Data: []byte("raw data!!!")})
+
+ en.Flush()
+
+ // Read from a copy of the original buf.
+ de := NewReader(bytes.NewReader(buf.Bytes()))
+
+ w := new(bytes.Buffer)
+
+ n, err := de.CopyNext(w)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if n != int64(buf.Len()) {
+ t.Fatalf("CopyNext returned the wrong value (%d != %d)",
+ n, buf.Len())
+ }
+
+ if !bytes.Equal(buf.Bytes(), w.Bytes()) {
+ t.Fatalf("not equal! %v, %v", buf.Bytes(), w.Bytes())
+ }
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/size.go b/vendor/github.com/tinylib/msgp/msgp/size.go
new file mode 100644
index 00000000..ce2f8b16
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/size.go
@@ -0,0 +1,38 @@
+package msgp
+
+// The sizes provided are the worst-case
+// encoded sizes for each type. For
+// variable-length types ([]byte, string),
+// the total encoded size is the prefix
+// size plus the length of the object.
+const (
+ Int64Size = 9
+ IntSize = Int64Size
+ UintSize = Int64Size
+ Int8Size = 2
+ Int16Size = 3
+ Int32Size = 5
+ Uint8Size = 2
+ ByteSize = Uint8Size
+ Uint16Size = 3
+ Uint32Size = 5
+ Uint64Size = Int64Size
+ Float64Size = 9
+ Float32Size = 5
+ Complex64Size = 10
+ Complex128Size = 18
+
+ TimeSize = 15
+ BoolSize = 1
+ NilSize = 1
+
+ MapHeaderSize = 5
+ ArrayHeaderSize = 5
+
+ BytesPrefixSize = 5
+ StringPrefixSize = 5
+ ExtensionPrefixSize = 6
+)
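+
+// Illustrative only: generated Msgsize methods sum these constants.
+// For example, the worst-case encoded size of the map
+// {"name": s, "count": int64(n)} is
+//
+//	MapHeaderSize + 2*StringPrefixSize + len("name") + len("count") +
+//		StringPrefixSize + len(s) + Int64Size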
diff --git a/vendor/github.com/tinylib/msgp/msgp/unsafe.go b/vendor/github.com/tinylib/msgp/msgp/unsafe.go
new file mode 100644
index 00000000..0cb972e3
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/unsafe.go
@@ -0,0 +1,40 @@
+// +build !appengine
+
+package msgp
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// NOTE:
+// all of the definitions in this file
+// should be repeated in appengine.go,
+// but without using unsafe
+
+const (
+ // spec says int and uint are always
+ // the same size, but that int/uint
+ // size may not be machine word size
+ smallint = unsafe.Sizeof(int(0)) == 4
+)
+
+// UnsafeString returns the byte slice as a volatile string
+// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR.
+// THIS IS EVIL CODE.
+// YOU HAVE BEEN WARNED.
+func UnsafeString(b []byte) string {
+ return *(*string)(unsafe.Pointer(&reflect.StringHeader{Data: uintptr(unsafe.Pointer(&b[0])), Len: len(b)}))
+}
+
+// UnsafeBytes returns the string as a byte slice
+// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR.
+// THIS IS EVIL CODE.
+// YOU HAVE BEEN WARNED.
+func UnsafeBytes(s string) []byte {
+ return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
+ Len: len(s),
+ Cap: len(s),
+ Data: (*(*reflect.StringHeader)(unsafe.Pointer(&s))).Data,
+ }))
+}
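+
+// Cautionary sketch (illustrative only): the returned string aliases
+// the input slice, so mutating the slice afterwards silently changes
+// the string.
+//
+//	b := []byte("abc")
+//	s := UnsafeString(b)
+//	b[0] = 'x' // s now reads "xbc"; only safe if b is never modified again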
diff --git a/vendor/github.com/tinylib/msgp/msgp/write.go b/vendor/github.com/tinylib/msgp/msgp/write.go
new file mode 100644
index 00000000..da9099c2
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/write.go
@@ -0,0 +1,845 @@
+package msgp
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "sync"
+ "time"
+)
+
+// Sizer is an interface implemented
+// by types that can estimate their
+// size when MessagePack encoded.
+// This interface is optional, but
+// encoding/marshaling implementations
+// may use this as a way to pre-allocate
+// memory for serialization.
+type Sizer interface {
+ Msgsize() int
+}
+
+var (
+ // Nowhere is an io.Writer to nowhere
+ Nowhere io.Writer = nwhere{}
+
+ btsType = reflect.TypeOf(([]byte)(nil))
+ writerPool = sync.Pool{
+ New: func() interface{} {
+ return &Writer{buf: make([]byte, 2048)}
+ },
+ }
+)
+
+func popWriter(w io.Writer) *Writer {
+ wr := writerPool.Get().(*Writer)
+ wr.Reset(w)
+ return wr
+}
+
+func pushWriter(wr *Writer) {
+ wr.w = nil
+ wr.wloc = 0
+ writerPool.Put(wr)
+}
+
+// freeW frees a writer for use
+// by other processes. It is not necessary
+// to call freeW on a writer. However, maintaining
+// a reference to a *Writer after calling freeW on
+// it will cause undefined behavior.
+func freeW(w *Writer) { pushWriter(w) }
+
+// Require ensures that cap(old)-len(old) >= extra.
+func Require(old []byte, extra int) []byte {
+ l := len(old)
+ c := cap(old)
+ r := l + extra
+ if c >= r {
+ return old
+ } else if l == 0 {
+ return make([]byte, 0, extra)
+ }
+ // the new size is the greater
+ // of double the old capacity
+ // and the sum of the old length
+ // and the number of new bytes
+ // necessary.
+ c <<= 1
+ if c < r {
+ c = r
+ }
+ n := make([]byte, l, c)
+ copy(n, old)
+ return n
+}
+
+// nowhere writer
+type nwhere struct{}
+
+func (n nwhere) Write(p []byte) (int, error) { return len(p), nil }
+
+// Marshaler is the interface implemented
+// by types that know how to marshal themselves
+// as MessagePack. MarshalMsg appends the marshalled
+// form of the object to the provided
+// byte slice, returning the extended
+// slice and any errors encountered.
+type Marshaler interface {
+ MarshalMsg([]byte) ([]byte, error)
+}
+
+// Encodable is the interface implemented
+// by types that know how to write themselves
+// as MessagePack using a *msgp.Writer.
+type Encodable interface {
+ EncodeMsg(*Writer) error
+}
+
+// Writer is a buffered writer
+// that can be used to write
+// MessagePack objects to an io.Writer.
+// You must call *Writer.Flush() in order
+// to flush all of the buffered data
+// to the underlying writer.
+type Writer struct {
+ w io.Writer
+ buf []byte
+ wloc int
+}
+
+// NewWriter returns a new *Writer.
+func NewWriter(w io.Writer) *Writer {
+ if wr, ok := w.(*Writer); ok {
+ return wr
+ }
+ return popWriter(w)
+}
+
+// NewWriterSize returns a writer with a custom buffer size.
+func NewWriterSize(w io.Writer, sz int) *Writer {
+ // we must be able to require() 18
+ // contiguous bytes, so that is the
+ // practical minimum buffer size
+ if sz < 18 {
+ sz = 18
+ }
+
+ return &Writer{
+ w: w,
+ buf: make([]byte, sz),
+ }
+}
+
+// Encode encodes an Encodable to an io.Writer.
+func Encode(w io.Writer, e Encodable) error {
+ wr := NewWriter(w)
+ err := e.EncodeMsg(wr)
+ if err == nil {
+ err = wr.Flush()
+ }
+ freeW(wr)
+ return err
+}
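+
+// Usage sketch (illustrative only; myValue stands for any type
+// implementing Encodable):
+//
+//	var buf bytes.Buffer
+//	if err := Encode(&buf, myValue); err != nil {
+//		// handle the error
+//	}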
+
+func (mw *Writer) flush() error {
+ if mw.wloc == 0 {
+ return nil
+ }
+ n, err := mw.w.Write(mw.buf[:mw.wloc])
+ if err != nil {
+ if n > 0 {
+ mw.wloc = copy(mw.buf, mw.buf[n:mw.wloc])
+ }
+ return err
+ }
+ mw.wloc = 0
+ return nil
+}
+
+// Flush flushes all of the buffered
+// data to the underlying writer.
+func (mw *Writer) Flush() error { return mw.flush() }
+
+// Buffered returns the free space remaining in the
+// write buffer; despite the name, it is not the
+// count of buffered-but-unflushed bytes
+func (mw *Writer) Buffered() int { return len(mw.buf) - mw.wloc }
+
+func (mw *Writer) avail() int { return len(mw.buf) - mw.wloc }
+
+func (mw *Writer) bufsize() int { return len(mw.buf) }
+
+// NOTE: this should only be called with
+// a number that is guaranteed to be less than
+// len(mw.buf). typically, it is called with a constant.
+//
+// NOTE: this is a hot code path
+func (mw *Writer) require(n int) (int, error) {
+ c := len(mw.buf)
+ wl := mw.wloc
+ if c-wl < n {
+ if err := mw.flush(); err != nil {
+ return 0, err
+ }
+ wl = mw.wloc
+ }
+ mw.wloc += n
+ return wl, nil
+}
+
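+// Append appends the given bytes to the
+// writer's buffer, flushing first if there
+// is not enough room.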
+func (mw *Writer) Append(b ...byte) error {
+ if mw.avail() < len(b) {
+ err := mw.flush()
+ if err != nil {
+ return err
+ }
+ }
+ mw.wloc += copy(mw.buf[mw.wloc:], b)
+ return nil
+}
+
+// push one byte onto the buffer
+//
+// NOTE: this is a hot code path
+func (mw *Writer) push(b byte) error {
+ if mw.wloc == len(mw.buf) {
+ if err := mw.flush(); err != nil {
+ return err
+ }
+ }
+ mw.buf[mw.wloc] = b
+ mw.wloc++
+ return nil
+}
+
+func (mw *Writer) prefix8(b byte, u uint8) error {
+ const need = 2
+ if len(mw.buf)-mw.wloc < need {
+ if err := mw.flush(); err != nil {
+ return err
+ }
+ }
+ prefixu8(mw.buf[mw.wloc:], b, u)
+ mw.wloc += need
+ return nil
+}
+
+func (mw *Writer) prefix16(b byte, u uint16) error {
+ const need = 3
+ if len(mw.buf)-mw.wloc < need {
+ if err := mw.flush(); err != nil {
+ return err
+ }
+ }
+ prefixu16(mw.buf[mw.wloc:], b, u)
+ mw.wloc += need
+ return nil
+}
+
+func (mw *Writer) prefix32(b byte, u uint32) error {
+ const need = 5
+ if len(mw.buf)-mw.wloc < need {
+ if err := mw.flush(); err != nil {
+ return err
+ }
+ }
+ prefixu32(mw.buf[mw.wloc:], b, u)
+ mw.wloc += need
+ return nil
+}
+
+func (mw *Writer) prefix64(b byte, u uint64) error {
+ const need = 9
+ if len(mw.buf)-mw.wloc < need {
+ if err := mw.flush(); err != nil {
+ return err
+ }
+ }
+ prefixu64(mw.buf[mw.wloc:], b, u)
+ mw.wloc += need
+ return nil
+}
+
+// Write implements io.Writer, and writes
+// data directly to the buffer.
+func (mw *Writer) Write(p []byte) (int, error) {
+ l := len(p)
+ if mw.avail() < l {
+ if err := mw.flush(); err != nil {
+ return 0, err
+ }
+ if l > len(mw.buf) {
+ return mw.w.Write(p)
+ }
+ }
+ mw.wloc += copy(mw.buf[mw.wloc:], p)
+ return l, nil
+}
+
+// writeString writes the bytes of s to the buffer,
+// spilling directly to the underlying writer when
+// s is larger than the buffer
+func (mw *Writer) writeString(s string) error {
+ l := len(s)
+ if mw.avail() < l {
+ if err := mw.flush(); err != nil {
+ return err
+ }
+ if l > len(mw.buf) {
+ _, err := io.WriteString(mw.w, s)
+ return err
+ }
+ }
+ mw.wloc += copy(mw.buf[mw.wloc:], s)
+ return nil
+}
+
+// Reset changes the underlying writer used by the Writer
+func (mw *Writer) Reset(w io.Writer) {
+ mw.buf = mw.buf[:cap(mw.buf)]
+ mw.w = w
+ mw.wloc = 0
+}
+
+// WriteMapHeader writes a map header of the given
+// size to the writer
+func (mw *Writer) WriteMapHeader(sz uint32) error {
+ switch {
+ case sz <= 15:
+ return mw.push(wfixmap(uint8(sz)))
+ case sz <= math.MaxUint16:
+ return mw.prefix16(mmap16, uint16(sz))
+ default:
+ return mw.prefix32(mmap32, sz)
+ }
+}
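+
+// Usage sketch (illustrative only; keys and values are hypothetical).
+// The header declares the number of key/value pairs, and the caller
+// must then write exactly that many pairs:
+//
+//	mw.WriteMapHeader(2)
+//	mw.WriteString("name")
+//	mw.WriteString("vulcanizedb")
+//	mw.WriteString("count")
+//	mw.WriteInt64(42)
+//	mw.Flush()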
+
+// WriteArrayHeader writes an array header of the
+// given size to the writer
+func (mw *Writer) WriteArrayHeader(sz uint32) error {
+ switch {
+ case sz <= 15:
+ return mw.push(wfixarray(uint8(sz)))
+ case sz <= math.MaxUint16:
+ return mw.prefix16(marray16, uint16(sz))
+ default:
+ return mw.prefix32(marray32, sz)
+ }
+}
+
+// WriteNil writes a nil byte to the buffer
+func (mw *Writer) WriteNil() error {
+ return mw.push(mnil)
+}
+
+// WriteFloat64 writes a float64 to the writer
+func (mw *Writer) WriteFloat64(f float64) error {
+ return mw.prefix64(mfloat64, math.Float64bits(f))
+}
+
+// WriteFloat32 writes a float32 to the writer
+func (mw *Writer) WriteFloat32(f float32) error {
+ return mw.prefix32(mfloat32, math.Float32bits(f))
+}
+
+// WriteInt64 writes an int64 to the writer
+func (mw *Writer) WriteInt64(i int64) error {
+ if i >= 0 {
+ switch {
+ case i <= math.MaxInt8:
+ return mw.push(wfixint(uint8(i)))
+ case i <= math.MaxInt16:
+ return mw.prefix16(mint16, uint16(i))
+ case i <= math.MaxInt32:
+ return mw.prefix32(mint32, uint32(i))
+ default:
+ return mw.prefix64(mint64, uint64(i))
+ }
+ }
+ switch {
+ case i >= -32:
+ return mw.push(wnfixint(int8(i)))
+ case i >= math.MinInt8:
+ return mw.prefix8(mint8, uint8(i))
+ case i >= math.MinInt16:
+ return mw.prefix16(mint16, uint16(i))
+ case i >= math.MinInt32:
+ return mw.prefix32(mint32, uint32(i))
+ default:
+ return mw.prefix64(mint64, uint64(i))
+ }
+}
+
+// WriteInt8 writes an int8 to the writer
+func (mw *Writer) WriteInt8(i int8) error { return mw.WriteInt64(int64(i)) }
+
+// WriteInt16 writes an int16 to the writer
+func (mw *Writer) WriteInt16(i int16) error { return mw.WriteInt64(int64(i)) }
+
+// WriteInt32 writes an int32 to the writer
+func (mw *Writer) WriteInt32(i int32) error { return mw.WriteInt64(int64(i)) }
+
+// WriteInt writes an int to the writer
+func (mw *Writer) WriteInt(i int) error { return mw.WriteInt64(int64(i)) }
+
+// WriteUint64 writes a uint64 to the writer
+func (mw *Writer) WriteUint64(u uint64) error {
+ switch {
+ case u <= (1<<7)-1:
+ return mw.push(wfixint(uint8(u)))
+ case u <= math.MaxUint8:
+ return mw.prefix8(muint8, uint8(u))
+ case u <= math.MaxUint16:
+ return mw.prefix16(muint16, uint16(u))
+ case u <= math.MaxUint32:
+ return mw.prefix32(muint32, uint32(u))
+ default:
+ return mw.prefix64(muint64, u)
+ }
+}
+
+// WriteByte is analogous to WriteUint8
+func (mw *Writer) WriteByte(u byte) error { return mw.WriteUint8(uint8(u)) }
+
+// WriteUint8 writes a uint8 to the writer
+func (mw *Writer) WriteUint8(u uint8) error { return mw.WriteUint64(uint64(u)) }
+
+// WriteUint16 writes a uint16 to the writer
+func (mw *Writer) WriteUint16(u uint16) error { return mw.WriteUint64(uint64(u)) }
+
+// WriteUint32 writes a uint32 to the writer
+func (mw *Writer) WriteUint32(u uint32) error { return mw.WriteUint64(uint64(u)) }
+
+// WriteUint writes a uint to the writer
+func (mw *Writer) WriteUint(u uint) error { return mw.WriteUint64(uint64(u)) }
+
+// WriteBytes writes binary data as a MessagePack 'bin' object to the writer
+func (mw *Writer) WriteBytes(b []byte) error {
+ sz := uint32(len(b))
+ var err error
+ switch {
+ case sz <= math.MaxUint8:
+ err = mw.prefix8(mbin8, uint8(sz))
+ case sz <= math.MaxUint16:
+ err = mw.prefix16(mbin16, uint16(sz))
+ default:
+ err = mw.prefix32(mbin32, sz)
+ }
+ if err != nil {
+ return err
+ }
+ _, err = mw.Write(b)
+ return err
+}
+
+// WriteBytesHeader writes just the size header
+// of a MessagePack 'bin' object. The user is responsible
+// for then writing 'sz' more bytes into the stream.
+func (mw *Writer) WriteBytesHeader(sz uint32) error {
+ switch {
+ case sz <= math.MaxUint8:
+ return mw.prefix8(mbin8, uint8(sz))
+ case sz <= math.MaxUint16:
+ return mw.prefix16(mbin16, uint16(sz))
+ default:
+ return mw.prefix32(mbin32, sz)
+ }
+}
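+
+// An illustrative sketch (not upstream documentation) of the contract
+// described above, where mw is a *Writer and 'payload' is a hypothetical
+// []byte:
+//
+//	_ = mw.WriteBytesHeader(uint32(len(payload)))
+//	_, _ = mw.Write(payload)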
+
+// WriteBool writes a bool to the writer
+func (mw *Writer) WriteBool(b bool) error {
+ if b {
+ return mw.push(mtrue)
+ }
+ return mw.push(mfalse)
+}
+
+// WriteString writes a messagepack string to the writer.
+// (This is NOT an implementation of io.StringWriter)
+func (mw *Writer) WriteString(s string) error {
+ sz := uint32(len(s))
+ var err error
+ switch {
+ case sz <= 31:
+ err = mw.push(wfixstr(uint8(sz)))
+ case sz <= math.MaxUint8:
+ err = mw.prefix8(mstr8, uint8(sz))
+ case sz <= math.MaxUint16:
+ err = mw.prefix16(mstr16, uint16(sz))
+ default:
+ err = mw.prefix32(mstr32, sz)
+ }
+ if err != nil {
+ return err
+ }
+ return mw.writeString(s)
+}
+
+// WriteStringHeader writes just the string size
+// header of a MessagePack 'str' object. The user
+// is responsible for writing 'sz' more valid UTF-8
+// bytes to the stream.
+func (mw *Writer) WriteStringHeader(sz uint32) error {
+ switch {
+ case sz <= 31:
+ return mw.push(wfixstr(uint8(sz)))
+ case sz <= math.MaxUint8:
+ return mw.prefix8(mstr8, uint8(sz))
+ case sz <= math.MaxUint16:
+ return mw.prefix16(mstr16, uint16(sz))
+ default:
+ return mw.prefix32(mstr32, sz)
+ }
+}
+
+// WriteStringFromBytes writes a 'str' object
+// from a []byte.
+func (mw *Writer) WriteStringFromBytes(str []byte) error {
+ sz := uint32(len(str))
+ var err error
+ switch {
+ case sz <= 31:
+ err = mw.push(wfixstr(uint8(sz)))
+ case sz <= math.MaxUint8:
+ err = mw.prefix8(mstr8, uint8(sz))
+ case sz <= math.MaxUint16:
+ err = mw.prefix16(mstr16, uint16(sz))
+ default:
+ err = mw.prefix32(mstr32, sz)
+ }
+ if err != nil {
+ return err
+ }
+ _, err = mw.Write(str)
+ return err
+}
+
+// WriteComplex64 writes a complex64 to the writer
+func (mw *Writer) WriteComplex64(f complex64) error {
+ o, err := mw.require(10)
+ if err != nil {
+ return err
+ }
+ mw.buf[o] = mfixext8
+ mw.buf[o+1] = Complex64Extension
+ big.PutUint32(mw.buf[o+2:], math.Float32bits(real(f)))
+ big.PutUint32(mw.buf[o+6:], math.Float32bits(imag(f)))
+ return nil
+}
+
+// WriteComplex128 writes a complex128 to the writer
+func (mw *Writer) WriteComplex128(f complex128) error {
+ o, err := mw.require(18)
+ if err != nil {
+ return err
+ }
+ mw.buf[o] = mfixext16
+ mw.buf[o+1] = Complex128Extension
+ big.PutUint64(mw.buf[o+2:], math.Float64bits(real(f)))
+ big.PutUint64(mw.buf[o+10:], math.Float64bits(imag(f)))
+ return nil
+}
+
+// WriteMapStrStr writes a map[string]string to the writer
+func (mw *Writer) WriteMapStrStr(mp map[string]string) (err error) {
+ err = mw.WriteMapHeader(uint32(len(mp)))
+ if err != nil {
+ return
+ }
+ for key, val := range mp {
+ err = mw.WriteString(key)
+ if err != nil {
+ return
+ }
+ err = mw.WriteString(val)
+ if err != nil {
+ return
+ }
+ }
+ return nil
+}
+
+// WriteMapStrIntf writes a map[string]interface{} to the writer
+func (mw *Writer) WriteMapStrIntf(mp map[string]interface{}) (err error) {
+ err = mw.WriteMapHeader(uint32(len(mp)))
+ if err != nil {
+ return
+ }
+ for key, val := range mp {
+ err = mw.WriteString(key)
+ if err != nil {
+ return
+ }
+ err = mw.WriteIntf(val)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// WriteTime writes a time.Time object to the wire.
+//
+// Time is encoded as Unix time, which means that
+// location (time zone) data is removed from the object.
+// The encoded object itself is 12 bytes: 8 bytes for
+// a big-endian 64-bit integer denoting seconds
+// elapsed since "zero" Unix time, followed by 4 bytes
+// for a big-endian 32-bit signed integer denoting
+// the nanosecond offset of the time. This encoding
+// is intended to ease portability across languages.
+// (Note that this is *not* the standard time.Time
+// binary encoding, because its implementation relies
+// heavily on the internal representation used by the
+// time package.)
+func (mw *Writer) WriteTime(t time.Time) error {
+ t = t.UTC()
+ o, err := mw.require(15)
+ if err != nil {
+ return err
+ }
+ mw.buf[o] = mext8
+ mw.buf[o+1] = 12
+ mw.buf[o+2] = TimeExtension
+ putUnix(mw.buf[o+3:], t.Unix(), int32(t.Nanosecond()))
+ return nil
+}
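+
+// A minimal round-trip sketch (illustrative; NewReader and ReadTime are
+// exercised the same way in this package's tests):
+//
+//	var buf bytes.Buffer
+//	w := NewWriter(&buf)
+//	_ = w.WriteTime(time.Now())
+//	_ = w.Flush()
+//	t, _ := NewReader(&buf).ReadTime() // zone information is not preserved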
+
+// WriteIntf writes the concrete type of 'v'.
+// WriteIntf will error if 'v' is not one of the following:
+// - A bool, float, string, []byte, int, uint, or complex
+// - A map of supported types (with string keys)
+// - An array or slice of supported types
+// - A pointer to a supported type
+// - A type that satisfies the msgp.Encodable interface
+// - A type that satisfies the msgp.Extension interface
+func (mw *Writer) WriteIntf(v interface{}) error {
+ if v == nil {
+ return mw.WriteNil()
+ }
+ switch v := v.(type) {
+
+ // preferred interfaces
+
+ case Encodable:
+ return v.EncodeMsg(mw)
+ case Extension:
+ return mw.WriteExtension(v)
+
+ // concrete types
+
+ case bool:
+ return mw.WriteBool(v)
+ case float32:
+ return mw.WriteFloat32(v)
+ case float64:
+ return mw.WriteFloat64(v)
+ case complex64:
+ return mw.WriteComplex64(v)
+ case complex128:
+ return mw.WriteComplex128(v)
+ case uint8:
+ return mw.WriteUint8(v)
+ case uint16:
+ return mw.WriteUint16(v)
+ case uint32:
+ return mw.WriteUint32(v)
+ case uint64:
+ return mw.WriteUint64(v)
+ case uint:
+ return mw.WriteUint(v)
+ case int8:
+ return mw.WriteInt8(v)
+ case int16:
+ return mw.WriteInt16(v)
+ case int32:
+ return mw.WriteInt32(v)
+ case int64:
+ return mw.WriteInt64(v)
+ case int:
+ return mw.WriteInt(v)
+ case string:
+ return mw.WriteString(v)
+ case []byte:
+ return mw.WriteBytes(v)
+ case map[string]string:
+ return mw.WriteMapStrStr(v)
+ case map[string]interface{}:
+ return mw.WriteMapStrIntf(v)
+ case time.Time:
+ return mw.WriteTime(v)
+ }
+
+ val := reflect.ValueOf(v)
+ if !val.IsValid() || !isSupported(val.Kind()) {
+ return fmt.Errorf("msgp: type %s not supported", val.Kind())
+ }
+
+ switch val.Kind() {
+ case reflect.Ptr:
+ if val.IsNil() {
+ return mw.WriteNil()
+ }
+ return mw.WriteIntf(val.Elem().Interface())
+ case reflect.Slice:
+ return mw.writeSlice(val)
+ case reflect.Map:
+ return mw.writeMap(val)
+ }
+ return &ErrUnsupportedType{val.Type()}
+}
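+
+// Illustrative usage sketch for WriteIntf (error handling elided;
+// mw is assumed to be a *Writer): values of mixed concrete types can
+// be written without compile-time knowledge of their types.
+//
+//	for _, v := range []interface{}{"a", 1, 3.14, true} {
+//		_ = mw.WriteIntf(v)
+//	}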
+
+func (mw *Writer) writeMap(v reflect.Value) (err error) {
+ if v.Type().Key().Kind() != reflect.String {
+ return errors.New("msgp: map keys must be strings")
+ }
+ ks := v.MapKeys()
+ err = mw.WriteMapHeader(uint32(len(ks)))
+ if err != nil {
+ return
+ }
+ for _, key := range ks {
+ val := v.MapIndex(key)
+ err = mw.WriteString(key.String())
+ if err != nil {
+ return
+ }
+ err = mw.WriteIntf(val.Interface())
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+func (mw *Writer) writeSlice(v reflect.Value) (err error) {
+ // is []byte
+ if v.Type().ConvertibleTo(btsType) {
+ return mw.WriteBytes(v.Bytes())
+ }
+
+ sz := uint32(v.Len())
+ err = mw.WriteArrayHeader(sz)
+ if err != nil {
+ return
+ }
+ for i := uint32(0); i < sz; i++ {
+ err = mw.WriteIntf(v.Index(int(i)).Interface())
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+func (mw *Writer) writeStruct(v reflect.Value) error {
+ if enc, ok := v.Interface().(Encodable); ok {
+ return enc.EncodeMsg(mw)
+ }
+ return fmt.Errorf("msgp: unsupported type: %s", v.Type())
+}
+
+func (mw *Writer) writeVal(v reflect.Value) error {
+ if !isSupported(v.Kind()) {
+ return fmt.Errorf("msgp: msgp/enc: type %q not supported", v.Type())
+ }
+
+ // shortcut for nil values; IsNil is only legal on nillable kinds
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ if v.IsNil() {
+ return mw.WriteNil()
+ }
+ }
+ switch v.Kind() {
+ case reflect.Bool:
+ return mw.WriteBool(v.Bool())
+
+ case reflect.Float32, reflect.Float64:
+ return mw.WriteFloat64(v.Float())
+
+ case reflect.Complex64, reflect.Complex128:
+ return mw.WriteComplex128(v.Complex())
+
+ case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int8:
+ return mw.WriteInt64(v.Int())
+
+ case reflect.Interface, reflect.Ptr:
+ if v.IsNil() {
+ return mw.WriteNil()
+ }
+ return mw.writeVal(v.Elem())
+
+ case reflect.Map:
+ return mw.writeMap(v)
+
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint8:
+ return mw.WriteUint64(v.Uint())
+
+ case reflect.String:
+ return mw.WriteString(v.String())
+
+ case reflect.Slice, reflect.Array:
+ return mw.writeSlice(v)
+
+ case reflect.Struct:
+ return mw.writeStruct(v)
+
+ }
+ return fmt.Errorf("msgp: msgp/enc: type %q not supported", v.Type())
+}
+
+// is the reflect.Kind encodable?
+func isSupported(k reflect.Kind) bool {
+ switch k {
+ case reflect.Func, reflect.Chan, reflect.Invalid, reflect.UnsafePointer:
+ return false
+ default:
+ return true
+ }
+}
+
+// GuessSize guesses the size of the underlying
+// value of 'i'. If the underlying value is not
+// a simple builtin (or []byte), GuessSize defaults
+// to 512.
+func GuessSize(i interface{}) int {
+ if i == nil {
+ return NilSize
+ }
+
+ switch i := i.(type) {
+ case Sizer:
+ return i.Msgsize()
+ case Extension:
+ return ExtensionPrefixSize + i.Len()
+ case float64:
+ return Float64Size
+ case float32:
+ return Float32Size
+ case uint8, uint16, uint32, uint64, uint:
+ return UintSize
+ case int8, int16, int32, int64, int:
+ return IntSize
+ case []byte:
+ return BytesPrefixSize + len(i)
+ case string:
+ return StringPrefixSize + len(i)
+ case complex64:
+ return Complex64Size
+ case complex128:
+ return Complex128Size
+ case bool:
+ return BoolSize
+ case map[string]interface{}:
+ s := MapHeaderSize
+ for key, val := range i {
+ s += StringPrefixSize + len(key) + GuessSize(val)
+ }
+ return s
+ case map[string]string:
+ s := MapHeaderSize
+ for key, val := range i {
+ s += 2*StringPrefixSize + len(key) + len(val)
+ }
+ return s
+ default:
+ return 512
+ }
+}
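+
+// Illustrative sketch (an assumed use, not upstream documentation):
+// GuessSize can pre-size a buffer for the append-style API defined in
+// write_bytes.go.
+//
+//	v := map[string]interface{}{"id": 1, "name": "x"}
+//	buf := make([]byte, 0, GuessSize(v))
+//	buf, _ = AppendIntf(buf, v)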
diff --git a/vendor/github.com/tinylib/msgp/msgp/write_bytes.go b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go
new file mode 100644
index 00000000..eaa03c46
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go
@@ -0,0 +1,411 @@
+package msgp
+
+import (
+ "math"
+ "reflect"
+ "time"
+)
+
+// ensure grows 'b' by 'sz' bytes and returns the extended
+// slice along with the offset at which the new bytes begin
+func ensure(b []byte, sz int) ([]byte, int) {
+ l := len(b)
+ c := cap(b)
+ if c-l < sz {
+ o := make([]byte, (2*c)+sz) // exponential growth
+ n := copy(o, b)
+ return o[:n+sz], n
+ }
+ return b[:l+sz], l
+}
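+
+// For example (illustrative): calling ensure with a slice of len==cap==4
+// and sz==3 allocates a new backing array of 2*4+3 = 11 bytes, copies the
+// old contents, and returns the slice resized to 7 with write offset 4.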
+
+// AppendMapHeader appends a map header with the
+// given size to the slice
+func AppendMapHeader(b []byte, sz uint32) []byte {
+ switch {
+ case sz <= 15:
+ return append(b, wfixmap(uint8(sz)))
+
+ case sz <= math.MaxUint16:
+ o, n := ensure(b, 3)
+ prefixu16(o[n:], mmap16, uint16(sz))
+ return o
+
+ default:
+ o, n := ensure(b, 5)
+ prefixu32(o[n:], mmap32, sz)
+ return o
+ }
+}
+
+// AppendArrayHeader appends an array header with
+// the given size to the slice
+func AppendArrayHeader(b []byte, sz uint32) []byte {
+ switch {
+ case sz <= 15:
+ return append(b, wfixarray(uint8(sz)))
+
+ case sz <= math.MaxUint16:
+ o, n := ensure(b, 3)
+ prefixu16(o[n:], marray16, uint16(sz))
+ return o
+
+ default:
+ o, n := ensure(b, 5)
+ prefixu32(o[n:], marray32, sz)
+ return o
+ }
+}
+
+// AppendNil appends a 'nil' byte to the slice
+func AppendNil(b []byte) []byte { return append(b, mnil) }
+
+// AppendFloat64 appends a float64 to the slice
+func AppendFloat64(b []byte, f float64) []byte {
+ o, n := ensure(b, Float64Size)
+ prefixu64(o[n:], mfloat64, math.Float64bits(f))
+ return o
+}
+
+// AppendFloat32 appends a float32 to the slice
+func AppendFloat32(b []byte, f float32) []byte {
+ o, n := ensure(b, Float32Size)
+ prefixu32(o[n:], mfloat32, math.Float32bits(f))
+ return o
+}
+
+// AppendInt64 appends an int64 to the slice
+func AppendInt64(b []byte, i int64) []byte {
+ if i >= 0 {
+ switch {
+ case i <= math.MaxInt8:
+ return append(b, wfixint(uint8(i)))
+ case i <= math.MaxInt16:
+ o, n := ensure(b, 3)
+ putMint16(o[n:], int16(i))
+ return o
+ case i <= math.MaxInt32:
+ o, n := ensure(b, 5)
+ putMint32(o[n:], int32(i))
+ return o
+ default:
+ o, n := ensure(b, 9)
+ putMint64(o[n:], i)
+ return o
+ }
+ }
+ switch {
+ case i >= -32:
+ return append(b, wnfixint(int8(i)))
+ case i >= math.MinInt8:
+ o, n := ensure(b, 2)
+ putMint8(o[n:], int8(i))
+ return o
+ case i >= math.MinInt16:
+ o, n := ensure(b, 3)
+ putMint16(o[n:], int16(i))
+ return o
+ case i >= math.MinInt32:
+ o, n := ensure(b, 5)
+ putMint32(o[n:], int32(i))
+ return o
+ default:
+ o, n := ensure(b, 9)
+ putMint64(o[n:], i)
+ return o
+ }
+}
+
+// AppendInt appends an int to the slice
+func AppendInt(b []byte, i int) []byte { return AppendInt64(b, int64(i)) }
+
+// AppendInt8 appends an int8 to the slice
+func AppendInt8(b []byte, i int8) []byte { return AppendInt64(b, int64(i)) }
+
+// AppendInt16 appends an int16 to the slice
+func AppendInt16(b []byte, i int16) []byte { return AppendInt64(b, int64(i)) }
+
+// AppendInt32 appends an int32 to the slice
+func AppendInt32(b []byte, i int32) []byte { return AppendInt64(b, int64(i)) }
+
+// AppendUint64 appends a uint64 to the slice
+func AppendUint64(b []byte, u uint64) []byte {
+ switch {
+ case u <= (1<<7)-1:
+ return append(b, wfixint(uint8(u)))
+
+ case u <= math.MaxUint8:
+ o, n := ensure(b, 2)
+ putMuint8(o[n:], uint8(u))
+ return o
+
+ case u <= math.MaxUint16:
+ o, n := ensure(b, 3)
+ putMuint16(o[n:], uint16(u))
+ return o
+
+ case u <= math.MaxUint32:
+ o, n := ensure(b, 5)
+ putMuint32(o[n:], uint32(u))
+ return o
+
+ default:
+ o, n := ensure(b, 9)
+ putMuint64(o[n:], u)
+ return o
+
+ }
+}
+
+// AppendUint appends a uint to the slice
+func AppendUint(b []byte, u uint) []byte { return AppendUint64(b, uint64(u)) }
+
+// AppendUint8 appends a uint8 to the slice
+func AppendUint8(b []byte, u uint8) []byte { return AppendUint64(b, uint64(u)) }
+
+// AppendByte is analogous to AppendUint8
+func AppendByte(b []byte, u byte) []byte { return AppendUint8(b, uint8(u)) }
+
+// AppendUint16 appends a uint16 to the slice
+func AppendUint16(b []byte, u uint16) []byte { return AppendUint64(b, uint64(u)) }
+
+// AppendUint32 appends a uint32 to the slice
+func AppendUint32(b []byte, u uint32) []byte { return AppendUint64(b, uint64(u)) }
+
+// AppendBytes appends bytes to the slice as MessagePack 'bin' data
+func AppendBytes(b []byte, bts []byte) []byte {
+ sz := len(bts)
+ var o []byte
+ var n int
+ switch {
+ case sz <= math.MaxUint8:
+ o, n = ensure(b, 2+sz)
+ prefixu8(o[n:], mbin8, uint8(sz))
+ n += 2
+ case sz <= math.MaxUint16:
+ o, n = ensure(b, 3+sz)
+ prefixu16(o[n:], mbin16, uint16(sz))
+ n += 3
+ default:
+ o, n = ensure(b, 5+sz)
+ prefixu32(o[n:], mbin32, uint32(sz))
+ n += 5
+ }
+ return o[:n+copy(o[n:], bts)]
+}
+
+// AppendBool appends a bool to the slice
+func AppendBool(b []byte, t bool) []byte {
+ if t {
+ return append(b, mtrue)
+ }
+ return append(b, mfalse)
+}
+
+// AppendString appends a string as a MessagePack 'str' to the slice
+func AppendString(b []byte, s string) []byte {
+ sz := len(s)
+ var n int
+ var o []byte
+ switch {
+ case sz <= 31:
+ o, n = ensure(b, 1+sz)
+ o[n] = wfixstr(uint8(sz))
+ n++
+ case sz <= math.MaxUint8:
+ o, n = ensure(b, 2+sz)
+ prefixu8(o[n:], mstr8, uint8(sz))
+ n += 2
+ case sz <= math.MaxUint16:
+ o, n = ensure(b, 3+sz)
+ prefixu16(o[n:], mstr16, uint16(sz))
+ n += 3
+ default:
+ o, n = ensure(b, 5+sz)
+ prefixu32(o[n:], mstr32, uint32(sz))
+ n += 5
+ }
+ return o[:n+copy(o[n:], s)]
+}
+
+// AppendStringFromBytes appends a []byte
+// as a MessagePack 'str' to the slice 'b'.
+func AppendStringFromBytes(b []byte, str []byte) []byte {
+ sz := len(str)
+ var n int
+ var o []byte
+ switch {
+ case sz <= 31:
+ o, n = ensure(b, 1+sz)
+ o[n] = wfixstr(uint8(sz))
+ n++
+ case sz <= math.MaxUint8:
+ o, n = ensure(b, 2+sz)
+ prefixu8(o[n:], mstr8, uint8(sz))
+ n += 2
+ case sz <= math.MaxUint16:
+ o, n = ensure(b, 3+sz)
+ prefixu16(o[n:], mstr16, uint16(sz))
+ n += 3
+ default:
+ o, n = ensure(b, 5+sz)
+ prefixu32(o[n:], mstr32, uint32(sz))
+ n += 5
+ }
+ return o[:n+copy(o[n:], str)]
+}
+
+// AppendComplex64 appends a complex64 to the slice as a MessagePack extension
+func AppendComplex64(b []byte, c complex64) []byte {
+ o, n := ensure(b, Complex64Size)
+ o[n] = mfixext8
+ o[n+1] = Complex64Extension
+ big.PutUint32(o[n+2:], math.Float32bits(real(c)))
+ big.PutUint32(o[n+6:], math.Float32bits(imag(c)))
+ return o
+}
+
+// AppendComplex128 appends a complex128 to the slice as a MessagePack extension
+func AppendComplex128(b []byte, c complex128) []byte {
+ o, n := ensure(b, Complex128Size)
+ o[n] = mfixext16
+ o[n+1] = Complex128Extension
+ big.PutUint64(o[n+2:], math.Float64bits(real(c)))
+ big.PutUint64(o[n+10:], math.Float64bits(imag(c)))
+ return o
+}
+
+// AppendTime appends a time.Time to the slice as a MessagePack extension
+func AppendTime(b []byte, t time.Time) []byte {
+ o, n := ensure(b, TimeSize)
+ t = t.UTC()
+ o[n] = mext8
+ o[n+1] = 12
+ o[n+2] = TimeExtension
+ putUnix(o[n+3:], t.Unix(), int32(t.Nanosecond()))
+ return o
+}
+
+// AppendMapStrStr appends a map[string]string to the slice
+// as a MessagePack map with 'str'-type keys and values
+func AppendMapStrStr(b []byte, m map[string]string) []byte {
+ sz := uint32(len(m))
+ b = AppendMapHeader(b, sz)
+ for key, val := range m {
+ b = AppendString(b, key)
+ b = AppendString(b, val)
+ }
+ return b
+}
+
+// AppendMapStrIntf appends a map[string]interface{} to the slice
+// as a MessagePack map with 'str'-type keys.
+func AppendMapStrIntf(b []byte, m map[string]interface{}) ([]byte, error) {
+ sz := uint32(len(m))
+ b = AppendMapHeader(b, sz)
+ var err error
+ for key, val := range m {
+ b = AppendString(b, key)
+ b, err = AppendIntf(b, val)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+// AppendIntf appends the concrete type of 'i' to the
+// provided []byte. 'i' must be one of the following:
+// - 'nil'
+// - A bool, float, string, []byte, int, uint, or complex
+// - A map[string]interface{} or map[string]string
+// - A []T, where T is another supported type
+// - A *T, where T is another supported type
+// - A type that satisfies the msgp.Marshaler interface
+// - A type that satisfies the msgp.Extension interface
+func AppendIntf(b []byte, i interface{}) ([]byte, error) {
+ if i == nil {
+ return AppendNil(b), nil
+ }
+
+ // all the concrete types
+ // for which we have methods
+ switch i := i.(type) {
+ case Marshaler:
+ return i.MarshalMsg(b)
+ case Extension:
+ return AppendExtension(b, i)
+ case bool:
+ return AppendBool(b, i), nil
+ case float32:
+ return AppendFloat32(b, i), nil
+ case float64:
+ return AppendFloat64(b, i), nil
+ case complex64:
+ return AppendComplex64(b, i), nil
+ case complex128:
+ return AppendComplex128(b, i), nil
+ case string:
+ return AppendString(b, i), nil
+ case []byte:
+ return AppendBytes(b, i), nil
+ case int8:
+ return AppendInt8(b, i), nil
+ case int16:
+ return AppendInt16(b, i), nil
+ case int32:
+ return AppendInt32(b, i), nil
+ case int64:
+ return AppendInt64(b, i), nil
+ case int:
+ return AppendInt64(b, int64(i)), nil
+ case uint:
+ return AppendUint64(b, uint64(i)), nil
+ case uint8:
+ return AppendUint8(b, i), nil
+ case uint16:
+ return AppendUint16(b, i), nil
+ case uint32:
+ return AppendUint32(b, i), nil
+ case uint64:
+ return AppendUint64(b, i), nil
+ case time.Time:
+ return AppendTime(b, i), nil
+ case map[string]interface{}:
+ return AppendMapStrIntf(b, i)
+ case map[string]string:
+ return AppendMapStrStr(b, i), nil
+ case []interface{}:
+ b = AppendArrayHeader(b, uint32(len(i)))
+ var err error
+ for _, k := range i {
+ b, err = AppendIntf(b, k)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+ }
+
+ var err error
+ v := reflect.ValueOf(i)
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice:
+ l := v.Len()
+ b = AppendArrayHeader(b, uint32(l))
+ for i := 0; i < l; i++ {
+ b, err = AppendIntf(b, v.Index(i).Interface())
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+ case reflect.Ptr:
+ if v.IsNil() {
+ return AppendNil(b), err
+ }
+ b, err = AppendIntf(b, v.Elem().Interface())
+ return b, err
+ default:
+ return b, &ErrUnsupportedType{T: v.Type()}
+ }
+}
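+
+// Illustrative sketch of the append-style API above (error handling
+// elided): each call returns the extended slice.
+//
+//	b := AppendMapHeader(nil, 1)
+//	b = AppendString(b, "count")
+//	b = AppendInt64(b, 10)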
diff --git a/vendor/github.com/tinylib/msgp/msgp/write_bytes_test.go b/vendor/github.com/tinylib/msgp/msgp/write_bytes_test.go
new file mode 100644
index 00000000..fa0b7d53
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/write_bytes_test.go
@@ -0,0 +1,319 @@
+package msgp
+
+import (
+ "bytes"
+ "math"
+ "testing"
+ "time"
+)
+
+func TestIssue116(t *testing.T) {
+ data := AppendInt64(nil, math.MinInt64)
+ i, _, err := ReadInt64Bytes(data)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if i != math.MinInt64 {
+ t.Errorf("put %d in and got %d out", int64(math.MinInt64), i)
+ }
+
+ var buf bytes.Buffer
+
+ w := NewWriter(&buf)
+ w.WriteInt64(math.MinInt64)
+ w.Flush()
+ i, err = NewReader(&buf).ReadInt64()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if i != math.MinInt64 {
+ t.Errorf("put %d in and got %d out", int64(math.MinInt64), i)
+ }
+}
+
+func TestAppendMapHeader(t *testing.T) {
+ szs := []uint32{0, 1, uint32(tint8), uint32(tint16), tuint32}
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+
+ var bts []byte
+ for _, sz := range szs {
+ buf.Reset()
+ en.WriteMapHeader(sz)
+ en.Flush()
+ bts = AppendMapHeader(bts[0:0], sz)
+
+ if !bytes.Equal(buf.Bytes(), bts) {
+ t.Errorf("for size %d, encoder wrote %q and append wrote %q", sz, buf.Bytes(), bts)
+ }
+ }
+}
+
+func BenchmarkAppendMapHeader(b *testing.B) {
+ buf := make([]byte, 0, 9)
+ N := b.N / 4
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < N; i++ {
+ AppendMapHeader(buf[:0], 0)
+ AppendMapHeader(buf[:0], uint32(tint8))
+ AppendMapHeader(buf[:0], tuint16)
+ AppendMapHeader(buf[:0], tuint32)
+ }
+}
+
+func TestAppendArrayHeader(t *testing.T) {
+ szs := []uint32{0, 1, uint32(tint8), uint32(tint16), tuint32}
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+
+ var bts []byte
+ for _, sz := range szs {
+ buf.Reset()
+ en.WriteArrayHeader(sz)
+ en.Flush()
+ bts = AppendArrayHeader(bts[0:0], sz)
+
+ if !bytes.Equal(buf.Bytes(), bts) {
+ t.Errorf("for size %d, encoder wrote %q and append wrote %q", sz, buf.Bytes(), bts)
+ }
+ }
+}
+
+func BenchmarkAppendArrayHeader(b *testing.B) {
+ buf := make([]byte, 0, 9)
+ N := b.N / 4
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < N; i++ {
+ AppendArrayHeader(buf[:0], 0)
+ AppendArrayHeader(buf[:0], uint32(tint8))
+ AppendArrayHeader(buf[:0], tuint16)
+ AppendArrayHeader(buf[:0], tuint32)
+ }
+}
+
+func TestAppendNil(t *testing.T) {
+ var bts []byte
+ bts = AppendNil(bts[0:0])
+ if bts[0] != mnil {
+ t.Fatal("bts[0] is not 'nil'")
+ }
+}
+
+func TestAppendFloat64(t *testing.T) {
+ f := float64(3.14159)
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+
+ var bts []byte
+ en.WriteFloat64(f)
+ en.Flush()
+ bts = AppendFloat64(bts[0:0], f)
+ if !bytes.Equal(buf.Bytes(), bts) {
+ t.Errorf("for float %f, encoder wrote %q; append wrote %q", f, buf.Bytes(), bts)
+ }
+}
+
+func BenchmarkAppendFloat64(b *testing.B) {
+ f := float64(3.14159)
+ buf := make([]byte, 0, 9)
+ b.SetBytes(9)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ AppendFloat64(buf[0:0], f)
+ }
+}
+
+func TestAppendFloat32(t *testing.T) {
+ f := float32(3.14159)
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+
+ var bts []byte
+ en.WriteFloat32(f)
+ en.Flush()
+ bts = AppendFloat32(bts[0:0], f)
+ if !bytes.Equal(buf.Bytes(), bts) {
+ t.Errorf("for float %f, encoder wrote %q; append wrote %q", f, buf.Bytes(), bts)
+ }
+}
+
+func BenchmarkAppendFloat32(b *testing.B) {
+ f := float32(3.14159)
+ buf := make([]byte, 0, 5)
+ b.SetBytes(5)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ AppendFloat32(buf[0:0], f)
+ }
+}
+
+func TestAppendInt64(t *testing.T) {
+ is := []int64{0, 1, -5, -50, int64(tint16), int64(tint32), int64(tint64)}
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+
+ var bts []byte
+ for _, i := range is {
+ buf.Reset()
+ en.WriteInt64(i)
+ en.Flush()
+ bts = AppendInt64(bts[0:0], i)
+ if !bytes.Equal(buf.Bytes(), bts) {
+ t.Errorf("for int64 %d, encoder wrote %q; append wrote %q", i, buf.Bytes(), bts)
+ }
+ }
+}
+
+func BenchmarkAppendInt64(b *testing.B) {
+ is := []int64{0, 1, -5, -50, int64(tint16), int64(tint32), int64(tint64)}
+ l := len(is)
+ buf := make([]byte, 0, 9)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ AppendInt64(buf[0:0], is[i%l])
+ }
+}
+
+func TestAppendUint64(t *testing.T) {
+ us := []uint64{0, 1, uint64(tuint16), uint64(tuint32), tuint64}
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+ var bts []byte
+
+ for _, u := range us {
+ buf.Reset()
+ en.WriteUint64(u)
+ en.Flush()
+ bts = AppendUint64(bts[0:0], u)
+ if !bytes.Equal(buf.Bytes(), bts) {
+ t.Errorf("for uint64 %d, encoder wrote %q; append wrote %q", u, buf.Bytes(), bts)
+ }
+ }
+}
+
+func BenchmarkAppendUint64(b *testing.B) {
+ us := []uint64{0, 1, 15, uint64(tuint16), uint64(tuint32), tuint64}
+ buf := make([]byte, 0, 9)
+ b.ReportAllocs()
+ b.ResetTimer()
+ l := len(us)
+ for i := 0; i < b.N; i++ {
+ AppendUint64(buf[0:0], us[i%l])
+ }
+}
+
+func TestAppendBytes(t *testing.T) {
+ sizes := []int{0, 1, 225, int(tuint32)}
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+ var bts []byte
+
+ for _, sz := range sizes {
+ buf.Reset()
+ b := RandBytes(sz)
+ en.WriteBytes(b)
+ en.Flush()
+ bts = AppendBytes(bts[0:0], b)
+ if !bytes.Equal(buf.Bytes(), bts) {
+ t.Errorf("for bytes of length %d, encoder wrote %d bytes and append wrote %d bytes", sz, buf.Len(), len(bts))
+ }
+ }
+}
+
+func benchappendBytes(size uint32, b *testing.B) {
+ bts := RandBytes(int(size))
+ buf := make([]byte, 0, len(bts)+5)
+ b.SetBytes(int64(len(bts) + 5))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ AppendBytes(buf[0:0], bts)
+ }
+}
+
+func BenchmarkAppend16Bytes(b *testing.B) { benchappendBytes(16, b) }
+
+func BenchmarkAppend256Bytes(b *testing.B) { benchappendBytes(256, b) }
+
+func BenchmarkAppend2048Bytes(b *testing.B) { benchappendBytes(2048, b) }
+
+func TestAppendString(t *testing.T) {
+ sizes := []int{0, 1, 225, int(tuint32)}
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+ var bts []byte
+
+ for _, sz := range sizes {
+ buf.Reset()
+ s := string(RandBytes(sz))
+ en.WriteString(s)
+ en.Flush()
+ bts = AppendString(bts[0:0], s)
+ if !bytes.Equal(buf.Bytes(), bts) {
+ t.Errorf("for string of length %d, encoder wrote %d bytes and append wrote %d bytes", sz, buf.Len(), len(bts))
+ t.Errorf("WriteString prefix: %x", buf.Bytes()[0:5])
+ t.Errorf("Appendstring prefix: %x", bts[0:5])
+ }
+ }
+}
+
+func benchappendString(size uint32, b *testing.B) {
+ str := string(RandBytes(int(size)))
+ buf := make([]byte, 0, len(str)+5)
+ b.SetBytes(int64(len(str) + 5))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ AppendString(buf[0:0], str)
+ }
+}
+
+func BenchmarkAppend16String(b *testing.B) { benchappendString(16, b) }
+
+func BenchmarkAppend256String(b *testing.B) { benchappendString(256, b) }
+
+func BenchmarkAppend2048String(b *testing.B) { benchappendString(2048, b) }
+
+func TestAppendBool(t *testing.T) {
+ vs := []bool{true, false}
+ var buf bytes.Buffer
+ en := NewWriter(&buf)
+ var bts []byte
+
+ for _, v := range vs {
+ buf.Reset()
+ en.WriteBool(v)
+ en.Flush()
+ bts = AppendBool(bts[0:0], v)
+ if !bytes.Equal(buf.Bytes(), bts) {
+ t.Errorf("for %t, encoder wrote %q and append wrote %q", v, buf.Bytes(), bts)
+ }
+ }
+}
+
+func BenchmarkAppendBool(b *testing.B) {
+ vs := []bool{true, false}
+ buf := make([]byte, 0, 1)
+ b.SetBytes(1)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ AppendBool(buf[0:0], vs[i%2])
+ }
+}
+
+func BenchmarkAppendTime(b *testing.B) {
+ t := time.Now()
+ b.SetBytes(15)
+ buf := make([]byte, 0, 15)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ AppendTime(buf[0:0], t)
+ }
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/write_test.go b/vendor/github.com/tinylib/msgp/msgp/write_test.go
new file mode 100644
index 00000000..c5e97fe2
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/write_test.go
@@ -0,0 +1,405 @@
+package msgp
+
+import (
+ "bytes"
+ "math"
+ "math/rand"
+ "testing"
+ "time"
+)
+
+var (
+ tint8 int8 = 126 // cannot be most fix* types
+ tint16 int16 = 150 // cannot be int8
+ tint32 int32 = math.MaxInt16 + 100 // cannot be int16
+ tint64 int64 = math.MaxInt32 + 100 // cannot be int32
+ tuint16 uint32 = 300 // cannot be uint8
+ tuint32 uint32 = math.MaxUint16 + 100 // cannot be uint16
+ tuint64 uint64 = math.MaxUint32 + 100 // cannot be uint32
+)
+
+func RandBytes(sz int) []byte {
+ out := make([]byte, sz)
+ for i := range out {
+ out[i] = byte(rand.Int63n(math.MaxInt64) % 256)
+ }
+ return out
+}
+
+func TestWriteMapHeader(t *testing.T) {
+ tests := []struct {
+ Sz uint32
+ Outbytes []byte
+ }{
+ {0, []byte{mfixmap}},
+ {1, []byte{mfixmap | byte(1)}},
+ {100, []byte{mmap16, byte(uint16(100) >> 8), byte(uint16(100))}},
+ {tuint32,
+ []byte{mmap32,
+ byte(tuint32 >> 24),
+ byte(tuint32 >> 16),
+ byte(tuint32 >> 8),
+ byte(tuint32),
+ },
+ },
+ }
+
+ var buf bytes.Buffer
+ var err error
+ wr := NewWriter(&buf)
+ for _, test := range tests {
+ buf.Reset()
+ err = wr.WriteMapHeader(test.Sz)
+ if err != nil {
+ t.Error(err)
+ }
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(buf.Bytes(), test.Outbytes) {
+ t.Errorf("Expected bytes %x; got %x", test.Outbytes, buf.Bytes())
+ }
+ }
+}
+
+func BenchmarkWriteMapHeader(b *testing.B) {
+ wr := NewWriter(Nowhere)
+ N := b.N / 4
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < N; i++ {
+ wr.WriteMapHeader(0)
+ wr.WriteMapHeader(8)
+ wr.WriteMapHeader(tuint16)
+ wr.WriteMapHeader(tuint32)
+ }
+}
+
+func TestWriteArrayHeader(t *testing.T) {
+ tests := []struct {
+ Sz uint32
+ Outbytes []byte
+ }{
+ {0, []byte{mfixarray}},
+ {1, []byte{mfixarray | byte(1)}},
+ {tuint16, []byte{marray16, byte(tuint16 >> 8), byte(tuint16)}},
+ {tuint32, []byte{marray32, byte(tuint32 >> 24), byte(tuint32 >> 16), byte(tuint32 >> 8), byte(tuint32)}},
+ }
+
+ var buf bytes.Buffer
+ var err error
+ wr := NewWriter(&buf)
+ for _, test := range tests {
+ buf.Reset()
+ err = wr.WriteArrayHeader(test.Sz)
+ if err != nil {
+ t.Error(err)
+ }
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(buf.Bytes(), test.Outbytes) {
+ t.Errorf("Expected bytes %x; got %x", test.Outbytes, buf.Bytes())
+ }
+ }
+}
+
+func TestReadWriteStringHeader(t *testing.T) {
+ sizes := []uint32{0, 5, 8, 19, 150, tuint16, tuint32}
+ var buf bytes.Buffer
+ var err error
+ wr := NewWriter(&buf)
+ for _, sz := range sizes {
+ buf.Reset()
+ err = wr.WriteStringHeader(sz)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var nsz uint32
+ nsz, err = NewReader(&buf).ReadStringHeader()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if nsz != sz {
+ t.Errorf("put in size %d but got out size %d", sz, nsz)
+ }
+ }
+}
+
+func TestReadWriteBytesHeader(t *testing.T) {
+ sizes := []uint32{0, 5, 8, 19, 150, tuint16, tuint32}
+ var buf bytes.Buffer
+ var err error
+ wr := NewWriter(&buf)
+ for _, sz := range sizes {
+ buf.Reset()
+ err = wr.WriteBytesHeader(sz)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var nsz uint32
+ nsz, err = NewReader(&buf).ReadBytesHeader()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if nsz != sz {
+ t.Errorf("put in size %d but got out size %d", sz, nsz)
+ }
+ }
+}
+
+func BenchmarkWriteArrayHeader(b *testing.B) {
+ wr := NewWriter(Nowhere)
+ N := b.N / 4
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < N; i++ {
+ wr.WriteArrayHeader(0)
+ wr.WriteArrayHeader(16)
+ wr.WriteArrayHeader(tuint16)
+ wr.WriteArrayHeader(tuint32)
+ }
+}
+
+func TestWriteNil(t *testing.T) {
+ var buf bytes.Buffer
+ wr := NewWriter(&buf)
+
+ err := wr.WriteNil()
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ bts := buf.Bytes()
+ if bts[0] != mnil {
+ t.Errorf("Expected %x; wrote %x", mnil, bts[0])
+ }
+}
+
+func TestWriteFloat64(t *testing.T) {
+ var buf bytes.Buffer
+ wr := NewWriter(&buf)
+
+ for i := 0; i < 10000; i++ {
+ buf.Reset()
+ flt := (rand.Float64() - 0.5) * math.MaxFloat64
+ err := wr.WriteFloat64(flt)
+ if err != nil {
+ t.Errorf("Error with %f: %s", flt, err)
+ }
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ bts := buf.Bytes()
+
+ if bts[0] != mfloat64 {
+ t.Errorf("Leading byte was %x and not %x", bts[0], mfloat64)
+ }
+ }
+}
+
+func BenchmarkWriteFloat64(b *testing.B) {
+ f := rand.Float64()
+ wr := NewWriter(Nowhere)
+ b.SetBytes(9)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ wr.WriteFloat64(f)
+ }
+}
+
+func TestWriteFloat32(t *testing.T) {
+ var buf bytes.Buffer
+ wr := NewWriter(&buf)
+
+ for i := 0; i < 10000; i++ {
+ buf.Reset()
+ flt := (rand.Float32() - 0.5) * math.MaxFloat32
+ err := wr.WriteFloat32(flt)
+ if err != nil {
+ t.Errorf("Error with %f: %s", flt, err)
+ }
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ bts := buf.Bytes()
+
+ if bts[0] != mfloat32 {
+ t.Errorf("Leading byte was %x and not %x", bts[0], mfloat64)
+ }
+ }
+}
+
+func BenchmarkWriteFloat32(b *testing.B) {
+ f := rand.Float32()
+ wr := NewWriter(Nowhere)
+ b.SetBytes(5)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ wr.WriteFloat32(f)
+ }
+}
+
+func TestWriteInt64(t *testing.T) {
+ var buf bytes.Buffer
+ wr := NewWriter(&buf)
+
+ for i := 0; i < 10000; i++ {
+ buf.Reset()
+
+ num := (rand.Int63n(math.MaxInt64)) - (math.MaxInt64 / 2)
+
+ err := wr.WriteInt64(num)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if buf.Len() > 9 {
+ t.Errorf("buffer length should be <= 9; it's %d", buf.Len())
+ }
+ }
+}
+
+func BenchmarkWriteInt64(b *testing.B) {
+ wr := NewWriter(Nowhere)
+ b.SetBytes(9)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ wr.WriteInt64(int64(tint64))
+ }
+}
+
+func TestWriteUint64(t *testing.T) {
+ var buf bytes.Buffer
+ wr := NewWriter(&buf)
+
+ for i := 0; i < 10000; i++ {
+ buf.Reset()
+
+ num := uint64(rand.Int63n(math.MaxInt64))
+
+ err := wr.WriteUint64(num)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if buf.Len() > 9 {
+ t.Errorf("buffer length should be <= 9; it's %d", buf.Len())
+ }
+ }
+}
+
+func BenchmarkWriteUint64(b *testing.B) {
+ wr := NewWriter(Nowhere)
+ b.SetBytes(9)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ wr.WriteUint64(uint64(tuint64))
+ }
+}
+
+func TestWriteBytes(t *testing.T) {
+ var buf bytes.Buffer
+ wr := NewWriter(&buf)
+ sizes := []int{0, 1, 225, int(tuint32)}
+
+ for _, size := range sizes {
+ buf.Reset()
+ bts := RandBytes(size)
+
+ err := wr.WriteBytes(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if buf.Len() < len(bts) {
+ t.Errorf("somehow, %d bytes were encoded in %d bytes", len(bts), buf.Len())
+ }
+ }
+}
+
+func benchwrBytes(size uint32, b *testing.B) {
+ bts := RandBytes(int(size))
+ wr := NewWriter(Nowhere)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ wr.WriteBytes(bts)
+ }
+}
+
+func BenchmarkWrite16Bytes(b *testing.B) { benchwrBytes(16, b) }
+
+func BenchmarkWrite256Bytes(b *testing.B) { benchwrBytes(256, b) }
+
+func BenchmarkWrite2048Bytes(b *testing.B) { benchwrBytes(2048, b) }
+
+func TestWriteTime(t *testing.T) {
+ var buf bytes.Buffer
+ wr := NewWriter(&buf)
+ tm := time.Now()
+ err := wr.WriteTime(tm)
+ if err != nil {
+ t.Fatal(err)
+ }
+ err = wr.Flush()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if buf.Len() != 15 {
+ t.Errorf("expected time.Time to be %d bytes; got %d", 15, buf.Len())
+ }
+
+ newt, err := NewReader(&buf).ReadTime()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !newt.Equal(tm) {
+ t.Errorf("in/out not equal; %s in and %s out", tm, newt)
+ }
+}
+
+func BenchmarkWriteTime(b *testing.B) {
+ t := time.Now()
+ wr := NewWriter(Nowhere)
+ b.SetBytes(15)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ wr.WriteTime(t)
+ }
+}
diff --git a/vendor/github.com/tinylib/msgp/parse/directives.go b/vendor/github.com/tinylib/msgp/parse/directives.go
new file mode 100644
index 00000000..fb78974b
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/parse/directives.go
@@ -0,0 +1,117 @@
+package parse
+
+import (
+ "fmt"
+ "github.com/tinylib/msgp/gen"
+ "go/ast"
+ "strings"
+)
+
+const linePrefix = "//msgp:"
+
+// func(args, fileset)
+type directive func([]string, *FileSet) error
+
+// func(passName, args, printer)
+type passDirective func(gen.Method, []string, *gen.Printer) error
+
+// map of all recognized directives
+//
+// to add a directive, define a func([]string, *FileSet) error
+// and then add it to this list.
+var directives = map[string]directive{
+ "shim": applyShim,
+ "ignore": ignore,
+ "tuple": astuple,
+}
+
+var passDirectives = map[string]passDirective{
+ "ignore": passignore,
+}
+
+func passignore(m gen.Method, text []string, p *gen.Printer) error {
+ pushstate(m.String())
+ for _, a := range text {
+ p.ApplyDirective(m, gen.IgnoreTypename(a))
+ infof("ignoring %s\n", a)
+ }
+ popstate()
+ return nil
+}
+
+// find all comment lines that begin with //msgp:
+func yieldComments(c []*ast.CommentGroup) []string {
+ var out []string
+ for _, cg := range c {
+ for _, line := range cg.List {
+ if strings.HasPrefix(line.Text, linePrefix) {
+ out = append(out, strings.TrimPrefix(line.Text, linePrefix))
+ }
+ }
+ }
+ return out
+}
+
+//msgp:shim {Type} as:{Newtype} using:{toFunc/fromFunc}
+func applyShim(text []string, f *FileSet) error {
+ if len(text) != 4 {
+ return fmt.Errorf("shim directive should have 3 arguments; found %d", len(text)-1)
+ }
+
+ name := text[1]
+ be := gen.Ident(strings.TrimPrefix(strings.TrimSpace(text[2]), "as:")) // parse as:{base}
+ if name[0] == '*' {
+ name = name[1:]
+ be.Needsref(true)
+ }
+ be.Alias(name)
+
+ usestr := strings.TrimPrefix(strings.TrimSpace(text[3]), "using:") // parse using:{toFunc/fromFunc}
+
+ methods := strings.Split(usestr, "/")
+ if len(methods) != 2 {
+ return fmt.Errorf("expected 2 using::{} methods; found %d (%q)", len(methods), text[3])
+ }
+
+ be.ShimToBase = methods[0]
+ be.ShimFromBase = methods[1]
+
+ infof("%s -> %s\n", name, be.Value.String())
+ f.findShim(name, be)
+
+ return nil
+}
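+
+// For example, a source file might carry a shim directive like the
+// following (the type and converter names here are hypothetical):
+//
+//	//msgp:shim time.Duration as:int64 using:durationToInt64/int64ToDuration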
+
+//msgp:ignore {TypeA} {TypeB}...
+func ignore(text []string, f *FileSet) error {
+ if len(text) < 2 {
+ return nil
+ }
+ for _, item := range text[1:] {
+ name := strings.TrimSpace(item)
+ if _, ok := f.Identities[name]; ok {
+ delete(f.Identities, name)
+ infof("ignoring %s\n", name)
+ }
+ }
+ return nil
+}
+
+//msgp:tuple {TypeA} {TypeB}...
+func astuple(text []string, f *FileSet) error {
+ if len(text) < 2 {
+ return nil
+ }
+ for _, item := range text[1:] {
+ name := strings.TrimSpace(item)
+ if el, ok := f.Identities[name]; ok {
+ if st, ok := el.(*gen.Struct); ok {
+ st.AsTuple = true
+ infoln(name)
+ } else {
+ warnf("%s: only structs can be tuples\n", name)
+ }
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/tinylib/msgp/parse/getast.go b/vendor/github.com/tinylib/msgp/parse/getast.go
new file mode 100644
index 00000000..355ad772
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/parse/getast.go
@@ -0,0 +1,589 @@
+package parse
+
+import (
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "os"
+ "reflect"
+ "sort"
+ "strings"
+
+ "github.com/tinylib/msgp/gen"
+ "github.com/ttacon/chalk"
+)
+
+// A FileSet is the in-memory representation of a
+// parsed file.
+type FileSet struct {
+ Package string // package name
+ Specs map[string]ast.Expr // type specs in file
+ Identities map[string]gen.Elem // processed from specs
+ Directives []string // raw preprocessor directives
+ Imports []*ast.ImportSpec // imports
+}
+
+// File parses a file at the relative path
+// provided and produces a new *FileSet.
+// If you pass in a path to a directory, the entire
+// directory will be parsed.
+// If unexported is false, only exported identifiers are included in the FileSet.
+// If the resulting FileSet would be empty, an error is returned.
+func File(name string, unexported bool) (*FileSet, error) {
+ pushstate(name)
+ defer popstate()
+ fs := &FileSet{
+ Specs: make(map[string]ast.Expr),
+ Identities: make(map[string]gen.Elem),
+ }
+
+ fset := token.NewFileSet()
+ finfo, err := os.Stat(name)
+ if err != nil {
+ return nil, err
+ }
+ if finfo.IsDir() {
+ pkgs, err := parser.ParseDir(fset, name, nil, parser.ParseComments)
+ if err != nil {
+ return nil, err
+ }
+ if len(pkgs) != 1 {
+ return nil, fmt.Errorf("multiple packages in directory: %s", name)
+ }
+ var one *ast.Package
+ for _, nm := range pkgs {
+ one = nm
+ break
+ }
+ fs.Package = one.Name
+ for _, fl := range one.Files {
+ pushstate(fl.Name.Name)
+ fs.Directives = append(fs.Directives, yieldComments(fl.Comments)...)
+ if !unexported {
+ ast.FileExports(fl)
+ }
+ fs.getTypeSpecs(fl)
+ popstate()
+ }
+ } else {
+ f, err := parser.ParseFile(fset, name, nil, parser.ParseComments)
+ if err != nil {
+ return nil, err
+ }
+ fs.Package = f.Name.Name
+ fs.Directives = yieldComments(f.Comments)
+ if !unexported {
+ ast.FileExports(f)
+ }
+ fs.getTypeSpecs(f)
+ }
+
+ if len(fs.Specs) == 0 {
+ return nil, fmt.Errorf("no definitions in %s", name)
+ }
+
+ fs.process()
+ fs.applyDirectives()
+ fs.propInline()
+
+ return fs, nil
+}
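+
+// Illustrative usage sketch (the path is hypothetical):
+//
+//	fs, err := File("./models", false)
+//	if err != nil {
+//		// handle the parse failure
+//	}
+//	// fs.Identities now maps type names to their gen.Elem descriptions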
+
+// applyDirectives applies all of the directives that
+// are known to the parser. Additional method-specific
+// directives remain in f.Directives
+func (f *FileSet) applyDirectives() {
+ newdirs := make([]string, 0, len(f.Directives))
+ for _, d := range f.Directives {
+ chunks := strings.Split(d, " ")
+ if len(chunks) > 0 {
+ if fn, ok := directives[chunks[0]]; ok {
+ pushstate(chunks[0])
+ err := fn(chunks, f)
+ if err != nil {
+ warnln(err.Error())
+ }
+ popstate()
+ } else {
+ newdirs = append(newdirs, d)
+ }
+ }
+ }
+ f.Directives = newdirs
+}
+
+// A linkset is a graph of unresolved
+// identities.
+//
+// Since gen.Ident can only represent
+// one level of type indirection (e.g. Foo -> uint8),
+// type declarations like `type Foo Bar`
+// aren't resolvable until we've processed
+// everything else.
+//
+// The goal of this dependency resolution
+// is to distill the type declaration
+// into just one level of indirection.
+// In other words, if we have:
+//
+// type A uint64
+// type B A
+// type C B
+// type D C
+//
+// ... then we want to end up
+// figuring out that D is just a uint64.
+type linkset map[string]*gen.BaseElem
+
+func (f *FileSet) resolve(ls linkset) {
+ progress := true
+ for progress && len(ls) > 0 {
+ progress = false
+ for name, elem := range ls {
+ real, ok := f.Identities[elem.TypeName()]
+ if ok {
+ // copy the old type descriptor,
+ // alias it to the new value,
+ // and insert it into the resolved
+ // identities list
+ progress = true
+ nt := real.Copy()
+ nt.Alias(name)
+ f.Identities[name] = nt
+ delete(ls, name)
+ }
+ }
+ }
+
+ // what's left can't be resolved
+ for name, elem := range ls {
+ warnf("couldn't resolve type %s (%s)\n", name, elem.TypeName())
+ }
+}
+
+// process takes the contents of f.Specs and
+// uses them to populate f.Identities
+func (f *FileSet) process() {
+
+ deferred := make(linkset)
+parse:
+ for name, def := range f.Specs {
+ pushstate(name)
+ el := f.parseExpr(def)
+ if el == nil {
+ warnln("failed to parse")
+ popstate()
+ continue parse
+ }
+ // push unresolved identities into
+ // the graph of links and resolve after
+ // we've handled every possible named type.
+ if be, ok := el.(*gen.BaseElem); ok && be.Value == gen.IDENT {
+ deferred[name] = be
+ popstate()
+ continue parse
+ }
+ el.Alias(name)
+ f.Identities[name] = el
+ popstate()
+ }
+
+ if len(deferred) > 0 {
+ f.resolve(deferred)
+ }
+}
+
+func strToMethod(s string) gen.Method {
+ switch s {
+ case "encode":
+ return gen.Encode
+ case "decode":
+ return gen.Decode
+ case "test":
+ return gen.Test
+ case "size":
+ return gen.Size
+ case "marshal":
+ return gen.Marshal
+ case "unmarshal":
+ return gen.Unmarshal
+ default:
+ return 0
+ }
+}
+
+func (f *FileSet) applyDirs(p *gen.Printer) {
+ // apply directives of the form
+ //
+ // //msgp:encode ignore {{TypeName}}
+ //
+loop:
+ for _, d := range f.Directives {
+ chunks := strings.Split(d, " ")
+ if len(chunks) > 1 {
+ for i := range chunks {
+ chunks[i] = strings.TrimSpace(chunks[i])
+ }
+ m := strToMethod(chunks[0])
+ if m == 0 {
+ warnf("unknown pass name: %q\n", chunks[0])
+ continue loop
+ }
+ if fn, ok := passDirectives[chunks[1]]; ok {
+ pushstate(chunks[1])
+ err := fn(m, chunks[2:], p)
+ if err != nil {
+ warnf("error applying directive: %s\n", err)
+ }
+ popstate()
+ } else {
+ warnf("unrecognized directive %q\n", chunks[1])
+ }
+ } else {
+ warnf("empty directive: %q\n", d)
+ }
+ }
+}
+
+func (f *FileSet) PrintTo(p *gen.Printer) error {
+ f.applyDirs(p)
+ names := make([]string, 0, len(f.Identities))
+ for name := range f.Identities {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+ for _, name := range names {
+ el := f.Identities[name]
+ el.SetVarname("z")
+ pushstate(el.TypeName())
+ err := p.Print(el)
+ popstate()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// getTypeSpecs extracts all of the *ast.TypeSpecs in the file
+// into fs.Specs, but does not process them into elements
+func (fs *FileSet) getTypeSpecs(f *ast.File) {
+
+ // collect all imports...
+ fs.Imports = append(fs.Imports, f.Imports...)
+
+ // check all declarations...
+ for i := range f.Decls {
+
+ // for GenDecls...
+ if g, ok := f.Decls[i].(*ast.GenDecl); ok {
+
+ // and check the specs...
+ for _, s := range g.Specs {
+
+ // for ast.TypeSpecs....
+ if ts, ok := s.(*ast.TypeSpec); ok {
+ switch ts.Type.(type) {
+
+ // this is the list of parse-able
+ // type specs
+ case *ast.StructType,
+ *ast.ArrayType,
+ *ast.StarExpr,
+ *ast.MapType,
+ *ast.Ident:
+ fs.Specs[ts.Name.Name] = ts.Type
+
+ }
+ }
+ }
+ }
+ }
+}
+
+func fieldName(f *ast.Field) string {
+ switch len(f.Names) {
+ case 0:
+ return stringify(f.Type)
+ case 1:
+ return f.Names[0].Name
+ default:
+ return f.Names[0].Name + " (and others)"
+ }
+}
+
+func (fs *FileSet) parseFieldList(fl *ast.FieldList) []gen.StructField {
+ if fl == nil || fl.NumFields() == 0 {
+ return nil
+ }
+ out := make([]gen.StructField, 0, fl.NumFields())
+ for _, field := range fl.List {
+ pushstate(fieldName(field))
+ fds := fs.getField(field)
+ if len(fds) > 0 {
+ out = append(out, fds...)
+ } else {
+ warnln("ignored.")
+ }
+ popstate()
+ }
+ return out
+}
+
+// translate *ast.Field into []gen.StructField
+func (fs *FileSet) getField(f *ast.Field) []gen.StructField {
+ sf := make([]gen.StructField, 1)
+ var extension bool
+ // parse tag; otherwise field name is field tag
+ if f.Tag != nil {
+ body := reflect.StructTag(strings.Trim(f.Tag.Value, "`")).Get("msg")
+ tags := strings.Split(body, ",")
+ if len(tags) == 2 && tags[1] == "extension" {
+ extension = true
+ }
+ // ignore "-" fields
+ if tags[0] == "-" {
+ return nil
+ }
+ sf[0].FieldTag = tags[0]
+ }
+
+ ex := fs.parseExpr(f.Type)
+ if ex == nil {
+ return nil
+ }
+
+ // parse field name
+ switch len(f.Names) {
+ case 0:
+ sf[0].FieldName = embedded(f.Type)
+ case 1:
+ sf[0].FieldName = f.Names[0].Name
+ default:
+ // this is for a multiple in-line declaration,
+ // e.g. type A struct { One, Two int }
+ sf = sf[0:0]
+ for _, nm := range f.Names {
+ sf = append(sf, gen.StructField{
+ FieldTag: nm.Name,
+ FieldName: nm.Name,
+ FieldElem: ex.Copy(),
+ })
+ }
+ return sf
+ }
+ sf[0].FieldElem = ex
+ if sf[0].FieldTag == "" {
+ sf[0].FieldTag = sf[0].FieldName
+ }
+
+ // validate extension
+ if extension {
+ switch ex := ex.(type) {
+ case *gen.Ptr:
+ if b, ok := ex.Value.(*gen.BaseElem); ok {
+ b.Value = gen.Ext
+ } else {
+ warnln("couldn't cast to extension.")
+ return nil
+ }
+ case *gen.BaseElem:
+ ex.Value = gen.Ext
+ default:
+ warnln("couldn't cast to extension.")
+ return nil
+ }
+ }
+ return sf
+}
+
+// extract embedded field name
+//
+// so, for a struct like
+//
+// type A struct {
+// io.Writer
+// }
+//
+// we want "Writer"
+func embedded(f ast.Expr) string {
+ switch f := f.(type) {
+ case *ast.Ident:
+ return f.Name
+ case *ast.StarExpr:
+ return embedded(f.X)
+ case *ast.SelectorExpr:
+ return f.Sel.Name
+ default:
+ // other possibilities are disallowed
+ return ""
+ }
+}
+
+// stringify a field type name
+func stringify(e ast.Expr) string {
+ switch e := e.(type) {
+ case *ast.Ident:
+ return e.Name
+ case *ast.StarExpr:
+ return "*" + stringify(e.X)
+ case *ast.SelectorExpr:
+ return stringify(e.X) + "." + e.Sel.Name
+ case *ast.ArrayType:
+ if e.Len == nil {
+ return "[]" + stringify(e.Elt)
+ }
+ return fmt.Sprintf("[%s]%s", stringify(e.Len), stringify(e.Elt))
+ case *ast.InterfaceType:
+ if e.Methods == nil || e.Methods.NumFields() == 0 {
+ return "interface{}"
+ }
+ }
+ return ""
+}
+
+// recursively translate ast.Expr to gen.Elem; nil means type not supported
+// expected input types:
+// - *ast.MapType (map[T]J)
+// - *ast.Ident (name)
+// - *ast.ArrayType ([(sz)]T)
+// - *ast.StarExpr (*T)
+// - *ast.StructType (struct {})
+// - *ast.SelectorExpr (a.B)
+// - *ast.InterfaceType (interface {})
+func (fs *FileSet) parseExpr(e ast.Expr) gen.Elem {
+ switch e := e.(type) {
+
+ case *ast.MapType:
+ if k, ok := e.Key.(*ast.Ident); ok && k.Name == "string" {
+ if in := fs.parseExpr(e.Value); in != nil {
+ return &gen.Map{Value: in}
+ }
+ }
+ return nil
+
+ case *ast.Ident:
+ b := gen.Ident(e.Name)
+
+ // work to resolve this expression
+ // can be done later, once we've resolved
+ // everything else.
+ if b.Value == gen.IDENT {
+ if _, ok := fs.Specs[e.Name]; !ok {
+ warnf("non-local identifier: %s\n", e.Name)
+ }
+ }
+ return b
+
+ case *ast.ArrayType:
+
+ // special case for []byte
+ if e.Len == nil {
+ if i, ok := e.Elt.(*ast.Ident); ok && i.Name == "byte" {
+ return &gen.BaseElem{Value: gen.Bytes}
+ }
+ }
+
+ // return early if we don't know
+ // what the slice element type is
+ els := fs.parseExpr(e.Elt)
+ if els == nil {
+ return nil
+ }
+
+ // array and not a slice
+ if e.Len != nil {
+ switch s := e.Len.(type) {
+ case *ast.BasicLit:
+ return &gen.Array{
+ Size: s.Value,
+ Els: els,
+ }
+
+ case *ast.Ident:
+ return &gen.Array{
+ Size: s.String(),
+ Els: els,
+ }
+
+ case *ast.SelectorExpr:
+ return &gen.Array{
+ Size: stringify(s),
+ Els: els,
+ }
+
+ default:
+ return nil
+ }
+ }
+ return &gen.Slice{Els: els}
+
+ case *ast.StarExpr:
+ if v := fs.parseExpr(e.X); v != nil {
+ return &gen.Ptr{Value: v}
+ }
+ return nil
+
+ case *ast.StructType:
+ if fields := fs.parseFieldList(e.Fields); len(fields) > 0 {
+ return &gen.Struct{Fields: fields}
+ }
+ return nil
+
+ case *ast.SelectorExpr:
+ return gen.Ident(stringify(e))
+
+ case *ast.InterfaceType:
+ // support `interface{}`
+ if len(e.Methods.List) == 0 {
+ return &gen.BaseElem{Value: gen.Intf}
+ }
+ return nil
+
+ default: // other types not supported
+ return nil
+ }
+}
+
+func infof(s string, v ...interface{}) {
+ pushstate(s)
+ fmt.Printf(chalk.Green.Color(strings.Join(logctx, ": ")), v...)
+ popstate()
+}
+
+func infoln(s string) {
+ pushstate(s)
+ fmt.Println(chalk.Green.Color(strings.Join(logctx, ": ")))
+ popstate()
+}
+
+func warnf(s string, v ...interface{}) {
+ pushstate(s)
+ fmt.Printf(chalk.Yellow.Color(strings.Join(logctx, ": ")), v...)
+ popstate()
+}
+
+func warnln(s string) {
+ pushstate(s)
+ fmt.Println(chalk.Yellow.Color(strings.Join(logctx, ": ")))
+ popstate()
+}
+
+func fatalf(s string, v ...interface{}) {
+ pushstate(s)
+ fmt.Printf(chalk.Red.Color(strings.Join(logctx, ": ")), v...)
+ popstate()
+}
+
+var logctx []string
+
+// push logging state
+func pushstate(s string) {
+ logctx = append(logctx, s)
+}
+
+// pop logging state
+func popstate() {
+ logctx = logctx[:len(logctx)-1]
+}
diff --git a/vendor/github.com/tinylib/msgp/parse/inline.go b/vendor/github.com/tinylib/msgp/parse/inline.go
new file mode 100644
index 00000000..5dba4e56
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/parse/inline.go
@@ -0,0 +1,146 @@
+package parse
+
+import (
+ "github.com/tinylib/msgp/gen"
+)
+
+// This file defines when and how we
+// propagate type information from
+// one type declaration to another.
+// After the processing pass, every
+// non-primitive type is marshalled/unmarshalled/etc.
+// through a function call. Here, we propagate
+// the type information into the caller's type
+// tree *if* the child type is simple enough.
+//
+// For example, types like
+//
+// type A [4]int
+//
+// will get pushed into parent methods,
+// whereas types like
+//
+// type B [3]map[string]struct{A, B [4]string}
+//
+// will not.
+
+// this is an approximate measure
+// of the number of children in a node
+const maxComplex = 5
+
+// begin recursive search for identities with the
+// given name and replace them with be
+func (f *FileSet) findShim(id string, be *gen.BaseElem) {
+ for name, el := range f.Identities {
+ pushstate(name)
+ switch el := el.(type) {
+ case *gen.Struct:
+ for i := range el.Fields {
+ f.nextShim(&el.Fields[i].FieldElem, id, be)
+ }
+ case *gen.Array:
+ f.nextShim(&el.Els, id, be)
+ case *gen.Slice:
+ f.nextShim(&el.Els, id, be)
+ case *gen.Map:
+ f.nextShim(&el.Value, id, be)
+ case *gen.Ptr:
+ f.nextShim(&el.Value, id, be)
+ }
+ popstate()
+ }
+ // we'll need this at the top level as well
+ f.Identities[id] = be
+}
+
+func (f *FileSet) nextShim(ref *gen.Elem, id string, be *gen.BaseElem) {
+ if (*ref).TypeName() == id {
+ vn := (*ref).Varname()
+ *ref = be.Copy()
+ (*ref).SetVarname(vn)
+ } else {
+ switch el := (*ref).(type) {
+ case *gen.Struct:
+ for i := range el.Fields {
+ f.nextShim(&el.Fields[i].FieldElem, id, be)
+ }
+ case *gen.Array:
+ f.nextShim(&el.Els, id, be)
+ case *gen.Slice:
+ f.nextShim(&el.Els, id, be)
+ case *gen.Map:
+ f.nextShim(&el.Value, id, be)
+ case *gen.Ptr:
+ f.nextShim(&el.Value, id, be)
+ }
+ }
+}
+
+// propInline identifies and inlines candidates
+func (f *FileSet) propInline() {
+ for name, el := range f.Identities {
+ pushstate(name)
+ switch el := el.(type) {
+ case *gen.Struct:
+ for i := range el.Fields {
+ f.nextInline(&el.Fields[i].FieldElem, name)
+ }
+ case *gen.Array:
+ f.nextInline(&el.Els, name)
+ case *gen.Slice:
+ f.nextInline(&el.Els, name)
+ case *gen.Map:
+ f.nextInline(&el.Value, name)
+ case *gen.Ptr:
+ f.nextInline(&el.Value, name)
+ }
+ popstate()
+ }
+}
+
+const fatalloop = `detected infinite recursion in inlining loop!
+Please file a bug at github.com/tinylib/msgp/issues!
+Thanks!
+`
+
+func (f *FileSet) nextInline(ref *gen.Elem, root string) {
+ switch el := (*ref).(type) {
+ case *gen.BaseElem:
+ // ensure that we're not inlining
+ // a type into itself
+ typ := el.TypeName()
+ if el.Value == gen.IDENT && typ != root {
+ if node, ok := f.Identities[typ]; ok && node.Complexity() < maxComplex {
+ infof("inlining %s\n", typ)
+
+ // This should never happen; it will cause
+ // infinite recursion.
+ if node == *ref {
+ panic(fatalloop)
+ }
+
+ *ref = node.Copy()
+ f.nextInline(ref, node.TypeName())
+ } else if !ok && !el.Resolved() {
+ // this is the point at which we're sure that
+ // we've got a type that isn't a primitive,
+ // a library builtin, or a processed type
+ warnf("unresolved identifier: %s\n", typ)
+ }
+ }
+ case *gen.Struct:
+ for i := range el.Fields {
+ f.nextInline(&el.Fields[i].FieldElem, root)
+ }
+ case *gen.Array:
+ f.nextInline(&el.Els, root)
+ case *gen.Slice:
+ f.nextInline(&el.Els, root)
+ case *gen.Map:
+ f.nextInline(&el.Value, root)
+ case *gen.Ptr:
+ f.nextInline(&el.Value, root)
+ default:
+ panic("bad elem type")
+ }
+}
diff --git a/vendor/github.com/tinylib/msgp/printer/print.go b/vendor/github.com/tinylib/msgp/printer/print.go
new file mode 100644
index 00000000..4766871f
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/printer/print.go
@@ -0,0 +1,128 @@
+package printer
+
+import (
+ "bytes"
+ "fmt"
+ "github.com/tinylib/msgp/gen"
+ "github.com/tinylib/msgp/parse"
+ "github.com/ttacon/chalk"
+ "golang.org/x/tools/imports"
+ "io"
+ "io/ioutil"
+ "strings"
+)
+
+func infof(s string, v ...interface{}) {
+ fmt.Printf(chalk.Magenta.Color(s), v...)
+}
+
+// PrintFile prints the methods for the provided list
+// of elements to the given file name and canonical
+// package path.
+func PrintFile(file string, f *parse.FileSet, mode gen.Method) error {
+ out, tests, err := generate(f, mode)
+ if err != nil {
+ return err
+ }
+
+ // we'll run goimports on the main file
+ // in another goroutine, and run it here
+ // for the test file. empirically, this
+ // takes about the same amount of time as
+ // doing them in serial when GOMAXPROCS=1,
+ // and faster otherwise.
+ res := goformat(file, out.Bytes())
+ if tests != nil {
+ testfile := strings.TrimSuffix(file, ".go") + "_test.go"
+ err = format(testfile, tests.Bytes())
+ if err != nil {
+ return err
+ }
+ infof(">>> Wrote and formatted \"%s\"\n", testfile)
+ }
+ err = <-res
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func format(file string, data []byte) error {
+ out, err := imports.Process(file, data, nil)
+ if err != nil {
+ return err
+ }
+ return ioutil.WriteFile(file, out, 0600)
+}
+
+func goformat(file string, data []byte) <-chan error {
+ out := make(chan error, 1)
+ go func(file string, data []byte, end chan error) {
+ end <- format(file, data)
+ infof(">>> Wrote and formatted \"%s\"\n", file)
+ }(file, data, out)
+ return out
+}
+
+func dedupImports(imp []string) []string {
+ m := make(map[string]struct{})
+ for i := range imp {
+ m[imp[i]] = struct{}{}
+ }
+ r := []string{}
+ for k := range m {
+ r = append(r, k)
+ }
+ return r
+}
+
+func generate(f *parse.FileSet, mode gen.Method) (*bytes.Buffer, *bytes.Buffer, error) {
+ outbuf := bytes.NewBuffer(make([]byte, 0, 4096))
+ writePkgHeader(outbuf, f.Package)
+
+ myImports := []string{"github.com/tinylib/msgp/msgp"}
+ for _, imp := range f.Imports {
+ if imp.Name != nil {
+ // have an alias, include it.
+ myImports = append(myImports, imp.Name.Name+` `+imp.Path.Value)
+ } else {
+ myImports = append(myImports, imp.Path.Value)
+ }
+ }
+ dedup := dedupImports(myImports)
+ writeImportHeader(outbuf, dedup...)
+
+ var testbuf *bytes.Buffer
+ var testwr io.Writer
+ if mode&gen.Test == gen.Test {
+ testbuf = bytes.NewBuffer(make([]byte, 0, 4096))
+ writePkgHeader(testbuf, f.Package)
+ if mode&(gen.Encode|gen.Decode) != 0 {
+ writeImportHeader(testbuf, "bytes", "github.com/tinylib/msgp/msgp", "testing")
+ } else {
+ writeImportHeader(testbuf, "github.com/tinylib/msgp/msgp", "testing")
+ }
+ testwr = testbuf
+ }
+ return outbuf, testbuf, f.PrintTo(gen.NewPrinter(mode, outbuf, testwr))
+}
+
+func writePkgHeader(b *bytes.Buffer, name string) {
+ b.WriteString("package ")
+ b.WriteString(name)
+ b.WriteByte('\n')
+ b.WriteString("// NOTE: THIS FILE WAS PRODUCED BY THE\n// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)\n// DO NOT EDIT\n\n")
+}
+
+func writeImportHeader(b *bytes.Buffer, imports ...string) {
+ b.WriteString("import (\n")
+ for _, im := range imports {
+ if im[len(im)-1] == '"' {
+ // support aliased imports
+ fmt.Fprintf(b, "\t%s\n", im)
+ } else {
+ fmt.Fprintf(b, "\t%q\n", im)
+ }
+ }
+ b.WriteString(")\n\n")
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/.circleci/config.yml b/vendor/gopkg.in/DataDog/dd-trace-go.v1/.circleci/config.yml
new file mode 100644
index 00000000..fdd985a2
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/.circleci/config.yml
@@ -0,0 +1,104 @@
+version: 2
+jobs:
+ build:
+ working_directory: /go/src/gopkg.in/DataDog/dd-trace-go.v1
+ resource_class: xlarge
+
+ docker:
+ - image: circleci/golang:latest
+ - image: cassandra:3.7
+ - image: circleci/mysql:5.7
+ environment:
+ MYSQL_ROOT_PASSWORD: admin
+ MYSQL_PASSWORD: test
+ MYSQL_USER: test
+ MYSQL_DATABASE: test
+ - image: circleci/postgres:9.5
+ environment:
+ POSTGRES_PASSWORD: postgres
+ POSTGRES_USER: postgres
+ POSTGRES_DB: postgres
+ - image: redis:3.2
+ - image: elasticsearch:2
+ environment:
+ ES_JAVA_OPTS: "-Xms750m -Xmx750m" # https://github.com/10up/wp-local-docker/issues/6
+ - image: elasticsearch:5
+ environment:
+ ES_JAVA_OPTS: "-Xms750m -Xmx750m" # https://github.com/10up/wp-local-docker/issues/6
+ - image: datadog/docker-dd-agent
+ environment:
+ DD_APM_ENABLED: "true"
+ DD_BIND_HOST: "0.0.0.0"
+ DD_API_KEY: invalid_key_but_this_is_fine
+ - image: circleci/mongo:latest-ram
+ - image: memcached:1.5.9
+
+ steps:
+ - checkout
+ - run:
+ name: Vendor gRPC v1.2.0
+ # This step vendors gRPC v1.2.0 inside our gRPC.v12 contrib
+ # to allow running the tests against the correct version of
+ # the gRPC library. The library is not committed into the
+ # repository to avoid conflicts with the user's imports.
+ environment:
+ GRPC_DEST: contrib/google.golang.org/grpc.v12/vendor/google.golang.org/grpc
+ command: |
+ mkdir -p $GRPC_DEST
+ git clone --branch v1.2.0 https://github.com/grpc/grpc-go $GRPC_DEST
+
+ - run:
+ name: Fetching dependencies
+ command: |
+ go get -v -t ./...
+ go get -v -u golang.org/x/lint/golint
+ go get -v -u github.com/alecthomas/gometalinter
+
+ - run:
+ name: Wait for MySQL
+ command: dockerize -wait tcp://localhost:3306 -timeout 1m
+
+ - run:
+ name: Wait for Postgres
+ command: dockerize -wait tcp://localhost:5432 -timeout 1m
+
+ - run:
+ name: Wait for Redis
+ command: dockerize -wait tcp://localhost:6379 -timeout 1m
+
+ - run:
+ name: Wait for ElasticSearch (1)
+ command: dockerize -wait http://localhost:9200 -timeout 1m
+
+ - run:
+ name: Wait for ElasticSearch (2)
+ command: dockerize -wait http://localhost:9201 -timeout 1m
+
+ - run:
+ name: Wait for Datadog Agent
+ command: dockerize -wait tcp://127.0.0.1:8126 -timeout 1m
+
+ - run:
+ name: Wait for Cassandra
+ command: dockerize -wait tcp://localhost:9042 -timeout 2m
+
+ - run:
+ name: Linting
+ command: |
+ gometalinter --disable-all --vendor --deadline=60s \
+ --enable=golint \
+ --enable=vet \
+ ./...
+
+ - run:
+ name: Testing
+ command: |
+ INTEGRATION=1 go test -v -race `go list ./... | grep -v contrib/go-redis/redis`
+
+ - run:
+ name: Testing contrib/go-redis/redis
+ command: |
+ (cd $GOPATH/src/github.com/go-redis/redis && git checkout v6.13.2)
+ INTEGRATION=1 go test -v -race ./contrib/go-redis/redis/...
+ (cd $GOPATH/src/github.com/go-redis/redis && git checkout master)
+ INTEGRATION=1 go test -v -race ./contrib/go-redis/redis/...
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/.gitignore b/vendor/gopkg.in/DataDog/dd-trace-go.v1/.gitignore
new file mode 100644
index 00000000..094cfe1b
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/.gitignore
@@ -0,0 +1,14 @@
+# go
+bin/
+
+# profiling
+*.test
+*.out
+
+# generic
+.DS_Store
+*.cov
+*.lock
+*.swp
+
+/contrib/google.golang.org/grpc.v12/vendor/
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/CONTRIBUTING.md b/vendor/gopkg.in/DataDog/dd-trace-go.v1/CONTRIBUTING.md
new file mode 100644
index 00000000..cd479542
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/CONTRIBUTING.md
@@ -0,0 +1,17 @@
+### Contributing
+
+Pull requests for bug fixes are welcome, but before submitting new features or changes to existing functionality, please [open an issue](https://github.com/DataDog/dd-trace-go/issues/new)
+and discuss your ideas or propose the changes you wish to make. After a resolution is reached, a PR can be submitted for review.
+
+For commit messages, try to use the same conventions as most Go projects, for example:
+```
+contrib/database/sql: use method context on QueryContext and ExecContext
+
+QueryContext and ExecContext were using the wrong context to create
+spans. Instead of using the method's argument they were using the
+Prepare context, which was wrong.
+
+Fixes #113
+```
+Please apply the same logic to Pull Requests: start with the package name, followed by a colon and a description of the change, just like
+the official [Go language](https://github.com/golang/go/pulls) repository.
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE b/vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE
new file mode 100644
index 00000000..23351821
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE
@@ -0,0 +1,24 @@
+Copyright (c) 2016, Datadog
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Datadog nor the
+ names of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL DATADOG BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE-3rdparty.csv b/vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE-3rdparty.csv
new file mode 100644
index 00000000..582ea791
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE-3rdparty.csv
@@ -0,0 +1,2 @@
+Component,Origin,License,Copyright
+import,io.opentracing,Apache-2.0,Copyright 2016-2017 The OpenTracing Authors
\ No newline at end of file
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/MIGRATING.md b/vendor/gopkg.in/DataDog/dd-trace-go.v1/MIGRATING.md
new file mode 100644
index 00000000..84f5bf49
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/MIGRATING.md
@@ -0,0 +1,104 @@
+# Migration Guide
+
+This document outlines how to migrate from an older version of the Datadog tracer (0.6.x) to v1.
+
+Datadog's v1 version of the Go tracer provides not only an overhauled core that comes with huge performance improvements, but also the promise of a new and stable API to rely on. It is the result of continuous feedback from customers and the community, as well as our extensive internal usage.
+
+As is common and recommended in the Go community, the best way to approach migrating to this new API is by using the [gradual code repair](https://talks.golang.org/2016/refactor.article) method. We have done the same internally and it has worked just great! For this exact reason we have provided a new, [semver](https://semver.org/)-friendly import path to help with using both tracers in parallel, without conflict, for the duration of the migration. This new path is `gopkg.in/DataDog/dd-trace-go.v1`.
+
+Our [godoc page](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace) should prove helpful during this process. We also have the [official documentation](https://docs.datadoghq.com/tracing/setup/go/), which contains a couple of examples.
+
+This document will further outline some _before_ and _after_ examples.
+
+## Starting the tracer
+
+The new tracer needs to be started before it can be used; a pre-started default tracer is no longer available, and the default tracer is now a no-op.
+
+Here is an example of starting a custom tracer with a non-default agent endpoint using the old API:
+
+```go
+t := tracer.NewTracerTransport(tracer.NewTransport("localhost", "8199"))
+t.SetDebugLogging(true)
+defer t.ForceFlush()
+```
+
+This would now become:
+
+```go
+tracer.Start(
+ tracer.WithAgentAddr("localhost:8199"),
+ tracer.WithDebugMode(true),
+)
+defer tracer.Stop()
+```
+
+Notice that the tracer object is no longer returned. Consult the documentation to see [all possible parameters](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer#StartOption) to the `Start` call.
+
+## Service Information
+
+The [`tracer.SetServiceInfo`](https://godoc.org/github.com/DataDog/dd-trace-go/tracer#Tracer.SetServiceInfo) method has been deprecated. The service information is now set automatically based on the value of the [`ext.SpanType`](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext#SpanType) tag that was set on the root span of a trace.
+
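+For example, a root span might declare its type like so (a sketch; the operation name is illustrative, and `ext.SpanTypeSQL` is one of the constants provided by the `ext` package):
+
+```go
+// the span type drives the service information shown in Datadog
+span := tracer.StartSpan("db.query", tracer.SpanType(ext.SpanTypeSQL))
+defer span.Finish()
+```
+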
+## Spans
+
+Starting spans is now possible with [functional options](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer#StartSpanOption), which means that all span properties (or none) can be set dynamically when starting a span. Before:
+
+```go
+span := tracer.NewRootSpan("web.request", "my_service", "resource_name")
+```
+
+Becomes:
+
+```go
+span := tracer.StartSpan("web.request", tracer.ServiceName("my_service"), tracer.ResourceName("resource_name"))
+```
+
+We've done this because in many cases the extra parameters could become tedious, given that service names can be inherited and resource names can default to the operation name. This also allows us to have one single, more dynamic API for starting both root and child spans. Check out all possible [StartSpanOption](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer#StartSpanOption) values to get an idea.
+
+### Children
+
+Here is an example for spawning a child of the previously declared span:
+```go
+child := tracer.StartSpan("process.user", tracer.ChildOf(span.Context()))
+```
+You will notice that the new tracer also introduces the concept of [SpanContext](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace#SpanContext), which is different from Go's context: it carries the information needed to spawn children of a specific span and can be propagated across processes. To learn more about distributed tracing, check the package-level [documentation](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer#ChildOf) of the `tracer` package.
+
+### Using Go's context
+
+It is also possible to create children of spans that live inside Go's [context](https://golang.org/pkg/context/):
+```go
+child, ctx := tracer.StartSpanFromContext(ctx, "process.user", tracer.Tag("key", "value"))
+```
+This will create a child of the span which exists inside the passed context and return it, along with a new context which contains the new span. To add or retrieve a span from a context use the [`ContextWithSpan`](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer#ContextWithSpan) or [`SpanFromContext`](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer#SpanFromContext) functions.
+
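+For instance, retrieving the active span from a context might look like this (a sketch using the functions above):
+
+```go
+// SpanFromContext reports whether a span was found in ctx
+if span, ok := tracer.SpanFromContext(ctx); ok {
+	span.SetTag("key", "value")
+}
+```
+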
+### Setting errors
+
+The [`SetError`](https://godoc.org/github.com/DataDog/dd-trace-go/tracer#Span.SetError) method has been deprecated in favour of the [`ext.Error`](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext#Error) tag, which matches other tracing libraries in the wild. Whereas before we had:
+
+```go
+span.SetError(err)
+```
+
+Now we have:
+
+```go
+span.SetTag(ext.Error, err)
+```
+
+Note that, in addition to `error`, this tag also accepts values of type `string` and `bool` for setting errors.
+
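+For instance, both of the following also mark the span as erroneous (a sketch; the message is illustrative):
+
+```go
+span.SetTag(ext.Error, "timed out waiting for response") // string form
+span.SetTag(ext.Error, true)                             // boolean form
+```
+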
+### Finishing
+
+The [`FinishWithErr`](https://godoc.org/github.com/DataDog/dd-trace-go/tracer#Span.FinishWithErr) and [`FinishWithTime`](https://godoc.org/github.com/DataDog/dd-trace-go/tracer#Span.FinishWithTime) methods have been removed in favour of a set of [`FinishOption`](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer#FinishOption) values. For example, finishing with an error and a custom finish time would now become:
+
+```go
+span.Finish(tracer.WithError(err), tracer.FinishTime(t))
+```
+
+Providing a `nil` value as an error is perfectly fine and will not mark the span as erroneous.
+
+## Further reading
+
+The new version of the tracer also comes with a lot of new features, such as support for distributed tracing and distributed sampling priority.
+
+* package-level documentation of the [`tracer` package](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer) for a better overview.
+* [official documentation](https://docs.datadoghq.com/tracing/setup/go/)
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/README.md b/vendor/gopkg.in/DataDog/dd-trace-go.v1/README.md
new file mode 100644
index 00000000..1a77333c
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/README.md
@@ -0,0 +1,30 @@
+[![CircleCI](https://circleci.com/gh/DataDog/dd-trace-go/tree/v1.svg?style=svg)](https://circleci.com/gh/DataDog/dd-trace-go/tree/v1)
+[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace)
+
+### Installing
+
+```bash
+go get gopkg.in/DataDog/dd-trace-go.v1/ddtrace
+```
+
+Requires:
+
+* Go 1.9
+* Datadog's Trace Agent >= 5.21.1
+
+### Documentation
+
+The API is documented on [godoc](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace) as well as Datadog's [official documentation](https://docs.datadoghq.com/tracing/setup/go/). If you are migrating
+from an older version of the tracer (e.g. 0.6.x) you may also find the [migration document](https://github.com/DataDog/dd-trace-go/blob/v1/MIGRATING.md) we've put together helpful.
+
+### Testing
+
+Tests can be run locally using the Go toolset. The grpc.v12 integration will fail (and this is normal), because it covers deprecated methods. In the CI environment
+we vendor this version of the library inside the integration. Under normal circumstances this is not something that we want to do, because users of this integration
+might be running versions different from the vendored one, creating hard-to-debug conflicts.
+
+To run integration tests locally, you should set the `INTEGRATION` environment variable. The dependencies of the integration tests are best run via Docker. To get an
+idea about the versions and the set-up, take a look at our [CI config](https://github.com/DataDog/dd-trace-go/blob/v1/.circleci/config.yml).
+
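+For example, assuming the backing services from the CI config are reachable locally, a full integration run might look like:
+
+```bash
+INTEGRATION=1 go test -v -race ./...
+```
+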
+The best way to run the entire test suite is using the [CircleCI CLI](https://circleci.com/docs/2.0/local-jobs/). Simply run `circleci build`
+in the repository root. Note that you might have to increase the resources dedicated to Docker to around 4GB.
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/README.md b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/README.md
new file mode 100644
index 00000000..fb2de1a8
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/README.md
@@ -0,0 +1,19 @@
+[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/contrib)
+
+The purpose of these packages is to provide tracing on top of commonly used packages, both from the standard library and from the
+community, in a "plug-and-play" manner. This means that by simply importing the appropriate path, functions with the same signatures
+as those of the original package are exposed. These functions return structures which embed the original return value, allowing
+them to be used as they normally would, with tracing activated out of the box.
+
+All of these libraries are supported by our [APM product](https://www.datadoghq.com/apm/).
+
+### Usage
+
+First, find the library which you'd like to integrate with. The naming convention for the integration packages is as follows (see the import sketch after this list):
+
+* If the package is from the standard library (e.g. `database/sql`), it will be located at the same path.
+* If the package is hosted on GitHub (e.g. `github.com/user/repo`), it will be located at the shorthand path `user/repo`.
+* If the package is from anywhere else (e.g. `google.golang.org/grpc`), it can be found under the full import path.
+
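+As a sketch, the tracing integrations for `database/sql` (standard library) and `github.com/bradfitz/gomemcache` (hosted on GitHub) would be imported as:
+
+```go
+import (
+	sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql"
+	memcachetrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache"
+)
+```
+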
+Each integration comes with thorough documentation and usage examples. A good overview can be seen on our
+[godoc](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/contrib) page.
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/aws.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/aws.go
new file mode 100644
index 00000000..0cbd6999
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/aws.go
@@ -0,0 +1,99 @@
+// Package aws provides functions to trace aws/aws-sdk-go (https://github.com/aws/aws-sdk-go).
+package aws // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws"
+
+import (
+ "strconv"
+
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+const (
+ tagAWSAgent = "aws.agent"
+ tagAWSOperation = "aws.operation"
+ tagAWSRegion = "aws.region"
+)
+
+type handlers struct {
+ cfg *config
+}
+
+// WrapSession wraps a session.Session, causing requests and responses to be traced.
+func WrapSession(s *session.Session, opts ...Option) *session.Session {
+ cfg := new(config)
+ for _, opt := range opts {
+ opt(cfg)
+ }
+ h := &handlers{cfg: cfg}
+ s = s.Copy()
+ s.Handlers.Send.PushFrontNamed(request.NamedHandler{
+ Name: "gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/handlers.Send",
+ Fn: h.Send,
+ })
+ s.Handlers.Complete.PushBackNamed(request.NamedHandler{
+ Name: "gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/handlers.Complete",
+ Fn: h.Complete,
+ })
+ return s
+}
+
+func (h *handlers) Send(req *request.Request) {
+ _, ctx := tracer.StartSpanFromContext(req.Context(), h.operationName(req),
+ tracer.SpanType(ext.SpanTypeHTTP),
+ tracer.ServiceName(h.serviceName(req)),
+ tracer.ResourceName(h.resourceName(req)),
+ tracer.Tag(tagAWSAgent, h.awsAgent(req)),
+ tracer.Tag(tagAWSOperation, h.awsOperation(req)),
+ tracer.Tag(tagAWSRegion, h.awsRegion(req)),
+ tracer.Tag(ext.HTTPMethod, req.Operation.HTTPMethod),
+ tracer.Tag(ext.HTTPURL, req.HTTPRequest.URL.String()),
+ )
+ req.SetContext(ctx)
+}
+
+func (h *handlers) Complete(req *request.Request) {
+ span, ok := tracer.SpanFromContext(req.Context())
+ if !ok {
+ return
+ }
+ if req.HTTPResponse != nil {
+ span.SetTag(ext.HTTPCode, strconv.Itoa(req.HTTPResponse.StatusCode))
+ }
+ span.Finish(tracer.WithError(req.Error))
+}
+
+func (h *handlers) operationName(req *request.Request) string {
+ return h.awsService(req) + ".command"
+}
+
+func (h *handlers) resourceName(req *request.Request) string {
+ return h.awsService(req) + "." + req.Operation.Name
+}
+
+func (h *handlers) serviceName(req *request.Request) string {
+ if h.cfg.serviceName != "" {
+ return h.cfg.serviceName
+ }
+ return "aws." + h.awsService(req)
+}
+
+func (h *handlers) awsAgent(req *request.Request) string {
+ if agent := req.HTTPRequest.Header.Get("User-Agent"); agent != "" {
+ return agent
+ }
+ return "aws-sdk-go"
+}
+
+func (h *handlers) awsOperation(req *request.Request) string {
+ return req.Operation.Name
+}
+
+func (h *handlers) awsRegion(req *request.Request) string {
+ return req.ClientInfo.SigningRegion
+}
+
+func (h *handlers) awsService(req *request.Request) string {
+ return req.ClientInfo.ServiceName
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/aws_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/aws_test.go
new file mode 100644
index 00000000..26fe8d15
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/aws_test.go
@@ -0,0 +1,77 @@
+package aws
+
+import (
+ "context"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/ec2"
+ "github.com/aws/aws-sdk-go/service/s3"
+ "github.com/stretchr/testify/assert"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+func TestAWS(t *testing.T) {
+ cfg := aws.NewConfig().
+ WithRegion("us-west-2").
+ WithDisableSSL(true).
+ WithCredentials(credentials.AnonymousCredentials)
+
+ session := WrapSession(session.Must(session.NewSession(cfg)))
+
+ t.Run("s3", func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ root, ctx := tracer.StartSpanFromContext(context.Background(), "test")
+ s3api := s3.New(session)
+ s3api.CreateBucketWithContext(ctx, &s3.CreateBucketInput{
+ Bucket: aws.String("BUCKET"),
+ })
+ root.Finish()
+
+ spans := mt.FinishedSpans()
+ assert.Len(t, spans, 2)
+ assert.Equal(t, spans[1].TraceID(), spans[0].TraceID())
+
+ s := spans[0]
+ assert.Equal(t, "s3.command", s.OperationName())
+ assert.Contains(t, s.Tag(tagAWSAgent), "aws-sdk-go")
+ assert.Equal(t, "CreateBucket", s.Tag(tagAWSOperation))
+ assert.Equal(t, "us-west-2", s.Tag(tagAWSRegion))
+ assert.Equal(t, "s3.CreateBucket", s.Tag(ext.ResourceName))
+ assert.Equal(t, "aws.s3", s.Tag(ext.ServiceName))
+ assert.Equal(t, "403", s.Tag(ext.HTTPCode))
+ assert.Equal(t, "PUT", s.Tag(ext.HTTPMethod))
+ assert.Equal(t, "http://s3.us-west-2.amazonaws.com/BUCKET", s.Tag(ext.HTTPURL))
+ })
+
+ t.Run("ec2", func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ root, ctx := tracer.StartSpanFromContext(context.Background(), "test")
+ ec2api := ec2.New(session)
+ ec2api.DescribeInstancesWithContext(ctx, &ec2.DescribeInstancesInput{})
+ root.Finish()
+
+ spans := mt.FinishedSpans()
+ assert.Len(t, spans, 2)
+ assert.Equal(t, spans[1].TraceID(), spans[0].TraceID())
+
+ s := spans[0]
+ assert.Equal(t, "ec2.command", s.OperationName())
+ assert.Contains(t, s.Tag(tagAWSAgent), "aws-sdk-go")
+ assert.Equal(t, "DescribeInstances", s.Tag(tagAWSOperation))
+ assert.Equal(t, "us-west-2", s.Tag(tagAWSRegion))
+ assert.Equal(t, "ec2.DescribeInstances", s.Tag(ext.ResourceName))
+ assert.Equal(t, "aws.ec2", s.Tag(ext.ServiceName))
+ assert.Equal(t, "400", s.Tag(ext.HTTPCode))
+ assert.Equal(t, "POST", s.Tag(ext.HTTPMethod))
+ assert.Equal(t, "http://ec2.us-west-2.amazonaws.com/", s.Tag(ext.HTTPURL))
+ })
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/example_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/example_test.go
new file mode 100644
index 00000000..dcc8b390
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/example_test.go
@@ -0,0 +1,21 @@
+package aws_test
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/s3"
+ awstrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws"
+)
+
+// To start tracing requests, wrap the AWS session.Session by invoking
+// awstrace.WrapSession.
+func Example() {
+ cfg := aws.NewConfig().WithRegion("us-west-2")
+ sess := session.Must(session.NewSession(cfg))
+ sess = awstrace.WrapSession(sess)
+
+ s3api := s3.New(sess)
+ s3api.CreateBucket(&s3.CreateBucketInput{
+ Bucket: aws.String("some-bucket-name"),
+ })
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/option.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/option.go
new file mode 100644
index 00000000..6a306102
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/option.go
@@ -0,0 +1,17 @@
+package aws
+
+type config struct {
+ serviceName string
+}
+
+// Option represents an option that can be passed to WrapSession.
+type Option func(*config)
+
+// WithServiceName sets the given service name for the wrapped session.
+// When the service name is not explicitly set, it will be inferred based on the
+// request to AWS.
+func WithServiceName(name string) Option {
+ return func(cfg *config) {
+ cfg.serviceName = name
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache/example_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache/example_test.go
new file mode 100644
index 00000000..5d733bec
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache/example_test.go
@@ -0,0 +1,22 @@
+package memcache_test
+
+import (
+ "context"
+
+ "github.com/bradfitz/gomemcache/memcache"
+ memcachetrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+func Example() {
+ span, ctx := tracer.StartSpanFromContext(context.Background(), "parent.request",
+ tracer.ServiceName("web"),
+ tracer.ResourceName("/home"),
+ )
+ defer span.Finish()
+
+ mc := memcachetrace.WrapClient(memcache.New("127.0.0.1:11211"))
+ // you can use WithContext to set the parent span
+ mc.WithContext(ctx).Set(&memcache.Item{Key: "my key", Value: []byte("my value")})
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache/memcache.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache/memcache.go
new file mode 100644
index 00000000..45aac47d
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache/memcache.go
@@ -0,0 +1,162 @@
+// Package memcache provides functions to trace the bradfitz/gomemcache package (https://github.com/bradfitz/gomemcache).
+//
+// `WrapClient` will wrap a memcache `Client` and return a new struct with all
+// the same methods, so should be seamless for existing applications. It also
+// has an additional `WithContext` method which can be used to connect a span
+// to an existing trace.
+package memcache // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache"
+
+import (
+ "context"
+
+ "github.com/bradfitz/gomemcache/memcache"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+// WrapClient wraps a memcache.Client so that all requests are traced using the
+// default tracer with the service name "memcached".
+func WrapClient(client *memcache.Client, opts ...ClientOption) *Client {
+ cfg := new(clientConfig)
+ defaults(cfg)
+ for _, opt := range opts {
+ opt(cfg)
+ }
+ return &Client{
+ Client: client,
+ cfg: cfg,
+ context: context.Background(),
+ }
+}
+
+// A Client is used to trace requests to the memcached server.
+type Client struct {
+ *memcache.Client
+ cfg *clientConfig
+ context context.Context
+}
+
+// WithContext creates a copy of the Client with the given context.
+func (c *Client) WithContext(ctx context.Context) *Client {
+ // the existing memcache client doesn't support context, but may in the
+ // future, so we do a runtime check to detect this
+ mc := c.Client
+ if wc, ok := (interface{})(c.Client).(interface {
+ WithContext(context.Context) *memcache.Client
+ }); ok {
+ mc = wc.WithContext(ctx)
+ }
+ return &Client{
+ Client: mc,
+ cfg: c.cfg,
+ context: ctx,
+ }
+}
+
+// startSpan starts a span from the context set with WithContext.
+func (c *Client) startSpan(resourceName string) ddtrace.Span {
+ span, _ := tracer.StartSpanFromContext(c.context, operationName,
+ tracer.SpanType(ext.SpanTypeMemcached),
+ tracer.ServiceName(c.cfg.serviceName),
+ tracer.ResourceName(resourceName))
+ return span
+}
+
+// wrapped methods:
+
+// Add invokes and traces Client.Add.
+func (c *Client) Add(item *memcache.Item) error {
+ span := c.startSpan("Add")
+ err := c.Client.Add(item)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// CompareAndSwap invokes and traces Client.CompareAndSwap.
+func (c *Client) CompareAndSwap(item *memcache.Item) error {
+ span := c.startSpan("CompareAndSwap")
+ err := c.Client.CompareAndSwap(item)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// Decrement invokes and traces Client.Decrement.
+func (c *Client) Decrement(key string, delta uint64) (newValue uint64, err error) {
+ span := c.startSpan("Decrement")
+ newValue, err = c.Client.Decrement(key, delta)
+ span.Finish(tracer.WithError(err))
+ return newValue, err
+}
+
+// Delete invokes and traces Client.Delete.
+func (c *Client) Delete(key string) error {
+ span := c.startSpan("Delete")
+ err := c.Client.Delete(key)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// DeleteAll invokes and traces Client.DeleteAll.
+func (c *Client) DeleteAll() error {
+ span := c.startSpan("DeleteAll")
+ err := c.Client.DeleteAll()
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// FlushAll invokes and traces Client.FlushAll.
+func (c *Client) FlushAll() error {
+ span := c.startSpan("FlushAll")
+ err := c.Client.FlushAll()
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// Get invokes and traces Client.Get.
+func (c *Client) Get(key string) (item *memcache.Item, err error) {
+ span := c.startSpan("Get")
+ item, err = c.Client.Get(key)
+ span.Finish(tracer.WithError(err))
+ return item, err
+}
+
+// GetMulti invokes and traces Client.GetMulti.
+func (c *Client) GetMulti(keys []string) (map[string]*memcache.Item, error) {
+ span := c.startSpan("GetMulti")
+ items, err := c.Client.GetMulti(keys)
+ span.Finish(tracer.WithError(err))
+ return items, err
+}
+
+// Increment invokes and traces Client.Increment.
+func (c *Client) Increment(key string, delta uint64) (newValue uint64, err error) {
+ span := c.startSpan("Increment")
+ newValue, err = c.Client.Increment(key, delta)
+ span.Finish(tracer.WithError(err))
+ return newValue, err
+}
+
+// Replace invokes and traces Client.Replace.
+func (c *Client) Replace(item *memcache.Item) error {
+ span := c.startSpan("Replace")
+ err := c.Client.Replace(item)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// Set invokes and traces Client.Set.
+func (c *Client) Set(item *memcache.Item) error {
+ span := c.startSpan("Set")
+ err := c.Client.Set(item)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// Touch invokes and traces Client.Touch.
+func (c *Client) Touch(key string, seconds int32) error {
+ span := c.startSpan("Touch")
+ err := c.Client.Touch(key, seconds)
+ span.Finish(tracer.WithError(err))
+ return err
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache/memcache_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache/memcache_test.go
new file mode 100644
index 00000000..fbef2323
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache/memcache_test.go
@@ -0,0 +1,146 @@
+package memcache
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "net"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/bradfitz/gomemcache/memcache"
+ "github.com/stretchr/testify/assert"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+func TestMemcache(t *testing.T) {
+ li := makeFakeServer(t)
+ defer li.Close()
+
+ testMemcache(t, li.Addr().String())
+}
+
+func TestMemcacheIntegration(t *testing.T) {
+ if _, ok := os.LookupEnv("INTEGRATION"); !ok {
+ t.Skip("to enable integration test, set the INTEGRATION environment variable")
+ }
+
+ testMemcache(t, "localhost:11211")
+}
+
+func testMemcache(t *testing.T, addr string) {
+ client := WrapClient(memcache.New(addr), WithServiceName("test-memcache"))
+ defer client.DeleteAll()
+
+ validateMemcacheSpan := func(t *testing.T, span mocktracer.Span, resourceName string) {
+ assert.Equal(t, "test-memcache", span.Tag(ext.ServiceName),
+ "service name should be set to test-memcache")
+ assert.Equal(t, "memcached.query", span.OperationName(),
+ "operation name should be set to memcached.query")
+ assert.Equal(t, resourceName, span.Tag(ext.ResourceName),
+ "resource name should be set to the memcache command")
+ }
+
+ t.Run("traces without context", func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ err := client.
+ Add(&memcache.Item{
+ Key: "key1",
+ Value: []byte("value1"),
+ })
+ assert.Nil(t, err)
+
+ spans := mt.FinishedSpans()
+ assert.Len(t, spans, 1)
+ validateMemcacheSpan(t, spans[0], "Add")
+ })
+
+ t.Run("traces with context", func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ ctx := context.Background()
+ span, ctx := tracer.StartSpanFromContext(ctx, "parent")
+
+ err := client.
+ WithContext(ctx).
+ Add(&memcache.Item{
+ Key: "key2",
+ Value: []byte("value2"),
+ })
+ assert.Nil(t, err)
+
+ span.Finish()
+
+ spans := mt.FinishedSpans()
+ assert.Len(t, spans, 2)
+ validateMemcacheSpan(t, spans[0], "Add")
+ assert.Equal(t, span, spans[1])
+ assert.Equal(t, spans[1].TraceID(), spans[0].TraceID(),
+ "memcache span should be part of the parent trace")
+ })
+}
+
+func TestFakeServer(t *testing.T) {
+ li := makeFakeServer(t)
+ defer li.Close()
+
+ conn, err := net.Dial("tcp", li.Addr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer conn.Close()
+
+ fmt.Fprintf(conn, "add %s\r\n%s\r\n", "key", "value")
+ s := bufio.NewScanner(conn)
+ assert.True(t, s.Scan())
+ assert.Equal(t, "STORED", s.Text())
+}
+
+func makeFakeServer(t *testing.T) net.Listener {
+ li, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ go func() {
+ for {
+ c, err := li.Accept()
+ if err != nil {
+ break
+ }
+ go func() {
+ defer c.Close()
+
+ // the memcache textual protocol is line-oriented with each
+ // command being space separated:
+ //
+ // command1 arg1 arg2
+ // command2 arg1 arg2
+ // ...
+ //
+ s := bufio.NewScanner(c)
+ for s.Scan() {
+ args := strings.Split(s.Text(), " ")
+ switch args[0] {
+ case "add":
+ if !s.Scan() {
+ return
+ }
+ fmt.Fprintf(c, "STORED\r\n")
+ default:
+ fmt.Fprintf(c, "SERVER ERROR unknown command: %v \r\n", args[0])
+ return
+ }
+ }
+ }()
+ }
+ }()
+
+ return li
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache/option.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache/option.go
new file mode 100644
index 00000000..8bcb57a2
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache/option.go
@@ -0,0 +1,22 @@
+package memcache
+
+const (
+ serviceName = "memcached"
+ operationName = "memcached.query"
+)
+
+type clientConfig struct{ serviceName string }
+
+// ClientOption represents an option that can be passed to WrapClient.
+type ClientOption func(*clientConfig)
+
+func defaults(cfg *clientConfig) {
+ cfg.serviceName = serviceName
+}
+
+// WithServiceName sets the given service name for the wrapped client.
+func WithServiceName(name string) ClientOption {
+ return func(cfg *clientConfig) {
+ cfg.serviceName = name
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/conn.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/conn.go
new file mode 100644
index 00000000..661bb91b
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/conn.go
@@ -0,0 +1,119 @@
+package sql // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql"
+
+import (
+ "context"
+ "database/sql/driver"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+var _ driver.Conn = (*tracedConn)(nil)
+
+type tracedConn struct {
+ driver.Conn
+ *traceParams
+}
+
+func (tc *tracedConn) BeginTx(ctx context.Context, opts driver.TxOptions) (tx driver.Tx, err error) {
+ span := tc.newChildSpanFromContext(ctx, "Begin", "")
+ defer func() {
+ span.Finish(tracer.WithError(err))
+ }()
+ if connBeginTx, ok := tc.Conn.(driver.ConnBeginTx); ok {
+ tx, err = connBeginTx.BeginTx(ctx, opts)
+ if err != nil {
+ return nil, err
+ }
+ return &tracedTx{tx, tc.traceParams, ctx}, nil
+ }
+ tx, err = tc.Conn.Begin()
+ if err != nil {
+ return nil, err
+ }
+ return &tracedTx{tx, tc.traceParams, ctx}, nil
+}
+
+func (tc *tracedConn) PrepareContext(ctx context.Context, query string) (stmt driver.Stmt, err error) {
+ span := tc.newChildSpanFromContext(ctx, "Prepare", query)
+ defer func() {
+ span.Finish(tracer.WithError(err))
+ }()
+ if connPrepareCtx, ok := tc.Conn.(driver.ConnPrepareContext); ok {
+ stmt, err := connPrepareCtx.PrepareContext(ctx, query)
+ if err != nil {
+ return nil, err
+ }
+ return &tracedStmt{stmt, tc.traceParams, ctx, query}, nil
+ }
+ stmt, err = tc.Prepare(query)
+ if err != nil {
+ return nil, err
+ }
+ return &tracedStmt{stmt, tc.traceParams, ctx, query}, nil
+}
+
+func (tc *tracedConn) Exec(query string, args []driver.Value) (driver.Result, error) {
+ if execer, ok := tc.Conn.(driver.Execer); ok {
+ return execer.Exec(query, args)
+ }
+ return nil, driver.ErrSkip
+}
+
+func (tc *tracedConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (r driver.Result, err error) {
+ span := tc.newChildSpanFromContext(ctx, "Exec", query)
+ defer func() {
+ span.Finish(tracer.WithError(err))
+ }()
+ if execContext, ok := tc.Conn.(driver.ExecerContext); ok {
+ return execContext.ExecContext(ctx, query, args)
+ }
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+ return tc.Exec(query, dargs)
+}
+
+// tracedConn has a Ping method in order to implement the driver.Pinger interface
+func (tc *tracedConn) Ping(ctx context.Context) (err error) {
+ span := tc.newChildSpanFromContext(ctx, "Ping", "")
+ defer func() {
+ span.Finish(tracer.WithError(err))
+ }()
+ if pinger, ok := tc.Conn.(driver.Pinger); ok {
+ return pinger.Ping(ctx)
+ }
+ return nil
+}
+
+func (tc *tracedConn) Query(query string, args []driver.Value) (driver.Rows, error) {
+ if queryer, ok := tc.Conn.(driver.Queryer); ok {
+ return queryer.Query(query, args)
+ }
+ return nil, driver.ErrSkip
+}
+
+func (tc *tracedConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (rows driver.Rows, err error) {
+ span := tc.newChildSpanFromContext(ctx, "Query", query)
+ defer func() {
+ span.Finish(tracer.WithError(err))
+ }()
+ if queryerContext, ok := tc.Conn.(driver.QueryerContext); ok {
+ return queryerContext.QueryContext(ctx, query, args)
+ }
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+ return tc.Query(query, dargs)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/driver.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/driver.go
new file mode 100644
index 00000000..ef3e3468
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/driver.go
@@ -0,0 +1,82 @@
+package sql
+
+import (
+ "context"
+ "database/sql"
+ "database/sql/driver"
+ "fmt"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/internal"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+var _ driver.Driver = (*tracedDriver)(nil)
+
+// tracedDriver wraps an inner sql driver with tracing. It implements the (database/sql).driver.Driver interface.
+type tracedDriver struct {
+ driver.Driver
+ driverName string
+ config *registerConfig
+}
+
+// Open returns a tracedConn so that all the information parsed from the DSN
+// can be carried along through the tracing
+func (d *tracedDriver) Open(dsn string) (c driver.Conn, err error) {
+ var (
+ meta map[string]string
+ conn driver.Conn
+ )
+ meta, err = internal.ParseDSN(d.driverName, dsn)
+ if err != nil {
+ return nil, err
+ }
+ conn, err = d.Driver.Open(dsn)
+ if err != nil {
+ return nil, err
+ }
+ tp := &traceParams{
+ driverName: d.driverName,
+ config: d.config,
+ meta: meta,
+ }
+ return &tracedConn{conn, tp}, err
+}
+
+// traceParams stores all information relevant to tracing
+type traceParams struct {
+ config *registerConfig
+ driverName string
+ resource string
+ meta map[string]string
+}
+
+func (tp *traceParams) newChildSpanFromContext(ctx context.Context, resource string, query string) ddtrace.Span {
+ name := fmt.Sprintf("%s.query", tp.driverName)
+ span, _ := tracer.StartSpanFromContext(ctx, name,
+ tracer.SpanType(ext.SpanTypeSQL),
+ tracer.ServiceName(tp.config.serviceName),
+ )
+ if query != "" {
+ resource = query
+ }
+ span.SetTag(ext.ResourceName, resource)
+ for k, v := range tp.meta {
+ span.SetTag(k, v)
+ }
+ return span
+}
+
+// tracedDriverName returns the name of the traced version for the given driver name.
+func tracedDriverName(name string) string { return name + ".traced" }
+
+// driverExists returns true if the given driver name has already been registered.
+func driverExists(name string) bool {
+ for _, v := range sql.Drivers() {
+ if name == v {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/example_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/example_test.go
new file mode 100644
index 00000000..7a43b6a6
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/example_test.go
@@ -0,0 +1,82 @@
+package sql_test
+
+import (
+ "context"
+ "log"
+
+ sqlite "github.com/mattn/go-sqlite3" // Setup application to use Sqlite
+ sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ "github.com/go-sql-driver/mysql"
+ "github.com/lib/pq"
+)
+
+func Example() {
+ // The first step is to register the driver that we will be using.
+ sqltrace.Register("postgres", &pq.Driver{})
+
+ // Followed by a call to Open.
+ db, err := sqltrace.Open("postgres", "postgres://pqgotest:password@localhost/pqgotest?sslmode=disable")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Then, we continue using the database/sql package as we normally would, with tracing.
+ rows, err := db.Query("SELECT name FROM users WHERE age=?", 27)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer rows.Close()
+}
+
+func Example_context() {
+ // Register the driver that we will be using (in this case mysql) under a custom service name.
+ sqltrace.Register("mysql", &mysql.MySQLDriver{}, sqltrace.WithServiceName("my-db"))
+
+ // Open a connection to the DB using the driver we've just registered with tracing.
+ db, err := sqltrace.Open("mysql", "user:password@/dbname")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Create a root span, giving name, server and resource.
+ _, ctx := tracer.StartSpanFromContext(context.Background(), "my-query",
+ tracer.SpanType(ext.SpanTypeSQL),
+ tracer.ServiceName("my-db"),
+ tracer.ResourceName("initial-access"),
+ )
+
+ // Subsequent spans inherit their parent from context.
+ rows, err := db.QueryContext(ctx, "SELECT * FROM city LIMIT 5")
+ if err != nil {
+ log.Fatal(err)
+ }
+ rows.Close()
+}
+
+func Example_sqlite() {
+ // Register the driver that we will be using (in this case Sqlite) under a custom service name.
+ sqltrace.Register("sqlite", &sqlite.SQLiteDriver{}, sqltrace.WithServiceName("sqlite-example"))
+
+ // Open a connection to the DB using the driver we've just registered with tracing.
+ db, err := sqltrace.Open("sqlite", "./test.db")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Create a root span, giving name, server and resource.
+ _, ctx := tracer.StartSpanFromContext(context.Background(), "my-query",
+ tracer.SpanType("example"),
+ tracer.ServiceName("sqlite-example"),
+ tracer.ResourceName("initial-access"),
+ )
+
+ // Subsequent spans inherit their parent from context.
+ rows, err := db.QueryContext(ctx, "SELECT * FROM city LIMIT 5")
+ if err != nil {
+ log.Fatal(err)
+ }
+ rows.Close()
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/internal/dsn.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/internal/dsn.go
new file mode 100644
index 00000000..2ef12e8c
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/internal/dsn.go
@@ -0,0 +1,83 @@
+package internal // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/internal"
+
+import (
+ "net"
+ "strings"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+)
+
+// ParseDSN parses various supported DSN types (currently mysql and postgres) into a
+// map of key/value pairs which can be used as valid tags.
+func ParseDSN(driverName, dsn string) (meta map[string]string, err error) {
+ meta = make(map[string]string)
+ switch driverName {
+ case "mysql":
+ meta, err = parseMySQLDSN(dsn)
+ if err != nil {
+ return
+ }
+ case "postgres":
+ meta, err = parsePostgresDSN(dsn)
+ if err != nil {
+ return
+ }
+ default:
+ // not supported
+ }
+ return reduceKeys(meta), nil
+}
+
+// reduceKeys takes a map containing parsed DSN information and returns a new
+// map containing only the keys relevant as tracing tags, if any.
+func reduceKeys(meta map[string]string) map[string]string {
+ var keysOfInterest = map[string]string{
+ "user": ext.DBUser,
+ "application_name": ext.DBApplication,
+ "dbname": ext.DBName,
+ "host": ext.TargetHost,
+ "port": ext.TargetPort,
+ }
+ m := make(map[string]string)
+ for k, v := range meta {
+ if nk, ok := keysOfInterest[k]; ok {
+ m[nk] = v
+ }
+ }
+ return m
+}
+
+// parseMySQLDSN parses a mysql-type dsn into a map.
+func parseMySQLDSN(dsn string) (m map[string]string, err error) {
+ var cfg *mySQLConfig
+ if cfg, err = mySQLConfigFromDSN(dsn); err == nil {
+ host, port, _ := net.SplitHostPort(cfg.Addr)
+ m = map[string]string{
+ "user": cfg.User,
+ "host": host,
+ "port": port,
+ "dbname": cfg.DBName,
+ }
+ return m, nil
+ }
+ return nil, err
+}
+
+// parsePostgresDSN parses a postgres-type dsn into a map.
+func parsePostgresDSN(dsn string) (map[string]string, error) {
+ var err error
+ if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") {
+ // url form, convert to opts
+ dsn, err = parseURL(dsn)
+ if err != nil {
+ return nil, err
+ }
+ }
+ meta := make(map[string]string)
+ if err := parseOpts(dsn, meta); err != nil {
+ return nil, err
+ }
+ // remove sensitive information
+ delete(meta, "password")
+ return meta, nil
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/internal/dsn_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/internal/dsn_test.go
new file mode 100644
index 00000000..d9fa03f8
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/internal/dsn_test.go
@@ -0,0 +1,100 @@
+package internal
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+)
+
+func TestParseDSN(t *testing.T) {
+ assert := assert.New(t)
+ for _, tt := range []struct {
+ driverName string
+ dsn string
+ expected map[string]string
+ }{
+ {
+ driverName: "postgres",
+ dsn: "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full",
+ expected: map[string]string{
+ ext.DBUser: "bob",
+ ext.TargetHost: "1.2.3.4",
+ ext.TargetPort: "5432",
+ ext.DBName: "mydb",
+ },
+ },
+ {
+ driverName: "mysql",
+ dsn: "bob:secret@tcp(1.2.3.4:5432)/mydb",
+ expected: map[string]string{
+ ext.DBName: "mydb",
+ ext.DBUser: "bob",
+ ext.TargetHost: "1.2.3.4",
+ ext.TargetPort: "5432",
+ },
+ },
+ {
+ driverName: "postgres",
+ dsn: "connect_timeout=0 binary_parameters=no password=zMWmQz26GORmgVVKEbEl dbname=dogdatastaging application_name=trace-api port=5433 sslmode=disable host=master-db-master-active.postgres.service.consul user=dog",
+ expected: map[string]string{
+ ext.TargetPort: "5433",
+ ext.TargetHost: "master-db-master-active.postgres.service.consul",
+ ext.DBName: "dogdatastaging",
+ ext.DBApplication: "trace-api",
+ ext.DBUser: "dog",
+ },
+ },
+ } {
+ m, err := ParseDSN(tt.driverName, tt.dsn)
+ assert.Equal(nil, err)
+ assert.Equal(tt.expected, m)
+ }
+}
+
+func TestParseMySQLDSN(t *testing.T) {
+ assert := assert.New(t)
+ expected := map[string]string{
+ "dbname": "mydb",
+ "user": "bob",
+ "host": "1.2.3.4",
+ "port": "5432",
+ }
+ m, err := parseMySQLDSN("bob:secret@tcp(1.2.3.4:5432)/mydb")
+ assert.Equal(nil, err)
+ assert.Equal(expected, m)
+}
+
+func TestParsePostgresDSN(t *testing.T) {
+ assert := assert.New(t)
+
+ for _, tt := range []struct {
+ dsn string
+ expected map[string]string
+ }{
+ {
+ dsn: "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full",
+ expected: map[string]string{
+ "user": "bob",
+ "host": "1.2.3.4",
+ "port": "5432",
+ "dbname": "mydb",
+ "sslmode": "verify-full",
+ },
+ },
+ {
+ dsn: "password=zMWmQz26GORmgVVKEbEl dbname=dogdatastaging application_name=trace-api port=5433 host=master-db-master-active.postgres.service.consul user=dog",
+ expected: map[string]string{
+ "user": "dog",
+ "port": "5433",
+ "host": "master-db-master-active.postgres.service.consul",
+ "dbname": "dogdatastaging",
+ "application_name": "trace-api",
+ },
+ },
+ } {
+ m, err := parsePostgresDSN(tt.dsn)
+ assert.Equal(nil, err)
+ assert.Equal(tt.expected, m)
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/internal/mysql.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/internal/mysql.go
new file mode 100644
index 00000000..acf7cce0
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/internal/mysql.go
@@ -0,0 +1,167 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+//
+// Copied from package github.com/go-sql-driver:
+// https://github.com/go-sql-driver/mysql/blob/9181e3a86a19bacd63e68d43ae8b7b36320d8092/dsn.go
+
+package internal
+
+import (
+ "crypto/tls"
+ "errors"
+ "strings"
+ "time"
+)
+
+const defaultCollation = "utf8_general_ci"
+
+// A blacklist of collations with which it is unsafe to interpolate parameters.
+// These multibyte encodings may contain 0x5c (`\`) in their trailing bytes.
+var unsafeCollations = map[string]bool{
+ "big5_chinese_ci": true,
+ "sjis_japanese_ci": true,
+ "gbk_chinese_ci": true,
+ "big5_bin": true,
+ "gb2312_bin": true,
+ "gbk_bin": true,
+ "sjis_bin": true,
+ "cp932_japanese_ci": true,
+ "cp932_bin": true,
+}
+
+var (
+ errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?")
+ errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)")
+ errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name")
+ errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations")
+)
+
+// Config is a configuration parsed from a DSN string
+type mySQLConfig struct {
+ User string // Username
+ Passwd string // Password (requires User)
+ Net string // Network type
+ Addr string // Network address (requires Net)
+ DBName string // Database name
+ Params map[string]string // Connection parameters
+ Collation string // Connection collation
+ Loc *time.Location // Location for time.Time values
+ MaxAllowedPacket int // Max packet size allowed
+ TLSConfig string // TLS configuration name
+ tls *tls.Config // TLS configuration
+ Timeout time.Duration // Dial timeout
+ ReadTimeout time.Duration // I/O read timeout
+ WriteTimeout time.Duration // I/O write timeout
+
+ AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE
+ AllowCleartextPasswords bool // Allows the cleartext client side plugin
+ AllowNativePasswords bool // Allows the native password authentication method
+ AllowOldPasswords bool // Allows the old insecure password method
+ ClientFoundRows bool // Return number of matching rows instead of rows changed
+ ColumnsWithAlias bool // Prepend table alias to column names
+ InterpolateParams bool // Interpolate placeholders into query string
+ MultiStatements bool // Allow multiple statements in one query
+ ParseTime bool // Parse time values to time.Time
+ Strict bool // Return warnings as errors
+}
+
+// mySQLConfigFromDSN parses the MySQL DSN string to a mySQLConfig.
+func mySQLConfigFromDSN(dsn string) (cfg *mySQLConfig, err error) {
+ // New config with some default values
+ cfg = &mySQLConfig{
+ Loc: time.UTC,
+ Collation: defaultCollation,
+ }
+
+ // [user[:password]@][net[(addr)]]/dbname[?param1=value1&paramN=valueN]
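+ // e.g. "user:password@tcp(localhost:3306)/dbname?parseTime=true" (an illustrative DSN)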
+ // Find the last '/' (since the password or the net addr might contain a '/')
+ foundSlash := false
+ for i := len(dsn) - 1; i >= 0; i-- {
+ if dsn[i] == '/' {
+ foundSlash = true
+ var j, k int
+
+ // left part is empty if i <= 0
+ if i > 0 {
+ // [username[:password]@][protocol[(address)]]
+ // Find the last '@' in dsn[:i]
+ for j = i; j >= 0; j-- {
+ if dsn[j] == '@' {
+ // username[:password]
+ // Find the first ':' in dsn[:j]
+ for k = 0; k < j; k++ {
+ if dsn[k] == ':' {
+ cfg.Passwd = dsn[k+1 : j]
+ break
+ }
+ }
+ cfg.User = dsn[:k]
+
+ break
+ }
+ }
+
+ // [protocol[(address)]]
+ // Find the first '(' in dsn[j+1:i]
+ for k = j + 1; k < i; k++ {
+ if dsn[k] == '(' {
+ // dsn[i-1] must be == ')' if an address is specified
+ if dsn[i-1] != ')' {
+ if strings.ContainsRune(dsn[k+1:i], ')') {
+ return nil, errInvalidDSNUnescaped
+ }
+ return nil, errInvalidDSNAddr
+ }
+ cfg.Addr = dsn[k+1 : i-1]
+ break
+ }
+ }
+ cfg.Net = dsn[j+1 : k]
+ }
+
+ // dbname[?param1=value1&...&paramN=valueN]
+ // Find the first '?' in dsn[i+1:]
+ for j = i + 1; j < len(dsn); j++ {
+ if dsn[j] == '?' {
+ break
+ }
+ }
+ cfg.DBName = dsn[i+1 : j]
+
+ break
+ }
+ }
+
+ if !foundSlash && len(dsn) > 0 {
+ return nil, errInvalidDSNNoSlash
+ }
+
+ if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
+ return nil, errInvalidDSNUnsafeCollation
+ }
+
+ // Set default network if empty
+ if cfg.Net == "" {
+ cfg.Net = "tcp"
+ }
+
+ // Set default address if empty
+ if cfg.Addr == "" {
+ switch cfg.Net {
+ case "tcp":
+ cfg.Addr = "127.0.0.1:3306"
+ case "unix":
+ cfg.Addr = "/tmp/mysql.sock"
+ default:
+ return nil, errors.New("default addr for network '" + cfg.Net + "' unknown")
+ }
+ }
+
+ return
+}
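+
+// A minimal usage sketch (the DSN below is hypothetical, not from the vendored source):
+//
+// cfg, err := mySQLConfigFromDSN("user:pw@tcp(127.0.0.1:3306)/mydb")
+// if err != nil {
+// // handle a malformed DSN
+// }
+// _ = cfg.Addr // "127.0.0.1:3306"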
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/internal/postgres.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/internal/postgres.go
new file mode 100644
index 00000000..11bc3e3b
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/internal/postgres.go
@@ -0,0 +1,194 @@
+// Copied from package github.com/lib/pq:
+//
+// parseOpts: https://github.com/lib/pq/blob/61fe37aa2ee24fabcdbe5c4ac1d4ac566f88f345/conn.go
+// parseURL: https://github.com/lib/pq/blob/50761b0867bd1d9d069276790bcd4a3bccf2324a/url.go
+
+package internal
+
+import (
+ "fmt"
+ "net"
+ nurl "net/url"
+ "sort"
+ "strings"
+ "unicode"
+)
+
+type values map[string]string
+
+// scanner implements a tokenizer for libpq-style option strings.
+type scanner struct {
+ s []rune
+ i int
+}
+
+// newScanner returns a new scanner initialized with the option string s.
+func newScanner(s string) *scanner {
+ return &scanner{[]rune(s), 0}
+}
+
+// Next returns the next rune.
+// It returns 0, false if the end of the text has been reached.
+func (s *scanner) Next() (rune, bool) {
+ if s.i >= len(s.s) {
+ return 0, false
+ }
+ r := s.s[s.i]
+ s.i++
+ return r, true
+}
+
+// SkipSpaces returns the next non-whitespace rune.
+// It returns 0, false if the end of the text has been reached.
+func (s *scanner) SkipSpaces() (rune, bool) {
+ r, ok := s.Next()
+ for unicode.IsSpace(r) && ok {
+ r, ok = s.Next()
+ }
+ return r, ok
+}
+
+// parseOpts parses the options from name and adds them to the values.
+// The parsing code is based on conninfo_parse from libpq's fe-connect.c
+func parseOpts(name string, o values) error {
+ s := newScanner(name)
+
+ for {
+ var (
+ keyRunes, valRunes []rune
+ r rune
+ ok bool
+ )
+
+ if r, ok = s.SkipSpaces(); !ok {
+ break
+ }
+
+ // Scan the key
+ for !unicode.IsSpace(r) && r != '=' {
+ keyRunes = append(keyRunes, r)
+ if r, ok = s.Next(); !ok {
+ break
+ }
+ }
+
+ // Skip any whitespace if we're not at the = yet
+ if r != '=' {
+ r, ok = s.SkipSpaces()
+ }
+
+ // The current character should be =
+ if r != '=' || !ok {
+ return fmt.Errorf(`missing "=" after %q in connection info string`, string(keyRunes))
+ }
+
+ // Skip any whitespace after the =
+ if r, ok = s.SkipSpaces(); !ok {
+ // If we reach the end here, the last value is just an empty string as per libpq.
+ o[string(keyRunes)] = ""
+ break
+ }
+
+ if r != '\'' {
+ for !unicode.IsSpace(r) {
+ if r == '\\' {
+ if r, ok = s.Next(); !ok {
+ return fmt.Errorf(`missing character after backslash`)
+ }
+ }
+ valRunes = append(valRunes, r)
+
+ if r, ok = s.Next(); !ok {
+ break
+ }
+ }
+ } else {
+ quote:
+ for {
+ if r, ok = s.Next(); !ok {
+ return fmt.Errorf(`unterminated quoted string literal in connection string`)
+ }
+ switch r {
+ case '\'':
+ break quote
+ case '\\':
+ r, _ = s.Next()
+ fallthrough
+ default:
+ valRunes = append(valRunes, r)
+ }
+ }
+ }
+
+ o[string(keyRunes)] = string(valRunes)
+ }
+
+ return nil
+}
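+
+// An illustrative call (not part of the vendored source), assuming a
+// well-formed libpq option string:
+//
+// o := make(values)
+// err := parseOpts("host=localhost port=5432 dbname='my db'", o)
+// // err == nil; o["host"] == "localhost", o["dbname"] == "my db"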
+
+// In lib/pq, parseURL no longer needs to be used by clients of the library since supplying a URL
+// as a connection string to sql.Open() is now supported:
+//
+// sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full")
+//
+// The copy here is unexported and used only to parse URL-style DSNs.
+//
+// parseURL converts a url to a connection string for driver.Open.
+// Example:
+//
+// "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full"
+//
+// converts to:
+//
+// "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full"
+//
+// A minimal example:
+//
+// "postgres://"
+//
+// This will be blank, causing driver.Open to use all of the defaults
+func parseURL(url string) (string, error) {
+ u, err := nurl.Parse(url)
+ if err != nil {
+ return "", err
+ }
+
+ if u.Scheme != "postgres" && u.Scheme != "postgresql" {
+ return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme)
+ }
+
+ var kvs []string
+ escaper := strings.NewReplacer(` `, `\ `, `'`, `\'`, `\`, `\\`)
+ accrue := func(k, v string) {
+ if v != "" {
+ kvs = append(kvs, k+"="+escaper.Replace(v))
+ }
+ }
+
+ if u.User != nil {
+ v := u.User.Username()
+ accrue("user", v)
+
+ v, _ = u.User.Password()
+ accrue("password", v)
+ }
+
+ if host, port, err := net.SplitHostPort(u.Host); err != nil {
+ accrue("host", u.Host)
+ } else {
+ accrue("host", host)
+ accrue("port", port)
+ }
+
+ if u.Path != "" {
+ accrue("dbname", u.Path[1:])
+ }
+
+ q := u.Query()
+ for k := range q {
+ accrue(k, q.Get(k))
+ }
+
+ sort.Strings(kvs) // Makes testing easier (not a performance concern)
+ return strings.Join(kvs, " "), nil
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/option.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/option.go
new file mode 100644
index 00000000..f3d18d9a
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/option.go
@@ -0,0 +1,17 @@
+package sql
+
+type registerConfig struct{ serviceName string }
+
+// RegisterOption represents an option that can be passed to Register.
+type RegisterOption func(*registerConfig)
+
+func defaults(cfg *registerConfig) {
+ // default cfg.serviceName set in Register based on driver name
+}
+
+// WithServiceName sets the given service name for the registered driver.
+func WithServiceName(name string) RegisterOption {
+ return func(cfg *registerConfig) {
+ cfg.serviceName = name
+ }
+}
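+
+// A usage sketch, assuming the package is imported as sqltrace and the lib/pq
+// driver is available (the service name is illustrative):
+//
+// sqltrace.Register("postgres", &pq.Driver{}, sqltrace.WithServiceName("orders-db"))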
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/sql.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/sql.go
new file mode 100644
index 00000000..301256b1
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/sql.go
@@ -0,0 +1,60 @@
+// Package sql provides functions to trace the database/sql package (https://golang.org/pkg/database/sql).
+// It will automatically augment operations such as connections, statements and transactions with tracing.
+//
+// We start by telling the package which driver we will be using. For example, if we are using "github.com/lib/pq",
+// we would do as follows:
+//
+// sqltrace.Register("pq", pq.Driver{})
+// db, err := sqltrace.Open("pq", "postgres://pqgotest:password@localhost...")
+//
+// The rest of our application would continue as usual, but with tracing enabled.
+//
+package sql
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "errors"
+)
+
+// Register tells the sql integration package about the driver that we will be tracing. It must
+// be called before Open, if that connection is to be traced. It uses the driverName suffixed
+// with ".db" as the default service name.
+func Register(driverName string, driver driver.Driver, opts ...RegisterOption) {
+ if driver == nil {
+ panic("sqltrace: Register driver is nil")
+ }
+ name := tracedDriverName(driverName)
+ if driverExists(name) {
+ // no problem, carry on
+ return
+ }
+ cfg := new(registerConfig)
+ defaults(cfg)
+ for _, fn := range opts {
+ fn(cfg)
+ }
+ if cfg.serviceName == "" {
+ cfg.serviceName = driverName + ".db"
+ }
+ sql.Register(name, &tracedDriver{
+ Driver: driver,
+ driverName: driverName,
+ config: cfg,
+ })
+}
+
+// errNotRegistered is returned when there is an attempt to open a database connection towards a driver
+// that has not previously been registered using this package.
+var errNotRegistered = errors.New("sqltrace: Register must be called before Open")
+
+// Open returns a connection to a DB using the traced version of the given driver. In order for Open
+// to work, the driver must first be registered using Register (optionally with WithServiceName). If this
+// did not occur, Open will return an error.
+func Open(driverName, dataSourceName string) (*sql.DB, error) {
+ name := tracedDriverName(driverName)
+ if !driverExists(name) {
+ return nil, errNotRegistered
+ }
+ return sql.Open(name, dataSourceName)
+}
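+
+// A minimal end-to-end sketch, assuming the package is imported as sqltrace and
+// a reachable Postgres instance (the DSN is illustrative):
+//
+// sqltrace.Register("postgres", &pq.Driver{})
+// db, err := sqltrace.Open("postgres", "postgres://user:pw@127.0.0.1:5432/app")
+// if err != nil {
+// log.Fatal(err)
+// }
+// defer db.Close()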
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/sql_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/sql_test.go
new file mode 100644
index 00000000..e9296016
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/sql_test.go
@@ -0,0 +1,77 @@
+package sql
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "testing"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/sqltest"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+
+ "github.com/go-sql-driver/mysql"
+ "github.com/lib/pq"
+)
+
+// tableName holds the SQL table that these tests will be run against. It must be unique across the repo.
+const tableName = "testsql"
+
+func TestMain(m *testing.M) {
+ _, ok := os.LookupEnv("INTEGRATION")
+ if !ok {
+ fmt.Println("--- SKIP: to enable integration test, set the INTEGRATION environment variable")
+ os.Exit(0)
+ }
+ defer sqltest.Prepare(tableName)()
+ os.Exit(m.Run())
+}
+
+func TestMySQL(t *testing.T) {
+ Register("mysql", &mysql.MySQLDriver{})
+ db, err := Open("mysql", "test:test@tcp(127.0.0.1:3306)/test")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer db.Close()
+
+ testConfig := &sqltest.Config{
+ DB: db,
+ DriverName: "mysql",
+ TableName: tableName,
+ ExpectName: "mysql.query",
+ ExpectTags: map[string]interface{}{
+ ext.ServiceName: "mysql.db",
+ ext.SpanType: ext.SpanTypeSQL,
+ ext.TargetHost: "127.0.0.1",
+ ext.TargetPort: "3306",
+ ext.DBUser: "test",
+ ext.DBName: "test",
+ },
+ }
+ sqltest.RunAll(t, testConfig)
+}
+
+func TestPostgres(t *testing.T) {
+ Register("postgres", &pq.Driver{}, WithServiceName("postgres-test"))
+ db, err := Open("postgres", "postgres://postgres:postgres@127.0.0.1:5432/postgres?sslmode=disable")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer db.Close()
+
+ testConfig := &sqltest.Config{
+ DB: db,
+ DriverName: "postgres",
+ TableName: tableName,
+ ExpectName: "postgres.query",
+ ExpectTags: map[string]interface{}{
+ ext.ServiceName: "postgres-test",
+ ext.SpanType: ext.SpanTypeSQL,
+ ext.TargetHost: "127.0.0.1",
+ ext.TargetPort: "5432",
+ ext.DBUser: "postgres",
+ ext.DBName: "postgres",
+ },
+ }
+ sqltest.RunAll(t, testConfig)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/stmt.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/stmt.go
new file mode 100644
index 00000000..358c79af
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/stmt.go
@@ -0,0 +1,82 @@
+package sql
+
+import (
+ "context"
+ "database/sql/driver"
+ "errors"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+var _ driver.Stmt = (*tracedStmt)(nil)
+
+// tracedStmt is a traced version of sql.Stmt.
+type tracedStmt struct {
+ driver.Stmt
+ *traceParams
+ ctx context.Context
+ query string
+}
+
+// Close sends a span before closing a statement
+func (s *tracedStmt) Close() (err error) {
+ span := s.newChildSpanFromContext(s.ctx, "Close", "")
+ defer func() {
+ span.Finish(tracer.WithError(err))
+ }()
+ return s.Stmt.Close()
+}
+
+// ExecContext is needed to implement the driver.StmtExecContext interface
+func (s *tracedStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (res driver.Result, err error) {
+ span := s.newChildSpanFromContext(ctx, "Exec", s.query)
+ defer func() {
+ span.Finish(tracer.WithError(err))
+ }()
+ if stmtExecContext, ok := s.Stmt.(driver.StmtExecContext); ok {
+ return stmtExecContext.ExecContext(ctx, args)
+ }
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+ return s.Exec(dargs)
+}
+
+// QueryContext is needed to implement the driver.StmtQueryContext interface
+func (s *tracedStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (rows driver.Rows, err error) {
+ span := s.newChildSpanFromContext(ctx, "Query", s.query)
+ defer func() {
+ span.Finish(tracer.WithError(err))
+ }()
+ if stmtQueryContext, ok := s.Stmt.(driver.StmtQueryContext); ok {
+ return stmtQueryContext.QueryContext(ctx, args)
+ }
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+ return s.Query(dargs)
+}
+
+// copied from stdlib database/sql package: src/database/sql/ctxutil.go
+func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
+ dargs := make([]driver.Value, len(named))
+ for n, param := range named {
+ if len(param.Name) > 0 {
+ return nil, errors.New("sql: driver does not support the use of Named Parameters")
+ }
+ dargs[n] = param.Value
+ }
+ return dargs, nil
+}
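+
+// Illustrative behaviour (not part of the vendored source): a positional
+// argument converts cleanly, while a named one returns an error:
+//
+// dargs, err := namedValueToValue([]driver.NamedValue{{Ordinal: 1, Value: "x"}})
+// // err == nil, dargs == []driver.Value{"x"}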
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/tx.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/tx.go
new file mode 100644
index 00000000..6ad1a829
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql/tx.go
@@ -0,0 +1,35 @@
+package sql
+
+import (
+ "context"
+ "database/sql/driver"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+var _ driver.Tx = (*tracedTx)(nil)
+
+// tracedTx is a traced version of sql.Tx
+type tracedTx struct {
+ driver.Tx
+ *traceParams
+ ctx context.Context
+}
+
+// Commit sends a span at the end of the transaction
+func (t *tracedTx) Commit() (err error) {
+ span := t.newChildSpanFromContext(t.ctx, "Commit", "")
+ defer func() {
+ span.Finish(tracer.WithError(err))
+ }()
+ return t.Tx.Commit()
+}
+
+// Rollback sends a span if the connection is aborted
+func (t *tracedTx) Rollback() (err error) {
+ span := t.newChildSpanFromContext(t.ctx, "Rollback", "")
+ defer func() {
+ span.Finish(tracer.WithError(err))
+ }()
+ return t.Tx.Rollback()
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/garyburd/redigo/example_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/garyburd/redigo/example_test.go
new file mode 100644
index 00000000..9086e748
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/garyburd/redigo/example_test.go
@@ -0,0 +1,80 @@
+package redigo_test
+
+import (
+ "context"
+ "log"
+ "time"
+
+ redigotrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/garyburd/redigo"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ "github.com/garyburd/redigo/redis"
+)
+
+// To start tracing Redis commands, use the Dial function to create a connection,
+// passing in a service name of choice.
+func Example() {
+ c, err := redigotrace.Dial("tcp", "127.0.0.1:6379")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Emit spans per command by using your Redis connection as usual
+ c.Do("SET", "vehicle", "truck")
+
+ // Use a context to pass information down the call chain
+ root, ctx := tracer.StartSpanFromContext(context.Background(), "parent.request",
+ tracer.ServiceName("web"),
+ tracer.ResourceName("/home"),
+ )
+
+ // When passed a context as the final argument, c.Do will emit a span inheriting from 'parent.request'
+ c.Do("SET", "food", "cheese", ctx)
+ root.Finish()
+}
+
+func Example_tracedConn() {
+ c, err := redigotrace.Dial("tcp", "127.0.0.1:6379",
+ redigotrace.WithServiceName("my-redis-backend"),
+ redis.DialKeepAlive(time.Minute),
+ )
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Emit spans per command by using your Redis connection as usual
+ c.Do("SET", "vehicle", "truck")
+
+ // Use a context to pass information down the call chain
+ root, ctx := tracer.StartSpanFromContext(context.Background(), "parent.request",
+ tracer.ServiceName("web"),
+ tracer.ResourceName("/home"),
+ )
+
+ // When passed a context as the final argument, c.Do will emit a span inheriting from 'parent.request'
+ c.Do("SET", "food", "cheese", ctx)
+ root.Finish()
+}
+
+// Alternatively, provide a redis URL to the DialURL function
+func Example_dialURL() {
+ c, err := redigotrace.DialURL("redis://127.0.0.1:6379")
+ if err != nil {
+ log.Fatal(err)
+ }
+ c.Do("SET", "vehicle", "truck")
+}
+
+// When using a redigo Pool, set your Dial function to return a traced connection
+func Example_pool() {
+ pool := &redis.Pool{
+ Dial: func() (redis.Conn, error) {
+ return redigotrace.Dial("tcp", "127.0.0.1:6379",
+ redigotrace.WithServiceName("my-redis-backend"),
+ )
+ },
+ }
+
+ c := pool.Get()
+ c.Do("SET", " whiskey", " glass")
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/garyburd/redigo/option.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/garyburd/redigo/option.go
new file mode 100644
index 00000000..173cafc1
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/garyburd/redigo/option.go
@@ -0,0 +1,17 @@
+package redigo // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/garyburd/redigo"
+
+type dialConfig struct{ serviceName string }
+
+// DialOption represents an option that can be passed to Dial.
+type DialOption func(*dialConfig)
+
+func defaults(cfg *dialConfig) {
+ cfg.serviceName = "redis.conn"
+}
+
+// WithServiceName sets the given service name for the dialled connection.
+func WithServiceName(name string) DialOption {
+ return func(cfg *dialConfig) {
+ cfg.serviceName = name
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/garyburd/redigo/redigo.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/garyburd/redigo/redigo.go
new file mode 100644
index 00000000..94dc3d11
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/garyburd/redigo/redigo.go
@@ -0,0 +1,153 @@
+// Package redigo provides functions to trace the garyburd/redigo package (https://github.com/garyburd/redigo).
+package redigo
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "net"
+ "net/url"
+ "strconv"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ redis "github.com/garyburd/redigo/redis"
+)
+
+// Conn is an implementation of the redis.Conn interface that supports tracing
+type Conn struct {
+ redis.Conn
+ *params
+}
+
+// params contains fields and metadata useful for command tracing
+type params struct {
+ config *dialConfig
+ network string
+ host string
+ port string
+}
+
+// parseOptions parses a set of arbitrary options (which can be of type redis.DialOption
+// or the local DialOption) and returns the corresponding redis.DialOption set as well as
+// a configured dialConfig.
+func parseOptions(options ...interface{}) ([]redis.DialOption, *dialConfig) {
+ dialOpts := []redis.DialOption{}
+ cfg := new(dialConfig)
+ defaults(cfg)
+ for _, opt := range options {
+ switch o := opt.(type) {
+ case redis.DialOption:
+ dialOpts = append(dialOpts, o)
+ case DialOption:
+ o(cfg)
+ }
+ }
+ return dialOpts, cfg
+}
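+
+// For illustration (not from the vendored source), both option types can be
+// mixed freely in a single Dial call:
+//
+// c, err := Dial("tcp", "127.0.0.1:6379",
+// WithServiceName("my-redis"), // local DialOption
+// redis.DialKeepAlive(time.Minute), // redigo redis.DialOption
+// )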
+
+// Dial dials into the network address and returns a traced redis.Conn.
+// The set of supported options must be either of type redis.DialOption or this package's DialOption.
+func Dial(network, address string, options ...interface{}) (redis.Conn, error) {
+ dialOpts, cfg := parseOptions(options...)
+ c, err := redis.Dial(network, address, dialOpts...)
+ if err != nil {
+ return nil, err
+ }
+ host, port, err := net.SplitHostPort(address)
+ if err != nil {
+ return nil, err
+ }
+ tc := Conn{c, &params{cfg, network, host, port}}
+ return tc, nil
+}
+
+// DialURL connects to a Redis server at the given URL using the Redis
+// URI scheme. URLs should follow the draft IANA specification for the
+// scheme (https://www.iana.org/assignments/uri-schemes/prov/redis).
+// The returned redis.Conn is traced.
+func DialURL(rawurl string, options ...interface{}) (redis.Conn, error) {
+ dialOpts, cfg := parseOptions(options...)
+ u, err := url.Parse(rawurl)
+ if err != nil {
+ return Conn{}, err
+ }
+ host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ host = u.Host
+ port = "6379"
+ }
+ if host == "" {
+ host = "localhost"
+ }
+ network := "tcp"
+ c, err := redis.DialURL(rawurl, dialOpts...)
+ tc := Conn{c, &params{cfg, network, host, port}}
+ return tc, err
+}
+
+// newChildSpan creates a span inheriting from the given context. It adds useful metadata about the traced Redis connection to the span.
+func (tc Conn) newChildSpan(ctx context.Context) ddtrace.Span {
+ p := tc.params
+ span, _ := tracer.StartSpanFromContext(ctx, "redis.command",
+ tracer.SpanType(ext.SpanTypeRedis),
+ tracer.ServiceName(p.config.serviceName),
+ )
+ span.SetTag("out.network", p.network)
+ span.SetTag(ext.TargetPort, p.port)
+ span.SetTag(ext.TargetHost, p.host)
+ return span
+}
+
+// Do wraps redis.Conn.Do. It sends a command to the Redis server and returns the received reply.
+// In the process it emits a span containing key information about the command sent.
+// When passed a context.Context as the final argument, Do will ensure that any span created
+// inherits from this context. The rest of the arguments are passed through to the Redis server unchanged.
+func (tc Conn) Do(commandName string, args ...interface{}) (reply interface{}, err error) {
+ var (
+ ctx context.Context
+ ok bool
+ )
+ if n := len(args); n > 0 {
+ ctx, ok = args[n-1].(context.Context)
+ if ok {
+ args = args[:n-1]
+ }
+ }
+
+ span := tc.newChildSpan(ctx)
+ defer func() {
+ span.Finish(tracer.WithError(err))
+ }()
+
+ span.SetTag("redis.args_length", strconv.Itoa(len(args)))
+
+ if len(commandName) > 0 {
+ span.SetTag(ext.ResourceName, commandName)
+ } else {
+ // When the command argument to the Do method is "", the Do method flushes the output buffer.
+ // See https://godoc.org/github.com/garyburd/redigo/redis#hdr-Pipelining
+ span.SetTag(ext.ResourceName, "redigo.Conn.Flush")
+ }
+ var b bytes.Buffer
+ b.WriteString(commandName)
+ for _, arg := range args {
+ b.WriteString(" ")
+ switch arg := arg.(type) {
+ case string:
+ b.WriteString(arg)
+ case int:
+ b.WriteString(strconv.Itoa(arg))
+ case int32:
+ b.WriteString(strconv.FormatInt(int64(arg), 10))
+ case int64:
+ b.WriteString(strconv.FormatInt(arg, 10))
+ case fmt.Stringer:
+ b.WriteString(arg.String())
+ }
+ }
+ span.SetTag("redis.raw_command", b.String())
+ return tc.Conn.Do(commandName, args...)
+}
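+
+// A sketch mirroring the package examples: passing a context as the final
+// argument makes the emitted span a child of an existing trace:
+//
+// root, ctx := tracer.StartSpanFromContext(context.Background(), "parent.request")
+// c.Do("SET", "key", "value", ctx)
+// root.Finish()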
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/garyburd/redigo/redigo_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/garyburd/redigo/redigo_test.go
new file mode 100644
index 00000000..fa5e64b4
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/garyburd/redigo/redigo_test.go
@@ -0,0 +1,176 @@
+package redigo
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "testing"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ "github.com/garyburd/redigo/redis"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestMain(m *testing.M) {
+ _, ok := os.LookupEnv("INTEGRATION")
+ if !ok {
+ fmt.Println("--- SKIP: to enable integration test, set the INTEGRATION environment variable")
+ os.Exit(0)
+ }
+ os.Exit(m.Run())
+}
+
+func TestClient(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ c, err := Dial("tcp", "127.0.0.1:6379", WithServiceName("my-service"))
+ assert.Nil(err)
+ c.Do("SET", 1, "truck")
+
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 1)
+
+ span := spans[0]
+ assert.Equal("redis.command", span.OperationName())
+ assert.Equal(ext.SpanTypeRedis, span.Tag(ext.SpanType))
+ assert.Equal("my-service", span.Tag(ext.ServiceName))
+ assert.Equal("SET", span.Tag(ext.ResourceName))
+ assert.Equal("127.0.0.1", span.Tag(ext.TargetHost))
+ assert.Equal("6379", span.Tag(ext.TargetPort))
+ assert.Equal("SET 1 truck", span.Tag("redis.raw_command"))
+ assert.Equal("2", span.Tag("redis.args_length"))
+}
+
+func TestCommandError(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ c, err := Dial("tcp", "127.0.0.1:6379", WithServiceName("my-service"))
+ assert.Nil(err)
+ _, err = c.Do("NOT_A_COMMAND", context.Background())
+ assert.NotNil(err)
+
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 1)
+ span := spans[0]
+
+ assert.Equal(err, span.Tag(ext.Error).(error))
+ assert.Equal("redis.command", span.OperationName())
+ assert.Equal("my-service", span.Tag(ext.ServiceName))
+ assert.Equal("NOT_A_COMMAND", span.Tag(ext.ResourceName))
+ assert.Equal("127.0.0.1", span.Tag(ext.TargetHost))
+ assert.Equal("6379", span.Tag(ext.TargetPort))
+ assert.Equal("NOT_A_COMMAND", span.Tag("redis.raw_command"))
+}
+
+func TestConnectionError(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ _, err := Dial("tcp", "127.0.0.1:1000", WithServiceName("redis-service"))
+
+ assert.NotNil(err)
+ assert.Contains(err.Error(), "dial tcp 127.0.0.1:1000")
+}
+
+func TestInheritance(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ root, ctx := tracer.StartSpanFromContext(context.Background(), "parent.span")
+ client, err := Dial("tcp", "127.0.0.1:6379", WithServiceName("redis-service"))
+ assert.Nil(err)
+ client.Do("SET", "water", "bottle", ctx)
+ root.Finish()
+
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 2)
+
+ var child, parent mocktracer.Span
+ for _, s := range spans {
+ switch s.OperationName() {
+ case "redis.command":
+ child = s
+ case "parent.span":
+ parent = s
+ }
+ }
+ assert.NotNil(child)
+ assert.NotNil(parent)
+
+ assert.Equal(child.ParentID(), parent.SpanID())
+ assert.Equal(child.Tag(ext.TargetHost), "127.0.0.1")
+ assert.Equal(child.Tag(ext.TargetPort), "6379")
+}
+
+type stringifyTest struct{ A, B int }
+
+func (ts stringifyTest) String() string { return fmt.Sprintf("[%d, %d]", ts.A, ts.B) }
+
+func TestCommandsToString(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ str := stringifyTest{A: 57, B: 8}
+ c, err := Dial("tcp", "127.0.0.1:6379", WithServiceName("my-service"))
+ assert.Nil(err)
+ c.Do("SADD", "testSet", "a", int(0), int32(1), int64(2), str, context.Background())
+
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 1)
+ span := spans[0]
+
+ assert.Equal("redis.command", span.OperationName())
+ assert.Equal("my-service", span.Tag(ext.ServiceName))
+ assert.Equal("SADD", span.Tag(ext.ResourceName))
+ assert.Equal("127.0.0.1", span.Tag(ext.TargetHost))
+ assert.Equal("6379", span.Tag(ext.TargetPort))
+ assert.Equal("SADD testSet a 0 1 2 [57, 8]", span.Tag("redis.raw_command"))
+}
+
+func TestPool(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ pool := &redis.Pool{
+ MaxIdle: 2,
+ MaxActive: 3,
+ IdleTimeout: 23,
+ Wait: true,
+ Dial: func() (redis.Conn, error) {
+ return Dial("tcp", "127.0.0.1:6379", WithServiceName("my-service"))
+ },
+ }
+
+ pc := pool.Get()
+ pc.Do("SET", " whiskey", " glass", context.Background())
+
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 1)
+ span := spans[0]
+ assert.Equal(span.Tag("out.network"), "tcp")
+}
+
+func TestTracingDialUrl(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ url := "redis://127.0.0.1:6379"
+ client, err := DialURL(url, WithServiceName("redis-service"))
+ assert.Nil(err)
+ client.Do("SET", "ONE", " TWO", context.Background())
+
+ spans := mt.FinishedSpans()
+ assert.True(len(spans) > 0)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gin-gonic/gin/example_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gin-gonic/gin/example_test.go
new file mode 100644
index 00000000..a6f11b7a
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gin-gonic/gin/example_test.go
@@ -0,0 +1,53 @@
+package gin_test
+
+import (
+ gintrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/gin-gonic/gin"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ "github.com/gin-gonic/gin"
+)
+
+// To start tracing requests, add the trace middleware to your Gin router.
+func Example() {
+ // Create a gin.Engine
+ r := gin.New()
+
+ // Use the tracer middleware with your desired service name.
+ r.Use(gintrace.Middleware("my-web-app"))
+
+ // Set up some endpoints.
+ r.GET("/hello", func(c *gin.Context) {
+ c.String(200, "hello world!")
+ })
+
+ // And start gathering request traces.
+ r.Run(":8080")
+}
+
+func ExampleHTML() {
+ r := gin.Default()
+ r.Use(gintrace.Middleware("my-web-app"))
+ r.LoadHTMLGlob("templates/*")
+
+ r.GET("/index", func(c *gin.Context) {
+ // render the html and trace the execution time.
+ gintrace.HTML(c, 200, "index.tmpl", gin.H{
+ "title": "Main website",
+ })
+ })
+}
+
+func Example_spanFromContext() {
+ r := gin.Default()
+ r.Use(gintrace.Middleware("image-encoder"))
+ r.GET("/image/encode", func(c *gin.Context) {
+ ctx := c.Request.Context()
+ // create a child span to track operation timing.
+ encodeSpan, _ := tracer.StartSpanFromContext(ctx, "image.encode")
+ // encode an image
+ encodeSpan.Finish()
+
+ c.String(200, "ok!")
+ })
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gin-gonic/gin/gintrace.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gin-gonic/gin/gintrace.go
new file mode 100644
index 00000000..d0f3c6bf
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gin-gonic/gin/gintrace.go
@@ -0,0 +1,62 @@
+// Package gin provides functions to trace the gin-gonic/gin package (https://github.com/gin-gonic/gin).
+package gin // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/gin-gonic/gin"
+
+import (
+ "fmt"
+ "strconv"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ "github.com/gin-gonic/gin"
+)
+
+// Middleware returns middleware that will trace incoming requests using the given service name.
+func Middleware(service string) gin.HandlerFunc {
+ return func(c *gin.Context) {
+ resource := c.HandlerName()
+ opts := []ddtrace.StartSpanOption{
+ tracer.ServiceName(service),
+ tracer.ResourceName(resource),
+ tracer.SpanType(ext.SpanTypeWeb),
+ tracer.Tag(ext.HTTPMethod, c.Request.Method),
+ tracer.Tag(ext.HTTPURL, c.Request.URL.Path),
+ }
+ if spanctx, err := tracer.Extract(tracer.HTTPHeadersCarrier(c.Request.Header)); err == nil {
+ opts = append(opts, tracer.ChildOf(spanctx))
+ }
+ span, ctx := tracer.StartSpanFromContext(c.Request.Context(), "http.request", opts...)
+ defer span.Finish()
+
+ // pass the span through the request context
+ c.Request = c.Request.WithContext(ctx)
+
+ // serve the request to the next middleware
+ c.Next()
+
+ span.SetTag(ext.HTTPCode, strconv.Itoa(c.Writer.Status()))
+
+ if len(c.Errors) > 0 {
+ span.SetTag("gin.errors", c.Errors.String())
+ span.SetTag(ext.Error, c.Errors[0])
+ }
+ }
+}
+
+// HTML will trace the rendering of the template as a child of the span in the given context.
+func HTML(c *gin.Context, code int, name string, obj interface{}) {
+ span, _ := tracer.StartSpanFromContext(c.Request.Context(), "gin.render.html")
+ span.SetTag("go.template", name)
+ defer func() {
+ if r := recover(); r != nil {
+ err := fmt.Errorf("error rendering tmpl:%s: %s", name, r)
+ span.Finish(tracer.WithError(err))
+ panic(r)
+ } else {
+ span.Finish()
+ }
+ }()
+ c.HTML(code, name, obj)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gin-gonic/gin/gintrace_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gin-gonic/gin/gintrace_test.go
new file mode 100644
index 00000000..e1f23534
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gin-gonic/gin/gintrace_test.go
@@ -0,0 +1,191 @@
+package gin
+
+import (
+ "errors"
+ "html/template"
+ "net/http/httptest"
+ "testing"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ "github.com/gin-gonic/gin"
+ "github.com/stretchr/testify/assert"
+)
+
+func init() {
+ gin.SetMode(gin.ReleaseMode) // silence annoying log msgs
+}
+
+func TestChildSpan(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ router := gin.New()
+ router.Use(Middleware("foobar"))
+ router.GET("/user/:id", func(c *gin.Context) {
+ _, ok := tracer.SpanFromContext(c.Request.Context())
+ assert.True(ok)
+ })
+
+ r := httptest.NewRequest("GET", "/user/123", nil)
+ w := httptest.NewRecorder()
+
+ router.ServeHTTP(w, r)
+}
+
+func TestTrace200(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ router := gin.New()
+ router.Use(Middleware("foobar"))
+ router.GET("/user/:id", func(c *gin.Context) {
+ span, ok := tracer.SpanFromContext(c.Request.Context())
+ assert.True(ok)
+ assert.Equal(span.(mocktracer.Span).Tag(ext.ServiceName), "foobar")
+ id := c.Param("id")
+ c.Writer.Write([]byte(id))
+ })
+
+ r := httptest.NewRequest("GET", "/user/123", nil)
+ w := httptest.NewRecorder()
+
+ // do and verify the request
+ router.ServeHTTP(w, r)
+ response := w.Result()
+ assert.Equal(response.StatusCode, 200)
+
+ // verify traces look good
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 1)
+ if len(spans) < 1 {
+ t.Fatalf("no spans")
+ }
+ span := spans[0]
+ assert.Equal("http.request", span.OperationName())
+ assert.Equal(ext.SpanTypeWeb, span.Tag(ext.SpanType))
+ assert.Equal("foobar", span.Tag(ext.ServiceName))
+ assert.Contains(span.Tag(ext.ResourceName), "gin.TestTrace200")
+ assert.Equal("200", span.Tag(ext.HTTPCode))
+ assert.Equal("GET", span.Tag(ext.HTTPMethod))
+ // TODO(x) would be much nicer to have "/user/:id" here
+ assert.Equal("/user/123", span.Tag(ext.HTTPURL))
+}
+
+func TestError(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ // setup
+ router := gin.New()
+ router.Use(Middleware("foobar"))
+ wantErr := errors.New("oh no")
+
+ // a handler with an error and make the requests
+ router.GET("/err", func(c *gin.Context) {
+ c.AbortWithError(500, wantErr)
+ })
+ r := httptest.NewRequest("GET", "/err", nil)
+ w := httptest.NewRecorder()
+ router.ServeHTTP(w, r)
+ response := w.Result()
+ assert.Equal(response.StatusCode, 500)
+
+ // verify the errors and status are correct
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 1)
+ if len(spans) < 1 {
+ t.Fatalf("no spans")
+ }
+ span := spans[0]
+ assert.Equal("http.request", span.OperationName())
+ assert.Equal("foobar", span.Tag(ext.ServiceName))
+ assert.Equal("500", span.Tag(ext.HTTPCode))
+ assert.Equal(wantErr.Error(), span.Tag(ext.Error).(error).Error())
+}
+
+func TestHTML(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ // setup
+ router := gin.New()
+ router.Use(Middleware("foobar"))
+
+ // add a template
+ tmpl := template.Must(template.New("hello").Parse("hello {{.}}"))
+ router.SetHTMLTemplate(tmpl)
+
+ // a handler with an error and make the requests
+ router.GET("/hello", func(c *gin.Context) {
+ HTML(c, 200, "hello", "world")
+ })
+ r := httptest.NewRequest("GET", "/hello", nil)
+ w := httptest.NewRecorder()
+ router.ServeHTTP(w, r)
+ response := w.Result()
+ assert.Equal(response.StatusCode, 200)
+ assert.Equal("hello world", w.Body.String())
+
+ // verify the errors and status are correct
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 2)
+ for _, s := range spans {
+ assert.Equal("foobar", s.Tag(ext.ServiceName), s.String())
+ }
+
+ var tspan mocktracer.Span
+ for _, s := range spans {
+ // we need to pick up the span we're searching for, as the
+ // order is not guaranteed within the buffer
+ if s.OperationName() == "gin.render.html" {
+ tspan = s
+ }
+ }
+ assert.NotNil(tspan)
+ assert.Equal("hello", tspan.Tag("go.template"))
+}
+
+func TestGetSpanNotInstrumented(t *testing.T) {
+ assert := assert.New(t)
+ router := gin.New()
+ router.GET("/ping", func(c *gin.Context) {
+ // Assert we don't have a span on the context.
+ _, ok := tracer.SpanFromContext(c.Request.Context())
+ assert.False(ok)
+ c.Writer.Write([]byte("ok"))
+ })
+ r := httptest.NewRequest("GET", "/ping", nil)
+ w := httptest.NewRecorder()
+ router.ServeHTTP(w, r)
+ response := w.Result()
+ assert.Equal(response.StatusCode, 200)
+}
+
+func TestPropagation(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ r := httptest.NewRequest("GET", "/user/123", nil)
+ w := httptest.NewRecorder()
+
+ pspan := tracer.StartSpan("test")
+ tracer.Inject(pspan.Context(), tracer.HTTPHeadersCarrier(r.Header))
+
+ router := gin.New()
+ router.Use(Middleware("foobar"))
+ router.GET("/user/:id", func(c *gin.Context) {
+ span, ok := tracer.SpanFromContext(c.Request.Context())
+ assert.True(ok)
+ assert.Equal(span.(mocktracer.Span).ParentID(), pspan.(mocktracer.Span).SpanID())
+ })
+
+ router.ServeHTTP(w, r)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/globalsign/mgo/collection.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/globalsign/mgo/collection.go
new file mode 100644
index 00000000..bc2cfc11
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/globalsign/mgo/collection.go
@@ -0,0 +1,202 @@
+package mgo
+
+import (
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ "github.com/globalsign/mgo"
+ "github.com/globalsign/mgo/bson"
+)
+
+// Collection provides a mgo.Collection along with
+// data used for APM Tracing.
+type Collection struct {
+ *mgo.Collection
+ cfg mongoConfig
+}
+
+// Create invokes and traces Collection.Create
+func (c *Collection) Create(info *mgo.CollectionInfo) error {
+ span := newChildSpanFromContext(c.cfg)
+ err := c.Collection.Create(info)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// DropCollection invokes and traces Collection.DropCollection
+func (c *Collection) DropCollection() error {
+ span := newChildSpanFromContext(c.cfg)
+ err := c.Collection.DropCollection()
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// EnsureIndexKey invokes and traces Collection.EnsureIndexKey
+func (c *Collection) EnsureIndexKey(key ...string) error {
+ span := newChildSpanFromContext(c.cfg)
+ err := c.Collection.EnsureIndexKey(key...)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// EnsureIndex invokes and traces Collection.EnsureIndex
+func (c *Collection) EnsureIndex(index mgo.Index) error {
+ span := newChildSpanFromContext(c.cfg)
+ err := c.Collection.EnsureIndex(index)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// DropIndex invokes and traces Collection.DropIndex
+func (c *Collection) DropIndex(key ...string) error {
+ span := newChildSpanFromContext(c.cfg)
+ err := c.Collection.DropIndex(key...)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// DropIndexName invokes and traces Collection.DropIndexName
+func (c *Collection) DropIndexName(name string) error {
+ span := newChildSpanFromContext(c.cfg)
+ err := c.Collection.DropIndexName(name)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// Indexes invokes and traces Collection.Indexes
+func (c *Collection) Indexes() (indexes []mgo.Index, err error) {
+ span := newChildSpanFromContext(c.cfg)
+ indexes, err = c.Collection.Indexes()
+ span.Finish(tracer.WithError(err))
+ return indexes, err
+}
+
+// Insert invokes and traces Collection.Insert
+func (c *Collection) Insert(docs ...interface{}) error {
+ span := newChildSpanFromContext(c.cfg)
+ err := c.Collection.Insert(docs...)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// Find invokes and traces Collection.Find
+func (c *Collection) Find(query interface{}) *Query {
+ return &Query{
+ Query: c.Collection.Find(query),
+ cfg: c.cfg,
+ }
+}
+
+// FindId invokes and traces Collection.FindId
+func (c *Collection) FindId(id interface{}) *Query { // nolint
+ return &Query{
+ Query: c.Collection.FindId(id),
+ cfg: c.cfg,
+ }
+}
+
+// Count invokes and traces Collection.Count
+func (c *Collection) Count() (n int, err error) {
+ span := newChildSpanFromContext(c.cfg)
+ n, err = c.Collection.Count()
+ span.Finish(tracer.WithError(err))
+ return n, err
+}
+
+// Bulk creates a trace ready wrapper around Collection.Bulk
+func (c *Collection) Bulk() *Bulk {
+ return &Bulk{
+ Bulk: c.Collection.Bulk(),
+ cfg: c.cfg,
+ }
+}
+
+// NewIter invokes and traces Collection.Iter
+func (c *Collection) NewIter(session *mgo.Session, firstBatch []bson.Raw, cursorId int64, err error) *Iter { // nolint
+ return &Iter{
+ Iter: c.Collection.NewIter(session, firstBatch, cursorId, err),
+ cfg: c.cfg,
+ }
+}
+
+// Pipe invokes and traces Collection.Pipe
+func (c *Collection) Pipe(pipeline interface{}) *Pipe {
+ return &Pipe{
+ Pipe: c.Collection.Pipe(pipeline),
+ cfg: c.cfg,
+ }
+}
+
+// Update invokes and traces Collection.Update
+func (c *Collection) Update(selector interface{}, update interface{}) error {
+ span := newChildSpanFromContext(c.cfg)
+ err := c.Collection.Update(selector, update)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// UpdateId invokes and traces Collection.UpdateId
+func (c *Collection) UpdateId(id interface{}, update interface{}) error { // nolint
+ span := newChildSpanFromContext(c.cfg)
+ err := c.Collection.UpdateId(id, update)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// UpdateAll invokes and traces Collection.UpdateAll
+func (c *Collection) UpdateAll(selector interface{}, update interface{}) (info *mgo.ChangeInfo, err error) {
+ span := newChildSpanFromContext(c.cfg)
+ info, err = c.Collection.UpdateAll(selector, update)
+ span.Finish(tracer.WithError(err))
+ return info, err
+}
+
+// Upsert invokes and traces Collection.Upsert
+func (c *Collection) Upsert(selector interface{}, update interface{}) (info *mgo.ChangeInfo, err error) {
+ span := newChildSpanFromContext(c.cfg)
+ info, err = c.Collection.Upsert(selector, update)
+ span.Finish(tracer.WithError(err))
+ return info, err
+}
+
+// UpsertId invokes and traces Collection.UpsertId
+func (c *Collection) UpsertId(id interface{}, update interface{}) (info *mgo.ChangeInfo, err error) { // nolint
+ span := newChildSpanFromContext(c.cfg)
+ info, err = c.Collection.UpsertId(id, update)
+ span.Finish(tracer.WithError(err))
+ return info, err
+}
+
+// Remove invokes and traces Collection.Remove
+func (c *Collection) Remove(selector interface{}) error {
+ span := newChildSpanFromContext(c.cfg)
+ err := c.Collection.Remove(selector)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// RemoveId invokes and traces Collection.RemoveId
+func (c *Collection) RemoveId(id interface{}) error { // nolint
+ span := newChildSpanFromContext(c.cfg)
+ err := c.Collection.RemoveId(id)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// RemoveAll invokes and traces Collection.RemoveAll
+func (c *Collection) RemoveAll(selector interface{}) (info *mgo.ChangeInfo, err error) {
+ span := newChildSpanFromContext(c.cfg)
+ info, err = c.Collection.RemoveAll(selector)
+ span.Finish(tracer.WithError(err))
+ return info, err
+}
+
+// Repair invokes and traces Collection.Repair
+func (c *Collection) Repair() *Iter {
+ span := newChildSpanFromContext(c.cfg)
+ iter := c.Collection.Repair()
+ span.Finish()
+ return &Iter{
+ Iter: iter,
+ cfg: c.cfg,
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/globalsign/mgo/mgo.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/globalsign/mgo/mgo.go
new file mode 100644
index 00000000..27fb6f93
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/globalsign/mgo/mgo.go
@@ -0,0 +1,144 @@
+// Package mgo provides functions and types which allow tracing of the mgo MongoDB client (https://github.com/globalsign/mgo)
+package mgo // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/globalsign/mgo"
+
+import (
+ "strings"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ "github.com/globalsign/mgo"
+)
+
+// Dial opens a connection to a MongoDB server and configures it
+// for tracing.
+func Dial(url string, opts ...DialOption) (*Session, error) {
+ session, err := mgo.Dial(url)
+ s := &Session{Session: session}
+
+ defaults(&s.cfg)
+ for _, fn := range opts {
+ fn(&s.cfg)
+ }
+
+ // Record metadata so that it can be added to recorded traces
+ s.cfg.tags["hosts"] = strings.Join(session.LiveServers(), ", ")
+ info, _ := session.BuildInfo()
+ s.cfg.tags["mgo_version"] = info.Version
+
+ return s, err
+}
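+
+// A usage sketch, assuming the package is imported as mgotrace (the address
+// and service name are illustrative):
+//
+// session, err := mgotrace.Dial("localhost:27017",
+// mgotrace.WithServiceName("my-mongo"),
+// mgotrace.WithContext(ctx),
+// )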
+
+// Session is an mgo.Session instance that will be traced.
+type Session struct {
+ *mgo.Session
+ cfg mongoConfig
+}
+
+func newChildSpanFromContext(config mongoConfig) ddtrace.Span {
+ span, _ := tracer.StartSpanFromContext(
+ config.ctx,
+ "mongodb.query",
+ tracer.SpanType(ext.SpanTypeMongoDB),
+ tracer.ServiceName(config.serviceName),
+ tracer.ResourceName("mongodb.query"))
+
+ for key, value := range config.tags {
+ span.SetTag(key, value)
+ }
+
+ return span
+}
+
+// Run invokes and traces Session.Run
+func (s *Session) Run(cmd interface{}, result interface{}) (err error) {
+ span := newChildSpanFromContext(s.cfg)
+ err = s.Session.Run(cmd, result)
+ span.Finish(tracer.WithError(err))
+ return
+}
+
+// Database is an mgo.Database along with the data necessary for tracing.
+type Database struct {
+ *mgo.Database
+ cfg mongoConfig
+}
+
+// DB returns a new database for this Session.
+func (s *Session) DB(name string) *Database {
+ dbCfg := mongoConfig{
+ ctx: s.cfg.ctx,
+ serviceName: s.cfg.serviceName,
+ tags: s.cfg.tags,
+ }
+
+ dbCfg.tags["database"] = name
+ return &Database{
+ Database: s.Session.DB(name),
+ cfg: dbCfg,
+ }
+}
+
+// C returns a new Collection from this Database.
+func (db *Database) C(name string) *Collection {
+ return &Collection{
+ Collection: db.Database.C(name),
+ cfg: db.cfg,
+ }
+}
+
+// Iter is an mgo.Iter instance that will be traced.
+type Iter struct {
+ *mgo.Iter
+
+ cfg mongoConfig
+}
+
+// Next invokes and traces Iter.Next
+func (iter *Iter) Next(result interface{}) bool {
+ span := newChildSpanFromContext(iter.cfg)
+ r := iter.Iter.Next(result)
+ span.Finish()
+ return r
+}
+
+// For invokes and traces Iter.For
+func (iter *Iter) For(result interface{}, f func() error) (err error) {
+ span := newChildSpanFromContext(iter.cfg)
+ err = iter.Iter.For(result, f)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// All invokes and traces Iter.All
+func (iter *Iter) All(result interface{}) (err error) {
+ span := newChildSpanFromContext(iter.cfg)
+ err = iter.Iter.All(result)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// Close invokes and traces Iter.Close
+func (iter *Iter) Close() (err error) {
+ span := newChildSpanFromContext(iter.cfg)
+ err = iter.Iter.Close()
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// Bulk is an mgo.Bulk instance that will be traced.
+type Bulk struct {
+ *mgo.Bulk
+
+ cfg mongoConfig
+}
+
+// Run invokes and traces Bulk.Run
+func (b *Bulk) Run() (result *mgo.BulkResult, err error) {
+ span := newChildSpanFromContext(b.cfg)
+ result, err = b.Bulk.Run()
+ span.Finish(tracer.WithError(err))
+
+ return result, err
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/globalsign/mgo/mgo_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/globalsign/mgo/mgo_test.go
new file mode 100644
index 00000000..4bac5404
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/globalsign/mgo/mgo_test.go
@@ -0,0 +1,354 @@
+package mgo
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/globalsign/mgo"
+ "github.com/globalsign/mgo/bson"
+ "github.com/stretchr/testify/assert"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+func TestMain(m *testing.M) {
+ _, ok := os.LookupEnv("INTEGRATION")
+ if !ok {
+ fmt.Println("--- SKIP: to enable integration test, set the INTEGRATION environment variable")
+ os.Exit(0)
+ }
+ os.Exit(m.Run())
+}
+
+func testMongoCollectionCommand(assert *assert.Assertions, command func(*Collection)) []mocktracer.Span {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ parentSpan, ctx := tracer.StartSpanFromContext(
+ context.Background(),
+ "mgo-unittest",
+ tracer.SpanType("app"),
+ tracer.ResourceName("insert-test"),
+ )
+
+ session, err := Dial("localhost:27017", WithServiceName("unit-tests"), WithContext(ctx))
+ defer session.Close()
+
+ assert.NotNil(session)
+ assert.Nil(err)
+
+ db := session.DB("my_db")
+ collection := db.C("MyCollection")
+
+ command(collection)
+
+ parentSpan.Finish()
+
+ spans := mt.FinishedSpans()
+ return spans
+}
+
+func TestCollection_Insert(t *testing.T) {
+ assert := assert.New(t)
+
+ entity := bson.D{
+ bson.DocElem{
+ Name: "entity",
+ Value: bson.DocElem{
+ Name: "index",
+ Value: 0}}}
+
+ insert := func(collection *Collection) {
+ collection.Insert(entity)
+ }
+
+ spans := testMongoCollectionCommand(assert, insert)
+ assert.Equal(2, len(spans))
+ assert.Equal("mongodb.query", spans[0].OperationName())
+}
+
+func TestCollection_Update(t *testing.T) {
+ assert := assert.New(t)
+
+ entity := bson.D{
+ bson.DocElem{
+ Name: "entity",
+ Value: bson.DocElem{
+ Name: "index",
+ Value: 0}}}
+
+ insert := func(collection *Collection) {
+ collection.Insert(entity)
+ collection.Update(entity, entity)
+ }
+
+ spans := testMongoCollectionCommand(assert, insert)
+ assert.Equal(3, len(spans))
+ assert.Equal("mongodb.query", spans[1].OperationName())
+}
+
+func TestCollection_UpdateId(t *testing.T) {
+ assert := assert.New(t)
+
+ entity := bson.D{
+ bson.DocElem{
+ Name: "entity",
+ Value: bson.DocElem{
+ Name: "index",
+ Value: 0}}}
+
+ insert := func(collection *Collection) {
+ collection.Insert(entity)
+ var r bson.D
+ collection.Find(entity).Iter().Next(&r)
+ collection.UpdateId(r.Map()["_id"], entity)
+ }
+
+ spans := testMongoCollectionCommand(assert, insert)
+ assert.Equal(5, len(spans))
+ assert.Equal("mongodb.query", spans[3].OperationName())
+}
+
+func TestCollection_Upsert(t *testing.T) {
+ assert := assert.New(t)
+
+ entity := bson.D{
+ bson.DocElem{
+ Name: "entity",
+ Value: bson.DocElem{
+ Name: "index",
+ Value: 0}}}
+
+ insert := func(collection *Collection) {
+ collection.Insert(entity)
+ collection.Upsert(entity, entity)
+ var r bson.D
+ collection.Find(entity).Iter().Next(&r)
+ collection.UpsertId(r.Map()["_id"], entity)
+ }
+
+ spans := testMongoCollectionCommand(assert, insert)
+ assert.Equal(6, len(spans))
+ assert.Equal("mongodb.query", spans[1].OperationName())
+ assert.Equal("mongodb.query", spans[4].OperationName())
+}
+
+func TestCollection_UpdateAll(t *testing.T) {
+ assert := assert.New(t)
+
+ entity := bson.D{
+ bson.DocElem{
+ Name: "entity",
+ Value: bson.DocElem{
+ Name: "index",
+ Value: 0}}}
+
+ insert := func(collection *Collection) {
+ collection.Insert(entity)
+ collection.UpdateAll(entity, entity)
+ }
+
+ spans := testMongoCollectionCommand(assert, insert)
+ assert.Equal(3, len(spans))
+ assert.Equal("mongodb.query", spans[1].OperationName())
+}
+
+func TestCollection_FindId(t *testing.T) {
+ assert := assert.New(t)
+
+ entity := bson.D{
+ bson.DocElem{
+ Name: "entity",
+ Value: bson.DocElem{
+ Name: "index",
+ Value: 0}}}
+
+ insert := func(collection *Collection) {
+ collection.Insert(entity)
+ var r bson.D
+ collection.Find(entity).Iter().Next(&r)
+ var r2 bson.D
+ collection.FindId(r.Map()["_id"]).Iter().Next(&r2)
+ }
+
+ spans := testMongoCollectionCommand(assert, insert)
+ assert.Equal(6, len(spans))
+}
+
+func TestCollection_Remove(t *testing.T) {
+ assert := assert.New(t)
+
+ entity := bson.D{
+ bson.DocElem{
+ Name: "entity",
+ Value: bson.DocElem{
+ Name: "index",
+ Value: 0}}}
+
+ insert := func(collection *Collection) {
+ collection.Insert(entity)
+ collection.Remove(entity)
+ }
+
+ spans := testMongoCollectionCommand(assert, insert)
+ assert.Equal(3, len(spans))
+ assert.Equal("mongodb.query", spans[1].OperationName())
+}
+
+func TestCollection_RemoveId(t *testing.T) {
+ assert := assert.New(t)
+
+ entity := bson.D{
+ bson.DocElem{
+ Name: "entity",
+ Value: bson.DocElem{
+ Name: "index",
+ Value: 0}}}
+
+ removeByID := func(collection *Collection) {
+ collection.Insert(entity)
+ query := collection.Find(entity)
+ iter := query.Iter()
+ var r bson.D
+ iter.Next(&r)
+ id := r.Map()["_id"]
+ err := collection.RemoveId(id)
+ assert.NoError(err)
+ }
+
+ spans := testMongoCollectionCommand(assert, removeByID)
+ assert.Equal(5, len(spans))
+ assert.Equal("mongodb.query", spans[3].OperationName())
+}
+
+func TestCollection_RemoveAll(t *testing.T) {
+ assert := assert.New(t)
+
+ entity := bson.D{
+ bson.DocElem{
+ Name: "entity",
+ Value: bson.DocElem{
+ Name: "index",
+ Value: 0}}}
+
+ insert := func(collection *Collection) {
+ collection.Insert(entity)
+ collection.RemoveAll(entity)
+ }
+
+ spans := testMongoCollectionCommand(assert, insert)
+ assert.Equal(3, len(spans))
+ assert.Equal("mongodb.query", spans[1].OperationName())
+}
+
+func TestCollection_DropCollection(t *testing.T) {
+ assert := assert.New(t)
+
+ insert := func(collection *Collection) {
+ collection.DropCollection()
+ }
+
+ spans := testMongoCollectionCommand(assert, insert)
+ assert.Equal(2, len(spans))
+ assert.Equal("mongodb.query", spans[0].OperationName())
+}
+
+func TestCollection_Create(t *testing.T) {
+ assert := assert.New(t)
+
+ insert := func(collection *Collection) {
+ collection.Create(&mgo.CollectionInfo{})
+ }
+
+ spans := testMongoCollectionCommand(assert, insert)
+ assert.Equal(2, len(spans))
+ assert.Equal("mongodb.query", spans[0].OperationName())
+}
+
+func TestCollection_Count(t *testing.T) {
+ assert := assert.New(t)
+
+ insert := func(collection *Collection) {
+ collection.Count()
+ }
+
+ spans := testMongoCollectionCommand(assert, insert)
+ assert.Equal(2, len(spans))
+ assert.Equal("mongodb.query", spans[0].OperationName())
+}
+
+func TestCollection_IndexCommands(t *testing.T) {
+ assert := assert.New(t)
+
+ indexTest := func(collection *Collection) {
+ indexes, _ := collection.Indexes()
+ collection.DropIndex("_id_")
+ collection.DropIndexName("_id_")
+ collection.EnsureIndex(indexes[0])
+ collection.EnsureIndexKey("_id_")
+ }
+
+ spans := testMongoCollectionCommand(assert, indexTest)
+ assert.Equal(6, len(spans))
+ assert.Equal("mongodb.query", spans[0].OperationName())
+ assert.Equal("mongodb.query", spans[1].OperationName())
+ assert.Equal("mongodb.query", spans[2].OperationName())
+ assert.Equal("mongodb.query", spans[3].OperationName())
+ assert.Equal("mongodb.query", spans[4].OperationName())
+ assert.Equal("mgo-unittest", spans[5].OperationName())
+}
+
+func TestCollection_FindAndIter(t *testing.T) {
+ assert := assert.New(t)
+
+ entity := bson.D{
+ bson.DocElem{
+ Name: "entity",
+ Value: bson.DocElem{
+ Name: "index",
+ Value: 0}}}
+
+ insert := func(collection *Collection) {
+ collection.Insert(entity)
+ collection.Insert(entity)
+ collection.Insert(entity)
+
+ query := collection.Find(nil)
+ iter := query.Iter()
+ var r bson.D
+ iter.Next(&r)
+ var all []bson.D
+ iter.All(&all)
+ iter.Close()
+ }
+
+ spans := testMongoCollectionCommand(assert, insert)
+ assert.Equal(8, len(spans))
+ assert.Equal("mongodb.query", spans[3].OperationName())
+ assert.Equal("mongodb.query", spans[4].OperationName())
+ assert.Equal("mongodb.query", spans[5].OperationName())
+ assert.Equal("mongodb.query", spans[6].OperationName())
+}
+
+func TestCollection_Bulk(t *testing.T) {
+ assert := assert.New(t)
+
+ entity := bson.D{
+ bson.DocElem{
+ Name: "entity",
+ Value: bson.DocElem{
+ Name: "index",
+ Value: 0}}}
+
+ insert := func(collection *Collection) {
+ bulk := collection.Bulk()
+ bulk.Insert(entity)
+ bulk.Run()
+ }
+
+ spans := testMongoCollectionCommand(assert, insert)
+ assert.Equal(2, len(spans))
+ assert.Equal("mongodb.query", spans[0].OperationName())
+}
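+
+// Note on the span counts asserted above (an observation from these tests,
+// not documented API): each traced collection command emits one
+// "mongodb.query" span and the harness finishes with a root "mgo-unittest"
+// span, so a test that runs N commands expects N+1 finished spans in total.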
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/globalsign/mgo/option.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/globalsign/mgo/option.go
new file mode 100644
index 00000000..a23799ea
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/globalsign/mgo/option.go
@@ -0,0 +1,32 @@
+package mgo
+
+import "context"
+
+type mongoConfig struct {
+ ctx context.Context
+ serviceName string
+ tags map[string]string
+}
+
+func defaults(cfg *mongoConfig) {
+ cfg.serviceName = "mongodb"
+ cfg.ctx = context.Background()
+ cfg.tags = make(map[string]string)
+}
+
+// DialOption represents an option that can be passed to Dial
+type DialOption func(*mongoConfig)
+
+// WithServiceName sets the service name for a given MongoDB context.
+func WithServiceName(name string) DialOption {
+ return func(cfg *mongoConfig) {
+ cfg.serviceName = name
+ }
+}
+
+// WithContext sets the context.
+func WithContext(ctx context.Context) DialOption {
+ return func(cfg *mongoConfig) {
+ cfg.ctx = ctx
+ }
+}
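+
+// For illustration, a sketch of how these options are typically applied
+// (assuming this package's Dial wrapper, which mirrors mgo.Dial):
+//
+//	session, err := Dial("localhost:27017",
+//		WithServiceName("my-mongo"),
+//		WithContext(ctx))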
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/globalsign/mgo/pipe.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/globalsign/mgo/pipe.go
new file mode 100644
index 00000000..0c384e96
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/globalsign/mgo/pipe.go
@@ -0,0 +1,60 @@
+package mgo
+
+import (
+ "github.com/globalsign/mgo"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+// Pipe is an mgo.Pipe instance along with the data necessary for tracing.
+type Pipe struct {
+ *mgo.Pipe
+ cfg mongoConfig
+}
+
+// Iter invokes and traces Pipe.Iter
+func (p *Pipe) Iter() *Iter {
+ span := newChildSpanFromContext(p.cfg)
+ iter := p.Pipe.Iter()
+ span.Finish()
+ return &Iter{
+ Iter: iter,
+ cfg: p.cfg,
+ }
+}
+
+// All invokes and traces Pipe.All
+func (p *Pipe) All(result interface{}) error {
+ return p.Iter().All(result)
+}
+
+// One invokes and traces Pipe.One
+func (p *Pipe) One(result interface{}) (err error) {
+ span := newChildSpanFromContext(p.cfg)
+	// finish via a closure so that the named return value err is evaluated
+	// at return time rather than when the defer statement executes
+	defer func() { span.Finish(tracer.WithError(err)) }()
+ err = p.Pipe.One(result)
+ return
+}
+
+// AllowDiskUse invokes and traces Pipe.AllowDiskUse
+func (p *Pipe) AllowDiskUse() *Pipe {
+ return &Pipe{
+ Pipe: p.Pipe.AllowDiskUse(),
+ cfg: p.cfg,
+ }
+}
+
+// Batch invokes and traces Pipe.Batch
+func (p *Pipe) Batch(n int) *Pipe {
+ return &Pipe{
+ Pipe: p.Pipe.Batch(n),
+ cfg: p.cfg,
+ }
+}
+
+// Explain invokes and traces Pipe.Explain
+func (p *Pipe) Explain(result interface{}) (err error) {
+ span := newChildSpanFromContext(p.cfg)
+	defer func() { span.Finish(tracer.WithError(err)) }()
+ err = p.Pipe.Explain(result)
+ return
+}
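+
+// Sketch of use, assuming this package's Collection wrapper exposes a Pipe
+// method mirroring mgo's (the pipeline below is purely illustrative):
+//
+//	pipe := collection.Pipe([]bson.M{{"$match": bson.M{"name": "joe"}}})
+//	var results []bson.M
+//	err := pipe.All(&results) // traced via Pipe.All -> Iter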
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/globalsign/mgo/query.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/globalsign/mgo/query.go
new file mode 100644
index 00000000..58ff85ff
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/globalsign/mgo/query.go
@@ -0,0 +1,183 @@
+package mgo
+
+import (
+ "time"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ "github.com/globalsign/mgo"
+)
+
+// Query is an mgo.Query instance along with the data necessary for tracing.
+type Query struct {
+ *mgo.Query
+ cfg mongoConfig
+}
+
+// Iter invokes and traces Query.Iter
+func (q *Query) Iter() *Iter {
+ span := newChildSpanFromContext(q.cfg)
+ iter := q.Query.Iter()
+ span.Finish()
+ return &Iter{
+ Iter: iter,
+ cfg: q.cfg,
+ }
+}
+
+// All invokes and traces Query.All
+func (q *Query) All(result interface{}) error {
+ span := newChildSpanFromContext(q.cfg)
+	// delegate to the embedded mgo.Query to avoid infinite recursion
+	err := q.Query.All(result)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// Apply invokes and traces Query.Apply
+func (q *Query) Apply(change mgo.Change, result interface{}) (info *mgo.ChangeInfo, err error) {
+ span := newChildSpanFromContext(q.cfg)
+	info, err = q.Query.Apply(change, result)
+ span.Finish(tracer.WithError(err))
+ return info, err
+}
+
+// Batch invokes and traces Query.Batch
+func (q *Query) Batch(n int) *Query {
+ return &Query{
+ Query: q.Query.Batch(n),
+ cfg: q.cfg,
+ }
+}
+
+// Collation invokes and traces Query.Collation
+func (q *Query) Collation(collation *mgo.Collation) *Query {
+ return &Query{
+ Query: q.Query.Collation(collation),
+ cfg: q.cfg,
+ }
+}
+
+// Comment invokes and traces Query.Comment
+func (q *Query) Comment(comment string) *Query {
+ return &Query{
+ Query: q.Query.Comment(comment),
+ cfg: q.cfg,
+ }
+}
+
+// Count invokes and traces Query.Count
+func (q *Query) Count() (n int, err error) {
+ span := newChildSpanFromContext(q.cfg)
+	n, err = q.Query.Count()
+ span.Finish(tracer.WithError(err))
+ return n, err
+}
+
+// Distinct invokes and traces Query.Distinct
+func (q *Query) Distinct(key string, result interface{}) error {
+ span := newChildSpanFromContext(q.cfg)
+	err := q.Query.Distinct(key, result)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// Explain invokes and traces Query.Explain
+func (q *Query) Explain(result interface{}) error {
+ span := newChildSpanFromContext(q.cfg)
+	err := q.Query.Explain(result)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// For invokes and traces Query.For
+func (q *Query) For(result interface{}, f func() error) error {
+ span := newChildSpanFromContext(q.cfg)
+	err := q.Query.For(result, f)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// MapReduce invokes and traces Query.MapReduce
+func (q *Query) MapReduce(job *mgo.MapReduce, result interface{}) (info *mgo.MapReduceInfo, err error) {
+ span := newChildSpanFromContext(q.cfg)
+	info, err = q.Query.MapReduce(job, result)
+ span.Finish(tracer.WithError(err))
+ return info, err
+}
+
+// One invokes and traces Query.One
+func (q *Query) One(result interface{}) error {
+ span := newChildSpanFromContext(q.cfg)
+	err := q.Query.One(result)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// Prefetch invokes Query.Prefetch and configures the
+// returned *Query for tracing.
+func (q *Query) Prefetch(p float64) *Query {
+ return &Query{
+ Query: q.Query.Prefetch(p),
+ cfg: q.cfg,
+ }
+}
+
+// Select invokes Query.Select and configures the
+// returned *Query for tracing.
+func (q *Query) Select(selector interface{}) *Query {
+ return &Query{
+ Query: q.Query.Select(selector),
+ cfg: q.cfg,
+ }
+}
+
+// SetMaxScan invokes and traces Query.SetMaxScan
+func (q *Query) SetMaxScan(n int) *Query {
+ return &Query{
+ Query: q.Query.SetMaxScan(n),
+ cfg: q.cfg,
+ }
+}
+
+// SetMaxTime invokes and traces Query.SetMaxTime
+func (q *Query) SetMaxTime(d time.Duration) *Query {
+ return &Query{
+ Query: q.Query.SetMaxTime(d),
+ cfg: q.cfg,
+ }
+}
+
+// Skip invokes and traces Query.Skip
+func (q *Query) Skip(n int) *Query {
+ return &Query{
+ Query: q.Query.Skip(n),
+ cfg: q.cfg,
+ }
+}
+
+// Snapshot invokes and traces Query.Snapshot
+func (q *Query) Snapshot() *Query {
+ return &Query{
+ Query: q.Query.Snapshot(),
+ cfg: q.cfg,
+ }
+}
+
+// Sort invokes and traces Query.Sort
+func (q *Query) Sort(fields ...string) *Query {
+ return &Query{
+ Query: q.Query.Sort(fields...),
+ cfg: q.cfg,
+ }
+}
+
+// Tail invokes and traces Query.Tail
+func (q *Query) Tail(timeout time.Duration) *Iter {
+ span := newChildSpanFromContext(q.cfg)
+ iter := q.Query.Tail(timeout)
+ span.Finish()
+ return &Iter{
+ Iter: iter,
+ cfg: q.cfg,
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis/example_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis/example_test.go
new file mode 100644
index 00000000..d4ef1212
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis/example_test.go
@@ -0,0 +1,54 @@
+package redis_test
+
+import (
+ "context"
+ "time"
+
+ "github.com/go-redis/redis"
+ redistrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+// To start tracing Redis, simply create a new client using this library and
+// continue using it as you normally would.
+func Example() {
+ // create a new Client
+ opts := &redis.Options{Addr: "127.0.0.1", Password: "", DB: 0}
+ c := redistrace.NewClient(opts)
+
+ // any action emits a span
+ c.Set("test_key", "test_value", 0)
+
+ // optionally, create a new root span
+ root, ctx := tracer.StartSpanFromContext(context.Background(), "parent.request",
+ tracer.SpanType(ext.SpanTypeRedis),
+ tracer.ServiceName("web"),
+ tracer.ResourceName("/home"),
+ )
+
+ // set the context on the client
+ c = c.WithContext(ctx)
+
+	// issue further commands, which will inherit from the parent span in the context.
+ c.Set("food", "cheese", 0)
+ root.Finish()
+}
+
+// You can also trace Redis pipelines. Simply use the pipeline as usual and
+// the traces will be picked up automatically by the underlying implementation.
+func Example_pipeliner() {
+ // create a client
+ opts := &redis.Options{Addr: "127.0.0.1", Password: "", DB: 0}
+ c := redistrace.NewClient(opts, redistrace.WithServiceName("my-redis-service"))
+
+ // open the pipeline
+ pipe := c.Pipeline()
+
+ // submit some commands
+ pipe.Incr("pipeline_counter")
+ pipe.Expire("pipeline_counter", time.Hour)
+
+ // execute with trace
+ pipe.Exec()
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis/option.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis/option.go
new file mode 100644
index 00000000..711dbcde
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis/option.go
@@ -0,0 +1,17 @@
+package redis // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis"
+
+type clientConfig struct{ serviceName string }
+
+// ClientOption represents an option that can be used to create or wrap a client.
+type ClientOption func(*clientConfig)
+
+func defaults(cfg *clientConfig) {
+ cfg.serviceName = "redis.client"
+}
+
+// WithServiceName sets the given service name for the client.
+func WithServiceName(name string) ClientOption {
+ return func(cfg *clientConfig) {
+ cfg.serviceName = name
+ }
+}
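+
+// For example (sketch):
+//
+//	c := NewClient(&redis.Options{Addr: "127.0.0.1:6379"},
+//		WithServiceName("my-redis-service"))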
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis/redis.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis/redis.go
new file mode 100644
index 00000000..18227846
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis/redis.go
@@ -0,0 +1,172 @@
+// Package redis provides functions to trace the go-redis/redis package (https://github.com/go-redis/redis).
+package redis
+
+import (
+ "bytes"
+ "context"
+ "net"
+ "strconv"
+ "strings"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ "github.com/go-redis/redis"
+)
+
+// Client is used to trace requests to a redis server.
+type Client struct {
+ *redis.Client
+ *params
+}
+
+// Pipeliner is used to trace pipelines executed on a Redis server.
+type Pipeliner struct {
+ redis.Pipeliner
+ *params
+}
+
+// params holds the tracer and a set of parameters which are recorded with every trace.
+type params struct {
+ host string
+ port string
+ db string
+ config *clientConfig
+}
+
+// NewClient returns a new Client that is traced with the default tracer under
+// the service name "redis.client".
+func NewClient(opt *redis.Options, opts ...ClientOption) *Client {
+ return WrapClient(redis.NewClient(opt), opts...)
+}
+
+// WrapClient wraps a given redis.Client with a tracer under the given service name.
+func WrapClient(c *redis.Client, opts ...ClientOption) *Client {
+ cfg := new(clientConfig)
+ defaults(cfg)
+ for _, fn := range opts {
+ fn(cfg)
+ }
+ opt := c.Options()
+ host, port, err := net.SplitHostPort(opt.Addr)
+ if err != nil {
+ host = opt.Addr
+ port = "6379"
+ }
+	params := &params{
+ host: host,
+ port: port,
+ db: strconv.Itoa(opt.DB),
+ config: cfg,
+ }
+ tc := &Client{c, params}
+ tc.Client.WrapProcess(createWrapperFromClient(tc))
+ return tc
+}
+
+// Pipeline creates a Pipeline from a Client
+func (c *Client) Pipeline() *Pipeliner {
+ return &Pipeliner{c.Client.Pipeline(), c.params}
+}
+
+// ExecWithContext calls Pipeline.Exec(). It ensures that the resulting Redis calls
+// are traced, and that emitted spans are children of the given Context.
+func (c *Pipeliner) ExecWithContext(ctx context.Context) ([]redis.Cmder, error) {
+ return c.execWithContext(ctx)
+}
+
+// Exec calls Pipeline.Exec() ensuring that the resulting Redis calls are traced.
+func (c *Pipeliner) Exec() ([]redis.Cmder, error) {
+ return c.execWithContext(context.Background())
+}
+
+func (c *Pipeliner) execWithContext(ctx context.Context) ([]redis.Cmder, error) {
+ p := c.params
+ span, _ := tracer.StartSpanFromContext(ctx, "redis.command",
+ tracer.SpanType(ext.SpanTypeRedis),
+ tracer.ServiceName(p.config.serviceName),
+ tracer.ResourceName("redis"),
+ tracer.Tag(ext.TargetHost, p.host),
+ tracer.Tag(ext.TargetPort, p.port),
+ tracer.Tag("out.db", p.db),
+ )
+ cmds, err := c.Pipeliner.Exec()
+ span.SetTag(ext.ResourceName, commandsToString(cmds))
+ span.SetTag("redis.pipeline_length", strconv.Itoa(len(cmds)))
+ var opts []ddtrace.FinishOption
+ if err != redis.Nil {
+ opts = append(opts, tracer.WithError(err))
+ }
+ span.Finish(opts...)
+
+ return cmds, err
+}
+
+// commandsToString returns a string representation of a slice of redis Commands, separated by newlines.
+func commandsToString(cmds []redis.Cmder) string {
+ var b bytes.Buffer
+ for _, cmd := range cmds {
+ b.WriteString(cmderToString(cmd))
+ b.WriteString("\n")
+ }
+ return b.String()
+}
+
+// WithContext sets a context on a Client. Use it to ensure that emitted spans have the correct parent.
+func (c *Client) WithContext(ctx context.Context) *Client {
+ c.Client = c.Client.WithContext(ctx)
+ return c
+}
+
+// createWrapperFromClient returns a new createWrapper function which wraps the processor with tracing
+// information obtained from the provided Client. To understand this functionality better see the
+// documentation for the github.com/go-redis/redis.(*baseClient).WrapProcess function.
+func createWrapperFromClient(tc *Client) func(oldProcess func(cmd redis.Cmder) error) func(cmd redis.Cmder) error {
+ return func(oldProcess func(cmd redis.Cmder) error) func(cmd redis.Cmder) error {
+ return func(cmd redis.Cmder) error {
+ ctx := tc.Client.Context()
+ raw := cmderToString(cmd)
+ parts := strings.Split(raw, " ")
+ length := len(parts) - 1
+ p := tc.params
+ span, _ := tracer.StartSpanFromContext(ctx, "redis.command",
+ tracer.SpanType(ext.SpanTypeRedis),
+ tracer.ServiceName(p.config.serviceName),
+ tracer.ResourceName(parts[0]),
+ tracer.Tag(ext.TargetHost, p.host),
+ tracer.Tag(ext.TargetPort, p.port),
+ tracer.Tag("out.db", p.db),
+ tracer.Tag("redis.raw_command", raw),
+ tracer.Tag("redis.args_length", strconv.Itoa(length)),
+ )
+ err := oldProcess(cmd)
+ var opts []ddtrace.FinishOption
+ if err != redis.Nil {
+ opts = append(opts, tracer.WithError(err))
+ }
+ span.Finish(opts...)
+ return err
+ }
+ }
+}
+
+func cmderToString(cmd redis.Cmder) string {
+	// We want to support multiple versions of the go-redis library. In
+	// older versions Cmder implements the Stringer interface, while in
+	// newer versions that was removed in favor of a String method that
+	// also returns an error. By doing a type assertion we can support
+	// both versions.
+ if s, ok := cmd.(interface{ String() string }); ok {
+ return s.String()
+ }
+
+ if s, ok := cmd.(interface{ String() (string, error) }); ok {
+ str, err := s.String()
+ if err == nil {
+ return str
+ }
+ }
+
+ return ""
+}
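+
+// The assertions above use anonymous interface types: any command whose
+// method set includes String() string satisfies the first, and any command
+// with String() (string, error) satisfies the second, which is how a single
+// binary supports both generations of the go-redis API.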
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis/redis_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis/redis_test.go
new file mode 100644
index 00000000..9cd5b7a3
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/go-redis/redis/redis_test.go
@@ -0,0 +1,196 @@
+package redis
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ "github.com/go-redis/redis"
+ "github.com/stretchr/testify/assert"
+)
+
+const debug = false
+
+func TestMain(m *testing.M) {
+ _, ok := os.LookupEnv("INTEGRATION")
+ if !ok {
+ fmt.Println("--- SKIP: to enable integration test, set the INTEGRATION environment variable")
+ os.Exit(0)
+ }
+ os.Exit(m.Run())
+}
+
+func TestClient(t *testing.T) {
+ opts := &redis.Options{Addr: "127.0.0.1:6379"}
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ client := NewClient(opts, WithServiceName("my-redis"))
+ client.Set("test_key", "test_value", 0)
+
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 1)
+
+ span := spans[0]
+ assert.Equal("redis.command", span.OperationName())
+ assert.Equal(ext.SpanTypeRedis, span.Tag(ext.SpanType))
+ assert.Equal("my-redis", span.Tag(ext.ServiceName))
+ assert.Equal("127.0.0.1", span.Tag(ext.TargetHost))
+ assert.Equal("6379", span.Tag(ext.TargetPort))
+ assert.Equal("set test_key test_value: ", span.Tag("redis.raw_command"))
+ assert.Equal("3", span.Tag("redis.args_length"))
+}
+
+func TestPipeline(t *testing.T) {
+ opts := &redis.Options{Addr: "127.0.0.1:6379"}
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ client := NewClient(opts, WithServiceName("my-redis"))
+ pipeline := client.Pipeline()
+ pipeline.Expire("pipeline_counter", time.Hour)
+
+ // Exec with context test
+ pipeline.ExecWithContext(context.Background())
+
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 1)
+
+ span := spans[0]
+ assert.Equal("redis.command", span.OperationName())
+ assert.Equal(ext.SpanTypeRedis, span.Tag(ext.SpanType))
+ assert.Equal("my-redis", span.Tag(ext.ServiceName))
+ assert.Equal("expire pipeline_counter 3600: false\n", span.Tag(ext.ResourceName))
+ assert.Equal("127.0.0.1", span.Tag(ext.TargetHost))
+ assert.Equal("6379", span.Tag(ext.TargetPort))
+ assert.Equal("1", span.Tag("redis.pipeline_length"))
+
+ mt.Reset()
+ pipeline.Expire("pipeline_counter", time.Hour)
+ pipeline.Expire("pipeline_counter_1", time.Minute)
+
+ // Rewriting Exec
+ pipeline.Exec()
+
+ spans = mt.FinishedSpans()
+ assert.Len(spans, 1)
+
+ span = spans[0]
+ assert.Equal("redis.command", span.OperationName())
+ assert.Equal(ext.SpanTypeRedis, span.Tag(ext.SpanType))
+ assert.Equal("my-redis", span.Tag(ext.ServiceName))
+ assert.Equal("expire pipeline_counter 3600: false\nexpire pipeline_counter_1 60: false\n", span.Tag(ext.ResourceName))
+ assert.Equal("2", span.Tag("redis.pipeline_length"))
+}
+
+func TestChildSpan(t *testing.T) {
+ opts := &redis.Options{Addr: "127.0.0.1:6379"}
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ // Parent span
+ client := NewClient(opts, WithServiceName("my-redis"))
+ root, ctx := tracer.StartSpanFromContext(context.Background(), "parent.span")
+ client = client.WithContext(ctx)
+ client.Set("test_key", "test_value", 0)
+ root.Finish()
+
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 2)
+
+ var child, parent mocktracer.Span
+ for _, s := range spans {
+		// order of traces in buffer is not guaranteed
+ switch s.OperationName() {
+ case "redis.command":
+ child = s
+ case "parent.span":
+ parent = s
+ }
+ }
+ assert.NotNil(parent)
+ assert.NotNil(child)
+
+ assert.Equal(child.ParentID(), parent.SpanID())
+ assert.Equal(child.Tag(ext.TargetHost), "127.0.0.1")
+ assert.Equal(child.Tag(ext.TargetPort), "6379")
+}
+
+func TestMultipleCommands(t *testing.T) {
+ opts := &redis.Options{Addr: "127.0.0.1:6379"}
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ client := NewClient(opts, WithServiceName("my-redis"))
+ client.Set("test_key", "test_value", 0)
+ client.Get("test_key")
+ client.Incr("int_key")
+ client.ClientList()
+
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 4)
+
+ // Checking all commands were recorded
+ var commands [4]string
+ for i := 0; i < 4; i++ {
+ commands[i] = spans[i].Tag("redis.raw_command").(string)
+ }
+ assert.Contains(commands, "set test_key test_value: ")
+ assert.Contains(commands, "get test_key: ")
+ assert.Contains(commands, "incr int_key: 0")
+ assert.Contains(commands, "client list: ")
+}
+
+func TestError(t *testing.T) {
+ t.Run("wrong-port", func(t *testing.T) {
+ opts := &redis.Options{Addr: "127.0.0.1:6378"} // wrong port
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ client := NewClient(opts, WithServiceName("my-redis"))
+ _, err := client.Get("key").Result()
+
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 1)
+ span := spans[0]
+
+ assert.Equal("redis.command", span.OperationName())
+ assert.NotNil(err)
+ assert.Equal(err, span.Tag(ext.Error))
+ assert.Equal("127.0.0.1", span.Tag(ext.TargetHost))
+ assert.Equal("6378", span.Tag(ext.TargetPort))
+ assert.Equal("get key: ", span.Tag("redis.raw_command"))
+ })
+
+ t.Run("nil", func(t *testing.T) {
+ opts := &redis.Options{Addr: "127.0.0.1:6379"}
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ client := NewClient(opts, WithServiceName("my-redis"))
+ _, err := client.Get("non_existent_key").Result()
+
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 1)
+ span := spans[0]
+
+ assert.Equal(redis.Nil, err)
+ assert.Equal("redis.command", span.OperationName())
+ assert.Empty(span.Tag(ext.Error))
+ assert.Equal("127.0.0.1", span.Tag(ext.TargetHost))
+ assert.Equal("6379", span.Tag(ext.TargetPort))
+ assert.Equal("get non_existent_key: ", span.Tag("redis.raw_command"))
+ })
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gocql/gocql/example_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gocql/gocql/example_test.go
new file mode 100644
index 00000000..8688c586
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gocql/gocql/example_test.go
@@ -0,0 +1,32 @@
+package gocql_test
+
+import (
+ "context"
+
+ "github.com/gocql/gocql"
+ gocqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/gocql/gocql"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+// To trace Cassandra commands, use our query wrapper WrapQuery.
+func Example() {
+	// Initialise a Cassandra session as usual and create a query.
+ cluster := gocql.NewCluster("127.0.0.1")
+ session, _ := cluster.CreateSession()
+ query := session.Query("CREATE KEYSPACE if not exists trace WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor': 1}")
+
+ // Use context to pass information down the call chain
+ _, ctx := tracer.StartSpanFromContext(context.Background(), "parent.request",
+ tracer.SpanType(ext.SpanTypeCassandra),
+ tracer.ServiceName("web"),
+ tracer.ResourceName("/home"),
+ )
+
+ // Wrap the query to trace it and pass the context for inheritance
+ tracedQuery := gocqltrace.WrapQuery(query, gocqltrace.WithServiceName("ServiceName"))
+ tracedQuery.WithContext(ctx)
+
+ // Execute your query as usual
+ tracedQuery.Exec()
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gocql/gocql/gocql.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gocql/gocql/gocql.go
new file mode 100644
index 00000000..cfa81ab4
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gocql/gocql/gocql.go
@@ -0,0 +1,150 @@
+// Package gocql provides functions to trace the gocql/gocql package (https://github.com/gocql/gocql).
+package gocql // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/gocql/gocql"
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ "github.com/gocql/gocql"
+)
+
+// Query inherits from gocql.Query; it keeps the tracing params and the context.
+type Query struct {
+ *gocql.Query
+ *params
+ traceContext context.Context
+}
+
+// Iter inherits from gocql.Iter and contains a span.
+type Iter struct {
+ *gocql.Iter
+ span ddtrace.Span
+}
+
+// params contains fields and metadata useful for command tracing
+type params struct {
+ config *queryConfig
+ keyspace string
+ paginated bool
+}
+
+// WrapQuery wraps a gocql.Query into a traced Query under the given service name.
+// Note that the returned Query structure embeds the original gocql.Query structure.
+// This means that any method returning the query for chaining that is not part
+// of this package's Query structure should be called before WrapQuery, otherwise
+// the tracing context could be lost.
+//
+// To be more specific: it is ok (and recommended) to use and chain the return value
+// of `WithContext` and `PageState` but not that of `Consistency`, `Trace`,
+// `Observer`, etc.
+func WrapQuery(q *gocql.Query, opts ...WrapOption) *Query {
+ cfg := new(queryConfig)
+ defaults(cfg)
+ for _, fn := range opts {
+ fn(cfg)
+ }
+ if cfg.resourceName == "" {
+ q := `"` + strings.SplitN(q.String(), "\"", 3)[1] + `"`
+ q, err := strconv.Unquote(q)
+ if err != nil {
+ // avoid having an empty resource as it will cause the trace
+ // to be dropped.
+ q = "_"
+ }
+ cfg.resourceName = q
+ }
+	tq := &Query{q, &params{config: cfg}, context.Background()}
+ return tq
+}
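+
+// For example (sketch): chain plain gocql methods first, then wrap and trace.
+//
+//	q := session.Query("SELECT * FROM trace.person").Consistency(gocql.One)
+//	tq := WrapQuery(q, WithServiceName("cassandra")).WithContext(ctx)
+//	err := tq.Exec()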
+
+// WithContext adds the specified context to the traced Query structure.
+func (tq *Query) WithContext(ctx context.Context) *Query {
+ tq.traceContext = ctx
+ tq.Query.WithContext(ctx)
+ return tq
+}
+
+// PageState rewrites the original function so that spans are aware of the change.
+func (tq *Query) PageState(state []byte) *Query {
+ tq.params.paginated = true
+ tq.Query = tq.Query.PageState(state)
+ return tq
+}
+
+// newChildSpan creates a new span from the params and the context.
+func (tq *Query) newChildSpan(ctx context.Context) ddtrace.Span {
+ p := tq.params
+ span, _ := tracer.StartSpanFromContext(ctx, ext.CassandraQuery,
+ tracer.SpanType(ext.SpanTypeCassandra),
+ tracer.ServiceName(p.config.serviceName),
+ tracer.ResourceName(p.config.resourceName),
+ tracer.Tag(ext.CassandraPaginated, fmt.Sprintf("%t", p.paginated)),
+ tracer.Tag(ext.CassandraKeyspace, p.keyspace),
+ )
+ return span
+}
+
+// Exec is rewritten so that it goes through our custom Iter
+func (tq *Query) Exec() error {
+ return tq.Iter().Close()
+}
+
+// MapScan wraps the query.MapScan call in a span.
+func (tq *Query) MapScan(m map[string]interface{}) error {
+ span := tq.newChildSpan(tq.traceContext)
+ err := tq.Query.MapScan(m)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// Scan wraps the query.Scan call in a span.
+func (tq *Query) Scan(dest ...interface{}) error {
+ span := tq.newChildSpan(tq.traceContext)
+ err := tq.Query.Scan(dest...)
+ span.Finish(tracer.WithError(err))
+ return err
+}
+
+// ScanCAS wraps the query.ScanCAS call in a span.
+func (tq *Query) ScanCAS(dest ...interface{}) (applied bool, err error) {
+ span := tq.newChildSpan(tq.traceContext)
+ applied, err = tq.Query.ScanCAS(dest...)
+ span.Finish(tracer.WithError(err))
+ return applied, err
+}
+
+// Iter starts a new span for the query.Iter call.
+func (tq *Query) Iter() *Iter {
+ span := tq.newChildSpan(tq.traceContext)
+ iter := tq.Query.Iter()
+ span.SetTag(ext.CassandraRowCount, strconv.Itoa(iter.NumRows()))
+ span.SetTag(ext.CassandraConsistencyLevel, strconv.Itoa(int(tq.GetConsistency())))
+
+ columns := iter.Columns()
+ if len(columns) > 0 {
+ span.SetTag(ext.CassandraKeyspace, columns[0].Keyspace)
+ }
+ tIter := &Iter{iter, span}
+ if tIter.Host() != nil {
+ tIter.span.SetTag(ext.TargetHost, tIter.Iter.Host().HostID())
+ tIter.span.SetTag(ext.TargetPort, strconv.Itoa(tIter.Iter.Host().Port()))
+ tIter.span.SetTag(ext.CassandraCluster, tIter.Iter.Host().DataCenter())
+ }
+ return tIter
+}
+
+// Close closes the Iter and finishes the span created by the Iter call.
+func (tIter *Iter) Close() error {
+ err := tIter.Iter.Close()
+ if err != nil {
+ tIter.span.SetTag(ext.Error, err)
+ }
+ tIter.span.Finish()
+ return err
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gocql/gocql/gocql_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gocql/gocql/gocql_test.go
new file mode 100644
index 00000000..b04987b3
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gocql/gocql/gocql_test.go
@@ -0,0 +1,121 @@
+package gocql
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "os"
+ "testing"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ "github.com/gocql/gocql"
+ "github.com/stretchr/testify/assert"
+)
+
+const (
+ debug = false
+ cassandraHost = "127.0.0.1:9042"
+)
+
+func newCassandraCluster() *gocql.ClusterConfig {
+ cluster := gocql.NewCluster(cassandraHost)
+	// InitialHostLookup must be disabled in newer versions of gocql,
+	// otherwise a "no connections were made when creating the session"
+	// error is returned for misconfigured Cassandra clusters. The lookup
+	// isn't needed here since we're testing the tracing wrapper rather
+	// than the client itself.
+	// Check: https://github.com/gocql/gocql/issues/946
+ cluster.DisableInitialHostLookup = true
+ return cluster
+}
+
+// TestMain sets up the keyspace and table if they do not exist
+func TestMain(m *testing.M) {
+ _, ok := os.LookupEnv("INTEGRATION")
+ if !ok {
+ fmt.Println("--- SKIP: to enable integration test, set the INTEGRATION environment variable")
+ os.Exit(0)
+ }
+ cluster := newCassandraCluster()
+ session, err := cluster.CreateSession()
+ if err != nil {
+ log.Fatalf("%v\n", err)
+ }
+	// Ensure the test keyspace and the person table exist.
+ session.Query("CREATE KEYSPACE if not exists trace WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor': 1}").Exec()
+ session.Query("CREATE TABLE if not exists trace.person (name text PRIMARY KEY, age int, description text)").Exec()
+ session.Query("INSERT INTO trace.person (name, age, description) VALUES ('Cassandra', 100, 'A cruel mistress')").Exec()
+
+ m.Run()
+}
+
+func TestErrorWrapper(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ cluster := newCassandraCluster()
+ session, err := cluster.CreateSession()
+ assert.Nil(err)
+ q := session.Query("CREATE KEYSPACE trace WITH REPLICATION = { 'class' : 'NetworkTopologyStrategy', 'datacenter1' : 1 };")
+ iter := WrapQuery(q, WithServiceName("ServiceName"), WithResourceName("CREATE KEYSPACE")).Iter()
+ err = iter.Close()
+
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 1)
+ span := spans[0]
+
+ assert.Equal(span.Tag(ext.Error).(error), err)
+ assert.Equal(span.OperationName(), ext.CassandraQuery)
+ assert.Equal(span.Tag(ext.ResourceName), "CREATE KEYSPACE")
+ assert.Equal(span.Tag(ext.ServiceName), "ServiceName")
+ assert.Equal(span.Tag(ext.CassandraConsistencyLevel), "4")
+ assert.Equal(span.Tag(ext.CassandraPaginated), "false")
+
+ if iter.Host() != nil {
+ assert.Equal(span.Tag(ext.TargetPort), "9042")
+ assert.Equal(span.Tag(ext.TargetHost), "127.0.0.1")
+ assert.Equal(span.Tag(ext.CassandraCluster), "datacenter1")
+ }
+}
+
+func TestChildWrapperSpan(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ // Parent span
+ parentSpan, ctx := tracer.StartSpanFromContext(context.Background(), "parentSpan")
+ cluster := newCassandraCluster()
+ session, err := cluster.CreateSession()
+ assert.Nil(err)
+ q := session.Query("SELECT * from trace.person")
+ tq := WrapQuery(q, WithServiceName("TestServiceName"))
+ iter := tq.WithContext(ctx).Iter()
+ iter.Close()
+ parentSpan.Finish()
+
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 2)
+
+ var childSpan, pSpan mocktracer.Span
+ if spans[0].ParentID() == spans[1].SpanID() {
+ childSpan = spans[0]
+ pSpan = spans[1]
+ } else {
+ childSpan = spans[1]
+ pSpan = spans[0]
+ }
+ assert.Equal(pSpan.OperationName(), "parentSpan")
+ assert.Equal(childSpan.ParentID(), pSpan.SpanID())
+ assert.Equal(childSpan.OperationName(), ext.CassandraQuery)
+ assert.Equal(childSpan.Tag(ext.ResourceName), "SELECT * from trace.person")
+ assert.Equal(childSpan.Tag(ext.CassandraKeyspace), "trace")
+ if iter.Host() != nil {
+ assert.Equal(childSpan.Tag(ext.TargetPort), "9042")
+ assert.Equal(childSpan.Tag(ext.TargetHost), "127.0.0.1")
+ assert.Equal(childSpan.Tag(ext.CassandraCluster), "datacenter1")
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gocql/gocql/option.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gocql/gocql/option.go
new file mode 100644
index 00000000..23bafae0
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gocql/gocql/option.go
@@ -0,0 +1,29 @@
+package gocql
+
+type queryConfig struct{ serviceName, resourceName string }
+
+// WrapOption represents an option that can be passed to WrapQuery.
+type WrapOption func(*queryConfig)
+
+func defaults(cfg *queryConfig) {
+ cfg.serviceName = "gocql.query"
+}
+
+// WithServiceName sets the given service name for the returned query.
+func WithServiceName(name string) WrapOption {
+ return func(cfg *queryConfig) {
+ cfg.serviceName = name
+ }
+}
+
+// WithResourceName sets a custom resource name to be used with the traced query.
+// By default, the query statement is extracted automatically. This method should
+// be used when a different resource name is desired or in performance critical
+// environments. The gocql library returns the query statement using an fmt.Sprintf
+// call, which can be costly when called repeatedly. Using WithResourceName will
+// avoid that call. Under normal circumstances, it is safe to rely on the default.
+func WithResourceName(name string) WrapOption {
+ return func(cfg *queryConfig) {
+ cfg.resourceName = name
+ }
+}
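+
+// For example (sketch), a hot path can skip the fmt.Sprintf-based statement
+// extraction entirely by naming the resource up front:
+//
+//	tq := WrapQuery(q, WithResourceName("SELECT trace.person"))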
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12/example_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12/example_test.go
new file mode 100644
index 00000000..bf3390e7
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12/example_test.go
@@ -0,0 +1,45 @@
+package grpc_test
+
+import (
+ "log"
+ "net"
+
+ grpctrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12"
+
+ "google.golang.org/grpc"
+)
+
+func Example_client() {
+ // Create the client interceptor using the grpc trace package.
+ i := grpctrace.UnaryClientInterceptor(grpctrace.WithServiceName("my-grpc-client"))
+
+ // Dial in using the created interceptor...
+ conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure(), grpc.WithUnaryInterceptor(i))
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer conn.Close()
+
+ // And continue using the connection as normal.
+}
+
+func Example_server() {
+ // Create a listener for the server.
+ ln, err := net.Listen("tcp", ":50051")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Create the unary server interceptor using the grpc trace package.
+	i := grpctrace.UnaryServerInterceptor(grpctrace.WithServiceName("my-grpc-server"))
+
+ // Initialize the grpc server as normal, using the tracing interceptor.
+ s := grpc.NewServer(grpc.UnaryInterceptor(i))
+
+ // ... register your services
+
+ // Start serving incoming connections.
+ if err := s.Serve(ln); err != nil {
+ log.Fatalf("failed to serve: %v", err)
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12/fixtures_test.pb.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12/fixtures_test.pb.go
new file mode 100644
index 00000000..3a7b7e6b
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12/fixtures_test.pb.go
@@ -0,0 +1,163 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: fixtures_test.proto
+
+/*
+Package grpc is a generated protocol buffer package.
+
+It is generated from these files:
+ fixtures_test.proto
+
+It has these top-level messages:
+ FixtureRequest
+ FixtureReply
+*/
+package grpc
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import (
+ context "golang.org/x/net/context"
+ grpc1 "google.golang.org/grpc"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The request message containing the user's name.
+type FixtureRequest struct {
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+}
+
+func (m *FixtureRequest) Reset() { *m = FixtureRequest{} }
+func (m *FixtureRequest) String() string { return proto.CompactTextString(m) }
+func (*FixtureRequest) ProtoMessage() {}
+func (*FixtureRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *FixtureRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// The response message containing the greetings
+type FixtureReply struct {
+ Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"`
+}
+
+func (m *FixtureReply) Reset() { *m = FixtureReply{} }
+func (m *FixtureReply) String() string { return proto.CompactTextString(m) }
+func (*FixtureReply) ProtoMessage() {}
+func (*FixtureReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *FixtureReply) GetMessage() string {
+ if m != nil {
+ return m.Message
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*FixtureRequest)(nil), "grpc.FixtureRequest")
+ proto.RegisterType((*FixtureReply)(nil), "grpc.FixtureReply")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc1.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc1.SupportPackageIsVersion4
+
+// Client API for Fixture service
+
+type FixtureClient interface {
+ Ping(ctx context.Context, in *FixtureRequest, opts ...grpc1.CallOption) (*FixtureReply, error)
+}
+
+type fixtureClient struct {
+ cc *grpc1.ClientConn
+}
+
+func NewFixtureClient(cc *grpc1.ClientConn) FixtureClient {
+ return &fixtureClient{cc}
+}
+
+func (c *fixtureClient) Ping(ctx context.Context, in *FixtureRequest, opts ...grpc1.CallOption) (*FixtureReply, error) {
+ out := new(FixtureReply)
+ err := grpc1.Invoke(ctx, "/grpc.Fixture/Ping", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Server API for Fixture service
+
+type FixtureServer interface {
+ Ping(context.Context, *FixtureRequest) (*FixtureReply, error)
+}
+
+func RegisterFixtureServer(s *grpc1.Server, srv FixtureServer) {
+ s.RegisterService(&_Fixture_serviceDesc, srv)
+}
+
+func _Fixture_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc1.UnaryServerInterceptor) (interface{}, error) {
+ in := new(FixtureRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(FixtureServer).Ping(ctx, in)
+ }
+ info := &grpc1.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/grpc.Fixture/Ping",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(FixtureServer).Ping(ctx, req.(*FixtureRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Fixture_serviceDesc = grpc1.ServiceDesc{
+ ServiceName: "grpc.Fixture",
+ HandlerType: (*FixtureServer)(nil),
+ Methods: []grpc1.MethodDesc{
+ {
+ MethodName: "Ping",
+ Handler: _Fixture_Ping_Handler,
+ },
+ },
+ Streams: []grpc1.StreamDesc{},
+ Metadata: "fixtures_test.proto",
+}
+
+func init() { proto.RegisterFile("fixtures_test.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 177 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4e, 0xcb, 0xac, 0x28,
+ 0x29, 0x2d, 0x4a, 0x2d, 0x8e, 0x2f, 0x49, 0x2d, 0x2e, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17,
+ 0x62, 0x49, 0x2f, 0x2a, 0x48, 0x56, 0x52, 0xe1, 0xe2, 0x73, 0x83, 0x48, 0x06, 0xa5, 0x16, 0x96,
+ 0xa6, 0x16, 0x97, 0x08, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a,
+ 0x70, 0x06, 0x81, 0xd9, 0x4a, 0x1a, 0x5c, 0x3c, 0x70, 0x55, 0x05, 0x39, 0x95, 0x42, 0x12, 0x5c,
+ 0xec, 0xb9, 0xa9, 0xc5, 0xc5, 0x89, 0xe9, 0x30, 0x65, 0x30, 0xae, 0x91, 0x2d, 0x17, 0x3b, 0x54,
+ 0xa5, 0x90, 0x11, 0x17, 0x4b, 0x40, 0x66, 0x5e, 0xba, 0x90, 0x88, 0x1e, 0xc8, 0x26, 0x3d, 0x54,
+ 0x6b, 0xa4, 0x84, 0xd0, 0x44, 0x0b, 0x72, 0x2a, 0x95, 0x18, 0x9c, 0x74, 0xb8, 0x24, 0x33, 0xf3,
+ 0x21, 0x32, 0xa9, 0x15, 0x89, 0xb9, 0x05, 0x39, 0xa9, 0xc5, 0x7a, 0x20, 0x37, 0x83, 0x44, 0x9c,
+ 0x78, 0x43, 0x52, 0x8b, 0x4b, 0xdc, 0x83, 0x02, 0x9c, 0x03, 0x40, 0x1e, 0x08, 0x60, 0x4c, 0x62,
+ 0x03, 0xfb, 0xc4, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x18, 0x42, 0x90, 0x4d, 0xe0, 0x00, 0x00,
+ 0x00,
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12/fixtures_test.proto b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12/fixtures_test.proto
new file mode 100644
index 00000000..15a8aa5c
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12/fixtures_test.proto
@@ -0,0 +1,21 @@
+syntax = "proto3";
+
+option java_multiple_files = true;
+option java_package = "io.grpc.examples.testgrpc";
+option java_outer_classname = "TestGRPCProto";
+
+package grpc;
+
+service Fixture {
+ rpc Ping (FixtureRequest) returns (FixtureReply) {}
+}
+
+// The request message containing the user's name.
+message FixtureRequest {
+ string name = 1;
+}
+
+// The response message containing the greetings
+message FixtureReply {
+ string message = 1;
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12/grpc.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12/grpc.go
new file mode 100644
index 00000000..5314b08d
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12/grpc.go
@@ -0,0 +1,93 @@
+//go:generate protoc -I . fixtures_test.proto --go_out=plugins=grpc:.
+
+// Package grpc provides functions to trace the google.golang.org/grpc package v1.2.
+package grpc // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12"
+
+import (
+ "net"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/internal/grpcutil"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ context "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/peer"
+)
+
+// UnaryServerInterceptor will trace requests to the given grpc server.
+func UnaryServerInterceptor(opts ...InterceptorOption) grpc.UnaryServerInterceptor {
+ cfg := new(interceptorConfig)
+ defaults(cfg)
+ for _, fn := range opts {
+ fn(cfg)
+ }
+ if cfg.serviceName == "" {
+ cfg.serviceName = "grpc.server"
+ }
+ return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+ span, ctx := startSpanFromContext(ctx, info.FullMethod, cfg.serviceName)
+ resp, err := handler(ctx, req)
+ span.Finish(tracer.WithError(err))
+ return resp, err
+ }
+}
+
+func startSpanFromContext(ctx context.Context, method, service string) (ddtrace.Span, context.Context) {
+ opts := []ddtrace.StartSpanOption{
+ tracer.ServiceName(service),
+ tracer.ResourceName(method),
+ tracer.Tag(tagMethod, method),
+ tracer.SpanType(ext.AppTypeRPC),
+ }
+ md, _ := metadata.FromContext(ctx) // nil is ok
+ if sctx, err := tracer.Extract(grpcutil.MDCarrier(md)); err == nil {
+ opts = append(opts, tracer.ChildOf(sctx))
+ }
+ return tracer.StartSpanFromContext(ctx, "grpc.server", opts...)
+}
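+
+// Note: the metadata extraction above is what ties client and server spans
+// into a single trace. UnaryClientInterceptor below injects the span context
+// into the outgoing gRPC metadata, and the server side extracts it here and
+// makes its span a child of the client span.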
+
+// UnaryClientInterceptor will add tracing to a grpc client.
+func UnaryClientInterceptor(opts ...InterceptorOption) grpc.UnaryClientInterceptor {
+ cfg := new(interceptorConfig)
+ defaults(cfg)
+ for _, fn := range opts {
+ fn(cfg)
+ }
+ if cfg.serviceName == "" {
+ cfg.serviceName = "grpc.client"
+ }
+ return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ var (
+ span ddtrace.Span
+ p peer.Peer
+ )
+ span, ctx = tracer.StartSpanFromContext(ctx, "grpc.client",
+ tracer.Tag(tagMethod, method),
+ tracer.SpanType(ext.AppTypeRPC),
+ )
+ md, ok := metadata.FromContext(ctx)
+ if !ok {
+ md = metadata.MD{}
+ }
+ _ = tracer.Inject(span.Context(), grpcutil.MDCarrier(md))
+ ctx = metadata.NewContext(ctx, md)
+ opts = append(opts, grpc.Peer(&p))
+ err := invoker(ctx, method, req, reply, cc, opts...)
+ if p.Addr != nil {
+ addr := p.Addr.String()
+ host, port, err := net.SplitHostPort(addr)
+ if err == nil {
+ if host != "" {
+ span.SetTag(ext.TargetHost, host)
+ }
+ span.SetTag(ext.TargetPort, port)
+ }
+ }
+ span.SetTag(tagCode, grpc.Code(err).String())
+ span.Finish(tracer.WithError(err))
+ return err
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12/grpc_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12/grpc_test.go
new file mode 100644
index 00000000..a4527172
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12/grpc_test.go
@@ -0,0 +1,203 @@
+package grpc
+
+import (
+ "fmt"
+ "net"
+ "testing"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ "github.com/stretchr/testify/assert"
+ context "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+)
+
+func TestClient(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ rig, err := newRig(true)
+ if err != nil {
+ t.Fatalf("error setting up rig: %s", err)
+ }
+ defer rig.Close()
+ client := rig.client
+
+ span, ctx := tracer.StartSpanFromContext(context.Background(), "a", tracer.ServiceName("b"), tracer.ResourceName("c"))
+ resp, err := client.Ping(ctx, &FixtureRequest{Name: "pass"})
+ assert.Nil(err)
+ span.Finish()
+ assert.Equal(resp.Message, "passed")
+
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 3)
+
+ var serverSpan, clientSpan, rootSpan mocktracer.Span
+
+ for _, s := range spans {
+		// order of traces in buffer is not guaranteed
+ switch s.OperationName() {
+ case "grpc.server":
+ serverSpan = s
+ case "grpc.client":
+ clientSpan = s
+ case "a":
+ rootSpan = s
+ }
+ }
+
+ assert.NotNil(serverSpan)
+ assert.NotNil(clientSpan)
+ assert.NotNil(rootSpan)
+
+ assert.Equal(clientSpan.Tag(ext.TargetHost), "127.0.0.1")
+ assert.Equal(clientSpan.Tag(ext.TargetPort), rig.port)
+ assert.Equal(clientSpan.Tag(tagCode), codes.OK.String())
+ assert.Equal(clientSpan.TraceID(), rootSpan.TraceID())
+ assert.Equal(serverSpan.Tag(ext.ServiceName), "grpc")
+ assert.Equal(serverSpan.Tag(ext.ResourceName), "/grpc.Fixture/Ping")
+ assert.Equal(serverSpan.TraceID(), rootSpan.TraceID())
+}
+
+func TestChild(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ rig, err := newRig(false)
+ if err != nil {
+ t.Fatalf("error setting up rig: %s", err)
+ }
+ defer rig.Close()
+
+ client := rig.client
+ resp, err := client.Ping(context.Background(), &FixtureRequest{Name: "child"})
+ assert.Nil(err)
+ assert.Equal(resp.Message, "child")
+
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 2)
+
+ var serverSpan, clientSpan mocktracer.Span
+
+ for _, s := range spans {
+		// order of traces in buffer is not guaranteed
+ switch s.OperationName() {
+ case "grpc.server":
+ serverSpan = s
+ case "child":
+ clientSpan = s
+ }
+ }
+
+ assert.NotNil(clientSpan)
+ assert.Nil(clientSpan.Tag(ext.Error))
+ assert.Equal(clientSpan.Tag(ext.ServiceName), "grpc")
+ assert.Equal(clientSpan.Tag(ext.ResourceName), "child")
+ assert.True(clientSpan.FinishTime().Sub(clientSpan.StartTime()) > 0)
+
+ assert.NotNil(serverSpan)
+ assert.Nil(serverSpan.Tag(ext.Error))
+ assert.Equal(serverSpan.Tag(ext.ServiceName), "grpc")
+ assert.Equal(serverSpan.Tag(ext.ResourceName), "/grpc.Fixture/Ping")
+ assert.True(serverSpan.FinishTime().Sub(serverSpan.StartTime()) > 0)
+}
+
+func TestPass(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ rig, err := newRig(false)
+ if err != nil {
+ t.Fatalf("error setting up rig: %s", err)
+ }
+ defer rig.Close()
+
+ client := rig.client
+ resp, err := client.Ping(context.Background(), &FixtureRequest{Name: "pass"})
+ assert.Nil(err)
+ assert.Equal(resp.Message, "passed")
+
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 1)
+
+ s := spans[0]
+ assert.Nil(s.Tag(ext.Error))
+ assert.Equal(s.OperationName(), "grpc.server")
+ assert.Equal(s.Tag(ext.ServiceName), "grpc")
+ assert.Equal(s.Tag(ext.ResourceName), "/grpc.Fixture/Ping")
+ assert.Equal(s.Tag(ext.SpanType), ext.AppTypeRPC)
+ assert.True(s.FinishTime().Sub(s.StartTime()) > 0)
+}
+
+// fixtureServer is a dummy implementation of our gRPC FixtureServer interface.
+type fixtureServer struct{}
+
+func (s *fixtureServer) Ping(ctx context.Context, in *FixtureRequest) (*FixtureReply, error) {
+ switch {
+ case in.Name == "child":
+ span, _ := tracer.StartSpanFromContext(ctx, "child")
+ span.Finish()
+ return &FixtureReply{Message: "child"}, nil
+ case in.Name == "disabled":
+ if _, ok := tracer.SpanFromContext(ctx); ok {
+ panic("should be disabled")
+ }
+ return &FixtureReply{Message: "disabled"}, nil
+ }
+ return &FixtureReply{Message: "passed"}, nil
+}
+
+// ensure it's a fixtureServer
+var _ FixtureServer = &fixtureServer{}
+
+// rig contains all of the servers and connections we'd need for a
+// grpc integration test
+type rig struct {
+ server *grpc.Server
+ port string
+ listener net.Listener
+ conn *grpc.ClientConn
+ client FixtureClient
+}
+
+func (r *rig) Close() {
+ r.server.Stop()
+ r.conn.Close()
+ r.listener.Close()
+}
+
+func newRig(traceClient bool) (*rig, error) {
+ server := grpc.NewServer(grpc.UnaryInterceptor(UnaryServerInterceptor(WithServiceName("grpc"))))
+
+ RegisterFixtureServer(server, new(fixtureServer))
+
+ li, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return nil, err
+ }
+ _, port, _ := net.SplitHostPort(li.Addr().String())
+ // start our test fixtureServer.
+ go server.Serve(li)
+
+ opts := []grpc.DialOption{grpc.WithInsecure()}
+ if traceClient {
+ opts = append(opts, grpc.WithUnaryInterceptor(UnaryClientInterceptor(WithServiceName("grpc"))))
+ }
+ conn, err := grpc.Dial(li.Addr().String(), opts...)
+ if err != nil {
+ return nil, fmt.Errorf("error dialing: %s", err)
+ }
+ return &rig{
+ listener: li,
+ port: port,
+ server: server,
+ conn: conn,
+ client: NewFixtureClient(conn),
+ }, err
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12/option.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12/option.go
new file mode 100644
index 00000000..6fc609d5
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12/option.go
@@ -0,0 +1,18 @@
+package grpc
+
+type interceptorConfig struct{ serviceName string }
+
+// InterceptorOption represents an option that can be passed to the grpc unary
+// client and server interceptors.
+type InterceptorOption func(*interceptorConfig)
+
+func defaults(cfg *interceptorConfig) {
+ // cfg.serviceName default set in interceptor
+}
+
+// WithServiceName sets the given service name for the intercepted client.
+func WithServiceName(name string) InterceptorOption {
+ return func(cfg *interceptorConfig) {
+ cfg.serviceName = name
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12/tags.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12/tags.go
new file mode 100644
index 00000000..bbaf5c9e
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc.v12/tags.go
@@ -0,0 +1,7 @@
+package grpc
+
+// Tags used for gRPC
+const (
+ tagMethod = "grpc.method"
+ tagCode = "grpc.code"
+)
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/client.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/client.go
new file mode 100644
index 00000000..b8ec84a7
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/client.go
@@ -0,0 +1,176 @@
+package grpc
+
+import (
+ "net"
+
+ context "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/peer"
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/internal/grpcutil"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+type clientStream struct {
+ grpc.ClientStream
+ cfg *interceptorConfig
+ method string
+}
+
+func (cs *clientStream) RecvMsg(m interface{}) (err error) {
+ if cs.cfg.traceStreamMessages {
+ span, _ := startSpanFromContext(cs.Context(), cs.method, "grpc.message", cs.cfg.clientServiceName())
+ if p, ok := peer.FromContext(cs.Context()); ok {
+ setSpanTargetFromPeer(span, *p)
+ }
+		// finish via a closure so that the named return value err is
+		// evaluated at return time, not when the defer statement executes
+		defer func() { span.Finish(withStreamError(err)) }()
+ }
+ err = cs.ClientStream.RecvMsg(m)
+ return err
+}
+
+func (cs *clientStream) SendMsg(m interface{}) (err error) {
+ if cs.cfg.traceStreamMessages {
+ span, _ := startSpanFromContext(cs.Context(), cs.method, "grpc.message", cs.cfg.clientServiceName())
+ if p, ok := peer.FromContext(cs.Context()); ok {
+ setSpanTargetFromPeer(span, *p)
+ }
+		defer func() { span.Finish(withStreamError(err)) }()
+ }
+ err = cs.ClientStream.SendMsg(m)
+ return err
+}
+
+// StreamClientInterceptor returns a grpc.StreamClientInterceptor which will trace client
+// streams using the given set of options.
+func StreamClientInterceptor(opts ...InterceptorOption) grpc.StreamClientInterceptor {
+ cfg := new(interceptorConfig)
+ defaults(cfg)
+ for _, fn := range opts {
+ fn(cfg)
+ }
+ return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ var stream grpc.ClientStream
+ if cfg.traceStreamCalls {
+ span, err := doClientRequest(ctx, cfg, method, opts,
+ func(ctx context.Context, opts []grpc.CallOption) error {
+ var err error
+ stream, err = streamer(ctx, desc, cc, method, opts...)
+ return err
+ })
+ if err != nil {
+ span.Finish(withStreamError(err))
+ return nil, err
+ }
+
+ // the Peer call option only works with unary calls, so for streams
+ // we need to set it via FromContext
+ if p, ok := peer.FromContext(stream.Context()); ok {
+ setSpanTargetFromPeer(span, *p)
+ }
+
+ go func() {
+ <-stream.Context().Done()
+ span.Finish(withStreamError(stream.Context().Err()))
+ }()
+ } else {
+ // if call tracing is disabled, just call streamer, but still return
+ // a clientStream so that messages can be traced if enabled
+
+ // it's possible there's already a span on the context even though
+ // we're not tracing calls, so inject it if it's there
+ ctx = injectSpanIntoContext(ctx)
+
+ var err error
+ stream, err = streamer(ctx, desc, cc, method, opts...)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &clientStream{
+ ClientStream: stream,
+ cfg: cfg,
+ method: method,
+ }, nil
+ }
+}
+
+// UnaryClientInterceptor returns a grpc.UnaryClientInterceptor which will trace requests using
+// the given set of options.
+func UnaryClientInterceptor(opts ...InterceptorOption) grpc.UnaryClientInterceptor {
+ cfg := new(interceptorConfig)
+ defaults(cfg)
+ for _, fn := range opts {
+ fn(cfg)
+ }
+ return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ span, err := doClientRequest(ctx, cfg, method, opts,
+ func(ctx context.Context, opts []grpc.CallOption) error {
+ return invoker(ctx, method, req, reply, cc, opts...)
+ })
+ span.Finish(tracer.WithError(err))
+ return err
+ }
+}
+
+// doClientRequest starts a new span and invokes the handler with the new context
+// and options. The span should be finished by the caller.
+func doClientRequest(
+ ctx context.Context, cfg *interceptorConfig, method string, opts []grpc.CallOption,
+ handler func(ctx context.Context, opts []grpc.CallOption) error,
+) (ddtrace.Span, error) {
+ // inject the trace id into the metadata
+ span, ctx := startSpanFromContext(ctx, method, "grpc.client", cfg.clientServiceName())
+ ctx = injectSpanIntoContext(ctx)
+
+ // fill in the peer so we can add it to the tags
+ var p peer.Peer
+ opts = append(opts, grpc.Peer(&p))
+
+ err := handler(ctx, opts)
+
+ setSpanTargetFromPeer(span, p)
+
+ // set the code based on the error
+ span.SetTag(tagCode, grpc.Code(err).String())
+
+ return span, err
+}
+
+// setSpanTargetFromPeer sets the target tags in a span based on the gRPC peer.
+func setSpanTargetFromPeer(span ddtrace.Span, p peer.Peer) {
+ // if the peer was set, set the tags
+ if p.Addr != nil {
+ host, port, err := net.SplitHostPort(p.Addr.String())
+ if err == nil {
+ if host != "" {
+ span.SetTag(ext.TargetHost, host)
+ }
+ span.SetTag(ext.TargetPort, port)
+ }
+ }
+}
+
+// injectSpanIntoContext injects the span associated with a context as gRPC metadata.
+// If no span is associated with the context, it returns the original context unchanged.
+func injectSpanIntoContext(ctx context.Context) context.Context {
+ span, ok := tracer.SpanFromContext(ctx)
+ if !ok {
+ return ctx
+ }
+ md, ok := metadata.FromOutgoingContext(ctx)
+ if ok {
+ // we have to copy the metadata because it's not safe to modify
+ md = md.Copy()
+ } else {
+ md = metadata.MD{}
+ }
+ if err := tracer.Inject(span.Context(), grpcutil.MDCarrier(md)); err != nil {
+ // in practice this error should never really happen
+ grpclog.Warningf("ddtrace: failed to inject the span context into the gRPC metadata: %v", err)
+ }
+ return metadata.NewOutgoingContext(ctx, md)
+}
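+
+// For illustration: with the tracer's default text-map propagator, the metadata
+// written above typically carries entries such as "x-datadog-trace-id" and
+// "x-datadog-parent-id", which the server-side interceptor reads back via
+// startSpanFromContext; the exact header names depend on the configured
+// propagator.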
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/example_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/example_test.go
new file mode 100644
index 00000000..bc0df68a
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/example_test.go
@@ -0,0 +1,48 @@
+package grpc_test
+
+import (
+ "log"
+ "net"
+
+ grpctrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc"
+
+ "google.golang.org/grpc"
+)
+
+func Example_client() {
+ // Create the client interceptor using the grpc trace package.
+ si := grpctrace.StreamClientInterceptor(grpctrace.WithServiceName("my-grpc-client"))
+ ui := grpctrace.UnaryClientInterceptor(grpctrace.WithServiceName("my-grpc-client"))
+
+ // Dial using the created interceptors...
+ conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure(),
+ grpc.WithStreamInterceptor(si), grpc.WithUnaryInterceptor(ui))
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer conn.Close()
+
+ // And continue using the connection as normal.
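+ //
+ // For instance (a sketch; assumes a started tracer and a generated client):
+ //
+ //	span, ctx := tracer.StartSpanFromContext(context.Background(), "parent.op")
+ //	client.Ping(ctx, req) // hypothetical RPC; traced by the interceptors above
+ //	span.Finish()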
+}
+
+func Example_server() {
+ // Create a listener for the server.
+ ln, err := net.Listen("tcp", ":50051")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Create the server interceptor using the grpc trace package.
+ si := grpctrace.StreamServerInterceptor(grpctrace.WithServiceName("my-grpc-server"))
+ ui := grpctrace.UnaryServerInterceptor(grpctrace.WithServiceName("my-grpc-server"))
+
+ // Initialize the grpc server as normal, using the tracing interceptor.
+ s := grpc.NewServer(grpc.StreamInterceptor(si), grpc.UnaryInterceptor(ui))
+
+ // ... register your services
+
+ // Start serving incoming connections.
+ if err := s.Serve(ln); err != nil {
+ log.Fatalf("failed to serve: %v", err)
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/fixtures_test.pb.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/fixtures_test.pb.go
new file mode 100644
index 00000000..abf28a01
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/fixtures_test.pb.go
@@ -0,0 +1,230 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: fixtures_test.proto
+
+/*
+Package grpc is a generated protocol buffer package.
+
+It is generated from these files:
+ fixtures_test.proto
+
+It has these top-level messages:
+ FixtureRequest
+ FixtureReply
+*/
+package grpc
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+import (
+ context "golang.org/x/net/context"
+ grpc1 "google.golang.org/grpc"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The request message containing the user's name.
+type FixtureRequest struct {
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+}
+
+func (m *FixtureRequest) Reset() { *m = FixtureRequest{} }
+func (m *FixtureRequest) String() string { return proto.CompactTextString(m) }
+func (*FixtureRequest) ProtoMessage() {}
+func (*FixtureRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *FixtureRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// The response message containing the greetings
+type FixtureReply struct {
+ Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"`
+}
+
+func (m *FixtureReply) Reset() { *m = FixtureReply{} }
+func (m *FixtureReply) String() string { return proto.CompactTextString(m) }
+func (*FixtureReply) ProtoMessage() {}
+func (*FixtureReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *FixtureReply) GetMessage() string {
+ if m != nil {
+ return m.Message
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterType((*FixtureRequest)(nil), "grpc.FixtureRequest")
+ proto.RegisterType((*FixtureReply)(nil), "grpc.FixtureReply")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc1.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc1.SupportPackageIsVersion4
+
+// Client API for Fixture service
+
+type FixtureClient interface {
+ Ping(ctx context.Context, in *FixtureRequest, opts ...grpc1.CallOption) (*FixtureReply, error)
+ StreamPing(ctx context.Context, opts ...grpc1.CallOption) (Fixture_StreamPingClient, error)
+}
+
+type fixtureClient struct {
+ cc *grpc1.ClientConn
+}
+
+func NewFixtureClient(cc *grpc1.ClientConn) FixtureClient {
+ return &fixtureClient{cc}
+}
+
+func (c *fixtureClient) Ping(ctx context.Context, in *FixtureRequest, opts ...grpc1.CallOption) (*FixtureReply, error) {
+ out := new(FixtureReply)
+ err := grpc1.Invoke(ctx, "/grpc.Fixture/Ping", in, out, c.cc, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *fixtureClient) StreamPing(ctx context.Context, opts ...grpc1.CallOption) (Fixture_StreamPingClient, error) {
+ stream, err := grpc1.NewClientStream(ctx, &_Fixture_serviceDesc.Streams[0], c.cc, "/grpc.Fixture/StreamPing", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &fixtureStreamPingClient{stream}
+ return x, nil
+}
+
+type Fixture_StreamPingClient interface {
+ Send(*FixtureRequest) error
+ Recv() (*FixtureReply, error)
+ grpc1.ClientStream
+}
+
+type fixtureStreamPingClient struct {
+ grpc1.ClientStream
+}
+
+func (x *fixtureStreamPingClient) Send(m *FixtureRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *fixtureStreamPingClient) Recv() (*FixtureReply, error) {
+ m := new(FixtureReply)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// Server API for Fixture service
+
+type FixtureServer interface {
+ Ping(context.Context, *FixtureRequest) (*FixtureReply, error)
+ StreamPing(Fixture_StreamPingServer) error
+}
+
+func RegisterFixtureServer(s *grpc1.Server, srv FixtureServer) {
+ s.RegisterService(&_Fixture_serviceDesc, srv)
+}
+
+func _Fixture_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc1.UnaryServerInterceptor) (interface{}, error) {
+ in := new(FixtureRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(FixtureServer).Ping(ctx, in)
+ }
+ info := &grpc1.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/grpc.Fixture/Ping",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(FixtureServer).Ping(ctx, req.(*FixtureRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Fixture_StreamPing_Handler(srv interface{}, stream grpc1.ServerStream) error {
+ return srv.(FixtureServer).StreamPing(&fixtureStreamPingServer{stream})
+}
+
+type Fixture_StreamPingServer interface {
+ Send(*FixtureReply) error
+ Recv() (*FixtureRequest, error)
+ grpc1.ServerStream
+}
+
+type fixtureStreamPingServer struct {
+ grpc1.ServerStream
+}
+
+func (x *fixtureStreamPingServer) Send(m *FixtureReply) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *fixtureStreamPingServer) Recv() (*FixtureRequest, error) {
+ m := new(FixtureRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+var _Fixture_serviceDesc = grpc1.ServiceDesc{
+ ServiceName: "grpc.Fixture",
+ HandlerType: (*FixtureServer)(nil),
+ Methods: []grpc1.MethodDesc{
+ {
+ MethodName: "Ping",
+ Handler: _Fixture_Ping_Handler,
+ },
+ },
+ Streams: []grpc1.StreamDesc{
+ {
+ StreamName: "StreamPing",
+ Handler: _Fixture_StreamPing_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "fixtures_test.proto",
+}
+
+func init() { proto.RegisterFile("fixtures_test.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 194 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4e, 0xcb, 0xac, 0x28,
+ 0x29, 0x2d, 0x4a, 0x2d, 0x8e, 0x2f, 0x49, 0x2d, 0x2e, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17,
+ 0x62, 0x49, 0x2f, 0x2a, 0x48, 0x56, 0x52, 0xe1, 0xe2, 0x73, 0x83, 0x48, 0x06, 0xa5, 0x16, 0x96,
+ 0xa6, 0x16, 0x97, 0x08, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a,
+ 0x70, 0x06, 0x81, 0xd9, 0x4a, 0x1a, 0x5c, 0x3c, 0x70, 0x55, 0x05, 0x39, 0x95, 0x42, 0x12, 0x5c,
+ 0xec, 0xb9, 0xa9, 0xc5, 0xc5, 0x89, 0xe9, 0x30, 0x65, 0x30, 0xae, 0x51, 0x35, 0x17, 0x3b, 0x54,
+ 0xa5, 0x90, 0x11, 0x17, 0x4b, 0x40, 0x66, 0x5e, 0xba, 0x90, 0x88, 0x1e, 0xc8, 0x26, 0x3d, 0x54,
+ 0x6b, 0xa4, 0x84, 0xd0, 0x44, 0x0b, 0x72, 0x2a, 0x95, 0x18, 0x84, 0x6c, 0xb8, 0xb8, 0x82, 0x4b,
+ 0x8a, 0x52, 0x13, 0x73, 0x49, 0xd5, 0xa9, 0xc1, 0x68, 0xc0, 0xe8, 0xa4, 0xc3, 0x25, 0x99, 0x99,
+ 0x0f, 0x91, 0x4d, 0xad, 0x48, 0xcc, 0x2d, 0xc8, 0x49, 0x2d, 0xd6, 0x03, 0xf9, 0x18, 0x24, 0xe2,
+ 0xc4, 0x1b, 0x92, 0x5a, 0x5c, 0xe2, 0x1e, 0x14, 0xe0, 0x1c, 0x00, 0xf2, 0x7e, 0x00, 0x63, 0x12,
+ 0x1b, 0x38, 0x1c, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x6f, 0xdd, 0xb5, 0x70, 0x1e, 0x01,
+ 0x00, 0x00,
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/fixtures_test.proto b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/fixtures_test.proto
new file mode 100644
index 00000000..97f5fe6c
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/fixtures_test.proto
@@ -0,0 +1,22 @@
+syntax = "proto3";
+
+option java_multiple_files = true;
+option java_package = "io.grpc.examples.testgrpc";
+option java_outer_classname = "TestGRPCProto";
+
+package grpc;
+
+service Fixture {
+ rpc Ping (FixtureRequest) returns (FixtureReply) {}
+ rpc StreamPing(stream FixtureRequest) returns (stream FixtureReply) {}
+}
+
+// The request message containing the user's name.
+message FixtureRequest {
+ string name = 1;
+}
+
+// The response message containing the greetings
+message FixtureReply {
+ string message = 1;
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/grpc.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/grpc.go
new file mode 100644
index 00000000..bb753df8
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/grpc.go
@@ -0,0 +1,41 @@
+//go:generate protoc -I . fixtures_test.proto --go_out=plugins=grpc:.
+
+// Package grpc provides functions to trace the google.golang.org/grpc package v1.2.
+package grpc // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc"
+
+import (
+ "io"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/internal/grpcutil"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ context "golang.org/x/net/context"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+)
+
+func startSpanFromContext(ctx context.Context, method, operation, service string) (ddtrace.Span, context.Context) {
+ opts := []ddtrace.StartSpanOption{
+ tracer.ServiceName(service),
+ tracer.ResourceName(method),
+ tracer.Tag(tagMethod, method),
+ tracer.SpanType(ext.AppTypeRPC),
+ }
+ md, _ := metadata.FromIncomingContext(ctx) // nil is ok
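+ // if the incoming metadata carries a span context (injected by a traced
+ // client), parent the new span to it so both sides share one trace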
+ if sctx, err := tracer.Extract(grpcutil.MDCarrier(md)); err == nil {
+ opts = append(opts, tracer.ChildOf(sctx))
+ }
+ return tracer.StartSpanFromContext(ctx, operation, opts...)
+}
+
+// withStreamError returns a tracer.WithError finish option, disregarding OK, EOF and Canceled errors.
+func withStreamError(err error) tracer.FinishOption {
+ errcode := status.Code(err)
+ if err == io.EOF || errcode == codes.Canceled || errcode == codes.OK || err == context.Canceled {
+ err = nil
+ }
+ return tracer.WithError(err)
+}
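+
+// Rationale: io.EOF and codes.Canceled signal normal stream shutdown (the
+// client closing its send side, contexts being torn down), so reporting them
+// would mark every healthy stream as failed.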
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/grpc_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/grpc_test.go
new file mode 100644
index 00000000..f98eee41
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/grpc_test.go
@@ -0,0 +1,423 @@
+package grpc
+
+import (
+ "fmt"
+ "net"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ "github.com/stretchr/testify/assert"
+ context "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+)
+
+func TestClient(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ rig, err := newRig(true)
+ if err != nil {
+ t.Fatalf("error setting up rig: %s", err)
+ }
+ defer rig.Close()
+ client := rig.client
+
+ span, ctx := tracer.StartSpanFromContext(context.Background(), "a", tracer.ServiceName("b"), tracer.ResourceName("c"))
+
+ resp, err := client.Ping(ctx, &FixtureRequest{Name: "pass"})
+ assert.Nil(err)
+ span.Finish()
+ assert.Equal(resp.Message, "passed")
+
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 3)
+
+ var serverSpan, clientSpan, rootSpan mocktracer.Span
+
+ for _, s := range spans {
+ // order of traces in buffer is not guaranteed
+ switch s.OperationName() {
+ case "grpc.server":
+ serverSpan = s
+ case "grpc.client":
+ clientSpan = s
+ case "a":
+ rootSpan = s
+ }
+ }
+
+ assert.NotNil(serverSpan)
+ assert.NotNil(clientSpan)
+ assert.NotNil(rootSpan)
+
+ assert.Equal(clientSpan.Tag(ext.TargetHost), "127.0.0.1")
+ assert.Equal(clientSpan.Tag(ext.TargetPort), rig.port)
+ assert.Equal(clientSpan.Tag(tagCode), codes.OK.String())
+ assert.Equal(clientSpan.TraceID(), rootSpan.TraceID())
+ assert.Equal(serverSpan.Tag(ext.ServiceName), "grpc")
+ assert.Equal(serverSpan.Tag(ext.ResourceName), "/grpc.Fixture/Ping")
+ assert.Equal(serverSpan.TraceID(), rootSpan.TraceID())
+
+}
+
+func TestStreaming(t *testing.T) {
+ // creates a stream, then sends/recvs two pings, then closes the stream
+ runPings := func(t *testing.T, ctx context.Context, client FixtureClient) {
+ stream, err := client.StreamPing(ctx)
+ assert.NoError(t, err)
+
+ for i := 0; i < 2; i++ {
+ err = stream.Send(&FixtureRequest{Name: "pass"})
+ assert.NoError(t, err)
+
+ resp, err := stream.Recv()
+ assert.NoError(t, err)
+ assert.Equal(t, resp.Message, "passed")
+ }
+ stream.CloseSend()
+ // to flush the spans
+ stream.Recv()
+ }
+
+ checkSpans := func(t *testing.T, rig *rig, spans []mocktracer.Span) {
+ var rootSpan mocktracer.Span
+ for _, span := range spans {
+ if span.OperationName() == "a" {
+ rootSpan = span
+ }
+ }
+ assert.NotNil(t, rootSpan)
+
+ for _, span := range spans {
+ if span != rootSpan {
+ assert.Equal(t, rootSpan.TraceID(), span.TraceID(),
+ "expected span to to have its trace id set to the root trace id (%d): %v",
+ rootSpan.TraceID(), span)
+ assert.Equal(t, ext.AppTypeRPC, span.Tag(ext.SpanType),
+ "expected span type to be rpc in span: %v",
+ span)
+ assert.Equal(t, "grpc", span.Tag(ext.ServiceName),
+ "expected service name to be grpc in span: %v",
+ span)
+ }
+
+ switch span.OperationName() {
+ case "grpc.client":
+ // code is only set for the call, not the send/recv messages
+ assert.Equal(t, codes.OK.String(), span.Tag(tagCode),
+ "expected grpc code to be set in span: %v", span)
+ assert.Equal(t, "127.0.0.1", span.Tag(ext.TargetHost),
+ "expected target host tag to be set in span: %v", span)
+ assert.Equal(t, rig.port, span.Tag(ext.TargetPort),
+ "expected target host port to be set in span: %v", span)
+ fallthrough
+ case "grpc.server", "grpc.message":
+ assert.Equal(t, "/grpc.Fixture/StreamPing", span.Tag(ext.ResourceName),
+ "expected resource name to be set in span: %v", span)
+ assert.Equal(t, "/grpc.Fixture/StreamPing", span.Tag(tagMethod),
+ "expected grpc method name to be set in span: %v", span)
+ }
+ }
+ }
+
+ t.Run("All", func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ rig, err := newRig(true)
+ if err != nil {
+ t.Fatalf("error setting up rig: %s", err)
+ }
+ defer rig.Close()
+
+ span, ctx := tracer.StartSpanFromContext(context.Background(), "a",
+ tracer.ServiceName("b"),
+ tracer.ResourceName("c"))
+
+ runPings(t, ctx, rig.client)
+
+ span.Finish()
+
+ waitForSpans(mt, 13, 5*time.Second)
+
+ spans := mt.FinishedSpans()
+ assert.Len(t, spans, 13,
+ "expected 4 client messages + 4 server messages + 1 server call + 1 client call + 1 error from empty recv + 1 parent ctx, but got %v",
+ len(spans))
+ checkSpans(t, rig, spans)
+ })
+
+ t.Run("CallsOnly", func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ rig, err := newRig(true, WithStreamMessages(false))
+ if err != nil {
+ t.Fatalf("error setting up rig: %s", err)
+ }
+ defer rig.Close()
+
+ span, ctx := tracer.StartSpanFromContext(context.Background(), "a",
+ tracer.ServiceName("b"),
+ tracer.ResourceName("c"))
+
+ runPings(t, ctx, rig.client)
+
+ span.Finish()
+
+ waitForSpans(mt, 3, 5*time.Second)
+
+ spans := mt.FinishedSpans()
+ assert.Len(t, spans, 3,
+ "expected 1 server call + 1 client call + 1 parent ctx, but got %v",
+ len(spans))
+ checkSpans(t, rig, spans)
+ })
+
+ t.Run("MessagesOnly", func(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ rig, err := newRig(true, WithStreamCalls(false))
+ if err != nil {
+ t.Fatalf("error setting up rig: %s", err)
+ }
+ defer rig.Close()
+
+ span, ctx := tracer.StartSpanFromContext(context.Background(), "a",
+ tracer.ServiceName("b"),
+ tracer.ResourceName("c"))
+
+ runPings(t, ctx, rig.client)
+
+ span.Finish()
+
+ waitForSpans(mt, 11, 5*time.Second)
+
+ spans := mt.FinishedSpans()
+ assert.Len(t, spans, 11,
+ "expected 4 client messages + 4 server messages + 1 error from empty recv + 1 parent ctx, but got %v",
+ len(spans))
+ checkSpans(t, rig, spans)
+ })
+}
+
+func TestChild(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ rig, err := newRig(false)
+ if err != nil {
+ t.Fatalf("error setting up rig: %s", err)
+ }
+ defer rig.Close()
+
+ client := rig.client
+ resp, err := client.Ping(context.Background(), &FixtureRequest{Name: "child"})
+ assert.Nil(err)
+ assert.Equal(resp.Message, "child")
+
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 2)
+
+ var serverSpan, clientSpan mocktracer.Span
+
+ for _, s := range spans {
+ // order of traces in buffer is not guaranteed
+ switch s.OperationName() {
+ case "grpc.server":
+ serverSpan = s
+ case "child":
+ clientSpan = s
+ }
+ }
+
+ assert.NotNil(clientSpan)
+ assert.Nil(clientSpan.Tag(ext.Error))
+ assert.Equal(clientSpan.Tag(ext.ServiceName), "grpc")
+ assert.Equal(clientSpan.Tag(ext.ResourceName), "child")
+ assert.True(clientSpan.FinishTime().Sub(clientSpan.StartTime()) > 0)
+
+ assert.NotNil(serverSpan)
+ assert.Nil(serverSpan.Tag(ext.Error))
+ assert.Equal(serverSpan.Tag(ext.ServiceName), "grpc")
+ assert.Equal(serverSpan.Tag(ext.ResourceName), "/grpc.Fixture/Ping")
+ assert.True(serverSpan.FinishTime().Sub(serverSpan.StartTime()) > 0)
+}
+
+func TestPass(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ rig, err := newRig(false)
+ if err != nil {
+ t.Fatalf("error setting up rig: %s", err)
+ }
+ defer rig.Close()
+
+ client := rig.client
+
+ resp, err := client.Ping(context.Background(), &FixtureRequest{Name: "pass"})
+ assert.Nil(err)
+ assert.Equal(resp.Message, "passed")
+
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 1)
+
+ s := spans[0]
+ assert.Nil(s.Tag(ext.Error))
+ assert.Equal(s.OperationName(), "grpc.server")
+ assert.Equal(s.Tag(ext.ServiceName), "grpc")
+ assert.Equal(s.Tag(ext.ResourceName), "/grpc.Fixture/Ping")
+ assert.Equal(s.Tag(ext.SpanType), ext.AppTypeRPC)
+ assert.True(s.FinishTime().Sub(s.StartTime()) > 0)
+}
+
+func TestPreservesMetadata(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ rig, err := newRig(true)
+ if err != nil {
+ t.Fatalf("error setting up rig: %s", err)
+ }
+ defer rig.Close()
+
+ ctx := context.Background()
+ ctx = metadata.AppendToOutgoingContext(ctx, "test-key", "test-value")
+ span, ctx := tracer.StartSpanFromContext(ctx, "x", tracer.ServiceName("y"), tracer.ResourceName("z"))
+ rig.client.Ping(ctx, &FixtureRequest{Name: "pass"})
+ span.Finish()
+
+ md := rig.fixtureServer.lastRequestMetadata.Load().(metadata.MD)
+ assert.Equal(t, []string{"test-value"}, md.Get("test-key"),
+ "existing metadata should be preserved")
+}
+
+// fixtureServer is a dummy implementation of the gRPC FixtureServer interface.
+type fixtureServer struct {
+ lastRequestMetadata atomic.Value
+}
+
+func (s *fixtureServer) StreamPing(srv Fixture_StreamPingServer) error {
+ for {
+ msg, err := srv.Recv()
+ if err != nil {
+ return err
+ }
+
+ reply, err := s.Ping(srv.Context(), msg)
+ if err != nil {
+ return err
+ }
+
+ err = srv.Send(reply)
+ if err != nil {
+ return err
+ }
+ }
+}
+
+func (s *fixtureServer) Ping(ctx context.Context, in *FixtureRequest) (*FixtureReply, error) {
+ if md, ok := metadata.FromIncomingContext(ctx); ok {
+ s.lastRequestMetadata.Store(md)
+ }
+ switch {
+ case in.Name == "child":
+ span, _ := tracer.StartSpanFromContext(ctx, "child")
+ span.Finish()
+ return &FixtureReply{Message: "child"}, nil
+ case in.Name == "disabled":
+ if _, ok := tracer.SpanFromContext(ctx); ok {
+ panic("should be disabled")
+ }
+ return &FixtureReply{Message: "disabled"}, nil
+ }
+ return &FixtureReply{Message: "passed"}, nil
+}
+
+// compile-time check that fixtureServer implements FixtureServer
+var _ FixtureServer = &fixtureServer{}
+
+// rig contains all of the servers and connections we'd need for a
+// grpc integration test
+type rig struct {
+ fixtureServer *fixtureServer
+ server *grpc.Server
+ port string
+ listener net.Listener
+ conn *grpc.ClientConn
+ client FixtureClient
+}
+
+func (r *rig) Close() {
+ r.server.Stop()
+ r.conn.Close()
+ r.listener.Close()
+}
+
+func newRig(traceClient bool, interceptorOpts ...InterceptorOption) (*rig, error) {
+ interceptorOpts = append([]InterceptorOption{WithServiceName("grpc")}, interceptorOpts...)
+
+ server := grpc.NewServer(
+ grpc.UnaryInterceptor(UnaryServerInterceptor(interceptorOpts...)),
+ grpc.StreamInterceptor(StreamServerInterceptor(interceptorOpts...)),
+ )
+
+ fixtureServer := new(fixtureServer)
+ RegisterFixtureServer(server, fixtureServer)
+
+ li, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ return nil, err
+ }
+ _, port, _ := net.SplitHostPort(li.Addr().String())
+ // start our test fixtureServer.
+ go server.Serve(li)
+
+ opts := []grpc.DialOption{grpc.WithInsecure()}
+ if traceClient {
+ opts = append(opts,
+ grpc.WithUnaryInterceptor(UnaryClientInterceptor(interceptorOpts...)),
+ grpc.WithStreamInterceptor(StreamClientInterceptor(interceptorOpts...)),
+ )
+ }
+ conn, err := grpc.Dial(li.Addr().String(), opts...)
+ if err != nil {
+ return nil, fmt.Errorf("error dialing: %s", err)
+ }
+ return &rig{
+ fixtureServer: fixtureServer,
+ listener: li,
+ port: port,
+ server: server,
+ conn: conn,
+ client: NewFixtureClient(conn),
+ }, err
+}
+
+// waitForSpans polls the mock tracer until the expected number of spans
+// appears or maxWait elapses
+func waitForSpans(mt mocktracer.Tracer, sz int, maxWait time.Duration) {
+ ctx, cancel := context.WithTimeout(context.Background(), maxWait)
+ defer cancel()
+
+ for len(mt.FinishedSpans()) < sz {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+ time.Sleep(time.Millisecond * 100)
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/option.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/option.go
new file mode 100644
index 00000000..de4b8a4f
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/option.go
@@ -0,0 +1,51 @@
+package grpc
+
+type interceptorConfig struct {
+ serviceName string
+ traceStreamCalls, traceStreamMessages bool
+}
+
+func (cfg *interceptorConfig) serverServiceName() string {
+ if cfg.serviceName == "" {
+ return "grpc.server"
+ }
+ return cfg.serviceName
+}
+
+func (cfg *interceptorConfig) clientServiceName() string {
+ if cfg.serviceName == "" {
+ return "grpc.client"
+ }
+ return cfg.serviceName
+}
+
+// InterceptorOption represents an option that can be passed to the gRPC client
+// and server interceptors, both unary and streaming.
+type InterceptorOption func(*interceptorConfig)
+
+func defaults(cfg *interceptorConfig) {
+ // cfg.serviceName defaults are set in interceptors
+ cfg.traceStreamCalls = true
+ cfg.traceStreamMessages = true
+}
+
+// WithServiceName sets the given service name for the intercepted client.
+func WithServiceName(name string) InterceptorOption {
+ return func(cfg *interceptorConfig) {
+ cfg.serviceName = name
+ }
+}
+
+// WithStreamCalls enables or disables tracing of streaming calls.
+func WithStreamCalls(enabled bool) InterceptorOption {
+ return func(cfg *interceptorConfig) {
+ cfg.traceStreamCalls = enabled
+ }
+}
+
+// WithStreamMessages enables or disables tracing of streaming messages.
+func WithStreamMessages(enabled bool) InterceptorOption {
+ return func(cfg *interceptorConfig) {
+ cfg.traceStreamMessages = enabled
+ }
+}
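+
+// Illustrative composition of the options above (a sketch, not upstream API
+// documentation): keep call-level spans but skip per-message spans.
+//
+//	si := StreamClientInterceptor(
+//		WithServiceName("my-grpc-client"),
+//		WithStreamMessages(false),
+//	)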
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/server.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/server.go
new file mode 100644
index 00000000..0c9a249c
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/server.go
@@ -0,0 +1,93 @@
+package grpc
+
+import (
+ context "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+type serverStream struct {
+ grpc.ServerStream
+ cfg *interceptorConfig
+ method string
+ ctx context.Context
+}
+
+// Context returns the ServerStream Context.
+//
+// One subtle difference between the server stream and the client stream is the
+// order the contexts are created. In the client stream we pass the context to
+// the streamer function, which means the ClientStream.Context() derives from
+// the span context, so we want to return that. However with the ServerStream
+// the span context derives from the ServerStream.Context, so we want to return
+// the span context instead.
+func (ss *serverStream) Context() context.Context {
+ return ss.ctx
+}
+
+func (ss *serverStream) RecvMsg(m interface{}) (err error) {
+ if ss.cfg.traceStreamMessages {
+ span, _ := startSpanFromContext(ss.ctx, ss.method, "grpc.message", ss.cfg.serverServiceName())
+ // read the named return err after the recv, not as nil at defer time
+ defer func() { span.Finish(withStreamError(err)) }()
+ }
+ err = ss.ServerStream.RecvMsg(m)
+ return err
+}
+
+func (ss *serverStream) SendMsg(m interface{}) (err error) {
+ if ss.cfg.traceStreamMessages {
+ span, _ := startSpanFromContext(ss.ctx, ss.method, "grpc.message", ss.cfg.serverServiceName())
+ defer func() { span.Finish(withStreamError(err)) }()
+ }
+ err = ss.ServerStream.SendMsg(m)
+ return err
+}
+
+// StreamServerInterceptor will trace streaming requests to the given gRPC server.
+func StreamServerInterceptor(opts ...InterceptorOption) grpc.StreamServerInterceptor {
+ cfg := new(interceptorConfig)
+ defaults(cfg)
+ for _, fn := range opts {
+ fn(cfg)
+ }
+ return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) (err error) {
+ ctx := ss.Context()
+
+ // if we've enabled call tracing, create a span
+ if cfg.traceStreamCalls {
+ var span ddtrace.Span
+ span, ctx = startSpanFromContext(ctx, info.FullMethod, "grpc.server", cfg.serverServiceName())
+ defer span.Finish(withStreamError(err))
+ }
+
+ // call the original handler with a new stream, which traces each send
+ // and recv if message tracing is enabled
+ err = handler(srv, &serverStream{
+ ServerStream: ss,
+ cfg: cfg,
+ method: info.FullMethod,
+ ctx: ctx,
+ })
+
+ return err
+ }
+}
+
+// UnaryServerInterceptor will trace requests to the given grpc server.
+func UnaryServerInterceptor(opts ...InterceptorOption) grpc.UnaryServerInterceptor {
+ cfg := new(interceptorConfig)
+ defaults(cfg)
+ for _, fn := range opts {
+ fn(cfg)
+ }
+ return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+ span, ctx := startSpanFromContext(ctx, info.FullMethod, "grpc.server", cfg.serverServiceName())
+ resp, err := handler(ctx, req)
+ span.Finish(tracer.WithError(err))
+ return resp, err
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/tags.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/tags.go
new file mode 100644
index 00000000..bbaf5c9e
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/grpc/tags.go
@@ -0,0 +1,7 @@
+package grpc
+
+// Tags used for gRPC
+const (
+ tagMethod = "grpc.method"
+ tagCode = "grpc.code"
+)
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/internal/grpcutil/mdcarrier.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/internal/grpcutil/mdcarrier.go
new file mode 100644
index 00000000..0f511507
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/internal/grpcutil/mdcarrier.go
@@ -0,0 +1,44 @@
+package grpcutil // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/internal/grpcutil"
+
+import (
+ "strings"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ "google.golang.org/grpc/metadata"
+)
+
+// MDCarrier implements tracer.TextMapWriter and tracer.TextMapReader on top
+// of gRPC's metadata, allowing it to be used as a span context carrier for
+// distributed tracing.
+type MDCarrier metadata.MD
+
+var _ tracer.TextMapWriter = (*MDCarrier)(nil)
+var _ tracer.TextMapReader = (*MDCarrier)(nil)
+
+// Get will return the first entry in the metadata at the given key.
+func (mdc MDCarrier) Get(key string) string {
+ if m := mdc[key]; len(m) > 0 {
+ return m[0]
+ }
+ return ""
+}
+
+// Set will add the given value to the values found at key. Key will be lowercased to match
+// the metadata implementation.
+func (mdc MDCarrier) Set(key, val string) {
+ k := strings.ToLower(key) // as per google.golang.org/grpc/metadata/metadata.go
+ mdc[k] = append(mdc[k], val)
+}
+
+// ForeachKey will iterate over all key/value pairs in the metadata.
+func (mdc MDCarrier) ForeachKey(handler func(key, val string) error) error {
+ for k, vs := range mdc {
+ for _, v := range vs {
+ if err := handler(k, v); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
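+
+// A minimal round-trip sketch, assuming a started tracer and an active span:
+//
+//	md := metadata.MD{}
+//	_ = tracer.Inject(span.Context(), MDCarrier(md)) // write the span context into md
+//	if sctx, err := tracer.Extract(MDCarrier(md)); err == nil {
+//		child := tracer.StartSpan("child.op", tracer.ChildOf(sctx))
+//		defer child.Finish()
+//	}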
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/internal/grpcutil/mdcarrier_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/internal/grpcutil/mdcarrier_test.go
new file mode 100644
index 00000000..fba6797e
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/google.golang.org/internal/grpcutil/mdcarrier_test.go
@@ -0,0 +1,57 @@
+package grpcutil
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "google.golang.org/grpc/metadata"
+)
+
+func TestMDCarrierSet(t *testing.T) {
+ assert := assert.New(t)
+ md := metadata.MD{}
+ mdc := MDCarrier(md)
+
+ // add one item
+ mdc.Set("k1", "v1")
+ assert.Len(md, 1)
+ assert.Len(md["k1"], 1)
+ assert.Equal("v1", md["k1"][0])
+
+ // add a second value
+ mdc.Set("k1", "v2")
+ assert.Len(md, 1)
+ assert.Len(md["k1"], 2)
+ assert.Equal("v1", md["k1"][0])
+ assert.Equal("v2", md["k1"][1])
+
+ // add a second item
+ mdc.Set("k2", "v21")
+ assert.Len(md, 2)
+ assert.Len(md["k2"], 1)
+ assert.Equal("v21", md["k2"][0])
+}
+
+func TestMDCarrierGet(t *testing.T) {
+ assert := assert.New(t)
+ md := metadata.Pairs("k1", "v1", "k2", "v2", "k2", "v22")
+ mdc := MDCarrier(md)
+
+ assert.Equal("v1", mdc.Get("k1"))
+ assert.Equal("v2", mdc.Get("k2"))
+}
+
+func TestMDCarrierForeachKey(t *testing.T) {
+ want := metadata.Pairs("k1", "v1", "k2", "v2", "k2", "v22")
+ got := metadata.MD{}
+ wantc := MDCarrier(want)
+ gotc := MDCarrier(got)
+
+ err := wantc.ForeachKey(func(k, v string) error {
+ gotc.Set(k, v)
+ return nil
+ })
+
+ assert.Nil(t, err)
+ assert.Equal(t, want, got)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gorilla/mux/example_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gorilla/mux/example_test.go
new file mode 100644
index 00000000..64e387a3
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gorilla/mux/example_test.go
@@ -0,0 +1,23 @@
+package mux_test
+
+import (
+ "net/http"
+
+ muxtrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/gorilla/mux"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("Hello World!\n"))
+}
+
+func Example() {
+ mux := muxtrace.NewRouter()
+ mux.HandleFunc("/", handler)
+ http.ListenAndServe(":8080", mux)
+}
+
+func Example_withServiceName() {
+ mux := muxtrace.NewRouter(muxtrace.WithServiceName("mux.route"))
+ mux.HandleFunc("/", handler)
+ http.ListenAndServe(":8080", mux)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gorilla/mux/mux.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gorilla/mux/mux.go
new file mode 100644
index 00000000..e12887f9
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gorilla/mux/mux.go
@@ -0,0 +1,103 @@
+// Package mux provides tracing functions for tracing the gorilla/mux package (https://github.com/gorilla/mux).
+package mux // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/gorilla/mux"
+
+import (
+ "net/http"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httputil"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ "github.com/gorilla/mux"
+)
+
+// Router registers routes to be matched and dispatches a handler.
+type Router struct {
+ *mux.Router
+ config *routerConfig
+}
+
+// StrictSlash defines the trailing slash behavior for new routes. The initial
+// value is false.
+//
+// When true, if the route path is "/path/", accessing "/path" will perform a redirect
+// to the former and vice versa. In other words, your application will always
+// see the path as specified in the route.
+//
+// When false, if the route path is "/path", accessing "/path/" will not match
+// this route and vice versa.
+//
+// The redirect is an HTTP 301 (Moved Permanently). Note that when this is set for
+// routes with a non-idempotent method (e.g. POST, PUT), the subsequent redirected
+// request will be made as a GET by most clients. Use middleware or client settings
+// to modify this behaviour as needed.
+//
+// Special case: when a route sets a path prefix using the PathPrefix() method,
+// strict slash is ignored for that route because the redirect behavior can't
+// be determined from a prefix alone. However, any subrouters created from that
+// route inherit the original StrictSlash setting.
+func (r *Router) StrictSlash(value bool) *Router {
+ r.Router.StrictSlash(value)
+ return r
+}
+
+// SkipClean defines the path cleaning behaviour for new routes. The initial
+// value is false. Users should be careful about which routes are not cleaned.
+//
+// When true, if the route path is "/path//to", it will remain with the double
+// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/
+//
+// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will
+// become /fetch/http/xkcd.com/534
+func (r *Router) SkipClean(value bool) *Router {
+ r.Router.SkipClean(value)
+ return r
+}
+
+// UseEncodedPath tells the router to match the encoded original path
+// to the routes.
+// For example, "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
+//
+// If not called, the router will match the unencoded path to the routes.
+// For example, "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to".
+func (r *Router) UseEncodedPath() *Router {
+ r.Router.UseEncodedPath()
+ return r
+}
+
+// NewRouter returns a new router instance traced with the global tracer.
+func NewRouter(opts ...RouterOption) *Router {
+ cfg := new(routerConfig)
+ defaults(cfg)
+ for _, fn := range opts {
+ fn(cfg)
+ }
+ return &Router{
+ Router: mux.NewRouter(),
+ config: cfg,
+ }
+}
+
+// ServeHTTP dispatches the request to the handler
+// whose pattern most closely matches the request URL.
+// We only need to override this method in order to trace
+// all incoming requests to the underlying multiplexer.
+func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ var (
+ match mux.RouteMatch
+ spanopts []ddtrace.StartSpanOption
+ route = "unknown"
+ )
+ // get the resource associated with this request
+ if r.Match(req, &match) && match.Route != nil {
+ if r, err := match.Route.GetPathTemplate(); err == nil {
+ route = r
+ }
+ if h, err := match.Route.GetHostTemplate(); err == nil {
+ spanopts = append(spanopts, tracer.Tag("mux.host", h))
+ }
+ }
+ spanopts = append(spanopts, r.config.spanOpts...)
+ resource := req.Method + " " + route
+ httputil.TraceAndServe(r.Router, w, req, r.config.serviceName, resource, spanopts...)
+}
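+
+// For illustration: a route registered as r.Handle("/user/{id}", h) yields
+// resource names like "GET /user/{id}" (the path template rather than the raw
+// URL), while unmatched requests fall back to "GET unknown".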
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gorilla/mux/mux_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gorilla/mux/mux_test.go
new file mode 100644
index 00000000..cc8c0fd0
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gorilla/mux/mux_test.go
@@ -0,0 +1,141 @@
+package mux
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "strconv"
+ "testing"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestHttpTracer(t *testing.T) {
+ for _, ht := range []struct {
+ code int
+ method string
+ url string
+ resourceName string
+ errorStr string
+ }{
+ {
+ code: http.StatusOK,
+ method: "GET",
+ url: "/200",
+ resourceName: "GET /200",
+ },
+ {
+ code: http.StatusNotFound,
+ method: "GET",
+ url: "/not_a_real_route",
+ resourceName: "GET unknown",
+ },
+ {
+ code: http.StatusMethodNotAllowed,
+ method: "POST",
+ url: "/405",
+ resourceName: "POST unknown",
+ },
+ {
+ code: http.StatusInternalServerError,
+ method: "GET",
+ url: "/500",
+ resourceName: "GET /500",
+ errorStr: "500: Internal Server Error",
+ },
+ } {
+ t.Run(http.StatusText(ht.code), func(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+ codeStr := strconv.Itoa(ht.code)
+
+ // Send and verify a request
+ r := httptest.NewRequest(ht.method, ht.url, nil)
+ w := httptest.NewRecorder()
+ router().ServeHTTP(w, r)
+ assert.Equal(ht.code, w.Code)
+ assert.Equal(codeStr+"!\n", w.Body.String())
+
+ spans := mt.FinishedSpans()
+ assert.Equal(1, len(spans))
+
+ s := spans[0]
+ assert.Equal("http.request", s.OperationName())
+ assert.Equal("my-service", s.Tag(ext.ServiceName))
+ assert.Equal(codeStr, s.Tag(ext.HTTPCode))
+ assert.Equal(ht.method, s.Tag(ext.HTTPMethod))
+ assert.Equal(ht.url, s.Tag(ext.HTTPURL))
+ assert.Equal(ht.resourceName, s.Tag(ext.ResourceName))
+ if ht.errorStr != "" {
+ assert.Equal(ht.errorStr, s.Tag(ext.Error).(error).Error())
+ }
+ })
+ }
+}
+
+func TestDomain(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+ mux := NewRouter(WithServiceName("my-service"))
+ mux.Handle("/200", okHandler()).Host("localhost")
+ r := httptest.NewRequest("GET", "http://localhost/200", nil)
+ w := httptest.NewRecorder()
+ mux.ServeHTTP(w, r)
+
+ spans := mt.FinishedSpans()
+ assert.Equal(1, len(spans))
+ assert.Equal("localhost", spans[0].Tag("mux.host"))
+}
+
+func TestSpanOptions(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+ mux := NewRouter(WithSpanOptions(tracer.Tag(ext.SamplingPriority, 2)))
+ mux.Handle("/200", okHandler()).Host("localhost")
+ r := httptest.NewRequest("GET", "http://localhost/200", nil)
+ w := httptest.NewRecorder()
+ mux.ServeHTTP(w, r)
+
+ spans := mt.FinishedSpans()
+ assert.Equal(1, len(spans))
+ assert.Equal(2, spans[0].Tag(ext.SamplingPriority))
+}
+
+// TestImplementingMethods is a regression test asserting that all the mux.Router methods
+// returning the router will return the modified traced version of it and not the original
+// router.
+func TestImplementingMethods(t *testing.T) {
+ r := NewRouter()
+ _ = (*Router)(r.StrictSlash(false))
+ _ = (*Router)(r.SkipClean(false))
+ _ = (*Router)(r.UseEncodedPath())
+}
+
+func router() http.Handler {
+ mux := NewRouter(WithServiceName("my-service"))
+ mux.Handle("/200", okHandler())
+ mux.Handle("/500", errorHandler(http.StatusInternalServerError))
+ mux.Handle("/405", okHandler()).Methods("GET")
+ mux.NotFoundHandler = errorHandler(http.StatusNotFound)
+ mux.MethodNotAllowedHandler = errorHandler(http.StatusMethodNotAllowed)
+ return mux
+}
+
+func errorHandler(code int) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, fmt.Sprintf("%d!", code), code)
+ })
+}
+
+func okHandler() http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("200!\n"))
+ })
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gorilla/mux/option.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gorilla/mux/option.go
new file mode 100644
index 00000000..0b96d26f
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/gorilla/mux/option.go
@@ -0,0 +1,30 @@
+package mux
+
+import "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+
+type routerConfig struct {
+ serviceName string
+ spanOpts []ddtrace.StartSpanOption // additional span options to be applied
+}
+
+// RouterOption represents an option that can be passed to NewRouter.
+type RouterOption func(*routerConfig)
+
+func defaults(cfg *routerConfig) {
+ cfg.serviceName = "mux.router"
+}
+
+// WithServiceName sets the given service name for the router.
+func WithServiceName(name string) RouterOption {
+ return func(cfg *routerConfig) {
+ cfg.serviceName = name
+ }
+}
+
+// WithSpanOptions applies the given set of options to the spans started
+// by the router.
+func WithSpanOptions(opts ...ddtrace.StartSpanOption) RouterOption {
+ return func(cfg *routerConfig) {
+ cfg.spanOpts = opts
+ }
+}
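+
+// A usage sketch (the tag key/value are placeholders): apply a static tag to
+// every request span created by the router.
+//
+//	r := NewRouter(
+//		WithServiceName("web"),
+//		WithSpanOptions(tracer.Tag("env", "staging")),
+//	)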
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/graph-gophers/graphql-go/example_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/graph-gophers/graphql-go/example_test.go
new file mode 100644
index 00000000..0480e1d3
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/graph-gophers/graphql-go/example_test.go
@@ -0,0 +1,32 @@
+package graphql_test
+
+import (
+ "log"
+ "net/http"
+
+ graphql "github.com/graph-gophers/graphql-go"
+ "github.com/graph-gophers/graphql-go/relay"
+ graphqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/graph-gophers/graphql-go"
+)
+
+type resolver struct{}
+
+func (*resolver) Hello() string { return "Hello, world!" }
+
+func Example() {
+ s := `
+ schema {
+ query: Query
+ }
+ type Query {
+ hello: String!
+ }
+ `
+ schema := graphql.MustParseSchema(s, new(resolver),
+ graphql.Tracer(graphqltrace.NewTracer()))
+ http.Handle("/query", &relay.Handler{Schema: schema})
+ log.Fatal(http.ListenAndServe(":8080", nil))
+
+ // then:
+ // $ curl -XPOST -d '{"query": "{ hello }"}' localhost:8080/query
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/graph-gophers/graphql-go/graphql.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/graph-gophers/graphql-go/graphql.go
new file mode 100644
index 00000000..30f4a3f2
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/graph-gophers/graphql-go/graphql.go
@@ -0,0 +1,81 @@
+// Package graphql provides functions to trace the graph-gophers/graphql-go package (https://github.com/graph-gophers/graphql-go).
+//
+// We use the tracing mechanism available in the
+// https://godoc.org/github.com/graph-gophers/graphql-go/trace subpackage.
+// Create a new Tracer with `NewTracer` and pass it as an additional option to
+// `MustParseSchema`.
+package graphql // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/graph-gophers/graphql-go"
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/graph-gophers/graphql-go/errors"
+ "github.com/graph-gophers/graphql-go/introspection"
+ "github.com/graph-gophers/graphql-go/trace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+const (
+ tagGraphqlField = "graphql.field"
+ tagGraphqlQuery = "graphql.query"
+ tagGraphqlType = "graphql.type"
+)
+
+// A Tracer implements the graphql-go/trace.Tracer interface by sending traces
+// to the Datadog tracer.
+type Tracer struct {
+ cfg *config
+}
+
+var _ trace.Tracer = (*Tracer)(nil)
+
+// TraceQuery traces a GraphQL query.
+func (t *Tracer) TraceQuery(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, varTypes map[string]*introspection.Type) (context.Context, trace.TraceQueryFinishFunc) {
+ span, ctx := tracer.StartSpanFromContext(ctx, "graphql.request",
+ tracer.ServiceName(t.cfg.serviceName),
+ tracer.Tag(tagGraphqlQuery, queryString),
+ )
+
+ return ctx, func(errs []*errors.QueryError) {
+ var err error
+ switch n := len(errs); n {
+ case 0:
+ // err = nil
+ case 1:
+ err = errs[0]
+ default:
+ err = fmt.Errorf("%s (and %d more errors)", errs[0], n-1)
+ }
+ span.Finish(tracer.WithError(err))
+ }
+}
+
+// TraceField traces a GraphQL field access.
+func (t *Tracer) TraceField(ctx context.Context, label string, typeName string, fieldName string, trivial bool, args map[string]interface{}) (context.Context, trace.TraceFieldFinishFunc) {
+ span, ctx := tracer.StartSpanFromContext(ctx, "graphql.field",
+ tracer.ServiceName(t.cfg.serviceName),
+ tracer.Tag(tagGraphqlField, fieldName),
+ tracer.Tag(tagGraphqlType, typeName),
+ )
+ return ctx, func(err *errors.QueryError) {
+ // must explicitly check for nil, see issue golang/go#22729
+ if err != nil {
+ span.Finish(tracer.WithError(err))
+ } else {
+ span.Finish()
+ }
+ }
+}
+
+// NewTracer creates a new Tracer.
+func NewTracer(opts ...Option) trace.Tracer {
+ cfg := new(config)
+ defaults(cfg)
+ for _, opt := range opts {
+ opt(cfg)
+ }
+ return &Tracer{
+ cfg: cfg,
+ }
+}
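+
+// The resulting trace nests one "graphql.field" span per resolved field under
+// the enclosing "graphql.request" span, so every span of a query shares the
+// request's trace ID.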
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/graph-gophers/graphql-go/graphql_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/graph-gophers/graphql-go/graphql_test.go
new file mode 100644
index 00000000..8d9580aa
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/graph-gophers/graphql-go/graphql_test.go
@@ -0,0 +1,63 @@
+package graphql
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "strings"
+ "testing"
+
+ graphql "github.com/graph-gophers/graphql-go"
+ "github.com/graph-gophers/graphql-go/relay"
+ "github.com/stretchr/testify/assert"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+)
+
+type testResolver struct{}
+
+func (*testResolver) Hello() string { return "Hello, world!" }
+
+func Test(t *testing.T) {
+ s := `
+ schema {
+ query: Query
+ }
+ type Query {
+ hello: String!
+ }
+ `
+ schema := graphql.MustParseSchema(s, new(testResolver),
+ graphql.Tracer(NewTracer(WithServiceName("test-graphql-service"))))
+ srv := httptest.NewServer(&relay.Handler{Schema: schema})
+ defer srv.Close()
+
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ http.Post(srv.URL, "application/json", strings.NewReader(`{
+ "query": "{ hello }"
+ }`))
+
+ spans := mt.FinishedSpans()
+ assert.Len(t, spans, 2)
+ assert.Equal(t, spans[1].TraceID(), spans[0].TraceID())
+
+ {
+ s := spans[0]
+ assert.Equal(t, "hello", s.Tag(tagGraphqlField))
+ assert.Nil(t, s.Tag(ext.Error))
+ assert.Equal(t, "test-graphql-service", s.Tag(ext.ServiceName))
+ assert.Equal(t, "Query", s.Tag(tagGraphqlType))
+ assert.Equal(t, "graphql.field", s.OperationName())
+ assert.Equal(t, "graphql.field", s.Tag(ext.ResourceName))
+ }
+
+ {
+ s := spans[1]
+ assert.Equal(t, "{ hello }", s.Tag(tagGraphqlQuery))
+ assert.Nil(t, s.Tag(ext.Error))
+ assert.Equal(t, "test-graphql-service", s.Tag(ext.ServiceName))
+ assert.Equal(t, "graphql.request", s.OperationName())
+ assert.Equal(t, "graphql.request", s.Tag(ext.ResourceName))
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/graph-gophers/graphql-go/option.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/graph-gophers/graphql-go/option.go
new file mode 100644
index 00000000..c7cb15bf
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/graph-gophers/graphql-go/option.go
@@ -0,0 +1,17 @@
+package graphql
+
+type config struct{ serviceName string }
+
+// Option represents an option that can be used to customize the Tracer.
+type Option func(*config)
+
+func defaults(cfg *config) {
+ cfg.serviceName = "graphql.server"
+}
+
+// WithServiceName sets the given service name for the client.
+func WithServiceName(name string) Option {
+ return func(cfg *config) {
+ cfg.serviceName = name
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httputil/make_responsewriter.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httputil/make_responsewriter.go
new file mode 100644
index 00000000..ae0482d0
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httputil/make_responsewriter.go
@@ -0,0 +1,68 @@
+// +build ignore
+
+// This program generates wrapper implementations of http.ResponseWriter that
+// also satisfy http.Flusher, http.Pusher, http.CloseNotifier and http.Hijacker,
+// based on whether or not the passed in http.ResponseWriter also satisfies
+// them.
+
+package main
+
+import (
+ "os"
+ "text/template"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/lists"
+)
+
+func main() {
+ interfaces := []string{"Flusher", "Pusher", "CloseNotifier", "Hijacker"}
+ var combos [][][]string
+ for pick := len(interfaces); pick > 0; pick-- {
+ combos = append(combos, lists.Combinations(interfaces, pick))
+ }
+ template.Must(template.New("").Parse(tpl)).Execute(os.Stdout, map[string]interface{}{
+ "Interfaces": interfaces,
+ "Combinations": combos,
+ })
+}
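+
+// With four optional interfaces this emits 2^4-1 = 15 switch cases, one per
+// non-empty combination, in descending order of specificity.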
+
+var tpl = `// Code generated by make_responsewriter.go DO NOT EDIT
+
+package httputil
+
+import (
+ "net/http"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+)
+
+// wrapResponseWriter wraps an underlying http.ResponseWriter so that it can
+// trace the http response codes. It also checks for various http interfaces
+// (Flusher, Pusher, CloseNotifier, Hijacker) and if the underlying
+// http.ResponseWriter implements them it generates an unnamed struct with the
+// appropriate fields.
+//
+// This code is generated because we have to account for all the permutations
+// of the interfaces.
+func wrapResponseWriter(w http.ResponseWriter, span ddtrace.Span) http.ResponseWriter {
+{{- range .Interfaces }}
+ h{{.}}, ok{{.}} := w.(http.{{.}})
+{{- end }}
+
+ w = newResponseWriter(w, span)
+ switch {
+{{- range .Combinations }}
+ {{- range . }}
+ case {{ range $i, $v := . }}{{ if gt $i 0 }} && {{ end }}ok{{ $v }}{{ end }}:
+ w = struct {
+ http.ResponseWriter
+ {{- range . }}
+ http.{{.}}
+ {{- end }}
+ } { w{{ range . }}, h{{.}}{{ end }} }
+ {{- end }}
+{{- end }}
+ }
+
+ return w
+}
+`
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httputil/trace.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httputil/trace.go
new file mode 100644
index 00000000..1f69d23f
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httputil/trace.go
@@ -0,0 +1,66 @@
+package httputil // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httputil"
+
+//go:generate sh -c "go run make_responsewriter.go | gofmt > trace_gen.go"
+
+import (
+ "fmt"
+ "net/http"
+ "strconv"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+// TraceAndServe will apply tracing to the given http.Handler using the global tracer under the given service and resource.
+func TraceAndServe(h http.Handler, w http.ResponseWriter, r *http.Request, service, resource string, spanopts ...ddtrace.StartSpanOption) {
+ opts := append([]ddtrace.StartSpanOption{
+ tracer.SpanType(ext.SpanTypeWeb),
+ tracer.ServiceName(service),
+ tracer.ResourceName(resource),
+ tracer.Tag(ext.HTTPMethod, r.Method),
+ tracer.Tag(ext.HTTPURL, r.URL.Path),
+ }, spanopts...)
+ if spanctx, err := tracer.Extract(tracer.HTTPHeadersCarrier(r.Header)); err == nil {
+ opts = append(opts, tracer.ChildOf(spanctx))
+ }
+ span, ctx := tracer.StartSpanFromContext(r.Context(), "http.request", opts...)
+ defer span.Finish()
+
+ w = wrapResponseWriter(w, span)
+
+ h.ServeHTTP(w, r.WithContext(ctx))
+}
+
+// responseWriter is a small wrapper around an http response writer that will
+// intercept and store the status code of the response.
+type responseWriter struct {
+ http.ResponseWriter
+ span ddtrace.Span
+ status int
+}
+
+func newResponseWriter(w http.ResponseWriter, span ddtrace.Span) *responseWriter {
+ return &responseWriter{w, span, 0}
+}
+
+// Write writes the data to the connection as part of an HTTP reply.
+// We explicitly call WriteHeader with the 200 status code
+// in order to get it reported into the span.
+func (w *responseWriter) Write(b []byte) (int, error) {
+ if w.status == 0 {
+ w.WriteHeader(http.StatusOK)
+ }
+ return w.ResponseWriter.Write(b)
+}
+
+// WriteHeader sends an HTTP response header with status code.
+// It also sets the status code to the span.
+func (w *responseWriter) WriteHeader(status int) {
+ w.ResponseWriter.WriteHeader(status)
+ w.status = status
+ w.span.SetTag(ext.HTTPCode, strconv.Itoa(status))
+ if status >= 500 && status < 600 {
+ w.span.SetTag(ext.Error, fmt.Errorf("%d: %s", status, http.StatusText(status)))
+ }
+}
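+
+// A minimal wiring sketch (names are placeholders): trace an arbitrary
+// handler with a resource derived from the request.
+//
+//	wrapped := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+//		TraceAndServe(myHandler, w, r, "my-service", r.Method+" "+r.URL.Path)
+//	})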
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httputil/trace_gen.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httputil/trace_gen.go
new file mode 100644
index 00000000..da5af82f
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httputil/trace_gen.go
@@ -0,0 +1,121 @@
+// Code generated by make_responsewriter.go DO NOT EDIT
+
+package httputil
+
+import (
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "net/http"
+)
+
+// wrapResponseWriter wraps an underlying http.ResponseWriter so that it can
+// trace the http response codes. It also checks for various http interfaces
+// (Flusher, Pusher, CloseNotifier, Hijacker) and if the underlying
+// http.ResponseWriter implements them it generates an unnamed struct with the
+// appropriate fields.
+//
+// This code is generated because we have to account for all the permutations
+// of the interfaces.
+func wrapResponseWriter(w http.ResponseWriter, span ddtrace.Span) http.ResponseWriter {
+ hFlusher, okFlusher := w.(http.Flusher)
+ hPusher, okPusher := w.(http.Pusher)
+ hCloseNotifier, okCloseNotifier := w.(http.CloseNotifier)
+ hHijacker, okHijacker := w.(http.Hijacker)
+
+ w = newResponseWriter(w, span)
+ switch {
+ case okFlusher && okPusher && okCloseNotifier && okHijacker:
+ w = struct {
+ http.ResponseWriter
+ http.Flusher
+ http.Pusher
+ http.CloseNotifier
+ http.Hijacker
+ }{w, hFlusher, hPusher, hCloseNotifier, hHijacker}
+ case okFlusher && okPusher && okCloseNotifier:
+ w = struct {
+ http.ResponseWriter
+ http.Flusher
+ http.Pusher
+ http.CloseNotifier
+ }{w, hFlusher, hPusher, hCloseNotifier}
+ case okFlusher && okPusher && okHijacker:
+ w = struct {
+ http.ResponseWriter
+ http.Flusher
+ http.Pusher
+ http.Hijacker
+ }{w, hFlusher, hPusher, hHijacker}
+ case okFlusher && okCloseNotifier && okHijacker:
+ w = struct {
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ http.Hijacker
+ }{w, hFlusher, hCloseNotifier, hHijacker}
+ case okPusher && okCloseNotifier && okHijacker:
+ w = struct {
+ http.ResponseWriter
+ http.Pusher
+ http.CloseNotifier
+ http.Hijacker
+ }{w, hPusher, hCloseNotifier, hHijacker}
+ case okFlusher && okPusher:
+ w = struct {
+ http.ResponseWriter
+ http.Flusher
+ http.Pusher
+ }{w, hFlusher, hPusher}
+ case okFlusher && okCloseNotifier:
+ w = struct {
+ http.ResponseWriter
+ http.Flusher
+ http.CloseNotifier
+ }{w, hFlusher, hCloseNotifier}
+ case okFlusher && okHijacker:
+ w = struct {
+ http.ResponseWriter
+ http.Flusher
+ http.Hijacker
+ }{w, hFlusher, hHijacker}
+ case okPusher && okCloseNotifier:
+ w = struct {
+ http.ResponseWriter
+ http.Pusher
+ http.CloseNotifier
+ }{w, hPusher, hCloseNotifier}
+ case okPusher && okHijacker:
+ w = struct {
+ http.ResponseWriter
+ http.Pusher
+ http.Hijacker
+ }{w, hPusher, hHijacker}
+ case okCloseNotifier && okHijacker:
+ w = struct {
+ http.ResponseWriter
+ http.CloseNotifier
+ http.Hijacker
+ }{w, hCloseNotifier, hHijacker}
+ case okFlusher:
+ w = struct {
+ http.ResponseWriter
+ http.Flusher
+ }{w, hFlusher}
+ case okPusher:
+ w = struct {
+ http.ResponseWriter
+ http.Pusher
+ }{w, hPusher}
+ case okCloseNotifier:
+ w = struct {
+ http.ResponseWriter
+ http.CloseNotifier
+ }{w, hCloseNotifier}
+ case okHijacker:
+ w = struct {
+ http.ResponseWriter
+ http.Hijacker
+ }{w, hHijacker}
+ }
+
+ return w
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httputil/trace_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httputil/trace_test.go
new file mode 100644
index 00000000..018b1fb5
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httputil/trace_test.go
@@ -0,0 +1,160 @@
+package httputil
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+func TestTraceAndServe(t *testing.T) {
+ t.Run("regular", func(t *testing.T) {
+ mt := mocktracer.Start()
+ assert := assert.New(t)
+ defer mt.Stop()
+
+ called := false
+ w := httptest.NewRecorder()
+ r, err := http.NewRequest("GET", "/", nil)
+ assert.NoError(err)
+ handler := func(w http.ResponseWriter, r *http.Request) {
+ _, ok := w.(http.Hijacker)
+ assert.False(ok)
+ http.Error(w, "some error", http.StatusServiceUnavailable)
+ called = true
+ }
+ TraceAndServe(http.HandlerFunc(handler), w, r, "service", "resource")
+ spans := mt.FinishedSpans()
+ span := spans[0]
+
+ assert.True(called)
+ assert.Len(spans, 1)
+ assert.Equal(ext.SpanTypeWeb, span.Tag(ext.SpanType))
+ assert.Equal("service", span.Tag(ext.ServiceName))
+ assert.Equal("resource", span.Tag(ext.ResourceName))
+ assert.Equal("GET", span.Tag(ext.HTTPMethod))
+ assert.Equal("/", span.Tag(ext.HTTPURL))
+ assert.Equal("503", span.Tag(ext.HTTPCode))
+ assert.Equal("503: Service Unavailable", span.Tag(ext.Error).(error).Error())
+ })
+
+ t.Run("Hijacker,Flusher,CloseNotifier", func(t *testing.T) {
+ assert := assert.New(t)
+ called := false
+ handler := func(w http.ResponseWriter, r *http.Request) {
+ _, ok := w.(http.Hijacker)
+ assert.True(ok, "ResponseWriter should implement http.Hijacker")
+ _, ok = w.(http.Flusher)
+ assert.True(ok, "ResponseWriter should implement http.Flusher")
+ _, ok = w.(http.CloseNotifier)
+ assert.True(ok, "ResponseWriter should implement http.CloseNotifier")
+ fmt.Fprintln(w, "Hello, world!")
+ called = true
+ }
+ srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ TraceAndServe(http.HandlerFunc(handler), w, r, "service", "resource")
+ }))
+ defer srv.Close()
+
+ res, err := http.Get(srv.URL)
+ assert.NoError(err)
+ slurp, err := ioutil.ReadAll(res.Body)
+ res.Body.Close()
+ assert.True(called)
+ assert.NoError(err)
+ assert.Equal("Hello, world!\n", string(slurp))
+ })
+
+	// There doesn't appear to be an easy way to exercise http.Pusher support
+	// via an HTTP request, so we just confirm that wrapResponseWriter preserves it.
+ t.Run("Pusher", func(t *testing.T) {
+ var i struct {
+ http.ResponseWriter
+ http.Pusher
+ }
+ var w http.ResponseWriter = i
+ _, ok := w.(http.ResponseWriter)
+ assert.True(t, ok)
+ _, ok = w.(http.Pusher)
+ assert.True(t, ok)
+
+ w = wrapResponseWriter(w, nil)
+ _, ok = w.(http.ResponseWriter)
+ assert.True(t, ok)
+ _, ok = w.(http.Pusher)
+ assert.True(t, ok)
+ })
+
+ t.Run("distributed", func(t *testing.T) {
+ mt := mocktracer.Start()
+ assert := assert.New(t)
+ defer mt.Stop()
+
+ called := false
+ handler := func(w http.ResponseWriter, r *http.Request) {
+ called = true
+ }
+
+ // create a request with a span injected into its headers
+ parent := tracer.StartSpan("parent")
+ parent.Finish() // finish it so the mocktracer can catch it
+ r, err := http.NewRequest("GET", "/", nil)
+ assert.NoError(err)
+ carrier := tracer.HTTPHeadersCarrier(r.Header)
+ err = tracer.Inject(parent.Context(), carrier)
+ assert.NoError(err)
+ w := httptest.NewRecorder()
+
+ TraceAndServe(http.HandlerFunc(handler), w, r, "service", "resource")
+
+ var p, c mocktracer.Span
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 2)
+ if spans[0].OperationName() == "parent" {
+ p, c = spans[0], spans[1]
+ } else {
+ p, c = spans[1], spans[0]
+ }
+ assert.True(called)
+ assert.Equal(c.ParentID(), p.SpanID())
+ })
+
+ t.Run("context", func(t *testing.T) {
+ mt := mocktracer.Start()
+ assert := assert.New(t)
+ defer mt.Stop()
+
+ called := false
+ handler := func(w http.ResponseWriter, r *http.Request) {
+ called = true
+ }
+
+ // create a request with a span in its context
+ parent := tracer.StartSpan("parent")
+ parent.Finish() // finish it so the mocktracer can catch it
+ r, err := http.NewRequest("GET", "/", nil)
+ assert.NoError(err)
+ r = r.WithContext(tracer.ContextWithSpan(r.Context(), parent))
+ w := httptest.NewRecorder()
+
+ TraceAndServe(http.HandlerFunc(handler), w, r, "service", "resource")
+
+ var p, c mocktracer.Span
+ spans := mt.FinishedSpans()
+ assert.Len(spans, 2)
+ if spans[0].OperationName() == "parent" {
+ p, c = spans[0], spans[1]
+ } else {
+ p, c = spans[1], spans[0]
+ }
+ assert.True(called)
+ assert.Equal(c.ParentID(), p.SpanID())
+ })
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/lists/combinations.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/lists/combinations.go
new file mode 100644
index 00000000..d91c6bbf
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/lists/combinations.go
@@ -0,0 +1,39 @@
+package lists
+
+// Combinations returns all possible unique selections of size `pick` from a
+// list of strings, for which order does not matter.
+//
+// An example:
+//
+// Combinations([cat, dog, bird], 2):
+// [cat] -> Combinations([dog, bird], 1)
+// [cat, dog]
+// [cat, bird]
+// [dog] -> Combinations([bird], 1)
+// [dog, bird]
+// [bird] -> Combinations([], 0)
+// n/a
+//
+func Combinations(list []string, pick int) (all [][]string) {
+ switch pick {
+ case 0:
+ // nothing to do
+ case 1:
+ for i := range list {
+ all = append(all, list[i:i+1])
+ }
+ default:
+		// We recursively find combinations by taking each item in the list
+		// and then finding the combinations at (pick-1) for the remaining
+		// items in the list.
+		// We start at [i+1:] because the order of the items in the list
+		// doesn't matter, so this removes the duplicates we would
+		// otherwise get.
+ for i := range list {
+ for _, next := range Combinations(list[i+1:], pick-1) {
+ all = append(all, append([]string{list[i]}, next...))
+ }
+ }
+ }
+ return all
+}
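+
+// For example, Combinations([]string{"cat", "dog", "bird"}, 2) returns
+// [[cat dog] [cat bird] [dog bird]]: 3 choose 2 = 3 unique pairs.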
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/lists/combinations_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/lists/combinations_test.go
new file mode 100644
index 00000000..b4b0a549
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/lists/combinations_test.go
@@ -0,0 +1,39 @@
+package lists
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestCombinations(t *testing.T) {
+ {
+ combos := Combinations([]string{"cat", "dog", "bird", "mouse"}, 3)
+ assert.Equal(t, [][]string{
+ {"cat", "dog", "bird"},
+ {"cat", "dog", "mouse"},
+ {"cat", "bird", "mouse"},
+ {"dog", "bird", "mouse"},
+ }, combos)
+ }
+ {
+ combos := Combinations([]string{"cat", "dog", "bird", "mouse"}, 2)
+ assert.Equal(t, [][]string{
+ {"cat", "dog"},
+ {"cat", "bird"},
+ {"cat", "mouse"},
+ {"dog", "bird"},
+ {"dog", "mouse"},
+ {"bird", "mouse"},
+ }, combos)
+ }
+ {
+ combos := Combinations([]string{"cat", "dog", "bird", "mouse"}, 1)
+ assert.Equal(t, [][]string{
+ {"cat"},
+ {"dog"},
+ {"bird"},
+ {"mouse"},
+ }, combos)
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/sqltest/sqltest.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/sqltest/sqltest.go
new file mode 100644
index 00000000..c37638fc
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/sqltest/sqltest.go
@@ -0,0 +1,216 @@
+package sqltest // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/sqltest"
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "log"
+ "testing"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// Prepare sets up a table with the given name in both the MySQL and Postgres databases and returns
+// a teardown function which will drop it.
+func Prepare(tableName string) func() {
+ queryDrop := fmt.Sprintf("DROP TABLE IF EXISTS %s", tableName)
+ queryCreate := fmt.Sprintf("CREATE TABLE %s (id integer NOT NULL DEFAULT '0', name text)", tableName)
+	mysql, err := sql.Open("mysql", "test:test@tcp(127.0.0.1:3306)/test")
+	if err != nil {
+		log.Fatal(err)
+	}
+	mysql.Exec(queryDrop)
+	mysql.Exec(queryCreate)
+	postgres, err := sql.Open("postgres", "postgres://postgres:postgres@127.0.0.1:5432/postgres?sslmode=disable")
+	if err != nil {
+		log.Fatal(err)
+	}
+	postgres.Exec(queryDrop)
+	postgres.Exec(queryCreate)
+	return func() {
+		// Drop the tables and close both handles on teardown. Closing here,
+		// rather than deferring Close inside Prepare, keeps the connections
+		// usable until the teardown function actually runs.
+		mysql.Exec(queryDrop)
+		postgres.Exec(queryDrop)
+		mysql.Close()
+		postgres.Close()
+	}
+}
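+
+// Typical usage from a TestMain (tableName is whatever the caller chose):
+//
+//	defer sqltest.Prepare(tableName)()
+//	os.Exit(m.Run())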
+
+// RunAll applies a sequence of unit tests to check the correct tracing of sql features.
+func RunAll(t *testing.T, cfg *Config) {
+ cfg.mockTracer = mocktracer.Start()
+ defer cfg.mockTracer.Stop()
+
+ for name, test := range map[string]func(*Config) func(*testing.T){
+ "Ping": testPing,
+ "Query": testQuery,
+ "Statement": testStatement,
+ "BeginRollback": testBeginRollback,
+ "Exec": testExec,
+ } {
+ t.Run(name, test(cfg))
+ }
+}
+
+func testPing(cfg *Config) func(*testing.T) {
+ return func(t *testing.T) {
+ cfg.mockTracer.Reset()
+ assert := assert.New(t)
+ err := cfg.DB.Ping()
+ assert.Nil(err)
+ spans := cfg.mockTracer.FinishedSpans()
+ assert.Len(spans, 1)
+
+ span := spans[0]
+ assert.Equal(cfg.ExpectName, span.OperationName())
+ for k, v := range cfg.ExpectTags {
+ assert.Equal(v, span.Tag(k), "Value mismatch on tag %s", k)
+ }
+ }
+}
+
+func testQuery(cfg *Config) func(*testing.T) {
+ query := fmt.Sprintf("SELECT id, name FROM %s LIMIT 5", cfg.TableName)
+ return func(t *testing.T) {
+ cfg.mockTracer.Reset()
+ assert := assert.New(t)
+		rows, err := cfg.DB.Query(query)
+		assert.Nil(err)
+		if err == nil {
+			defer rows.Close()
+		}
+
+ spans := cfg.mockTracer.FinishedSpans()
+ assert.Len(spans, 1)
+
+ span := spans[0]
+ assert.Equal(cfg.ExpectName, span.OperationName())
+ for k, v := range cfg.ExpectTags {
+ assert.Equal(v, span.Tag(k), "Value mismatch on tag %s", k)
+ }
+ }
+}
+
+func testStatement(cfg *Config) func(*testing.T) {
+ query := "INSERT INTO %s(name) VALUES(%s)"
+ switch cfg.DriverName {
+ case "postgres":
+ query = fmt.Sprintf(query, cfg.TableName, "$1")
+ case "mysql":
+ query = fmt.Sprintf(query, cfg.TableName, "?")
+ }
+ return func(t *testing.T) {
+ cfg.mockTracer.Reset()
+ assert := assert.New(t)
+ stmt, err := cfg.DB.Prepare(query)
+ assert.Equal(nil, err)
+
+ spans := cfg.mockTracer.FinishedSpans()
+ assert.Len(spans, 1)
+
+ span := spans[0]
+ assert.Equal(cfg.ExpectName, span.OperationName())
+ for k, v := range cfg.ExpectTags {
+ assert.Equal(v, span.Tag(k), "Value mismatch on tag %s", k)
+ }
+
+ cfg.mockTracer.Reset()
+ _, err2 := stmt.Exec("New York")
+ assert.Equal(nil, err2)
+
+ spans = cfg.mockTracer.FinishedSpans()
+ assert.Len(spans, 1)
+ span = spans[0]
+ assert.Equal(cfg.ExpectName, span.OperationName())
+ for k, v := range cfg.ExpectTags {
+ assert.Equal(v, span.Tag(k), "Value mismatch on tag %s", k)
+ }
+ }
+}
+
+func testBeginRollback(cfg *Config) func(*testing.T) {
+ return func(t *testing.T) {
+ cfg.mockTracer.Reset()
+ assert := assert.New(t)
+
+ tx, err := cfg.DB.Begin()
+ assert.Equal(nil, err)
+
+ spans := cfg.mockTracer.FinishedSpans()
+ assert.Len(spans, 1)
+
+ span := spans[0]
+ assert.Equal(cfg.ExpectName, span.OperationName())
+ for k, v := range cfg.ExpectTags {
+ assert.Equal(v, span.Tag(k), "Value mismatch on tag %s", k)
+ }
+
+ cfg.mockTracer.Reset()
+ err = tx.Rollback()
+ assert.Equal(nil, err)
+
+ spans = cfg.mockTracer.FinishedSpans()
+ assert.Len(spans, 1)
+ span = spans[0]
+ assert.Equal(cfg.ExpectName, span.OperationName())
+ for k, v := range cfg.ExpectTags {
+ assert.Equal(v, span.Tag(k), "Value mismatch on tag %s", k)
+ }
+ }
+}
+
+func testExec(cfg *Config) func(*testing.T) {
+ return func(t *testing.T) {
+ assert := assert.New(t)
+ query := fmt.Sprintf("INSERT INTO %s(name) VALUES('New York')", cfg.TableName)
+
+ parent, ctx := tracer.StartSpanFromContext(context.Background(), "test.parent",
+ tracer.ServiceName("test"),
+ tracer.ResourceName("parent"),
+ )
+
+ cfg.mockTracer.Reset()
+ tx, err := cfg.DB.BeginTx(ctx, nil)
+ assert.Equal(nil, err)
+ _, err = tx.ExecContext(ctx, query)
+ assert.Equal(nil, err)
+ err = tx.Commit()
+ assert.Equal(nil, err)
+
+ parent.Finish() // flush children
+
+ spans := cfg.mockTracer.FinishedSpans()
+ assert.Len(spans, 4)
+
+ var span mocktracer.Span
+ for _, s := range spans {
+ if s.OperationName() == cfg.ExpectName && s.Tag(ext.ResourceName) == query {
+ span = s
+ }
+ }
+ assert.NotNil(span, "span not found")
+ for k, v := range cfg.ExpectTags {
+ assert.Equal(v, span.Tag(k), "Value mismatch on tag %s", k)
+ }
+ for _, s := range spans {
+ if s.OperationName() == cfg.ExpectName && s.Tag(ext.ResourceName) == "Commit" {
+ span = s
+ }
+ }
+ assert.NotNil(span, "span not found")
+ for k, v := range cfg.ExpectTags {
+ assert.Equal(v, span.Tag(k), "Value mismatch on tag %s", k)
+ }
+ }
+}
+
+// Config holds the test configuration.
+type Config struct {
+ *sql.DB
+ mockTracer mocktracer.Tracer
+ DriverName string
+ TableName string
+ ExpectName string
+ ExpectTags map[string]interface{}
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/jmoiron/sqlx/example_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/jmoiron/sqlx/example_test.go
new file mode 100644
index 00000000..7ae89540
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/jmoiron/sqlx/example_test.go
@@ -0,0 +1,34 @@
+package sqlx_test
+
+import (
+ "log"
+
+ "github.com/jmoiron/sqlx"
+ "github.com/lib/pq"
+
+ sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql"
+ sqlxtrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/jmoiron/sqlx"
+)
+
+func ExampleOpen() {
+	// Register informs the sqlxtrace package of the driver that we will be using in our program.
+	// It uses a default service name ("postgres.db" in this case) which can be
+	// overridden with the WithServiceName option, as below.
+ sqltrace.Register("postgres", &pq.Driver{}, sqltrace.WithServiceName("my-service"))
+ db, err := sqlxtrace.Open("postgres", "postgres://pqgotest:password@localhost/pqgotest?sslmode=disable")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+	// All calls through the sqlx API will then be traced.
+ query, args, err := sqlx.In("SELECT * FROM users WHERE level IN (?);", []int{4, 6, 7})
+ if err != nil {
+ log.Fatal(err)
+ }
+ query = db.Rebind(query)
+ rows, err := db.Queryx(query, args...)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer rows.Close()
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/jmoiron/sqlx/sql.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/jmoiron/sqlx/sql.go
new file mode 100644
index 00000000..0341400e
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/jmoiron/sqlx/sql.go
@@ -0,0 +1,62 @@
+// Package sqlx provides functions to trace the jmoiron/sqlx package (https://github.com/jmoiron/sqlx).
+// To enable tracing, first use one of the "Register*" functions to register the sql driver that
+// you will be using, then continue using the package as you normally would.
+//
+// For more information on registering and why this needs to happen, please check the
+// github.com/DataDog/dd-trace-go/contrib/database/sql package.
+//
+package sqlx // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/jmoiron/sqlx"
+
+import (
+ sqltraced "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql"
+
+ "github.com/jmoiron/sqlx"
+)
+
+// Open opens a new (traced) connection to the database using the given driver and source.
+// Note that the driver must first be registered using the database/sql integration's Register.
+func Open(driverName, dataSourceName string) (*sqlx.DB, error) {
+ db, err := sqltraced.Open(driverName, dataSourceName)
+ if err != nil {
+ return nil, err
+ }
+ return sqlx.NewDb(db, driverName), nil
+}
+
+// MustOpen is the same as Open, but panics on error.
+// To get tracing, the driver must first be registered using the database/sql
+// integration's Register.
+func MustOpen(driverName, dataSourceName string) (*sqlx.DB, error) {
+ db, err := sqltraced.Open(driverName, dataSourceName)
+ if err != nil {
+ panic(err)
+ }
+ return sqlx.NewDb(db, driverName), nil
+}
+
+// Connect connects to the data source using the given driver.
+// To get tracing, the driver must first be registered using the database/sql
+// integration's Register.
+func Connect(driverName, dataSourceName string) (*sqlx.DB, error) {
+ db, err := Open(driverName, dataSourceName)
+ if err != nil {
+ return nil, err
+ }
+ err = db.Ping()
+ if err != nil {
+ db.Close()
+ return nil, err
+ }
+ return db, nil
+}
+
+// MustConnect connects to a database and panics on error.
+// To get tracing, the driver must first be registered using the database/sql
+// integration's Register.
+func MustConnect(driverName, dataSourceName string) *sqlx.DB {
+ db, err := Connect(driverName, dataSourceName)
+ if err != nil {
+ panic(err)
+ }
+ return db
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/jmoiron/sqlx/sql_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/jmoiron/sqlx/sql_test.go
new file mode 100644
index 00000000..9574b497
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/jmoiron/sqlx/sql_test.go
@@ -0,0 +1,78 @@
+package sqlx
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "testing"
+
+ sqltrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/database/sql"
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/sqltest"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+
+ "github.com/go-sql-driver/mysql"
+ "github.com/lib/pq"
+)
+
+// tableName holds the SQL table that these tests will be run against. It must be unique across the repository's test suites.
+const tableName = "testsqlx"
+
+func TestMain(m *testing.M) {
+ _, ok := os.LookupEnv("INTEGRATION")
+ if !ok {
+ fmt.Println("--- SKIP: to enable integration test, set the INTEGRATION environment variable")
+ os.Exit(0)
+ }
+ defer sqltest.Prepare(tableName)()
+ os.Exit(m.Run())
+}
+
+func TestMySQL(t *testing.T) {
+ sqltrace.Register("mysql", &mysql.MySQLDriver{}, sqltrace.WithServiceName("mysql-test"))
+ dbx, err := Open("mysql", "test:test@tcp(127.0.0.1:3306)/test")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer dbx.Close()
+
+ testConfig := &sqltest.Config{
+ DB: dbx.DB,
+ DriverName: "mysql",
+ TableName: tableName,
+ ExpectName: "mysql.query",
+ ExpectTags: map[string]interface{}{
+ ext.ServiceName: "mysql-test",
+ ext.SpanType: ext.SpanTypeSQL,
+ ext.TargetHost: "127.0.0.1",
+ ext.TargetPort: "3306",
+ ext.DBUser: "test",
+ ext.DBName: "test",
+ },
+ }
+ sqltest.RunAll(t, testConfig)
+}
+
+func TestPostgres(t *testing.T) {
+ sqltrace.Register("postgres", &pq.Driver{})
+ dbx, err := Open("postgres", "postgres://postgres:postgres@127.0.0.1:5432/postgres?sslmode=disable")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer dbx.Close()
+
+ testConfig := &sqltest.Config{
+ DB: dbx.DB,
+ DriverName: "postgres",
+ TableName: tableName,
+ ExpectName: "postgres.query",
+ ExpectTags: map[string]interface{}{
+ ext.ServiceName: "postgres.db",
+ ext.SpanType: ext.SpanTypeSQL,
+ ext.TargetHost: "127.0.0.1",
+ ext.TargetPort: "5432",
+ ext.DBUser: "postgres",
+ ext.DBName: "postgres",
+ },
+ }
+ sqltest.RunAll(t, testConfig)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/julienschmidt/httprouter/example_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/julienschmidt/httprouter/example_test.go
new file mode 100644
index 00000000..42bf1760
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/julienschmidt/httprouter/example_test.go
@@ -0,0 +1,35 @@
+package httprouter_test
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+
+ "github.com/julienschmidt/httprouter"
+
+ httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/julienschmidt/httprouter"
+)
+
+func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+ fmt.Fprint(w, "Welcome!\n")
+}
+
+func Hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+ fmt.Fprintf(w, "hello, %s!\n", ps.ByName("name"))
+}
+
+func Example() {
+ router := httptrace.New()
+ router.GET("/", Index)
+ router.GET("/hello/:name", Hello)
+
+ log.Fatal(http.ListenAndServe(":8080", router))
+}
+
+func Example_withServiceName() {
+ router := httptrace.New(httptrace.WithServiceName("http.router"))
+ router.GET("/", Index)
+ router.GET("/hello/:name", Hello)
+
+ log.Fatal(http.ListenAndServe(":8080", router))
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/julienschmidt/httprouter/httprouter.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/julienschmidt/httprouter/httprouter.go
new file mode 100644
index 00000000..a2902ea1
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/julienschmidt/httprouter/httprouter.go
@@ -0,0 +1,39 @@
+// Package httprouter provides functions to trace the julienschmidt/httprouter package (https://github.com/julienschmidt/httprouter).
+package httprouter // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/julienschmidt/httprouter"
+
+import (
+ "net/http"
+ "strings"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httputil"
+
+ "github.com/julienschmidt/httprouter"
+)
+
+// Router is a traced version of httprouter.Router.
+type Router struct {
+ *httprouter.Router
+ config *routerConfig
+}
+
+// New returns a new router augmented with tracing.
+func New(opts ...RouterOption) *Router {
+ cfg := new(routerConfig)
+ defaults(cfg)
+ for _, fn := range opts {
+ fn(cfg)
+ }
+ return &Router{httprouter.New(), cfg}
+}
+
+// ServeHTTP implements http.Handler.
+func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	// get the resource associated with this request
+ route := req.URL.Path
+ _, ps, _ := r.Router.Lookup(req.Method, route)
+ for _, param := range ps {
+ route = strings.Replace(route, param.Value, ":"+param.Key, 1)
+ }
+ resource := req.Method + " " + route
+ httputil.TraceAndServe(r.Router, w, req, r.config.serviceName, resource)
+}
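+
+// As an example, a request for GET /hello/world that matches the route
+// /hello/:name is reported with resource "GET /hello/:name": Lookup returns
+// the matched params and each value is substituted back with its :key.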
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/julienschmidt/httprouter/httprouter_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/julienschmidt/httprouter/httprouter_test.go
new file mode 100644
index 00000000..24257451
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/julienschmidt/httprouter/httprouter_test.go
@@ -0,0 +1,80 @@
+package httprouter
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+
+ "github.com/julienschmidt/httprouter"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestHttpTracer200(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ // Send and verify a 200 request
+ url := "/200"
+ r := httptest.NewRequest("GET", url, nil)
+ w := httptest.NewRecorder()
+ router().ServeHTTP(w, r)
+ assert.Equal(200, w.Code)
+ assert.Equal("OK\n", w.Body.String())
+
+ spans := mt.FinishedSpans()
+ assert.Equal(1, len(spans))
+
+ s := spans[0]
+ assert.Equal("http.request", s.OperationName())
+ assert.Equal("my-service", s.Tag(ext.ServiceName))
+ assert.Equal("GET "+url, s.Tag(ext.ResourceName))
+ assert.Equal("200", s.Tag(ext.HTTPCode))
+ assert.Equal("GET", s.Tag(ext.HTTPMethod))
+ assert.Equal(url, s.Tag(ext.HTTPURL))
+ assert.Equal(nil, s.Tag(ext.Error))
+}
+
+func TestHttpTracer500(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ // Send and verify a 500 request
+ url := "/500"
+ r := httptest.NewRequest("GET", url, nil)
+ w := httptest.NewRecorder()
+ router().ServeHTTP(w, r)
+ assert.Equal(500, w.Code)
+ assert.Equal("500!\n", w.Body.String())
+
+ spans := mt.FinishedSpans()
+ assert.Equal(1, len(spans))
+
+ s := spans[0]
+ assert.Equal("http.request", s.OperationName())
+ assert.Equal("my-service", s.Tag(ext.ServiceName))
+ assert.Equal("GET "+url, s.Tag(ext.ResourceName))
+ assert.Equal("500", s.Tag(ext.HTTPCode))
+ assert.Equal("GET", s.Tag(ext.HTTPMethod))
+ assert.Equal(url, s.Tag(ext.HTTPURL))
+ assert.Equal("500: Internal Server Error", s.Tag(ext.Error).(error).Error())
+}
+
+func router() http.Handler {
+ router := New(WithServiceName("my-service"))
+ router.GET("/200", handler200)
+ router.GET("/500", handler500)
+ return router
+}
+
+func handler200(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+ w.Write([]byte("OK\n"))
+}
+
+func handler500(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {
+ http.Error(w, "500!", http.StatusInternalServerError)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/julienschmidt/httprouter/option.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/julienschmidt/httprouter/option.go
new file mode 100644
index 00000000..d5683443
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/julienschmidt/httprouter/option.go
@@ -0,0 +1,17 @@
+package httprouter
+
+type routerConfig struct{ serviceName string }
+
+// RouterOption represents an option that can be passed to New.
+type RouterOption func(*routerConfig)
+
+func defaults(cfg *routerConfig) {
+ cfg.serviceName = "http.router"
+}
+
+// WithServiceName sets the given service name for the returned router.
+func WithServiceName(name string) RouterOption {
+ return func(cfg *routerConfig) {
+ cfg.serviceName = name
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/k8s.io/client-go/kubernetes/example_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/k8s.io/client-go/kubernetes/example_test.go
new file mode 100644
index 00000000..4a015d16
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/k8s.io/client-go/kubernetes/example_test.go
@@ -0,0 +1,32 @@
+package kubernetes_test
+
+import (
+ "fmt"
+
+ kubernetestrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/k8s.io/client-go/kubernetes"
+ meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
+ _ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
+ "k8s.io/client-go/rest"
+)
+
+func Example() {
+ cfg, err := rest.InClusterConfig()
+ if err != nil {
+ panic(err.Error())
+ }
+ // Use this to trace all calls made to the Kubernetes API
+ cfg.WrapTransport = kubernetestrace.WrapRoundTripper
+
+ client, err := kubernetes.NewForConfig(cfg)
+ if err != nil {
+ panic(err.Error())
+ }
+
+ pods, err := client.CoreV1().Pods("default").List(meta_v1.ListOptions{})
+ if err != nil {
+ panic(err)
+ }
+
+ fmt.Println(pods.Items)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/k8s.io/client-go/kubernetes/kubernetes.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/k8s.io/client-go/kubernetes/kubernetes.go
new file mode 100644
index 00000000..206f42b7
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/k8s.io/client-go/kubernetes/kubernetes.go
@@ -0,0 +1,71 @@
+// Package kubernetes provides functions to trace k8s.io/client-go (https://github.com/kubernetes/client-go).
+package kubernetes // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/k8s.io/client-go/kubernetes"
+
+import (
+ "net/http"
+ "strings"
+
+ httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+)
+
+const (
+ prefixAPI = "/api/v1/"
+ prefixWatch = "watch/"
+)
+
+// WrapRoundTripper wraps a RoundTripper intended for interfacing with
+// Kubernetes and traces all requests.
+func WrapRoundTripper(rt http.RoundTripper) http.RoundTripper {
+ return httptrace.WrapRoundTripper(rt,
+ httptrace.WithBefore(func(req *http.Request, span ddtrace.Span) {
+ span.SetTag(ext.ServiceName, "kubernetes")
+ span.SetTag(ext.ResourceName, requestToResource(req.Method, req.URL.Path))
+ }))
+}
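+
+// It is wired up through the rest.Config's WrapTransport field (see the
+// package example):
+//
+//	cfg.WrapTransport = WrapRoundTripper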
+
+func requestToResource(method, path string) string {
+ if !strings.HasPrefix(path, prefixAPI) {
+ return method
+ }
+
+ var out strings.Builder
+ out.WriteString(method)
+ out.WriteByte(' ')
+
+ path = strings.TrimPrefix(path, prefixAPI)
+
+ if strings.HasPrefix(path, prefixWatch) {
+ // strip out /watch
+ path = strings.TrimPrefix(path, prefixWatch)
+ out.WriteString(prefixWatch)
+ }
+
+ // {type}/{name}
+ var lastType string
+ for i, str := range strings.Split(path, "/") {
+ if i > 0 {
+ out.WriteByte('/')
+ }
+ if i%2 == 0 {
+ lastType = str
+ out.WriteString(lastType)
+ } else {
+ // parse {name}
+ out.WriteString(typeToPlaceholder(lastType))
+ }
+ }
+ return out.String()
+}
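+
+// For instance, requestToResource("GET",
+// "/api/v1/namespaces/default/pods/pod-1234/proxy") returns
+// "GET namespaces/{namespace}/pods/{name}/proxy", grouping requests under one
+// resource regardless of the concrete namespace or pod name.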
+
+func typeToPlaceholder(typ string) string {
+ switch typ {
+ case "namespaces":
+ return "{namespace}"
+ case "proxy":
+ return "{path}"
+ default:
+ return "{name}"
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/k8s.io/client-go/kubernetes/kubernetes_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/k8s.io/client-go/kubernetes/kubernetes_test.go
new file mode 100644
index 00000000..75ae2e13
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/k8s.io/client-go/kubernetes/kubernetes_test.go
@@ -0,0 +1,70 @@
+package kubernetes
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+ meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/tools/clientcmd"
+ clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+)
+
+func TestPathToResource(t *testing.T) {
+ expected := map[string]string{
+ "/api/v1/componentstatuses": "componentstatuses",
+ "/api/v1/componentstatuses/NAME": "componentstatuses/{name}",
+ "/api/v1/configmaps": "configmaps",
+ "/api/v1/namespaces/default/bindings": "namespaces/{namespace}/bindings",
+ "/api/v1/namespaces/someothernamespace/configmaps": "namespaces/{namespace}/configmaps",
+ "/api/v1/namespaces/default/configmaps/some-config-map": "namespaces/{namespace}/configmaps/{name}",
+ "/api/v1/namespaces/default/persistentvolumeclaims/pvc-abcd/status": "namespaces/{namespace}/persistentvolumeclaims/{name}/status",
+ "/api/v1/namespaces/default/pods/pod-1234/proxy": "namespaces/{namespace}/pods/{name}/proxy",
+ "/api/v1/namespaces/default/pods/pod-5678/proxy/some-path": "namespaces/{namespace}/pods/{name}/proxy/{path}",
+ "/api/v1/watch/configmaps": "watch/configmaps",
+ "/api/v1/watch/namespaces": "watch/namespaces",
+ "/api/v1/watch/namespaces/default/configmaps": "watch/namespaces/{namespace}/configmaps",
+ "/api/v1/watch/namespaces/someothernamespace/configmaps/another-name": "watch/namespaces/{namespace}/configmaps/{name}",
+ }
+
+ for path, expectedResource := range expected {
+ assert.Equal(t, "GET "+expectedResource, requestToResource("GET", path), "mapping %v", path)
+ }
+}
+
+func TestKubernetes(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("Hello World"))
+ }))
+ defer s.Close()
+
+ cfg, err := clientcmd.BuildConfigFromKubeconfigGetter(s.URL, func() (*clientcmdapi.Config, error) {
+ return clientcmdapi.NewConfig(), nil
+ })
+ assert.NoError(t, err)
+ cfg.WrapTransport = WrapRoundTripper
+
+ client, err := kubernetes.NewForConfig(cfg)
+ assert.NoError(t, err)
+
+ client.Core().Namespaces().List(meta_v1.ListOptions{})
+
+ spans := mt.FinishedSpans()
+ assert.Len(t, spans, 1)
+ {
+ s := spans[0]
+ assert.Equal(t, "http.request", s.OperationName())
+ assert.Equal(t, "kubernetes", s.Tag(ext.ServiceName))
+ assert.Equal(t, "GET namespaces", s.Tag(ext.ResourceName))
+ assert.Equal(t, "200", s.Tag(ext.HTTPCode))
+ assert.Equal(t, "GET", s.Tag(ext.HTTPMethod))
+ assert.Equal(t, "/api/v1/namespaces", s.Tag(ext.HTTPURL))
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/example_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/example_test.go
new file mode 100644
index 00000000..ba9dd611
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/example_test.go
@@ -0,0 +1,23 @@
+package http_test
+
+import (
+ "net/http"
+
+ httptrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http"
+)
+
+func Example() {
+ mux := httptrace.NewServeMux()
+ mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("Hello World!\n"))
+ })
+ http.ListenAndServe(":8080", mux)
+}
+
+func Example_withServiceName() {
+ mux := httptrace.NewServeMux(httptrace.WithServiceName("my-service"))
+ mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("Hello World!\n"))
+ })
+ http.ListenAndServe(":8080", mux)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/http.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/http.go
new file mode 100644
index 00000000..11ffeb73
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/http.go
@@ -0,0 +1,46 @@
+// Package http provides functions to trace the net/http package (https://golang.org/pkg/net/http).
+package http // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http"
+
+import (
+ "net/http"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/contrib/internal/httputil"
+)
+
+// ServeMux is an HTTP request multiplexer that traces all the incoming requests.
+type ServeMux struct {
+ *http.ServeMux
+ config *muxConfig
+}
+
+// NewServeMux allocates and returns a ServeMux augmented with the
+// global tracer.
+func NewServeMux(opts ...MuxOption) *ServeMux {
+ cfg := new(muxConfig)
+ defaults(cfg)
+ for _, fn := range opts {
+ fn(cfg)
+ }
+ return &ServeMux{
+ ServeMux: http.NewServeMux(),
+ config: cfg,
+ }
+}
+
+// ServeHTTP dispatches the request to the handler
+// whose pattern most closely matches the request URL.
+// Overriding this method is all that is needed to trace
+// every incoming request to the underlying multiplexer.
+func (mux *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	// get the resource associated with this request
+ _, route := mux.Handler(r)
+ resource := r.Method + " " + route
+ httputil.TraceAndServe(mux.ServeMux, w, r, mux.config.serviceName, resource)
+}
+
+// WrapHandler wraps an http.Handler with tracing using the given service and resource.
+func WrapHandler(h http.Handler, service, resource string) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ httputil.TraceAndServe(h, w, req, service, resource)
+ })
+}
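+
+// A short sketch (handler and names are illustrative): any plain handler can
+// be traced without a ServeMux by wrapping it directly.
+//
+//	http.ListenAndServe(":8080", WrapHandler(myHandler, "my-service", "my-resource"))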
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/http_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/http_test.go
new file mode 100644
index 00000000..cc528aa8
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/http_test.go
@@ -0,0 +1,106 @@
+package http
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+)
+
+func TestHttpTracer200(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ url := "/200"
+ r := httptest.NewRequest("GET", url, nil)
+ w := httptest.NewRecorder()
+ router().ServeHTTP(w, r)
+
+ assert := assert.New(t)
+ assert.Equal(200, w.Code)
+ assert.Equal("OK\n", w.Body.String())
+
+ spans := mt.FinishedSpans()
+ assert.Equal(1, len(spans))
+
+ s := spans[0]
+ assert.Equal("http.request", s.OperationName())
+ assert.Equal("my-service", s.Tag(ext.ServiceName))
+ assert.Equal("GET "+url, s.Tag(ext.ResourceName))
+ assert.Equal("200", s.Tag(ext.HTTPCode))
+ assert.Equal("GET", s.Tag(ext.HTTPMethod))
+ assert.Equal(url, s.Tag(ext.HTTPURL))
+ assert.Equal(nil, s.Tag(ext.Error))
+}
+
+func TestHttpTracer500(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ // Send and verify a 500 request
+ url := "/500"
+ r := httptest.NewRequest("GET", url, nil)
+ w := httptest.NewRecorder()
+ router().ServeHTTP(w, r)
+
+ assert := assert.New(t)
+ assert.Equal(500, w.Code)
+ assert.Equal("500!\n", w.Body.String())
+
+ spans := mt.FinishedSpans()
+ assert.Equal(1, len(spans))
+
+ s := spans[0]
+ assert.Equal("http.request", s.OperationName())
+ assert.Equal("my-service", s.Tag(ext.ServiceName))
+ assert.Equal("GET "+url, s.Tag(ext.ResourceName))
+ assert.Equal("500", s.Tag(ext.HTTPCode))
+ assert.Equal("GET", s.Tag(ext.HTTPMethod))
+ assert.Equal(url, s.Tag(ext.HTTPURL))
+ assert.Equal("500: Internal Server Error", s.Tag(ext.Error).(error).Error())
+}
+
+func TestWrapHandler200(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+ assert := assert.New(t)
+
+ handler := WrapHandler(http.HandlerFunc(handler200), "my-service", "my-resource")
+
+ url := "/"
+ r := httptest.NewRequest("GET", url, nil)
+ w := httptest.NewRecorder()
+ handler.ServeHTTP(w, r)
+ assert.Equal(200, w.Code)
+ assert.Equal("OK\n", w.Body.String())
+
+ spans := mt.FinishedSpans()
+ assert.Equal(1, len(spans))
+
+ s := spans[0]
+ assert.Equal("http.request", s.OperationName())
+ assert.Equal("my-service", s.Tag(ext.ServiceName))
+ assert.Equal("my-resource", s.Tag(ext.ResourceName))
+ assert.Equal("200", s.Tag(ext.HTTPCode))
+ assert.Equal("GET", s.Tag(ext.HTTPMethod))
+ assert.Equal(url, s.Tag(ext.HTTPURL))
+ assert.Equal(nil, s.Tag(ext.Error))
+}
+
+func router() http.Handler {
+ mux := NewServeMux(WithServiceName("my-service"))
+ mux.HandleFunc("/200", handler200)
+ mux.HandleFunc("/500", handler500)
+ return mux
+}
+
+func handler200(w http.ResponseWriter, r *http.Request) {
+ w.Write([]byte("OK\n"))
+}
+
+func handler500(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, "500!", http.StatusInternalServerError)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/option.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/option.go
new file mode 100644
index 00000000..8f0b6057
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/option.go
@@ -0,0 +1,56 @@
+package http
+
+import (
+ "net/http"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+)
+
+type muxConfig struct{ serviceName string }
+
+// MuxOption represents an option that can be passed to NewServeMux.
+type MuxOption func(*muxConfig)
+
+func defaults(cfg *muxConfig) {
+ cfg.serviceName = "http.router"
+}
+
+// WithServiceName sets the given service name for the returned ServeMux.
+func WithServiceName(name string) MuxOption {
+ return func(cfg *muxConfig) {
+ cfg.serviceName = name
+ }
+}
+
+// A RoundTripperBeforeFunc can be used to modify a span before an http
+// RoundTrip is made.
+type RoundTripperBeforeFunc func(*http.Request, ddtrace.Span)
+
+// A RoundTripperAfterFunc can be used to modify a span after an http
+// RoundTrip is made. It is possible for the http Response to be nil.
+type RoundTripperAfterFunc func(*http.Response, ddtrace.Span)
+
+type roundTripperConfig struct {
+ before RoundTripperBeforeFunc
+ after RoundTripperAfterFunc
+}
+
+// A RoundTripperOption represents an option that can be passed to
+// WrapRoundTripper.
+type RoundTripperOption func(*roundTripperConfig)
+
+// WithBefore adds a RoundTripperBeforeFunc to the RoundTripper
+// config.
+func WithBefore(f RoundTripperBeforeFunc) RoundTripperOption {
+ return func(cfg *roundTripperConfig) {
+ cfg.before = f
+ }
+}
+
+// WithAfter adds a RoundTripperAfterFunc to the RoundTripper
+// config.
+func WithAfter(f RoundTripperAfterFunc) RoundTripperOption {
+ return func(cfg *roundTripperConfig) {
+ cfg.after = f
+ }
+}
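+
+// The two hooks compose with WrapRoundTripper; a sketch, where the tag name
+// is illustrative:
+//
+//	rt := WrapRoundTripper(http.DefaultTransport,
+//		WithBefore(func(req *http.Request, span ddtrace.Span) {
+//			span.SetTag("http.host", req.URL.Host)
+//		}))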
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/roundtripper.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/roundtripper.go
new file mode 100644
index 00000000..cc759832
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/roundtripper.go
@@ -0,0 +1,68 @@
+package http
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "os"
+ "strconv"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+const defaultResourceName = "http.request"
+
+type roundTripper struct {
+ base http.RoundTripper
+ cfg *roundTripperConfig
+}
+
+func (rt *roundTripper) RoundTrip(req *http.Request) (res *http.Response, err error) {
+ span, _ := tracer.StartSpanFromContext(req.Context(), "http.request",
+ tracer.SpanType(ext.SpanTypeHTTP),
+ tracer.ResourceName(defaultResourceName),
+ tracer.Tag(ext.HTTPMethod, req.Method),
+ tracer.Tag(ext.HTTPURL, req.URL.Path),
+ )
+ defer func() {
+ if rt.cfg.after != nil {
+ rt.cfg.after(res, span)
+ }
+ span.Finish(tracer.WithError(err))
+ }()
+ if rt.cfg.before != nil {
+ rt.cfg.before(req, span)
+ }
+ // inject the span context into the http request
+ err = tracer.Inject(span.Context(), tracer.HTTPHeadersCarrier(req.Header))
+ if err != nil {
+ // this should never happen
+ fmt.Fprintf(os.Stderr, "failed to inject http headers for round tripper: %v\n", err)
+ }
+ res, err = rt.base.RoundTrip(req)
+ if err != nil {
+ span.SetTag("http.errors", err.Error())
+ } else {
+ span.SetTag(ext.HTTPCode, strconv.Itoa(res.StatusCode))
+ // treat 5XX as errors
+ if res.StatusCode/100 == 5 {
+ span.SetTag("http.errors", res.Status)
+ err = errors.New(res.Status)
+ }
+ }
+ return res, err
+}
+
+// WrapRoundTripper returns a new RoundTripper which traces all requests sent
+// over the transport.
+func WrapRoundTripper(rt http.RoundTripper, opts ...RoundTripperOption) http.RoundTripper {
+ cfg := new(roundTripperConfig)
+ for _, opt := range opts {
+ opt(cfg)
+ }
+ return &roundTripper{
+ base: rt,
+ cfg: cfg,
+ }
+}
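+
+// Typical usage is to install the wrapped transport on an http.Client
+// (the URL is illustrative):
+//
+//	client := &http.Client{Transport: WrapRoundTripper(http.DefaultTransport)}
+//	client.Get("http://example.com/") // traced as an "http.request" span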
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/roundtripper_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/roundtripper_test.go
new file mode 100644
index 00000000..cc9e4077
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/net/http/roundtripper_test.go
@@ -0,0 +1,61 @@
+package http
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+func TestRoundTripper(t *testing.T) {
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ spanctx, err := tracer.Extract(tracer.HTTPHeadersCarrier(r.Header))
+ assert.NoError(t, err)
+
+ span := tracer.StartSpan("test",
+ tracer.ChildOf(spanctx))
+ defer span.Finish()
+
+ w.Write([]byte("Hello World"))
+ }))
+ defer s.Close()
+
+ rt := WrapRoundTripper(http.DefaultTransport,
+ WithBefore(func(req *http.Request, span ddtrace.Span) {
+ span.SetTag("CalledBefore", true)
+ }),
+ WithAfter(func(res *http.Response, span ddtrace.Span) {
+ span.SetTag("CalledAfter", true)
+ }))
+
+ client := &http.Client{
+ Transport: rt,
+ }
+
+ client.Get(s.URL + "/hello/world")
+
+ spans := mt.FinishedSpans()
+ assert.Len(t, spans, 2)
+ assert.Equal(t, spans[0].TraceID(), spans[1].TraceID())
+
+ s0 := spans[0]
+ assert.Equal(t, "test", s0.OperationName())
+ assert.Equal(t, "test", s0.Tag(ext.ResourceName))
+
+ s1 := spans[1]
+ assert.Equal(t, "http.request", s1.OperationName())
+ assert.Equal(t, "http.request", s1.Tag(ext.ResourceName))
+ assert.Equal(t, "200", s1.Tag(ext.HTTPCode))
+ assert.Equal(t, "GET", s1.Tag(ext.HTTPMethod))
+ assert.Equal(t, "/hello/world", s1.Tag(ext.HTTPURL))
+ assert.Equal(t, true, s1.Tag("CalledBefore"))
+ assert.Equal(t, true, s1.Tag("CalledAfter"))
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/olivere/elastic/elastictrace.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/olivere/elastic/elastictrace.go
new file mode 100644
index 00000000..fbedfe94
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/olivere/elastic/elastictrace.go
@@ -0,0 +1,116 @@
+// Package elastic provides functions to trace the gopkg.in/olivere/elastic.v{3,5} packages.
+package elastic // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/olivere/elastic"
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "regexp"
+ "strconv"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+// NewHTTPClient returns a new http.Client which traces requests under the given service name.
+func NewHTTPClient(opts ...ClientOption) *http.Client {
+ cfg := new(clientConfig)
+ defaults(cfg)
+ for _, fn := range opts {
+ fn(cfg)
+ }
+ return &http.Client{Transport: &httpTransport{config: cfg}}
+}
+
+// httpTransport is a traced HTTP transport that captures Elasticsearch spans.
+type httpTransport struct{ config *clientConfig }
+
+// bodyCutoff specifies the maximum number of bytes that will be stored as a tag
+// value obtained from an HTTP request or response body.
+var bodyCutoff = 5 * 1024
+
+// RoundTrip satisfies the http.RoundTripper interface: it wraps the underlying
+// Transport and captures a span for each Elasticsearch request.
+func (t *httpTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ url := req.URL.Path
+ method := req.Method
+ resource := quantize(url, method)
+ span, _ := tracer.StartSpanFromContext(req.Context(), "elasticsearch.query",
+ tracer.ServiceName(t.config.serviceName),
+ tracer.SpanType(ext.SpanTypeElasticSearch),
+ tracer.ResourceName(resource),
+ tracer.Tag("elasticsearch.method", method),
+ tracer.Tag("elasticsearch.url", url),
+ tracer.Tag("elasticsearch.params", req.URL.Query().Encode()),
+ )
+ defer span.Finish()
+
+ snip, rc, err := peek(req.Body, int(req.ContentLength), bodyCutoff)
+ if err == nil {
+ span.SetTag("elasticsearch.body", snip)
+ }
+ req.Body = rc
+ // process using the standard transport
+ res, err := t.config.transport.RoundTrip(req)
+ if err != nil {
+ // roundtrip error
+ span.SetTag(ext.Error, err)
+ } else if res.StatusCode < 200 || res.StatusCode > 299 {
+ // HTTP error
+ snip, rc, err := peek(res.Body, int(res.ContentLength), bodyCutoff)
+ if err != nil {
+ snip = http.StatusText(res.StatusCode)
+ }
+ span.SetTag(ext.Error, errors.New(snip))
+ res.Body = rc
+ }
+ if res != nil {
+ span.SetTag(ext.HTTPCode, strconv.Itoa(res.StatusCode))
+ }
+ return res, err
+}
+
+var (
+ idRegexp = regexp.MustCompile("/([0-9]+)([/\\?]|$)")
+ idPlaceholder = []byte("/?$2")
+ indexRegexp = regexp.MustCompile("[0-9]{2,}")
+ indexPlaceholder = []byte("?")
+)
+
+// quantize turns an Elasticsearch request into a meaningful resource name.
+// We quantize based on the method and URL, with some cleanup applied to the
+// URL: IDs are generalized, as are (potentially) timestamped indices.
+func quantize(url, method string) string {
+ quantizedURL := idRegexp.ReplaceAll([]byte(url), idPlaceholder)
+ quantizedURL = indexRegexp.ReplaceAll(quantizedURL, indexPlaceholder)
+ return fmt.Sprintf("%s %s", method, quantizedURL)
+}
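+
+// For example, quantize("/logs_2016_05/event/123", "PUT") returns
+// "PUT /logs_?_?/event/?": the numeric ID and the timestamped index segments
+// are both replaced with placeholders.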
+
+// peek attempts to return the first n bytes, as a string, from the provided io.ReadCloser.
+// It returns a new io.ReadCloser which points to the same underlying stream and can be read
+// from to access the entire data including the snippet. max is used to specify the length
+// of the stream contained in the reader. If unknown, it should be -1. If 0 < max < n it
+// will override n.
+func peek(rc io.ReadCloser, max int, n int) (string, io.ReadCloser, error) {
+ if rc == nil {
+ return "", rc, errors.New("empty stream")
+ }
+ if max > 0 && max < n {
+ n = max
+ }
+ r := bufio.NewReaderSize(rc, n)
+ rc2 := struct {
+ io.Reader
+ io.Closer
+ }{
+ Reader: r,
+ Closer: rc,
+ }
+ snip, err := r.Peek(n)
+ if err == io.EOF {
+ err = nil
+ }
+ return string(snip), rc2, err
+}
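+
+// For example, peek(rc, 7, 3) over a stream containing "ABCDEFG" yields the
+// snippet "ABC", while reading the returned io.ReadCloser still produces the
+// full "ABCDEFG".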
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/olivere/elastic/elastictrace_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/olivere/elastic/elastictrace_test.go
new file mode 100644
index 00000000..c8fa0b56
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/olivere/elastic/elastictrace_test.go
@@ -0,0 +1,345 @@
+package elastic
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+
+ "github.com/stretchr/testify/assert"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+ elasticv3 "gopkg.in/olivere/elastic.v3"
+ elasticv5 "gopkg.in/olivere/elastic.v5"
+
+ "testing"
+)
+
+const debug = false
+
+func TestMain(m *testing.M) {
+ _, ok := os.LookupEnv("INTEGRATION")
+ if !ok {
+ fmt.Println("--- SKIP: to enable integration test, set the INTEGRATION environment variable")
+ os.Exit(0)
+ }
+ os.Exit(m.Run())
+}
+
+func TestClientV5(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ tc := NewHTTPClient(WithServiceName("my-es-service"))
+ client, err := elasticv5.NewClient(
+ elasticv5.SetURL("http://127.0.0.1:9201"),
+ elasticv5.SetHttpClient(tc),
+ elasticv5.SetSniff(false),
+ elasticv5.SetHealthcheck(false),
+ )
+ assert.NoError(err)
+
+ _, err = client.Index().
+ Index("twitter").Id("1").
+ Type("tweet").
+ BodyString(`{"user": "test", "message": "hello"}`).
+ Do(context.TODO())
+ assert.NoError(err)
+ checkPUTTrace(assert, mt)
+
+ mt.Reset()
+ _, err = client.Get().Index("twitter").Type("tweet").
+ Id("1").Do(context.TODO())
+ assert.NoError(err)
+ checkGETTrace(assert, mt)
+
+ mt.Reset()
+ _, err = client.Get().Index("not-real-index").
+ Id("1").Do(context.TODO())
+ assert.Error(err)
+ checkErrTrace(assert, mt)
+}
+
+func TestClientErrorCutoffV3(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+ oldCutoff := bodyCutoff
+ defer func() {
+ bodyCutoff = oldCutoff
+ }()
+ bodyCutoff = 10
+
+ tc := NewHTTPClient(WithServiceName("my-es-service"))
+ client, err := elasticv5.NewClient(
+ elasticv5.SetURL("http://127.0.0.1:9200"),
+ elasticv5.SetHttpClient(tc),
+ elasticv5.SetSniff(false),
+ elasticv5.SetHealthcheck(false),
+ )
+ assert.NoError(err)
+
+ _, err = client.Index().
+ Index("twitter").Id("1").
+ Type("tweet").
+ BodyString(`{"user": "test", "message": "hello"}`).
+ Do(context.TODO())
+ assert.NoError(err)
+
+ span := mt.FinishedSpans()[0]
+ assert.Equal(`{"user": "`, span.Tag("elasticsearch.body"))
+}
+
+func TestClientErrorCutoffV5(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+ oldCutoff := bodyCutoff
+ defer func() {
+ bodyCutoff = oldCutoff
+ }()
+ bodyCutoff = 10
+
+ tc := NewHTTPClient(WithServiceName("my-es-service"))
+ client, err := elasticv5.NewClient(
+ elasticv5.SetURL("http://127.0.0.1:9201"),
+ elasticv5.SetHttpClient(tc),
+ elasticv5.SetSniff(false),
+ elasticv5.SetHealthcheck(false),
+ )
+ assert.NoError(err)
+
+ _, err = client.Get().Index("not-real-index").
+ Id("1").Do(context.TODO())
+ assert.Error(err)
+
+ span := mt.FinishedSpans()[0]
+ assert.Equal(`{"error":{`, span.Tag(ext.Error).(error).Error())
+}
+
+func TestClientV3(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ tc := NewHTTPClient(WithServiceName("my-es-service"))
+ client, err := elasticv3.NewClient(
+ elasticv3.SetURL("http://127.0.0.1:9200"),
+ elasticv3.SetHttpClient(tc),
+ elasticv3.SetSniff(false),
+ elasticv3.SetHealthcheck(false),
+ )
+ assert.NoError(err)
+
+ _, err = client.Index().
+ Index("twitter").Id("1").
+ Type("tweet").
+ BodyString(`{"user": "test", "message": "hello"}`).
+ DoC(context.TODO())
+ assert.NoError(err)
+ checkPUTTrace(assert, mt)
+
+ mt.Reset()
+ _, err = client.Get().Index("twitter").Type("tweet").
+ Id("1").DoC(context.TODO())
+ assert.NoError(err)
+ checkGETTrace(assert, mt)
+
+ mt.Reset()
+ _, err = client.Get().Index("not-real-index").
+ Id("1").DoC(context.TODO())
+ assert.Error(err)
+ checkErrTrace(assert, mt)
+}
+
+func TestClientV3Failure(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ tc := NewHTTPClient(WithServiceName("my-es-service"))
+ client, err := elasticv3.NewClient(
+		// nonexistent service; the request must fail
+ elasticv3.SetURL("http://127.0.0.1:29200"),
+ elasticv3.SetHttpClient(tc),
+ elasticv3.SetSniff(false),
+ elasticv3.SetHealthcheck(false),
+ )
+ assert.NoError(err)
+
+ _, err = client.Index().
+ Index("twitter").Id("1").
+ Type("tweet").
+ BodyString(`{"user": "test", "message": "hello"}`).
+ DoC(context.TODO())
+ assert.Error(err)
+
+ spans := mt.FinishedSpans()
+ checkPUTTrace(assert, mt)
+
+ assert.NotEmpty(spans[0].Tag(ext.Error))
+ assert.Equal("*net.OpError", fmt.Sprintf("%T", spans[0].Tag(ext.Error).(error)))
+}
+
+func TestClientV5Failure(t *testing.T) {
+ assert := assert.New(t)
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ tc := NewHTTPClient(WithServiceName("my-es-service"))
+ client, err := elasticv5.NewClient(
+		// nonexistent service; the request must fail
+ elasticv5.SetURL("http://127.0.0.1:29201"),
+ elasticv5.SetHttpClient(tc),
+ elasticv5.SetSniff(false),
+ elasticv5.SetHealthcheck(false),
+ )
+ assert.NoError(err)
+
+ _, err = client.Index().
+ Index("twitter").Id("1").
+ Type("tweet").
+ BodyString(`{"user": "test", "message": "hello"}`).
+ Do(context.TODO())
+ assert.Error(err)
+
+ spans := mt.FinishedSpans()
+ checkPUTTrace(assert, mt)
+
+ assert.NotEmpty(spans[0].Tag(ext.Error))
+ assert.Equal("*net.OpError", fmt.Sprintf("%T", spans[0].Tag(ext.Error).(error)))
+}
+
+func checkPUTTrace(assert *assert.Assertions, mt mocktracer.Tracer) {
+ span := mt.FinishedSpans()[0]
+ assert.Equal("my-es-service", span.Tag(ext.ServiceName))
+ assert.Equal("PUT /twitter/tweet/?", span.Tag(ext.ResourceName))
+ assert.Equal("/twitter/tweet/1", span.Tag("elasticsearch.url"))
+ assert.Equal("PUT", span.Tag("elasticsearch.method"))
+ assert.Equal(`{"user": "test", "message": "hello"}`, span.Tag("elasticsearch.body"))
+}
+
+func checkGETTrace(assert *assert.Assertions, mt mocktracer.Tracer) {
+ span := mt.FinishedSpans()[0]
+ assert.Equal("my-es-service", span.Tag(ext.ServiceName))
+ assert.Equal("GET /twitter/tweet/?", span.Tag(ext.ResourceName))
+ assert.Equal("/twitter/tweet/1", span.Tag("elasticsearch.url"))
+ assert.Equal("GET", span.Tag("elasticsearch.method"))
+}
+
+func checkErrTrace(assert *assert.Assertions, mt mocktracer.Tracer) {
+ span := mt.FinishedSpans()[0]
+ assert.Equal("my-es-service", span.Tag(ext.ServiceName))
+ assert.Equal("GET /not-real-index/_all/?", span.Tag(ext.ResourceName))
+ assert.Equal("/not-real-index/_all/1", span.Tag("elasticsearch.url"))
+ assert.NotEmpty(span.Tag(ext.Error))
+ assert.Equal("*errors.errorString", fmt.Sprintf("%T", span.Tag(ext.Error).(error)))
+}
+
+func TestQuantize(t *testing.T) {
+ for _, tc := range []struct {
+ url, method string
+ expected string
+ }{
+ {
+ url: "/twitter/tweets",
+ method: "POST",
+ expected: "POST /twitter/tweets",
+ },
+ {
+ url: "/logs_2016_05/event/_search",
+ method: "GET",
+ expected: "GET /logs_?_?/event/_search",
+ },
+ {
+ url: "/twitter/tweets/123",
+ method: "GET",
+ expected: "GET /twitter/tweets/?",
+ },
+ {
+ url: "/logs_2016_05/event/123",
+ method: "PUT",
+ expected: "PUT /logs_?_?/event/?",
+ },
+ } {
+ assert.Equal(t, tc.expected, quantize(tc.url, tc.method))
+ }
+}
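+
+// A minimal sketch (an assumption, not necessarily the vendored
+// implementation) of the quantize helper exercised above: collapse every run
+// of digits in the URL into "?" and prefix the HTTP method:
+//
+//    var idRegexp = regexp.MustCompile(`[0-9]+`)
+//
+//    func quantize(url, method string) string {
+//        return method + " " + idRegexp.ReplaceAllString(url, "?")
+//    }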
+
+func TestPeek(t *testing.T) {
+ assert := assert.New(t)
+
+ for _, tt := range [...]struct {
+ max int // content length
+ txt string // stream
+ n int // bytes to peek at
+ snip string // expected snippet
+ err error // expected error
+ }{
+ 0: {
+ // extract 3 bytes from a content of length 7
+ txt: "ABCDEFG",
+ max: 7,
+ n: 3,
+ snip: "ABC",
+ },
+ 1: {
+ // extract 7 bytes from a content of length 7
+ txt: "ABCDEFG",
+ max: 7,
+ n: 7,
+ snip: "ABCDEFG",
+ },
+ 2: {
+ // extract 100 bytes from a content of length 9 (impossible scenario)
+ txt: "ABCDEFG",
+ max: 9,
+ n: 100,
+ snip: "ABCDEFG",
+ },
+ 3: {
+ // extract 5 bytes from a content of length 2 (impossible scenario)
+ txt: "ABCDEFG",
+ max: 2,
+ n: 5,
+ snip: "AB",
+ },
+ 4: {
+ txt: "ABCDEFG",
+ max: 0,
+ n: 1,
+ snip: "A",
+ },
+ 5: {
+ n: 4,
+ max: 4,
+ err: errors.New("empty stream"),
+ },
+ 6: {
+ txt: "ABCDEFG",
+ n: 4,
+ max: -1,
+ snip: "ABCD",
+ },
+ } {
+ var readcloser io.ReadCloser
+ if tt.txt != "" {
+ readcloser = ioutil.NopCloser(bytes.NewBufferString(tt.txt))
+ }
+ snip, rc, err := peek(readcloser, tt.max, tt.n)
+ assert.Equal(tt.err, err)
+ assert.Equal(tt.snip, snip)
+
+ if readcloser != nil {
+ // if a non-nil io.ReadCloser was sent, the returned io.ReadCloser
+ // must always return the entire original content.
+ all, err := ioutil.ReadAll(rc)
+ assert.Nil(err)
+ assert.Equal(tt.txt, string(all))
+ }
+ }
+}
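+
+// A minimal sketch (an assumption, not necessarily the vendored
+// implementation) of the peek helper's contract as exercised above: read up
+// to n bytes (capped by the declared content length when max > 0) and return
+// an io.ReadCloser that still replays the entire original stream:
+//
+//    func peek(rc io.ReadCloser, max, n int) (string, io.ReadCloser, error) {
+//        if rc == nil {
+//            return "", rc, errors.New("empty stream")
+//        }
+//        if max > 0 && n > max {
+//            n = max
+//        }
+//        r := bufio.NewReaderSize(rc, n)
+//        snip, err := r.Peek(n)
+//        if err == io.EOF {
+//            err = nil // a stream shorter than n is not an error
+//        }
+//        return string(snip), struct {
+//            io.Reader
+//            io.Closer
+//        }{r, rc}, err
+//    }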
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/olivere/elastic/example_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/olivere/elastic/example_test.go
new file mode 100644
index 00000000..5bde6454
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/olivere/elastic/example_test.go
@@ -0,0 +1,62 @@
+package elastic_test
+
+import (
+ "context"
+
+ elastictrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/olivere/elastic"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+ elasticv3 "gopkg.in/olivere/elastic.v3"
+ elasticv5 "gopkg.in/olivere/elastic.v5"
+)
+
+// To start tracing elastic.v5 requests, create a traced HTTP client with
+// NewHTTPClient and use it when initializing the elastic.Client.
+func Example_v5() {
+ tc := elastictrace.NewHTTPClient(elastictrace.WithServiceName("my-es-service"))
+ client, _ := elasticv5.NewClient(
+ elasticv5.SetURL("http://127.0.0.1:9200"),
+ elasticv5.SetHttpClient(tc),
+ )
+
+ // Spans are emitted for all requests made through the client.
+ client.Index().
+ Index("twitter").Type("tweet").Index("1").
+ BodyString(`{"user": "test", "message": "hello"}`).
+ Do(context.Background())
+
+ // Use a context to pass information down the call chain
+ root, ctx := tracer.StartSpanFromContext(context.Background(), "parent.request",
+ tracer.ServiceName("web"),
+ tracer.ResourceName("/tweet/1"),
+ )
+ client.Get().
+ Index("twitter").Type("tweet").Index("1").
+ Do(ctx)
+ root.Finish()
+}
+
+// To trace elastic.v3, create the traced HTTP client in the same way, but make all
+// requests with the DoC() call so that the request context is passed along.
+func Example_v3() {
+ tc := elastictrace.NewHTTPClient(elastictrace.WithServiceName("my-es-service"))
+ client, _ := elasticv3.NewClient(
+ elasticv3.SetURL("http://127.0.0.1:9200"),
+ elasticv3.SetHttpClient(tc),
+ )
+
+ // Spans are emitted for all requests made through the client.
+ client.Index().
+ Index("twitter").Type("tweet").Index("1").
+ BodyString(`{"user": "test", "message": "hello"}`).
+ DoC(context.Background())
+
+ // Use a context to pass information down the call chain
+ root, ctx := tracer.StartSpanFromContext(context.Background(), "parent.request",
+ tracer.ServiceName("web"),
+ tracer.ResourceName("/tweet/1"),
+ )
+ client.Get().
+ Index("twitter").Type("tweet").Index("1").
+ DoC(ctx)
+ root.Finish()
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/olivere/elastic/option.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/olivere/elastic/option.go
new file mode 100644
index 00000000..6a24607e
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/olivere/elastic/option.go
@@ -0,0 +1,30 @@
+package elastic
+
+import "net/http"
+
+type clientConfig struct {
+ serviceName string
+ transport *http.Transport
+}
+
+// ClientOption represents an option that can be used when creating a client.
+type ClientOption func(*clientConfig)
+
+func defaults(cfg *clientConfig) {
+ cfg.serviceName = "elastic.client"
+ cfg.transport = http.DefaultTransport.(*http.Transport)
+}
+
+// WithServiceName sets the given service name for the client.
+func WithServiceName(name string) ClientOption {
+ return func(cfg *clientConfig) {
+ cfg.serviceName = name
+ }
+}
+
+// WithTransport sets the given transport as an http.Transport for the client.
+func WithTransport(t *http.Transport) ClientOption {
+ return func(cfg *clientConfig) {
+ cfg.transport = t
+ }
+}
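+
+// A minimal sketch (an assumption; no such constructor appears in this file)
+// of how these functional options compose: defaults are applied first and
+// each ClientOption then mutates the configuration in order:
+//
+//    func newConfig(opts ...ClientOption) *clientConfig {
+//        cfg := new(clientConfig)
+//        defaults(cfg)
+//        for _, fn := range opts {
+//            fn(cfg)
+//        }
+//        return cfg
+//    }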
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/Gopkg.toml b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/Gopkg.toml
new file mode 100644
index 00000000..1fb732af
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/Gopkg.toml
@@ -0,0 +1,9 @@
+ignored = [
+ # For these libraries we should always ensure compatibility with the latest:
+ "github.com/opentracing/*",
+ "golang.org/x/*",
+]
+
+[[constraint]]
+ name = "github.com/tinylib/msgp"
+ revision = "3b5c87ab5fb00c660bf85b888445d9a01db64db4" # Feb 15, 2018
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ddtrace.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ddtrace.go
new file mode 100644
index 00000000..ea787454
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ddtrace.go
@@ -0,0 +1,108 @@
+// Package ddtrace contains the interfaces that specify the implementations of Datadog's
+// tracing library, as well as a set of sub-packages containing various implementations:
+// our native implementation ("tracer"), a wrapper that can be used with Opentracing
+// ("opentracer") and a mock tracer to be used for testing ("mocktracer"). Additionally,
+// package "ext" provides a set of tag names and values specific to Datadog's APM product.
+//
+// To get started, visit the documentation for any of the packages you'd like to begin
+// with by accessing the subdirectories of this package: https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace#pkg-subdirectories.
+package ddtrace // import "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+
+import "time"
+
+// Tracer specifies an implementation of the Datadog tracer which allows starting
+// and propagating spans. The official implementation is exposed as functions
+// within the "tracer" package.
+type Tracer interface {
+ // StartSpan starts a span with the given operation name and options.
+ StartSpan(operationName string, opts ...StartSpanOption) Span
+
+ // Extract extracts a span context from a given carrier. Note that baggage item
+ // keys will always be lower-cased to maintain consistency. It is impossible to
+ // maintain the original casing due to MIME header canonicalization standards.
+ Extract(carrier interface{}) (SpanContext, error)
+
+ // Inject injects a span context into the given carrier.
+ Inject(context SpanContext, carrier interface{}) error
+
+ // Stop stops the active tracer and sets the global tracer to a no-op. Calls to
+ // Stop should be idempotent.
+ Stop()
+}
+
+// Span represents a chunk of computation time. Spans have names, durations,
+// timestamps and other metadata. A Tracer is used to create hierarchies of
+// spans in a request, and to buffer and submit them to the server.
+type Span interface {
+ // SetTag sets a key/value pair as metadata on the span.
+ SetTag(key string, value interface{})
+
+ // SetOperationName sets the operation name for this span. An operation name should be
+ // a representative name for a group of spans (e.g. "grpc.server" or "http.request").
+ SetOperationName(operationName string)
+
+ // BaggageItem returns the baggage item held by the given key.
+ BaggageItem(key string) string
+
+ // SetBaggageItem sets a new baggage item at the given key. The baggage
+ // item should propagate to all descendant spans, both in- and cross-process.
+ SetBaggageItem(key, val string)
+
+ // Finish finishes the current span with the given options. Finish calls should be idempotent.
+ Finish(opts ...FinishOption)
+
+ // Context returns the SpanContext of this Span.
+ Context() SpanContext
+}
+
+// SpanContext represents a span state that can propagate to descendant spans
+// and across process boundaries. It contains all the information needed to
+// spawn a direct descendant of the span that it belongs to. It can be used
+// to enable distributed tracing by propagating it using the provided interfaces.
+type SpanContext interface {
+ // SpanID returns the span ID that this context is carrying.
+ SpanID() uint64
+
+ // TraceID returns the trace ID that this context is carrying.
+ TraceID() uint64
+
+ // ForeachBaggageItem provides an iterator over the key/value pairs set as
+ // baggage within this context. Iteration stops when the handler returns
+ // false.
+ ForeachBaggageItem(handler func(k, v string) bool)
+}
+
+// StartSpanOption is a configuration option that can be used with a Tracer's StartSpan method.
+type StartSpanOption func(cfg *StartSpanConfig)
+
+// FinishOption is a configuration option that can be used with a Span's Finish method.
+type FinishOption func(cfg *FinishConfig)
+
+// FinishConfig holds the configuration for finishing a span. It is usually passed around by
+// reference to one or more FinishOption functions which shape it into its final form.
+type FinishConfig struct {
+ // FinishTime represents the time that should be set as finishing time for the
+ // span. Implementations should use the current time when FinishTime.IsZero().
+ FinishTime time.Time
+
+ // Error holds an optional error that should be set on the span before
+ // finishing.
+ Error error
+}
+
+// StartSpanConfig holds the configuration for starting a new span. It is usually passed
+// around by reference to one or more StartSpanOption functions which shape it into its
+// final form.
+type StartSpanConfig struct {
+ // Parent holds the SpanContext that should be used as a parent for the
+ // new span. If nil, implementations should return a root span.
+ Parent SpanContext
+
+ // StartTime holds the time that should be used as the start time of the span.
+ // Implementations should use the current time when StartTime.IsZero().
+ StartTime time.Time
+
+ // Tags holds a set of key/value pairs that should be set as metadata on the
+ // new span.
+ Tags map[string]interface{}
+}
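+
+// As an illustration, a StartSpanOption is just a function mutating a
+// StartSpanConfig. A hypothetical option (not part of this package) setting a
+// tag could be written as:
+//
+//    func WithTag(k string, v interface{}) StartSpanOption {
+//        return func(cfg *StartSpanConfig) {
+//            if cfg.Tags == nil {
+//                cfg.Tags = make(map[string]interface{})
+//            }
+//            cfg.Tags[k] = v
+//        }
+//    }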
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/example_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/example_test.go
new file mode 100644
index 00000000..96f28437
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/example_test.go
@@ -0,0 +1,73 @@
+package ddtrace_test
+
+import (
+ "io/ioutil"
+ "log"
+
+ opentracing "github.com/opentracing/opentracing-go"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+// The below example illustrates a simple use case using the "tracer" package,
+// our native Datadog APM tracing client integration. For thorough documentation
+// and further examples, visit its own godoc page.
+func Example_datadog() {
+ // Start the tracer and defer the Stop method.
+ tracer.Start(tracer.WithAgentAddr("host:port"))
+ defer tracer.Stop()
+
+ // Start a root span.
+ span := tracer.StartSpan("get.data")
+ defer span.Finish()
+
+ // Create a child of it, computing the time needed to read a file.
+ child := tracer.StartSpan("read.file", tracer.ChildOf(span.Context()))
+ child.SetTag(ext.ResourceName, "test.json")
+
+ // Perform an operation.
+ _, err := ioutil.ReadFile("~/test.json")
+
+ // We may finish the child span using the returned error. If it's
+ // nil, it will be disregarded.
+ child.Finish(tracer.WithError(err))
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+// The below example illustrates how to set up an opentracing.Tracer using Datadog's
+// tracer.
+func Example_opentracing() {
+ // Start a Datadog tracer, optionally providing a set of options,
+ // returning an opentracing.Tracer which wraps it.
+ t := opentracer.New(tracer.WithAgentAddr("host:port"))
+ defer tracer.Stop() // important for data integrity (flushes any leftovers)
+
+ // Use it with the Opentracing API. The (already started) Datadog tracer
+ // may be used in parallel with the Opentracing API if desired.
+ opentracing.SetGlobalTracer(t)
+}
+
+// The code below illustrates how one could use a mock tracer in tests
+// to assert that spans are created correctly.
+func Example_mocking() {
+ // Setup the test environment: start the mock tracer.
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ // Run test code: in this example we will simply create a span to illustrate.
+ tracer.StartSpan("test.span").Finish()
+
+ // Assert the results: query the mock tracer for finished spans.
+ spans := mt.FinishedSpans()
+ if len(spans) != 1 {
+ // fail
+ }
+ if spans[0].OperationName() != "test.span" {
+ // fail
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/app_types.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/app_types.go
new file mode 100644
index 00000000..b88c0550
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/app_types.go
@@ -0,0 +1,58 @@
+package ext // import "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+
+// App types determine how to categorize a trace in the Datadog application.
+// For more fine-grained behaviour, use the SpanType* constants.
+const (
+ // AppTypeWeb specifies the Web span type and can be used as a tag value
+ // for a span's SpanType tag.
+ //
+ // Deprecated: use SpanTypeWeb instead.
+ AppTypeWeb = "web"
+
+ // AppTypeDB specifies the DB span type and can be used as a tag value
+ // for a span's SpanType tag. If possible, use one of the SpanType*
+ // constants for a more accurate indication.
+ AppTypeDB = "db"
+
+ // AppTypeCache specifies the Cache span type and can be used as a tag value
+ // for a span's SpanType tag. If possible, consider using SpanTypeRedis or
+ // SpanTypeMemcached.
+ AppTypeCache = "cache"
+
+ // AppTypeRPC specifies the RPC span type and can be used as a tag value
+ // for a span's SpanType tag.
+ AppTypeRPC = "rpc"
+)
+
+// Span types have similar behaviour to "app types" and help categorize
+// traces in the Datadog application. They can also help fine-tune agent-level
+// behaviours such as obfuscation and quantization, when these are
+// enabled in the agent's configuration.
+const (
+ // SpanTypeWeb marks a span as an HTTP server request.
+ SpanTypeWeb = "web"
+
+ // SpanTypeHTTP marks a span as an HTTP client request.
+ SpanTypeHTTP = "http"
+
+ // SpanTypeSQL marks a span as an SQL operation. These spans may
+ // have an "sql.command" tag.
+ SpanTypeSQL = "sql"
+
+ // SpanTypeCassandra marks a span as a Cassandra operation. These
+ // spans may have an "sql.command" tag.
+ SpanTypeCassandra = "cassandra"
+
+ // SpanTypeRedis marks a span as a Redis operation. These spans may
+ // also have a "redis.raw_command" tag.
+ SpanTypeRedis = "redis"
+
+ // SpanTypeMemcached marks a span as a memcached operation.
+ SpanTypeMemcached = "memcached"
+
+ // SpanTypeMongoDB marks a span as a MongoDB operation.
+ SpanTypeMongoDB = "mongodb"
+
+ // SpanTypeElasticSearch marks a span as an ElasticSearch operation.
+ // These spans may also have an "elasticsearch.body" tag.
+ SpanTypeElasticSearch = "elasticsearch"
+)
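+
+// Illustrative usage: a span type is applied through the SpanType tag key
+// defined in this package, e.g.
+//
+//    span.SetTag(ext.SpanType, ext.SpanTypeElasticSearch)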
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/cassandra.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/cassandra.go
new file mode 100644
index 00000000..c3569cda
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/cassandra.go
@@ -0,0 +1,21 @@
+package ext
+
+const (
+ // CassandraQuery is the tag name used for cassandra queries.
+ CassandraQuery = "cassandra.query"
+
+ // CassandraConsistencyLevel is the tag name to set for the consistency level.
+ CassandraConsistencyLevel = "cassandra.consistency_level"
+
+ // CassandraCluster specifies the tag name that is used to set the cluster.
+ CassandraCluster = "cassandra.cluster"
+
+ // CassandraRowCount specifies the tag name to use when setting the row count.
+ CassandraRowCount = "cassandra.row_count"
+
+ // CassandraKeyspace is used as the tag name for setting the keyspace.
+ CassandraKeyspace = "cassandra.keyspace"
+
+ // CassandraPaginated specifies the tag name for paginated queries.
+ CassandraPaginated = "cassandra.paginated"
+)
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/ext_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/ext_test.go
new file mode 100644
index 00000000..c5b3b30d
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/ext_test.go
@@ -0,0 +1,36 @@
+package ext
+
+import "testing"
+
+// TestSpec asserts that the constants represented in this package match the
+// ones that are expected by the rest of our pipeline.
+func TestSpec(t *testing.T) {
+ // tests holds pairs of values; tests[i] must equal tests[i+1] for each even i.
+ //
+ // changing any of these should be considered a breaking change and
+ // should require a major version release.
+ tests := []string{
+ AppTypeWeb, "web",
+ AppTypeDB, "db",
+ AppTypeCache, "cache",
+ AppTypeRPC, "rpc",
+ SpanTypeWeb, "web",
+ SpanTypeHTTP, "http",
+ SpanTypeSQL, "sql",
+ SQLType, "sql",
+ SpanTypeCassandra, "cassandra",
+ SpanTypeRedis, "redis",
+ SpanTypeElasticSearch, "elasticsearch",
+ SQLQuery, "sql.query",
+ HTTPURL, "http.url",
+ Environment, "env",
+ }
+ if len(tests)%2 != 0 {
+ t.Fatal("uneven test count")
+ }
+ for i := 0; i < len(tests); i += 2 {
+ if tests[i] != tests[i+1] {
+ t.Fatalf("changed %q", tests[i+1])
+ }
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/priority.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/priority.go
new file mode 100644
index 00000000..cbda4204
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/priority.go
@@ -0,0 +1,22 @@
+package ext
+
+// Priority is a hint given to the backend so that it knows which traces to reject or keep.
+// In a distributed context, it should be set before any context propagation (fork, RPC calls) to be effective.
+
+const (
+ // PriorityUserReject informs the backend that a trace should be rejected and not stored.
+ // This should be used by user code overriding default priority.
+ PriorityUserReject = -1
+
+ // PriorityAutoReject informs the backend that a trace should be rejected and not stored.
+ // This is used by the builtin sampler.
+ PriorityAutoReject = 0
+
+ // PriorityAutoKeep informs the backend that a trace should be kept and stored.
+ // This is used by the builtin sampler.
+ PriorityAutoKeep = 1
+
+ // PriorityUserKeep informs the backend that a trace should be kept and stored.
+ // This should be used by user code overriding default priority.
+ PriorityUserKeep = 2
+)
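+
+// Illustrative usage (assuming the ext.SamplingPriority tag key defined in
+// this package): user code can override the sampler's decision by tagging the
+// span before its context is propagated:
+//
+//    span := tracer.StartSpan("http.request")
+//    span.SetTag(ext.SamplingPriority, ext.PriorityUserKeep)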
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/system.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/system.go
new file mode 100644
index 00000000..9256aa5b
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/system.go
@@ -0,0 +1,7 @@
+package ext
+
+// Standard system metadata names
+const (
+ // Pid is the process ID of the traced process.
+ Pid = "system.pid"
+)
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/tags.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/tags.go
new file mode 100644
index 00000000..96b6fb99
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext/tags.go
@@ -0,0 +1,64 @@
+// Package ext contains a set of Datadog-specific constants. Most of them are used
+// for setting span metadata.
+package ext
+
+const (
+ // TargetHost sets the target host address.
+ TargetHost = "out.host"
+
+ // TargetPort sets the target host port.
+ TargetPort = "out.port"
+
+ // SamplingPriority is the tag that marks the sampling priority of a span.
+ SamplingPriority = "sampling.priority"
+
+ // SQLType sets the sql type tag.
+ SQLType = "sql"
+
+ // SQLQuery sets the sql query tag on a span.
+ SQLQuery = "sql.query"
+
+ // HTTPMethod specifies the HTTP method used in a span.
+ HTTPMethod = "http.method"
+
+ // HTTPCode sets the HTTP status code as a tag.
+ HTTPCode = "http.status_code"
+
+ // HTTPURL sets the HTTP URL for a span.
+ HTTPURL = "http.url"
+
+ // TODO: In the next major version, suffix these constants (SpanType, etc)
+ // with "*Key" (SpanTypeKey, etc) to more easily differentiate between
+ // constants representing tag values and constants representing keys.
+
+ // SpanType defines the Span type (web, db, cache).
+ SpanType = "span.type"
+
+ // ServiceName defines the Service name for this Span.
+ ServiceName = "service.name"
+
+ // ResourceName defines the Resource name for the Span.
+ ResourceName = "resource.name"
+
+ // Error specifies the error tag. Its value is usually of type "error".
+ Error = "error"
+
+ // ErrorMsg specifies the error message.
+ ErrorMsg = "error.msg"
+
+ // ErrorType specifies the error type.
+ ErrorType = "error.type"
+
+ // ErrorStack specifies the stack dump.
+ ErrorStack = "error.stack"
+
+ // Environment specifies the environment to use with a trace.
+ Environment = "env"
+
+ // DBApplication indicates the application using the database.
+ DBApplication = "db.application"
+ // DBName indicates the database name.
+ DBName = "db.name"
+ // DBUser indicates the user name of Database, e.g. "readonly_user" or "reporting_user".
+ DBUser = "db.user"
+)
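+
+// Illustrative usage: these constants are tag keys passed to Span.SetTag, for
+// example:
+//
+//    span.SetTag(ext.ResourceName, "/user/profile")
+//    span.SetTag(ext.HTTPCode, "200")
+//    span.SetTag(ext.Error, err)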
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal/globaltracer.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal/globaltracer.go
new file mode 100644
index 00000000..008f5f03
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal/globaltracer.go
@@ -0,0 +1,98 @@
+package internal // import "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal"
+
+import (
+ "sync"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+)
+
+var (
+ mu sync.RWMutex // guards globalTracer
+ globalTracer ddtrace.Tracer = &NoopTracer{}
+)
+
+// SetGlobalTracer sets the global tracer to t.
+func SetGlobalTracer(t ddtrace.Tracer) {
+ mu.Lock()
+ defer mu.Unlock()
+ if !Testing {
+ // avoid infinite loop when calling (*mocktracer.Tracer).Stop
+ globalTracer.Stop()
+ }
+ globalTracer = t
+}
+
+// GetGlobalTracer returns the currently active tracer.
+func GetGlobalTracer() ddtrace.Tracer {
+ mu.RLock()
+ defer mu.RUnlock()
+ return globalTracer
+}
+
+// Testing is set to true when the mock tracer is active. It usually signifies that we are in a test
+// environment. This value is used by tracer.Start to prevent overriding the GlobalTracer in tests.
+var Testing = false
+
+var _ ddtrace.Tracer = (*NoopTracer)(nil)
+
+// NoopTracer is an implementation of ddtrace.Tracer that is a no-op.
+type NoopTracer struct{}
+
+// StartSpan implements ddtrace.Tracer.
+func (NoopTracer) StartSpan(operationName string, opts ...ddtrace.StartSpanOption) ddtrace.Span {
+ return NoopSpan{}
+}
+
+// SetServiceInfo is a no-op. Note that it is not part of the ddtrace.Tracer interface.
+func (NoopTracer) SetServiceInfo(name, app, appType string) {}
+
+// Extract implements ddtrace.Tracer.
+func (NoopTracer) Extract(carrier interface{}) (ddtrace.SpanContext, error) {
+ return NoopSpanContext{}, nil
+}
+
+// Inject implements ddtrace.Tracer.
+func (NoopTracer) Inject(context ddtrace.SpanContext, carrier interface{}) error { return nil }
+
+// Stop implements ddtrace.Tracer.
+func (NoopTracer) Stop() {}
+
+var _ ddtrace.Span = (*NoopSpan)(nil)
+
+// NoopSpan is an implementation of ddtrace.Span that is a no-op.
+type NoopSpan struct{}
+
+// SetTag implements ddtrace.Span.
+func (NoopSpan) SetTag(key string, value interface{}) {}
+
+// SetOperationName implements ddtrace.Span.
+func (NoopSpan) SetOperationName(operationName string) {}
+
+// BaggageItem implements ddtrace.Span.
+func (NoopSpan) BaggageItem(key string) string { return "" }
+
+// SetBaggageItem implements ddtrace.Span.
+func (NoopSpan) SetBaggageItem(key, val string) {}
+
+// Finish implements ddtrace.Span.
+func (NoopSpan) Finish(opts ...ddtrace.FinishOption) {}
+
+// Tracer returns a no-op tracer. Note that it is not part of the ddtrace.Span interface.
+func (NoopSpan) Tracer() ddtrace.Tracer { return NoopTracer{} }
+
+// Context implements ddtrace.Span.
+func (NoopSpan) Context() ddtrace.SpanContext { return NoopSpanContext{} }
+
+var _ ddtrace.SpanContext = (*NoopSpanContext)(nil)
+
+// NoopSpanContext is an implementation of ddtrace.SpanContext that is a no-op.
+type NoopSpanContext struct{}
+
+// SpanID implements ddtrace.SpanContext.
+func (NoopSpanContext) SpanID() uint64 { return 0 }
+
+// TraceID implements ddtrace.SpanContext.
+func (NoopSpanContext) TraceID() uint64 { return 0 }
+
+// ForeachBaggageItem implements ddtrace.SpanContext.
+func (NoopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {}
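+
+// A minimal sketch (an assumption; the real helpers live in the tracer
+// package) of how package-level tracing functions delegate to the global
+// tracer registered above:
+//
+//    func StartSpan(name string, opts ...ddtrace.StartSpanOption) ddtrace.Span {
+//        return GetGlobalTracer().StartSpan(name, opts...)
+//    }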
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer/example_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer/example_test.go
new file mode 100644
index 00000000..76b642ea
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer/example_test.go
@@ -0,0 +1,21 @@
+package mocktracer_test
+
+import (
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+)
+
+func Example() {
+ // Start the mock tracer.
+ mt := mocktracer.Start()
+ defer mt.Stop()
+
+ // ...run some code which generates spans.
+
+ // Query the mock tracer for finished spans.
+ spans := mt.FinishedSpans()
+ if len(spans) != 1 {
+ // should only have 1 span
+ }
+
+ // Run assertions...
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer/mockspan.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer/mockspan.go
new file mode 100644
index 00000000..1e760bb8
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer/mockspan.go
@@ -0,0 +1,214 @@
+package mocktracer // import "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+)
+
+var _ ddtrace.Span = (*mockspan)(nil)
+var _ Span = (*mockspan)(nil)
+
+// Span is an interface that allows querying a span returned by the mock tracer.
+type Span interface {
+ // SpanID returns the span's ID.
+ SpanID() uint64
+
+ // TraceID returns the span's trace ID.
+ TraceID() uint64
+
+ // ParentID returns the span's parent ID.
+ ParentID() uint64
+
+ // StartTime returns the time when the span started.
+ StartTime() time.Time
+
+ // FinishTime returns the time when the span finished.
+ FinishTime() time.Time
+
+ // OperationName returns the operation name held by this span.
+ OperationName() string
+
+ // Tag returns the value of the tag at key k.
+ Tag(k string) interface{}
+
+ // Tags returns a copy of all the tags in this span.
+ Tags() map[string]interface{}
+
+ // Context returns the span's SpanContext.
+ Context() ddtrace.SpanContext
+
+ // Stringer allows pretty-printing the span's fields for debugging.
+ fmt.Stringer
+}
+
+func newSpan(t *mocktracer, operationName string, cfg *ddtrace.StartSpanConfig) *mockspan {
+ if cfg.Tags == nil {
+ cfg.Tags = make(map[string]interface{})
+ }
+ if cfg.Tags[ext.ResourceName] == nil {
+ cfg.Tags[ext.ResourceName] = operationName
+ }
+ s := &mockspan{
+ name: operationName,
+ tracer: t,
+ }
+ if cfg.StartTime.IsZero() {
+ s.startTime = time.Now()
+ } else {
+ s.startTime = cfg.StartTime
+ }
+ id := nextID()
+ s.context = &spanContext{spanID: id, traceID: id, span: s}
+ if ctx, ok := cfg.Parent.(*spanContext); ok {
+ if ctx.span != nil && s.tags[ext.ServiceName] == nil {
+ // if we have a local parent and no service, inherit the parent's
+ s.SetTag(ext.ServiceName, ctx.span.Tag(ext.ServiceName))
+ }
+ if ctx.hasSamplingPriority() {
+ s.SetTag(ext.SamplingPriority, ctx.samplingPriority())
+ }
+ s.parentID = ctx.spanID
+ s.context.priority = ctx.samplingPriority()
+ s.context.hasPriority = ctx.hasSamplingPriority()
+ s.context.traceID = ctx.traceID
+ s.context.baggage = make(map[string]string, len(ctx.baggage))
+ ctx.ForeachBaggageItem(func(k, v string) bool {
+ s.context.baggage[k] = v
+ return true
+ })
+ }
+ for k, v := range cfg.Tags {
+ s.SetTag(k, v)
+ }
+ return s
+}
+
+type mockspan struct {
+ sync.RWMutex // guards below fields
+ name string
+ tags map[string]interface{}
+ finishTime time.Time
+
+ startTime time.Time
+ parentID uint64
+ context *spanContext
+ tracer *mocktracer
+}
+
+// SetTag sets a given tag on the span.
+func (s *mockspan) SetTag(key string, value interface{}) {
+ s.Lock()
+ defer s.Unlock()
+ if s.tags == nil {
+ s.tags = make(map[string]interface{}, 1)
+ }
+ if key == ext.SamplingPriority {
+ switch p := value.(type) {
+ case int:
+ s.context.setSamplingPriority(p)
+ case float64:
+ s.context.setSamplingPriority(int(p))
+ }
+ }
+ s.tags[key] = value
+}
+
+func (s *mockspan) FinishTime() time.Time {
+ s.RLock()
+ defer s.RUnlock()
+ return s.finishTime
+}
+
+func (s *mockspan) StartTime() time.Time { return s.startTime }
+
+func (s *mockspan) Tag(k string) interface{} {
+ s.RLock()
+ defer s.RUnlock()
+ return s.tags[k]
+}
+
+func (s *mockspan) Tags() map[string]interface{} {
+ s.RLock()
+ defer s.RUnlock()
+ // return a copy so that callers cannot mutate the span's internal tags
+ cp := make(map[string]interface{}, len(s.tags))
+ for k, v := range s.tags {
+ cp[k] = v
+ }
+ return cp
+}
+
+func (s *mockspan) TraceID() uint64 { return s.context.traceID }
+
+func (s *mockspan) SpanID() uint64 { return s.context.spanID }
+
+func (s *mockspan) ParentID() uint64 { return s.parentID }
+
+func (s *mockspan) OperationName() string {
+ s.RLock()
+ defer s.RUnlock()
+ return s.name
+}
+
+// SetOperationName sets the span's operation name to the given one.
+func (s *mockspan) SetOperationName(operationName string) {
+ s.Lock()
+ defer s.Unlock()
+ s.name = operationName
+}
+
+// BaggageItem returns the baggage item with the given key.
+func (s *mockspan) BaggageItem(key string) string {
+ return s.context.baggageItem(key)
+}
+
+// SetBaggageItem sets a new baggage item at the given key. The baggage
+// item should propagate to all descendant spans, both in- and cross-process.
+func (s *mockspan) SetBaggageItem(key, val string) {
+ s.context.setBaggageItem(key, val)
+}
+
+// Finish finishes the current span with the given options.
+func (s *mockspan) Finish(opts ...ddtrace.FinishOption) {
+ var cfg ddtrace.FinishConfig
+ for _, fn := range opts {
+ fn(&cfg)
+ }
+ var t time.Time
+ if cfg.FinishTime.IsZero() {
+ t = time.Now()
+ } else {
+ t = cfg.FinishTime
+ }
+ if cfg.Error != nil {
+ s.SetTag(ext.Error, cfg.Error)
+ }
+ s.Lock()
+ s.finishTime = t
+ s.Unlock()
+ s.tracer.addFinishedSpan(s)
+}
+
+// String implements fmt.Stringer.
+func (s *mockspan) String() string {
+ sc := s.context
+ return fmt.Sprintf(`
+name: %s
+tags: %#v
+start: %s
+finish: %s
+id: %d
+parent: %d
+trace: %d
+baggage: %#v
+`, s.name, s.tags, s.startTime, s.finishTime, sc.spanID, s.parentID, sc.traceID, sc.baggage)
+}
+
+// Context returns the SpanContext of this Span.
+func (s *mockspan) Context() ddtrace.SpanContext { return s.context }
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer/mockspan_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer/mockspan_test.go
new file mode 100644
index 00000000..e675d284
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer/mockspan_test.go
@@ -0,0 +1,173 @@
+package mocktracer
+
+import (
+ "errors"
+ "testing"
+ "time"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// basicSpan returns a span with no configuration and the given operation name.
+func basicSpan(operationName string) *mockspan {
+ return newSpan(&mocktracer{}, operationName, &ddtrace.StartSpanConfig{})
+}
+
+func TestNewSpan(t *testing.T) {
+ t.Run("basic", func(t *testing.T) {
+ s := basicSpan("http.request")
+
+ assert := assert.New(t)
+ assert.Equal("http.request", s.name)
+ assert.False(s.startTime.IsZero())
+ assert.Zero(s.parentID)
+ assert.NotNil(s.context)
+ assert.NotZero(s.context.spanID)
+ assert.Equal(s.context.spanID, s.context.traceID)
+ })
+
+ t.Run("options", func(t *testing.T) {
+ tr := new(mocktracer)
+ startTime := time.Now()
+ tags := map[string]interface{}{"k": "v", "k1": "v1"}
+ opts := &ddtrace.StartSpanConfig{
+ StartTime: startTime,
+ Tags: tags,
+ }
+ s := newSpan(tr, "http.request", opts)
+
+ assert := assert.New(t)
+ assert.Equal(tr, s.tracer)
+ assert.Equal("http.request", s.name)
+ assert.Equal(startTime, s.startTime)
+ assert.Equal(tags, s.tags)
+ })
+
+ t.Run("parent", func(t *testing.T) {
+ baggage := map[string]string{"A": "B", "C": "D"}
+ parentctx := &spanContext{spanID: 1, traceID: 2, baggage: baggage}
+ opts := &ddtrace.StartSpanConfig{Parent: parentctx}
+ s := newSpan(&mocktracer{}, "http.request", opts)
+
+ assert := assert.New(t)
+ assert.NotNil(s.context)
+ assert.Equal(uint64(1), s.parentID)
+ assert.Equal(uint64(2), s.context.traceID)
+ assert.Equal(baggage, s.context.baggage)
+ })
+}
+
+func TestSpanSetTag(t *testing.T) {
+ s := basicSpan("http.request")
+ s.SetTag("a", "b")
+ s.SetTag("c", "d")
+
+ assert := assert.New(t)
+ assert.Len(s.Tags(), 3)
+ assert.Equal("http.request", s.Tag(ext.ResourceName))
+ assert.Equal("b", s.Tag("a"))
+ assert.Equal("d", s.Tag("c"))
+}
+
+func TestSpanSetTagPriority(t *testing.T) {
+ assert := assert.New(t)
+ s := basicSpan("http.request")
+ assert.False(s.context.hasSamplingPriority())
+ s.SetTag(ext.SamplingPriority, -1)
+ assert.True(s.context.hasSamplingPriority())
+ assert.Equal(-1, s.context.samplingPriority())
+}
+
+func TestSpanTagImmutability(t *testing.T) {
+ s := basicSpan("http.request")
+ s.SetTag("a", "b")
+ tags := s.Tags()
+ tags["a"] = 123
+ tags["b"] = 456
+
+ assert := assert.New(t)
+ assert.Equal("b", s.tags["a"])
+ assert.Zero(s.tags["b"])
+}
+
+func TestSpanStartTime(t *testing.T) {
+ startTime := time.Now()
+ s := newSpan(&mocktracer{}, "http.request", &ddtrace.StartSpanConfig{StartTime: startTime})
+
+ assert := assert.New(t)
+ assert.Equal(startTime, s.startTime)
+ assert.Equal(startTime, s.StartTime())
+}
+
+func TestSpanFinishTime(t *testing.T) {
+ s := basicSpan("http.request")
+ finishTime := time.Now()
+ s.Finish(tracer.FinishTime(finishTime))
+
+ assert := assert.New(t)
+ assert.Equal(finishTime, s.finishTime)
+ assert.Equal(finishTime, s.FinishTime())
+}
+
+func TestSpanOperationName(t *testing.T) {
+ t.Run("default", func(t *testing.T) {
+ s := basicSpan("http.request")
+ assert.Equal(t, "http.request", s.name)
+ assert.Equal(t, "http.request", s.OperationName())
+ })
+
+ t.Run("default", func(t *testing.T) {
+ s := basicSpan("http.request")
+ s.SetOperationName("db.query")
+ assert.Equal(t, "db.query", s.name)
+ assert.Equal(t, "db.query", s.OperationName())
+ })
+}
+
+func TestSpanBaggageFunctions(t *testing.T) {
+ t.Run("SetBaggageItem", func(t *testing.T) {
+ s := basicSpan("http.request")
+ s.SetBaggageItem("a", "b")
+ assert.Equal(t, "b", s.context.baggage["a"])
+ })
+
+ t.Run("BaggageItem", func(t *testing.T) {
+ s := basicSpan("http.request")
+ s.SetBaggageItem("a", "b")
+ assert.Equal(t, "b", s.BaggageItem("a"))
+ })
+}
+
+func TestSpanContext(t *testing.T) {
+ t.Run("Context", func(t *testing.T) {
+ s := basicSpan("http.request")
+ assert.Equal(t, s.context, s.Context())
+ })
+
+ t.Run("IDs", func(t *testing.T) {
+ parent := basicSpan("http.request")
+ child := newSpan(&mocktracer{}, "db.query", &ddtrace.StartSpanConfig{
+ Parent: parent.Context(),
+ })
+
+ assert := assert.New(t)
+ assert.Equal(parent.SpanID(), child.ParentID())
+ assert.Equal(parent.TraceID(), child.TraceID())
+ assert.NotZero(child.SpanID())
+ })
+}
+
+func TestSpanFinish(t *testing.T) {
+ s := basicSpan("http.request")
+ want := errors.New("some error")
+ s.Finish(tracer.WithError(want))
+
+ assert := assert.New(t)
+ assert.False(s.FinishTime().IsZero())
+ assert.True(s.FinishTime().Before(time.Now()))
+ assert.Equal(want, s.Tag(ext.Error))
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer/mockspancontext.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer/mockspancontext.go
new file mode 100644
index 00000000..49246a09
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer/mockspancontext.go
@@ -0,0 +1,73 @@
+package mocktracer
+
+import (
+ "sync"
+ "sync/atomic"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+)
+
+var _ ddtrace.SpanContext = (*spanContext)(nil)
+
+type spanContext struct {
+ sync.RWMutex // guards below fields
+ baggage map[string]string
+ priority int
+ hasPriority bool
+
+ spanID uint64
+ traceID uint64
+ span *mockspan // context owner
+}
+
+func (sc *spanContext) TraceID() uint64 { return sc.traceID }
+
+func (sc *spanContext) SpanID() uint64 { return sc.spanID }
+
+func (sc *spanContext) ForeachBaggageItem(handler func(k, v string) bool) {
+ sc.RLock()
+ defer sc.RUnlock()
+ for k, v := range sc.baggage {
+ if !handler(k, v) {
+ break
+ }
+ }
+}
+
+func (sc *spanContext) setBaggageItem(k, v string) {
+ sc.Lock()
+ defer sc.Unlock()
+ if sc.baggage == nil {
+ sc.baggage = make(map[string]string, 1)
+ }
+ sc.baggage[k] = v
+}
+
+func (sc *spanContext) baggageItem(k string) string {
+ sc.RLock()
+ defer sc.RUnlock()
+ return sc.baggage[k]
+}
+
+func (sc *spanContext) setSamplingPriority(p int) {
+ sc.Lock()
+ defer sc.Unlock()
+ sc.priority = p
+ sc.hasPriority = true
+}
+
+func (sc *spanContext) hasSamplingPriority() bool {
+ sc.RLock()
+ defer sc.RUnlock()
+ return sc.hasPriority
+}
+
+func (sc *spanContext) samplingPriority() int {
+ sc.RLock()
+ defer sc.RUnlock()
+ return sc.priority
+}
+
+var mockIDSource uint64 = 123
+
+func nextID() uint64 { return atomic.AddUint64(&mockIDSource, 1) }
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer/mockspancontext_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer/mockspancontext_test.go
new file mode 100644
index 00000000..98ceca19
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer/mockspancontext_test.go
@@ -0,0 +1,61 @@
+package mocktracer
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestMockIDs(t *testing.T) {
+ last := nextID()
+ for i := 0; i < 10; i++ {
+ // ensure incremental (unique) IDs
+ next := nextID()
+ if next <= last {
+ t.Fail()
+ }
+ last = next
+ }
+}
+
+func TestSpanContextSetBaggage(t *testing.T) {
+ var sc spanContext
+ sc.setBaggageItem("a", "b")
+ sc.setBaggageItem("c", "d")
+ assert.Equal(t, sc.baggage["a"], "b")
+ assert.Equal(t, sc.baggage["c"], "d")
+}
+
+func TestSpanContextGetBaggage(t *testing.T) {
+ var sc spanContext
+ sc.setBaggageItem("a", "b")
+ sc.setBaggageItem("c", "d")
+ assert.Equal(t, sc.baggageItem("a"), "b")
+ assert.Equal(t, sc.baggageItem("c"), "d")
+}
+
+func TestSpanContextIterator(t *testing.T) {
+ var sc spanContext
+ sc.setBaggageItem("a", "b")
+ sc.setBaggageItem("c", "d")
+
+ t.Run("some", func(t *testing.T) {
+ var seen int
+ sc.ForeachBaggageItem(func(k, v string) bool {
+ seen++
+ return false
+ })
+ assert.Equal(t, seen, 1)
+ })
+
+ t.Run("all", func(t *testing.T) {
+ seen := make(map[string]interface{}, 2)
+ sc.ForeachBaggageItem(func(k, v string) bool {
+ seen[k] = v
+ return true
+ })
+ assert.Len(t, seen, 2)
+ assert.Equal(t, seen["a"], "b")
+ assert.Equal(t, seen["c"], "d")
+ })
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer/mocktracer.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer/mocktracer.go
new file mode 100644
index 00000000..6d1349c6
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer/mocktracer.go
@@ -0,0 +1,159 @@
+// Package mocktracer provides a mock implementation of the tracer used in testing. It
+// allows querying spans generated at runtime, without having them actually be sent to
+// an agent. It provides a simple way to test that instrumentation is running correctly
+// in your application.
+//
+// Simply call "Start" at the beginning of your tests to start and obtain an instance
+// of the mock tracer.
+package mocktracer
+
+import (
+ "strconv"
+ "strings"
+ "sync"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+var _ ddtrace.Tracer = (*mocktracer)(nil)
+var _ Tracer = (*mocktracer)(nil)
+
+// Tracer exposes an interface for querying the currently running mock tracer.
+type Tracer interface {
+ // FinishedSpans returns the set of finished spans.
+ FinishedSpans() []Span
+
+ // Reset resets the spans recorded in the tracer. This is
+ // especially useful when running tests in a loop, where a clean start
+ // is desired for FinishedSpans calls.
+ Reset()
+
+ // Stop deactivates the mock tracer and allows a normal tracer to take over.
+ // It should always be called when testing has finished.
+ Stop()
+}
+
+// Start sets the internal tracer to a mock and returns an interface
+// which allows querying it. Call Start at the beginning of your tests
+// to activate the mock tracer. When your test runs, use the returned
+// interface to query the tracer's state.
+func Start() Tracer {
+ var t mocktracer
+ internal.SetGlobalTracer(&t)
+ internal.Testing = true
+ return &t
+}
+
+type mocktracer struct {
+ sync.RWMutex // guards finishedSpans
+ finishedSpans []Span
+}
+
+// Stop deactivates the mock tracer and sets the active tracer to a no-op.
+func (*mocktracer) Stop() {
+ internal.SetGlobalTracer(&internal.NoopTracer{})
+ internal.Testing = false
+}
+
+func (t *mocktracer) StartSpan(operationName string, opts ...ddtrace.StartSpanOption) ddtrace.Span {
+ var cfg ddtrace.StartSpanConfig
+ for _, fn := range opts {
+ fn(&cfg)
+ }
+ return newSpan(t, operationName, &cfg)
+}
+
+func (t *mocktracer) FinishedSpans() []Span {
+ t.RLock()
+ defer t.RUnlock()
+ return t.finishedSpans
+}
+
+func (t *mocktracer) Reset() {
+ t.Lock()
+ defer t.Unlock()
+ t.finishedSpans = nil
+}
+
+func (t *mocktracer) addFinishedSpan(s Span) {
+ t.Lock()
+ defer t.Unlock()
+ if t.finishedSpans == nil {
+ t.finishedSpans = make([]Span, 0, 1)
+ }
+ t.finishedSpans = append(t.finishedSpans, s)
+}
+
+const (
+ traceHeader = tracer.DefaultTraceIDHeader
+ spanHeader = tracer.DefaultParentIDHeader
+ priorityHeader = tracer.DefaultPriorityHeader
+ baggagePrefix = tracer.DefaultBaggageHeaderPrefix
+)
+
+func (t *mocktracer) Extract(carrier interface{}) (ddtrace.SpanContext, error) {
+ reader, ok := carrier.(tracer.TextMapReader)
+ if !ok {
+ return nil, tracer.ErrInvalidCarrier
+ }
+ var sc spanContext
+ err := reader.ForeachKey(func(key, v string) error {
+ k := strings.ToLower(key)
+ if k == traceHeader {
+ id, err := strconv.ParseUint(v, 10, 64)
+ if err != nil {
+ return tracer.ErrSpanContextCorrupted
+ }
+ sc.traceID = id
+ }
+ if k == spanHeader {
+ id, err := strconv.ParseUint(v, 10, 64)
+ if err != nil {
+ return tracer.ErrSpanContextCorrupted
+ }
+ sc.spanID = id
+ }
+ if k == priorityHeader {
+ p, err := strconv.Atoi(v)
+ if err != nil {
+ return tracer.ErrSpanContextCorrupted
+ }
+ sc.priority = p
+ sc.hasPriority = true
+ }
+ if strings.HasPrefix(k, baggagePrefix) {
+ sc.setBaggageItem(strings.TrimPrefix(k, baggagePrefix), v)
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ if sc.traceID == 0 || sc.spanID == 0 {
+ return nil, tracer.ErrSpanContextNotFound
+ }
+ return &sc, err
+}
+
+func (t *mocktracer) Inject(context ddtrace.SpanContext, carrier interface{}) error {
+ writer, ok := carrier.(tracer.TextMapWriter)
+ if !ok {
+ return tracer.ErrInvalidCarrier
+ }
+ ctx, ok := context.(*spanContext)
+ if !ok || ctx.traceID == 0 || ctx.spanID == 0 {
+ return tracer.ErrInvalidSpanContext
+ }
+ writer.Set(traceHeader, strconv.FormatUint(ctx.traceID, 10))
+ writer.Set(spanHeader, strconv.FormatUint(ctx.spanID, 10))
+ if ctx.hasSamplingPriority() {
+ writer.Set(priorityHeader, strconv.Itoa(ctx.priority))
+ }
+ ctx.ForeachBaggageItem(func(k, v string) bool {
+ writer.Set(baggagePrefix+k, v)
+ return true
+ })
+ return nil
+}
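+
+// Illustrative usage (mirroring the consistency test in mocktracer_test.go):
+// Inject and Extract round-trip a span context through a TextMapCarrier:
+//
+//    carrier := tracer.TextMapCarrier(map[string]string{})
+//    if err := mt.Inject(span.Context(), carrier); err != nil {
+//        // handle the error
+//    }
+//    sctx, err := mt.Extract(carrier)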
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer/mocktracer_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer/mocktracer_test.go
new file mode 100644
index 00000000..7c6a654f
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer/mocktracer_test.go
@@ -0,0 +1,238 @@
+package mocktracer
+
+import (
+ "testing"
+ "time"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestStart(t *testing.T) {
+ trc := Start()
+ if tt, ok := internal.GetGlobalTracer().(Tracer); !ok || tt != trc {
+ t.Fail()
+ }
+}
+
+func TestTracerStop(t *testing.T) {
+ Start().Stop()
+ if _, ok := internal.GetGlobalTracer().(*internal.NoopTracer); !ok {
+ t.Fail()
+ }
+}
+
+func TestTracerStartSpan(t *testing.T) {
+ parentTags := map[string]interface{}{ext.ServiceName: "root-service", ext.SamplingPriority: -1}
+ startTime := time.Now()
+
+ t.Run("with-service", func(t *testing.T) {
+ var mt mocktracer
+ parent := newSpan(&mt, "http.request", &ddtrace.StartSpanConfig{Tags: parentTags})
+ s, ok := mt.StartSpan(
+ "db.query",
+ tracer.ServiceName("my-service"),
+ tracer.StartTime(startTime),
+ tracer.ChildOf(parent.Context()),
+ ).(*mockspan)
+
+ assert := assert.New(t)
+ assert.True(ok)
+ assert.Equal("db.query", s.OperationName())
+ assert.Equal(startTime, s.StartTime())
+ assert.Equal("my-service", s.Tag(ext.ServiceName))
+ assert.Equal(parent.SpanID(), s.ParentID())
+ assert.Equal(parent.TraceID(), s.TraceID())
+ assert.True(parent.context.hasSamplingPriority())
+ assert.Equal(-1, parent.context.samplingPriority())
+ })
+
+ t.Run("inherit", func(t *testing.T) {
+ var mt mocktracer
+ parent := newSpan(&mt, "http.request", &ddtrace.StartSpanConfig{Tags: parentTags})
+ s, ok := mt.StartSpan("db.query", tracer.ChildOf(parent.Context())).(*mockspan)
+
+ assert := assert.New(t)
+ assert.True(ok)
+ assert.Equal("db.query", s.OperationName())
+ assert.Equal("root-service", s.Tag(ext.ServiceName))
+ assert.Equal(parent.SpanID(), s.ParentID())
+ assert.Equal(parent.TraceID(), s.TraceID())
+ assert.True(s.context.hasSamplingPriority())
+ assert.Equal(-1, s.context.samplingPriority())
+ })
+}
+
+func TestTracerFinishedSpans(t *testing.T) {
+ var mt mocktracer
+ parent := newSpan(&mt, "http.request", &ddtrace.StartSpanConfig{})
+ child := mt.StartSpan("db.query", tracer.ChildOf(parent.Context()))
+ child.Finish()
+ parent.Finish()
+ found := 0
+ for _, s := range mt.FinishedSpans() {
+ switch s.OperationName() {
+ case "http.request":
+ assert.Equal(t, parent, s)
+ found++
+ case "db.query":
+ assert.Equal(t, child, s)
+ found++
+ }
+ }
+ assert.Equal(t, 2, found)
+}
+
+func TestTracerReset(t *testing.T) {
+ var mt mocktracer
+ mt.StartSpan("db.query").Finish()
+
+ assert := assert.New(t)
+ assert.Len(mt.finishedSpans, 1)
+
+ mt.Reset()
+
+ assert.Nil(mt.finishedSpans)
+}
+
+func TestTracerInject(t *testing.T) {
+ t.Run("errors", func(t *testing.T) {
+ var mt mocktracer
+ assert := assert.New(t)
+
+ err := mt.Inject(&spanContext{}, 2)
+ assert.Equal(tracer.ErrInvalidCarrier, err) // 2 is not a carrier
+
+ err = mt.Inject(&spanContext{}, tracer.TextMapCarrier(map[string]string{}))
+ assert.Equal(tracer.ErrInvalidSpanContext, err) // no traceID and spanID
+
+ err = mt.Inject(&spanContext{traceID: 2}, tracer.TextMapCarrier(map[string]string{}))
+ assert.Equal(tracer.ErrInvalidSpanContext, err) // no spanID
+
+ err = mt.Inject(&spanContext{traceID: 2, spanID: 1}, tracer.TextMapCarrier(map[string]string{}))
+ assert.Nil(err) // ok
+ })
+
+ t.Run("ok", func(t *testing.T) {
+ sctx := &spanContext{
+ traceID: 1,
+ spanID: 2,
+ priority: -1,
+ hasPriority: true,
+ baggage: map[string]string{"A": "B", "C": "D"},
+ }
+ carrier := make(map[string]string)
+ err := (&mocktracer{}).Inject(sctx, tracer.TextMapCarrier(carrier))
+
+ assert := assert.New(t)
+ assert.Nil(err)
+ assert.Equal("1", carrier[traceHeader])
+ assert.Equal("2", carrier[spanHeader])
+ assert.Equal("-1", carrier[priorityHeader])
+ assert.Equal("B", carrier[baggagePrefix+"A"])
+ assert.Equal("D", carrier[baggagePrefix+"C"])
+ })
+}
+
+func TestTracerExtract(t *testing.T) {
+ // carry creates a tracer.TextMapCarrier containing the given sequence
+ // of key/value pairs.
+ carry := func(kv ...string) tracer.TextMapCarrier {
+ var k string
+ m := make(map[string]string)
+ if n := len(kv); n%2 == 0 && n >= 2 {
+ for i, v := range kv {
+ if (i+1)%2 == 0 {
+ m[k] = v
+ } else {
+ k = v
+ }
+ }
+ }
+ return tracer.TextMapCarrier(m)
+ }
+
+ // tests the carry helper function.
+ t.Run("carry", func(t *testing.T) {
+ for _, tt := range []struct {
+ in []string
+ out tracer.TextMapCarrier
+ }{
+ {in: []string{}, out: map[string]string{}},
+ {in: []string{"A"}, out: map[string]string{}},
+ {in: []string{"A", "B", "C"}, out: map[string]string{}},
+ {in: []string{"A", "B"}, out: map[string]string{"A": "B"}},
+ {in: []string{"A", "B", "C", "D"}, out: map[string]string{"A": "B", "C": "D"}},
+ } {
+ assert.Equal(t, tt.out, carry(tt.in...))
+ }
+ })
+
+ var mt mocktracer
+
+ // tests error return values.
+ t.Run("errors", func(t *testing.T) {
+ assert := assert.New(t)
+
+ _, err := mt.Extract(2)
+ assert.Equal(tracer.ErrInvalidCarrier, err)
+
+ _, err = mt.Extract(carry(traceHeader, "a"))
+ assert.Equal(tracer.ErrSpanContextCorrupted, err)
+
+ _, err = mt.Extract(carry(spanHeader, "a", traceHeader, "2", baggagePrefix+"x", "y"))
+ assert.Equal(tracer.ErrSpanContextCorrupted, err)
+
+ _, err = mt.Extract(carry(spanHeader, "1"))
+ assert.Equal(tracer.ErrSpanContextNotFound, err)
+
+ _, err = mt.Extract(carry())
+ assert.Equal(tracer.ErrSpanContextNotFound, err)
+ })
+
+ t.Run("ok", func(t *testing.T) {
+ assert := assert.New(t)
+
+ ctx, err := mt.Extract(carry(traceHeader, "1", spanHeader, "2"))
+ assert.Nil(err)
+ sc, ok := ctx.(*spanContext)
+ assert.True(ok)
+ assert.Equal(uint64(1), sc.traceID)
+ assert.Equal(uint64(2), sc.spanID)
+
+ ctx, err = mt.Extract(carry(traceHeader, "1", spanHeader, "2", baggagePrefix+"A", "B", baggagePrefix+"C", "D"))
+ assert.Nil(err)
+ sc, ok = ctx.(*spanContext)
+ assert.True(ok)
+ assert.Equal("B", sc.baggageItem("a"))
+ assert.Equal("D", sc.baggageItem("c"))
+
+ ctx, err = mt.Extract(carry(traceHeader, "1", spanHeader, "2", priorityHeader, "-1"))
+ assert.Nil(err)
+ sc, ok = ctx.(*spanContext)
+ assert.True(ok)
+ assert.True(sc.hasSamplingPriority())
+ assert.Equal(-1, sc.samplingPriority())
+ })
+
+ t.Run("consistency", func(t *testing.T) {
+ assert := assert.New(t)
+ want := &spanContext{traceID: 1, spanID: 2, baggage: map[string]string{"a": "B", "C": "D"}}
+ mc := tracer.TextMapCarrier(make(map[string]string))
+ err := mt.Inject(want, mc)
+ assert.Nil(err)
+ sc, err := mt.Extract(mc)
+ assert.Nil(err)
+ got, ok := sc.(*spanContext)
+ assert.True(ok)
+
+ assert.Equal(uint64(1), got.traceID)
+ assert.Equal(uint64(2), got.spanID)
+ assert.Equal("D", got.baggageItem("c"))
+ assert.Equal("B", got.baggageItem("a"))
+ })
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer/example_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer/example_test.go
new file mode 100644
index 00000000..5df6f5b2
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer/example_test.go
@@ -0,0 +1,18 @@
+package opentracer_test
+
+import (
+ opentracing "github.com/opentracing/opentracing-go"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+)
+
+func Example() {
+ // Start a Datadog tracer, optionally providing a set of options,
+ // returning an opentracing.Tracer which wraps it.
+ t := opentracer.New(tracer.WithAgentAddr("host:port"))
+
+ // Use it with the Opentracing API. The (already started) Datadog tracer
+ // may be used in parallel with the Opentracing API if desired.
+ opentracing.SetGlobalTracer(t)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer/option.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer/option.go
new file mode 100644
index 00000000..cbae4518
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer/option.go
@@ -0,0 +1,24 @@
+package opentracer // import "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer"
+
+import (
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+
+ opentracing "github.com/opentracing/opentracing-go"
+)
+
+// ServiceName can be used with opentracing.StartSpan to set the
+// service name of a span.
+func ServiceName(name string) opentracing.StartSpanOption {
+ return opentracing.Tag{Key: ext.ServiceName, Value: name}
+}
+
+// ResourceName can be used with opentracing.StartSpan to set the
+// resource name of a span.
+func ResourceName(name string) opentracing.StartSpanOption {
+ return opentracing.Tag{Key: ext.ResourceName, Value: name}
+}
+
+// SpanType can be used with opentracing.StartSpan to set the type of a span.
+func SpanType(name string) opentracing.StartSpanOption {
+ return opentracing.Tag{Key: ext.SpanType, Value: name}
+}
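+
+// Illustrative usage: since these options are plain opentracing.Tag values,
+// they can be passed directly to opentracing.StartSpan:
+//
+//    span := opentracing.StartSpan("http.request",
+//        opentracer.ServiceName("web"),
+//        opentracer.ResourceName("/user/profile"),
+//    )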
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer/span.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer/span.go
new file mode 100644
index 00000000..1e02764e
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer/span.go
@@ -0,0 +1,83 @@
+package opentracer
+
+import (
+ "fmt"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ opentracing "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/log"
+)
+
+var _ opentracing.Span = (*span)(nil)
+
+// span implements opentracing.Span on top of ddtrace.Span.
+type span struct {
+ ddtrace.Span
+ *opentracer
+}
+
+func (s *span) Context() opentracing.SpanContext { return s.Span.Context() }
+func (s *span) Finish() { s.Span.Finish() }
+func (s *span) Tracer() opentracing.Tracer { return s.opentracer }
+func (s *span) LogEvent(event string) { /* deprecated */ }
+func (s *span) LogEventWithPayload(event string, payload interface{}) { /* deprecated */ }
+func (s *span) Log(data opentracing.LogData) { /* deprecated */ }
+
+func (s *span) FinishWithOptions(opts opentracing.FinishOptions) {
+ for _, lr := range opts.LogRecords {
+ if len(lr.Fields) > 0 {
+ s.LogFields(lr.Fields...)
+ }
+ }
+ s.Span.Finish(tracer.FinishTime(opts.FinishTime))
+}
+
+func (s *span) LogFields(fields ...log.Field) {
+ // catch standard opentracing keys and adjust to internal ones as per spec:
+ // https://github.com/opentracing/specification/blob/master/semantic_conventions.md#log-fields-table
+ for _, f := range fields {
+ switch f.Key() {
+ case "event":
+ if v, ok := f.Value().(string); ok && v == "error" {
+ s.SetTag("error", true)
+ }
+ case "error", "error.object":
+ if err, ok := f.Value().(error); ok {
+ s.SetTag("error", err)
+ }
+ case "message":
+ s.SetTag(ext.ErrorMsg, fmt.Sprint(f.Value()))
+ case "stack":
+ s.SetTag(ext.ErrorStack, fmt.Sprint(f.Value()))
+ default:
+ // not implemented
+ }
+ }
+}
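+
+// As an illustration of the mapping above (hypothetical caller code), logging
+// an error through the Opentracing API
+//
+//    span.LogFields(log.Error(err), log.String("stack", string(debug.Stack())))
+//
+// sets the "error" tag and records the stack trace under ext.ErrorStack.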
+
+func (s *span) LogKV(keyVals ...interface{}) {
+ fields, err := log.InterleavedKVToFields(keyVals...)
+ if err != nil {
+ // TODO(gbbr): create a log package
+ return
+ }
+ s.LogFields(fields...)
+}
+
+func (s *span) SetBaggageItem(key, val string) opentracing.Span {
+ s.Span.SetBaggageItem(key, val)
+ return s
+}
+
+func (s *span) SetOperationName(operationName string) opentracing.Span {
+ s.Span.SetOperationName(operationName)
+ return s
+}
+
+func (s *span) SetTag(key string, value interface{}) opentracing.Span {
+ s.Span.SetTag(key, value)
+ return s
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer/tracer.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer/tracer.go
new file mode 100644
index 00000000..df730280
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer/tracer.go
@@ -0,0 +1,81 @@
+// Package opentracer provides a wrapper on top of the Datadog tracer that can be used with Opentracing.
+// It also provides a set of opentracing.StartSpanOption that are specific to Datadog's APM product.
+// To use it, simply call "New".
+//
+// Note that there are currently some small incompatibilities between the Opentracing spec and the Datadog
+// APM product, which we are in the process of addressing in the long term. When using Datadog, the
+// Opentracing operation name is what Datadog calls the resource, and the Opentracing "component"
+// tag is Datadog's operation name. This means that in order to define (in Opentracing terms) a span that
+// has the operation name "/user/profile" and the component "http.request", one would do:
+// opentracing.StartSpan("http.request", opentracer.ResourceName("/user/profile"))
+//
+// Some libraries and frameworks are supported out-of-the-box by using our integrations. You can see a list
+// of supported integrations here: https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/contrib. They are fully
+// compatible with the Opentracing implementation.
+package opentracer
+
+import (
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
+
+ opentracing "github.com/opentracing/opentracing-go"
+)
+
+// New creates, instantiates and returns an Opentracing compatible version of the
+// Datadog tracer using the provided set of options.
+func New(opts ...tracer.StartOption) opentracing.Tracer {
+ tracer.Start(opts...)
+ return &opentracer{internal.GetGlobalTracer()}
+}
+
+var _ opentracing.Tracer = (*opentracer)(nil)
+
+// opentracer implements opentracing.Tracer on top of ddtrace.Tracer.
+type opentracer struct{ ddtrace.Tracer }
+
+// StartSpan implements opentracing.Tracer.
+func (t *opentracer) StartSpan(operationName string, options ...opentracing.StartSpanOption) opentracing.Span {
+ var sso opentracing.StartSpanOptions
+ for _, o := range options {
+ o.Apply(&sso)
+ }
+ opts := []ddtrace.StartSpanOption{tracer.StartTime(sso.StartTime)}
+ for _, ref := range sso.References {
+ if v, ok := ref.ReferencedContext.(ddtrace.SpanContext); ok && ref.Type == opentracing.ChildOfRef {
+ opts = append(opts, tracer.ChildOf(v))
+ break // can only have one parent
+ }
+ }
+ for k, v := range sso.Tags {
+ opts = append(opts, tracer.Tag(k, v))
+ }
+ return &span{
+ Span: t.Tracer.StartSpan(operationName, opts...),
+ opentracer: t,
+ }
+}
+
+// Inject implements opentracing.Tracer.
+func (t *opentracer) Inject(ctx opentracing.SpanContext, format interface{}, carrier interface{}) error {
+ sctx, ok := ctx.(ddtrace.SpanContext)
+ if !ok {
+ return opentracing.ErrUnsupportedFormat
+ }
+ switch format {
+ case opentracing.TextMap, opentracing.HTTPHeaders:
+ return t.Tracer.Inject(sctx, carrier)
+ default:
+ return opentracing.ErrUnsupportedFormat
+ }
+}
+
+// Extract implements opentracing.Tracer.
+func (t *opentracer) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) {
+ switch format {
+ case opentracing.TextMap, opentracing.HTTPHeaders:
+ return t.Tracer.Extract(carrier)
+ default:
+ return nil, opentracing.ErrUnsupportedFormat
+ }
+}
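
Below, a hedged sketch of cross-process propagation through this wrapper, reusing
OpenTracing's HTTP headers carrier; the request and operation names are
illustrative:

    package main

    import (
        "net/http"

        opentracing "github.com/opentracing/opentracing-go"

        "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer"
        "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
    )

    func main() {
        t := opentracer.New()
        defer tracer.Stop()

        span := t.StartSpan("http.request")
        defer span.Finish()

        req, _ := http.NewRequest("GET", "http://example.com", nil)
        carrier := opentracing.HTTPHeadersCarrier(req.Header)

        // Inject the span context into the outgoing headers...
        if err := t.Inject(span.Context(), opentracing.HTTPHeaders, carrier); err != nil {
            panic(err)
        }
        // ...and extract it on the receiving side to continue the trace.
        if sctx, err := t.Extract(opentracing.HTTPHeaders, carrier); err == nil {
            child := t.StartSpan("child.op", opentracing.ChildOf(sctx))
            child.Finish()
        }
    }
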
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer/tracer_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer/tracer_test.go
new file mode 100644
index 00000000..e87e236b
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer/tracer_test.go
@@ -0,0 +1,20 @@
+package opentracer
+
+import (
+ "testing"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestStart(t *testing.T) {
+ assert := assert.New(t)
+ ot := New()
+ dd, ok := internal.GetGlobalTracer().(ddtrace.Tracer)
+ assert.True(ok)
+ ott, ok := ot.(*opentracer)
+ assert.True(ok)
+ assert.Equal(ott.Tracer, dd)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/context.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/context.go
new file mode 100644
index 00000000..b95b1483
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/context.go
@@ -0,0 +1,42 @@
+package tracer
+
+import (
+ "context"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal"
+)
+
+type contextKey struct{}
+
+var activeSpanKey = contextKey{}
+
+// ContextWithSpan returns a copy of the given context which includes the span s.
+func ContextWithSpan(ctx context.Context, s Span) context.Context {
+ return context.WithValue(ctx, activeSpanKey, s)
+}
+
+// SpanFromContext returns the span contained in the given context. A second return
+// value indicates if a span was found in the context. If no span is found, a no-op
+// span is returned.
+func SpanFromContext(ctx context.Context) (Span, bool) {
+ if ctx == nil {
+ return &internal.NoopSpan{}, false
+ }
+ v := ctx.Value(activeSpanKey)
+ if s, ok := v.(ddtrace.Span); ok {
+ return s, true
+ }
+ return &internal.NoopSpan{}, false
+}
+
+// StartSpanFromContext returns a new span with the given operation name and options. If a span
+// is found in the context, it will be used as the parent of the resulting span. If the ChildOf
+// option is passed, the span from context will take precedence over it as the parent span.
+func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) {
+ if s, ok := SpanFromContext(ctx); ok {
+ opts = append(opts, ChildOf(s.Context()))
+ }
+ s := StartSpan(operationName, opts...)
+ return s, ContextWithSpan(ctx, s)
+}
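
By way of example, a short sketch of threading a span through Go's context with
these helpers; the operation names are illustrative:

    package main

    import (
        "context"

        "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
    )

    // processItem picks up its parent span from the context automatically.
    func processItem(ctx context.Context) {
        span, ctx := tracer.StartSpanFromContext(ctx, "process.item")
        defer span.Finish()
        _ = ctx // pass ctx on to calls that should nest under this span
    }

    func main() {
        tracer.Start()
        defer tracer.Stop()

        root, ctx := tracer.StartSpanFromContext(context.Background(), "job.run")
        processItem(ctx) // becomes a child of root via the context
        root.Finish()
    }
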
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/context_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/context_test.go
new file mode 100644
index 00000000..1f41ee3b
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/context_test.go
@@ -0,0 +1,71 @@
+package tracer
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal"
+)
+
+func TestContextWithSpan(t *testing.T) {
+ want := &span{SpanID: 123}
+ ctx := ContextWithSpan(context.Background(), want)
+ got, ok := ctx.Value(activeSpanKey).(*span)
+ assert := assert.New(t)
+ assert.True(ok)
+ assert.Equal(got, want)
+}
+
+func TestSpanFromContext(t *testing.T) {
+ t.Run("regular", func(t *testing.T) {
+ assert := assert.New(t)
+ want := &span{SpanID: 123}
+ ctx := ContextWithSpan(context.Background(), want)
+ got, ok := SpanFromContext(ctx)
+ assert.True(ok)
+ assert.Equal(got, want)
+ })
+ t.Run("no-op", func(t *testing.T) {
+ assert := assert.New(t)
+ span, ok := SpanFromContext(context.Background())
+ assert.False(ok)
+ _, ok = span.(*internal.NoopSpan)
+ assert.True(ok)
+ span, ok = SpanFromContext(nil)
+ assert.False(ok)
+ _, ok = span.(*internal.NoopSpan)
+ assert.True(ok)
+ })
+}
+
+func TestStartSpanFromContext(t *testing.T) {
+ _, _, stop := startTestTracer()
+ defer stop()
+
+ parent := &span{context: &spanContext{spanID: 123, traceID: 456}}
+ parent2 := &span{context: &spanContext{spanID: 789, traceID: 456}}
+ pctx := ContextWithSpan(context.Background(), parent)
+ child, ctx := StartSpanFromContext(
+ pctx,
+ "http.request",
+ ServiceName("gin"),
+ ResourceName("/"),
+ ChildOf(parent2.Context()), // we do this to assert that the span in pctx takes priority.
+ )
+ assert := assert.New(t)
+
+ got, ok := child.(*span)
+ assert.True(ok)
+ gotctx, ok := SpanFromContext(ctx)
+ assert.True(ok)
+ assert.Equal(gotctx, got)
+ _, ok = gotctx.(*internal.NoopSpan)
+ assert.False(ok)
+
+ assert.Equal(uint64(456), got.TraceID)
+ assert.Equal(uint64(123), got.ParentID)
+ assert.Equal("http.request", got.Name)
+ assert.Equal("gin", got.Service)
+ assert.Equal("/", got.Resource)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/doc.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/doc.go
new file mode 100644
index 00000000..2abd319a
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/doc.go
@@ -0,0 +1,50 @@
+// Package tracer contains Datadog's core tracing client. It is used to trace
+// requests as they flow across web servers, databases and microservices, giving
+// developers visibility into bottlenecks and troublesome requests. To start the
+// tracer, simply call the Start function along with an optional set of options.
+// By default, the trace agent is considered to be found at "localhost:8126". In a
+// setup where this would be different (let's say 127.0.0.1:1234), we could do:
+// tracer.Start(tracer.WithAgentAddr("127.0.0.1:1234"))
+// defer tracer.Stop()
+//
+// The tracing client can perform trace sampling. While the trace agent
+// already samples traces to reduce bandwidth usage, client sampling reduces
+// performance overhead. To make use of it, the package comes with a ready-to-use
+// rate sampler that can be passed to the tracer. To use it and keep only 30% of the
+// requests, one would do:
+// s := tracer.NewRateSampler(0.3)
+// tracer.Start(tracer.WithSampler(s))
+//
+// All spans created by the tracer contain a context hereby referred to as the span
+// context. Note that this is different from Go's context. The span context is used
+// to package essential information from a span, which is needed when creating child
+// spans that inherit from it. Thus, a child span is created from a span's span context.
+// The span context can originate from within the same process, but also a
+// different process or even a different machine in the case of distributed tracing.
+//
+// To make use of distributed tracing, a span's context may be injected via a carrier
+// into a transport (HTTP, RPC, etc.) to be extracted on the other end and used to
+// create spans that are direct descendants of it. A couple of carrier interfaces
+// which should cover most of the use-case scenarios are readily provided, such as
+// HTTPCarrier and TextMapCarrier. Users are free to create their own, which will work
+// with our propagation algorithm as long as they implement the TextMapReader and TextMapWriter
+// interfaces. An example alternate implementation is the MDCarrier in our gRPC integration.
+//
+// As an example, injecting a span's context into an HTTP request would look like this:
+// req, err := http.NewRequest("GET", "http://example.com", nil)
+// // ...
+// err = tracer.Inject(span.Context(), tracer.HTTPHeadersCarrier(req.Header))
+// // ...
+// http.DefaultClient.Do(req)
+// Then, on the server side, to continue the trace one would do:
+// sctx, err := tracer.Extract(tracer.HTTPHeadersCarrier(req.Header))
+// // ...
+// span := tracer.StartSpan("child.span", tracer.ChildOf(sctx))
+// In the same manner, any means can be used as a carrier to inject a context into a transport. Go's
+// context can also be used as a means to transport spans within the same process. The methods
+// StartSpanFromContext, ContextWithSpan and SpanFromContext exist for this reason.
+//
+// Some libraries and frameworks are supported out-of-the-box by using one
+// of our integrations. You can see a list of supported integrations here:
+// https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/contrib
+package tracer // import "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/errors.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/errors.go
new file mode 100644
index 00000000..35c14c60
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/errors.go
@@ -0,0 +1,69 @@
+package tracer
+
+import (
+ "fmt"
+ "log"
+ "strconv"
+)
+
+var errorPrefix = fmt.Sprintf("Datadog Tracer Error (%s): ", tracerVersion)
+
+type traceEncodingError struct{ context error }
+
+func (e *traceEncodingError) Error() string {
+ return fmt.Sprintf("error encoding trace: %s", e.context)
+}
+
+type spanBufferFullError struct{}
+
+func (e *spanBufferFullError) Error() string {
+ return fmt.Sprintf("trace span cap (%d) reached, dropping trace", traceMaxSize)
+}
+
+type dataLossError struct {
+ count int // number of items lost
+ context error // any context error, if available
+}
+
+func (e *dataLossError) Error() string {
+ return fmt.Sprintf("lost traces (count: %d), error: %v", e.count, e.context)
+}
+
+type errorSummary struct {
+ Count int
+ Example string
+}
+
+func aggregateErrors(errChan <-chan error) map[string]errorSummary {
+ errs := make(map[string]errorSummary, len(errChan))
+ for {
+ select {
+ case err := <-errChan:
+ if err == nil {
+ break
+ }
+ key := fmt.Sprintf("%T", err)
+ summary := errs[key]
+ summary.Count++
+ summary.Example = err.Error()
+ errs[key] = summary
+ default: // stop when there's no more data
+ return errs
+ }
+ }
+}
+
+// logErrors logs the errors while preventing log file flooding: when there
+// are many messages, it caps them and shows a quick summary.
+// As of today it only logs using the standard Go log package, but
+// later we could send those stats to the agent. // TODO(ufoot)
+func logErrors(errChan <-chan error) {
+ errs := aggregateErrors(errChan)
+ for _, v := range errs {
+ var repeat string
+ if v.Count > 1 {
+ repeat = " (repeated " + strconv.Itoa(v.Count) + " times)"
+ }
+ log.Println(errorPrefix + v.Example + repeat)
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/errors_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/errors_test.go
new file mode 100644
index 00000000..226c14d4
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/errors_test.go
@@ -0,0 +1,38 @@
+package tracer
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestAggregateErrors(t *testing.T) {
+ assert := assert.New(t)
+
+ errChan := make(chan error, 100)
+ errChan <- &traceEncodingError{context: errors.New("couldn't encode at byte 0")}
+ errChan <- &traceEncodingError{context: errors.New("couldn't encode at byte 0")}
+ errChan <- &traceEncodingError{context: errors.New("couldn't encode at byte 0")}
+ errChan <- &traceEncodingError{context: errors.New("couldn't encode at byte 0")}
+ errChan <- &dataLossError{count: 42}
+ errChan <- nil
+ errChan <- errors.New("unexpected error type")
+
+ errs := aggregateErrors(errChan)
+
+ assert.Equal(map[string]errorSummary{
+ "*tracer.traceEncodingError": errorSummary{
+ Count: 4,
+ Example: "error encoding trace: couldn't encode at byte 0",
+ },
+ "*tracer.dataLossError": errorSummary{
+ Count: 1,
+ Example: "lost traces (count: 42), error: ",
+ },
+ "*errors.errorString": errorSummary{
+ Count: 1,
+ Example: "unexpected error type",
+ },
+ }, errs)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/example_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/example_test.go
new file mode 100644
index 00000000..d86b05b9
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/example_test.go
@@ -0,0 +1,34 @@
+package tracer
+
+import (
+ "io/ioutil"
+ "log"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+)
+
+// A basic example demonstrating how to start the tracer, as well as how
+// to create a root span and a child span that is a descendant of it.
+func Example() {
+ // Start the tracer and defer the Stop method.
+ Start(WithAgentAddr("host:port"))
+ defer Stop()
+
+ // Start a root span.
+ span := StartSpan("get.data")
+ defer span.Finish()
+
+ // Create a child of it, computing the time needed to read a file.
+ child := StartSpan("read.file", ChildOf(span.Context()))
+ child.SetTag(ext.ResourceName, "test.json")
+
+ // Perform an operation.
+ _, err := ioutil.ReadFile("~/test.json")
+
+ // We may finish the child span using the returned error. If it's
+ // nil, it will be disregarded.
+ child.Finish(WithError(err))
+ if err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/option.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/option.go
new file mode 100644
index 00000000..78fb0fef
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/option.go
@@ -0,0 +1,163 @@
+package tracer
+
+import (
+ "os"
+ "path/filepath"
+ "time"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+)
+
+// config holds the tracer configuration.
+type config struct {
+ // debug, when true, writes details to logs.
+ debug bool
+
+ // serviceName specifies the name of this application.
+ serviceName string
+
+ // sampler specifies the sampler that will be used for sampling traces.
+ sampler Sampler
+
+ // agentAddr specifies the hostname and port of the agent where the traces
+ // are sent to.
+ agentAddr string
+
+ // globalTags holds a set of tags that will be automatically applied to
+ // all spans.
+ globalTags map[string]interface{}
+
+ // transport specifies the Transport interface which will be used to send data to the agent.
+ transport transport
+
+ // propagator propagates span context cross-process
+ propagator Propagator
+}
+
+// StartOption represents a function that can be provided as a parameter to Start.
+type StartOption func(*config)
+
+// defaults sets the default values for a config.
+func defaults(c *config) {
+ c.serviceName = filepath.Base(os.Args[0])
+ c.sampler = NewAllSampler()
+ c.agentAddr = defaultAddress
+}
+
+// WithDebugMode enables debug mode on the tracer, resulting in more verbose logging.
+func WithDebugMode(enabled bool) StartOption {
+ return func(c *config) {
+ c.debug = enabled
+ }
+}
+
+// WithPropagator sets an alternative propagator to be used by the tracer.
+func WithPropagator(p Propagator) StartOption {
+ return func(c *config) {
+ c.propagator = p
+ }
+}
+
+// WithServiceName sets the default service name to be used with the tracer.
+func WithServiceName(name string) StartOption {
+ return func(c *config) {
+ c.serviceName = name
+ }
+}
+
+// WithAgentAddr sets the address where the agent is located. The default is
+// localhost:8126. It should contain both host and port.
+func WithAgentAddr(addr string) StartOption {
+ return func(c *config) {
+ c.agentAddr = addr
+ }
+}
+
+// WithGlobalTag sets a key/value pair which will be set as a tag on all spans
+// created by tracer. This option may be used multiple times.
+func WithGlobalTag(k string, v interface{}) StartOption {
+ return func(c *config) {
+ if c.globalTags == nil {
+ c.globalTags = make(map[string]interface{})
+ }
+ c.globalTags[k] = v
+ }
+}
+
+// WithSampler sets the given sampler to be used with the tracer. By default
+// an all-permissive sampler is used.
+func WithSampler(s Sampler) StartOption {
+ return func(c *config) {
+ c.sampler = s
+ }
+}
+
+// StartSpanOption is a configuration option for StartSpan. It is aliased in order
+// to help godoc group all the functions returning it together. It is considered
+// more correct to refer to it by its origin type, ddtrace.StartSpanOption.
+type StartSpanOption = ddtrace.StartSpanOption
+
+// Tag sets the given key/value pair as a tag on the started Span.
+func Tag(k string, v interface{}) StartSpanOption {
+ return func(cfg *ddtrace.StartSpanConfig) {
+ if cfg.Tags == nil {
+ cfg.Tags = map[string]interface{}{}
+ }
+ cfg.Tags[k] = v
+ }
+}
+
+// ServiceName sets the given service name on the started span. For example "http.server".
+func ServiceName(name string) StartSpanOption {
+ return Tag(ext.ServiceName, name)
+}
+
+// ResourceName sets the given resource name on the started span. A resource could
+// be an SQL query, a URL, an RPC method or something else.
+func ResourceName(name string) StartSpanOption {
+ return Tag(ext.ResourceName, name)
+}
+
+// SpanType sets the given span type on the started span. Some examples in the case of
+// the Datadog APM product could be "web", "db" or "cache".
+func SpanType(name string) StartSpanOption {
+ return Tag(ext.SpanType, name)
+}
+
+// ChildOf tells StartSpan to use the given span context as a parent for the
+// created span.
+func ChildOf(ctx ddtrace.SpanContext) StartSpanOption {
+ return func(cfg *ddtrace.StartSpanConfig) {
+ cfg.Parent = ctx
+ }
+}
+
+// StartTime sets a custom time as the start time for the created span. By
+// default a span is started using the creation time.
+func StartTime(t time.Time) StartSpanOption {
+ return func(cfg *ddtrace.StartSpanConfig) {
+ cfg.StartTime = t
+ }
+}
+
+// FinishOption is a configuration option for FinishSpan. It is aliased in order
+// to help godoc group all the functions returning it together. It is considered
+// more correct to refer to it by its origin type, ddtrace.FinishOption.
+type FinishOption = ddtrace.FinishOption
+
+// FinishTime sets the given time as the finishing time for the span. By default,
+// the current time is used.
+func FinishTime(t time.Time) FinishOption {
+ return func(cfg *ddtrace.FinishConfig) {
+ cfg.FinishTime = t
+ }
+}
+
+// WithError marks the span as having had an error. It uses the information from
+// err to set tags such as the error message, error type and stack trace.
+func WithError(err error) FinishOption {
+ return func(cfg *ddtrace.FinishConfig) {
+ cfg.Error = err
+ }
+}
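
A brief sketch tying the option kinds together: StartOption configures the tracer
once at startup, while StartSpanOption and FinishOption apply per span. The
service names, agent address and tags below are placeholders:

    package main

    import (
        "time"

        "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
    )

    func main() {
        // Tracer-wide configuration, applied once.
        tracer.Start(
            tracer.WithServiceName("checkout"),
            tracer.WithAgentAddr("127.0.0.1:8126"),
            tracer.WithGlobalTag("env", "staging"),
        )
        defer tracer.Stop()

        // Per-span options, applied at StartSpan and Finish time.
        span := tracer.StartSpan("db.query",
            tracer.ResourceName("SELECT * FROM orders"),
            tracer.SpanType("db"),
        )
        span.Finish(tracer.FinishTime(time.Now()))
    }
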
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/option_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/option_test.go
new file mode 100644
index 00000000..19c29590
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/option_test.go
@@ -0,0 +1,40 @@
+package tracer
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func withTransport(t transport) StartOption {
+ return func(c *config) {
+ c.transport = t
+ }
+}
+
+func TestTracerOptionsDefaults(t *testing.T) {
+ assert := assert.New(t)
+ var c config
+ defaults(&c)
+ assert.Equal(float64(1), c.sampler.(RateSampler).Rate())
+ assert.Equal("tracer.test", c.serviceName)
+ assert.Equal("localhost:8126", c.agentAddr)
+}
+
+func TestTracerOptions(t *testing.T) {
+ assert := assert.New(t)
+ tracer := newTracer(
+ WithSampler(NewRateSampler(0.5)),
+ WithServiceName("api-intake"),
+ WithAgentAddr("ddagent.consul.local:58126"),
+ WithGlobalTag("k", "v"),
+ WithDebugMode(true),
+ )
+ c := tracer.config
+ assert.Equal(float64(0.5), c.sampler.(RateSampler).Rate())
+ assert.Equal("api-intake", c.serviceName)
+ assert.Equal("ddagent.consul.local:58126", c.agentAddr)
+ assert.NotNil(c.globalTags)
+ assert.Equal("v", c.globalTags["k"])
+ assert.True(c.debug)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/payload.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/payload.go
new file mode 100644
index 00000000..65ce37fe
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/payload.go
@@ -0,0 +1,116 @@
+package tracer
+
+import (
+ "bytes"
+ "encoding/binary"
+ "io"
+
+ "github.com/tinylib/msgp/msgp"
+)
+
+// payload is a wrapper on top of the msgpack encoder which allows constructing an
+// encoded array by pushing its entries sequentially, one at a time. It lets
+// us encode as we would with a stream, except that the contents of the stream
+// can be read as a slice by the msgpack decoder at any time. It follows the guidelines
+// from the msgpack array spec:
+// https://github.com/msgpack/msgpack/blob/master/spec.md#array-format-family
+//
+// payload implements io.Reader and can be used with the decoder directly. To create
+// a new payload use the newPayload function.
+//
+// payload is not safe for concurrent use.
+//
+// Pushing traces into the payload one at a time means the payload size is known
+// at all times, while the agent can still decode the result as a plain array.
+type payload struct {
+ // header specifies the first few bytes in the msgpack stream
+ // indicating the type of array (fixarray, array16 or array32)
+ // and the number of items contained in the stream.
+ header []byte
+
+ // off specifies the current read position on the header.
+ off int
+
+ // count specifies the number of items in the stream.
+ count uint64
+
+ // buf holds the sequence of msgpack-encoded items.
+ buf bytes.Buffer
+}
+
+var _ io.Reader = (*payload)(nil)
+
+// newPayload returns a ready to use payload.
+func newPayload() *payload {
+ p := &payload{
+ header: make([]byte, 8),
+ off: 8,
+ }
+ return p
+}
+
+// push pushes a new item into the stream.
+func (p *payload) push(t spanList) error {
+ if err := msgp.Encode(&p.buf, t); err != nil {
+ return err
+ }
+ p.count++
+ p.updateHeader()
+ return nil
+}
+
+// itemCount returns the number of items available in the stream.
+func (p *payload) itemCount() int {
+ return int(p.count)
+}
+
+// size returns the payload size in bytes. After the first read the value becomes
+// inaccurate by up to 8 bytes.
+func (p *payload) size() int {
+ return p.buf.Len() + len(p.header) - p.off
+}
+
+// reset resets the internal buffer, counter and read offset.
+func (p *payload) reset() {
+ p.off = 8
+ p.count = 0
+ p.buf.Reset()
+}
+
+// https://github.com/msgpack/msgpack/blob/master/spec.md#array-format-family
+const (
+ msgpackArrayFix byte = 144 // up to 15 items
+ msgpackArray16 = 0xdc // up to 2^16-1 items, followed by size in 2 bytes
+ msgpackArray32 = 0xdd // up to 2^32-1 items, followed by size in 4 bytes
+)
+
+// updateHeader updates the payload header based on the number of items currently
+// present in the stream.
+func (p *payload) updateHeader() {
+ n := p.count
+ switch {
+ case n <= 15:
+ p.header[7] = msgpackArrayFix + byte(n)
+ p.off = 7
+ case n <= 1<<16-1:
+ binary.BigEndian.PutUint64(p.header, n) // the count occupies the last 2 bytes
+ p.header[5] = msgpackArray16
+ p.off = 5
+ default: // n <= 1<<32-1
+ binary.BigEndian.PutUint64(p.header, n) // the count occupies the last 4 bytes
+ p.header[3] = msgpackArray32
+ p.off = 3
+ }
+}
+
+// Read implements io.Reader. It reads from the msgpack-encoded stream.
+func (p *payload) Read(b []byte) (n int, err error) {
+ if p.off < len(p.header) {
+ // reading header
+ n = copy(b, p.header[p.off:])
+ p.off += n
+ return n, nil
+ }
+ return p.buf.Read(b)
+}
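
To make the header layout concrete, a standalone sketch (not part of the package)
reproducing updateHeader's logic; it prints the three msgpack array header forms:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // arrayHeader writes the count big-endian into an 8-byte buffer and places
    // the marker byte just before the 1, 2 or 4 bytes that carry the count,
    // mirroring payload.updateHeader above.
    func arrayHeader(n uint64) []byte {
        header := make([]byte, 8)
        switch {
        case n <= 15:
            header[7] = 0x90 + byte(n) // fixarray
            return header[7:]
        case n <= 1<<16-1:
            binary.BigEndian.PutUint64(header, n)
            header[5] = 0xdc // array16
            return header[5:]
        default:
            binary.BigEndian.PutUint64(header, n)
            header[3] = 0xdd // array32
            return header[3:]
        }
    }

    func main() {
        fmt.Printf("% x\n", arrayHeader(10))    // 9a
        fmt.Printf("% x\n", arrayHeader(300))   // dc 01 2c
        fmt.Printf("% x\n", arrayHeader(70000)) // dd 00 01 11 70
    }
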
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/payload_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/payload_test.go
new file mode 100644
index 00000000..6e277bfb
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/payload_test.go
@@ -0,0 +1,100 @@
+package tracer
+
+import (
+ "bytes"
+ "io/ioutil"
+ "strconv"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/tinylib/msgp/msgp"
+)
+
+var fixedTime = now()
+
+func newSpanList(count int) spanList {
+ n := count%5 + 1 // max trace size 5
+ itoa := map[int]string{0: "0", 1: "1", 2: "2", 3: "3", 4: "4", 5: "5"}
+ list := make([]*span, n)
+ for i := 0; i < n; i++ {
+ list[i] = newBasicSpan("span.list." + itoa[i])
+ list[i].Start = fixedTime
+ }
+ return list
+}
+
+// TestPayloadIntegrity tests that whatever we push into the payload
+// allows us to read the same content as would have been encoded by
+// the codec.
+func TestPayloadIntegrity(t *testing.T) {
+ assert := assert.New(t)
+ p := newPayload()
+ want := new(bytes.Buffer)
+ for _, n := range []int{10, 1 << 10, 1 << 17} {
+ t.Run(strconv.Itoa(n), func(t *testing.T) {
+ p.reset()
+ lists := make(spanLists, n)
+ for i := 0; i < n; i++ {
+ list := newSpanList(i)
+ lists[i] = list
+ p.push(list)
+ }
+ want.Reset()
+ err := msgp.Encode(want, lists)
+ assert.NoError(err)
+ assert.Equal(want.Len(), p.size())
+ assert.Equal(p.itemCount(), n)
+
+ got, err := ioutil.ReadAll(p)
+ assert.NoError(err)
+ assert.Equal(want.Bytes(), got)
+ })
+ }
+}
+
+// TestPayloadDecode ensures that whatever we push into the payload can
+// be decoded by the codec.
+func TestPayloadDecode(t *testing.T) {
+ assert := assert.New(t)
+ p := newPayload()
+ for _, n := range []int{10, 1 << 10} {
+ t.Run(strconv.Itoa(n), func(t *testing.T) {
+ p.reset()
+ for i := 0; i < n; i++ {
+ p.push(newSpanList(i))
+ }
+ var got spanLists
+ err := msgp.Decode(p, &got)
+ assert.NoError(err)
+ })
+ }
+}
+
+func BenchmarkPayloadThroughput(b *testing.B) {
+ b.Run("10K", benchmarkPayloadThroughput(1))
+ b.Run("100K", benchmarkPayloadThroughput(10))
+ b.Run("1MB", benchmarkPayloadThroughput(100))
+}
+
+// benchmarkPayloadThroughput benchmarks the throughput of the payload by repeatedly
+// pushing a trace containing count spans of approximately 10KB in size each.
+func benchmarkPayloadThroughput(count int) func(*testing.B) {
+ return func(b *testing.B) {
+ p := newPayload()
+ s := newBasicSpan("X")
+ s.Meta["key"] = strings.Repeat("X", 10*1024)
+ trace := make(spanList, count)
+ for i := 0; i < count; i++ {
+ trace[i] = s
+ }
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ p.reset()
+ for p.size() < payloadMaxLimit {
+ p.push(trace)
+ }
+ }
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/propagator.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/propagator.go
new file mode 100644
index 00000000..3c7822d4
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/propagator.go
@@ -0,0 +1,52 @@
+package tracer
+
+import (
+ "errors"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+)
+
+// Propagator implementations should be able to inject and extract
+// SpanContexts into an implementation specific carrier.
+type Propagator interface {
+ // Inject takes the SpanContext and injects it into the carrier.
+ Inject(context ddtrace.SpanContext, carrier interface{}) error
+
+ // Extract returns the SpanContext from the given carrier.
+ Extract(carrier interface{}) (ddtrace.SpanContext, error)
+}
+
+// TextMapWriter allows setting key/value pairs of strings on the underlying
+// data structure. Carriers implementing TextMapWriter are compatible to be
+// used with Datadog's TextMapPropagator.
+type TextMapWriter interface {
+ // Set sets the given key/value pair.
+ Set(key, val string)
+}
+
+// TextMapReader allows iterating over sets of key/value pairs. Carriers implementing
+// TextMapReader are compatible to be used with Datadog's TextMapPropagator.
+type TextMapReader interface {
+ // ForeachKey iterates over all keys that exist in the underlying
+ // carrier. It takes a callback function which will be called
+ // using all key/value pairs as arguments. ForeachKey will return
+ // the first error returned by the handler.
+ ForeachKey(handler func(key, val string) error) error
+}
+
+var (
+ // ErrInvalidCarrier is returned when the carrier provided to the propagator
+ // does not implement the correct interfaces.
+ ErrInvalidCarrier = errors.New("invalid carrier")
+
+ // ErrInvalidSpanContext is returned when the span context found in the
+ // carrier is not of the expected type.
+ ErrInvalidSpanContext = errors.New("invalid span context")
+
+ // ErrSpanContextCorrupted is returned when there was a problem parsing
+ // the information found in the carrier.
+ ErrSpanContextCorrupted = errors.New("span context corrupted")
+
+ // ErrSpanContextNotFound represents missing information in the given carrier.
+ ErrSpanContextNotFound = errors.New("span context not found")
+)
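
As an illustration, a minimal map-backed carrier satisfying both interfaces, used
with the package-level Inject and Extract; this is a sketch, not an official
carrier type:

    package main

    import (
        "fmt"

        "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
    )

    // mapCarrier implements TextMapWriter and TextMapReader over a plain map.
    type mapCarrier map[string]string

    func (c mapCarrier) Set(key, val string) { c[key] = val }

    func (c mapCarrier) ForeachKey(handler func(key, val string) error) error {
        for k, v := range c {
            if err := handler(k, v); err != nil {
                return err
            }
        }
        return nil
    }

    func main() {
        tracer.Start()
        defer tracer.Stop()

        span := tracer.StartSpan("publish")
        defer span.Finish()

        carrier := mapCarrier{}
        if err := tracer.Inject(span.Context(), carrier); err != nil {
            fmt.Println("inject:", err)
            return
        }
        sctx, err := tracer.Extract(carrier)
        if err != nil {
            fmt.Println("extract:", err)
            return
        }
        child := tracer.StartSpan("consume", tracer.ChildOf(sctx))
        child.Finish()
    }
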
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/rand.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/rand.go
new file mode 100644
index 00000000..356ae549
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/rand.go
@@ -0,0 +1,50 @@
+package tracer
+
+import (
+ cryptorand "crypto/rand"
+ "log"
+ "math"
+ "math/big"
+ "math/rand"
+ "sync"
+ "time"
+)
+
+// random holds a thread-safe source of random numbers.
+var random *rand.Rand
+
+func init() {
+ var seed int64
+ n, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64))
+ if err == nil {
+ seed = n.Int64()
+ } else {
+ log.Printf("%scannot generate random seed: %v; using current time\n", errorPrefix, err)
+ seed = time.Now().UnixNano()
+ }
+ random = rand.New(&safeSource{
+ source: rand.NewSource(seed),
+ })
+}
+
+// safeSource holds a thread-safe implementation of rand.Source64.
+type safeSource struct {
+ source rand.Source
+ sync.Mutex
+}
+
+func (rs *safeSource) Int63() int64 {
+ rs.Lock()
+ n := rs.source.Int63()
+ rs.Unlock()
+
+ return n
+}
+
+func (rs *safeSource) Uint64() uint64 { return uint64(rs.Int63()) }
+
+func (rs *safeSource) Seed(seed int64) {
+ rs.Lock()
+ rs.source.Seed(seed) // seed the wrapped source; calling rs.Seed here would recurse and deadlock on the mutex
+ rs.Unlock()
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/sampler.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/sampler.go
new file mode 100644
index 00000000..253af006
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/sampler.go
@@ -0,0 +1,72 @@
+package tracer
+
+import (
+ "math"
+ "sync"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+)
+
+// Sampler is the generic interface of any sampler. It must be safe for concurrent use.
+type Sampler interface {
+ // Sample returns true if the given span should be sampled.
+ Sample(span Span) bool
+}
+
+// RateSampler is a sampler implementation which randomly selects spans using a
+// provided rate. For example, a rate of 0.75 will permit 75% of the spans.
+// RateSampler implementations should be safe for concurrent use.
+type RateSampler interface {
+ Sampler
+
+ // Rate returns the current sample rate.
+ Rate() float64
+
+ // SetRate sets a new sample rate.
+ SetRate(rate float64)
+}
+
+// rateSampler samples from a sample rate.
+type rateSampler struct {
+ sync.RWMutex
+ rate float64
+}
+
+// NewAllSampler is a short-hand for NewRateSampler(1). It is all-permissive.
+func NewAllSampler() RateSampler { return NewRateSampler(1) }
+
+// NewRateSampler returns an initialized RateSampler with a given sample rate.
+func NewRateSampler(rate float64) RateSampler {
+ return &rateSampler{rate: rate}
+}
+
+// Rate returns the current rate of the sampler.
+func (r *rateSampler) Rate() float64 {
+ r.RLock()
+ defer r.RUnlock()
+ return r.rate
+}
+
+// SetRate sets a new sampling rate.
+func (r *rateSampler) SetRate(rate float64) {
+ r.Lock()
+ r.rate = rate
+ r.Unlock()
+}
+
+// constant used for the Knuth hashing, same as the agent.
+const knuthFactor = uint64(1111111111111111111)
+
+// Sample returns true if the given span should be sampled.
+func (r *rateSampler) Sample(spn ddtrace.Span) bool {
+ s, ok := spn.(*span)
+ if !ok {
+ return false
+ }
+ r.RLock()
+ defer r.RUnlock()
+ if r.rate < 1 {
+ return s.TraceID*knuthFactor < uint64(r.rate*math.MaxUint64)
+ }
+ return true
+}
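
A small standalone sketch of the Knuth scheme used by Sample above: the keep/drop
decision is a deterministic function of the trace ID (the multiplication wraps
modulo 2^64), so every span of a given trace receives the same verdict:

    package main

    import (
        "fmt"
        "math"
    )

    // knuthSampled mirrors rateSampler.Sample: keep the trace when its ID times
    // the Knuth factor (wrapping on overflow) falls below rate * MaxUint64.
    func knuthSampled(traceID uint64, rate float64) bool {
        const knuthFactor = uint64(1111111111111111111)
        return traceID*knuthFactor < uint64(rate*math.MaxUint64)
    }

    func main() {
        kept := 0
        for id := uint64(1); id <= 100000; id++ {
            if knuthSampled(id, 0.25) {
                kept++
            }
        }
        fmt.Printf("kept %d of 100000 (~25%%)\n", kept) // roughly a quarter
    }
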
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/sampler_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/sampler_test.go
new file mode 100644
index 00000000..58e42ede
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/sampler_test.go
@@ -0,0 +1,38 @@
+package tracer
+
+import (
+ "testing"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestRateSampler(t *testing.T) {
+ assert := assert.New(t)
+ assert.True(NewRateSampler(1).Sample(newBasicSpan("test")))
+ assert.False(NewRateSampler(0).Sample(newBasicSpan("test")))
+ assert.False(NewRateSampler(0).Sample(newBasicSpan("test")))
+ assert.False(NewRateSampler(1).Sample(internal.NoopSpan{}))
+}
+
+func TestRateSamplerFinishedSpan(t *testing.T) {
+ rs := NewRateSampler(0.9999)
+ tracer := newTracer(WithSampler(rs)) // high probability of sampling
+ span := newBasicSpan("test")
+ span.finished = true
+ tracer.sample(span)
+ if !rs.Sample(span) {
+ t.Skip("wasn't sampled") // no flaky tests
+ }
+ _, ok := span.Metrics[sampleRateMetricKey]
+ assert.False(t, ok)
+}
+
+func TestRateSamplerSetting(t *testing.T) {
+ assert := assert.New(t)
+ rs := NewRateSampler(1)
+ assert.Equal(float64(1), rs.Rate())
+ rs.SetRate(0.5)
+ assert.Equal(float64(0.5), rs.Rate())
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/span.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/span.go
new file mode 100644
index 00000000..65cb400e
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/span.go
@@ -0,0 +1,231 @@
+//go:generate msgp -unexported -marshal=false -o=span_msgp.go -tests=false
+
+package tracer
+
+import (
+ "fmt"
+ "reflect"
+ "runtime/debug"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/tinylib/msgp/msgp"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+)
+
+type (
+ // spanList implements msgp.Encodable on top of a slice of spans.
+ spanList []*span
+
+ // spanLists implements msgp.Decodable on top of a slice of spanList.
+ // This type is only used in tests.
+ spanLists []spanList
+)
+
+var (
+ _ ddtrace.Span = (*span)(nil)
+ _ msgp.Encodable = (*spanList)(nil)
+ _ msgp.Decodable = (*spanLists)(nil)
+)
+
+// span represents a computation. Callers must call Finish when a span is
+// complete to ensure it's submitted.
+type span struct {
+ sync.RWMutex `msg:"-"`
+
+ Name string `msg:"name"` // operation name
+ Service string `msg:"service"` // service name (i.e. "grpc.server", "http.request")
+ Resource string `msg:"resource"` // resource name (i.e. "/user?id=123", "SELECT * FROM users")
+ Type string `msg:"type"` // protocol associated with the span (i.e. "web", "db", "cache")
+ Start int64 `msg:"start"` // span start time expressed in nanoseconds since epoch
+ Duration int64 `msg:"duration"` // duration of the span expressed in nanoseconds
+ Meta map[string]string `msg:"meta,omitempty"` // arbitrary map of metadata
+ Metrics map[string]float64 `msg:"metrics,omitempty"` // arbitrary map of numeric metrics
+ SpanID uint64 `msg:"span_id"` // identifier of this span
+ TraceID uint64 `msg:"trace_id"` // identifier of the root span
+ ParentID uint64 `msg:"parent_id"` // identifier of the span's direct parent
+ Error int32 `msg:"error"` // error status of the span; 0 means no errors
+
+ finished bool `msg:"-"` // true if the span has been submitted to a tracer.
+ context *spanContext `msg:"-"` // span propagation context
+}
+
+// Context yields the SpanContext for this Span. Note that the return
+// value of Context() is still valid after a call to Finish(). This is
+// called the span context and it is different from Go's context.
+func (s *span) Context() ddtrace.SpanContext { return s.context }
+
+// SetBaggageItem sets a key/value pair as baggage on the span. Baggage items
+// are propagated down to descendant spans and injected cross-process. Use with
+// care as it adds extra load onto your tracing layer.
+func (s *span) SetBaggageItem(key, val string) {
+ s.context.setBaggageItem(key, val)
+}
+
+// BaggageItem gets the value for a baggage item given its key. Returns the
+// empty string if the value isn't found in this Span.
+func (s *span) BaggageItem(key string) string {
+ return s.context.baggageItem(key)
+}
+
+// SetTag adds a set of key/value metadata to the span.
+func (s *span) SetTag(key string, value interface{}) {
+ s.Lock()
+ defer s.Unlock()
+ // We don't lock spans when flushing, so we could have a data race when
+ // modifying a span as it's being flushed. This protects us against that
+ // race, since spans are marked `finished` before we flush them.
+ if s.finished {
+ return
+ }
+ if key == ext.Error {
+ s.setTagError(value)
+ return
+ }
+ if v, ok := value.(string); ok {
+ s.setTagString(key, v)
+ return
+ }
+ if v, ok := toFloat64(value); ok {
+ s.setTagNumeric(key, v)
+ return
+ }
+ // The value is not numeric, not a string and not an error; the likelihood of this
+ // happening is close to zero, but we should nevertheless account for it.
+ s.Meta[key] = fmt.Sprint(value)
+}
+
+// setTagError sets the error tag. It accounts for various valid scenarios.
+// This method is not safe for concurrent use.
+func (s *span) setTagError(value interface{}) {
+ switch v := value.(type) {
+ case bool:
+ // bool value as per Opentracing spec.
+ if !v {
+ s.Error = 0
+ } else {
+ s.Error = 1
+ }
+ case error:
+ // if anyone sets an error value as the tag, be nice here
+ // and provide all the benefits.
+ s.Error = 1
+ s.Meta[ext.ErrorMsg] = v.Error()
+ s.Meta[ext.ErrorType] = reflect.TypeOf(v).String()
+ s.Meta[ext.ErrorStack] = string(debug.Stack())
+ case nil:
+ // no error
+ s.Error = 0
+ default:
+ // in all other cases, let's assume that setting this tag
+ // is the result of an error.
+ s.Error = 1
+ }
+}
+
+// setTagString sets a string tag. This method is not safe for concurrent use.
+func (s *span) setTagString(key, v string) {
+ switch key {
+ case ext.ServiceName:
+ s.Service = v
+ case ext.ResourceName:
+ s.Resource = v
+ case ext.SpanType:
+ s.Type = v
+ default:
+ s.Meta[key] = v
+ }
+}
+
+// setTagNumeric sets a numeric tag, in our case called a metric. This method
+// is not safe for concurrent use.
+func (s *span) setTagNumeric(key string, v float64) {
+ switch key {
+ case ext.SamplingPriority:
+ // setting sampling priority per spec
+ s.Metrics[samplingPriorityKey] = v
+ s.context.setSamplingPriority(int(v))
+ default:
+ s.Metrics[key] = v
+ }
+}
+
+// Finish closes this Span (but not its children) providing the duration
+// of its part of the tracing session.
+func (s *span) Finish(opts ...ddtrace.FinishOption) {
+ var cfg ddtrace.FinishConfig
+ for _, fn := range opts {
+ fn(&cfg)
+ }
+ var t int64
+ if cfg.FinishTime.IsZero() {
+ t = now()
+ } else {
+ t = cfg.FinishTime.UnixNano()
+ }
+ if cfg.Error != nil {
+ s.SetTag(ext.Error, cfg.Error)
+ }
+ s.finish(t)
+}
+
+// SetOperationName sets or changes the operation name.
+func (s *span) SetOperationName(operationName string) {
+ s.Lock()
+ defer s.Unlock()
+
+ s.Name = operationName
+}
+
+func (s *span) finish(finishTime int64) {
+ s.Lock()
+ defer s.Unlock()
+ // We don't lock spans when flushing, so we could have a data race when
+ // modifying a span as it's being flushed. This protects us against that
+ // race, since spans are marked `finished` before we flush them.
+ if s.finished {
+ // already finished
+ return
+ }
+ if s.Duration == 0 {
+ s.Duration = finishTime - s.Start
+ }
+ s.finished = true
+
+ if !s.context.sampled {
+ // not sampled
+ return
+ }
+ s.context.finish()
+}
+
+// String returns a human readable representation of the span. Not for
+// production, just debugging.
+func (s *span) String() string {
+ lines := []string{
+ fmt.Sprintf("Name: %s", s.Name),
+ fmt.Sprintf("Service: %s", s.Service),
+ fmt.Sprintf("Resource: %s", s.Resource),
+ fmt.Sprintf("TraceID: %d", s.TraceID),
+ fmt.Sprintf("SpanID: %d", s.SpanID),
+ fmt.Sprintf("ParentID: %d", s.ParentID),
+ fmt.Sprintf("Start: %s", time.Unix(0, s.Start)),
+ fmt.Sprintf("Duration: %s", time.Duration(s.Duration)),
+ fmt.Sprintf("Error: %d", s.Error),
+ fmt.Sprintf("Type: %s", s.Type),
+ "Tags:",
+ }
+ s.RLock()
+ for key, val := range s.Meta {
+ lines = append(lines, fmt.Sprintf("\t%s:%s", key, val))
+ }
+ for key, val := range s.Metrics {
+ lines = append(lines, fmt.Sprintf("\t%s:%f", key, val))
+ }
+ s.RUnlock()
+ return strings.Join(lines, "\n")
+}
+
+const samplingPriorityKey = "_sampling_priority_v1"
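
To summarize SetTag's routing above, a short usage sketch; the tag keys come from
the ext package, and the values are placeholders:

    package main

    import (
        "errors"

        "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
        "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
    )

    func main() {
        tracer.Start()
        defer tracer.Stop()

        span := tracer.StartSpan("web.request")
        span.SetTag(ext.ServiceName, "billing")    // -> Service field
        span.SetTag(ext.ResourceName, "/pay")      // -> Resource field
        span.SetTag("retries", 3)                  // numeric -> Metrics map
        span.SetTag("region", "eu-west-1")         // string  -> Meta map
        span.SetTag(ext.Error, errors.New("boom")) // sets the error fields
        span.Finish()
    }
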
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/span_msgp.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/span_msgp.go
new file mode 100644
index 00000000..39cac84a
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/span_msgp.go
@@ -0,0 +1,448 @@
+package tracer
+
+// NOTE: THIS FILE WAS PRODUCED BY THE
+// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)
+// DO NOT EDIT
+
+import (
+ "github.com/tinylib/msgp/msgp"
+)
+
+// DecodeMsg implements msgp.Decodable
+func (z *span) DecodeMsg(dc *msgp.Reader) (err error) {
+ var field []byte
+ _ = field
+ var zb0001 uint32
+ zb0001, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, err = dc.ReadMapKeyPtr()
+ if err != nil {
+ return
+ }
+ switch msgp.UnsafeString(field) {
+ case "name":
+ z.Name, err = dc.ReadString()
+ if err != nil {
+ return
+ }
+ case "service":
+ z.Service, err = dc.ReadString()
+ if err != nil {
+ return
+ }
+ case "resource":
+ z.Resource, err = dc.ReadString()
+ if err != nil {
+ return
+ }
+ case "type":
+ z.Type, err = dc.ReadString()
+ if err != nil {
+ return
+ }
+ case "start":
+ z.Start, err = dc.ReadInt64()
+ if err != nil {
+ return
+ }
+ case "duration":
+ z.Duration, err = dc.ReadInt64()
+ if err != nil {
+ return
+ }
+ case "meta":
+ var zb0002 uint32
+ zb0002, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ if z.Meta == nil && zb0002 > 0 {
+ z.Meta = make(map[string]string, zb0002)
+ } else if len(z.Meta) > 0 {
+ for key := range z.Meta {
+ delete(z.Meta, key)
+ }
+ }
+ for zb0002 > 0 {
+ zb0002--
+ var za0001 string
+ var za0002 string
+ za0001, err = dc.ReadString()
+ if err != nil {
+ return
+ }
+ za0002, err = dc.ReadString()
+ if err != nil {
+ return
+ }
+ z.Meta[za0001] = za0002
+ }
+ case "metrics":
+ var zb0003 uint32
+ zb0003, err = dc.ReadMapHeader()
+ if err != nil {
+ return
+ }
+ if z.Metrics == nil && zb0003 > 0 {
+ z.Metrics = make(map[string]float64, zb0003)
+ } else if len(z.Metrics) > 0 {
+ for key := range z.Metrics {
+ delete(z.Metrics, key)
+ }
+ }
+ for zb0003 > 0 {
+ zb0003--
+ var za0003 string
+ var za0004 float64
+ za0003, err = dc.ReadString()
+ if err != nil {
+ return
+ }
+ za0004, err = dc.ReadFloat64()
+ if err != nil {
+ return
+ }
+ z.Metrics[za0003] = za0004
+ }
+ case "span_id":
+ z.SpanID, err = dc.ReadUint64()
+ if err != nil {
+ return
+ }
+ case "trace_id":
+ z.TraceID, err = dc.ReadUint64()
+ if err != nil {
+ return
+ }
+ case "parent_id":
+ z.ParentID, err = dc.ReadUint64()
+ if err != nil {
+ return
+ }
+ case "error":
+ z.Error, err = dc.ReadInt32()
+ if err != nil {
+ return
+ }
+ default:
+ err = dc.Skip()
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z *span) EncodeMsg(en *msgp.Writer) (err error) {
+ // map header, size 12
+ // write "name"
+ err = en.Append(0x8c, 0xa4, 0x6e, 0x61, 0x6d, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.Name)
+ if err != nil {
+ return
+ }
+ // write "service"
+ err = en.Append(0xa7, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.Service)
+ if err != nil {
+ return
+ }
+ // write "resource"
+ err = en.Append(0xa8, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.Resource)
+ if err != nil {
+ return
+ }
+ // write "type"
+ err = en.Append(0xa4, 0x74, 0x79, 0x70, 0x65)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(z.Type)
+ if err != nil {
+ return
+ }
+ // write "start"
+ err = en.Append(0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
+ if err != nil {
+ return
+ }
+ err = en.WriteInt64(z.Start)
+ if err != nil {
+ return
+ }
+ // write "duration"
+ err = en.Append(0xa8, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e)
+ if err != nil {
+ return
+ }
+ err = en.WriteInt64(z.Duration)
+ if err != nil {
+ return
+ }
+ // write "meta"
+ err = en.Append(0xa4, 0x6d, 0x65, 0x74, 0x61)
+ if err != nil {
+ return
+ }
+ err = en.WriteMapHeader(uint32(len(z.Meta)))
+ if err != nil {
+ return
+ }
+ for za0001, za0002 := range z.Meta {
+ err = en.WriteString(za0001)
+ if err != nil {
+ return
+ }
+ err = en.WriteString(za0002)
+ if err != nil {
+ return
+ }
+ }
+ // write "metrics"
+ err = en.Append(0xa7, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73)
+ if err != nil {
+ return
+ }
+ err = en.WriteMapHeader(uint32(len(z.Metrics)))
+ if err != nil {
+ return
+ }
+ for za0003, za0004 := range z.Metrics {
+ err = en.WriteString(za0003)
+ if err != nil {
+ return
+ }
+ err = en.WriteFloat64(za0004)
+ if err != nil {
+ return
+ }
+ }
+ // write "span_id"
+ err = en.Append(0xa7, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64)
+ if err != nil {
+ return
+ }
+ err = en.WriteUint64(z.SpanID)
+ if err != nil {
+ return
+ }
+ // write "trace_id"
+ err = en.Append(0xa8, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64)
+ if err != nil {
+ return
+ }
+ err = en.WriteUint64(z.TraceID)
+ if err != nil {
+ return
+ }
+ // write "parent_id"
+ err = en.Append(0xa9, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64)
+ if err != nil {
+ return
+ }
+ err = en.WriteUint64(z.ParentID)
+ if err != nil {
+ return
+ }
+ // write "error"
+ err = en.Append(0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
+ if err != nil {
+ return
+ }
+ err = en.WriteInt32(z.Error)
+ if err != nil {
+ return
+ }
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *span) Msgsize() (s int) {
+ s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.StringPrefixSize + len(z.Service) + 9 + msgp.StringPrefixSize + len(z.Resource) + 5 + msgp.StringPrefixSize + len(z.Type) + 6 + msgp.Int64Size + 9 + msgp.Int64Size + 5 + msgp.MapHeaderSize
+ if z.Meta != nil {
+ for za0001, za0002 := range z.Meta {
+ _ = za0002
+ s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002)
+ }
+ }
+ s += 8 + msgp.MapHeaderSize
+ if z.Metrics != nil {
+ for za0003, za0004 := range z.Metrics {
+ _ = za0004
+ s += msgp.StringPrefixSize + len(za0003) + msgp.Float64Size
+ }
+ }
+ s += 8 + msgp.Uint64Size + 9 + msgp.Uint64Size + 10 + msgp.Uint64Size + 6 + msgp.Int32Size
+ return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *spanList) DecodeMsg(dc *msgp.Reader) (err error) {
+ var zb0002 uint32
+ zb0002, err = dc.ReadArrayHeader()
+ if err != nil {
+ return
+ }
+ if cap((*z)) >= int(zb0002) {
+ (*z) = (*z)[:zb0002]
+ } else {
+ (*z) = make(spanList, zb0002)
+ }
+ for zb0001 := range *z {
+ if dc.IsNil() {
+ err = dc.ReadNil()
+ if err != nil {
+ return
+ }
+ (*z)[zb0001] = nil
+ } else {
+ if (*z)[zb0001] == nil {
+ (*z)[zb0001] = new(span)
+ }
+ err = (*z)[zb0001].DecodeMsg(dc)
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z spanList) EncodeMsg(en *msgp.Writer) (err error) {
+ err = en.WriteArrayHeader(uint32(len(z)))
+ if err != nil {
+ return
+ }
+ for zb0003 := range z {
+ if z[zb0003] == nil {
+ err = en.WriteNil()
+ if err != nil {
+ return
+ }
+ } else {
+ err = z[zb0003].EncodeMsg(en)
+ if err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z spanList) Msgsize() (s int) {
+ s = msgp.ArrayHeaderSize
+ for zb0003 := range z {
+ if z[zb0003] == nil {
+ s += msgp.NilSize
+ } else {
+ s += z[zb0003].Msgsize()
+ }
+ }
+ return
+}
+
+// DecodeMsg implements msgp.Decodable
+func (z *spanLists) DecodeMsg(dc *msgp.Reader) (err error) {
+ var zb0003 uint32
+ zb0003, err = dc.ReadArrayHeader()
+ if err != nil {
+ return
+ }
+ if cap((*z)) >= int(zb0003) {
+ (*z) = (*z)[:zb0003]
+ } else {
+ (*z) = make(spanLists, zb0003)
+ }
+ for zb0001 := range *z {
+ var zb0004 uint32
+ zb0004, err = dc.ReadArrayHeader()
+ if err != nil {
+ return
+ }
+ if cap((*z)[zb0001]) >= int(zb0004) {
+ (*z)[zb0001] = ((*z)[zb0001])[:zb0004]
+ } else {
+ (*z)[zb0001] = make(spanList, zb0004)
+ }
+ for zb0002 := range (*z)[zb0001] {
+ if dc.IsNil() {
+ err = dc.ReadNil()
+ if err != nil {
+ return
+ }
+ (*z)[zb0001][zb0002] = nil
+ } else {
+ if (*z)[zb0001][zb0002] == nil {
+ (*z)[zb0001][zb0002] = new(span)
+ }
+ err = (*z)[zb0001][zb0002].DecodeMsg(dc)
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ return
+}
+
+// EncodeMsg implements msgp.Encodable
+func (z spanLists) EncodeMsg(en *msgp.Writer) (err error) {
+ err = en.WriteArrayHeader(uint32(len(z)))
+ if err != nil {
+ return
+ }
+ for zb0005 := range z {
+ err = en.WriteArrayHeader(uint32(len(z[zb0005])))
+ if err != nil {
+ return
+ }
+ for zb0006 := range z[zb0005] {
+ if z[zb0005][zb0006] == nil {
+ err = en.WriteNil()
+ if err != nil {
+ return
+ }
+ } else {
+ err = z[zb0005][zb0006].EncodeMsg(en)
+ if err != nil {
+ return
+ }
+ }
+ }
+ }
+ return
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z spanLists) Msgsize() (s int) {
+ s = msgp.ArrayHeaderSize
+ for zb0005 := range z {
+ s += msgp.ArrayHeaderSize
+ for zb0006 := range z[zb0005] {
+ if z[zb0005][zb0006] == nil {
+ s += msgp.NilSize
+ } else {
+ s += z[zb0005][zb0006].Msgsize()
+ }
+ }
+ }
+ return
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/span_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/span_test.go
new file mode 100644
index 00000000..d25c7514
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/span_test.go
@@ -0,0 +1,348 @@
+package tracer
+
+import (
+ "errors"
+ "testing"
+ "time"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// newSpan creates a new span. This is a low-level function, required for testing and advanced usage.
+// Most of the time one should prefer the Tracer NewRootSpan or NewChildSpan methods.
+func newSpan(name, service, resource string, spanID, traceID, parentID uint64) *span {
+ span := &span{
+ Name: name,
+ Service: service,
+ Resource: resource,
+ Meta: map[string]string{},
+ Metrics: map[string]float64{},
+ SpanID: spanID,
+ TraceID: traceID,
+ ParentID: parentID,
+ Start: now(),
+ }
+ span.context = newSpanContext(span, nil)
+ return span
+}
+
+// newBasicSpan is the OpenTracing Span constructor
+func newBasicSpan(operationName string) *span {
+ return newSpan(operationName, "", "", 0, 0, 0)
+}
+
+func TestSpanBaggage(t *testing.T) {
+ assert := assert.New(t)
+
+ span := newBasicSpan("web.request")
+ span.SetBaggageItem("key", "value")
+ assert.Equal("value", span.BaggageItem("key"))
+}
+
+func TestSpanContext(t *testing.T) {
+ assert := assert.New(t)
+
+ span := newBasicSpan("web.request")
+ assert.NotNil(span.Context())
+}
+
+func TestSpanOperationName(t *testing.T) {
+ assert := assert.New(t)
+
+ span := newBasicSpan("web.request")
+ span.SetOperationName("http.request")
+ assert.Equal("http.request", span.Name)
+}
+
+func TestSpanFinish(t *testing.T) {
+ assert := assert.New(t)
+ wait := time.Millisecond * 2
+ tracer := newTracer(withTransport(newDefaultTransport()))
+ span := tracer.newRootSpan("pylons.request", "pylons", "/")
+
+ // the finish should set finished and the duration
+ time.Sleep(wait)
+ span.Finish()
+ assert.True(span.Duration > int64(wait))
+ assert.True(span.finished)
+}
+
+func TestSpanFinishTwice(t *testing.T) {
+ assert := assert.New(t)
+ wait := time.Millisecond * 2
+
+ tracer, _, stop := startTestTracer()
+ defer stop()
+
+ assert.Equal(tracer.payload.itemCount(), 0)
+
+ // the finish must be idempotent
+ span := tracer.newRootSpan("pylons.request", "pylons", "/")
+ time.Sleep(wait)
+ span.Finish()
+ assert.Equal(tracer.payload.itemCount(), 1)
+
+ previousDuration := span.Duration
+ time.Sleep(wait)
+ span.Finish()
+ assert.Equal(previousDuration, span.Duration)
+ assert.Equal(tracer.payload.itemCount(), 1)
+}
+
+func TestSpanFinishWithTime(t *testing.T) {
+ assert := assert.New(t)
+
+ finishTime := time.Now().Add(10 * time.Second)
+ span := newBasicSpan("web.request")
+ span.Finish(FinishTime(finishTime))
+
+ duration := finishTime.UnixNano() - span.Start
+ assert.Equal(duration, span.Duration)
+}
+
+func TestSpanFinishWithError(t *testing.T) {
+ assert := assert.New(t)
+
+ err := errors.New("test error")
+ span := newBasicSpan("web.request")
+ span.Finish(WithError(err))
+
+ assert.Equal(int32(1), span.Error)
+ assert.Equal("test error", span.Meta[ext.ErrorMsg])
+ assert.Equal("*errors.errorString", span.Meta[ext.ErrorType])
+ assert.NotEmpty(span.Meta[ext.ErrorStack])
+}
+
+func TestSpanSetTag(t *testing.T) {
+ assert := assert.New(t)
+
+ span := newBasicSpan("web.request")
+ span.SetTag("component", "tracer")
+ assert.Equal("tracer", span.Meta["component"])
+
+ span.SetTag("tagInt", 1234)
+ assert.Equal(float64(1234), span.Metrics["tagInt"])
+
+ span.SetTag("tagStruct", struct{ A, B int }{1, 2})
+ assert.Equal("{1 2}", span.Meta["tagStruct"])
+
+ span.SetTag(ext.Error, true)
+ assert.Equal(int32(1), span.Error)
+
+ span.SetTag(ext.Error, nil)
+ assert.Equal(int32(0), span.Error)
+
+ span.SetTag(ext.Error, errors.New("abc"))
+ assert.Equal(int32(1), span.Error)
+ assert.Equal("abc", span.Meta[ext.ErrorMsg])
+ assert.Equal("*errors.errorString", span.Meta[ext.ErrorType])
+ assert.NotEmpty(span.Meta[ext.ErrorStack])
+
+ span.SetTag(ext.Error, "something else")
+ assert.Equal(int32(1), span.Error)
+
+ span.SetTag(ext.Error, false)
+ assert.Equal(int32(0), span.Error)
+
+ span.SetTag(ext.SamplingPriority, 2)
+ assert.Equal(float64(2), span.Metrics[samplingPriorityKey])
+}
+
+func TestSpanSetDatadogTags(t *testing.T) {
+ assert := assert.New(t)
+
+ span := newBasicSpan("web.request")
+ span.SetTag(ext.SpanType, "http")
+ span.SetTag(ext.ServiceName, "db-cluster")
+ span.SetTag(ext.ResourceName, "SELECT * FROM users;")
+
+ assert.Equal("http", span.Type)
+ assert.Equal("db-cluster", span.Service)
+ assert.Equal("SELECT * FROM users;", span.Resource)
+}
+
+func TestSpanStart(t *testing.T) {
+ assert := assert.New(t)
+ tracer := newTracer(withTransport(newDefaultTransport()))
+ span := tracer.newRootSpan("pylons.request", "pylons", "/")
+
+ // a new span sets the Start after the initialization
+ assert.NotEqual(int64(0), span.Start)
+}
+
+func TestSpanString(t *testing.T) {
+ assert := assert.New(t)
+ tracer := newTracer(withTransport(newDefaultTransport()))
+ span := tracer.newRootSpan("pylons.request", "pylons", "/")
+ // don't bother checking the contents, just make sure it works.
+ assert.NotEqual("", span.String())
+ span.Finish()
+ assert.NotEqual("", span.String())
+}
+
+func TestSpanSetMetric(t *testing.T) {
+ assert := assert.New(t)
+ tracer := newTracer(withTransport(newDefaultTransport()))
+ span := tracer.newRootSpan("pylons.request", "pylons", "/")
+
+ // check the map is properly initialized
+ span.SetTag("bytes", 1024.42)
+ assert.Equal(1, len(span.Metrics))
+ assert.Equal(1024.42, span.Metrics["bytes"])
+
+ // operating on a finished span is a no-op
+ span.Finish()
+ span.SetTag("finished.test", 1337)
+ assert.Equal(1, len(span.Metrics))
+ assert.Equal(0.0, span.Metrics["finished.test"])
+}
+
+func TestSpanError(t *testing.T) {
+ assert := assert.New(t)
+ tracer := newTracer(withTransport(newDefaultTransport()))
+ span := tracer.newRootSpan("pylons.request", "pylons", "/")
+
+ // check the error is set in the default meta
+ err := errors.New("Something wrong")
+ span.SetTag(ext.Error, err)
+ assert.Equal(int32(1), span.Error)
+ assert.Equal("Something wrong", span.Meta["error.msg"])
+ assert.Equal("*errors.errorString", span.Meta["error.type"])
+ assert.NotEqual("", span.Meta["error.stack"])
+
+ // operating on a finished span is a no-op
+ span = tracer.newRootSpan("flask.request", "flask", "/")
+ nMeta := len(span.Meta)
+ span.Finish()
+ span.SetTag(ext.Error, err)
+ assert.Equal(int32(0), span.Error)
+ assert.Equal(nMeta, len(span.Meta))
+ assert.Equal("", span.Meta["error.msg"])
+ assert.Equal("", span.Meta["error.type"])
+ assert.Equal("", span.Meta["error.stack"])
+}
+
+func TestSpanError_Typed(t *testing.T) {
+ assert := assert.New(t)
+ tracer := newTracer(withTransport(newDefaultTransport()))
+ span := tracer.newRootSpan("pylons.request", "pylons", "/")
+
+ // check the error is set in the default meta
+ err := &boomError{}
+ span.SetTag(ext.Error, err)
+ assert.Equal(int32(1), span.Error)
+ assert.Equal("boom", span.Meta["error.msg"])
+ assert.Equal("*tracer.boomError", span.Meta["error.type"])
+ assert.NotEqual("", span.Meta["error.stack"])
+}
+
+func TestSpanErrorNil(t *testing.T) {
+ assert := assert.New(t)
+ tracer := newTracer(withTransport(newDefaultTransport()))
+ span := tracer.newRootSpan("pylons.request", "pylons", "/")
+
+ // don't set the error if it's nil
+ nMeta := len(span.Meta)
+ span.SetTag(ext.Error, nil)
+ assert.Equal(int32(0), span.Error)
+ assert.Equal(nMeta, len(span.Meta))
+}
+
+// Prior to a bug fix, this failed when running `go test -race`
+func TestSpanModifyWhileFlushing(t *testing.T) {
+ tracer, _, stop := startTestTracer()
+ defer stop()
+
+ done := make(chan struct{})
+ go func() {
+ span := tracer.newRootSpan("pylons.request", "pylons", "/")
+ span.Finish()
+ // It doesn't make much sense to update the span after it's been finished,
+ // but an error in a user's code could lead to this.
+ span.SetTag("race_test", "true")
+ span.SetTag("race_test2", 133.7)
+ span.SetTag("race_test3", 133.7)
+ span.SetTag(ext.Error, errors.New("t"))
+ done <- struct{}{}
+ }()
+
+ for {
+ select {
+ case <-done:
+ return
+ default:
+ tracer.forceFlush()
+ }
+ }
+}
+
+func TestSpanSamplingPriority(t *testing.T) {
+ assert := assert.New(t)
+ tracer := newTracer(withTransport(newDefaultTransport()))
+
+ span := tracer.newRootSpan("my.name", "my.service", "my.resource")
+ _, ok := span.Metrics[samplingPriorityKey]
+ assert.False(ok)
+
+ for _, priority := range []int{
+ ext.PriorityUserReject,
+ ext.PriorityAutoReject,
+ ext.PriorityAutoKeep,
+ ext.PriorityUserKeep,
+ 999, // not used, but we should allow it
+ } {
+ span.SetTag(ext.SamplingPriority, priority)
+ v, ok := span.Metrics[samplingPriorityKey]
+ assert.True(ok)
+ assert.EqualValues(priority, v)
+ assert.EqualValues(span.context.priority, v)
+ assert.True(span.context.hasPriority)
+
+ childSpan := tracer.newChildSpan("my.child", span)
+ v0, ok0 := span.Metrics[samplingPriorityKey]
+ v1, ok1 := childSpan.Metrics[samplingPriorityKey]
+ assert.Equal(ok0, ok1)
+ assert.Equal(v0, v1)
+ assert.EqualValues(childSpan.context.priority, v0)
+ assert.EqualValues(childSpan.context.hasPriority, ok0)
+ }
+}
+
+func BenchmarkSetTagMetric(b *testing.B) {
+ span := newBasicSpan("bench.span")
+ keys := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ k := string(keys[i%len(keys)])
+ span.SetTag(k, float64(12.34))
+ }
+}
+
+func BenchmarkSetTagString(b *testing.B) {
+ span := newBasicSpan("bench.span")
+ keys := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ k := string(keys[i%len(keys)])
+ span.SetTag(k, "some text")
+ }
+}
+
+func BenchmarkSetTagField(b *testing.B) {
+ span := newBasicSpan("bench.span")
+ keys := []string{ext.ServiceName, ext.ResourceName, ext.SpanType}
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ k := keys[i%len(keys)]
+ span.SetTag(k, "some text")
+ }
+}
+
+type boomError struct{}
+
+func (e *boomError) Error() string { return "boom" }
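The span tests above pin down SetTag's dispatch rules: string values land in Meta, numeric values in Metrics, and ext.Error accepts a bool, error, string or nil to toggle the span's error state. A minimal sketch of exercising the same behaviour through the package's public API (the operation and tag names are illustrative):

```go
package main

import (
	"errors"

	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
)

func main() {
	tracer.Start(tracer.WithGlobalTag("env", "demo")) // illustrative global tag
	defer tracer.Stop()

	span := tracer.StartSpan("web.request")
	span.SetTag("component", "handler")        // string value -> Meta
	span.SetTag("bytes", 1024.42)              // numeric value -> Metrics
	span.SetTag(ext.Error, errors.New("boom")) // sets Error plus msg/type/stack meta
	span.Finish()
}
```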
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/spancontext.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/spancontext.go
new file mode 100644
index 00000000..6758f3ba
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/spancontext.go
@@ -0,0 +1,193 @@
+package tracer
+
+import (
+ "sync"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal"
+)
+
+var _ ddtrace.SpanContext = (*spanContext)(nil)
+
+// SpanContext represents span state that can propagate to descendant spans
+// and across process boundaries. It contains all the information needed to
+// spawn a direct descendant of the span that it belongs to. It can be used
+// to build distributed traces by propagating it using the provided interfaces.
+type spanContext struct {
+ // the below group should propagate only locally
+
+ trace *trace // reference to the trace that this span belongs to
+ span *span // reference to the span that hosts this context
+ sampled bool // whether this span will be sampled or not
+
+ // the below group should propagate cross-process
+
+ traceID uint64
+ spanID uint64
+
+ mu sync.RWMutex // guards below fields
+ baggage map[string]string
+ priority int
+ hasPriority bool
+}
+
+// newSpanContext creates a new SpanContext to serve as context for the given
+// span. If the provided parent is not nil, the context will inherit the trace,
+// baggage and other values from it. This method also pushes the span into the
+// new context's trace and as a result, it should not be called multiple times
+// for the same span.
+func newSpanContext(span *span, parent *spanContext) *spanContext {
+ context := &spanContext{
+ traceID: span.TraceID,
+ spanID: span.SpanID,
+ sampled: true,
+ span: span,
+ }
+ if v, ok := span.Metrics[samplingPriorityKey]; ok {
+ context.hasPriority = true
+ context.priority = int(v)
+ }
+ if parent != nil {
+ context.trace = parent.trace
+ context.sampled = parent.sampled
+ context.hasPriority = parent.hasSamplingPriority()
+ context.priority = parent.samplingPriority()
+ parent.ForeachBaggageItem(func(k, v string) bool {
+ context.setBaggageItem(k, v)
+ return true
+ })
+ }
+ if context.trace == nil {
+ context.trace = newTrace()
+ }
+ // put span in context's trace
+ context.trace.push(span)
+ return context
+}
+
+// SpanID implements ddtrace.SpanContext.
+func (c *spanContext) SpanID() uint64 { return c.spanID }
+
+// TraceID implements ddtrace.SpanContext.
+func (c *spanContext) TraceID() uint64 { return c.traceID }
+
+// ForeachBaggageItem implements ddtrace.SpanContext.
+func (c *spanContext) ForeachBaggageItem(handler func(k, v string) bool) {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ for k, v := range c.baggage {
+ if !handler(k, v) {
+ break
+ }
+ }
+}
+
+func (c *spanContext) setSamplingPriority(p int) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.priority = p
+ c.hasPriority = true
+}
+
+func (c *spanContext) samplingPriority() int {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.priority
+}
+
+func (c *spanContext) hasSamplingPriority() bool {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.hasPriority
+}
+
+func (c *spanContext) setBaggageItem(key, val string) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.baggage == nil {
+ c.baggage = make(map[string]string, 1)
+ }
+ c.baggage[key] = val
+}
+
+func (c *spanContext) baggageItem(key string) string {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.baggage[key]
+}
+
+// finish marks this span as finished in the trace.
+func (c *spanContext) finish() { c.trace.ackFinish() }
+
+// trace holds information about a specific trace. This structure is shared
+// between all spans in a trace.
+type trace struct {
+ mu sync.RWMutex // guards below fields
+ spans []*span // all the spans that are part of this trace
+ finished int // the number of finished spans
+ full bool // signifies that the span buffer is full
+}
+
+var (
+ // traceStartSize is the initial capacity of the trace buffer. We
+ // allocate room for a handful of spans up front; a span is far larger
+ // than a slice slot, so this avoids re-allocating over and over at
+ // negligible cost. It could be fine-tuned at runtime.
+ traceStartSize = 10
+ // traceMaxSize is the maximum number of spans we keep in memory.
+ // This avoids memory leaks: above this value, spans are dropped and
+ // ignored, resulting in incomplete tracing data, but ensuring that the
+ // original program continues to work as expected.
+ traceMaxSize = int(1e5)
+)
+
+// newTrace creates a new trace using the given callback which will be called
+// upon completion of the trace.
+func newTrace() *trace {
+ return &trace{spans: make([]*span, 0, traceStartSize)}
+}
+
+// push pushes a new span into the trace. If the buffer is full, the span is
+// dropped and a spanBufferFullError is reported to the active tracer.
+func (t *trace) push(sp *span) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.full {
+ return
+ }
+ if len(t.spans) >= traceMaxSize {
+ // capacity is reached, we will not be able to complete this trace.
+ t.full = true
+ t.spans = nil // GC
+ if tr, ok := internal.GetGlobalTracer().(*tracer); ok {
+ // we have a tracer we can submit errors to.
+ tr.pushError(&spanBufferFullError{})
+ }
+ return
+ }
+ t.spans = append(t.spans, sp)
+}
+
+// ackFinish acknowledges that another span in the trace has finished, and checks
+// if the trace is complete, in which case it pushes the finished spans to the tracer.
+func (t *trace) ackFinish() {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.full {
+ // capacity has been reached: the buffer is no longer tracking
+ // all the spans in the trace, so the conditions below would not
+ // be accurate and could trigger a premature flush, exposing us
+ // to a race condition where spans can be modified while flushing.
+ return
+ }
+ t.finished++
+ if len(t.spans) != t.finished {
+ return
+ }
+ if tr, ok := internal.GetGlobalTracer().(*tracer); ok {
+ // we have a tracer that can receive completed traces.
+ tr.pushTrace(t.spans)
+ }
+ t.spans = nil
+ t.finished = 0 // important, because a buffer can be used for several flushes
+}
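The push/ackFinish pair above is what gives traces their atomic-flush behaviour: a trace is handed to the tracer only once every span registered in the buffer has finished, whatever the finishing order. A minimal sketch of what that means for callers of the public API:

```go
package main

import "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"

func main() {
	tracer.Start()
	defer tracer.Stop()

	root := tracer.StartSpan("web.request")
	child := tracer.StartSpan("db.query", tracer.ChildOf(root.Context()))

	// Finishing the root first is fine: the trace is not flushed yet,
	// because one of its two buffered spans (the child) is still open.
	root.Finish()
	// Now the finished count equals the buffered count, so ackFinish
	// pushes the whole trace to the tracer at once.
	child.Finish()
}
```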
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/spancontext_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/spancontext_test.go
new file mode 100644
index 00000000..3670799a
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/spancontext_test.go
@@ -0,0 +1,285 @@
+package tracer
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func setupteardown(start, max int) func() {
+ oldStartSize := traceStartSize
+ oldMaxSize := traceMaxSize
+ traceStartSize = start
+ traceMaxSize = max
+ return func() {
+ traceStartSize = oldStartSize
+ traceMaxSize = oldMaxSize
+ }
+}
+
+func TestNewSpanContextPushError(t *testing.T) {
+ defer setupteardown(2, 2)()
+
+ tracer, _, stop := startTestTracer()
+ defer stop()
+ parent := newBasicSpan("test1") // 1st span in trace
+ parent.context.trace.push(newBasicSpan("test2")) // 2nd span in trace
+ child := newSpan("child", "", "", 0, 0, 0)
+
+ // new context having a parent with a trace of two spans.
+ // One more should overflow.
+ child.context = newSpanContext(child, parent.context)
+
+ select {
+ case err := <-tracer.errorBuffer:
+ assert.Equal(t, &spanBufferFullError{}, err)
+ default:
+ t.Fatal("no error pushed")
+ }
+}
+
+func TestSpanTracePushOne(t *testing.T) {
+ defer setupteardown(2, 5)()
+
+ assert := assert.New(t)
+
+ tracer, transport, stop := startTestTracer()
+ defer stop()
+
+ traceID := random.Uint64()
+ root := newSpan("name1", "a-service", "a-resource", traceID, traceID, 0)
+ trace := root.context.trace
+
+ assert.Len(tracer.errorBuffer, 0)
+ assert.Len(trace.spans, 1)
+ assert.Equal(root, trace.spans[0], "the span is the one pushed before")
+
+ root.Finish()
+ tracer.forceFlush()
+
+ select {
+ case err := <-tracer.errorBuffer:
+ assert.Fail("unexpected error:", err.Error())
+ t.Logf("trace: %v", trace)
+ default:
+ traces := transport.Traces()
+ assert.Len(tracer.errorBuffer, 0)
+ assert.Len(traces, 1)
+ trc := traces[0]
+ assert.Len(trc, 1, "there was a trace in the channel")
+ comparePayloadSpans(t, root, trc[0])
+ assert.Equal(0, len(trace.spans), "no more spans in the trace")
+ }
+}
+
+func TestSpanTracePushNoFinish(t *testing.T) {
+ defer setupteardown(2, 5)()
+
+ assert := assert.New(t)
+
+ tracer, _, stop := startTestTracer()
+ defer stop()
+
+ buffer := newTrace()
+ assert.NotNil(buffer)
+ assert.Len(buffer.spans, 0)
+
+ traceID := random.Uint64()
+ root := newSpan("name1", "a-service", "a-resource", traceID, traceID, 0)
+ root.context.trace = buffer
+
+ buffer.push(root)
+ assert.Len(tracer.errorBuffer, 0)
+ assert.Len(buffer.spans, 1, "there is one span in the buffer")
+ assert.Equal(root, buffer.spans[0], "the span is the one pushed before")
+
+ select {
+ case err := <-tracer.errorBuffer:
+ assert.Fail("unexpected error:", err.Error())
+ t.Logf("buffer: %v", buffer)
+ case <-time.After(time.Second / 10):
+ t.Logf("expected timeout, nothing should show up in buffer as the trace is not finished")
+ }
+}
+
+func TestSpanTracePushSeveral(t *testing.T) {
+ defer setupteardown(2, 5)()
+
+ assert := assert.New(t)
+
+ tracer, transport, stop := startTestTracer()
+ defer stop()
+ buffer := newTrace()
+ assert.NotNil(buffer)
+ assert.Len(buffer.spans, 0)
+
+ traceID := random.Uint64()
+ root := newSpan("name1", "a-service", "a-resource", traceID, traceID, 0)
+ span2 := newSpan("name2", "a-service", "a-resource", random.Uint64(), traceID, root.SpanID)
+ span3 := newSpan("name3", "a-service", "a-resource", random.Uint64(), traceID, root.SpanID)
+ span3a := newSpan("name3", "a-service", "a-resource", random.Uint64(), traceID, span3.SpanID)
+
+ trace := []*span{root, span2, span3, span3a}
+
+ for i, span := range trace {
+ span.context.trace = buffer
+ buffer.push(span)
+ assert.Len(tracer.errorBuffer, 0)
+ assert.Len(buffer.spans, i+1, "there is one more span in the buffer")
+ assert.Equal(span, buffer.spans[i], "the span is the one pushed before")
+ }
+
+ for _, span := range trace {
+ span.Finish()
+ }
+ tracer.forceFlush()
+
+ select {
+ case err := <-tracer.errorBuffer:
+ assert.Fail("unexpected error:", err.Error())
+ default:
+ traces := transport.Traces()
+ assert.Len(traces, 1)
+ trace := traces[0]
+ assert.Len(trace, 4, "there was one trace with the right number of spans in the channel")
+ for _, span := range trace {
+ assert.Contains(trace, span, "the trace contains the spans")
+ }
+ }
+}
+
+func TestNewSpanContext(t *testing.T) {
+ t.Run("basic", func(t *testing.T) {
+ span := &span{
+ TraceID: 1,
+ SpanID: 2,
+ ParentID: 3,
+ }
+ ctx := newSpanContext(span, nil)
+ assert := assert.New(t)
+ assert.Equal(ctx.traceID, span.TraceID)
+ assert.Equal(ctx.spanID, span.SpanID)
+ assert.Equal(ctx.priority, 0)
+ assert.False(ctx.hasPriority)
+ assert.NotNil(ctx.trace)
+ assert.Contains(ctx.trace.spans, span)
+ })
+
+ t.Run("priority", func(t *testing.T) {
+ span := &span{
+ TraceID: 1,
+ SpanID: 2,
+ ParentID: 3,
+ Metrics: map[string]float64{samplingPriorityKey: 1},
+ }
+ ctx := newSpanContext(span, nil)
+ assert := assert.New(t)
+ assert.Equal(ctx.traceID, span.TraceID)
+ assert.Equal(ctx.spanID, span.SpanID)
+ assert.Equal(ctx.TraceID(), span.TraceID)
+ assert.Equal(ctx.SpanID(), span.SpanID)
+ assert.Equal(ctx.priority, 1)
+ assert.True(ctx.hasPriority)
+ assert.NotNil(ctx.trace)
+ assert.Contains(ctx.trace.spans, span)
+ })
+}
+
+func TestSpanContextParent(t *testing.T) {
+ s := &span{
+ TraceID: 1,
+ SpanID: 2,
+ ParentID: 3,
+ }
+ for name, parentCtx := range map[string]*spanContext{
+ "basic": &spanContext{
+ sampled: false,
+ baggage: map[string]string{"A": "A", "B": "B"},
+ trace: newTrace(),
+ },
+ "nil-trace": &spanContext{
+ sampled: false,
+ },
+ "priority": &spanContext{
+ sampled: true,
+ baggage: map[string]string{"A": "A", "B": "B"},
+ trace: &trace{spans: []*span{newBasicSpan("abc")}},
+ hasPriority: true,
+ priority: 2,
+ },
+ } {
+ t.Run(name, func(t *testing.T) {
+ ctx := newSpanContext(s, parentCtx)
+ assert := assert.New(t)
+ assert.Equal(ctx.traceID, s.TraceID)
+ assert.Equal(ctx.spanID, s.SpanID)
+ if parentCtx.trace != nil {
+ assert.Equal(len(ctx.trace.spans), len(parentCtx.trace.spans))
+ }
+ assert.NotNil(ctx.trace)
+ assert.Contains(ctx.trace.spans, s)
+ assert.Equal(ctx.hasPriority, parentCtx.hasPriority)
+ assert.Equal(ctx.priority, parentCtx.priority)
+ assert.Equal(ctx.sampled, parentCtx.sampled)
+ assert.Equal(ctx.baggage, parentCtx.baggage)
+ })
+ }
+}
+
+func TestSpanContextPushFull(t *testing.T) {
+ oldMaxSize := traceMaxSize
+ defer func() {
+ traceMaxSize = oldMaxSize
+ }()
+ traceMaxSize = 2
+ tracer, _, stop := startTestTracer()
+ defer stop()
+
+ span1 := newBasicSpan("span1")
+ span2 := newBasicSpan("span2")
+ span3 := newBasicSpan("span3")
+
+ buffer := newTrace()
+ assert := assert.New(t)
+ buffer.push(span1)
+ assert.Len(tracer.errorBuffer, 0)
+ buffer.push(span2)
+ assert.Len(tracer.errorBuffer, 0)
+ buffer.push(span3)
+ assert.Len(tracer.errorBuffer, 1)
+ err := <-tracer.errorBuffer
+ assert.Equal(&spanBufferFullError{}, err)
+}
+
+func TestSpanContextBaggage(t *testing.T) {
+ assert := assert.New(t)
+
+ var ctx spanContext
+ ctx.setBaggageItem("key", "value")
+ assert.Equal("value", ctx.baggage["key"])
+}
+
+func TestSpanContextIterator(t *testing.T) {
+ assert := assert.New(t)
+
+ got := make(map[string]string)
+ ctx := spanContext{baggage: map[string]string{"key": "value"}}
+ ctx.ForeachBaggageItem(func(k, v string) bool {
+ got[k] = v
+ return true
+ })
+
+ assert.Len(got, 1)
+ assert.Equal("value", got["key"])
+}
+
+func TestSpanContextIteratorBreak(t *testing.T) {
+ got := make(map[string]string)
+ ctx := spanContext{baggage: map[string]string{"key": "value"}}
+ ctx.ForeachBaggageItem(func(k, v string) bool {
+ return false
+ })
+
+ assert.Len(t, got, 0)
+}
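Baggage is the only mutable propagated state on a context, which is why the tests above exercise setBaggageItem and ForeachBaggageItem so heavily. A small sketch of the same iteration through the public API (keys and values are illustrative); returning false from the handler stops the walk early, as the break test verifies:

```go
package main

import (
	"fmt"

	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
)

func main() {
	tracer.Start()
	defer tracer.Stop()

	span := tracer.StartSpan("web.request")
	span.SetBaggageItem("user.id", "1234")
	span.SetBaggageItem("locale", "en_US")

	// Walk the baggage; returning true continues, false stops early.
	span.Context().ForeachBaggageItem(func(k, v string) bool {
		fmt.Printf("%s=%s\n", k, v)
		return true
	})
	span.Finish()
}
```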
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/textmap.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/textmap.go
new file mode 100644
index 00000000..3138a878
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/textmap.go
@@ -0,0 +1,197 @@
+package tracer
+
+import (
+ "net/http"
+ "strconv"
+ "strings"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+)
+
+// HTTPHeadersCarrier wraps an http.Header as a TextMapWriter and TextMapReader,
+// allowing it to be used with the provided Propagator implementation.
+type HTTPHeadersCarrier http.Header
+
+var _ TextMapWriter = (*HTTPHeadersCarrier)(nil)
+var _ TextMapReader = (*HTTPHeadersCarrier)(nil)
+
+// Set implements TextMapWriter.
+func (c HTTPHeadersCarrier) Set(key, val string) {
+ http.Header(c).Set(key, val)
+}
+
+// ForeachKey implements TextMapReader.
+func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error {
+ for k, vals := range c {
+ for _, v := range vals {
+ if err := handler(k, v); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// TextMapCarrier allows the use of a regular map[string]string as both TextMapWriter
+// and TextMapReader, making it compatible with the provided Propagator.
+type TextMapCarrier map[string]string
+
+var _ TextMapWriter = (*TextMapCarrier)(nil)
+var _ TextMapReader = (*TextMapCarrier)(nil)
+
+// Set implements TextMapWriter.
+func (c TextMapCarrier) Set(key, val string) {
+ c[key] = val
+}
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error {
+ for k, v := range c {
+ if err := handler(k, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+const (
+ // DefaultBaggageHeaderPrefix specifies the default prefix applied to
+ // baggage keys stored in HTTP headers or text maps.
+ DefaultBaggageHeaderPrefix = "ot-baggage-"
+
+ // DefaultTraceIDHeader specifies the key that will be used in HTTP headers
+ // or text maps to store the trace ID.
+ DefaultTraceIDHeader = "x-datadog-trace-id"
+
+ // DefaultParentIDHeader specifies the key that will be used in HTTP headers
+ // or text maps to store the parent ID.
+ DefaultParentIDHeader = "x-datadog-parent-id"
+
+ // DefaultPriorityHeader specifies the key that will be used in HTTP headers
+ // or text maps to store the sampling priority value.
+ DefaultPriorityHeader = "x-datadog-sampling-priority"
+)
+
+// PropagatorConfig defines the configuration for initializing a propagator.
+type PropagatorConfig struct {
+ // BaggagePrefix specifies the prefix that will be used to store baggage
+ // items in a map. It defaults to DefaultBaggageHeaderPrefix.
+ BaggagePrefix string
+
+ // TraceHeader specifies the map key that will be used to store the trace ID.
+ // It defaults to DefaultTraceIDHeader.
+ TraceHeader string
+
+ // ParentHeader specifies the map key that will be used to store the parent ID.
+ // It defaults to DefaultParentIDHeader.
+ ParentHeader string
+
+ // PriorityHeader specifies the map key that will be used to store the sampling priority.
+ // It defaults to DefaultPriorityHeader.
+ PriorityHeader string
+}
+
+// NewPropagator returns a new propagator which uses TextMap to inject
+// and extract values. It propagates trace and span IDs and baggage.
+// To use the defaults, nil may be provided in place of the config.
+func NewPropagator(cfg *PropagatorConfig) Propagator {
+ if cfg == nil {
+ cfg = new(PropagatorConfig)
+ }
+ if cfg.BaggagePrefix == "" {
+ cfg.BaggagePrefix = DefaultBaggageHeaderPrefix
+ }
+ if cfg.TraceHeader == "" {
+ cfg.TraceHeader = DefaultTraceIDHeader
+ }
+ if cfg.ParentHeader == "" {
+ cfg.ParentHeader = DefaultParentIDHeader
+ }
+ if cfg.PriorityHeader == "" {
+ cfg.PriorityHeader = DefaultPriorityHeader
+ }
+ return &propagator{cfg}
+}
+
+// propagator implements a propagator which uses TextMap internally.
+// It propagates the trace and span IDs, as well as the baggage from the
+// context.
+type propagator struct{ cfg *PropagatorConfig }
+
+// Inject defines the Propagator to propagate SpanContext data
+// out of the current process. The implementation propagates the
+// TraceID and the current active SpanID, as well as the Span baggage.
+func (p *propagator) Inject(spanCtx ddtrace.SpanContext, carrier interface{}) error {
+ switch v := carrier.(type) {
+ case TextMapWriter:
+ return p.injectTextMap(spanCtx, v)
+ default:
+ return ErrInvalidCarrier
+ }
+}
+
+func (p *propagator) injectTextMap(spanCtx ddtrace.SpanContext, writer TextMapWriter) error {
+ ctx, ok := spanCtx.(*spanContext)
+ if !ok || ctx.traceID == 0 || ctx.spanID == 0 {
+ return ErrInvalidSpanContext
+ }
+ // propagate the TraceID and the current active SpanID
+ writer.Set(p.cfg.TraceHeader, strconv.FormatUint(ctx.traceID, 10))
+ writer.Set(p.cfg.ParentHeader, strconv.FormatUint(ctx.spanID, 10))
+ if ctx.hasSamplingPriority() {
+ writer.Set(p.cfg.PriorityHeader, strconv.Itoa(ctx.samplingPriority()))
+ }
+ // propagate OpenTracing baggage
+ for k, v := range ctx.baggage {
+ writer.Set(p.cfg.BaggagePrefix+k, v)
+ }
+ return nil
+}
+
+// Extract implements Propagator.
+func (p *propagator) Extract(carrier interface{}) (ddtrace.SpanContext, error) {
+ switch v := carrier.(type) {
+ case TextMapReader:
+ return p.extractTextMap(v)
+ default:
+ return nil, ErrInvalidCarrier
+ }
+}
+
+func (p *propagator) extractTextMap(reader TextMapReader) (ddtrace.SpanContext, error) {
+ var ctx spanContext
+ err := reader.ForeachKey(func(k, v string) error {
+ var err error
+ key := strings.ToLower(k)
+ switch key {
+ case p.cfg.TraceHeader:
+ ctx.traceID, err = strconv.ParseUint(v, 10, 64)
+ if err != nil {
+ return ErrSpanContextCorrupted
+ }
+ case p.cfg.ParentHeader:
+ ctx.spanID, err = strconv.ParseUint(v, 10, 64)
+ if err != nil {
+ return ErrSpanContextCorrupted
+ }
+ case p.cfg.PriorityHeader:
+ ctx.priority, err = strconv.Atoi(v)
+ if err != nil {
+ return ErrSpanContextCorrupted
+ }
+ ctx.hasPriority = true
+ default:
+ if strings.HasPrefix(key, p.cfg.BaggagePrefix) {
+ ctx.setBaggageItem(strings.TrimPrefix(key, p.cfg.BaggagePrefix), v)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ if ctx.traceID == 0 || ctx.spanID == 0 {
+ return nil, ErrSpanContextNotFound
+ }
+ return &ctx, nil
+}
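Together, the carriers and the default propagator implement distributed tracing over plain HTTP headers: Inject writes the trace ID, parent ID, sampling priority and prefixed baggage into the carrier, and Extract rebuilds a spanContext on the receiving side (keys are lowercased during extraction, so custom header names should be configured in lowercase). A hedged sketch of both halves; the downstream URL and handler wiring are placeholders:

```go
package example

import (
	"net/http"

	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
)

// callDownstream propagates the current span's context on an outgoing request.
func callDownstream() error {
	span := tracer.StartSpan("web.request")
	defer span.Finish()

	req, err := http.NewRequest("GET", "http://downstream.local/work", nil) // placeholder URL
	if err != nil {
		return err
	}
	// Writes x-datadog-trace-id, x-datadog-parent-id, the sampling
	// priority (if set) and any ot-baggage-* items into the headers.
	if err := tracer.Inject(span.Context(), tracer.HTTPHeadersCarrier(req.Header)); err != nil {
		return err
	}
	_, err = http.DefaultClient.Do(req)
	return err
}

// handleUpstream rebuilds the propagated context and parents its span on it.
func handleUpstream(w http.ResponseWriter, r *http.Request) {
	sctx, err := tracer.Extract(tracer.HTTPHeadersCarrier(r.Header))
	if err != nil {
		sctx = nil // no usable context: start a fresh trace instead
	}
	span := tracer.StartSpan("web.request", tracer.ChildOf(sctx))
	defer span.Finish()
}
```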
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/textmap_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/textmap_test.go
new file mode 100644
index 00000000..18564623
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/textmap_test.go
@@ -0,0 +1,170 @@
+package tracer
+
+import (
+ "errors"
+ "net/http"
+ "strconv"
+ "testing"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestHTTPHeadersCarrierSet(t *testing.T) {
+ h := http.Header{}
+ c := HTTPHeadersCarrier(h)
+ c.Set("A", "x")
+ assert.Equal(t, "x", h.Get("A"))
+}
+
+func TestHTTPHeadersCarrierForeachKey(t *testing.T) {
+ h := http.Header{}
+ h.Add("A", "x")
+ h.Add("B", "y")
+ got := map[string]string{}
+ err := HTTPHeadersCarrier(h).ForeachKey(func(k, v string) error {
+ got[k] = v
+ return nil
+ })
+ assert := assert.New(t)
+ assert.Nil(err)
+ assert.Equal("x", h.Get("A"))
+ assert.Equal("y", h.Get("B"))
+}
+
+func TestHTTPHeadersCarrierForeachKeyError(t *testing.T) {
+ want := errors.New("random error")
+ h := http.Header{}
+ h.Add("A", "x")
+ h.Add("B", "y")
+ got := HTTPHeadersCarrier(h).ForeachKey(func(k, v string) error {
+ if k == "B" {
+ return want
+ }
+ return nil
+ })
+ assert.Equal(t, want, got)
+}
+
+func TestTextMapCarrierSet(t *testing.T) {
+ m := map[string]string{}
+ c := TextMapCarrier(m)
+ c.Set("a", "b")
+ assert.Equal(t, "b", m["a"])
+}
+
+func TestTextMapCarrierForeachKey(t *testing.T) {
+ want := map[string]string{"A": "x", "B": "y"}
+ got := map[string]string{}
+ err := TextMapCarrier(want).ForeachKey(func(k, v string) error {
+ got[k] = v
+ return nil
+ })
+ assert := assert.New(t)
+ assert.Nil(err)
+ assert.Equal(got, want)
+}
+
+func TestTextMapCarrierForeachKeyError(t *testing.T) {
+ m := map[string]string{"A": "x", "B": "y"}
+ want := errors.New("random error")
+ got := TextMapCarrier(m).ForeachKey(func(k, v string) error {
+ return want
+ })
+ assert.Equal(t, got, want)
+}
+
+func TestTextMapPropagatorErrors(t *testing.T) {
+ propagator := NewPropagator(nil)
+ assert := assert.New(t)
+
+ err := propagator.Inject(&spanContext{}, 2)
+ assert.Equal(ErrInvalidCarrier, err)
+ err = propagator.Inject(internal.NoopSpanContext{}, TextMapCarrier(map[string]string{}))
+ assert.Equal(ErrInvalidSpanContext, err)
+ err = propagator.Inject(&spanContext{}, TextMapCarrier(map[string]string{}))
+ assert.Equal(ErrInvalidSpanContext, err) // no traceID and spanID
+ err = propagator.Inject(&spanContext{traceID: 1}, TextMapCarrier(map[string]string{}))
+ assert.Equal(ErrInvalidSpanContext, err) // no spanID
+
+ _, err = propagator.Extract(2)
+ assert.Equal(ErrInvalidCarrier, err)
+
+ _, err = propagator.Extract(TextMapCarrier(map[string]string{
+ DefaultTraceIDHeader: "1",
+ DefaultParentIDHeader: "A",
+ }))
+ assert.Equal(ErrSpanContextCorrupted, err)
+
+ _, err = propagator.Extract(TextMapCarrier(map[string]string{
+ DefaultTraceIDHeader: "A",
+ DefaultParentIDHeader: "2",
+ }))
+ assert.Equal(ErrSpanContextCorrupted, err)
+
+ _, err = propagator.Extract(TextMapCarrier(map[string]string{
+ DefaultTraceIDHeader: "0",
+ DefaultParentIDHeader: "0",
+ }))
+ assert.Equal(ErrSpanContextNotFound, err)
+}
+
+func TestTextMapPropagatorInjectHeader(t *testing.T) {
+ assert := assert.New(t)
+
+ propagator := NewPropagator(&PropagatorConfig{
+ BaggagePrefix: "bg-",
+ TraceHeader: "tid",
+ ParentHeader: "pid",
+ })
+ tracer := newTracer(WithPropagator(propagator))
+
+ root := tracer.StartSpan("web.request").(*span)
+ root.SetBaggageItem("item", "x")
+ root.SetTag(ext.SamplingPriority, 0)
+ ctx := root.Context()
+ headers := http.Header{}
+
+ carrier := HTTPHeadersCarrier(headers)
+ err := tracer.Inject(ctx, carrier)
+ assert.Nil(err)
+
+ tid := strconv.FormatUint(root.TraceID, 10)
+ pid := strconv.FormatUint(root.SpanID, 10)
+
+ assert.Equal(headers.Get("tid"), tid)
+ assert.Equal(headers.Get("pid"), pid)
+ assert.Equal(headers.Get("bg-item"), "x")
+ assert.Equal(headers.Get(DefaultPriorityHeader), "0")
+}
+
+func TestTextMapPropagatorInjectExtract(t *testing.T) {
+ propagator := NewPropagator(&PropagatorConfig{
+ BaggagePrefix: "bg-",
+ TraceHeader: "tid",
+ ParentHeader: "pid",
+ })
+ tracer := newTracer(WithPropagator(propagator))
+ root := tracer.StartSpan("web.request").(*span)
+ root.SetTag(ext.SamplingPriority, -1)
+ root.SetBaggageItem("item", "x")
+ ctx := root.Context().(*spanContext)
+ headers := TextMapCarrier(map[string]string{})
+ err := tracer.Inject(ctx, headers)
+
+ assert := assert.New(t)
+ assert.Nil(err)
+
+ sctx, err := tracer.Extract(headers)
+ assert.Nil(err)
+
+ xctx, ok := sctx.(*spanContext)
+ assert.True(ok)
+ assert.Equal(xctx.traceID, ctx.traceID)
+ assert.Equal(xctx.spanID, ctx.spanID)
+ assert.Equal(xctx.baggage, ctx.baggage)
+ assert.Equal(xctx.priority, ctx.priority)
+ assert.Equal(xctx.hasPriority, ctx.hasPriority)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/time.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/time.go
new file mode 100644
index 00000000..459161c7
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/time.go
@@ -0,0 +1,10 @@
+// +build !windows
+
+package tracer
+
+import "time"
+
+// now returns the current UTC time in nanoseconds.
+func now() int64 {
+ return time.Now().UTC().UnixNano()
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/time_windows.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/time_windows.go
new file mode 100644
index 00000000..f9c56aeb
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/time_windows.go
@@ -0,0 +1,35 @@
+package tracer
+
+import (
+ "log"
+ "time"
+
+ "golang.org/x/sys/windows"
+)
+
+// highPrecisionNow is more precise than the go1.8 time.Now on Windows.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/hh706895(v=vs.85).aspx
+// It is however ~10x slower and requires Windows 8+.
+func highPrecisionNow() int64 {
+ var ft windows.Filetime
+ windows.GetSystemTimePreciseAsFileTime(&ft)
+ return ft.Nanoseconds()
+}
+
+func lowPrecisionNow() int64 {
+ return time.Now().UTC().UnixNano()
+}
+
+var now func() int64
+
+// If GetSystemTimePreciseAsFileTime is not available, we fall back to the less
+// precise implementation based on time.Now().
+func init() {
+ if err := windows.LoadGetSystemTimePreciseAsFileTime(); err != nil {
+ log.Printf("Unable to load high precison timer, defaulting to time.Now()")
+ now = lowPrecisionNow
+ } else {
+ log.Printf("Using high precision timer")
+ now = highPrecisionNow
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/time_windows_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/time_windows_test.go
new file mode 100644
index 00000000..6ffc80ae
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/time_windows_test.go
@@ -0,0 +1,30 @@
+package tracer
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func BenchmarkNormalTimeNow(b *testing.B) {
+ for n := 0; n < b.N; n++ {
+ lowPrecisionNow()
+ }
+}
+
+func BenchmarkHighPrecisionTime(b *testing.B) {
+ for n := 0; n < b.N; n++ {
+ highPrecisionNow()
+ }
+}
+
+func TestHighPrecisionTimerIsMoreAccurate(t *testing.T) {
+ startLow := lowPrecisionNow()
+ startHigh := highPrecisionNow()
+ stopHigh := highPrecisionNow()
+ for stopHigh == startHigh {
+ stopHigh = highPrecisionNow()
+ }
+ stopLow := lowPrecisionNow()
+ assert.Equal(t, int64(0), stopLow-startLow)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/tracer.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/tracer.go
new file mode 100644
index 00000000..6ef9d4db
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/tracer.go
@@ -0,0 +1,370 @@
+package tracer
+
+import (
+ "errors"
+ "log"
+ "os"
+ "strconv"
+ "time"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal"
+)
+
+var _ ddtrace.Tracer = (*tracer)(nil)
+
+// tracer creates, buffers and submits Spans which are used to time blocks of
+// computation. They are accumulated and streamed into an internal payload,
+// which is flushed to the agent whenever its size exceeds a specific threshold
+// or when a certain interval of time has passed, whichever happens first.
+//
+// tracer operates based on a worker loop which responds to various request
+// channels. It additionally holds two buffers which accumulate the error and
+// trace queues to be processed by the payload encoder.
+type tracer struct {
+ *config
+ *payload
+
+ flushAllReq chan chan<- struct{}
+ flushTracesReq chan struct{}
+ flushErrorsReq chan struct{}
+ exitReq chan struct{}
+
+ payloadQueue chan []*span
+ errorBuffer chan error
+
+ // stopped is a channel that will be closed when the worker has exited.
+ stopped chan struct{}
+
+ // syncPush is used for testing. When non-nil, it causes pushTrace to become
+ // a synchronous (blocking) operation, meaning that it will only return after
+ // the trace has been fully processed and added onto the payload.
+ syncPush chan struct{}
+}
+
+const (
+ // flushInterval is the interval at which the payload contents will be flushed
+ // to the transport.
+ flushInterval = 2 * time.Second
+
+ // payloadMaxLimit is the maximum payload size allowed and should indicate
+ // the maximum size of a payload that the agent can receive.
+ payloadMaxLimit = 9.5 * 1024 * 1024 // 9.5 MB
+
+ // payloadSizeLimit specifies the maximum allowed size of the payload before
+ // it will trigger a flush to the transport.
+ payloadSizeLimit = payloadMaxLimit / 2
+)
+
+// Start starts the tracer with the given set of options. It will stop and replace
+// any running tracer, meaning that calling it several times will result in a restart
+// of the tracer by replacing the current instance with a new one.
+func Start(opts ...StartOption) {
+ if internal.Testing {
+ return // mock tracer active
+ }
+ internal.SetGlobalTracer(newTracer(opts...))
+}
+
+// Stop stops the started tracer. Subsequent calls are valid but become no-op.
+func Stop() {
+ internal.SetGlobalTracer(&internal.NoopTracer{})
+}
+
+// Span is an alias for ddtrace.Span. It is here to allow godoc to group
+// methods returning ddtrace.Span. It is recommended, and considered more
+// correct, to refer to this type as ddtrace.Span instead.
+type Span = ddtrace.Span
+
+// StartSpan starts a new span with the given operation name and set of options.
+// If the tracer is not started, calling this function is a no-op.
+func StartSpan(operationName string, opts ...StartSpanOption) Span {
+ return internal.GetGlobalTracer().StartSpan(operationName, opts...)
+}
+
+// Extract extracts a SpanContext from the carrier. The carrier is expected
+// to implement TextMapReader, otherwise an error is returned.
+// If the tracer is not started, calling this function is a no-op.
+func Extract(carrier interface{}) (ddtrace.SpanContext, error) {
+ return internal.GetGlobalTracer().Extract(carrier)
+}
+
+// Inject injects the given SpanContext into the carrier. The carrier is
+// expected to implement TextMapWriter, otherwise an error is returned.
+// If the tracer is not started, calling this function is a no-op.
+func Inject(ctx ddtrace.SpanContext, carrier interface{}) error {
+ return internal.GetGlobalTracer().Inject(ctx, carrier)
+}
+
+const (
+ // payloadQueueSize is the buffer size of the trace channel.
+ payloadQueueSize = 1000
+
+ // errorBufferSize is the buffer size of the error channel.
+ errorBufferSize = 200
+)
+
+func newTracer(opts ...StartOption) *tracer {
+ c := new(config)
+ defaults(c)
+ for _, fn := range opts {
+ fn(c)
+ }
+ if c.transport == nil {
+ c.transport = newTransport(c.agentAddr)
+ }
+ if c.propagator == nil {
+ c.propagator = NewPropagator(nil)
+ }
+ t := &tracer{
+ config: c,
+ payload: newPayload(),
+ flushAllReq: make(chan chan<- struct{}),
+ flushTracesReq: make(chan struct{}, 1),
+ flushErrorsReq: make(chan struct{}, 1),
+ exitReq: make(chan struct{}),
+ payloadQueue: make(chan []*span, payloadQueueSize),
+ errorBuffer: make(chan error, errorBufferSize),
+ stopped: make(chan struct{}),
+ }
+
+ go t.worker()
+
+ return t
+}
+
+// worker receives finished traces to be added into the payload, as well
+// as periodically flushes traces to the transport.
+func (t *tracer) worker() {
+ defer close(t.stopped)
+ ticker := time.NewTicker(flushInterval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case trace := <-t.payloadQueue:
+ t.pushPayload(trace)
+
+ case <-ticker.C:
+ t.flush()
+
+ case done := <-t.flushAllReq:
+ t.flush()
+ done <- struct{}{}
+
+ case <-t.flushTracesReq:
+ t.flushTraces()
+
+ case <-t.flushErrorsReq:
+ t.flushErrors()
+
+ case <-t.exitReq:
+ t.flush()
+ return
+ }
+ }
+}
+
+func (t *tracer) pushTrace(trace []*span) {
+ select {
+ case <-t.stopped:
+ return
+ default:
+ }
+ select {
+ case t.payloadQueue <- trace:
+ default:
+ t.pushError(&dataLossError{
+ context: errors.New("payload queue full, dropping trace"),
+ count: len(trace),
+ })
+ }
+ if t.syncPush != nil {
+ // only in tests
+ <-t.syncPush
+ }
+}
+
+func (t *tracer) pushError(err error) {
+ select {
+ case <-t.stopped:
+ return
+ default:
+ }
+ if len(t.errorBuffer) >= cap(t.errorBuffer)/2 { // the buffer is filling up; anticipate and try to flush soon
+ select {
+ case t.flushErrorsReq <- struct{}{}:
+ default: // a flush was already requested, skip
+ }
+ }
+ select {
+ case t.errorBuffer <- err:
+ default:
+ // OK, if we get here, the error buffer is full. We can assume it
+ // is filled with meaningful messages which are going to be logged
+ // and hopefully read; there is nothing better we can do, and
+ // blocking would make things worse.
+ }
+}
+
+// StartSpan creates, starts, and returns a new Span with the given `operationName`.
+func (t *tracer) StartSpan(operationName string, options ...ddtrace.StartSpanOption) ddtrace.Span {
+ var opts ddtrace.StartSpanConfig
+ for _, fn := range options {
+ fn(&opts)
+ }
+ var startTime int64
+ if opts.StartTime.IsZero() {
+ startTime = now()
+ } else {
+ startTime = opts.StartTime.UnixNano()
+ }
+ var context *spanContext
+ if opts.Parent != nil {
+ if ctx, ok := opts.Parent.(*spanContext); ok {
+ context = ctx
+ }
+ }
+ id := random.Uint64()
+ // span defaults
+ span := &span{
+ Name: operationName,
+ Service: t.config.serviceName,
+ Resource: operationName,
+ Meta: map[string]string{},
+ Metrics: map[string]float64{},
+ SpanID: id,
+ TraceID: id,
+ ParentID: 0,
+ Start: startTime,
+ }
+ if context != nil {
+ // this is a child span
+ span.TraceID = context.traceID
+ span.ParentID = context.spanID
+ if context.hasSamplingPriority() {
+ span.Metrics[samplingPriorityKey] = float64(context.samplingPriority())
+ }
+ if context.span != nil {
+ context.span.RLock()
+ span.Service = context.span.Service
+ context.span.RUnlock()
+ }
+ }
+ span.context = newSpanContext(span, context)
+ if context == nil || context.span == nil {
+ // this is either a global root span or a process-level root span
+ span.SetTag(ext.Pid, strconv.Itoa(os.Getpid()))
+ t.sample(span)
+ }
+ // add tags from options
+ for k, v := range opts.Tags {
+ span.SetTag(k, v)
+ }
+ // add global tags
+ for k, v := range t.config.globalTags {
+ span.SetTag(k, v)
+ }
+ return span
+}
+
+// Stop stops the tracer.
+func (t *tracer) Stop() {
+ select {
+ case <-t.stopped:
+ return
+ default:
+ t.exitReq <- struct{}{}
+ <-t.stopped
+ }
+}
+
+// Inject uses the configured or default TextMap Propagator.
+func (t *tracer) Inject(ctx ddtrace.SpanContext, carrier interface{}) error {
+ return t.config.propagator.Inject(ctx, carrier)
+}
+
+// Extract uses the configured or default TextMap Propagator.
+func (t *tracer) Extract(carrier interface{}) (ddtrace.SpanContext, error) {
+ return t.config.propagator.Extract(carrier)
+}
+
+// flushTraces will push any currently buffered traces to the server.
+func (t *tracer) flushTraces() {
+ if t.payload.itemCount() == 0 {
+ return
+ }
+ size, count := t.payload.size(), t.payload.itemCount()
+ if t.config.debug {
+ log.Printf("Sending payload: size: %d traces: %d\n", size, count)
+ }
+ err := t.config.transport.send(t.payload)
+ if err != nil {
+ t.pushError(&dataLossError{context: err, count: count})
+ }
+ t.payload.reset()
+}
+
+// flushErrors will process log messages that were queued
+func (t *tracer) flushErrors() {
+ logErrors(t.errorBuffer)
+}
+
+func (t *tracer) flush() {
+ t.flushTraces()
+ t.flushErrors()
+}
+
+// forceFlush forces a flush of data (traces and services) to the agent.
+// Flushes are done by a background task on a regular basis, so you should
+// never need to call this manually; it is mostly useful for testing and debugging.
+func (t *tracer) forceFlush() {
+ done := make(chan struct{})
+ t.flushAllReq <- done
+ <-done
+}
+
+// pushPayload pushes the trace onto the payload. If the payload becomes
+// larger than the threshold as a result, it sends a flush request.
+func (t *tracer) pushPayload(trace []*span) {
+ if err := t.payload.push(trace); err != nil {
+ t.pushError(&traceEncodingError{context: err})
+ }
+ if t.payload.size() > payloadSizeLimit {
+ // getting large
+ select {
+ case t.flushTracesReq <- struct{}{}:
+ default:
+ // flush already queued
+ }
+ }
+ if t.syncPush != nil {
+ // only in tests
+ t.syncPush <- struct{}{}
+ }
+}
+
+// sampleRateMetricKey is the metric key holding the applied sample rate. It must match the key used by the agent.
+const sampleRateMetricKey = "_sample_rate"
+
+// Sample samples a span with the internal sampler.
+func (t *tracer) sample(span *span) {
+ sampler := t.config.sampler
+ sampled := sampler.Sample(span)
+ span.context.sampled = sampled
+ if !sampled {
+ return
+ }
+ if rs, ok := sampler.(RateSampler); ok && rs.Rate() < 1 {
+ // the span was sampled using a rate sampler which wasn't all permissive,
+ // so we make note of the sampling rate.
+ span.Lock()
+ defer span.Unlock()
+ if span.finished {
+ // we don't touch finished span as they might be flushing
+ return
+ }
+ span.Metrics[sampleRateMetricKey] = rs.Rate()
+ }
+}
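Start, Stop and the package-level span helpers are designed to be safe in any order: before Start the global tracer is a no-op, and Start replaces any running instance, which is what the start/stop stress test below leans on. A short lifecycle sketch; the sampler and tag choices are purely illustrative:

```go
package main

import (
	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
	"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
)

func main() {
	// Before Start, the global tracer is a no-op: spans are created but
	// never buffered or sent, so instrumentation can be left in place.
	boot := tracer.StartSpan("boot.sequence")
	boot.Finish()

	// Start swaps in a real tracer; calling it again restarts cleanly.
	tracer.Start(tracer.WithSampler(tracer.NewRateSampler(1))) // keep every span
	defer tracer.Stop()

	// A sampling priority set on the root is inherited by its children
	// and travels with the propagated context.
	root := tracer.StartSpan("web.request", tracer.Tag(ext.SamplingPriority, ext.PriorityUserKeep))
	child := tracer.StartSpan("db.query", tracer.ChildOf(root.Context()))
	child.Finish()
	root.Finish()
}
```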
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/tracer_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/tracer_test.go
new file mode 100644
index 00000000..711f0d36
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/tracer_test.go
@@ -0,0 +1,902 @@
+package tracer
+
+import (
+ "fmt"
+ "net/http"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
+ "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/internal"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/tinylib/msgp/msgp"
+)
+
+func (t *tracer) newRootSpan(name, service, resource string) *span {
+ return t.StartSpan(name, SpanType("test"), ServiceName(service), ResourceName(resource)).(*span)
+}
+
+func (t *tracer) newChildSpan(name string, parent *span) *span {
+ if parent == nil {
+ return t.StartSpan(name).(*span)
+ }
+ return t.StartSpan(name, ChildOf(parent.Context())).(*span)
+}
+
+// TestTracerCleanStop does frenetic testing in a scenario where the tracer is
+// started and stopped in parallel with spans being created.
+func TestTracerCleanStop(t *testing.T) {
+ if testing.Short() {
+ return
+ }
+ var wg sync.WaitGroup
+ transport := newDummyTransport()
+
+ n := 5000
+
+ wg.Add(3)
+ for j := 0; j < 3; j++ {
+ go func() {
+ defer wg.Done()
+ for i := 0; i < n; i++ {
+ span := StartSpan("test.span")
+ child := StartSpan("child.span", ChildOf(span.Context()))
+ time.Sleep(time.Millisecond)
+ child.Finish()
+ time.Sleep(time.Millisecond)
+ span.Finish()
+ }
+ }()
+ }
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < n; i++ {
+ Start(withTransport(transport))
+ time.Sleep(time.Millisecond)
+ Start(withTransport(transport))
+ Start(withTransport(transport))
+ }
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < n; i++ {
+ Stop()
+ Stop()
+ Stop()
+ time.Sleep(time.Millisecond)
+ Stop()
+ Stop()
+ Stop()
+ }
+ }()
+
+ wg.Wait()
+}
+
+func TestTracerStart(t *testing.T) {
+ t.Run("normal", func(t *testing.T) {
+ Start()
+ defer Stop()
+ if _, ok := internal.GetGlobalTracer().(*tracer); !ok {
+ t.Fail()
+ }
+ })
+
+ t.Run("testing", func(t *testing.T) {
+ internal.Testing = true
+ Start()
+ defer Stop()
+ if _, ok := internal.GetGlobalTracer().(*tracer); ok {
+ t.Fail()
+ }
+ if _, ok := internal.GetGlobalTracer().(*internal.NoopTracer); !ok {
+ t.Fail()
+ }
+ internal.Testing = false
+ })
+
+ t.Run("deadlock/api", func(t *testing.T) {
+ Stop()
+ Stop()
+
+ Start()
+ Start()
+ Start()
+
+ // ensure at least one worker started and handles requests
+ internal.GetGlobalTracer().(*tracer).forceFlush()
+
+ Stop()
+ Stop()
+ Stop()
+ Stop()
+ })
+
+ t.Run("deadlock/direct", func(t *testing.T) {
+ tr, _, stop := startTestTracer()
+ defer stop()
+ go tr.worker()
+ tr.forceFlush() // blocks until worker is started
+ select {
+ case <-tr.stopped:
+ t.Fatal("stopped channel should be open")
+ default:
+ // OK
+ }
+ tr.Stop()
+ select {
+ case <-tr.stopped:
+ // OK
+ default:
+ t.Fatal("stopped channel should be closed")
+ }
+ tr.Stop()
+ tr.Stop()
+ })
+}
+
+func TestTracerStartSpan(t *testing.T) {
+ tracer := newTracer()
+ span := tracer.StartSpan("web.request").(*span)
+ assert := assert.New(t)
+ assert.NotEqual(uint64(0), span.TraceID)
+ assert.NotEqual(uint64(0), span.SpanID)
+ assert.Equal(uint64(0), span.ParentID)
+ assert.Equal("web.request", span.Name)
+ assert.Equal("tracer.test", span.Service)
+}
+
+func TestTracerStartSpanOptions(t *testing.T) {
+ tracer := newTracer()
+ now := time.Now()
+ opts := []StartSpanOption{
+ SpanType("test"),
+ ServiceName("test.service"),
+ ResourceName("test.resource"),
+ StartTime(now),
+ }
+ span := tracer.StartSpan("web.request", opts...).(*span)
+ assert := assert.New(t)
+ assert.Equal("test", span.Type)
+ assert.Equal("test.service", span.Service)
+ assert.Equal("test.resource", span.Resource)
+ assert.Equal(now.UnixNano(), span.Start)
+}
+
+func TestTracerStartChildSpan(t *testing.T) {
+ t.Run("own-service", func(t *testing.T) {
+ assert := assert.New(t)
+ tracer := newTracer()
+ root := tracer.StartSpan("web.request", ServiceName("root-service")).(*span)
+ child := tracer.StartSpan("db.query", ChildOf(root.Context()), ServiceName("child-service")).(*span)
+
+ assert.NotEqual(uint64(0), child.TraceID)
+ assert.NotEqual(uint64(0), child.SpanID)
+ assert.Equal(root.SpanID, child.ParentID)
+ assert.Equal(root.TraceID, child.TraceID)
+ assert.Equal("child-service", child.Service)
+ })
+
+ t.Run("inherit-service", func(t *testing.T) {
+ assert := assert.New(t)
+ tracer := newTracer()
+ root := tracer.StartSpan("web.request", ServiceName("root-service")).(*span)
+ child := tracer.StartSpan("db.query", ChildOf(root.Context())).(*span)
+
+ assert.Equal("root-service", child.Service)
+ })
+}
+
+func TestTracerBaggagePropagation(t *testing.T) {
+ assert := assert.New(t)
+ tracer := newTracer()
+ root := tracer.StartSpan("web.request").(*span)
+ root.SetBaggageItem("key", "value")
+ child := tracer.StartSpan("db.query", ChildOf(root.Context())).(*span)
+ context := child.Context().(*spanContext)
+
+ assert.Equal("value", context.baggage["key"])
+}
+
+func TestPropagationDefaults(t *testing.T) {
+ assert := assert.New(t)
+
+ tracer := newTracer()
+ root := tracer.StartSpan("web.request").(*span)
+ root.SetBaggageItem("x", "y")
+ root.SetTag(ext.SamplingPriority, -1)
+ ctx := root.Context().(*spanContext)
+ headers := http.Header{}
+
+ // inject the spanContext
+ carrier := HTTPHeadersCarrier(headers)
+ err := tracer.Inject(ctx, carrier)
+ assert.Nil(err)
+
+ tid := strconv.FormatUint(root.TraceID, 10)
+ pid := strconv.FormatUint(root.SpanID, 10)
+
+ assert.Equal(headers.Get(DefaultTraceIDHeader), tid)
+ assert.Equal(headers.Get(DefaultParentIDHeader), pid)
+ assert.Equal(headers.Get(DefaultBaggageHeaderPrefix+"x"), "y")
+ assert.Equal(headers.Get(DefaultPriorityHeader), "-1")
+
+ // retrieve the spanContext
+ propagated, err := tracer.Extract(carrier)
+ assert.Nil(err)
+ pctx := propagated.(*spanContext)
+
+ // compare if there is a Context match
+ assert.Equal(ctx.traceID, pctx.traceID)
+ assert.Equal(ctx.spanID, pctx.spanID)
+ assert.Equal(ctx.baggage, pctx.baggage)
+ assert.Equal(ctx.priority, -1)
+ assert.True(ctx.hasPriority)
+
+ // ensure a child can be created
+ child := tracer.StartSpan("db.query", ChildOf(propagated)).(*span)
+
+ assert.NotEqual(uint64(0), child.TraceID)
+ assert.NotEqual(uint64(0), child.SpanID)
+ assert.Equal(root.SpanID, child.ParentID)
+ assert.Equal(root.TraceID, child.TraceID)
+ assert.Equal(child.context.priority, -1)
+ assert.True(child.context.hasPriority)
+}
+
+func TestTracerSamplingPriorityPropagation(t *testing.T) {
+ assert := assert.New(t)
+ tracer := newTracer()
+ root := tracer.StartSpan("web.request", Tag(ext.SamplingPriority, 2)).(*span)
+ child := tracer.StartSpan("db.query", ChildOf(root.Context())).(*span)
+ assert.EqualValues(2, root.Metrics[samplingPriorityKey])
+ assert.EqualValues(2, child.Metrics[samplingPriorityKey])
+ assert.EqualValues(2, root.context.priority)
+ assert.EqualValues(2, child.context.priority)
+ assert.True(root.context.hasPriority)
+ assert.True(child.context.hasPriority)
+}
+
+func TestTracerBaggageImmutability(t *testing.T) {
+ assert := assert.New(t)
+ tracer := newTracer()
+ root := tracer.StartSpan("web.request").(*span)
+ root.SetBaggageItem("key", "value")
+ child := tracer.StartSpan("db.query", ChildOf(root.Context())).(*span)
+ child.SetBaggageItem("key", "changed!")
+ parentContext := root.Context().(*spanContext)
+ childContext := child.Context().(*spanContext)
+ assert.Equal("value", parentContext.baggage["key"])
+ assert.Equal("changed!", childContext.baggage["key"])
+}
+
+func TestTracerSpanTags(t *testing.T) {
+ tracer := newTracer()
+ tag := Tag("key", "value")
+ span := tracer.StartSpan("web.request", tag).(*span)
+ assert := assert.New(t)
+ assert.Equal("value", span.Meta["key"])
+}
+
+func TestTracerSpanGlobalTags(t *testing.T) {
+ assert := assert.New(t)
+ tracer := newTracer(WithGlobalTag("key", "value"))
+ s := tracer.StartSpan("web.request").(*span)
+ assert.Equal("value", s.Meta["key"])
+ child := tracer.StartSpan("db.query", ChildOf(s.Context())).(*span)
+ assert.Equal("value", child.Meta["key"])
+}
+
+func TestNewSpan(t *testing.T) {
+ assert := assert.New(t)
+
+ // the tracer must create root spans
+ tracer := newTracer(withTransport(newDefaultTransport()))
+ span := tracer.newRootSpan("pylons.request", "pylons", "/")
+ assert.Equal(uint64(0), span.ParentID)
+ assert.Equal("pylons", span.Service)
+ assert.Equal("pylons.request", span.Name)
+ assert.Equal("/", span.Resource)
+}
+
+func TestNewSpanChild(t *testing.T) {
+ assert := assert.New(t)
+
+ // the tracer must create child spans
+ tracer := newTracer(withTransport(newDefaultTransport()))
+ parent := tracer.newRootSpan("pylons.request", "pylons", "/")
+ child := tracer.newChildSpan("redis.command", parent)
+ // ids and services are inherited
+ assert.Equal(parent.SpanID, child.ParentID)
+ assert.Equal(parent.TraceID, child.TraceID)
+ assert.Equal(parent.Service, child.Service)
+ // the resource is not inherited and defaults to the name
+ assert.Equal("redis.command", child.Resource)
+}
+
+func TestNewRootSpanHasPid(t *testing.T) {
+ assert := assert.New(t)
+
+ tracer := newTracer(withTransport(newDefaultTransport()))
+ root := tracer.newRootSpan("pylons.request", "pylons", "/")
+
+ assert.Equal(strconv.Itoa(os.Getpid()), root.Meta[ext.Pid])
+}
+
+func TestNewChildHasNoPid(t *testing.T) {
+ assert := assert.New(t)
+
+ tracer := newTracer(withTransport(newDefaultTransport()))
+ root := tracer.newRootSpan("pylons.request", "pylons", "/")
+ child := tracer.newChildSpan("redis.command", root)
+
+ assert.Equal("", child.Meta[ext.Pid])
+}
+
+func TestTracerSampler(t *testing.T) {
+ assert := assert.New(t)
+
+ sampler := NewRateSampler(0.9999) // high probability of sampling
+ tracer := newTracer(
+ withTransport(newDefaultTransport()),
+ WithSampler(sampler),
+ )
+
+ span := tracer.newRootSpan("pylons.request", "pylons", "/")
+
+ if !sampler.Sample(span) {
+ t.Skip("wasn't sampled") // no flaky tests
+ }
+ // only run test if span was sampled to avoid flaky tests
+ _, ok := span.Metrics[sampleRateMetricKey]
+ assert.True(ok)
+}
+
+func TestTracerEdgeSampler(t *testing.T) {
+ assert := assert.New(t)
+
+ // a sample rate of 0 should sample nothing
+ tracer0, _, stop := startTestTracer(
+ withTransport(newDefaultTransport()),
+ WithSampler(NewRateSampler(0)),
+ )
+ defer stop()
+ // a sample rate of 1 should sample everything
+ tracer1, _, stop := startTestTracer(
+ withTransport(newDefaultTransport()),
+ WithSampler(NewRateSampler(1)),
+ )
+ defer stop()
+
+ count := payloadQueueSize / 3
+
+ for i := 0; i < count; i++ {
+ span0 := tracer0.newRootSpan("pylons.request", "pylons", "/")
+ span0.Finish()
+ span1 := tracer1.newRootSpan("pylons.request", "pylons", "/")
+ span1.Finish()
+ }
+
+ assert.Equal(tracer0.payload.itemCount(), 0)
+ assert.Equal(tracer1.payload.itemCount(), count)
+}
+
+func TestTracerConcurrent(t *testing.T) {
+ assert := assert.New(t)
+ tracer, transport, stop := startTestTracer()
+ defer stop()
+
+ // Wait for three different goroutines that should create
+ // three different traces with a single root span each
+ var wg sync.WaitGroup
+ wg.Add(3)
+ go func() {
+ defer wg.Done()
+ tracer.newRootSpan("pylons.request", "pylons", "/").Finish()
+ }()
+ go func() {
+ defer wg.Done()
+ tracer.newRootSpan("pylons.request", "pylons", "/home").Finish()
+ }()
+ go func() {
+ defer wg.Done()
+ tracer.newRootSpan("pylons.request", "pylons", "/trace").Finish()
+ }()
+
+ wg.Wait()
+ tracer.forceFlush()
+ traces := transport.Traces()
+ assert.Len(traces, 3)
+ assert.Len(traces[0], 1)
+ assert.Len(traces[1], 1)
+ assert.Len(traces[2], 1)
+}
+
+func TestTracerParentFinishBeforeChild(t *testing.T) {
+ assert := assert.New(t)
+ tracer, transport, stop := startTestTracer()
+ defer stop()
+
+ // Testing an edge case: a child refers to a parent that is already closed.
+
+ parent := tracer.newRootSpan("pylons.request", "pylons", "/")
+ parent.Finish()
+
+ tracer.forceFlush()
+ traces := transport.Traces()
+ assert.Len(traces, 1)
+ assert.Len(traces[0], 1)
+ comparePayloadSpans(t, parent, traces[0][0])
+
+ child := tracer.newChildSpan("redis.command", parent)
+ child.Finish()
+
+ tracer.forceFlush()
+
+ traces = transport.Traces()
+ assert.Len(traces, 1)
+ assert.Len(traces[0], 1)
+ comparePayloadSpans(t, child, traces[0][0])
+ assert.Equal(parent.SpanID, traces[0][0].ParentID, "child should refer to parent, even if they have been flushed separately")
+}
+
+func TestTracerConcurrentMultipleSpans(t *testing.T) {
+ assert := assert.New(t)
+ tracer, transport, stop := startTestTracer()
+ defer stop()
+
+ // Wait for two goroutines that each create a trace
+ // containing a parent and one child (two spans per trace)
+ var wg sync.WaitGroup
+ wg.Add(2)
+ go func() {
+ defer wg.Done()
+ parent := tracer.newRootSpan("pylons.request", "pylons", "/")
+ child := tracer.newChildSpan("redis.command", parent)
+ child.Finish()
+ parent.Finish()
+ }()
+ go func() {
+ defer wg.Done()
+ parent := tracer.newRootSpan("pylons.request", "pylons", "/")
+ child := tracer.newChildSpan("redis.command", parent)
+ child.Finish()
+ parent.Finish()
+ }()
+
+ wg.Wait()
+ tracer.forceFlush()
+ traces := transport.Traces()
+ assert.Len(traces, 2)
+ assert.Len(traces[0], 2)
+ assert.Len(traces[1], 2)
+}
+
+func TestTracerAtomicFlush(t *testing.T) {
+ assert := assert.New(t)
+ tracer, transport, stop := startTestTracer()
+ defer stop()
+
+ // Make sure we don't flush partial bits of traces
+ root := tracer.newRootSpan("pylons.request", "pylons", "/")
+ span := tracer.newChildSpan("redis.command", root)
+ span1 := tracer.newChildSpan("redis.command.1", span)
+ span2 := tracer.newChildSpan("redis.command.2", span)
+ span.Finish()
+ span1.Finish()
+ span2.Finish()
+
+ tracer.forceFlush()
+ traces := transport.Traces()
+ assert.Len(traces, 0, "nothing should be flushed yet as the root span is not finished")
+
+ root.Finish()
+
+ tracer.forceFlush()
+ traces = transport.Traces()
+ assert.Len(traces, 1)
+ assert.Len(traces[0], 4, "all spans should show up at once")
+}
+
+// TestTracerTraceMaxSize tests a bug that was encountered in environments
+// creating a large volume of spans that reached the trace cap value (traceMaxSize).
+// The bug was that once the cap is reached, no more spans are pushed onto
+// the buffer, yet they are part of the same trace. The trace is considered
+// completed and flushed when the number of finished spans == number of spans
+// in buffer. When reaching the cap, this condition might become true too
+// early, and some spans in the buffer might still not be finished when flushing.
+// Changing these spans at the moment of flush would (and did) cause a race
+// condition.
+func TestTracerTraceMaxSize(t *testing.T) {
+ _, _, stop := startTestTracer()
+ defer stop()
+
+ otss, otms := traceStartSize, traceMaxSize
+ traceStartSize, traceMaxSize = 3, 3
+ defer func() {
+ traceStartSize, traceMaxSize = otss, otms
+ }()
+
+ spans := make([]ddtrace.Span, 5)
+ spans[0] = StartSpan("span0")
+ spans[1] = StartSpan("span1", ChildOf(spans[0].Context()))
+ spans[2] = StartSpan("span2", ChildOf(spans[0].Context()))
+ spans[3] = StartSpan("span3", ChildOf(spans[0].Context()))
+ spans[4] = StartSpan("span4", ChildOf(spans[0].Context()))
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for i := 0; i < 5000; i++ {
+ spans[1].SetTag(strconv.Itoa(i), 1)
+ spans[2].SetTag(strconv.Itoa(i), 1)
+ }
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ spans[0].Finish()
+ spans[3].Finish()
+ spans[4].Finish()
+ }()
+
+ wg.Wait()
+}
+
+func TestTracerRace(t *testing.T) {
+ assert := assert.New(t)
+
+ tracer, transport, stop := startTestTracer()
+ defer stop()
+
+ total := payloadQueueSize / 3
+ var wg sync.WaitGroup
+ wg.Add(total)
+
+ // Trying to be quite brutal here, firing lots of concurrent things, finishing in
+ // different orders, and modifying spans after creation.
+ for n := 0; n < total; n++ {
+ i := n // keep local copy
+ odd := ((i % 2) != 0)
+ go func() {
+ if i%11 == 0 {
+ time.Sleep(time.Microsecond)
+ }
+
+ parent := tracer.newRootSpan("pylons.request", "pylons", "/")
+
+ tracer.newChildSpan("redis.command", parent).Finish()
+ child := tracer.newChildSpan("async.service", parent)
+
+ if i%13 == 0 {
+ time.Sleep(time.Microsecond)
+ }
+
+ if odd {
+ parent.SetTag("odd", "true")
+ parent.SetTag("oddity", 1)
+ parent.Finish()
+ } else {
+ child.SetTag("odd", "false")
+ child.SetTag("oddity", 0)
+ child.Finish()
+ }
+
+ if i%17 == 0 {
+ time.Sleep(time.Microsecond)
+ }
+
+ if odd {
+ child.Resource = "HGETALL"
+ child.SetTag("odd", "false")
+ child.SetTag("oddity", 0)
+ } else {
+ parent.Resource = "/" + strconv.Itoa(i) + ".html"
+ parent.SetTag("odd", "true")
+ parent.SetTag("oddity", 1)
+ }
+
+ if i%19 == 0 {
+ time.Sleep(time.Microsecond)
+ }
+
+ if odd {
+ child.Finish()
+ } else {
+ parent.Finish()
+ }
+
+ wg.Done()
+ }()
+ }
+
+ wg.Wait()
+
+ tracer.forceFlush()
+ traces := transport.Traces()
+ assert.Len(traces, total, "we should have exactly as many traces as expected")
+ for _, trace := range traces {
+ assert.Len(trace, 3, "each trace should have exactly 3 spans")
+ var parent, child, redis *span
+ for _, span := range trace {
+ switch span.Name {
+ case "pylons.request":
+ parent = span
+ case "async.service":
+ child = span
+ case "redis.command":
+ redis = span
+ default:
+ assert.Fail("unexpected span", span)
+ }
+ }
+ assert.NotNil(parent)
+ assert.NotNil(child)
+ assert.NotNil(redis)
+
+ assert.Equal(uint64(0), parent.ParentID)
+ assert.Equal(parent.TraceID, parent.SpanID)
+
+ assert.Equal(parent.TraceID, redis.TraceID)
+ assert.Equal(parent.TraceID, child.TraceID)
+
+ assert.Equal(parent.TraceID, redis.ParentID)
+ assert.Equal(parent.TraceID, child.ParentID)
+ }
+}
+
+// TestWorker is inherently flaky: it checks that the worker background
+// task really does flush traces on its own. Most other tests use (and
+// should use) forceFlush() to make sure traces reach the transport.
+// Here we simply wait for traces to show up, as a real program would.
+func TestWorker(t *testing.T) {
+ if testing.Short() {
+ return
+ }
+ assert := assert.New(t)
+
+ tracer, transport, stop := startTestTracer()
+ defer stop()
+
+ n := payloadQueueSize * 10 // put more traces than the chan size, on purpose
+ for i := 0; i < n; i++ {
+ root := tracer.newRootSpan("pylons.request", "pylons", "/")
+ child := tracer.newChildSpan("redis.command", root)
+ child.Finish()
+ root.Finish()
+ }
+
+ now := time.Now()
+ count := 0
+ for time.Now().Before(now.Add(time.Minute)) && count < payloadQueueSize {
+ nbTraces := len(transport.Traces())
+ if nbTraces > 0 {
+ t.Logf("popped %d traces", nbTraces)
+ }
+ count += nbTraces
+ time.Sleep(time.Millisecond)
+ }
+ // We only check that "enough" traces arrived. In practice many of them
+ // are dropped; a side effect of this test is that it also exercises the
+ // error path (errors are repeated, so they get aggregated).
+ if count < payloadQueueSize {
+ assert.Fail(fmt.Sprintf("timeout, not enough traces in buffer (%d/%d)", count, n))
+ }
+}
+
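+// newTracerChannels returns a bare tracer with only its payload buffer and
+// channels initialized, so that pushPayload, pushTrace and pushError can be
+// exercised in isolation (no worker goroutine is started).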
+func newTracerChannels() *tracer {
+ return &tracer{
+ payload: newPayload(),
+ payloadQueue: make(chan []*span, payloadQueueSize),
+ errorBuffer: make(chan error, errorBufferSize),
+ flushTracesReq: make(chan struct{}, 1),
+ flushErrorsReq: make(chan struct{}, 1),
+ }
+}
+
+func TestPushPayload(t *testing.T) {
+ tracer := newTracerChannels()
+ s := newBasicSpan("3MB")
+ s.Meta["key"] = strings.Repeat("X", payloadSizeLimit/2+10)
+
+ // half payload size reached, we have 1 item, no flush request
+ tracer.pushPayload([]*span{s})
+ assert.Equal(t, tracer.payload.itemCount(), 1)
+ assert.Len(t, tracer.flushTracesReq, 0)
+
+ // payload size exceeded, we have 2 items and a flush request
+ tracer.pushPayload([]*span{s})
+ assert.Equal(t, tracer.payload.itemCount(), 2)
+ assert.Len(t, tracer.flushTracesReq, 1)
+}
+
+func TestPushTrace(t *testing.T) {
+ assert := assert.New(t)
+
+ tracer := newTracerChannels()
+ trace := []*span{
+ &span{
+ Name: "pylons.request",
+ Service: "pylons",
+ Resource: "/",
+ },
+ &span{
+ Name: "pylons.request",
+ Service: "pylons",
+ Resource: "/foo",
+ },
+ }
+ tracer.pushTrace(trace)
+
+ assert.Len(tracer.payloadQueue, 1)
+ assert.Len(tracer.flushTracesReq, 0, "no flush requested yet")
+
+ t0 := <-tracer.payloadQueue
+ assert.Equal(trace, t0)
+
+ many := payloadQueueSize + 2
+ for i := 0; i < many; i++ {
+ tracer.pushTrace(make([]*span, i))
+ }
+ assert.Len(tracer.payloadQueue, payloadQueueSize)
+ assert.Len(tracer.errorBuffer, 2)
+}
+
+func TestPushErr(t *testing.T) {
+ assert := assert.New(t)
+
+ tracer := newTracerChannels()
+
+ err := fmt.Errorf("ooops")
+ tracer.pushError(err)
+
+ assert.Len(tracer.errorBuffer, 1, "there should be data in the channel")
+ assert.Len(tracer.flushErrorsReq, 0, "no flush requested yet")
+
+ pushed := <-tracer.errorBuffer
+ assert.Equal(err, pushed)
+
+ many := errorBufferSize/2 + 1
+ for i := 0; i < many; i++ {
+ tracer.pushError(fmt.Errorf("err %d", i))
+ }
+ assert.Len(tracer.errorBuffer, many, "all errors should be in the channel, not yet blocking")
+ assert.Len(tracer.flushErrorsReq, 1, "an error flush should have been requested")
+ for i := 0; i < cap(tracer.errorBuffer); i++ {
+ tracer.pushError(fmt.Errorf("err %d", i))
+ }
+ // reaching this point means pushError does not block even when the buffer is full, which is exactly what we want to verify
+}
+
+// BenchmarkConcurrentTracing tests the performance of spawning a lot of
+// goroutines where each one creates a trace with a parent and a child.
+func BenchmarkConcurrentTracing(b *testing.B) {
+ tracer, _, stop := startTestTracer(WithSampler(NewRateSampler(0)))
+ defer stop()
+
+ b.ResetTimer()
+ for n := 0; n < b.N; n++ {
+ go func() {
+ parent := tracer.StartSpan("pylons.request", ServiceName("pylons"), ResourceName("/"))
+ defer parent.Finish()
+
+ for i := 0; i < 10; i++ {
+ tracer.StartSpan("redis.command", ChildOf(parent.Context())).Finish()
+ }
+ }()
+ }
+}
+
+// BenchmarkTracerAddSpans tests the performance of creating and finishing a root
+// span. It should include the encoding overhead.
+func BenchmarkTracerAddSpans(b *testing.B) {
+ tracer, _, stop := startTestTracer(WithSampler(NewRateSampler(0)))
+ defer stop()
+
+ for n := 0; n < b.N; n++ {
+ span := tracer.StartSpan("pylons.request", ServiceName("pylons"), ResourceName("/"))
+ span.Finish()
+ }
+}
+
+// startTestTracer returns a tracer using a dummyTransport, the transport
+// itself, and a stop function that restores the global no-op tracer.
+func startTestTracer(opts ...StartOption) (*tracer, *dummyTransport, func()) {
+ transport := newDummyTransport()
+ o := append([]StartOption{withTransport(transport)}, opts...)
+ tracer := newTracer(o...)
+ tracer.syncPush = make(chan struct{})
+ internal.SetGlobalTracer(tracer)
+ return tracer, transport, func() {
+ internal.SetGlobalTracer(&internal.NoopTracer{})
+ tracer.Stop()
+ }
+}
+
+// dummyTransport is a mock transport that decodes real msgpack payloads and records the traces it receives.
+type dummyTransport struct {
+ sync.RWMutex
+ traces spanLists
+}
+
+func newDummyTransport() *dummyTransport {
+ return &dummyTransport{traces: spanLists{}}
+}
+
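+// send decodes the payload and appends the resulting traces under lock.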
+func (t *dummyTransport) send(p *payload) error {
+ traces, err := decode(p)
+ if err != nil {
+ return err
+ }
+ t.Lock()
+ t.traces = append(t.traces, traces...)
+ t.Unlock()
+ return nil
+}
+
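+// decode msgpack-decodes a payload back into its span lists.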
+func decode(p *payload) (spanLists, error) {
+ var traces spanLists
+ err := msgp.Decode(p, &traces)
+ return traces, err
+}
+
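+// encode pushes each trace into a fresh payload, msgpack-encoding them.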
+func encode(traces [][]*span) (*payload, error) {
+ p := newPayload()
+ for _, t := range traces {
+ if err := p.push(t); err != nil {
+ return p, err
+ }
+ }
+ return p, nil
+}
+
+func (t *dummyTransport) Traces() spanLists {
+ t.Lock()
+ defer t.Unlock()
+
+ traces := t.traces
+ t.traces = spanLists{}
+ return traces
+}
+
+// comparePayloadSpans compares two spans, either of which may have been
+// read back from the msgpack payload. In that case the private fields are
+// not available and empty maps (Meta & Metrics) decode as nil. This
+// function normalizes those cases before comparing.
+func comparePayloadSpans(t *testing.T, a, b *span) {
+ assert.Equal(t, cpspan(a), cpspan(b))
+}
+
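+// cpspan nils out empty maps on the given span and returns a copy holding
+// only the exported fields that survive a msgpack round trip.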
+func cpspan(s *span) *span {
+ if len(s.Metrics) == 0 {
+ s.Metrics = nil
+ }
+ if len(s.Meta) == 0 {
+ s.Meta = nil
+ }
+ return &span{
+ Name: s.Name,
+ Service: s.Service,
+ Resource: s.Resource,
+ Type: s.Type,
+ Start: s.Start,
+ Duration: s.Duration,
+ Meta: s.Meta,
+ Metrics: s.Metrics,
+ SpanID: s.SpanID,
+ TraceID: s.TraceID,
+ ParentID: s.ParentID,
+ Error: s.Error,
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/transport.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/transport.go
new file mode 100644
index 00000000..6bbceb69
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/transport.go
@@ -0,0 +1,129 @@
+package tracer
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var tracerVersion = "v1.2"
+
+const (
+ defaultHostname = "localhost"
+ defaultPort = "8126"
+ defaultAddress = defaultHostname + ":" + defaultPort
+ defaultHTTPTimeout = time.Second // timeout for the HTTP request before the send is abandoned
+ traceCountHeader = "X-Datadog-Trace-Count" // header containing the number of traces in the payload
+)
+
+// Transport is an interface for span submission to the agent.
+type transport interface {
+ send(p *payload) error
+}
+
+// newTransport returns a new Transport implementation that sends traces to a
+// trace agent running on the given hostname and port. If the zero values for
+// hostname and port are provided, the default values will be used ("localhost"
+// for hostname, and "8126" for port).
+//
+// In general, using this method is only necessary if you have a trace agent
+// running on a non-default port or if it's located on another machine.
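+//
+// A minimal usage sketch (illustrative only; the agent host below is
+// hypothetical):
+//
+//	t := newTransport("trace-agent.internal:8126")
+//	err := t.send(newPayload()) // payloads are normally filled and sent by the tracer worker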
+func newTransport(addr string) transport {
+ return newHTTPTransport(addr)
+}
+
+// newDefaultTransport returns the default transport for this tracing client
+func newDefaultTransport() transport {
+ return newHTTPTransport(defaultAddress)
+}
+
+type httpTransport struct {
+ traceURL string // the delivery URL for traces
+ client *http.Client // the HTTP client used in the POST
+ headers map[string]string // the Transport headers
+}
+
+// newHTTPTransport returns an httpTransport for the given endpoint
+func newHTTPTransport(addr string) *httpTransport {
+ // default headers identifying the client to the trace agent
+ defaultHeaders := map[string]string{
+ "Datadog-Meta-Lang": "go",
+ "Datadog-Meta-Lang-Version": strings.TrimPrefix(runtime.Version(), "go"),
+ "Datadog-Meta-Lang-Interpreter": runtime.Compiler + "-" + runtime.GOARCH + "-" + runtime.GOOS,
+ "Datadog-Meta-Tracer-Version": tracerVersion,
+ "Content-Type": "application/msgpack",
+ }
+ return &httpTransport{
+ traceURL: fmt.Sprintf("http://%s/v0.3/traces", resolveAddr(addr)),
+ client: &http.Client{
+ // We copy the transport to avoid using the default one, as it might be
+ // augmented with tracing and we don't want these calls to be recorded.
+ // See https://golang.org/pkg/net/http/#DefaultTransport .
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }).DialContext,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ },
+ Timeout: defaultHTTPTimeout,
+ },
+ headers: defaultHeaders,
+ }
+}
+
+func (t *httpTransport) send(p *payload) error {
+ // prepare the request and send the payload
+ req, err := http.NewRequest("POST", t.traceURL, p)
+ if err != nil {
+ return fmt.Errorf("cannot create http request: %v", err)
+ }
+ for header, value := range t.headers {
+ req.Header.Set(header, value)
+ }
+ req.Header.Set(traceCountHeader, strconv.Itoa(p.itemCount()))
+ req.Header.Set("Content-Length", strconv.Itoa(p.size()))
+ response, err := t.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer response.Body.Close()
+ if code := response.StatusCode; code >= 400 {
+ // error, check the body for context information and
+ // return a nice error.
+ msg := make([]byte, 1000)
+ n, _ := response.Body.Read(msg)
+ txt := http.StatusText(code)
+ if n > 0 {
+ return fmt.Errorf("%s (Status: %s)", msg[:n], txt)
+ }
+ return fmt.Errorf("%s", txt)
+ }
+ return nil
+}
+
+// resolveAddr resolves the given agent address and fills in any missing host
+// and port using the defaults.
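+//
+// For example (cases mirrored in TestResolveAddr):
+//
+//	resolveAddr("host")        // "host:8126"
+//	resolveAddr(":1111")       // "localhost:1111"
+//	resolveAddr("")            // "localhost:8126"
+//	resolveAddr("custom:1234") // "custom:1234"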
+func resolveAddr(addr string) string {
+ host, port, err := net.SplitHostPort(addr)
+ if err != nil {
+ // no port in addr
+ host = addr
+ }
+ if host == "" {
+ host = defaultHostname
+ }
+ if port == "" {
+ port = defaultPort
+ }
+ return fmt.Sprintf("%s:%s", host, port)
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/transport_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/transport_test.go
new file mode 100644
index 00000000..5cbb1dc7
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/transport_test.go
@@ -0,0 +1,163 @@
+package tracer
+
+import (
+ "fmt"
+ "log"
+ "net"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// integration indicates if the test suite should run integration tests.
+var integration bool
+
+func TestMain(m *testing.M) {
+ _, integration = os.LookupEnv("INTEGRATION")
+ os.Exit(m.Run())
+}
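+
+// Setting any value for the INTEGRATION environment variable (e.g.
+// INTEGRATION=1 go test) enables the integration tests below.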
+
+// getTestSpan returns a span with all its basic fields populated
+func getTestSpan() *span {
+ return &span{
+ TraceID: 42,
+ SpanID: 52,
+ ParentID: 42,
+ Type: "web",
+ Service: "high.throughput",
+ Name: "sending.events",
+ Resource: "SEND /data",
+ Start: 1481215590883401105,
+ Duration: 1000000000,
+ Meta: map[string]string{"http.host": "192.168.0.1"},
+ Metrics: map[string]float64{"http.monitor": 41.99},
+ }
+}
+
+// getTestTrace returns traceN traces, each composed of size spans.
+func getTestTrace(traceN, size int) [][]*span {
+ var traces [][]*span
+
+ for i := 0; i < traceN; i++ {
+ trace := []*span{}
+ for j := 0; j < size; j++ {
+ trace = append(trace, getTestSpan())
+ }
+ traces = append(traces, trace)
+ }
+ return traces
+}
+
+type mockDatadogAPIHandler struct {
+ t *testing.T
+}
+
+func (m mockDatadogAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ assert := assert.New(m.t)
+
+ header := r.Header.Get("X-Datadog-Trace-Count")
+ assert.NotEqual("", header, "X-Datadog-Trace-Count header should be here")
+ count, err := strconv.Atoi(header)
+ assert.Nil(err, "header should be an int")
+ assert.NotEqual(0, count, "there should be a non-zero amount of traces")
+}
+
+func mockDatadogAPINewServer(t *testing.T) *httptest.Server {
+ handler := mockDatadogAPIHandler{t: t}
+ server := httptest.NewServer(handler)
+ return server
+}
+
+func TestTracesAgentIntegration(t *testing.T) {
+ if !integration {
+ t.Skip("to enable integration test, set the INTEGRATION environment variable")
+ }
+ assert := assert.New(t)
+
+ testCases := []struct {
+ payload [][]*span
+ }{
+ {getTestTrace(1, 1)},
+ {getTestTrace(10, 1)},
+ {getTestTrace(1, 10)},
+ {getTestTrace(10, 10)},
+ }
+
+ for _, tc := range testCases {
+ transport := newHTTPTransport(defaultAddress)
+ p, err := encode(tc.payload)
+ assert.NoError(err)
+ err = transport.send(p)
+ assert.NoError(err)
+ }
+}
+
+func TestResolveAddr(t *testing.T) {
+ for _, tt := range []struct {
+ in, out string
+ }{
+ {"host", fmt.Sprintf("host:%s", defaultPort)},
+ {"www.my-address.com", fmt.Sprintf("www.my-address.com:%s", defaultPort)},
+ {"localhost", fmt.Sprintf("localhost:%s", defaultPort)},
+ {":1111", fmt.Sprintf("%s:1111", defaultHostname)},
+ {"", defaultAddress},
+ {"custom:1234", "custom:1234"},
+ } {
+ t.Run(tt.in, func(t *testing.T) {
+ assert.Equal(t, resolveAddr(tt.in), tt.out)
+ })
+ }
+}
+
+func TestTransportResponseError(t *testing.T) {
+ assert := assert.New(t)
+ ln, err := net.Listen("tcp4", ":0")
+ assert.Nil(err)
+ go http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusBadRequest)
+ w.Write([]byte(strings.Repeat("X", 1002)))
+ }))
+ defer ln.Close()
+ addr := ln.Addr().String()
+ log.Println(addr)
+ transport := newHTTPTransport(addr)
+ err = transport.send(newPayload())
+ want := fmt.Sprintf("%s (Status: Bad Request)", strings.Repeat("X", 1000))
+ assert.Equal(want, err.Error())
+}
+
+func TestTraceCountHeader(t *testing.T) {
+ assert := assert.New(t)
+
+ testCases := []struct {
+ payload [][]*span
+ }{
+ {getTestTrace(1, 1)},
+ {getTestTrace(10, 1)},
+ {getTestTrace(100, 10)},
+ }
+
+ receiver := mockDatadogAPINewServer(t)
+ parsedURL, err := url.Parse(receiver.URL)
+ assert.NoError(err)
+ host := parsedURL.Host
+ _, port, err := net.SplitHostPort(host)
+ assert.Nil(err)
+ assert.NotEmpty(port, "port should be given, as it's chosen randomly")
+ for _, tc := range testCases {
+ transport := newHTTPTransport(host)
+ p, err := encode(tc.payload)
+ assert.NoError(err)
+ err = transport.send(p)
+ assert.NoError(err)
+ }
+
+ receiver.Close()
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/util.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/util.go
new file mode 100644
index 00000000..58f4eae7
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/util.go
@@ -0,0 +1,32 @@
+package tracer
+
+// toFloat64 attempts to convert value into a float64. If it succeeds it returns
+// the value and true, otherwise 0 and false.
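+//
+// For example, toFloat64(uint32(7)) yields (7, true), while toFloat64("7")
+// yields (0, false).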
+func toFloat64(value interface{}) (f float64, ok bool) {
+ switch i := value.(type) {
+ case byte:
+ return float64(i), true
+ case float32:
+ return float64(i), true
+ case float64:
+ return i, true
+ case int:
+ return float64(i), true
+ case int8:
+ return float64(i), true
+ case int16:
+ return float64(i), true
+ case int32:
+ return float64(i), true
+ case int64:
+ return float64(i), true
+ case uint:
+ return float64(i), true
+ case uint16:
+ return float64(i), true
+ case uint32:
+ return float64(i), true
+ case uint64:
+ return float64(i), true
+ default:
+ return 0, false
+ }
+}
diff --git a/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/util_test.go b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/util_test.go
new file mode 100644
index 00000000..38c0fba3
--- /dev/null
+++ b/vendor/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer/util_test.go
@@ -0,0 +1,38 @@
+package tracer
+
+import (
+ "fmt"
+ "testing"
+)
+
+func TestToFloat64(t *testing.T) {
+ for i, tt := range [...]struct {
+ value interface{}
+ f float64
+ ok bool
+ }{
+ 0: {1, 1, true},
+ 1: {byte(1), 1, true},
+ 2: {int(1), 1, true},
+ 3: {int16(1), 1, true},
+ 4: {int32(1), 1, true},
+ 5: {int64(1), 1, true},
+ 6: {uint(1), 1, true},
+ 7: {uint16(1), 1, true},
+ 8: {uint32(1), 1, true},
+ 9: {uint64(1), 1, true},
+ 10: {"a", 0, false},
+ 11: {float32(1.25), 1.25, true},
+ 12: {float64(1.25), 1.25, true},
+ 13: {int8(1), 1, true}, // covers the int8 case added in util.go
+ } {
+ t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
+ f, ok := toFloat64(tt.value)
+ if ok != tt.ok {
+ t.Fatalf("expected ok: %t", tt.ok)
+ }
+ if f != tt.f {
+ t.Fatalf("expected: %#v, got: %#v", tt.f, f)
+ }
+ })
+ }
+}