Add Vat init transformer
This commit is contained in:
parent
9abe3ffa68
commit
ff110592bd
30
Gopkg.lock
generated
30
Gopkg.lock
generated
@ -225,6 +225,12 @@
|
||||
revision = "acdc4509485b587f5e675510c4f2c63e90ff68a8"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/philhofer/fwd"
|
||||
packages = ["."]
|
||||
revision = "bb6d471dc95d4fe11e432687f8b70ff496cf3136"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/rjeczalik/notify"
|
||||
packages = ["."]
|
||||
@ -295,6 +301,12 @@
|
||||
]
|
||||
revision = "adf24ef3f94bd13ec4163060b21a5678f22b429b"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/tinylib/msgp"
|
||||
packages = ["msgp"]
|
||||
revision = "b2b6a672cf1e5b90748f79b8b81fc8c5cf0571a1"
|
||||
version = "1.0.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
@ -326,7 +338,10 @@
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sys"
|
||||
packages = ["unix"]
|
||||
packages = [
|
||||
"unix",
|
||||
"windows"
|
||||
]
|
||||
revision = "a0f4589a76f1f83070cb9e5613809e1d07b97c13"
|
||||
|
||||
[[projects]]
|
||||
@ -366,6 +381,17 @@
|
||||
]
|
||||
revision = "8cc4e8a6f4841aa92a8683fca47bc5d64b58875b"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/DataDog/dd-trace-go.v1"
|
||||
packages = [
|
||||
"ddtrace",
|
||||
"ddtrace/ext",
|
||||
"ddtrace/internal",
|
||||
"ddtrace/tracer"
|
||||
]
|
||||
revision = "8efc9a798f2db99a9e00c7e57f45fc13611214e0"
|
||||
version = "v1.2.3"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/fatih/set.v0"
|
||||
packages = ["."]
|
||||
@ -393,6 +419,6 @@
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "2122d1d04fbc67daf75d751e144926e73f0fd838d51711cccf9604eabb46b95b"
|
||||
inputs-digest = "7a913c984013e026536456baa75bd95e261bbb0d294b7de77785819ac182b465"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
|
1
db/migrations/1536267596_create_vat_init_table.down.sql
Normal file
1
db/migrations/1536267596_create_vat_init_table.down.sql
Normal file
@ -0,0 +1 @@
|
||||
DROP TABLE maker.vat_init;
|
8
db/migrations/1536267596_create_vat_init_table.up.sql
Normal file
8
db/migrations/1536267596_create_vat_init_table.up.sql
Normal file
@ -0,0 +1,8 @@
|
||||
CREATE TABLE maker.vat_init (
|
||||
id SERIAL PRIMARY KEY,
|
||||
header_id INTEGER NOT NULL REFERENCES headers (id) ON DELETE CASCADE,
|
||||
ilk TEXT,
|
||||
tx_idx INTEGER NOT NUll,
|
||||
raw_log JSONB,
|
||||
UNIQUE (header_id, tx_idx)
|
||||
);
|
@ -367,6 +367,39 @@ CREATE SEQUENCE maker.tend_db_id_seq
|
||||
ALTER SEQUENCE maker.tend_db_id_seq OWNED BY maker.tend.db_id;
|
||||
|
||||
|
||||
--
|
||||
-- Name: vat_init; Type: TABLE; Schema: maker; Owner: -
|
||||
--
|
||||
|
||||
CREATE TABLE maker.vat_init (
|
||||
id integer NOT NULL,
|
||||
header_id integer NOT NULL,
|
||||
ilk text,
|
||||
tx_idx integer NOT NULL,
|
||||
raw_log jsonb
|
||||
);
|
||||
|
||||
|
||||
--
|
||||
-- Name: vat_init_id_seq; Type: SEQUENCE; Schema: maker; Owner: -
|
||||
--
|
||||
|
||||
CREATE SEQUENCE maker.vat_init_id_seq
|
||||
AS integer
|
||||
START WITH 1
|
||||
INCREMENT BY 1
|
||||
NO MINVALUE
|
||||
NO MAXVALUE
|
||||
CACHE 1;
|
||||
|
||||
|
||||
--
|
||||
-- Name: vat_init_id_seq; Type: SEQUENCE OWNED BY; Schema: maker; Owner: -
|
||||
--
|
||||
|
||||
ALTER SEQUENCE maker.vat_init_id_seq OWNED BY maker.vat_init.id;
|
||||
|
||||
|
||||
--
|
||||
-- Name: logs; Type: TABLE; Schema: public; Owner: -
|
||||
--
|
||||
@ -802,6 +835,13 @@ ALTER TABLE ONLY maker.price_feeds ALTER COLUMN id SET DEFAULT nextval('maker.pr
|
||||
ALTER TABLE ONLY maker.tend ALTER COLUMN db_id SET DEFAULT nextval('maker.tend_db_id_seq'::regclass);
|
||||
|
||||
|
||||
--
|
||||
-- Name: vat_init id; Type: DEFAULT; Schema: maker; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY maker.vat_init ALTER COLUMN id SET DEFAULT nextval('maker.vat_init_id_seq'::regclass);
|
||||
|
||||
|
||||
--
|
||||
-- Name: blocks id; Type: DEFAULT; Schema: public; Owner: -
|
||||
--
|
||||
@ -1009,6 +1049,22 @@ ALTER TABLE ONLY maker.tend
|
||||
ADD CONSTRAINT tend_pkey PRIMARY KEY (db_id);
|
||||
|
||||
|
||||
--
|
||||
-- Name: vat_init vat_init_header_id_tx_idx_key; Type: CONSTRAINT; Schema: maker; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY maker.vat_init
|
||||
ADD CONSTRAINT vat_init_header_id_tx_idx_key UNIQUE (header_id, tx_idx);
|
||||
|
||||
|
||||
--
|
||||
-- Name: vat_init vat_init_pkey; Type: CONSTRAINT; Schema: maker; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY maker.vat_init
|
||||
ADD CONSTRAINT vat_init_pkey PRIMARY KEY (id);
|
||||
|
||||
|
||||
--
|
||||
-- Name: blocks blocks_pkey; Type: CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
@ -1212,6 +1268,14 @@ ALTER TABLE ONLY maker.tend
|
||||
ADD CONSTRAINT tend_header_id_fkey FOREIGN KEY (header_id) REFERENCES public.headers(id) ON DELETE CASCADE;
|
||||
|
||||
|
||||
--
|
||||
-- Name: vat_init vat_init_header_id_fkey; Type: FK CONSTRAINT; Schema: maker; Owner: -
|
||||
--
|
||||
|
||||
ALTER TABLE ONLY maker.vat_init
|
||||
ADD CONSTRAINT vat_init_header_id_fkey FOREIGN KEY (header_id) REFERENCES public.headers(id) ON DELETE CASCADE;
|
||||
|
||||
|
||||
--
|
||||
-- Name: transactions blocks_fk; Type: FK CONSTRAINT; Schema: public; Owner: -
|
||||
--
|
||||
|
2
main.go
2
main.go
@ -3,8 +3,8 @@ package main
|
||||
import (
|
||||
"github.com/vulcanize/vulcanizedb/cmd"
|
||||
|
||||
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
|
||||
"github.com/spf13/viper"
|
||||
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
@ -28,8 +28,7 @@ type Converter interface {
|
||||
ToModel(flipKick FrobEntity) (FrobModel, error)
|
||||
}
|
||||
|
||||
type FrobConverter struct {
|
||||
}
|
||||
type FrobConverter struct{}
|
||||
|
||||
func (FrobConverter) ToEntity(contractAddress string, contractAbi string, ethLog types.Log) (FrobEntity, error) {
|
||||
entity := FrobEntity{}
|
||||
|
@ -16,10 +16,6 @@ var _ = Describe("", func() {
|
||||
model, err := converter.ToModel(test_data.PitAddress, shared.PitABI, test_data.EthPitFileDebtCeilingLog)
|
||||
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(model.What).To(Equal(test_data.PitFileDebtCeilingModel.What))
|
||||
Expect(model.Data).To(Equal(test_data.PitFileDebtCeilingModel.Data))
|
||||
Expect(model.TransactionIndex).To(Equal(test_data.PitFileDebtCeilingModel.TransactionIndex))
|
||||
Expect(model.Raw).To(Equal(test_data.PitFileDebtCeilingModel.Raw))
|
||||
Expect(model).To(Equal(test_data.PitFileDebtCeilingModel))
|
||||
})
|
||||
})
|
||||
|
@ -19,15 +19,18 @@ var (
|
||||
dentMethod = "dent(uint256,uint256,uint256)"
|
||||
flipKickMethod = "Kick(uint256,uint256,uint256,address,uint48,bytes32,uint256)"
|
||||
frobMethod = "Frob(bytes32,bytes32,uint256,uint256,int256,int256,uint256)"
|
||||
logValueMethod = "LogValue(bytes32)"
|
||||
pitFileDebtCeilingMethod = "file(bytes32,uint256)"
|
||||
pitFileIlkMethod = "file(bytes32,bytes32,uint256)"
|
||||
pitFileStabilityFeeMethod = "file(bytes32,address)"
|
||||
tendMethod = "tend(uint256,uint256,uint256)"
|
||||
vatInitMethod = "init(bytes32)"
|
||||
|
||||
CatABI = "[{\"constant\":true,\"inputs\":[],\"name\":\"vat\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"vow\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"flips\",\"outputs\":[{\"name\":\"ilk\",\"type\":\"bytes32\"},{\"name\":\"lad\",\"type\":\"bytes32\"},{\"name\":\"ink\",\"type\":\"uint256\"},{\"name\":\"tab\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"nflip\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"live\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"\",\"type\":\"address\"}],\"name\":\"wards\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[{\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"ilks\",\"outputs\":[{\"name\":\"flip\",\"type\":\"address\"},{\"name\":\"chop\",\"type\":\"uint256\"},{\"name\":\"lump\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"constant\":true,\"inputs\":[],\"name\":\"pit\",\"outputs\":[{\"name\":\"\",\"type\":\"address\"}],\"payable\":false,\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"name\":\"vat_\",\"type\":\"address\"},{\"name\":\"pit_\",\"type\":\"address\"},{\"name\":\"vow_\",\"type\":\"address\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"name\":\"ilk\",\"type\":\"b
ytes32\"},{\"indexed\":true,\"name\":\"lad\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"ink\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"art\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"tab\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"flip\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"iArt\",\"type\":\"uint256\"}],\"name\":\"Bite\",\"type\":\"event\"},{\"anonymous\":true,\"inputs\":[{\"indexed\":true,\"name\":\"sig\",\"type\":\"bytes4\"},{\"indexed\":true,\"name\":\"guy\",\"type\":\"address\"},{\"indexed\":true,\"name\":\"foo\",\"type\":\"bytes32\"},{\"indexed\":true,\"name\":\"bar\",\"type\":\"bytes32\"},{\"indexed\":false,\"name\":\"wad\",\"type\":\"uint256\"},{\"indexed\":false,\"name\":\"fax\",\"type\":\"bytes\"}],\"name\":\"LogNote\",\"type\":\"event\"},{\"constant\":false,\"inputs\":[{\"name\":\"guy\",\"type\":\"address\"}],\"name\":\"rely\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"guy\",\"type\":\"address\"}],\"name\":\"deny\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"ilk\",\"type\":\"bytes32\"},{\"name\":\"what\",\"type\":\"bytes32\"},{\"name\":\"data\",\"type\":\"uint256\"}],\"name\":\"file\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"ilk\",\"type\":\"bytes32\"},{\"name\":\"what\",\"type\":\"bytes32\"},{\"name\":\"flip\",\"type\":\"address\"}],\"name\":\"file\",\"outputs\":[],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"name\":\"ilk\",\"type\":\"bytes32\"},{\"name\":\"lad\",\"type\":\"bytes32\"}],\"name\":\"bite\",\"outputs\":[{\"name\":\"\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"constant\":false,\"inputs\":[{\"na
me\":\"n\",\"type\":\"uint256\"},{\"name\":\"wad\",\"type\":\"uint256\"}],\"name\":\"flip\",\"outputs\":[{\"name\":\"id\",\"type\":\"uint256\"}],\"payable\":false,\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]"
|
||||
FlipperABI = `[{"constant":true,"inputs":[{"name":"","type":"uint256"}],"name":"bids","outputs":[{"name":"bid","type":"uint256"},{"name":"lot","type":"uint256"},{"name":"guy","type":"address"},{"name":"tic","type":"uint48"},{"name":"end","type":"uint48"},{"name":"urn","type":"bytes32"},{"name":"gal","type":"address"},{"name":"tab","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0x4423c5f1"},{"constant":true,"inputs":[],"name":"ttl","outputs":[{"name":"","type":"uint48"}],"payable":false,"stateMutability":"view","type":"function","signature":"0x4e8b1dd5"},{"constant":true,"inputs":[],"name":"gem","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function","signature":"0x7bd2bea7"},{"constant":true,"inputs":[],"name":"beg","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0x7d780d82"},{"constant":true,"inputs":[],"name":"tau","outputs":[{"name":"","type":"uint48"}],"payable":false,"stateMutability":"view","type":"function","signature":"0xcfc4af55"},{"constant":true,"inputs":[],"name":"kicks","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0xcfdd3302"},{"constant":true,"inputs":[],"name":"dai","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function","signature":"0xf4b9fa75"},{"inputs":[{"name":"dai_","type":"address"},{"name":"gem_","type":"address"}],"payable":false,"stateMutability":"nonpayable","type":"constructor","signature":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"name":"id","type":"uint256"},{"indexed":false,"name":"lot","type":"uint256"},{"indexed":false,"name":"bid","type":"uint256"},{"indexed":false,"name":"gal","type":"address"},{"indexed":false,"name":"end","type":"uint48"},{"indexed":true,"name":"urn","type":"bytes32"},{"indexed":false,"name":"tab","type":"uint256"}],"name":"Kick","type"
:"event","signature":"0xbac86238bdba81d21995024470425ecb370078fa62b7271b90cf28cbd1e3e87e"},{"anonymous":true,"inputs":[{"indexed":true,"name":"sig","type":"bytes4"},{"indexed":true,"name":"guy","type":"address"},{"indexed":true,"name":"foo","type":"bytes32"},{"indexed":true,"name":"bar","type":"bytes32"},{"indexed":false,"name":"wad","type":"uint256"},{"indexed":false,"name":"fax","type":"bytes"}],"name":"LogNote","type":"event","signature":"0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31"},{"constant":true,"inputs":[],"name":"era","outputs":[{"name":"","type":"uint48"}],"payable":false,"stateMutability":"view","type":"function","signature":"0x143e55e0"},{"constant":false,"inputs":[{"name":"urn","type":"bytes32"},{"name":"gal","type":"address"},{"name":"tab","type":"uint256"},{"name":"lot","type":"uint256"},{"name":"bid","type":"uint256"}],"name":"kick","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0xeae19d9e"},{"constant":false,"inputs":[{"name":"id","type":"uint256"}],"name":"tick","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0xfc7b6aee"},{"constant":false,"inputs":[{"name":"id","type":"uint256"},{"name":"lot","type":"uint256"},{"name":"bid","type":"uint256"}],"name":"tend","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x4b43ed12"},{"constant":false,"inputs":[{"name":"id","type":"uint256"},{"name":"lot","type":"uint256"},{"name":"bid","type":"uint256"}],"name":"dent","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x5ff3a382"},{"constant":false,"inputs":[{"name":"id","type":"uint256"}],"name":"deal","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0xc959c42b"}]`
|
||||
MedianizerABI = `[{"constant":false,"inputs":[{"name":"owner_","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"","type":"bytes32"}],"name":"poke","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[],"name":"poke","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"compute","outputs":[{"name":"","type":"bytes32"},{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"wat","type":"address"}],"name":"set","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"wat","type":"address"}],"name":"unset","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"indexes","outputs":[{"name":"","type":"bytes12"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"next","outputs":[{"name":"","type":"bytes12"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"read","outputs":[{"name":"","type":"bytes32"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"peek","outputs":[{"name":"","type":"bytes32"},{"name":"","type":"bool"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"","type":"bytes12"}],"name":"values","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"min_","type":"uint96"}],"name":"setMin","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"authority_","type":"address"}],"name":"setAuthority","outputs":[],
"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[],"name":"void","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"pos","type":"bytes12"},{"name":"wat","type":"address"}],"name":"set","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"authority","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"pos","type":"bytes12"}],"name":"unset","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"next_","type":"bytes12"}],"name":"setNext","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"min","outputs":[{"name":"","type":"uint96"}],"payable":false,"stateMutability":"view","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"val","type":"bytes32"}],"name":"LogValue","type":"event"},{"anonymous":true,"inputs":[{"indexed":true,"name":"sig","type":"bytes4"},{"indexed":true,"name":"guy","type":"address"},{"indexed":true,"name":"foo","type":"bytes32"},{"indexed":true,"name":"bar","type":"bytes32"},{"indexed":false,"name":"wad","type":"uint256"},{"indexed":false,"name":"fax","type":"bytes"}],"name":"LogNote","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"authority","type":"address"}],"name":"LogSetAuthority","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"owner","type":"address"}],"name":"LogSetOwner","type":"event"}]]`
|
||||
PitABI = `[{"constant":true,"inputs":[],"name":"vat","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function","signature":"0x36569e77"},{"constant":true,"inputs":[],"name":"live","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0x957aa58c"},{"constant":true,"inputs":[],"name":"drip","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function","signature":"0x9f678cca"},{"constant":true,"inputs":[],"name":"Line","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0xbabe8a3f"},{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"wards","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0xbf353dbb"},{"constant":true,"inputs":[{"name":"","type":"bytes32"}],"name":"ilks","outputs":[{"name":"spot","type":"uint256"},{"name":"line","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0xd9638d36"},{"inputs":[{"name":"vat_","type":"address"}],"payable":false,"stateMutability":"nonpayable","type":"constructor","signature":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"name":"ilk","type":"bytes32"},{"indexed":true,"name":"urn","type":"bytes32"},{"indexed":false,"name":"ink","type":"uint256"},{"indexed":false,"name":"art","type":"uint256"},{"indexed":false,"name":"dink","type":"int256"},{"indexed":false,"name":"dart","type":"int256"},{"indexed":false,"name":"iArt","type":"uint256"}],"name":"Frob","type":"event","signature":"0xb2afa28318bcc689926b52835d844de174ef8de97e982a85c0199d584920791b"},{"anonymous":true,"inputs":[{"indexed":true,"name":"sig","type":"bytes4"},{"indexed":true,"name":"guy","type":"address"},{"indexed":true,"name":"foo","type":"bytes32"},{"indexed":true,"name":"bar","type":"bytes32"},{"indexed":false,"name":"wad","type":"uint256"},{"i
ndexed":false,"name":"fax","type":"bytes"}],"name":"LogNote","type":"event","signature":"0x644843f351d3fba4abcd60109eaff9f54bac8fb8ccf0bab941009c21df21cf31"},{"constant":false,"inputs":[{"name":"guy","type":"address"}],"name":"rely","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x65fae35e"},{"constant":false,"inputs":[{"name":"guy","type":"address"}],"name":"deny","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x9c52a7f1"},{"constant":false,"inputs":[{"name":"ilk","type":"bytes32"},{"name":"what","type":"bytes32"},{"name":"data","type":"uint256"}],"name":"file","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x1a0b287e"},{"constant":false,"inputs":[{"name":"what","type":"bytes32"},{"name":"data","type":"uint256"}],"name":"file","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x29ae8114"},{"constant":false,"inputs":[{"name":"what","type":"bytes32"},{"name":"data","type":"address"}],"name":"file","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0xd4e8be83"},{"constant":false,"inputs":[{"name":"ilk","type":"bytes32"},{"name":"dink","type":"int256"},{"name":"dart","type":"int256"}],"name":"frob","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x5a984ded"}]`
|
||||
VatABI = `[{"constant":true,"inputs":[],"name":"debt","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0x0dca59c1"},{"constant":true,"inputs":[{"name":"","type":"bytes32"},{"name":"","type":"bytes32"}],"name":"urns","outputs":[{"name":"ink","type":"uint256"},{"name":"art","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0x26e27482"},{"constant":true,"inputs":[],"name":"vice","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0x2d61a355"},{"constant":true,"inputs":[{"name":"","type":"bytes32"}],"name":"sin","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0xa60f1d3e"},{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"wards","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0xbf353dbb"},{"constant":true,"inputs":[{"name":"","type":"bytes32"},{"name":"","type":"bytes32"}],"name":"gem","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0xc0912683"},{"constant":true,"inputs":[{"name":"","type":"bytes32"}],"name":"ilks","outputs":[{"name":"take","type":"uint256"},{"name":"rate","type":"uint256"},{"name":"Ink","type":"uint256"},{"name":"Art","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0xd9638d36"},{"constant":true,"inputs":[{"name":"","type":"bytes32"}],"name":"dai","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function","signature":"0xf53e4e69"},{"inputs":[],"payable":false,"stateMutability":"nonpayable","type":"constructor","signature":"constructor"},{"anonymous":true,"inputs":[{"indexed":true,"name":"sig","type":"bytes4"},{"indexed":true,"name":"foo","type":"bytes32"},{"indexed":true,"name":"bar","type":"bytes32"},{
"indexed":true,"name":"too","type":"bytes32"},{"indexed":false,"name":"fax","type":"bytes"}],"name":"Note","type":"event","signature":"0x8c2dbbc2b33ffaa77c104b777e574a8a4ff79829dfee8b66f4dc63e3f8067152"},{"constant":false,"inputs":[{"name":"guy","type":"address"}],"name":"rely","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x65fae35e"},{"constant":false,"inputs":[{"name":"guy","type":"address"}],"name":"deny","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x9c52a7f1"},{"constant":false,"inputs":[{"name":"ilk","type":"bytes32"}],"name":"init","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x3b663195"},{"constant":false,"inputs":[{"name":"ilk","type":"bytes32"},{"name":"guy","type":"bytes32"},{"name":"rad","type":"int256"}],"name":"slip","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x42066cbb"},{"constant":false,"inputs":[{"name":"ilk","type":"bytes32"},{"name":"src","type":"bytes32"},{"name":"dst","type":"bytes32"},{"name":"rad","type":"int256"}],"name":"flux","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0xa6e41821"},{"constant":false,"inputs":[{"name":"src","type":"bytes32"},{"name":"dst","type":"bytes32"},{"name":"rad","type":"int256"}],"name":"move","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x78f19470"},{"constant":false,"inputs":[{"name":"i","type":"bytes32"},{"name":"u","type":"bytes32"},{"name":"v","type":"bytes32"},{"name":"w","type":"bytes32"},{"name":"dink","type":"int256"},{"name":"dart","type":"int256"}],"name":"tune","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x5dd6471a"},{"constant":false,"inputs":[{"name":"i","type":"bytes32"},{"name":"u","type":"bytes32"},{"name":"v","type":"bytes32"},{"name":"w","type":"bytes32"},{"name":"dink","type":
"int256"},{"name":"dart","type":"int256"}],"name":"grab","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x3690ae4c"},{"constant":false,"inputs":[{"name":"u","type":"bytes32"},{"name":"v","type":"bytes32"},{"name":"rad","type":"int256"}],"name":"heal","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x990a5f63"},{"constant":false,"inputs":[{"name":"i","type":"bytes32"},{"name":"u","type":"bytes32"},{"name":"rate","type":"int256"}],"name":"fold","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0xe6a6a64d"},{"constant":false,"inputs":[{"name":"i","type":"bytes32"},{"name":"u","type":"bytes32"},{"name":"take","type":"int256"}],"name":"toll","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function","signature":"0x09b7a0b5"}]`
|
||||
|
||||
FlipperContractAddress = "0x6b59c42097e2fff7cad96cb08ceefd601081ad9c" //this is a temporary address deployed locally
|
||||
|
||||
@ -35,9 +38,10 @@ var (
|
||||
DentFunctionSignature = GetLogNoteSignature(dentMethod)
|
||||
FlipKickSignature = GetEventSignature(flipKickMethod)
|
||||
FrobSignature = GetEventSignature(frobMethod)
|
||||
LogValueSignature = GetEventSignature("")
|
||||
LogValueSignature = GetEventSignature(logValueMethod)
|
||||
PitFileDebtCeilingSignature = GetLogNoteSignature(pitFileDebtCeilingMethod)
|
||||
PitFileIlkSignature = GetLogNoteSignature(pitFileIlkMethod)
|
||||
PitFileStabilityFeeSignature = GetLogNoteSignature(pitFileStabilityFeeMethod)
|
||||
TendFunctionSignature = GetLogNoteSignature(tendMethod)
|
||||
VatInitSignature = GetLogNoteSignature(vatInitMethod)
|
||||
)
|
||||
|
21
pkg/transformers/test_data/mocks/vat_init/converter.go
Normal file
21
pkg/transformers/test_data/mocks/vat_init/converter.go
Normal file
@ -0,0 +1,21 @@
|
||||
package vat_init
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/transformers/vat_init"
|
||||
)
|
||||
|
||||
type MockVatInitConverter struct {
|
||||
converterErr error
|
||||
PassedLog types.Log
|
||||
}
|
||||
|
||||
func (converter *MockVatInitConverter) ToModel(ethLog types.Log) (vat_init.VatInitModel, error) {
|
||||
converter.PassedLog = ethLog
|
||||
return test_data.VatInitModel, converter.converterErr
|
||||
}
|
||||
|
||||
func (converter *MockVatInitConverter) SetConverterError(e error) {
|
||||
converter.converterErr = e
|
||||
}
|
40
pkg/transformers/test_data/mocks/vat_init/repository.go
Normal file
40
pkg/transformers/test_data/mocks/vat_init/repository.go
Normal file
@ -0,0 +1,40 @@
|
||||
package vat_init
|
||||
|
||||
import (
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/transformers/vat_init"
|
||||
)
|
||||
|
||||
type MockVatInitRepository struct {
|
||||
createErr error
|
||||
missingHeaders []core.Header
|
||||
missingHeadersErr error
|
||||
PassedStartingBlockNumber int64
|
||||
PassedEndingBlockNumber int64
|
||||
PassedHeaderID int64
|
||||
PassedModel vat_init.VatInitModel
|
||||
}
|
||||
|
||||
func (repository *MockVatInitRepository) Create(headerID int64, model vat_init.VatInitModel) error {
|
||||
repository.PassedHeaderID = headerID
|
||||
repository.PassedModel = model
|
||||
return repository.createErr
|
||||
}
|
||||
|
||||
func (repository *MockVatInitRepository) MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error) {
|
||||
repository.PassedStartingBlockNumber = startingBlockNumber
|
||||
repository.PassedEndingBlockNumber = endingBlockNumber
|
||||
return repository.missingHeaders, repository.missingHeadersErr
|
||||
}
|
||||
|
||||
func (repository *MockVatInitRepository) SetMissingHeadersErr(e error) {
|
||||
repository.missingHeadersErr = e
|
||||
}
|
||||
|
||||
func (repository *MockVatInitRepository) SetMissingHeaders(headers []core.Header) {
|
||||
repository.missingHeaders = headers
|
||||
}
|
||||
|
||||
func (repository *MockVatInitRepository) SetCreateError(e error) {
|
||||
repository.createErr = e
|
||||
}
|
35
pkg/transformers/test_data/vat_init.go
Normal file
35
pkg/transformers/test_data/vat_init.go
Normal file
@ -0,0 +1,35 @@
|
||||
package test_data
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/transformers/vat_init"
|
||||
)
|
||||
|
||||
// VatAddress is the Vat contract address used by the fixtures below.
var VatAddress = "0x239E6f0AB02713f1F8AA90ebeDeD9FC66Dc96CD6"

// EthVatInitLog is a sample raw Vat init event log. Topics[0] is the event
// signature; Topics[1] carries the ilk ("fake ilk", right-padded to bytes32).
var EthVatInitLog = types.Log{
	Address: common.HexToAddress(VatAddress),
	Topics: []common.Hash{
		common.HexToHash("0x3b66319500000000000000000000000000000000000000000000000000000000"),
		common.HexToHash("0x66616b6520696c6b000000000000000000000000000000000000000000000000"),
		common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
		common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
	},
	Data:        hexutil.MustDecode("0x000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000243b66319566616b6520696c6b000000000000000000000000000000000000000000000000"),
	BlockNumber: 24,
	TxHash:      common.HexToHash("0xe8f39fbb7fea3621f543868f19b1114e305aff6a063a30d32835ff1012526f91"),
	TxIndex:     7,
	BlockHash:   common.HexToHash("0xe3dd2e05bd8b92833e20ed83e2171bbc06a9ec823232eca1730a807bd8f5edc0"),
	Index:       0,
	Removed:     false,
}

// rawVatInitLog is the JSON encoding of EthVatInitLog, used as the Raw field.
var rawVatInitLog, _ = json.Marshal(EthVatInitLog)

// VatInitModel is the model expected when converting EthVatInitLog.
var VatInitModel = vat_init.VatInitModel{
	Ilk:              "fake ilk",
	TransactionIndex: EthVatInitLog.TxIndex,
	Raw:              rawVatInitLog,
}
|
@ -26,6 +26,7 @@ import (
|
||||
"github.com/vulcanize/vulcanizedb/pkg/transformers/price_feeds"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/transformers/tend"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/transformers/vat_init"
|
||||
)
|
||||
|
||||
func TransformerInitializers() []shared.TransformerInitializer {
|
||||
@ -39,6 +40,8 @@ func TransformerInitializers() []shared.TransformerInitializer {
|
||||
pitFileStabilityFeeTransformerInitializer := stability_fee.PitFileStabilityFeeTransformerInitializer{Config: pitFileConfig}
|
||||
priceFeedTransformerInitializer := price_feeds.PriceFeedTransformerInitializer{Config: price_feeds.PriceFeedConfig}
|
||||
tendTransformerInitializer := tend.TendTransformerInitializer{Config: tend.TendConfig}
|
||||
vatInitConfig := vat_init.VatInitConfig
|
||||
vatInitTransformerInitializer := vat_init.VatInitTransformerInitializer{Config: vatInitConfig}
|
||||
|
||||
return []shared.TransformerInitializer{
|
||||
biteTransformerInitializer.NewBiteTransformer,
|
||||
@ -50,5 +53,6 @@ func TransformerInitializers() []shared.TransformerInitializer {
|
||||
pitFileStabilityFeeTransformerInitializer.NewPitFileStabilityFeeTransformer,
|
||||
priceFeedTransformerInitializer.NewPriceFeedTransformer,
|
||||
tendTransformerInitializer.NewTendTransformer,
|
||||
vatInitTransformerInitializer.NewVatInitTransformer,
|
||||
}
|
||||
}
|
||||
|
13
pkg/transformers/vat_init/config.go
Normal file
13
pkg/transformers/vat_init/config.go
Normal file
@ -0,0 +1,13 @@
|
||||
package vat_init
|
||||
|
||||
import (
|
||||
"github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
|
||||
)
|
||||
|
||||
// VatInitConfig drives the Vat init transformer: the contract to watch, the
// event topic to filter on, and the block range to backfill.
var VatInitConfig = shared.TransformerConfig{
	ContractAddress: "0x239E6f0AB02713f1F8AA90ebeDeD9FC66Dc96CD6", // temporary address from Ganache deploy
	ContractAbi:     shared.VatABI,
	Topics:          []string{shared.VatInitSignature},
	// NOTE(review): narrow placeholder block range — confirm before pointing
	// this at a long-lived chain.
	StartingBlockNumber: 0,
	EndingBlockNumber:   100,
}
|
36
pkg/transformers/vat_init/converter.go
Normal file
36
pkg/transformers/vat_init/converter.go
Normal file
@ -0,0 +1,36 @@
|
||||
package vat_init
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
)
|
||||
|
||||
// Converter translates a raw Vat init event log into a VatInitModel.
type Converter interface {
	ToModel(ethLog types.Log) (VatInitModel, error)
}
|
||||
|
||||
type VatInitConverter struct{}
|
||||
|
||||
func (VatInitConverter) ToModel(ethLog types.Log) (VatInitModel, error) {
|
||||
err := verifyLog(ethLog)
|
||||
if err != nil {
|
||||
return VatInitModel{}, err
|
||||
}
|
||||
ilk := string(bytes.Trim(ethLog.Topics[1].Bytes(), "\x00"))
|
||||
raw, err := json.Marshal(ethLog)
|
||||
return VatInitModel{
|
||||
Ilk: ilk,
|
||||
TransactionIndex: ethLog.TxIndex,
|
||||
Raw: raw,
|
||||
}, err
|
||||
}
|
||||
|
||||
func verifyLog(log types.Log) error {
|
||||
if len(log.Topics) < 2 {
|
||||
return errors.New("log missing topics")
|
||||
}
|
||||
return nil
|
||||
}
|
30
pkg/transformers/vat_init/converter_test.go
Normal file
30
pkg/transformers/vat_init/converter_test.go
Normal file
@ -0,0 +1,30 @@
|
||||
package vat_init_test
|
||||
|
||||
import (
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/transformers/vat_init"
|
||||
)
|
||||
|
||||
// Unit tests for VatInitConverter.ToModel.
var _ = Describe("Vat init converter", func() {
	It("returns err if log missing topics", func() {
		converter := vat_init.VatInitConverter{}
		// A zero-value log has no topics, so verification should fail.
		badLog := types.Log{}

		_, err := converter.ToModel(badLog)

		Expect(err).To(HaveOccurred())
	})

	It("converts a log to an model", func() {
		converter := vat_init.VatInitConverter{}

		model, err := converter.ToModel(test_data.EthVatInitLog)

		Expect(err).NotTo(HaveOccurred())
		Expect(model).To(Equal(test_data.VatInitModel))
	})
})
|
7
pkg/transformers/vat_init/model.go
Normal file
7
pkg/transformers/vat_init/model.go
Normal file
@ -0,0 +1,7 @@
|
||||
package vat_init
|
||||
|
||||
// VatInitModel is the database representation of a Vat init event
// (table maker.vat_init).
type VatInitModel struct {
	Ilk              string // ilk name decoded from the log's second topic
	TransactionIndex uint   `db:"tx_idx"`
	Raw              []byte `db:"raw_log"` // full event log serialized as JSON
}
|
48
pkg/transformers/vat_init/repository.go
Normal file
48
pkg/transformers/vat_init/repository.go
Normal file
@ -0,0 +1,48 @@
|
||||
package vat_init
|
||||
|
||||
import (
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
|
||||
)
|
||||
|
||||
// Repository persists Vat init events and reports which synced headers have
// not yet had a vat_init record created for them.
type Repository interface {
	Create(headerID int64, model VatInitModel) error
	MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error)
}
|
||||
|
||||
// VatInitRepository is the Postgres-backed Repository implementation.
type VatInitRepository struct {
	db *postgres.DB
}
|
||||
|
||||
func NewVatInitRepository(db *postgres.DB) VatInitRepository {
|
||||
return VatInitRepository{
|
||||
db: db,
|
||||
}
|
||||
}
|
||||
|
||||
// Create inserts a Vat init event row linked to the given header. A second
// insert for the same header/log fails with a unique-constraint violation
// rather than creating a duplicate.
func (repository VatInitRepository) Create(headerID int64, model VatInitModel) error {
	_, err := repository.db.Exec(
		`INSERT into maker.vat_init (header_id, ilk, tx_idx, raw_log)
        VALUES($1, $2, $3, $4)`,
		headerID, model.Ilk, model.TransactionIndex, model.Raw,
	)
	return err
}
|
||||
|
||||
func (repository VatInitRepository) MissingHeaders(startingBlockNumber, endingBlockNumber int64) ([]core.Header, error) {
|
||||
var result []core.Header
|
||||
err := repository.db.Select(
|
||||
&result,
|
||||
`SELECT headers.id, headers.block_number FROM headers
|
||||
LEFT JOIN maker.vat_init on headers.id = header_id
|
||||
WHERE header_id ISNULL
|
||||
AND headers.block_number >= $1
|
||||
AND headers.block_number <= $2
|
||||
AND headers.eth_node_fingerprint = $3`,
|
||||
startingBlockNumber,
|
||||
endingBlockNumber,
|
||||
repository.db.Node.ID,
|
||||
)
|
||||
|
||||
return result, err
|
||||
}
|
129
pkg/transformers/vat_init/repository_test.go
Normal file
129
pkg/transformers/vat_init/repository_test.go
Normal file
@ -0,0 +1,129 @@
|
||||
package vat_init_test
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres/repositories"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/transformers/vat_init"
|
||||
"github.com/vulcanize/vulcanizedb/test_config"
|
||||
)
|
||||
|
||||
var _ = Describe("", func() {
|
||||
Describe("Create", func() {
|
||||
It("adds a vat event", func() {
|
||||
db := test_config.NewTestDB(core.Node{})
|
||||
test_config.CleanTestDB(db)
|
||||
headerRepository := repositories.NewHeaderRepository(db)
|
||||
headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
vatInitRepository := vat_init.NewVatInitRepository(db)
|
||||
|
||||
err = vatInitRepository.Create(headerID, test_data.VatInitModel)
|
||||
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
var dbVatInit vat_init.VatInitModel
|
||||
err = db.Get(&dbVatInit, `SELECT ilk,tx_idx, raw_log FROM maker.vat_init WHERE header_id = $1`, headerID)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(dbVatInit.Ilk).To(Equal(test_data.VatInitModel.Ilk))
|
||||
Expect(dbVatInit.TransactionIndex).To(Equal(test_data.VatInitModel.TransactionIndex))
|
||||
Expect(dbVatInit.Raw).To(MatchJSON(test_data.VatInitModel.Raw))
|
||||
})
|
||||
|
||||
It("does not duplicate vat events", func() {
|
||||
db := test_config.NewTestDB(core.Node{})
|
||||
test_config.CleanTestDB(db)
|
||||
headerRepository := repositories.NewHeaderRepository(db)
|
||||
headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
vatInitRepository := vat_init.NewVatInitRepository(db)
|
||||
err = vatInitRepository.Create(headerID, test_data.VatInitModel)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = vatInitRepository.Create(headerID, test_data.VatInitModel)
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("pq: duplicate key value violates unique constraint"))
|
||||
})
|
||||
|
||||
It("removes vat if corresponding header is deleted", func() {
|
||||
db := test_config.NewTestDB(core.Node{})
|
||||
test_config.CleanTestDB(db)
|
||||
headerRepository := repositories.NewHeaderRepository(db)
|
||||
headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
vatInitRepository := vat_init.NewVatInitRepository(db)
|
||||
err = vatInitRepository.Create(headerID, test_data.VatInitModel)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
_, err = db.Exec(`DELETE FROM headers WHERE id = $1`, headerID)
|
||||
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
var dbVatInit vat_init.VatInitModel
|
||||
err = db.Get(&dbVatInit, `SELECT ilk, tx_idx, raw_log FROM maker.vat_init WHERE header_id = $1`, headerID)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err).To(MatchError(sql.ErrNoRows))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("MissingHeaders", func() {
|
||||
It("returns headers with no associated vat event", func() {
|
||||
db := test_config.NewTestDB(core.Node{})
|
||||
test_config.CleanTestDB(db)
|
||||
headerRepository := repositories.NewHeaderRepository(db)
|
||||
startingBlockNumber := int64(1)
|
||||
vatInitBlockNumber := int64(2)
|
||||
endingBlockNumber := int64(3)
|
||||
blockNumbers := []int64{startingBlockNumber, vatInitBlockNumber, endingBlockNumber, endingBlockNumber + 1}
|
||||
var headerIDs []int64
|
||||
for _, n := range blockNumbers {
|
||||
headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{BlockNumber: n})
|
||||
headerIDs = append(headerIDs, headerID)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
vatInitRepository := vat_init.NewVatInitRepository(db)
|
||||
err := vatInitRepository.Create(headerIDs[1], test_data.VatInitModel)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
headers, err := vatInitRepository.MissingHeaders(startingBlockNumber, endingBlockNumber)
|
||||
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(len(headers)).To(Equal(2))
|
||||
Expect(headers[0].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber)))
|
||||
Expect(headers[1].BlockNumber).To(Or(Equal(startingBlockNumber), Equal(endingBlockNumber)))
|
||||
})
|
||||
|
||||
It("only returns headers associated with the current node", func() {
|
||||
db := test_config.NewTestDB(core.Node{})
|
||||
test_config.CleanTestDB(db)
|
||||
blockNumbers := []int64{1, 2, 3}
|
||||
headerRepository := repositories.NewHeaderRepository(db)
|
||||
dbTwo := test_config.NewTestDB(core.Node{ID: "second"})
|
||||
headerRepositoryTwo := repositories.NewHeaderRepository(dbTwo)
|
||||
var headerIDs []int64
|
||||
for _, n := range blockNumbers {
|
||||
headerID, err := headerRepository.CreateOrUpdateHeader(core.Header{BlockNumber: n})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
headerIDs = append(headerIDs, headerID)
|
||||
_, err = headerRepositoryTwo.CreateOrUpdateHeader(core.Header{BlockNumber: n})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
vatInitRepository := vat_init.NewVatInitRepository(db)
|
||||
vatInitRepositoryTwo := vat_init.NewVatInitRepository(dbTwo)
|
||||
err := vatInitRepository.Create(headerIDs[0], test_data.VatInitModel)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
nodeOneMissingHeaders, err := vatInitRepository.MissingHeaders(blockNumbers[0], blockNumbers[len(blockNumbers)-1])
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(len(nodeOneMissingHeaders)).To(Equal(len(blockNumbers) - 1))
|
||||
|
||||
nodeTwoMissingHeaders, err := vatInitRepositoryTwo.MissingHeaders(blockNumbers[0], blockNumbers[len(blockNumbers)-1])
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(len(nodeTwoMissingHeaders)).To(Equal(len(blockNumbers)))
|
||||
})
|
||||
})
|
||||
})
|
57
pkg/transformers/vat_init/transformer.go
Normal file
57
pkg/transformers/vat_init/transformer.go
Normal file
@ -0,0 +1,57 @@
|
||||
package vat_init
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/datastore/postgres"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
|
||||
)
|
||||
|
||||
// VatInitTransformerInitializer builds VatInitTransformers bound to a config.
type VatInitTransformerInitializer struct {
	Config shared.TransformerConfig
}
|
||||
|
||||
func (initializer VatInitTransformerInitializer) NewVatInitTransformer(db *postgres.DB, blockChain core.BlockChain) shared.Transformer {
|
||||
converter := VatInitConverter{}
|
||||
fetcher := shared.NewFetcher(blockChain)
|
||||
repository := NewVatInitRepository(db)
|
||||
return VatInitTransformer{
|
||||
Config: initializer.Config,
|
||||
Converter: converter,
|
||||
Fetcher: fetcher,
|
||||
Repository: repository,
|
||||
}
|
||||
}
|
||||
|
||||
// VatInitTransformer finds headers missing Vat init records, fetches the
// matching event logs, converts them, and persists the resulting models.
type VatInitTransformer struct {
	Config     shared.TransformerConfig
	Converter  Converter
	Fetcher    shared.LogFetcher
	Repository Repository
}
|
||||
|
||||
func (transformer VatInitTransformer) Execute() error {
|
||||
missingHeaders, err := transformer.Repository.MissingHeaders(transformer.Config.StartingBlockNumber, transformer.Config.EndingBlockNumber)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, header := range missingHeaders {
|
||||
topics := [][]common.Hash{{common.HexToHash(shared.VatInitSignature)}}
|
||||
matchingLogs, err := transformer.Fetcher.FetchLogs(VatInitConfig.ContractAddress, topics, header.BlockNumber)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, log := range matchingLogs {
|
||||
model, err := transformer.Converter.ToModel(log)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = transformer.Repository.Create(header.Id, model)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
159
pkg/transformers/vat_init/transformer_test.go
Normal file
159
pkg/transformers/vat_init/transformer_test.go
Normal file
@ -0,0 +1,159 @@
|
||||
package vat_init_test
|
||||
|
||||
import (
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/core/types"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/core"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/fakes"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/transformers/shared"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/transformers/test_data"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/transformers/test_data/mocks"
|
||||
vat_init_mocks "github.com/vulcanize/vulcanizedb/pkg/transformers/test_data/mocks/vat_init"
|
||||
"github.com/vulcanize/vulcanizedb/pkg/transformers/vat_init"
|
||||
)
|
||||
|
||||
// Unit tests for VatInitTransformer.Execute, driving it entirely through the
// mock fetcher, converter, and repository.
var _ = Describe("Vat init transformer", func() {
	It("gets missing headers for block numbers specified in config", func() {
		repository := &vat_init_mocks.MockVatInitRepository{}
		transformer := vat_init.VatInitTransformer{
			Config:     vat_init.VatInitConfig,
			Fetcher:    &mocks.MockLogFetcher{},
			Converter:  &vat_init_mocks.MockVatInitConverter{},
			Repository: repository,
		}

		err := transformer.Execute()

		Expect(err).NotTo(HaveOccurred())
		// The configured block range must be passed through to the repository.
		Expect(repository.PassedStartingBlockNumber).To(Equal(vat_init.VatInitConfig.StartingBlockNumber))
		Expect(repository.PassedEndingBlockNumber).To(Equal(vat_init.VatInitConfig.EndingBlockNumber))
	})

	It("returns error if repository returns error for missing headers", func() {
		repository := &vat_init_mocks.MockVatInitRepository{}
		repository.SetMissingHeadersErr(fakes.FakeError)
		transformer := vat_init.VatInitTransformer{
			Fetcher:    &mocks.MockLogFetcher{},
			Converter:  &vat_init_mocks.MockVatInitConverter{},
			Repository: repository,
		}

		err := transformer.Execute()

		Expect(err).To(HaveOccurred())
		Expect(err).To(MatchError(fakes.FakeError))
	})

	It("fetches logs for missing headers", func() {
		fetcher := &mocks.MockLogFetcher{}
		repository := &vat_init_mocks.MockVatInitRepository{}
		repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}, {BlockNumber: 2}})
		transformer := vat_init.VatInitTransformer{
			Fetcher:    fetcher,
			Converter:  &vat_init_mocks.MockVatInitConverter{},
			Repository: repository,
		}

		err := transformer.Execute()

		Expect(err).NotTo(HaveOccurred())
		// One fetch per missing header, with the Vat address and init topic.
		Expect(fetcher.FetchedBlocks).To(Equal([]int64{1, 2}))
		Expect(fetcher.FetchedContractAddress).To(Equal(vat_init.VatInitConfig.ContractAddress))
		Expect(fetcher.FetchedTopics).To(Equal([][]common.Hash{{common.HexToHash(shared.VatInitSignature)}}))
	})

	It("returns error if fetcher returns error", func() {
		fetcher := &mocks.MockLogFetcher{}
		fetcher.SetFetcherError(fakes.FakeError)
		repository := &vat_init_mocks.MockVatInitRepository{}
		repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
		transformer := vat_init.VatInitTransformer{
			Fetcher:    fetcher,
			Converter:  &vat_init_mocks.MockVatInitConverter{},
			Repository: repository,
		}

		err := transformer.Execute()

		Expect(err).To(HaveOccurred())
		Expect(err).To(MatchError(fakes.FakeError))
	})

	It("converts matching logs", func() {
		converter := &vat_init_mocks.MockVatInitConverter{}
		fetcher := &mocks.MockLogFetcher{}
		fetcher.SetFetchedLogs([]types.Log{test_data.EthVatInitLog})
		repository := &vat_init_mocks.MockVatInitRepository{}
		repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
		transformer := vat_init.VatInitTransformer{
			Fetcher:    fetcher,
			Converter:  converter,
			Repository: repository,
		}

		err := transformer.Execute()

		Expect(err).NotTo(HaveOccurred())
		Expect(converter.PassedLog).To(Equal(test_data.EthVatInitLog))
	})

	It("returns error if converter returns error", func() {
		converter := &vat_init_mocks.MockVatInitConverter{}
		converter.SetConverterError(fakes.FakeError)
		fetcher := &mocks.MockLogFetcher{}
		fetcher.SetFetchedLogs([]types.Log{test_data.EthVatInitLog})
		repository := &vat_init_mocks.MockVatInitRepository{}
		repository.SetMissingHeaders([]core.Header{{BlockNumber: 1}})
		transformer := vat_init.VatInitTransformer{
			Fetcher:    fetcher,
			Converter:  converter,
			Repository: repository,
		}

		err := transformer.Execute()

		Expect(err).To(HaveOccurred())
		Expect(err).To(MatchError(fakes.FakeError))
	})

	It("persists vat init model", func() {
		converter := &vat_init_mocks.MockVatInitConverter{}
		fetcher := &mocks.MockLogFetcher{}
		fetcher.SetFetchedLogs([]types.Log{test_data.EthVatInitLog})
		repository := &vat_init_mocks.MockVatInitRepository{}
		fakeHeader := core.Header{BlockNumber: 1, Id: 2}
		repository.SetMissingHeaders([]core.Header{fakeHeader})
		transformer := vat_init.VatInitTransformer{
			Fetcher:    fetcher,
			Converter:  converter,
			Repository: repository,
		}

		err := transformer.Execute()

		Expect(err).NotTo(HaveOccurred())
		// The header's ID and the converted model are handed to Create.
		Expect(repository.PassedHeaderID).To(Equal(fakeHeader.Id))
		Expect(repository.PassedModel).To(Equal(test_data.VatInitModel))
	})

	It("returns error if repository returns error for create", func() {
		converter := &vat_init_mocks.MockVatInitConverter{}
		fetcher := &mocks.MockLogFetcher{}
		fetcher.SetFetchedLogs([]types.Log{test_data.EthVatInitLog})
		repository := &vat_init_mocks.MockVatInitRepository{}
		repository.SetMissingHeaders([]core.Header{{BlockNumber: 1, Id: 2}})
		repository.SetCreateError(fakes.FakeError)
		transformer := vat_init.VatInitTransformer{
			Fetcher:    fetcher,
			Converter:  converter,
			Repository: repository,
		}

		err := transformer.Execute()

		Expect(err).To(HaveOccurred())
		Expect(err).To(MatchError(fakes.FakeError))
	})
})
|
13
pkg/transformers/vat_init/vat_init_suite_test.go
Normal file
13
pkg/transformers/vat_init/vat_init_suite_test.go
Normal file
@ -0,0 +1,13 @@
|
||||
package vat_init_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// TestVatInit runs the vat_init Ginkgo suite under `go test`.
func TestVatInit(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "VatInit Suite")
}
|
7
vendor/github.com/philhofer/fwd/LICENSE.md
generated
vendored
Normal file
7
vendor/github.com/philhofer/fwd/LICENSE.md
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
Copyright (c) 2014-2015, Philip Hofer
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
315
vendor/github.com/philhofer/fwd/README.md
generated
vendored
Normal file
315
vendor/github.com/philhofer/fwd/README.md
generated
vendored
Normal file
@ -0,0 +1,315 @@
|
||||
|
||||
# fwd
|
||||
import "github.com/philhofer/fwd"
|
||||
|
||||
The `fwd` package provides a buffered reader
|
||||
and writer. Each has methods that help improve
|
||||
the encoding/decoding performance of some binary
|
||||
protocols.
|
||||
|
||||
The `fwd.Writer` and `fwd.Reader` type provide similar
|
||||
functionality to their counterparts in `bufio`, plus
|
||||
a few extra utility methods that simplify read-ahead
|
||||
and write-ahead. I wrote this package to improve serialization
|
||||
performance for <a href="http://github.com/tinylib/msgp">http://github.com/tinylib/msgp</a>,
|
||||
where it provided about a 2x speedup over `bufio` for certain
|
||||
workloads. However, care must be taken to understand the semantics of the
|
||||
extra methods provided by this package, as they allow
|
||||
the user to access and manipulate the buffer memory
|
||||
directly.
|
||||
|
||||
The extra methods for `fwd.Reader` are `Peek`, `Skip`
|
||||
and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`,
|
||||
will re-allocate the read buffer in order to accommodate arbitrarily
|
||||
large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes
|
||||
in the stream, and uses the `io.Seeker` interface if the underlying
|
||||
stream implements it. `(*fwd.Reader).Next` returns a slice pointing
|
||||
to the next `n` bytes in the read buffer (like `Peek`), but also
|
||||
increments the read position. This allows users to process streams
|
||||
in arbitrary block sizes without having to manage appropriately-sized
|
||||
slices. Additionally, obviating the need to copy the data from the
|
||||
buffer to another location in memory can improve performance dramatically
|
||||
in CPU-bound applications.
|
||||
|
||||
`fwd.Writer` only has one extra method, which is `(*fwd.Writer).Next`, which
|
||||
returns a slice pointing to the next `n` bytes of the writer, and increments
|
||||
the write position by the length of the returned slice. This allows users
|
||||
to write directly to the end of the buffer.
|
||||
|
||||
|
||||
|
||||
|
||||
## Constants
|
||||
``` go
|
||||
const (
|
||||
// DefaultReaderSize is the default size of the read buffer
|
||||
DefaultReaderSize = 2048
|
||||
)
|
||||
```
|
||||
``` go
|
||||
const (
|
||||
// DefaultWriterSize is the
|
||||
// default write buffer size.
|
||||
DefaultWriterSize = 2048
|
||||
)
|
||||
```
|
||||
|
||||
|
||||
|
||||
## type Reader
|
||||
``` go
|
||||
type Reader struct {
|
||||
// contains filtered or unexported fields
|
||||
}
|
||||
```
|
||||
Reader is a buffered look-ahead reader
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
### func NewReader
|
||||
``` go
|
||||
func NewReader(r io.Reader) *Reader
|
||||
```
|
||||
NewReader returns a new *Reader that reads from 'r'
|
||||
|
||||
|
||||
### func NewReaderSize
|
||||
``` go
|
||||
func NewReaderSize(r io.Reader, n int) *Reader
|
||||
```
|
||||
NewReaderSize returns a new *Reader that
|
||||
reads from 'r' and has a buffer size 'n'
|
||||
|
||||
|
||||
|
||||
|
||||
### func (\*Reader) BufferSize
|
||||
``` go
|
||||
func (r *Reader) BufferSize() int
|
||||
```
|
||||
BufferSize returns the total size of the buffer
|
||||
|
||||
|
||||
|
||||
### func (\*Reader) Buffered
|
||||
``` go
|
||||
func (r *Reader) Buffered() int
|
||||
```
|
||||
Buffered returns the number of bytes currently in the buffer
|
||||
|
||||
|
||||
|
||||
### func (\*Reader) Next
|
||||
``` go
|
||||
func (r *Reader) Next(n int) ([]byte, error)
|
||||
```
|
||||
Next returns the next 'n' bytes in the stream.
|
||||
Unlike Peek, Next advances the reader position.
|
||||
The returned bytes point to the same
|
||||
data as the buffer, so the slice is
|
||||
only valid until the next reader method call.
|
||||
An EOF is considered an unexpected error.
|
||||
If the returned slice is less than the
|
||||
length asked for, an error will be returned,
|
||||
and the reader position will not be incremented.
|
||||
|
||||
|
||||
|
||||
### func (\*Reader) Peek
|
||||
``` go
|
||||
func (r *Reader) Peek(n int) ([]byte, error)
|
||||
```
|
||||
Peek returns the next 'n' buffered bytes,
|
||||
reading from the underlying reader if necessary.
|
||||
It will only return a slice shorter than 'n' bytes
|
||||
if it also returns an error. Peek does not advance
|
||||
the reader. EOF errors are *not* returned as
|
||||
io.ErrUnexpectedEOF.
|
||||
|
||||
|
||||
|
||||
### func (\*Reader) Read
|
||||
``` go
|
||||
func (r *Reader) Read(b []byte) (int, error)
|
||||
```
|
||||
Read implements `io.Reader`
|
||||
|
||||
|
||||
|
||||
### func (\*Reader) ReadByte
|
||||
``` go
|
||||
func (r *Reader) ReadByte() (byte, error)
|
||||
```
|
||||
ReadByte implements `io.ByteReader`
|
||||
|
||||
|
||||
|
||||
### func (\*Reader) ReadFull
|
||||
``` go
|
||||
func (r *Reader) ReadFull(b []byte) (int, error)
|
||||
```
|
||||
ReadFull attempts to read len(b) bytes into
|
||||
'b'. It returns the number of bytes read into
|
||||
'b', and an error if it does not return len(b).
|
||||
EOF is considered an unexpected error.
|
||||
|
||||
|
||||
|
||||
### func (\*Reader) Reset
|
||||
``` go
|
||||
func (r *Reader) Reset(rd io.Reader)
|
||||
```
|
||||
Reset resets the underlying reader
|
||||
and the read buffer.
|
||||
|
||||
|
||||
|
||||
### func (\*Reader) Skip
|
||||
``` go
|
||||
func (r *Reader) Skip(n int) (int, error)
|
||||
```
|
||||
Skip moves the reader forward 'n' bytes.
|
||||
Returns the number of bytes skipped and any
|
||||
errors encountered. It is analogous to Seek(n, 1).
|
||||
If the underlying reader implements io.Seeker, then
|
||||
that method will be used to skip forward.
|
||||
|
||||
If the reader encounters
|
||||
an EOF before skipping 'n' bytes, it
|
||||
returns io.ErrUnexpectedEOF. If the
|
||||
underlying reader implements io.Seeker, then
|
||||
those rules apply instead. (Many implementations
|
||||
will not return `io.EOF` until the next call
|
||||
to Read.)
|
||||
|
||||
|
||||
|
||||
### func (\*Reader) WriteTo
|
||||
``` go
|
||||
func (r *Reader) WriteTo(w io.Writer) (int64, error)
|
||||
```
|
||||
WriteTo implements `io.WriterTo`
|
||||
|
||||
|
||||
|
||||
## type Writer
|
||||
``` go
|
||||
type Writer struct {
|
||||
// contains filtered or unexported fields
|
||||
}
|
||||
```
|
||||
Writer is a buffered writer
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
### func NewWriter
|
||||
``` go
|
||||
func NewWriter(w io.Writer) *Writer
|
||||
```
|
||||
NewWriter returns a new writer
|
||||
that writes to 'w' and has a buffer
|
||||
that is `DefaultWriterSize` bytes.
|
||||
|
||||
|
||||
### func NewWriterSize
|
||||
``` go
|
||||
func NewWriterSize(w io.Writer, size int) *Writer
|
||||
```
|
||||
NewWriterSize returns a new writer
|
||||
that writes to 'w' and has a buffer
|
||||
that is 'size' bytes.
|
||||
|
||||
|
||||
|
||||
|
||||
### func (\*Writer) BufferSize
|
||||
``` go
|
||||
func (w *Writer) BufferSize() int
|
||||
```
|
||||
BufferSize returns the maximum size of the buffer.
|
||||
|
||||
|
||||
|
||||
### func (\*Writer) Buffered
|
||||
``` go
|
||||
func (w *Writer) Buffered() int
|
||||
```
|
||||
Buffered returns the number of buffered bytes
|
||||
in the reader.
|
||||
|
||||
|
||||
|
||||
### func (\*Writer) Flush
|
||||
``` go
|
||||
func (w *Writer) Flush() error
|
||||
```
|
||||
Flush flushes any buffered bytes
|
||||
to the underlying writer.
|
||||
|
||||
|
||||
|
||||
### func (\*Writer) Next
|
||||
``` go
|
||||
func (w *Writer) Next(n int) ([]byte, error)
|
||||
```
|
||||
Next returns the next 'n' free bytes
|
||||
in the write buffer, flushing the writer
|
||||
as necessary. Next will return `io.ErrShortBuffer`
|
||||
if 'n' is greater than the size of the write buffer.
|
||||
Calls to 'next' increment the write position by
|
||||
the size of the returned buffer.
|
||||
|
||||
|
||||
|
||||
### func (\*Writer) ReadFrom
|
||||
``` go
|
||||
func (w *Writer) ReadFrom(r io.Reader) (int64, error)
|
||||
```
|
||||
ReadFrom implements `io.ReaderFrom`
|
||||
|
||||
|
||||
|
||||
### func (\*Writer) Write
|
||||
``` go
|
||||
func (w *Writer) Write(p []byte) (int, error)
|
||||
```
|
||||
Write implements `io.Writer`
|
||||
|
||||
|
||||
|
||||
### func (\*Writer) WriteByte
|
||||
``` go
|
||||
func (w *Writer) WriteByte(b byte) error
|
||||
```
|
||||
WriteByte implements `io.ByteWriter`
|
||||
|
||||
|
||||
|
||||
### func (\*Writer) WriteString
|
||||
``` go
|
||||
func (w *Writer) WriteString(s string) (int, error)
|
||||
```
|
||||
WriteString is analogous to Write, but it takes a string.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
- - -
|
||||
Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
|
383
vendor/github.com/philhofer/fwd/reader.go
generated
vendored
Normal file
383
vendor/github.com/philhofer/fwd/reader.go
generated
vendored
Normal file
@ -0,0 +1,383 @@
|
||||
// The `fwd` package provides a buffered reader
|
||||
// and writer. Each has methods that help improve
|
||||
// the encoding/decoding performance of some binary
|
||||
// protocols.
|
||||
//
|
||||
// The `fwd.Writer` and `fwd.Reader` type provide similar
|
||||
// functionality to their counterparts in `bufio`, plus
|
||||
// a few extra utility methods that simplify read-ahead
|
||||
// and write-ahead. I wrote this package to improve serialization
|
||||
// performance for http://github.com/tinylib/msgp,
|
||||
// where it provided about a 2x speedup over `bufio` for certain
|
||||
// workloads. However, care must be taken to understand the semantics of the
|
||||
// extra methods provided by this package, as they allow
|
||||
// the user to access and manipulate the buffer memory
|
||||
// directly.
|
||||
//
|
||||
// The extra methods for `fwd.Reader` are `Peek`, `Skip`
|
||||
// and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`,
|
||||
// will re-allocate the read buffer in order to accommodate arbitrarily
|
||||
// large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes
|
||||
// in the stream, and uses the `io.Seeker` interface if the underlying
|
||||
// stream implements it. `(*fwd.Reader).Next` returns a slice pointing
|
||||
// to the next `n` bytes in the read buffer (like `Peek`), but also
|
||||
// increments the read position. This allows users to process streams
|
||||
// in arbitrary block sizes without having to manage appropriately-sized
|
||||
// slices. Additionally, obviating the need to copy the data from the
|
||||
// buffer to another location in memory can improve performance dramatically
|
||||
// in CPU-bound applications.
|
||||
//
|
||||
// `fwd.Writer` only has one extra method, which is `(*fwd.Writer).Next`, which
|
||||
// returns a slice pointing to the next `n` bytes of the writer, and increments
|
||||
// the write position by the length of the returned slice. This allows users
|
||||
// to write directly to the end of the buffer.
|
||||
//
|
||||
package fwd
|
||||
|
||||
import "io"
|
||||
|
||||
const (
|
||||
// DefaultReaderSize is the default size of the read buffer
|
||||
DefaultReaderSize = 2048
|
||||
|
||||
// minimum read buffer; straight from bufio
|
||||
minReaderSize = 16
|
||||
)
|
||||
|
||||
// NewReader returns a new *Reader that reads from 'r'
|
||||
func NewReader(r io.Reader) *Reader {
|
||||
return NewReaderSize(r, DefaultReaderSize)
|
||||
}
|
||||
|
||||
// NewReaderSize returns a new *Reader that
|
||||
// reads from 'r' and has a buffer size 'n'
|
||||
func NewReaderSize(r io.Reader, n int) *Reader {
|
||||
rd := &Reader{
|
||||
r: r,
|
||||
data: make([]byte, 0, max(minReaderSize, n)),
|
||||
}
|
||||
if s, ok := r.(io.Seeker); ok {
|
||||
rd.rs = s
|
||||
}
|
||||
return rd
|
||||
}
|
||||
|
||||
// Reader is a buffered look-ahead reader
|
||||
type Reader struct {
|
||||
r io.Reader // underlying reader
|
||||
|
||||
// data[n:len(data)] is buffered data; data[len(data):cap(data)] is free buffer space
|
||||
data []byte // data
|
||||
n int // read offset
|
||||
state error // last read error
|
||||
|
||||
// if the reader past to NewReader was
|
||||
// also an io.Seeker, this is non-nil
|
||||
rs io.Seeker
|
||||
}
|
||||
|
||||
// Reset resets the underlying reader
|
||||
// and the read buffer.
|
||||
func (r *Reader) Reset(rd io.Reader) {
|
||||
r.r = rd
|
||||
r.data = r.data[0:0]
|
||||
r.n = 0
|
||||
r.state = nil
|
||||
if s, ok := rd.(io.Seeker); ok {
|
||||
r.rs = s
|
||||
} else {
|
||||
r.rs = nil
|
||||
}
|
||||
}
|
||||
|
||||
// more() does one read on the underlying reader
|
||||
func (r *Reader) more() {
|
||||
// move data backwards so that
|
||||
// the read offset is 0; this way
|
||||
// we can supply the maximum number of
|
||||
// bytes to the reader
|
||||
if r.n != 0 {
|
||||
if r.n < len(r.data) {
|
||||
r.data = r.data[:copy(r.data[0:], r.data[r.n:])]
|
||||
} else {
|
||||
r.data = r.data[:0]
|
||||
}
|
||||
r.n = 0
|
||||
}
|
||||
var a int
|
||||
a, r.state = r.r.Read(r.data[len(r.data):cap(r.data)])
|
||||
if a == 0 && r.state == nil {
|
||||
r.state = io.ErrNoProgress
|
||||
return
|
||||
} else if a > 0 && r.state == io.EOF {
|
||||
// discard the io.EOF if we read more than 0 bytes.
|
||||
// the next call to Read should return io.EOF again.
|
||||
r.state = nil
|
||||
}
|
||||
r.data = r.data[:len(r.data)+a]
|
||||
}
|
||||
|
||||
// pop error
|
||||
func (r *Reader) err() (e error) {
|
||||
e, r.state = r.state, nil
|
||||
return
|
||||
}
|
||||
|
||||
// pop error; EOF -> io.ErrUnexpectedEOF
|
||||
func (r *Reader) noEOF() (e error) {
|
||||
e, r.state = r.state, nil
|
||||
if e == io.EOF {
|
||||
e = io.ErrUnexpectedEOF
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// buffered bytes
|
||||
func (r *Reader) buffered() int { return len(r.data) - r.n }
|
||||
|
||||
// Buffered returns the number of bytes currently in the buffer
|
||||
func (r *Reader) Buffered() int { return len(r.data) - r.n }
|
||||
|
||||
// BufferSize returns the total size of the buffer
|
||||
func (r *Reader) BufferSize() int { return cap(r.data) }
|
||||
|
||||
// Peek returns the next 'n' buffered bytes,
|
||||
// reading from the underlying reader if necessary.
|
||||
// It will only return a slice shorter than 'n' bytes
|
||||
// if it also returns an error. Peek does not advance
|
||||
// the reader. EOF errors are *not* returned as
|
||||
// io.ErrUnexpectedEOF.
|
||||
func (r *Reader) Peek(n int) ([]byte, error) {
|
||||
// in the degenerate case,
|
||||
// we may need to realloc
|
||||
// (the caller asked for more
|
||||
// bytes than the size of the buffer)
|
||||
if cap(r.data) < n {
|
||||
old := r.data[r.n:]
|
||||
r.data = make([]byte, n+r.buffered())
|
||||
r.data = r.data[:copy(r.data, old)]
|
||||
r.n = 0
|
||||
}
|
||||
|
||||
// keep filling until
|
||||
// we hit an error or
|
||||
// read enough bytes
|
||||
for r.buffered() < n && r.state == nil {
|
||||
r.more()
|
||||
}
|
||||
|
||||
// we must have hit an error
|
||||
if r.buffered() < n {
|
||||
return r.data[r.n:], r.err()
|
||||
}
|
||||
|
||||
return r.data[r.n : r.n+n], nil
|
||||
}
|
||||
|
||||
// Skip moves the reader forward 'n' bytes.
|
||||
// Returns the number of bytes skipped and any
|
||||
// errors encountered. It is analogous to Seek(n, 1).
|
||||
// If the underlying reader implements io.Seeker, then
|
||||
// that method will be used to skip forward.
|
||||
//
|
||||
// If the reader encounters
|
||||
// an EOF before skipping 'n' bytes, it
|
||||
// returns io.ErrUnexpectedEOF. If the
|
||||
// underlying reader implements io.Seeker, then
|
||||
// those rules apply instead. (Many implementations
|
||||
// will not return `io.EOF` until the next call
|
||||
// to Read.)
|
||||
func (r *Reader) Skip(n int) (int, error) {
|
||||
|
||||
// fast path
|
||||
if r.buffered() >= n {
|
||||
r.n += n
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// use seeker implementation
|
||||
// if we can
|
||||
if r.rs != nil {
|
||||
return r.skipSeek(n)
|
||||
}
|
||||
|
||||
// loop on filling
|
||||
// and then erasing
|
||||
o := n
|
||||
for r.buffered() < n && r.state == nil {
|
||||
r.more()
|
||||
// we can skip forward
|
||||
// up to r.buffered() bytes
|
||||
step := min(r.buffered(), n)
|
||||
r.n += step
|
||||
n -= step
|
||||
}
|
||||
// at this point, n should be
|
||||
// 0 if everything went smoothly
|
||||
return o - n, r.noEOF()
|
||||
}
|
||||
|
||||
// Next returns the next 'n' bytes in the stream.
|
||||
// Unlike Peek, Next advances the reader position.
|
||||
// The returned bytes point to the same
|
||||
// data as the buffer, so the slice is
|
||||
// only valid until the next reader method call.
|
||||
// An EOF is considered an unexpected error.
|
||||
// If an the returned slice is less than the
|
||||
// length asked for, an error will be returned,
|
||||
// and the reader position will not be incremented.
|
||||
func (r *Reader) Next(n int) ([]byte, error) {
|
||||
|
||||
// in case the buffer is too small
|
||||
if cap(r.data) < n {
|
||||
old := r.data[r.n:]
|
||||
r.data = make([]byte, n+r.buffered())
|
||||
r.data = r.data[:copy(r.data, old)]
|
||||
r.n = 0
|
||||
}
|
||||
|
||||
// fill at least 'n' bytes
|
||||
for r.buffered() < n && r.state == nil {
|
||||
r.more()
|
||||
}
|
||||
|
||||
if r.buffered() < n {
|
||||
return r.data[r.n:], r.noEOF()
|
||||
}
|
||||
out := r.data[r.n : r.n+n]
|
||||
r.n += n
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// skipSeek uses the io.Seeker to seek forward.
|
||||
// only call this function when n > r.buffered()
|
||||
func (r *Reader) skipSeek(n int) (int, error) {
|
||||
o := r.buffered()
|
||||
// first, clear buffer
|
||||
n -= o
|
||||
r.n = 0
|
||||
r.data = r.data[:0]
|
||||
|
||||
// then seek forward remaning bytes
|
||||
i, err := r.rs.Seek(int64(n), 1)
|
||||
return int(i) + o, err
|
||||
}
|
||||
|
||||
// Read implements `io.Reader`
|
||||
func (r *Reader) Read(b []byte) (int, error) {
|
||||
// if we have data in the buffer, just
|
||||
// return that.
|
||||
if r.buffered() != 0 {
|
||||
x := copy(b, r.data[r.n:])
|
||||
r.n += x
|
||||
return x, nil
|
||||
}
|
||||
var n int
|
||||
// we have no buffered data; determine
|
||||
// whether or not to buffer or call
|
||||
// the underlying reader directly
|
||||
if len(b) >= cap(r.data) {
|
||||
n, r.state = r.r.Read(b)
|
||||
} else {
|
||||
r.more()
|
||||
n = copy(b, r.data)
|
||||
r.n = n
|
||||
}
|
||||
if n == 0 {
|
||||
return 0, r.err()
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// ReadFull attempts to read len(b) bytes into
|
||||
// 'b'. It returns the number of bytes read into
|
||||
// 'b', and an error if it does not return len(b).
|
||||
// EOF is considered an unexpected error.
|
||||
func (r *Reader) ReadFull(b []byte) (int, error) {
|
||||
var n int // read into b
|
||||
var nn int // scratch
|
||||
l := len(b)
|
||||
// either read buffered data,
|
||||
// or read directly for the underlying
|
||||
// buffer, or fetch more buffered data.
|
||||
for n < l && r.state == nil {
|
||||
if r.buffered() != 0 {
|
||||
nn = copy(b[n:], r.data[r.n:])
|
||||
n += nn
|
||||
r.n += nn
|
||||
} else if l-n > cap(r.data) {
|
||||
nn, r.state = r.r.Read(b[n:])
|
||||
n += nn
|
||||
} else {
|
||||
r.more()
|
||||
}
|
||||
}
|
||||
if n < l {
|
||||
return n, r.noEOF()
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// ReadByte implements `io.ByteReader`
|
||||
func (r *Reader) ReadByte() (byte, error) {
|
||||
for r.buffered() < 1 && r.state == nil {
|
||||
r.more()
|
||||
}
|
||||
if r.buffered() < 1 {
|
||||
return 0, r.err()
|
||||
}
|
||||
b := r.data[r.n]
|
||||
r.n++
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// WriteTo implements `io.WriterTo`
|
||||
func (r *Reader) WriteTo(w io.Writer) (int64, error) {
|
||||
var (
|
||||
i int64
|
||||
ii int
|
||||
err error
|
||||
)
|
||||
// first, clear buffer
|
||||
if r.buffered() > 0 {
|
||||
ii, err = w.Write(r.data[r.n:])
|
||||
i += int64(ii)
|
||||
if err != nil {
|
||||
return i, err
|
||||
}
|
||||
r.data = r.data[0:0]
|
||||
r.n = 0
|
||||
}
|
||||
for r.state == nil {
|
||||
// here we just do
|
||||
// 1:1 reads and writes
|
||||
r.more()
|
||||
if r.buffered() > 0 {
|
||||
ii, err = w.Write(r.data)
|
||||
i += int64(ii)
|
||||
if err != nil {
|
||||
return i, err
|
||||
}
|
||||
r.data = r.data[0:0]
|
||||
r.n = 0
|
||||
}
|
||||
}
|
||||
if r.state != io.EOF {
|
||||
return i, r.err()
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func min(a int, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func max(a int, b int) int {
|
||||
if a < b {
|
||||
return b
|
||||
}
|
||||
return a
|
||||
}
|
398
vendor/github.com/philhofer/fwd/reader_test.go
generated
vendored
Normal file
398
vendor/github.com/philhofer/fwd/reader_test.go
generated
vendored
Normal file
@ -0,0 +1,398 @@
|
||||
package fwd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// partialReader reads into only
|
||||
// part of the supplied byte slice
|
||||
// to the underlying reader
|
||||
type partialReader struct {
|
||||
r io.Reader
|
||||
}
|
||||
|
||||
func (p partialReader) Read(b []byte) (int, error) {
|
||||
n := max(1, rand.Intn(len(b)))
|
||||
return p.r.Read(b[:n])
|
||||
}
|
||||
|
||||
func randomBts(sz int) []byte {
|
||||
o := make([]byte, sz)
|
||||
for i := 0; i < len(o); i += 8 {
|
||||
j := (*int64)(unsafe.Pointer(&o[i]))
|
||||
*j = rand.Int63()
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func TestRead(t *testing.T) {
|
||||
bts := randomBts(512)
|
||||
|
||||
// make the buffer much
|
||||
// smaller than the underlying
|
||||
// bytes to incur multiple fills
|
||||
rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 128)
|
||||
|
||||
if rd.BufferSize() != cap(rd.data) {
|
||||
t.Errorf("BufferSize() returned %d; should return %d", rd.BufferSize(), cap(rd.data))
|
||||
}
|
||||
|
||||
// starting Buffered() should be 0
|
||||
if rd.Buffered() != 0 {
|
||||
t.Errorf("Buffered() should return 0 at initialization; got %d", rd.Buffered())
|
||||
}
|
||||
|
||||
some := make([]byte, 32)
|
||||
n, err := rd.Read(some)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n == 0 {
|
||||
t.Fatal("read 0 bytes w/ a non-nil error!")
|
||||
}
|
||||
some = some[:n]
|
||||
|
||||
more := make([]byte, 64)
|
||||
j, err := rd.Read(more)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if j == 0 {
|
||||
t.Fatal("read 0 bytes w/ a non-nil error")
|
||||
}
|
||||
more = more[:j]
|
||||
|
||||
out, err := ioutil.ReadAll(rd)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
all := append(some, more...)
|
||||
all = append(all, out...)
|
||||
|
||||
if !bytes.Equal(bts, all) {
|
||||
t.Errorf("bytes not equal; %d bytes in and %d bytes out", len(bts), len(out))
|
||||
}
|
||||
|
||||
// test filling out of the underlying reader
|
||||
big := randomBts(1 << 21)
|
||||
rd = NewReaderSize(partialReader{bytes.NewReader(big)}, 2048)
|
||||
buf := make([]byte, 3100)
|
||||
|
||||
n, err = rd.ReadFull(buf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != 3100 {
|
||||
t.Errorf("expected 3100 bytes read by ReadFull; got %d", n)
|
||||
}
|
||||
if !bytes.Equal(buf[:n], big[:n]) {
|
||||
t.Error("data parity")
|
||||
}
|
||||
rest := make([]byte, (1<<21)-3100)
|
||||
n, err = io.ReadFull(rd, rest)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != len(rest) {
|
||||
t.Errorf("expected %d bytes read by io.ReadFull; got %d", len(rest), n)
|
||||
}
|
||||
if !bytes.Equal(append(buf, rest...), big) {
|
||||
t.Fatal("data parity")
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadByte(t *testing.T) {
|
||||
bts := randomBts(512)
|
||||
rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 98)
|
||||
|
||||
var (
|
||||
err error
|
||||
i int
|
||||
b byte
|
||||
)
|
||||
|
||||
// scan through the whole
|
||||
// array byte-by-byte
|
||||
for err != io.EOF {
|
||||
b, err = rd.ReadByte()
|
||||
if err == nil {
|
||||
if b != bts[i] {
|
||||
t.Fatalf("offset %d: %d in; %d out", i, b, bts[i])
|
||||
}
|
||||
}
|
||||
i++
|
||||
}
|
||||
if err != io.EOF {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSkipNoSeek(t *testing.T) {
|
||||
bts := randomBts(1024)
|
||||
rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 200)
|
||||
|
||||
n, err := rd.Skip(512)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != 512 {
|
||||
t.Fatalf("Skip() returned a nil error, but skipped %d bytes instead of %d", n, 512)
|
||||
}
|
||||
|
||||
var b byte
|
||||
b, err = rd.ReadByte()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if b != bts[512] {
|
||||
t.Fatalf("at index %d: %d in; %d out", 512, bts[512], b)
|
||||
}
|
||||
|
||||
n, err = rd.Skip(10)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != 10 {
|
||||
t.Fatalf("Skip() returned a nil error, but skipped %d bytes instead of %d", n, 10)
|
||||
}
|
||||
|
||||
// now try to skip past the end
|
||||
rd = NewReaderSize(partialReader{bytes.NewReader(bts)}, 200)
|
||||
|
||||
n, err = rd.Skip(2000)
|
||||
if err != io.ErrUnexpectedEOF {
|
||||
t.Fatalf("expected error %q; got %q", io.EOF, err)
|
||||
}
|
||||
if n != 1024 {
|
||||
t.Fatalf("expected to skip only 1024 bytes; skipped %d", n)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSkipSeek(t *testing.T) {
|
||||
bts := randomBts(1024)
|
||||
|
||||
// bytes.Reader implements io.Seeker
|
||||
rd := NewReaderSize(bytes.NewReader(bts), 200)
|
||||
|
||||
n, err := rd.Skip(512)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != 512 {
|
||||
t.Fatalf("Skip() returned a nil error, but skipped %d bytes instead of %d", n, 512)
|
||||
}
|
||||
|
||||
var b byte
|
||||
b, err = rd.ReadByte()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if b != bts[512] {
|
||||
t.Fatalf("at index %d: %d in; %d out", 512, bts[512], b)
|
||||
}
|
||||
|
||||
n, err = rd.Skip(10)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != 10 {
|
||||
t.Fatalf("Skip() returned a nil error, but skipped %d bytes instead of %d", n, 10)
|
||||
}
|
||||
|
||||
// now try to skip past the end
|
||||
rd.Reset(bytes.NewReader(bts))
|
||||
|
||||
// because of how bytes.Reader
|
||||
// implements Seek, this should
|
||||
// return (2000, nil)
|
||||
n, err = rd.Skip(2000)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != 2000 {
|
||||
t.Fatalf("should have returned %d bytes; returned %d", 2000, n)
|
||||
}
|
||||
|
||||
// the next call to Read()
|
||||
// should return io.EOF
|
||||
n, err = rd.Read([]byte{0, 0, 0})
|
||||
if err != io.EOF {
|
||||
t.Errorf("expected %q; got %q", io.EOF, err)
|
||||
}
|
||||
if n != 0 {
|
||||
t.Errorf("expected 0 bytes read; got %d", n)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestPeek(t *testing.T) {
|
||||
bts := randomBts(1024)
|
||||
rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 200)
|
||||
|
||||
// first, a peek < buffer size
|
||||
var (
|
||||
peek []byte
|
||||
err error
|
||||
)
|
||||
peek, err = rd.Peek(100)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(peek) != 100 {
|
||||
t.Fatalf("asked for %d bytes; got %d", 100, len(peek))
|
||||
}
|
||||
if !bytes.Equal(peek, bts[:100]) {
|
||||
t.Fatal("peeked bytes not equal")
|
||||
}
|
||||
|
||||
// now, a peek > buffer size
|
||||
peek, err = rd.Peek(256)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(peek) != 256 {
|
||||
t.Fatalf("asked for %d bytes; got %d", 100, len(peek))
|
||||
}
|
||||
if !bytes.Equal(peek, bts[:256]) {
|
||||
t.Fatal("peeked bytes not equal")
|
||||
}
|
||||
|
||||
// now try to peek past EOF
|
||||
peek, err = rd.Peek(2048)
|
||||
if err != io.EOF {
|
||||
t.Fatalf("expected error %q; got %q", io.EOF, err)
|
||||
}
|
||||
if len(peek) != 1024 {
|
||||
t.Fatalf("expected %d bytes peek-able; got %d", 1024, len(peek))
|
||||
}
|
||||
}
|
||||
|
||||
func TestNext(t *testing.T) {
|
||||
size := 1024
|
||||
bts := randomBts(size)
|
||||
rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 200)
|
||||
|
||||
chunksize := 256
|
||||
chunks := size / chunksize
|
||||
|
||||
for i := 0; i < chunks; i++ {
|
||||
out, err := rd.Next(chunksize)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
start := chunksize * i
|
||||
if !bytes.Equal(bts[start:start+chunksize], out) {
|
||||
t.Fatalf("chunk %d: chunks not equal", i+1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteTo(t *testing.T) {
|
||||
bts := randomBts(2048)
|
||||
rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 200)
|
||||
|
||||
// cause the buffer
|
||||
// to fill a little, just
|
||||
// to complicate things
|
||||
rd.Peek(25)
|
||||
|
||||
var out bytes.Buffer
|
||||
n, err := rd.WriteTo(&out)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != 2048 {
|
||||
t.Fatalf("should have written %d bytes; wrote %d", 2048, n)
|
||||
}
|
||||
if !bytes.Equal(out.Bytes(), bts) {
|
||||
t.Fatal("bytes not equal")
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadFull(t *testing.T) {
|
||||
bts := randomBts(1024)
|
||||
rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 256)
|
||||
|
||||
// try to ReadFull() the whole thing
|
||||
out := make([]byte, 1024)
|
||||
n, err := rd.ReadFull(out)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != 1024 {
|
||||
t.Fatalf("expected to read %d bytes; read %d", 1024, n)
|
||||
}
|
||||
if !bytes.Equal(bts, out) {
|
||||
t.Fatal("bytes not equal")
|
||||
}
|
||||
|
||||
// we've read everything; this should EOF
|
||||
n, err = rd.Read(out)
|
||||
if err != io.EOF {
|
||||
t.Fatalf("expected %q; got %q", io.EOF, err)
|
||||
}
|
||||
|
||||
rd.Reset(partialReader{bytes.NewReader(bts)})
|
||||
|
||||
// now try to read *past* EOF
|
||||
out = make([]byte, 1500)
|
||||
n, err = rd.ReadFull(out)
|
||||
if err != io.ErrUnexpectedEOF {
|
||||
t.Fatalf("expected error %q; got %q", io.EOF, err)
|
||||
}
|
||||
if n != 1024 {
|
||||
t.Fatalf("expected to read %d bytes; read %d", 1024, n)
|
||||
}
|
||||
}
|
||||
|
||||
type readCounter struct {
|
||||
r io.Reader
|
||||
count int
|
||||
}
|
||||
|
||||
func (r *readCounter) Read(p []byte) (int, error) {
|
||||
r.count++
|
||||
return r.r.Read(p)
|
||||
}
|
||||
|
||||
func TestReadFullPerf(t *testing.T) {
|
||||
const size = 1 << 22
|
||||
data := randomBts(size)
|
||||
|
||||
c := readCounter{
|
||||
r: &partialReader{
|
||||
r: bytes.NewReader(data),
|
||||
},
|
||||
}
|
||||
|
||||
r := NewReader(&c)
|
||||
|
||||
const segments = 4
|
||||
out := make([]byte, size/segments)
|
||||
|
||||
for i := 0; i < segments; i++ {
|
||||
// force an unaligned read
|
||||
_, err := r.Peek(5)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
n, err := r.ReadFull(out)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != size/segments {
|
||||
t.Fatalf("read %d bytes, not %d", n, size/segments)
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("called Read() on the underlying reader %d times to fill %d buffers", c.count, size/r.BufferSize())
|
||||
}
|
224
vendor/github.com/philhofer/fwd/writer.go
generated
vendored
Normal file
224
vendor/github.com/philhofer/fwd/writer.go
generated
vendored
Normal file
@ -0,0 +1,224 @@
|
||||
package fwd
|
||||
|
||||
import "io"
|
||||
|
||||
const (
|
||||
// DefaultWriterSize is the
|
||||
// default write buffer size.
|
||||
DefaultWriterSize = 2048
|
||||
|
||||
minWriterSize = minReaderSize
|
||||
)
|
||||
|
||||
// Writer is a buffered writer
|
||||
type Writer struct {
|
||||
w io.Writer // writer
|
||||
buf []byte // 0:len(buf) is bufered data
|
||||
}
|
||||
|
||||
// NewWriter returns a new writer
|
||||
// that writes to 'w' and has a buffer
|
||||
// that is `DefaultWriterSize` bytes.
|
||||
func NewWriter(w io.Writer) *Writer {
|
||||
if wr, ok := w.(*Writer); ok {
|
||||
return wr
|
||||
}
|
||||
return &Writer{
|
||||
w: w,
|
||||
buf: make([]byte, 0, DefaultWriterSize),
|
||||
}
|
||||
}
|
||||
|
||||
// NewWriterSize returns a new writer
|
||||
// that writes to 'w' and has a buffer
|
||||
// that is 'size' bytes.
|
||||
func NewWriterSize(w io.Writer, size int) *Writer {
|
||||
if wr, ok := w.(*Writer); ok && cap(wr.buf) >= size {
|
||||
return wr
|
||||
}
|
||||
return &Writer{
|
||||
w: w,
|
||||
buf: make([]byte, 0, max(size, minWriterSize)),
|
||||
}
|
||||
}
|
||||
|
||||
// Buffered returns the number of buffered bytes
|
||||
// in the reader.
|
||||
func (w *Writer) Buffered() int { return len(w.buf) }
|
||||
|
||||
// BufferSize returns the maximum size of the buffer.
|
||||
func (w *Writer) BufferSize() int { return cap(w.buf) }
|
||||
|
||||
// Flush flushes any buffered bytes
|
||||
// to the underlying writer.
|
||||
func (w *Writer) Flush() error {
|
||||
l := len(w.buf)
|
||||
if l > 0 {
|
||||
n, err := w.w.Write(w.buf)
|
||||
|
||||
// if we didn't write the whole
|
||||
// thing, copy the unwritten
|
||||
// bytes to the beginnning of the
|
||||
// buffer.
|
||||
if n < l && n > 0 {
|
||||
w.pushback(n)
|
||||
if err == nil {
|
||||
err = io.ErrShortWrite
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.buf = w.buf[:0]
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write implements `io.Writer`
|
||||
func (w *Writer) Write(p []byte) (int, error) {
|
||||
c, l, ln := cap(w.buf), len(w.buf), len(p)
|
||||
avail := c - l
|
||||
|
||||
// requires flush
|
||||
if avail < ln {
|
||||
if err := w.Flush(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
l = len(w.buf)
|
||||
}
|
||||
// too big to fit in buffer;
|
||||
// write directly to w.w
|
||||
if c < ln {
|
||||
return w.w.Write(p)
|
||||
}
|
||||
|
||||
// grow buf slice; copy; return
|
||||
w.buf = w.buf[:l+ln]
|
||||
return copy(w.buf[l:], p), nil
|
||||
}
|
||||
|
||||
// WriteString is analogous to Write, but it takes a string.
|
||||
func (w *Writer) WriteString(s string) (int, error) {
|
||||
c, l, ln := cap(w.buf), len(w.buf), len(s)
|
||||
avail := c - l
|
||||
|
||||
// requires flush
|
||||
if avail < ln {
|
||||
if err := w.Flush(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
l = len(w.buf)
|
||||
}
|
||||
// too big to fit in buffer;
|
||||
// write directly to w.w
|
||||
//
|
||||
// yes, this is unsafe. *but*
|
||||
// io.Writer is not allowed
|
||||
// to mutate its input or
|
||||
// maintain a reference to it,
|
||||
// per the spec in package io.
|
||||
//
|
||||
// plus, if the string is really
|
||||
// too big to fit in the buffer, then
|
||||
// creating a copy to write it is
|
||||
// expensive (and, strictly speaking,
|
||||
// unnecessary)
|
||||
if c < ln {
|
||||
return w.w.Write(unsafestr(s))
|
||||
}
|
||||
|
||||
// grow buf slice; copy; return
|
||||
w.buf = w.buf[:l+ln]
|
||||
return copy(w.buf[l:], s), nil
|
||||
}
|
||||
|
||||
// WriteByte implements `io.ByteWriter`
|
||||
func (w *Writer) WriteByte(b byte) error {
|
||||
if len(w.buf) == cap(w.buf) {
|
||||
if err := w.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.buf = append(w.buf, b)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Next returns the next 'n' free bytes
|
||||
// in the write buffer, flushing the writer
|
||||
// as necessary. Next will return `io.ErrShortBuffer`
|
||||
// if 'n' is greater than the size of the write buffer.
|
||||
// Calls to 'next' increment the write position by
|
||||
// the size of the returned buffer.
|
||||
func (w *Writer) Next(n int) ([]byte, error) {
|
||||
c, l := cap(w.buf), len(w.buf)
|
||||
if n > c {
|
||||
return nil, io.ErrShortBuffer
|
||||
}
|
||||
avail := c - l
|
||||
if avail < n {
|
||||
if err := w.Flush(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
l = len(w.buf)
|
||||
}
|
||||
w.buf = w.buf[:l+n]
|
||||
return w.buf[l:], nil
|
||||
}
|
||||
|
||||
// take the bytes from w.buf[n:len(w.buf)]
|
||||
// and put them at the beginning of w.buf,
|
||||
// and resize to the length of the copied segment.
|
||||
func (w *Writer) pushback(n int) {
|
||||
w.buf = w.buf[:copy(w.buf, w.buf[n:])]
|
||||
}
|
||||
|
||||
// ReadFrom implements `io.ReaderFrom`
|
||||
func (w *Writer) ReadFrom(r io.Reader) (int64, error) {
|
||||
// anticipatory flush
|
||||
if err := w.Flush(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
w.buf = w.buf[0:cap(w.buf)] // expand buffer
|
||||
|
||||
var nn int64 // written
|
||||
var err error // error
|
||||
var x int // read
|
||||
|
||||
// 1:1 reads and writes
|
||||
for err == nil {
|
||||
x, err = r.Read(w.buf)
|
||||
if x > 0 {
|
||||
n, werr := w.w.Write(w.buf[:x])
|
||||
nn += int64(n)
|
||||
|
||||
if err != nil {
|
||||
if n < x && n > 0 {
|
||||
w.pushback(n - x)
|
||||
}
|
||||
return nn, werr
|
||||
}
|
||||
if n < x {
|
||||
w.pushback(n - x)
|
||||
return nn, io.ErrShortWrite
|
||||
}
|
||||
} else if err == nil {
|
||||
err = io.ErrNoProgress
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != io.EOF {
|
||||
return nn, err
|
||||
}
|
||||
|
||||
// we only clear here
|
||||
// because we are sure
|
||||
// the writes have
|
||||
// succeeded. otherwise,
|
||||
// we retain the data in case
|
||||
// future writes succeed.
|
||||
w.buf = w.buf[0:0]
|
||||
|
||||
return nn, nil
|
||||
}
|
5
vendor/github.com/philhofer/fwd/writer_appengine.go
generated
vendored
Normal file
5
vendor/github.com/philhofer/fwd/writer_appengine.go
generated
vendored
Normal file
@ -0,0 +1,5 @@
|
||||
// +build appengine
|
||||
|
||||
package fwd
|
||||
|
||||
func unsafestr(s string) []byte { return []byte(s) }
|
239
vendor/github.com/philhofer/fwd/writer_test.go
generated
vendored
Normal file
239
vendor/github.com/philhofer/fwd/writer_test.go
generated
vendored
Normal file
@ -0,0 +1,239 @@
|
||||
package fwd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"math/rand"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type chunkedWriter struct {
|
||||
w *Writer
|
||||
}
|
||||
|
||||
// writes 'p' in randomly-sized chunks
|
||||
func (c chunkedWriter) Write(p []byte) (int, error) {
|
||||
l := len(p)
|
||||
n := 0
|
||||
for n < l {
|
||||
amt := max(rand.Intn(l-n), 1) // number of bytes to write; at least 1
|
||||
nn, err := c.w.Write(p[n : n+amt]) //
|
||||
n += nn
|
||||
if err == nil && nn < amt {
|
||||
err = io.ErrShortWrite
|
||||
}
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// analogous to Write(), but w/ str
|
||||
func (c chunkedWriter) WriteString(s string) (int, error) {
|
||||
l := len(s)
|
||||
n := 0
|
||||
for n < l {
|
||||
amt := max(rand.Intn(l-n), 1) // number of bytes to write; at least 1
|
||||
nn, err := c.w.WriteString(s[n : n+amt]) //
|
||||
n += nn
|
||||
if err == nil && nn < amt {
|
||||
err = io.ErrShortWrite
|
||||
}
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// writes via random calls to Next()
|
||||
type nextWriter struct {
|
||||
wr *Writer
|
||||
}
|
||||
|
||||
func (c nextWriter) Write(p []byte) (int, error) {
|
||||
l := len(p)
|
||||
n := 0
|
||||
for n < l {
|
||||
amt := max(rand.Intn(l-n), 1) // at least 1 byte
|
||||
fwd, err := c.wr.Next(amt) // get next (amt) bytes
|
||||
if err != nil {
|
||||
|
||||
// this may happen occasionally
|
||||
if err == io.ErrShortBuffer {
|
||||
if cap(c.wr.buf) >= amt {
|
||||
panic("bad io.ErrShortBuffer")
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
return n, err
|
||||
}
|
||||
if len(fwd) != amt {
|
||||
panic("bad Next() len")
|
||||
}
|
||||
n += copy(fwd, p[n:])
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func TestWrite(t *testing.T) {
|
||||
nbts := 4096
|
||||
bts := randomBts(nbts)
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriterSize(&buf, 512)
|
||||
|
||||
if wr.BufferSize() != 512 {
|
||||
t.Fatalf("expected BufferSize() to be %d; found %d", 512, wr.BufferSize())
|
||||
}
|
||||
|
||||
cwr := chunkedWriter{wr}
|
||||
nb, err := cwr.Write(bts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if nb != nbts {
|
||||
t.Fatalf("expected to write %d bytes; wrote %d bytes", nbts, nb)
|
||||
}
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if wr.Buffered() != 0 {
|
||||
t.Fatalf("expected 0 buffered bytes; found %d", wr.Buffered())
|
||||
}
|
||||
|
||||
if buf.Len() != nbts {
|
||||
t.Fatalf("wrote %d bytes, but buffer is %d bytes long", nbts, buf.Len())
|
||||
}
|
||||
if !bytes.Equal(bts, buf.Bytes()) {
|
||||
t.Fatal("buf.Bytes() is not the same as the input bytes")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteString(t *testing.T) {
|
||||
nbts := 3998
|
||||
str := string(randomBts(nbts))
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriterSize(&buf, 1137)
|
||||
|
||||
if wr.BufferSize() != 1137 {
|
||||
t.Fatalf("expected BufferSize() to return %d; returned %d", 1137, wr.BufferSize())
|
||||
}
|
||||
|
||||
cwr := chunkedWriter{wr}
|
||||
nb, err := cwr.WriteString(str)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if nb != nbts {
|
||||
t.Fatalf("expected to write %d bytes; wrote %d bytes", nbts, nb)
|
||||
}
|
||||
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if wr.Buffered() != 0 {
|
||||
t.Fatalf("expected 0 buffered bytes; found %d", wr.Buffered())
|
||||
}
|
||||
|
||||
if buf.Len() != nbts {
|
||||
t.Fatalf("wrote %d bytes, buf buffer is %d bytes long", nbts, buf.Len())
|
||||
}
|
||||
if buf.String() != str {
|
||||
t.Fatal("buf.String() is not the same as input string")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteByte(t *testing.T) {
|
||||
nbts := 3200
|
||||
bts := randomBts(nbts)
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriter(&buf)
|
||||
|
||||
if wr.BufferSize() != DefaultWriterSize {
|
||||
t.Fatalf("expected BufferSize() to return %d; returned %d", DefaultWriterSize, wr.BufferSize())
|
||||
}
|
||||
|
||||
// write byte-by-byte
|
||||
for _, b := range bts {
|
||||
if err := wr.WriteByte(b); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
err := wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if buf.Len() != nbts {
|
||||
t.Fatalf("expected buf.Len() to be %d; got %d", nbts, buf.Len())
|
||||
}
|
||||
|
||||
if !bytes.Equal(buf.Bytes(), bts) {
|
||||
t.Fatal("buf.Bytes() and input are not equal")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriterNext(t *testing.T) {
|
||||
nbts := 1871
|
||||
bts := randomBts(nbts)
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriterSize(&buf, 500)
|
||||
nwr := nextWriter{wr}
|
||||
|
||||
nb, err := nwr.Write(bts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if nb != nbts {
|
||||
t.Fatalf("expected to write %d bytes; wrote %d", nbts, nb)
|
||||
}
|
||||
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if buf.Len() != nbts {
|
||||
t.Fatalf("expected buf.Len() to be %d; got %d", nbts, buf.Len())
|
||||
}
|
||||
|
||||
if !bytes.Equal(buf.Bytes(), bts) {
|
||||
t.Fatal("buf.Bytes() and input are not equal")
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadFrom(t *testing.T) {
|
||||
nbts := 2139
|
||||
bts := randomBts(nbts)
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriterSize(&buf, 987)
|
||||
|
||||
rd := partialReader{bytes.NewReader(bts)}
|
||||
|
||||
nb, err := wr.ReadFrom(rd)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if nb != int64(nbts) {
|
||||
t.Fatalf("expeted to write %d bytes; wrote %d", nbts, nb)
|
||||
}
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if buf.Len() != nbts {
|
||||
t.Fatalf("expected buf.Len() to be %d; got %d", nbts, buf.Len())
|
||||
}
|
||||
if !bytes.Equal(buf.Bytes(), bts) {
|
||||
t.Fatal("buf.Bytes() and input are not equal")
|
||||
}
|
||||
|
||||
}
|
18
vendor/github.com/philhofer/fwd/writer_unsafe.go
generated
vendored
Normal file
18
vendor/github.com/philhofer/fwd/writer_unsafe.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
// +build !appengine
|
||||
|
||||
package fwd
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// unsafe cast string as []byte
|
||||
func unsafestr(b string) []byte {
|
||||
l := len(b)
|
||||
return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
|
||||
Len: l,
|
||||
Cap: l,
|
||||
Data: (*reflect.StringHeader)(unsafe.Pointer(&b)).Data,
|
||||
}))
|
||||
}
|
7
vendor/github.com/tinylib/msgp/.gitignore
generated
vendored
Normal file
7
vendor/github.com/tinylib/msgp/.gitignore
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
_generated/generated.go
|
||||
_generated/generated_test.go
|
||||
_generated/*_gen.go
|
||||
_generated/*_gen_test.go
|
||||
msgp/defgen_test.go
|
||||
msgp/cover.out
|
||||
*~
|
11
vendor/github.com/tinylib/msgp/.travis.yml
generated
vendored
Normal file
11
vendor/github.com/tinylib/msgp/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.7
|
||||
- tip
|
||||
|
||||
env:
|
||||
- GIMME_ARCH=amd64
|
||||
- GIMME_ARCH=386
|
||||
|
||||
script: "make travis"
|
8
vendor/github.com/tinylib/msgp/LICENSE
generated
vendored
Normal file
8
vendor/github.com/tinylib/msgp/LICENSE
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
Copyright (c) 2014 Philip Hofer
|
||||
Portions Copyright (c) 2009 The Go Authors (license at http://golang.org) where indicated
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
55
vendor/github.com/tinylib/msgp/Makefile
generated
vendored
Normal file
55
vendor/github.com/tinylib/msgp/Makefile
generated
vendored
Normal file
@ -0,0 +1,55 @@
|
||||
|
||||
# NOTE: This Makefile is only necessary if you
|
||||
# plan on developing the msgp tool and library.
|
||||
# Installation can still be performed with a
|
||||
# normal `go install`.
|
||||
|
||||
# generated integration test files
|
||||
GGEN = ./_generated/generated.go ./_generated/generated_test.go
|
||||
# generated unit test files
|
||||
MGEN = ./msgp/defgen_test.go
|
||||
|
||||
SHELL := /bin/bash
|
||||
|
||||
BIN = $(GOBIN)/msgp
|
||||
|
||||
.PHONY: clean wipe install get-deps bench all
|
||||
|
||||
$(BIN): */*.go
|
||||
@go install ./...
|
||||
|
||||
install: $(BIN)
|
||||
|
||||
$(GGEN): ./_generated/def.go
|
||||
go generate ./_generated
|
||||
|
||||
$(MGEN): ./msgp/defs_test.go
|
||||
go generate ./msgp
|
||||
|
||||
test: all
|
||||
go test -v ./msgp
|
||||
go test -v ./_generated
|
||||
|
||||
bench: all
|
||||
go test -bench . ./msgp
|
||||
go test -bench . ./_generated
|
||||
|
||||
clean:
|
||||
$(RM) $(GGEN) $(MGEN)
|
||||
|
||||
wipe: clean
|
||||
$(RM) $(BIN)
|
||||
|
||||
get-deps:
|
||||
go get -d -t ./...
|
||||
|
||||
all: install $(GGEN) $(MGEN)
|
||||
|
||||
# travis CI enters here
|
||||
travis:
|
||||
go get -d -t ./...
|
||||
go build -o "$${GOPATH%%:*}/bin/msgp" .
|
||||
go generate ./msgp
|
||||
go generate ./_generated
|
||||
go test ./msgp
|
||||
go test ./_generated
|
102
vendor/github.com/tinylib/msgp/README.md
generated
vendored
Normal file
102
vendor/github.com/tinylib/msgp/README.md
generated
vendored
Normal file
@ -0,0 +1,102 @@
|
||||
MessagePack Code Generator [![Build Status](https://travis-ci.org/tinylib/msgp.svg?branch=master)](https://travis-ci.org/tinylib/msgp)
|
||||
=======
|
||||
|
||||
This is a code generation tool and serialization library for [MessagePack](http://msgpack.org). You can read more about MessagePack [in the wiki](http://github.com/tinylib/msgp/wiki), or at [msgpack.org](http://msgpack.org).
|
||||
|
||||
### Why?
|
||||
|
||||
- Use Go as your schema language
|
||||
- Performance
|
||||
- [JSON interop](http://godoc.org/github.com/tinylib/msgp/msgp#CopyToJSON)
|
||||
- [User-defined extensions](http://github.com/tinylib/msgp/wiki/Using-Extensions)
|
||||
- Type safety
|
||||
- Encoding flexibility
|
||||
|
||||
### Quickstart
|
||||
|
||||
In a source file, include the following directive:
|
||||
|
||||
```go
|
||||
//go:generate msgp
|
||||
```
|
||||
|
||||
The `msgp` command will generate serialization methods for all exported type declarations in the file.
|
||||
|
||||
You can [read more about the code generation options here](http://github.com/tinylib/msgp/wiki/Using-the-Code-Generator).
|
||||
|
||||
### Use
|
||||
|
||||
Field names can be set in much the same way as the `encoding/json` package. For example:
|
||||
|
||||
```go
|
||||
type Person struct {
|
||||
Name string `msg:"name"`
|
||||
Address string `msg:"address"`
|
||||
Age int `msg:"age"`
|
||||
Hidden string `msg:"-"` // this field is ignored
|
||||
unexported bool // this field is also ignored
|
||||
}
|
||||
```
|
||||
|
||||
By default, the code generator will satisfy `msgp.Sizer`, `msgp.Encodable`, `msgp.Decodable`,
|
||||
`msgp.Marshaler`, and `msgp.Unmarshaler`. Carefully-designed applications can use these methods to do
|
||||
marshalling/unmarshalling with zero heap allocations.
|
||||
|
||||
While `msgp.Marshaler` and `msgp.Unmarshaler` are quite similar to the standard library's
|
||||
`json.Marshaler` and `json.Unmarshaler`, `msgp.Encodable` and `msgp.Decodable` are useful for
|
||||
stream serialization. (`*msgp.Writer` and `*msgp.Reader` are essentially protocol-aware versions
|
||||
of `*bufio.Writer` and `*bufio.Reader`, respectively.)
|
||||
|
||||
### Features
|
||||
|
||||
- Extremely fast generated code
|
||||
- Test and benchmark generation
|
||||
- JSON interoperability (see `msgp.CopyToJSON() and msgp.UnmarshalAsJSON()`)
|
||||
- Support for complex type declarations
|
||||
- Native support for Go's `time.Time`, `complex64`, and `complex128` types
|
||||
- Generation of both `[]byte`-oriented and `io.Reader/io.Writer`-oriented methods
|
||||
- Support for arbitrary type system extensions
|
||||
- [Preprocessor directives](http://github.com/tinylib/msgp/wiki/Preprocessor-Directives)
|
||||
- File-based dependency model means fast codegen regardless of source tree size.
|
||||
|
||||
Consider the following:
|
||||
```go
|
||||
const Eight = 8
|
||||
type MyInt int
|
||||
type Data []byte
|
||||
|
||||
type Struct struct {
|
||||
Which map[string]*MyInt `msg:"which"`
|
||||
Other Data `msg:"other"`
|
||||
Nums [Eight]float64 `msg:"nums"`
|
||||
}
|
||||
```
|
||||
As long as the declarations of `MyInt` and `Data` are in the same file as `Struct`, the parser will determine that the type information for `MyInt` and `Data` can be passed into the definition of `Struct` before its methods are generated.
|
||||
|
||||
#### Extensions
|
||||
|
||||
MessagePack supports defining your own types through "extensions," which are just a tuple of
|
||||
the data "type" (`int8`) and the raw binary. You [can see a worked example in the wiki.](http://github.com/tinylib/msgp/wiki/Using-Extensions)
|
||||
|
||||
### Status
|
||||
|
||||
Mostly stable, in that no breaking changes have been made to the `/msgp` library in more than a year. Newer versions
|
||||
of the code may generate different code than older versions for performance reasons. I (@philhofer) am aware of a
|
||||
number of stability-critical commercial applications that use this code with good results. But, caveat emptor.
|
||||
|
||||
You can read more about how `msgp` maps MessagePack types onto Go types [in the wiki](http://github.com/tinylib/msgp/wiki).
|
||||
|
||||
Here some of the known limitations/restrictions:
|
||||
|
||||
- Identifiers from outside the processed source file are assumed (optimistically) to satisfy the generator's interfaces. If this isn't the case, your code will fail to compile.
|
||||
- Like most serializers, `chan` and `func` fields are ignored, as well as non-exported fields.
|
||||
- Encoding of `interface{}` is limited to built-ins or types that have explicit encoding methods.
|
||||
- _Maps must have `string` keys._ This is intentional (as it preserves JSON interop.) Although non-string map keys are not forbidden by the MessagePack standard, many serializers impose this restriction. (It also means *any* well-formed `struct` can be de-serialized into a `map[string]interface{}`.) The only exception to this rule is that the deserializers will allow you to read map keys encoded as `bin` types, due to the fact that some legacy encodings permitted this. (However, those values will still be cast to Go `string`s, and they will be converted to `str` types when re-encoded. It is the responsibility of the user to ensure that map keys are UTF-8 safe in this case.) The same rules hold true for JSON translation.
|
||||
|
||||
If the output compiles, then there's a pretty good chance things are fine. (Plus, we generate tests for you.) *Please, please, please* file an issue if you think the generator is writing broken code.
|
||||
|
||||
### Performance
|
||||
|
||||
If you like benchmarks, see [here](http://bravenewgeek.com/so-you-wanna-go-fast/) and [here](https://github.com/alecthomas/go_serialization_benchmarks).
|
||||
|
||||
As one might expect, the generated methods that deal with `[]byte` are faster for small objects, but the `io.Reader/Writer` methods are generally more memory-efficient (and, at some point, faster) for large (> 2KB) objects.
|
212
vendor/github.com/tinylib/msgp/_generated/def.go
generated
vendored
Normal file
212
vendor/github.com/tinylib/msgp/_generated/def.go
generated
vendored
Normal file
@ -0,0 +1,212 @@
|
||||
package _generated
|
||||
|
||||
import (
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
//go:generate msgp -o generated.go
|
||||
|
||||
// All of the struct
|
||||
// definitions in this
|
||||
// file are fed to the code
|
||||
// generator when `make test` is
|
||||
// called, followed by an
|
||||
// invocation of `go test -v` in this
|
||||
// directory. A simple way of testing
|
||||
// a struct definition is
|
||||
// by adding it to this file.
|
||||
|
||||
type Block [32]byte
|
||||
|
||||
// tests edge-cases with
|
||||
// compiling size compilation.
|
||||
type X struct {
|
||||
Values [32]byte // should compile to 32*msgp.ByteSize; encoded as Bin
|
||||
ValuesPtr *[32]byte // check (*)[:] deref
|
||||
More Block // should be identical to the above
|
||||
Others [][32]int32 // should compile to len(x.Others)*32*msgp.Int32Size
|
||||
Matrix [][]int32 // should not optimize
|
||||
ManyFixed []Fixed
|
||||
}
|
||||
|
||||
// test fixed-size struct
|
||||
// size compilation
|
||||
type Fixed struct {
|
||||
A float64
|
||||
B bool
|
||||
}
|
||||
|
||||
type TestType struct {
|
||||
F *float64 `msg:"float"`
|
||||
Els map[string]string `msg:"elements"`
|
||||
Obj struct { // test anonymous struct
|
||||
ValueA string `msg:"value_a"`
|
||||
ValueB []byte `msg:"value_b"`
|
||||
} `msg:"object"`
|
||||
Child *TestType `msg:"child"`
|
||||
Time time.Time `msg:"time"`
|
||||
Any interface{} `msg:"any"`
|
||||
Appended msgp.Raw `msg:"appended"`
|
||||
Num msgp.Number `msg:"num"`
|
||||
Slice1 []string
|
||||
Slice2 []string
|
||||
SlicePtr *[]string
|
||||
}
|
||||
|
||||
//msgp:tuple Object
|
||||
type Object struct {
|
||||
ObjectNo string `msg:"objno"`
|
||||
Slice1 []string `msg:"slice1"`
|
||||
Slice2 []string `msg:"slice2"`
|
||||
MapMap map[string]map[string]string
|
||||
}
|
||||
|
||||
//msgp:tuple TestBench
|
||||
|
||||
type TestBench struct {
|
||||
Name string
|
||||
BirthDay time.Time
|
||||
Phone string
|
||||
Siblings int
|
||||
Spouse bool
|
||||
Money float64
|
||||
}
|
||||
|
||||
//msgp:tuple TestFast
|
||||
|
||||
type TestFast struct {
|
||||
Lat, Long, Alt float64 // test inline decl
|
||||
Data []byte
|
||||
}
|
||||
|
||||
// Test nested aliases
|
||||
type FastAlias TestFast
|
||||
type AliasContainer struct {
|
||||
Fast FastAlias
|
||||
}
|
||||
|
||||
// Test dependency resolution
|
||||
type IntA int
|
||||
type IntB IntA
|
||||
type IntC IntB
|
||||
|
||||
type TestHidden struct {
|
||||
A string
|
||||
B []float64
|
||||
Bad func(string) bool // This results in a warning: field "Bad" unsupported
|
||||
}
|
||||
|
||||
type Embedded struct {
|
||||
*Embedded // test embedded field
|
||||
Children []Embedded
|
||||
PtrChildren []*Embedded
|
||||
Other string
|
||||
}
|
||||
|
||||
const eight = 8
|
||||
|
||||
type Things struct {
|
||||
Cmplx complex64 `msg:"complex"` // test slices
|
||||
Vals []int32 `msg:"values"`
|
||||
Arr [msgp.ExtensionPrefixSize]float64 `msg:"arr"` // test const array and *ast.SelectorExpr as array size
|
||||
Arr2 [4]float64 `msg:"arr2"` // test basic lit array
|
||||
Ext *msgp.RawExtension `msg:"ext,extension"` // test extension
|
||||
Oext msgp.RawExtension `msg:"oext,extension"` // test extension reference
|
||||
}
|
||||
|
||||
//msgp:shim SpecialID as:[]byte using:toBytes/fromBytes
|
||||
|
||||
type SpecialID string
|
||||
type TestObj struct{ ID1, ID2 SpecialID }
|
||||
|
||||
func toBytes(id SpecialID) []byte { return []byte(string(id)) }
|
||||
func fromBytes(id []byte) SpecialID { return SpecialID(string(id)) }
|
||||
|
||||
type MyEnum byte
|
||||
|
||||
const (
|
||||
A MyEnum = iota
|
||||
B
|
||||
C
|
||||
D
|
||||
invalid
|
||||
)
|
||||
|
||||
// test shim directive (below)
|
||||
|
||||
//msgp:shim MyEnum as:string using:(MyEnum).String/myenumStr
|
||||
|
||||
//msgp:shim *os.File as:string using:filetostr/filefromstr
|
||||
|
||||
func filetostr(f *os.File) string {
|
||||
return f.Name()
|
||||
}
|
||||
|
||||
func filefromstr(s string) *os.File {
|
||||
f, _ := os.Open(s)
|
||||
return f
|
||||
}
|
||||
|
||||
func (m MyEnum) String() string {
|
||||
switch m {
|
||||
case A:
|
||||
return "A"
|
||||
case B:
|
||||
return "B"
|
||||
case C:
|
||||
return "C"
|
||||
case D:
|
||||
return "D"
|
||||
default:
|
||||
return "<invalid>"
|
||||
}
|
||||
}
|
||||
|
||||
func myenumStr(s string) MyEnum {
|
||||
switch s {
|
||||
case "A":
|
||||
return A
|
||||
case "B":
|
||||
return B
|
||||
case "C":
|
||||
return C
|
||||
case "D":
|
||||
return D
|
||||
default:
|
||||
return invalid
|
||||
}
|
||||
}
|
||||
|
||||
// test pass-specific directive
|
||||
//msgp:decode ignore Insane
|
||||
|
||||
type Insane [3]map[string]struct{ A, B CustomInt }
|
||||
|
||||
type Custom struct {
|
||||
Bts CustomBytes `msg:"bts"`
|
||||
Mp map[string]*Embedded `msg:"mp"`
|
||||
Enums []MyEnum `msg:"enums"` // test explicit enum shim
|
||||
Some FileHandle `msg:file_handle`
|
||||
}
|
||||
|
||||
type Files []*os.File
|
||||
|
||||
type FileHandle struct {
|
||||
Relevent Files `msg:"files"`
|
||||
Name string `msg:"name"`
|
||||
}
|
||||
|
||||
type CustomInt int
|
||||
type CustomBytes []byte
|
||||
|
||||
type Wrapper struct {
|
||||
Tree *Tree
|
||||
}
|
||||
|
||||
type Tree struct {
|
||||
Children []Tree
|
||||
Element int
|
||||
Parent *Wrapper
|
||||
}
|
150
vendor/github.com/tinylib/msgp/_generated/gen_test.go
generated
vendored
Normal file
150
vendor/github.com/tinylib/msgp/_generated/gen_test.go
generated
vendored
Normal file
@ -0,0 +1,150 @@
|
||||
package _generated
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// benchmark encoding a small, "fast" type.
|
||||
// the point here is to see how much garbage
|
||||
// is generated intrinsically by the encoding/
|
||||
// decoding process as opposed to the nature
|
||||
// of the struct.
|
||||
func BenchmarkFastEncode(b *testing.B) {
|
||||
v := &TestFast{
|
||||
Lat: 40.12398,
|
||||
Long: -41.9082,
|
||||
Alt: 201.08290,
|
||||
Data: []byte("whaaaaargharbl"),
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, v)
|
||||
en := msgp.NewWriter(msgp.Nowhere)
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
v.EncodeMsg(en)
|
||||
}
|
||||
en.Flush()
|
||||
}
|
||||
|
||||
// benchmark decoding a small, "fast" type.
|
||||
// the point here is to see how much garbage
|
||||
// is generated intrinsically by the encoding/
|
||||
// decoding process as opposed to the nature
|
||||
// of the struct.
|
||||
func BenchmarkFastDecode(b *testing.B) {
|
||||
v := &TestFast{
|
||||
Lat: 40.12398,
|
||||
Long: -41.9082,
|
||||
Alt: 201.08290,
|
||||
Data: []byte("whaaaaargharbl"),
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
msgp.Encode(&buf, v)
|
||||
dc := msgp.NewReader(msgp.NewEndlessReader(buf.Bytes(), b))
|
||||
b.SetBytes(int64(buf.Len()))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
v.DecodeMsg(dc)
|
||||
}
|
||||
}
|
||||
|
||||
func (a *TestType) Equal(b *TestType) bool {
|
||||
// compare times, then zero out those
|
||||
// fields, perform a DeepEqual, and restore them
|
||||
ta, tb := a.Time, b.Time
|
||||
if !ta.Equal(tb) {
|
||||
return false
|
||||
}
|
||||
a.Time, b.Time = time.Time{}, time.Time{}
|
||||
ok := reflect.DeepEqual(a, b)
|
||||
a.Time, b.Time = ta, tb
|
||||
return ok
|
||||
}
|
||||
|
||||
// This covers the following cases:
|
||||
// - Recursive types
|
||||
// - Non-builtin identifiers (and recursive types)
|
||||
// - time.Time
|
||||
// - map[string]string
|
||||
// - anonymous structs
|
||||
//
|
||||
func Test1EncodeDecode(t *testing.T) {
|
||||
f := 32.00
|
||||
tt := &TestType{
|
||||
F: &f,
|
||||
Els: map[string]string{
|
||||
"thing_one": "one",
|
||||
"thing_two": "two",
|
||||
},
|
||||
Obj: struct {
|
||||
ValueA string `msg:"value_a"`
|
||||
ValueB []byte `msg:"value_b"`
|
||||
}{
|
||||
ValueA: "here's the first inner value",
|
||||
ValueB: []byte("here's the second inner value"),
|
||||
},
|
||||
Child: nil,
|
||||
Time: time.Now(),
|
||||
Appended: msgp.Raw([]byte{0xc0}), // 'nil'
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
|
||||
err := msgp.Encode(&buf, tt)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tnew := new(TestType)
|
||||
|
||||
err = msgp.Decode(&buf, tnew)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
if !tt.Equal(tnew) {
|
||||
t.Logf("in: %v", tt)
|
||||
t.Logf("out: %v", tnew)
|
||||
t.Fatal("objects not equal")
|
||||
}
|
||||
|
||||
tanother := new(TestType)
|
||||
|
||||
buf.Reset()
|
||||
msgp.Encode(&buf, tt)
|
||||
|
||||
var left []byte
|
||||
left, err = tanother.UnmarshalMsg(buf.Bytes())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if len(left) > 0 {
|
||||
t.Errorf("%d bytes left", len(left))
|
||||
}
|
||||
|
||||
if !tt.Equal(tanother) {
|
||||
t.Logf("in: %v", tt)
|
||||
t.Logf("out: %v", tanother)
|
||||
t.Fatal("objects not equal")
|
||||
}
|
||||
}
|
||||
|
||||
func TestIssue168(t *testing.T) {
|
||||
buf := bytes.Buffer{}
|
||||
test := TestObj{}
|
||||
|
||||
msgp.Encode(&buf, &TestObj{ID1: "1", ID2: "2"})
|
||||
msgp.Decode(&buf, &test)
|
||||
|
||||
if test.ID1 != "1" || test.ID2 != "2" {
|
||||
t.Fatalf("got back %+v", test)
|
||||
}
|
||||
}
|
31
vendor/github.com/tinylib/msgp/_generated/issue94.go
generated
vendored
Normal file
31
vendor/github.com/tinylib/msgp/_generated/issue94.go
generated
vendored
Normal file
@ -0,0 +1,31 @@
|
||||
package _generated
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
//go:generate msgp
|
||||
|
||||
// Issue 94: shims were not propogated recursively,
|
||||
// which caused shims that weren't at the top level
|
||||
// to be silently ignored.
|
||||
//
|
||||
// The following line will generate an error after
|
||||
// the code is generated if the generated code doesn't
|
||||
// have the right identifier in it.
|
||||
|
||||
//go:generate ./search.sh $GOFILE timetostr
|
||||
|
||||
//msgp:shim time.Time as:string using:timetostr/strtotime
|
||||
type T struct {
|
||||
T time.Time
|
||||
}
|
||||
|
||||
func timetostr(t time.Time) string {
|
||||
return t.Format(time.RFC3339)
|
||||
}
|
||||
|
||||
func strtotime(s string) time.Time {
|
||||
t, _ := time.Parse(time.RFC3339, s)
|
||||
return t
|
||||
}
|
12
vendor/github.com/tinylib/msgp/_generated/search.sh
generated
vendored
Executable file
12
vendor/github.com/tinylib/msgp/_generated/search.sh
generated
vendored
Executable file
@ -0,0 +1,12 @@
|
||||
#! /bin/sh
|
||||
|
||||
FILE=$(echo $1 | sed s/.go/_gen.go/)
|
||||
echo "searching" $FILE "for" $2
|
||||
grep -q $2 $FILE
|
||||
if [ $? -eq 0 ]
|
||||
then
|
||||
echo "OK"
|
||||
else
|
||||
echo "whoops!"
|
||||
exit 1
|
||||
fi
|
218
vendor/github.com/tinylib/msgp/gen/decode.go
generated
vendored
Normal file
218
vendor/github.com/tinylib/msgp/gen/decode.go
generated
vendored
Normal file
@ -0,0 +1,218 @@
|
||||
package gen
|
||||
|
||||
import (
|
||||
"io"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
func decode(w io.Writer) *decodeGen {
|
||||
return &decodeGen{
|
||||
p: printer{w: w},
|
||||
hasfield: false,
|
||||
}
|
||||
}
|
||||
|
||||
type decodeGen struct {
|
||||
passes
|
||||
p printer
|
||||
hasfield bool
|
||||
}
|
||||
|
||||
func (d *decodeGen) Method() Method { return Decode }
|
||||
|
||||
func (d *decodeGen) needsField() {
|
||||
if d.hasfield {
|
||||
return
|
||||
}
|
||||
d.p.print("\nvar field []byte; _ = field")
|
||||
d.hasfield = true
|
||||
}
|
||||
|
||||
func (d *decodeGen) Execute(p Elem) error {
|
||||
p = d.applyall(p)
|
||||
if p == nil {
|
||||
return nil
|
||||
}
|
||||
d.hasfield = false
|
||||
if !d.p.ok() {
|
||||
return d.p.err
|
||||
}
|
||||
|
||||
if !IsPrintable(p) {
|
||||
return nil
|
||||
}
|
||||
|
||||
d.p.comment("DecodeMsg implements msgp.Decodable")
|
||||
|
||||
d.p.printf("\nfunc (%s %s) DecodeMsg(dc *msgp.Reader) (err error) {", p.Varname(), methodReceiver(p))
|
||||
next(d, p)
|
||||
d.p.nakedReturn()
|
||||
unsetReceiver(p)
|
||||
return d.p.err
|
||||
}
|
||||
|
||||
func (d *decodeGen) gStruct(s *Struct) {
|
||||
if !d.p.ok() {
|
||||
return
|
||||
}
|
||||
if s.AsTuple {
|
||||
d.structAsTuple(s)
|
||||
} else {
|
||||
d.structAsMap(s)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *decodeGen) assignAndCheck(name string, typ string) {
|
||||
if !d.p.ok() {
|
||||
return
|
||||
}
|
||||
d.p.printf("\n%s, err = dc.Read%s()", name, typ)
|
||||
d.p.print(errcheck)
|
||||
}
|
||||
|
||||
func (d *decodeGen) structAsTuple(s *Struct) {
|
||||
nfields := len(s.Fields)
|
||||
|
||||
sz := randIdent()
|
||||
d.p.declare(sz, u32)
|
||||
d.assignAndCheck(sz, arrayHeader)
|
||||
d.p.arrayCheck(strconv.Itoa(nfields), sz)
|
||||
for i := range s.Fields {
|
||||
if !d.p.ok() {
|
||||
return
|
||||
}
|
||||
next(d, s.Fields[i].FieldElem)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decodeGen) structAsMap(s *Struct) {
|
||||
d.needsField()
|
||||
sz := randIdent()
|
||||
d.p.declare(sz, u32)
|
||||
d.assignAndCheck(sz, mapHeader)
|
||||
|
||||
d.p.printf("\nfor %s > 0 {\n%s--", sz, sz)
|
||||
d.assignAndCheck("field", mapKey)
|
||||
d.p.print("\nswitch msgp.UnsafeString(field) {")
|
||||
for i := range s.Fields {
|
||||
d.p.printf("\ncase \"%s\":", s.Fields[i].FieldTag)
|
||||
next(d, s.Fields[i].FieldElem)
|
||||
if !d.p.ok() {
|
||||
return
|
||||
}
|
||||
}
|
||||
d.p.print("\ndefault:\nerr = dc.Skip()")
|
||||
d.p.print(errcheck)
|
||||
d.p.closeblock() // close switch
|
||||
d.p.closeblock() // close for loop
|
||||
}
|
||||
|
||||
func (d *decodeGen) gBase(b *BaseElem) {
|
||||
if !d.p.ok() {
|
||||
return
|
||||
}
|
||||
|
||||
// open block for 'tmp'
|
||||
var tmp string
|
||||
if b.Convert {
|
||||
tmp = randIdent()
|
||||
d.p.printf("\n{ var %s %s", tmp, b.BaseType())
|
||||
}
|
||||
|
||||
vname := b.Varname() // e.g. "z.FieldOne"
|
||||
bname := b.BaseName() // e.g. "Float64"
|
||||
|
||||
// handle special cases
|
||||
// for object type.
|
||||
switch b.Value {
|
||||
case Bytes:
|
||||
if b.Convert {
|
||||
d.p.printf("\n%s, err = dc.ReadBytes([]byte(%s))", tmp, vname)
|
||||
} else {
|
||||
d.p.printf("\n%s, err = dc.ReadBytes(%s)", vname, vname)
|
||||
}
|
||||
case IDENT:
|
||||
d.p.printf("\nerr = %s.DecodeMsg(dc)", vname)
|
||||
case Ext:
|
||||
d.p.printf("\nerr = dc.ReadExtension(%s)", vname)
|
||||
default:
|
||||
if b.Convert {
|
||||
d.p.printf("\n%s, err = dc.Read%s()", tmp, bname)
|
||||
} else {
|
||||
d.p.printf("\n%s, err = dc.Read%s()", vname, bname)
|
||||
}
|
||||
}
|
||||
|
||||
// close block for 'tmp'
|
||||
if b.Convert {
|
||||
d.p.printf("\n%s = %s(%s)\n}", vname, b.FromBase(), tmp)
|
||||
}
|
||||
|
||||
d.p.print(errcheck)
|
||||
}
|
||||
|
||||
func (d *decodeGen) gMap(m *Map) {
|
||||
if !d.p.ok() {
|
||||
return
|
||||
}
|
||||
sz := randIdent()
|
||||
|
||||
// resize or allocate map
|
||||
d.p.declare(sz, u32)
|
||||
d.assignAndCheck(sz, mapHeader)
|
||||
d.p.resizeMap(sz, m)
|
||||
|
||||
// for element in map, read string/value
|
||||
// pair and assign
|
||||
d.p.printf("\nfor %s > 0 {\n%s--", sz, sz)
|
||||
d.p.declare(m.Keyidx, "string")
|
||||
d.p.declare(m.Validx, m.Value.TypeName())
|
||||
d.assignAndCheck(m.Keyidx, stringTyp)
|
||||
next(d, m.Value)
|
||||
d.p.mapAssign(m)
|
||||
d.p.closeblock()
|
||||
}
|
||||
|
||||
func (d *decodeGen) gSlice(s *Slice) {
|
||||
if !d.p.ok() {
|
||||
return
|
||||
}
|
||||
sz := randIdent()
|
||||
d.p.declare(sz, u32)
|
||||
d.assignAndCheck(sz, arrayHeader)
|
||||
d.p.resizeSlice(sz, s)
|
||||
d.p.rangeBlock(s.Index, s.Varname(), d, s.Els)
|
||||
}
|
||||
|
||||
func (d *decodeGen) gArray(a *Array) {
|
||||
if !d.p.ok() {
|
||||
return
|
||||
}
|
||||
|
||||
// special case if we have [const]byte
|
||||
if be, ok := a.Els.(*BaseElem); ok && (be.Value == Byte || be.Value == Uint8) {
|
||||
d.p.printf("\nerr = dc.ReadExactBytes((%s)[:])", a.Varname())
|
||||
d.p.print(errcheck)
|
||||
return
|
||||
}
|
||||
sz := randIdent()
|
||||
d.p.declare(sz, u32)
|
||||
d.assignAndCheck(sz, arrayHeader)
|
||||
d.p.arrayCheck(a.Size, sz)
|
||||
|
||||
d.p.rangeBlock(a.Index, a.Varname(), d, a.Els)
|
||||
}
|
||||
|
||||
func (d *decodeGen) gPtr(p *Ptr) {
|
||||
if !d.p.ok() {
|
||||
return
|
||||
}
|
||||
d.p.print("\nif dc.IsNil() {")
|
||||
d.p.print("\nerr = dc.ReadNil()")
|
||||
d.p.print(errcheck)
|
||||
d.p.printf("\n%s = nil\n} else {", p.Varname())
|
||||
d.p.initPtr(p)
|
||||
next(d, p.Value)
|
||||
d.p.closeblock()
|
||||
}
|
598
vendor/github.com/tinylib/msgp/gen/elem.go
generated
vendored
Normal file
598
vendor/github.com/tinylib/msgp/gen/elem.go
generated
vendored
Normal file
@ -0,0 +1,598 @@
|
||||
package gen
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
idxChars = "abcdefghijlkmnopqrstuvwxyz"
|
||||
idxLen = 3
|
||||
)
|
||||
|
||||
// generate a random identifier name
|
||||
func randIdent() string {
|
||||
bts := make([]byte, idxLen)
|
||||
for i := range bts {
|
||||
bts[i] = idxChars[rand.Intn(len(idxChars))]
|
||||
}
|
||||
|
||||
// Use a `z` prefix so the randomly generated bytes can't conflict with
|
||||
// Go keywords (such as `int` and `var`).
|
||||
return "z" + string(bts)
|
||||
}
|
||||
|
||||
// This code defines the type declaration tree.
|
||||
//
|
||||
// Consider the following:
|
||||
//
|
||||
// type Marshaler struct {
|
||||
// Thing1 *float64 `msg:"thing1"`
|
||||
// Body []byte `msg:"body"`
|
||||
// }
|
||||
//
|
||||
// A parser using this generator as a backend
|
||||
// should parse the above into:
|
||||
//
|
||||
// var val Elem = &Ptr{
|
||||
// name: "z",
|
||||
// Value: &Struct{
|
||||
// Name: "Marshaler",
|
||||
// Fields: []StructField{
|
||||
// {
|
||||
// FieldTag: "thing1",
|
||||
// FieldElem: &Ptr{
|
||||
// name: "z.Thing1",
|
||||
// Value: &BaseElem{
|
||||
// name: "*z.Thing1",
|
||||
// Value: Float64,
|
||||
// Convert: false,
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// {
|
||||
// FieldTag: "body",
|
||||
// FieldElem: &BaseElem{
|
||||
// name: "z.Body",
|
||||
// Value: Bytes,
|
||||
// Convert: false,
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// }
|
||||
|
||||
// Base is one of the
|
||||
// base types
|
||||
type Primitive uint8
|
||||
|
||||
// this is effectively the
|
||||
// list of currently available
|
||||
// ReadXxxx / WriteXxxx methods.
|
||||
const (
|
||||
Invalid Primitive = iota
|
||||
Bytes
|
||||
String
|
||||
Float32
|
||||
Float64
|
||||
Complex64
|
||||
Complex128
|
||||
Uint
|
||||
Uint8
|
||||
Uint16
|
||||
Uint32
|
||||
Uint64
|
||||
Byte
|
||||
Int
|
||||
Int8
|
||||
Int16
|
||||
Int32
|
||||
Int64
|
||||
Bool
|
||||
Intf // interface{}
|
||||
Time // time.Time
|
||||
Ext // extension
|
||||
|
||||
IDENT // IDENT means an unrecognized identifier
|
||||
)
|
||||
|
||||
// all of the recognized identities
|
||||
// that map to primitive types
|
||||
var primitives = map[string]Primitive{
|
||||
"[]byte": Bytes,
|
||||
"string": String,
|
||||
"float32": Float32,
|
||||
"float64": Float64,
|
||||
"complex64": Complex64,
|
||||
"complex128": Complex128,
|
||||
"uint": Uint,
|
||||
"uint8": Uint8,
|
||||
"uint16": Uint16,
|
||||
"uint32": Uint32,
|
||||
"uint64": Uint64,
|
||||
"byte": Byte,
|
||||
"int": Int,
|
||||
"int8": Int8,
|
||||
"int16": Int16,
|
||||
"int32": Int32,
|
||||
"int64": Int64,
|
||||
"bool": Bool,
|
||||
"interface{}": Intf,
|
||||
"time.Time": Time,
|
||||
"msgp.Extension": Ext,
|
||||
}
|
||||
|
||||
// types built into the library
|
||||
// that satisfy all of the
|
||||
// interfaces.
|
||||
var builtins = map[string]struct{}{
|
||||
"msgp.Raw": struct{}{},
|
||||
"msgp.Number": struct{}{},
|
||||
}
|
||||
|
||||
// common data/methods for every Elem
|
||||
type common struct{ vname, alias string }
|
||||
|
||||
func (c *common) SetVarname(s string) { c.vname = s }
|
||||
func (c *common) Varname() string { return c.vname }
|
||||
func (c *common) Alias(typ string) { c.alias = typ }
|
||||
func (c *common) hidden() {}
|
||||
|
||||
func IsPrintable(e Elem) bool {
|
||||
if be, ok := e.(*BaseElem); ok && !be.Printable() {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Elem is a go type capable of being
|
||||
// serialized into MessagePack. It is
|
||||
// implemented by *Ptr, *Struct, *Array,
|
||||
// *Slice, *Map, and *BaseElem.
|
||||
type Elem interface {
|
||||
// SetVarname sets this nodes
|
||||
// variable name and recursively
|
||||
// sets the names of all its children.
|
||||
// In general, this should only be
|
||||
// called on the parent of the tree.
|
||||
SetVarname(s string)
|
||||
|
||||
// Varname returns the variable
|
||||
// name of the element.
|
||||
Varname() string
|
||||
|
||||
// TypeName is the canonical
|
||||
// go type name of the node
|
||||
// e.g. "string", "int", "map[string]float64"
|
||||
// OR the alias name, if it has been set.
|
||||
TypeName() string
|
||||
|
||||
// Alias sets a type (alias) name
|
||||
Alias(typ string)
|
||||
|
||||
// Copy should perform a deep copy of the object
|
||||
Copy() Elem
|
||||
|
||||
// Complexity returns a measure of the
|
||||
// complexity of element (greater than
|
||||
// or equal to 1.)
|
||||
Complexity() int
|
||||
|
||||
hidden()
|
||||
}
|
||||
|
||||
// Ident returns the *BaseElem that corresponds
|
||||
// to the provided identity.
|
||||
func Ident(id string) *BaseElem {
|
||||
p, ok := primitives[id]
|
||||
if ok {
|
||||
return &BaseElem{Value: p}
|
||||
}
|
||||
be := &BaseElem{Value: IDENT}
|
||||
be.Alias(id)
|
||||
return be
|
||||
}
|
||||
|
||||
type Array struct {
|
||||
common
|
||||
Index string // index variable name
|
||||
Size string // array size
|
||||
Els Elem // child
|
||||
}
|
||||
|
||||
func (a *Array) SetVarname(s string) {
|
||||
a.common.SetVarname(s)
|
||||
ridx:
|
||||
a.Index = randIdent()
|
||||
|
||||
// try to avoid using the same
|
||||
// index as a parent slice
|
||||
if strings.Contains(a.Varname(), a.Index) {
|
||||
goto ridx
|
||||
}
|
||||
|
||||
a.Els.SetVarname(fmt.Sprintf("%s[%s]", a.Varname(), a.Index))
|
||||
}
|
||||
|
||||
func (a *Array) TypeName() string {
|
||||
if a.common.alias != "" {
|
||||
return a.common.alias
|
||||
}
|
||||
a.common.Alias(fmt.Sprintf("[%s]%s", a.Size, a.Els.TypeName()))
|
||||
return a.common.alias
|
||||
}
|
||||
|
||||
func (a *Array) Copy() Elem {
|
||||
b := *a
|
||||
b.Els = a.Els.Copy()
|
||||
return &b
|
||||
}
|
||||
|
||||
func (a *Array) Complexity() int { return 1 + a.Els.Complexity() }
|
||||
|
||||
// Map is a map[string]Elem
|
||||
type Map struct {
|
||||
common
|
||||
Keyidx string // key variable name
|
||||
Validx string // value variable name
|
||||
Value Elem // value element
|
||||
}
|
||||
|
||||
func (m *Map) SetVarname(s string) {
|
||||
m.common.SetVarname(s)
|
||||
ridx:
|
||||
m.Keyidx = randIdent()
|
||||
m.Validx = randIdent()
|
||||
|
||||
// just in case
|
||||
if m.Keyidx == m.Validx {
|
||||
goto ridx
|
||||
}
|
||||
|
||||
m.Value.SetVarname(m.Validx)
|
||||
}
|
||||
|
||||
func (m *Map) TypeName() string {
|
||||
if m.common.alias != "" {
|
||||
return m.common.alias
|
||||
}
|
||||
m.common.Alias("map[string]" + m.Value.TypeName())
|
||||
return m.common.alias
|
||||
}
|
||||
|
||||
func (m *Map) Copy() Elem {
|
||||
g := *m
|
||||
g.Value = m.Value.Copy()
|
||||
return &g
|
||||
}
|
||||
|
||||
func (m *Map) Complexity() int { return 2 + m.Value.Complexity() }
|
||||
|
||||
type Slice struct {
|
||||
common
|
||||
Index string
|
||||
Els Elem // The type of each element
|
||||
}
|
||||
|
||||
func (s *Slice) SetVarname(a string) {
|
||||
s.common.SetVarname(a)
|
||||
s.Index = randIdent()
|
||||
varName := s.Varname()
|
||||
if varName[0] == '*' {
|
||||
// Pointer-to-slice requires parenthesis for slicing.
|
||||
varName = "(" + varName + ")"
|
||||
}
|
||||
s.Els.SetVarname(fmt.Sprintf("%s[%s]", varName, s.Index))
|
||||
}
|
||||
|
||||
func (s *Slice) TypeName() string {
|
||||
if s.common.alias != "" {
|
||||
return s.common.alias
|
||||
}
|
||||
s.common.Alias("[]" + s.Els.TypeName())
|
||||
return s.common.alias
|
||||
}
|
||||
|
||||
func (s *Slice) Copy() Elem {
|
||||
z := *s
|
||||
z.Els = s.Els.Copy()
|
||||
return &z
|
||||
}
|
||||
|
||||
func (s *Slice) Complexity() int {
|
||||
return 1 + s.Els.Complexity()
|
||||
}
|
||||
|
||||
type Ptr struct {
|
||||
common
|
||||
Value Elem
|
||||
}
|
||||
|
||||
func (s *Ptr) SetVarname(a string) {
|
||||
s.common.SetVarname(a)
|
||||
|
||||
// struct fields are dereferenced
|
||||
// automatically...
|
||||
switch x := s.Value.(type) {
|
||||
case *Struct:
|
||||
// struct fields are automatically dereferenced
|
||||
x.SetVarname(a)
|
||||
return
|
||||
|
||||
case *BaseElem:
|
||||
// identities have pointer receivers
|
||||
if x.Value == IDENT {
|
||||
x.SetVarname(a)
|
||||
} else {
|
||||
x.SetVarname("*" + a)
|
||||
}
|
||||
return
|
||||
|
||||
default:
|
||||
s.Value.SetVarname("*" + a)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Ptr) TypeName() string {
|
||||
if s.common.alias != "" {
|
||||
return s.common.alias
|
||||
}
|
||||
s.common.Alias("*" + s.Value.TypeName())
|
||||
return s.common.alias
|
||||
}
|
||||
|
||||
func (s *Ptr) Copy() Elem {
|
||||
v := *s
|
||||
v.Value = s.Value.Copy()
|
||||
return &v
|
||||
}
|
||||
|
||||
func (s *Ptr) Complexity() int { return 1 + s.Value.Complexity() }
|
||||
|
||||
func (s *Ptr) Needsinit() bool {
|
||||
if be, ok := s.Value.(*BaseElem); ok && be.needsref {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
type Struct struct {
|
||||
common
|
||||
Fields []StructField // field list
|
||||
AsTuple bool // write as an array instead of a map
|
||||
}
|
||||
|
||||
func (s *Struct) TypeName() string {
|
||||
if s.common.alias != "" {
|
||||
return s.common.alias
|
||||
}
|
||||
str := "struct{\n"
|
||||
for i := range s.Fields {
|
||||
str += s.Fields[i].FieldName + " " + s.Fields[i].FieldElem.TypeName() + ";\n"
|
||||
}
|
||||
str += "}"
|
||||
s.common.Alias(str)
|
||||
return s.common.alias
|
||||
}
|
||||
|
||||
func (s *Struct) SetVarname(a string) {
|
||||
s.common.SetVarname(a)
|
||||
writeStructFields(s.Fields, a)
|
||||
}
|
||||
|
||||
func (s *Struct) Copy() Elem {
|
||||
g := *s
|
||||
g.Fields = make([]StructField, len(s.Fields))
|
||||
copy(g.Fields, s.Fields)
|
||||
for i := range s.Fields {
|
||||
g.Fields[i].FieldElem = s.Fields[i].FieldElem.Copy()
|
||||
}
|
||||
return &g
|
||||
}
|
||||
|
||||
func (s *Struct) Complexity() int {
|
||||
c := 1
|
||||
for i := range s.Fields {
|
||||
c += s.Fields[i].FieldElem.Complexity()
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
type StructField struct {
|
||||
FieldTag string // the string inside the `msg:""` tag
|
||||
FieldName string // the name of the struct field
|
||||
FieldElem Elem // the field type
|
||||
}
|
||||
|
||||
// BaseElem is an element that
|
||||
// can be represented by a primitive
|
||||
// MessagePack type.
|
||||
type BaseElem struct {
|
||||
common
|
||||
ShimToBase string // shim to base type, or empty
|
||||
ShimFromBase string // shim from base type, or empty
|
||||
Value Primitive // Type of element
|
||||
Convert bool // should we do an explicit conversion?
|
||||
mustinline bool // must inline; not printable
|
||||
needsref bool // needs reference for shim
|
||||
}
|
||||
|
||||
func (s *BaseElem) Printable() bool { return !s.mustinline }
|
||||
|
||||
func (s *BaseElem) Alias(typ string) {
|
||||
s.common.Alias(typ)
|
||||
if s.Value != IDENT {
|
||||
s.Convert = true
|
||||
}
|
||||
if strings.Contains(typ, ".") {
|
||||
s.mustinline = true
|
||||
}
|
||||
}
|
||||
|
||||
func (s *BaseElem) SetVarname(a string) {
|
||||
// extensions whose parents
|
||||
// are not pointers need to
|
||||
// be explicitly referenced
|
||||
if s.Value == Ext || s.needsref {
|
||||
if strings.HasPrefix(a, "*") {
|
||||
s.common.SetVarname(a[1:])
|
||||
return
|
||||
}
|
||||
s.common.SetVarname("&" + a)
|
||||
return
|
||||
}
|
||||
|
||||
s.common.SetVarname(a)
|
||||
}
|
||||
|
||||
// TypeName returns the syntactically correct Go
|
||||
// type name for the base element.
|
||||
func (s *BaseElem) TypeName() string {
|
||||
if s.common.alias != "" {
|
||||
return s.common.alias
|
||||
}
|
||||
s.common.Alias(s.BaseType())
|
||||
return s.common.alias
|
||||
}
|
||||
|
||||
// ToBase, used if Convert==true, is used as tmp = {{ToBase}}({{Varname}})
|
||||
func (s *BaseElem) ToBase() string {
|
||||
if s.ShimToBase != "" {
|
||||
return s.ShimToBase
|
||||
}
|
||||
return s.BaseType()
|
||||
}
|
||||
|
||||
// FromBase, used if Convert==true, is used as {{Varname}} = {{FromBase}}(tmp)
|
||||
func (s *BaseElem) FromBase() string {
|
||||
if s.ShimFromBase != "" {
|
||||
return s.ShimFromBase
|
||||
}
|
||||
return s.TypeName()
|
||||
}
|
||||
|
||||
// BaseName returns the string form of the
|
||||
// base type (e.g. Float64, Ident, etc)
|
||||
func (s *BaseElem) BaseName() string {
|
||||
// time is a special case;
|
||||
// we strip the package prefix
|
||||
if s.Value == Time {
|
||||
return "Time"
|
||||
}
|
||||
return s.Value.String()
|
||||
}
|
||||
|
||||
func (s *BaseElem) BaseType() string {
|
||||
switch s.Value {
|
||||
case IDENT:
|
||||
return s.TypeName()
|
||||
|
||||
// exceptions to the naming/capitalization
|
||||
// rule:
|
||||
case Intf:
|
||||
return "interface{}"
|
||||
case Bytes:
|
||||
return "[]byte"
|
||||
case Time:
|
||||
return "time.Time"
|
||||
case Ext:
|
||||
return "msgp.Extension"
|
||||
|
||||
// everything else is base.String() with
|
||||
// the first letter as lowercase
|
||||
default:
|
||||
return strings.ToLower(s.BaseName())
|
||||
}
|
||||
}
|
||||
|
||||
func (s *BaseElem) Needsref(b bool) {
|
||||
s.needsref = b
|
||||
}
|
||||
|
||||
func (s *BaseElem) Copy() Elem {
|
||||
g := *s
|
||||
return &g
|
||||
}
|
||||
|
||||
func (s *BaseElem) Complexity() int {
|
||||
if s.Convert && !s.mustinline {
|
||||
return 2
|
||||
}
|
||||
// we need to return 1 if !printable(),
|
||||
// in order to make sure that stuff gets
|
||||
// inlined appropriately
|
||||
return 1
|
||||
}
|
||||
|
||||
// Resolved returns whether or not
|
||||
// the type of the element is
|
||||
// a primitive or a builtin provided
|
||||
// by the package.
|
||||
func (s *BaseElem) Resolved() bool {
|
||||
if s.Value == IDENT {
|
||||
_, ok := builtins[s.TypeName()]
|
||||
return ok
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (k Primitive) String() string {
|
||||
switch k {
|
||||
case String:
|
||||
return "String"
|
||||
case Bytes:
|
||||
return "Bytes"
|
||||
case Float32:
|
||||
return "Float32"
|
||||
case Float64:
|
||||
return "Float64"
|
||||
case Complex64:
|
||||
return "Complex64"
|
||||
case Complex128:
|
||||
return "Complex128"
|
||||
case Uint:
|
||||
return "Uint"
|
||||
case Uint8:
|
||||
return "Uint8"
|
||||
case Uint16:
|
||||
return "Uint16"
|
||||
case Uint32:
|
||||
return "Uint32"
|
||||
case Uint64:
|
||||
return "Uint64"
|
||||
case Byte:
|
||||
return "Byte"
|
||||
case Int:
|
||||
return "Int"
|
||||
case Int8:
|
||||
return "Int8"
|
||||
case Int16:
|
||||
return "Int16"
|
||||
case Int32:
|
||||
return "Int32"
|
||||
case Int64:
|
||||
return "Int64"
|
||||
case Bool:
|
||||
return "Bool"
|
||||
case Intf:
|
||||
return "Intf"
|
||||
case Time:
|
||||
return "time.Time"
|
||||
case Ext:
|
||||
return "Extension"
|
||||
case IDENT:
|
||||
return "Ident"
|
||||
default:
|
||||
return "INVALID"
|
||||
}
|
||||
}
|
||||
|
||||
// writeStructFields is a trampoline for writeBase for
|
||||
// all of the fields in a struct
|
||||
func writeStructFields(s []StructField, name string) {
|
||||
for i := range s {
|
||||
s[i].FieldElem.SetVarname(fmt.Sprintf("%s.%s", name, s[i].FieldName))
|
||||
}
|
||||
}
|
184
vendor/github.com/tinylib/msgp/gen/encode.go
generated
vendored
Normal file
184
vendor/github.com/tinylib/msgp/gen/encode.go
generated
vendored
Normal file
@ -0,0 +1,184 @@
|
||||
package gen
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
"io"
|
||||
)
|
||||
|
||||
func encode(w io.Writer) *encodeGen {
|
||||
return &encodeGen{
|
||||
p: printer{w: w},
|
||||
}
|
||||
}
|
||||
|
||||
type encodeGen struct {
|
||||
passes
|
||||
p printer
|
||||
fuse []byte
|
||||
}
|
||||
|
||||
func (e *encodeGen) Method() Method { return Encode }
|
||||
|
||||
func (e *encodeGen) Apply(dirs []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *encodeGen) writeAndCheck(typ string, argfmt string, arg interface{}) {
|
||||
e.p.printf("\nerr = en.Write%s(%s)", typ, fmt.Sprintf(argfmt, arg))
|
||||
e.p.print(errcheck)
|
||||
}
|
||||
|
||||
func (e *encodeGen) fuseHook() {
|
||||
if len(e.fuse) > 0 {
|
||||
e.appendraw(e.fuse)
|
||||
e.fuse = e.fuse[:0]
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encodeGen) Fuse(b []byte) {
|
||||
if len(e.fuse) > 0 {
|
||||
e.fuse = append(e.fuse, b...)
|
||||
} else {
|
||||
e.fuse = b
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encodeGen) Execute(p Elem) error {
|
||||
if !e.p.ok() {
|
||||
return e.p.err
|
||||
}
|
||||
p = e.applyall(p)
|
||||
if p == nil {
|
||||
return nil
|
||||
}
|
||||
if !IsPrintable(p) {
|
||||
return nil
|
||||
}
|
||||
|
||||
e.p.comment("EncodeMsg implements msgp.Encodable")
|
||||
|
||||
e.p.printf("\nfunc (%s %s) EncodeMsg(en *msgp.Writer) (err error) {", p.Varname(), imutMethodReceiver(p))
|
||||
next(e, p)
|
||||
e.p.nakedReturn()
|
||||
return e.p.err
|
||||
}
|
||||
|
||||
func (e *encodeGen) gStruct(s *Struct) {
|
||||
if !e.p.ok() {
|
||||
return
|
||||
}
|
||||
if s.AsTuple {
|
||||
e.tuple(s)
|
||||
} else {
|
||||
e.structmap(s)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (e *encodeGen) tuple(s *Struct) {
|
||||
nfields := len(s.Fields)
|
||||
data := msgp.AppendArrayHeader(nil, uint32(nfields))
|
||||
e.p.printf("\n// array header, size %d", nfields)
|
||||
e.Fuse(data)
|
||||
for i := range s.Fields {
|
||||
if !e.p.ok() {
|
||||
return
|
||||
}
|
||||
next(e, s.Fields[i].FieldElem)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encodeGen) appendraw(bts []byte) {
|
||||
e.p.print("\nerr = en.Append(")
|
||||
for i, b := range bts {
|
||||
if i != 0 {
|
||||
e.p.print(", ")
|
||||
}
|
||||
e.p.printf("0x%x", b)
|
||||
}
|
||||
e.p.print(")\nif err != nil { return err }")
|
||||
}
|
||||
|
||||
func (e *encodeGen) structmap(s *Struct) {
|
||||
nfields := len(s.Fields)
|
||||
data := msgp.AppendMapHeader(nil, uint32(nfields))
|
||||
e.p.printf("\n// map header, size %d", nfields)
|
||||
e.Fuse(data)
|
||||
for i := range s.Fields {
|
||||
if !e.p.ok() {
|
||||
return
|
||||
}
|
||||
data = msgp.AppendString(nil, s.Fields[i].FieldTag)
|
||||
e.p.printf("\n// write %q", s.Fields[i].FieldTag)
|
||||
e.Fuse(data)
|
||||
next(e, s.Fields[i].FieldElem)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encodeGen) gMap(m *Map) {
|
||||
if !e.p.ok() {
|
||||
return
|
||||
}
|
||||
e.fuseHook()
|
||||
vname := m.Varname()
|
||||
e.writeAndCheck(mapHeader, lenAsUint32, vname)
|
||||
|
||||
e.p.printf("\nfor %s, %s := range %s {", m.Keyidx, m.Validx, vname)
|
||||
e.writeAndCheck(stringTyp, literalFmt, m.Keyidx)
|
||||
next(e, m.Value)
|
||||
e.p.closeblock()
|
||||
}
|
||||
|
||||
func (e *encodeGen) gPtr(s *Ptr) {
|
||||
if !e.p.ok() {
|
||||
return
|
||||
}
|
||||
e.fuseHook()
|
||||
e.p.printf("\nif %s == nil { err = en.WriteNil(); if err != nil { return; } } else {", s.Varname())
|
||||
next(e, s.Value)
|
||||
e.p.closeblock()
|
||||
}
|
||||
|
||||
func (e *encodeGen) gSlice(s *Slice) {
|
||||
if !e.p.ok() {
|
||||
return
|
||||
}
|
||||
e.fuseHook()
|
||||
e.writeAndCheck(arrayHeader, lenAsUint32, s.Varname())
|
||||
e.p.rangeBlock(s.Index, s.Varname(), e, s.Els)
|
||||
}
|
||||
|
||||
func (e *encodeGen) gArray(a *Array) {
|
||||
if !e.p.ok() {
|
||||
return
|
||||
}
|
||||
e.fuseHook()
|
||||
// shortcut for [const]byte
|
||||
if be, ok := a.Els.(*BaseElem); ok && (be.Value == Byte || be.Value == Uint8) {
|
||||
e.p.printf("\nerr = en.WriteBytes((%s)[:])", a.Varname())
|
||||
e.p.print(errcheck)
|
||||
return
|
||||
}
|
||||
|
||||
e.writeAndCheck(arrayHeader, literalFmt, a.Size)
|
||||
e.p.rangeBlock(a.Index, a.Varname(), e, a.Els)
|
||||
}
|
||||
|
||||
func (e *encodeGen) gBase(b *BaseElem) {
|
||||
if !e.p.ok() {
|
||||
return
|
||||
}
|
||||
e.fuseHook()
|
||||
vname := b.Varname()
|
||||
if b.Convert {
|
||||
vname = tobaseConvert(b)
|
||||
}
|
||||
|
||||
if b.Value == IDENT { // unknown identity
|
||||
e.p.printf("\nerr = %s.EncodeMsg(en)", vname)
|
||||
e.p.print(errcheck)
|
||||
} else { // typical case
|
||||
e.writeAndCheck(b.BaseName(), literalFmt, vname)
|
||||
}
|
||||
}
|
198
vendor/github.com/tinylib/msgp/gen/marshal.go
generated
vendored
Normal file
198
vendor/github.com/tinylib/msgp/gen/marshal.go
generated
vendored
Normal file
@ -0,0 +1,198 @@
|
||||
package gen
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
"io"
|
||||
)
|
||||
|
||||
func marshal(w io.Writer) *marshalGen {
|
||||
return &marshalGen{
|
||||
p: printer{w: w},
|
||||
}
|
||||
}
|
||||
|
||||
type marshalGen struct {
|
||||
passes
|
||||
p printer
|
||||
fuse []byte
|
||||
}
|
||||
|
||||
func (m *marshalGen) Method() Method { return Marshal }
|
||||
|
||||
func (m *marshalGen) Apply(dirs []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *marshalGen) Execute(p Elem) error {
|
||||
if !m.p.ok() {
|
||||
return m.p.err
|
||||
}
|
||||
p = m.applyall(p)
|
||||
if p == nil {
|
||||
return nil
|
||||
}
|
||||
if !IsPrintable(p) {
|
||||
return nil
|
||||
}
|
||||
|
||||
m.p.comment("MarshalMsg implements msgp.Marshaler")
|
||||
|
||||
// save the vname before
|
||||
// calling methodReceiver so
|
||||
// that z.Msgsize() is printed correctly
|
||||
c := p.Varname()
|
||||
|
||||
m.p.printf("\nfunc (%s %s) MarshalMsg(b []byte) (o []byte, err error) {", p.Varname(), imutMethodReceiver(p))
|
||||
m.p.printf("\no = msgp.Require(b, %s.Msgsize())", c)
|
||||
next(m, p)
|
||||
m.p.nakedReturn()
|
||||
return m.p.err
|
||||
}
|
||||
|
||||
func (m *marshalGen) rawAppend(typ string, argfmt string, arg interface{}) {
|
||||
m.p.printf("\no = msgp.Append%s(o, %s)", typ, fmt.Sprintf(argfmt, arg))
|
||||
}
|
||||
|
||||
func (m *marshalGen) fuseHook() {
|
||||
if len(m.fuse) > 0 {
|
||||
m.rawbytes(m.fuse)
|
||||
m.fuse = m.fuse[:0]
|
||||
}
|
||||
}
|
||||
|
||||
func (m *marshalGen) Fuse(b []byte) {
|
||||
if len(m.fuse) == 0 {
|
||||
m.fuse = b
|
||||
} else {
|
||||
m.fuse = append(m.fuse, b...)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *marshalGen) gStruct(s *Struct) {
|
||||
if !m.p.ok() {
|
||||
return
|
||||
}
|
||||
|
||||
if s.AsTuple {
|
||||
m.tuple(s)
|
||||
} else {
|
||||
m.mapstruct(s)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (m *marshalGen) tuple(s *Struct) {
|
||||
data := make([]byte, 0, 5)
|
||||
data = msgp.AppendArrayHeader(data, uint32(len(s.Fields)))
|
||||
m.p.printf("\n// array header, size %d", len(s.Fields))
|
||||
m.Fuse(data)
|
||||
for i := range s.Fields {
|
||||
if !m.p.ok() {
|
||||
return
|
||||
}
|
||||
next(m, s.Fields[i].FieldElem)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *marshalGen) mapstruct(s *Struct) {
|
||||
data := make([]byte, 0, 64)
|
||||
data = msgp.AppendMapHeader(data, uint32(len(s.Fields)))
|
||||
m.p.printf("\n// map header, size %d", len(s.Fields))
|
||||
m.Fuse(data)
|
||||
for i := range s.Fields {
|
||||
if !m.p.ok() {
|
||||
return
|
||||
}
|
||||
data = msgp.AppendString(nil, s.Fields[i].FieldTag)
|
||||
|
||||
m.p.printf("\n// string %q", s.Fields[i].FieldTag)
|
||||
m.Fuse(data)
|
||||
|
||||
next(m, s.Fields[i].FieldElem)
|
||||
}
|
||||
}
|
||||
|
||||
// append raw data
|
||||
func (m *marshalGen) rawbytes(bts []byte) {
|
||||
m.p.print("\no = append(o, ")
|
||||
for _, b := range bts {
|
||||
m.p.printf("0x%x,", b)
|
||||
}
|
||||
m.p.print(")")
|
||||
}
|
||||
|
||||
func (m *marshalGen) gMap(s *Map) {
|
||||
if !m.p.ok() {
|
||||
return
|
||||
}
|
||||
m.fuseHook()
|
||||
vname := s.Varname()
|
||||
m.rawAppend(mapHeader, lenAsUint32, vname)
|
||||
m.p.printf("\nfor %s, %s := range %s {", s.Keyidx, s.Validx, vname)
|
||||
m.rawAppend(stringTyp, literalFmt, s.Keyidx)
|
||||
next(m, s.Value)
|
||||
m.p.closeblock()
|
||||
}
|
||||
|
||||
func (m *marshalGen) gSlice(s *Slice) {
|
||||
if !m.p.ok() {
|
||||
return
|
||||
}
|
||||
m.fuseHook()
|
||||
vname := s.Varname()
|
||||
m.rawAppend(arrayHeader, lenAsUint32, vname)
|
||||
m.p.rangeBlock(s.Index, vname, m, s.Els)
|
||||
}
|
||||
|
||||
func (m *marshalGen) gArray(a *Array) {
|
||||
if !m.p.ok() {
|
||||
return
|
||||
}
|
||||
m.fuseHook()
|
||||
if be, ok := a.Els.(*BaseElem); ok && be.Value == Byte {
|
||||
m.rawAppend("Bytes", "(%s)[:]", a.Varname())
|
||||
return
|
||||
}
|
||||
|
||||
m.rawAppend(arrayHeader, literalFmt, a.Size)
|
||||
m.p.rangeBlock(a.Index, a.Varname(), m, a.Els)
|
||||
}
|
||||
|
||||
func (m *marshalGen) gPtr(p *Ptr) {
|
||||
if !m.p.ok() {
|
||||
return
|
||||
}
|
||||
m.fuseHook()
|
||||
m.p.printf("\nif %s == nil {\no = msgp.AppendNil(o)\n} else {", p.Varname())
|
||||
next(m, p.Value)
|
||||
m.p.closeblock()
|
||||
}
|
||||
|
||||
func (m *marshalGen) gBase(b *BaseElem) {
|
||||
if !m.p.ok() {
|
||||
return
|
||||
}
|
||||
m.fuseHook()
|
||||
vname := b.Varname()
|
||||
|
||||
if b.Convert {
|
||||
vname = tobaseConvert(b)
|
||||
}
|
||||
|
||||
var echeck bool
|
||||
switch b.Value {
|
||||
case IDENT:
|
||||
echeck = true
|
||||
m.p.printf("\no, err = %s.MarshalMsg(o)", vname)
|
||||
case Intf, Ext:
|
||||
echeck = true
|
||||
m.p.printf("\no, err = msgp.Append%s(o, %s)", b.BaseName(), vname)
|
||||
default:
|
||||
m.rawAppend(b.BaseName(), literalFmt, vname)
|
||||
}
|
||||
|
||||
if echeck {
|
||||
m.p.print(errcheck)
|
||||
}
|
||||
}
|
272
vendor/github.com/tinylib/msgp/gen/size.go
generated
vendored
Normal file
272
vendor/github.com/tinylib/msgp/gen/size.go
generated
vendored
Normal file
@ -0,0 +1,272 @@
|
||||
package gen
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
"io"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type sizeState uint8
|
||||
|
||||
const (
|
||||
// need to write "s = ..."
|
||||
assign sizeState = iota
|
||||
|
||||
// need to write "s += ..."
|
||||
add
|
||||
|
||||
// can just append "+ ..."
|
||||
expr
|
||||
)
|
||||
|
||||
func sizes(w io.Writer) *sizeGen {
|
||||
return &sizeGen{
|
||||
p: printer{w: w},
|
||||
state: assign,
|
||||
}
|
||||
}
|
||||
|
||||
type sizeGen struct {
|
||||
passes
|
||||
p printer
|
||||
state sizeState
|
||||
}
|
||||
|
||||
func (s *sizeGen) Method() Method { return Size }
|
||||
|
||||
func (s *sizeGen) Apply(dirs []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func builtinSize(typ string) string {
|
||||
return "msgp." + typ + "Size"
|
||||
}
|
||||
|
||||
// this lets us chain together addition
|
||||
// operations where possible
|
||||
func (s *sizeGen) addConstant(sz string) {
|
||||
if !s.p.ok() {
|
||||
return
|
||||
}
|
||||
|
||||
switch s.state {
|
||||
case assign:
|
||||
s.p.print("\ns = " + sz)
|
||||
s.state = expr
|
||||
return
|
||||
case add:
|
||||
s.p.print("\ns += " + sz)
|
||||
s.state = expr
|
||||
return
|
||||
case expr:
|
||||
s.p.print(" + " + sz)
|
||||
return
|
||||
}
|
||||
|
||||
panic("unknown size state")
|
||||
}
|
||||
|
||||
func (s *sizeGen) Execute(p Elem) error {
|
||||
if !s.p.ok() {
|
||||
return s.p.err
|
||||
}
|
||||
p = s.applyall(p)
|
||||
if p == nil {
|
||||
return nil
|
||||
}
|
||||
if !IsPrintable(p) {
|
||||
return nil
|
||||
}
|
||||
|
||||
s.p.comment("Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message")
|
||||
|
||||
s.p.printf("\nfunc (%s %s) Msgsize() (s int) {", p.Varname(), imutMethodReceiver(p))
|
||||
s.state = assign
|
||||
next(s, p)
|
||||
s.p.nakedReturn()
|
||||
return s.p.err
|
||||
}
|
||||
|
||||
func (s *sizeGen) gStruct(st *Struct) {
|
||||
if !s.p.ok() {
|
||||
return
|
||||
}
|
||||
|
||||
nfields := uint32(len(st.Fields))
|
||||
|
||||
if st.AsTuple {
|
||||
data := msgp.AppendArrayHeader(nil, nfields)
|
||||
s.addConstant(strconv.Itoa(len(data)))
|
||||
for i := range st.Fields {
|
||||
if !s.p.ok() {
|
||||
return
|
||||
}
|
||||
next(s, st.Fields[i].FieldElem)
|
||||
}
|
||||
} else {
|
||||
data := msgp.AppendMapHeader(nil, nfields)
|
||||
s.addConstant(strconv.Itoa(len(data)))
|
||||
for i := range st.Fields {
|
||||
data = data[:0]
|
||||
data = msgp.AppendString(data, st.Fields[i].FieldTag)
|
||||
s.addConstant(strconv.Itoa(len(data)))
|
||||
next(s, st.Fields[i].FieldElem)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *sizeGen) gPtr(p *Ptr) {
|
||||
s.state = add // inner must use add
|
||||
s.p.printf("\nif %s == nil {\ns += msgp.NilSize\n} else {", p.Varname())
|
||||
next(s, p.Value)
|
||||
s.state = add // closing block; reset to add
|
||||
s.p.closeblock()
|
||||
}
|
||||
|
||||
func (s *sizeGen) gSlice(sl *Slice) {
|
||||
if !s.p.ok() {
|
||||
return
|
||||
}
|
||||
|
||||
s.addConstant(builtinSize(arrayHeader))
|
||||
|
||||
// if the slice's element is a fixed size
|
||||
// (e.g. float64, [32]int, etc.), then
|
||||
// print the length times the element size directly
|
||||
if str, ok := fixedsizeExpr(sl.Els); ok {
|
||||
s.addConstant(fmt.Sprintf("(%s * (%s))", lenExpr(sl), str))
|
||||
return
|
||||
}
|
||||
|
||||
// add inside the range block, and immediately after
|
||||
s.state = add
|
||||
s.p.rangeBlock(sl.Index, sl.Varname(), s, sl.Els)
|
||||
s.state = add
|
||||
}
|
||||
|
||||
func (s *sizeGen) gArray(a *Array) {
|
||||
if !s.p.ok() {
|
||||
return
|
||||
}
|
||||
|
||||
s.addConstant(builtinSize(arrayHeader))
|
||||
|
||||
// if the array's children are a fixed
|
||||
// size, we can compile an expression
|
||||
// that always represents the array's wire size
|
||||
if str, ok := fixedsizeExpr(a); ok {
|
||||
s.addConstant(str)
|
||||
return
|
||||
}
|
||||
|
||||
s.state = add
|
||||
s.p.rangeBlock(a.Index, a.Varname(), s, a.Els)
|
||||
s.state = add
|
||||
}
|
||||
|
||||
func (s *sizeGen) gMap(m *Map) {
|
||||
s.addConstant(builtinSize(mapHeader))
|
||||
vn := m.Varname()
|
||||
s.p.printf("\nif %s != nil {", vn)
|
||||
s.p.printf("\nfor %s, %s := range %s {", m.Keyidx, m.Validx, vn)
|
||||
s.p.printf("\n_ = %s", m.Validx) // we may not use the value
|
||||
s.p.printf("\ns += msgp.StringPrefixSize + len(%s)", m.Keyidx)
|
||||
s.state = expr
|
||||
next(s, m.Value)
|
||||
s.p.closeblock()
|
||||
s.p.closeblock()
|
||||
s.state = add
|
||||
}
|
||||
|
||||
func (s *sizeGen) gBase(b *BaseElem) {
|
||||
if !s.p.ok() {
|
||||
return
|
||||
}
|
||||
s.addConstant(basesizeExpr(b))
|
||||
}
|
||||
|
||||
// returns "len(slice)"
|
||||
func lenExpr(sl *Slice) string {
|
||||
return "len(" + sl.Varname() + ")"
|
||||
}
|
||||
|
||||
// is a given primitive always the same (max)
|
||||
// size on the wire?
|
||||
func fixedSize(p Primitive) bool {
|
||||
switch p {
|
||||
case Intf, Ext, IDENT, Bytes, String:
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// strip reference from string
|
||||
func stripRef(s string) string {
|
||||
if s[0] == '&' {
|
||||
return s[1:]
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// return a fixed-size expression, if possible.
|
||||
// only possible for *BaseElem and *Array.
|
||||
// returns (expr, ok)
|
||||
func fixedsizeExpr(e Elem) (string, bool) {
|
||||
switch e := e.(type) {
|
||||
case *Array:
|
||||
if str, ok := fixedsizeExpr(e.Els); ok {
|
||||
return fmt.Sprintf("(%s * (%s))", e.Size, str), true
|
||||
}
|
||||
case *BaseElem:
|
||||
if fixedSize(e.Value) {
|
||||
return builtinSize(e.BaseName()), true
|
||||
}
|
||||
case *Struct:
|
||||
var str string
|
||||
for _, f := range e.Fields {
|
||||
if fs, ok := fixedsizeExpr(f.FieldElem); ok {
|
||||
if str == "" {
|
||||
str = fs
|
||||
} else {
|
||||
str += "+" + fs
|
||||
}
|
||||
} else {
|
||||
return "", false
|
||||
}
|
||||
}
|
||||
var hdrlen int
|
||||
mhdr := msgp.AppendMapHeader(nil, uint32(len(e.Fields)))
|
||||
hdrlen += len(mhdr)
|
||||
var strbody []byte
|
||||
for _, f := range e.Fields {
|
||||
strbody = msgp.AppendString(strbody[:0], f.FieldTag)
|
||||
hdrlen += len(strbody)
|
||||
}
|
||||
return fmt.Sprintf("%d + %s", hdrlen, str), true
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
// print size expression of a variable name
|
||||
func basesizeExpr(b *BaseElem) string {
|
||||
vname := b.Varname()
|
||||
if b.Convert {
|
||||
vname = tobaseConvert(b)
|
||||
}
|
||||
switch b.Value {
|
||||
case Ext:
|
||||
return "msgp.ExtensionPrefixSize + " + stripRef(vname) + ".Len()"
|
||||
case Intf:
|
||||
return "msgp.GuessSize(" + vname + ")"
|
||||
case IDENT:
|
||||
return vname + ".Msgsize()"
|
||||
case Bytes:
|
||||
return "msgp.BytesPrefixSize + len(" + vname + ")"
|
||||
case String:
|
||||
return "msgp.StringPrefixSize + len(" + vname + ")"
|
||||
default:
|
||||
return builtinSize(b.BaseName())
|
||||
}
|
||||
}
|
376
vendor/github.com/tinylib/msgp/gen/spec.go
generated
vendored
Normal file
376
vendor/github.com/tinylib/msgp/gen/spec.go
generated
vendored
Normal file
@ -0,0 +1,376 @@
|
||||
package gen
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
const (
|
||||
errcheck = "\nif err != nil { return }"
|
||||
lenAsUint32 = "uint32(len(%s))"
|
||||
literalFmt = "%s"
|
||||
intFmt = "%d"
|
||||
quotedFmt = `"%s"`
|
||||
mapHeader = "MapHeader"
|
||||
arrayHeader = "ArrayHeader"
|
||||
mapKey = "MapKeyPtr"
|
||||
stringTyp = "String"
|
||||
u32 = "uint32"
|
||||
)
|
||||
|
||||
// Method is a bitfield representing something that the
// generator knows how to print.
type Method uint8

// isset reports whether every bit of f is set in m.
func (m Method) isset(f Method) bool { return m&f == f }

// String implements fmt.Stringer. Single flags map to their
// directive name; composites are joined with '+', e.g.
// "decode+encode+test".
func (m Method) String() string {
	switch m {
	case 0, invalidmeth:
		return "<invalid method>"
	case Decode:
		return "decode"
	case Encode:
		return "encode"
	case Marshal:
		return "marshal"
	case Unmarshal:
		return "unmarshal"
	case Size:
		return "size"
	case Test:
		return "test"
	}
	// composite: join the individual names
	out := ""
	for _, bit := range [...]Method{Decode, Encode, Marshal, Unmarshal, Size, Test} {
		if !m.isset(bit) {
			continue
		}
		if out != "" {
			out += "+"
		}
		out += bit.String()
	}
	return out
}

// strtoMeth maps a directive name to its Method bit,
// returning 0 for unrecognized names.
func strtoMeth(s string) Method {
	switch s {
	case "decode":
		return Decode
	case "encode":
		return Encode
	case "marshal":
		return Marshal
	case "unmarshal":
		return Unmarshal
	case "size":
		return Size
	case "test":
		return Test
	}
	return 0
}

// The individual method bits; a Method value is an OR of these.
const (
	Decode      Method = 1 << iota // msgp.Decodable
	Encode                         // msgp.Encodable
	Marshal                        // msgp.Marshaler
	Unmarshal                      // msgp.Unmarshaler
	Size                           // msgp.Sizer
	Test                           // generate tests
	invalidmeth                    // this isn't a method

	encodetest  = Encode | Decode | Test     // tests for Encodable and Decodable
	marshaltest = Marshal | Unmarshal | Test // tests for Marshaler and Unmarshaler
)
|
||||
|
||||
type Printer struct {
|
||||
gens []generator
|
||||
}
|
||||
|
||||
func NewPrinter(m Method, out io.Writer, tests io.Writer) *Printer {
|
||||
if m.isset(Test) && tests == nil {
|
||||
panic("cannot print tests with 'nil' tests argument!")
|
||||
}
|
||||
gens := make([]generator, 0, 7)
|
||||
if m.isset(Decode) {
|
||||
gens = append(gens, decode(out))
|
||||
}
|
||||
if m.isset(Encode) {
|
||||
gens = append(gens, encode(out))
|
||||
}
|
||||
if m.isset(Marshal) {
|
||||
gens = append(gens, marshal(out))
|
||||
}
|
||||
if m.isset(Unmarshal) {
|
||||
gens = append(gens, unmarshal(out))
|
||||
}
|
||||
if m.isset(Size) {
|
||||
gens = append(gens, sizes(out))
|
||||
}
|
||||
if m.isset(marshaltest) {
|
||||
gens = append(gens, mtest(tests))
|
||||
}
|
||||
if m.isset(encodetest) {
|
||||
gens = append(gens, etest(tests))
|
||||
}
|
||||
if len(gens) == 0 {
|
||||
panic("NewPrinter called with invalid method flags")
|
||||
}
|
||||
return &Printer{gens: gens}
|
||||
}
|
||||
|
||||
// TransformPass is a pass that transforms individual
|
||||
// elements. (Note that if the returned is different from
|
||||
// the argument, it should not point to the same objects.)
|
||||
type TransformPass func(Elem) Elem
|
||||
|
||||
// IgnoreTypename is a pass that just ignores
|
||||
// types of a given name.
|
||||
func IgnoreTypename(name string) TransformPass {
|
||||
return func(e Elem) Elem {
|
||||
if e.TypeName() == name {
|
||||
return nil
|
||||
}
|
||||
return e
|
||||
}
|
||||
}
|
||||
|
||||
// ApplyDirective applies a directive to a named pass
|
||||
// and all of its dependents.
|
||||
func (p *Printer) ApplyDirective(pass Method, t TransformPass) {
|
||||
for _, g := range p.gens {
|
||||
if g.Method().isset(pass) {
|
||||
g.Add(t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Print prints an Elem.
|
||||
func (p *Printer) Print(e Elem) error {
|
||||
for _, g := range p.gens {
|
||||
err := g.Execute(e)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// generator is the interface through
|
||||
// which code is generated.
|
||||
type generator interface {
|
||||
Method() Method
|
||||
Add(p TransformPass)
|
||||
Execute(Elem) error // execute writes the method for the provided object.
|
||||
}
|
||||
|
||||
type passes []TransformPass
|
||||
|
||||
func (p *passes) Add(t TransformPass) {
|
||||
*p = append(*p, t)
|
||||
}
|
||||
|
||||
func (p *passes) applyall(e Elem) Elem {
|
||||
for _, t := range *p {
|
||||
e = t(e)
|
||||
if e == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
type traversal interface {
|
||||
gMap(*Map)
|
||||
gSlice(*Slice)
|
||||
gArray(*Array)
|
||||
gPtr(*Ptr)
|
||||
gBase(*BaseElem)
|
||||
gStruct(*Struct)
|
||||
}
|
||||
|
||||
// type-switch dispatch to the correct
|
||||
// method given the type of 'e'
|
||||
func next(t traversal, e Elem) {
|
||||
switch e := e.(type) {
|
||||
case *Map:
|
||||
t.gMap(e)
|
||||
case *Struct:
|
||||
t.gStruct(e)
|
||||
case *Slice:
|
||||
t.gSlice(e)
|
||||
case *Array:
|
||||
t.gArray(e)
|
||||
case *Ptr:
|
||||
t.gPtr(e)
|
||||
case *BaseElem:
|
||||
t.gBase(e)
|
||||
default:
|
||||
panic("bad element type")
|
||||
}
|
||||
}
|
||||
|
||||
// possibly-immutable method receiver
|
||||
func imutMethodReceiver(p Elem) string {
|
||||
switch e := p.(type) {
|
||||
case *Struct:
|
||||
// TODO(HACK): actually do real math here.
|
||||
if len(e.Fields) <= 3 {
|
||||
for i := range e.Fields {
|
||||
if be, ok := e.Fields[i].FieldElem.(*BaseElem); !ok || (be.Value == IDENT || be.Value == Bytes) {
|
||||
goto nope
|
||||
}
|
||||
}
|
||||
return p.TypeName()
|
||||
}
|
||||
nope:
|
||||
return "*" + p.TypeName()
|
||||
|
||||
// gets dereferenced automatically
|
||||
case *Array:
|
||||
return "*" + p.TypeName()
|
||||
|
||||
// everything else can be
|
||||
// by-value.
|
||||
default:
|
||||
return p.TypeName()
|
||||
}
|
||||
}
|
||||
|
||||
// if necessary, wraps a type
|
||||
// so that its method receiver
|
||||
// is of the write type.
|
||||
func methodReceiver(p Elem) string {
|
||||
switch p.(type) {
|
||||
|
||||
// structs and arrays are
|
||||
// dereferenced automatically,
|
||||
// so no need to alter varname
|
||||
case *Struct, *Array:
|
||||
return "*" + p.TypeName()
|
||||
// set variable name to
|
||||
// *varname
|
||||
default:
|
||||
p.SetVarname("(*" + p.Varname() + ")")
|
||||
return "*" + p.TypeName()
|
||||
}
|
||||
}
|
||||
|
||||
func unsetReceiver(p Elem) {
|
||||
switch p.(type) {
|
||||
case *Struct, *Array:
|
||||
default:
|
||||
p.SetVarname("z")
|
||||
}
|
||||
}
|
||||
|
||||
// shared utility for generators
type printer struct {
	w   io.Writer
	err error // first write error; once non-nil, all further output is suppressed
}

// writes "var {{name}} {{typ}};"
func (p *printer) declare(name string, typ string) {
	p.printf("\nvar %s %s", name, typ)
}

// does:
//
// if m != nil && size > 0 {
//     m = make(type, size)
// } else if len(m) > 0 {
//     for key, _ := range m { delete(m, key) }
// }
//
// i.e. it emits code that allocates the map if necessary,
// or clears an existing one for reuse.
func (p *printer) resizeMap(size string, m *Map) {
	vn := m.Varname()
	if !p.ok() {
		return
	}
	p.printf("\nif %s == nil && %s > 0 {", vn, size)
	p.printf("\n%s = make(%s, %s)", vn, m.TypeName(), size)
	p.printf("\n} else if len(%s) > 0 {", vn)
	p.clearMap(vn)
	p.closeblock()
}

// assign key to value based on varnames
func (p *printer) mapAssign(m *Map) {
	if !p.ok() {
		return
	}
	p.printf("\n%s[%s] = %s", m.Varname(), m.Keyidx, m.Validx)
}

// clear map keys
func (p *printer) clearMap(name string) {
	p.printf("\nfor key, _ := range %[1]s { delete(%[1]s, key) }", name)
}

// resizeSlice emits code that reuses the slice's backing array
// when capacity allows, and allocates a fresh one otherwise.
func (p *printer) resizeSlice(size string, s *Slice) {
	p.printf("\nif cap(%[1]s) >= int(%[2]s) { %[1]s = (%[1]s)[:%[2]s] } else { %[1]s = make(%[3]s, %[2]s) }", s.Varname(), size, s.TypeName())
}

// arrayCheck emits a length check that sets err to a
// msgp.ArrayError and returns on mismatch.
func (p *printer) arrayCheck(want string, got string) {
	p.printf("\nif %[1]s != %[2]s { err = msgp.ArrayError{Wanted: %[2]s, Got: %[1]s}; return }", got, want)
}

// closeblock emits a closing brace.
func (p *printer) closeblock() { p.print("\n}") }

// does:
//
// for idx := range iter {
//     {{generate inner}}
// }
//
func (p *printer) rangeBlock(idx string, iter string, t traversal, inner Elem) {
	p.printf("\n for %s := range %s {", idx, iter)
	next(t, inner)
	p.closeblock()
}

// nakedReturn emits a bare "return" plus the method's closing brace.
func (p *printer) nakedReturn() {
	if p.ok() {
		p.print("\nreturn\n}\n")
	}
}

// comment emits a single-line comment.
func (p *printer) comment(s string) {
	p.print("\n// " + s)
}

// printf formats to the underlying writer, latching the first error.
func (p *printer) printf(format string, args ...interface{}) {
	if p.err == nil {
		_, p.err = fmt.Fprintf(p.w, format, args...)
	}
}

// print writes a raw string, latching the first error.
func (p *printer) print(format string) {
	if p.err == nil {
		_, p.err = io.WriteString(p.w, format)
	}
}

// initPtr emits lazy allocation for a pointer that may be nil.
func (p *printer) initPtr(pt *Ptr) {
	if pt.Needsinit() {
		vname := pt.Varname()
		p.printf("\nif %s == nil { %s = new(%s); }", vname, vname, pt.Value.TypeName())
	}
}

// ok reports whether no write error has occurred yet.
func (p *printer) ok() bool { return p.err == nil }

// tobaseConvert renders a conversion of b's variable to its base type.
func tobaseConvert(b *BaseElem) string {
	return b.ToBase() + "(" + b.Varname() + ")"
}
|
182
vendor/github.com/tinylib/msgp/gen/testgen.go
generated
vendored
Normal file
182
vendor/github.com/tinylib/msgp/gen/testgen.go
generated
vendored
Normal file
@ -0,0 +1,182 @@
|
||||
package gen
|
||||
|
||||
import (
|
||||
"io"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
var (
|
||||
marshalTestTempl = template.New("MarshalTest")
|
||||
encodeTestTempl = template.New("EncodeTest")
|
||||
)
|
||||
|
||||
// TODO(philhofer):
|
||||
// for simplicity's sake, right now
|
||||
// we can only generate tests for types
|
||||
// that can be initialized with the
|
||||
// "Type{}" syntax.
|
||||
// we should support all the types.
|
||||
|
||||
func mtest(w io.Writer) *mtestGen {
|
||||
return &mtestGen{w: w}
|
||||
}
|
||||
|
||||
type mtestGen struct {
|
||||
passes
|
||||
w io.Writer
|
||||
}
|
||||
|
||||
func (m *mtestGen) Execute(p Elem) error {
|
||||
p = m.applyall(p)
|
||||
if p != nil && IsPrintable(p) {
|
||||
switch p.(type) {
|
||||
case *Struct, *Array, *Slice, *Map:
|
||||
return marshalTestTempl.Execute(m.w, p)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mtestGen) Method() Method { return marshaltest }
|
||||
|
||||
type etestGen struct {
|
||||
passes
|
||||
w io.Writer
|
||||
}
|
||||
|
||||
func etest(w io.Writer) *etestGen {
|
||||
return &etestGen{w: w}
|
||||
}
|
||||
|
||||
func (e *etestGen) Execute(p Elem) error {
|
||||
p = e.applyall(p)
|
||||
if p != nil && IsPrintable(p) {
|
||||
switch p.(type) {
|
||||
case *Struct, *Array, *Slice, *Map:
|
||||
return encodeTestTempl.Execute(e.w, p)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *etestGen) Method() Method { return encodetest }
|
||||
|
||||
// init compiles the two test templates. The template bodies are the
// text of the generated *_test.go file; {{.TypeName}} is filled from
// the element being generated. NOTE(review): interior whitespace of
// the template literals was reconstructed from a mangled paste —
// confirm against the upstream file before relying on byte equality
// of generated output.
func init() {
	template.Must(marshalTestTempl.Parse(`func TestMarshalUnmarshal{{.TypeName}}(t *testing.T) {
	v := {{.TypeName}}{}
	bts, err := v.MarshalMsg(nil)
	if err != nil {
		t.Fatal(err)
	}
	left, err := v.UnmarshalMsg(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
	}

	left, err = msgp.Skip(bts)
	if err != nil {
		t.Fatal(err)
	}
	if len(left) > 0 {
		t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
	}
}

func BenchmarkMarshalMsg{{.TypeName}}(b *testing.B) {
	v := {{.TypeName}}{}
	b.ReportAllocs()
	b.ResetTimer()
	for i:=0; i<b.N; i++ {
		v.MarshalMsg(nil)
	}
}

func BenchmarkAppendMsg{{.TypeName}}(b *testing.B) {
	v := {{.TypeName}}{}
	bts := make([]byte, 0, v.Msgsize())
	bts, _ = v.MarshalMsg(bts[0:0])
	b.SetBytes(int64(len(bts)))
	b.ReportAllocs()
	b.ResetTimer()
	for i:=0; i<b.N; i++ {
		bts, _ = v.MarshalMsg(bts[0:0])
	}
}

func BenchmarkUnmarshal{{.TypeName}}(b *testing.B) {
	v := {{.TypeName}}{}
	bts, _ := v.MarshalMsg(nil)
	b.ReportAllocs()
	b.SetBytes(int64(len(bts)))
	b.ResetTimer()
	for i:=0; i<b.N; i++ {
		_, err := v.UnmarshalMsg(bts)
		if err != nil {
			b.Fatal(err)
		}
	}
}

`))

	template.Must(encodeTestTempl.Parse(`func TestEncodeDecode{{.TypeName}}(t *testing.T) {
	v := {{.TypeName}}{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)

	m := v.Msgsize()
	if buf.Len() > m {
		t.Logf("WARNING: Msgsize() for %v is inaccurate", v)
	}

	vn := {{.TypeName}}{}
	err := msgp.Decode(&buf, &vn)
	if err != nil {
		t.Error(err)
	}

	buf.Reset()
	msgp.Encode(&buf, &v)
	err = msgp.NewReader(&buf).Skip()
	if err != nil {
		t.Error(err)
	}
}

func BenchmarkEncode{{.TypeName}}(b *testing.B) {
	v := {{.TypeName}}{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	en := msgp.NewWriter(msgp.Nowhere)
	b.ReportAllocs()
	b.ResetTimer()
	for i:=0; i<b.N; i++ {
		v.EncodeMsg(en)
	}
	en.Flush()
}

func BenchmarkDecode{{.TypeName}}(b *testing.B) {
	v := {{.TypeName}}{}
	var buf bytes.Buffer
	msgp.Encode(&buf, &v)
	b.SetBytes(int64(buf.Len()))
	rd := msgp.NewEndlessReader(buf.Bytes(), b)
	dc := msgp.NewReader(rd)
	b.ReportAllocs()
	b.ResetTimer()
	for i:=0; i<b.N; i++ {
		err := v.DecodeMsg(dc)
		if err != nil {
			b.Fatal(err)
		}
	}
}

`))

}
|
199
vendor/github.com/tinylib/msgp/gen/unmarshal.go
generated
vendored
Normal file
199
vendor/github.com/tinylib/msgp/gen/unmarshal.go
generated
vendored
Normal file
@ -0,0 +1,199 @@
|
||||
package gen
|
||||
|
||||
import (
|
||||
"io"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// unmarshal returns a generator that writes
// UnmarshalMsg methods to w.
func unmarshal(w io.Writer) *unmarshalGen {
	return &unmarshalGen{
		p: printer{w: w},
	}
}

// unmarshalGen generates msgp.Unmarshaler implementations.
type unmarshalGen struct {
	passes
	p        printer
	hasfield bool // whether "var field []byte" has been emitted for the current method
}

func (u *unmarshalGen) Method() Method { return Unmarshal }

// needsField emits the 'field' declaration, at most once per method.
func (u *unmarshalGen) needsField() {
	if u.hasfield {
		return
	}
	u.p.print("\nvar field []byte; _ = field")
	u.hasfield = true
}

// Execute writes the UnmarshalMsg method for p after
// running the transform passes.
func (u *unmarshalGen) Execute(p Elem) error {
	u.hasfield = false
	if !u.p.ok() {
		return u.p.err
	}
	p = u.applyall(p)
	if p == nil {
		return nil
	}
	if !IsPrintable(p) {
		return nil
	}

	u.p.comment("UnmarshalMsg implements msgp.Unmarshaler")

	u.p.printf("\nfunc (%s %s) UnmarshalMsg(bts []byte) (o []byte, err error) {", p.Varname(), methodReceiver(p))
	next(u, p)
	u.p.print("\no = bts")
	u.p.nakedReturn()
	// undo the varname rewrite done by methodReceiver
	unsetReceiver(p)
	return u.p.err
}
|
||||
|
||||
// does assignment to the variable "name" with the type "base"
func (u *unmarshalGen) assignAndCheck(name string, base string) {
	if !u.p.ok() {
		return
	}
	u.p.printf("\n%s, bts, err = msgp.Read%sBytes(bts)", name, base)
	u.p.print(errcheck)
}

// gStruct generates unmarshaling code for a struct,
// dispatching on its tuple/map encoding.
func (u *unmarshalGen) gStruct(s *Struct) {
	if !u.p.ok() {
		return
	}
	if s.AsTuple {
		u.tuple(s)
	} else {
		u.mapstruct(s)
	}
	return
}

// tuple generates code for a struct encoded as a fixed-size array:
// read the array header, verify the length, then each field in order.
func (u *unmarshalGen) tuple(s *Struct) {

	// open block
	sz := randIdent()
	u.p.declare(sz, u32)
	u.assignAndCheck(sz, arrayHeader)
	u.p.arrayCheck(strconv.Itoa(len(s.Fields)), sz)
	for i := range s.Fields {
		if !u.p.ok() {
			return
		}
		next(u, s.Fields[i].FieldElem)
	}
}

// mapstruct generates code for a struct encoded as a map:
// loop over the keys, switch on the field tag, skip unknown keys.
func (u *unmarshalGen) mapstruct(s *Struct) {
	u.needsField()
	sz := randIdent()
	u.p.declare(sz, u32)
	u.assignAndCheck(sz, mapHeader)

	u.p.printf("\nfor %s > 0 {", sz)
	u.p.printf("\n%s--; field, bts, err = msgp.ReadMapKeyZC(bts)", sz)
	u.p.print(errcheck)
	u.p.print("\nswitch msgp.UnsafeString(field) {")
	for i := range s.Fields {
		if !u.p.ok() {
			return
		}
		u.p.printf("\ncase \"%s\":", s.Fields[i].FieldTag)
		next(u, s.Fields[i].FieldElem)
	}
	u.p.print("\ndefault:\nbts, err = msgp.Skip(bts)")
	u.p.print(errcheck)
	u.p.print("\n}\n}") // close switch and for loop
}
|
||||
|
||||
// gBase generates unmarshaling code for a base (scalar-like) element.
// When b.Convert is set, the value is read into a temporary of the
// base type and converted back to the user type afterwards.
func (u *unmarshalGen) gBase(b *BaseElem) {
	if !u.p.ok() {
		return
	}

	refname := b.Varname() // assigned to
	lowered := b.Varname() // passed as argument
	if b.Convert {
		// begin 'tmp' block
		refname = randIdent()
		lowered = b.ToBase() + "(" + lowered + ")"
		u.p.printf("\n{\nvar %s %s", refname, b.BaseType())
	}

	switch b.Value {
	case Bytes:
		u.p.printf("\n%s, bts, err = msgp.ReadBytesBytes(bts, %s)", refname, lowered)
	case Ext:
		u.p.printf("\nbts, err = msgp.ReadExtensionBytes(bts, %s)", lowered)
	case IDENT:
		u.p.printf("\nbts, err = %s.UnmarshalMsg(bts)", lowered)
	default:
		u.p.printf("\n%s, bts, err = msgp.Read%sBytes(bts)", refname, b.BaseName())
	}
	if b.Convert {
		// close 'tmp' block
		u.p.printf("\n%s = %s(%s)\n}", b.Varname(), b.FromBase(), refname)
	}

	u.p.print(errcheck)
}
|
||||
|
||||
// gArray generates unmarshaling code for a fixed-size array.
func (u *unmarshalGen) gArray(a *Array) {
	if !u.p.ok() {
		return
	}

	// special case for [const]byte objects
	// see decode.go for symmetry
	if be, ok := a.Els.(*BaseElem); ok && be.Value == Byte {
		u.p.printf("\nbts, err = msgp.ReadExactBytes(bts, (%s)[:])", a.Varname())
		u.p.print(errcheck)
		return
	}

	sz := randIdent()
	u.p.declare(sz, u32)
	u.assignAndCheck(sz, arrayHeader)
	u.p.arrayCheck(a.Size, sz)
	u.p.rangeBlock(a.Index, a.Varname(), u, a.Els)
}

// gSlice generates unmarshaling code for a slice: read the
// header, resize the slice, then decode each element.
func (u *unmarshalGen) gSlice(s *Slice) {
	if !u.p.ok() {
		return
	}
	sz := randIdent()
	u.p.declare(sz, u32)
	u.assignAndCheck(sz, arrayHeader)
	u.p.resizeSlice(sz, s)
	u.p.rangeBlock(s.Index, s.Varname(), u, s.Els)
}

// gMap generates unmarshaling code for a map.
func (u *unmarshalGen) gMap(m *Map) {
	if !u.p.ok() {
		return
	}
	sz := randIdent()
	u.p.declare(sz, u32)
	u.assignAndCheck(sz, mapHeader)

	// allocate or clear map
	u.p.resizeMap(sz, m)

	// loop and get key,value
	u.p.printf("\nfor %s > 0 {", sz)
	u.p.printf("\nvar %s string; var %s %s; %s--", m.Keyidx, m.Validx, m.Value.TypeName(), sz)
	u.assignAndCheck(m.Keyidx, stringTyp)
	next(u, m.Value)
	u.p.mapAssign(m)
	u.p.closeblock()
}

// gPtr generates unmarshaling code for a pointer: msgpack nil maps
// to a nil pointer; otherwise the pointer is allocated if needed
// and its target decoded.
func (u *unmarshalGen) gPtr(p *Ptr) {
	u.p.printf("\nif msgp.IsNil(bts) { bts, err = msgp.ReadNilBytes(bts); if err != nil { return }; %s = nil; } else { ", p.Varname())
	u.p.initPtr(p)
	next(u, p.Value)
	u.p.closeblock()
}
|
119
vendor/github.com/tinylib/msgp/main.go
generated
vendored
Normal file
119
vendor/github.com/tinylib/msgp/main.go
generated
vendored
Normal file
@ -0,0 +1,119 @@
|
||||
// msgp is a code generation tool for
|
||||
// creating methods to serialize and de-serialize
|
||||
// Go data structures to and from MessagePack.
|
||||
//
|
||||
// This package is targeted at the `go generate` tool.
|
||||
// To use it, include the following directive in a
|
||||
// go source file with types requiring source generation:
|
||||
//
|
||||
// //go:generate msgp
|
||||
//
|
||||
// The go generate tool should set the proper environment variables for
|
||||
// the generator to execute without any command-line flags. However, the
|
||||
// following options are supported, if you need them:
|
||||
//
|
||||
// -o = output file name (default is {input}_gen.go)
|
||||
// -file = input file name (or directory; default is $GOFILE, which is set by the `go generate` command)
|
||||
// -io = satisfy the `msgp.Decodable` and `msgp.Encodable` interfaces (default is true)
|
||||
// -marshal = satisfy the `msgp.Marshaler` and `msgp.Unmarshaler` interfaces (default is true)
|
||||
// -tests = generate tests and benchmarks (default is true)
|
||||
//
|
||||
// For more information, please read README.md, and the wiki at github.com/tinylib/msgp
|
||||
//
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/tinylib/msgp/gen"
|
||||
"github.com/tinylib/msgp/parse"
|
||||
"github.com/tinylib/msgp/printer"
|
||||
"github.com/ttacon/chalk"
|
||||
)
|
||||
|
||||
// Command-line flags. When run via `go generate`, the GOFILE
// environment variable supplies the default input file (see main).
var (
	out        = flag.String("o", "", "output file")
	file       = flag.String("file", "", "input file")
	encode     = flag.Bool("io", true, "create Encode and Decode methods")
	marshal    = flag.Bool("marshal", true, "create Marshal and Unmarshal methods")
	tests      = flag.Bool("tests", true, "create tests and benchmarks")
	unexported = flag.Bool("unexported", false, "also process unexported types")
)
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
// GOFILE is set by go generate
|
||||
if *file == "" {
|
||||
*file = os.Getenv("GOFILE")
|
||||
if *file == "" {
|
||||
fmt.Println(chalk.Red.Color("No file to parse."))
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
var mode gen.Method
|
||||
if *encode {
|
||||
mode |= (gen.Encode | gen.Decode | gen.Size)
|
||||
}
|
||||
if *marshal {
|
||||
mode |= (gen.Marshal | gen.Unmarshal | gen.Size)
|
||||
}
|
||||
if *tests {
|
||||
mode |= gen.Test
|
||||
}
|
||||
|
||||
if mode&^gen.Test == 0 {
|
||||
fmt.Println(chalk.Red.Color("No methods to generate; -io=false && -marshal=false"))
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err := Run(*file, mode, *unexported); err != nil {
|
||||
fmt.Println(chalk.Red.Color(err.Error()))
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Run writes all methods using the associated file or path, e.g.
|
||||
//
|
||||
// err := msgp.Run("path/to/myfile.go", gen.Size|gen.Marshal|gen.Unmarshal|gen.Test, false)
|
||||
//
|
||||
func Run(gofile string, mode gen.Method, unexported bool) error {
|
||||
if mode&^gen.Test == 0 {
|
||||
return nil
|
||||
}
|
||||
fmt.Println(chalk.Magenta.Color("======== MessagePack Code Generator ======="))
|
||||
fmt.Printf(chalk.Magenta.Color(">>> Input: \"%s\"\n"), gofile)
|
||||
fs, err := parse.File(gofile, unexported)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(fs.Identities) == 0 {
|
||||
fmt.Println(chalk.Magenta.Color("No types requiring code generation were found!"))
|
||||
return nil
|
||||
}
|
||||
|
||||
return printer.PrintFile(newFilename(gofile, fs.Package), fs, mode)
|
||||
}
|
||||
|
||||
// picks a new file name based on input flags and input filename(s).
|
||||
func newFilename(old string, pkg string) string {
|
||||
if *out != "" {
|
||||
if pre := strings.TrimPrefix(*out, old); len(pre) > 0 &&
|
||||
!strings.HasSuffix(*out, ".go") {
|
||||
return filepath.Join(old, *out)
|
||||
}
|
||||
return *out
|
||||
}
|
||||
|
||||
if fi, err := os.Stat(old); err == nil && fi.IsDir() {
|
||||
old = filepath.Join(old, pkg)
|
||||
}
|
||||
// new file name is old file name + _gen.go
|
||||
return strings.TrimSuffix(old, ".go") + "_gen.go"
|
||||
}
|
24
vendor/github.com/tinylib/msgp/msgp/advise_linux.go
generated
vendored
Normal file
24
vendor/github.com/tinylib/msgp/msgp/advise_linux.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
// +build linux,!appengine
|
||||
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// adviseRead hints to the kernel that mem will be read
// sequentially and is wanted soon. Errors are ignored;
// the hint is best-effort.
func adviseRead(mem []byte) {
	syscall.Madvise(mem, syscall.MADV_SEQUENTIAL|syscall.MADV_WILLNEED)
}

// adviseWrite hints that mem will be accessed sequentially.
func adviseWrite(mem []byte) {
	syscall.Madvise(mem, syscall.MADV_SEQUENTIAL)
}

// fallocate reserves sz bytes for f, falling back to Truncate
// on filesystems without fallocate support.
func fallocate(f *os.File, sz int64) error {
	if err := syscall.Fallocate(int(f.Fd()), 0, 0, sz); err != syscall.ENOTSUP {
		return err
	}
	return f.Truncate(sz)
}
|
17
vendor/github.com/tinylib/msgp/msgp/advise_other.go
generated
vendored
Normal file
17
vendor/github.com/tinylib/msgp/msgp/advise_other.go
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
// +build !linux appengine
|
||||
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// TODO: darwin, BSD support

// adviseRead is a no-op on platforms without madvise.
func adviseRead(mem []byte) {}

// adviseWrite is a no-op on platforms without madvise.
func adviseWrite(mem []byte) {}

// fallocate reserves space by simply truncating to sz.
func fallocate(f *os.File, sz int64) error {
	return f.Truncate(sz)
}
|
15
vendor/github.com/tinylib/msgp/msgp/appengine.go
generated
vendored
Normal file
15
vendor/github.com/tinylib/msgp/msgp/appengine.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
// +build appengine
|
||||
|
||||
package msgp
|
||||
|
||||
// let's just assume appengine
// uses 64-bit hardware...
const smallint = false

// UnsafeString copies b into a new string. (On appengine the
// unsafe package is unavailable, so this is a plain copy rather
// than a zero-copy cast.)
func UnsafeString(b []byte) string { return string(b) }

// UnsafeBytes copies s into a new byte slice.
func UnsafeBytes(s string) []byte { return []byte(s) }
|
39
vendor/github.com/tinylib/msgp/msgp/circular.go
generated
vendored
Normal file
39
vendor/github.com/tinylib/msgp/msgp/circular.go
generated
vendored
Normal file
@ -0,0 +1,39 @@
|
||||
package msgp
|
||||
|
||||
// timer is the subset of *testing.B used to pause the
// benchmark clock while the buffer is being refilled.
type timer interface {
	StartTimer()
	StopTimer()
}

// EndlessReader is an io.Reader
// that loops over the same data
// endlessly. It is used for benchmarking.
type EndlessReader struct {
	tb     timer
	data   []byte
	offset int
}

// NewEndlessReader returns a new endless reader
func NewEndlessReader(b []byte, tb timer) *EndlessReader {
	return &EndlessReader{tb: tb, data: b, offset: 0}
}

// Read implements io.Reader. In practice, it
// always returns (len(p), nil), filling p by cycling
// over the data while the benchmark timer is stopped.
func (c *EndlessReader) Read(p []byte) (int, error) {
	c.tb.StopTimer()
	filled := 0
	for filled < len(p) {
		n := copy(p[filled:], c.data[c.offset:])
		filled += n
		c.offset = (c.offset + n) % len(c.data)
	}
	c.tb.StartTimer()
	return filled, nil
}
|
142
vendor/github.com/tinylib/msgp/msgp/defs.go
generated
vendored
Normal file
142
vendor/github.com/tinylib/msgp/msgp/defs.go
generated
vendored
Normal file
@ -0,0 +1,142 @@
|
||||
// This package is the support library for the msgp code generator (http://github.com/tinylib/msgp).
|
||||
//
|
||||
// This package defines the utilites used by the msgp code generator for encoding and decoding MessagePack
|
||||
// from []byte and io.Reader/io.Writer types. Much of this package is devoted to helping the msgp code
|
||||
// generator implement the Marshaler/Unmarshaler and Encodable/Decodable interfaces.
|
||||
//
|
||||
// This package defines four "families" of functions:
|
||||
// - AppendXxxx() appends an object to a []byte in MessagePack encoding.
|
||||
// - ReadXxxxBytes() reads an object from a []byte and returns the remaining bytes.
|
||||
// - (*Writer).WriteXxxx() writes an object to the buffered *Writer type.
|
||||
// - (*Reader).ReadXxxx() reads an object from a buffered *Reader type.
|
||||
//
|
||||
// Once a type has satisfied the `Encodable` and `Decodable` interfaces,
|
||||
// it can be written and read from arbitrary `io.Writer`s and `io.Reader`s using
|
||||
// msgp.Encode(io.Writer, msgp.Encodable)
|
||||
// and
|
||||
// msgp.Decode(io.Reader, msgp.Decodable)
|
||||
//
|
||||
// There are also methods for converting MessagePack to JSON without
|
||||
// an explicit de-serialization step.
|
||||
//
|
||||
// For additional tips, tricks, and gotchas, please visit
|
||||
// the wiki at http://github.com/tinylib/msgp
|
||||
package msgp
|
||||
|
||||
// Bit masks for extracting payload/tag bits of the
// single-byte "fix" family of msgpack prefixes.
const last4 = 0x0f
const first4 = 0xf0
const last5 = 0x1f
const first3 = 0xe0
const last7 = 0x7f

// isfixint reports whether b encodes a positive fixint (0XXXXXXX).
func isfixint(b byte) bool {
	return b < 0x80
}

// isnfixint reports whether b encodes a negative fixint (111XXXXX).
func isnfixint(b byte) bool {
	return b&first3 == mnfixint
}

// isfixmap reports whether b encodes a fixmap (1000XXXX).
func isfixmap(b byte) bool {
	return b&first4 == mfixmap
}

// isfixarray reports whether b encodes a fixarray (1001XXXX).
func isfixarray(b byte) bool {
	return b&first4 == mfixarray
}

// isfixstr reports whether b encodes a fixstr (101XXXXX).
func isfixstr(b byte) bool {
	return b&first3 == mfixstr
}

// wfixint encodes u (< 128) as a positive fixint byte.
func wfixint(u uint8) byte {
	return u & last7
}

// rfixint decodes a positive fixint byte.
func rfixint(b byte) uint8 {
	return b
}

// wnfixint encodes i (>= -32) as a negative fixint byte.
func wnfixint(i int8) byte {
	return byte(i) | mnfixint
}

// rnfixint decodes a negative fixint byte.
func rnfixint(b byte) int8 {
	return int8(b)
}

// rfixmap extracts the element count from a fixmap byte.
func rfixmap(b byte) uint8 {
	return b & last4
}

// wfixmap encodes an element count (< 16) as a fixmap byte.
func wfixmap(u uint8) byte {
	return mfixmap | (u & last4)
}

// rfixstr extracts the length from a fixstr byte.
func rfixstr(b byte) uint8 {
	return b & last5
}

// wfixstr encodes a length (< 32) as a fixstr byte.
func wfixstr(u uint8) byte {
	return (u & last5) | mfixstr
}

// rfixarray extracts the element count from a fixarray byte.
func rfixarray(b byte) uint8 {
	return (b & last4)
}

// wfixarray encodes an element count (< 16) as a fixarray byte.
func wfixarray(u uint8) byte {
	return (u & last4) | mfixarray
}

// These are all the byte
// prefixes defined by the
// msgpack standard
const (
	// 0XXXXXXX
	mfixint uint8 = 0x00

	// 111XXXXX
	mnfixint uint8 = 0xe0

	// 1000XXXX
	mfixmap uint8 = 0x80

	// 1001XXXX
	mfixarray uint8 = 0x90

	// 101XXXXX
	mfixstr uint8 = 0xa0

	mnil      uint8 = 0xc0
	mfalse    uint8 = 0xc2
	mtrue     uint8 = 0xc3
	mbin8     uint8 = 0xc4
	mbin16    uint8 = 0xc5
	mbin32    uint8 = 0xc6
	mext8     uint8 = 0xc7
	mext16    uint8 = 0xc8
	mext32    uint8 = 0xc9
	mfloat32  uint8 = 0xca
	mfloat64  uint8 = 0xcb
	muint8    uint8 = 0xcc
	muint16   uint8 = 0xcd
	muint32   uint8 = 0xce
	muint64   uint8 = 0xcf
	mint8     uint8 = 0xd0
	mint16    uint8 = 0xd1
	mint32    uint8 = 0xd2
	mint64    uint8 = 0xd3
	mfixext1  uint8 = 0xd4
	mfixext2  uint8 = 0xd5
	mfixext4  uint8 = 0xd6
	mfixext8  uint8 = 0xd7
	mfixext16 uint8 = 0xd8
	mstr8     uint8 = 0xd9
	mstr16    uint8 = 0xda
	mstr32    uint8 = 0xdb
	marray16  uint8 = 0xdc
	marray32  uint8 = 0xdd
	mmap16    uint8 = 0xde
	mmap32    uint8 = 0xdf
)
|
12
vendor/github.com/tinylib/msgp/msgp/defs_test.go
generated
vendored
Normal file
12
vendor/github.com/tinylib/msgp/msgp/defs_test.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
package msgp_test
|
||||
|
||||
//go:generate msgp -o=defgen_test.go -tests=false

// Blobs is a slice of Blob records used as a code-generation fixture.
type Blobs []Blob

// Blob is a simple struct exercising the msgp generator across a
// string, a float64, a byte slice, and an int64 field.
type Blob struct {
	Name   string  `msg:"name"`
	Float  float64 `msg:"float"`
	Bytes  []byte  `msg:"bytes"`
	Amount int64   `msg:"amount"`
}
|
241
vendor/github.com/tinylib/msgp/msgp/edit.go
generated
vendored
Normal file
241
vendor/github.com/tinylib/msgp/msgp/edit.go
generated
vendored
Normal file
@ -0,0 +1,241 @@
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"math"
|
||||
)
|
||||
|
||||
// Locate returns a []byte pointing to the field
|
||||
// in a messagepack map with the provided key. (The returned []byte
|
||||
// points to a sub-slice of 'raw'; Locate does no allocations.) If the
|
||||
// key doesn't exist in the map, a zero-length []byte will be returned.
|
||||
func Locate(key string, raw []byte) []byte {
|
||||
s, n := locate(raw, key)
|
||||
return raw[s:n]
|
||||
}
|
||||
|
||||
// Replace takes a key ("key") in a messagepack map ("raw")
|
||||
// and replaces its value with the one provided and returns
|
||||
// the new []byte. The returned []byte may point to the same
|
||||
// memory as "raw". Replace makes no effort to evaluate the validity
|
||||
// of the contents of 'val'. It may use up to the full capacity of 'raw.'
|
||||
// Replace returns 'nil' if the field doesn't exist or if the object in 'raw'
|
||||
// is not a map.
|
||||
func Replace(key string, raw []byte, val []byte) []byte {
|
||||
start, end := locate(raw, key)
|
||||
if start == end {
|
||||
return nil
|
||||
}
|
||||
return replace(raw, start, end, val, true)
|
||||
}
|
||||
|
||||
// CopyReplace works similarly to Replace except that the returned
|
||||
// byte slice does not point to the same memory as 'raw'. CopyReplace
|
||||
// returns 'nil' if the field doesn't exist or 'raw' isn't a map.
|
||||
func CopyReplace(key string, raw []byte, val []byte) []byte {
|
||||
start, end := locate(raw, key)
|
||||
if start == end {
|
||||
return nil
|
||||
}
|
||||
return replace(raw, start, end, val, false)
|
||||
}
|
||||
|
||||
// Remove removes a key-value pair from 'raw'. It returns
|
||||
// 'raw' unchanged if the key didn't exist.
|
||||
func Remove(key string, raw []byte) []byte {
|
||||
start, end := locateKV(raw, key)
|
||||
if start == end {
|
||||
return raw
|
||||
}
|
||||
raw = raw[:start+copy(raw[start:], raw[end:])]
|
||||
return resizeMap(raw, -1)
|
||||
}
|
||||
|
||||
// HasKey returns whether the map in 'raw' has
|
||||
// a field with key 'key'
|
||||
func HasKey(key string, raw []byte) bool {
|
||||
sz, bts, err := ReadMapHeaderBytes(raw)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
var field []byte
|
||||
for i := uint32(0); i < sz; i++ {
|
||||
field, bts, err = ReadStringZC(bts)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if UnsafeString(field) == key {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// replace substitutes raw[start:end] with val. When inplace is
// true it edits raw directly whenever the spare capacity allows;
// otherwise (or when capacity is insufficient) it allocates a
// fresh slice. The returned slice holds raw[:start] + val +
// raw[end:].
func replace(raw []byte, start int, end int, val []byte, inplace bool) []byte {
	oldLen := end - start
	newLen := len(val)

	if inplace {
		diff := newLen - oldLen
		switch {
		case diff == 0:
			// fastest case: a 1:1 overwrite
			copy(raw[start:], val)
			return raw
		case diff < 0:
			// new value is smaller: write it, then slide
			// the tail left and truncate
			n := copy(raw[start:], val)
			m := copy(raw[start+n:], raw[end:])
			return raw[:start+n+m]
		case diff < cap(raw)-len(raw):
			// new value is larger but fits in spare capacity:
			// extend, slide the tail right, then write
			raw = raw[:len(raw)+diff]
			copy(raw[end+diff:], raw[end:])
			copy(raw[start:], val)
			return raw
		}
	}

	// fall back to a fresh allocation
	out := make([]byte, len(raw)+newLen-oldLen)
	n := copy(out, raw[:start])
	m := copy(out[n:], val)
	copy(out[n+m:], raw[end:])
	return out
}
|
||||
|
||||
// locate does a naive O(n) search for the map key; returns start, end
// offsets of the VALUE within raw. (returns 0,0 on error or when the
// key is absent.) Offsets are computed as len(raw)-len(bts), which is
// valid because bts is always a suffix of raw.
func locate(raw []byte, key string) (start int, end int) {
	var (
		sz    uint32
		bts   []byte
		field []byte
		err   error
	)
	sz, bts, err = ReadMapHeaderBytes(raw)
	if err != nil {
		return
	}

	// loop and locate field
	for i := uint32(0); i < sz; i++ {
		field, bts, err = ReadStringZC(bts)
		if err != nil {
			return 0, 0
		}
		if UnsafeString(field) == key {
			// start location: value begins where the key ended
			l := len(raw)
			start = l - len(bts)
			// Skip over the value to find where it ends
			bts, err = Skip(bts)
			if err != nil {
				return 0, 0
			}
			end = l - len(bts)
			return
		}
		// not a match: skip this entry's value before reading
		// the next key
		bts, err = Skip(bts)
		if err != nil {
			return 0, 0
		}
	}
	return 0, 0
}
|
||||
|
||||
// locateKV works like locate, but the returned [start, end) span
// covers the key AND value (used by Remove to drop a whole entry).
// Returns 0,0 on error or when the key is absent.
func locateKV(raw []byte, key string) (start int, end int) {
	var (
		sz    uint32
		bts   []byte
		field []byte
		err   error
	)
	sz, bts, err = ReadMapHeaderBytes(raw)
	if err != nil {
		return 0, 0
	}

	for i := uint32(0); i < sz; i++ {
		// remember the remaining length BEFORE reading the key,
		// so 'start' points at the key's lead byte
		tmp := len(bts)
		field, bts, err = ReadStringZC(bts)
		if err != nil {
			return 0, 0
		}
		if UnsafeString(field) == key {
			start = len(raw) - tmp
			// skip the value; 'end' is where it stops
			bts, err = Skip(bts)
			if err != nil {
				return 0, 0
			}
			end = len(raw) - len(bts)
			return
		}
		// not a match: skip this entry's value
		bts, err = Skip(bts)
		if err != nil {
			return 0, 0
		}
	}
	return 0, 0
}
|
||||
|
||||
// delta is delta on map size
|
||||
func resizeMap(raw []byte, delta int64) []byte {
|
||||
var sz int64
|
||||
switch raw[0] {
|
||||
case mmap16:
|
||||
sz = int64(big.Uint16(raw[1:]))
|
||||
if sz+delta <= math.MaxUint16 {
|
||||
big.PutUint16(raw[1:], uint16(sz+delta))
|
||||
return raw
|
||||
}
|
||||
if cap(raw)-len(raw) >= 2 {
|
||||
raw = raw[0 : len(raw)+2]
|
||||
copy(raw[5:], raw[3:])
|
||||
big.PutUint32(raw[1:], uint32(sz+delta))
|
||||
return raw
|
||||
}
|
||||
n := make([]byte, 0, len(raw)+5)
|
||||
n = AppendMapHeader(n, uint32(sz+delta))
|
||||
return append(n, raw[3:]...)
|
||||
|
||||
case mmap32:
|
||||
sz = int64(big.Uint32(raw[1:]))
|
||||
big.PutUint32(raw[1:], uint32(sz+delta))
|
||||
return raw
|
||||
|
||||
default:
|
||||
sz = int64(rfixmap(raw[0]))
|
||||
if sz+delta < 16 {
|
||||
raw[0] = wfixmap(uint8(sz + delta))
|
||||
return raw
|
||||
} else if sz+delta <= math.MaxUint16 {
|
||||
if cap(raw)-len(raw) >= 2 {
|
||||
raw = raw[0 : len(raw)+2]
|
||||
copy(raw[3:], raw[1:])
|
||||
raw[0] = mmap16
|
||||
big.PutUint16(raw[1:], uint16(sz+delta))
|
||||
return raw
|
||||
}
|
||||
n := make([]byte, 0, len(raw)+5)
|
||||
n = AppendMapHeader(n, uint32(sz+delta))
|
||||
return append(n, raw[1:]...)
|
||||
}
|
||||
if cap(raw)-len(raw) >= 4 {
|
||||
raw = raw[0 : len(raw)+4]
|
||||
copy(raw[5:], raw[1:])
|
||||
raw[0] = mmap32
|
||||
big.PutUint32(raw[1:], uint32(sz+delta))
|
||||
return raw
|
||||
}
|
||||
n := make([]byte, 0, len(raw)+5)
|
||||
n = AppendMapHeader(n, uint32(sz+delta))
|
||||
return append(n, raw[1:]...)
|
||||
}
|
||||
}
|
200
vendor/github.com/tinylib/msgp/msgp/edit_test.go
generated
vendored
Normal file
200
vendor/github.com/tinylib/msgp/msgp/edit_test.go
generated
vendored
Normal file
@ -0,0 +1,200 @@
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestRemove builds a 3-entry map, removes the middle entry, and
// verifies the surviving entries and the updated header count.
func TestRemove(t *testing.T) {
	var buf bytes.Buffer
	w := NewWriter(&buf)
	w.WriteMapHeader(3)
	w.WriteString("first")
	w.WriteFloat64(-3.1)
	w.WriteString("second")
	w.WriteString("DELETE ME!!!")
	w.WriteString("third")
	w.WriteBytes([]byte("blah"))
	w.Flush()

	raw := Remove("second", buf.Bytes())

	// decoding back to a map checks both the shifted body and the
	// decremented header
	m, _, err := ReadMapStrIntfBytes(raw, nil)
	if err != nil {
		t.Fatal(err)
	}
	if len(m) != 2 {
		t.Errorf("expected %d fields; found %d", 2, len(m))
	}
	if _, ok := m["first"]; !ok {
		t.Errorf("field %q not found", "first")
	}
	if _, ok := m["third"]; !ok {
		t.Errorf("field %q not found", "third")
	}
	if _, ok := m["second"]; ok {
		t.Errorf("field %q (deleted field) still present", "second")
	}
}
|
||||
|
||||
// TestLocate verifies that Locate returns exactly the encoded
// value bytes for present keys, that HasKey agrees, and that a
// missing key yields a zero-length slice.
func TestLocate(t *testing.T) {
	var buf bytes.Buffer
	en := NewWriter(&buf)
	en.WriteMapHeader(2)
	en.WriteString("thing_one")
	en.WriteString("value_one")
	en.WriteString("thing_two")
	en.WriteFloat64(2.0)
	en.Flush()

	field := Locate("thing_one", buf.Bytes())
	if len(field) == 0 {
		t.Fatal("field not found")
	}

	if !HasKey("thing_one", buf.Bytes()) {
		t.Fatal("field not found")
	}

	// zbuf holds the expected encoding of the value, produced by
	// the same writer machinery
	var zbuf bytes.Buffer
	w := NewWriter(&zbuf)
	w.WriteString("value_one")
	w.Flush()

	if !bytes.Equal(zbuf.Bytes(), field) {
		t.Errorf("got %q; wanted %q", field, zbuf.Bytes())
	}

	zbuf.Reset()
	w.WriteFloat64(2.0)
	w.Flush()
	field = Locate("thing_two", buf.Bytes())
	if len(field) == 0 {
		t.Fatal("field not found")
	}
	if !bytes.Equal(zbuf.Bytes(), field) {
		t.Errorf("got %q; wanted %q", field, zbuf.Bytes())
	}

	// absent key: zero-length result, not an error
	field = Locate("nope", buf.Bytes())
	if len(field) != 0 {
		t.Fatalf("wanted a zero-length returned slice")
	}

}
|
||||
|
||||
func TestReplace(t *testing.T) {
|
||||
// there are 4 cases that need coverage:
|
||||
// - new value is smaller than old value
|
||||
// - new value is the same size as the old value
|
||||
// - new value is larger than old, but fits within cap(b)
|
||||
// - new value is larger than old, and doesn't fit within cap(b)
|
||||
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
en.WriteMapHeader(3)
|
||||
en.WriteString("thing_one")
|
||||
en.WriteString("value_one")
|
||||
en.WriteString("thing_two")
|
||||
en.WriteFloat64(2.0)
|
||||
en.WriteString("some_bytes")
|
||||
en.WriteBytes([]byte("here are some bytes"))
|
||||
en.Flush()
|
||||
|
||||
// same-size replacement
|
||||
var fbuf bytes.Buffer
|
||||
w := NewWriter(&fbuf)
|
||||
w.WriteFloat64(4.0)
|
||||
w.Flush()
|
||||
|
||||
// replace 2.0 with 4.0 in field two
|
||||
raw := Replace("thing_two", buf.Bytes(), fbuf.Bytes())
|
||||
if len(raw) == 0 {
|
||||
t.Fatal("field not found")
|
||||
}
|
||||
var err error
|
||||
m := make(map[string]interface{})
|
||||
m, _, err = ReadMapStrIntfBytes(raw, m)
|
||||
if err != nil {
|
||||
t.Logf("%q", raw)
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(m["thing_two"], 4.0) {
|
||||
t.Errorf("wanted %v; got %v", 4.0, m["thing_two"])
|
||||
}
|
||||
|
||||
// smaller-size replacement
|
||||
// replace 2.0 with []byte("hi!")
|
||||
fbuf.Reset()
|
||||
w.WriteBytes([]byte("hi!"))
|
||||
w.Flush()
|
||||
raw = Replace("thing_two", raw, fbuf.Bytes())
|
||||
if len(raw) == 0 {
|
||||
t.Fatal("field not found")
|
||||
}
|
||||
|
||||
m, _, err = ReadMapStrIntfBytes(raw, m)
|
||||
if err != nil {
|
||||
t.Logf("%q", raw)
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(m["thing_two"], []byte("hi!")) {
|
||||
t.Errorf("wanted %v; got %v", []byte("hi!"), m["thing_two"])
|
||||
}
|
||||
|
||||
// larger-size replacement
|
||||
fbuf.Reset()
|
||||
w.WriteBytes([]byte("some even larger bytes than before"))
|
||||
w.Flush()
|
||||
raw = Replace("some_bytes", raw, fbuf.Bytes())
|
||||
if len(raw) == 0 {
|
||||
t.Logf("%q", raw)
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
m, _, err = ReadMapStrIntfBytes(raw, m)
|
||||
if err != nil {
|
||||
t.Logf("%q", raw)
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(m["some_bytes"], []byte("some even larger bytes than before")) {
|
||||
t.Errorf("wanted %v; got %v", []byte("hello there!"), m["some_bytes"])
|
||||
}
|
||||
|
||||
// identical in-place replacement
|
||||
field := Locate("some_bytes", raw)
|
||||
newraw := CopyReplace("some_bytes", raw, field)
|
||||
|
||||
if !bytes.Equal(newraw, raw) {
|
||||
t.Logf("in: %q", raw)
|
||||
t.Logf("out: %q", newraw)
|
||||
t.Error("bytes not equal after copyreplace")
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkLocate measures key lookup over a small 3-entry map;
// throughput is reported as bytes traversed per operation.
func BenchmarkLocate(b *testing.B) {
	var buf bytes.Buffer
	en := NewWriter(&buf)
	en.WriteMapHeader(3)
	en.WriteString("thing_one")
	en.WriteString("value_one")
	en.WriteString("thing_two")
	en.WriteFloat64(2.0)
	en.WriteString("thing_three")
	en.WriteBytes([]byte("hello!"))
	en.Flush()

	raw := buf.Bytes()
	// bytes/s will be the number of bytes traversed per unit of time
	field := Locate("thing_three", raw)
	b.SetBytes(int64(len(raw) - len(field)))
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		Locate("thing_three", raw)
	}
}
|
99
vendor/github.com/tinylib/msgp/msgp/elsize.go
generated
vendored
Normal file
99
vendor/github.com/tinylib/msgp/msgp/elsize.go
generated
vendored
Normal file
@ -0,0 +1,99 @@
|
||||
package msgp
|
||||
|
||||
// size of every object on the wire,
// plus type information. gives us
// constant-time type information
// for traversing composite objects.
// Indexed by lead byte; entries for the fix* ranges are filled
// in by init() below. Unlisted bytes (e.g. 0xc1) stay zero,
// i.e. typ == InvalidType.
var sizes = [256]bytespec{
	mnil:      {size: 1, extra: constsize, typ: NilType},
	mfalse:    {size: 1, extra: constsize, typ: BoolType},
	mtrue:     {size: 1, extra: constsize, typ: BoolType},
	mbin8:     {size: 2, extra: extra8, typ: BinType},
	mbin16:    {size: 3, extra: extra16, typ: BinType},
	mbin32:    {size: 5, extra: extra32, typ: BinType},
	mext8:     {size: 3, extra: extra8, typ: ExtensionType},
	mext16:    {size: 4, extra: extra16, typ: ExtensionType},
	mext32:    {size: 6, extra: extra32, typ: ExtensionType},
	mfloat32:  {size: 5, extra: constsize, typ: Float32Type},
	mfloat64:  {size: 9, extra: constsize, typ: Float64Type},
	muint8:    {size: 2, extra: constsize, typ: UintType},
	muint16:   {size: 3, extra: constsize, typ: UintType},
	muint32:   {size: 5, extra: constsize, typ: UintType},
	muint64:   {size: 9, extra: constsize, typ: UintType},
	mint8:     {size: 2, extra: constsize, typ: IntType},
	mint16:    {size: 3, extra: constsize, typ: IntType},
	mint32:    {size: 5, extra: constsize, typ: IntType},
	mint64:    {size: 9, extra: constsize, typ: IntType},
	mfixext1:  {size: 3, extra: constsize, typ: ExtensionType},
	mfixext2:  {size: 4, extra: constsize, typ: ExtensionType},
	mfixext4:  {size: 6, extra: constsize, typ: ExtensionType},
	mfixext8:  {size: 10, extra: constsize, typ: ExtensionType},
	mfixext16: {size: 18, extra: constsize, typ: ExtensionType},
	mstr8:     {size: 2, extra: extra8, typ: StrType},
	mstr16:    {size: 3, extra: extra16, typ: StrType},
	mstr32:    {size: 5, extra: extra32, typ: StrType},
	marray16:  {size: 3, extra: array16v, typ: ArrayType},
	marray32:  {size: 5, extra: array32v, typ: ArrayType},
	mmap16:    {size: 3, extra: map16v, typ: MapType},
	mmap32:    {size: 5, extra: map32v, typ: MapType},
}
|
||||
|
||||
// init populates the fix* ranges of the sizes table, which cannot
// be expressed as individual keys in the literal above.
func init() {
	// set up fixed fields

	// fixint
	for i := mfixint; i < 0x80; i++ {
		sizes[i] = bytespec{size: 1, extra: constsize, typ: IntType}
	}

	// nfixint
	// (loop variable is uint16: a uint8 counter could never reach
	// the 0x100 bound and would wrap around at 0xff)
	for i := uint16(mnfixint); i < 0x100; i++ {
		sizes[uint8(i)] = bytespec{size: 1, extra: constsize, typ: StrType}
	}

	// fixstr gets constsize,
	// since the prefix yields the size
	for i := mfixstr; i < 0xc0; i++ {
		sizes[i] = bytespec{size: 1 + rfixstr(i), extra: constsize, typ: StrType}
	}

	// fixmap: 'extra' holds the object count (keys + values)
	for i := mfixmap; i < 0x90; i++ {
		sizes[i] = bytespec{size: 1, extra: varmode(2 * rfixmap(i)), typ: MapType}
	}

	// fixarray: 'extra' holds the element count
	for i := mfixarray; i < 0xa0; i++ {
		sizes[i] = bytespec{size: 1, extra: varmode(rfixarray(i)), typ: ArrayType}
	}
}
|
||||
|
||||
// a valid bytespec has
// non-zero 'size' and
// non-zero 'typ'
type bytespec struct {
	size  uint8   // prefix size information
	extra varmode // extra size information
	typ   Type    // type
	_     byte    // makes bytespec 4 bytes (yes, this matters)
}
|
||||
|
||||
// size mode
// if positive, # elements for composites;
// the negative sentinels below select how the
// remaining size is derived from the prefix bytes.
type varmode int8

const (
	constsize varmode = 0  // constant size (size bytes + uint8(varmode) objects)
	extra8            = -1 // has uint8(p[1]) extra bytes
	extra16           = -2 // has be16(p[1:]) extra bytes
	extra32           = -3 // has be32(p[1:]) extra bytes
	map16v            = -4 // use map16
	map32v            = -5 // use map32
	array16v          = -6 // use array16
	array32v          = -7 // use array32
)
|
||||
|
||||
// getType returns the Type encoded by the lead byte v
// (InvalidType for prefixes not defined by the standard).
func getType(v byte) Type {
	return sizes[v].typ
}
|
142
vendor/github.com/tinylib/msgp/msgp/errors.go
generated
vendored
Normal file
142
vendor/github.com/tinylib/msgp/msgp/errors.go
generated
vendored
Normal file
@ -0,0 +1,142 @@
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
var (
	// ErrShortBytes is returned when the
	// slice being decoded is too short to
	// contain the contents of the message
	ErrShortBytes error = errShort{}

	// this error is only returned
	// if we reach code that should
	// be unreachable
	fatal error = errFatal{}
)
|
||||
|
||||
// Error is the interface satisfied
// by all of the errors that originate
// from this package.
type Error interface {
	error

	// Resumable reports whether it is safe to continue
	// decoding from the same stream after this error —
	// i.e. false means the data is malformed and the
	// stream position is unrecoverable.
	Resumable() bool
}
|
||||
|
||||
// errShort is the (stateless) implementation behind ErrShortBytes.
type errShort struct{}

func (e errShort) Error() string   { return "msgp: too few bytes left to read object" }
func (e errShort) Resumable() bool { return false }
|
||||
|
||||
// errFatal is the (stateless) implementation behind the package-level
// 'fatal' sentinel, returned only from should-be-unreachable code.
type errFatal struct{}

func (f errFatal) Error() string   { return "msgp: fatal decoding error (unreachable code)" }
func (f errFatal) Resumable() bool { return false }
|
||||
|
||||
// ArrayError is an error returned
// when decoding a fix-sized array
// of the wrong size.
type ArrayError struct {
	Wanted uint32 // expected element count
	Got    uint32 // element count found on the wire
}

// Error implements the error interface.
func (ae ArrayError) Error() string {
	return fmt.Sprintf("msgp: wanted array of size %d; got %d", ae.Wanted, ae.Got)
}

// Resumable is always 'true' for ArrayErrors.
func (ae ArrayError) Resumable() bool {
	return true
}
|
||||
|
||||
// IntOverflow is returned when a call
// would downcast an integer to a type
// with too few bits to hold its value.
type IntOverflow struct {
	Value         int64 // the value of the integer
	FailedBitsize int   // the bit size that the int64 could not fit into
}

// Error implements the error interface.
func (o IntOverflow) Error() string {
	return fmt.Sprintf("msgp: %d overflows int%d", o.Value, o.FailedBitsize)
}

// Resumable is always 'true' for overflows.
func (o IntOverflow) Resumable() bool {
	return true
}
|
||||
|
||||
// UintOverflow is returned when a call
// would downcast an unsigned integer to a type
// with too few bits to hold its value.
type UintOverflow struct {
	Value         uint64 // value of the uint
	FailedBitsize int    // the bit size that couldn't fit the value
}

// Error implements the error interface.
func (o UintOverflow) Error() string {
	return fmt.Sprintf("msgp: %d overflows uint%d", o.Value, o.FailedBitsize)
}

// Resumable is always 'true' for overflows.
func (o UintOverflow) Resumable() bool {
	return true
}
|
||||
|
||||
// A TypeError is returned when a particular
|
||||
// decoding method is unsuitable for decoding
|
||||
// a particular MessagePack value.
|
||||
type TypeError struct {
|
||||
Method Type // Type expected by method
|
||||
Encoded Type // Type actually encoded
|
||||
}
|
||||
|
||||
// Error implements the error interface
|
||||
func (t TypeError) Error() string {
|
||||
return fmt.Sprintf("msgp: attempted to decode type %q with method for %q", t.Encoded, t.Method)
|
||||
}
|
||||
|
||||
// Resumable returns 'true' for TypeErrors
|
||||
func (t TypeError) Resumable() bool { return true }
|
||||
|
||||
// returns either InvalidPrefixError or
|
||||
// TypeError depending on whether or not
|
||||
// the prefix is recognized
|
||||
func badPrefix(want Type, lead byte) error {
|
||||
t := sizes[lead].typ
|
||||
if t == InvalidType {
|
||||
return InvalidPrefixError(lead)
|
||||
}
|
||||
return TypeError{Method: want, Encoded: t}
|
||||
}
|
||||
|
||||
// InvalidPrefixError is returned when a bad encoding
// uses a prefix that is not recognized in the MessagePack standard.
// This kind of error is unrecoverable.
type InvalidPrefixError byte

// Error implements the error interface.
func (p InvalidPrefixError) Error() string {
	return fmt.Sprintf("msgp: unrecognized type prefix 0x%x", byte(p))
}

// Resumable returns 'false' for InvalidPrefixErrors.
func (p InvalidPrefixError) Resumable() bool {
	return false
}
|
||||
|
||||
// ErrUnsupportedType is returned
|
||||
// when a bad argument is supplied
|
||||
// to a function that takes `interface{}`.
|
||||
type ErrUnsupportedType struct {
|
||||
T reflect.Type
|
||||
}
|
||||
|
||||
// Error implements error
|
||||
func (e *ErrUnsupportedType) Error() string { return fmt.Sprintf("msgp: type %q not supported", e.T) }
|
||||
|
||||
// Resumable returns 'true' for ErrUnsupportedType
|
||||
func (e *ErrUnsupportedType) Resumable() bool { return true }
|
548
vendor/github.com/tinylib/msgp/msgp/extension.go
generated
vendored
Normal file
548
vendor/github.com/tinylib/msgp/msgp/extension.go
generated
vendored
Normal file
@ -0,0 +1,548 @@
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
)
|
||||
|
||||
// Reserved extension type numbers for the built-in Go types this
// package encodes as MessagePack extensions.
const (
	// Complex64Extension is the extension number used for complex64
	Complex64Extension = 3

	// Complex128Extension is the extension number used for complex128
	Complex128Extension = 4

	// TimeExtension is the extension number used for time.Time
	TimeExtension = 5
)
|
||||
|
||||
// our extensions live here: extension type number -> constructor
// for a fresh zero value (populated via RegisterExtension).
var extensionReg = make(map[int8]func() Extension)
|
||||
|
||||
// RegisterExtension registers extensions so that they
|
||||
// can be initialized and returned by methods that
|
||||
// decode `interface{}` values. This should only
|
||||
// be called during initialization. f() should return
|
||||
// a newly-initialized zero value of the extension. Keep in
|
||||
// mind that extensions 3, 4, and 5 are reserved for
|
||||
// complex64, complex128, and time.Time, respectively,
|
||||
// and that MessagePack reserves extension types from -127 to -1.
|
||||
//
|
||||
// For example, if you wanted to register a user-defined struct:
|
||||
//
|
||||
// msgp.RegisterExtension(10, func() msgp.Extension { &MyExtension{} })
|
||||
//
|
||||
// RegisterExtension will panic if you call it multiple times
|
||||
// with the same 'typ' argument, or if you use a reserved
|
||||
// type (3, 4, or 5).
|
||||
func RegisterExtension(typ int8, f func() Extension) {
|
||||
switch typ {
|
||||
case Complex64Extension, Complex128Extension, TimeExtension:
|
||||
panic(fmt.Sprint("msgp: forbidden extension type:", typ))
|
||||
}
|
||||
if _, ok := extensionReg[typ]; ok {
|
||||
panic(fmt.Sprint("msgp: RegisterExtension() called with typ", typ, "more than once"))
|
||||
}
|
||||
extensionReg[typ] = f
|
||||
}
|
||||
|
||||
// ExtensionTypeError is an error type returned
// when there is a mis-match between an extension type
// and the type encoded on the wire.
type ExtensionTypeError struct {
	Got  int8 // type number found on the wire
	Want int8 // type number the caller expected
}

// Error implements the error interface.
func (x ExtensionTypeError) Error() string {
	return fmt.Sprintf("msgp: error decoding extension: wanted type %d; got type %d", x.Want, x.Got)
}

// Resumable returns 'true' for ExtensionTypeErrors.
func (x ExtensionTypeError) Resumable() bool {
	return true
}

// errExt is a small constructor for ExtensionTypeError.
func errExt(got int8, wanted int8) error {
	return ExtensionTypeError{Got: got, Want: wanted}
}
|
||||
|
||||
// Extension is the interface fulfilled
// by types that want to define their
// own binary encoding.
type Extension interface {
	// ExtensionType should return
	// a int8 that identifies the concrete
	// type of the extension. (Types <0 are
	// officially reserved by the MessagePack
	// specifications.)
	ExtensionType() int8

	// Len should return the length
	// of the data to be encoded
	Len() int

	// MarshalBinaryTo should copy
	// the data into the supplied slice,
	// assuming that the slice has length Len()
	MarshalBinaryTo([]byte) error

	// UnmarshalBinary should decode the
	// extension's state from the supplied bytes.
	UnmarshalBinary([]byte) error
}
|
||||
|
||||
// RawExtension implements the Extension interface by carrying the
// raw payload bytes together with the wire type number.
type RawExtension struct {
	Data []byte // raw payload bytes
	Type int8   // extension type number
}

// ExtensionType implements Extension.ExtensionType, and returns r.Type.
func (r *RawExtension) ExtensionType() int8 { return r.Type }

// Len implements Extension.Len, and returns len(r.Data).
func (r *RawExtension) Len() int { return len(r.Data) }

// MarshalBinaryTo implements Extension.MarshalBinaryTo,
// copying r.Data into d (d is assumed to have length Len()).
func (r *RawExtension) MarshalBinaryTo(d []byte) error {
	copy(d, r.Data)
	return nil
}

// UnmarshalBinary implements Extension.UnmarshalBinary, setting
// r.Data to a copy of b and reusing existing storage when possible.
func (r *RawExtension) UnmarshalBinary(b []byte) error {
	n := len(b)
	if cap(r.Data) < n {
		r.Data = make([]byte, n)
	} else {
		r.Data = r.Data[:n]
	}
	copy(r.Data, b)
	return nil
}
|
||||
|
||||
// WriteExtension writes an extension type to the writer.
// The prefix (fixext1/2/4/8/16 for the exact power-of-two sizes,
// ext8/16/32 otherwise) is written into the internal buffer first;
// the payload is then either written in-buffer (when it fits) or
// marshaled into a fresh buffer that is adopted as the write buffer.
func (mw *Writer) WriteExtension(e Extension) error {
	l := e.Len()
	var err error
	switch l {
	case 0:
		// zero-length payloads have no fixext form; use ext8
		o, err := mw.require(3)
		if err != nil {
			return err
		}
		mw.buf[o] = mext8
		mw.buf[o+1] = 0
		mw.buf[o+2] = byte(e.ExtensionType())
	case 1:
		o, err := mw.require(2)
		if err != nil {
			return err
		}
		mw.buf[o] = mfixext1
		mw.buf[o+1] = byte(e.ExtensionType())
	case 2:
		o, err := mw.require(2)
		if err != nil {
			return err
		}
		mw.buf[o] = mfixext2
		mw.buf[o+1] = byte(e.ExtensionType())
	case 4:
		o, err := mw.require(2)
		if err != nil {
			return err
		}
		mw.buf[o] = mfixext4
		mw.buf[o+1] = byte(e.ExtensionType())
	case 8:
		o, err := mw.require(2)
		if err != nil {
			return err
		}
		mw.buf[o] = mfixext8
		mw.buf[o+1] = byte(e.ExtensionType())
	case 16:
		o, err := mw.require(2)
		if err != nil {
			return err
		}
		mw.buf[o] = mfixext16
		mw.buf[o+1] = byte(e.ExtensionType())
	default:
		// non-fixext sizes: pick the smallest length-prefixed form
		switch {
		case l < math.MaxUint8:
			o, err := mw.require(3)
			if err != nil {
				return err
			}
			mw.buf[o] = mext8
			mw.buf[o+1] = byte(uint8(l))
			mw.buf[o+2] = byte(e.ExtensionType())
		case l < math.MaxUint16:
			o, err := mw.require(4)
			if err != nil {
				return err
			}
			mw.buf[o] = mext16
			big.PutUint16(mw.buf[o+1:], uint16(l))
			mw.buf[o+3] = byte(e.ExtensionType())
		default:
			o, err := mw.require(6)
			if err != nil {
				return err
			}
			mw.buf[o] = mext32
			big.PutUint32(mw.buf[o+1:], uint32(l))
			mw.buf[o+5] = byte(e.ExtensionType())
		}
	}
	// we can only write directly to the
	// buffer if we're sure that it
	// fits the object
	if l <= mw.bufsize() {
		o, err := mw.require(l)
		if err != nil {
			return err
		}
		return e.MarshalBinaryTo(mw.buf[o:])
	}
	// here we create a new buffer
	// just large enough for the body
	// and save it as the write buffer
	err = mw.flush()
	if err != nil {
		return err
	}
	buf := make([]byte, l)
	err = e.MarshalBinaryTo(buf)
	if err != nil {
		return err
	}
	mw.buf = buf
	mw.wloc = l
	return nil
}
|
||||
|
||||
// peekExtensionType peeks at the extension type number, assuming
// the next kind to be read is Extension; the stream position is
// not advanced.
func (m *Reader) peekExtensionType() (int8, error) {
	p, err := m.R.Peek(2)
	if err != nil {
		return 0, err
	}
	spec := sizes[p[0]]
	if spec.typ != ExtensionType {
		return 0, badPrefix(ExtensionType, p[0])
	}
	if spec.extra == constsize {
		// fixext prefixes carry the type number in byte 1
		return int8(p[1]), nil
	}
	// ext8/16/32: the type number is the final byte of the prefix
	size := spec.size
	p, err = m.R.Peek(int(size))
	if err != nil {
		return 0, err
	}
	return int8(p[size-1]), nil
}
|
||||
|
||||
// peekExtension peeks at the extension encoding type
// in the raw bytes 'b' without consuming anything.
// (must guarantee at least 1 byte in 'b')
func peekExtension(b []byte) (int8, error) {
	spec := sizes[b[0]]
	size := spec.size
	if spec.typ != ExtensionType {
		return 0, badPrefix(ExtensionType, b[0])
	}
	if len(b) < int(size) {
		return 0, ErrShortBytes
	}
	// for fixed extensions,
	// the type information is in
	// the second byte
	if spec.extra == constsize {
		return int8(b[1]), nil
	}
	// otherwise, it's in the last
	// part of the prefix
	return int8(b[size-1]), nil
}
|
||||
|
||||
// ReadExtension reads the next object from the reader
|
||||
// as an extension. ReadExtension will fail if the next
|
||||
// object in the stream is not an extension, or if
|
||||
// e.Type() is not the same as the wire type.
|
||||
func (m *Reader) ReadExtension(e Extension) (err error) {
|
||||
var p []byte
|
||||
p, err = m.R.Peek(2)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
lead := p[0]
|
||||
var read int
|
||||
var off int
|
||||
switch lead {
|
||||
case mfixext1:
|
||||
if int8(p[1]) != e.ExtensionType() {
|
||||
err = errExt(int8(p[1]), e.ExtensionType())
|
||||
return
|
||||
}
|
||||
p, err = m.R.Peek(3)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = e.UnmarshalBinary(p[2:])
|
||||
if err == nil {
|
||||
_, err = m.R.Skip(3)
|
||||
}
|
||||
return
|
||||
|
||||
case mfixext2:
|
||||
if int8(p[1]) != e.ExtensionType() {
|
||||
err = errExt(int8(p[1]), e.ExtensionType())
|
||||
return
|
||||
}
|
||||
p, err = m.R.Peek(4)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = e.UnmarshalBinary(p[2:])
|
||||
if err == nil {
|
||||
_, err = m.R.Skip(4)
|
||||
}
|
||||
return
|
||||
|
||||
case mfixext4:
|
||||
if int8(p[1]) != e.ExtensionType() {
|
||||
err = errExt(int8(p[1]), e.ExtensionType())
|
||||
return
|
||||
}
|
||||
p, err = m.R.Peek(6)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = e.UnmarshalBinary(p[2:])
|
||||
if err == nil {
|
||||
_, err = m.R.Skip(6)
|
||||
}
|
||||
return
|
||||
|
||||
case mfixext8:
|
||||
if int8(p[1]) != e.ExtensionType() {
|
||||
err = errExt(int8(p[1]), e.ExtensionType())
|
||||
return
|
||||
}
|
||||
p, err = m.R.Peek(10)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = e.UnmarshalBinary(p[2:])
|
||||
if err == nil {
|
||||
_, err = m.R.Skip(10)
|
||||
}
|
||||
return
|
||||
|
||||
case mfixext16:
|
||||
if int8(p[1]) != e.ExtensionType() {
|
||||
err = errExt(int8(p[1]), e.ExtensionType())
|
||||
return
|
||||
}
|
||||
p, err = m.R.Peek(18)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = e.UnmarshalBinary(p[2:])
|
||||
if err == nil {
|
||||
_, err = m.R.Skip(18)
|
||||
}
|
||||
return
|
||||
|
||||
case mext8:
|
||||
p, err = m.R.Peek(3)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if int8(p[2]) != e.ExtensionType() {
|
||||
err = errExt(int8(p[2]), e.ExtensionType())
|
||||
return
|
||||
}
|
||||
read = int(uint8(p[1]))
|
||||
off = 3
|
||||
|
||||
case mext16:
|
||||
p, err = m.R.Peek(4)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if int8(p[3]) != e.ExtensionType() {
|
||||
err = errExt(int8(p[3]), e.ExtensionType())
|
||||
return
|
||||
}
|
||||
read = int(big.Uint16(p[1:]))
|
||||
off = 4
|
||||
|
||||
case mext32:
|
||||
p, err = m.R.Peek(6)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if int8(p[5]) != e.ExtensionType() {
|
||||
err = errExt(int8(p[5]), e.ExtensionType())
|
||||
return
|
||||
}
|
||||
read = int(big.Uint32(p[1:]))
|
||||
off = 6
|
||||
|
||||
default:
|
||||
err = badPrefix(ExtensionType, lead)
|
||||
return
|
||||
}
|
||||
|
||||
p, err = m.R.Peek(read + off)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = e.UnmarshalBinary(p[off:])
|
||||
if err == nil {
|
||||
_, err = m.R.Skip(read + off)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AppendExtension appends a MessagePack extension to the provided slice
|
||||
func AppendExtension(b []byte, e Extension) ([]byte, error) {
|
||||
l := e.Len()
|
||||
var o []byte
|
||||
var n int
|
||||
switch l {
|
||||
case 0:
|
||||
o, n = ensure(b, 3)
|
||||
o[n] = mext8
|
||||
o[n+1] = 0
|
||||
o[n+2] = byte(e.ExtensionType())
|
||||
return o[:n+3], nil
|
||||
case 1:
|
||||
o, n = ensure(b, 3)
|
||||
o[n] = mfixext1
|
||||
o[n+1] = byte(e.ExtensionType())
|
||||
n += 2
|
||||
case 2:
|
||||
o, n = ensure(b, 4)
|
||||
o[n] = mfixext2
|
||||
o[n+1] = byte(e.ExtensionType())
|
||||
n += 2
|
||||
case 4:
|
||||
o, n = ensure(b, 6)
|
||||
o[n] = mfixext4
|
||||
o[n+1] = byte(e.ExtensionType())
|
||||
n += 2
|
||||
case 8:
|
||||
o, n = ensure(b, 10)
|
||||
o[n] = mfixext8
|
||||
o[n+1] = byte(e.ExtensionType())
|
||||
n += 2
|
||||
case 16:
|
||||
o, n = ensure(b, 18)
|
||||
o[n] = mfixext16
|
||||
o[n+1] = byte(e.ExtensionType())
|
||||
n += 2
|
||||
}
|
||||
switch {
|
||||
case l < math.MaxUint8:
|
||||
o, n = ensure(b, l+3)
|
||||
o[n] = mext8
|
||||
o[n+1] = byte(uint8(l))
|
||||
o[n+2] = byte(e.ExtensionType())
|
||||
n += 3
|
||||
case l < math.MaxUint16:
|
||||
o, n = ensure(b, l+4)
|
||||
o[n] = mext16
|
||||
big.PutUint16(o[n+1:], uint16(l))
|
||||
o[n+3] = byte(e.ExtensionType())
|
||||
n += 4
|
||||
default:
|
||||
o, n = ensure(b, l+6)
|
||||
o[n] = mext32
|
||||
big.PutUint32(o[n+1:], uint32(l))
|
||||
o[n+5] = byte(e.ExtensionType())
|
||||
n += 6
|
||||
}
|
||||
return o, e.MarshalBinaryTo(o[n:])
|
||||
}
|
||||
|
||||
// ReadExtensionBytes reads an extension from 'b' into 'e'
// and returns any remaining bytes.
// Possible errors:
// - ErrShortBytes ('b' not long enough)
// - ExtensionTypeErorr{} (wire type not the same as e.Type())
// - TypeErorr{} (next object not an extension)
// - InvalidPrefixError
// - An umarshal error returned from e.UnmarshalBinary
func ReadExtensionBytes(b []byte, e Extension) ([]byte, error) {
	l := len(b)
	// smallest possible encoding is 3 bytes (ext8 with zero data)
	if l < 3 {
		return b, ErrShortBytes
	}
	lead := b[0]
	var (
		sz  int // size of 'data'
		off int // offset of 'data'
		typ int8
	)
	switch lead {
	// fixextN: type byte follows the lead byte; size is implied
	case mfixext1:
		typ = int8(b[1])
		sz = 1
		off = 2
	case mfixext2:
		typ = int8(b[1])
		sz = 2
		off = 2
	case mfixext4:
		typ = int8(b[1])
		sz = 4
		off = 2
	case mfixext8:
		typ = int8(b[1])
		sz = 8
		off = 2
	case mfixext16:
		typ = int8(b[1])
		sz = 16
		off = 2
	case mext8:
		sz = int(uint8(b[1]))
		typ = int8(b[2])
		off = 3
		if sz == 0 {
			// fast path for empty extensions.
			// NOTE(review): this returns before the typ/ExtensionType
			// comparison below, so a type mismatch is not reported for
			// zero-length extensions — confirm this is intentional.
			return b[3:], e.UnmarshalBinary(b[3:3])
		}
	case mext16:
		// length is a big-endian uint16 in b[1:3]
		if l < 4 {
			return b, ErrShortBytes
		}
		sz = int(big.Uint16(b[1:]))
		typ = int8(b[3])
		off = 4
	case mext32:
		// length is a big-endian uint32 in b[1:5]
		if l < 6 {
			return b, ErrShortBytes
		}
		sz = int(big.Uint32(b[1:]))
		typ = int8(b[5])
		off = 6
	default:
		return b, badPrefix(ExtensionType, lead)
	}

	if typ != e.ExtensionType() {
		return b, errExt(typ, e.ExtensionType())
	}

	// the data of the extension starts
	// at 'off' and is 'sz' bytes long
	if len(b[off:]) < sz {
		return b, ErrShortBytes
	}
	tot := off + sz
	return b[tot:], e.UnmarshalBinary(b[off:tot])
}
|
49
vendor/github.com/tinylib/msgp/msgp/extension_test.go
generated
vendored
Normal file
49
vendor/github.com/tinylib/msgp/msgp/extension_test.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
var extSizes = [...]int{0, 1, 2, 4, 8, 16, int(tint8), int(tuint16), int(tuint32)}
|
||||
|
||||
func randomExt() RawExtension {
|
||||
e := RawExtension{}
|
||||
e.Type = int8(rand.Int())
|
||||
e.Data = RandBytes(extSizes[rand.Intn(len(extSizes))])
|
||||
return e
|
||||
}
|
||||
|
||||
// TestReadWriteExtension round-trips random extensions through the
// streaming Writer/Reader pair.
func TestReadWriteExtension(t *testing.T) {
	rand.Seed(time.Now().Unix())
	var buf bytes.Buffer
	en := NewWriter(&buf)
	dc := NewReader(&buf)

	for i := 0; i < 25; i++ {
		buf.Reset()
		e := randomExt()
		// write errors are deliberately unchecked; a corrupt or short
		// stream surfaces as a ReadExtension error below
		en.WriteExtension(&e)
		en.Flush()
		err := dc.ReadExtension(&e)
		if err != nil {
			t.Errorf("error with extension (length %d): %s", len(buf.Bytes()), err)
		}
	}
}
|
||||
|
||||
// TestReadWriteExtensionBytes round-trips random extensions through the
// slice-based AppendExtension/ReadExtensionBytes pair.
func TestReadWriteExtensionBytes(t *testing.T) {
	var bts []byte
	rand.Seed(time.Now().Unix())

	for i := 0; i < 24; i++ {
		e := randomExt()
		// reuse the backing array across iterations
		bts, _ = AppendExtension(bts[0:0], &e)
		_, err := ReadExtensionBytes(bts, &e)
		if err != nil {
			t.Errorf("error with extension (length %d): %s", len(bts), err)
		}
	}
}
|
92
vendor/github.com/tinylib/msgp/msgp/file.go
generated
vendored
Normal file
92
vendor/github.com/tinylib/msgp/msgp/file.go
generated
vendored
Normal file
@ -0,0 +1,92 @@
|
||||
// +build linux darwin dragonfly freebsd netbsd openbsd
|
||||
// +build !appengine
|
||||
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// ReadFile reads a file into 'dst' using
// a read-only memory mapping. Consequently,
// the file must be mmap-able, and the
// Unmarshaler should never write to
// the source memory. (Methods generated
// by the msgp tool obey that constraint, but
// user-defined implementations may not.)
//
// Reading and writing through file mappings
// is only efficient for large files; small
// files are best read and written using
// the ordinary streaming interfaces.
//
func ReadFile(dst Unmarshaler, file *os.File) error {
	stat, err := file.Stat()
	if err != nil {
		return err
	}
	// map the entire file read-only; pages are faulted in lazily
	data, err := syscall.Mmap(int(file.Fd()), 0, int(stat.Size()), syscall.PROT_READ, syscall.MAP_SHARED)
	if err != nil {
		return err
	}
	// adviseRead is defined per-platform; presumably an madvise
	// read-ahead hint — confirm against the platform files
	adviseRead(data)
	_, err = dst.UnmarshalMsg(data)
	// always unmap; the unmarshal error takes precedence if both fail
	uerr := syscall.Munmap(data)
	if err == nil {
		err = uerr
	}
	return err
}
|
||||
|
||||
// MarshalSizer is the combination
|
||||
// of the Marshaler and Sizer
|
||||
// interfaces.
|
||||
type MarshalSizer interface {
|
||||
Marshaler
|
||||
Sizer
|
||||
}
|
||||
|
||||
// WriteFile writes a file from 'src' using
|
||||
// memory mapping. It overwrites the entire
|
||||
// contents of the previous file.
|
||||
// The mapping size is calculated
|
||||
// using the `Msgsize()` method
|
||||
// of 'src', so it must produce a result
|
||||
// equal to or greater than the actual encoded
|
||||
// size of the object. Otherwise,
|
||||
// a fault (SIGBUS) will occur.
|
||||
//
|
||||
// Reading and writing through file mappings
|
||||
// is only efficient for large files; small
|
||||
// files are best read and written using
|
||||
// the ordinary streaming interfaces.
|
||||
//
|
||||
// NOTE: The performance of this call
|
||||
// is highly OS- and filesystem-dependent.
|
||||
// Users should take care to test that this
|
||||
// performs as expected in a production environment.
|
||||
// (Linux users should run a kernel and filesystem
|
||||
// that support fallocate(2) for the best results.)
|
||||
func WriteFile(src MarshalSizer, file *os.File) error {
|
||||
sz := src.Msgsize()
|
||||
err := fallocate(file, int64(sz))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
data, err := syscall.Mmap(int(file.Fd()), 0, sz, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
adviseWrite(data)
|
||||
chunk := data[:0]
|
||||
chunk, err = src.MarshalMsg(chunk)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
uerr := syscall.Munmap(data)
|
||||
if uerr != nil {
|
||||
return uerr
|
||||
}
|
||||
return file.Truncate(int64(len(chunk)))
|
||||
}
|
47
vendor/github.com/tinylib/msgp/msgp/file_port.go
generated
vendored
Normal file
47
vendor/github.com/tinylib/msgp/msgp/file_port.go
generated
vendored
Normal file
@ -0,0 +1,47 @@
|
||||
// +build windows appengine
|
||||
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
)
|
||||
|
||||
// MarshalSizer is the combination
|
||||
// of the Marshaler and Sizer
|
||||
// interfaces.
|
||||
type MarshalSizer interface {
|
||||
Marshaler
|
||||
Sizer
|
||||
}
|
||||
|
||||
func ReadFile(dst Unmarshaler, file *os.File) error {
|
||||
if u, ok := dst.(Decodable); ok {
|
||||
return u.DecodeMsg(NewReader(file))
|
||||
}
|
||||
|
||||
data, err := ioutil.ReadAll(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = dst.UnmarshalMsg(data)
|
||||
return err
|
||||
}
|
||||
|
||||
func WriteFile(src MarshalSizer, file *os.File) error {
|
||||
if e, ok := src.(Encodable); ok {
|
||||
w := NewWriter(file)
|
||||
err := e.EncodeMsg(w)
|
||||
if err == nil {
|
||||
err = w.Flush()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
raw, err := src.MarshalMsg(nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = file.Write(raw)
|
||||
return err
|
||||
}
|
103
vendor/github.com/tinylib/msgp/msgp/file_test.go
generated
vendored
Normal file
103
vendor/github.com/tinylib/msgp/msgp/file_test.go
generated
vendored
Normal file
@ -0,0 +1,103 @@
|
||||
// +build linux darwin dragonfly freebsd netbsd openbsd
|
||||
|
||||
package msgp_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
prand "math/rand"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type rawBytes []byte
|
||||
|
||||
// MarshalMsg appends r, encoded as a MessagePack 'bin' object, to b.
func (r rawBytes) MarshalMsg(b []byte) ([]byte, error) {
	return msgp.AppendBytes(b, []byte(r)), nil
}
|
||||
|
||||
// Msgsize returns an upper bound on the encoded size of r
// (prefix plus payload), as required by msgp.Sizer.
func (r rawBytes) Msgsize() int {
	return msgp.BytesPrefixSize + len(r)
}
|
||||
|
||||
// UnmarshalMsg decodes a 'bin' object from b into r, reusing r's
// existing storage when possible, and returns the unread remainder.
func (r *rawBytes) UnmarshalMsg(b []byte) ([]byte, error) {
	// (*(*[]byte)(r))[:0] reuses the backing array of r
	tmp, out, err := msgp.ReadBytesBytes(b, (*(*[]byte)(r))[:0])
	*r = rawBytes(tmp)
	return out, err
}
|
||||
|
||||
// TestReadWriteFile round-trips 1 MiB of random bytes through
// msgp.WriteFile and msgp.ReadFile.
func TestReadWriteFile(t *testing.T) {
	t.Parallel()

	f, err := os.Create("tmpfile")
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		f.Close()
		os.Remove("tmpfile")
	}()

	data := make([]byte, 1024*1024)
	rand.Read(data)

	err = msgp.WriteFile(rawBytes(data), f)
	if err != nil {
		t.Fatal(err)
	}

	var out rawBytes
	// rewind before reading back
	// NOTE(review): os.SEEK_SET is deprecated in favor of io.SeekStart
	f.Seek(0, os.SEEK_SET)
	err = msgp.ReadFile(&out, f)
	if err != nil {
		t.Fatal(err)
	}

	if !bytes.Equal([]byte(out), []byte(data)) {
		t.Fatal("Input and output not equal.")
	}
}
|
||||
|
||||
var blobstrings = []string{"", "a string", "a longer string here!"}
|
||||
var blobfloats = []float64{0.0, -1.0, 1.0, 3.1415926535}
|
||||
var blobints = []int64{0, 1, -1, 80000, 1 << 30}
|
||||
var blobbytes = [][]byte{[]byte{}, []byte("hello"), []byte("{\"is_json\":true,\"is_compact\":\"unable to determine\"}")}
|
||||
|
||||
// BenchmarkWriteReadFile measures WriteFile followed by ReadFile over a
// slice of b.N generated Blobs.
func BenchmarkWriteReadFile(b *testing.B) {

	// let's not run out of disk space...
	// NOTE(review): mutating b.N directly is unusual; the testing
	// package does not document this as supported — confirm.
	if b.N > 10000000 {
		b.N = 10000000
	}

	fname := "bench-tmpfile"
	f, err := os.Create(fname)
	if err != nil {
		b.Fatal(err)
	}
	defer func(f *os.File, name string) {
		f.Close()
		os.Remove(name)
	}(f, fname)

	data := make(Blobs, b.N)

	// populate each Blob from the fixed sample pools above
	for i := range data {
		data[i].Name = blobstrings[prand.Intn(len(blobstrings))]
		data[i].Float = blobfloats[prand.Intn(len(blobfloats))]
		data[i].Amount = blobints[prand.Intn(len(blobints))]
		data[i].Bytes = blobbytes[prand.Intn(len(blobbytes))]
	}

	b.SetBytes(int64(data.Msgsize() / b.N))
	b.ResetTimer()
	err = msgp.WriteFile(data, f)
	if err != nil {
		b.Fatal(err)
	}
	err = msgp.ReadFile(&data, f)
	if err != nil {
		b.Fatal(err)
	}
}
|
25
vendor/github.com/tinylib/msgp/msgp/floatbench_test.go
generated
vendored
Normal file
25
vendor/github.com/tinylib/msgp/msgp/floatbench_test.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkReadWriteFloat32(b *testing.B) {
|
||||
var f float32 = 3.9081
|
||||
bts := AppendFloat32([]byte{}, f)
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
bts = AppendFloat32(bts[0:0], f)
|
||||
f, bts, _ = ReadFloat32Bytes(bts)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReadWriteFloat64(b *testing.B) {
|
||||
var f float64 = 3.9081
|
||||
bts := AppendFloat64([]byte{}, f)
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
bts = AppendFloat64(bts[0:0], f)
|
||||
f, bts, _ = ReadFloat64Bytes(bts)
|
||||
}
|
||||
}
|
174
vendor/github.com/tinylib/msgp/msgp/integers.go
generated
vendored
Normal file
174
vendor/github.com/tinylib/msgp/msgp/integers.go
generated
vendored
Normal file
@ -0,0 +1,174 @@
|
||||
package msgp
|
||||
|
||||
/* ----------------------------------
|
||||
integer encoding utilities
|
||||
(inline-able)
|
||||
|
||||
TODO(tinylib): there are faster,
|
||||
albeit non-portable solutions
|
||||
to the code below. implement
|
||||
byteswap?
|
||||
---------------------------------- */
|
||||
|
||||
func putMint64(b []byte, i int64) {
|
||||
b[0] = mint64
|
||||
b[1] = byte(i >> 56)
|
||||
b[2] = byte(i >> 48)
|
||||
b[3] = byte(i >> 40)
|
||||
b[4] = byte(i >> 32)
|
||||
b[5] = byte(i >> 24)
|
||||
b[6] = byte(i >> 16)
|
||||
b[7] = byte(i >> 8)
|
||||
b[8] = byte(i)
|
||||
}
|
||||
|
||||
// getMint64 decodes a big-endian int64 from b[1:9]
// (b[0] is the prefix byte).
func getMint64(b []byte) int64 {
	hi := (int64(b[1]) << 56) | (int64(b[2]) << 48) | (int64(b[3]) << 40) | (int64(b[4]) << 32)
	lo := (int64(b[5]) << 24) | (int64(b[6]) << 16) | (int64(b[7]) << 8) | int64(b[8])
	return hi | lo
}
|
||||
|
||||
func putMint32(b []byte, i int32) {
|
||||
b[0] = mint32
|
||||
b[1] = byte(i >> 24)
|
||||
b[2] = byte(i >> 16)
|
||||
b[3] = byte(i >> 8)
|
||||
b[4] = byte(i)
|
||||
}
|
||||
|
||||
func getMint32(b []byte) int32 {
|
||||
return (int32(b[1]) << 24) | (int32(b[2]) << 16) | (int32(b[3]) << 8) | (int32(b[4]))
|
||||
}
|
||||
|
||||
func putMint16(b []byte, i int16) {
|
||||
b[0] = mint16
|
||||
b[1] = byte(i >> 8)
|
||||
b[2] = byte(i)
|
||||
}
|
||||
|
||||
func getMint16(b []byte) (i int16) {
|
||||
return (int16(b[1]) << 8) | int16(b[2])
|
||||
}
|
||||
|
||||
func putMint8(b []byte, i int8) {
|
||||
b[0] = mint8
|
||||
b[1] = byte(i)
|
||||
}
|
||||
|
||||
func getMint8(b []byte) (i int8) {
|
||||
return int8(b[1])
|
||||
}
|
||||
|
||||
func putMuint64(b []byte, u uint64) {
|
||||
b[0] = muint64
|
||||
b[1] = byte(u >> 56)
|
||||
b[2] = byte(u >> 48)
|
||||
b[3] = byte(u >> 40)
|
||||
b[4] = byte(u >> 32)
|
||||
b[5] = byte(u >> 24)
|
||||
b[6] = byte(u >> 16)
|
||||
b[7] = byte(u >> 8)
|
||||
b[8] = byte(u)
|
||||
}
|
||||
|
||||
func getMuint64(b []byte) uint64 {
|
||||
return (uint64(b[1]) << 56) | (uint64(b[2]) << 48) |
|
||||
(uint64(b[3]) << 40) | (uint64(b[4]) << 32) |
|
||||
(uint64(b[5]) << 24) | (uint64(b[6]) << 16) |
|
||||
(uint64(b[7]) << 8) | (uint64(b[8]))
|
||||
}
|
||||
|
||||
func putMuint32(b []byte, u uint32) {
|
||||
b[0] = muint32
|
||||
b[1] = byte(u >> 24)
|
||||
b[2] = byte(u >> 16)
|
||||
b[3] = byte(u >> 8)
|
||||
b[4] = byte(u)
|
||||
}
|
||||
|
||||
func getMuint32(b []byte) uint32 {
|
||||
return (uint32(b[1]) << 24) | (uint32(b[2]) << 16) | (uint32(b[3]) << 8) | (uint32(b[4]))
|
||||
}
|
||||
|
||||
func putMuint16(b []byte, u uint16) {
|
||||
b[0] = muint16
|
||||
b[1] = byte(u >> 8)
|
||||
b[2] = byte(u)
|
||||
}
|
||||
|
||||
func getMuint16(b []byte) uint16 {
|
||||
return (uint16(b[1]) << 8) | uint16(b[2])
|
||||
}
|
||||
|
||||
func putMuint8(b []byte, u uint8) {
|
||||
b[0] = muint8
|
||||
b[1] = byte(u)
|
||||
}
|
||||
|
||||
func getMuint8(b []byte) uint8 {
|
||||
return uint8(b[1])
|
||||
}
|
||||
|
||||
// getUnix decodes a big-endian (seconds, nanoseconds) pair from the
// first 12 bytes of b; the inverse of putUnix.
func getUnix(b []byte) (sec int64, nsec int32) {
	// seconds: 8 bytes, big-endian
	sec = (int64(b[0]) << 56) | (int64(b[1]) << 48) |
		(int64(b[2]) << 40) | (int64(b[3]) << 32) |
		(int64(b[4]) << 24) | (int64(b[5]) << 16) |
		(int64(b[6]) << 8) | (int64(b[7]))

	// nanoseconds: 4 bytes, big-endian
	nsec = (int32(b[8]) << 24) | (int32(b[9]) << 16) | (int32(b[10]) << 8) | (int32(b[11]))
	return
}
|
||||
|
||||
// putUnix writes a (seconds, nanoseconds) pair big-endian into the
// first 12 bytes of b; the inverse of getUnix.
func putUnix(b []byte, sec int64, nsec int32) {
	// seconds: 8 bytes, big-endian
	b[0] = byte(sec >> 56)
	b[1] = byte(sec >> 48)
	b[2] = byte(sec >> 40)
	b[3] = byte(sec >> 32)
	b[4] = byte(sec >> 24)
	b[5] = byte(sec >> 16)
	b[6] = byte(sec >> 8)
	b[7] = byte(sec)
	// nanoseconds: 4 bytes, big-endian
	b[8] = byte(nsec >> 24)
	b[9] = byte(nsec >> 16)
	b[10] = byte(nsec >> 8)
	b[11] = byte(nsec)
}
|
||||
|
||||
/* -----------------------------
|
||||
prefix utilities
|
||||
----------------------------- */
|
||||
|
||||
// write prefix and uint8
|
||||
func prefixu8(b []byte, pre byte, sz uint8) {
|
||||
b[0] = pre
|
||||
b[1] = byte(sz)
|
||||
}
|
||||
|
||||
// write prefix and big-endian uint16
|
||||
func prefixu16(b []byte, pre byte, sz uint16) {
|
||||
b[0] = pre
|
||||
b[1] = byte(sz >> 8)
|
||||
b[2] = byte(sz)
|
||||
}
|
||||
|
||||
// write prefix and big-endian uint32
|
||||
func prefixu32(b []byte, pre byte, sz uint32) {
|
||||
b[0] = pre
|
||||
b[1] = byte(sz >> 24)
|
||||
b[2] = byte(sz >> 16)
|
||||
b[3] = byte(sz >> 8)
|
||||
b[4] = byte(sz)
|
||||
}
|
||||
|
||||
func prefixu64(b []byte, pre byte, sz uint64) {
|
||||
b[0] = pre
|
||||
b[1] = byte(sz >> 56)
|
||||
b[2] = byte(sz >> 48)
|
||||
b[3] = byte(sz >> 40)
|
||||
b[4] = byte(sz >> 32)
|
||||
b[5] = byte(sz >> 24)
|
||||
b[6] = byte(sz >> 16)
|
||||
b[7] = byte(sz >> 8)
|
||||
b[8] = byte(sz)
|
||||
}
|
542
vendor/github.com/tinylib/msgp/msgp/json.go
generated
vendored
Normal file
542
vendor/github.com/tinylib/msgp/msgp/json.go
generated
vendored
Normal file
@ -0,0 +1,542 @@
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"strconv"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var (
|
||||
null = []byte("null")
|
||||
hex = []byte("0123456789abcdef")
|
||||
)
|
||||
|
||||
var defuns [_maxtype]func(jsWriter, *Reader) (int, error)
|
||||
|
||||
// note: there is an initialization loop if
// this isn't set up during init()
func init() {
	// since none of these functions are inline-able,
	// there is not much of a penalty to the indirect
	// call. however, this is best expressed as a jump-table...
	//
	// defuns maps each wire Type to the translator that copies
	// one MessagePack object from a *Reader to JSON output.
	defuns = [_maxtype]func(jsWriter, *Reader) (int, error){
		StrType:        rwString,
		BinType:        rwBytes,
		MapType:        rwMap,
		ArrayType:      rwArray,
		Float64Type:    rwFloat64,
		Float32Type:    rwFloat32,
		BoolType:       rwBool,
		IntType:        rwInt,
		UintType:       rwUint,
		NilType:        rwNil,
		ExtensionType:  rwExtension,
		Complex64Type:  rwExtension,
		Complex128Type: rwExtension,
		TimeType:       rwTime,
	}
}
|
||||
|
||||
// this is the interface
|
||||
// used to write json
|
||||
type jsWriter interface {
|
||||
io.Writer
|
||||
io.ByteWriter
|
||||
WriteString(string) (int, error)
|
||||
}
|
||||
|
||||
// CopyToJSON reads MessagePack from 'src' and copies it
|
||||
// as JSON to 'dst' until EOF.
|
||||
func CopyToJSON(dst io.Writer, src io.Reader) (n int64, err error) {
|
||||
r := NewReader(src)
|
||||
n, err = r.WriteToJSON(dst)
|
||||
freeR(r)
|
||||
return
|
||||
}
|
||||
|
||||
// WriteToJSON translates MessagePack from 'r' and writes it as
// JSON to 'w' until the underlying reader returns io.EOF. It returns
// the number of bytes written, and an error if it stopped before EOF.
func (r *Reader) WriteToJSON(w io.Writer) (n int64, err error) {
	var j jsWriter
	var bf *bufio.Writer
	// use w directly when it already supports byte/string writes;
	// otherwise wrap it in a bufio.Writer we must flush ourselves
	if jsw, ok := w.(jsWriter); ok {
		j = jsw
	} else {
		bf = bufio.NewWriter(w)
		j = bf
	}
	var nn int
	// translate one object per iteration until an error (or EOF)
	for err == nil {
		nn, err = rwNext(j, r)
		n += int64(nn)
	}
	if err != io.EOF {
		// real error: flush best-effort and report the original error
		if bf != nil {
			bf.Flush()
		}
		return
	}
	// EOF is the normal termination condition, not an error
	err = nil
	if bf != nil {
		err = bf.Flush()
	}
	return
}
|
||||
|
||||
func rwNext(w jsWriter, src *Reader) (int, error) {
|
||||
t, err := src.NextType()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return defuns[t](w, src)
|
||||
}
|
||||
|
||||
// rwMap translates a MessagePack map from 'src' into a JSON object on
// 'dst', returning the number of bytes written.
func rwMap(dst jsWriter, src *Reader) (n int, err error) {
	var comma bool
	var sz uint32
	var field []byte

	sz, err = src.ReadMapHeader()
	if err != nil {
		return
	}

	// empty map: emit the literal in one call
	if sz == 0 {
		return dst.WriteString("{}")
	}

	err = dst.WriteByte('{')
	if err != nil {
		return
	}
	n++
	var nn int
	for i := uint32(0); i < sz; i++ {
		// separator before every entry except the first
		if comma {
			err = dst.WriteByte(',')
			if err != nil {
				return
			}
			n++
		}

		// keys are written as JSON-escaped quoted strings
		field, err = src.ReadMapKeyPtr()
		if err != nil {
			return
		}
		nn, err = rwquoted(dst, field)
		n += nn
		if err != nil {
			return
		}

		err = dst.WriteByte(':')
		if err != nil {
			return
		}
		n++
		// values can be any type; recurse through the dispatch table
		nn, err = rwNext(dst, src)
		n += nn
		if err != nil {
			return
		}
		if !comma {
			comma = true
		}
	}

	err = dst.WriteByte('}')
	if err != nil {
		return
	}
	n++
	return
}
|
||||
|
||||
func rwArray(dst jsWriter, src *Reader) (n int, err error) {
|
||||
err = dst.WriteByte('[')
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var sz uint32
|
||||
var nn int
|
||||
sz, err = src.ReadArrayHeader()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
comma := false
|
||||
for i := uint32(0); i < sz; i++ {
|
||||
if comma {
|
||||
err = dst.WriteByte(',')
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n++
|
||||
}
|
||||
nn, err = rwNext(dst, src)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
comma = true
|
||||
}
|
||||
|
||||
err = dst.WriteByte(']')
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n++
|
||||
return
|
||||
}
|
||||
|
||||
func rwNil(dst jsWriter, src *Reader) (int, error) {
|
||||
err := src.ReadNil()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return dst.Write(null)
|
||||
}
|
||||
|
||||
func rwFloat32(dst jsWriter, src *Reader) (int, error) {
|
||||
f, err := src.ReadFloat32()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
src.scratch = strconv.AppendFloat(src.scratch[:0], float64(f), 'f', -1, 64)
|
||||
return dst.Write(src.scratch)
|
||||
}
|
||||
|
||||
func rwFloat64(dst jsWriter, src *Reader) (int, error) {
|
||||
f, err := src.ReadFloat64()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
src.scratch = strconv.AppendFloat(src.scratch[:0], f, 'f', -1, 32)
|
||||
return dst.Write(src.scratch)
|
||||
}
|
||||
|
||||
func rwInt(dst jsWriter, src *Reader) (int, error) {
|
||||
i, err := src.ReadInt64()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
src.scratch = strconv.AppendInt(src.scratch[:0], i, 10)
|
||||
return dst.Write(src.scratch)
|
||||
}
|
||||
|
||||
func rwUint(dst jsWriter, src *Reader) (int, error) {
|
||||
u, err := src.ReadUint64()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
src.scratch = strconv.AppendUint(src.scratch[:0], u, 10)
|
||||
return dst.Write(src.scratch)
|
||||
}
|
||||
|
||||
func rwBool(dst jsWriter, src *Reader) (int, error) {
|
||||
b, err := src.ReadBool()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if b {
|
||||
return dst.WriteString("true")
|
||||
}
|
||||
return dst.WriteString("false")
|
||||
}
|
||||
|
||||
func rwTime(dst jsWriter, src *Reader) (int, error) {
|
||||
t, err := src.ReadTime()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
bts, err := t.MarshalJSON()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return dst.Write(bts)
|
||||
}
|
||||
|
||||
func rwExtension(dst jsWriter, src *Reader) (n int, err error) {
|
||||
et, err := src.peekExtensionType()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// registered extensions can override
|
||||
// the JSON encoding
|
||||
if j, ok := extensionReg[et]; ok {
|
||||
var bts []byte
|
||||
e := j()
|
||||
err = src.ReadExtension(e)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
bts, err = json.Marshal(e)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return dst.Write(bts)
|
||||
}
|
||||
|
||||
e := RawExtension{}
|
||||
e.Type = et
|
||||
err = src.ReadExtension(&e)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var nn int
|
||||
err = dst.WriteByte('{')
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n++
|
||||
|
||||
nn, err = dst.WriteString(`"type:"`)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
src.scratch = strconv.AppendInt(src.scratch[0:0], int64(e.Type), 10)
|
||||
nn, err = dst.Write(src.scratch)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
nn, err = dst.WriteString(`,"data":"`)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
enc := base64.NewEncoder(base64.StdEncoding, dst)
|
||||
|
||||
nn, err = enc.Write(e.Data)
|
||||
n += nn
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = enc.Close()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
nn, err = dst.WriteString(`"}`)
|
||||
n += nn
|
||||
return
|
||||
}
|
||||
|
||||
// rwString translates a MessagePack string from 'src' into a quoted,
// JSON-escaped string on 'dst', returning the number of bytes written.
func rwString(dst jsWriter, src *Reader) (n int, err error) {
	var p []byte
	p, err = src.R.Peek(1)
	if err != nil {
		return
	}
	lead := p[0]
	var read int // length of the string payload

	// fixstr: length is encoded directly in the lead byte
	if isfixstr(lead) {
		read = int(rfixstr(lead))
		src.R.Skip(1)
		goto write
	}

	switch lead {
	case mstr8:
		// 1-byte length prefix
		p, err = src.R.Next(2)
		if err != nil {
			return
		}
		read = int(uint8(p[1]))
	case mstr16:
		// 2-byte big-endian length prefix
		p, err = src.R.Next(3)
		if err != nil {
			return
		}
		read = int(big.Uint16(p[1:]))
	case mstr32:
		// 4-byte big-endian length prefix
		p, err = src.R.Next(5)
		if err != nil {
			return
		}
		read = int(big.Uint32(p[1:]))
	default:
		err = badPrefix(StrType, lead)
		return
	}
write:
	// consume the payload and emit it quoted/escaped
	p, err = src.R.Next(read)
	if err != nil {
		return
	}
	n, err = rwquoted(dst, p)
	return
}
|
||||
|
||||
// rwBytes translates a MessagePack 'bin' object from 'src' into a
// quoted base64 string on 'dst', returning the number of bytes written.
func rwBytes(dst jsWriter, src *Reader) (n int, err error) {
	var nn int
	err = dst.WriteByte('"')
	if err != nil {
		return
	}
	n++
	// read the payload into the reader's reusable scratch buffer
	src.scratch, err = src.ReadBytes(src.scratch[:0])
	if err != nil {
		return
	}
	// stream the payload as base64 directly into dst
	enc := base64.NewEncoder(base64.StdEncoding, dst)
	nn, err = enc.Write(src.scratch)
	n += nn
	if err != nil {
		return
	}
	// Close flushes any partial base64 block
	err = enc.Close()
	if err != nil {
		return
	}
	err = dst.WriteByte('"')
	if err != nil {
		return
	}
	n++
	return
}
|
||||
|
||||
// Below (c) The Go Authors, 2009-2014
// Subject to the BSD-style license found at http://golang.org
//
// see: encoding/json/encode.go:(*encodeState).stringbytes()
//
// rwquoted writes s to dst as a double-quoted, JSON-escaped string,
// returning the number of bytes written. Runs of safe bytes are
// flushed in bulk from s[start:i]; escapes are emitted individually.
func rwquoted(dst jsWriter, s []byte) (n int, err error) {
	var nn int
	err = dst.WriteByte('"')
	if err != nil {
		return
	}
	n++
	start := 0
	for i := 0; i < len(s); {
		if b := s[i]; b < utf8.RuneSelf {
			// printable ASCII that needs no escaping ('<', '>', '&'
			// are escaped for HTML-safety, as encoding/json does)
			if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
				i++
				continue
			}
			// flush the pending run of safe bytes
			if start < i {
				nn, err = dst.Write(s[start:i])
				n += nn
				if err != nil {
					return
				}
			}
			switch b {
			case '\\', '"':
				err = dst.WriteByte('\\')
				if err != nil {
					return
				}
				n++
				err = dst.WriteByte(b)
				if err != nil {
					return
				}
				n++
			case '\n':
				err = dst.WriteByte('\\')
				if err != nil {
					return
				}
				n++
				err = dst.WriteByte('n')
				if err != nil {
					return
				}
				n++
			case '\r':
				err = dst.WriteByte('\\')
				if err != nil {
					return
				}
				n++
				err = dst.WriteByte('r')
				if err != nil {
					return
				}
				n++
			default:
				// all other control/unsafe bytes as \u00XX
				nn, err = dst.WriteString(`\u00`)
				n += nn
				if err != nil {
					return
				}
				err = dst.WriteByte(hex[b>>4])
				if err != nil {
					return
				}
				n++
				err = dst.WriteByte(hex[b&0xF])
				if err != nil {
					return
				}
				n++
			}
			i++
			start = i
			continue
		}
		c, size := utf8.DecodeRune(s[i:])
		if c == utf8.RuneError && size == 1 {
			// NOTE(review): the replacement is only written when
			// start < i; an invalid byte at the start of a run falls
			// through and is flushed raw at the end — encoding/json
			// always writes \ufffd here. Confirm whether intentional.
			if start < i {
				nn, err = dst.Write(s[start:i])
				n += nn
				if err != nil {
					return
				}
				nn, err = dst.WriteString(`\ufffd`)
				n += nn
				if err != nil {
					return
				}
				i += size
				start = i
				continue
			}
		}
		if c == '\u2028' || c == '\u2029' {
			// NOTE(review): same start < i caveat as above — the
			// line-separator escape is skipped when the rune begins
			// a run.
			if start < i {
				nn, err = dst.Write(s[start:i])
				n += nn
				if err != nil {
					return
				}
				nn, err = dst.WriteString(`\u202`)
				n += nn
				if err != nil {
					return
				}
				err = dst.WriteByte(hex[c&0xF])
				if err != nil {
					return
				}
				n++
			}
		}
		i += size
	}
	// flush any trailing run of safe bytes
	if start < len(s) {
		nn, err = dst.Write(s[start:])
		n += nn
		if err != nil {
			return
		}
	}
	err = dst.WriteByte('"')
	if err != nil {
		return
	}
	n++
	return
}
|
363
vendor/github.com/tinylib/msgp/msgp/json_bytes.go
generated
vendored
Normal file
363
vendor/github.com/tinylib/msgp/msgp/json_bytes.go
generated
vendored
Normal file
@ -0,0 +1,363 @@
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
var unfuns [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error)
|
||||
|
||||
func init() {
|
||||
|
||||
// NOTE(pmh): this is best expressed as a jump table,
|
||||
// but gc doesn't do that yet. revisit post-go1.5.
|
||||
unfuns = [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error){
|
||||
StrType: rwStringBytes,
|
||||
BinType: rwBytesBytes,
|
||||
MapType: rwMapBytes,
|
||||
ArrayType: rwArrayBytes,
|
||||
Float64Type: rwFloat64Bytes,
|
||||
Float32Type: rwFloat32Bytes,
|
||||
BoolType: rwBoolBytes,
|
||||
IntType: rwIntBytes,
|
||||
UintType: rwUintBytes,
|
||||
NilType: rwNullBytes,
|
||||
ExtensionType: rwExtensionBytes,
|
||||
Complex64Type: rwExtensionBytes,
|
||||
Complex128Type: rwExtensionBytes,
|
||||
TimeType: rwTimeBytes,
|
||||
}
|
||||
}
|
||||
|
||||
// UnmarshalAsJSON takes raw messagepack and writes
|
||||
// it as JSON to 'w'. If an error is returned, the
|
||||
// bytes not translated will also be returned. If
|
||||
// no errors are encountered, the length of the returned
|
||||
// slice will be zero.
|
||||
func UnmarshalAsJSON(w io.Writer, msg []byte) ([]byte, error) {
|
||||
var (
|
||||
scratch []byte
|
||||
cast bool
|
||||
dst jsWriter
|
||||
err error
|
||||
)
|
||||
if jsw, ok := w.(jsWriter); ok {
|
||||
dst = jsw
|
||||
cast = true
|
||||
} else {
|
||||
dst = bufio.NewWriterSize(w, 512)
|
||||
}
|
||||
for len(msg) > 0 && err == nil {
|
||||
msg, scratch, err = writeNext(dst, msg, scratch)
|
||||
}
|
||||
if !cast && err == nil {
|
||||
err = dst.(*bufio.Writer).Flush()
|
||||
}
|
||||
return msg, err
|
||||
}
|
||||
|
||||
func writeNext(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
|
||||
if len(msg) < 1 {
|
||||
return msg, scratch, ErrShortBytes
|
||||
}
|
||||
t := getType(msg[0])
|
||||
if t == InvalidType {
|
||||
return msg, scratch, InvalidPrefixError(msg[0])
|
||||
}
|
||||
if t == ExtensionType {
|
||||
et, err := peekExtension(msg)
|
||||
if err != nil {
|
||||
return nil, scratch, err
|
||||
}
|
||||
if et == TimeExtension {
|
||||
t = TimeType
|
||||
}
|
||||
}
|
||||
return unfuns[t](w, msg, scratch)
|
||||
}
|
||||
|
||||
func rwArrayBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
|
||||
sz, msg, err := ReadArrayHeaderBytes(msg)
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
err = w.WriteByte('[')
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
for i := uint32(0); i < sz; i++ {
|
||||
if i != 0 {
|
||||
err = w.WriteByte(',')
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
}
|
||||
msg, scratch, err = writeNext(w, msg, scratch)
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
}
|
||||
err = w.WriteByte(']')
|
||||
return msg, scratch, err
|
||||
}
|
||||
|
||||
func rwMapBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
|
||||
sz, msg, err := ReadMapHeaderBytes(msg)
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
err = w.WriteByte('{')
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
for i := uint32(0); i < sz; i++ {
|
||||
if i != 0 {
|
||||
err = w.WriteByte(',')
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
}
|
||||
msg, scratch, err = rwMapKeyBytes(w, msg, scratch)
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
err = w.WriteByte(':')
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
msg, scratch, err = writeNext(w, msg, scratch)
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
}
|
||||
err = w.WriteByte('}')
|
||||
return msg, scratch, err
|
||||
}
|
||||
|
||||
func rwMapKeyBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
|
||||
msg, scratch, err := rwStringBytes(w, msg, scratch)
|
||||
if err != nil {
|
||||
if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType {
|
||||
return rwBytesBytes(w, msg, scratch)
|
||||
}
|
||||
}
|
||||
return msg, scratch, err
|
||||
}
|
||||
|
||||
func rwStringBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
|
||||
str, msg, err := ReadStringZC(msg)
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
_, err = rwquoted(w, str)
|
||||
return msg, scratch, err
|
||||
}
|
||||
|
||||
func rwBytesBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
|
||||
bts, msg, err := ReadBytesZC(msg)
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
l := base64.StdEncoding.EncodedLen(len(bts))
|
||||
if cap(scratch) >= l {
|
||||
scratch = scratch[0:l]
|
||||
} else {
|
||||
scratch = make([]byte, l)
|
||||
}
|
||||
base64.StdEncoding.Encode(scratch, bts)
|
||||
err = w.WriteByte('"')
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
_, err = w.Write(scratch)
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
err = w.WriteByte('"')
|
||||
return msg, scratch, err
|
||||
}
|
||||
|
||||
func rwNullBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
|
||||
msg, err := ReadNilBytes(msg)
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
_, err = w.Write(null)
|
||||
return msg, scratch, err
|
||||
}
|
||||
|
||||
func rwBoolBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
|
||||
b, msg, err := ReadBoolBytes(msg)
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
if b {
|
||||
_, err = w.WriteString("true")
|
||||
return msg, scratch, err
|
||||
}
|
||||
_, err = w.WriteString("false")
|
||||
return msg, scratch, err
|
||||
}
|
||||
|
||||
func rwIntBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
|
||||
i, msg, err := ReadInt64Bytes(msg)
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
scratch = strconv.AppendInt(scratch[0:0], i, 10)
|
||||
_, err = w.Write(scratch)
|
||||
return msg, scratch, err
|
||||
}
|
||||
|
||||
func rwUintBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
|
||||
u, msg, err := ReadUint64Bytes(msg)
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
scratch = strconv.AppendUint(scratch[0:0], u, 10)
|
||||
_, err = w.Write(scratch)
|
||||
return msg, scratch, err
|
||||
}
|
||||
|
||||
func rwFloatBytes(w jsWriter, msg []byte, f64 bool, scratch []byte) ([]byte, []byte, error) {
|
||||
var f float64
|
||||
var err error
|
||||
var sz int
|
||||
if f64 {
|
||||
sz = 64
|
||||
f, msg, err = ReadFloat64Bytes(msg)
|
||||
} else {
|
||||
sz = 32
|
||||
var v float32
|
||||
v, msg, err = ReadFloat32Bytes(msg)
|
||||
f = float64(v)
|
||||
}
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
scratch = strconv.AppendFloat(scratch, f, 'f', -1, sz)
|
||||
_, err = w.Write(scratch)
|
||||
return msg, scratch, err
|
||||
}
|
||||
|
||||
func rwFloat32Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
|
||||
var f float32
|
||||
var err error
|
||||
f, msg, err = ReadFloat32Bytes(msg)
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
scratch = strconv.AppendFloat(scratch[:0], float64(f), 'f', -1, 32)
|
||||
_, err = w.Write(scratch)
|
||||
return msg, scratch, err
|
||||
}
|
||||
|
||||
func rwFloat64Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
|
||||
var f float64
|
||||
var err error
|
||||
f, msg, err = ReadFloat64Bytes(msg)
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
scratch = strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)
|
||||
_, err = w.Write(scratch)
|
||||
return msg, scratch, err
|
||||
}
|
||||
|
||||
func rwTimeBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
|
||||
var t time.Time
|
||||
var err error
|
||||
t, msg, err = ReadTimeBytes(msg)
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
bts, err := t.MarshalJSON()
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
_, err = w.Write(bts)
|
||||
return msg, scratch, err
|
||||
}
|
||||
|
||||
func rwExtensionBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) {
|
||||
var err error
|
||||
var et int8
|
||||
et, err = peekExtension(msg)
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
|
||||
// if it's time.Time
|
||||
if et == TimeExtension {
|
||||
var tm time.Time
|
||||
tm, msg, err = ReadTimeBytes(msg)
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
bts, err := tm.MarshalJSON()
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
_, err = w.Write(bts)
|
||||
return msg, scratch, err
|
||||
}
|
||||
|
||||
// if the extension is registered,
|
||||
// use its canonical JSON form
|
||||
if f, ok := extensionReg[et]; ok {
|
||||
e := f()
|
||||
msg, err = ReadExtensionBytes(msg, e)
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
bts, err := json.Marshal(e)
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
_, err = w.Write(bts)
|
||||
return msg, scratch, err
|
||||
}
|
||||
|
||||
// otherwise, write `{"type": <num>, "data": "<base64data>"}`
|
||||
r := RawExtension{}
|
||||
r.Type = et
|
||||
msg, err = ReadExtensionBytes(msg, &r)
|
||||
if err != nil {
|
||||
return msg, scratch, err
|
||||
}
|
||||
scratch, err = writeExt(w, r, scratch)
|
||||
return msg, scratch, err
|
||||
}
|
||||
|
||||
func writeExt(w jsWriter, r RawExtension, scratch []byte) ([]byte, error) {
|
||||
_, err := w.WriteString(`{"type":`)
|
||||
if err != nil {
|
||||
return scratch, err
|
||||
}
|
||||
scratch = strconv.AppendInt(scratch[0:0], int64(r.Type), 10)
|
||||
_, err = w.Write(scratch)
|
||||
if err != nil {
|
||||
return scratch, err
|
||||
}
|
||||
_, err = w.WriteString(`,"data":"`)
|
||||
if err != nil {
|
||||
return scratch, err
|
||||
}
|
||||
l := base64.StdEncoding.EncodedLen(len(r.Data))
|
||||
if cap(scratch) >= l {
|
||||
scratch = scratch[0:l]
|
||||
} else {
|
||||
scratch = make([]byte, l)
|
||||
}
|
||||
base64.StdEncoding.Encode(scratch, r.Data)
|
||||
_, err = w.Write(scratch)
|
||||
if err != nil {
|
||||
return scratch, err
|
||||
}
|
||||
_, err = w.WriteString(`"}`)
|
||||
return scratch, err
|
||||
}
|
121
vendor/github.com/tinylib/msgp/msgp/json_bytes_test.go
generated
vendored
Normal file
121
vendor/github.com/tinylib/msgp/msgp/json_bytes_test.go
generated
vendored
Normal file
@ -0,0 +1,121 @@
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestUnmarshalJSON(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
enc := NewWriter(&buf)
|
||||
enc.WriteMapHeader(5)
|
||||
|
||||
enc.WriteString("thing_1")
|
||||
enc.WriteString("a string object")
|
||||
|
||||
enc.WriteString("a_map")
|
||||
enc.WriteMapHeader(2)
|
||||
|
||||
// INNER
|
||||
enc.WriteString("cmplx")
|
||||
enc.WriteComplex64(complex(1.0, 1.0))
|
||||
enc.WriteString("int_b")
|
||||
enc.WriteInt64(-100)
|
||||
|
||||
enc.WriteString("an extension")
|
||||
enc.WriteExtension(&RawExtension{Type: 1, Data: []byte("blaaahhh")})
|
||||
|
||||
enc.WriteString("some bytes")
|
||||
enc.WriteBytes([]byte("here are some bytes"))
|
||||
|
||||
enc.WriteString("now")
|
||||
enc.WriteTime(time.Now())
|
||||
|
||||
enc.Flush()
|
||||
|
||||
var js bytes.Buffer
|
||||
_, err := UnmarshalAsJSON(&js, buf.Bytes())
|
||||
if err != nil {
|
||||
t.Logf("%s", js.Bytes())
|
||||
t.Fatal(err)
|
||||
}
|
||||
mp := make(map[string]interface{})
|
||||
err = json.Unmarshal(js.Bytes(), &mp)
|
||||
if err != nil {
|
||||
t.Log(js.String())
|
||||
t.Fatalf("Error unmarshaling: %s", err)
|
||||
}
|
||||
|
||||
if len(mp) != 5 {
|
||||
t.Errorf("map length should be %d, not %d", 5, len(mp))
|
||||
}
|
||||
|
||||
so, ok := mp["thing_1"]
|
||||
if !ok || so != "a string object" {
|
||||
t.Errorf("expected %q; got %q", "a string object", so)
|
||||
}
|
||||
|
||||
if _, ok := mp["now"]; !ok {
|
||||
t.Error(`"now" field doesn't exist`)
|
||||
}
|
||||
|
||||
c, ok := mp["a_map"]
|
||||
if !ok {
|
||||
t.Error(`"a_map" field doesn't exist`)
|
||||
} else {
|
||||
if m, ok := c.(map[string]interface{}); ok {
|
||||
if _, ok := m["cmplx"]; !ok {
|
||||
t.Error(`"a_map.cmplx" doesn't exist`)
|
||||
}
|
||||
} else {
|
||||
t.Error(`can't type-assert "c" to map[string]interface{}`)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
t.Logf("JSON: %s", js.Bytes())
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalAsJSON(b *testing.B) {
|
||||
var buf bytes.Buffer
|
||||
enc := NewWriter(&buf)
|
||||
enc.WriteMapHeader(4)
|
||||
|
||||
enc.WriteString("thing_1")
|
||||
enc.WriteString("a string object")
|
||||
|
||||
enc.WriteString("a_first_map")
|
||||
enc.WriteMapHeader(2)
|
||||
enc.WriteString("float_a")
|
||||
enc.WriteFloat32(1.0)
|
||||
enc.WriteString("int_b")
|
||||
enc.WriteInt64(-100)
|
||||
|
||||
enc.WriteString("an array")
|
||||
enc.WriteArrayHeader(2)
|
||||
enc.WriteBool(true)
|
||||
enc.WriteUint(2089)
|
||||
|
||||
enc.WriteString("a_second_map")
|
||||
enc.WriteMapStrStr(map[string]string{
|
||||
"internal_one": "blah",
|
||||
"internal_two": "blahhh...",
|
||||
})
|
||||
enc.Flush()
|
||||
|
||||
var js bytes.Buffer
|
||||
bts := buf.Bytes()
|
||||
_, err := UnmarshalAsJSON(&js, bts)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.SetBytes(int64(len(js.Bytes())))
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
js.Reset()
|
||||
UnmarshalAsJSON(&js, bts)
|
||||
}
|
||||
}
|
142
vendor/github.com/tinylib/msgp/msgp/json_test.go
generated
vendored
Normal file
142
vendor/github.com/tinylib/msgp/msgp/json_test.go
generated
vendored
Normal file
@ -0,0 +1,142 @@
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCopyJSON(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
enc := NewWriter(&buf)
|
||||
enc.WriteMapHeader(5)
|
||||
|
||||
enc.WriteString("thing_1")
|
||||
enc.WriteString("a string object")
|
||||
|
||||
enc.WriteString("a_map")
|
||||
enc.WriteMapHeader(2)
|
||||
enc.WriteString("float_a")
|
||||
enc.WriteFloat32(1.0)
|
||||
enc.WriteString("int_b")
|
||||
enc.WriteInt64(-100)
|
||||
|
||||
enc.WriteString("some bytes")
|
||||
enc.WriteBytes([]byte("here are some bytes"))
|
||||
enc.WriteString("a bool")
|
||||
enc.WriteBool(true)
|
||||
|
||||
enc.WriteString("a map")
|
||||
enc.WriteMapStrStr(map[string]string{
|
||||
"internal_one": "blah",
|
||||
"internal_two": "blahhh...",
|
||||
})
|
||||
enc.Flush()
|
||||
|
||||
var js bytes.Buffer
|
||||
_, err := CopyToJSON(&js, &buf)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
mp := make(map[string]interface{})
|
||||
err = json.Unmarshal(js.Bytes(), &mp)
|
||||
if err != nil {
|
||||
t.Log(js.String())
|
||||
t.Fatalf("Error unmarshaling: %s", err)
|
||||
}
|
||||
|
||||
if len(mp) != 5 {
|
||||
t.Errorf("map length should be %d, not %d", 4, len(mp))
|
||||
}
|
||||
|
||||
so, ok := mp["thing_1"]
|
||||
if !ok || so != "a string object" {
|
||||
t.Errorf("expected %q; got %q", "a string object", so)
|
||||
}
|
||||
|
||||
in, ok := mp["a map"]
|
||||
if !ok {
|
||||
t.Error("no key 'a map'")
|
||||
}
|
||||
if inm, ok := in.(map[string]interface{}); !ok {
|
||||
t.Error("inner map not type-assertable to map[string]interface{}")
|
||||
} else {
|
||||
inm1, ok := inm["internal_one"]
|
||||
if !ok || !reflect.DeepEqual(inm1, "blah") {
|
||||
t.Errorf("inner map field %q should be %q, not %q", "internal_one", "blah", inm1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCopyToJSON(b *testing.B) {
|
||||
var buf bytes.Buffer
|
||||
enc := NewWriter(&buf)
|
||||
enc.WriteMapHeader(4)
|
||||
|
||||
enc.WriteString("thing_1")
|
||||
enc.WriteString("a string object")
|
||||
|
||||
enc.WriteString("a_first_map")
|
||||
enc.WriteMapHeader(2)
|
||||
enc.WriteString("float_a")
|
||||
enc.WriteFloat32(1.0)
|
||||
enc.WriteString("int_b")
|
||||
enc.WriteInt64(-100)
|
||||
|
||||
enc.WriteString("an array")
|
||||
enc.WriteArrayHeader(2)
|
||||
enc.WriteBool(true)
|
||||
enc.WriteUint(2089)
|
||||
|
||||
enc.WriteString("a_second_map")
|
||||
enc.WriteMapStrStr(map[string]string{
|
||||
"internal_one": "blah",
|
||||
"internal_two": "blahhh...",
|
||||
})
|
||||
enc.Flush()
|
||||
|
||||
var js bytes.Buffer
|
||||
bts := buf.Bytes()
|
||||
_, err := CopyToJSON(&js, &buf)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.SetBytes(int64(len(js.Bytes())))
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
js.Reset()
|
||||
CopyToJSON(&js, bytes.NewReader(bts))
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkStdlibJSON(b *testing.B) {
|
||||
obj := map[string]interface{}{
|
||||
"thing_1": "a string object",
|
||||
"a_first_map": map[string]interface{}{
|
||||
"float_a": float32(1.0),
|
||||
"float_b": -100,
|
||||
},
|
||||
"an array": []interface{}{
|
||||
"part_A",
|
||||
"part_B",
|
||||
},
|
||||
"a_second_map": map[string]interface{}{
|
||||
"internal_one": "blah",
|
||||
"internal_two": "blahhh...",
|
||||
},
|
||||
}
|
||||
var js bytes.Buffer
|
||||
err := json.NewEncoder(&js).Encode(&obj)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.SetBytes(int64(len(js.Bytes())))
|
||||
b.ResetTimer()
|
||||
b.ReportAllocs()
|
||||
for i := 0; i < b.N; i++ {
|
||||
js.Reset()
|
||||
json.NewEncoder(&js).Encode(&obj)
|
||||
}
|
||||
}
|
267
vendor/github.com/tinylib/msgp/msgp/number.go
generated
vendored
Normal file
267
vendor/github.com/tinylib/msgp/msgp/number.go
generated
vendored
Normal file
@ -0,0 +1,267 @@
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// The portable parts of the Number implementation
|
||||
|
||||
// Number can be
|
||||
// an int64, uint64, float32,
|
||||
// or float64 internally.
|
||||
// It can decode itself
|
||||
// from any of the native
|
||||
// messagepack number types.
|
||||
// The zero-value of Number
|
||||
// is Int(0). Using the equality
|
||||
// operator with Number compares
|
||||
// both the type and the value
|
||||
// of the number.
|
||||
type Number struct {
|
||||
// internally, this
|
||||
// is just a tagged union.
|
||||
// the raw bits of the number
|
||||
// are stored the same way regardless.
|
||||
bits uint64
|
||||
typ Type
|
||||
}
|
||||
|
||||
// AsInt sets the number to an int64.
|
||||
func (n *Number) AsInt(i int64) {
|
||||
|
||||
// we always store int(0)
|
||||
// as {0, InvalidType} in
|
||||
// order to preserve
|
||||
// the behavior of the == operator
|
||||
if i == 0 {
|
||||
n.typ = InvalidType
|
||||
n.bits = 0
|
||||
return
|
||||
}
|
||||
|
||||
n.typ = IntType
|
||||
n.bits = uint64(i)
|
||||
}
|
||||
|
||||
// AsUint sets the number to a uint64.
|
||||
func (n *Number) AsUint(u uint64) {
|
||||
n.typ = UintType
|
||||
n.bits = u
|
||||
}
|
||||
|
||||
// AsFloat32 sets the value of the number
|
||||
// to a float32.
|
||||
func (n *Number) AsFloat32(f float32) {
|
||||
n.typ = Float32Type
|
||||
n.bits = uint64(math.Float32bits(f))
|
||||
}
|
||||
|
||||
// AsFloat64 sets the value of the
|
||||
// number to a float64.
|
||||
func (n *Number) AsFloat64(f float64) {
|
||||
n.typ = Float64Type
|
||||
n.bits = math.Float64bits(f)
|
||||
}
|
||||
|
||||
// Int casts the number as an int64, and
|
||||
// returns whether or not that was the
|
||||
// underlying type.
|
||||
func (n *Number) Int() (int64, bool) {
|
||||
return int64(n.bits), n.typ == IntType || n.typ == InvalidType
|
||||
}
|
||||
|
||||
// Uint casts the number as a uint64, and returns
|
||||
// whether or not that was the underlying type.
|
||||
func (n *Number) Uint() (uint64, bool) {
|
||||
return n.bits, n.typ == UintType
|
||||
}
|
||||
|
||||
// Float casts the number to a float64, and
|
||||
// returns whether or not that was the underlying
|
||||
// type (either a float64 or a float32).
|
||||
func (n *Number) Float() (float64, bool) {
|
||||
switch n.typ {
|
||||
case Float32Type:
|
||||
return float64(math.Float32frombits(uint32(n.bits))), true
|
||||
case Float64Type:
|
||||
return math.Float64frombits(n.bits), true
|
||||
default:
|
||||
return 0.0, false
|
||||
}
|
||||
}
|
||||
|
||||
// Type will return one of:
|
||||
// Float64Type, Float32Type, UintType, or IntType.
|
||||
func (n *Number) Type() Type {
|
||||
if n.typ == InvalidType {
|
||||
return IntType
|
||||
}
|
||||
return n.typ
|
||||
}
|
||||
|
||||
// DecodeMsg implements msgp.Decodable
|
||||
func (n *Number) DecodeMsg(r *Reader) error {
|
||||
typ, err := r.NextType()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch typ {
|
||||
case Float32Type:
|
||||
f, err := r.ReadFloat32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
n.AsFloat32(f)
|
||||
return nil
|
||||
case Float64Type:
|
||||
f, err := r.ReadFloat64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
n.AsFloat64(f)
|
||||
return nil
|
||||
case IntType:
|
||||
i, err := r.ReadInt64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
n.AsInt(i)
|
||||
return nil
|
||||
case UintType:
|
||||
u, err := r.ReadUint64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
n.AsUint(u)
|
||||
return nil
|
||||
default:
|
||||
return TypeError{Encoded: typ, Method: IntType}
|
||||
}
|
||||
}
|
||||
|
||||
// UnmarshalMsg implements msgp.Unmarshaler
|
||||
func (n *Number) UnmarshalMsg(b []byte) ([]byte, error) {
|
||||
typ := NextType(b)
|
||||
switch typ {
|
||||
case IntType:
|
||||
i, o, err := ReadInt64Bytes(b)
|
||||
if err != nil {
|
||||
return b, err
|
||||
}
|
||||
n.AsInt(i)
|
||||
return o, nil
|
||||
case UintType:
|
||||
u, o, err := ReadUint64Bytes(b)
|
||||
if err != nil {
|
||||
return b, err
|
||||
}
|
||||
n.AsUint(u)
|
||||
return o, nil
|
||||
case Float64Type:
|
||||
f, o, err := ReadFloat64Bytes(b)
|
||||
if err != nil {
|
||||
return b, err
|
||||
}
|
||||
n.AsFloat64(f)
|
||||
return o, nil
|
||||
case Float32Type:
|
||||
f, o, err := ReadFloat32Bytes(b)
|
||||
if err != nil {
|
||||
return b, err
|
||||
}
|
||||
n.AsFloat32(f)
|
||||
return o, nil
|
||||
default:
|
||||
return b, TypeError{Method: IntType, Encoded: typ}
|
||||
}
|
||||
}
|
||||
|
||||
// MarshalMsg implements msgp.Marshaler
|
||||
func (n *Number) MarshalMsg(b []byte) ([]byte, error) {
|
||||
switch n.typ {
|
||||
case IntType:
|
||||
return AppendInt64(b, int64(n.bits)), nil
|
||||
case UintType:
|
||||
return AppendUint64(b, uint64(n.bits)), nil
|
||||
case Float64Type:
|
||||
return AppendFloat64(b, math.Float64frombits(n.bits)), nil
|
||||
case Float32Type:
|
||||
return AppendFloat32(b, math.Float32frombits(uint32(n.bits))), nil
|
||||
default:
|
||||
return AppendInt64(b, 0), nil
|
||||
}
|
||||
}
|
||||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (n *Number) EncodeMsg(w *Writer) error {
|
||||
switch n.typ {
|
||||
case IntType:
|
||||
return w.WriteInt64(int64(n.bits))
|
||||
case UintType:
|
||||
return w.WriteUint64(n.bits)
|
||||
case Float64Type:
|
||||
return w.WriteFloat64(math.Float64frombits(n.bits))
|
||||
case Float32Type:
|
||||
return w.WriteFloat32(math.Float32frombits(uint32(n.bits)))
|
||||
default:
|
||||
return w.WriteInt64(0)
|
||||
}
|
||||
}
|
||||
|
||||
// Msgsize implements msgp.Sizer
|
||||
func (n *Number) Msgsize() int {
|
||||
switch n.typ {
|
||||
case Float32Type:
|
||||
return Float32Size
|
||||
case Float64Type:
|
||||
return Float64Size
|
||||
case IntType:
|
||||
return Int64Size
|
||||
case UintType:
|
||||
return Uint64Size
|
||||
default:
|
||||
return 1 // fixint(0)
|
||||
}
|
||||
}
|
||||
|
||||
// MarshalJSON implements json.Marshaler
|
||||
func (n *Number) MarshalJSON() ([]byte, error) {
|
||||
t := n.Type()
|
||||
if t == InvalidType {
|
||||
return []byte{'0'}, nil
|
||||
}
|
||||
out := make([]byte, 0, 32)
|
||||
switch t {
|
||||
case Float32Type, Float64Type:
|
||||
f, _ := n.Float()
|
||||
return strconv.AppendFloat(out, f, 'f', -1, 64), nil
|
||||
case IntType:
|
||||
i, _ := n.Int()
|
||||
return strconv.AppendInt(out, i, 10), nil
|
||||
case UintType:
|
||||
u, _ := n.Uint()
|
||||
return strconv.AppendUint(out, u, 10), nil
|
||||
default:
|
||||
panic("(*Number).typ is invalid")
|
||||
}
|
||||
}
|
||||
|
||||
// String implements fmt.Stringer
|
||||
func (n *Number) String() string {
|
||||
switch n.typ {
|
||||
case InvalidType:
|
||||
return "0"
|
||||
case Float32Type, Float64Type:
|
||||
f, _ := n.Float()
|
||||
return strconv.FormatFloat(f, 'f', -1, 64)
|
||||
case IntType:
|
||||
i, _ := n.Int()
|
||||
return strconv.FormatInt(i, 10)
|
||||
case UintType:
|
||||
u, _ := n.Uint()
|
||||
return strconv.FormatUint(u, 10)
|
||||
default:
|
||||
panic("(*Number).typ is invalid")
|
||||
}
|
||||
}
|
94
vendor/github.com/tinylib/msgp/msgp/number_test.go
generated
vendored
Normal file
94
vendor/github.com/tinylib/msgp/msgp/number_test.go
generated
vendored
Normal file
@ -0,0 +1,94 @@
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNumber(t *testing.T) {
|
||||
|
||||
n := Number{}
|
||||
|
||||
if n.Type() != IntType {
|
||||
t.Errorf("expected zero-value type to be %s; got %s", IntType, n.Type())
|
||||
}
|
||||
|
||||
if n.String() != "0" {
|
||||
t.Errorf("expected Number{}.String() to be \"0\" but got %q", n.String())
|
||||
}
|
||||
|
||||
n.AsInt(248)
|
||||
i, ok := n.Int()
|
||||
if !ok || i != 248 || n.Type() != IntType || n.String() != "248" {
|
||||
t.Errorf("%d in; %d out!", 248, i)
|
||||
}
|
||||
|
||||
n.AsFloat64(3.141)
|
||||
f, ok := n.Float()
|
||||
if !ok || f != 3.141 || n.Type() != Float64Type || n.String() != "3.141" {
|
||||
t.Errorf("%f in; %f out!", 3.141, f)
|
||||
}
|
||||
|
||||
n.AsUint(40000)
|
||||
u, ok := n.Uint()
|
||||
if !ok || u != 40000 || n.Type() != UintType || n.String() != "40000" {
|
||||
t.Errorf("%d in; %d out!", 40000, u)
|
||||
}
|
||||
|
||||
nums := []interface{}{
|
||||
float64(3.14159),
|
||||
int64(-29081),
|
||||
uint64(90821983),
|
||||
float32(3.141),
|
||||
}
|
||||
|
||||
var dat []byte
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriter(&buf)
|
||||
for _, n := range nums {
|
||||
dat, _ = AppendIntf(dat, n)
|
||||
wr.WriteIntf(n)
|
||||
}
|
||||
wr.Flush()
|
||||
|
||||
mout := make([]Number, len(nums))
|
||||
dout := make([]Number, len(nums))
|
||||
|
||||
rd := NewReader(&buf)
|
||||
unm := dat
|
||||
for i := range nums {
|
||||
var err error
|
||||
unm, err = mout[i].UnmarshalMsg(unm)
|
||||
if err != nil {
|
||||
t.Fatal("unmarshal error:", err)
|
||||
}
|
||||
err = dout[i].DecodeMsg(rd)
|
||||
if err != nil {
|
||||
t.Fatal("decode error:", err)
|
||||
}
|
||||
if mout[i] != dout[i] {
|
||||
t.Errorf("for %#v, got %#v from unmarshal and %#v from decode", nums[i], mout[i], dout[i])
|
||||
}
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
var odat []byte
|
||||
for i := range nums {
|
||||
var err error
|
||||
odat, err = mout[i].MarshalMsg(odat)
|
||||
if err != nil {
|
||||
t.Fatal("marshal error:", err)
|
||||
}
|
||||
err = dout[i].EncodeMsg(wr)
|
||||
}
|
||||
wr.Flush()
|
||||
|
||||
if !bytes.Equal(dat, odat) {
|
||||
t.Errorf("marshal: expected output %#v; got %#v", dat, odat)
|
||||
}
|
||||
|
||||
if !bytes.Equal(dat, buf.Bytes()) {
|
||||
t.Errorf("encode: expected output %#v; got %#v", dat, buf.Bytes())
|
||||
}
|
||||
|
||||
}
|
85
vendor/github.com/tinylib/msgp/msgp/raw_test.go
generated
vendored
Normal file
85
vendor/github.com/tinylib/msgp/msgp/raw_test.go
generated
vendored
Normal file
@ -0,0 +1,85 @@
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// all standard interfaces
|
||||
type allifaces interface {
|
||||
Encodable
|
||||
Decodable
|
||||
Marshaler
|
||||
Unmarshaler
|
||||
Sizer
|
||||
}
|
||||
|
||||
func TestRaw(t *testing.T) {
|
||||
bts := make([]byte, 0, 512)
|
||||
bts = AppendMapHeader(bts, 3)
|
||||
bts = AppendString(bts, "key_one")
|
||||
bts = AppendFloat64(bts, -1.0)
|
||||
bts = AppendString(bts, "key_two")
|
||||
bts = AppendString(bts, "value_two")
|
||||
bts = AppendString(bts, "key_three")
|
||||
bts = AppendTime(bts, time.Now())
|
||||
|
||||
var r Raw
|
||||
|
||||
// verify that Raw satisfies
|
||||
// the interfaces we want it to
|
||||
var _ allifaces = &r
|
||||
|
||||
// READ TESTS
|
||||
|
||||
extra, err := r.UnmarshalMsg(bts)
|
||||
if err != nil {
|
||||
t.Fatal("error from UnmarshalMsg:", err)
|
||||
}
|
||||
if len(extra) != 0 {
|
||||
t.Errorf("expected 0 bytes left; found %d", len(extra))
|
||||
}
|
||||
if !bytes.Equal([]byte(r), bts) {
|
||||
t.Fatal("value of raw and input slice are not equal after UnmarshalMsg")
|
||||
}
|
||||
|
||||
r = r[:0]
|
||||
|
||||
var buf bytes.Buffer
|
||||
buf.Write(bts)
|
||||
|
||||
rd := NewReader(&buf)
|
||||
|
||||
err = r.DecodeMsg(rd)
|
||||
if err != nil {
|
||||
t.Fatal("error from DecodeMsg:", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal([]byte(r), bts) {
|
||||
t.Fatal("value of raw and input slice are not equal after DecodeMsg")
|
||||
}
|
||||
|
||||
// WRITE TESTS
|
||||
|
||||
buf.Reset()
|
||||
wr := NewWriter(&buf)
|
||||
err = r.EncodeMsg(wr)
|
||||
if err != nil {
|
||||
t.Fatal("error from EncodeMsg:", err)
|
||||
}
|
||||
|
||||
wr.Flush()
|
||||
if !bytes.Equal(buf.Bytes(), bts) {
|
||||
t.Fatal("value of buf.Bytes() and input slice are not equal after EncodeMsg")
|
||||
}
|
||||
|
||||
var outsl []byte
|
||||
outsl, err = r.MarshalMsg(outsl)
|
||||
if err != nil {
|
||||
t.Fatal("error from MarshalMsg:", err)
|
||||
}
|
||||
if !bytes.Equal(outsl, bts) {
|
||||
t.Fatal("value of output and input of MarshalMsg are not equal.")
|
||||
}
|
||||
}
|
1265
vendor/github.com/tinylib/msgp/msgp/read.go
generated
vendored
Normal file
1265
vendor/github.com/tinylib/msgp/msgp/read.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1089
vendor/github.com/tinylib/msgp/msgp/read_bytes.go
generated
vendored
Normal file
1089
vendor/github.com/tinylib/msgp/msgp/read_bytes.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
518
vendor/github.com/tinylib/msgp/msgp/read_bytes_test.go
generated
vendored
Normal file
518
vendor/github.com/tinylib/msgp/msgp/read_bytes_test.go
generated
vendored
Normal file
@ -0,0 +1,518 @@
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestReadMapHeaderBytes(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
|
||||
tests := []uint32{0, 1, 5, 49082}
|
||||
|
||||
for i, v := range tests {
|
||||
buf.Reset()
|
||||
en.WriteMapHeader(v)
|
||||
en.Flush()
|
||||
|
||||
out, left, err := ReadMapHeaderBytes(buf.Bytes())
|
||||
if err != nil {
|
||||
t.Errorf("test case %d: %s", i, err)
|
||||
}
|
||||
|
||||
if len(left) != 0 {
|
||||
t.Errorf("expected 0 bytes left; found %d", len(left))
|
||||
}
|
||||
|
||||
if out != v {
|
||||
t.Errorf("%d in; %d out", v, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReadMapHeaderBytes(b *testing.B) {
|
||||
sizes := []uint32{1, 100, tuint16, tuint32}
|
||||
buf := make([]byte, 0, 5*len(sizes))
|
||||
for _, sz := range sizes {
|
||||
buf = AppendMapHeader(buf, sz)
|
||||
}
|
||||
b.SetBytes(int64(len(buf) / len(sizes)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
o := buf
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, buf, _ = ReadMapHeaderBytes(buf)
|
||||
if len(buf) == 0 {
|
||||
buf = o
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadArrayHeaderBytes(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
|
||||
tests := []uint32{0, 1, 5, 49082}
|
||||
|
||||
for i, v := range tests {
|
||||
buf.Reset()
|
||||
en.WriteArrayHeader(v)
|
||||
en.Flush()
|
||||
|
||||
out, left, err := ReadArrayHeaderBytes(buf.Bytes())
|
||||
if err != nil {
|
||||
t.Errorf("test case %d: %s", i, err)
|
||||
}
|
||||
|
||||
if len(left) != 0 {
|
||||
t.Errorf("expected 0 bytes left; found %d", len(left))
|
||||
}
|
||||
|
||||
if out != v {
|
||||
t.Errorf("%d in; %d out", v, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReadArrayHeaderBytes(b *testing.B) {
|
||||
sizes := []uint32{1, 100, tuint16, tuint32}
|
||||
buf := make([]byte, 0, 5*len(sizes))
|
||||
for _, sz := range sizes {
|
||||
buf = AppendArrayHeader(buf, sz)
|
||||
}
|
||||
b.SetBytes(int64(len(buf) / len(sizes)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
o := buf
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, buf, _ = ReadArrayHeaderBytes(buf)
|
||||
if len(buf) == 0 {
|
||||
buf = o
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadNilBytes(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
en.WriteNil()
|
||||
en.Flush()
|
||||
|
||||
left, err := ReadNilBytes(buf.Bytes())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(left) != 0 {
|
||||
t.Errorf("expected 0 bytes left; found %d", len(left))
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReadNilByte(b *testing.B) {
|
||||
buf := []byte{mnil}
|
||||
b.SetBytes(1)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
ReadNilBytes(buf)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadFloat64Bytes(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
en.WriteFloat64(3.14159)
|
||||
en.Flush()
|
||||
|
||||
out, left, err := ReadFloat64Bytes(buf.Bytes())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(left) != 0 {
|
||||
t.Errorf("expected 0 bytes left; found %d", len(left))
|
||||
}
|
||||
if out != 3.14159 {
|
||||
t.Errorf("%f in; %f out", 3.14159, out)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReadFloat64Bytes(b *testing.B) {
|
||||
f := float64(3.14159)
|
||||
buf := make([]byte, 0, 9)
|
||||
buf = AppendFloat64(buf, f)
|
||||
b.SetBytes(int64(len(buf)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
ReadFloat64Bytes(buf)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadFloat32Bytes(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
en.WriteFloat32(3.1)
|
||||
en.Flush()
|
||||
|
||||
out, left, err := ReadFloat32Bytes(buf.Bytes())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if len(left) != 0 {
|
||||
t.Errorf("expected 0 bytes left; found %d", len(left))
|
||||
}
|
||||
if out != 3.1 {
|
||||
t.Errorf("%f in; %f out", 3.1, out)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReadFloat32Bytes(b *testing.B) {
|
||||
f := float32(3.14159)
|
||||
buf := make([]byte, 0, 5)
|
||||
buf = AppendFloat32(buf, f)
|
||||
b.SetBytes(int64(len(buf)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
ReadFloat32Bytes(buf)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadBoolBytes(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
|
||||
tests := []bool{true, false}
|
||||
|
||||
for i, v := range tests {
|
||||
buf.Reset()
|
||||
en.WriteBool(v)
|
||||
en.Flush()
|
||||
out, left, err := ReadBoolBytes(buf.Bytes())
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("test case %d: %s", i, err)
|
||||
}
|
||||
|
||||
if len(left) != 0 {
|
||||
t.Errorf("expected 0 bytes left; found %d", len(left))
|
||||
}
|
||||
|
||||
if out != v {
|
||||
t.Errorf("%t in; %t out", v, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReadBoolBytes(b *testing.B) {
|
||||
buf := []byte{mtrue, mfalse, mtrue, mfalse}
|
||||
b.SetBytes(1)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
o := buf
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, buf, _ = ReadBoolBytes(buf)
|
||||
if len(buf) == 0 {
|
||||
buf = o
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadInt64Bytes(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
|
||||
tests := []int64{-5, -30, 0, 1, 127, 300, 40921, 34908219}
|
||||
|
||||
for i, v := range tests {
|
||||
buf.Reset()
|
||||
en.WriteInt64(v)
|
||||
en.Flush()
|
||||
out, left, err := ReadInt64Bytes(buf.Bytes())
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("test case %d: %s", i, err)
|
||||
}
|
||||
|
||||
if len(left) != 0 {
|
||||
t.Errorf("expected 0 bytes left; found %d", len(left))
|
||||
}
|
||||
|
||||
if out != v {
|
||||
t.Errorf("%d in; %d out", v, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadUint64Bytes(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
|
||||
tests := []uint64{0, 1, 127, 300, 40921, 34908219}
|
||||
|
||||
for i, v := range tests {
|
||||
buf.Reset()
|
||||
en.WriteUint64(v)
|
||||
en.Flush()
|
||||
out, left, err := ReadUint64Bytes(buf.Bytes())
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("test case %d: %s", i, err)
|
||||
}
|
||||
|
||||
if len(left) != 0 {
|
||||
t.Errorf("expected 0 bytes left; found %d", len(left))
|
||||
}
|
||||
|
||||
if out != v {
|
||||
t.Errorf("%d in; %d out", v, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadBytesBytes(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
|
||||
tests := [][]byte{[]byte{}, []byte("some bytes"), []byte("some more bytes")}
|
||||
var scratch []byte
|
||||
|
||||
for i, v := range tests {
|
||||
buf.Reset()
|
||||
en.WriteBytes(v)
|
||||
en.Flush()
|
||||
out, left, err := ReadBytesBytes(buf.Bytes(), scratch)
|
||||
if err != nil {
|
||||
t.Errorf("test case %d: %s", i, err)
|
||||
}
|
||||
if len(left) != 0 {
|
||||
t.Errorf("expected 0 bytes left; found %d", len(left))
|
||||
}
|
||||
if !bytes.Equal(out, v) {
|
||||
t.Errorf("%q in; %q out", v, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadZCBytes(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
|
||||
tests := [][]byte{[]byte{}, []byte("some bytes"), []byte("some more bytes")}
|
||||
|
||||
for i, v := range tests {
|
||||
buf.Reset()
|
||||
en.WriteBytes(v)
|
||||
en.Flush()
|
||||
out, left, err := ReadBytesZC(buf.Bytes())
|
||||
if err != nil {
|
||||
t.Errorf("test case %d: %s", i, err)
|
||||
}
|
||||
if len(left) != 0 {
|
||||
t.Errorf("expected 0 bytes left; found %d", len(left))
|
||||
}
|
||||
if !bytes.Equal(out, v) {
|
||||
t.Errorf("%q in; %q out", v, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadZCString(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
|
||||
tests := []string{"", "hello", "here's another string......"}
|
||||
|
||||
for i, v := range tests {
|
||||
buf.Reset()
|
||||
en.WriteString(v)
|
||||
en.Flush()
|
||||
|
||||
out, left, err := ReadStringZC(buf.Bytes())
|
||||
if err != nil {
|
||||
t.Errorf("test case %d: %s", i, err)
|
||||
}
|
||||
if len(left) != 0 {
|
||||
t.Errorf("expected 0 bytes left; found %d", len(left))
|
||||
}
|
||||
if string(out) != v {
|
||||
t.Errorf("%q in; %q out", v, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadStringBytes(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
|
||||
tests := []string{"", "hello", "here's another string......"}
|
||||
|
||||
for i, v := range tests {
|
||||
buf.Reset()
|
||||
en.WriteString(v)
|
||||
en.Flush()
|
||||
|
||||
out, left, err := ReadStringBytes(buf.Bytes())
|
||||
if err != nil {
|
||||
t.Errorf("test case %d: %s", i, err)
|
||||
}
|
||||
if len(left) != 0 {
|
||||
t.Errorf("expected 0 bytes left; found %d", len(left))
|
||||
}
|
||||
if out != v {
|
||||
t.Errorf("%q in; %q out", v, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadComplex128Bytes(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
|
||||
tests := []complex128{complex(0, 0), complex(12.8, 32.0)}
|
||||
|
||||
for i, v := range tests {
|
||||
buf.Reset()
|
||||
en.WriteComplex128(v)
|
||||
en.Flush()
|
||||
|
||||
out, left, err := ReadComplex128Bytes(buf.Bytes())
|
||||
if err != nil {
|
||||
t.Errorf("test case %d: %s", i, err)
|
||||
}
|
||||
if len(left) != 0 {
|
||||
t.Errorf("expected 0 bytes left; found %d", len(left))
|
||||
}
|
||||
if out != v {
|
||||
t.Errorf("%f in; %f out", v, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadComplex64Bytes(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
|
||||
tests := []complex64{complex(0, 0), complex(12.8, 32.0)}
|
||||
|
||||
for i, v := range tests {
|
||||
buf.Reset()
|
||||
en.WriteComplex64(v)
|
||||
en.Flush()
|
||||
|
||||
out, left, err := ReadComplex64Bytes(buf.Bytes())
|
||||
if err != nil {
|
||||
t.Errorf("test case %d: %s", i, err)
|
||||
}
|
||||
if len(left) != 0 {
|
||||
t.Errorf("expected 0 bytes left; found %d", len(left))
|
||||
}
|
||||
if out != v {
|
||||
t.Errorf("%f in; %f out", v, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadTimeBytes(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
|
||||
now := time.Now()
|
||||
en.WriteTime(now)
|
||||
en.Flush()
|
||||
out, left, err := ReadTimeBytes(buf.Bytes())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if len(left) != 0 {
|
||||
t.Errorf("expected 0 bytes left; found %d", len(left))
|
||||
}
|
||||
if !now.Equal(out) {
|
||||
t.Errorf("%s in; %s out", now, out)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReadTimeBytes(b *testing.B) {
|
||||
data := AppendTime(nil, time.Now())
|
||||
b.SetBytes(15)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
ReadTimeBytes(data)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadIntfBytes(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
|
||||
tests := make([]interface{}, 0, 10)
|
||||
tests = append(tests, float64(3.5))
|
||||
tests = append(tests, int64(-49082))
|
||||
tests = append(tests, uint64(34908))
|
||||
tests = append(tests, string("hello!"))
|
||||
tests = append(tests, []byte("blah."))
|
||||
tests = append(tests, map[string]interface{}{
|
||||
"key_one": 3.5,
|
||||
"key_two": "hi.",
|
||||
})
|
||||
|
||||
for i, v := range tests {
|
||||
buf.Reset()
|
||||
if err := en.WriteIntf(v); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
en.Flush()
|
||||
|
||||
out, left, err := ReadIntfBytes(buf.Bytes())
|
||||
if err != nil {
|
||||
t.Errorf("test case %d: %s", i, err)
|
||||
}
|
||||
if len(left) != 0 {
|
||||
t.Errorf("expected 0 bytes left; found %d", len(left))
|
||||
}
|
||||
if !reflect.DeepEqual(v, out) {
|
||||
t.Errorf("ReadIntf(): %v in; %v out", v, out)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func BenchmarkSkipBytes(b *testing.B) {
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
en.WriteMapHeader(6)
|
||||
|
||||
en.WriteString("thing_one")
|
||||
en.WriteString("value_one")
|
||||
|
||||
en.WriteString("thing_two")
|
||||
en.WriteFloat64(3.14159)
|
||||
|
||||
en.WriteString("some_bytes")
|
||||
en.WriteBytes([]byte("nkl4321rqw908vxzpojnlk2314rqew098-s09123rdscasd"))
|
||||
|
||||
en.WriteString("the_time")
|
||||
en.WriteTime(time.Now())
|
||||
|
||||
en.WriteString("what?")
|
||||
en.WriteBool(true)
|
||||
|
||||
en.WriteString("ext")
|
||||
en.WriteExtension(&RawExtension{Type: 55, Data: []byte("raw data!!!")})
|
||||
en.Flush()
|
||||
|
||||
bts := buf.Bytes()
|
||||
b.SetBytes(int64(len(bts)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := Skip(bts)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
770
vendor/github.com/tinylib/msgp/msgp/read_test.go
generated
vendored
Normal file
770
vendor/github.com/tinylib/msgp/msgp/read_test.go
generated
vendored
Normal file
@ -0,0 +1,770 @@
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"math"
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestSanity(t *testing.T) {
|
||||
if !isfixint(0) {
|
||||
t.Fatal("WUT.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadIntf(t *testing.T) {
|
||||
// NOTE: if you include cases
|
||||
// with, say, int32s, the test
|
||||
// will fail, b/c integers are
|
||||
// always read out as int64, and
|
||||
// unsigned integers as uint64
|
||||
|
||||
var testCases = []interface{}{
|
||||
float64(128.032),
|
||||
float32(9082.092),
|
||||
int64(-40),
|
||||
uint64(9082981),
|
||||
time.Now(),
|
||||
"hello!",
|
||||
[]byte("hello!"),
|
||||
map[string]interface{}{
|
||||
"thing-1": "thing-1-value",
|
||||
"thing-2": int64(800),
|
||||
"thing-3": []byte("some inner bytes..."),
|
||||
"thing-4": false,
|
||||
},
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
var v interface{}
|
||||
dec := NewReader(&buf)
|
||||
enc := NewWriter(&buf)
|
||||
|
||||
for i, ts := range testCases {
|
||||
buf.Reset()
|
||||
err := enc.WriteIntf(ts)
|
||||
if err != nil {
|
||||
t.Errorf("Test case %d: %s", i, err)
|
||||
continue
|
||||
}
|
||||
err = enc.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
v, err = dec.ReadIntf()
|
||||
if err != nil {
|
||||
t.Errorf("Test case: %d: %s", i, err)
|
||||
}
|
||||
|
||||
/* for time, use time.Equal instead of reflect.DeepEqual */
|
||||
if tm, ok := v.(time.Time); ok {
|
||||
if !tm.Equal(v.(time.Time)) {
|
||||
t.Errorf("%v != %v", ts, v)
|
||||
}
|
||||
} else if !reflect.DeepEqual(v, ts) {
|
||||
t.Errorf("%v in; %v out", ts, v)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestReadMapHeader(t *testing.T) {
|
||||
tests := []struct {
|
||||
Sz uint32
|
||||
}{
|
||||
{0},
|
||||
{1},
|
||||
{tuint16},
|
||||
{tuint32},
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
var sz uint32
|
||||
var err error
|
||||
wr := NewWriter(&buf)
|
||||
rd := NewReader(&buf)
|
||||
for i, test := range tests {
|
||||
buf.Reset()
|
||||
err = wr.WriteMapHeader(test.Sz)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sz, err = rd.ReadMapHeader()
|
||||
if err != nil {
|
||||
t.Errorf("Test case %d: got error %s", i, err)
|
||||
}
|
||||
if sz != test.Sz {
|
||||
t.Errorf("Test case %d: wrote size %d; got size %d", i, test.Sz, sz)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReadMapHeader(b *testing.B) {
|
||||
sizes := []uint32{0, 1, tuint16, tuint32}
|
||||
data := make([]byte, 0, len(sizes)*5)
|
||||
for _, d := range sizes {
|
||||
data = AppendMapHeader(data, d)
|
||||
}
|
||||
rd := NewReader(NewEndlessReader(data, b))
|
||||
b.SetBytes(int64(len(data) / len(sizes)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
rd.ReadMapHeader()
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadArrayHeader(t *testing.T) {
|
||||
tests := []struct {
|
||||
Sz uint32
|
||||
}{
|
||||
{0},
|
||||
{1},
|
||||
{tuint16},
|
||||
{tuint32},
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
var sz uint32
|
||||
var err error
|
||||
wr := NewWriter(&buf)
|
||||
rd := NewReader(&buf)
|
||||
for i, test := range tests {
|
||||
buf.Reset()
|
||||
err = wr.WriteArrayHeader(test.Sz)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sz, err = rd.ReadArrayHeader()
|
||||
if err != nil {
|
||||
t.Errorf("Test case %d: got error %s", i, err)
|
||||
}
|
||||
if sz != test.Sz {
|
||||
t.Errorf("Test case %d: wrote size %d; got size %d", i, test.Sz, sz)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReadArrayHeader(b *testing.B) {
|
||||
sizes := []uint32{0, 1, tuint16, tuint32}
|
||||
data := make([]byte, 0, len(sizes)*5)
|
||||
for _, d := range sizes {
|
||||
data = AppendArrayHeader(data, d)
|
||||
}
|
||||
rd := NewReader(NewEndlessReader(data, b))
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(data) / len(sizes)))
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
rd.ReadArrayHeader()
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadNil(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriter(&buf)
|
||||
rd := NewReader(&buf)
|
||||
|
||||
wr.WriteNil()
|
||||
wr.Flush()
|
||||
err := rd.ReadNil()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReadNil(b *testing.B) {
|
||||
data := AppendNil(nil)
|
||||
rd := NewReader(NewEndlessReader(data, b))
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(1)
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := rd.ReadNil()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadFloat64(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriter(&buf)
|
||||
rd := NewReader(&buf)
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
buf.Reset()
|
||||
|
||||
flt := (rand.Float64() - 0.5) * math.MaxFloat64
|
||||
err := wr.WriteFloat64(flt)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
out, err := rd.ReadFloat64()
|
||||
if err != nil {
|
||||
t.Errorf("Error reading %f: %s", flt, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if out != flt {
|
||||
t.Errorf("Put in %f but got out %f", flt, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReadFloat64(b *testing.B) {
|
||||
fs := []float64{rand.Float64(), rand.Float64(), rand.Float64(), rand.Float64()}
|
||||
data := make([]byte, 0, 9*len(fs))
|
||||
for _, f := range fs {
|
||||
data = AppendFloat64(data, f)
|
||||
}
|
||||
rd := NewReader(NewEndlessReader(data, b))
|
||||
b.SetBytes(9)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := rd.ReadFloat64()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadFloat32(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriter(&buf)
|
||||
rd := NewReader(&buf)
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
buf.Reset()
|
||||
|
||||
flt := (rand.Float32() - 0.5) * math.MaxFloat32
|
||||
err := wr.WriteFloat32(flt)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
out, err := rd.ReadFloat32()
|
||||
if err != nil {
|
||||
t.Errorf("Error reading %f: %s", flt, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if out != flt {
|
||||
t.Errorf("Put in %f but got out %f", flt, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReadFloat32(b *testing.B) {
|
||||
fs := []float32{rand.Float32(), rand.Float32(), rand.Float32(), rand.Float32()}
|
||||
data := make([]byte, 0, 5*len(fs))
|
||||
for _, f := range fs {
|
||||
data = AppendFloat32(data, f)
|
||||
}
|
||||
rd := NewReader(NewEndlessReader(data, b))
|
||||
b.SetBytes(5)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := rd.ReadFloat32()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadInt64(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriter(&buf)
|
||||
rd := NewReader(&buf)
|
||||
|
||||
ints := []int64{-100000, -5000, -5, 0, 8, 240, int64(tuint16), int64(tuint32), int64(tuint64)}
|
||||
|
||||
for i, num := range ints {
|
||||
buf.Reset()
|
||||
|
||||
err := wr.WriteInt64(num)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
out, err := rd.ReadInt64()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if out != num {
|
||||
t.Errorf("Test case %d: put %d in and got %d out", i, num, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReadInt64(b *testing.B) {
|
||||
is := []int64{0, 1, 65000, rand.Int63()}
|
||||
data := make([]byte, 0, 9*len(is))
|
||||
for _, n := range is {
|
||||
data = AppendInt64(data, n)
|
||||
}
|
||||
rd := NewReader(NewEndlessReader(data, b))
|
||||
b.SetBytes(int64(len(data) / len(is)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := rd.ReadInt64()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadUint64(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriter(&buf)
|
||||
rd := NewReader(&buf)
|
||||
|
||||
ints := []uint64{0, 8, 240, uint64(tuint16), uint64(tuint32), uint64(tuint64)}
|
||||
|
||||
for i, num := range ints {
|
||||
buf.Reset()
|
||||
|
||||
err := wr.WriteUint64(num)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
out, err := rd.ReadUint64()
|
||||
if out != num {
|
||||
t.Errorf("Test case %d: put %d in and got %d out", i, num, out)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReadUint64(b *testing.B) {
|
||||
us := []uint64{0, 1, 10000, uint64(rand.Uint32() * 4)}
|
||||
data := make([]byte, 0, 9*len(us))
|
||||
for _, n := range us {
|
||||
data = AppendUint64(data, n)
|
||||
}
|
||||
rd := NewReader(NewEndlessReader(data, b))
|
||||
b.SetBytes(int64(len(data) / len(us)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := rd.ReadUint64()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadBytes(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriter(&buf)
|
||||
rd := NewReader(&buf)
|
||||
|
||||
sizes := []int{0, 1, 225, int(tuint32)}
|
||||
var scratch []byte
|
||||
for i, size := range sizes {
|
||||
buf.Reset()
|
||||
bts := RandBytes(size)
|
||||
|
||||
err := wr.WriteBytes(bts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
out, err := rd.ReadBytes(scratch)
|
||||
if err != nil {
|
||||
t.Errorf("test case %d: %s", i, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if !bytes.Equal(bts, out) {
|
||||
t.Errorf("test case %d: Bytes not equal.", i)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func benchBytes(size uint32, b *testing.B) {
|
||||
data := make([]byte, 0, size+5)
|
||||
data = AppendBytes(data, RandBytes(int(size)))
|
||||
|
||||
rd := NewReader(NewEndlessReader(data, b))
|
||||
b.SetBytes(int64(len(data)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
var scratch []byte
|
||||
var err error
|
||||
for i := 0; i < b.N; i++ {
|
||||
scratch, err = rd.ReadBytes(scratch)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkRead16Bytes(b *testing.B) {
|
||||
benchBytes(16, b)
|
||||
}
|
||||
|
||||
func BenchmarkRead256Bytes(b *testing.B) {
|
||||
benchBytes(256, b)
|
||||
}
|
||||
|
||||
// This particular case creates
|
||||
// an object larger than the default
|
||||
// read buffer size, so it's a decent
|
||||
// indicator of worst-case performance.
|
||||
func BenchmarkRead2048Bytes(b *testing.B) {
|
||||
benchBytes(2048, b)
|
||||
}
|
||||
|
||||
func TestReadString(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriter(&buf)
|
||||
rd := NewReader(&buf)
|
||||
|
||||
sizes := []int{0, 1, 225, int(math.MaxUint16 + 5)}
|
||||
for i, size := range sizes {
|
||||
buf.Reset()
|
||||
in := string(RandBytes(size))
|
||||
|
||||
err := wr.WriteString(in)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
out, err := rd.ReadString()
|
||||
if err != nil {
|
||||
t.Errorf("test case %d: %s", i, err)
|
||||
}
|
||||
if out != in {
|
||||
t.Errorf("test case %d: strings not equal.", i)
|
||||
t.Errorf("string (len = %d) in; string (len = %d) out", size, len(out))
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func benchString(size uint32, b *testing.B) {
|
||||
str := string(RandBytes(int(size)))
|
||||
data := make([]byte, 0, len(str)+5)
|
||||
data = AppendString(data, str)
|
||||
rd := NewReader(NewEndlessReader(data, b))
|
||||
b.SetBytes(int64(len(data)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := rd.ReadString()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func benchStringAsBytes(size uint32, b *testing.B) {
|
||||
str := string(RandBytes(int(size)))
|
||||
data := make([]byte, 0, len(str)+5)
|
||||
data = AppendString(data, str)
|
||||
rd := NewReader(NewEndlessReader(data, b))
|
||||
b.SetBytes(int64(len(data)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
var scratch []byte
|
||||
var err error
|
||||
for i := 0; i < b.N; i++ {
|
||||
scratch, err = rd.ReadStringAsBytes(scratch)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkRead16StringAsBytes(b *testing.B) {
|
||||
benchStringAsBytes(16, b)
|
||||
}
|
||||
|
||||
func BenchmarkRead256StringAsBytes(b *testing.B) {
|
||||
benchStringAsBytes(256, b)
|
||||
}
|
||||
|
||||
func BenchmarkRead16String(b *testing.B) {
|
||||
benchString(16, b)
|
||||
}
|
||||
|
||||
func BenchmarkRead256String(b *testing.B) {
|
||||
benchString(256, b)
|
||||
}
|
||||
|
||||
func TestReadComplex64(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriter(&buf)
|
||||
rd := NewReader(&buf)
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
buf.Reset()
|
||||
f := complex(rand.Float32()*math.MaxFloat32, rand.Float32()*math.MaxFloat32)
|
||||
|
||||
wr.WriteComplex64(f)
|
||||
err := wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
out, err := rd.ReadComplex64()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
if out != f {
|
||||
t.Errorf("Wrote %f; read %f", f, out)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReadComplex64(b *testing.B) {
|
||||
f := complex(rand.Float32(), rand.Float32())
|
||||
data := AppendComplex64(nil, f)
|
||||
rd := NewReader(NewEndlessReader(data, b))
|
||||
b.SetBytes(int64(len(data)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := rd.ReadComplex64()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadComplex128(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriter(&buf)
|
||||
rd := NewReader(&buf)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
buf.Reset()
|
||||
f := complex(rand.Float64()*math.MaxFloat64, rand.Float64()*math.MaxFloat64)
|
||||
|
||||
wr.WriteComplex128(f)
|
||||
err := wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
out, err := rd.ReadComplex128()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
continue
|
||||
}
|
||||
if out != f {
|
||||
t.Errorf("Wrote %f; read %f", f, out)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReadComplex128(b *testing.B) {
|
||||
f := complex(rand.Float64(), rand.Float64())
|
||||
data := AppendComplex128(nil, f)
|
||||
rd := NewReader(NewEndlessReader(data, b))
|
||||
b.SetBytes(int64(len(data)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := rd.ReadComplex128()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTime(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
now := time.Now()
|
||||
en := NewWriter(&buf)
|
||||
dc := NewReader(&buf)
|
||||
|
||||
err := en.WriteTime(now)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = en.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
out, err := dc.ReadTime()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// check for equivalence
|
||||
if !now.Equal(out) {
|
||||
t.Fatalf("%s in; %s out", now, out)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkReadTime(b *testing.B) {
|
||||
t := time.Now()
|
||||
data := AppendTime(nil, t)
|
||||
rd := NewReader(NewEndlessReader(data, b))
|
||||
b.SetBytes(int64(len(data)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := rd.ReadTime()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSkip(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriter(&buf)
|
||||
rd := NewReader(&buf)
|
||||
|
||||
wr.WriteMapHeader(4)
|
||||
wr.WriteString("key_1")
|
||||
wr.WriteBytes([]byte("value_1"))
|
||||
wr.WriteString("key_2")
|
||||
wr.WriteFloat64(2.0)
|
||||
wr.WriteString("key_3")
|
||||
wr.WriteComplex128(3.0i)
|
||||
wr.WriteString("key_4")
|
||||
wr.WriteInt64(49080432189)
|
||||
wr.Flush()
|
||||
|
||||
// this should skip the whole map
|
||||
err := rd.Skip()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tp, err := rd.NextType()
|
||||
if err != io.EOF {
|
||||
t.Errorf("expected %q; got %q", io.EOF, err)
|
||||
t.Errorf("returned type %q", tp)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func BenchmarkSkip(b *testing.B) {
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
en.WriteMapHeader(6)
|
||||
|
||||
en.WriteString("thing_one")
|
||||
en.WriteString("value_one")
|
||||
|
||||
en.WriteString("thing_two")
|
||||
en.WriteFloat64(3.14159)
|
||||
|
||||
en.WriteString("some_bytes")
|
||||
en.WriteBytes([]byte("nkl4321rqw908vxzpojnlk2314rqew098-s09123rdscasd"))
|
||||
|
||||
en.WriteString("the_time")
|
||||
en.WriteTime(time.Now())
|
||||
|
||||
en.WriteString("what?")
|
||||
en.WriteBool(true)
|
||||
|
||||
en.WriteString("ext")
|
||||
en.WriteExtension(&RawExtension{Type: 55, Data: []byte("raw data!!!")})
|
||||
en.Flush()
|
||||
|
||||
bts := buf.Bytes()
|
||||
b.SetBytes(int64(len(bts)))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
|
||||
rd := NewReader(NewEndlessReader(bts, b))
|
||||
for i := 0; i < b.N; i++ {
|
||||
err := rd.Skip()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCopyNext(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
|
||||
en.WriteMapHeader(6)
|
||||
|
||||
en.WriteString("thing_one")
|
||||
en.WriteString("value_one")
|
||||
|
||||
en.WriteString("thing_two")
|
||||
en.WriteFloat64(3.14159)
|
||||
|
||||
en.WriteString("some_bytes")
|
||||
en.WriteBytes([]byte("nkl4321rqw908vxzpojnlk2314rqew098-s09123rdscasd"))
|
||||
|
||||
en.WriteString("the_time")
|
||||
en.WriteTime(time.Now())
|
||||
|
||||
en.WriteString("what?")
|
||||
en.WriteBool(true)
|
||||
|
||||
en.WriteString("ext")
|
||||
en.WriteExtension(&RawExtension{Type: 55, Data: []byte("raw data!!!")})
|
||||
|
||||
en.Flush()
|
||||
|
||||
// Read from a copy of the original buf.
|
||||
de := NewReader(bytes.NewReader(buf.Bytes()))
|
||||
|
||||
w := new(bytes.Buffer)
|
||||
|
||||
n, err := de.CopyNext(w)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if n != int64(buf.Len()) {
|
||||
t.Fatalf("CopyNext returned the wrong value (%d != %d)",
|
||||
n, buf.Len())
|
||||
}
|
||||
|
||||
if !bytes.Equal(buf.Bytes(), w.Bytes()) {
|
||||
t.Fatalf("not equal! %v, %v", buf.Bytes(), w.Bytes())
|
||||
}
|
||||
}
|
38
vendor/github.com/tinylib/msgp/msgp/size.go
generated
vendored
Normal file
38
vendor/github.com/tinylib/msgp/msgp/size.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
package msgp
|
||||
|
||||
// The sizes provided
|
||||
// are the worst-case
|
||||
// encoded sizes for
|
||||
// each type. For variable-
|
||||
// length types ([]byte, string),
|
||||
// the total encoded size is
|
||||
// the prefix size plus the
|
||||
// length of the object.
|
||||
const (
|
||||
Int64Size = 9
|
||||
IntSize = Int64Size
|
||||
UintSize = Int64Size
|
||||
Int8Size = 2
|
||||
Int16Size = 3
|
||||
Int32Size = 5
|
||||
Uint8Size = 2
|
||||
ByteSize = Uint8Size
|
||||
Uint16Size = 3
|
||||
Uint32Size = 5
|
||||
Uint64Size = Int64Size
|
||||
Float64Size = 9
|
||||
Float32Size = 5
|
||||
Complex64Size = 10
|
||||
Complex128Size = 18
|
||||
|
||||
TimeSize = 15
|
||||
BoolSize = 1
|
||||
NilSize = 1
|
||||
|
||||
MapHeaderSize = 5
|
||||
ArrayHeaderSize = 5
|
||||
|
||||
BytesPrefixSize = 5
|
||||
StringPrefixSize = 5
|
||||
ExtensionPrefixSize = 6
|
||||
)
|
40
vendor/github.com/tinylib/msgp/msgp/unsafe.go
generated
vendored
Normal file
40
vendor/github.com/tinylib/msgp/msgp/unsafe.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
// +build !appengine
|
||||
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// NOTE:
|
||||
// all of the definition in this file
|
||||
// should be repeated in appengine.go,
|
||||
// but without using unsafe
|
||||
|
||||
const (
|
||||
// spec says int and uint are always
|
||||
// the same size, but that int/uint
|
||||
// size may not be machine word size
|
||||
smallint = unsafe.Sizeof(int(0)) == 4
|
||||
)
|
||||
|
||||
// UnsafeString returns the byte slice as a volatile string
|
||||
// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR.
|
||||
// THIS IS EVIL CODE.
|
||||
// YOU HAVE BEEN WARNED.
|
||||
func UnsafeString(b []byte) string {
|
||||
return *(*string)(unsafe.Pointer(&reflect.StringHeader{Data: uintptr(unsafe.Pointer(&b[0])), Len: len(b)}))
|
||||
}
|
||||
|
||||
// UnsafeBytes returns the string as a byte slice
|
||||
// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR.
|
||||
// THIS IS EVIL CODE.
|
||||
// YOU HAVE BEEN WARNED.
|
||||
func UnsafeBytes(s string) []byte {
|
||||
return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
|
||||
Len: len(s),
|
||||
Cap: len(s),
|
||||
Data: (*(*reflect.StringHeader)(unsafe.Pointer(&s))).Data,
|
||||
}))
|
||||
}
|
845
vendor/github.com/tinylib/msgp/msgp/write.go
generated
vendored
Normal file
845
vendor/github.com/tinylib/msgp/msgp/write.go
generated
vendored
Normal file
@ -0,0 +1,845 @@
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Sizer is an interface implemented
|
||||
// by types that can estimate their
|
||||
// size when MessagePack encoded.
|
||||
// This interface is optional, but
|
||||
// encoding/marshaling implementations
|
||||
// may use this as a way to pre-allocate
|
||||
// memory for serialization.
|
||||
type Sizer interface {
|
||||
Msgsize() int
|
||||
}
|
||||
|
||||
var (
|
||||
// Nowhere is an io.Writer to nowhere
|
||||
Nowhere io.Writer = nwhere{}
|
||||
|
||||
btsType = reflect.TypeOf(([]byte)(nil))
|
||||
writerPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return &Writer{buf: make([]byte, 2048)}
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func popWriter(w io.Writer) *Writer {
|
||||
wr := writerPool.Get().(*Writer)
|
||||
wr.Reset(w)
|
||||
return wr
|
||||
}
|
||||
|
||||
func pushWriter(wr *Writer) {
|
||||
wr.w = nil
|
||||
wr.wloc = 0
|
||||
writerPool.Put(wr)
|
||||
}
|
||||
|
||||
// freeW frees a writer for use
|
||||
// by other processes. It is not necessary
|
||||
// to call freeW on a writer. However, maintaining
|
||||
// a reference to a *Writer after calling freeW on
|
||||
// it will cause undefined behavior.
|
||||
func freeW(w *Writer) { pushWriter(w) }
|
||||
|
||||
// Require ensures that cap(old)-len(old) >= extra.
|
||||
func Require(old []byte, extra int) []byte {
|
||||
l := len(old)
|
||||
c := cap(old)
|
||||
r := l + extra
|
||||
if c >= r {
|
||||
return old
|
||||
} else if l == 0 {
|
||||
return make([]byte, 0, extra)
|
||||
}
|
||||
// the new size is the greater
|
||||
// of double the old capacity
|
||||
// and the sum of the old length
|
||||
// and the number of new bytes
|
||||
// necessary.
|
||||
c <<= 1
|
||||
if c < r {
|
||||
c = r
|
||||
}
|
||||
n := make([]byte, l, c)
|
||||
copy(n, old)
|
||||
return n
|
||||
}
|
||||
|
||||
// nowhere writer
|
||||
type nwhere struct{}
|
||||
|
||||
func (n nwhere) Write(p []byte) (int, error) { return len(p), nil }
|
||||
|
||||
// Marshaler is the interface implemented
|
||||
// by types that know how to marshal themselves
|
||||
// as MessagePack. MarshalMsg appends the marshalled
|
||||
// form of the object to the provided
|
||||
// byte slice, returning the extended
|
||||
// slice and any errors encountered.
|
||||
type Marshaler interface {
|
||||
MarshalMsg([]byte) ([]byte, error)
|
||||
}
|
||||
|
||||
// Encodable is the interface implemented
|
||||
// by types that know how to write themselves
|
||||
// as MessagePack using a *msgp.Writer.
|
||||
type Encodable interface {
|
||||
EncodeMsg(*Writer) error
|
||||
}
|
||||
|
||||
// Writer is a buffered writer
|
||||
// that can be used to write
|
||||
// MessagePack objects to an io.Writer.
|
||||
// You must call *Writer.Flush() in order
|
||||
// to flush all of the buffered data
|
||||
// to the underlying writer.
|
||||
type Writer struct {
|
||||
w io.Writer
|
||||
buf []byte
|
||||
wloc int
|
||||
}
|
||||
|
||||
// NewWriter returns a new *Writer.
|
||||
func NewWriter(w io.Writer) *Writer {
|
||||
if wr, ok := w.(*Writer); ok {
|
||||
return wr
|
||||
}
|
||||
return popWriter(w)
|
||||
}
|
||||
|
||||
// NewWriterSize returns a writer with a custom buffer size.
|
||||
func NewWriterSize(w io.Writer, sz int) *Writer {
|
||||
// we must be able to require() 18
|
||||
// contiguous bytes, so that is the
|
||||
// practical minimum buffer size
|
||||
if sz < 18 {
|
||||
sz = 18
|
||||
}
|
||||
|
||||
return &Writer{
|
||||
w: w,
|
||||
buf: make([]byte, sz),
|
||||
}
|
||||
}
|
||||
|
||||
// Encode encodes an Encodable to an io.Writer.
|
||||
func Encode(w io.Writer, e Encodable) error {
|
||||
wr := NewWriter(w)
|
||||
err := e.EncodeMsg(wr)
|
||||
if err == nil {
|
||||
err = wr.Flush()
|
||||
}
|
||||
freeW(wr)
|
||||
return err
|
||||
}
|
||||
|
||||
func (mw *Writer) flush() error {
|
||||
if mw.wloc == 0 {
|
||||
return nil
|
||||
}
|
||||
n, err := mw.w.Write(mw.buf[:mw.wloc])
|
||||
if err != nil {
|
||||
if n > 0 {
|
||||
mw.wloc = copy(mw.buf, mw.buf[n:mw.wloc])
|
||||
}
|
||||
return err
|
||||
}
|
||||
mw.wloc = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
// Flush flushes all of the buffered
|
||||
// data to the underlying writer.
|
||||
func (mw *Writer) Flush() error { return mw.flush() }
|
||||
|
||||
// Buffered returns the number bytes in the write buffer
|
||||
func (mw *Writer) Buffered() int { return len(mw.buf) - mw.wloc }
|
||||
|
||||
func (mw *Writer) avail() int { return len(mw.buf) - mw.wloc }
|
||||
|
||||
func (mw *Writer) bufsize() int { return len(mw.buf) }
|
||||
|
||||
// NOTE: this should only be called with
|
||||
// a number that is guaranteed to be less than
|
||||
// len(mw.buf). typically, it is called with a constant.
|
||||
//
|
||||
// NOTE: this is a hot code path
|
||||
func (mw *Writer) require(n int) (int, error) {
|
||||
c := len(mw.buf)
|
||||
wl := mw.wloc
|
||||
if c-wl < n {
|
||||
if err := mw.flush(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
wl = mw.wloc
|
||||
}
|
||||
mw.wloc += n
|
||||
return wl, nil
|
||||
}
|
||||
|
||||
func (mw *Writer) Append(b ...byte) error {
|
||||
if mw.avail() < len(b) {
|
||||
err := mw.flush()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
mw.wloc += copy(mw.buf[mw.wloc:], b)
|
||||
return nil
|
||||
}
|
||||
|
||||
// push one byte onto the buffer
|
||||
//
|
||||
// NOTE: this is a hot code path
|
||||
func (mw *Writer) push(b byte) error {
|
||||
if mw.wloc == len(mw.buf) {
|
||||
if err := mw.flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
mw.buf[mw.wloc] = b
|
||||
mw.wloc++
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mw *Writer) prefix8(b byte, u uint8) error {
|
||||
const need = 2
|
||||
if len(mw.buf)-mw.wloc < need {
|
||||
if err := mw.flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
prefixu8(mw.buf[mw.wloc:], b, u)
|
||||
mw.wloc += need
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mw *Writer) prefix16(b byte, u uint16) error {
|
||||
const need = 3
|
||||
if len(mw.buf)-mw.wloc < need {
|
||||
if err := mw.flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
prefixu16(mw.buf[mw.wloc:], b, u)
|
||||
mw.wloc += need
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mw *Writer) prefix32(b byte, u uint32) error {
|
||||
const need = 5
|
||||
if len(mw.buf)-mw.wloc < need {
|
||||
if err := mw.flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
prefixu32(mw.buf[mw.wloc:], b, u)
|
||||
mw.wloc += need
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mw *Writer) prefix64(b byte, u uint64) error {
|
||||
const need = 9
|
||||
if len(mw.buf)-mw.wloc < need {
|
||||
if err := mw.flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
prefixu64(mw.buf[mw.wloc:], b, u)
|
||||
mw.wloc += need
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write implements io.Writer, and writes
|
||||
// data directly to the buffer.
|
||||
func (mw *Writer) Write(p []byte) (int, error) {
|
||||
l := len(p)
|
||||
if mw.avail() < l {
|
||||
if err := mw.flush(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if l > len(mw.buf) {
|
||||
return mw.w.Write(p)
|
||||
}
|
||||
}
|
||||
mw.wloc += copy(mw.buf[mw.wloc:], p)
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// implements io.WriteString
|
||||
func (mw *Writer) writeString(s string) error {
|
||||
l := len(s)
|
||||
if mw.avail() < l {
|
||||
if err := mw.flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
if l > len(mw.buf) {
|
||||
_, err := io.WriteString(mw.w, s)
|
||||
return err
|
||||
}
|
||||
}
|
||||
mw.wloc += copy(mw.buf[mw.wloc:], s)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Reset changes the underlying writer used by the Writer
|
||||
func (mw *Writer) Reset(w io.Writer) {
|
||||
mw.buf = mw.buf[:cap(mw.buf)]
|
||||
mw.w = w
|
||||
mw.wloc = 0
|
||||
}
|
||||
|
||||
// WriteMapHeader writes a map header of the given
|
||||
// size to the writer
|
||||
func (mw *Writer) WriteMapHeader(sz uint32) error {
|
||||
switch {
|
||||
case sz <= 15:
|
||||
return mw.push(wfixmap(uint8(sz)))
|
||||
case sz <= math.MaxUint16:
|
||||
return mw.prefix16(mmap16, uint16(sz))
|
||||
default:
|
||||
return mw.prefix32(mmap32, sz)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteArrayHeader writes an array header of the
|
||||
// given size to the writer
|
||||
func (mw *Writer) WriteArrayHeader(sz uint32) error {
|
||||
switch {
|
||||
case sz <= 15:
|
||||
return mw.push(wfixarray(uint8(sz)))
|
||||
case sz <= math.MaxUint16:
|
||||
return mw.prefix16(marray16, uint16(sz))
|
||||
default:
|
||||
return mw.prefix32(marray32, sz)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteNil writes a nil byte to the buffer
|
||||
func (mw *Writer) WriteNil() error {
|
||||
return mw.push(mnil)
|
||||
}
|
||||
|
||||
// WriteFloat64 writes a float64 to the writer
|
||||
func (mw *Writer) WriteFloat64(f float64) error {
|
||||
return mw.prefix64(mfloat64, math.Float64bits(f))
|
||||
}
|
||||
|
||||
// WriteFloat32 writes a float32 to the writer
|
||||
func (mw *Writer) WriteFloat32(f float32) error {
|
||||
return mw.prefix32(mfloat32, math.Float32bits(f))
|
||||
}
|
||||
|
||||
// WriteInt64 writes an int64 to the writer
|
||||
func (mw *Writer) WriteInt64(i int64) error {
|
||||
if i >= 0 {
|
||||
switch {
|
||||
case i <= math.MaxInt8:
|
||||
return mw.push(wfixint(uint8(i)))
|
||||
case i <= math.MaxInt16:
|
||||
return mw.prefix16(mint16, uint16(i))
|
||||
case i <= math.MaxInt32:
|
||||
return mw.prefix32(mint32, uint32(i))
|
||||
default:
|
||||
return mw.prefix64(mint64, uint64(i))
|
||||
}
|
||||
}
|
||||
switch {
|
||||
case i >= -32:
|
||||
return mw.push(wnfixint(int8(i)))
|
||||
case i >= math.MinInt8:
|
||||
return mw.prefix8(mint8, uint8(i))
|
||||
case i >= math.MinInt16:
|
||||
return mw.prefix16(mint16, uint16(i))
|
||||
case i >= math.MinInt32:
|
||||
return mw.prefix32(mint32, uint32(i))
|
||||
default:
|
||||
return mw.prefix64(mint64, uint64(i))
|
||||
}
|
||||
}
|
||||
|
||||
// WriteInt8 writes an int8 to the writer
|
||||
func (mw *Writer) WriteInt8(i int8) error { return mw.WriteInt64(int64(i)) }
|
||||
|
||||
// WriteInt16 writes an int16 to the writer
|
||||
func (mw *Writer) WriteInt16(i int16) error { return mw.WriteInt64(int64(i)) }
|
||||
|
||||
// WriteInt32 writes an int32 to the writer
|
||||
func (mw *Writer) WriteInt32(i int32) error { return mw.WriteInt64(int64(i)) }
|
||||
|
||||
// WriteInt writes an int to the writer
|
||||
func (mw *Writer) WriteInt(i int) error { return mw.WriteInt64(int64(i)) }
|
||||
|
||||
// WriteUint64 writes a uint64 to the writer
|
||||
func (mw *Writer) WriteUint64(u uint64) error {
|
||||
switch {
|
||||
case u <= (1<<7)-1:
|
||||
return mw.push(wfixint(uint8(u)))
|
||||
case u <= math.MaxUint8:
|
||||
return mw.prefix8(muint8, uint8(u))
|
||||
case u <= math.MaxUint16:
|
||||
return mw.prefix16(muint16, uint16(u))
|
||||
case u <= math.MaxUint32:
|
||||
return mw.prefix32(muint32, uint32(u))
|
||||
default:
|
||||
return mw.prefix64(muint64, u)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteByte is analogous to WriteUint8
|
||||
func (mw *Writer) WriteByte(u byte) error { return mw.WriteUint8(uint8(u)) }
|
||||
|
||||
// WriteUint8 writes a uint8 to the writer
|
||||
func (mw *Writer) WriteUint8(u uint8) error { return mw.WriteUint64(uint64(u)) }
|
||||
|
||||
// WriteUint16 writes a uint16 to the writer
|
||||
func (mw *Writer) WriteUint16(u uint16) error { return mw.WriteUint64(uint64(u)) }
|
||||
|
||||
// WriteUint32 writes a uint32 to the writer
|
||||
func (mw *Writer) WriteUint32(u uint32) error { return mw.WriteUint64(uint64(u)) }
|
||||
|
||||
// WriteUint writes a uint to the writer
|
||||
func (mw *Writer) WriteUint(u uint) error { return mw.WriteUint64(uint64(u)) }
|
||||
|
||||
// WriteBytes writes binary as 'bin' to the writer
|
||||
func (mw *Writer) WriteBytes(b []byte) error {
|
||||
sz := uint32(len(b))
|
||||
var err error
|
||||
switch {
|
||||
case sz <= math.MaxUint8:
|
||||
err = mw.prefix8(mbin8, uint8(sz))
|
||||
case sz <= math.MaxUint16:
|
||||
err = mw.prefix16(mbin16, uint16(sz))
|
||||
default:
|
||||
err = mw.prefix32(mbin32, sz)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = mw.Write(b)
|
||||
return err
|
||||
}
|
||||
|
||||
// WriteBytesHeader writes just the size header
|
||||
// of a MessagePack 'bin' object. The user is responsible
|
||||
// for then writing 'sz' more bytes into the stream.
|
||||
func (mw *Writer) WriteBytesHeader(sz uint32) error {
|
||||
switch {
|
||||
case sz <= math.MaxUint8:
|
||||
return mw.prefix8(mbin8, uint8(sz))
|
||||
case sz <= math.MaxUint16:
|
||||
return mw.prefix16(mbin16, uint16(sz))
|
||||
default:
|
||||
return mw.prefix32(mbin32, sz)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteBool writes a bool to the writer
|
||||
func (mw *Writer) WriteBool(b bool) error {
|
||||
if b {
|
||||
return mw.push(mtrue)
|
||||
}
|
||||
return mw.push(mfalse)
|
||||
}
|
||||
|
||||
// WriteString writes a messagepack string to the writer.
|
||||
// (This is NOT an implementation of io.StringWriter)
|
||||
func (mw *Writer) WriteString(s string) error {
|
||||
sz := uint32(len(s))
|
||||
var err error
|
||||
switch {
|
||||
case sz <= 31:
|
||||
err = mw.push(wfixstr(uint8(sz)))
|
||||
case sz <= math.MaxUint8:
|
||||
err = mw.prefix8(mstr8, uint8(sz))
|
||||
case sz <= math.MaxUint16:
|
||||
err = mw.prefix16(mstr16, uint16(sz))
|
||||
default:
|
||||
err = mw.prefix32(mstr32, sz)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return mw.writeString(s)
|
||||
}
|
||||
|
||||
// WriteStringHeader writes just the string size
|
||||
// header of a MessagePack 'str' object. The user
|
||||
// is responsible for writing 'sz' more valid UTF-8
|
||||
// bytes to the stream.
|
||||
func (mw *Writer) WriteStringHeader(sz uint32) error {
|
||||
switch {
|
||||
case sz <= 31:
|
||||
return mw.push(wfixstr(uint8(sz)))
|
||||
case sz <= math.MaxUint8:
|
||||
return mw.prefix8(mstr8, uint8(sz))
|
||||
case sz <= math.MaxUint16:
|
||||
return mw.prefix16(mstr16, uint16(sz))
|
||||
default:
|
||||
return mw.prefix32(mstr32, sz)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteStringFromBytes writes a 'str' object
|
||||
// from a []byte.
|
||||
func (mw *Writer) WriteStringFromBytes(str []byte) error {
|
||||
sz := uint32(len(str))
|
||||
var err error
|
||||
switch {
|
||||
case sz <= 31:
|
||||
err = mw.push(wfixstr(uint8(sz)))
|
||||
case sz <= math.MaxUint8:
|
||||
err = mw.prefix8(mstr8, uint8(sz))
|
||||
case sz <= math.MaxUint16:
|
||||
err = mw.prefix16(mstr16, uint16(sz))
|
||||
default:
|
||||
err = mw.prefix32(mstr32, sz)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = mw.Write(str)
|
||||
return err
|
||||
}
|
||||
|
||||
// WriteComplex64 writes a complex64 to the writer
|
||||
func (mw *Writer) WriteComplex64(f complex64) error {
|
||||
o, err := mw.require(10)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mw.buf[o] = mfixext8
|
||||
mw.buf[o+1] = Complex64Extension
|
||||
big.PutUint32(mw.buf[o+2:], math.Float32bits(real(f)))
|
||||
big.PutUint32(mw.buf[o+6:], math.Float32bits(imag(f)))
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteComplex128 writes a complex128 to the writer
|
||||
func (mw *Writer) WriteComplex128(f complex128) error {
|
||||
o, err := mw.require(18)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mw.buf[o] = mfixext16
|
||||
mw.buf[o+1] = Complex128Extension
|
||||
big.PutUint64(mw.buf[o+2:], math.Float64bits(real(f)))
|
||||
big.PutUint64(mw.buf[o+10:], math.Float64bits(imag(f)))
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteMapStrStr writes a map[string]string to the writer
|
||||
func (mw *Writer) WriteMapStrStr(mp map[string]string) (err error) {
|
||||
err = mw.WriteMapHeader(uint32(len(mp)))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for key, val := range mp {
|
||||
err = mw.WriteString(key)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = mw.WriteString(val)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteMapStrIntf writes a map[string]interface to the writer
|
||||
func (mw *Writer) WriteMapStrIntf(mp map[string]interface{}) (err error) {
|
||||
err = mw.WriteMapHeader(uint32(len(mp)))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for key, val := range mp {
|
||||
err = mw.WriteString(key)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = mw.WriteIntf(val)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// WriteTime writes a time.Time object to the wire.
|
||||
//
|
||||
// Time is encoded as Unix time, which means that
|
||||
// location (time zone) data is removed from the object.
|
||||
// The encoded object itself is 12 bytes: 8 bytes for
|
||||
// a big-endian 64-bit integer denoting seconds
|
||||
// elapsed since "zero" Unix time, followed by 4 bytes
|
||||
// for a big-endian 32-bit signed integer denoting
|
||||
// the nanosecond offset of the time. This encoding
|
||||
// is intended to ease portability across languages.
|
||||
// (Note that this is *not* the standard time.Time
|
||||
// binary encoding, because its implementation relies
|
||||
// heavily on the internal representation used by the
|
||||
// time package.)
|
||||
func (mw *Writer) WriteTime(t time.Time) error {
|
||||
t = t.UTC()
|
||||
o, err := mw.require(15)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mw.buf[o] = mext8
|
||||
mw.buf[o+1] = 12
|
||||
mw.buf[o+2] = TimeExtension
|
||||
putUnix(mw.buf[o+3:], t.Unix(), int32(t.Nanosecond()))
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteIntf writes the concrete type of 'v'.
|
||||
// WriteIntf will error if 'v' is not one of the following:
|
||||
// - A bool, float, string, []byte, int, uint, or complex
|
||||
// - A map of supported types (with string keys)
|
||||
// - An array or slice of supported types
|
||||
// - A pointer to a supported type
|
||||
// - A type that satisfies the msgp.Encodable interface
|
||||
// - A type that satisfies the msgp.Extension interface
|
||||
func (mw *Writer) WriteIntf(v interface{}) error {
|
||||
if v == nil {
|
||||
return mw.WriteNil()
|
||||
}
|
||||
switch v := v.(type) {
|
||||
|
||||
// preferred interfaces
|
||||
|
||||
case Encodable:
|
||||
return v.EncodeMsg(mw)
|
||||
case Extension:
|
||||
return mw.WriteExtension(v)
|
||||
|
||||
// concrete types
|
||||
|
||||
case bool:
|
||||
return mw.WriteBool(v)
|
||||
case float32:
|
||||
return mw.WriteFloat32(v)
|
||||
case float64:
|
||||
return mw.WriteFloat64(v)
|
||||
case complex64:
|
||||
return mw.WriteComplex64(v)
|
||||
case complex128:
|
||||
return mw.WriteComplex128(v)
|
||||
case uint8:
|
||||
return mw.WriteUint8(v)
|
||||
case uint16:
|
||||
return mw.WriteUint16(v)
|
||||
case uint32:
|
||||
return mw.WriteUint32(v)
|
||||
case uint64:
|
||||
return mw.WriteUint64(v)
|
||||
case uint:
|
||||
return mw.WriteUint(v)
|
||||
case int8:
|
||||
return mw.WriteInt8(v)
|
||||
case int16:
|
||||
return mw.WriteInt16(v)
|
||||
case int32:
|
||||
return mw.WriteInt32(v)
|
||||
case int64:
|
||||
return mw.WriteInt64(v)
|
||||
case int:
|
||||
return mw.WriteInt(v)
|
||||
case string:
|
||||
return mw.WriteString(v)
|
||||
case []byte:
|
||||
return mw.WriteBytes(v)
|
||||
case map[string]string:
|
||||
return mw.WriteMapStrStr(v)
|
||||
case map[string]interface{}:
|
||||
return mw.WriteMapStrIntf(v)
|
||||
case time.Time:
|
||||
return mw.WriteTime(v)
|
||||
}
|
||||
|
||||
val := reflect.ValueOf(v)
|
||||
if !isSupported(val.Kind()) || !val.IsValid() {
|
||||
return fmt.Errorf("msgp: type %s not supported", val)
|
||||
}
|
||||
|
||||
switch val.Kind() {
|
||||
case reflect.Ptr:
|
||||
if val.IsNil() {
|
||||
return mw.WriteNil()
|
||||
}
|
||||
return mw.WriteIntf(val.Elem().Interface())
|
||||
case reflect.Slice:
|
||||
return mw.writeSlice(val)
|
||||
case reflect.Map:
|
||||
return mw.writeMap(val)
|
||||
}
|
||||
return &ErrUnsupportedType{val.Type()}
|
||||
}
|
||||
|
||||
func (mw *Writer) writeMap(v reflect.Value) (err error) {
|
||||
if v.Type().Key().Kind() != reflect.String {
|
||||
return errors.New("msgp: map keys must be strings")
|
||||
}
|
||||
ks := v.MapKeys()
|
||||
err = mw.WriteMapHeader(uint32(len(ks)))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, key := range ks {
|
||||
val := v.MapIndex(key)
|
||||
err = mw.WriteString(key.String())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = mw.WriteIntf(val.Interface())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (mw *Writer) writeSlice(v reflect.Value) (err error) {
|
||||
// is []byte
|
||||
if v.Type().ConvertibleTo(btsType) {
|
||||
return mw.WriteBytes(v.Bytes())
|
||||
}
|
||||
|
||||
sz := uint32(v.Len())
|
||||
err = mw.WriteArrayHeader(sz)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for i := uint32(0); i < sz; i++ {
|
||||
err = mw.WriteIntf(v.Index(int(i)).Interface())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (mw *Writer) writeStruct(v reflect.Value) error {
|
||||
if enc, ok := v.Interface().(Encodable); ok {
|
||||
return enc.EncodeMsg(mw)
|
||||
}
|
||||
return fmt.Errorf("msgp: unsupported type: %s", v.Type())
|
||||
}
|
||||
|
||||
func (mw *Writer) writeVal(v reflect.Value) error {
|
||||
if !isSupported(v.Kind()) {
|
||||
return fmt.Errorf("msgp: msgp/enc: type %q not supported", v.Type())
|
||||
}
|
||||
|
||||
// shortcut for nil values
|
||||
if v.IsNil() {
|
||||
return mw.WriteNil()
|
||||
}
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
return mw.WriteBool(v.Bool())
|
||||
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return mw.WriteFloat64(v.Float())
|
||||
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return mw.WriteComplex128(v.Complex())
|
||||
|
||||
case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int8:
|
||||
return mw.WriteInt64(v.Int())
|
||||
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
if v.IsNil() {
|
||||
mw.WriteNil()
|
||||
}
|
||||
return mw.writeVal(v.Elem())
|
||||
|
||||
case reflect.Map:
|
||||
return mw.writeMap(v)
|
||||
|
||||
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint8:
|
||||
return mw.WriteUint64(v.Uint())
|
||||
|
||||
case reflect.String:
|
||||
return mw.WriteString(v.String())
|
||||
|
||||
case reflect.Slice, reflect.Array:
|
||||
return mw.writeSlice(v)
|
||||
|
||||
case reflect.Struct:
|
||||
return mw.writeStruct(v)
|
||||
|
||||
}
|
||||
return fmt.Errorf("msgp: msgp/enc: type %q not supported", v.Type())
|
||||
}
|
||||
|
||||
// is the reflect.Kind encodable?
|
||||
func isSupported(k reflect.Kind) bool {
|
||||
switch k {
|
||||
case reflect.Func, reflect.Chan, reflect.Invalid, reflect.UnsafePointer:
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// GuessSize guesses the size of the underlying
|
||||
// value of 'i'. If the underlying value is not
|
||||
// a simple builtin (or []byte), GuessSize defaults
|
||||
// to 512.
|
||||
func GuessSize(i interface{}) int {
|
||||
if i == nil {
|
||||
return NilSize
|
||||
}
|
||||
|
||||
switch i := i.(type) {
|
||||
case Sizer:
|
||||
return i.Msgsize()
|
||||
case Extension:
|
||||
return ExtensionPrefixSize + i.Len()
|
||||
case float64:
|
||||
return Float64Size
|
||||
case float32:
|
||||
return Float32Size
|
||||
case uint8, uint16, uint32, uint64, uint:
|
||||
return UintSize
|
||||
case int8, int16, int32, int64, int:
|
||||
return IntSize
|
||||
case []byte:
|
||||
return BytesPrefixSize + len(i)
|
||||
case string:
|
||||
return StringPrefixSize + len(i)
|
||||
case complex64:
|
||||
return Complex64Size
|
||||
case complex128:
|
||||
return Complex128Size
|
||||
case bool:
|
||||
return BoolSize
|
||||
case map[string]interface{}:
|
||||
s := MapHeaderSize
|
||||
for key, val := range i {
|
||||
s += StringPrefixSize + len(key) + GuessSize(val)
|
||||
}
|
||||
return s
|
||||
case map[string]string:
|
||||
s := MapHeaderSize
|
||||
for key, val := range i {
|
||||
s += 2*StringPrefixSize + len(key) + len(val)
|
||||
}
|
||||
return s
|
||||
default:
|
||||
return 512
|
||||
}
|
||||
}
|
411
vendor/github.com/tinylib/msgp/msgp/write_bytes.go
generated
vendored
Normal file
411
vendor/github.com/tinylib/msgp/msgp/write_bytes.go
generated
vendored
Normal file
@ -0,0 +1,411 @@
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"math"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ensure 'sz' extra bytes in 'b' btw len(b) and cap(b)
|
||||
func ensure(b []byte, sz int) ([]byte, int) {
|
||||
l := len(b)
|
||||
c := cap(b)
|
||||
if c-l < sz {
|
||||
o := make([]byte, (2*c)+sz) // exponential growth
|
||||
n := copy(o, b)
|
||||
return o[:n+sz], n
|
||||
}
|
||||
return b[:l+sz], l
|
||||
}
|
||||
|
||||
// AppendMapHeader appends a map header with the
|
||||
// given size to the slice
|
||||
func AppendMapHeader(b []byte, sz uint32) []byte {
|
||||
switch {
|
||||
case sz <= 15:
|
||||
return append(b, wfixmap(uint8(sz)))
|
||||
|
||||
case sz <= math.MaxUint16:
|
||||
o, n := ensure(b, 3)
|
||||
prefixu16(o[n:], mmap16, uint16(sz))
|
||||
return o
|
||||
|
||||
default:
|
||||
o, n := ensure(b, 5)
|
||||
prefixu32(o[n:], mmap32, sz)
|
||||
return o
|
||||
}
|
||||
}
|
||||
|
||||
// AppendArrayHeader appends an array header with
|
||||
// the given size to the slice
|
||||
func AppendArrayHeader(b []byte, sz uint32) []byte {
|
||||
switch {
|
||||
case sz <= 15:
|
||||
return append(b, wfixarray(uint8(sz)))
|
||||
|
||||
case sz <= math.MaxUint16:
|
||||
o, n := ensure(b, 3)
|
||||
prefixu16(o[n:], marray16, uint16(sz))
|
||||
return o
|
||||
|
||||
default:
|
||||
o, n := ensure(b, 5)
|
||||
prefixu32(o[n:], marray32, sz)
|
||||
return o
|
||||
}
|
||||
}
|
||||
|
||||
// AppendNil appends a 'nil' byte to the slice
|
||||
func AppendNil(b []byte) []byte { return append(b, mnil) }
|
||||
|
||||
// AppendFloat64 appends a float64 to the slice
|
||||
func AppendFloat64(b []byte, f float64) []byte {
|
||||
o, n := ensure(b, Float64Size)
|
||||
prefixu64(o[n:], mfloat64, math.Float64bits(f))
|
||||
return o
|
||||
}
|
||||
|
||||
// AppendFloat32 appends a float32 to the slice
|
||||
func AppendFloat32(b []byte, f float32) []byte {
|
||||
o, n := ensure(b, Float32Size)
|
||||
prefixu32(o[n:], mfloat32, math.Float32bits(f))
|
||||
return o
|
||||
}
|
||||
|
||||
// AppendInt64 appends an int64 to the slice
|
||||
func AppendInt64(b []byte, i int64) []byte {
|
||||
if i >= 0 {
|
||||
switch {
|
||||
case i <= math.MaxInt8:
|
||||
return append(b, wfixint(uint8(i)))
|
||||
case i <= math.MaxInt16:
|
||||
o, n := ensure(b, 3)
|
||||
putMint16(o[n:], int16(i))
|
||||
return o
|
||||
case i <= math.MaxInt32:
|
||||
o, n := ensure(b, 5)
|
||||
putMint32(o[n:], int32(i))
|
||||
return o
|
||||
default:
|
||||
o, n := ensure(b, 9)
|
||||
putMint64(o[n:], i)
|
||||
return o
|
||||
}
|
||||
}
|
||||
switch {
|
||||
case i >= -32:
|
||||
return append(b, wnfixint(int8(i)))
|
||||
case i >= math.MinInt8:
|
||||
o, n := ensure(b, 2)
|
||||
putMint8(o[n:], int8(i))
|
||||
return o
|
||||
case i >= math.MinInt16:
|
||||
o, n := ensure(b, 3)
|
||||
putMint16(o[n:], int16(i))
|
||||
return o
|
||||
case i >= math.MinInt32:
|
||||
o, n := ensure(b, 5)
|
||||
putMint32(o[n:], int32(i))
|
||||
return o
|
||||
default:
|
||||
o, n := ensure(b, 9)
|
||||
putMint64(o[n:], i)
|
||||
return o
|
||||
}
|
||||
}
|
||||
|
||||
// AppendInt appends an int to the slice
|
||||
func AppendInt(b []byte, i int) []byte { return AppendInt64(b, int64(i)) }
|
||||
|
||||
// AppendInt8 appends an int8 to the slice
|
||||
func AppendInt8(b []byte, i int8) []byte { return AppendInt64(b, int64(i)) }
|
||||
|
||||
// AppendInt16 appends an int16 to the slice
|
||||
func AppendInt16(b []byte, i int16) []byte { return AppendInt64(b, int64(i)) }
|
||||
|
||||
// AppendInt32 appends an int32 to the slice
|
||||
func AppendInt32(b []byte, i int32) []byte { return AppendInt64(b, int64(i)) }
|
||||
|
||||
// AppendUint64 appends a uint64 to the slice
|
||||
func AppendUint64(b []byte, u uint64) []byte {
|
||||
switch {
|
||||
case u <= (1<<7)-1:
|
||||
return append(b, wfixint(uint8(u)))
|
||||
|
||||
case u <= math.MaxUint8:
|
||||
o, n := ensure(b, 2)
|
||||
putMuint8(o[n:], uint8(u))
|
||||
return o
|
||||
|
||||
case u <= math.MaxUint16:
|
||||
o, n := ensure(b, 3)
|
||||
putMuint16(o[n:], uint16(u))
|
||||
return o
|
||||
|
||||
case u <= math.MaxUint32:
|
||||
o, n := ensure(b, 5)
|
||||
putMuint32(o[n:], uint32(u))
|
||||
return o
|
||||
|
||||
default:
|
||||
o, n := ensure(b, 9)
|
||||
putMuint64(o[n:], u)
|
||||
return o
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// AppendUint appends a uint to the slice
|
||||
func AppendUint(b []byte, u uint) []byte { return AppendUint64(b, uint64(u)) }
|
||||
|
||||
// AppendUint8 appends a uint8 to the slice
|
||||
func AppendUint8(b []byte, u uint8) []byte { return AppendUint64(b, uint64(u)) }
|
||||
|
||||
// AppendByte is analogous to AppendUint8
|
||||
func AppendByte(b []byte, u byte) []byte { return AppendUint8(b, uint8(u)) }
|
||||
|
||||
// AppendUint16 appends a uint16 to the slice
|
||||
func AppendUint16(b []byte, u uint16) []byte { return AppendUint64(b, uint64(u)) }
|
||||
|
||||
// AppendUint32 appends a uint32 to the slice
|
||||
func AppendUint32(b []byte, u uint32) []byte { return AppendUint64(b, uint64(u)) }
|
||||
|
||||
// AppendBytes appends bytes to the slice as MessagePack 'bin' data
|
||||
func AppendBytes(b []byte, bts []byte) []byte {
|
||||
sz := len(bts)
|
||||
var o []byte
|
||||
var n int
|
||||
switch {
|
||||
case sz <= math.MaxUint8:
|
||||
o, n = ensure(b, 2+sz)
|
||||
prefixu8(o[n:], mbin8, uint8(sz))
|
||||
n += 2
|
||||
case sz <= math.MaxUint16:
|
||||
o, n = ensure(b, 3+sz)
|
||||
prefixu16(o[n:], mbin16, uint16(sz))
|
||||
n += 3
|
||||
default:
|
||||
o, n = ensure(b, 5+sz)
|
||||
prefixu32(o[n:], mbin32, uint32(sz))
|
||||
n += 5
|
||||
}
|
||||
return o[:n+copy(o[n:], bts)]
|
||||
}
|
||||
|
||||
// AppendBool appends a bool to the slice
|
||||
func AppendBool(b []byte, t bool) []byte {
|
||||
if t {
|
||||
return append(b, mtrue)
|
||||
}
|
||||
return append(b, mfalse)
|
||||
}
|
||||
|
||||
// AppendString appends a string as a MessagePack 'str' to the slice
|
||||
func AppendString(b []byte, s string) []byte {
|
||||
sz := len(s)
|
||||
var n int
|
||||
var o []byte
|
||||
switch {
|
||||
case sz <= 31:
|
||||
o, n = ensure(b, 1+sz)
|
||||
o[n] = wfixstr(uint8(sz))
|
||||
n++
|
||||
case sz <= math.MaxUint8:
|
||||
o, n = ensure(b, 2+sz)
|
||||
prefixu8(o[n:], mstr8, uint8(sz))
|
||||
n += 2
|
||||
case sz <= math.MaxUint16:
|
||||
o, n = ensure(b, 3+sz)
|
||||
prefixu16(o[n:], mstr16, uint16(sz))
|
||||
n += 3
|
||||
default:
|
||||
o, n = ensure(b, 5+sz)
|
||||
prefixu32(o[n:], mstr32, uint32(sz))
|
||||
n += 5
|
||||
}
|
||||
return o[:n+copy(o[n:], s)]
|
||||
}
|
||||
|
||||
// AppendStringFromBytes appends a []byte
|
||||
// as a MessagePack 'str' to the slice 'b.'
|
||||
func AppendStringFromBytes(b []byte, str []byte) []byte {
|
||||
sz := len(str)
|
||||
var n int
|
||||
var o []byte
|
||||
switch {
|
||||
case sz <= 31:
|
||||
o, n = ensure(b, 1+sz)
|
||||
o[n] = wfixstr(uint8(sz))
|
||||
n++
|
||||
case sz <= math.MaxUint8:
|
||||
o, n = ensure(b, 2+sz)
|
||||
prefixu8(o[n:], mstr8, uint8(sz))
|
||||
n += 2
|
||||
case sz <= math.MaxUint16:
|
||||
o, n = ensure(b, 3+sz)
|
||||
prefixu16(o[n:], mstr16, uint16(sz))
|
||||
n += 3
|
||||
default:
|
||||
o, n = ensure(b, 5+sz)
|
||||
prefixu32(o[n:], mstr32, uint32(sz))
|
||||
n += 5
|
||||
}
|
||||
return o[:n+copy(o[n:], str)]
|
||||
}
|
||||
|
||||
// AppendComplex64 appends a complex64 to the slice as a MessagePack extension
|
||||
func AppendComplex64(b []byte, c complex64) []byte {
|
||||
o, n := ensure(b, Complex64Size)
|
||||
o[n] = mfixext8
|
||||
o[n+1] = Complex64Extension
|
||||
big.PutUint32(o[n+2:], math.Float32bits(real(c)))
|
||||
big.PutUint32(o[n+6:], math.Float32bits(imag(c)))
|
||||
return o
|
||||
}
|
||||
|
||||
// AppendComplex128 appends a complex128 to the slice as a MessagePack extension
|
||||
func AppendComplex128(b []byte, c complex128) []byte {
|
||||
o, n := ensure(b, Complex128Size)
|
||||
o[n] = mfixext16
|
||||
o[n+1] = Complex128Extension
|
||||
big.PutUint64(o[n+2:], math.Float64bits(real(c)))
|
||||
big.PutUint64(o[n+10:], math.Float64bits(imag(c)))
|
||||
return o
|
||||
}
|
||||
|
||||
// AppendTime appends a time.Time to the slice as a MessagePack extension
|
||||
func AppendTime(b []byte, t time.Time) []byte {
|
||||
o, n := ensure(b, TimeSize)
|
||||
t = t.UTC()
|
||||
o[n] = mext8
|
||||
o[n+1] = 12
|
||||
o[n+2] = TimeExtension
|
||||
putUnix(o[n+3:], t.Unix(), int32(t.Nanosecond()))
|
||||
return o
|
||||
}
|
||||
|
||||
// AppendMapStrStr appends a map[string]string to the slice
|
||||
// as a MessagePack map with 'str'-type keys and values
|
||||
func AppendMapStrStr(b []byte, m map[string]string) []byte {
|
||||
sz := uint32(len(m))
|
||||
b = AppendMapHeader(b, sz)
|
||||
for key, val := range m {
|
||||
b = AppendString(b, key)
|
||||
b = AppendString(b, val)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// AppendMapStrIntf appends a map[string]interface{} to the slice
|
||||
// as a MessagePack map with 'str'-type keys.
|
||||
func AppendMapStrIntf(b []byte, m map[string]interface{}) ([]byte, error) {
|
||||
sz := uint32(len(m))
|
||||
b = AppendMapHeader(b, sz)
|
||||
var err error
|
||||
for key, val := range m {
|
||||
b = AppendString(b, key)
|
||||
b, err = AppendIntf(b, val)
|
||||
if err != nil {
|
||||
return b, err
|
||||
}
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// AppendIntf appends the concrete type of 'i' to the
|
||||
// provided []byte. 'i' must be one of the following:
|
||||
// - 'nil'
|
||||
// - A bool, float, string, []byte, int, uint, or complex
|
||||
// - A map[string]interface{} or map[string]string
|
||||
// - A []T, where T is another supported type
|
||||
// - A *T, where T is another supported type
|
||||
// - A type that satisfieds the msgp.Marshaler interface
|
||||
// - A type that satisfies the msgp.Extension interface
|
||||
func AppendIntf(b []byte, i interface{}) ([]byte, error) {
|
||||
if i == nil {
|
||||
return AppendNil(b), nil
|
||||
}
|
||||
|
||||
// all the concrete types
|
||||
// for which we have methods
|
||||
switch i := i.(type) {
|
||||
case Marshaler:
|
||||
return i.MarshalMsg(b)
|
||||
case Extension:
|
||||
return AppendExtension(b, i)
|
||||
case bool:
|
||||
return AppendBool(b, i), nil
|
||||
case float32:
|
||||
return AppendFloat32(b, i), nil
|
||||
case float64:
|
||||
return AppendFloat64(b, i), nil
|
||||
case complex64:
|
||||
return AppendComplex64(b, i), nil
|
||||
case complex128:
|
||||
return AppendComplex128(b, i), nil
|
||||
case string:
|
||||
return AppendString(b, i), nil
|
||||
case []byte:
|
||||
return AppendBytes(b, i), nil
|
||||
case int8:
|
||||
return AppendInt8(b, i), nil
|
||||
case int16:
|
||||
return AppendInt16(b, i), nil
|
||||
case int32:
|
||||
return AppendInt32(b, i), nil
|
||||
case int64:
|
||||
return AppendInt64(b, i), nil
|
||||
case int:
|
||||
return AppendInt64(b, int64(i)), nil
|
||||
case uint:
|
||||
return AppendUint64(b, uint64(i)), nil
|
||||
case uint8:
|
||||
return AppendUint8(b, i), nil
|
||||
case uint16:
|
||||
return AppendUint16(b, i), nil
|
||||
case uint32:
|
||||
return AppendUint32(b, i), nil
|
||||
case uint64:
|
||||
return AppendUint64(b, i), nil
|
||||
case time.Time:
|
||||
return AppendTime(b, i), nil
|
||||
case map[string]interface{}:
|
||||
return AppendMapStrIntf(b, i)
|
||||
case map[string]string:
|
||||
return AppendMapStrStr(b, i), nil
|
||||
case []interface{}:
|
||||
b = AppendArrayHeader(b, uint32(len(i)))
|
||||
var err error
|
||||
for _, k := range i {
|
||||
b, err = AppendIntf(b, k)
|
||||
if err != nil {
|
||||
return b, err
|
||||
}
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
var err error
|
||||
v := reflect.ValueOf(i)
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Slice:
|
||||
l := v.Len()
|
||||
b = AppendArrayHeader(b, uint32(l))
|
||||
for i := 0; i < l; i++ {
|
||||
b, err = AppendIntf(b, v.Index(i).Interface())
|
||||
if err != nil {
|
||||
return b, err
|
||||
}
|
||||
}
|
||||
return b, nil
|
||||
case reflect.Ptr:
|
||||
if v.IsNil() {
|
||||
return AppendNil(b), err
|
||||
}
|
||||
b, err = AppendIntf(b, v.Elem().Interface())
|
||||
return b, err
|
||||
default:
|
||||
return b, &ErrUnsupportedType{T: v.Type()}
|
||||
}
|
||||
}
|
319
vendor/github.com/tinylib/msgp/msgp/write_bytes_test.go
generated
vendored
Normal file
319
vendor/github.com/tinylib/msgp/msgp/write_bytes_test.go
generated
vendored
Normal file
@ -0,0 +1,319 @@
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestIssue116(t *testing.T) {
|
||||
data := AppendInt64(nil, math.MinInt64)
|
||||
i, _, err := ReadInt64Bytes(data)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if i != math.MinInt64 {
|
||||
t.Errorf("put %d in and got %d out", int64(math.MinInt64), i)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
|
||||
w := NewWriter(&buf)
|
||||
w.WriteInt64(math.MinInt64)
|
||||
w.Flush()
|
||||
i, err = NewReader(&buf).ReadInt64()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if i != math.MinInt64 {
|
||||
t.Errorf("put %d in and got %d out", int64(math.MinInt64), i)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendMapHeader(t *testing.T) {
|
||||
szs := []uint32{0, 1, uint32(tint8), uint32(tint16), tuint32}
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
|
||||
var bts []byte
|
||||
for _, sz := range szs {
|
||||
buf.Reset()
|
||||
en.WriteMapHeader(sz)
|
||||
en.Flush()
|
||||
bts = AppendMapHeader(bts[0:0], sz)
|
||||
|
||||
if !bytes.Equal(buf.Bytes(), bts) {
|
||||
t.Errorf("for size %d, encoder wrote %q and append wrote %q", sz, buf.Bytes(), bts)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAppendMapHeader(b *testing.B) {
|
||||
buf := make([]byte, 0, 9)
|
||||
N := b.N / 4
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < N; i++ {
|
||||
AppendMapHeader(buf[:0], 0)
|
||||
AppendMapHeader(buf[:0], uint32(tint8))
|
||||
AppendMapHeader(buf[:0], tuint16)
|
||||
AppendMapHeader(buf[:0], tuint32)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendArrayHeader(t *testing.T) {
|
||||
szs := []uint32{0, 1, uint32(tint8), uint32(tint16), tuint32}
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
|
||||
var bts []byte
|
||||
for _, sz := range szs {
|
||||
buf.Reset()
|
||||
en.WriteArrayHeader(sz)
|
||||
en.Flush()
|
||||
bts = AppendArrayHeader(bts[0:0], sz)
|
||||
|
||||
if !bytes.Equal(buf.Bytes(), bts) {
|
||||
t.Errorf("for size %d, encoder wrote %q and append wrote %q", sz, buf.Bytes(), bts)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAppendArrayHeader(b *testing.B) {
|
||||
buf := make([]byte, 0, 9)
|
||||
N := b.N / 4
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < N; i++ {
|
||||
AppendArrayHeader(buf[:0], 0)
|
||||
AppendArrayHeader(buf[:0], uint32(tint8))
|
||||
AppendArrayHeader(buf[:0], tuint16)
|
||||
AppendArrayHeader(buf[:0], tuint32)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendNil(t *testing.T) {
|
||||
var bts []byte
|
||||
bts = AppendNil(bts[0:0])
|
||||
if bts[0] != mnil {
|
||||
t.Fatal("bts[0] is not 'nil'")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendFloat64(t *testing.T) {
|
||||
f := float64(3.14159)
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
|
||||
var bts []byte
|
||||
en.WriteFloat64(f)
|
||||
en.Flush()
|
||||
bts = AppendFloat64(bts[0:0], f)
|
||||
if !bytes.Equal(buf.Bytes(), bts) {
|
||||
t.Errorf("for float %f, encoder wrote %q; append wrote %q", f, buf.Bytes(), bts)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAppendFloat64(b *testing.B) {
|
||||
f := float64(3.14159)
|
||||
buf := make([]byte, 0, 9)
|
||||
b.SetBytes(9)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
AppendFloat64(buf[0:0], f)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendFloat32(t *testing.T) {
|
||||
f := float32(3.14159)
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
|
||||
var bts []byte
|
||||
en.WriteFloat32(f)
|
||||
en.Flush()
|
||||
bts = AppendFloat32(bts[0:0], f)
|
||||
if !bytes.Equal(buf.Bytes(), bts) {
|
||||
t.Errorf("for float %f, encoder wrote %q; append wrote %q", f, buf.Bytes(), bts)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAppendFloat32(b *testing.B) {
|
||||
f := float32(3.14159)
|
||||
buf := make([]byte, 0, 5)
|
||||
b.SetBytes(5)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
AppendFloat32(buf[0:0], f)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendInt64(t *testing.T) {
|
||||
is := []int64{0, 1, -5, -50, int64(tint16), int64(tint32), int64(tint64)}
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
|
||||
var bts []byte
|
||||
for _, i := range is {
|
||||
buf.Reset()
|
||||
en.WriteInt64(i)
|
||||
en.Flush()
|
||||
bts = AppendInt64(bts[0:0], i)
|
||||
if !bytes.Equal(buf.Bytes(), bts) {
|
||||
t.Errorf("for int64 %d, encoder wrote %q; append wrote %q", i, buf.Bytes(), bts)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAppendInt64(b *testing.B) {
|
||||
is := []int64{0, 1, -5, -50, int64(tint16), int64(tint32), int64(tint64)}
|
||||
l := len(is)
|
||||
buf := make([]byte, 0, 9)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
AppendInt64(buf[0:0], is[i%l])
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendUint64(t *testing.T) {
|
||||
us := []uint64{0, 1, uint64(tuint16), uint64(tuint32), tuint64}
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
var bts []byte
|
||||
|
||||
for _, u := range us {
|
||||
buf.Reset()
|
||||
en.WriteUint64(u)
|
||||
en.Flush()
|
||||
bts = AppendUint64(bts[0:0], u)
|
||||
if !bytes.Equal(buf.Bytes(), bts) {
|
||||
t.Errorf("for uint64 %d, encoder wrote %q; append wrote %q", u, buf.Bytes(), bts)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAppendUint64(b *testing.B) {
|
||||
us := []uint64{0, 1, 15, uint64(tuint16), uint64(tuint32), tuint64}
|
||||
buf := make([]byte, 0, 9)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
l := len(us)
|
||||
for i := 0; i < b.N; i++ {
|
||||
AppendUint64(buf[0:0], us[i%l])
|
||||
}
|
||||
}
|
||||
|
||||
func TestAppendBytes(t *testing.T) {
|
||||
sizes := []int{0, 1, 225, int(tuint32)}
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
var bts []byte
|
||||
|
||||
for _, sz := range sizes {
|
||||
buf.Reset()
|
||||
b := RandBytes(sz)
|
||||
en.WriteBytes(b)
|
||||
en.Flush()
|
||||
bts = AppendBytes(b[0:0], b)
|
||||
if !bytes.Equal(buf.Bytes(), bts) {
|
||||
t.Errorf("for bytes of length %d, encoder wrote %d bytes and append wrote %d bytes", sz, buf.Len(), len(bts))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func benchappendBytes(size uint32, b *testing.B) {
|
||||
bts := RandBytes(int(size))
|
||||
buf := make([]byte, 0, len(bts)+5)
|
||||
b.SetBytes(int64(len(bts) + 5))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
AppendBytes(buf[0:0], bts)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAppend16Bytes(b *testing.B) { benchappendBytes(16, b) }
|
||||
|
||||
func BenchmarkAppend256Bytes(b *testing.B) { benchappendBytes(256, b) }
|
||||
|
||||
func BenchmarkAppend2048Bytes(b *testing.B) { benchappendBytes(2048, b) }
|
||||
|
||||
func TestAppendString(t *testing.T) {
|
||||
sizes := []int{0, 1, 225, int(tuint32)}
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
var bts []byte
|
||||
|
||||
for _, sz := range sizes {
|
||||
buf.Reset()
|
||||
s := string(RandBytes(sz))
|
||||
en.WriteString(s)
|
||||
en.Flush()
|
||||
bts = AppendString(bts[0:0], s)
|
||||
if !bytes.Equal(buf.Bytes(), bts) {
|
||||
t.Errorf("for string of length %d, encoder wrote %d bytes and append wrote %d bytes", sz, buf.Len(), len(bts))
|
||||
t.Errorf("WriteString prefix: %x", buf.Bytes()[0:5])
|
||||
t.Errorf("Appendstring prefix: %x", bts[0:5])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func benchappendString(size uint32, b *testing.B) {
|
||||
str := string(RandBytes(int(size)))
|
||||
buf := make([]byte, 0, len(str)+5)
|
||||
b.SetBytes(int64(len(str) + 5))
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
AppendString(buf[0:0], str)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAppend16String(b *testing.B) { benchappendString(16, b) }
|
||||
|
||||
func BenchmarkAppend256String(b *testing.B) { benchappendString(256, b) }
|
||||
|
||||
func BenchmarkAppend2048String(b *testing.B) { benchappendString(2048, b) }
|
||||
|
||||
func TestAppendBool(t *testing.T) {
|
||||
vs := []bool{true, false}
|
||||
var buf bytes.Buffer
|
||||
en := NewWriter(&buf)
|
||||
var bts []byte
|
||||
|
||||
for _, v := range vs {
|
||||
buf.Reset()
|
||||
en.WriteBool(v)
|
||||
en.Flush()
|
||||
bts = AppendBool(bts[0:0], v)
|
||||
if !bytes.Equal(buf.Bytes(), bts) {
|
||||
t.Errorf("for %t, encoder wrote %q and append wrote %q", v, buf.Bytes(), bts)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAppendBool(b *testing.B) {
|
||||
vs := []bool{true, false}
|
||||
buf := make([]byte, 0, 1)
|
||||
b.SetBytes(1)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
AppendBool(buf[0:0], vs[i%2])
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkAppendTime(b *testing.B) {
|
||||
t := time.Now()
|
||||
b.SetBytes(15)
|
||||
buf := make([]byte, 0, 15)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
AppendTime(buf[0:0], t)
|
||||
}
|
||||
}
|
405
vendor/github.com/tinylib/msgp/msgp/write_test.go
generated
vendored
Normal file
405
vendor/github.com/tinylib/msgp/msgp/write_test.go
generated
vendored
Normal file
@ -0,0 +1,405 @@
|
||||
package msgp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
tint8 int8 = 126 // cannot be most fix* types
|
||||
tint16 int16 = 150 // cannot be int8
|
||||
tint32 int32 = math.MaxInt16 + 100 // cannot be int16
|
||||
tint64 int64 = math.MaxInt32 + 100 // cannot be int32
|
||||
tuint16 uint32 = 300 // cannot be uint8
|
||||
tuint32 uint32 = math.MaxUint16 + 100 // cannot be uint16
|
||||
tuint64 uint64 = math.MaxUint32 + 100 // cannot be uint32
|
||||
)
|
||||
|
||||
func RandBytes(sz int) []byte {
|
||||
out := make([]byte, sz)
|
||||
for i := range out {
|
||||
out[i] = byte(rand.Int63n(math.MaxInt64) % 256)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func TestWriteMapHeader(t *testing.T) {
|
||||
tests := []struct {
|
||||
Sz uint32
|
||||
Outbytes []byte
|
||||
}{
|
||||
{0, []byte{mfixmap}},
|
||||
{1, []byte{mfixmap | byte(1)}},
|
||||
{100, []byte{mmap16, byte(uint16(100) >> 8), byte(uint16(100))}},
|
||||
{tuint32,
|
||||
[]byte{mmap32,
|
||||
byte(tuint32 >> 24),
|
||||
byte(tuint32 >> 16),
|
||||
byte(tuint32 >> 8),
|
||||
byte(tuint32),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
var err error
|
||||
wr := NewWriter(&buf)
|
||||
for _, test := range tests {
|
||||
buf.Reset()
|
||||
err = wr.WriteMapHeader(test.Sz)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(buf.Bytes(), test.Outbytes) {
|
||||
t.Errorf("Expected bytes %x; got %x", test.Outbytes, buf.Bytes())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkWriteMapHeader(b *testing.B) {
|
||||
wr := NewWriter(Nowhere)
|
||||
N := b.N / 4
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < N; i++ {
|
||||
wr.WriteMapHeader(0)
|
||||
wr.WriteMapHeader(8)
|
||||
wr.WriteMapHeader(tuint16)
|
||||
wr.WriteMapHeader(tuint32)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteArrayHeader(t *testing.T) {
|
||||
tests := []struct {
|
||||
Sz uint32
|
||||
Outbytes []byte
|
||||
}{
|
||||
{0, []byte{mfixarray}},
|
||||
{1, []byte{mfixarray | byte(1)}},
|
||||
{tuint16, []byte{marray16, byte(tuint16 >> 8), byte(tuint16)}},
|
||||
{tuint32, []byte{marray32, byte(tuint32 >> 24), byte(tuint32 >> 16), byte(tuint32 >> 8), byte(tuint32)}},
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
var err error
|
||||
wr := NewWriter(&buf)
|
||||
for _, test := range tests {
|
||||
buf.Reset()
|
||||
err = wr.WriteArrayHeader(test.Sz)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(buf.Bytes(), test.Outbytes) {
|
||||
t.Errorf("Expected bytes %x; got %x", test.Outbytes, buf.Bytes())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadWriteStringHeader(t *testing.T) {
|
||||
sizes := []uint32{0, 5, 8, 19, 150, tuint16, tuint32}
|
||||
var buf bytes.Buffer
|
||||
var err error
|
||||
wr := NewWriter(&buf)
|
||||
for _, sz := range sizes {
|
||||
buf.Reset()
|
||||
err = wr.WriteStringHeader(sz)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var nsz uint32
|
||||
nsz, err = NewReader(&buf).ReadStringHeader()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if nsz != sz {
|
||||
t.Errorf("put in size %d but got out size %d", sz, nsz)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadWriteBytesHeader(t *testing.T) {
|
||||
sizes := []uint32{0, 5, 8, 19, 150, tuint16, tuint32}
|
||||
var buf bytes.Buffer
|
||||
var err error
|
||||
wr := NewWriter(&buf)
|
||||
for _, sz := range sizes {
|
||||
buf.Reset()
|
||||
err = wr.WriteBytesHeader(sz)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var nsz uint32
|
||||
nsz, err = NewReader(&buf).ReadBytesHeader()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if nsz != sz {
|
||||
t.Errorf("put in size %d but got out size %d", sz, nsz)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkWriteArrayHeader(b *testing.B) {
|
||||
wr := NewWriter(Nowhere)
|
||||
N := b.N / 4
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < N; i++ {
|
||||
wr.WriteArrayHeader(0)
|
||||
wr.WriteArrayHeader(16)
|
||||
wr.WriteArrayHeader(tuint16)
|
||||
wr.WriteArrayHeader(tuint32)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteNil(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriter(&buf)
|
||||
|
||||
err := wr.WriteNil()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
bts := buf.Bytes()
|
||||
if bts[0] != mnil {
|
||||
t.Errorf("Expected %x; wrote %x", mnil, bts[0])
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteFloat64(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriter(&buf)
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
buf.Reset()
|
||||
flt := (rand.Float64() - 0.5) * math.MaxFloat64
|
||||
err := wr.WriteFloat64(flt)
|
||||
if err != nil {
|
||||
t.Errorf("Error with %f: %s", flt, err)
|
||||
}
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
bts := buf.Bytes()
|
||||
|
||||
if bts[0] != mfloat64 {
|
||||
t.Errorf("Leading byte was %x and not %x", bts[0], mfloat64)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkWriteFloat64(b *testing.B) {
|
||||
f := rand.Float64()
|
||||
wr := NewWriter(Nowhere)
|
||||
b.SetBytes(9)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
wr.WriteFloat64(f)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteFloat32(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriter(&buf)
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
buf.Reset()
|
||||
flt := (rand.Float32() - 0.5) * math.MaxFloat32
|
||||
err := wr.WriteFloat32(flt)
|
||||
if err != nil {
|
||||
t.Errorf("Error with %f: %s", flt, err)
|
||||
}
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
bts := buf.Bytes()
|
||||
|
||||
if bts[0] != mfloat32 {
|
||||
t.Errorf("Leading byte was %x and not %x", bts[0], mfloat64)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkWriteFloat32(b *testing.B) {
|
||||
f := rand.Float32()
|
||||
wr := NewWriter(Nowhere)
|
||||
b.SetBytes(5)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
wr.WriteFloat32(f)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteInt64(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriter(&buf)
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
buf.Reset()
|
||||
|
||||
num := (rand.Int63n(math.MaxInt64)) - (math.MaxInt64 / 2)
|
||||
|
||||
err := wr.WriteInt64(num)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if buf.Len() > 9 {
|
||||
t.Errorf("buffer length should be <= 9; it's %d", buf.Len())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkWriteInt64(b *testing.B) {
|
||||
wr := NewWriter(Nowhere)
|
||||
b.SetBytes(9)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
wr.WriteInt64(int64(tint64))
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteUint64(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriter(&buf)
|
||||
|
||||
for i := 0; i < 10000; i++ {
|
||||
buf.Reset()
|
||||
|
||||
num := uint64(rand.Int63n(math.MaxInt64))
|
||||
|
||||
err := wr.WriteUint64(num)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if buf.Len() > 9 {
|
||||
t.Errorf("buffer length should be <= 9; it's %d", buf.Len())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkWriteUint64(b *testing.B) {
|
||||
wr := NewWriter(Nowhere)
|
||||
b.SetBytes(9)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
wr.WriteUint64(uint64(tuint64))
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteBytes(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriter(&buf)
|
||||
sizes := []int{0, 1, 225, int(tuint32)}
|
||||
|
||||
for _, size := range sizes {
|
||||
buf.Reset()
|
||||
bts := RandBytes(size)
|
||||
|
||||
err := wr.WriteBytes(bts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if buf.Len() < len(bts) {
|
||||
t.Errorf("somehow, %d bytes were encoded in %d bytes", len(bts), buf.Len())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func benchwrBytes(size uint32, b *testing.B) {
|
||||
bts := RandBytes(int(size))
|
||||
wr := NewWriter(Nowhere)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
wr.WriteBytes(bts)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkWrite16Bytes(b *testing.B) { benchwrBytes(16, b) }
|
||||
|
||||
func BenchmarkWrite256Bytes(b *testing.B) { benchwrBytes(256, b) }
|
||||
|
||||
func BenchmarkWrite2048Bytes(b *testing.B) { benchwrBytes(2048, b) }
|
||||
|
||||
func TestWriteTime(t *testing.T) {
|
||||
var buf bytes.Buffer
|
||||
wr := NewWriter(&buf)
|
||||
tm := time.Now()
|
||||
err := wr.WriteTime(tm)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = wr.Flush()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if buf.Len() != 15 {
|
||||
t.Errorf("expected time.Time to be %d bytes; got %d", 15, buf.Len())
|
||||
}
|
||||
|
||||
newt, err := NewReader(&buf).ReadTime()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !newt.Equal(tm) {
|
||||
t.Errorf("in/out not equal; %s in and %s out", tm, newt)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkWriteTime(b *testing.B) {
|
||||
t := time.Now()
|
||||
wr := NewWriter(Nowhere)
|
||||
b.SetBytes(15)
|
||||
b.ReportAllocs()
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
wr.WriteTime(t)
|
||||
}
|
||||
}
|
117
vendor/github.com/tinylib/msgp/parse/directives.go
generated
vendored
Normal file
117
vendor/github.com/tinylib/msgp/parse/directives.go
generated
vendored
Normal file
@ -0,0 +1,117 @@
|
||||
package parse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/tinylib/msgp/gen"
|
||||
"go/ast"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const linePrefix = "//msgp:"
|
||||
|
||||
// func(args, fileset)
|
||||
type directive func([]string, *FileSet) error
|
||||
|
||||
// func(passName, args, printer)
|
||||
type passDirective func(gen.Method, []string, *gen.Printer) error
|
||||
|
||||
// map of all recognized directives
|
||||
//
|
||||
// to add a directive, define a func([]string, *FileSet) error
|
||||
// and then add it to this list.
|
||||
var directives = map[string]directive{
|
||||
"shim": applyShim,
|
||||
"ignore": ignore,
|
||||
"tuple": astuple,
|
||||
}
|
||||
|
||||
var passDirectives = map[string]passDirective{
|
||||
"ignore": passignore,
|
||||
}
|
||||
|
||||
func passignore(m gen.Method, text []string, p *gen.Printer) error {
|
||||
pushstate(m.String())
|
||||
for _, a := range text {
|
||||
p.ApplyDirective(m, gen.IgnoreTypename(a))
|
||||
infof("ignoring %s\n", a)
|
||||
}
|
||||
popstate()
|
||||
return nil
|
||||
}
|
||||
|
||||
// find all comment lines that begin with //msgp:
|
||||
func yieldComments(c []*ast.CommentGroup) []string {
|
||||
var out []string
|
||||
for _, cg := range c {
|
||||
for _, line := range cg.List {
|
||||
if strings.HasPrefix(line.Text, linePrefix) {
|
||||
out = append(out, strings.TrimPrefix(line.Text, linePrefix))
|
||||
}
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
//msgp:shim {Type} as:{Newtype} using:{toFunc/fromFunc}
|
||||
func applyShim(text []string, f *FileSet) error {
|
||||
if len(text) != 4 {
|
||||
return fmt.Errorf("shim directive should have 3 arguments; found %d", len(text)-1)
|
||||
}
|
||||
|
||||
name := text[1]
|
||||
be := gen.Ident(strings.TrimPrefix(strings.TrimSpace(text[2]), "as:")) // parse as::{base}
|
||||
if name[0] == '*' {
|
||||
name = name[1:]
|
||||
be.Needsref(true)
|
||||
}
|
||||
be.Alias(name)
|
||||
|
||||
usestr := strings.TrimPrefix(strings.TrimSpace(text[3]), "using:") // parse using::{method/method}
|
||||
|
||||
methods := strings.Split(usestr, "/")
|
||||
if len(methods) != 2 {
|
||||
return fmt.Errorf("expected 2 using::{} methods; found %d (%q)", len(methods), text[3])
|
||||
}
|
||||
|
||||
be.ShimToBase = methods[0]
|
||||
be.ShimFromBase = methods[1]
|
||||
|
||||
infof("%s -> %s\n", name, be.Value.String())
|
||||
f.findShim(name, be)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
//msgp:ignore {TypeA} {TypeB}...
|
||||
func ignore(text []string, f *FileSet) error {
|
||||
if len(text) < 2 {
|
||||
return nil
|
||||
}
|
||||
for _, item := range text[1:] {
|
||||
name := strings.TrimSpace(item)
|
||||
if _, ok := f.Identities[name]; ok {
|
||||
delete(f.Identities, name)
|
||||
infof("ignoring %s\n", name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//msgp:tuple {TypeA} {TypeB}...
|
||||
func astuple(text []string, f *FileSet) error {
|
||||
if len(text) < 2 {
|
||||
return nil
|
||||
}
|
||||
for _, item := range text[1:] {
|
||||
name := strings.TrimSpace(item)
|
||||
if el, ok := f.Identities[name]; ok {
|
||||
if st, ok := el.(*gen.Struct); ok {
|
||||
st.AsTuple = true
|
||||
infoln(name)
|
||||
} else {
|
||||
warnf("%s: only structs can be tuples\n", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
589
vendor/github.com/tinylib/msgp/parse/getast.go
generated
vendored
Normal file
589
vendor/github.com/tinylib/msgp/parse/getast.go
generated
vendored
Normal file
@ -0,0 +1,589 @@
|
||||
package parse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/tinylib/msgp/gen"
|
||||
"github.com/ttacon/chalk"
|
||||
)
|
||||
|
||||
// A FileSet is the in-memory representation of a
|
||||
// parsed file.
|
||||
type FileSet struct {
|
||||
Package string // package name
|
||||
Specs map[string]ast.Expr // type specs in file
|
||||
Identities map[string]gen.Elem // processed from specs
|
||||
Directives []string // raw preprocessor directives
|
||||
Imports []*ast.ImportSpec // imports
|
||||
}
|
||||
|
||||
// File parses a file at the relative path
|
||||
// provided and produces a new *FileSet.
|
||||
// If you pass in a path to a directory, the entire
|
||||
// directory will be parsed.
|
||||
// If unexport is false, only exported identifiers are included in the FileSet.
|
||||
// If the resulting FileSet would be empty, an error is returned.
|
||||
func File(name string, unexported bool) (*FileSet, error) {
|
||||
pushstate(name)
|
||||
defer popstate()
|
||||
fs := &FileSet{
|
||||
Specs: make(map[string]ast.Expr),
|
||||
Identities: make(map[string]gen.Elem),
|
||||
}
|
||||
|
||||
fset := token.NewFileSet()
|
||||
finfo, err := os.Stat(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if finfo.IsDir() {
|
||||
pkgs, err := parser.ParseDir(fset, name, nil, parser.ParseComments)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(pkgs) != 1 {
|
||||
return nil, fmt.Errorf("multiple packages in directory: %s", name)
|
||||
}
|
||||
var one *ast.Package
|
||||
for _, nm := range pkgs {
|
||||
one = nm
|
||||
break
|
||||
}
|
||||
fs.Package = one.Name
|
||||
for _, fl := range one.Files {
|
||||
pushstate(fl.Name.Name)
|
||||
fs.Directives = append(fs.Directives, yieldComments(fl.Comments)...)
|
||||
if !unexported {
|
||||
ast.FileExports(fl)
|
||||
}
|
||||
fs.getTypeSpecs(fl)
|
||||
popstate()
|
||||
}
|
||||
} else {
|
||||
f, err := parser.ParseFile(fset, name, nil, parser.ParseComments)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fs.Package = f.Name.Name
|
||||
fs.Directives = yieldComments(f.Comments)
|
||||
if !unexported {
|
||||
ast.FileExports(f)
|
||||
}
|
||||
fs.getTypeSpecs(f)
|
||||
}
|
||||
|
||||
if len(fs.Specs) == 0 {
|
||||
return nil, fmt.Errorf("no definitions in %s", name)
|
||||
}
|
||||
|
||||
fs.process()
|
||||
fs.applyDirectives()
|
||||
fs.propInline()
|
||||
|
||||
return fs, nil
|
||||
}
|
||||
|
||||
// applyDirectives applies all of the directives that
|
||||
// are known to the parser. additional method-specific
|
||||
// directives remain in f.Directives
|
||||
func (f *FileSet) applyDirectives() {
|
||||
newdirs := make([]string, 0, len(f.Directives))
|
||||
for _, d := range f.Directives {
|
||||
chunks := strings.Split(d, " ")
|
||||
if len(chunks) > 0 {
|
||||
if fn, ok := directives[chunks[0]]; ok {
|
||||
pushstate(chunks[0])
|
||||
err := fn(chunks, f)
|
||||
if err != nil {
|
||||
warnln(err.Error())
|
||||
}
|
||||
popstate()
|
||||
} else {
|
||||
newdirs = append(newdirs, d)
|
||||
}
|
||||
}
|
||||
}
|
||||
f.Directives = newdirs
|
||||
}
|
||||
|
||||
// A linkset is a graph of unresolved
// identities.
//
// Since gen.Ident can only represent
// one level of type indirection (e.g. Foo -> uint8),
// type declarations like `type Foo Bar`
// aren't resolve-able until we've processed
// everything else.
//
// The goal of this dependency resolution
// is to distill each type declaration
// into just one level of indirection.
// In other words, if we have:
//
//	type A uint64
//	type B A
//	type C B
//	type D C
//
// ... then we want to end up
// figuring out that D is just a uint64.
type linkset map[string]*gen.BaseElem
|
||||
|
||||
func (f *FileSet) resolve(ls linkset) {
|
||||
progress := true
|
||||
for progress && len(ls) > 0 {
|
||||
progress = false
|
||||
for name, elem := range ls {
|
||||
real, ok := f.Identities[elem.TypeName()]
|
||||
if ok {
|
||||
// copy the old type descriptor,
|
||||
// alias it to the new value,
|
||||
// and insert it into the resolved
|
||||
// identities list
|
||||
progress = true
|
||||
nt := real.Copy()
|
||||
nt.Alias(name)
|
||||
f.Identities[name] = nt
|
||||
delete(ls, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// what's left can't be resolved
|
||||
for name, elem := range ls {
|
||||
warnf("couldn't resolve type %s (%s)\n", name, elem.TypeName())
|
||||
}
|
||||
}
|
||||
|
||||
// process takes the contents of f.Specs and
// uses them to populate f.Identities
func (f *FileSet) process() {

	deferred := make(linkset)
parse:
	for name, def := range f.Specs {
		pushstate(name)
		el := f.parseExpr(def)
		if el == nil {
			// unsupported type expression; skip it
			warnln("failed to parse")
			popstate()
			continue parse
		}
		// push unresolved identities into
		// the graph of links and resolve after
		// we've handled every possible named type.
		if be, ok := el.(*gen.BaseElem); ok && be.Value == gen.IDENT {
			deferred[name] = be
			popstate()
			continue parse
		}
		el.Alias(name)
		f.Identities[name] = el
		popstate()
	}

	if len(deferred) > 0 {
		// resolve the type-alias chains we deferred above
		f.resolve(deferred)
	}
}
|
||||
|
||||
func strToMethod(s string) gen.Method {
|
||||
switch s {
|
||||
case "encode":
|
||||
return gen.Encode
|
||||
case "decode":
|
||||
return gen.Decode
|
||||
case "test":
|
||||
return gen.Test
|
||||
case "size":
|
||||
return gen.Size
|
||||
case "marshal":
|
||||
return gen.Marshal
|
||||
case "unmarshal":
|
||||
return gen.Unmarshal
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// applyDirs applies the method-specific pass directives
// that remain after applyDirectives, e.g.
//
//	//msgp:encode ignore {{TypeName}}
//
func (f *FileSet) applyDirs(p *gen.Printer) {
	// apply directives of the form
	//
	//     //msgp:encode ignore {{TypeName}}
	//
loop:
	for _, d := range f.Directives {
		chunks := strings.Split(d, " ")
		if len(chunks) > 1 {
			for i := range chunks {
				chunks[i] = strings.TrimSpace(chunks[i])
			}
			// first token selects the pass (method)
			m := strToMethod(chunks[0])
			if m == 0 {
				warnf("unknown pass name: %q\n", chunks[0])
				continue loop
			}
			// second token selects the directive handler
			if fn, ok := passDirectives[chunks[1]]; ok {
				pushstate(chunks[1])
				err := fn(m, chunks[2:], p)
				if err != nil {
					// directive errors are non-fatal
					warnf("error applying directive: %s\n", err)
				}
				popstate()
			} else {
				warnf("unrecognized directive %q\n", chunks[1])
			}
		} else {
			warnf("empty directive: %q\n", d)
		}
	}
}
|
||||
|
||||
func (f *FileSet) PrintTo(p *gen.Printer) error {
|
||||
f.applyDirs(p)
|
||||
names := make([]string, 0, len(f.Identities))
|
||||
for name := range f.Identities {
|
||||
names = append(names, name)
|
||||
}
|
||||
sort.Strings(names)
|
||||
for _, name := range names {
|
||||
el := f.Identities[name]
|
||||
el.SetVarname("z")
|
||||
pushstate(el.TypeName())
|
||||
err := p.Print(el)
|
||||
popstate()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getTypeSpecs extracts all of the *ast.TypeSpecs in the file
// into fs.Specs, but does not set the actual element
func (fs *FileSet) getTypeSpecs(f *ast.File) {

	// collect all imports...
	fs.Imports = append(fs.Imports, f.Imports...)

	// check all declarations...
	for i := range f.Decls {

		// for GenDecls...
		if g, ok := f.Decls[i].(*ast.GenDecl); ok {

			// and check the specs...
			for _, s := range g.Specs {

				// for ast.TypeSpecs....
				if ts, ok := s.(*ast.TypeSpec); ok {
					switch ts.Type.(type) {

					// this is the list of parse-able
					// type specs; anything else (func types,
					// channels, etc.) is silently skipped
					case *ast.StructType,
						*ast.ArrayType,
						*ast.StarExpr,
						*ast.MapType,
						*ast.Ident:
						fs.Specs[ts.Name.Name] = ts.Type

					}
				}
			}
		}
	}
}
|
||||
|
||||
func fieldName(f *ast.Field) string {
|
||||
switch len(f.Names) {
|
||||
case 0:
|
||||
return stringify(f.Type)
|
||||
case 1:
|
||||
return f.Names[0].Name
|
||||
default:
|
||||
return f.Names[0].Name + " (and others)"
|
||||
}
|
||||
}
|
||||
|
||||
func (fs *FileSet) parseFieldList(fl *ast.FieldList) []gen.StructField {
|
||||
if fl == nil || fl.NumFields() == 0 {
|
||||
return nil
|
||||
}
|
||||
out := make([]gen.StructField, 0, fl.NumFields())
|
||||
for _, field := range fl.List {
|
||||
pushstate(fieldName(field))
|
||||
fds := fs.getField(field)
|
||||
if len(fds) > 0 {
|
||||
out = append(out, fds...)
|
||||
} else {
|
||||
warnln("ignored.")
|
||||
}
|
||||
popstate()
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// getField translates an *ast.Field into zero or more
// gen.StructFields: several for a multi-name in-line
// declaration, none for `msg:"-"` tags or unsupported types.
func (fs *FileSet) getField(f *ast.Field) []gen.StructField {
	sf := make([]gen.StructField, 1)
	var extension bool
	// parse tag; otherwise field name is field tag
	if f.Tag != nil {
		body := reflect.StructTag(strings.Trim(f.Tag.Value, "`")).Get("msg")
		tags := strings.Split(body, ",")
		// `msg:"name,extension"` marks the field as a msgp extension
		if len(tags) == 2 && tags[1] == "extension" {
			extension = true
		}
		// ignore "-" fields
		if tags[0] == "-" {
			return nil
		}
		sf[0].FieldTag = tags[0]
	}

	ex := fs.parseExpr(f.Type)
	if ex == nil {
		// unsupported type expression
		return nil
	}

	// parse field name
	switch len(f.Names) {
	case 0:
		// embedded field: derive the name from the type
		sf[0].FieldName = embedded(f.Type)
	case 1:
		sf[0].FieldName = f.Names[0].Name
	default:
		// this is for a multiple in-line declaration,
		// e.g. type A struct { One, Two int }
		// each name gets its own copy of the shared element
		sf = sf[0:0]
		for _, nm := range f.Names {
			sf = append(sf, gen.StructField{
				FieldTag:  nm.Name,
				FieldName: nm.Name,
				FieldElem: ex.Copy(),
			})
		}
		return sf
	}
	sf[0].FieldElem = ex
	if sf[0].FieldTag == "" {
		// no explicit tag: fall back to the field name
		sf[0].FieldTag = sf[0].FieldName
	}

	// validate extension: only *T and T where the underlying
	// element is a BaseElem can be an extension
	if extension {
		switch ex := ex.(type) {
		case *gen.Ptr:
			if b, ok := ex.Value.(*gen.BaseElem); ok {
				b.Value = gen.Ext
			} else {
				warnln("couldn't cast to extension.")
				return nil
			}
		case *gen.BaseElem:
			ex.Value = gen.Ext
		default:
			warnln("couldn't cast to extension.")
			return nil
		}
	}
	return sf
}
|
||||
|
||||
// extract embedded field name
|
||||
//
|
||||
// so, for a struct like
|
||||
//
|
||||
// type A struct {
|
||||
// io.Writer
|
||||
// }
|
||||
//
|
||||
// we want "Writer"
|
||||
func embedded(f ast.Expr) string {
|
||||
switch f := f.(type) {
|
||||
case *ast.Ident:
|
||||
return f.Name
|
||||
case *ast.StarExpr:
|
||||
return embedded(f.X)
|
||||
case *ast.SelectorExpr:
|
||||
return f.Sel.Name
|
||||
default:
|
||||
// other possibilities are disallowed
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// stringify a field type name
|
||||
func stringify(e ast.Expr) string {
|
||||
switch e := e.(type) {
|
||||
case *ast.Ident:
|
||||
return e.Name
|
||||
case *ast.StarExpr:
|
||||
return "*" + stringify(e.X)
|
||||
case *ast.SelectorExpr:
|
||||
return stringify(e.X) + "." + e.Sel.Name
|
||||
case *ast.ArrayType:
|
||||
if e.Len == nil {
|
||||
return "[]" + stringify(e.Elt)
|
||||
}
|
||||
return fmt.Sprintf("[%s]%s", stringify(e.Len), stringify(e.Elt))
|
||||
case *ast.InterfaceType:
|
||||
if e.Methods == nil || e.Methods.NumFields() == 0 {
|
||||
return "interface{}"
|
||||
}
|
||||
}
|
||||
return "<BAD>"
|
||||
}
|
||||
|
||||
// parseExpr recursively translates an ast.Expr to a gen.Elem;
// nil means the type is not supported.
// expected input types:
// - *ast.MapType (map[T]J)
// - *ast.Ident (name)
// - *ast.ArrayType ([(sz)]T)
// - *ast.StarExpr (*T)
// - *ast.StructType (struct {})
// - *ast.SelectorExpr (a.B)
// - *ast.InterfaceType (interface {})
func (fs *FileSet) parseExpr(e ast.Expr) gen.Elem {
	switch e := e.(type) {

	case *ast.MapType:
		// only map[string]T is representable
		if k, ok := e.Key.(*ast.Ident); ok && k.Name == "string" {
			if in := fs.parseExpr(e.Value); in != nil {
				return &gen.Map{Value: in}
			}
		}
		return nil

	case *ast.Ident:
		b := gen.Ident(e.Name)

		// work to resolve this expression
		// can be done later, once we've resolved
		// everything else.
		if b.Value == gen.IDENT {
			if _, ok := fs.Specs[e.Name]; !ok {
				warnf("non-local identifier: %s\n", e.Name)
			}
		}
		return b

	case *ast.ArrayType:

		// special case for []byte
		if e.Len == nil {
			if i, ok := e.Elt.(*ast.Ident); ok && i.Name == "byte" {
				return &gen.BaseElem{Value: gen.Bytes}
			}
		}

		// return early if we don't know
		// what the slice element type is
		els := fs.parseExpr(e.Elt)
		if els == nil {
			return nil
		}

		// array and not a slice
		if e.Len != nil {
			switch s := e.Len.(type) {
			case *ast.BasicLit:
				return &gen.Array{
					Size: s.Value,
					Els:  els,
				}

			case *ast.Ident:
				return &gen.Array{
					Size: s.String(),
					Els:  els,
				}

			case *ast.SelectorExpr:
				return &gen.Array{
					Size: stringify(s),
					Els:  els,
				}

			default:
				// non-constant or computed array sizes
				// aren't supported
				return nil
			}
		}
		return &gen.Slice{Els: els}

	case *ast.StarExpr:
		if v := fs.parseExpr(e.X); v != nil {
			return &gen.Ptr{Value: v}
		}
		return nil

	case *ast.StructType:
		if fields := fs.parseFieldList(e.Fields); len(fields) > 0 {
			return &gen.Struct{Fields: fields}
		}
		return nil

	case *ast.SelectorExpr:
		// qualified (imported) identifier
		return gen.Ident(stringify(e))

	case *ast.InterfaceType:
		// support `interface{}`
		if len(e.Methods.List) == 0 {
			return &gen.BaseElem{Value: gen.Intf}
		}
		return nil

	default: // other types not supported
		return nil
	}
}
|
||||
|
||||
// infof logs a formatted message in green. Note that s is
// pushed onto the logging context first, so the joined
// context string (which includes s) is used as the format
// string for v.
func infof(s string, v ...interface{}) {
	pushstate(s)
	fmt.Printf(chalk.Green.Color(strings.Join(logctx, ": ")), v...)
	popstate()
}

// infoln logs a plain message in green, prefixed with the
// current logging context.
func infoln(s string) {
	pushstate(s)
	fmt.Println(chalk.Green.Color(strings.Join(logctx, ": ")))
	popstate()
}

// warnf logs a formatted warning in yellow; s joins the
// logging context to form the format string (see infof).
func warnf(s string, v ...interface{}) {
	pushstate(s)
	fmt.Printf(chalk.Yellow.Color(strings.Join(logctx, ": ")), v...)
	popstate()
}

// warnln logs a plain warning in yellow, prefixed with the
// current logging context.
func warnln(s string) {
	pushstate(s)
	fmt.Println(chalk.Yellow.Color(strings.Join(logctx, ": ")))
	popstate()
}

// fatalf logs a formatted message in red; despite the name,
// it does not terminate the program.
func fatalf(s string, v ...interface{}) {
	pushstate(s)
	fmt.Printf(chalk.Red.Color(strings.Join(logctx, ": ")), v...)
	popstate()
}
|
||||
|
||||
// logctx holds the stack of context labels used to prefix
// log output (see infof, warnf, and friends).
var logctx []string

// pushstate pushes a label onto the logging context.
func pushstate(s string) {
	logctx = append(logctx, s)
}

// popstate removes the most recently pushed label.
// It panics if the context stack is empty, matching the
// original slice-bounds behavior.
func popstate() {
	last := len(logctx) - 1
	logctx = logctx[:last]
}
|
146
vendor/github.com/tinylib/msgp/parse/inline.go
generated
vendored
Normal file
146
vendor/github.com/tinylib/msgp/parse/inline.go
generated
vendored
Normal file
@ -0,0 +1,146 @@
|
||||
package parse
|
||||
|
||||
import (
|
||||
"github.com/tinylib/msgp/gen"
|
||||
)
|
||||
|
||||
// This file defines when and how we
|
||||
// propagate type information from
|
||||
// one type declaration to another.
|
||||
// After the processing pass, every
|
||||
// non-primitive type is marshalled/unmarshalled/etc.
|
||||
// through a function call. Here, we propagate
|
||||
// the type information into the caller's type
|
||||
// tree *if* the child type is simple enough.
|
||||
//
|
||||
// For example, types like
|
||||
//
|
||||
// type A [4]int
|
||||
//
|
||||
// will get pushed into parent methods,
|
||||
// whereas types like
|
||||
//
|
||||
// type B [3]map[string]struct{A, B [4]string}
|
||||
//
|
||||
// will not.
|
||||
|
||||
// maxComplex is an approximate measure
// of the number of children in a node;
// types at or above this complexity are
// not inlined into their parents.
const maxComplex = 5

// findShim begins a recursive search for identities with the
// given name and replaces them with be
func (f *FileSet) findShim(id string, be *gen.BaseElem) {
	for name, el := range f.Identities {
		pushstate(name)
		// only composite elements can contain a reference to id
		switch el := el.(type) {
		case *gen.Struct:
			for i := range el.Fields {
				f.nextShim(&el.Fields[i].FieldElem, id, be)
			}
		case *gen.Array:
			f.nextShim(&el.Els, id, be)
		case *gen.Slice:
			f.nextShim(&el.Els, id, be)
		case *gen.Map:
			f.nextShim(&el.Value, id, be)
		case *gen.Ptr:
			f.nextShim(&el.Value, id, be)
		}
		popstate()
	}
	// we'll need this at the top level as well
	f.Identities[id] = be
}
|
||||
|
||||
// nextShim replaces *ref with a copy of be when its type name
// matches id; otherwise it recurses into composite children.
func (f *FileSet) nextShim(ref *gen.Elem, id string, be *gen.BaseElem) {
	if (*ref).TypeName() == id {
		// preserve the generated variable name across the swap
		vn := (*ref).Varname()
		*ref = be.Copy()
		(*ref).SetVarname(vn)
	} else {
		switch el := (*ref).(type) {
		case *gen.Struct:
			for i := range el.Fields {
				f.nextShim(&el.Fields[i].FieldElem, id, be)
			}
		case *gen.Array:
			f.nextShim(&el.Els, id, be)
		case *gen.Slice:
			f.nextShim(&el.Els, id, be)
		case *gen.Map:
			f.nextShim(&el.Value, id, be)
		case *gen.Ptr:
			f.nextShim(&el.Value, id, be)
		}
	}
}
|
||||
|
||||
// propInline identifies and inlines candidates by walking
// every identity and recursing into its composite children.
func (f *FileSet) propInline() {
	for name, el := range f.Identities {
		pushstate(name)
		// name is passed down as the inlining root so that a
		// type is never inlined into itself
		switch el := el.(type) {
		case *gen.Struct:
			for i := range el.Fields {
				f.nextInline(&el.Fields[i].FieldElem, name)
			}
		case *gen.Array:
			f.nextInline(&el.Els, name)
		case *gen.Slice:
			f.nextInline(&el.Els, name)
		case *gen.Map:
			f.nextInline(&el.Value, name)
		case *gen.Ptr:
			f.nextInline(&el.Value, name)
		}
		popstate()
	}
}
|
||||
|
||||
// fatalloop is the panic message used when the inliner
// detects that it is about to recurse forever.
const fatalloop = `detected infinite recursion in inlining loop!
Please file a bug at github.com/tinylib/msgp/issues!
Thanks!
`

// nextInline replaces resolvable, sufficiently simple
// identifier references under *ref with copies of their
// definitions; root is the name of the type being inlined
// into, used to prevent self-inlining.
func (f *FileSet) nextInline(ref *gen.Elem, root string) {
	switch el := (*ref).(type) {
	case *gen.BaseElem:
		// ensure that we're not inlining
		// a type into itself
		typ := el.TypeName()
		if el.Value == gen.IDENT && typ != root {
			if node, ok := f.Identities[typ]; ok && node.Complexity() < maxComplex {
				infof("inlining %s\n", typ)

				// This should never happen; it will cause
				// infinite recursion.
				if node == *ref {
					panic(fatalloop)
				}

				*ref = node.Copy()
				f.nextInline(ref, node.TypeName())
			} else if !ok && !el.Resolved() {
				// this is the point at which we're sure that
				// we've got a type that isn't a primitive,
				// a library builtin, or a processed type
				warnf("unresolved identifier: %s\n", typ)
			}
		}
	case *gen.Struct:
		for i := range el.Fields {
			f.nextInline(&el.Fields[i].FieldElem, root)
		}
	case *gen.Array:
		f.nextInline(&el.Els, root)
	case *gen.Slice:
		f.nextInline(&el.Els, root)
	case *gen.Map:
		f.nextInline(&el.Value, root)
	case *gen.Ptr:
		f.nextInline(&el.Value, root)
	default:
		// every gen.Elem variant must be handled above
		panic("bad elem type")
	}
}
|
128
vendor/github.com/tinylib/msgp/printer/print.go
generated
vendored
Normal file
128
vendor/github.com/tinylib/msgp/printer/print.go
generated
vendored
Normal file
@ -0,0 +1,128 @@
|
||||
package printer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/tinylib/msgp/gen"
|
||||
"github.com/tinylib/msgp/parse"
|
||||
"github.com/ttacon/chalk"
|
||||
"golang.org/x/tools/imports"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// infof prints a formatted status message in magenta.
func infof(s string, v ...interface{}) {
	fmt.Printf(chalk.Magenta.Color(s), v...)
}
|
||||
|
||||
// PrintFile prints the methods for the provided list
// of elements to the given file name and canonical
// package path.
func PrintFile(file string, f *parse.FileSet, mode gen.Method) error {
	out, tests, err := generate(f, mode)
	if err != nil {
		return err
	}

	// we'll run goimports on the main file
	// in another goroutine, and run it here
	// for the test file. empirically, this
	// takes about the same amount of time as
	// doing them in serial when GOMAXPROCS=1,
	// and faster otherwise.
	res := goformat(file, out.Bytes())
	if tests != nil {
		testfile := strings.TrimSuffix(file, ".go") + "_test.go"
		err = format(testfile, tests.Bytes())
		if err != nil {
			return err
		}
		infof(">>> Wrote and formatted \"%s\"\n", testfile)
	}
	// wait for the concurrent goimports pass to finish
	err = <-res
	if err != nil {
		return err
	}
	return nil
}
|
||||
|
||||
// format runs goimports-style processing on data (using file
// for import-resolution context) and writes the result to file.
func format(file string, data []byte) error {
	out, err := imports.Process(file, data, nil)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(file, out, 0600)
}
|
||||
|
||||
func goformat(file string, data []byte) <-chan error {
|
||||
out := make(chan error, 1)
|
||||
go func(file string, data []byte, end chan error) {
|
||||
end <- format(file, data)
|
||||
infof(">>> Wrote and formatted \"%s\"\n", file)
|
||||
}(file, data, out)
|
||||
return out
|
||||
}
|
||||
|
||||
// dedupImports removes duplicate entries from imp,
// preserving first-occurrence order. Keeping the order
// deterministic (rather than iterating a map) makes the
// generated import block stable across runs.
func dedupImports(imp []string) []string {
	seen := make(map[string]struct{}, len(imp))
	// non-nil so callers can rely on a concrete slice
	r := []string{}
	for _, path := range imp {
		if _, dup := seen[path]; dup {
			continue
		}
		seen[path] = struct{}{}
		r = append(r, path)
	}
	return r
}
|
||||
|
||||
// generate produces the generated-code buffer and, when the
// Test method bit is set in mode, a second buffer for the
// _test.go file (nil otherwise). The error comes from the
// final PrintTo pass.
func generate(f *parse.FileSet, mode gen.Method) (*bytes.Buffer, *bytes.Buffer, error) {
	outbuf := bytes.NewBuffer(make([]byte, 0, 4096))
	writePkgHeader(outbuf, f.Package)

	// the generated code always needs the msgp runtime,
	// plus whatever the source file already imported
	myImports := []string{"github.com/tinylib/msgp/msgp"}
	for _, imp := range f.Imports {
		if imp.Name != nil {
			// have an alias, include it.
			myImports = append(myImports, imp.Name.Name+` `+imp.Path.Value)
		} else {
			myImports = append(myImports, imp.Path.Value)
		}
	}
	dedup := dedupImports(myImports)
	writeImportHeader(outbuf, dedup...)

	var testbuf *bytes.Buffer
	var testwr io.Writer
	if mode&gen.Test == gen.Test {
		// tests requested: build a second buffer for the test file
		testbuf = bytes.NewBuffer(make([]byte, 0, 4096))
		writePkgHeader(testbuf, f.Package)
		if mode&(gen.Encode|gen.Decode) != 0 {
			writeImportHeader(testbuf, "bytes", "github.com/tinylib/msgp/msgp", "testing")
		} else {
			writeImportHeader(testbuf, "github.com/tinylib/msgp/msgp", "testing")
		}
		testwr = testbuf
	}
	return outbuf, testbuf, f.PrintTo(gen.NewPrinter(mode, outbuf, testwr))
}
|
||||
|
||||
func writePkgHeader(b *bytes.Buffer, name string) {
|
||||
b.WriteString("package ")
|
||||
b.WriteString(name)
|
||||
b.WriteByte('\n')
|
||||
b.WriteString("// NOTE: THIS FILE WAS PRODUCED BY THE\n// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)\n// DO NOT EDIT\n\n")
|
||||
}
|
||||
|
||||
func writeImportHeader(b *bytes.Buffer, imports ...string) {
|
||||
b.WriteString("import (\n")
|
||||
for _, im := range imports {
|
||||
if im[len(im)-1] == '"' {
|
||||
// support aliased imports
|
||||
fmt.Fprintf(b, "\t%s\n", im)
|
||||
} else {
|
||||
fmt.Fprintf(b, "\t%q\n", im)
|
||||
}
|
||||
}
|
||||
b.WriteString(")\n\n")
|
||||
}
|
104
vendor/gopkg.in/DataDog/dd-trace-go.v1/.circleci/config.yml
generated
vendored
Normal file
104
vendor/gopkg.in/DataDog/dd-trace-go.v1/.circleci/config.yml
generated
vendored
Normal file
@ -0,0 +1,104 @@
|
||||
version: 2
|
||||
jobs:
|
||||
build:
|
||||
working_directory: /go/src/gopkg.in/DataDog/dd-trace-go.v1
|
||||
resource_class: xlarge
|
||||
|
||||
docker:
|
||||
- image: circleci/golang:latest
|
||||
- image: cassandra:3.7
|
||||
- image: circleci/mysql:5.7
|
||||
environment:
|
||||
MYSQL_ROOT_PASSWORD: admin
|
||||
MYSQL_PASSWORD: test
|
||||
MYSQL_USER: test
|
||||
MYSQL_DATABASE: test
|
||||
- image: circleci/postgres:9.5
|
||||
environment:
|
||||
POSTGRES_PASSWORD: postgres
|
||||
POSTGRES_USER: postgres
|
||||
POSTGRES_DB: postgres
|
||||
- image: redis:3.2
|
||||
- image: elasticsearch:2
|
||||
environment:
|
||||
ES_JAVA_OPTS: "-Xms750m -Xmx750m" # https://github.com/10up/wp-local-docker/issues/6
|
||||
- image: elasticsearch:5
|
||||
environment:
|
||||
ES_JAVA_OPTS: "-Xms750m -Xmx750m" # https://github.com/10up/wp-local-docker/issues/6
|
||||
- image: datadog/docker-dd-agent
|
||||
environment:
|
||||
DD_APM_ENABLED: "true"
|
||||
DD_BIND_HOST: "0.0.0.0"
|
||||
DD_API_KEY: invalid_key_but_this_is_fine
|
||||
- image: circleci/mongo:latest-ram
|
||||
- image: memcached:1.5.9
|
||||
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Vendor gRPC v1.2.0
|
||||
# This step vendors gRPC v1.2.0 inside our gRPC.v12 contrib
|
||||
# to allow running the tests against the correct version of
|
||||
# the gRPC library. The library is not committed into the
|
||||
# repository to avoid conflicts with the user's imports.
|
||||
environment:
|
||||
GRPC_DEST: contrib/google.golang.org/grpc.v12/vendor/google.golang.org/grpc
|
||||
command: |
|
||||
mkdir -p $GRPC_DEST
|
||||
git clone --branch v1.2.0 https://github.com/grpc/grpc-go $GRPC_DEST
|
||||
|
||||
- run:
|
||||
name: Fetching dependencies
|
||||
command: |
|
||||
go get -v -t ./...
|
||||
go get -v -u golang.org/x/lint/golint
|
||||
go get -v -u github.com/alecthomas/gometalinter
|
||||
|
||||
- run:
|
||||
name: Wait for MySQL
|
||||
command: dockerize -wait tcp://localhost:3306 -timeout 1m
|
||||
|
||||
- run:
|
||||
name: Wait for Postgres
|
||||
command: dockerize -wait tcp://localhost:5432 -timeout 1m
|
||||
|
||||
- run:
|
||||
name: Wait for Redis
|
||||
command: dockerize -wait tcp://localhost:6379 -timeout 1m
|
||||
|
||||
- run:
|
||||
name: Wait for ElasticSearch (1)
|
||||
command: dockerize -wait http://localhost:9200 -timeout 1m
|
||||
|
||||
- run:
|
||||
name: Wait for ElasticSearch (2)
|
||||
command: dockerize -wait http://localhost:9201 -timeout 1m
|
||||
|
||||
- run:
|
||||
name: Wait for Datadog Agent
|
||||
command: dockerize -wait tcp://127.0.0.1:8126 -timeout 1m
|
||||
|
||||
- run:
|
||||
name: Wait for Cassandra
|
||||
command: dockerize -wait tcp://localhost:9042 -timeout 2m
|
||||
|
||||
- run:
|
||||
name: Linting
|
||||
command: |
|
||||
gometalinter --disable-all --vendor --deadline=60s \
|
||||
--enable=golint \
|
||||
--enable=vet \
|
||||
./...
|
||||
|
||||
- run:
|
||||
name: Testing
|
||||
command: |
|
||||
INTEGRATION=1 go test -v -race `go list ./... | grep -v contrib/go-redis/redis`
|
||||
|
||||
- run:
|
||||
name: Testing contrib/go-redis/redis
|
||||
command: |
|
||||
(cd $GOPATH/src/github.com/go-redis/redis && git checkout v6.13.2)
|
||||
INTEGRATION=1 go test -v -race ./contrib/go-redis/redis/...
|
||||
(cd $GOPATH/src/github.com/go-redis/redis && git checkout master)
|
||||
INTEGRATION=1 go test -v -race ./contrib/go-redis/redis/...
|
14
vendor/gopkg.in/DataDog/dd-trace-go.v1/.gitignore
generated
vendored
Normal file
14
vendor/gopkg.in/DataDog/dd-trace-go.v1/.gitignore
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
# go
|
||||
bin/
|
||||
|
||||
# profiling
|
||||
*.test
|
||||
*.out
|
||||
|
||||
# generic
|
||||
.DS_Store
|
||||
*.cov
|
||||
*.lock
|
||||
*.swp
|
||||
|
||||
/contrib/google.golang.org/grpc.v12/vendor/
|
17
vendor/gopkg.in/DataDog/dd-trace-go.v1/CONTRIBUTING.md
generated
vendored
Normal file
17
vendor/gopkg.in/DataDog/dd-trace-go.v1/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
### Contributing
|
||||
|
||||
Pull requests for bug fixes are welcome, but before submitting new features or changes to current functionalities [open an issue](https://github.com/DataDog/dd-trace-go/issues/new)
|
||||
and discuss your ideas or propose the changes you wish to make. After a resolution is reached a PR can be submitted for review.
|
||||
|
||||
For commit messages, try to use the same conventions as most Go projects, for example:
|
||||
```
|
||||
contrib/database/sql: use method context on QueryContext and ExecContext
|
||||
|
||||
QueryContext and ExecContext were using the wrong context to create
|
||||
spans. Instead of using the method's argument they were using the
|
||||
Prepare context, which was wrong.
|
||||
|
||||
Fixes #113
|
||||
```
|
||||
Please apply the same logic for Pull Requests: start with the package name, followed by a colon and a description of the change, just like
|
||||
the official [Go language](https://github.com/golang/go/pulls).
|
24
vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE
generated
vendored
Normal file
24
vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
Copyright (c) 2016, Datadog <info@datadoghq.com>
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
* Neither the name of Datadog nor the
|
||||
names of its contributors may be used to endorse or promote products
|
||||
derived from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
|
||||
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
2
vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE-3rdparty.csv
generated
vendored
Normal file
2
vendor/gopkg.in/DataDog/dd-trace-go.v1/LICENSE-3rdparty.csv
generated
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
Component,Origin,License,Copyright
|
||||
import,io.opentracing,Apache-2.0,Copyright 2016-2017 The OpenTracing Authors
|
|
104
vendor/gopkg.in/DataDog/dd-trace-go.v1/MIGRATING.md
generated
vendored
Normal file
104
vendor/gopkg.in/DataDog/dd-trace-go.v1/MIGRATING.md
generated
vendored
Normal file
@ -0,0 +1,104 @@
|
||||
# Migration Guide
|
||||
|
||||
This document outlines migrating from an older version of the Datadog tracer (0.6.x) to v1.
|
||||
|
||||
Datadog's v1 version of the Go tracer provides not only an overhauled core that comes with huge performance improvements, but also the promise of a new and stable API to be relied on. It is the result of continuous feedback from customers, the community, as well as our extensive internal usage.
|
||||
|
||||
As is common and recommended in the Go community, the best way to approach migrating to this new API is by using the [gradual code repair](https://talks.golang.org/2016/refactor.article) method. We have done the same internally and it has worked just great! For this exact reason we have provided a new, [semver](https://semver.org/) friendly import path to help with using both tracers in parallel, without conflict, for the duration of the migration. This new path is `gopkg.in/DataDog/dd-trace-go.v1`.
|
||||
|
||||
Our [godoc page](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace) should prove helpful during this process. We also have the [official documentation](https://docs.datadoghq.com/tracing/setup/go/), which contains a couple of examples.
|
||||
|
||||
This document will further outline some _before_ and _after_ examples.
|
||||
|
||||
## Starting the tracer
|
||||
|
||||
The new tracer needs to be started before it can be used. A default started tracer is no longer available. The default tracer is now a no-op.
|
||||
|
||||
Here is an example of starting a custom tracer with a non-default agent endpoint using the old API:
|
||||
|
||||
```go
|
||||
t := tracer.NewTracerTransport(tracer.NewTransport("localhost", "8199"))
|
||||
t.SetDebugLogging(true)
|
||||
defer t.ForceFlush()
|
||||
```
|
||||
|
||||
This would now become:
|
||||
|
||||
```go
|
||||
tracer.Start(
|
||||
tracer.WithAgentAddr("localhost:8199"),
|
||||
tracer.WithDebugMode(true),
|
||||
)
|
||||
defer tracer.Stop()
|
||||
```
|
||||
|
||||
Notice that the tracer object is no longer returned. Consult the documentation to see [all possible parameters](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer#StartOption) to the `Start` call.
|
||||
|
||||
## Service Information
|
||||
|
||||
The [`tracer.SetServiceInfo`](https://godoc.org/github.com/DataDog/dd-trace-go/tracer#Tracer.SetServiceInfo) method has been deprecated. The service information is now set automatically based on the value of the [`ext.SpanType`](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext#SpanType) tag that was set on the root span of a trace.
|
||||
|
||||
## Spans
|
||||
|
||||
Starting spans is now possible with [functional options](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer#StartSpanOption), which means that all span properties (or none) can be set when starting a span dynamically. Before:
|
||||
|
||||
```go
|
||||
span := tracer.NewRootSpan("web.request", "my_service", "resource_name")
|
||||
```
|
||||
|
||||
Becomes:
|
||||
|
||||
```go
|
||||
span := tracer.StartSpan("web.request", tracer.ServiceName("my_service"), tracer.ResourceName("resource_name"))
|
||||
```
|
||||
|
||||
We've done this because in many cases the extra parameters could become tedious, given that service names can be inherited and resource names can default to the operation name. This also allows us to have one single, more dynamic API for starting both root and child spans. Check out all possible [StartSpanOption](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer#StartSpanOption) values to get an idea.
|
||||
|
||||
### Children
|
||||
|
||||
Here is an example for spawning a child of the previously declared span:
|
||||
```go
|
||||
child := tracer.StartSpan("process.user", tracer.ChildOf(span.Context()))
|
||||
```
|
||||
You will notice that the new tracer also introduces the concept of [SpanContext](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace#SpanContext), which is different from Go's context and is used to carry information needed to spawn children of a specific span and can be propagated cross-process. To learn more about distributed tracing check the package-level [documentation](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer#ChildOf) of the `tracer` package.
|
||||
|
||||
### Using Go's context
|
||||
|
||||
It is also possible to create children of spans that live inside Go's [context](https://golang.org/pkg/context/):
|
||||
```go
|
||||
child, ctx := tracer.StartSpanFromContext(ctx, "process.user", tracer.Tag("key", "value"))
|
||||
```
|
||||
This will create a child of the span which exists inside the passed context and return it, along with a new context which contains the new span. To add or retrieve a span from a context use the [`ContextWithSpan`](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer#ContextWithSpan) or [`SpanFromContext`](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer#SpanFromContext) functions.
|
||||
|
||||
### Setting errors
|
||||
|
||||
The [`SetError`](https://godoc.org/github.com/DataDog/dd-trace-go/tracer#Span.SetError) has been deprecated in favour of the [`ext.Error`](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext#Error) tag value which matches other tracing libraries in the wild. Whereas before we had:
|
||||
|
||||
```go
|
||||
span.SetError(err)
|
||||
```
|
||||
|
||||
Now we have:
|
||||
|
||||
```go
|
||||
span.SetTag(ext.Error, err)
|
||||
```
|
||||
|
||||
Note that this tag can accept values of type `error`, `string` and `bool` for setting errors.
|
||||
|
||||
### Finishing
|
||||
|
||||
The [`FinishWithErr`](https://godoc.org/github.com/DataDog/dd-trace-go/tracer#Span.FinishWithErr) and [`FinishWithTime`](https://godoc.org/github.com/DataDog/dd-trace-go/tracer#Span.FinishWithTime) methods have been removed in favour of a set of [`FinishOption`](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer#FinishOption). For example, this would now become:
|
||||
|
||||
```go
|
||||
span.Finish(tracer.WithError(err), tracer.FinishTime(t))
|
||||
```
|
||||
|
||||
Providing a `nil` value as an error is perfectly fine and will not mark the span as erroneous.
|
||||
|
||||
## Further reading
|
||||
|
||||
The new version of the tracer also comes with a lot of new features, such as support for distributed tracing and distributed sampling priority.
|
||||
|
||||
* package level documentation of the [`tracer` package](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer) for a better overview.
|
||||
* [official documentation](https://docs.datadoghq.com/tracing/setup/go/)
|
30
vendor/gopkg.in/DataDog/dd-trace-go.v1/README.md
generated
vendored
Normal file
30
vendor/gopkg.in/DataDog/dd-trace-go.v1/README.md
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
[![CircleCI](https://circleci.com/gh/DataDog/dd-trace-go/tree/v1.svg?style=svg)](https://circleci.com/gh/DataDog/dd-trace-go/tree/v1)
|
||||
[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace)
|
||||
|
||||
### Installing
|
||||
|
||||
```bash
|
||||
go get gopkg.in/DataDog/dd-trace-go.v1/ddtrace
|
||||
```
|
||||
|
||||
Requires:
|
||||
|
||||
* Go 1.9
|
||||
* Datadog's Trace Agent >= 5.21.1
|
||||
|
||||
### Documentation
|
||||
|
||||
The API is documented on [godoc](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/ddtrace) as well as Datadog's [official documentation](https://docs.datadoghq.com/tracing/setup/go/). If you are migrating
|
||||
from an older version of the tracer (e.g. 0.6.x) you may also find the [migration document](https://github.com/DataDog/dd-trace-go/blob/v1/MIGRATING.md) we've put together helpful.
|
||||
|
||||
### Testing
|
||||
|
||||
Tests can be run locally using the Go toolset. The grpc.v12 integration will fail (and this is normal), because it covers for deprecated methods. In the CI environment
|
||||
we vendor this version of the library inside the integration. Under normal circumstances this is not something that we want to do, because users using this integration
|
||||
might be running versions different from the vendored one, creating hard to debug conflicts.
|
||||
|
||||
To run integration tests locally, you should set the `INTEGRATION` environment variable. The dependencies of the integration tests are best run via Docker. To get an
|
||||
idea about the versions and the set-up take a look at our [CI config](https://github.com/DataDog/dd-trace-go/blob/v1/.circleci/config.yml).
|
||||
|
||||
The best way to run the entire test suite is using the [CircleCI CLI](https://circleci.com/docs/2.0/local-jobs/). Simply run `circleci build`
|
||||
in the repository root. Note that you might have to increase the resources dedicated to Docker to around 4GB.
|
19
vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/README.md
generated
vendored
Normal file
19
vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/README.md
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/contrib)
|
||||
|
||||
The purpose of these packages is to provide tracing on top of commonly used packages from the standard library as well as the
|
||||
community in a "plug-and-play" manner. This means that by simply importing the appropriate path, functions are exposed having
|
||||
the same signature as the original package. These functions return structures which embed the original return value, allowing
|
||||
them to be used as they normally would with tracing activated out of the box.
|
||||
|
||||
All of these libraries are supported by our [APM product](https://www.datadoghq.com/apm/).
|
||||
|
||||
### Usage
|
||||
|
||||
First, find the library which you'd like to integrate with. The naming convention for the integration packages is:
|
||||
|
||||
* If the package is from the standard library (eg. `database/sql`), it will be located at the same path.
|
||||
* If the package is hosted on Github (eg. `github.com/user/repo`), it will be located at the shorthand path `user/repo`.
|
||||
* If the package is from anywhere else (eg. `google.golang.org/grpc`), it can be found under the full import path.
|
||||
|
||||
Each integration comes with thorough documentation and usage examples. A good overview can be seen on our
|
||||
[godoc](https://godoc.org/gopkg.in/DataDog/dd-trace-go.v1/contrib) page.
|
99
vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/aws.go
generated
vendored
Normal file
99
vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/aws.go
generated
vendored
Normal file
@ -0,0 +1,99 @@
|
||||
// Package aws provides functions to trace aws/aws-sdk-go (https://github.com/aws/aws-sdk-go).
|
||||
package aws // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws"
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
|
||||
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
|
||||
)
|
||||
|
||||
const (
|
||||
tagAWSAgent = "aws.agent"
|
||||
tagAWSOperation = "aws.operation"
|
||||
tagAWSRegion = "aws.region"
|
||||
)
|
||||
|
||||
type handlers struct {
|
||||
cfg *config
|
||||
}
|
||||
|
||||
// WrapSession wraps a session.Session, causing requests and responses to be traced.
|
||||
func WrapSession(s *session.Session, opts ...Option) *session.Session {
|
||||
cfg := new(config)
|
||||
for _, opt := range opts {
|
||||
opt(cfg)
|
||||
}
|
||||
h := &handlers{cfg: cfg}
|
||||
s = s.Copy()
|
||||
s.Handlers.Send.PushFrontNamed(request.NamedHandler{
|
||||
Name: "gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/handlers.Send",
|
||||
Fn: h.Send,
|
||||
})
|
||||
s.Handlers.Complete.PushBackNamed(request.NamedHandler{
|
||||
Name: "gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/handlers.Complete",
|
||||
Fn: h.Complete,
|
||||
})
|
||||
return s
|
||||
}
|
||||
|
||||
func (h *handlers) Send(req *request.Request) {
|
||||
_, ctx := tracer.StartSpanFromContext(req.Context(), h.operationName(req),
|
||||
tracer.SpanType(ext.SpanTypeHTTP),
|
||||
tracer.ServiceName(h.serviceName(req)),
|
||||
tracer.ResourceName(h.resourceName(req)),
|
||||
tracer.Tag(tagAWSAgent, h.awsAgent(req)),
|
||||
tracer.Tag(tagAWSOperation, h.awsOperation(req)),
|
||||
tracer.Tag(tagAWSRegion, h.awsRegion(req)),
|
||||
tracer.Tag(ext.HTTPMethod, req.Operation.HTTPMethod),
|
||||
tracer.Tag(ext.HTTPURL, req.HTTPRequest.URL.String()),
|
||||
)
|
||||
req.SetContext(ctx)
|
||||
}
|
||||
|
||||
func (h *handlers) Complete(req *request.Request) {
|
||||
span, ok := tracer.SpanFromContext(req.Context())
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if req.HTTPResponse != nil {
|
||||
span.SetTag(ext.HTTPCode, strconv.Itoa(req.HTTPResponse.StatusCode))
|
||||
}
|
||||
span.Finish(tracer.WithError(req.Error))
|
||||
}
|
||||
|
||||
func (h *handlers) operationName(req *request.Request) string {
|
||||
return h.awsService(req) + ".command"
|
||||
}
|
||||
|
||||
func (h *handlers) resourceName(req *request.Request) string {
|
||||
return h.awsService(req) + "." + req.Operation.Name
|
||||
}
|
||||
|
||||
func (h *handlers) serviceName(req *request.Request) string {
|
||||
if h.cfg.serviceName != "" {
|
||||
return h.cfg.serviceName
|
||||
}
|
||||
return "aws." + h.awsService(req)
|
||||
}
|
||||
|
||||
func (h *handlers) awsAgent(req *request.Request) string {
|
||||
if agent := req.HTTPRequest.Header.Get("User-Agent"); agent != "" {
|
||||
return agent
|
||||
}
|
||||
return "aws-sdk-go"
|
||||
}
|
||||
|
||||
func (h *handlers) awsOperation(req *request.Request) string {
|
||||
return req.Operation.Name
|
||||
}
|
||||
|
||||
func (h *handlers) awsRegion(req *request.Request) string {
|
||||
return req.ClientInfo.SigningRegion
|
||||
}
|
||||
|
||||
func (h *handlers) awsService(req *request.Request) string {
|
||||
return req.ClientInfo.ServiceName
|
||||
}
|
77
vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/aws_test.go
generated
vendored
Normal file
77
vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/aws_test.go
generated
vendored
Normal file
@ -0,0 +1,77 @@
|
||||
package aws
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
|
||||
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
|
||||
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
|
||||
)
|
||||
|
||||
func TestAWS(t *testing.T) {
|
||||
cfg := aws.NewConfig().
|
||||
WithRegion("us-west-2").
|
||||
WithDisableSSL(true).
|
||||
WithCredentials(credentials.AnonymousCredentials)
|
||||
|
||||
session := WrapSession(session.Must(session.NewSession(cfg)))
|
||||
|
||||
t.Run("s3", func(t *testing.T) {
|
||||
mt := mocktracer.Start()
|
||||
defer mt.Stop()
|
||||
|
||||
root, ctx := tracer.StartSpanFromContext(context.Background(), "test")
|
||||
s3api := s3.New(session)
|
||||
s3api.CreateBucketWithContext(ctx, &s3.CreateBucketInput{
|
||||
Bucket: aws.String("BUCKET"),
|
||||
})
|
||||
root.Finish()
|
||||
|
||||
spans := mt.FinishedSpans()
|
||||
assert.Len(t, spans, 2)
|
||||
assert.Equal(t, spans[1].TraceID(), spans[0].TraceID())
|
||||
|
||||
s := spans[0]
|
||||
assert.Equal(t, "s3.command", s.OperationName())
|
||||
assert.Contains(t, s.Tag(tagAWSAgent), "aws-sdk-go")
|
||||
assert.Equal(t, "CreateBucket", s.Tag(tagAWSOperation))
|
||||
assert.Equal(t, "us-west-2", s.Tag(tagAWSRegion))
|
||||
assert.Equal(t, "s3.CreateBucket", s.Tag(ext.ResourceName))
|
||||
assert.Equal(t, "aws.s3", s.Tag(ext.ServiceName))
|
||||
assert.Equal(t, "403", s.Tag(ext.HTTPCode))
|
||||
assert.Equal(t, "PUT", s.Tag(ext.HTTPMethod))
|
||||
assert.Equal(t, "http://s3.us-west-2.amazonaws.com/BUCKET", s.Tag(ext.HTTPURL))
|
||||
})
|
||||
|
||||
t.Run("ec2", func(t *testing.T) {
|
||||
mt := mocktracer.Start()
|
||||
defer mt.Stop()
|
||||
|
||||
root, ctx := tracer.StartSpanFromContext(context.Background(), "test")
|
||||
ec2api := ec2.New(session)
|
||||
ec2api.DescribeInstancesWithContext(ctx, &ec2.DescribeInstancesInput{})
|
||||
root.Finish()
|
||||
|
||||
spans := mt.FinishedSpans()
|
||||
assert.Len(t, spans, 2)
|
||||
assert.Equal(t, spans[1].TraceID(), spans[0].TraceID())
|
||||
|
||||
s := spans[0]
|
||||
assert.Equal(t, "ec2.command", s.OperationName())
|
||||
assert.Contains(t, s.Tag(tagAWSAgent), "aws-sdk-go")
|
||||
assert.Equal(t, "DescribeInstances", s.Tag(tagAWSOperation))
|
||||
assert.Equal(t, "us-west-2", s.Tag(tagAWSRegion))
|
||||
assert.Equal(t, "ec2.DescribeInstances", s.Tag(ext.ResourceName))
|
||||
assert.Equal(t, "aws.ec2", s.Tag(ext.ServiceName))
|
||||
assert.Equal(t, "400", s.Tag(ext.HTTPCode))
|
||||
assert.Equal(t, "POST", s.Tag(ext.HTTPMethod))
|
||||
assert.Equal(t, "http://ec2.us-west-2.amazonaws.com/", s.Tag(ext.HTTPURL))
|
||||
})
|
||||
}
|
21
vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/example_test.go
generated
vendored
Normal file
21
vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/example_test.go
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
package aws_test
|
||||
|
||||
import (
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
awstrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws"
|
||||
)
|
||||
|
||||
// To start tracing requests, wrap the AWS session.Session by invoking
|
||||
// awstrace.WrapSession.
|
||||
func Example() {
|
||||
cfg := aws.NewConfig().WithRegion("us-west-2")
|
||||
sess := session.Must(session.NewSession(cfg))
|
||||
sess = awstrace.WrapSession(sess)
|
||||
|
||||
s3api := s3.New(sess)
|
||||
s3api.CreateBucket(&s3.CreateBucketInput{
|
||||
Bucket: aws.String("some-bucket-name"),
|
||||
})
|
||||
}
|
17
vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/option.go
generated
vendored
Normal file
17
vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/aws/aws-sdk-go/aws/option.go
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
package aws
|
||||
|
||||
// config holds the settings applied to the integration.
type config struct {
	// serviceName is the service under which spans are reported.
	serviceName string
}

// Option represents an option that can be passed to WrapSession.
type Option func(*config)

// WithServiceName sets the given service name for the wrapped session.
// When the service name is not explicitly set it will be inferred based on the
// request to AWS.
func WithServiceName(name string) Option {
	return func(cfg *config) {
		cfg.serviceName = name
	}
}
|
22
vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache/example_test.go
generated
vendored
Normal file
22
vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache/example_test.go
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
package memcache_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/bradfitz/gomemcache/memcache"
|
||||
memcachetrace "gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache"
|
||||
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
|
||||
)
|
||||
|
||||
func Example() {
|
||||
span, ctx := tracer.StartSpanFromContext(context.Background(), "parent.request",
|
||||
tracer.ServiceName("web"),
|
||||
tracer.ResourceName("/home"),
|
||||
)
|
||||
defer span.Finish()
|
||||
|
||||
mc := memcachetrace.WrapClient(memcache.New("127.0.0.1:11211"))
|
||||
// you can use WithContext to set the parent span
|
||||
mc.WithContext(ctx).Set(&memcache.Item{Key: "my key", Value: []byte("my value")})
|
||||
|
||||
}
|
162
vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache/memcache.go
generated
vendored
Normal file
162
vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache/memcache.go
generated
vendored
Normal file
@ -0,0 +1,162 @@
|
||||
// Package memcache provides functions to trace the bradfitz/gomemcache package (https://github.com/bradfitz/gomemcache).
|
||||
//
|
||||
// `WrapClient` will wrap a memcache `Client` and return a new struct with all
|
||||
// the same methods, so should be seamless for existing applications. It also
|
||||
// has an additional `WithContext` method which can be used to connect a span
|
||||
// to an existing trace.
|
||||
package memcache // import "gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache"
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/bradfitz/gomemcache/memcache"
|
||||
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace"
|
||||
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
|
||||
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
|
||||
)
|
||||
|
||||
// WrapClient wraps a memcache.Client so that all requests are traced using the
|
||||
// default tracer with the service name "memcached".
|
||||
func WrapClient(client *memcache.Client, opts ...ClientOption) *Client {
|
||||
cfg := new(clientConfig)
|
||||
defaults(cfg)
|
||||
for _, opt := range opts {
|
||||
opt(cfg)
|
||||
}
|
||||
return &Client{
|
||||
Client: client,
|
||||
cfg: cfg,
|
||||
context: context.Background(),
|
||||
}
|
||||
}
|
||||
|
||||
// A Client is used to trace requests to the memcached server.
|
||||
type Client struct {
|
||||
*memcache.Client
|
||||
cfg *clientConfig
|
||||
context context.Context
|
||||
}
|
||||
|
||||
// WithContext creates a copy of the Client with the given context.
|
||||
func (c *Client) WithContext(ctx context.Context) *Client {
|
||||
// the existing memcache client doesn't support context, but may in the
|
||||
// future, so we do a runtime check to detect this
|
||||
mc := c.Client
|
||||
if wc, ok := (interface{})(c.Client).(interface {
|
||||
WithContext(context.Context) *memcache.Client
|
||||
}); ok {
|
||||
mc = wc.WithContext(ctx)
|
||||
}
|
||||
return &Client{
|
||||
Client: mc,
|
||||
cfg: c.cfg,
|
||||
context: ctx,
|
||||
}
|
||||
}
|
||||
|
||||
// startSpan starts a span from the context set with WithContext.
|
||||
func (c *Client) startSpan(resourceName string) ddtrace.Span {
|
||||
span, _ := tracer.StartSpanFromContext(c.context, operationName,
|
||||
tracer.SpanType(ext.SpanTypeMemcached),
|
||||
tracer.ServiceName(c.cfg.serviceName),
|
||||
tracer.ResourceName(resourceName))
|
||||
return span
|
||||
}
|
||||
|
||||
// wrapped methods:
|
||||
|
||||
// Add invokes and traces Client.Add.
|
||||
func (c *Client) Add(item *memcache.Item) error {
|
||||
span := c.startSpan("Add")
|
||||
err := c.Client.Add(item)
|
||||
span.Finish(tracer.WithError(err))
|
||||
return err
|
||||
}
|
||||
|
||||
// CompareAndSwap invokes and traces Client.CompareAndSwap.
|
||||
func (c *Client) CompareAndSwap(item *memcache.Item) error {
|
||||
span := c.startSpan("CompareAndSwap")
|
||||
err := c.Client.CompareAndSwap(item)
|
||||
span.Finish(tracer.WithError(err))
|
||||
return err
|
||||
}
|
||||
|
||||
// Decrement invokes and traces Client.Decrement.
|
||||
func (c *Client) Decrement(key string, delta uint64) (newValue uint64, err error) {
|
||||
span := c.startSpan("Decrement")
|
||||
newValue, err = c.Client.Decrement(key, delta)
|
||||
span.Finish(tracer.WithError(err))
|
||||
return newValue, err
|
||||
}
|
||||
|
||||
// Delete invokes and traces Client.Delete.
|
||||
func (c *Client) Delete(key string) error {
|
||||
span := c.startSpan("Delete")
|
||||
err := c.Client.Delete(key)
|
||||
span.Finish(tracer.WithError(err))
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteAll invokes and traces Client.DeleteAll.
|
||||
func (c *Client) DeleteAll() error {
|
||||
span := c.startSpan("DeleteAll")
|
||||
err := c.Client.DeleteAll()
|
||||
span.Finish(tracer.WithError(err))
|
||||
return err
|
||||
}
|
||||
|
||||
// FlushAll invokes and traces Client.FlushAll.
|
||||
func (c *Client) FlushAll() error {
|
||||
span := c.startSpan("FlushAll")
|
||||
err := c.Client.FlushAll()
|
||||
span.Finish(tracer.WithError(err))
|
||||
return err
|
||||
}
|
||||
|
||||
// Get invokes and traces Client.Get.
|
||||
func (c *Client) Get(key string) (item *memcache.Item, err error) {
|
||||
span := c.startSpan("Get")
|
||||
item, err = c.Client.Get(key)
|
||||
span.Finish(tracer.WithError(err))
|
||||
return item, err
|
||||
}
|
||||
|
||||
// GetMulti invokes and traces Client.GetMulti.
|
||||
func (c *Client) GetMulti(keys []string) (map[string]*memcache.Item, error) {
|
||||
span := c.startSpan("GetMulti")
|
||||
items, err := c.Client.GetMulti(keys)
|
||||
span.Finish(tracer.WithError(err))
|
||||
return items, err
|
||||
}
|
||||
|
||||
// Increment invokes and traces Client.Increment.
|
||||
func (c *Client) Increment(key string, delta uint64) (newValue uint64, err error) {
|
||||
span := c.startSpan("Increment")
|
||||
newValue, err = c.Client.Increment(key, delta)
|
||||
span.Finish(tracer.WithError(err))
|
||||
return newValue, err
|
||||
}
|
||||
|
||||
// Replace invokes and traces Client.Replace.
|
||||
func (c *Client) Replace(item *memcache.Item) error {
|
||||
span := c.startSpan("Replace")
|
||||
err := c.Client.Replace(item)
|
||||
span.Finish(tracer.WithError(err))
|
||||
return err
|
||||
}
|
||||
|
||||
// Set invokes and traces Client.Set.
|
||||
func (c *Client) Set(item *memcache.Item) error {
|
||||
span := c.startSpan("Set")
|
||||
err := c.Client.Set(item)
|
||||
span.Finish(tracer.WithError(err))
|
||||
return err
|
||||
}
|
||||
|
||||
// Touch invokes and traces Client.Touch.
|
||||
func (c *Client) Touch(key string, seconds int32) error {
|
||||
span := c.startSpan("Touch")
|
||||
err := c.Client.Touch(key, seconds)
|
||||
span.Finish(tracer.WithError(err))
|
||||
return err
|
||||
}
|
146
vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache/memcache_test.go
generated
vendored
Normal file
146
vendor/gopkg.in/DataDog/dd-trace-go.v1/contrib/bradfitz/gomemcache/memcache/memcache_test.go
generated
vendored
Normal file
@ -0,0 +1,146 @@
|
||||
package memcache
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/bradfitz/gomemcache/memcache"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext"
|
||||
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/mocktracer"
|
||||
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
|
||||
)
|
||||
|
||||
func TestMemcache(t *testing.T) {
|
||||
li := makeFakeServer(t)
|
||||
defer li.Close()
|
||||
|
||||
testMemcache(t, li.Addr().String())
|
||||
}
|
||||
|
||||
func TestMemcacheIntegration(t *testing.T) {
|
||||
if _, ok := os.LookupEnv("INTEGRATION"); !ok {
|
||||
t.Skip("to enable integration test, set the INTEGRATION environment variable")
|
||||
}
|
||||
|
||||
testMemcache(t, "localhost:11211")
|
||||
}
|
||||
|
||||
func testMemcache(t *testing.T, addr string) {
|
||||
client := WrapClient(memcache.New(addr), WithServiceName("test-memcache"))
|
||||
defer client.DeleteAll()
|
||||
|
||||
validateMemcacheSpan := func(t *testing.T, span mocktracer.Span, resourceName string) {
|
||||
assert.Equal(t, "test-memcache", span.Tag(ext.ServiceName),
|
||||
"service name should be set to test-memcache")
|
||||
assert.Equal(t, "memcached.query", span.OperationName(),
|
||||
"operation name should be set to memcached.query")
|
||||
assert.Equal(t, resourceName, span.Tag(ext.ResourceName),
|
||||
"resource name should be set to the memcache command")
|
||||
}
|
||||
|
||||
t.Run("traces without context", func(t *testing.T) {
|
||||
mt := mocktracer.Start()
|
||||
defer mt.Stop()
|
||||
|
||||
err := client.
|
||||
Add(&memcache.Item{
|
||||
Key: "key1",
|
||||
Value: []byte("value1"),
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
|
||||
spans := mt.FinishedSpans()
|
||||
assert.Len(t, spans, 1)
|
||||
validateMemcacheSpan(t, spans[0], "Add")
|
||||
})
|
||||
|
||||
t.Run("traces with context", func(t *testing.T) {
|
||||
mt := mocktracer.Start()
|
||||
defer mt.Stop()
|
||||
|
||||
ctx := context.Background()
|
||||
span, ctx := tracer.StartSpanFromContext(ctx, "parent")
|
||||
|
||||
err := client.
|
||||
WithContext(ctx).
|
||||
Add(&memcache.Item{
|
||||
Key: "key2",
|
||||
Value: []byte("value2"),
|
||||
})
|
||||
assert.Nil(t, err)
|
||||
|
||||
span.Finish()
|
||||
|
||||
spans := mt.FinishedSpans()
|
||||
assert.Len(t, spans, 2)
|
||||
validateMemcacheSpan(t, spans[0], "Add")
|
||||
assert.Equal(t, span, spans[1])
|
||||
assert.Equal(t, spans[1].TraceID(), spans[0].TraceID(),
|
||||
"memcache span should be part of the parent trace")
|
||||
})
|
||||
}
|
||||
|
||||
func TestFakeServer(t *testing.T) {
|
||||
li := makeFakeServer(t)
|
||||
defer li.Close()
|
||||
|
||||
conn, err := net.Dial("tcp", li.Addr().String())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
fmt.Fprintf(conn, "add %s\r\n%s\r\n", "key", "value")
|
||||
s := bufio.NewScanner(conn)
|
||||
assert.True(t, s.Scan())
|
||||
assert.Equal(t, "STORED", s.Text())
|
||||
}
|
||||
|
||||
func makeFakeServer(t *testing.T) net.Listener {
|
||||
li, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
for {
|
||||
c, err := li.Accept()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
go func() {
|
||||
defer c.Close()
|
||||
|
||||
// the memcache textual protocol is line-oriented with each
|
||||
// command being space separated:
|
||||
//
|
||||
// command1 arg1 arg2
|
||||
// command2 arg1 arg2
|
||||
// ...
|
||||
//
|
||||
s := bufio.NewScanner(c)
|
||||
for s.Scan() {
|
||||
args := strings.Split(s.Text(), " ")
|
||||
switch args[0] {
|
||||
case "add":
|
||||
if !s.Scan() {
|
||||
return
|
||||
}
|
||||
fmt.Fprintf(c, "STORED\r\n")
|
||||
default:
|
||||
fmt.Fprintf(c, "SERVER ERROR unknown command: %v \r\n", args[0])
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
}()
|
||||
|
||||
return li
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user